Diffstat (limited to 'src/vppinfra')
-rw-r--r-- src/vppinfra/README | 43
-rw-r--r-- src/vppinfra/anneal.c | 172
-rw-r--r-- src/vppinfra/anneal.h | 89
-rw-r--r-- src/vppinfra/asm_mips.h | 351
-rw-r--r-- src/vppinfra/asm_x86.c | 1947
-rw-r--r-- src/vppinfra/asm_x86.h | 125
-rw-r--r-- src/vppinfra/backtrace.c | 267
-rw-r--r-- src/vppinfra/bihash_16_8.h | 84
-rw-r--r-- src/vppinfra/bihash_24_8.h | 85
-rw-r--r-- src/vppinfra/bihash_40_8.h | 87
-rw-r--r-- src/vppinfra/bihash_48_8.h | 89
-rw-r--r-- src/vppinfra/bihash_8_8.h | 99
-rw-r--r-- src/vppinfra/bihash_doc.h | 149
-rw-r--r-- src/vppinfra/bihash_template.c | 624
-rw-r--r-- src/vppinfra/bihash_template.h | 419
-rw-r--r-- src/vppinfra/bitmap.h | 774
-rw-r--r-- src/vppinfra/bitops.h | 179
-rw-r--r-- src/vppinfra/byte_order.h | 202
-rw-r--r-- src/vppinfra/cache.h | 104
-rw-r--r-- src/vppinfra/clib.h | 365
-rw-r--r-- src/vppinfra/clib_error.h | 35
-rw-r--r-- src/vppinfra/cpu.c | 133
-rw-r--r-- src/vppinfra/cpu.h | 112
-rw-r--r-- src/vppinfra/crc32.h | 84
-rw-r--r-- src/vppinfra/dir.dox | 19
-rw-r--r-- src/vppinfra/dlist.h | 156
-rw-r--r-- src/vppinfra/elf.c | 2040
-rw-r--r-- src/vppinfra/elf.h | 1062
-rw-r--r-- src/vppinfra/elf_clib.c | 377
-rw-r--r-- src/vppinfra/elf_clib.h | 144
-rw-r--r-- src/vppinfra/elog.c | 1113
-rw-r--r-- src/vppinfra/elog.h | 567
-rw-r--r-- src/vppinfra/error.c | 292
-rw-r--r-- src/vppinfra/error.h | 199
-rw-r--r-- src/vppinfra/error_bootstrap.h | 106
-rw-r--r-- src/vppinfra/fheap.c | 473
-rw-r--r-- src/vppinfra/fheap.h | 140
-rw-r--r-- src/vppinfra/fifo.c | 137
-rw-r--r-- src/vppinfra/fifo.h | 304
-rw-r--r-- src/vppinfra/file.h | 134
-rw-r--r-- src/vppinfra/format.c | 819
-rw-r--r-- src/vppinfra/format.h | 334
-rw-r--r-- src/vppinfra/graph.c | 182
-rw-r--r-- src/vppinfra/graph.h | 127
-rw-r--r-- src/vppinfra/hash.c | 1095
-rw-r--r-- src/vppinfra/hash.h | 694
-rw-r--r-- src/vppinfra/heap.c | 828
-rw-r--r-- src/vppinfra/heap.h | 357
-rw-r--r-- src/vppinfra/linux/mem.c | 266
-rw-r--r-- src/vppinfra/linux/syscall.h | 56
-rw-r--r-- src/vppinfra/linux/sysfs.c | 250
-rw-r--r-- src/vppinfra/linux/sysfs.h | 46
-rw-r--r-- src/vppinfra/lock.h | 99
-rw-r--r-- src/vppinfra/longjmp.S | 690
-rw-r--r-- src/vppinfra/longjmp.h | 124
-rw-r--r-- src/vppinfra/macros.c | 266
-rw-r--r-- src/vppinfra/macros.h | 54
-rw-r--r-- src/vppinfra/math.h | 71
-rw-r--r-- src/vppinfra/md5.c | 317
-rw-r--r-- src/vppinfra/md5.h | 57
-rw-r--r-- src/vppinfra/mem.h | 365
-rw-r--r-- src/vppinfra/mem_mheap.c | 165
-rw-r--r-- src/vppinfra/memcheck.h | 317
-rw-r--r-- src/vppinfra/memcpy_avx.h | 296
-rw-r--r-- src/vppinfra/memcpy_sse3.h | 356
-rw-r--r-- src/vppinfra/mhash.c | 408
-rw-r--r-- src/vppinfra/mhash.h | 179
-rw-r--r-- src/vppinfra/mheap.c | 1643
-rw-r--r-- src/vppinfra/mheap.h | 94
-rw-r--r-- src/vppinfra/mheap_bootstrap.h | 374
-rw-r--r-- src/vppinfra/mod_test_hash.c | 27
-rw-r--r-- src/vppinfra/os.h | 88
-rw-r--r-- src/vppinfra/pfhash.c | 689
-rw-r--r-- src/vppinfra/pfhash.h | 276
-rw-r--r-- src/vppinfra/phash.c | 1017
-rw-r--r-- src/vppinfra/phash.h | 194
-rw-r--r-- src/vppinfra/pipeline.h | 176
-rw-r--r-- src/vppinfra/pool.c | 131
-rw-r--r-- src/vppinfra/pool.h | 519
-rw-r--r-- src/vppinfra/ptclosure.c | 125
-rw-r--r-- src/vppinfra/ptclosure.h | 40
-rw-r--r-- src/vppinfra/qhash.c | 858
-rw-r--r-- src/vppinfra/qhash.h | 169
-rw-r--r-- src/vppinfra/qsort.c | 269
-rw-r--r-- src/vppinfra/random.c | 51
-rw-r--r-- src/vppinfra/random.h | 178
-rw-r--r-- src/vppinfra/random_buffer.c | 86
-rw-r--r-- src/vppinfra/random_buffer.h | 118
-rw-r--r-- src/vppinfra/random_isaac.c | 434
-rw-r--r-- src/vppinfra/random_isaac.h | 81
-rw-r--r-- src/vppinfra/serialize.c | 1254
-rw-r--r-- src/vppinfra/serialize.h | 443
-rw-r--r-- src/vppinfra/slist.c | 336
-rw-r--r-- src/vppinfra/slist.h | 145
-rw-r--r-- src/vppinfra/smp.c | 325
-rw-r--r-- src/vppinfra/smp.h | 81
-rw-r--r-- src/vppinfra/smp_fifo.c | 91
-rw-r--r-- src/vppinfra/smp_fifo.h | 313
-rw-r--r-- src/vppinfra/socket.c | 559
-rw-r--r-- src/vppinfra/socket.h | 192
-rw-r--r-- src/vppinfra/sparse_vec.h | 244
-rw-r--r-- src/vppinfra/std-formats.c | 330
-rw-r--r-- src/vppinfra/string.c | 94
-rw-r--r-- src/vppinfra/string.h | 83
-rw-r--r-- src/vppinfra/test_bihash_template.c | 369
-rw-r--r-- src/vppinfra/test_dlist.c | 193
-rw-r--r-- src/vppinfra/test_elf.c | 217
-rw-r--r-- src/vppinfra/test_elog.c | 315
-rw-r--r-- src/vppinfra/test_fifo.c | 144
-rw-r--r-- src/vppinfra/test_format.c | 199
-rw-r--r-- src/vppinfra/test_fpool.c | 69
-rw-r--r-- src/vppinfra/test_hash.c | 458
-rw-r--r-- src/vppinfra/test_heap.c | 198
-rw-r--r-- src/vppinfra/test_longjmp.c | 129
-rw-r--r-- src/vppinfra/test_macros.c | 64
-rw-r--r-- src/vppinfra/test_md5.c | 141
-rw-r--r-- src/vppinfra/test_mheap.c | 242
-rw-r--r-- src/vppinfra/test_pfhash.c | 322
-rw-r--r-- src/vppinfra/test_phash.c | 149
-rw-r--r-- src/vppinfra/test_pool.c | 86
-rw-r--r-- src/vppinfra/test_pool_iterate.c | 59
-rw-r--r-- src/vppinfra/test_ptclosure.c | 212
-rw-r--r-- src/vppinfra/test_qhash.c | 333
-rw-r--r-- src/vppinfra/test_random.c | 148
-rw-r--r-- src/vppinfra/test_random_isaac.c | 142
-rw-r--r-- src/vppinfra/test_serialize.c | 274
-rw-r--r-- src/vppinfra/test_slist.c | 228
-rw-r--r-- src/vppinfra/test_socket.c | 134
-rw-r--r-- src/vppinfra/test_time.c | 104
-rw-r--r-- src/vppinfra/test_timing_wheel.c | 389
-rw-r--r-- src/vppinfra/test_tw_timer.c | 1275
-rw-r--r-- src/vppinfra/test_vec.c | 1159
-rw-r--r-- src/vppinfra/test_vec.h | 243
-rw-r--r-- src/vppinfra/test_vhash.c | 757
-rw-r--r-- src/vppinfra/test_zvec.c | 117
-rw-r--r-- src/vppinfra/time.c | 232
-rw-r--r-- src/vppinfra/time.h | 312
-rw-r--r-- src/vppinfra/timer.c | 322
-rw-r--r-- src/vppinfra/timer.h | 46
-rw-r--r-- src/vppinfra/timing_wheel.c | 759
-rw-r--r-- src/vppinfra/timing_wheel.h | 155
-rw-r--r-- src/vppinfra/tw_timer_16t_1w_2048sl.c | 26
-rw-r--r-- src/vppinfra/tw_timer_16t_1w_2048sl.h | 52
-rw-r--r-- src/vppinfra/tw_timer_16t_2w_512sl.c | 26
-rw-r--r-- src/vppinfra/tw_timer_16t_2w_512sl.h | 52
-rw-r--r-- src/vppinfra/tw_timer_1t_3w_1024sl_ov.c | 26
-rw-r--r-- src/vppinfra/tw_timer_1t_3w_1024sl_ov.h | 53
-rw-r--r-- src/vppinfra/tw_timer_2t_1w_2048sl.c | 26
-rw-r--r-- src/vppinfra/tw_timer_2t_1w_2048sl.h | 52
-rw-r--r-- src/vppinfra/tw_timer_4t_3w_256sl.c | 26
-rw-r--r-- src/vppinfra/tw_timer_4t_3w_256sl.h | 52
-rw-r--r-- src/vppinfra/tw_timer_4t_3w_4sl_ov.c | 32
-rw-r--r-- src/vppinfra/tw_timer_4t_3w_4sl_ov.h | 53
-rw-r--r-- src/vppinfra/tw_timer_template.c | 832
-rw-r--r-- src/vppinfra/tw_timer_template.h | 267
-rw-r--r-- src/vppinfra/types.h | 174
-rw-r--r-- src/vppinfra/unformat.c | 1083
-rw-r--r-- src/vppinfra/unix-formats.c | 956
-rw-r--r-- src/vppinfra/unix-kelog.c | 415
-rw-r--r-- src/vppinfra/unix-misc.c | 237
-rw-r--r-- src/vppinfra/unix.h | 64
-rw-r--r-- src/vppinfra/unix_error.def | 145
-rw-r--r-- src/vppinfra/valgrind.h | 4030
-rw-r--r-- src/vppinfra/vec.c | 171
-rw-r--r-- src/vppinfra/vec.h | 1009
-rw-r--r-- src/vppinfra/vec_bootstrap.h | 201
-rw-r--r-- src/vppinfra/vector.c | 54
-rw-r--r-- src/vppinfra/vector.h | 268
-rw-r--r-- src/vppinfra/vector_altivec.h | 178
-rw-r--r-- src/vppinfra/vector_funcs.h | 334
-rw-r--r-- src/vppinfra/vector_iwmmxt.h | 149
-rw-r--r-- src/vppinfra/vector_neon.h | 71
-rw-r--r-- src/vppinfra/vector_sse2.h | 705
-rw-r--r-- src/vppinfra/vhash.c | 772
-rw-r--r-- src/vppinfra/vhash.h | 850
-rw-r--r-- src/vppinfra/xxhash.h | 86
-rw-r--r-- src/vppinfra/xy.h | 56
-rw-r--r-- src/vppinfra/zvec.c | 442
-rw-r--r-- src/vppinfra/zvec.h | 166
179 files changed, 59712 insertions, 0 deletions
diff --git a/src/vppinfra/README b/src/vppinfra/README
new file mode 100644
index 00000000..579696b6
--- /dev/null
+++ b/src/vppinfra/README
@@ -0,0 +1,43 @@
+Welcome to vppinfra, a programming library of basic data structures.
+
+vec.c dynamic vectors
+bitmap.h dynamic bitmaps
+heap.c allocation heap of objects (sub-objects have variable size)
+pool.h allocation pool (like heap with size always 1)
+hash.c dynamic hash tables
+mheap.c memory allocator (a la dlmalloc)
+
+format.c extensible printf-like formatting built on top of vectors
+std-formats.c formats for unix data structures, networking stuff, ...
+timer.c arranges for functions to be called at given times
+
+
+Build, Test, Install, Use...
+----------------------------
+ If this package came from the distribution tarball, skip to the
+ Build section. If this was a Gentoo ebuild, after emerge/ebuild,
+ skip to the Use section; otherwise, start with Pre-Build.
+
+Pre-Build
+-----------
+ 1) svn checkout svn://teaktechnologies.com/fn/trunk/clib clib
+ 2) autoreconf [-v][-f][-i] # regenerate configuration files
+
+Build
+-----
+ 1) cd BUILD # which may be different than this SRC dir
+ 2) ${SRC}/configure [--host=CHOST]
+ 3) make
+
+Test
+----
+ If not cross-compiling (i.e. CBUILD == CHOST), use "make check" to
+ run the validation programs.
+
+Install
+-------
+ With the root effective user ID (i.e. su or sudo), run "make install".
+
+Use
+---
+ We need to reference man pages and theory of operation.
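As a quick taste of the library, here is a minimal sketch (not part of this
tree) of a standalone program using the vec and format facilities listed
above; the heap size handed to clib_mem_init is an arbitrary choice for
illustration.

#include <vppinfra/mem.h>
#include <vppinfra/vec.h>
#include <vppinfra/format.h>

int
main (void)
{
  u32 *v = 0;			/* vppinfra vectors start life as null pointers */
  int i;

  /* Give clib a heap to allocate from. */
  clib_mem_init (0, 64ULL << 20);

  for (i = 0; i < 10; i++)
    vec_add1 (v, i * i);	/* append one element, growing as needed */

  fformat (stdout, "%d elements, last = %d\n",
	   vec_len (v), v[vec_len (v) - 1]);

  vec_free (v);
  return 0;
}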
diff --git a/src/vppinfra/anneal.c b/src/vppinfra/anneal.c
new file mode 100644
index 00000000..35d10946
--- /dev/null
+++ b/src/vppinfra/anneal.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2011 Cisco and/or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+#include <vppinfra/anneal.h>
+
+/*
+ * Optimize an objective function by simulated annealing
+ *
+ * Here are a couple of short, easily-understood
+ * descriptions of simulated annealing:
+ *
+ * http://www.cs.sandia.gov/opt/survey/sa.html
+ * Numerical Recipes in C, 2nd ed., 444ff
+ *
+ * The Wikipedia description is not helpful.
+ *
+ * The algorithm tries to produce a decent answer to combinatorially
+ * explosive optimization problems by analogy to slow cooling
+ * of hot metal, aka annealing.
+ *
+ * There are (at least) three problem-dependent annealing parameters
+ * to consider:
+ *
+ * t0, the initial "temperature". Should be set so that the probability
+ * of accepting a transition to a higher cost configuration is
+ * initially about 0.8.
+ *
+ * ntemps, the number of temperatures to use. Each successive temperature
+ * is some fraction of the previous temperature.
+ *
+ * nmoves_per_temp, the number of configurations to try at each temperature
+ *
+ * It is a black art to set ntemps, nmoves_per_temp, and the rate
+ * at which the temperature drops. Go too fast with too few iterations,
+ * and the computation falls into a local minimum instead of the
+ * (desired) global minimum.
+ */
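The suggested-t0 computation at the end of this file follows directly from
the Boltzmann acceptance rule: fixing the initial acceptance probability for
an average-size bad move $\Delta$ at 0.8 and solving for the temperature gives

  $e^{-\Delta / t_0} = 0.8 \;\Longrightarrow\; t_0 = \frac{\Delta}{-\ln 0.8} \approx \frac{\Delta}{0.223}$

which is where the 0.22 divisor near the end of clib_anneal() comes from.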
+
+void
+clib_anneal (clib_anneal_param_t * p)
+{
+ f64 t;
+ f64 cost, prev_cost, delta_cost, initial_cost, best_cost;
+ f64 random_accept, delta_cost_over_t;
+ f64 total_increase = 0.0, average_increase;
+ u32 i, j;
+ u32 number_of_increases = 0;
+ u32 accepted_this_temperature;
+ u32 best_saves_this_temperature;
+ int accept;
+
+ t = p->initial_temperature;
+ best_cost = initial_cost = prev_cost = p->anneal_metric (p->opaque);
+ p->anneal_save_best_configuration (p->opaque);
+
+ if (p->flags & CLIB_ANNEAL_VERBOSE)
+ fformat (stdout, "Initial cost %.2f\n", initial_cost);
+
+ for (i = 0; i < p->number_of_temperatures; i++)
+ {
+ accepted_this_temperature = 0;
+ best_saves_this_temperature = 0;
+
+ p->anneal_restore_best_configuration (p->opaque);
+ cost = best_cost;
+
+ for (j = 0; j < p->number_of_configurations_per_temperature; j++)
+ {
+ p->anneal_new_configuration (p->opaque);
+ cost = p->anneal_metric (p->opaque);
+
+ delta_cost = cost - prev_cost;
+
+ /* cost function looks better, accept this move */
+ if (p->flags & CLIB_ANNEAL_MINIMIZE)
+ accept = delta_cost < 0.0;
+ else
+ accept = delta_cost > 0.0;
+
+ if (accept)
+ {
+ if (p->flags & CLIB_ANNEAL_MINIMIZE)
+ if (cost < best_cost)
+ {
+ if (p->flags & CLIB_ANNEAL_VERBOSE)
+ fformat (stdout, "New best cost %.2f\n", cost);
+ best_cost = cost;
+ p->anneal_save_best_configuration (p->opaque);
+ best_saves_this_temperature++;
+ }
+
+ accepted_this_temperature++;
+ prev_cost = cost;
+ continue;
+ }
+
+ /* cost function worse, keep stats to suggest t0 */
+ total_increase += (p->flags & CLIB_ANNEAL_MINIMIZE) ?
+ delta_cost : -delta_cost;
+
+ number_of_increases++;
+
+ /*
+ * Accept a higher cost with Pr { e^(-(delta_cost / T)) },
+ * equivalent to rnd[0,1] < e^(-(delta_cost / T))
+ *
+ * AKA, the Boltzmann factor.
+ */
+ random_accept = random_f64 (&p->random_seed);
+
+ delta_cost_over_t = delta_cost / t;
+
+ if (random_accept < exp (-delta_cost_over_t))
+ {
+ accepted_this_temperature++;
+ prev_cost = cost;
+ continue;
+ }
+ p->anneal_restore_previous_configuration (p->opaque);
+ }
+
+ if (p->flags & CLIB_ANNEAL_VERBOSE)
+ {
+ fformat (stdout, "Temp %.2f, cost %.2f, accepted %d, bests %d\n", t,
+ prev_cost, accepted_this_temperature,
+ best_saves_this_temperature);
+ fformat (stdout, "Improvement %.2f\n", initial_cost - prev_cost);
+ fformat (stdout, "-------------\n");
+ }
+
+ t = t * p->temperature_step;
+ }
+
+ /*
+ * Empirically, one wants the probability of accepting a move
+ * at the initial temperature to be about 0.8.
+ */
+ average_increase = total_increase / (f64) number_of_increases;
+ p->suggested_initial_temperature = average_increase / 0.22; /* 0.22 = -ln (0.8) */
+
+ p->final_temperature = t;
+ p->final_metric = p->anneal_metric (p->opaque);
+
+ if (p->flags & CLIB_ANNEAL_VERBOSE)
+ {
+ fformat (stdout, "Average cost increase from a bad move: %.2f\n",
+ average_increase);
+ fformat (stdout, "Suggested t0 = %.2f\n",
+ p->suggested_initial_temperature);
+ }
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/anneal.h b/src/vppinfra/anneal.h
new file mode 100644
index 00000000..148d38ba
--- /dev/null
+++ b/src/vppinfra/anneal.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2011 Cisco and/or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+#ifndef __included_anneal_h__
+#define __included_anneal_h__
+
+#include <vppinfra/clib.h>
+#include <vppinfra/format.h>
+#include <vppinfra/random.h>
+#include <math.h>
+
+typedef struct
+{
+ /* Initial temperature */
+ f64 initial_temperature;
+
+ /* Temperature fraction at each step, 0.95 is reasonable */
+ f64 temperature_step;
+
+ /* Number of temperatures used */
+ u32 number_of_temperatures;
+
+ /* Number of configurations tried at each temperature */
+ u32 number_of_configurations_per_temperature;
+
+ u32 flags;
+#define CLIB_ANNEAL_VERBOSE (1<<0)
+#define CLIB_ANNEAL_MINIMIZE (1<<1) /* mutually exclusive */
+#define CLIB_ANNEAL_MAXIMIZE (1<<2) /* mutually exclusive */
+
+ /* Random number seed, set to ensure repeatable results */
+ u32 random_seed;
+
+ /* Opaque data passed to callbacks */
+ void *opaque;
+
+ /* Final temperature (output) */
+ f64 final_temperature;
+
+ /* Final metric (output) */
+ f64 final_metric;
+
+ /* Suggested initial temperature (output) */
+ f64 suggested_initial_temperature;
+
+
+ /*--- Callbacks ---*/
+
+  /* Objective function to minimize (or maximize, per flags) */
+ f64 (*anneal_metric) (void *opaque);
+
+ /* Generate a new configuration */
+ void (*anneal_new_configuration) (void *opaque);
+
+ /* Restore the previous configuration */
+ void (*anneal_restore_previous_configuration) (void *opaque);
+
+  /* Save the best configuration found, e.g. at a certain temperature */
+ void (*anneal_save_best_configuration) (void *opaque);
+
+  /* Restore the best configuration found, e.g. at a certain temperature */
+ void (*anneal_restore_best_configuration) (void *opaque);
+
+} clib_anneal_param_t;
+
+void clib_anneal (clib_anneal_param_t * p);
+
+#endif /* __included_anneal_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
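A minimal usage sketch of this interface, minimizing f(x) = x^2 with a
random-walk proposal. Everything prefixed my_ is hypothetical example code,
not part of the library, and the parameter values are illustrative only.

#include <vppinfra/anneal.h>

typedef struct
{
  f64 x, prev_x, best_x;
  u32 seed;
} my_state_t;

static f64
my_metric (void *opaque)
{
  my_state_t *s = opaque;
  return s->x * s->x;
}

static void
my_new_configuration (void *opaque)
{
  my_state_t *s = opaque;
  s->prev_x = s->x;
  /* Random-walk proposal: perturb x by a step in [-1, 1). */
  s->x += 2.0 * random_f64 (&s->seed) - 1.0;
}

static void
my_restore_previous (void *opaque)
{
  my_state_t *s = opaque;
  s->x = s->prev_x;
}

static void
my_save_best (void *opaque)
{
  my_state_t *s = opaque;
  s->best_x = s->x;
}

static void
my_restore_best (void *opaque)
{
  my_state_t *s = opaque;
  s->x = s->best_x;
}

int
main (void)
{
  my_state_t s = { .x = 10.0, .seed = 1 };
  clib_anneal_param_t p = {
    .initial_temperature = 100.0,
    .temperature_step = 0.95,
    .number_of_temperatures = 100,
    .number_of_configurations_per_temperature = 100,
    .flags = CLIB_ANNEAL_MINIMIZE | CLIB_ANNEAL_VERBOSE,
    .random_seed = 2,
    .opaque = &s,
    .anneal_metric = my_metric,
    .anneal_new_configuration = my_new_configuration,
    .anneal_restore_previous_configuration = my_restore_previous,
    .anneal_save_best_configuration = my_save_best,
    .anneal_restore_best_configuration = my_restore_best,
  };

  clib_anneal (&p);
  fformat (stdout, "final metric %.6f\n", p.final_metric);
  return 0;
}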
diff --git a/src/vppinfra/asm_mips.h b/src/vppinfra/asm_mips.h
new file mode 100644
index 00000000..7c9e6958
--- /dev/null
+++ b/src/vppinfra/asm_mips.h
@@ -0,0 +1,351 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2004 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_asm_mips_h
+#define included_asm_mips_h
+
+/* Encoding of MIPS instructions. */
+/* Encoding of opcode field (op). */
+#define mips_foreach_opcode \
+ _(SPECIAL) _(REGIMM) _(j) _(jal) _(beq) _(bne) _(blez) _(bgtz) \
+ _(addi) _(addiu) _(slti) _(sltiu) _(andi) _(ori) _(xori) _(lui) \
+ _(COP0) _(COP1) _(COP2) _(COP1X) _(beql) _(bnel) _(blezl) _(bgtzl) \
+ _(daddi) _(daddiu) _(ldl) _(ldr) _(SPECIAL2) _(jalx) _(MDMX) _(O37) \
+ _(lb) _(lh) _(lwl) _(lw) _(lbu) _(lhu) _(lwr) _(lwu) \
+ _(sb) _(sh) _(swl) _(sw) _(sdl) _(sdr) _(swr) _(cache) \
+ _(ll) _(lwc1) _(lwc2) _(pref) _(lld) _(ldc1) _(ldc2) _(ld) \
+ _(sc) _(swc1) _(swc2) _(o73) _(scd) _(sdc1) _(sdc2) _(sd)
+
+/* Encoding of funct field. */
+#define mips_foreach_special_funct \
+ _(sll) _(MOVCI) _(srl) _(sra) _(sllv) _(o05) _(srlv) _(srav) \
+ _(jr) _(jalr) _(movz) _(movn) _(syscall) _(break) _(o16) _(sync) \
+ _(mfhi) _(mthi) _(mflo) _(mtlo) _(dsllv) _(o25) _(dsrlv) _(dsrav) \
+ _(mult) _(multu) _(div) _(divu) _(dmult) _(dmultu) _(ddiv) _(ddivu) \
+ _(add) _(addu) _(sub) _(subu) _(and) _(or) _(xor) _(nor) \
+ _(o50) _(o51) _(slt) _(sltu) _(dadd) _(daddu) _(dsub) _(dsubu) \
+ _(tge) _(tgeu) _(tlt) _(tltu) _(teq) _(o65) _(tne) _(o67) \
+ _(dsll) _(o71) _(dsrl) _(dsra) _(dsll32) _(o75) _(dsrl32) _(dsra32)
+
+/* SPECIAL2 encoding of funct field. */
+#define mips_foreach_special2_funct \
+ _(madd) _(maddu) _(mul) _(o03) _(msub) _(msubu) _(o06) _(o07) \
+ _(o10) _(o11) _(o12) _(o13) _(o14) _(o15) _(o16) _(o17) \
+ _(o20) _(o21) _(o22) _(o23) _(o24) _(o25) _(o26) _(o27) \
+ _(o30) _(o31) _(o32) _(o33) _(o34) _(o35) _(o36) _(o37) \
+ _(clz) _(clo) _(o42) _(o43) _(dclz) _(dclo) _(o46) _(o47) \
+ _(o50) _(o51) _(o52) _(o53) _(o54) _(o55) _(o56) _(o57) \
+ _(o60) _(o61) _(o62) _(o63) _(o64) _(o65) _(o66) _(o67) \
+ _(o70) _(o71) _(o72) _(o73) _(o74) _(o75) _(o76) _(sdbbp)
+
+/* REGIMM encoding of rt field. */
+#define mips_foreach_regimm_rt \
+ _(bltz) _(bgez) _(bltzl) _(bgezl) _(o04) _(o05) _(o06) _(o07) \
+ _(tgei) _(tgeiu) _(tltiu) _(teqi) _(o14) _(tnei) _(o16) _(o17) \
+ _(bltzal) _(bgezal) _(bltzall) _(bgezall) _(o24) _(o25) _(o26) _(o27) \
+ _(o30) _(o31) _(o32) _(o33) _(o34) _(o35) _(o36) _(o37)
+
+/* COP0 encoding of rs field. */
+#define mips_foreach_cop0_rs \
+ _(mfc0) _(dmfc0) _(o02) _(o03) _(mtc0) _(dmtc0) _(o06) _(o07) \
+ _(o10) _(o11) _(o12) _(o13) _(o14) _(o15) _(o16) _(o17) \
+ _(C0) _(o21) _(o22) _(o23) _(o24) _(o25) _(o26) _(o27) \
+ _(o30) _(o31) _(o32) _(o33) _(o34) _(o35) _(o36) _(o37)
+
+/* COP0 encoding of funct when rs == MIPS_COP0_RS_C0 */
+#define mips_foreach_cop0_funct \
+ _(o00) _(tlbr) _(tlbwi) _(o03) _(o04) _(o05) _(tlbwr) _(o07) \
+ _(tlbp) _(o11) _(o12) _(o13) _(o14) _(o15) _(o16) _(o17) \
+ _(o20) _(o21) _(o22) _(o23) _(o24) _(o25) _(o26) _(o27) \
+ _(eret) _(o31) _(o32) _(o33) _(o34) _(o35) _(o36) _(deret) \
+ _(wait) _(o41) _(o42) _(o43) _(o44) _(o45) _(o46) _(o47) \
+ _(o50) _(o51) _(o52) _(o53) _(o54) _(o55) _(o56) _(o57) \
+ _(o60) _(o61) _(o62) _(o63) _(o64) _(o65) _(o66) _(o67) \
+ _(o70) _(o71) _(o72) _(o73) _(o74) _(o75) _(o76) _(o77)
+
+/* COP1 encoding of rs field. */
+#define mips_foreach_cop1_rs \
+ _(mfc1) _(dmfc1) _(cfc1) _(o03) _(mtc1) _(dmtc1) _(ctc1) _(o07) \
+ _(BC1) _(o11) _(o12) _(o13) _(o14) _(o15) _(o16) _(o17) \
+ _(S) _(D) _(o22) _(o23) _(W) _(L) _(o26) _(o27) \
+ _(o30) _(o31) _(o32) _(o33) _(o34) _(o35) _(o36) _(o37)
+
+/* COP1 encoding of funct for S and D */
+#define mips_foreach_cop1_funct \
+ _(add) _(sub) _(mul) _(div) _(sqrt) _(abs) _(mov) _(neg) \
+ _(roundl) _(truncl) _(ceill) _(floorl) _(roundw) _(truncw) _(ceilw) _(floorw) \
+ _(o20) _(MOVCF) _(movz) _(movn) _(o24) _(recip) _(rsqrt) _(o27) \
+ _(o30) _(o31) _(o32) _(o33) _(o34) _(o35) _(o36) _(o37) \
+ _(cvts) _(cvtd) _(o42) _(o43) _(cvtw) _(cvtl) _(o46) _(o47) \
+ _(o50) _(o51) _(o52) _(o53) _(o54) _(o55) _(o56) _(o57) \
+ _(cf) _(cun) _(ceq) _(cueq) _(colt) _(cult) _(cole) _(cule) \
+ _(csf) _(cngle) _(cseq) _(cngl) _(clt) _(cnge) _(cle) _(cngt)
+
+/* COP1X encoding of funct */
+#define mips_foreach_cop1x_funct \
+ _(lwxc1) _(ldxc1) _(o02) _(o03) _(o04) _(luxc1) _(o06) _(o07) \
+ _(swxc1) _(sdxc1) _(o12) _(o13) _(o14) _(suxc1) _(o16) _(prefx) \
+ _(o20) _(o21) _(o22) _(o23) _(o24) _(o25) _(o26) _(o27) \
+ _(o30) _(o31) _(o32) _(o33) _(o34) _(o35) _(o36) _(o37) \
+ _(madds) _(maddd) _(o42) _(o43) _(o44) _(o45) _(o46) _(o47) \
+ _(msubs) _(msubd) _(o52) _(o53) _(o54) _(o55) _(o56) _(o57) \
+ _(nmadds) _(nmaddd) _(o62) _(o63) _(o64) _(o65) _(o66) _(o67) \
+ _(nmsubs) _(nmsubd) _(o72) _(o73) _(o74) _(o75) _(o76) _(o77)
+
+#define mips_foreach_mdmx_funct \
+ _(msgn) _(ceq) _(pickf) _(pickt) _(clt) _(cle) _(min) _(max) \
+ _(o10) _(o11) _(sub) _(add) _(and) _(xor) _(or) _(nor) \
+ _(sll) _(o21) _(srl) _(sra) _(o24) _(o25) _(o26) _(o27) \
+ _(alniob) _(alnvob) _(alniqh) _(alnvqh) _(o34) _(o35) _(o36) _(shfl) \
+ _(rzu) _(rnau) _(rneu) _(o43) _(rzs) _(rnas) _(rnes) _(o47) \
+ _(o50) _(o51) _(o52) _(o53) _(o54) _(o55) _(o56) _(o57) \
+ _(mul) _(o61) _(muls) _(mula) _(o64) _(o65) _(suba) _(adda) \
+ _(o70) _(o71) _(o72) _(o73) _(o74) _(o75) _(wac) _(rac)
+
+#define _(f) MIPS_OPCODE_##f,
+typedef enum
+{
+ mips_foreach_opcode
+} mips_insn_opcode_t;
+#undef _
+
+#define _(f) MIPS_SPECIAL_FUNCT_##f,
+typedef enum
+{
+ mips_foreach_special_funct
+} mips_insn_special_funct_t;
+#undef _
+
+#define _(f) MIPS_SPECIAL2_FUNCT_##f,
+typedef enum
+{
+ mips_foreach_special2_funct
+} mips_insn_special2_funct_t;
+#undef _
+
+#define _(f) MIPS_REGIMM_RT_##f,
+typedef enum
+{
+ mips_foreach_regimm_rt
+} mips_insn_regimm_rt_t;
+#undef _
+
+#define _(f) MIPS_COP0_RS_##f,
+typedef enum
+{
+ mips_foreach_cop0_rs
+} mips_insn_cop0_rs_t;
+#undef _
+
+#define _(f) MIPS_COP0_FUNCT_##f,
+typedef enum
+{
+ mips_foreach_cop0_funct
+} mips_insn_cop0_funct_t;
+#undef _
+
+#define _(f) MIPS_COP1_RS_##f,
+typedef enum
+{
+ mips_foreach_cop1_rs
+} mips_insn_cop1_rs_t;
+#undef _
+
+#define _(f) MIPS_COP1_FUNCT_##f,
+typedef enum
+{
+ mips_foreach_cop1_funct
+} mips_insn_cop1_funct_t;
+#undef _
+
+#define _(f) MIPS_COP1X_FUNCT_##f,
+typedef enum
+{
+ mips_foreach_cop1x_funct
+} mips_insn_cop1x_funct_t;
+#undef _
+
+#define _(f) MIPS_MDMX_FUNCT_##f,
+typedef enum
+{
+ mips_foreach_mdmx_funct
+} mips_insn_mdmx_funct_t;
+#undef _
+
+always_inline mips_insn_opcode_t
+mips_insn_get_op (u32 insn)
+{
+ return (insn >> 26) & 0x3f;
+}
+
+always_inline u32
+mips_insn_get_rs (u32 insn)
+{
+ return (insn >> 21) & 0x1f;
+}
+
+always_inline u32
+mips_insn_get_rt (u32 insn)
+{
+ return (insn >> 16) & 0x1f;
+}
+
+always_inline u32
+mips_insn_get_rd (u32 insn)
+{
+ return (insn >> 11) & 0x1f;
+}
+
+always_inline u32
+mips_insn_get_sa (u32 insn)
+{
+ return (insn >> 6) & 0x1f;
+}
+
+always_inline u32
+mips_insn_get_funct (u32 insn)
+{
+ return (insn >> 0) & 0x3f;
+}
+
+always_inline i32
+mips_insn_get_immediate (u32 insn)
+{
+ return (((i32) insn) << 16) >> 16;
+}
+
+always_inline u32
+mips_insn_encode_i_type (int op, int rs, int rt, int immediate)
+{
+ u32 insn;
+ insn = immediate;
+ insn |= rt << 16;
+ insn |= rs << 21;
+ insn |= op << 26;
+
+ ASSERT (mips_insn_get_immediate (insn) == immediate);
+ ASSERT (mips_insn_get_rt (insn) == rt);
+  ASSERT (mips_insn_get_rs (insn) == rs);
+ ASSERT (mips_insn_get_op (insn) == op);
+
+ return insn;
+}
+
+always_inline u32
+mips_insn_encode_j_type (int op, u32 addr)
+{
+ u32 insn;
+
+ insn = (addr & ((1 << 28) - 1)) / 4;
+ insn |= op << 26;
+
+ return insn;
+}
+
+always_inline u32
+mips_insn_encode_r_type (int op, int rs, int rt, int rd, int sa, int funct)
+{
+ u32 insn;
+ insn = funct;
+ insn |= sa << 6;
+ insn |= rd << 11;
+ insn |= rt << 16;
+ insn |= rs << 21;
+ insn |= op << 26;
+
+ ASSERT (mips_insn_get_funct (insn) == funct);
+ ASSERT (mips_insn_get_sa (insn) == sa);
+ ASSERT (mips_insn_get_rd (insn) == rd);
+ ASSERT (mips_insn_get_rt (insn) == rt);
+  ASSERT (mips_insn_get_rs (insn) == rs);
+ ASSERT (mips_insn_get_op (insn) == op);
+
+ return insn;
+}
+
+#define mips_insn_r(op,funct,rd,rs,rt,sa) \
+ mips_insn_encode_r_type (MIPS_OPCODE_##op, \
+ (rs), (rt), (rd), (sa), \
+ MIPS_##op##_FUNCT_##funct)
+
+#define mips_insn_i(op,rs,rt,imm) \
+ mips_insn_encode_i_type (MIPS_OPCODE_##op, (rs), (rt), (imm))
+
+#define mips_insn_j(op,target) \
+  mips_insn_encode_j_type (MIPS_OPCODE_##op, (target))
+
+/* Generate unsigned load instructions of data of various sizes. */
+always_inline u32
+mips_insn_load (u32 rd, i32 offset, u32 base, u32 log2_bytes)
+{
+ int op;
+
+ ASSERT (log2_bytes < 4);
+ switch (log2_bytes)
+ {
+ case 0:
+ op = MIPS_OPCODE_lbu;
+ break;
+ case 1:
+ op = MIPS_OPCODE_lhu;
+ break;
+ case 2:
+ op = MIPS_OPCODE_lwu;
+ break;
+ case 3:
+ op = MIPS_OPCODE_ld;
+ break;
+ }
+
+ return mips_insn_encode_i_type (op, base, rd, offset);
+}
+
+typedef enum
+{
+ MIPS_REG_SP = 29,
+ MIPS_REG_RA = 31,
+} mips_reg_t;
+
+#endif /* included_asm_mips_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
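For concreteness, here is a standalone sketch of the same I-type bit packing
used by mips_insn_encode_i_type() above, written with plain stdint types
instead of the clib ones; the encoded word is the standard encoding of
addiu $t0, $sp, 16.

#include <stdio.h>
#include <stdint.h>

/* I-type layout: op[31:26] rs[25:21] rt[20:16] immediate[15:0]. */
static uint32_t
encode_i_type (int op, int rs, int rt, int immediate)
{
  return ((uint32_t) (immediate & 0xffff))
    | ((uint32_t) rt << 16) | ((uint32_t) rs << 21) | ((uint32_t) op << 26);
}

int
main (void)
{
  /* addiu $t0, $sp, 16: opcode 9 (addiu), rs = 29 ($sp), rt = 8 ($t0).
     Prints 0x27a80010. */
  printf ("0x%08x\n", encode_i_type (9, 29, 8, 16));
  return 0;
}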
diff --git a/src/vppinfra/asm_x86.c b/src/vppinfra/asm_x86.c
new file mode 100644
index 00000000..16e41c24
--- /dev/null
+++ b/src/vppinfra/asm_x86.c
@@ -0,0 +1,1947 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/* FIXME
+ opcode name remove to save table space; enum
+ x87
+ 3dnow
+ cbw naming
+*/
+
+#include <vppinfra/error.h>
+#include <vppinfra/byte_order.h>
+#include <vppinfra/asm_x86.h>
+
+#define foreach_x86_gp_register \
+ _ (AX) _ (CX) _ (DX) _ (BX) \
+ _ (SP) _ (BP) _ (SI) _ (DI)
+
+typedef enum {
+#define _(r) X86_INSN_GP_REG_##r,
+ foreach_x86_gp_register
+#undef _
+} x86_insn_gp_register_t;
+
+typedef union {
+ struct {
+ u8 rm : 3;
+ u8 reg : 3;
+ u8 mode : 2;
+ };
+ u8 byte;
+} x86_insn_modrm_byte_t;
+
+typedef union {
+ struct {
+ u8 base : 3;
+ u8 index : 3;
+ u8 log2_scale : 2;
+ };
+ u8 byte;
+} x86_insn_sib_byte_t;
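A quick sanity check of the bit-field layout, as a sketch; it assumes the
LSB-first bit-field allocation GCC uses on little-endian x86, which these
unions already rely on.

static void
x86_insn_modrm_byte_example (void)
{
  /* 0xd8 = 0b11011000: mode = 3 (register direct),
     reg = 3 (BX), rm = 0 (AX). */
  x86_insn_modrm_byte_t m = { .byte = 0xd8 };
  ASSERT (m.mode == 3 && m.reg == 3 && m.rm == 0);
}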
+
+always_inline uword
+x86_insn_has_modrm_byte (x86_insn_t * insn)
+{
+ int i;
+ for (i = 0; i < ARRAY_LEN (insn->operands); i++)
+ switch (insn->operands[i].code)
+ {
+ case 'G': case 'E': case 'M': case 'R':
+ return 1;
+ }
+ return 0;
+}
+
+always_inline uword
+x86_insn_immediate_type (x86_insn_t * insn)
+{
+ int i;
+ for (i = 0; i < ARRAY_LEN (insn->operands); i++)
+ switch (insn->operands[i].code)
+ {
+ case 'J':
+ case 'I':
+ case 'O':
+ return insn->operands[i].type;
+ }
+ return 0;
+}
+
+/* Opcode extension in modrm byte reg field. */
+#define foreach_x86_insn_modrm_reg_group \
+ _ (1) _ (1a) _ (2) _ (3) _ (4) _ (5) _ (6) _ (7) \
+ _ (8) _ (9) _ (10) _ (11) _ (12) _ (13) _ (14) \
+ _ (15) _ (16) _ (p)
+
+#define foreach_x86_insn_sse_group \
+ _ (10) _ (28) _ (50) _ (58) _ (60) _ (68) _ (70) _ (78) \
+ _ (c0) _ (d0) _ (d8) _ (e0) _ (e8) _ (f0) _ (f8)
+
+enum {
+#define _(x) X86_INSN_MODRM_REG_GROUP_##x,
+ foreach_x86_insn_modrm_reg_group
+#undef _
+#define _(x) X86_INSN_SSE_GROUP_##x,
+ foreach_x86_insn_sse_group
+#undef _
+};
+
+enum {
+#define _(x) \
+ X86_INSN_FLAG_MODRM_REG_GROUP_##x \
+ = X86_INSN_FLAG_SET_MODRM_REG_GROUP (1 + X86_INSN_MODRM_REG_GROUP_##x),
+ foreach_x86_insn_modrm_reg_group
+#undef _
+
+#define _(x) \
+ X86_INSN_FLAG_SSE_GROUP_##x \
+ = X86_INSN_FLAG_SET_SSE_GROUP (1 + X86_INSN_SSE_GROUP_##x),
+ foreach_x86_insn_sse_group
+#undef _
+};
+
+#define foreach_x86_gp_reg \
+ _ (AX) _ (CX) _ (DX) _ (BX) \
+ _ (SP) _ (BP) _ (SI) _ (DI)
+
+#define foreach_x86_condition \
+ _ (o) _ (no) _ (b) _ (nb) \
+ _ (z) _ (nz) _ (be) _ (nbe) \
+ _ (s) _ (ns) _ (p) _ (np) \
+ _ (l) _ (nl) _ (le) _ (nle)
+
+#define _3f(x,f,o0,o1,o2) \
+{ \
+ .name = #x, \
+ .flags = (f), \
+ .operands[0] = { .data = #o0 }, \
+ .operands[1] = { .data = #o1 }, \
+ .operands[2] = { .data = #o2 }, \
+}
+
+#define _2f(x,f,o0,o1) _3f(x,f,o0,o1,__)
+#define _1f(x,f,o0) _2f(x,f,o0,__)
+#define _0f(x,f) _1f(x,f,__)
+
+#define _3(x,o0,o1,o2) _3f(x,0,o0,o1,o2)
+#define _2(x,o0,o1) _2f(x,0,o0,o1)
+#define _1(x,o0) _1f(x,0,o0)
+#define _0(x) _0f(x,0)
+
+static x86_insn_t x86_insns_one_byte[256] = {
+
+#define _(x) \
+ _2 (x, Eb, Gb), \
+ _2 (x, Ev, Gv), \
+ _2 (x, Gb, Eb), \
+ _2 (x, Gv, Ev), \
+ _2 (x, AL, Ib), \
+ _2 (x, AX, Iz)
+
+ /* 0x00 */
+ _ (add),
+ _0 (push_es),
+ _0 (pop_es),
+ _ (or),
+ _0 (push_cs),
+ _0 (escape_two_byte),
+
+ /* 0x10 */
+ _ (adc),
+ _0 (push_ss),
+ _0 (pop_ss),
+ _ (sbb),
+ _0 (push_ds),
+ _0 (pop_ds),
+
+ /* 0x20 */
+ _ (and),
+ _0 (segment_es),
+ _0 (daa),
+ _ (sub),
+ _0 (segment_cs),
+ _0 (das),
+
+ /* 0x30 */
+ _ (xor),
+ _0 (segment_ss),
+ _0 (aaa),
+ _ (cmp),
+ _0 (segment_ds),
+ _0 (aas),
+
+#undef _
+
+ /* 0x40 */
+#define _(r) _1 (inc, r),
+ foreach_x86_gp_reg
+#undef _
+#define _(r) _1 (dec, r),
+ foreach_x86_gp_reg
+#undef _
+
+ /* 0x50 */
+#define _(r) _1f (push, X86_INSN_FLAG_DEFAULT_64_BIT, r),
+ foreach_x86_gp_reg
+#undef _
+#define _(r) _1f (pop, X86_INSN_FLAG_DEFAULT_64_BIT, r),
+ foreach_x86_gp_reg
+#undef _
+
+ /* 0x60 */
+ _0 (pusha),
+ _0 (popa),
+ _2 (bound, Gv, Ma),
+ _2 (movsxd, Gv, Ed),
+ _0 (segment_fs),
+ _0 (segment_gs),
+ _0 (operand_type),
+ _0 (address_size),
+ _1f (push, X86_INSN_FLAG_DEFAULT_64_BIT, Iz),
+ _3 (imul, Gv, Ev, Iz),
+ _1f (push, X86_INSN_FLAG_DEFAULT_64_BIT, Ib),
+ _3 (imul, Gv, Ev, Ib),
+ _1 (insb, DX),
+ _1 (insw, DX),
+ _1 (outsb, DX),
+ _1 (outsw, DX),
+
+ /* 0x70 */
+#define _(x) _1 (j##x, Jb),
+ foreach_x86_condition
+#undef _
+
+ /* 0x80 */
+ _2f (modrm_group_1, X86_INSN_FLAG_MODRM_REG_GROUP_1, Eb, Ib),
+ _2f (modrm_group_1, X86_INSN_FLAG_MODRM_REG_GROUP_1, Ev, Iz),
+ _2f (modrm_group_1, X86_INSN_FLAG_MODRM_REG_GROUP_1, Eb, Ib),
+ _2f (modrm_group_1, X86_INSN_FLAG_MODRM_REG_GROUP_1, Ev, Ib),
+ _2 (test, Eb, Gb),
+ _2 (test, Ev, Gv),
+ _2 (xchg, Eb, Gb),
+ _2 (xchg, Ev, Gv),
+ _2 (mov, Eb, Gb),
+ _2 (mov, Ev, Gv),
+ _2 (mov, Gb, Eb),
+ _2 (mov, Gv, Ev),
+ _2 (mov, Ev, Sw),
+ _2 (lea, Gv, Ev),
+ _2 (mov, Sw, Ew),
+ _1f (modrm_group_1a, X86_INSN_FLAG_MODRM_REG_GROUP_1a, Ev),
+
+ /* 0x90 */
+ _0 (nop),
+ _1 (xchg, CX),
+ _1 (xchg, DX),
+ _1 (xchg, BX),
+ _1 (xchg, SP),
+ _1 (xchg, BP),
+ _1 (xchg, SI),
+ _1 (xchg, DI),
+ _0 (cbw),
+ _0 (cwd),
+ _1 (call, Ap),
+ _0 (wait),
+ _0 (pushf),
+ _0 (popf),
+ _0 (sahf),
+ _0 (lahf),
+
+ /* 0xa0 */
+ _2 (mov, AL, Ob),
+ _2 (mov, AX, Ov),
+ _2 (mov, Ob, AL),
+ _2 (mov, Ov, AX),
+ _0 (movsb),
+ _0 (movsw),
+ _0 (cmpsb),
+ _0 (cmpsw),
+ _2 (test, AL, Ib),
+ _2 (test, AX, Iz),
+ _1 (stosb, AL),
+ _1 (stosw, AX),
+ _1 (lodsb, AL),
+ _1 (lodsw, AX),
+ _1 (scasb, AL),
+ _1 (scasw, AX),
+
+ /* 0xb0 */
+ _2 (mov, AL, Ib),
+ _2 (mov, CL, Ib),
+ _2 (mov, DL, Ib),
+ _2 (mov, BL, Ib),
+ _2 (mov, AH, Ib),
+ _2 (mov, CH, Ib),
+ _2 (mov, DH, Ib),
+ _2 (mov, BH, Ib),
+#define _(r) _2 (mov, r, Iv),
+ foreach_x86_gp_reg
+#undef _
+
+ /* 0xc0 */
+ _2f (modrm_group_2, X86_INSN_FLAG_MODRM_REG_GROUP_2, Eb, Ib),
+ _2f (modrm_group_2, X86_INSN_FLAG_MODRM_REG_GROUP_2, Ev, Ib),
+ _1 (ret, Iw),
+ _0 (ret),
+ _2 (les, Gz, Mp),
+ _2 (lds, Gz, Mp),
+ _2f (modrm_group_11, X86_INSN_FLAG_MODRM_REG_GROUP_11, Eb, Ib),
+ _2f (modrm_group_11, X86_INSN_FLAG_MODRM_REG_GROUP_11, Ev, Iz),
+ _2 (enter, Iw, Ib),
+ _0 (leave),
+ _1 (ret, Iw),
+ _0 (ret),
+ _0 (int3),
+ _1 (int, Ib),
+ _0 (into),
+ _0 (iret),
+
+ /* 0xd0 */
+ _2f (modrm_group_2, X86_INSN_FLAG_MODRM_REG_GROUP_2, Eb, 1b),
+ _2f (modrm_group_2, X86_INSN_FLAG_MODRM_REG_GROUP_2, Ev, 1b),
+ _2f (modrm_group_2, X86_INSN_FLAG_MODRM_REG_GROUP_2, Eb, CL),
+ _2f (modrm_group_2, X86_INSN_FLAG_MODRM_REG_GROUP_2, Ev, CL),
+ _0 (aam),
+ _0 (aad),
+ _0 (salc),
+ _0 (xlat),
+ /* FIXME x87 */
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+
+ /* 0xe0 */
+ _1 (loopnz, Jb),
+ _1 (loopz, Jb),
+ _1 (loop, Jb),
+ _1 (jcxz, Jb),
+ _2 (in, AL, Ib),
+ _2 (in, AX, Ib),
+ _2 (out, Ib, AL),
+ _2 (out, Ib, AX),
+ _1f (call, X86_INSN_FLAG_DEFAULT_64_BIT, Jz),
+ _1f ( jmp, X86_INSN_FLAG_DEFAULT_64_BIT, Jz),
+ _1 (jmp, Ap),
+ _1 (jmp, Jb),
+ _2 (in, AL, DX),
+ _2 (in, AX, DX),
+ _2 (out, DX, AL),
+ _2 (out, DX, AX),
+
+ /* 0xf0 */
+ _0 (lock),
+ _0 (int1),
+ _0 (repne),
+ _0 (rep),
+ _0 (hlt),
+ _0 (cmc),
+ _0f (modrm_group_3, X86_INSN_FLAG_MODRM_REG_GROUP_3),
+ _0f (modrm_group_3, X86_INSN_FLAG_MODRM_REG_GROUP_3),
+ _0 (clc),
+ _0 (stc),
+ _0 (cli),
+ _0 (sti),
+ _0 (cld),
+ _0 (std),
+ _1f (modrm_group_4, X86_INSN_FLAG_MODRM_REG_GROUP_4, Eb),
+ _0f (modrm_group_5, X86_INSN_FLAG_MODRM_REG_GROUP_5),
+};
+
+static x86_insn_t x86_insns_two_byte[256] = {
+ /* 0x00 */
+ _0f (modrm_group_6, X86_INSN_FLAG_MODRM_REG_GROUP_6),
+ _0f (modrm_group_7, X86_INSN_FLAG_MODRM_REG_GROUP_7),
+ _2 (lar, Gv, Ew),
+ _2 (lsl, Gv, Ew),
+ _0 (bad),
+ _0 (syscall),
+ _0 (clts),
+ _0 (sysret),
+ _0 (invd),
+ _0 (wbinvd),
+ _0 (bad),
+ _0 (ud2),
+ _0 (bad),
+ _0f (modrm_group_p, X86_INSN_FLAG_MODRM_REG_GROUP_p),
+ _0 (femms),
+ _0 (escape_3dnow),
+
+ /* 0x10 */
+ _2f (movups, X86_INSN_FLAG_SSE_GROUP_10, Gx, Ex),
+ _2f (movups, X86_INSN_FLAG_SSE_GROUP_10, Ex, Gx),
+ _2f (movlps, X86_INSN_FLAG_SSE_GROUP_10, Ex, Gx),
+ _2f (movlps, X86_INSN_FLAG_SSE_GROUP_10, Gx, Ex),
+ _2f (unpcklps, X86_INSN_FLAG_SSE_GROUP_10, Gx, Ex),
+ _2f (unpckhps, X86_INSN_FLAG_SSE_GROUP_10, Gx, Ex),
+ _2f (movhps, X86_INSN_FLAG_SSE_GROUP_10, Ex, Gx),
+ _2f (movhps, X86_INSN_FLAG_SSE_GROUP_10, Gx, Ex),
+ _0f (modrm_group_16, X86_INSN_FLAG_MODRM_REG_GROUP_16),
+ _0 (nop),
+ _0 (nop),
+ _0 (nop),
+ _0 (nop),
+ _0 (nop),
+ _0 (nop),
+ _0 (nop),
+
+ /* 0x20 */
+ _2 (mov, Rv, Cv),
+ _2 (mov, Rv, Dv),
+ _2 (mov, Cv, Rv),
+ _2 (mov, Dv, Rv),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _2f (movaps, X86_INSN_FLAG_SSE_GROUP_28, Gx, Ex),
+ _2f (movaps, X86_INSN_FLAG_SSE_GROUP_28, Ex, Gx),
+ _2f (cvtpi2ps, X86_INSN_FLAG_SSE_GROUP_28, Gx, Ex),
+ _2f (movntps, X86_INSN_FLAG_SSE_GROUP_28, Mx, Gx),
+ _2f (cvttps2pi, X86_INSN_FLAG_SSE_GROUP_28, Gx, Ex),
+ _2f (cvtps2pi, X86_INSN_FLAG_SSE_GROUP_28, Gx, Ex),
+ _2f (ucomiss, X86_INSN_FLAG_SSE_GROUP_28, Gx, Ex),
+ _2f (comiss, X86_INSN_FLAG_SSE_GROUP_28, Gx, Ex),
+
+ /* 0x30 */
+ _0 (wrmsr),
+ _0 (rdtsc),
+ _0 (rdmsr),
+ _0 (rdpmc),
+ _0 (sysenter),
+ _0 (sysexit),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+
+ /* 0x40 */
+#define _(x) _2 (cmov##x, Gv, Ev),
+ foreach_x86_condition
+#undef _
+
+ /* 0x50 */
+ _2f (movmskps, X86_INSN_FLAG_SSE_GROUP_50, Gd, Rx),
+ _2f (sqrtps, X86_INSN_FLAG_SSE_GROUP_50, Gx, Ex),
+ _2f (rsqrtps, X86_INSN_FLAG_SSE_GROUP_50, Gx, Ex),
+ _2f (rcpps, X86_INSN_FLAG_SSE_GROUP_50, Gx, Ex),
+ _2f (andps, X86_INSN_FLAG_SSE_GROUP_50, Gx, Ex),
+ _2f (andnps, X86_INSN_FLAG_SSE_GROUP_50, Gx, Ex),
+ _2f (orps, X86_INSN_FLAG_SSE_GROUP_50, Gx, Ex),
+ _2f (xorps, X86_INSN_FLAG_SSE_GROUP_50, Gx, Ex),
+ _2f (addps, X86_INSN_FLAG_SSE_GROUP_58, Gx, Ex),
+ _2f (mulps, X86_INSN_FLAG_SSE_GROUP_58, Gx, Ex),
+ _2f (cvtps2pd, X86_INSN_FLAG_SSE_GROUP_58, Gx, Ex),
+ _2f (cvtdq2ps, X86_INSN_FLAG_SSE_GROUP_58, Gx, Ex),
+ _2f (subps, X86_INSN_FLAG_SSE_GROUP_58, Gx, Ex),
+ _2f (minps, X86_INSN_FLAG_SSE_GROUP_58, Gx, Ex),
+ _2f (divps, X86_INSN_FLAG_SSE_GROUP_58, Gx, Ex),
+ _2f (maxps, X86_INSN_FLAG_SSE_GROUP_58, Gx, Ex),
+
+ /* 0x60 */
+ _2f (punpcklbw, X86_INSN_FLAG_SSE_GROUP_60, Gm, Em),
+ _2f (punpcklwd, X86_INSN_FLAG_SSE_GROUP_60, Gm, Em),
+ _2f (punpckldq, X86_INSN_FLAG_SSE_GROUP_60, Gm, Em),
+ _2f (packsswb, X86_INSN_FLAG_SSE_GROUP_60, Gm, Em),
+ _2f (pcmpgtb, X86_INSN_FLAG_SSE_GROUP_60, Gm, Em),
+ _2f (pcmpgtw, X86_INSN_FLAG_SSE_GROUP_60, Gm, Em),
+ _2f (pcmpgtd, X86_INSN_FLAG_SSE_GROUP_60, Gm, Em),
+ _2f (packuswb, X86_INSN_FLAG_SSE_GROUP_60, Gm, Em),
+ _2f (punpckhbw, X86_INSN_FLAG_SSE_GROUP_68, Gm, Em),
+ _2f (punpckhwd, X86_INSN_FLAG_SSE_GROUP_68, Gm, Em),
+ _2f (punpckhdq, X86_INSN_FLAG_SSE_GROUP_68, Gm, Em),
+ _2f (packssdw, X86_INSN_FLAG_SSE_GROUP_68, Gm, Em),
+ _0f (bad, X86_INSN_FLAG_SSE_GROUP_68),
+ _0f (bad, X86_INSN_FLAG_SSE_GROUP_68),
+ _2f (movd, X86_INSN_FLAG_SSE_GROUP_68, Gm, Em),
+ _2f (movq, X86_INSN_FLAG_SSE_GROUP_68, Gm, Em),
+
+ /* 0x70 */
+ _3f (pshufw, X86_INSN_FLAG_SSE_GROUP_70, Gm, Em, Ib),
+ _0f (modrm_group_12, X86_INSN_FLAG_MODRM_REG_GROUP_12),
+ _0f (modrm_group_13, X86_INSN_FLAG_MODRM_REG_GROUP_13),
+ _0f (modrm_group_14, X86_INSN_FLAG_MODRM_REG_GROUP_14),
+ _2f (pcmpeqb, X86_INSN_FLAG_SSE_GROUP_70, Gm, Em),
+ _2f (pcmpeqw, X86_INSN_FLAG_SSE_GROUP_70, Gm, Em),
+ _2f (pcmpeqd, X86_INSN_FLAG_SSE_GROUP_70, Gm, Em),
+ _0f (emms, X86_INSN_FLAG_SSE_GROUP_70),
+ _0f (bad, X86_INSN_FLAG_SSE_GROUP_78),
+ _0f (bad, X86_INSN_FLAG_SSE_GROUP_78),
+ _0f (bad, X86_INSN_FLAG_SSE_GROUP_78),
+ _0f (bad, X86_INSN_FLAG_SSE_GROUP_78),
+ _0f (bad, X86_INSN_FLAG_SSE_GROUP_78),
+ _0f (bad, X86_INSN_FLAG_SSE_GROUP_78),
+ _2f (movd, X86_INSN_FLAG_SSE_GROUP_78, Em, Gm),
+ _2f (movq, X86_INSN_FLAG_SSE_GROUP_78, Em, Gm),
+
+ /* 0x80 */
+#define _(x) _1 (jmp##x, Jz),
+ foreach_x86_condition
+#undef _
+
+ /* 0x90 */
+#define _(x) _1 (set##x, Eb),
+ foreach_x86_condition
+#undef _
+
+ /* 0xa0 */
+ _0 (push_fs),
+ _0 (pop_fs),
+ _0 (cpuid),
+ _2 (bt, Ev, Gv),
+ _3 (shld, Ev, Gv, Ib),
+ _3 (shld, Ev, Gv, CL),
+ _0 (bad),
+ _0 (bad),
+ _0 (push_gs),
+ _0 (pop_gs),
+ _0 (rsm),
+ _2 (bts, Ev, Gv),
+ _3 (shrd, Ev, Gv, Ib),
+ _3 (shrd, Ev, Gv, CL),
+ _0f (modrm_group_15, X86_INSN_FLAG_MODRM_REG_GROUP_15),
+ _2 (imul, Gv, Ev),
+
+ /* 0xb0 */
+ _2 (cmpxchg, Eb, Gb),
+ _2 (cmpxchg, Ev, Gv),
+ _2 (lss, Gz, Mp),
+ _2 (btr, Ev, Gv),
+ _2 (lfs, Gz, Mp),
+ _2 (lgs, Gz, Mp),
+ _2 (movzbl, Gv, Eb),
+ _2 (movzwl, Gv, Ew),
+ _0 (bad),
+ _0f (modrm_group_10, X86_INSN_FLAG_MODRM_REG_GROUP_10),
+ _2f (modrm_group_8, X86_INSN_FLAG_MODRM_REG_GROUP_8, Ev, Ib),
+ _2 (btc, Ev, Gv),
+ _2 (bsf, Gv, Ev),
+ _2 (bsr, Gv, Ev),
+ _2 (movsx, Gv, Eb),
+ _2 (movsx, Gv, Ew),
+
+ /* 0xc0 */
+ _2 (xadd, Eb, Gb),
+ _2 (xadd, Ev, Gv),
+ _3f (cmpps, X86_INSN_FLAG_SSE_GROUP_c0, Gx, Ex, Ib),
+ _2 (movnti, Mv, Gv),
+ _3f (pinsrw, X86_INSN_FLAG_SSE_GROUP_c0, Gm, Ew, Ib),
+ _3f (pextrw, X86_INSN_FLAG_SSE_GROUP_c0, Gd, Rm, Ib),
+ _3f (shufps, X86_INSN_FLAG_SSE_GROUP_c0, Gx, Ex, Ib),
+ _1f (modrm_group_9, X86_INSN_FLAG_MODRM_REG_GROUP_9, Mx),
+#define _(r) _1 (bswap, r),
+ foreach_x86_gp_reg
+#undef _
+
+ /* 0xd0 */
+ _0f (bad, X86_INSN_FLAG_SSE_GROUP_d0),
+ _2f (psrlw, X86_INSN_FLAG_SSE_GROUP_d0, Gm, Em),
+ _2f (psrld, X86_INSN_FLAG_SSE_GROUP_d0, Gm, Em),
+ _2f (psrlq, X86_INSN_FLAG_SSE_GROUP_d0, Gm, Em),
+ _2f (paddq, X86_INSN_FLAG_SSE_GROUP_d0, Gm, Em),
+ _2f (pmullw, X86_INSN_FLAG_SSE_GROUP_d0, Gm, Em),
+ _0f (bad, X86_INSN_FLAG_SSE_GROUP_d0),
+ _2f (pmovmskb, X86_INSN_FLAG_SSE_GROUP_d0, Gd, Rm),
+ _2f (psubusb, X86_INSN_FLAG_SSE_GROUP_d8, Gm, Em),
+ _2f (psubusw, X86_INSN_FLAG_SSE_GROUP_d8, Gm, Em),
+ _2f (pminub, X86_INSN_FLAG_SSE_GROUP_d8, Gm, Em),
+ _2f (pand, X86_INSN_FLAG_SSE_GROUP_d8, Gm, Em),
+ _2f (paddusb, X86_INSN_FLAG_SSE_GROUP_d8, Gm, Em),
+ _2f (paddusw, X86_INSN_FLAG_SSE_GROUP_d8, Gm, Em),
+ _2f (pmaxub, X86_INSN_FLAG_SSE_GROUP_d8, Gm, Em),
+ _2f (pandn, X86_INSN_FLAG_SSE_GROUP_d8, Gm, Em),
+
+ /* 0xe0 */
+ _2f (pavgb, X86_INSN_FLAG_SSE_GROUP_e0, Gm, Em),
+ _2f (psraw, X86_INSN_FLAG_SSE_GROUP_e0, Gm, Em),
+ _2f (psrad, X86_INSN_FLAG_SSE_GROUP_e0, Gm, Em),
+ _2f (pavgw, X86_INSN_FLAG_SSE_GROUP_e0, Gm, Em),
+ _2f (pmulhuw, X86_INSN_FLAG_SSE_GROUP_e0, Gm, Em),
+ _2f (pmulhw, X86_INSN_FLAG_SSE_GROUP_e0, Gm, Em),
+ _2f (bad, X86_INSN_FLAG_SSE_GROUP_e0, Gm, Em),
+ _2f (movntq, X86_INSN_FLAG_SSE_GROUP_e0, Mm, Gm),
+ _2f (psubsb, X86_INSN_FLAG_SSE_GROUP_e8, Gm, Em),
+ _2f (psubsw, X86_INSN_FLAG_SSE_GROUP_e8, Gm, Em),
+ _2f (pminsw, X86_INSN_FLAG_SSE_GROUP_e8, Gm, Em),
+ _2f (por, X86_INSN_FLAG_SSE_GROUP_e8, Gm, Em),
+ _2f (paddsb, X86_INSN_FLAG_SSE_GROUP_e8, Gm, Em),
+ _2f (paddsw, X86_INSN_FLAG_SSE_GROUP_e8, Gm, Em),
+ _2f (pmaxsw, X86_INSN_FLAG_SSE_GROUP_e8, Gm, Em),
+ _2f (pxor, X86_INSN_FLAG_SSE_GROUP_e8, Gm, Em),
+
+ /* 0xf0 */
+ _0f (bad, X86_INSN_FLAG_SSE_GROUP_f0),
+ _2f (psllw, X86_INSN_FLAG_SSE_GROUP_f0, Gm, Em),
+ _2f (pslld, X86_INSN_FLAG_SSE_GROUP_f0, Gm, Em),
+ _2f (psllq, X86_INSN_FLAG_SSE_GROUP_f0, Gm, Em),
+ _2f (pmuludq, X86_INSN_FLAG_SSE_GROUP_f0, Gm, Em),
+ _2f (pmaddwd, X86_INSN_FLAG_SSE_GROUP_f0, Gm, Em),
+ _2f (psadbw, X86_INSN_FLAG_SSE_GROUP_f0, Gm, Em),
+ _2f (maskmovq, X86_INSN_FLAG_SSE_GROUP_f0, Gm, Em),
+ _2f (psubb, X86_INSN_FLAG_SSE_GROUP_f8, Gm, Em),
+ _2f (psubw, X86_INSN_FLAG_SSE_GROUP_f8, Gm, Em),
+ _2f (psubd, X86_INSN_FLAG_SSE_GROUP_f8, Gm, Em),
+ _2f (psubq, X86_INSN_FLAG_SSE_GROUP_f8, Gm, Em),
+ _2f (paddb, X86_INSN_FLAG_SSE_GROUP_f8, Gm, Em),
+ _2f (paddw, X86_INSN_FLAG_SSE_GROUP_f8, Gm, Em),
+ _2f (paddd, X86_INSN_FLAG_SSE_GROUP_f8, Gm, Em),
+ _0f (bad, X86_INSN_FLAG_SSE_GROUP_f8),
+};
+
+typedef struct {
+ x86_insn_t insns[8];
+} x86_insn_group8_t;
+
+/* Escape groups are indexed by modrm reg field. */
+static x86_insn_group8_t x86_insn_modrm_reg_groups[] = {
+ [X86_INSN_MODRM_REG_GROUP_1].insns = {
+ _0 (add), _0 ( or), _0 (adc), _0 (sbb),
+ _0 (and), _0 (sub), _0 (xor), _0 (cmp),
+ },
+
+ [X86_INSN_MODRM_REG_GROUP_1a].insns = {
+ _0f (pop, X86_INSN_FLAG_DEFAULT_64_BIT),
+ _0 (bad), _0 (bad), _0 (bad),
+ _0 (bad), _0 (bad), _0 (bad), _0 (bad),
+ },
+
+ [X86_INSN_MODRM_REG_GROUP_2].insns = {
+ _0 (rol), _0 (ror), _0 (rcl), _0 (rcr),
+ _0 (shl), _0 (shr), _0 (sal), _0 (sar),
+ },
+
+ [X86_INSN_MODRM_REG_GROUP_3].insns = {
+ _0 (test), _0 (test), _0 (not), _0 (neg),
+ _0 (mul), _0 (imul), _0 (div), _0 (idiv),
+ },
+
+ [X86_INSN_MODRM_REG_GROUP_4].insns = {
+ _0 (inc), _0 (dec), _0 (bad), _0 (bad),
+ _0 (bad), _0 (bad), _0 (bad), _0 (bad),
+ },
+
+ [X86_INSN_MODRM_REG_GROUP_5].insns = {
+ _1 (inc, Ev),
+ _1 (dec, Ev),
+ _1f (call, X86_INSN_FLAG_DEFAULT_64_BIT, Ev),
+ _1 (call, Mp),
+ _1f (jmp, X86_INSN_FLAG_DEFAULT_64_BIT, Ev),
+ _1 (jmp, Mp),
+ _1f (push, X86_INSN_FLAG_DEFAULT_64_BIT, Ev),
+ _0 (bad),
+ },
+
+ [X86_INSN_MODRM_REG_GROUP_6].insns = {
+ _1 (sldt, Ev),
+ _1 (str, Ev),
+ _1 (lldt, Ev),
+ _1 (ltr, Ev),
+ _1 (verr, Ev),
+ _1 (verw, Ev),
+ _0 (bad),
+ _0 (bad),
+ },
+
+ [X86_INSN_MODRM_REG_GROUP_7].insns = {
+ _1 (sgdt, Mv),
+ _1 (sidt, Mv),
+ _1 (lgdt, Mv),
+ _1 (lidt, Mv),
+ _1 (smsw, Ev),
+ _0 (bad),
+ _1 (lmsw, Ew),
+ _1 (invlpg, Mv),
+ },
+
+ [X86_INSN_MODRM_REG_GROUP_8].insns = {
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _2 (bt, Ev, Ib),
+ _2 (bts, Ev, Ib),
+ _2 (btr, Ev, Ib),
+ _2 (btc, Ev, Ib),
+ },
+
+ [X86_INSN_MODRM_REG_GROUP_9].insns = {
+ _0 (bad),
+ _1 (cmpxchg, Mx),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ },
+
+ [X86_INSN_MODRM_REG_GROUP_10].insns = {
+ _0 (bad), _0 (bad), _0 (bad), _0 (bad),
+ _0 (bad), _0 (bad), _0 (bad), _0 (bad),
+ },
+
+ [X86_INSN_MODRM_REG_GROUP_11].insns = {
+ _0 (mov), _0 (bad), _0 (bad), _0 (bad),
+ _0 (bad), _0 (bad), _0 (bad), _0 (bad),
+ },
+
+ [X86_INSN_MODRM_REG_GROUP_12].insns = {
+ _0 (bad),
+ _0 (bad),
+ _2 (psrlw, Rm, Ib),
+ _0 (bad),
+ _2 (psraw, Rm, Ib),
+ _0 (bad),
+ _2 (psllw, Rm, Ib),
+ _0 (bad),
+ },
+
+ [X86_INSN_MODRM_REG_GROUP_13].insns = {
+ _0 (bad),
+ _0 (bad),
+ _2 (psrld, Rm, Ib),
+ _0 (bad),
+ _2 (psrad, Rm, Ib),
+ _0 (bad),
+ _2 (pslld, Rm, Ib),
+ _0 (bad),
+ },
+
+ [X86_INSN_MODRM_REG_GROUP_14].insns = {
+ _0 (bad),
+ _0 (bad),
+ _2 (psrlq, Rm, Ib),
+ _0f (bad, 0),
+ _0 (bad),
+ _0 (bad),
+ _2 (psllq, Rm, Ib),
+ _0f (bad, 0),
+ },
+
+ [X86_INSN_MODRM_REG_GROUP_15].insns = {
+ _1 (fxsave, Mv),
+ _1 (fxrstor, Mv),
+ _1 (ldmxcsr, Mv),
+ _1 (stmxcsr, Mv),
+ _0 (bad),
+ _1 (lfence, Mv),
+ _1 (mfence, Mv),
+ _1 (sfence, Mv),
+ },
+
+ [X86_INSN_MODRM_REG_GROUP_16].insns = {
+ _1 (prefetch_nta, Mv),
+ _1 (prefetch_t0, Mv),
+ _1 (prefetch_t1, Mv),
+ _1 (prefetch_t2, Mv),
+ _1 (prefetch_nop, Mv),
+ _1 (prefetch_nop, Mv),
+ _1 (prefetch_nop, Mv),
+ _1 (prefetch_nop, Mv),
+ },
+
+ [X86_INSN_MODRM_REG_GROUP_p].insns = {
+ _1 (prefetch_exclusive, Mv),
+ _1 (prefetch_modified, Mv),
+ _1 (prefetch_nop, Mv),
+ _1 (prefetch_modified, Mv),
+ _1 (prefetch_nop, Mv),
+ _1 (prefetch_nop, Mv),
+ _1 (prefetch_nop, Mv),
+ _1 (prefetch_nop, Mv),
+ },
+};
+
+static x86_insn_group8_t x86_insn_sse_groups_repz[] = {
+ [X86_INSN_SSE_GROUP_10].insns = {
+ _2 (movss, Gx, Ex),
+ _2 (movss, Ex, Gx),
+ _2 (movsldup, Gx, Ex),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _2 (movshdup, Gx, Ex),
+ _0 (bad),
+ },
+
+ [X86_INSN_SSE_GROUP_28].insns = {
+ _0 (bad),
+ _0 (bad),
+ _2 (cvtsi2ss, Gx, Ev),
+ _0 (bad),
+ _2 (cvttss2si, Gv, Ex),
+ _2 (cvtss2si, Gv, Ex),
+ _0 (bad),
+ _0 (bad),
+ },
+
+ [X86_INSN_SSE_GROUP_50].insns = {
+ _0 (bad),
+ _2 (sqrtss, Gx, Ex),
+ _2 (rsqrtps, Gx, Ex),
+ _2 (rcpss, Gx, Ex),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ },
+
+ [X86_INSN_SSE_GROUP_58].insns = {
+ _2 (addss, Gx, Ex),
+ _2 (mulss, Gx, Ex),
+ _2 (cvtss2sd, Gx, Ex),
+ _2 (cvttps2dq, Gx, Ex),
+ _2 (subss, Gx, Ex),
+ _2 (minss, Gx, Ex),
+ _2 (divss, Gx, Ex),
+ _2 (maxss, Gx, Ex),
+ },
+
+ [X86_INSN_SSE_GROUP_60].insns = {
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ },
+
+ [X86_INSN_SSE_GROUP_68].insns = {
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _2 (movdqu, Gx, Ex),
+ },
+
+ [X86_INSN_SSE_GROUP_70].insns = {
+ _3 (pshufhw, Gx, Ex, Ib),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ },
+
+ [X86_INSN_SSE_GROUP_78].insns = {
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _2 (movq, Gx, Ex),
+ _2 (movdqu, Ex, Gx),
+ },
+
+ [X86_INSN_SSE_GROUP_c0].insns = {
+ _0 (bad),
+ _0 (bad),
+ _3 (cmpss, Gx, Ex, Ib),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ },
+
+ [X86_INSN_SSE_GROUP_d0].insns = {
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _2 (movq2dq, Gx, Em),
+ _0 (bad),
+ },
+
+ [X86_INSN_SSE_GROUP_d8].insns = {
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ },
+
+ [X86_INSN_SSE_GROUP_e0].insns = {
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _2 (cvtdq2pd, Gx, Ex),
+ _0 (bad),
+ },
+
+ [X86_INSN_SSE_GROUP_e8].insns = {
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ },
+
+ [X86_INSN_SSE_GROUP_f0].insns = {
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ },
+
+ [X86_INSN_SSE_GROUP_f8].insns = {
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ },
+};
+
+static x86_insn_group8_t x86_insn_sse_groups_operand_size[] = {
+ [X86_INSN_SSE_GROUP_10].insns = {
+ _2 (movupd, Gx, Ex),
+ _2 (movupd, Ex, Gx),
+ _2 (movlpd, Gx, Ex),
+ _2 (movlpd, Ex, Gx),
+ _2 (unpcklpd, Gx, Ex),
+ _2 (unpckhpd, Gx, Ex),
+ _2 (movhpd, Gx, Mx),
+ _2 (movhpd, Mx, Gx),
+ },
+
+ [X86_INSN_SSE_GROUP_28].insns = {
+ _2 (movapd, Gx, Ex),
+ _2 (movapd, Ex, Gx),
+ _2 (cvtpi2pd, Gx, Ex),
+ _2 (movntpd, Mx, Gx),
+ _2 (cvttpd2pi, Gx, Mx),
+ _2 (cvtpd2pi, Gx, Mx),
+ _2 (ucomisd, Gx, Ex),
+ _2 (comisd, Gx, Ex),
+ },
+
+ [X86_INSN_SSE_GROUP_50].insns = {
+ _2 (movmskpd, Gd, Rx),
+ _2 (sqrtpd, Gx, Ex),
+ _0 (bad),
+ _0 (bad),
+ _2 (andpd, Gx, Ex),
+ _2 (andnpd, Gx, Ex),
+ _2 (orpd, Gx, Ex),
+ _2 (xorpd, Gx, Ex),
+ },
+
+ [X86_INSN_SSE_GROUP_58].insns = {
+ _2 (addpd, Gx, Ex),
+ _2 (mulpd, Gx, Ex),
+ _2 (cvtpd2ps, Gx, Ex),
+ _2 (cvtps2dq, Gx, Ex),
+ _2 (subpd, Gx, Ex),
+ _2 (minpd, Gx, Ex),
+ _2 (divpd, Gx, Ex),
+ _2 (maxpd, Gx, Ex),
+ },
+
+ [X86_INSN_SSE_GROUP_60].insns = {
+ _2 (punpcklbw, Gx, Ex),
+ _2 (punpcklwd, Gx, Ex),
+ _2 (punpckldq, Gx, Ex),
+ _2 (packsswb, Gx, Ex),
+ _2 (pcmpgtb, Gx, Ex),
+ _2 (pcmpgtw, Gx, Ex),
+ _2 (pcmpgtd, Gx, Ex),
+ _2 (packuswb, Gx, Ex),
+ },
+
+ [X86_INSN_SSE_GROUP_68].insns = {
+ _2 (punpckhbw, Gx, Ex),
+ _2 (punpckhwd, Gx, Ex),
+ _2 (punpckhdq, Gx, Ex),
+ _2 (packssdw, Gx, Ex),
+ _2 (punpcklqdq, Gx, Ex),
+ _2 (punpckhqdq, Gx, Ex),
+ _2 (movd, Gx, Ev),
+ _2 (movdqa, Gx, Ex),
+ },
+
+ [X86_INSN_SSE_GROUP_70].insns = {
+ _3 (pshufd, Gx, Ex, Ib),
+ _0f (modrm_group_12, X86_INSN_FLAG_MODRM_REG_GROUP_12),
+ _0f (modrm_group_13, X86_INSN_FLAG_MODRM_REG_GROUP_13),
+ _0f (modrm_group_14, X86_INSN_FLAG_MODRM_REG_GROUP_14),
+ _2 (pcmpeqb, Gx, Ex),
+ _2 (pcmpeqw, Gx, Ex),
+ _2 (pcmpeqd, Gx, Ex),
+ _0 (bad),
+ },
+
+ [X86_INSN_SSE_GROUP_78].insns = {
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _2 (haddpd, Gx, Ex),
+ _2 (hsubpd, Gx, Ex),
+ _2 (movd, Ev, Gx),
+ _2 (movdqa, Ex, Gx),
+ },
+
+ [X86_INSN_SSE_GROUP_c0].insns = {
+ _0 (bad),
+ _0 (bad),
+ _3 (cmppd, Gx, Ex, Ib),
+ _0 (bad),
+ _3 (pinsrw, Gx, Ew, Ib),
+ _3 (pextrw, Gd, Gx, Ib),
+ _3 (shufpd, Gx, Ex, Ib),
+ _0 (bad),
+ },
+
+ [X86_INSN_SSE_GROUP_d0].insns = {
+ _2 (addsubpd, Gx, Ex),
+ _2 (psrlw, Gx, Ex),
+ _2 (psrld, Gx, Ex),
+ _2 (psrlq, Gx, Ex),
+ _2 (paddq, Gx, Ex),
+ _2 (pmullw, Gx, Ex),
+ _2 (movq, Ex, Gx),
+ _2 (pmovmskb, Gd, Rx),
+ },
+
+ [X86_INSN_SSE_GROUP_d8].insns = {
+ _2 (psubusb, Gx, Ex),
+ _2 (psubusw, Gx, Ex),
+ _2 (pminub, Gx, Ex),
+ _2 (pand, Gx, Ex),
+ _2 (paddusb, Gx, Ex),
+ _2 (paddusw, Gx, Ex),
+ _2 (pmaxub, Gx, Ex),
+ _2 (pandn, Gx, Ex),
+ },
+
+ [X86_INSN_SSE_GROUP_e0].insns = {
+ _2 (pavgb, Gx, Ex),
+ _2 (psraw, Gx, Ex),
+ _2 (psrad, Gx, Ex),
+ _2 (pavgw, Gx, Ex),
+ _2 (pmulhuw, Gx, Ex),
+ _2 (pmulhw, Gx, Ex),
+ _2 (cvttpd2dq, Gx, Ex),
+ _2 (movntdq, Mx, Gx),
+ },
+
+ [X86_INSN_SSE_GROUP_e8].insns = {
+ _2 (psubsb, Gx, Ex),
+ _2 (psubsw, Gx, Ex),
+ _2 (pminsw, Gx, Ex),
+ _2 (por, Gx, Ex),
+ _2 (paddsb, Gx, Ex),
+ _2 (paddsw, Gx, Ex),
+ _2 (pmaxsw, Gx, Ex),
+ _2 (pxor, Gx, Ex),
+ },
+
+ [X86_INSN_SSE_GROUP_f0].insns = {
+ _0 (bad),
+ _2 (psllw, Gx, Ex),
+ _2 (pslld, Gx, Ex),
+ _2 (psllq, Gx, Ex),
+ _2 (pmuludq, Gx, Ex),
+ _2 (pmaddwd, Gx, Ex),
+ _2 (psadbw, Gx, Ex),
+ _2 (maskmovdqu, Gx, Ex),
+ },
+
+ [X86_INSN_SSE_GROUP_f8].insns = {
+ _2 (psubb, Gx, Ex),
+ _2 (psubw, Gx, Ex),
+ _2 (psubd, Gx, Ex),
+ _2 (psubq, Gx, Ex),
+ _2 (paddb, Gx, Ex),
+ _2 (paddw, Gx, Ex),
+ _2 (paddd, Gx, Ex),
+ _0 (bad),
+ },
+};
+
+static x86_insn_group8_t x86_insn_sse_groups_repnz[] = {
+ [X86_INSN_SSE_GROUP_10].insns = {
+ _2 (movsd, Gx, Ex),
+ _2 (movsd, Ex, Gx),
+ _2 (movddup, Gx, Ex),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ },
+
+ [X86_INSN_SSE_GROUP_28].insns = {
+ _0 (bad),
+ _0 (bad),
+ _2 (cvtsi2sd, Gx, Ev),
+ _0 (bad),
+ _2 (cvttsd2si, Gv, Ex),
+ _2 (cvtsd2si, Gv, Ex),
+ _0 (bad),
+ _0 (bad),
+ },
+
+ [X86_INSN_SSE_GROUP_50].insns = {
+ _0 (bad),
+ _2 (sqrtsd, Gx, Ex),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ },
+
+ [X86_INSN_SSE_GROUP_58].insns = {
+ _2 (addsd, Gx, Ex),
+ _2 (mulsd, Gx, Ex),
+ _2 (cvtsd2ss, Gx, Ex),
+ _0 (bad),
+ _2 (subsd, Gx, Ex),
+ _2 (minsd, Gx, Ex),
+ _2 (divsd, Gx, Ex),
+ _2 (maxsd, Gx, Ex),
+ },
+
+ [X86_INSN_SSE_GROUP_60].insns = {
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ },
+
+ [X86_INSN_SSE_GROUP_68].insns = {
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ },
+
+ [X86_INSN_SSE_GROUP_70].insns = {
+ _3 (pshuflw, Gx, Ex, Ib),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ },
+
+ [X86_INSN_SSE_GROUP_78].insns = {
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _2 (haddps, Gx, Ex),
+ _2 (hsubps, Gx, Ex),
+ _0 (bad),
+ _0 (bad),
+ },
+
+ [X86_INSN_SSE_GROUP_c0].insns = {
+ _0 (bad),
+ _0 (bad),
+ _3 (cmpsd, Gx, Ex, Ib),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ },
+
+ [X86_INSN_SSE_GROUP_d0].insns = {
+ _2 (addsubps, Gx, Ex),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _2 (movdq2q, Gm, Ex),
+ _0 (bad),
+ },
+
+ [X86_INSN_SSE_GROUP_d8].insns = {
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ },
+
+ [X86_INSN_SSE_GROUP_e0].insns = {
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _2 (cvtpd2dq, Gx, Ex),
+ _0 (bad),
+ },
+
+ [X86_INSN_SSE_GROUP_e8].insns = {
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ },
+
+ [X86_INSN_SSE_GROUP_f0].insns = {
+ _2 (lddqu, Gx, Mx),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ },
+
+ [X86_INSN_SSE_GROUP_f8].insns = {
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ _0 (bad),
+ },
+};
+
+#undef _
+
+/* Parses memory displacements and immediates. */
+static u8 * x86_insn_parse_number (u32 log2_n_bytes,
+ u8 * code, u8 * code_end,
+ i64 * result)
+{
+ i64 x = 0;
+
+ if (code + (1 << log2_n_bytes) > code_end)
+ return 0;
+
+ switch (log2_n_bytes)
+ {
+ case 3:
+ x = clib_little_to_host_unaligned_mem_u64 ((u64 *) code);
+ break;
+
+ case 2:
+ x = (i32) clib_little_to_host_unaligned_mem_u32 ((u32 *) code);
+ break;
+
+ case 1:
+ x = (i16) clib_little_to_host_unaligned_mem_u16 ((u16 *) code);
+ break;
+
+ case 0:
+ x = (i8) code[0];
+ break;
+
+ default:
+ ASSERT (0);
+ }
+
+ *result = x;
+ return code + (1 << log2_n_bytes);
+}
+
+static u32
+x86_insn_log2_immediate_bytes (x86_insn_parse_t * p, x86_insn_t * insn)
+{
+ u32 i = ~0;
+ switch (x86_insn_immediate_type (insn))
+ {
+ case 'b': i = 0; break;
+ case 'w': i = 1; break;
+ case 'd': i = 2; break;
+ case 'q': i = 3; break;
+
+ case 'z':
+ i = p->log2_effective_operand_bytes;
+ if (i > 2) i = 2;
+ break;
+
+ case 'v':
+ i = p->log2_effective_operand_bytes;
+ break;
+
+ default:
+ i = ~0;
+ break;
+ }
+
+ return i;
+}
+
+static u8 *
+x86_insn_parse_modrm_byte (x86_insn_parse_t * x,
+ x86_insn_modrm_byte_t modrm,
+ u32 parse_flags,
+ u8 * code,
+ u8 * code_end)
+{
+ u8 effective_address_bits;
+
+ if (parse_flags & X86_INSN_PARSE_64_BIT)
+ effective_address_bits = (x->flags & X86_INSN_ADDRESS_SIZE) ? 32 : 64;
+ else if (parse_flags & X86_INSN_PARSE_32_BIT)
+ effective_address_bits = (x->flags & X86_INSN_ADDRESS_SIZE) ? 16 : 32;
+ else
+ effective_address_bits = (x->flags & X86_INSN_ADDRESS_SIZE) ? 32 : 16;
+
+ x->log2_effective_address_bytes = 1;
+ x->log2_effective_address_bytes += effective_address_bits > 16;
+ x->log2_effective_address_bytes += effective_address_bits > 32;
+
+ x->regs[0] |= modrm.reg;
+ if (modrm.mode == 3)
+ x->regs[1] |= modrm.rm;
+ else
+ {
+ u32 log2_disp_bytes = ~0;
+
+ x->flags |= X86_INSN_IS_ADDRESS;
+
+ if (effective_address_bits != 16)
+ {
+ u8 has_sib_byte = 0;
+
+ switch (modrm.mode)
+ {
+ case 0:
+	      /* For mode 0, rm == BP means displacement only (no base register). */
+ if (modrm.rm == X86_INSN_GP_REG_BP)
+ {
+ log2_disp_bytes = x->log2_effective_address_bytes;
+ break;
+ }
+ else if (modrm.rm == X86_INSN_GP_REG_SP
+ && effective_address_bits != 16)
+ {
+ has_sib_byte = 1;
+ break;
+ }
+ /* fall through */
+ case 1:
+ case 2:
+ x->regs[1] |= modrm.rm;
+ x->flags |= X86_INSN_HAS_BASE;
+ if (modrm.mode != 0)
+ {
+ log2_disp_bytes = (modrm.mode == 1
+ ? 0
+ : x->log2_effective_address_bytes);
+ if (log2_disp_bytes > 2)
+ log2_disp_bytes = 2;
+ }
+ break;
+ }
+
+ if (has_sib_byte)
+ {
+ x86_insn_sib_byte_t sib;
+
+ if (code >= code_end)
+ return 0;
+ sib.byte = *code++;
+
+ x->log2_index_scale = sib.log2_scale;
+ x->regs[1] |= sib.base;
+ x->flags |= X86_INSN_HAS_BASE;
+
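+ /* An index field of sp (100b) encodes "no index register". */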
+ if (sib.index != X86_INSN_GP_REG_SP)
+ {
+ x->regs[2] |= sib.index;
+ x->flags |= X86_INSN_HAS_INDEX;
+ }
+ }
+ }
+ else
+ {
+ /* effective_address_bits == 16 */
+ switch (modrm.mode)
+ {
+ case 0:
+ if (modrm.rm == 6)
+ {
+ /* [disp16] */
+ log2_disp_bytes = 1;
+ break;
+ }
+ /* fall through */
+ case 1:
+ case 2:
+ switch (modrm.rm)
+ {
+ case 0: /* [bx + si/di] */
+ case 1:
+ x->regs[1] = X86_INSN_GP_REG_BX;
+ x->regs[2] = X86_INSN_GP_REG_SI + (modrm.rm & 1);
+ x->flags |= X86_INSN_HAS_BASE | X86_INSN_HAS_INDEX;
+ break;
+
+ case 2: /* [bp + si/di] */
+ case 3:
+ x->regs[1] = X86_INSN_GP_REG_BP;
+ x->regs[2] = X86_INSN_GP_REG_SI + (modrm.rm & 1);
+ x->flags |= X86_INSN_HAS_BASE | X86_INSN_HAS_INDEX;
+ break;
+
+ case 4: /* [si/di] */
+ case 5:
+ x->regs[1] = X86_INSN_GP_REG_SI + (modrm.rm & 1);
+ x->flags |= X86_INSN_HAS_BASE;
+ break;
+
+ case 6: /* [bp + disp] */
+ x->regs[1] = X86_INSN_GP_REG_BP;
+ x->flags |= X86_INSN_HAS_BASE;
+ break;
+
+ case 7: /* [bx + disp] */
+ x->regs[1] = X86_INSN_GP_REG_BX;
+ x->flags |= X86_INSN_HAS_BASE;
+ break;
+ }
+
+ if (modrm.mode != 0)
+ log2_disp_bytes = modrm.mode == 1 ? 0 : 1;
+ break;
+ }
+ }
+
+ if (log2_disp_bytes != ~0)
+ {
+ i64 disp;
+ code = x86_insn_parse_number (log2_disp_bytes, code, code_end,
+ &disp);
+ if (code)
+ x->displacement = disp;
+ }
+ }
+
+ return code;
+}
+
+u8 * x86_insn_parse (x86_insn_parse_t * p, u8 * code_start)
+{
+ u8 i, * code, * code_end;
+ x86_insn_t * insn, * group_insn;
+ u8 default_operand_bits, effective_operand_bits;
+ u32 opcode, parse_flags;
+
+ /* Preserve global parse flags. */
+ parse_flags = p->flags & (X86_INSN_PARSE_32_BIT | X86_INSN_PARSE_64_BIT);
+ memset (p, 0, sizeof (p[0]));
+ p->flags = parse_flags;
+
+ /* 64 implies 32 bit parsing. */
+ if (parse_flags & X86_INSN_PARSE_64_BIT)
+ parse_flags |= X86_INSN_PARSE_32_BIT;
+
+ /* Instruction must be <= 15 bytes. */
+ code = code_start;
+ code_end = code + 15;
+
+ /* Parse legacy prefixes. */
+ while (1)
+ {
+ if (code >= code_end)
+ goto insn_too_long;
+ i = code[0];
+ code++;
+ switch (i)
+ {
+ default: goto prefix_done;
+
+ /* Set flags based on prefix. */
+#define _(x,o) case o: p->flags |= X86_INSN_##x; break;
+ foreach_x86_legacy_prefix;
+#undef _
+ }
+ }
+ prefix_done:
+
+ /* REX prefix. */
+ if ((parse_flags & X86_INSN_PARSE_64_BIT) && i >= 0x40 && i <= 0x4f)
+ {
+ p->regs[0] |= ((i & (1 << 2)) != 0) << 3; /* r bit */
+ p->regs[1] |= ((i & (1 << 0)) != 0) << 3; /* b bit */
+ p->regs[2] |= ((i & (1 << 1)) != 0) << 3; /* x bit */
+ p->flags |= ((i & (1 << 3)) /* w bit */
+ ? X86_INSN_OPERAND_SIZE_64 : 0);
+ if (code >= code_end)
+ goto insn_too_long;
+ i = *code++;
+ }
+
+ opcode = i;
+ if (opcode == 0x0f)
+ {
+ /* Two byte opcode. */
+ if (code >= code_end)
+ goto insn_too_long;
+ i = *code++;
+ opcode = (opcode << 8) | i;
+ insn = x86_insns_two_byte + i;
+ }
+ else
+ {
+ static x86_insn_t arpl = {
+ .name = "arpl",
+ .operands[0].data = "Ew",
+ .operands[1].data = "Gw",
+ };
+
+ if (PREDICT_FALSE (i == 0x63
+ && ! (parse_flags & X86_INSN_PARSE_64_BIT)))
+ insn = &arpl;
+ else
+ insn = x86_insns_one_byte + i;
+ }
+
+ if ((i = X86_INSN_FLAG_GET_SSE_GROUP (insn->flags)) != 0)
+ {
+ x86_insn_group8_t * g8;
+
+ if (p->flags & X86_INSN_OPERAND_SIZE)
+ g8 = x86_insn_sse_groups_operand_size;
+ else if (p->flags & X86_INSN_REPZ)
+ g8 = x86_insn_sse_groups_repz;
+ else if (p->flags & X86_INSN_REPNZ)
+ g8 = x86_insn_sse_groups_repnz;
+ else
+ g8 = 0;
+
+ /* insn flags store 1 + group index, so the != 0 test above works. */
+ ASSERT ((i - 1) < ARRAY_LEN (x86_insn_sse_groups_operand_size));
+ if (g8)
+ insn = g8[i - 1].insns + (opcode & 7);
+ }
+
+ /* Parse modrm and displacement if present. */
+ if (x86_insn_has_modrm_byte (insn))
+ {
+ x86_insn_modrm_byte_t modrm;
+
+ if (code >= code_end)
+ goto insn_too_long;
+ modrm.byte = *code++;
+
+ /* Handle special 0x0f01 and 0x0fae encodings. */
+ if (PREDICT_FALSE (modrm.mode == 3
+ && (opcode == 0x0f01
+ || opcode == 0x0fae)))
+ {
+ static x86_insn_t x86_insns_0f01_special[] = {
+ _0 (swapgs), _0 (rdtscp), _0 (bad), _0 (bad),
+ _0 (bad), _0 (bad), _0 (bad), _0 (bad),
+ };
+ static x86_insn_t x86_insns_0fae_special[] = {
+ _0 (vmrun), _0 (vmmcall), _0 (vmload), _0 (vmsave),
+ _0 (stgi), _0 (clgi), _0 (skinit), _0 (invlpga),
+ };
+
+ if (opcode == 0x0f01)
+ insn = x86_insns_0f01_special;
+ else
+ insn = x86_insns_0fae_special;
+ insn += modrm.rm;
+ opcode = (opcode << 8) | modrm.byte;
+ }
+ else
+ {
+ code = x86_insn_parse_modrm_byte (p, modrm, parse_flags,
+ code, code_end);
+ if (! code)
+ goto insn_too_long;
+ }
+ }
+
+ group_insn = 0;
+ if ((i = X86_INSN_FLAG_GET_MODRM_REG_GROUP (insn->flags)) != 0)
+ {
+ u32 g = i - 1;
+ ASSERT (g < ARRAY_LEN (x86_insn_modrm_reg_groups));
+ group_insn = x86_insn_modrm_reg_groups[g].insns + (p->regs[0] & 7);
+ }
+
+ p->insn = insn[0];
+ if (group_insn)
+ {
+ u32 k;
+ p->insn.name = group_insn->name;
+ p->insn.flags |= group_insn->flags;
+ for (k = 0; k < ARRAY_LEN (group_insn->operands); k++)
+ if (x86_insn_operand_is_valid (group_insn, k))
+ p->insn.operands[k] = group_insn->operands[k];
+ }
+
+ default_operand_bits
+ = ((((parse_flags & X86_INSN_PARSE_32_BIT) != 0)
+ ^ ((p->flags & X86_INSN_OPERAND_SIZE) != 0))
+ ? BITS (u32) : BITS (u16));
+
+ if ((parse_flags & X86_INSN_PARSE_64_BIT)
+ && (p->insn.flags & X86_INSN_FLAG_DEFAULT_64_BIT))
+ default_operand_bits = BITS (u64);
+
+ effective_operand_bits = default_operand_bits;
+ if (p->flags & X86_INSN_OPERAND_SIZE_64)
+ effective_operand_bits = BITS (u64);
+
+ p->log2_effective_operand_bytes = 1;
+ p->log2_effective_operand_bytes += effective_operand_bits > 16;
+ p->log2_effective_operand_bytes += effective_operand_bits > 32;
+
+ /* Parse immediate if present. */
+ {
+ u32 l = x86_insn_log2_immediate_bytes (p, insn);
+ if (l <= 3)
+ {
+ code = x86_insn_parse_number (l, code, code_end, &p->immediate);
+ if (! code)
+ goto insn_too_long;
+ }
+ }
+
+ return code;
+
+ insn_too_long:
+ return 0;
+}
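+
+/* Usage sketch (a minimal example; the code bytes are illustrative):
+
+     x86_insn_parse_t p = { .flags = X86_INSN_PARSE_64_BIT };
+     u8 code[] = { 0x48, 0x89, 0xd8 };   // mov %rbx, %rax
+     u8 *next = x86_insn_parse (&p, code);
+     if (next)
+       fformat (stdout, "%U\n", format_x86_insn_parse, &p);
+*/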
+
+static u8 * format_x86_gp_reg_operand (u8 * s, va_list * va)
+{
+ u32 r = va_arg (*va, u32);
+ u32 log2_n_bytes = va_arg (*va, u32);
+
+ const char names8[8] = "acdbsbsd";
+ const char names16[8] = "xxxxppii";
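+ /* Paired per register number, names8[r] and names16[r] spell
+    ax, cx, dx, bx, sp, bp, si, di. */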
+
+ ASSERT (r < 16);
+
+ /* Add % register prefix. */
+ vec_add1 (s, '%');
+
+ switch (log2_n_bytes)
+ {
+ case 0:
+ {
+ if (r < 8)
+ s = format (s, "%c%c", names8[r & 3], (r >> 2) ? 'h' : 'l');
+ else
+ s = format (s, "r%db", r);
+ }
+ break;
+
+ case 2:
+ case 3:
+ s = format (s, "%c", log2_n_bytes == 2 ? 'e' : 'r');
+ /* fall through */
+ case 1:
+ if (r < 8)
+ s = format (s, "%c%c", names8[r], names16[r]);
+ else
+ {
+ s = format (s, "%d", r);
+ if (log2_n_bytes != 3)
+ s = format (s, "%c", log2_n_bytes == 1 ? 'w' : 'd');
+ }
+ break;
+
+ default:
+ ASSERT (0);
+ }
+
+ return s;
+}
+
+static u8 * format_x86_reg_operand (u8 * s, va_list * va)
+{
+ u32 reg = va_arg (*va, u32);
+ u32 log2_n_bytes = va_arg (*va, u32);
+ u32 type = va_arg (*va, u32);
+
+ switch (type)
+ {
+ default:
+ ASSERT (0);
+ break;
+
+ case 'x':
+ ASSERT (reg < 16);
+ return format (s, "%%xmm%d", reg);
+
+ case 'm':
+ ASSERT (reg < 8);
+ return format (s, "%%mm%d", reg);
+
+ /* Explicit byte/word/double-word/quad-word */
+ case 'b': log2_n_bytes = 0; break;
+ case 'w': log2_n_bytes = 1; break;
+ case 'd': log2_n_bytes = 2; break;
+ case 'q': log2_n_bytes = 3; break;
+
+ /* Use effective operand size. */
+ case 'v': break;
+
+ /* word or double-word depending on effective operand size. */
+ case 'z':
+ log2_n_bytes = clib_min (log2_n_bytes, 2);
+ break;
+ }
+
+ s = format (s, "%U", format_x86_gp_reg_operand, reg, log2_n_bytes);
+ return s;
+}
+
+static u8 * format_x86_mem_operand (u8 * s, va_list * va)
+{
+ x86_insn_parse_t * p = va_arg (*va, x86_insn_parse_t *);
+
+ if (p->displacement < 0)
+ s = format (s, "-0x%x", -p->displacement);
+ else if (p->displacement > 0)
+ s = format (s, "0x%x", p->displacement);
+
+ if (p->flags & X86_INSN_HAS_BASE)
+ {
+ s = format (s, "(%U",
+ format_x86_gp_reg_operand, p->regs[1],
+ p->log2_effective_address_bytes);
+ if (p->flags & X86_INSN_HAS_INDEX)
+ {
+ s = format (s, ",%U",
+ format_x86_gp_reg_operand, p->regs[2],
+ p->log2_effective_address_bytes);
+ if (p->log2_index_scale != 0)
+ s = format (s, ",%d", 1 << p->log2_index_scale);
+ }
+ s = format (s, ")");
+ }
+
+ /* [RIP+disp] PC relative addressing in 64 bit mode. */
+ else if (p->flags & X86_INSN_PARSE_64_BIT)
+ s = format (s, "(%%rip)");
+
+ return s;
+}
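+
+/* E.g. base rax, index rbx, scale 4, displacement 8 formats as
+   0x8(%rax,%rbx,4). */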
+
+static u8 * format_x86_insn_operand (u8 * s, va_list * va)
+{
+ x86_insn_parse_t * p = va_arg (*va, x86_insn_parse_t *);
+ x86_insn_t * insn = &p->insn;
+ u32 o = va_arg (*va, u32);
+ u8 c, t;
+
+ ASSERT (o < ARRAY_LEN (insn->operands));
+ c = insn->operands[o].code;
+ t = insn->operands[o].type;
+
+ /* Register encoded in instruction. */
+ if (c < 8)
+ return format (s, "%U",
+ format_x86_gp_reg_operand, c,
+ p->log2_effective_operand_bytes);
+
+ switch (c)
+ {
+ /* Memory or reg field from modrm byte. */
+ case 'M':
+ ASSERT (p->flags & X86_INSN_IS_ADDRESS);
+ /* FALLTHROUGH */
+ case 'E':
+ if (p->flags & X86_INSN_IS_ADDRESS)
+ s = format (s, "%U", format_x86_mem_operand, p);
+ else
+ s = format (s, "%U",
+ format_x86_reg_operand, p->regs[1],
+ p->log2_effective_operand_bytes, t);
+ break;
+
+ /* reg field from modrm byte. */
+ case 'R':
+ case 'G':
+ s = format (s, "%U",
+ format_x86_reg_operand, p->regs[0],
+ p->log2_effective_operand_bytes, t);
+ break;
+
+ case 'I':
+ {
+ u32 l = x86_insn_log2_immediate_bytes (p, insn);
+ i64 mask = pow2_mask (8ULL << l);
+ s = format (s, "$0x%Lx", p->immediate & mask);
+ }
+ break;
+
+ case 'J':
+ if (p->immediate < 0)
+ s = format (s, "- 0x%Lx", -p->immediate);
+ else
+ s = format (s, "+ 0x%Lx", p->immediate);
+ break;
+
+ case 'O':
+ s = format (s, "0x%Lx", p->immediate);
+ break;
+
+ case 'A':
+ /* AX/AL */
+ s = format (s, "%U",
+ format_x86_gp_reg_operand, X86_INSN_GP_REG_AX,
+ t == 'L' ? 0 : p->log2_effective_operand_bytes);
+ break;
+
+ case 'B':
+ /* BX/BL/BP */
+ s = format (s, "%U",
+ format_x86_gp_reg_operand,
+ t == 'P' ? X86_INSN_GP_REG_BP : X86_INSN_GP_REG_BX,
+ t == 'L' ? 0 : p->log2_effective_operand_bytes);
+ break;
+
+ case 'C':
+ /* CX/CL */
+ s = format (s, "%U",
+ format_x86_gp_reg_operand, X86_INSN_GP_REG_CX,
+ t == 'L' ? 0 : p->log2_effective_operand_bytes);
+ break;
+
+ case 'D':
+ /* DX/DL/DI */
+ s = format (s, "%U",
+ format_x86_gp_reg_operand,
+ t == 'I' ? X86_INSN_GP_REG_DI : X86_INSN_GP_REG_DX,
+ t == 'L' ? 0 : p->log2_effective_operand_bytes);
+ break;
+
+ case 'S':
+ /* SI/SP */
+ s = format (s, "%U",
+ format_x86_gp_reg_operand,
+ t == 'I' ? X86_INSN_GP_REG_SI : X86_INSN_GP_REG_SP,
+ p->log2_effective_operand_bytes);
+ break;
+
+ case '1':
+ s = format (s, "1");
+ break;
+
+ default:
+ ASSERT (0);
+ }
+
+ return s;
+}
+
+u8 * format_x86_insn_parse (u8 * s, va_list * va)
+{
+ x86_insn_parse_t * p = va_arg (*va, x86_insn_parse_t *);
+ x86_insn_t * insn = &p->insn;
+ u32 o, i, is_src_dst;
+
+ s = format (s, "%s", insn->name);
+
+ if (! x86_insn_operand_is_valid (insn, 0))
+ goto done;
+
+ is_src_dst = x86_insn_operand_is_valid (insn, 1);
+
+ /* If the instruction has an immediate operand and a memory
+ destination, add a size suffix (b/w/l/q) to the opcode name,
+ since the operand size would otherwise be ambiguous. */
+ if (is_src_dst)
+ {
+ u32 b;
+
+ b = x86_insn_log2_immediate_bytes (p, insn);
+ if (b < p->log2_effective_operand_bytes
+ && (p->flags & X86_INSN_IS_ADDRESS))
+ s = format (s, "%c", "bwlq"[b]);
+ }
+
+ for (i = 0; i < ARRAY_LEN (insn->operands); i++)
+ {
+ o = is_src_dst + i;
+ if (! x86_insn_operand_is_valid (insn, o))
+ break;
+ s = format (s, "%s%U",
+ i == 0 ? " " : ", ",
+ format_x86_insn_operand, p, o);
+ }
+
+ if (is_src_dst)
+ s = format (s, ", %U",
+ format_x86_insn_operand, p, 0);
+
+ done:
+ return s;
+}
diff --git a/src/vppinfra/asm_x86.h b/src/vppinfra/asm_x86.h
new file mode 100644
index 00000000..dacef617
--- /dev/null
+++ b/src/vppinfra/asm_x86.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef included_asm_x86_h
+#define included_asm_x86_h
+
+#include <vppinfra/format.h>
+
+typedef union
+{
+ struct
+ {
+ u8 code;
+ u8 type;
+ };
+ u8 data[2];
+} x86_insn_operand_t;
+
+typedef struct
+{
+ /* Instruction name. */
+ char *name;
+
+ /* X86 instructions may have up to 3 operands. */
+ x86_insn_operand_t operands[3];
+
+ u16 flags;
+#define X86_INSN_FLAG_DEFAULT_64_BIT (1 << 0)
+#define X86_INSN_FLAG_SET_SSE_GROUP(n) ((n) << 5)
+#define X86_INSN_FLAG_GET_SSE_GROUP(f) (((f) >> 5) & 0x1f)
+#define X86_INSN_FLAG_SET_MODRM_REG_GROUP(n) (((n) & 0x3f) << 10)
+#define X86_INSN_FLAG_GET_MODRM_REG_GROUP(f) (((f) >> 10) & 0x3f)
+} x86_insn_t;
+
+always_inline uword
+x86_insn_operand_is_valid (x86_insn_t * i, uword o)
+{
+ ASSERT (o < ARRAY_LEN (i->operands));
+ return i->operands[o].code != '_';
+}
+
+#define foreach_x86_legacy_prefix \
+ _ (OPERAND_SIZE, 0x66) \
+ _ (ADDRESS_SIZE, 0x67) \
+ _ (SEGMENT_CS, 0x2e) \
+ _ (SEGMENT_DS, 0x3e) \
+ _ (SEGMENT_ES, 0x26) \
+ _ (SEGMENT_FS, 0x64) \
+ _ (SEGMENT_GS, 0x65) \
+ _ (SEGMENT_SS, 0x36) \
+ _ (LOCK, 0xf0) \
+ _ (REPZ, 0xf3) \
+ _ (REPNZ, 0xf2)
+
+#define foreach_x86_insn_parse_flag \
+ /* Parse in 32/64-bit mode. */ \
+ _ (PARSE_32_BIT, 0) \
+ _ (PARSE_64_BIT, 0) \
+ _ (IS_ADDRESS, 0) \
+ /* regs[1/2] is a valid base/index register */ \
+ _ (HAS_BASE, 0) \
+ _ (HAS_INDEX, 0) \
+ /* rex w bit */ \
+ _ (OPERAND_SIZE_64, 0)
+
+typedef enum
+{
+#define _(f,o) X86_INSN_FLAG_BIT_##f,
+ foreach_x86_insn_parse_flag foreach_x86_legacy_prefix
+#undef _
+} x86_insn_parse_flag_bit_t;
+
+typedef enum
+{
+#define _(f,o) X86_INSN_##f = 1 << X86_INSN_FLAG_BIT_##f,
+ foreach_x86_insn_parse_flag foreach_x86_legacy_prefix
+#undef _
+} x86_insn_parse_flag_t;
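+
+/* E.g. the OPERAND_SIZE prefix entry expands to
+   X86_INSN_FLAG_BIT_OPERAND_SIZE above, and here to
+   X86_INSN_OPERAND_SIZE = 1 << X86_INSN_FLAG_BIT_OPERAND_SIZE. */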
+
+typedef struct
+{
+ /* Registers in instruction.
+ [0] is modrm reg field
+ [1] is base reg
+ [2] is index reg. */
+ u8 regs[3];
+
+ /* Scale for index register. */
+ u8 log2_index_scale:2;
+ u8 log2_effective_operand_bytes:3;
+ u8 log2_effective_address_bytes:3;
+
+ i32 displacement;
+
+ /* Parser flags: set of x86_insn_parse_flag_t enums. */
+ u32 flags;
+
+ i64 immediate;
+
+ x86_insn_t insn;
+} x86_insn_parse_t;
+
+u8 *x86_insn_parse (x86_insn_parse_t * p, u8 * code_start);
+format_function_t format_x86_insn_parse;
+
+#endif /* included_asm_x86_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/backtrace.c b/src/vppinfra/backtrace.c
new file mode 100644
index 00000000..bbfb792c
--- /dev/null
+++ b/src/vppinfra/backtrace.c
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2004 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/clib.h>
+#include <vppinfra/error.h>
+
+#ifdef __mips__
+
+/* Let code below know we've defined _clib_backtrace */
+#define clib_backtrace_defined
+
+#include <vppinfra/asm_mips.h>
+
+uword
+clib_backtrace (uword * callers, uword max_callers, uword n_frames_to_skip)
+{
+ u32 *pc;
+ void *sp;
+ uword i, saved_pc;
+
+ /* Figure current PC, saved PC and stack pointer. */
+ asm volatile (".set push\n"
+ ".set noat\n" "move %[saved_pc], $31\n" "move %[sp], $29\n"
+ /* Fetches current PC. */
+ "la $at, 1f\n"
+ "jalr %[pc], $at\n"
+ "nop\n"
+ "1:\n"
+ ".set pop\n":[pc] "=r" (pc),
+ [saved_pc] "=r" (saved_pc),[sp] "=r" (sp));
+
+ /* Also skip current frame. */
+ n_frames_to_skip += 1;
+
+ for (i = 0; i < max_callers + n_frames_to_skip; i++)
+ {
+ mips_insn_opcode_t op;
+ mips_insn_special_funct_t funct;
+ i32 insn, rs, rt, rd, immediate, found_saved_pc;
+ u32 *start_pc;
+
+ /* Parse instructions backwards until we reach the prologue for
+ this stack frame. We'll need to figure out where the saved
+ PC is and where the previous stack frame lives. */
+ start_pc = pc;
+ found_saved_pc = 0;
+ while (1)
+ {
+ insn = *--pc;
+ op = mips_insn_get_op (insn);
+ funct = mips_insn_get_funct (insn);
+ rs = mips_insn_get_rs (insn);
+ rt = mips_insn_get_rt (insn);
+ rd = mips_insn_get_rd (insn);
+ immediate = mips_insn_get_immediate (insn);
+
+ switch (op)
+ {
+ default:
+ break;
+
+ case MIPS_OPCODE_sd:
+ case MIPS_OPCODE_sw:
+ /* Trace stores of return address. */
+ if (rt == MIPS_REG_RA)
+ {
+ void *addr = sp + immediate;
+
+ /* If RA is stored somewhere other than in the
+ stack frame, give up. */
+ if (rs != MIPS_REG_SP)
+ goto backtrace_done;
+
+ ASSERT (immediate % 4 == 0);
+ if (op == MIPS_OPCODE_sw)
+ saved_pc = ((u32 *) addr)[0];
+ else
+ saved_pc = ((u64 *) addr)[0];
+ found_saved_pc = 1;
+ }
+ break;
+
+ case MIPS_OPCODE_addiu:
+ case MIPS_OPCODE_daddiu:
+ case MIPS_OPCODE_addi:
+ case MIPS_OPCODE_daddi:
+ if (rt == MIPS_REG_SP)
+ {
+ if (rs != MIPS_REG_SP)
+ goto backtrace_done;
+
+ ASSERT (immediate % 4 == 0);
+
+ /* Assume positive offset is part of the epilogue.
+ E.g.
+ jr ra
+ add sp,sp,100
+ */
+ if (immediate > 0)
+ continue;
+
+ /* Negative offset means allocate stack space.
+ This could either be the prologue or could be due to
+ alloca. */
+ sp -= immediate;
+
+ /* This frame will not save RA. */
+ if (i == 0)
+ goto found_prologue;
+
+ /* Assume that addiu sp,sp,-N without store of ra means
+ that we have not found the prologue yet. */
+ if (found_saved_pc)
+ goto found_prologue;
+ }
+ break;
+
+ case MIPS_OPCODE_slti:
+ case MIPS_OPCODE_sltiu:
+ case MIPS_OPCODE_andi:
+ case MIPS_OPCODE_ori:
+ case MIPS_OPCODE_xori:
+ case MIPS_OPCODE_lui:
+ case MIPS_OPCODE_ldl:
+ case MIPS_OPCODE_ldr:
+ case MIPS_OPCODE_lb:
+ case MIPS_OPCODE_lh:
+ case MIPS_OPCODE_lwl:
+ case MIPS_OPCODE_lw:
+ case MIPS_OPCODE_lbu:
+ case MIPS_OPCODE_lhu:
+ case MIPS_OPCODE_lwr:
+ case MIPS_OPCODE_lwu:
+ case MIPS_OPCODE_ld:
+ /* Give up when we find anyone setting the stack pointer. */
+ if (rt == MIPS_REG_SP)
+ goto backtrace_done;
+ break;
+
+ case MIPS_OPCODE_SPECIAL:
+ if (rd == MIPS_REG_SP)
+ switch (funct)
+ {
+ default:
+ /* Give up when we find anyone setting the stack pointer. */
+ goto backtrace_done;
+
+ case MIPS_SPECIAL_FUNCT_break:
+ case MIPS_SPECIAL_FUNCT_jr:
+ case MIPS_SPECIAL_FUNCT_sync:
+ case MIPS_SPECIAL_FUNCT_syscall:
+ case MIPS_SPECIAL_FUNCT_tge:
+ case MIPS_SPECIAL_FUNCT_tgeu:
+ case MIPS_SPECIAL_FUNCT_tlt:
+ case MIPS_SPECIAL_FUNCT_tltu:
+ case MIPS_SPECIAL_FUNCT_teq:
+ case MIPS_SPECIAL_FUNCT_tne:
+ /* These instructions can validly have rd == MIPS_REG_SP */
+ break;
+ }
+ break;
+ }
+ }
+
+ found_prologue:
+ /* Check sanity of saved pc. */
+ if (saved_pc & 3)
+ goto backtrace_done;
+ if (saved_pc == 0)
+ goto backtrace_done;
+
+ if (i >= n_frames_to_skip)
+ callers[i - n_frames_to_skip] = saved_pc;
+ pc = uword_to_pointer (saved_pc, u32 *);
+ }
+
+backtrace_done:
+ if (i < n_frames_to_skip)
+ return 0;
+ else
+ return i - n_frames_to_skip;
+}
+#endif /* __mips__ */
+
+#ifndef clib_backtrace_defined
+#define clib_backtrace_defined
+
+typedef struct clib_generic_stack_frame_t
+{
+ struct clib_generic_stack_frame_t *prev;
+ void *return_address;
+} clib_generic_stack_frame_t;
+
+/* This will only work if we have a frame pointer.
+ Without a frame pointer we have to parse the machine code to
+ parse the stack frames. */
+uword
+clib_backtrace (uword * callers, uword max_callers, uword n_frames_to_skip)
+{
+ clib_generic_stack_frame_t *f;
+ uword i;
+
+ f = __builtin_frame_address (0);
+
+ /* Also skip current frame. */
+ n_frames_to_skip += 1;
+
+ for (i = 0; i < max_callers + n_frames_to_skip; i++)
+ {
+ f = f->prev;
+ if (!f)
+ goto backtrace_done;
+ if (clib_abs ((void *) f - (void *) f->prev) > (64 * 1024))
+ goto backtrace_done;
+ if (i >= n_frames_to_skip)
+ callers[i - n_frames_to_skip] = pointer_to_uword (f->return_address);
+ }
+
+backtrace_done:
+ if (i < n_frames_to_skip)
+ return 0;
+ else
+ return i - n_frames_to_skip;
+}
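+
+/* Usage sketch: capture up to 32 return addresses, skipping no extra
+   frames beyond this call itself:
+
+     uword callers[32];
+     uword n = clib_backtrace (callers, ARRAY_LEN (callers), 0);
+*/
+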
+#endif /* clib_backtrace_defined */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/bihash_16_8.h b/src/vppinfra/bihash_16_8.h
new file mode 100644
index 00000000..361665be
--- /dev/null
+++ b/src/vppinfra/bihash_16_8.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#undef BIHASH_TYPE
+#undef BIHASH_KVP_CACHE_SIZE
+#undef BIHASH_KVP_PER_PAGE
+
+#define BIHASH_TYPE _16_8
+#define BIHASH_KVP_PER_PAGE 4
+#define BIHASH_KVP_CACHE_SIZE 5
+
+#ifndef __included_bihash_16_8_h__
+#define __included_bihash_16_8_h__
+
+#include <vppinfra/heap.h>
+#include <vppinfra/format.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/xxhash.h>
+#include <vppinfra/crc32.h>
+
+typedef struct
+{
+ u64 key[2];
+ u64 value;
+} clib_bihash_kv_16_8_t;
+
+static inline int
+clib_bihash_is_free_16_8 (clib_bihash_kv_16_8_t * v)
+{
+ /* Free values are memset to 0xff, check a bit... */
+ if (v->key[0] == ~0ULL && v->value == ~0ULL)
+ return 1;
+ return 0;
+}
+
+static inline u64
+clib_bihash_hash_16_8 (clib_bihash_kv_16_8_t * v)
+{
+#ifdef clib_crc32c_uses_intrinsics
+ return clib_crc32c ((u8 *) v->key, 16);
+#else
+ u64 tmp = v->key[0] ^ v->key[1];
+ return clib_xxhash (tmp);
+#endif
+}
+
+static inline u8 *
+format_bihash_kvp_16_8 (u8 * s, va_list * args)
+{
+ clib_bihash_kv_16_8_t *v = va_arg (*args, clib_bihash_kv_16_8_t *);
+
+ s = format (s, "key %llu %llu value %llu", v->key[0], v->key[1], v->value);
+ return s;
+}
+
+static inline int
+clib_bihash_key_compare_16_8 (u64 * a, u64 * b)
+{
+ return ((a[0] ^ b[0]) | (a[1] ^ b[1])) == 0;
+}
+
+#undef __included_bihash_template_h__
+#include <vppinfra/bihash_template.h>
+
+#endif /* __included_bihash_16_8_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/bihash_24_8.h b/src/vppinfra/bihash_24_8.h
new file mode 100644
index 00000000..173168fe
--- /dev/null
+++ b/src/vppinfra/bihash_24_8.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#undef BIHASH_TYPE
+#undef BIHASH_KVP_CACHE_SIZE
+#undef BIHASH_KVP_PER_PAGE
+
+#define BIHASH_TYPE _24_8
+#define BIHASH_KVP_PER_PAGE 4
+#define BIHASH_KVP_CACHE_SIZE 0
+
+#ifndef __included_bihash_24_8_h__
+#define __included_bihash_24_8_h__
+
+#include <vppinfra/crc32.h>
+#include <vppinfra/heap.h>
+#include <vppinfra/format.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/xxhash.h>
+
+typedef struct
+{
+ u64 key[3];
+ u64 value;
+} clib_bihash_kv_24_8_t;
+
+static inline int
+clib_bihash_is_free_24_8 (const clib_bihash_kv_24_8_t * v)
+{
+ /* Free values are memset to 0xff, check a bit... */
+ if (v->key[0] == ~0ULL && v->value == ~0ULL)
+ return 1;
+ return 0;
+}
+
+static inline u64
+clib_bihash_hash_24_8 (const clib_bihash_kv_24_8_t * v)
+{
+#ifdef clib_crc32c_uses_intrinsics
+ return clib_crc32c ((u8 *) v->key, 24);
+#else
+ u64 tmp = v->key[0] ^ v->key[1] ^ v->key[2];
+ return clib_xxhash (tmp);
+#endif
+}
+
+static inline u8 *
+format_bihash_kvp_24_8 (u8 * s, va_list * args)
+{
+ clib_bihash_kv_24_8_t *v = va_arg (*args, clib_bihash_kv_24_8_t *);
+
+ s = format (s, "key %llu %llu %llu value %llu",
+ v->key[0], v->key[1], v->key[2], v->value);
+ return s;
+}
+
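+/* Branch-free key compare: the OR of per-word XORs is zero iff all
+   three key words match. */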
+static inline int
+clib_bihash_key_compare_24_8 (const u64 * a, const u64 * b)
+{
+ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;
+}
+
+#undef __included_bihash_template_h__
+#include <vppinfra/bihash_template.h>
+
+#endif /* __included_bihash_24_8_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/bihash_40_8.h b/src/vppinfra/bihash_40_8.h
new file mode 100644
index 00000000..974a78d8
--- /dev/null
+++ b/src/vppinfra/bihash_40_8.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#undef BIHASH_TYPE
+#undef BIHASH_KVP_CACHE_SIZE
+#undef BIHASH_KVP_PER_PAGE
+
+#define BIHASH_TYPE _40_8
+#define BIHASH_KVP_PER_PAGE 4
+#define BIHASH_KVP_CACHE_SIZE 2
+
+#ifndef __included_bihash_40_8_h__
+#define __included_bihash_40_8_h__
+
+#include <vppinfra/crc32.h>
+#include <vppinfra/heap.h>
+#include <vppinfra/format.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/xxhash.h>
+
+typedef struct
+{
+ u64 key[5];
+ u64 value;
+} clib_bihash_kv_40_8_t;
+
+static inline int
+clib_bihash_is_free_40_8 (const clib_bihash_kv_40_8_t * v)
+{
+ /* Free values are memset to 0xff, check a bit... */
+ if (v->key[0] == ~0ULL && v->value == ~0ULL)
+ return 1;
+ return 0;
+}
+
+static inline u64
+clib_bihash_hash_40_8 (const clib_bihash_kv_40_8_t * v)
+{
+#ifdef clib_crc32c_uses_intrinsics
+ return clib_crc32c ((u8 *) v->key, 40);
+#else
+ u64 tmp = v->key[0] ^ v->key[1] ^ v->key[2] ^ v->key[3] ^ v->key[4];
+ return clib_xxhash (tmp);
+#endif
+}
+
+static inline u8 *
+format_bihash_kvp_40_8 (u8 * s, va_list * args)
+{
+ clib_bihash_kv_40_8_t *v = va_arg (*args, clib_bihash_kv_40_8_t *);
+
+ s = format (s, "key %llu %llu %llu %llu %llu value %llu", v->key[0],
+ v->key[1], v->key[2], v->key[3], v->key[4], v->value);
+ return s;
+}
+
+static inline int
+clib_bihash_key_compare_40_8 (const u64 * a, const u64 * b)
+{
+ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2]) | (a[3] ^ b[3])
+ | (a[4] ^ b[4])) == 0;
+}
+
+#undef __included_bihash_template_h__
+#include <vppinfra/bihash_template.h>
+
+#endif /* __included_bihash_40_8_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/bihash_48_8.h b/src/vppinfra/bihash_48_8.h
new file mode 100644
index 00000000..107bcace
--- /dev/null
+++ b/src/vppinfra/bihash_48_8.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#undef BIHASH_TYPE
+#undef BIHASH_KVP_CACHE_SIZE
+#undef BIHASH_KVP_PER_PAGE
+
+#define BIHASH_TYPE _48_8
+#define BIHASH_KVP_PER_PAGE 4
+#define BIHASH_KVP_CACHE_SIZE 2
+
+#ifndef __included_bihash_48_8_h__
+#define __included_bihash_48_8_h__
+
+#include <vppinfra/crc32.h>
+#include <vppinfra/heap.h>
+#include <vppinfra/format.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/xxhash.h>
+
+typedef struct
+{
+ u64 key[6];
+ u64 value;
+} clib_bihash_kv_48_8_t;
+
+static inline int
+clib_bihash_is_free_48_8 (const clib_bihash_kv_48_8_t * v)
+{
+ /* Free values are memset to 0xff, check a bit... */
+ if (v->key[0] == ~0ULL && v->value == ~0ULL)
+ return 1;
+ return 0;
+}
+
+static inline u64
+clib_bihash_hash_48_8 (const clib_bihash_kv_48_8_t * v)
+{
+#ifdef clib_crc32c_uses_intrinsics
+ return clib_crc32c ((u8 *) v->key, 48);
+#else
+ u64 tmp = v->key[0] ^ v->key[1] ^ v->key[2] ^ v->key[3] ^ v->key[4]
+ ^ v->key[5];
+ return clib_xxhash (tmp);
+#endif
+}
+
+static inline u8 *
+format_bihash_kvp_48_8 (u8 * s, va_list * args)
+{
+ clib_bihash_kv_48_8_t *v = va_arg (*args, clib_bihash_kv_48_8_t *);
+
+ s = format (s, "key %llu %llu %llu %llu %llu %llu value %llu", v->key[0],
+ v->key[1], v->key[2], v->key[3], v->key[4], v->key[5],
+ v->value);
+ return s;
+}
+
+static inline int
+clib_bihash_key_compare_48_8 (const u64 * a, const u64 * b)
+{
+ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2]) | (a[3] ^ b[3])
+ | (a[4] ^ b[4]) | (a[5] ^ b[5])) == 0;
+}
+
+#undef __included_bihash_template_h__
+#include <vppinfra/bihash_template.h>
+
+#endif /* __included_bihash_48_8_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/bihash_8_8.h b/src/vppinfra/bihash_8_8.h
new file mode 100644
index 00000000..2deb64ef
--- /dev/null
+++ b/src/vppinfra/bihash_8_8.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#undef BIHASH_TYPE
+#undef BIHASH_KVP_CACHE_SIZE
+#undef BIHASH_KVP_PER_PAGE
+
+#define BIHASH_TYPE _8_8
+#define BIHASH_KVP_PER_PAGE 4
+#define BIHASH_KVP_CACHE_SIZE 0
+
+#ifndef __included_bihash_8_8_h__
+#define __included_bihash_8_8_h__
+
+#include <vppinfra/heap.h>
+#include <vppinfra/format.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/xxhash.h>
+#include <vppinfra/crc32.h>
+
+/** 8 octet key, 8 octet key value pair */
+typedef struct
+{
+ u64 key; /**< the key */
+ u64 value; /**< the value */
+} clib_bihash_kv_8_8_t;
+
+/** Decide if a clib_bihash_kv_8_8_t instance is free
+ @param v - pointer to the (key,value) pair
+*/
+static inline int
+clib_bihash_is_free_8_8 (clib_bihash_kv_8_8_t * v)
+{
+ if (v->key == ~0ULL && v->value == ~0ULL)
+ return 1;
+ return 0;
+}
+
+/** Hash a clib_bihash_kv_8_8_t instance
+ @param v - pointer to the (key,value) pair, hash the key (only)
+*/
+static inline u64
+clib_bihash_hash_8_8 (clib_bihash_kv_8_8_t * v)
+{
+ /* Note: to torture-test linear scan, make this fn return a constant */
+#ifdef clib_crc32c_uses_intrinsics
+ return clib_crc32c ((u8 *) & v->key, 8);
+#else
+ return clib_xxhash (v->key);
+#endif
+}
+
+/** Format a clib_bihash_kv_8_8_t instance
+ @param s - u8 * vector under construction
+ @param args (vararg) - the (key,value) pair to format
+ @return s - the u8 * vector under construction
+*/
+static inline u8 *
+format_bihash_kvp_8_8 (u8 * s, va_list * args)
+{
+ clib_bihash_kv_8_8_t *v = va_arg (*args, clib_bihash_kv_8_8_t *);
+
+ s = format (s, "key %llu value %llu", v->key, v->value);
+ return s;
+}
+
+/** Compare two clib_bihash_kv_8_8_t instances
+ @param a - first key
+ @param b - second key
+*/
+static inline int
+clib_bihash_key_compare_8_8 (u64 a, u64 b)
+{
+ return a == b;
+}
+
+#undef __included_bihash_template_h__
+#include <vppinfra/bihash_template.h>
+
+#endif /* __included_bihash_8_8_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/bihash_doc.h b/src/vppinfra/bihash_doc.h
new file mode 100644
index 00000000..e6ab9db6
--- /dev/null
+++ b/src/vppinfra/bihash_doc.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+#error do not #include this file!
+
+/** \file
+
+ Bounded-index extensible hashing. The basic algorithm performs
+ thread-safe constant-time lookups in the face of a reasonable number
+ of hash collisions. The computed hash code h(k) must have
+ good statistics with respect to the key space. It won't do
+ to have h(k) = 0 or 1 for all values of k.
+
+ Each bucket in the power-of-two bucket array contains the index
+ (in a private vppinfra memory heap) of the "backing store" for the
+ bucket, as well as a size field. The size field (log2_pages)
+ corresponds to 1, 2, 4, ... contiguous "pages" containing the
+ (key,value) pairs in the bucket.
+
+ When a single page fills, we allocate two contiguous pages. We
+ recompute h(k) for each (key,value) pair, using an additional bit
+ to deal the (key, value) pairs into the "top" and "bottom" pages.
+
+ At lookup time, we compute h(k), using lg(bucket-array-size) to
+ pick the bucket. We read the bucket to find the base of the
+ backing pages. We use an additional log2_pages' worth of bits
+ from h(k) to compute the offset of the page which will contain the
+ (key,value) pair we're trying to find.
+*/
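+
+/* A sketch of the index arithmetic described above, with illustrative
+   variable names (not part of the API):
+
+     u64 h = hash (key);
+     u32 bucket = h & ((1 << log2_nbuckets) - 1);
+     h >>= log2_nbuckets;
+     u32 page = h & ((1 << log2_pages) - 1);
+
+   The low-order bits select the bucket; the next log2_pages bits select
+   the page within the bucket's backing store. */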
+
+/** template key/value backing page structure */
+typedef struct clib_bihash_value
+{
+ union
+ {
+
+ clib_bihash_kv kvp[BIHASH_KVP_PER_PAGE]; /**< the actual key/value pairs */
+ clib_bihash_value *next_free; /**< used when a KVP page (or block thereof) is on a freelist */
+ };
+} clib_bihash_value_t;
+
+/** bihash bucket structure */
+typedef struct
+{
+ union
+ {
+ struct
+ {
+ u32 offset; /**< backing page offset in the clib memory heap */
+ u8 pad[3]; /**< padding */
+ u8 log2_pages; /**< log2 (size of the backing page block) */
+ };
+ u64 as_u64;
+ };
+} clib_bihash_bucket_t;
+
+/** A bounded index extensible hash table */
+typedef struct
+{
+ clib_bihash_bucket_t *buckets; /**< Hash bucket vector, power-of-two in size */
+ volatile u32 *writer_lock; /**< Writer lock, in its own cache line */
+ BVT (clib_bihash_value) ** working_copies;
+ /**< Working copies (various sizes), to avoid locking against readers */
+ clib_bihash_bucket_t saved_bucket; /**< Saved bucket pointer */
+ u32 nbuckets; /**< Number of hash buckets */
+ u32 log2_nbuckets; /**< lg(nbuckets) */
+ u8 *name; /**< hash table name */
+ BVT (clib_bihash_value) ** freelists;
+ /**< power of two freelist vector */
+ void *mheap; /**< clib memory heap */
+} clib_bihash_t;
+
+/** Get pointer to value page given its clib mheap offset */
+static inline void *clib_bihash_get_value (clib_bihash * h, uword offset);
+
+/** Get clib mheap offset given a pointer */
+static inline uword clib_bihash_get_offset (clib_bihash * h, void *v);
+
+/** initialize a bounded index extensible hash table
+
+ @param h - the bi-hash table to initialize
+ @param name - name of the hash table
+ @param nbuckets - the number of buckets, will be rounded up to
+a power of two
+ @param memory_size - clib mheap size, in bytes
+*/
+
+void clib_bihash_init
+ (clib_bihash * h, char *name, u32 nbuckets, uword memory_size);
+
+/** Destroy a bounded index extensible hash table
+ @param h - the bi-hash table to free
+*/
+
+void clib_bihash_free (clib_bihash * h);
+
+/** Add or delete a (key,value) pair from a bi-hash table
+
+ @param h - the bi-hash table to search
+ @param add_v - the (key,value) pair to add
+ @param is_add - add=1, delete=0
+ @returns 0 on success, < 0 on error
+ @note This function will replace an existing (key,value) pair if the
+ new key matches an existing key
+*/
+int clib_bihash_add_del (clib_bihash * h, clib_bihash_kv * add_v, int is_add);
+
+
+/** Search a bi-hash table
+
+ @param h - the bi-hash table to search
+ @param search_v - (key,value) pair containing the search key
+ @param return_v - (key,value) pair which matches search_v.key
+ @returns 0 on success (with return_v set), < 0 on error
+*/
+int clib_bihash_search (clib_bihash * h,
+ clib_bihash_kv * search_v, clib_bihash_kv * return_v);
+
+
+/** Visit active (key,value) pairs in a bi-hash table
+
+ @param h - the bi-hash table to search
+ @param callback - function to call with each active (key,value) pair
+ @param arg - arbitrary second argument passed to the callback function
+ First argument is the (key,value) pair to visit
+ @note Trying to supply a proper function prototype for the
+ callback function appears to be a fool's errand.
+*/
+void clib_bihash_foreach_key_value_pair (clib_bihash * h,
+ void *callback, void *arg);
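+
+/* Typical usage, sketched with the 8_8 fixed-size instantiation from
+   bihash_8_8.h; the table name and sizes are illustrative:
+
+     clib_bihash_8_8_t t;
+     clib_bihash_kv_8_8_t kv, result;
+
+     clib_bihash_init_8_8 (&t, "example", 1024, 32 << 20);
+     kv.key = 12;
+     kv.value = 34;
+     clib_bihash_add_del_8_8 (&t, &kv, 1);   // 1 => add
+     if (clib_bihash_search_8_8 (&t, &kv, &result) == 0)
+       ASSERT (result.value == 34);
+*/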
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/bihash_template.c b/src/vppinfra/bihash_template.c
new file mode 100644
index 00000000..56c410b5
--- /dev/null
+++ b/src/vppinfra/bihash_template.c
@@ -0,0 +1,624 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @cond DOCUMENTATION_IS_IN_BIHASH_DOC_H */
+
+void BV (clib_bihash_init)
+ (BVT (clib_bihash) * h, char *name, u32 nbuckets, uword memory_size)
+{
+ void *oldheap;
+ int i;
+
+ nbuckets = 1 << (max_log2 (nbuckets));
+
+ h->name = (u8 *) name;
+ h->nbuckets = nbuckets;
+ h->log2_nbuckets = max_log2 (nbuckets);
+ h->cache_hits = 0;
+ h->cache_misses = 0;
+
+ h->mheap = mheap_alloc (0 /* use VM */ , memory_size);
+
+ oldheap = clib_mem_set_heap (h->mheap);
+ vec_validate_aligned (h->buckets, nbuckets - 1, CLIB_CACHE_LINE_BYTES);
+ h->writer_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
+ CLIB_CACHE_LINE_BYTES);
+ h->writer_lock[0] = 0;
+
+ for (i = 0; i < nbuckets; i++)
+ BV (clib_bihash_reset_cache) (h->buckets + i);
+
+ clib_mem_set_heap (oldheap);
+}
+
+void BV (clib_bihash_free) (BVT (clib_bihash) * h)
+{
+ mheap_free (h->mheap);
+ memset (h, 0, sizeof (*h));
+}
+
+static
+BVT (clib_bihash_value) *
+BV (value_alloc) (BVT (clib_bihash) * h, u32 log2_pages)
+{
+ BVT (clib_bihash_value) * rv = 0;
+ void *oldheap;
+
+ ASSERT (h->writer_lock[0]);
+ if (log2_pages >= vec_len (h->freelists) || h->freelists[log2_pages] == 0)
+ {
+ oldheap = clib_mem_set_heap (h->mheap);
+
+ vec_validate (h->freelists, log2_pages);
+ rv = clib_mem_alloc_aligned ((sizeof (*rv) * (1 << log2_pages)),
+ CLIB_CACHE_LINE_BYTES);
+ clib_mem_set_heap (oldheap);
+ goto initialize;
+ }
+ rv = h->freelists[log2_pages];
+ h->freelists[log2_pages] = rv->next_free;
+
+initialize:
+ ASSERT (rv);
+ /*
+ * Latest gcc complains that the length arg is zero
+ * if we replace (1<<log2_pages) with vec_len(rv).
+ * No clue.
+ */
+ memset (rv, 0xff, sizeof (*rv) * (1 << log2_pages));
+ return rv;
+}
+
+static void
+BV (value_free) (BVT (clib_bihash) * h, BVT (clib_bihash_value) * v,
+ u32 log2_pages)
+{
+ ASSERT (h->writer_lock[0]);
+
+ ASSERT (vec_len (h->freelists) > log2_pages);
+
+ v->next_free = h->freelists[log2_pages];
+ h->freelists[log2_pages] = v;
+}
+
+static inline void
+BV (make_working_copy) (BVT (clib_bihash) * h, BVT (clib_bihash_bucket) * b)
+{
+ BVT (clib_bihash_value) * v;
+ BVT (clib_bihash_bucket) working_bucket __attribute__ ((aligned (8)));
+ void *oldheap;
+ BVT (clib_bihash_value) * working_copy;
+ u32 thread_index = os_get_thread_index ();
+ int log2_working_copy_length;
+
+ if (thread_index >= vec_len (h->working_copies))
+ {
+ oldheap = clib_mem_set_heap (h->mheap);
+ vec_validate (h->working_copies, thread_index);
+ vec_validate_init_empty (h->working_copy_lengths, thread_index, ~0);
+ clib_mem_set_heap (oldheap);
+ }
+
+ /*
+ * working_copies are per-cpu so that near-simultaneous
+ * updates from multiple threads will not result in sporadic, spurious
+ * lookup failures.
+ */
+ working_copy = h->working_copies[thread_index];
+ log2_working_copy_length = h->working_copy_lengths[thread_index];
+
+ h->saved_bucket.as_u64 = b->as_u64;
+ oldheap = clib_mem_set_heap (h->mheap);
+
+ if (b->log2_pages > log2_working_copy_length)
+ {
+ if (working_copy)
+ clib_mem_free (working_copy);
+
+ working_copy = clib_mem_alloc_aligned
+ (sizeof (working_copy[0]) * (1 << b->log2_pages),
+ CLIB_CACHE_LINE_BYTES);
+ h->working_copy_lengths[thread_index] = b->log2_pages;
+ h->working_copies[thread_index] = working_copy;
+ }
+
+ clib_mem_set_heap (oldheap);
+
+ /* Lock the bucket... */
+ while (BV (clib_bihash_lock_bucket) (b) == 0)
+ ;
+
+ v = BV (clib_bihash_get_value) (h, b->offset);
+
+ clib_memcpy (working_copy, v, sizeof (*v) * (1 << b->log2_pages));
+ working_bucket.as_u64 = b->as_u64;
+ working_bucket.offset = BV (clib_bihash_get_offset) (h, working_copy);
+ CLIB_MEMORY_BARRIER ();
+ b->as_u64 = working_bucket.as_u64;
+ h->working_copies[thread_index] = working_copy;
+}
+
+static
+BVT (clib_bihash_value) *
+BV (split_and_rehash)
+ (BVT (clib_bihash) * h,
+ BVT (clib_bihash_value) * old_values, u32 old_log2_pages,
+ u32 new_log2_pages)
+{
+ BVT (clib_bihash_value) * new_values, *new_v;
+ int i, j, length_in_kvs;
+
+ new_values = BV (value_alloc) (h, new_log2_pages);
+ length_in_kvs = (1 << old_log2_pages) * BIHASH_KVP_PER_PAGE;
+
+ for (i = 0; i < length_in_kvs; i++)
+ {
+ u64 new_hash;
+
+ /* Entry not in use? Forget it */
+ if (BV (clib_bihash_is_free) (&(old_values->kvp[i])))
+ continue;
+
+ /* rehash the item onto its new home-page */
+ new_hash = BV (clib_bihash_hash) (&(old_values->kvp[i]));
+ new_hash >>= h->log2_nbuckets;
+ new_hash &= (1 << new_log2_pages) - 1;
+ new_v = &new_values[new_hash];
+
+ /* Across the new home-page */
+ for (j = 0; j < BIHASH_KVP_PER_PAGE; j++)
+ {
+ /* Empty slot */
+ if (BV (clib_bihash_is_free) (&(new_v->kvp[j])))
+ {
+ clib_memcpy (&(new_v->kvp[j]), &(old_values->kvp[i]),
+ sizeof (new_v->kvp[j]));
+ goto doublebreak;
+ }
+ }
+ /* Crap. Tell caller to try again */
+ BV (value_free) (h, new_values, new_log2_pages);
+ return 0;
+ doublebreak:;
+ }
+
+ return new_values;
+}
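+
+/* E.g. growing from one page to two: each surviving key's hash is
+   shifted right by log2_nbuckets and the next new_log2_pages bit(s)
+   choose its new home page, per the scheme described in bihash_doc.h. */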
+
+static
+BVT (clib_bihash_value) *
+BV (split_and_rehash_linear)
+ (BVT (clib_bihash) * h,
+ BVT (clib_bihash_value) * old_values, u32 old_log2_pages,
+ u32 new_log2_pages)
+{
+ BVT (clib_bihash_value) * new_values;
+ int i, j, new_length, old_length;
+
+ new_values = BV (value_alloc) (h, new_log2_pages);
+ new_length = (1 << new_log2_pages) * BIHASH_KVP_PER_PAGE;
+ old_length = (1 << old_log2_pages) * BIHASH_KVP_PER_PAGE;
+
+ j = 0;
+ /* Across the old value array */
+ for (i = 0; i < old_length; i++)
+ {
+ /* Find a free slot in the new linear scan bucket */
+ for (; j < new_length; j++)
+ {
+ /* Old value not in use? Forget it. */
+ if (BV (clib_bihash_is_free) (&(old_values->kvp[i])))
+ goto doublebreak;
+
+ /* New value should never be in use */
+ if (BV (clib_bihash_is_free) (&(new_values->kvp[j])))
+ {
+ /* Copy the old value and move along */
+ clib_memcpy (&(new_values->kvp[j]), &(old_values->kvp[i]),
+ sizeof (new_values->kvp[j]));
+ j++;
+ goto doublebreak;
+ }
+ }
+ /* This should never happen... */
+ clib_warning ("BUG: linear rehash failed!");
+ BV (value_free) (h, new_values, new_log2_pages);
+ return 0;
+
+ doublebreak:;
+ }
+ return new_values;
+}
+
+int BV (clib_bihash_add_del)
+ (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, int is_add)
+{
+ u32 bucket_index;
+ BVT (clib_bihash_bucket) * b, tmp_b;
+ BVT (clib_bihash_value) * v, *new_v, *save_new_v, *working_copy;
+ int rv = 0;
+ int i, limit;
+ u64 hash, new_hash;
+ u32 new_log2_pages, old_log2_pages;
+ u32 thread_index = os_get_thread_index ();
+ int mark_bucket_linear;
+ int resplit_once;
+
+ hash = BV (clib_bihash_hash) (add_v);
+
+ bucket_index = hash & (h->nbuckets - 1);
+ b = &h->buckets[bucket_index];
+
+ hash >>= h->log2_nbuckets;
+
+ tmp_b.linear_search = 0;
+
+ while (__sync_lock_test_and_set (h->writer_lock, 1))
+ ;
+
+ /* First elt in the bucket? */
+ if (b->offset == 0)
+ {
+ if (is_add == 0)
+ {
+ rv = -1;
+ goto unlock;
+ }
+
+ v = BV (value_alloc) (h, 0);
+
+ *v->kvp = *add_v;
+ tmp_b.as_u64 = 0;
+ tmp_b.offset = BV (clib_bihash_get_offset) (h, v);
+
+ b->as_u64 = tmp_b.as_u64;
+ goto unlock;
+ }
+
+ /* Note: this leaves the cache disabled */
+ BV (make_working_copy) (h, b);
+
+ v = BV (clib_bihash_get_value) (h, h->saved_bucket.offset);
+
+ limit = BIHASH_KVP_PER_PAGE;
+ v += (b->linear_search == 0) ? hash & ((1 << b->log2_pages) - 1) : 0;
+ if (b->linear_search)
+ limit <<= b->log2_pages;
+
+ if (is_add)
+ {
+ /*
+ * For obvious (in hindsight) reasons, see if we're supposed to
+ * replace an existing key, then look for an empty slot.
+ */
+ for (i = 0; i < limit; i++)
+ {
+ if (!memcmp (&(v->kvp[i]), &add_v->key, sizeof (add_v->key)))
+ {
+ clib_memcpy (&(v->kvp[i]), add_v, sizeof (*add_v));
+ CLIB_MEMORY_BARRIER ();
+ /* Restore the previous (k,v) pairs */
+ b->as_u64 = h->saved_bucket.as_u64;
+ goto unlock;
+ }
+ }
+ for (i = 0; i < limit; i++)
+ {
+ if (BV (clib_bihash_is_free) (&(v->kvp[i])))
+ {
+ clib_memcpy (&(v->kvp[i]), add_v, sizeof (*add_v));
+ CLIB_MEMORY_BARRIER ();
+ b->as_u64 = h->saved_bucket.as_u64;
+ goto unlock;
+ }
+ }
+ /* no room at the inn... split case... */
+ }
+ else
+ {
+ for (i = 0; i < limit; i++)
+ {
+ if (!memcmp (&(v->kvp[i]), &add_v->key, sizeof (add_v->key)))
+ {
+ memset (&(v->kvp[i]), 0xff, sizeof (*(add_v)));
+ CLIB_MEMORY_BARRIER ();
+ b->as_u64 = h->saved_bucket.as_u64;
+ goto unlock;
+ }
+ }
+ rv = -3;
+ b->as_u64 = h->saved_bucket.as_u64;
+ goto unlock;
+ }
+
+ old_log2_pages = h->saved_bucket.log2_pages;
+ new_log2_pages = old_log2_pages + 1;
+ mark_bucket_linear = 0;
+
+ working_copy = h->working_copies[thread_index];
+ resplit_once = 0;
+
+ new_v = BV (split_and_rehash) (h, working_copy, old_log2_pages,
+ new_log2_pages);
+ if (new_v == 0)
+ {
+ try_resplit:
+ resplit_once = 1;
+ new_log2_pages++;
+ /* Try re-splitting. If that fails, fall back to linear search */
+ new_v = BV (split_and_rehash) (h, working_copy, old_log2_pages,
+ new_log2_pages);
+ if (new_v == 0)
+ {
+ mark_linear:
+ new_log2_pages--;
+ /* pinned collisions, use linear search */
+ new_v =
+ BV (split_and_rehash_linear) (h, working_copy, old_log2_pages,
+ new_log2_pages);
+ mark_bucket_linear = 1;
+ }
+ }
+
+ /* Try to add the new entry */
+ save_new_v = new_v;
+ new_hash = BV (clib_bihash_hash) (add_v);
+ limit = BIHASH_KVP_PER_PAGE;
+ if (mark_bucket_linear)
+ limit <<= new_log2_pages;
+ new_hash >>= h->log2_nbuckets;
+ new_hash &= (1 << new_log2_pages) - 1;
+ new_v += mark_bucket_linear ? 0 : new_hash;
+
+ for (i = 0; i < limit; i++)
+ {
+ if (BV (clib_bihash_is_free) (&(new_v->kvp[i])))
+ {
+ clib_memcpy (&(new_v->kvp[i]), add_v, sizeof (*add_v));
+ goto expand_ok;
+ }
+ }
+
+ /* Crap. Try again */
+ BV (value_free) (h, save_new_v, new_log2_pages);
+ /*
+ * If we've already doubled the size of the bucket once,
+ * fall back to linear search now.
+ */
+ if (resplit_once)
+ goto mark_linear;
+ else
+ goto try_resplit;
+
+expand_ok:
+ /* Keep track of the number of linear-scan buckets */
+ if (tmp_b.linear_search ^ mark_bucket_linear)
+ h->linear_buckets += (mark_bucket_linear == 1) ? 1 : -1;
+
+ tmp_b.log2_pages = new_log2_pages;
+ tmp_b.offset = BV (clib_bihash_get_offset) (h, save_new_v);
+ tmp_b.linear_search = mark_bucket_linear;
+
+ CLIB_MEMORY_BARRIER ();
+ b->as_u64 = tmp_b.as_u64;
+ v = BV (clib_bihash_get_value) (h, h->saved_bucket.offset);
+ BV (value_free) (h, v, old_log2_pages);
+
+unlock:
+ BV (clib_bihash_reset_cache) (b);
+ BV (clib_bihash_unlock_bucket) (b);
+ CLIB_MEMORY_BARRIER ();
+ h->writer_lock[0] = 0;
+ return rv;
+}
+
+int BV (clib_bihash_search)
+ (BVT (clib_bihash) * h,
+ BVT (clib_bihash_kv) * search_key, BVT (clib_bihash_kv) * valuep)
+{
+ u64 hash;
+ u32 bucket_index;
+ BVT (clib_bihash_value) * v;
+#if BIHASH_KVP_CACHE_SIZE > 0
+ BVT (clib_bihash_kv) * kvp;
+#endif
+ BVT (clib_bihash_bucket) * b;
+ int i, limit;
+
+ ASSERT (valuep);
+
+ hash = BV (clib_bihash_hash) (search_key);
+
+ bucket_index = hash & (h->nbuckets - 1);
+ b = &h->buckets[bucket_index];
+
+ if (b->offset == 0)
+ return -1;
+
+#if BIHASH_KVP_CACHE_SIZE > 0
+ /* Check the cache, if currently enabled */
+ if (PREDICT_TRUE ((b->cache_lru & (1 << 15)) == 0))
+ {
+ limit = BIHASH_KVP_CACHE_SIZE;
+ kvp = b->cache;
+ for (i = 0; i < limit; i++)
+ {
+ if (BV (clib_bihash_key_compare) (kvp[i].key, search_key->key))
+ {
+ *valuep = kvp[i];
+ h->cache_hits++;
+ return 0;
+ }
+ }
+ }
+#endif
+
+ hash >>= h->log2_nbuckets;
+
+ v = BV (clib_bihash_get_value) (h, b->offset);
+ limit = BIHASH_KVP_PER_PAGE;
+ v += (b->linear_search == 0) ? hash & ((1 << b->log2_pages) - 1) : 0;
+ if (PREDICT_FALSE (b->linear_search))
+ limit <<= b->log2_pages;
+
+ for (i = 0; i < limit; i++)
+ {
+ if (BV (clib_bihash_key_compare) (v->kvp[i].key, search_key->key))
+ {
+ *valuep = v->kvp[i];
+
+#if BIHASH_KVP_CACHE_SIZE > 0
+ u8 cache_slot;
+ /* Shut off the cache */
+ if (BV (clib_bihash_lock_bucket) (b))
+ {
+ cache_slot = BV (clib_bihash_get_lru) (b);
+ b->cache[cache_slot] = v->kvp[i];
+ BV (clib_bihash_update_lru) (b, cache_slot);
+
+ /* Reenable the cache */
+ BV (clib_bihash_unlock_bucket) (b);
+ h->cache_misses++;
+ }
+#endif
+ return 0;
+ }
+ }
+ return -1;
+}
+
+u8 *BV (format_bihash_lru) (u8 * s, va_list * args)
+{
+#if BIHASH_KVP_CACHE_SIZE > 0
+ int i;
+ BVT (clib_bihash_bucket) * b = va_arg (*args, BVT (clib_bihash_bucket) *);
+ u16 cache_lru = b->cache_lru;
+
+ s = format (s, "cache %s, order ", cache_lru & (1 << 15) ? "on" : "off");
+
+ for (i = 0; i < BIHASH_KVP_CACHE_SIZE; i++)
+ s = format (s, "[%d] ", ((cache_lru >> (3 * i)) & 7));
+
+ return (s);
+#else
+ return format (s, "cache not configured");
+#endif
+}
+
+void
+BV (clib_bihash_update_lru_not_inline) (BVT (clib_bihash_bucket) * b, u8 slot)
+{
+#if BIHASH_KVP_CACHE_SIZE > 0
+ BV (clib_bihash_update_lru) (b, slot);
+#endif
+}
+
+u8 *BV (format_bihash) (u8 * s, va_list * args)
+{
+ BVT (clib_bihash) * h = va_arg (*args, BVT (clib_bihash) *);
+ int verbose = va_arg (*args, int);
+ BVT (clib_bihash_bucket) * b;
+ BVT (clib_bihash_value) * v;
+ int i, j, k;
+ u64 active_elements = 0;
+
+ s = format (s, "Hash table %s\n", h->name ? h->name : (u8 *) "(unnamed)");
+
+ for (i = 0; i < h->nbuckets; i++)
+ {
+ b = &h->buckets[i];
+ if (b->offset == 0)
+ {
+ if (verbose > 1)
+ s = format (s, "[%d]: empty\n", i);
+ continue;
+ }
+
+ if (verbose)
+ {
+ s = format (s, "[%d]: heap offset %d, len %d, linear %d\n", i,
+ b->offset, (1 << b->log2_pages), b->linear_search);
+ }
+
+ v = BV (clib_bihash_get_value) (h, b->offset);
+ for (j = 0; j < (1 << b->log2_pages); j++)
+ {
+ for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
+ {
+ if (BV (clib_bihash_is_free) (&v->kvp[k]))
+ {
+ if (verbose > 1)
+ s = format (s, " %d: empty\n",
+ j * BIHASH_KVP_PER_PAGE + k);
+ continue;
+ }
+ if (verbose)
+ {
+ s = format (s, " %d: %U\n",
+ j * BIHASH_KVP_PER_PAGE + k,
+ BV (format_bihash_kvp), &(v->kvp[k]));
+ }
+ active_elements++;
+ }
+ v++;
+ }
+ }
+
+ s = format (s, " %lld active elements\n", active_elements);
+ s = format (s, " %d free lists\n", vec_len (h->freelists));
+ s = format (s, " %d linear search buckets\n", h->linear_buckets);
+ s = format (s, " %lld cache hits, %lld cache misses\n",
+ h->cache_hits, h->cache_misses);
+ return s;
+}
+
+void BV (clib_bihash_foreach_key_value_pair)
+ (BVT (clib_bihash) * h, void *callback, void *arg)
+{
+ int i, j, k;
+ BVT (clib_bihash_bucket) * b;
+ BVT (clib_bihash_value) * v;
+ void (*fp) (BVT (clib_bihash_kv) *, void *) = callback;
+
+ for (i = 0; i < h->nbuckets; i++)
+ {
+ b = &h->buckets[i];
+ if (b->offset == 0)
+ continue;
+
+ v = BV (clib_bihash_get_value) (h, b->offset);
+ for (j = 0; j < (1 << b->log2_pages); j++)
+ {
+ for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
+ {
+ if (BV (clib_bihash_is_free) (&v->kvp[k]))
+ continue;
+
+ (*fp) (&v->kvp[k], arg);
+ }
+ v++;
+ }
+ }
+}
+
+/** @endcond */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/bihash_template.h b/src/vppinfra/bihash_template.h
new file mode 100644
index 00000000..ea1b6f7b
--- /dev/null
+++ b/src/vppinfra/bihash_template.h
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @cond DOCUMENTATION_IS_IN_BIHASH_DOC_H */
+
+/*
+ * Note: to instantiate the template multiple times in a single file,
+ * #undef __included_bihash_template_h__...
+ */
+#ifndef __included_bihash_template_h__
+#define __included_bihash_template_h__
+
+#include <vppinfra/heap.h>
+#include <vppinfra/format.h>
+#include <vppinfra/pool.h>
+
+#ifndef BIHASH_TYPE
+#error BIHASH_TYPE not defined
+#endif
+
+#define _bv(a,b) a##b
+#define __bv(a,b) _bv(a,b)
+#define BV(a) __bv(a,BIHASH_TYPE)
+
+#define _bvt(a,b) a##b##_t
+#define __bvt(a,b) _bvt(a,b)
+#define BVT(a) __bvt(a,BIHASH_TYPE)
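+
+/*
+ * BV/BVT paste BIHASH_TYPE onto a name: with BIHASH_TYPE defined as
+ * _8_8 (a typical instantiation), BV (clib_bihash_init) expands to
+ * clib_bihash_init_8_8 and BVT (clib_bihash) to clib_bihash_8_8_t.
+ */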
+
+typedef struct BV (clib_bihash_value)
+{
+ union
+ {
+ BVT (clib_bihash_kv) kvp[BIHASH_KVP_PER_PAGE];
+ struct BV (clib_bihash_value) * next_free;
+ };
+} BVT (clib_bihash_value);
+
+#if BIHASH_KVP_CACHE_SIZE > 5
+#error Requested KVP cache LRU data exceeds 16 bits
+#endif
+
+typedef struct
+{
+ union
+ {
+ struct
+ {
+ u32 offset;
+ u8 linear_search;
+ u8 log2_pages;
+ u16 cache_lru;
+ };
+ u64 as_u64;
+ };
+#if BIHASH_KVP_CACHE_SIZE > 0
+ BVT (clib_bihash_kv) cache[BIHASH_KVP_CACHE_SIZE];
+#endif
+} BVT (clib_bihash_bucket);
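+
+/*
+ * Layout note: cache_lru packs the per-bucket KVP cache LRU order as
+ * 3 bits per cache slot (at most 5 slots, hence the check above), with
+ * the most-recently-used slot in the low-order field and the next
+ * victim in the high-order field; bit 15 doubles as the cache lock.
+ */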
+
+typedef struct
+{
+ BVT (clib_bihash_value) * values;
+ BVT (clib_bihash_bucket) * buckets;
+ volatile u32 *writer_lock;
+
+ BVT (clib_bihash_value) ** working_copies;
+ int *working_copy_lengths;
+ BVT (clib_bihash_bucket) saved_bucket;
+
+ u32 nbuckets;
+ u32 log2_nbuckets;
+ u32 linear_buckets;
+ u8 *name;
+
+ u64 cache_hits;
+ u64 cache_misses;
+
+ BVT (clib_bihash_value) ** freelists;
+ void *mheap;
+
+} BVT (clib_bihash);
+
+
+static inline void
+BV (clib_bihash_update_lru) (BVT (clib_bihash_bucket) * b, u8 slot)
+{
+ u16 value, tmp, mask;
+ u8 found_lru_pos;
+ u16 save_hi;
+
+ if (BIHASH_KVP_CACHE_SIZE < 2)
+ return;
+
+ ASSERT (slot < BIHASH_KVP_CACHE_SIZE);
+
+ /* First, find the slot in cache_lru */
+ mask = slot;
+ if (BIHASH_KVP_CACHE_SIZE > 1)
+ mask |= slot << 3;
+ if (BIHASH_KVP_CACHE_SIZE > 2)
+ mask |= slot << 6;
+ if (BIHASH_KVP_CACHE_SIZE > 3)
+ mask |= slot << 9;
+ if (BIHASH_KVP_CACHE_SIZE > 4)
+ mask |= slot << 12;
+
+ value = b->cache_lru;
+ tmp = value ^ mask;
+
+ /* Already the most-recently used? */
+ if ((tmp & 7) == 0)
+ return;
+
+ found_lru_pos = ((tmp & (7 << 3)) == 0) ? 1 : 0;
+ if (BIHASH_KVP_CACHE_SIZE > 2)
+ found_lru_pos = ((tmp & (7 << 6)) == 0) ? 2 : found_lru_pos;
+ if (BIHASH_KVP_CACHE_SIZE > 3)
+ found_lru_pos = ((tmp & (7 << 9)) == 0) ? 3 : found_lru_pos;
+ if (BIHASH_KVP_CACHE_SIZE > 4)
+ found_lru_pos = ((tmp & (7 << 12)) == 0) ? 4 : found_lru_pos;
+
+ ASSERT (found_lru_pos);
+
+ /* create a mask to kill bits in or above slot */
+ mask = 0xFFFF << found_lru_pos;
+ mask <<= found_lru_pos;
+ mask <<= found_lru_pos;
+ mask ^= 0xFFFF;
+ tmp = value & mask;
+
+ /* Save bits above slot */
+ mask ^= 0xFFFF;
+ mask <<= 3;
+ save_hi = value & mask;
+
+ value = save_hi | (tmp << 3) | slot;
+
+ b->cache_lru = value;
+}
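+
+/*
+ * Worked example (BIHASH_KVP_CACHE_SIZE == 5): after a cache reset the
+ * encoded most- to least-recently-used order is 4,3,2,1,0. Touching
+ * slot 2 moves it to the front; the slots that were more recent (4,3)
+ * each age one rank and the less recent slots (1,0) are untouched,
+ * giving 2,4,3,1,0. BV (clib_bihash_get_lru) always returns the last
+ * entry, i.e. the next slot to evict.
+ */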
+
+void
+BV (clib_bihash_update_lru_not_inline) (BVT (clib_bihash_bucket) * b,
+ u8 slot);
+
+static inline u8 BV (clib_bihash_get_lru) (BVT (clib_bihash_bucket) * b)
+{
+#if BIHASH_KVP_CACHE_SIZE > 0
+ return (b->cache_lru >> (3 * (BIHASH_KVP_CACHE_SIZE - 1))) & 7;
+#else
+ return 0;
+#endif
+}
+
+static inline void BV (clib_bihash_reset_cache) (BVT (clib_bihash_bucket) * b)
+{
+#if BIHASH_KVP_CACHE_SIZE > 0
+ u16 initial_lru_value;
+
+ memset (b->cache, 0xff, sizeof (b->cache));
+
+ /*
+ * We'll want the cache to be loaded from slot 0 -> slot N, so
+ * the initial LRU order is reverse index order.
+ */
+ if (BIHASH_KVP_CACHE_SIZE == 1)
+ initial_lru_value = 0;
+ else if (BIHASH_KVP_CACHE_SIZE == 2)
+ initial_lru_value = (0 << 3) | (1 << 0);
+ else if (BIHASH_KVP_CACHE_SIZE == 3)
+ initial_lru_value = (0 << 6) | (1 << 3) | (2 << 0);
+ else if (BIHASH_KVP_CACHE_SIZE == 4)
+ initial_lru_value = (0 << 9) | (1 << 6) | (2 << 3) | (3 << 0);
+ else if (BIHASH_KVP_CACHE_SIZE == 5)
+ initial_lru_value = (0 << 12) | (1 << 9) | (2 << 6) | (3 << 3) | (4 << 0);
+
+ b->cache_lru = initial_lru_value;
+#endif
+}
+
+static inline int BV (clib_bihash_lock_bucket) (BVT (clib_bihash_bucket) * b)
+{
+ BVT (clib_bihash_bucket) tmp_b;
+ u64 rv;
+
+ tmp_b.as_u64 = 0;
+ tmp_b.cache_lru = 1 << 15;
+
+ rv = __sync_fetch_and_or (&b->as_u64, tmp_b.as_u64);
+ tmp_b.as_u64 = rv;
+ /* Was already locked? */
+ if (tmp_b.cache_lru & (1 << 15))
+ return 0;
+ return 1;
+}
+
+static inline void BV (clib_bihash_unlock_bucket)
+ (BVT (clib_bihash_bucket) * b)
+{
+ BVT (clib_bihash_bucket) tmp_b;
+
+ tmp_b.as_u64 = b->as_u64;
+ tmp_b.cache_lru &= ~(1 << 15);
+ b->as_u64 = tmp_b.as_u64;
+}
+
+static inline void *BV (clib_bihash_get_value) (BVT (clib_bihash) * h,
+ uword offset)
+{
+ u8 *hp = h->mheap;
+ u8 *vp = hp + offset;
+
+ return (void *) vp;
+}
+
+static inline uword BV (clib_bihash_get_offset) (BVT (clib_bihash) * h,
+ void *v)
+{
+ u8 *hp, *vp;
+
+ hp = (u8 *) h->mheap;
+ vp = (u8 *) v;
+
+ ASSERT ((vp - hp) < 0x100000000ULL);
+ return vp - hp;
+}
+
+void BV (clib_bihash_init)
+ (BVT (clib_bihash) * h, char *name, u32 nbuckets, uword memory_size);
+
+void BV (clib_bihash_free) (BVT (clib_bihash) * h);
+
+int BV (clib_bihash_add_del) (BVT (clib_bihash) * h,
+ BVT (clib_bihash_kv) * add_v, int is_add);
+int BV (clib_bihash_search) (BVT (clib_bihash) * h,
+ BVT (clib_bihash_kv) * search_v,
+ BVT (clib_bihash_kv) * return_v);
+
+void BV (clib_bihash_foreach_key_value_pair) (BVT (clib_bihash) * h,
+ void *callback, void *arg);
+
+format_function_t BV (format_bihash);
+format_function_t BV (format_bihash_kvp);
+format_function_t BV (format_bihash_lru);
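+
+/*
+ * A minimal end-to-end sketch, assuming the _8_8 instantiation
+ * (bihash_8_8.h); the table name and sizes below are illustrative:
+ *
+ *   clib_bihash_8_8_t h;
+ *   clib_bihash_kv_8_8_t kv, result;
+ *
+ *   clib_bihash_init_8_8 (&h, "test", 1024, 1 << 20);
+ *   kv.key = 42;
+ *   kv.value = 43;
+ *   clib_bihash_add_del_8_8 (&h, &kv, 1);       [1 = add, 0 = delete]
+ *   if (clib_bihash_search_8_8 (&h, &kv, &result) == 0)
+ *     ASSERT (result.value == 43);
+ */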
+
+static inline int BV (clib_bihash_search_inline)
+ (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * key_result)
+{
+ u64 hash;
+ u32 bucket_index;
+ BVT (clib_bihash_value) * v;
+ BVT (clib_bihash_bucket) * b;
+#if BIHASH_KVP_CACHE_SIZE > 0
+ BVT (clib_bihash_kv) * kvp;
+#endif
+ int i, limit;
+
+ hash = BV (clib_bihash_hash) (key_result);
+
+ bucket_index = hash & (h->nbuckets - 1);
+ b = &h->buckets[bucket_index];
+
+ if (b->offset == 0)
+ return -1;
+
+#if BIHASH_KVP_CACHE_SIZE > 0
+ /* Check the cache, if not currently locked */
+ if (PREDICT_TRUE ((b->cache_lru & (1 << 15)) == 0))
+ {
+ limit = BIHASH_KVP_CACHE_SIZE;
+ kvp = b->cache;
+ for (i = 0; i < limit; i++)
+ {
+ if (BV (clib_bihash_key_compare) (kvp[i].key, key_result->key))
+ {
+ *key_result = kvp[i];
+ h->cache_hits++;
+ return 0;
+ }
+ }
+ }
+#endif
+
+ hash >>= h->log2_nbuckets;
+
+ v = BV (clib_bihash_get_value) (h, b->offset);
+
+ /* If the bucket has unresolvable collisions, use linear search */
+ limit = BIHASH_KVP_PER_PAGE;
+ v += (b->linear_search == 0) ? hash & ((1 << b->log2_pages) - 1) : 0;
+ if (PREDICT_FALSE (b->linear_search))
+ limit <<= b->log2_pages;
+
+ for (i = 0; i < limit; i++)
+ {
+ if (BV (clib_bihash_key_compare) (v->kvp[i].key, key_result->key))
+ {
+ *key_result = v->kvp[i];
+
+#if BIHASH_KVP_CACHE_SIZE > 0
+ u8 cache_slot;
+ /* Try to lock the bucket */
+ if (BV (clib_bihash_lock_bucket) (b))
+ {
+ cache_slot = BV (clib_bihash_get_lru) (b);
+ b->cache[cache_slot] = v->kvp[i];
+ BV (clib_bihash_update_lru) (b, cache_slot);
+
+ /* Unlock the bucket */
+ BV (clib_bihash_unlock_bucket) (b);
+ h->cache_misses++;
+ }
+#endif
+ return 0;
+ }
+ }
+ return -1;
+}
+
+static inline int BV (clib_bihash_search_inline_2)
+ (BVT (clib_bihash) * h,
+ BVT (clib_bihash_kv) * search_key, BVT (clib_bihash_kv) * valuep)
+{
+ u64 hash;
+ u32 bucket_index;
+ BVT (clib_bihash_value) * v;
+ BVT (clib_bihash_bucket) * b;
+#if BIHASH_KVP_CACHE_SIZE > 0
+ BVT (clib_bihash_kv) * kvp;
+#endif
+ int i, limit;
+
+ ASSERT (valuep);
+
+ hash = BV (clib_bihash_hash) (search_key);
+
+ bucket_index = hash & (h->nbuckets - 1);
+ b = &h->buckets[bucket_index];
+
+ if (b->offset == 0)
+ return -1;
+
+ /* Check the cache, if currently unlocked */
+#if BIHASH_KVP_CACHE_SIZE > 0
+ if (PREDICT_TRUE ((b->cache_lru & (1 << 15)) == 0))
+ {
+ limit = BIHASH_KVP_CACHE_SIZE;
+ kvp = b->cache;
+ for (i = 0; i < limit; i++)
+ {
+ if (BV (clib_bihash_key_compare) (kvp[i].key, search_key->key))
+ {
+ *valuep = kvp[i];
+ h->cache_hits++;
+ return 0;
+ }
+ }
+ }
+#endif
+
+ hash >>= h->log2_nbuckets;
+ v = BV (clib_bihash_get_value) (h, b->offset);
+
+ /* If the bucket has unresolvable collisions, use linear search */
+ limit = BIHASH_KVP_PER_PAGE;
+ v += (b->linear_search == 0) ? hash & ((1 << b->log2_pages) - 1) : 0;
+ if (PREDICT_FALSE (b->linear_search))
+ limit <<= b->log2_pages;
+
+ for (i = 0; i < limit; i++)
+ {
+ if (BV (clib_bihash_key_compare) (v->kvp[i].key, search_key->key))
+ {
+ *valuep = v->kvp[i];
+
+#if BIHASH_KVP_CACHE_SIZE > 0
+ u8 cache_slot;
+
+ /* Try to lock the bucket */
+ if (BV (clib_bihash_lock_bucket) (b))
+ {
+ cache_slot = BV (clib_bihash_get_lru) (b);
+ b->cache[cache_slot] = v->kvp[i];
+ BV (clib_bihash_update_lru) (b, cache_slot);
+
+ /* Reenable the cache */
+ BV (clib_bihash_unlock_bucket) (b);
+ h->cache_misses++;
+ }
+#endif
+ return 0;
+ }
+ }
+ return -1;
+}
+
+#endif /* __included_bihash_template_h__ */
+
+/** @endcond */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/bitmap.h b/src/vppinfra/bitmap.h
new file mode 100644
index 00000000..9e1ae493
--- /dev/null
+++ b/src/vppinfra/bitmap.h
@@ -0,0 +1,774 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003, 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_clib_bitmap_h
+#define included_clib_bitmap_h
+
+/** \file
+ Bitmaps built as vectors of machine words
+*/
+
+#include <vppinfra/vec.h>
+#include <vppinfra/random.h>
+#include <vppinfra/error.h>
+#include <vppinfra/bitops.h> /* for count_set_bits */
+
+typedef uword clib_bitmap_t;
+
+/** predicate function; is an entire bitmap empty?
+ @param ai - pointer to a bitmap
+ @returns 1 if the entire bitmap is zero, 0 otherwise
+*/
+always_inline uword
+clib_bitmap_is_zero (uword * ai)
+{
+ uword i;
+ for (i = 0; i < vec_len (ai); i++)
+ if (ai[i] != 0)
+ return 0;
+ return 1;
+}
+
+/** predicate function; are two bitmaps equal?
+ @param a - pointer to a bitmap
+ @param b - pointer to a bitmap
+ @returns 1 if the bitmaps are equal, 0 otherwise
+*/
+always_inline uword
+clib_bitmap_is_equal (uword * a, uword * b)
+{
+ uword i;
+ if (vec_len (a) != vec_len (b))
+ return 0;
+ for (i = 0; i < vec_len (a); i++)
+ if (a[i] != b[i])
+ return 0;
+ return 1;
+}
+
+/** Duplicate a bitmap
+ @param v - pointer to a bitmap
+ @returns a duplicate of the bitmap
+*/
+#define clib_bitmap_dup(v) vec_dup(v)
+
+/** Free a bitmap
+ @param v - pointer to the bitmap to free
+*/
+#define clib_bitmap_free(v) vec_free(v)
+
+/** Number of bytes in a bitmap
+ @param v - pointer to the bitmap
+*/
+#define clib_bitmap_bytes(v) vec_bytes(v)
+
+/** Clear a bitmap
+ @param v - pointer to the bitmap to clear
+*/
+#define clib_bitmap_zero(v) vec_zero(v)
+
+/** Allocate a bitmap with the supplied number of bits
+ @param [out] v - the resulting bitmap
+ @param n_bits - the required number of bits
+*/
+
+#define clib_bitmap_alloc(v,n_bits) \
+ v = vec_new (uword, ((n_bits) + BITS (uword) - 1) / BITS (uword))
+
+#define clib_bitmap_vec_validate(v,i) vec_validate_aligned((v),(i),sizeof(uword))
+
+/* Make sure that a bitmap is at least n_bits in size */
+#define clib_bitmap_validate(v,n_bits) \
+ clib_bitmap_vec_validate ((v), ((n_bits) - 1) / BITS (uword))
+
+/* low-level routine to remove trailing zeros from a bitmap */
+always_inline uword *
+_clib_bitmap_remove_trailing_zeros (uword * a)
+{
+ word i;
+ if (a)
+ {
+ for (i = _vec_len (a) - 1; i >= 0; i--)
+ if (a[i] != 0)
+ break;
+ _vec_len (a) = i + 1;
+ }
+ return a;
+}
+
+/** Sets the ith bit of a bitmap to new_value.
+    No sanity checking. Be careful.
+    @param a - pointer to the bitmap
+    @param i - the bit position to set
+    @param new_value - new value for the bit
+    @returns the old value of the bit
+*/
+always_inline uword
+clib_bitmap_set_no_check (uword * a, uword i, uword new_value)
+{
+ uword i0 = i / BITS (a[0]);
+ uword i1 = i % BITS (a[0]);
+ uword bit = (uword) 1 << i1;
+ uword ai, old_value;
+
+ /* Removed ASSERT since uword * a may not be a vector. */
+ /* ASSERT (i0 < vec_len (a)); */
+
+ ai = a[i0];
+ old_value = (ai & bit) != 0;
+ ai &= ~bit;
+ ai |= ((uword) (new_value != 0)) << i1;
+ a[i0] = ai;
+ return old_value;
+}
+
+/** Sets the ith bit of a bitmap to new_value.
+    Removes trailing zeros from the bitmap.
+    @param ai - pointer to the bitmap
+    @param i - the bit position to set
+    @param value - new value for the bit
+    @returns a pointer to the updated bitmap, which may expand and move
+*/
+always_inline uword *
+clib_bitmap_set (uword * ai, uword i, uword value)
+{
+ uword i0 = i / BITS (ai[0]);
+ uword i1 = i % BITS (ai[0]);
+ uword a;
+
+ /* Check for writing a zero to beyond end of bitmap. */
+ if (value == 0 && i0 >= vec_len (ai))
+ return ai; /* Implied trailing zeros. */
+
+ clib_bitmap_vec_validate (ai, i0);
+
+ a = ai[i0];
+ a &= ~((uword) 1 << i1);
+ a |= ((uword) (value != 0)) << i1;
+ ai[i0] = a;
+
+ /* If bits have been cleared, test for zero. */
+ if (a == 0)
+ ai = _clib_bitmap_remove_trailing_zeros (ai);
+
+ return ai;
+}
+
+/** Gets the ith bit value from a bitmap
+ @param ai - pointer to the bitmap
+ @param i - the bit position to interrogate
+ @returns the indicated bit value
+*/
+always_inline uword
+clib_bitmap_get (uword * ai, uword i)
+{
+ uword i0 = i / BITS (ai[0]);
+ uword i1 = i % BITS (ai[0]);
+ return i0 < vec_len (ai) && 0 != ((ai[i0] >> i1) & 1);
+}
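+
+/*
+ * Usage sketch: clib_bitmap_set reallocates the underlying vector as
+ * needed, so its return value must be assigned back:
+ *
+ *   uword *bm = 0;
+ *   bm = clib_bitmap_set (bm, 67, 1);
+ *   ASSERT (clib_bitmap_get (bm, 67) == 1);
+ *   ASSERT (clib_bitmap_get (bm, 66) == 0);
+ *   clib_bitmap_free (bm);
+ */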
+
+/** Gets the ith bit value from a bitmap
+ Does not sanity-check the bit position. Be careful.
+ @param ai - pointer to the bitmap
+ @param i - the bit position to interrogate
+ @returns the indicated bit value, or garbage if the bit position is
+ out of range.
+*/
+always_inline uword
+clib_bitmap_get_no_check (uword * ai, uword i)
+{
+ uword i0 = i / BITS (ai[0]);
+ uword i1 = i % BITS (ai[0]);
+ return 0 != ((ai[i0] >> i1) & 1);
+}
+
+always_inline uword
+clib_bitmap_get_multiple_no_check (uword * ai, uword i, uword n_bits)
+{
+ uword i0 = i / BITS (ai[0]);
+ uword i1 = i % BITS (ai[0]);
+ ASSERT (i1 + n_bits <= BITS (uword));
+  return (ai[i0] >> i1) & pow2_mask (n_bits);
+}
+
+/** Gets the ith through ith + n_bits bit values from a bitmap
+ @param bitmap - pointer to the bitmap
+ @param i - the first bit position to retrieve
+ @param n_bits - the number of bit positions to retrieve
+ @returns the indicated range of bits
+*/
+always_inline uword
+clib_bitmap_get_multiple (uword * bitmap, uword i, uword n_bits)
+{
+ uword i0, i1, result;
+ uword l = vec_len (bitmap);
+
+ ASSERT (n_bits <= BITS (result));
+
+ i0 = i / BITS (bitmap[0]);
+ i1 = i % BITS (bitmap[0]);
+
+ /* Check first word. */
+ result = 0;
+ if (i0 < l)
+ {
+ result |= (bitmap[i0] >> i1);
+ if (n_bits < BITS (bitmap[0]))
+ result &= (((uword) 1 << n_bits) - 1);
+ }
+
+ /* Check for overlap into next word. */
+ i0++;
+ if (i1 + n_bits > BITS (bitmap[0]) && i0 < l)
+ {
+ n_bits -= BITS (bitmap[0]) - i1;
+ result |=
+ (bitmap[i0] & (((uword) 1 << n_bits) - 1)) << (BITS (bitmap[0]) - i1);
+ }
+
+ return result;
+}
+
+/** Sets the ith through (i + n_bits - 1)th bits in a bitmap
+    @param bitmap - pointer to the bitmap
+    @param i - the first bit position to set
+    @param value - the values to set
+    @param n_bits - the number of bit positions to set
+    @returns a pointer to the updated bitmap, which may expand and move
+*/
+
+always_inline uword *
+clib_bitmap_set_multiple (uword * bitmap, uword i, uword value, uword n_bits)
+{
+ uword i0, i1, l, t, m;
+
+ ASSERT (n_bits <= BITS (value));
+
+ i0 = i / BITS (bitmap[0]);
+ i1 = i % BITS (bitmap[0]);
+
+ /* Allocate bitmap. */
+ clib_bitmap_vec_validate (bitmap, (i + n_bits) / BITS (bitmap[0]));
+ l = vec_len (bitmap);
+
+ m = ~0;
+ if (n_bits < BITS (value))
+ m = (((uword) 1 << n_bits) - 1);
+ value &= m;
+
+ /* Insert into first word. */
+ t = bitmap[i0];
+ t &= ~(m << i1);
+ t |= value << i1;
+ bitmap[i0] = t;
+
+ /* Insert into second word. */
+ i0++;
+ if (i1 + n_bits > BITS (bitmap[0]) && i0 < l)
+ {
+ t = BITS (bitmap[0]) - i1;
+ value >>= t;
+ n_bits -= t;
+ t = bitmap[i0];
+ m = ((uword) 1 << n_bits) - 1;
+ t &= ~m;
+ t |= value;
+ bitmap[i0] = t;
+ }
+
+ return bitmap;
+}
+
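+/** Sets a region of n_bits bits starting at bit i to a single value
+    @param bitmap - pointer to the bitmap
+    @param i - the first bit position in the region
+    @param value - if non-zero, set the bits; otherwise clear them
+    @param n_bits - the number of bits in the region
+    @returns a pointer to the updated bitmap, which may expand and move
+*/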
+always_inline uword *
+clib_bitmap_set_region (uword * bitmap, uword i, uword value, uword n_bits)
+{
+ uword a0, a1, b0;
+ uword i_end, mask;
+
+ a0 = i / BITS (bitmap[0]);
+ a1 = i % BITS (bitmap[0]);
+
+ i_end = i + n_bits;
+ b0 = i_end / BITS (bitmap[0]);
+
+ clib_bitmap_vec_validate (bitmap, b0);
+
+ /* First word. */
+ mask = n_bits < BITS (bitmap[0]) ? pow2_mask (n_bits) : ~0;
+ mask <<= a1;
+
+ if (value)
+ bitmap[a0] |= mask;
+ else
+ bitmap[a0] &= ~mask;
+
+ for (a0++; a0 < b0; a0++)
+ bitmap[a0] = value ? ~0 : 0;
+
+ if (a0 == b0)
+ {
+ word n_bits_left = n_bits - (BITS (bitmap[0]) - a1);
+ mask = pow2_mask (n_bits_left);
+ if (value)
+ bitmap[a0] |= mask;
+ else
+ bitmap[a0] &= ~mask;
+ }
+
+ return bitmap;
+}
+
+/** Macro to iterate across set bits in a bitmap
+
+ @param i - the current set bit
+ @param ai - the bitmap
+ @param body - the expression to evaluate for each set bit
+*/
+#define clib_bitmap_foreach(i,ai,body) \
+do { \
+ uword __bitmap_i, __bitmap_ai, __bitmap_len, __bitmap_first_set; \
+ __bitmap_len = vec_len ((ai)); \
+ for (__bitmap_i = 0; __bitmap_i < __bitmap_len; __bitmap_i++) \
+ { \
+ __bitmap_ai = (ai)[__bitmap_i]; \
+ while (__bitmap_ai != 0) \
+ { \
+ __bitmap_first_set = first_set (__bitmap_ai); \
+ (i) = (__bitmap_i * BITS ((ai)[0]) \
+ + min_log2 (__bitmap_first_set)); \
+ do { body; } while (0); \
+ __bitmap_ai ^= __bitmap_first_set; \
+ } \
+ } \
+} while (0)
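+
+/*
+ * Usage sketch: visit every set bit; the body may be a statement
+ * expression:
+ *
+ *   uword i, *bm = 0;
+ *   bm = clib_bitmap_set_multiple (bm, 3, 0x5, 3);   [sets bits 3, 5]
+ *   clib_bitmap_foreach (i, bm, ({ fformat (stdout, "%wd\n", i); }));
+ */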
+
+
+/** Return the lowest numbered set bit in a bitmap
+ @param ai - pointer to the bitmap
+ @returns lowest numbered set bit, or ~0 if the entire bitmap is zero
+*/
+always_inline uword
+clib_bitmap_first_set (uword * ai)
+{
+ uword i;
+ for (i = 0; i < vec_len (ai); i++)
+ {
+ uword x = ai[i];
+ if (x != 0)
+ return i * BITS (ai[0]) + log2_first_set (x);
+ }
+ return ~0;
+}
+
+/** Return the highest numbered set bit in a bitmap
+    @param ai - pointer to the bitmap
+    @returns highest numbered set bit, or ~0 if the entire bitmap is zero
+*/
+always_inline uword
+clib_bitmap_last_set (uword * ai)
+{
+ uword i;
+
+ for (i = vec_len (ai); i > 0; i--)
+ {
+ uword x = ai[i - 1];
+ if (x != 0)
+ {
+ uword first_bit;
+ count_leading_zeros (first_bit, x);
+ return (i) * BITS (ai[0]) - first_bit - 1;
+ }
+ }
+ return ~0;
+}
+
+/** Return the lowest numbered clear bit in a bitmap
+ @param ai - pointer to the bitmap
+ @returns lowest numbered clear bit
+*/
+always_inline uword
+clib_bitmap_first_clear (uword * ai)
+{
+ uword i;
+ for (i = 0; i < vec_len (ai); i++)
+ {
+ uword x = ~ai[i];
+ if (x != 0)
+ return i * BITS (ai[0]) + log2_first_set (x);
+ }
+ return i * BITS (ai[0]);
+}
+
+/** Return the number of set bits in a bitmap
+ @param ai - pointer to the bitmap
+ @returns the number of set bits in the bitmap
+*/
+always_inline uword
+clib_bitmap_count_set_bits (uword * ai)
+{
+ uword i;
+ uword n_set = 0;
+ for (i = 0; i < vec_len (ai); i++)
+ n_set += count_set_bits (ai[i]);
+ return n_set;
+}
+
+/** Logical operator across two bitmaps
+
+ @param ai - pointer to the destination bitmap
+ @param bi - pointer to the source bitmap
+ @returns ai = ai and bi. ai is modified, bi is not modified
+*/
+always_inline uword *clib_bitmap_and (uword * ai, uword * bi);
+
+/** Logical operator across two bitmaps
+
+ @param ai - pointer to the destination bitmap
+ @param bi - pointer to the source bitmap
+ @returns ai = ai & ~bi. ai is modified, bi is not modified
+*/
+always_inline uword *clib_bitmap_andnot (uword * ai, uword * bi);
+
+/** Logical operator across two bitmaps
+
+    @param ai - pointer to the destination bitmap
+    @param bi - pointer to the source bitmap
+    @returns ai = ai or bi. ai is modified, bi is not modified
+*/
+always_inline uword *clib_bitmap_or (uword * ai, uword * bi);
+
+/** Logical operator across two bitmaps
+
+ @param ai - pointer to the destination bitmap
+ @param bi - pointer to the source bitmap
+ @returns ai = ai xor bi. ai is modified, bi is not modified
+*/
+always_inline uword *clib_bitmap_xor (uword * ai, uword * bi);
+
+/* ALU function definition macro for functions taking two bitmaps. */
+#define _(name, body, check_zero) \
+always_inline uword * \
+clib_bitmap_##name (uword * ai, uword * bi) \
+{ \
+ uword i, a, b, bi_len, n_trailing_zeros; \
+ \
+ n_trailing_zeros = 0; \
+ bi_len = vec_len (bi); \
+ if (bi_len > 0) \
+ clib_bitmap_vec_validate (ai, bi_len - 1); \
+ for (i = 0; i < vec_len (ai); i++) \
+ { \
+ a = ai[i]; \
+ b = i < bi_len ? bi[i] : 0; \
+ do { body; } while (0); \
+ ai[i] = a; \
+ if (check_zero) \
+ n_trailing_zeros = a ? 0 : (n_trailing_zeros + 1); \
+ } \
+ if (check_zero) \
+ _vec_len (ai) -= n_trailing_zeros; \
+ return ai; \
+}
+
+/* ALU functions: */
+_(and, a = a & b, 1)
+_(andnot, a = a & ~b, 1)
+_(or, a = a | b, 0)
+_(xor, a = a ^ b, 1)
+#undef _
+/** Logical operator across two bitmaps which duplicates the first bitmap
+
+ @param ai - pointer to the destination bitmap
+ @param bi - pointer to the source bitmap
+ @returns aiDup = ai and bi. Neither ai nor bi are modified
+*/
+ always_inline uword *
+ clib_bitmap_dup_and (uword * ai, uword * bi);
+
+/** Logical operator across two bitmaps which duplicates the first bitmap
+
+ @param ai - pointer to the destination bitmap
+ @param bi - pointer to the source bitmap
+ @returns aiDup = ai & ~bi. Neither ai nor bi are modified
+*/
+ always_inline uword *
+ clib_bitmap_dup_andnot (uword * ai, uword * bi);
+
+/** Logical operator across two bitmaps which duplicates the first bitmap
+
+ @param ai - pointer to the destination bitmap
+ @param bi - pointer to the source bitmap
+ @returns aiDup = ai or bi. Neither ai nor bi are modified
+*/
+ always_inline uword *
+ clib_bitmap_dup_or (uword * ai, uword * bi);
+
+/** Logical operator across two bitmaps which duplicates the first bitmap
+
+ @param ai - pointer to the destination bitmap
+ @param bi - pointer to the source bitmap
+ @returns aiDup = ai xor bi. Neither ai nor bi are modified
+*/
+ always_inline uword *
+ clib_bitmap_dup_xor (uword * ai, uword * bi);
+
+#define _(name) \
+ always_inline uword * \
+ clib_bitmap_dup_##name (uword * ai, uword * bi) \
+{ return clib_bitmap_##name (clib_bitmap_dup (ai), bi); }
+
+_(and);
+_(andnot);
+_(or);
+_(xor);
+
+#undef _
+
+/* ALU function definition macro for functions taking one bitmap and an immediate. */
+#define _(name, body, check_zero) \
+always_inline uword * \
+clib_bitmap_##name (uword * ai, uword i) \
+{ \
+ uword i0 = i / BITS (ai[0]); \
+ uword i1 = i % BITS (ai[0]); \
+ uword a, b; \
+ clib_bitmap_vec_validate (ai, i0); \
+ a = ai[i0]; \
+ b = (uword) 1 << i1; \
+ do { body; } while (0); \
+ ai[i0] = a; \
+ if (check_zero && a == 0) \
+ ai = _clib_bitmap_remove_trailing_zeros (ai); \
+ return ai; \
+}
+
+/* ALU functions immediate: */
+_(andi, a = a & b, 1)
+_(andnoti, a = a & ~b, 1)
+_(ori, a = a | b, 0)
+_(xori, a = a ^ b, 1)
+#undef _
+/** Return a random bitmap of the requested length
+    @param ai - pointer to the destination bitmap
+    @param n_bits - number of bits to allocate
+    @param [in,out] seed - pointer to the random number seed
+    @returns a reasonably random bitmap; see random.h
+*/
+always_inline uword *
+clib_bitmap_random (uword * ai, uword n_bits, u32 * seed)
+{
+ vec_reset_length (ai);
+
+ if (n_bits > 0)
+ {
+ uword i = n_bits - 1;
+ uword i0, i1;
+ uword log2_rand_max;
+
+ log2_rand_max = min_log2 (random_u32_max ());
+
+ i0 = i / BITS (ai[0]);
+ i1 = i % BITS (ai[0]);
+
+ clib_bitmap_vec_validate (ai, i0);
+ for (i = 0; i <= i0; i++)
+ {
+ uword n;
+ for (n = 0; n < BITS (ai[i]); n += log2_rand_max)
+ ai[i] |= random_u32 (seed) << n;
+ }
+ if (i1 + 1 < BITS (ai[0]))
+ ai[i0] &= (((uword) 1 << (i1 + 1)) - 1);
+ }
+ return ai;
+}
+
+/** Return the next set bit in a bitmap starting at bit i
+ @param ai - pointer to the bitmap
+ @param i - first bit position to test
+ @returns first set bit position at or after i,
+ ~0 if no further set bits are found
+*/
+always_inline uword
+clib_bitmap_next_set (uword * ai, uword i)
+{
+ uword i0 = i / BITS (ai[0]);
+ uword i1 = i % BITS (ai[0]);
+ uword t;
+
+ if (i0 < vec_len (ai))
+ {
+ t = (ai[i0] >> i1) << i1;
+ if (t)
+ return log2_first_set (t) + i0 * BITS (ai[0]);
+
+ for (i0++; i0 < vec_len (ai); i0++)
+ {
+ t = ai[i0];
+ if (t)
+ return log2_first_set (t) + i0 * BITS (ai[0]);
+ }
+ }
+
+ return ~0;
+}
+
+/** Return the next clear bit in a bitmap starting at bit i
+ @param ai - pointer to the bitmap
+ @param i - first bit position to test
+ @returns first clear bit position at or after i
+*/
+always_inline uword
+clib_bitmap_next_clear (uword * ai, uword i)
+{
+ uword i0 = i / BITS (ai[0]);
+ uword i1 = i % BITS (ai[0]);
+ uword t;
+
+ if (i0 < vec_len (ai))
+ {
+ t = (~ai[i0] >> i1) << i1;
+ if (t)
+ return log2_first_set (t) + i0 * BITS (ai[0]);
+
+ for (i0++; i0 < vec_len (ai); i0++)
+ {
+ t = ~ai[i0];
+ if (t)
+ return log2_first_set (t) + i0 * BITS (ai[0]);
+ }
+ }
+ return i;
+}
+
+/** unformat a list of bit ranges into a bitmap (eg "0-3,5-7,11")
+
+    uword * bitmap;
+    rv = unformat (input, "%U", unformat_bitmap_list, &bitmap);
+
+    Standard unformat_function_t arguments
+
+    @param input - pointer to an unformat_input_t
+    @param va - varargs list comprising a single uword **
+    @returns 1 on success, 0 on failure
+*/
+static inline uword
+unformat_bitmap_list (unformat_input_t * input, va_list * va)
+{
+ uword **bitmap_return = va_arg (*va, uword **);
+ uword *bitmap = 0;
+
+ u32 a, b;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ int i;
+ if (unformat (input, "%u-%u,", &a, &b))
+ ;
+ else if (unformat (input, "%u,", &a))
+ b = a;
+ else if (unformat (input, "%u-%u", &a, &b))
+ ;
+ else if (unformat (input, "%u", &a))
+ b = a;
+ else if (bitmap)
+ {
+ unformat_put_input (input);
+ break;
+ }
+ else
+ goto error;
+
+ if (b < a)
+ goto error;
+
+ for (i = a; i <= b; i++)
+ bitmap = clib_bitmap_set (bitmap, i, 1);
+ }
+ *bitmap_return = bitmap;
+ return 1;
+error:
+ clib_bitmap_free (bitmap);
+ return 0;
+}
+
+/** Format a bitmap as a string of hex bytes
+
+    uword * bitmap;
+    s = format (0, "%U", format_bitmap_hex, bitmap);
+
+    Standard format_function_t arguments
+
+    @param s - string under construction
+    @param args - varargs list comprising a single uword *
+    @returns string under construction
+*/
+static inline u8 *
+format_bitmap_hex (u8 * s, va_list * args)
+{
+ uword *bitmap = va_arg (*args, uword *);
+ int i, is_trailing_zero = 1;
+
+ if (!bitmap)
+ return format (s, "0");
+
+ i = vec_bytes (bitmap) * 2;
+
+ while (i > 0)
+ {
+ u8 x = clib_bitmap_get_multiple (bitmap, --i * 4, 4);
+
+ if (x && is_trailing_zero)
+ is_trailing_zero = 0;
+
+ if (x || !is_trailing_zero)
+ s = format (s, "%x", x);
+ }
+ return s;
+}
+#endif /* included_clib_bitmap_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/bitops.h b/src/vppinfra/bitops.h
new file mode 100644
index 00000000..ab91b8ae
--- /dev/null
+++ b/src/vppinfra/bitops.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_clib_bitops_h
+#define included_clib_bitops_h
+
+#include <vppinfra/clib.h>
+
+/* Population count from Hacker's Delight. */
+always_inline uword
+count_set_bits (uword x)
+{
+#if uword_bits == 64
+ const uword c1 = 0x5555555555555555;
+ const uword c2 = 0x3333333333333333;
+ const uword c3 = 0x0f0f0f0f0f0f0f0f;
+#else
+ const uword c1 = 0x55555555;
+ const uword c2 = 0x33333333;
+ const uword c3 = 0x0f0f0f0f;
+#endif
+
+ /* Sum 1 bit at a time. */
+ x = x - ((x >> (uword) 1) & c1);
+
+ /* 2 bits at a time. */
+ x = (x & c2) + ((x >> (uword) 2) & c2);
+
+ /* 4 bits at a time. */
+ x = (x + (x >> (uword) 4)) & c3;
+
+ /* 8, 16, 32 bits at a time. */
+ x = x + (x >> (uword) 8);
+ x = x + (x >> (uword) 16);
+#if uword_bits == 64
+ x = x + (x >> (uword) 32);
+#endif
+
+ return x & (2 * BITS (uword) - 1);
+}
+
+/* Based on "Hacker's Delight" code from GLS. */
+typedef struct
+{
+ uword masks[1 + log2_uword_bits];
+} compress_main_t;
+
+always_inline void
+compress_init (compress_main_t * cm, uword mask)
+{
+ uword q, m, zm, n, i;
+
+ m = ~mask;
+ zm = mask;
+
+ cm->masks[0] = mask;
+ for (i = 0; i < log2_uword_bits; i++)
+ {
+ q = m;
+ m ^= m << 1;
+ m ^= m << 2;
+ m ^= m << 4;
+ m ^= m << 8;
+ m ^= m << 16;
+#if uword_bits > 32
+ m ^= m << (uword) 32;
+#endif
+ cm->masks[1 + i] = n = (m << 1) & zm;
+ m = q & ~m;
+ q = zm & n;
+ zm = zm ^ q ^ (q >> (1 << i));
+ }
+}
+
+always_inline uword
+compress_bits (compress_main_t * cm, uword x)
+{
+ uword q, r;
+
+ r = x & cm->masks[0];
+ q = r & cm->masks[1];
+ r ^= q ^ (q >> 1);
+ q = r & cm->masks[2];
+ r ^= q ^ (q >> 2);
+ q = r & cm->masks[3];
+ r ^= q ^ (q >> 4);
+ q = r & cm->masks[4];
+ r ^= q ^ (q >> 8);
+ q = r & cm->masks[5];
+ r ^= q ^ (q >> 16);
+#if uword_bits > 32
+ q = r & cm->masks[6];
+ r ^= q ^ (q >> (uword) 32);
+#endif
+
+ return r;
+}
+
+always_inline uword
+rotate_left (uword x, uword i)
+{
+ return (x << i) | (x >> (BITS (i) - i));
+}
+
+always_inline uword
+rotate_right (uword x, uword i)
+{
+ return (x >> i) | (x << (BITS (i) - i));
+}
+
+/* "snoob" from Hacker's Delight: returns the next higher number
+   with the same number of set bits. */
+always_inline uword
+next_with_same_number_of_set_bits (uword x)
+{
+ uword smallest, ripple, ones;
+ smallest = x & -x;
+ ripple = x + smallest;
+ ones = x ^ ripple;
+ ones = ones >> (2 + log2_first_set (x));
+ return ripple | ones;
+}
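+
+/*
+ * Worked example: x = 6 (binary 110) gives smallest = 2 (010),
+ * ripple = 8 (1000), ones = x ^ ripple = 1110 shifted right by
+ * 2 + log2_first_set (6) = 3, i.e. 1; the result is 8 | 1 = 9
+ * (1001), the next number with exactly two bits set.
+ */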
+
+#define foreach_set_bit(var,mask,body) \
+do { \
+ uword _foreach_set_bit_m_##var = (mask); \
+ uword _foreach_set_bit_f_##var; \
+ while (_foreach_set_bit_m_##var != 0) \
+ { \
+ _foreach_set_bit_f_##var = first_set (_foreach_set_bit_m_##var); \
+ _foreach_set_bit_m_##var ^= _foreach_set_bit_f_##var; \
+ (var) = min_log2 (_foreach_set_bit_f_##var); \
+ do { body; } while (0); \
+ } \
+} while (0)
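+
+/*
+ * Usage sketch:
+ *
+ *   uword pos;
+ *   foreach_set_bit (pos, 0x29, fformat (stdout, "%wd\n", pos));
+ *
+ * prints 0, 3 and 5, the set bit positions of 0x29 (binary 101001).
+ */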
+
+#endif /* included_clib_bitops_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/byte_order.h b/src/vppinfra/byte_order.h
new file mode 100644
index 00000000..b263538c
--- /dev/null
+++ b/src/vppinfra/byte_order.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2004 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_clib_byte_order_h
+#define included_clib_byte_order_h
+
+#include <vppinfra/clib.h>
+
+#if (__BYTE_ORDER__) == (__ORDER_LITTLE_ENDIAN__)
+#define CLIB_ARCH_IS_BIG_ENDIAN (0)
+#define CLIB_ARCH_IS_LITTLE_ENDIAN (1)
+#else
+/* Default is big endian. */
+#define CLIB_ARCH_IS_BIG_ENDIAN (1)
+#define CLIB_ARCH_IS_LITTLE_ENDIAN (0)
+#endif
+
+/* Big/little endian. */
+#define clib_arch_is_big_endian CLIB_ARCH_IS_BIG_ENDIAN
+#define clib_arch_is_little_endian CLIB_ARCH_IS_LITTLE_ENDIAN
+
+always_inline u16
+clib_byte_swap_u16 (u16 x)
+{
+ return (x >> 8) | (x << 8);
+}
+
+always_inline i16
+clib_byte_swap_i16 (i16 x)
+{
+ return clib_byte_swap_u16 (x);
+}
+
+always_inline u32
+clib_byte_swap_u32 (u32 x)
+{
+#if defined (i386) || defined (__x86_64__)
+ if (!__builtin_constant_p (x))
+ {
+ asm volatile ("bswap %0":"=r" (x):"0" (x));
+ return x;
+ }
+#endif
+ return ((x << 24) | ((x & 0xff00) << 8) | ((x >> 8) & 0xff00) | (x >> 24));
+}
+
+always_inline i32
+clib_byte_swap_i32 (i32 x)
+{
+ return clib_byte_swap_u32 (x);
+}
+
+always_inline u64
+clib_byte_swap_u64 (u64 x)
+{
+#if defined (__x86_64__)
+ if (!__builtin_constant_p (x))
+ {
+ asm volatile ("bswapq %0":"=r" (x):"0" (x));
+ return x;
+ }
+#endif
+#define _(x,n,i) \
+ ((((x) >> (8*(i))) & 0xff) << (8*((n)-(i)-1)))
+ return (_(x, 8, 0) | _(x, 8, 1)
+ | _(x, 8, 2) | _(x, 8, 3)
+ | _(x, 8, 4) | _(x, 8, 5) | _(x, 8, 6) | _(x, 8, 7));
+#undef _
+}
+
+always_inline i64
+clib_byte_swap_i64 (i64 x)
+{
+ return clib_byte_swap_u64 (x);
+}
+
+#define _(sex,type) \
+/* HOST -> SEX */ \
+always_inline type \
+clib_host_to_##sex##_##type (type x) \
+{ \
+ if (! clib_arch_is_##sex##_endian) \
+ x = clib_byte_swap_##type (x); \
+ return x; \
+} \
+ \
+always_inline type \
+clib_host_to_##sex##_mem_##type (type * x) \
+{ \
+ type v = x[0]; \
+ return clib_host_to_##sex##_##type (v); \
+} \
+ \
+always_inline type \
+clib_host_to_##sex##_unaligned_mem_##type (type * x) \
+{ \
+ type v = clib_mem_unaligned (x, type); \
+ return clib_host_to_##sex##_##type (v); \
+} \
+ \
+/* SEX -> HOST */ \
+always_inline type \
+clib_##sex##_to_host_##type (type x) \
+{ return clib_host_to_##sex##_##type (x); } \
+ \
+always_inline type \
+clib_##sex##_to_host_mem_##type (type * x) \
+{ return clib_host_to_##sex##_mem_##type (x); } \
+ \
+always_inline type \
+clib_##sex##_to_host_unaligned_mem_##type (type * x) \
+{ return clib_host_to_##sex##_unaligned_mem_##type (x); }
+
+#ifndef __cplusplus
+_(little, u16)
+_(little, u32)
+_(little, u64)
+_(little, i16)
+_(little, i32)
+_(little, i64)
+_(big, u16)
+_(big, u32)
+_(big, u64)
+_(big, i16)
+_(big, i32)
+_(big, i64)
+#endif
+#undef _
+/* Network "net" alias for "big". */
+#define _(type) \
+always_inline type \
+clib_net_to_host_##type (type x) \
+{ return clib_big_to_host_##type (x); } \
+ \
+always_inline type \
+clib_net_to_host_mem_##type (type * x) \
+{ return clib_big_to_host_mem_##type (x); } \
+ \
+always_inline type \
+clib_net_to_host_unaligned_mem_##type (type * x) \
+{ return clib_big_to_host_unaligned_mem_##type (x); } \
+ \
+always_inline type \
+clib_host_to_net_##type (type x) \
+{ return clib_host_to_big_##type (x); } \
+ \
+always_inline type \
+clib_host_to_net_mem_##type (type * x) \
+{ return clib_host_to_big_mem_##type (x); } \
+ \
+always_inline type \
+clib_host_to_net_unaligned_mem_##type (type * x) \
+{ return clib_host_to_big_unaligned_mem_##type (x); }
+#ifndef __cplusplus
+_(u16);
+_(i16);
+_(u32);
+_(i32);
+_(u64);
+_(i64);
+#endif
+
+#undef _
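+
+/*
+ * Example: on a little-endian host clib_host_to_net_u16 (0x1234)
+ * returns 0x3412 and clib_net_to_host_u32 (0x0a000001) returns
+ * 0x0100000a; on a big-endian host both are identity operations.
+ */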
+
+#endif /* included_clib_byte_order_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/cache.h b/src/vppinfra/cache.h
new file mode 100644
index 00000000..7464b77a
--- /dev/null
+++ b/src/vppinfra/cache.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_clib_cache_h
+#define included_clib_cache_h
+
+#include <vppinfra/error_bootstrap.h>
+
+/*
+ * Allow CFLAGS to override the arch-specific cache line size
+ */
+#ifndef CLIB_LOG2_CACHE_LINE_BYTES
+
+#if defined(__x86_64__) || defined(__ARM_ARCH_7A__) || defined(__i386__)
+#define CLIB_LOG2_CACHE_LINE_BYTES 6
+#endif
+
+#ifdef __aarch64__
+#define CLIB_LOG2_CACHE_LINE_BYTES 7
+#endif
+
+/* Default cache line size of 32 bytes. */
+#ifndef CLIB_LOG2_CACHE_LINE_BYTES
+#define CLIB_LOG2_CACHE_LINE_BYTES 5
+#endif
+
+#endif /* CLIB_LOG2_CACHE_LINE_BYTES defined */
+
+#if (CLIB_LOG2_CACHE_LINE_BYTES >= 9)
+#error Cache line size of 512 bytes or greater is not supported
+#endif
+
+#define CLIB_CACHE_LINE_BYTES (1 << CLIB_LOG2_CACHE_LINE_BYTES)
+#define CLIB_CACHE_LINE_ALIGN_MARK(mark) u8 mark[0] __attribute__((aligned(CLIB_CACHE_LINE_BYTES)))
+
+/* Read/write arguments to __builtin_prefetch. */
+#define CLIB_PREFETCH_READ 0
+#define CLIB_PREFETCH_LOAD 0 /* alias for read */
+#define CLIB_PREFETCH_WRITE 1
+#define CLIB_PREFETCH_STORE 1 /* alias for write */
+
+#define _CLIB_PREFETCH(n,size,type) \
+ if ((size) > (n)*CLIB_CACHE_LINE_BYTES) \
+ __builtin_prefetch (_addr + (n)*CLIB_CACHE_LINE_BYTES, \
+ CLIB_PREFETCH_##type, \
+ /* locality */ 3);
+
+#define CLIB_PREFETCH(addr,size,type) \
+do { \
+ void * _addr = (addr); \
+ \
+ ASSERT ((size) <= 4*CLIB_CACHE_LINE_BYTES); \
+ _CLIB_PREFETCH (0, size, type); \
+ _CLIB_PREFETCH (1, size, type); \
+ _CLIB_PREFETCH (2, size, type); \
+ _CLIB_PREFETCH (3, size, type); \
+} while (0)
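+
+/*
+ * Usage sketch: prefetch the first two cache lines of a buffer that
+ * is about to be written (p is illustrative):
+ *
+ *   CLIB_PREFETCH (p, 2 * CLIB_CACHE_LINE_BYTES, STORE);
+ *
+ * The ASSERT limits the size argument to at most 4 cache lines.
+ */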
+
+#undef _
+
+#endif /* included_clib_cache_h */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/clib.h b/src/vppinfra/clib.h
new file mode 100644
index 00000000..fbb2a21c
--- /dev/null
+++ b/src/vppinfra/clib.h
@@ -0,0 +1,365 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_clib_h
+#define included_clib_h
+
+/* Standalone means to not assume we are running on a Unix box. */
+#if ! defined (CLIB_STANDALONE) && ! defined (CLIB_LINUX_KERNEL)
+#define CLIB_UNIX
+#endif
+
+#include <vppinfra/types.h>
+
+/* Global DEBUG flag. Setting this to 1 or 0 turns off
+ ASSERT (see vppinfra/error.h) & other debugging code. */
+#ifndef CLIB_DEBUG
+#define CLIB_DEBUG 0
+#endif
+
+#ifndef NULL
+#define NULL ((void *) 0)
+#endif
+
+#define BITS(x) (8*sizeof(x))
+#define ARRAY_LEN(x) (sizeof (x)/sizeof (x[0]))
+
+#define _STRUCT_FIELD(t,f) (((t *) 0)->f)
+#define STRUCT_OFFSET_OF(t,f) ((uword) & _STRUCT_FIELD (t, f))
+#define STRUCT_BIT_OFFSET_OF(t,f) (BITS(u8) * (uword) & _STRUCT_FIELD (t, f))
+#define STRUCT_SIZE_OF(t,f) (sizeof (_STRUCT_FIELD (t, f)))
+#define STRUCT_BITS_OF(t,f) (BITS (_STRUCT_FIELD (t, f)))
+#define STRUCT_ARRAY_LEN(t,f) ARRAY_LEN (_STRUCT_FIELD (t, f))
+#define STRUCT_MARK(mark) u8 mark[0]
+#define STRUCT_MARK_PTR(v, f) &(v)->f
+
+/* Stride in bytes between struct array elements. */
+#define STRUCT_STRIDE_OF(t,f) \
+ ( ((uword) & (((t *) 0)[1].f)) \
+ - ((uword) & (((t *) 0)[0].f)))
+
+#define STRUCT_OFFSET_OF_VAR(v,f) ((uword) (&(v)->f) - (uword) (v))
+
+/* Used to pack structure elements. */
+#define CLIB_PACKED(x) x __attribute__ ((packed))
+#define CLIB_UNUSED(x) x __attribute__ ((unused))
+
+#define never_inline __attribute__ ((__noinline__))
+
+#if CLIB_DEBUG > 0
+#define always_inline static inline
+#define static_always_inline static inline
+#else
+#define always_inline static inline __attribute__ ((__always_inline__))
+#define static_always_inline static inline __attribute__ ((__always_inline__))
+#endif
+
+
+/* Reserved (unused) structure element with address offset between
+ from and to. */
+#define CLIB_PAD_FROM_TO(from,to) u8 pad_##from[(to) - (from)]
+
+/* Hints to compiler about hot/cold code. */
+#define PREDICT_FALSE(x) __builtin_expect((x),0)
+#define PREDICT_TRUE(x) __builtin_expect((x),1)
+
+/* Full memory barrier (read and write). */
+#define CLIB_MEMORY_BARRIER() __sync_synchronize ()
+
+#if __x86_64__
+#define CLIB_MEMORY_STORE_BARRIER() __builtin_ia32_sfence ()
+#else
+#define CLIB_MEMORY_STORE_BARRIER() __sync_synchronize ()
+#endif
+
+/* Arranges for function to be called before main. */
+#define INIT_FUNCTION(decl) \
+ decl __attribute ((constructor)); \
+ decl
+
+/* Arranges for function to be called before exit. */
+#define EXIT_FUNCTION(decl) \
+ decl __attribute ((destructor)); \
+ decl
+
+/* Use __builtin_clz if available. */
+#ifdef __GNUC__
+#include <features.h>
+#if __GNUC_PREREQ(3, 4)
+#if uword_bits == 64
+#define count_leading_zeros(count,x) count = __builtin_clzll (x)
+#define count_trailing_zeros(count,x) count = __builtin_ctzll (x)
+#else
+#define count_leading_zeros(count,x) count = __builtin_clzl (x)
+#define count_trailing_zeros(count,x) count = __builtin_ctzl (x)
+#endif
+#endif
+#endif
+
+#ifndef count_leading_zeros
+
+/* Misc. integer arithmetic functions. */
+#if defined (i386)
+#define count_leading_zeros(count, x) \
+ do { \
+ word _clz; \
+ __asm__ ("bsrl %1,%0" \
+ : "=r" (_clz) : "rm" ((word) (x)));\
+ (count) = _clz ^ 31; \
+ } while (0)
+
+#define count_trailing_zeros(count, x) \
+ __asm__ ("bsfl %1,%0" : "=r" (count) : "rm" ((word)(x)))
+#endif /* i386 */
+
+#if defined (__alpha__) && defined (HAVE_CIX)
+#define count_leading_zeros(count, x) \
+ __asm__ ("ctlz %1,%0" \
+ : "=r" ((word) (count)) \
+ : "r" ((word) (x)))
+#define count_trailing_zeros(count, x) \
+ __asm__ ("cttz %1,%0" \
+ : "=r" ((word) (count)) \
+ : "r" ((word) (x)))
+#endif /* alpha && HAVE_CIX */
+
+#if __mips >= 4
+
+/* Select between 32/64 opcodes. */
+#if uword_bits == 32
+#define count_leading_zeros(_count, _x) \
+ __asm__ ("clz %[count],%[x]" \
+ : [count] "=r" ((word) (_count)) \
+ : [x] "r" ((word) (_x)))
+#else
+#define count_leading_zeros(_count, _x) \
+ __asm__ ("dclz %[count],%[x]" \
+ : [count] "=r" ((word) (_count)) \
+ : [x] "r" ((word) (_x)))
+#endif
+
+#endif /* __mips >= 4 */
+
+#endif /* count_leading_zeros */
+
+#if defined (count_leading_zeros)
+always_inline uword
+min_log2 (uword x)
+{
+ uword n;
+ count_leading_zeros (n, x);
+ return BITS (uword) - n - 1;
+}
+#else
+always_inline uword
+min_log2 (uword x)
+{
+ uword a = x, b = BITS (uword) / 2, c = 0, r = 0;
+
+ /* Reduce x to 4 bit result. */
+#define _ \
+{ \
+ c = a >> b; \
+ if (c) a = c; \
+ if (c) r += b; \
+ b /= 2; \
+}
+
+ if (BITS (uword) > 32)
+ _;
+ _;
+ _;
+ _;
+#undef _
+
+ /* Do table lookup on 4 bit partial. */
+ if (BITS (uword) > 32)
+ {
+ const u64 table = 0x3333333322221104LL;
+ uword t = (table >> (4 * a)) & 0xf;
+ r = t < 4 ? r + t : ~0;
+ }
+ else
+ {
+ const u32 table = 0x22221104;
+ uword t = (a & 8) ? 3 : ((table >> (4 * a)) & 0xf);
+ r = t < 4 ? r + t : ~0;
+ }
+
+ return r;
+}
+#endif
+
+always_inline uword
+max_log2 (uword x)
+{
+ uword l = min_log2 (x);
+ if (x > ((uword) 1 << l))
+ l++;
+ return l;
+}
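+
+/*
+ * Examples: min_log2 (4) == 2 and min_log2 (5) == 2 (rounds down),
+ * while max_log2 (4) == 2 and max_log2 (5) == 3 (rounds up).
+ */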
+
+always_inline u64
+min_log2_u64 (u64 x)
+{
+  if (BITS (uword) == 64)
+    return min_log2 (x);
+  else
+    {
+      uword l = 0;
+
+      /* On 32-bit hosts, reduce a value with a non-zero high word
+         before taking the 32-bit log2. */
+      if ((x >> (u64) 32) != 0)
+	{
+	  l += 32;
+	  x >>= (u64) 32;
+	}
+      l += min_log2 (x);
+      return l;
+    }
+}
+
+always_inline uword
+pow2_mask (uword x)
+{
+ return ((uword) 1 << x) - (uword) 1;
+}
+
+always_inline uword
+max_pow2 (uword x)
+{
+ word y = (word) 1 << min_log2 (x);
+ if (x > y)
+ y *= 2;
+ return y;
+}
+
+always_inline uword
+is_pow2 (uword x)
+{
+ return 0 == (x & (x - 1));
+}
+
+always_inline uword
+round_pow2 (uword x, uword pow2)
+{
+ return (x + pow2 - 1) & ~(pow2 - 1);
+}
+
+always_inline u64
+round_pow2_u64 (u64 x, u64 pow2)
+{
+ return (x + pow2 - 1) & ~(pow2 - 1);
+}
+
+always_inline uword
+first_set (uword x)
+{
+ return x & -x;
+}
+
+always_inline uword
+log2_first_set (uword x)
+{
+ uword result;
+#ifdef count_trailing_zeros
+ count_trailing_zeros (result, x);
+#else
+ result = min_log2 (first_set (x));
+#endif
+ return result;
+}
+
+always_inline f64
+flt_round_down (f64 x)
+{
+ return (int) x;
+}
+
+always_inline word
+flt_round_nearest (f64 x)
+{
+ return (word) (x + .5);
+}
+
+always_inline f64
+flt_round_to_multiple (f64 x, f64 f)
+{
+ return f * flt_round_nearest (x / f);
+}
+
+#define clib_max(x,y) \
+({ \
+ __typeof__ (x) _x = (x); \
+ __typeof__ (y) _y = (y); \
+ _x > _y ? _x : _y; \
+})
+
+#define clib_min(x,y) \
+({ \
+ __typeof__ (x) _x = (x); \
+ __typeof__ (y) _y = (y); \
+ _x < _y ? _x : _y; \
+})
+
+#define clib_abs(x) \
+({ \
+ __typeof__ (x) _x = (x); \
+ _x < 0 ? -_x : _x; \
+})
+
+/* Standard standalone-only function declarations. */
+#ifndef CLIB_UNIX
+void clib_standalone_init (void *memory, uword memory_bytes);
+
+void qsort (void *base, uword n, uword size,
+ int (*)(const void *, const void *));
+#endif
+
+/* Stack backtrace. */
+uword
+clib_backtrace (uword * callers, uword max_callers, uword n_frames_to_skip);
+
+#endif /* included_clib_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/clib_error.h b/src/vppinfra/clib_error.h
new file mode 100644
index 00000000..45f18eb1
--- /dev/null
+++ b/src/vppinfra/clib_error.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_clib_error_h
+#define included_clib_error_h
+
+#include <vppinfra/types.h>
+
+typedef struct
+{
+ /* Error message. */
+ u8 *what;
+
+ /* Where error occurred (e.g. __FUNCTION__ __LINE__) */
+ const u8 *where;
+
+ uword flags;
+
+ /* Error code (e.g. errno for Unix errors). */
+ any code;
+} clib_error_t;
+
+#endif
diff --git a/src/vppinfra/cpu.c b/src/vppinfra/cpu.c
new file mode 100644
index 00000000..a26d5c9a
--- /dev/null
+++ b/src/vppinfra/cpu.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vppinfra/clib.h>
+#include <vppinfra/format.h>
+#include <vppinfra/cpu.h>
+
+#define foreach_x86_cpu_uarch \
+ _(0x06, 0x4f, "Broadwell", "Broadwell-EP/EX") \
+ _(0x06, 0x3d, "Broadwell", "Broadwell") \
+ _(0x06, 0x3f, "Haswell", "Haswell-E") \
+ _(0x06, 0x3c, "Haswell", "Haswell") \
+ _(0x06, 0x3e, "IvyBridge", "IvyBridge-E/EN/EP") \
+ _(0x06, 0x3a, "IvyBridge", "IvyBridge") \
+ _(0x06, 0x2a, "SandyBridge", "SandyBridge") \
+ _(0x06, 0x2d, "SandyBridge", "SandyBridge-E/EN/EP") \
+ _(0x06, 0x25, "Westmere", "Arrandale,Clarksdale") \
+ _(0x06, 0x2c, "Westmere", "Westmere-EP/EX,Gulftown") \
+ _(0x06, 0x2f, "Westmere", "Westmere-EX") \
+ _(0x06, 0x1e, "Nehalem", "Clarksfield,Lynnfield,Jasper Forest") \
+ _(0x06, 0x1a, "Nehalem", "Nehalem-EP,Bloomfield)") \
+ _(0x06, 0x2e, "Nehalem", "Nehalem-EX") \
+ _(0x06, 0x17, "Penryn", "Yorkfield,Wolfdale,Penryn,Harpertown (DP)") \
+ _(0x06, 0x1d, "Penryn", "Dunnington (MP)") \
+ _(0x06, 0x37, "Atom", "Bay Trail") \
+ _(0x06, 0x36, "Atom", "Cedarview") \
+ _(0x06, 0x26, "Atom", "Lincroft") \
+ _(0x06, 0x1c, "Atom", "Pineview/Silverthorne")
+
+u8 *
+format_cpu_uarch (u8 * s, va_list * args)
+{
+#if __x86_64__
+ u32 __attribute__ ((unused)) eax, ebx, ecx, edx;
+ u8 model, family;
+
+ if (__get_cpuid (1, &eax, &ebx, &ecx, &edx) == 0)
+ return format (s, "unknown (missing cpuid)");
+
+ model = ((eax >> 4) & 0x0f) | ((eax >> 12) & 0xf0);
+ family = (eax >> 8) & 0x0f;
+
+#define _(f,m,a,c) if ((model == m) && (family == f)) return format(s, "%s (%s)", a, c);
+ foreach_x86_cpu_uarch
+#undef _
+ return format (s, "unknown (family 0x%02x model 0x%02x)", family, model);
+
+#else /* ! __x86_64__ */
+ return format (s, "unknown");
+#endif
+}
+
+u8 *
+format_cpu_model_name (u8 * s, va_list * args)
+{
+#if __x86_64__
+ u32 __attribute__ ((unused)) eax, ebx, ecx, edx;
+ u8 *name = 0;
+ u32 *name_u32;
+
+ if (__get_cpuid (1, &eax, &ebx, &ecx, &edx) == 0)
+ return format (s, "unknown (missing cpuid)");
+
+ __get_cpuid (0x80000000, &eax, &ebx, &ecx, &edx);
+ if (eax < 0x80000004)
+ return format (s, "unknown (missing ext feature)");
+
+ vec_validate (name, 48);
+ name_u32 = (u32 *) name;
+
+ __get_cpuid (0x80000002, &eax, &ebx, &ecx, &edx);
+ name_u32[0] = eax;
+ name_u32[1] = ebx;
+ name_u32[2] = ecx;
+ name_u32[3] = edx;
+
+ __get_cpuid (0x80000003, &eax, &ebx, &ecx, &edx);
+ name_u32[4] = eax;
+ name_u32[5] = ebx;
+ name_u32[6] = ecx;
+ name_u32[7] = edx;
+
+ __get_cpuid (0x80000004, &eax, &ebx, &ecx, &edx);
+ name_u32[8] = eax;
+ name_u32[9] = ebx;
+ name_u32[10] = ecx;
+ name_u32[11] = edx;
+
+ s = format (s, "%s", name);
+ vec_free (name);
+ return s;
+
+#elif defined(__aarch64__)
+ return format (s, "armv8");
+#else /* ! __x86_64__ */
+ return format (s, "unknown");
+#endif
+}
+
+u8 *
+format_cpu_flags (u8 * s, va_list * args)
+{
+#if defined(__x86_64__)
+#define _(flag, func, reg, bit) \
+ if (clib_cpu_supports_ ## flag()) \
+ s = format (s, #flag " ");
+ foreach_x86_64_flags
+#undef _
+ return s;
+#else /* ! __x86_64__ */
+ return format (s, "unknown");
+#endif
+}
+
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/cpu.h b/src/vppinfra/cpu.h
new file mode 100644
index 00000000..9c149f3f
--- /dev/null
+++ b/src/vppinfra/cpu.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_clib_cpu_h
+#define included_clib_cpu_h
+
+#include <vppinfra/format.h>
+
+/*
+ * Multiarchitecture support. Adding a new entry produces a new graph
+ * node function variant optimized for the specified CPU
+ * microarchitecture.
+ * Order is important for runtime selection: the first match wins.
+ */
+
+#if __x86_64__ && CLIB_DEBUG == 0
+#define foreach_march_variant(macro, x) \
+ macro(avx2, x, "arch=core-avx2")
+#else
+#define foreach_march_variant(macro, x)
+#endif
+
+
+#if __GNUC__ > 4 && !__clang__
+#define CLIB_CPU_OPTIMIZED __attribute__ ((optimize ("tree-vectorize")))
+#else
+#define CLIB_CPU_OPTIMIZED
+#endif
+
+
+#define CLIB_MULTIARCH_ARCH_CHECK(arch, fn, tgt) \
+ if (clib_cpu_supports_ ## arch()) \
+ return & fn ## _ ##arch;
+
+#define CLIB_MULTIARCH_SELECT_FN(fn,...) \
+ __VA_ARGS__ void * fn ## _multiarch_select(void) \
+{ \
+ foreach_march_variant(CLIB_MULTIARCH_ARCH_CHECK, fn) \
+ return & fn; \
+}
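+
+/* Usage sketch (hypothetical function name "my_fn", not part of this
+ patch): given a default implementation and an avx2 variant built per
+ foreach_march_variant, the selector picks the best match at runtime:
+
+ u32 my_fn (u32 x); // default implementation
+ u32 my_fn_avx2 (u32 x); // avx2 build of the same function
+ CLIB_MULTIARCH_SELECT_FN (my_fn) // defines my_fn_multiarch_select()
+
+ u32 (*fp) (u32) = (u32 (*) (u32)) my_fn_multiarch_select ();
+*/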
+
+
+#define foreach_x86_64_flags \
+_ (sse3, 1, ecx, 0) \
+_ (ssse3, 1, ecx, 9) \
+_ (sse41, 1, ecx, 19) \
+_ (sse42, 1, ecx, 20) \
+_ (avx, 1, ecx, 28) \
+_ (avx2, 7, ebx, 5) \
+_ (avx512f, 7, ebx, 16) \
+_ (aes, 1, ecx, 25) \
+_ (sha, 7, ebx, 29) \
+_ (invariant_tsc, 0x80000007, edx, 8)
+
+#if defined(__x86_64__)
+#include "cpuid.h"
+
+static inline int
+clib_get_cpuid (const u32 lev, u32 * eax, u32 * ebx, u32 * ecx, u32 * edx)
+{
+ if ((u32) __get_cpuid_max (0x80000000 & lev, 0) < lev)
+ return 0;
+ if (lev == 7)
+ __cpuid_count (lev, 0, *eax, *ebx, *ecx, *edx);
+ else
+ __cpuid (lev, *eax, *ebx, *ecx, *edx);
+ return 1;
+}
+
+
+#define _(flag, func, reg, bit) \
+static inline int \
+clib_cpu_supports_ ## flag() \
+{ \
+ u32 __attribute__((unused)) eax, ebx = 0, ecx = 0, edx = 0; \
+ clib_get_cpuid (func, &eax, &ebx, &ecx, &edx); \
+ \
+ return ((reg & (1 << bit)) != 0); \
+}
+foreach_x86_64_flags
+#undef _
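+
+/* Expansion sketch: for the avx2 entry above (leaf 7, ebx, bit 5) the
+ macro generates, approximately:
+
+ static inline int clib_cpu_supports_avx2 ()
+ {
+ u32 eax, ebx = 0, ecx = 0, edx = 0;
+ clib_get_cpuid (7, &eax, &ebx, &ecx, &edx);
+ return ((ebx & (1 << 5)) != 0);
+ }
+*/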
+#else
+
+#define _(flag, func, reg, bit) \
+static inline int clib_cpu_supports_ ## flag() { return 0; }
+foreach_x86_64_flags
+#undef _
+#endif
+
+format_function_t format_cpu_uarch;
+format_function_t format_cpu_model_name;
+format_function_t format_cpu_flags;
+
+#endif /* included_clib_cpu_h */
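+
+/* Usage sketch (illustrative): these plug into the vppinfra %U
+ directive, e.g.
+
+ u8 *s = format (0, "uarch: %U, flags: %U",
+ format_cpu_uarch, format_cpu_flags);
+*/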
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/crc32.h b/src/vppinfra/crc32.h
new file mode 100644
index 00000000..bbfc41cc
--- /dev/null
+++ b/src/vppinfra/crc32.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __included_crc32_h__
+#define __included_crc32_h__
+
+#include <vppinfra/clib.h>
+
+#if __SSE4_2__
+#define clib_crc32c_uses_intrinsics
+#include <x86intrin.h>
+
+static_always_inline u32
+clib_crc32c (u8 * s, int len)
+{
+ u32 v = 0;
+
+#if __x86_64__
+ for (; len >= 8; len -= 8, s += 8)
+ v = _mm_crc32_u64 (v, *((u64 *) s));
+#else
+ /* workaround for a GCC bug with _mm_crc32_u32
+ that appears at -O2 optimization */
+ asm volatile ("":::"memory");
+#endif
+
+ for (; len >= 4; len -= 4, s += 4)
+ v = _mm_crc32_u32 (v, *((u32 *) s));
+
+ for (; len >= 2; len -= 2, s += 2)
+ v = _mm_crc32_u16 (v, *((u16 *) s));
+
+ for (; len >= 1; len -= 1, s += 1)
+ v = _mm_crc32_u8 (v, *((u8 *) s));
+
+ return v;
+}
+
+#elif __ARM_FEATURE_CRC32
+#define clib_crc32c_uses_intrinsics
+#include <arm_acle.h>
+
+static_always_inline u32
+clib_crc32c (u8 * s, int len)
+{
+ u32 v = 0;
+
+ for (; len >= 8; len -= 8, s += 8)
+ v = __crc32cd (v, *((u64 *) s));
+
+ for (; len >= 4; len -= 4, s += 4)
+ v = __crc32cw (v, *((u32 *) s));
+
+ for (; len >= 2; len -= 2, s += 2)
+ v = __crc32ch (v, *((u16 *) s));
+
+ for (; len >= 1; len -= 1, s += 1)
+ v = __crc32cb (v, *((u8 *) s));
+
+ return v;
+}
+
+#endif
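+
+/* Usage sketch: hash a key with the hardware CRC32-C instruction when
+ available; callers are expected to test the feature macro and fall
+ back to a software hash otherwise.
+
+ #ifdef clib_crc32c_uses_intrinsics
+ u8 key[] = "example key";
+ u32 h = clib_crc32c (key, sizeof (key) - 1);
+ #endif
+*/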
+#endif /* __included_crc32_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/dir.dox b/src/vppinfra/dir.dox
new file mode 100644
index 00000000..440c44e8
--- /dev/null
+++ b/src/vppinfra/dir.dox
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Copyright (c) 2016 Comcast Cable Communications Management, LLC.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/** @dir
+ * @brief VPP infrastructure library source.
+ */
diff --git a/src/vppinfra/dlist.h b/src/vppinfra/dlist.h
new file mode 100644
index 00000000..e445b39f
--- /dev/null
+++ b/src/vppinfra/dlist.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ *
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+#ifndef included_dlist_h
+#define included_dlist_h
+
+#include <stdarg.h>
+#include <vppinfra/clib.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/error.h>
+#include <vppinfra/format.h>
+#include <vppinfra/cache.h>
+
+typedef struct
+{
+ u32 next;
+ u32 prev;
+ u32 value;
+} dlist_elt_t;
+
+static inline void
+clib_dlist_init (dlist_elt_t * pool, u32 index)
+{
+ dlist_elt_t *head = pool_elt_at_index (pool, index);
+ memset (head, 0xFF, sizeof (*head));
+}
+
+static inline void
+clib_dlist_addtail (dlist_elt_t * pool, u32 head_index, u32 new_index)
+{
+ dlist_elt_t *head = pool_elt_at_index (pool, head_index);
+ u32 old_last_index;
+ dlist_elt_t *old_last;
+ dlist_elt_t *new;
+
+ ASSERT (head->value == ~0);
+
+ new = pool_elt_at_index (pool, new_index);
+
+ if (PREDICT_FALSE (head->next == ~0))
+ {
+ head->next = head->prev = new_index;
+ new->next = new->prev = head_index;
+ return;
+ }
+
+ old_last_index = head->prev;
+ old_last = pool_elt_at_index (pool, old_last_index);
+
+ new->next = old_last->next;
+ new->prev = old_last_index;
+ old_last->next = new_index;
+ head->prev = new_index;
+}
+
+static inline void
+clib_dlist_addhead (dlist_elt_t * pool, u32 head_index, u32 new_index)
+{
+ dlist_elt_t *head = pool_elt_at_index (pool, head_index);
+ dlist_elt_t *old_first;
+ u32 old_first_index;
+ dlist_elt_t *new;
+
+ ASSERT (head->value == ~0);
+
+ new = pool_elt_at_index (pool, new_index);
+
+ if (PREDICT_FALSE (head->next == ~0))
+ {
+ head->next = head->prev = new_index;
+ new->next = new->prev = head_index;
+ return;
+ }
+
+ old_first_index = head->next;
+ old_first = pool_elt_at_index (pool, old_first_index);
+
+ new->next = old_first_index;
+ new->prev = old_first->prev;
+ old_first->prev = new_index;
+ head->next = new_index;
+}
+
+static inline void
+clib_dlist_remove (dlist_elt_t * pool, u32 index)
+{
+ dlist_elt_t *elt = pool_elt_at_index (pool, index);
+ dlist_elt_t *next_elt, *prev_elt;
+
+ /* must not be a list head */
+ ASSERT (elt->value != ~0);
+
+ next_elt = pool_elt_at_index (pool, elt->next);
+ prev_elt = pool_elt_at_index (pool, elt->prev);
+
+ next_elt->prev = elt->prev;
+ prev_elt->next = elt->next;
+
+ elt->prev = elt->next = ~0;
+}
+
+static inline u32
+clib_dlist_remove_head (dlist_elt_t * pool, u32 head_index)
+{
+ dlist_elt_t *head = pool_elt_at_index (pool, head_index);
+ u32 rv;
+
+ ASSERT (head->value == ~0);
+
+ if (head->next == ~0 || (head->next == head_index))
+ return ~0;
+
+ rv = head->next;
+ clib_dlist_remove (pool, rv);
+ return rv;
+}
+
+static inline u32
+clib_dlist_remove_tail (dlist_elt_t * pool, u32 head_index)
+{
+ dlist_elt_t *head = pool_elt_at_index (pool, head_index);
+ u32 rv;
+
+ ASSERT (head->value == ~0);
+
+ if (head->prev == ~0 || (head->prev == head_index))
+ return ~0;
+
+ rv = head->prev;
+ clib_dlist_remove (pool, rv);
+ return rv;
+}
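+
+/* Usage sketch (illustrative): elements live in a pool and are linked
+ by 32-bit indices; a head is marked by value == ~0, which
+ clib_dlist_init establishes by filling the element with 0xFF bytes.
+
+ dlist_elt_t *pool = 0, *elt;
+ u32 head_index, elt_index;
+
+ pool_get (pool, elt);
+ head_index = elt - pool;
+ clib_dlist_init (pool, head_index);
+
+ pool_get (pool, elt); // may move the pool; hold indices, not pointers
+ elt_index = elt - pool;
+ clib_dlist_init (pool, elt_index);
+ pool_elt_at_index (pool, elt_index)->value = 42; // payload, != ~0
+
+ clib_dlist_addtail (pool, head_index, elt_index);
+ elt_index = clib_dlist_remove_head (pool, head_index); // same element
+*/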
+
+#endif /* included_dlist_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/elf.c b/src/vppinfra/elf.c
new file mode 100644
index 00000000..931fbccc
--- /dev/null
+++ b/src/vppinfra/elf.c
@@ -0,0 +1,2040 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vppinfra/bitmap.h>
+#include <vppinfra/byte_order.h>
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/elf.h>
+
+always_inline void
+elf_swap_first_header (elf_main_t * em, elf_first_header_t * h)
+{
+ h->architecture = elf_swap_u16 (em, h->architecture);
+ h->file_type = elf_swap_u16 (em, h->file_type);
+ h->file_version = elf_swap_u32 (em, h->file_version);
+}
+
+always_inline void
+elf_swap_verneed (elf_dynamic_version_need_t * n)
+{
+#define _(t,f) n->f = clib_byte_swap_##t (n->f);
+ foreach_elf_dynamic_version_need_field
+#undef _
+}
+
+always_inline void
+elf_swap_verneed_aux (elf_dynamic_version_need_aux_t * n)
+{
+#define _(t,f) n->f = clib_byte_swap_##t (n->f);
+ foreach_elf_dynamic_version_need_aux_field
+#undef _
+}
+
+clib_error_t *
+elf_get_section_by_name (elf_main_t * em, char *section_name,
+ elf_section_t ** result)
+{
+ uword *p;
+
+ p = hash_get_mem (em->section_by_name, section_name);
+ if (!p)
+ return clib_error_return (0, "no such section `%s'", section_name);
+
+ *result = vec_elt_at_index (em->sections, p[0]);
+ return 0;
+}
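+
+/* Usage sketch (hypothetical section name):
+
+ elf_section_t *es;
+ clib_error_t *err = elf_get_section_by_name (em, ".text", &es);
+ if (!err)
+ ... inspect es->contents ...
+*/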
+
+elf_section_t *
+elf_get_section_by_start_address_no_check (elf_main_t * em,
+ uword start_address)
+{
+ uword *p = hash_get (em->section_by_start_address, start_address);
+ return p ? vec_elt_at_index (em->sections, p[0]) : 0;
+}
+
+clib_error_t *
+elf_get_section_by_start_address (elf_main_t * em, uword start_address,
+ elf_section_t ** result)
+{
+ elf_section_t *s =
+ elf_get_section_by_start_address_no_check (em, start_address);
+ if (!s)
+ return clib_error_return (0, "no section with address 0x%wx",
+ start_address);
+ *result = s;
+ return 0;
+}
+
+static u8 *
+format_elf_section_type (u8 * s, va_list * args)
+{
+ elf_section_type_t type = va_arg (*args, elf_section_type_t);
+ char *t = 0;
+
+ switch (type)
+ {
+#define _(f,i) case ELF_SECTION_##f: t = #f; break;
+ foreach_elf_section_type
+#undef _
+ }
+
+ if (!t)
+ s = format (s, "unknown 0x%x", type);
+ else
+ s = format (s, "%s", t);
+ return s;
+}
+
+static u8 *
+format_elf_section (u8 * s, va_list * args)
+{
+ elf_main_t *em = va_arg (*args, elf_main_t *);
+ elf_section_t *es = va_arg (*args, elf_section_t *);
+ elf64_section_header_t *h = &es->header;
+
+ if (!h)
+ return format (s, "%=40s%=10s%=20s%=8s%=16s%=16s%=16s",
+ "Name", "Index", "Type", "Size", "Align", "Address",
+ "File offset");
+
+ s = format (s, "%-40s%10d%=20U%8Lx%16d%16Lx %Lx-%Lx",
+ elf_section_name (em, es),
+ es->index,
+ format_elf_section_type, h->type,
+ h->file_size,
+ h->align,
+ h->exec_address, h->file_offset, h->file_offset + h->file_size);
+
+ if (h->flags != 0)
+ {
+#define _(f,i) \
+ if (h->flags & ELF_SECTION_FLAG_##f) s = format (s, " %s", #f);
+ foreach_elf_section_flag;
+#undef _
+ }
+
+ return s;
+}
+
+static u8 *
+format_elf_segment_type (u8 * s, va_list * args)
+{
+ elf_segment_type_t type = va_arg (*args, elf_segment_type_t);
+ char *t = 0;
+
+ switch (type)
+ {
+#define _(f,i) case ELF_SEGMENT_##f: t = #f; break;
+ foreach_elf_segment_type
+#undef _
+ }
+
+ if (!t)
+ s = format (s, "unknown 0x%x", type);
+ else
+ s = format (s, "%s", t);
+ return s;
+}
+
+static u8 *
+format_elf_segment (u8 * s, va_list * args)
+{
+ elf_segment_t *es = va_arg (*args, elf_segment_t *);
+ elf64_segment_header_t *h = &es->header;
+
+ if (!h)
+ return format (s, "%=16s%=16s%=16s%=16s",
+ "Type", "Virt. Address", "Phys. Address", "Size");
+
+ s = format (s, "%=16U%16Lx%16Lx%16Lx%16Lx",
+ format_elf_segment_type, h->type,
+ h->virtual_address,
+ h->physical_address, h->memory_size, h->file_offset);
+
+ if (h->flags != 0)
+ {
+#define _(f,i) \
+ if (h->flags & ELF_SEGMENT_FLAG_##f) s = format (s, " %s", #f);
+ foreach_elf_segment_flag;
+#undef _
+ }
+
+ return s;
+}
+
+static u8 *
+format_elf_symbol_binding_and_type (u8 * s, va_list * args)
+{
+ int bt = va_arg (*args, int);
+ int b, t;
+ char *type_string = 0;
+ char *binding_string = 0;
+
+ switch ((b = ((bt >> 4) & 0xf)))
+ {
+#define _(f,n) case n: binding_string = #f; break;
+ foreach_elf_symbol_binding;
+#undef _
+ default:
+ break;
+ }
+
+ switch ((t = ((bt >> 0) & 0xf)))
+ {
+#define _(f,n) case n: type_string = #f; break;
+ foreach_elf_symbol_type;
+#undef _
+ default:
+ break;
+ }
+
+ if (binding_string)
+ s = format (s, "%s", binding_string);
+ else
+ s = format (s, "binding 0x%x", b);
+
+ if (type_string)
+ s = format (s, " %s", type_string);
+ else
+ s = format (s, " type 0x%x", t);
+
+ return s;
+}
+
+static u8 *
+format_elf_symbol_visibility (u8 * s, va_list * args)
+{
+ int visibility = va_arg (*args, int);
+ char *t = 0;
+
+ switch (visibility)
+ {
+#define _(f,n) case n: t = #f; break;
+ foreach_elf_symbol_visibility
+#undef _
+ }
+
+ if (t)
+ return format (s, "%s", t);
+ else
+ return format (s, "unknown 0x%x", visibility);
+}
+
+static u8 *
+format_elf_symbol_section_name (u8 * s, va_list * args)
+{
+ elf_main_t *em = va_arg (*args, elf_main_t *);
+ int si = va_arg (*args, int);
+ char *t = 0;
+
+ if (si < vec_len (em->sections))
+ {
+ elf_section_t *es = vec_elt_at_index (em->sections, si);
+ return format (s, "%s", elf_section_name (em, es));
+ }
+
+ if (si >= ELF_SYMBOL_SECTION_RESERVED_LO
+ && si <= ELF_SYMBOL_SECTION_RESERVED_HI)
+ {
+ switch (si)
+ {
+#define _(f,n) case n: t = #f; break;
+ foreach_elf_symbol_reserved_section_index
+#undef _
+ default:
+ break;
+ }
+ }
+
+ if (t)
+ return format (s, "%s", t);
+ else
+ return format (s, "unknown 0x%x", si);
+}
+
+u8 *
+format_elf_symbol (u8 * s, va_list * args)
+{
+ elf_main_t *em = va_arg (*args, elf_main_t *);
+ elf_symbol_table_t *t = va_arg (*args, elf_symbol_table_t *);
+ elf64_symbol_t *sym = va_arg (*args, elf64_symbol_t *);
+
+ if (!sym)
+ return format (s, "%=32s%=16s%=16s%=16s%=16s%=16s",
+ "Symbol", "Size", "Value", "Type", "Visibility",
+ "Section");
+
+ s = format (s, "%-32s%16Ld%16Lx%=16U%=16U%U",
+ elf_symbol_name (t, sym),
+ sym->size, sym->value,
+ format_elf_symbol_binding_and_type, sym->binding_and_type,
+ format_elf_symbol_visibility, sym->visibility,
+ format_elf_symbol_section_name, em, sym->section_index);
+
+ return s;
+}
+
+static u8 *
+format_elf_relocation_type (u8 * s, va_list * args)
+{
+ elf_main_t *em = va_arg (*args, elf_main_t *);
+ int type = va_arg (*args, int);
+ char *t = 0;
+
+ switch (em->first_header.architecture)
+ {
+#define _(f,i) [i] = #f,
+
+ case ELF_ARCH_X86_64:
+ {
+ static char *tab[] = {
+ foreach_elf_x86_64_relocation_type
+ };
+
+#undef _
+ if (type < ARRAY_LEN (tab))
+ t = tab[type];
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ if (!t)
+ s = format (s, "0x%02x", type);
+ else
+ s = format (s, "%s", t);
+
+ return s;
+}
+
+static u8 *
+format_elf_relocation (u8 * s, va_list * args)
+{
+ elf_main_t *em = va_arg (*args, elf_main_t *);
+ elf_relocation_with_addend_t *r =
+ va_arg (*args, elf_relocation_with_addend_t *);
+ elf_symbol_table_t *t;
+ elf64_symbol_t *sym;
+
+ if (!r)
+ return format (s, "%=16s%=16s%=16s", "Address", "Type", "Symbol");
+
+ t = vec_elt_at_index (em->symbol_tables, 0);
+ sym = vec_elt_at_index (t->symbols, r->symbol_and_type >> 32);
+
+ s = format (s, "%16Lx%16U",
+ r->address,
+ format_elf_relocation_type, em, r->symbol_and_type & 0xff);
+
+ if (sym->section_index != 0)
+ {
+ elf_section_t *es;
+ es = vec_elt_at_index (em->sections, sym->section_index);
+ s = format (s, " (section %s)", elf_section_name (em, es));
+ }
+
+ if (sym->name != 0)
+ s = format (s, " %s", elf_symbol_name (t, sym));
+
+ {
+ i64 a = r->addend;
+ if (a != 0)
+ s = format (s, " %c 0x%Lx", a > 0 ? '+' : '-', a > 0 ? a : -a);
+ }
+
+ return s;
+}
+
+static u8 *
+format_elf_dynamic_entry_type (u8 * s, va_list * args)
+{
+ u32 type = va_arg (*args, u32);
+ char *t = 0;
+ switch (type)
+ {
+#define _(f,n) case n: t = #f; break;
+ foreach_elf_dynamic_entry_type;
+#undef _
+ default:
+ break;
+ }
+ if (t)
+ return format (s, "%s", t);
+ else
+ return format (s, "unknown 0x%x", type);
+}
+
+static u8 *
+format_elf_dynamic_entry (u8 * s, va_list * args)
+{
+ elf_main_t *em = va_arg (*args, elf_main_t *);
+ elf64_dynamic_entry_t *e = va_arg (*args, elf64_dynamic_entry_t *);
+
+ if (!e)
+ return format (s, "%=40s%=16s", "Type", "Data");
+
+ s = format (s, "%=40U", format_elf_dynamic_entry_type, (u32) e->type);
+ switch (e->type)
+ {
+ case ELF_DYNAMIC_ENTRY_NEEDED_LIBRARY:
+ case ELF_DYNAMIC_ENTRY_RPATH:
+ case ELF_DYNAMIC_ENTRY_RUN_PATH:
+ s = format (s, "%s", em->dynamic_string_table + e->data);
+ break;
+
+ case ELF_DYNAMIC_ENTRY_INIT_FUNCTION:
+ case ELF_DYNAMIC_ENTRY_FINI_FUNCTION:
+ case ELF_DYNAMIC_ENTRY_SYMBOL_HASH:
+ case ELF_DYNAMIC_ENTRY_GNU_HASH:
+ case ELF_DYNAMIC_ENTRY_STRING_TABLE:
+ case ELF_DYNAMIC_ENTRY_SYMBOL_TABLE:
+ case ELF_DYNAMIC_ENTRY_PLT_GOT:
+ case ELF_DYNAMIC_ENTRY_PLT_RELOCATION_ADDRESS:
+ case ELF_DYNAMIC_ENTRY_RELA_ADDRESS:
+ case ELF_DYNAMIC_ENTRY_VERSION_NEED:
+ case ELF_DYNAMIC_ENTRY_VERSYM:
+ {
+ elf_section_t *es =
+ elf_get_section_by_start_address_no_check (em, e->data);
+ if (es)
+ s = format (s, "section %s", elf_section_name (em, es));
+ else
+ s = format (s, "0x%Lx", e->data);
+ break;
+ }
+
+ default:
+ s = format (s, "0x%Lx", e->data);
+ break;
+ }
+
+ return s;
+}
+
+static u8 *
+format_elf_architecture (u8 * s, va_list * args)
+{
+ int a = va_arg (*args, int);
+ char *t;
+
+ switch (a)
+ {
+#define _(f,n) case n: t = #f; break;
+ foreach_elf_architecture;
+#undef _
+ default:
+ return format (s, "unknown 0x%x", a);
+ }
+
+ return format (s, "%s", t);
+}
+
+static u8 *
+format_elf_abi (u8 * s, va_list * args)
+{
+ int a = va_arg (*args, int);
+ char *t;
+
+ switch (a)
+ {
+#define _(f,n) case n: t = #f; break;
+ foreach_elf_abi;
+#undef _
+ default:
+ return format (s, "unknown 0x%x", a);
+ }
+
+ return format (s, "%s", t);
+}
+
+static u8 *
+format_elf_file_class (u8 * s, va_list * args)
+{
+ int a = va_arg (*args, int);
+ char *t;
+
+ switch (a)
+ {
+#define _(f) case ELF_##f: t = #f; break;
+ foreach_elf_file_class;
+#undef _
+ default:
+ return format (s, "unknown 0x%x", a);
+ }
+
+ return format (s, "%s", t);
+}
+
+static u8 *
+format_elf_file_type (u8 * s, va_list * args)
+{
+ int a = va_arg (*args, int);
+ char *t;
+
+ if (a >= ELF_ARCH_SPECIFIC_LO && a <= ELF_ARCH_SPECIFIC_HI)
+ return format (s, "arch-specific 0x%x", a - ELF_ARCH_SPECIFIC_LO);
+
+ if (a >= ELF_OS_SPECIFIC_LO && a <= ELF_OS_SPECIFIC_HI)
+ return format (s, "os-specific 0x%x", a - ELF_OS_SPECIFIC_LO);
+
+ switch (a)
+ {
+#define _(f,n) case n: t = #f; break;
+ foreach_elf_file_type;
+#undef _
+ default:
+ return format (s, "unknown 0x%x", a);
+ }
+
+ return format (s, "%s", t);
+}
+
+static u8 *
+format_elf_data_encoding (u8 * s, va_list * args)
+{
+ int a = va_arg (*args, int);
+ char *t;
+
+ switch (a)
+ {
+#define _(f) case ELF_##f: t = #f; break;
+ foreach_elf_data_encoding;
+#undef _
+ default:
+ return format (s, "unknown 0x%x", a);
+ }
+
+ return format (s, "%s", t);
+}
+
+static int
+elf_section_offset_compare (void *a1, void *a2)
+{
+ elf_section_t *s1 = a1;
+ elf_section_t *s2 = a2;
+
+ return ((i64) s1->header.file_offset - (i64) s2->header.file_offset);
+}
+
+static int
+elf_segment_va_compare (void *a1, void *a2)
+{
+ elf_segment_t *s1 = a1;
+ elf_segment_t *s2 = a2;
+
+ return ((i64) s1->header.virtual_address -
+ (i64) s2->header.virtual_address);
+}
+
+u8 *
+format_elf_main (u8 * s, va_list * args)
+{
+ elf_main_t *em = va_arg (*args, elf_main_t *);
+ u32 verbose = va_arg (*args, u32);
+ elf64_file_header_t *fh = &em->file_header;
+
+ s =
+ format (s,
+ "File header: machine: %U, file type/class %U/%U, data-encoding: %U, abi: %U version %d\n",
+ format_elf_architecture, em->first_header.architecture,
+ format_elf_file_type, em->first_header.file_type,
+ format_elf_file_class, em->first_header.file_class,
+ format_elf_data_encoding, em->first_header.data_encoding,
+ format_elf_abi, em->first_header.abi,
+ em->first_header.abi_version);
+
+ s = format (s, " entry 0x%Lx, arch-flags 0x%x",
+ em->file_header.entry_point, em->file_header.flags);
+
+ if (em->interpreter)
+ s = format (s, "\n interpreter: %s", em->interpreter);
+
+ {
+ elf_section_t *h, *copy;
+
+ copy = 0;
+ vec_foreach (h, em->sections) if (h->header.type != ~0)
+ vec_add1 (copy, h[0]);
+
+ vec_sort_with_function (copy, elf_section_offset_compare);
+
+ s = format (s, "\nSections %d at file offset 0x%Lx-0x%Lx:\n",
+ fh->section_header_count,
+ fh->section_header_file_offset,
+ fh->section_header_file_offset +
+ (u64) fh->section_header_count * fh->section_header_size);
+ s = format (s, "%U\n", format_elf_section, em, 0);
+ vec_foreach (h, copy) s = format (s, "%U\n", format_elf_section, em, h);
+
+ vec_free (copy);
+ }
+
+ {
+ elf_segment_t *h, *copy;
+
+ copy = 0;
+ vec_foreach (h, em->segments)
+ if (h->header.type != ELF_SEGMENT_UNUSED && h->header.type != ~0)
+ vec_add1 (copy, h[0]);
+
+ /* Sort segments by address. */
+ vec_sort_with_function (copy, elf_segment_va_compare);
+
+ s = format (s, "\nSegments: %d at file offset 0x%Lx-0x%Lx:\n",
+ fh->segment_header_count,
+ fh->segment_header_file_offset,
+ (u64) fh->segment_header_file_offset +
+ (u64) fh->segment_header_count *
+ (u64) fh->segment_header_size);
+
+ s = format (s, "%U\n", format_elf_segment, 0);
+ vec_foreach (h, copy) s = format (s, "%U\n", format_elf_segment, h);
+
+ vec_free (copy);
+ }
+
+ if ((verbose & FORMAT_ELF_MAIN_SYMBOLS) && vec_len (em->symbol_tables) > 0)
+ {
+ elf_symbol_table_t *t;
+ elf64_symbol_t *sym;
+ elf_section_t *es;
+
+ vec_foreach (t, em->symbol_tables)
+ {
+ es = vec_elt_at_index (em->sections, t->section_index);
+ s =
+ format (s, "\nSymbols for section %s:\n",
+ elf_section_name (em, es));
+
+ s = format (s, "%U\n", format_elf_symbol, em, 0, 0);
+ vec_foreach (sym, t->symbols)
+ s = format (s, "%U\n", format_elf_symbol, em, t, sym);
+ }
+ }
+
+ if ((verbose & FORMAT_ELF_MAIN_RELOCATIONS)
+ && vec_len (em->relocation_tables) > 0)
+ {
+ elf_relocation_table_t *t;
+ elf_relocation_with_addend_t *r;
+ elf_section_t *es;
+
+ vec_foreach (t, em->relocation_tables)
+ {
+ es = vec_elt_at_index (em->sections, t->section_index);
+ r = t->relocations;
+ s = format (s, "\nRelocations for section %s:\n",
+ elf_section_name (em, es));
+
+ s = format (s, "%U\n", format_elf_relocation, em, 0);
+ vec_foreach (r, t->relocations)
+ {
+ s = format (s, "%U\n", format_elf_relocation, em, r);
+ }
+ }
+ }
+
+ if ((verbose & FORMAT_ELF_MAIN_DYNAMIC)
+ && vec_len (em->dynamic_entries) > 0)
+ {
+ elf64_dynamic_entry_t *es, *e;
+ s = format (s, "\nDynamic linker information:\n");
+ es = vec_dup (em->dynamic_entries);
+ s = format (s, "%U\n", format_elf_dynamic_entry, em, 0);
+ vec_foreach (e, es)
+ s = format (s, "%U\n", format_elf_dynamic_entry, em, e);
+ vec_free (es);
+ }
+
+ return s;
+}
+
+static void
+elf_parse_segments (elf_main_t * em, void *data)
+{
+ void *d = data + em->file_header.segment_header_file_offset;
+ uword n = em->file_header.segment_header_count;
+ uword i;
+
+ vec_resize (em->segments, n);
+
+ for (i = 0; i < n; i++)
+ {
+ em->segments[i].index = i;
+
+ if (em->first_header.file_class == ELF_64BIT)
+ {
+ elf64_segment_header_t *h = d;
+#define _(t,f) em->segments[i].header.f = elf_swap_##t (em, h->f);
+ foreach_elf64_segment_header
+#undef _
+ d = (h + 1);
+ }
+ else
+ {
+ elf32_segment_header_t *h = d;
+#define _(t,f) em->segments[i].header.f = elf_swap_##t (em, h->f);
+ foreach_elf32_segment_header
+#undef _
+ d = (h + 1);
+ }
+ }
+}
+
+static void
+elf_parse_sections (elf_main_t * em, void *data)
+{
+ elf64_file_header_t *fh = &em->file_header;
+ elf_section_t *s;
+ void *d = data + fh->section_header_file_offset;
+ uword n = fh->section_header_count;
+ uword i;
+
+ vec_resize (em->sections, n);
+
+ for (i = 0; i < n; i++)
+ {
+ s = em->sections + i;
+
+ s->index = i;
+
+ if (em->first_header.file_class == ELF_64BIT)
+ {
+ elf64_section_header_t *h = d;
+#define _(t,f) em->sections[i].header.f = elf_swap_##t (em, h->f);
+ foreach_elf64_section_header
+#undef _
+ d = (h + 1);
+ }
+ else
+ {
+ elf32_section_header_t *h = d;
+#define _(t,f) em->sections[i].header.f = elf_swap_##t (em, h->f);
+ foreach_elf32_section_header
+#undef _
+ d = (h + 1);
+ }
+
+ if (s->header.type != ELF_SECTION_NO_BITS)
+ vec_add (s->contents, data + s->header.file_offset,
+ s->header.file_size);
+ }
+
+ s = vec_elt_at_index (em->sections, fh->section_header_string_table_index);
+
+ em->section_by_name
+ = hash_create_string ( /* # elts */ vec_len (em->sections),
+ /* sizeof of value */ sizeof (uword));
+
+ vec_foreach (s, em->sections)
+ {
+ hash_set_mem (em->section_by_name,
+ elf_section_name (em, s), s - em->sections);
+ hash_set (em->section_by_start_address,
+ s->header.exec_address, s - em->sections);
+ }
+}
+
+static void
+add_symbol_table (elf_main_t * em, elf_section_t * s)
+{
+ elf_symbol_table_t *tab;
+ elf32_symbol_t *sym32;
+ elf64_symbol_t *sym64;
+ uword i;
+
+ if (s->header.type == ELF_SECTION_DYNAMIC_SYMBOL_TABLE)
+ em->dynamic_symbol_table_index = vec_len (em->symbol_tables);
+
+ vec_add2 (em->symbol_tables, tab, 1);
+
+ tab->section_index = s->index;
+
+ if (em->first_header.file_class == ELF_64BIT)
+ {
+ tab->symbols =
+ elf_get_section_contents (em, s - em->sections,
+ sizeof (tab->symbols[0]));
+ for (i = 0; i < vec_len (tab->symbols); i++)
+ {
+#define _(t,f) tab->symbols[i].f = elf_swap_##t (em, tab->symbols[i].f);
+ foreach_elf64_symbol_header;
+#undef _
+ }
+ }
+ else
+ {
+ sym32 =
+ elf_get_section_contents (em, s - em->sections, sizeof (sym32[0]));
+ vec_clone (tab->symbols, sym32);
+ for (i = 0; i < vec_len (tab->symbols); i++)
+ {
+#define _(t,f) tab->symbols[i].f = elf_swap_##t (em, sym32[i].f);
+ foreach_elf32_symbol_header;
+#undef _
+ }
+ }
+
+ if (s->header.link == 0)
+ return;
+
+ tab->string_table =
+ elf_get_section_contents (em, s->header.link,
+ sizeof (tab->string_table[0]));
+ tab->symbol_by_name =
+ hash_create_string ( /* # elts */ vec_len (tab->symbols),
+ /* sizeof of value */ sizeof (uword));
+
+ vec_foreach (sym64, tab->symbols)
+ {
+ if (sym64->name != 0)
+ hash_set_mem (tab->symbol_by_name,
+ tab->string_table + sym64->name, sym64 - tab->symbols);
+ }
+}
+
+static void
+add_relocation_table (elf_main_t * em, elf_section_t * s)
+{
+ uword has_addend = s->header.type == ELF_SECTION_RELOCATION_ADD;
+ elf_relocation_table_t *t;
+ uword i;
+
+ vec_add2 (em->relocation_tables, t, 1);
+ t->section_index = s - em->sections;
+
+ if (em->first_header.file_class == ELF_64BIT)
+ {
+ elf64_relocation_t *r, *rs;
+
+ rs = elf_get_section_contents (em, t->section_index,
+ sizeof (rs[0]) +
+ has_addend * sizeof (rs->addend[0]));
+
+ if (em->need_byte_swap)
+ {
+ r = rs;
+ for (i = 0; i < vec_len (r); i++)
+ {
+ r->address = elf_swap_u64 (em, r->address);
+ r->symbol_and_type = elf_swap_u32 (em, r->symbol_and_type);
+ if (has_addend)
+ r->addend[0] = elf_swap_u64 (em, r->addend[0]);
+ r = elf_relocation_next (r, s->header.type);
+ }
+ }
+
+ vec_resize (t->relocations, vec_len (rs));
+ clib_memcpy (t->relocations, rs, vec_bytes (t->relocations));
+ vec_free (rs);
+ }
+ else
+ {
+ elf_relocation_with_addend_t *r;
+ elf32_relocation_t *r32, *r32s;
+
+ r32s = elf_get_section_contents (em, t->section_index,
+ sizeof (r32s[0]) +
+ has_addend * sizeof (r32s->addend[0]));
+ vec_resize (t->relocations, vec_len (r32s));
+
+ r32 = r32s;
+ vec_foreach (r, t->relocations)
+ {
+ r->address = elf_swap_u32 (em, r32->address);
+ r->symbol_and_type = elf_swap_u32 (em, r32->symbol_and_type);
+ r->addend = has_addend ? elf_swap_u32 (em, r32->addend[0]) : 0;
+ r32 = elf_relocation_next (r32, s->header.type);
+ }
+
+ vec_free (r32s);
+ }
+}
+
+void
+elf_parse_symbols (elf_main_t * em)
+{
+ elf_section_t *s;
+
+ /* No need to parse symbols twice. */
+ if (em->parsed_symbols)
+ return;
+ em->parsed_symbols = 1;
+
+ vec_foreach (s, em->sections)
+ {
+ switch (s->header.type)
+ {
+ case ELF_SECTION_SYMBOL_TABLE:
+ case ELF_SECTION_DYNAMIC_SYMBOL_TABLE:
+ add_symbol_table (em, s);
+ break;
+
+ case ELF_SECTION_RELOCATION_ADD:
+ case ELF_SECTION_RELOCATION:
+ add_relocation_table (em, s);
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
+void
+elf_set_dynamic_entries (elf_main_t * em)
+{
+ uword i;
+
+ /* Start address for sections may have changed. */
+ {
+ elf64_dynamic_entry_t *e;
+
+ vec_foreach (e, em->dynamic_entries)
+ {
+ switch (e->type)
+ {
+ case ELF_DYNAMIC_ENTRY_INIT_FUNCTION:
+ case ELF_DYNAMIC_ENTRY_FINI_FUNCTION:
+ case ELF_DYNAMIC_ENTRY_SYMBOL_HASH:
+ case ELF_DYNAMIC_ENTRY_GNU_HASH:
+ case ELF_DYNAMIC_ENTRY_STRING_TABLE:
+ case ELF_DYNAMIC_ENTRY_SYMBOL_TABLE:
+ case ELF_DYNAMIC_ENTRY_PLT_GOT:
+ case ELF_DYNAMIC_ENTRY_PLT_RELOCATION_ADDRESS:
+ case ELF_DYNAMIC_ENTRY_RELA_ADDRESS:
+ case ELF_DYNAMIC_ENTRY_VERSION_NEED:
+ case ELF_DYNAMIC_ENTRY_VERSYM:
+ {
+ elf_section_t *es =
+ elf_get_section_by_start_address_no_check (em, e->data);
+ /* If section is not found just leave e->data alone. */
+ if (es)
+ e->data = es->header.exec_address;
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+ }
+
+ if (em->first_header.file_class == ELF_64BIT)
+ {
+ elf64_dynamic_entry_t *e, *es;
+
+ es = em->dynamic_entries;
+ if (em->need_byte_swap)
+ {
+ es = vec_dup (es);
+ vec_foreach (e, es)
+ {
+ e->type = elf_swap_u64 (em, e->type);
+ e->data = elf_swap_u64 (em, e->data);
+ }
+ }
+
+ elf_set_section_contents (em, em->dynamic_section_index, es,
+ vec_bytes (es));
+ if (es != em->dynamic_entries)
+ vec_free (es);
+ }
+ else
+ {
+ elf32_dynamic_entry_t *es;
+
+ vec_clone (es, em->dynamic_entries);
+ if (em->need_byte_swap)
+ {
+ for (i = 0; i < vec_len (es); i++)
+ {
+ es[i].type = elf_swap_u32 (em, em->dynamic_entries[i].type);
+ es[i].data = elf_swap_u32 (em, em->dynamic_entries[i].data);
+ }
+ }
+
+ elf_set_section_contents (em, em->dynamic_section_index, es,
+ vec_bytes (es));
+ vec_free (es);
+ }
+}
+
+clib_error_t *
+elf_parse (elf_main_t * em, void *data, uword data_bytes)
+{
+ elf_first_header_t *h = data;
+ elf64_file_header_t *fh = &em->file_header;
+ clib_error_t *error = 0;
+
+ {
+ char *save = em->file_name;
+ memset (em, 0, sizeof (em[0]));
+ em->file_name = save;
+ }
+
+ em->first_header = h[0];
+ em->need_byte_swap =
+ CLIB_ARCH_IS_BIG_ENDIAN != (h->data_encoding ==
+ ELF_TWOS_COMPLEMENT_BIG_ENDIAN);
+ elf_swap_first_header (em, &em->first_header);
+
+ if (!(h->magic[0] == 0x7f
+ && h->magic[1] == 'E' && h->magic[2] == 'L' && h->magic[3] == 'F'))
+ return clib_error_return (0, "`%s': bad magic", em->file_name);
+
+ if (h->file_class == ELF_64BIT)
+ {
+ elf64_file_header_t *h64 = (void *) (h + 1);
+#define _(t,f) fh->f = elf_swap_##t (em, h64->f);
+ foreach_elf64_file_header
+#undef _
+ }
+ else
+ {
+ elf32_file_header_t *h32 = (void *) (h + 1);
+
+#define _(t,f) fh->f = elf_swap_##t (em, h32->f);
+ foreach_elf32_file_header
+#undef _
+ }
+
+ elf_parse_segments (em, data);
+ elf_parse_sections (em, data);
+
+ /* Figure which sections are contained in each segment. */
+ {
+ elf_segment_t *g;
+ elf_section_t *s;
+ vec_foreach (g, em->segments)
+ {
+ u64 g_lo, g_hi;
+ u64 s_lo, s_hi;
+
+ if (g->header.memory_size == 0)
+ continue;
+
+ g_lo = g->header.virtual_address;
+ g_hi = g_lo + g->header.memory_size;
+
+ vec_foreach (s, em->sections)
+ {
+ s_lo = s->header.exec_address;
+ s_hi = s_lo + s->header.file_size;
+
+ if (s_lo >= g_lo && s_hi <= g_hi)
+ {
+ g->section_index_bitmap =
+ clib_bitmap_ori (g->section_index_bitmap, s->index);
+ s->segment_index_bitmap =
+ clib_bitmap_ori (s->segment_index_bitmap, g->index);
+ }
+ }
+ }
+ }
+
+ return error;
+}
+
+#ifdef CLIB_UNIX
+
+static void
+add_dynamic_entries (elf_main_t * em, elf_section_t * s)
+{
+ uword i;
+
+ /* Can't have more than one dynamic section. */
+ ASSERT (em->dynamic_section_index == 0);
+ em->dynamic_section_index = s->index;
+
+ if (em->first_header.file_class == ELF_64BIT)
+ {
+ elf64_dynamic_entry_t *e;
+
+ e = elf_get_section_contents (em, s - em->sections, sizeof (e[0]));
+ if (em->need_byte_swap)
+ for (i = 0; i < vec_len (e); i++)
+ {
+ e[i].type = elf_swap_u64 (em, e[i].type);
+ e[i].data = elf_swap_u64 (em, e[i].data);
+ }
+
+ em->dynamic_entries = e;
+ }
+ else
+ {
+ elf32_dynamic_entry_t *e;
+
+ e = elf_get_section_contents (em, s - em->sections, sizeof (e[0]));
+ vec_clone (em->dynamic_entries, e);
+ if (em->need_byte_swap)
+ for (i = 0; i < vec_len (e); i++)
+ {
+ em->dynamic_entries[i].type = elf_swap_u32 (em, e[i].type);
+ em->dynamic_entries[i].data = elf_swap_u32 (em, e[i].data);
+ }
+
+ vec_free (e);
+ }
+}
+
+static void
+byte_swap_verneed (elf_main_t * em, elf_dynamic_version_need_union_t * vus)
+{
+ uword *entries_swapped = 0;
+ uword i, j;
+
+ for (i = 0; i < vec_len (vus); i++)
+ {
+ elf_dynamic_version_need_union_t *n = vec_elt_at_index (vus, i);
+ elf_dynamic_version_need_union_t *a;
+
+ if (clib_bitmap_get (entries_swapped, i))
+ continue;
+
+ elf_swap_verneed (&n->need);
+ entries_swapped = clib_bitmap_set (entries_swapped, i, 1);
+
+ if (n->need.first_aux_offset != 0)
+ {
+ ASSERT (n->need.first_aux_offset % sizeof (n[0]) == 0);
+ j = i + (n->need.first_aux_offset / sizeof (n[0]));
+ while (1)
+ {
+ a = vec_elt_at_index (vus, j);
+ if (!clib_bitmap_get (entries_swapped, j))
+ {
+ entries_swapped = clib_bitmap_set (entries_swapped, j, 1);
+ elf_swap_verneed_aux (&a->aux);
+ }
+ if (a->aux.next_offset == 0)
+ break;
+ ASSERT (a->aux.next_offset % sizeof (a->aux) == 0);
+ j += (a->aux.next_offset / sizeof (a->aux));
+ }
+ }
+ }
+
+ clib_bitmap_free (entries_swapped);
+}
+
+static void set_dynamic_verneed (elf_main_t * em) __attribute__ ((unused));
+static void
+set_dynamic_verneed (elf_main_t * em)
+{
+ elf_dynamic_version_need_union_t *vus = em->verneed;
+
+ if (em->need_byte_swap)
+ {
+ vus = vec_dup (vus);
+ byte_swap_verneed (em, vus);
+ }
+
+ elf_set_section_contents (em, em->verneed_section_index, vus,
+ vec_bytes (vus));
+ if (vus != em->verneed)
+ vec_free (vus);
+}
+
+static void
+set_symbol_table (elf_main_t * em, u32 table_index) __attribute__ ((unused));
+static void
+set_symbol_table (elf_main_t * em, u32 table_index)
+{
+ elf_symbol_table_t *tab = vec_elt_at_index (em->symbol_tables, table_index);
+
+ if (em->first_header.file_class == ELF_64BIT)
+ {
+ elf64_symbol_t *s, *syms;
+
+ syms = vec_dup (tab->symbols);
+ vec_foreach (s, syms)
+ {
+#define _(t,f) s->f = elf_swap_##t (em, s->f);
+ foreach_elf64_symbol_header;
+#undef _
+ }
+
+ elf_set_section_contents (em, tab->section_index,
+ syms, vec_bytes (syms));
+ }
+ else
+ {
+ elf32_symbol_t *syms;
+ uword i;
+ vec_clone (syms, tab->symbols);
+ for (i = 0; i < vec_len (tab->symbols); i++)
+ {
+#define _(t,f) syms[i].f = elf_swap_##t (em, tab->symbols[i].f);
+ foreach_elf32_symbol_header;
+#undef _
+ }
+
+ elf_set_section_contents (em, tab->section_index,
+ syms, vec_bytes (syms));
+ }
+}
+
+static char *
+elf_find_interpreter (elf_main_t * em, void *data)
+{
+ elf_segment_t *g;
+ elf_section_t *s;
+ uword *p;
+
+ vec_foreach (g, em->segments)
+ {
+ if (g->header.type == ELF_SEGMENT_INTERP)
+ break;
+ }
+
+ if (g >= vec_end (em->segments))
+ return 0;
+
+ p = hash_get (em->section_by_start_address, g->header.virtual_address);
+ if (!p)
+ return 0;
+
+ s = vec_elt_at_index (em->sections, p[0]);
+ return (char *) vec_dup (s->contents);
+}
+
+static void *
+elf_get_section_contents_with_starting_address (elf_main_t * em,
+ uword start_address,
+ uword elt_size,
+ u32 * section_index_result)
+{
+ elf_section_t *s = 0;
+ clib_error_t *error;
+
+ error = elf_get_section_by_start_address (em, start_address, &s);
+ if (error)
+ {
+ clib_error_report (error);
+ return 0;
+ }
+
+ if (section_index_result)
+ *section_index_result = s->index;
+
+ return elf_get_section_contents (em, s->index, elt_size);
+}
+
+static void
+elf_parse_dynamic (elf_main_t * em)
+{
+ elf_section_t *s;
+ elf64_dynamic_entry_t *e;
+
+ vec_foreach (s, em->sections)
+ {
+ switch (s->header.type)
+ {
+ case ELF_SECTION_DYNAMIC:
+ add_dynamic_entries (em, s);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ em->dynamic_string_table_section_index = ~0;
+ em->dynamic_string_table = 0;
+
+ vec_foreach (e, em->dynamic_entries)
+ {
+ switch (e->type)
+ {
+ case ELF_DYNAMIC_ENTRY_STRING_TABLE:
+ ASSERT (vec_len (em->dynamic_string_table) == 0);
+ em->dynamic_string_table =
+ elf_get_section_contents_with_starting_address
+ (em, e->data, sizeof (u8),
+ &em->dynamic_string_table_section_index);
+ break;
+
+ case ELF_DYNAMIC_ENTRY_SYMBOL_TABLE:
+ {
+ elf_section_t *s = 0;
+ clib_error_t *error;
+
+ error = elf_get_section_by_start_address (em, e->data, &s);
+ if (error)
+ {
+ clib_error_report (error);
+ return;
+ }
+
+ em->dynamic_symbol_table_section_index = s - em->sections;
+ }
+ break;
+
+ case ELF_DYNAMIC_ENTRY_VERSYM:
+ em->versym =
+ elf_get_section_contents_with_starting_address
+ (em, e->data, sizeof (em->versym[0]), &em->versym_section_index);
+ if (em->need_byte_swap)
+ {
+ uword i;
+ for (i = 0; i < vec_len (em->versym); i++)
+ em->versym[i] = clib_byte_swap_u16 (em->versym[i]);
+ }
+ break;
+
+ case ELF_DYNAMIC_ENTRY_VERSION_NEED:
+ em->verneed =
+ elf_get_section_contents_with_starting_address
+ (em, e->data, sizeof (em->verneed[0]), &em->verneed_section_index);
+ if (em->need_byte_swap)
+ byte_swap_verneed (em, em->verneed);
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+clib_error_t *
+elf_read_file (elf_main_t * em, char *file_name)
+{
+ int fd;
+ struct stat fd_stat;
+ uword mmap_length = 0;
+ void *data = 0;
+ clib_error_t *error = 0;
+
+ elf_main_init (em);
+
+ fd = open (file_name, 0);
+ if (fd < 0)
+ {
+ error = clib_error_return_unix (0, "open `%s'", file_name);
+ goto done;
+ }
+
+ if (fstat (fd, &fd_stat) < 0)
+ {
+ error = clib_error_return_unix (0, "fstat `%s'", file_name);
+ goto done;
+ }
+ mmap_length = fd_stat.st_size;
+
+ data = mmap (0, mmap_length, PROT_READ, MAP_SHARED, fd, /* offset */ 0);
+ if (~pointer_to_uword (data) == 0)
+ {
+ error = clib_error_return_unix (0, "mmap `%s'", file_name);
+ goto done;
+ }
+
+ em->file_name = file_name;
+
+ error = elf_parse (em, data, mmap_length);
+ if (error)
+ goto done;
+
+ elf_parse_symbols (em);
+ elf_parse_dynamic (em);
+
+ em->interpreter = elf_find_interpreter (em, data);
+
+ munmap (data, mmap_length);
+ close (fd);
+
+ return /* no error */ 0;
+
+done:
+ elf_main_free (em);
+ if (fd >= 0)
+ close (fd);
+ if (data)
+ munmap (data, mmap_length);
+ return error;
+}
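+
+/* Usage sketch (hypothetical path): parse a binary from disk and print
+ its layout; the FORMAT_ELF_MAIN_* flags select the verbose tables.
+
+ elf_main_t em;
+ clib_error_t *err = elf_read_file (&em, "/bin/true");
+ if (!err)
+ {
+ fformat (stdout, "%U", format_elf_main, &em,
+ (u32) FORMAT_ELF_MAIN_SYMBOLS);
+ elf_main_free (&em);
+ }
+*/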
+
+typedef struct
+{
+ u8 *new_table;
+
+ u8 *old_table;
+
+ uword *hash;
+} string_table_builder_t;
+
+static u32
+string_table_add_name (string_table_builder_t * b, u8 * n)
+{
+ uword *p, i, j, l;
+
+ p = hash_get_mem (b->hash, n);
+ if (p)
+ return p[0];
+
+ l = strlen ((char *) n);
+ i = vec_len (b->new_table);
+ vec_add (b->new_table, n, l + 1);
+
+ for (j = 0; j <= l; j++)
+ {
+ if (j > 0)
+ {
+ p = hash_get_mem (b->hash, n + j);
+
+ /* Sub-string already in table? */
+ if (p)
+ continue;
+ }
+
+ hash_set_mem (b->hash, n + j, i + j);
+ }
+
+ return i;
+}
+
+static u32 string_table_add_name_index (string_table_builder_t * b, u32 index)
+ __attribute__ ((unused));
+static u32
+string_table_add_name_index (string_table_builder_t * b, u32 index)
+{
+ u8 *n = b->old_table + index;
+ return string_table_add_name (b, n);
+}
+
+static void string_table_init (string_table_builder_t * b, u8 * old_table)
+ __attribute__ ((unused));
+static void
+string_table_init (string_table_builder_t * b, u8 * old_table)
+{
+ memset (b, 0, sizeof (b[0]));
+ b->old_table = old_table;
+ b->hash = hash_create_string (0, sizeof (uword));
+}
+
+static u8 *string_table_done (string_table_builder_t * b)
+ __attribute__ ((unused));
+static u8 *
+string_table_done (string_table_builder_t * b)
+{
+ hash_free (b->hash);
+ return b->new_table;
+}
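+
+/* Usage sketch (old_table/new_table are placeholders): build a
+ de-duplicated string table. Because every suffix of each added name
+ is hashed, a later name matching the tail of an earlier one reuses
+ its bytes, as ELF string tables permit.
+
+ string_table_builder_t b;
+ u32 foobar, bar;
+
+ string_table_init (&b, old_table);
+ foobar = string_table_add_name (&b, (u8 *) "foobar");
+ bar = string_table_add_name (&b, (u8 *) "bar"); // == foobar + 3
+ new_table = string_table_done (&b);
+*/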
+
+static void
+layout_sections (elf_main_t * em)
+{
+ elf_section_t *s;
+ u32 n_sections_with_changed_exec_address = 0;
+ u32 *deferred_symbol_and_string_sections = 0;
+ u32 n_deleted_sections = 0;
+ /* note: rebuild is always zero. Intent lost in the sands of time */
+#if 0
+ int rebuild = 0;
+
+ /* Re-build section string table (sections may have been deleted). */
+ if (rebuild)
+ {
+ u8 *st = 0;
+
+ vec_foreach (s, em->sections)
+ {
+ u8 *name;
+ if (s->header.type == ~0)
+ continue;
+ name = elf_section_name (em, s);
+ s->header.name = vec_len (st);
+ vec_add (st, name, strlen ((char *) name) + 1);
+ }
+
+ s =
+ vec_elt_at_index (em->sections,
+ em->file_header.section_header_string_table_index);
+
+ vec_free (s->contents);
+ s->contents = st;
+ }
+
+ /* Re-build dynamic string table. */
+ if (rebuild && em->dynamic_string_table_section_index != ~0)
+ {
+ string_table_builder_t b;
+
+ string_table_init (&b, em->dynamic_string_table);
+
+ /* Add all dynamic symbols. */
+ {
+ elf_symbol_table_t *symtab;
+ elf64_symbol_t *sym;
+
+ symtab =
+ vec_elt_at_index (em->symbol_tables,
+ em->dynamic_symbol_table_index);
+ vec_foreach (sym, symtab->symbols)
+ {
+ u8 *name = elf_symbol_name (symtab, sym);
+ sym->name = string_table_add_name (&b, name);
+ }
+
+ set_symbol_table (em, em->dynamic_symbol_table_index);
+ }
+
+ /* Add all dynamic entries. */
+ {
+ elf64_dynamic_entry_t *e;
+
+ vec_foreach (e, em->dynamic_entries)
+ {
+ switch (e->type)
+ {
+ case ELF_DYNAMIC_ENTRY_NEEDED_LIBRARY:
+ case ELF_DYNAMIC_ENTRY_RPATH:
+ case ELF_DYNAMIC_ENTRY_RUN_PATH:
+ e->data = string_table_add_name_index (&b, e->data);
+ break;
+ }
+ }
+ }
+
+ /* Add all version needs. */
+ if (vec_len (em->verneed) > 0)
+ {
+ elf_dynamic_version_need_union_t *n, *a;
+
+ n = em->verneed;
+ while (1)
+ {
+ n->need.file_name_offset =
+ string_table_add_name_index (&b, n->need.file_name_offset);
+
+ if (n->need.first_aux_offset != 0)
+ {
+ a = n + n->need.first_aux_offset / sizeof (n[0]);
+ while (1)
+ {
+ a->aux.name =
+ string_table_add_name_index (&b, a->aux.name);
+ if (a->aux.next_offset == 0)
+ break;
+ a += a->aux.next_offset / sizeof (a[0]);
+ }
+ }
+
+ if (n->need.next_offset == 0)
+ break;
+
+ n += n->need.next_offset / sizeof (n[0]);
+ }
+
+ set_dynamic_verneed (em);
+ }
+
+ s =
+ vec_elt_at_index (em->sections,
+ em->dynamic_string_table_section_index);
+
+ vec_free (s->contents);
+ s->contents = string_table_done (&b);
+ }
+#endif /* dead code */
+
+ /* Figure file offsets and exec addresses for sections. */
+ {
+ u64 exec_address = 0, file_offset = 0;
+ u64 file_size, align_size;
+
+ vec_foreach (s, em->sections)
+ {
+ /* Ignore deleted and unused sections. */
+ switch (s->header.type)
+ {
+ case ~0:
+ n_deleted_sections++;
+ /* fall through */
+ case ELF_SECTION_UNUSED:
+ continue;
+
+ case ELF_SECTION_STRING_TABLE:
+ case ELF_SECTION_SYMBOL_TABLE:
+ if (!(s->index == em->dynamic_string_table_section_index
+ || s->index ==
+ em->file_header.section_header_string_table_index))
+ {
+ vec_add1 (deferred_symbol_and_string_sections, s->index);
+ continue;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ exec_address = round_pow2_u64 (exec_address, s->header.align);
+
+ /* Put sections we added at end of file. */
+ if (s->header.file_offset == ~0)
+ s->header.file_offset = file_offset;
+
+ /* Follow gaps in original file. */
+ if (s->header.exec_address > exec_address)
+ {
+ exec_address = s->header.exec_address;
+ file_offset = s->header.file_offset;
+ }
+
+ if (s->header.flags & ELF_SECTION_FLAG_ALLOC)
+ {
+ s->exec_address_change = exec_address - s->header.exec_address;
+ n_sections_with_changed_exec_address += s->exec_address_change != 0;
+ s->header.exec_address = exec_address;
+ }
+
+ if (s->header.type == ELF_SECTION_NO_BITS)
+ file_size = s->header.file_size;
+ else
+ file_size = vec_len (s->contents);
+
+ {
+ u64 align;
+
+ if (s + 1 >= vec_end (em->sections))
+ align = 16;
+ else if (s[1].header.type == ELF_SECTION_NO_BITS)
+ align = 8;
+ else
+ align = s[1].header.align;
+
+ if (s->header.flags & ELF_SECTION_FLAG_ALLOC)
+ {
+ u64 v = round_pow2_u64 (exec_address + file_size, align);
+ align_size = v - exec_address;
+ }
+ else
+ {
+ u64 v = round_pow2_u64 (file_offset + file_size, align);
+ align_size = v - file_offset;
+ }
+ }
+
+ s->header.file_offset = file_offset;
+ s->header.file_size = file_size;
+ s->align_size = align_size;
+
+ if (s->header.type != ELF_SECTION_NO_BITS)
+ file_offset += align_size;
+ exec_address += align_size;
+ }
+
+ /* Section headers go after last section but before symbol/string
+ tables. */
+ {
+ elf64_file_header_t *fh = &em->file_header;
+
+ fh->section_header_file_offset = file_offset;
+ fh->section_header_count = vec_len (em->sections) - n_deleted_sections;
+ file_offset += (u64) fh->section_header_count * fh->section_header_size;
+ }
+
+ {
+ int i;
+ for (i = 0; i < vec_len (deferred_symbol_and_string_sections); i++)
+ {
+ s =
+ vec_elt_at_index (em->sections,
+ deferred_symbol_and_string_sections[i]);
+
+ s->header.file_offset = file_offset;
+ s->header.file_size = vec_len (s->contents);
+
+ align_size = round_pow2 (vec_len (s->contents), 16);
+ s->align_size = align_size;
+ file_offset += align_size;
+ }
+ vec_free (deferred_symbol_and_string_sections);
+ }
+ }
+
+ /* Update dynamic entries now that sections have been assigned
+ possibly new addresses. */
+#if 0
+ if (rebuild)
+ elf_set_dynamic_entries (em);
+#endif
+
+ /* Update segments for changed section addresses. */
+ {
+ elf_segment_t *g;
+ uword si;
+
+ vec_foreach (g, em->segments)
+ {
+ u64 s_lo, s_hi, f_lo = 0;
+ u32 n_sections = 0;
+
+ if (g->header.memory_size == 0)
+ continue;
+
+ s_lo = s_hi = 0;
+ /* *INDENT-OFF* */
+ clib_bitmap_foreach (si, g->section_index_bitmap, ({
+ u64 lo, hi;
+
+ s = vec_elt_at_index (em->sections, si);
+ lo = s->header.exec_address;
+ hi = lo + s->align_size;
+ if (n_sections == 0)
+ {
+ s_lo = lo;
+ s_hi = hi;
+ f_lo = s->header.file_offset;
+ n_sections++;
+ }
+ else
+ {
+ if (lo < s_lo)
+ {
+ s_lo = lo;
+ f_lo = s->header.file_offset;
+ }
+ if (hi > s_hi)
+ s_hi = hi;
+ }
+ }));
+ /* *INDENT-ON* */
+
+ if (n_sections == 0)
+ continue;
+
+ /* File offset zero includes ELF headers/segment headers.
+ Don't change that. */
+ if (g->header.file_offset == 0 && g->header.type == ELF_SEGMENT_LOAD)
+ {
+ s_lo = g->header.virtual_address;
+ f_lo = g->header.file_offset;
+ }
+
+ g->header.virtual_address = s_lo;
+ g->header.physical_address = s_lo;
+ g->header.file_offset = f_lo;
+ g->header.memory_size = s_hi - s_lo;
+ }
+ }
+}
+
+clib_error_t *
+elf_write_file (elf_main_t * em, char *file_name)
+{
+ int fd;
+ FILE *f;
+ clib_error_t *error = 0;
+
+ fd = open (file_name, O_CREAT | O_RDWR | O_TRUNC, 0755);
+ if (fd < 0)
+ return clib_error_return_unix (0, "open `%s'", file_name);
+
+ f = fdopen (fd, "w");
+
+ /* Section contents may have changed. So, we need to update
+ stuff to reflect this. */
+ layout_sections (em);
+
+ /* Write first header. */
+ {
+ elf_first_header_t h = em->first_header;
+
+ elf_swap_first_header (em, &h);
+ if (fwrite (&h, sizeof (h), 1, f) != 1)
+ {
+ error = clib_error_return_unix (0, "write first header");
+ goto error;
+ }
+ }
+
+ /* Write file header. */
+ {
+ elf64_file_header_t h = em->file_header;
+
+ /* Segment headers are after first header. */
+ h.segment_header_file_offset = sizeof (elf_first_header_t);
+ if (em->first_header.file_class == ELF_64BIT)
+ h.segment_header_file_offset += sizeof (elf64_file_header_t);
+ else
+ h.segment_header_file_offset += sizeof (elf32_file_header_t);
+
+ if (em->first_header.file_class == ELF_64BIT)
+ {
+#define _(t,field) h.field = elf_swap_##t (em, h.field);
+ foreach_elf64_file_header;
+#undef _
+
+ if (fwrite (&h, sizeof (h), 1, f) != 1)
+ {
+ error = clib_error_return_unix (0, "write file header");
+ goto error;
+ }
+ }
+ else
+ {
+ elf32_file_header_t h32;
+
+#define _(t,field) h32.field = elf_swap_##t (em, h.field);
+ foreach_elf32_file_header;
+#undef _
+
+ if (fwrite (&h32, sizeof (h32), 1, f) != 1)
+ {
+ error = clib_error_return_unix (0, "write file header");
+ goto error;
+ }
+ }
+ }
+
+ /* Write segment headers. */
+ {
+ elf_segment_t *s;
+
+ vec_foreach (s, em->segments)
+ {
+ elf64_segment_header_t h;
+
+ if (s->header.type == ~0)
+ continue;
+
+ h = s->header;
+
+ if (em->first_header.file_class == ELF_64BIT)
+ {
+#define _(t,field) h.field = elf_swap_##t (em, h.field);
+ foreach_elf64_segment_header;
+#undef _
+
+ if (fwrite (&h, sizeof (h), 1, f) != 1)
+ {
+ error =
+ clib_error_return_unix (0, "write segment header %U",
+ format_elf_segment, em, s);
+ goto error;
+ }
+ }
+ else
+ {
+ elf32_segment_header_t h32;
+
+#define _(t,field) h32.field = elf_swap_##t (em, h.field);
+ foreach_elf32_segment_header;
+#undef _
+
+ if (fwrite (&h32, sizeof (h32), 1, f) != 1)
+ {
+ error =
+ clib_error_return_unix (0, "write segment header %U",
+ format_elf_segment, em, s);
+ goto error;
+ }
+ }
+ }
+ }
+
+ /* Write contents for all sections. */
+ {
+ elf_section_t *s;
+
+ vec_foreach (s, em->sections)
+ {
+ if (s->header.file_size == 0)
+ continue;
+
+ if (fseek (f, s->header.file_offset, SEEK_SET) < 0)
+ {
+ fclose (f);
+ return clib_error_return_unix (0, "fseek 0x%Lx",
+ s->header.file_offset);
+ }
+
+ if (s->header.type == ELF_SECTION_NO_BITS)
+ /* don't write for .bss sections */ ;
+ else if (fwrite (s->contents, vec_len (s->contents), 1, f) != 1)
+ {
+ error =
+ clib_error_return_unix (0, "write %s section contents",
+ elf_section_name (em, s));
+ goto error;
+ }
+ }
+
+ /* Finally write section headers. */
+ if (fseek (f, em->file_header.section_header_file_offset, SEEK_SET) < 0)
+ {
+ fclose (f);
+ return clib_error_return_unix
+ (0, "fseek 0x%Lx", em->file_header.section_header_file_offset);
+ }
+
+ vec_foreach (s, em->sections)
+ {
+ elf64_section_header_t h;
+
+ if (s->header.type == ~0)
+ continue;
+
+ h = s->header;
+
+ if (em->first_header.file_class == ELF_64BIT)
+ {
+#define _(t,field) h.field = elf_swap_##t (em, h.field);
+ foreach_elf64_section_header;
+#undef _
+
+ if (fwrite (&h, sizeof (h), 1, f) != 1)
+ {
+ error =
+ clib_error_return_unix (0, "write %s section header",
+ elf_section_name (em, s));
+ goto error;
+ }
+ }
+ else
+ {
+ elf32_section_header_t h32;
+
+#define _(t,field) h32.field = elf_swap_##t (em, h.field);
+ foreach_elf32_section_header;
+#undef _
+
+ if (fwrite (&h32, sizeof (h32), 1, f) != 1)
+ {
+ error =
+ clib_error_return_unix (0, "write %s section header",
+ elf_section_name (em, s));
+ goto error;
+ }
+ }
+ }
+ }
+
+error:
+ fclose (f);
+ return error;
+}
+
+clib_error_t *
+elf_delete_named_section (elf_main_t * em, char *section_name)
+{
+ elf_section_t *s = 0;
+ clib_error_t *error;
+
+ error = elf_get_section_by_name (em, section_name, &s);
+ if (error)
+ return error;
+
+ s->header.type = ~0;
+
+ return 0;
+}
+
+void
+elf_create_section_with_contents (elf_main_t * em,
+ char *section_name,
+ elf64_section_header_t * header,
+ void *contents, uword n_content_bytes)
+{
+ elf_section_t *s, *sts;
+ u8 *st, *c;
+ uword *p, is_new_section;
+
+ /* See if section already exists with given name.
+ If so, just replace contents. */
+ is_new_section = 0;
+ if ((p = hash_get_mem (em->section_by_name, section_name)))
+ {
+ s = vec_elt_at_index (em->sections, p[0]);
+ _vec_len (s->contents) = 0;
+ c = s->contents;
+ }
+ else
+ {
+ vec_add2 (em->sections, s, 1);
+ is_new_section = 1;
+ c = 0;
+ }
+
+ sts =
+ vec_elt_at_index (em->sections,
+ em->file_header.section_header_string_table_index);
+ st = sts->contents;
+
+ s->header = header[0];
+
+ s->header.file_offset = ~0;
+ s->header.file_size = n_content_bytes;
+ s->index = s - em->sections;
+
+ /* Add name to string table. */
+ s->header.name = vec_len (st);
+ vec_add (st, section_name, strlen (section_name));
+ vec_add1 (st, 0);
+ sts->contents = st;
+
+ vec_resize (c, n_content_bytes);
+ clib_memcpy (c, contents, n_content_bytes);
+ s->contents = c;
+
+ em->file_header.section_header_count += is_new_section
+ && s->header.type != ~0;
+}
+
+uword
+elf_delete_segment_with_type (elf_main_t * em,
+ elf_segment_type_t segment_type)
+{
+ uword n_deleted = 0;
+ elf_segment_t *s;
+
+ vec_foreach (s, em->segments) if (s->header.type == segment_type)
+ {
+ s->header.type = ~0;
+ n_deleted += 1;
+ }
+
+ ASSERT (em->file_header.segment_header_count >= n_deleted);
+ em->file_header.segment_header_count -= n_deleted;
+
+ return n_deleted;
+}
+
+#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/elf.h b/src/vppinfra/elf.h
new file mode 100644
index 00000000..008ea284
--- /dev/null
+++ b/src/vppinfra/elf.h
@@ -0,0 +1,1062 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_clib_elf_h
+#define included_clib_elf_h
+
+#include <vppinfra/format.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/byte_order.h>
+
+#define foreach_elf_file_class \
+ _ (CLASS_NONE) _ (32BIT) _ (64BIT)
+
+#define foreach_elf_data_encoding \
+ _ (ENCODING_NONE) \
+ _ (TWOS_COMPLEMENT_LITTLE_ENDIAN) \
+ _ (TWOS_COMPLEMENT_BIG_ENDIAN)
+
+#define ELF_VERSION_NONE (0)
+#define ELF_VERSION_CURRENT (1)
+
+#define foreach_elf_abi \
+ _ (SYSV, 0) \
+ _ (HPUX, 1) \
+ _ (NETBSD, 2) \
+ _ (LINUX, 3) \
+ _ (SOLARIS, 6) \
+ _ (AIX, 7) \
+ _ (IRIX, 8) \
+ _ (FREEBSD, 9) \
+ _ (COMPAQ_TRU64, 10) \
+ _ (MODESTO, 11) \
+ _ (OPENBSD, 12) \
+ _ (ARM, 97) \
+ _ (STANDALONE, 255)
+
+/* Legal values for type (object file type). */
+#define foreach_elf_file_type \
+ _ (NONE, 0) \
+ _ (RELOC, 1) \
+ _ (EXEC, 2) \
+ _ (SHARED, 3) \
+ _ (CORE, 4) \
+ _ (OS_SPECIFIC_LO, 0xfe00) \
+ _ (OS_SPECIFIC_HI, 0xfeff) \
+ _ (ARCH_SPECIFIC_LO, 0xff00) \
+ _ (ARCH_SPECIFIC_HI, 0xffff)
+
+/* Legal values for architecture. */
+#define foreach_elf_architecture \
+ _ (NONE, 0) /* No machine */ \
+ _ (M32, 1) /* AT&T WE 32100 */ \
+ _ (SPARC, 2) /* SUN SPARC */ \
+ _ (386, 3) /* Intel 80386 */ \
+ _ (68K, 4) /* Motorola m68k family */ \
+ _ (88K, 5) /* Motorola m88k family */ \
+ _ (860, 7) /* Intel 80860 */ \
+ _ (MIPS, 8) /* MIPS R3000 big-endian */ \
+ _ (S370, 9) /* IBM System/370 */ \
+ _ (MIPS_RS3_LE, 10) /* MIPS R3000 little-endian */ \
+ _ (PARISC, 15) /* HPPA */ \
+ _ (VPP500, 17) /* Fujitsu VPP500 */ \
+ _ (SPARC32PLUS, 18) /* Sun's "v8plus" */ \
+ _ (960, 19) /* Intel 80960 */ \
+ _ (PPC, 20) /* PowerPC */ \
+ _ (PPC64, 21) /* PowerPC 64-bit */ \
+ _ (S390, 22) /* IBM S390 */ \
+ _ (V800, 36) /* NEC V800 series */ \
+ _ (FR20, 37) /* Fujitsu FR20 */ \
+ _ (RH32, 38) /* TRW RH-32 */ \
+ _ (RCE, 39) /* Motorola RCE */ \
+ _ (ARM, 40) /* ARM */ \
+ _ (FAKE_ALPHA, 41) /* Digital Alpha */ \
+ _ (SH, 42) /* Hitachi SH */ \
+ _ (SPARCV9, 43) /* SPARC v9 64-bit */ \
+ _ (TRICORE, 44) /* Siemens Tricore */ \
+ _ (ARC, 45) /* Argonaut RISC Core */ \
+ _ (H8_300, 46) /* Hitachi H8/300 */ \
+ _ (H8_300H, 47) /* Hitachi H8/300H */ \
+ _ (H8S, 48) /* Hitachi H8S */ \
+ _ (H8_500, 49) /* Hitachi H8/500 */ \
+ _ (IA_64, 50) /* Intel Merced */ \
+ _ (MIPS_X, 51) /* Stanford MIPS-X */ \
+ _ (COLDFIRE, 52) /* Motorola Coldfire */ \
+ _ (68HC12, 53) /* Motorola M68HC12 */ \
+ _ (MMA, 54) /* Fujitsu MMA Multimedia Accel. */ \
+ _ (PCP, 55) /* Siemens PCP */ \
+ _ (NCPU, 56) /* Sony nCPU embedded RISC */ \
+ _ (NDR1, 57) /* Denso NDR1 microprocessor */ \
+ _ (STARCORE, 58) /* Motorola Star*Core processor */ \
+ _ (ME16, 59) /* Toyota ME16 processor */ \
+ _ (ST100, 60) /* STMicroelectronics ST100 */ \
+ _ (TINYJ, 61) /* Advanced Logic Corp. Tinyj */ \
+ _ (X86_64, 62) /* AMD x86-64 architecture */ \
+ _ (PDSP, 63) /* Sony DSP Processor */ \
+ _ (FX66, 66) /* Siemens FX66 microcontroller */ \
+ _ (ST9PLUS, 67) /* STMicroelectronics ST9+ 8/16 mc */ \
+ _ (ST7, 68) /* STMicroelectronics ST7 8 bit mc */ \
+ _ (68HC16, 69) /* Motorola MC68HC16 */ \
+ _ (68HC11, 70) /* Motorola MC68HC11 */ \
+ _ (68HC08, 71) /* Motorola MC68HC08 */ \
+ _ (68HC05, 72) /* Motorola MC68HC05 */ \
+ _ (SVX, 73) /* Silicon Graphics SVx */ \
+ _ (ST19, 74) /* STMicroelectronics ST19 8 bit mc */ \
+ _ (VAX, 75) /* Digital VAX */ \
+ _ (CRIS, 76) /* Axis 32-bit embedded proc. */ \
+ _ (JAVELIN, 77) /* Infineon 32-bit embedded proc. */ \
+ _ (FIREPATH, 78) /* Element 14 64-bit DSP Processor */ \
+ _ (ZSP, 79) /* LSI Logic 16-bit DSP Processor */ \
+ _ (MMIX, 80) /* Knuth's 64-bit processor */ \
+ _ (HUANY, 81) /* Harvard machine-independent */ \
+ _ (PRISM, 82) /* SiTera Prism */ \
+ _ (AVR, 83) /* Atmel AVR 8-bit microcontroller */ \
+ _ (FR30, 84) /* Fujitsu FR30 */ \
+ _ (D10V, 85) /* Mitsubishi D10V */ \
+ _ (D30V, 86) /* Mitsubishi D30V */ \
+ _ (V850, 87) /* NEC v850 */ \
+ _ (M32R, 88) /* Mitsubishi M32R */ \
+ _ (MN10300, 89) /* Matsushita MN10300 */ \
+ _ (MN10200, 90) /* Matsushita MN10200 */ \
+ _ (PJ, 91) /* picoJava */ \
+ _ (OPENRISC, 92) /* OpenRISC 32-bit processor */ \
+ _ (ARC_A5, 93) /* ARC Cores Tangent-A5 */ \
+ _ (XTENSA, 94) /* Tensilica Xtensa Architecture */ \
+ _ (ALPHA, 0x9026)
+
+#define _(f) ELF_##f,
+
+typedef enum
+{
+ foreach_elf_file_class ELF_N_FILE_CLASS,
+} elf_file_class_t;
+
+typedef enum
+{
+ foreach_elf_data_encoding ELF_N_DATA_ENCODING,
+} elf_data_encoding_t;
+
+#undef _
+
+#define _(f,i) ELF_##f = i,
+
+typedef enum
+{
+ foreach_elf_abi
+} elf_abi_t;
+
+typedef enum
+{
+ foreach_elf_file_type
+} elf_file_type_t;
+
+#undef _
+
+typedef enum
+{
+#define _(f,i) ELF_ARCH_##f = i,
+ foreach_elf_architecture
+#undef _
+} elf_architecture_t;
+
+typedef struct
+{
+ /* 0x7f ELF */
+ u8 magic[4];
+
+ elf_file_class_t file_class:8;
+ elf_data_encoding_t data_encoding:8;
+ u8 file_version_ident;
+ elf_abi_t abi:8;
+ u8 abi_version;
+
+ u8 pad[7];
+
+ elf_file_type_t file_type:16;
+ elf_architecture_t architecture:16;
+
+ u32 file_version;
+} elf_first_header_t;
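+
+/* A minimal validity check over this header might look like the
+   following sketch (illustrative helper, not part of this API):
+
+     static int
+     elf_first_header_looks_valid (elf_first_header_t * h)
+     {
+       return h->magic[0] == 0x7f
+         && h->magic[1] == 'E' && h->magic[2] == 'L' && h->magic[3] == 'F'
+         && h->file_version_ident == ELF_VERSION_CURRENT;
+     }
+*/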
+
+/* 32/64 bit file header following basic file header. */
+#define foreach_elf32_file_header \
+ _ (u32, entry_point) \
+ _ (u32, segment_header_file_offset) \
+ _ (u32, section_header_file_offset) \
+ _ (u32, flags) \
+ _ (u16, n_bytes_this_header) \
+ _ (u16, segment_header_size) \
+ _ (u16, segment_header_count) \
+ _ (u16, section_header_size) \
+ _ (u16, section_header_count) \
+ _ (u16, section_header_string_table_index)
+
+#define foreach_elf64_file_header \
+ _ (u64, entry_point) \
+ _ (u64, segment_header_file_offset) \
+ _ (u64, section_header_file_offset) \
+ _ (u32, flags) \
+ _ (u16, n_bytes_this_header) \
+ _ (u16, segment_header_size) \
+ _ (u16, segment_header_count) \
+ _ (u16, section_header_size) \
+ _ (u16, section_header_count) \
+ _ (u16, section_header_string_table_index)
+
+/* Section header. */
+#define foreach_elf32_section_header \
+ _ (u32, name) \
+ _ (u32, type) \
+ _ (u32, flags) \
+ _ (u32, exec_address) \
+ _ (u32, file_offset) \
+ _ (u32, file_size) \
+ _ (u32, link) \
+ _ (u32, additional_info) \
+ _ (u32, align) \
+ _ (u32, entry_size)
+
+#define foreach_elf64_section_header \
+ _ (u32, name) \
+ _ (u32, type) \
+ _ (u64, flags) \
+ _ (u64, exec_address) \
+ _ (u64, file_offset) \
+ _ (u64, file_size) \
+ _ (u32, link) \
+ _ (u32, additional_info) \
+ _ (u64, align) \
+ _ (u64, entry_size)
+
+/* Program segment header. */
+#define foreach_elf32_segment_header \
+ _ (u32, type) \
+ _ (u32, file_offset) \
+ _ (u32, virtual_address) \
+ _ (u32, physical_address) \
+ _ (u32, file_size) \
+ _ (u32, memory_size) \
+ _ (u32, flags) \
+ _ (u32, align)
+
+#define foreach_elf64_segment_header \
+ _ (u32, type) \
+ _ (u32, flags) \
+ _ (u64, file_offset) \
+ _ (u64, virtual_address) \
+ _ (u64, physical_address) \
+ _ (u64, file_size) \
+ _ (u64, memory_size) \
+ _ (u64, align)
+
+/* Symbol table. */
+#define foreach_elf32_symbol_header \
+ _ (u32, name) \
+ _ (u32, value) \
+ _ (u32, size) \
+ /* binding upper 4 bits; type lower 4 bits */ \
+ _ (u8, binding_and_type) \
+ _ (u8, visibility) \
+ _ (u16, section_index)
+
+#define foreach_elf64_symbol_header \
+ _ (u32, name) \
+ _ (u8, binding_and_type) \
+ _ (u8, visibility) \
+ _ (u16, section_index) \
+ _ (u64, value) \
+ _ (u64, size)
+
+#define _(t,f) t f;
+
+typedef struct
+{
+foreach_elf32_file_header} elf32_file_header_t;
+
+typedef struct
+{
+foreach_elf64_file_header} elf64_file_header_t;
+
+typedef struct
+{
+foreach_elf32_section_header} elf32_section_header_t;
+
+typedef struct
+{
+foreach_elf64_section_header} elf64_section_header_t;
+
+typedef struct
+{
+foreach_elf32_segment_header} elf32_segment_header_t;
+
+typedef struct
+{
+foreach_elf64_segment_header} elf64_segment_header_t;
+
+typedef struct
+{
+foreach_elf32_symbol_header} elf32_symbol_t;
+
+typedef struct
+{
+foreach_elf64_symbol_header} elf64_symbol_t;
+#undef _
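+
+/* The _(t,f) => "t f;" expansion above turns each foreach_* list into
+   struct members; for example, elf64_symbol_t expands to:
+
+     typedef struct {
+       u32 name;
+       u8 binding_and_type;
+       u8 visibility;
+       u16 section_index;
+       u64 value;
+       u64 size;
+     } elf64_symbol_t;
+*/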
+
+/* Special section names. */
+#define foreach_elf_symbol_reserved_section_index \
+ _ (ABSOLUTE, 0xfff1) /* Associated symbol is absolute */ \
+ _ (COMMON, 0xfff2) /* Associated symbol is common */ \
+ _ (XINDEX, 0xffff) /* Index is in extra table. */
+
+#define ELF_SYMBOL_SECTION_RESERVED_LO 0xff00
+#define ELF_SYMBOL_SECTION_RESERVED_HI 0xffff
+#define ELF_SYMBOL_SECTION_ARCH_SPECIFIC_LO 0xff00
+#define ELF_SYMBOL_SECTION_ARCH_SPECIFIC_HI 0xff1f
+#define ELF_SYMBOL_SECTION_OS_SPECIFIC_LO 0xff20
+#define ELF_SYMBOL_SECTION_OS_SPECIFIC_HI 0xff3f
+
+/* Section types. */
+#define foreach_elf_section_type \
+ _ (UNUSED, 0) \
+ _ (PROGRAM_DATA, 1) \
+ _ (SYMBOL_TABLE, 2) \
+ _ (STRING_TABLE, 3) \
+ _ (RELOCATION_ADD, 4) \
+ _ (SYMBOL_TABLE_HASH, 5) \
+ _ (DYNAMIC, 6) /* Dynamic linking information */ \
+ _ (NOTE, 7) /* Notes */ \
+ _ (NO_BITS, 8) /* Program space with no data (bss) */ \
+ _ (RELOCATION, 9) /* Relocation entries, no addends */ \
+ _ (DYNAMIC_SYMBOL_TABLE, 11) /* Dynamic linker symbol table */ \
+ _ (INIT_ARRAY, 14) /* Array of constructors */ \
+ _ (FINI_ARRAY, 15) /* Array of destructors */ \
+ _ (PREINIT_ARRAY, 16) /* Array of pre-constructors */ \
+ _ (GROUP, 17) /* Section group */ \
+ _ (SYMTAB_SHNDX, 18) /* Extended section indices */ \
+ _ (OS_SPECIFIC_LO, 0x60000000) /* Start OS-specific */ \
+ _ (GNU_LIBLIST, 0x6ffffff7) /* Prelink library list */ \
+ _ (CHECKSUM, 0x6ffffff8) /* Checksum for DSO content. */ \
+ _ (SUNW_MOVE, 0x6ffffffa) \
+ _ (SUNW_COMDAT, 0x6ffffffb) \
+ _ (SUNW_SYMINFO, 0x6ffffffc) \
+ _ (GNU_VERDEF, 0x6ffffffd) /* Version definition section. */ \
+ _ (GNU_VERNEED, 0x6ffffffe) /* Version needs section. */ \
+ _ (GNU_VERSYM, 0x6fffffff) /* Version symbol table. */ \
+ _ (ARCH_SPECIFIC_LO, 0x70000000) /* Start of processor-specific */ \
+ _ (ARCH_SPECIFIC_HI, 0x7fffffff) /* End of processor-specific */ \
+ _ (APP_SPECIFIC_LO, 0x80000000) /* Start of application-specific */ \
+ _ (APP_SPECIFIC_HI, 0x8fffffff) /* End of application-specific */
+
+/* Section flags. */
+#define foreach_elf_section_flag \
+ _ (WRITE, 0) \
+ _ (ALLOC, 1) \
+ _ (EXEC, 2) \
+ _ (MERGE, 3) \
+ _ (STRING_TABLE, 5) \
+ _ (INFO_LINK, 6) \
+ _ (PRESERVE_LINK_ORDER, 7) \
+ _ (OS_NON_CONFORMING, 8) \
+ _ (GROUP, 9) \
+ _ (TLS, 10) \
+ _ (OS_SPECIFIC_LO, 20) \
+ _ (OS_SPECIFIC_HI, 27) \
+ _ (ARCH_SPECIFIC_LO, 28) \
+ _ (ARCH_SPECIFIC_HI, 31)
+
+typedef enum
+{
+#define _(f,i) ELF_SECTION_##f = i,
+ foreach_elf_section_type
+#undef _
+ ELF_SECTION_OS_SPECIFIC_HI = 0x6fffffff,
+} elf_section_type_t;
+
+typedef enum
+{
+#define _(f,i) ELF_SECTION_FLAG_BIT_##f = i,
+ foreach_elf_section_flag
+#undef _
+} elf_section_flag_bit_t;
+
+typedef enum
+{
+#define _(f,i) ELF_SECTION_FLAG_##f = 1 << ELF_SECTION_FLAG_BIT_##f,
+ foreach_elf_section_flag
+#undef _
+} elf_section_flag_t;
+
+/* Symbol bindings (upper 4 bits of binding_and_type). */
+#define foreach_elf_symbol_binding \
+ _ (LOCAL, 0) /* Local symbol */ \
+ _ (GLOBAL, 1) /* Global symbol */ \
+ _ (WEAK, 2) /* Weak symbol */ \
+ _ (OS_SPECIFIC_LO, 10) /* Start of OS-specific */ \
+ _ (OS_SPECIFIC_HI, 12) /* End of OS-specific */ \
+ _ (ARCH_SPECIFIC_LO, 13) /* Start of processor-specific */ \
+ _ (ARCH_SPECIFIC_HI, 15) /* End of processor-specific */
+
+/* Symbol types (lower 4 bits of binding_and_type). */
+#define foreach_elf_symbol_type \
+ _ (NONE, 0) \
+ _ (DATA, 1) /* Symbol is a data object */ \
+ _ (CODE, 2) /* Symbol is a code object */ \
+ _ (SECTION, 3) /* Symbol associated with a section */ \
+ _ (FILE, 4) /* Symbol's name is file name */ \
+ _ (COMMON, 5) /* Symbol is a common data object */ \
+ _ (TLS, 6) /* Symbol is thread-local data */ \
+ _ (OS_SPECIFIC_LO, 10) /* Start of OS-specific */ \
+ _ (OS_SPECIFIC_HI, 12) /* End of OS-specific */ \
+ _ (ARCH_SPECIFIC_LO, 13) /* Start of processor-specific */ \
+ _ (ARCH_SPECIFIC_HI, 15) /* End of processor-specific */
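+
+/* Since binding and type share one byte, a sketch of how they would be
+   unpacked (helper names are illustrative):
+
+     static inline u8 elf_symbol_binding (elf64_symbol_t * s)
+     { return s->binding_and_type >> 4; }
+
+     static inline u8 elf_symbol_type (elf64_symbol_t * s)
+     { return s->binding_and_type & 0xf; }
+*/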
+
+/* Symbol visibility. */
+#define foreach_elf_symbol_visibility \
+ _ (DEFAULT, 0) /* Default symbol visibility rules */ \
+ _ (INTERNAL, 1) /* Processor specific hidden class */ \
+ _ (HIDDEN, 2) /* Unavailable in other modules */ \
+ _ (PROTECTED, 3) /* Not preemptible, not exported */
+
+/* The syminfo section, if available, contains additional
+   information about every dynamic symbol. */
+typedef struct
+{
+ u16 bound_to;
+ u16 flags;
+} elf_symbol_info_t;
+
+/* Possible values for bound_to. */
+#define foreach_elf_symbol_info_bound_to \
+ _ (SELF, 0xffff) /* Symbol bound to self */ \
+ _ (PARENT, 0xfffe) /* Symbol bound to parent */ \
+ _ (RESERVED_LO, 0xff00) \
+ _ (RESERVED_HI, 0xffff)
+
+/* Symbol info flags. */
+#define foreach_elf_symbol_info_flags \
+ _ (DIRECT) /* Direct bound symbol */ \
+ _ (PASS_THRU) /* Pass-thru symbol for translator */ \
+ _ (COPY) /* Symbol is a copy-reloc */ \
+ _ (LAZY_LOAD) /* Symbol bound to object to be lazy loaded */
+
+/* Relocation table entry with/without addend. */
+typedef struct
+{
+ u32 address;
+ u32 symbol_and_type; /* high 24 symbol, low 8 type. */
+ i32 addend[0];
+} elf32_relocation_t;
+
+typedef struct
+{
+ u64 address;
+ u64 symbol_and_type; /* high 32 symbol, low 32 type. */
+ i64 addend[0];
+} elf64_relocation_t;
+
+typedef struct
+{
+ u64 address;
+ u64 symbol_and_type;
+ u64 addend;
+} elf_relocation_with_addend_t;
+
+#define elf_relocation_next(r,type) \
+ ((void *) ((r) + 1) \
+ + ((type) == ELF_SECTION_RELOCATION_ADD ? sizeof ((r)->addend[0]) : 0))
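+
+/* Sketch of walking a 64-bit relocation section with the macro above
+   (assumes `contents' points at the raw section data and
+   `section_type' is ELF_SECTION_RELOCATION or
+   ELF_SECTION_RELOCATION_ADD):
+
+     elf64_relocation_t *r = contents;
+     while ((void *) r < contents + n_bytes)
+       {
+         u32 symbol = r->symbol_and_type >> 32;
+         u32 type = r->symbol_and_type & 0xffffffff;
+         ... process symbol/type ...
+         r = elf_relocation_next (r, section_type);
+       }
+*/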
+
+/* Segment type. */
+#define foreach_elf_segment_type \
+ _ (UNUSED, 0) \
+ _ (LOAD, 1) /* Loadable program segment */ \
+ _ (DYNAMIC, 2) /* Dynamic linking information */ \
+ _ (INTERP, 3) /* Program interpreter */ \
+ _ (NOTE, 4) /* Auxiliary information */ \
+ _ (SEGMENT_TABLE, 6) /* Entry for header table itself */ \
+ _ (TLS, 7) /* Thread-local storage segment */ \
+ _ (OS_SPECIFIC_LO, 0x60000000) /* Start of OS-specific */ \
+ _ (GNU_EH_FRAME, 0x6474e550) /* GCC .eh_frame_hdr segment */ \
+ _ (GNU_STACK, 0x6474e551) /* Indicates stack executability */ \
+ _ (GNU_RELRO, 0x6474e552) /* Read-only after relocation */ \
+ _ (SUNW_BSS, 0x6ffffffa) /* Sun specific BSS */ \
+ _ (SUNW_STACK, 0x6ffffffb) /* Sun specific stack */ \
+ _ (OS_SPECIFIC_HI, 0x6fffffff) /* End of OS-specific */ \
+ _ (ARCH_SPECIFIC_LO, 0x70000000) /* Start of processor-specific */ \
+ _ (ARCH_SPECIFIC_HI, 0x7fffffff) /* End of processor-specific */
+
+/* Segment flags. */
+#define foreach_elf_segment_flag \
+ _ (EXEC, 0) \
+ _ (WRITE, 1) \
+ _ (READ, 2) \
+ _ (OS_SPECIFIC_LO, 20) \
+ _ (OS_SPECIFIC_HI, 27) \
+ _ (ARCH_SPECIFIC_LO, 28) \
+ _ (ARCH_SPECIFIC_HI, 31)
+
+typedef enum
+{
+#define _(f,i) ELF_SEGMENT_##f = i,
+ foreach_elf_segment_type
+#undef _
+} elf_segment_type_t;
+
+typedef enum
+{
+#define _(f,i) ELF_SEGMENT_FLAG_BIT_##f = i,
+ foreach_elf_segment_flag
+#undef _
+} elf_segment_flag_bit_t;
+
+typedef enum
+{
+#define _(f,i) ELF_SEGMENT_FLAG_##f = 1 << ELF_SEGMENT_FLAG_BIT_##f,
+ foreach_elf_segment_flag
+#undef _
+} elf_segment_flag_t;
+
+#define foreach_elf32_dynamic_entry_header \
+ _ (u32, type) \
+ _ (u32, data)
+
+#define foreach_elf64_dynamic_entry_header \
+ _ (u64, type) \
+ _ (u64, data)
+
+#define _(t,f) t f;
+
+typedef struct
+{
+foreach_elf32_dynamic_entry_header} elf32_dynamic_entry_t;
+
+typedef struct
+{
+foreach_elf64_dynamic_entry_header} elf64_dynamic_entry_t;
+
+#undef _
+
+#define foreach_elf_dynamic_entry_type \
+ _ (END, 0) /* Marks end of dynamic section */ \
+ _ (NEEDED_LIBRARY, 1) /* Name of needed library */ \
+ _ (PLT_RELOCATION_SIZE, 2) /* Size in bytes of PLT relocs */ \
+ _ (PLT_GOT, 3) /* Processor defined value */ \
+ _ (SYMBOL_HASH, 4) /* Address of symbol hash table */ \
+ _ (STRING_TABLE, 5) /* Address of string table */ \
+ _ (SYMBOL_TABLE, 6) /* Address of symbol table */ \
+ _ (RELA_ADDRESS, 7) /* Address of Rela relocs */ \
+ _ (RELA_SIZE, 8) /* Total size of Rela relocs */ \
+ _ (RELA_ENTRY_SIZE, 9) /* Size of one Rela reloc */ \
+ _ (STRING_TABLE_SIZE, 10) /* Size of string table */ \
+ _ (SYMBOL_TABLE_ENTRY_SIZE, 11) /* Size of one symbol table entry */ \
+ _ (INIT_FUNCTION, 12) /* Address of init function */ \
+ _ (FINI_FUNCTION, 13) /* Address of termination function */ \
+ _ (SONAME, 14) /* Name of shared object */ \
+ _ (RPATH, 15) /* Library search path (deprecated) */ \
+ _ (SYMBOLIC, 16) /* Start symbol search here */ \
+ _ (REL, 17) /* Address of Rel relocs */ \
+ _ (RELSZ, 18) /* Total size of Rel relocs */ \
+ _ (RELENT, 19) /* Size of one Rel reloc */ \
+ _ (PLT_RELOCATION_TYPE, 20) /* Type of reloc in PLT */ \
+ _ (DEBUG, 21) /* For debugging; unspecified */ \
+ _ (TEXTREL, 22) /* Reloc might modify .text */ \
+ _ (PLT_RELOCATION_ADDRESS, 23) /* Address of PLT relocs */ \
+ _ (BIND_NOW, 24) /* Process relocations of object */ \
+ _ (INIT_ARRAY, 25) /* Array with addresses of init fct */ \
+ _ (FINI_ARRAY, 26) /* Array with addresses of fini fct */ \
+ _ (INIT_ARRAYSZ, 27) /* Size in bytes of DT_INIT_ARRAY */ \
+ _ (FINI_ARRAYSZ, 28) /* Size in bytes of DT_FINI_ARRAY */ \
+ _ (RUN_PATH, 29) /* Library search path */ \
+ _ (FLAGS, 30) /* Flags for object being loaded */ \
+ _ (ENCODING, 31) /* Start of encoded range */ \
+ _ (PREINIT_ARRAY, 32) /* Array with addresses of fns */ \
+ _ (PREINIT_ARRAY_SIZE, 33) /* Size of PREINIT_ARRAY in bytes. */ \
+ _ (GNU_PRELINKED, 0x6ffffdf5) /* Prelinking timestamp */ \
+ _ (GNU_CONFLICTSZ, 0x6ffffdf6) /* Size of conflict section */ \
+ _ (GNU_LIBLISTSZ, 0x6ffffdf7) /* Size of library list */ \
+ _ (CHECKSUM, 0x6ffffdf8) \
+ _ (PLTPADSZ, 0x6ffffdf9) \
+ _ (MOVEENT, 0x6ffffdfa) \
+ _ (MOVESZ, 0x6ffffdfb) \
+ _ (FEATURE_1, 0x6ffffdfc) /* Feature selection (DTF_*). */ \
+ _ (POSFLAG_1, 0x6ffffdfd) /* Flags for following entries. */ \
+ _ (SYMINSZ, 0x6ffffdfe) /* Size of syminfo table (in bytes) */ \
+ _ (SYMINENT, 0x6ffffdff) /* Entry size of syminfo */ \
+ _ (GNU_HASH, 0x6ffffef5) \
+ _ (GNU_CONFLICT, 0x6ffffef8) /* Start of conflict section */ \
+ _ (GNU_LIBLIST, 0x6ffffef9) /* Library list */ \
+ _ (CONFIG, 0x6ffffefa) /* Configuration information. */ \
+ _ (DEPAUDIT, 0x6ffffefb) /* Dependency auditing. */ \
+ _ (AUDIT, 0x6ffffefc) /* Object auditing. */ \
+ _ (PLTPAD, 0x6ffffefd) /* PLT padding. */ \
+ _ (MOVETAB, 0x6ffffefe) /* Move table. */ \
+ _ (SYMINFO, 0x6ffffeff) /* Syminfo table. */ \
+ _ (VERSYM, 0x6ffffff0) \
+ _ (RELACOUNT, 0x6ffffff9) \
+ _ (RELCOUNT, 0x6ffffffa) \
+ _ (FLAGS_1, 0x6ffffffb) /* State flags, see DF_1_* below. */ \
+ _ (VERSION_DEF, 0x6ffffffc) /* Address of version definition table */ \
+ _ (VERSION_DEF_COUNT, 0x6ffffffd) /* Number of version definitions */ \
+ _ (VERSION_NEED, 0x6ffffffe) /* Address of table with needed versions */ \
+ _ (VERSION_NEED_COUNT, 0x6fffffff) /* Number of needed versions */ \
+ _ (AUXILIARY, 0x7ffffffd) /* Shared object to load before self */ \
+ _ (FILTER, 0x7fffffff) /* Shared object to get values from */
+
+typedef enum
+{
+#define _(f,n) ELF_DYNAMIC_ENTRY_##f = (n),
+ foreach_elf_dynamic_entry_type
+#undef _
+} elf_dynamic_entry_type_t;
+
+/* Values of `d_un.d_val' in the DT_FLAGS entry. */
+#define ELF_DYNAMIC_FLAGS_ORIGIN (1 << 0) /* Object may use DF_ORIGIN */
+#define ELF_DYNAMIC_FLAGS_SYMBOLIC (1 << 1) /* Symbol resolutions starts here */
+#define ELF_DYNAMIC_FLAGS_TEXT_RELOCATIONS (1 << 2) /* Object contains text relocations */
+#define ELF_DYNAMIC_FLAGS_BIND_NOW (1 << 3) /* No lazy binding for this object */
+#define ELF_DYNAMIC_FLAGS_STATIC_TLS (1 << 4) /* Module uses the static TLS model */
+
+/* State flags selectable in the `d_un.d_val' element of the DT_FLAGS_1
+ entry in the dynamic section. */
+#define DF_1_NOW 0x00000001 /* Set RTLD_NOW for this object. */
+#define DF_1_GLOBAL 0x00000002 /* Set RTLD_GLOBAL for this object. */
+#define DF_1_GROUP 0x00000004 /* Set RTLD_GROUP for this object. */
+#define DF_1_NODELETE 0x00000008 /* Set RTLD_NODELETE for this object. */
+#define DF_1_LOADFLTR 0x00000010 /* Trigger filtee loading at runtime. */
+#define DF_1_INITFIRST 0x00000020 /* Set RTLD_INITFIRST for this object */
+#define DF_1_NOOPEN 0x00000040 /* Set RTLD_NOOPEN for this object. */
+#define DF_1_ORIGIN 0x00000080 /* $ORIGIN must be handled. */
+#define DF_1_DIRECT 0x00000100 /* Direct binding enabled. */
+#define DF_1_TRANS 0x00000200
+#define DF_1_INTERPOSE 0x00000400 /* Object is used to interpose. */
+#define DF_1_NODEFLIB 0x00000800 /* Ignore default lib search path. */
+#define DF_1_NODUMP 0x00001000 /* Object can't be dldump'ed. */
+#define DF_1_CONFALT 0x00002000 /* Configuration alternative created. */
+#define DF_1_ENDFILTEE 0x00004000 /* Filtee terminates filters search. */
+#define DF_1_DISPRELDNE 0x00008000 /* Disp reloc applied at build time. */
+#define DF_1_DISPRELPND 0x00010000 /* Disp reloc applied at run-time. */
+
+/* Flags for the feature selection in DT_FEATURE_1. */
+#define DTF_1_PARINIT 0x00000001
+#define DTF_1_CONFEXP 0x00000002
+
+/* Flags in the DT_POSFLAG_1 entry affecting only the next DT_* entry. */
+#define DF_P1_LAZYLOAD 0x00000001 /* Lazyload following object. */
+#define DF_P1_GROUPPERM 0x00000002 /* Symbols from next object are not
+ generally available. */
+
+/* Version definition sections. */
+typedef struct
+{
+ u16 version;
+ u16 flags;
+ u16 index;
+ u16 aux_count;
+ u32 name_hash;
+ u32 aux_byte_offset;
+ u32 byte_offset_next_version_definition;
+} elf_dynamic_version_definition_t;
+
+typedef struct
+{
+ u32 name;
+ u32 next_offset; /* byte offset of ver def aux next entry */
+} elf_dynamic_version_definition_aux_t;
+
+/* Version definition flags. */
+#define ELF_DYNAMIC_VERSION_FILE (1 << 0) /* Version definition of file itself */
+#define ELF_DYNAMIC_VERSION_WEAK (1 << 1) /* Weak version identifier */
+
+/* Version symbol index. */
+#define ELF_DYNAMIC_VERSYM_LOCAL 0 /* Symbol is local. */
+#define ELF_DYNAMIC_VERSYM_GLOBAL 1 /* Symbol is global. */
+#define ELF_DYNAMIC_VERSYM_RESERVED_LO 0xff00 /* Beginning of reserved entries. */
+#define ELF_DYNAMIC_VERSYM_ELIMINATE 0xff01 /* Symbol is to be eliminated. */
+
+/* Version dependency section. */
+#define foreach_elf_dynamic_version_need_field \
+ _ (u16, version) \
+ _ (u16, aux_count) \
+ _ (u32, file_name_offset) \
+ _ (u32, first_aux_offset) \
+ _ (u32, next_offset)
+
+#define foreach_elf_dynamic_version_need_aux_field \
+ _ (u32, hash) \
+ _ (u16, flags) \
+ _ (u16, versym_index) \
+ _ (u32, name) \
+ _ (u32, next_offset)
+
+typedef struct
+{
+#define _(t,f) t f;
+ foreach_elf_dynamic_version_need_field
+#undef _
+} elf_dynamic_version_need_t;
+
+typedef struct
+{
+#define _(t,f) t f;
+ foreach_elf_dynamic_version_need_aux_field
+#undef _
+} elf_dynamic_version_need_aux_t;
+
+typedef union
+{
+ elf_dynamic_version_need_t need;
+ elf_dynamic_version_need_aux_t aux;
+} elf_dynamic_version_need_union_t;
+
+/* Note section contents. Each entry in the note section begins with
+ a header of a fixed form. */
+
+typedef struct
+{
+ u32 name_size;
+ u32 descriptor_size;
+ u32 type;
+} elf_note_t;
+
+/* Known names of notes. */
+
+/* Solaris entries in the note section have this name. */
+#define ELF_NOTE_SOLARIS "SUNW Solaris"
+
+/* Note entries for GNU systems have this name. */
+#define ELF_NOTE_GNU "GNU"
+
+
+/* Defined types of notes for Solaris. */
+
+/* Value of descriptor (one word) is desired pagesize for the binary. */
+#define ELF_NOTE_PAGESIZE_HINT 1
+
+
+/* Defined note types for GNU systems. */
+
+/* ABI information. The descriptor consists of words:
+ word 0: OS descriptor
+ word 1: major version of the ABI
+ word 2: minor version of the ABI
+ word 3: subminor version of the ABI
+*/
+#ifndef ELF_NOTE_ABI
+#define ELF_NOTE_ABI 1
+#endif
+
+/* Known OSes. These values can appear in word 0 of an ELF_NOTE_ABI
+ note section entry. */
+#define ELF_NOTE_OS_LINUX 0
+#define ELF_NOTE_OS_GNU 1
+#define ELF_NOTE_OS_SOLARIS2 2
+#define ELF_NOTE_OS_FREEBSD 3
+
+/* AMD x86-64 relocations. */
+#define foreach_elf_x86_64_relocation_type \
+ _ (NONE, 0) /* No reloc */ \
+ _ (DIRECT_64, 1) /* Direct 64 bit */ \
+ _ (PC_REL_I32, 2) /* PC relative 32 bit signed */ \
+ _ (GOT_REL_32, 3) /* 32 bit GOT entry */ \
+ _ (PLT_REL_32, 4) /* 32 bit PLT address */ \
+ _ (COPY, 5) /* Copy symbol at runtime */ \
+ _ (CREATE_GOT, 6) /* Create GOT entry */ \
+ _ (CREATE_PLT, 7) /* Create PLT entry */ \
+ _ (RELATIVE, 8) /* Adjust by program base */ \
+ _ (PC_REL_I32_GOT, 9) /* 32 bit PC relative offset to GOT */ \
+ _ (DIRECT_U32, 10) /* Direct 32 bit zero extended */ \
+ _ (DIRECT_I32, 11) /* Direct 32 bit sign extended */ \
+ _ (DIRECT_U16, 12) /* Direct 16 bit zero extended */ \
+ _ (PC_REL_I16, 13) /* 16 bit sign extended pc relative */ \
+ _ (DIRECT_I8, 14) /* Direct 8 bit sign extended */ \
+ _ (PC_REL_I8, 15) /* 8 bit sign extended pc relative */ \
+ _ (DTPMOD64, 16) /* ID of module containing symbol */ \
+ _ (DTPOFF64, 17) /* Offset in module's TLS block */ \
+ _ (TPOFF64, 18) /* Offset in initial TLS block */ \
+ _ (TLSGD, 19) /* 32 bit signed PC relative offset to two GOT entries for GD symbol */ \
+ _ (TLSLD, 20) /* 32 bit signed PC relative offset to two GOT entries for LD symbol */ \
+ _ (DTPOFF32, 21) /* Offset in TLS block */ \
+ _ (GOTTPOFF, 22) /* 32 bit signed PC relative offset to GOT entry for IE symbol */ \
+ _ (TPOFF32, 23) /* Offset in initial TLS block */
+
+typedef struct
+{
+ elf64_symbol_t *symbols;
+
+ u32 section_index;
+
+ u8 *string_table;
+
+ uword *symbol_by_name;
+} elf_symbol_table_t;
+
+always_inline void
+elf_symbol_table_free (elf_symbol_table_t * s)
+{
+ vec_free (s->symbols);
+ hash_free (s->symbol_by_name);
+}
+
+always_inline u8 *
+elf_symbol_name (elf_symbol_table_t * t, elf64_symbol_t * sym)
+{
+ return vec_elt_at_index (t->string_table, sym->name);
+}
+
+typedef struct
+{
+ elf_relocation_with_addend_t *relocations;
+
+ u32 section_index;
+} elf_relocation_table_t;
+
+always_inline void
+elf_relocation_table_free (elf_relocation_table_t * r)
+{
+ vec_free (r->relocations);
+}
+
+typedef struct
+{
+ elf64_section_header_t header;
+
+ u32 index;
+
+ /* Bitmap of segments containing this section. */
+ uword *segment_index_bitmap;
+
+ /* Aligned size (includes padding not counted in
+    header.file_size). */
+ u64 align_size;
+
+ i64 exec_address_change;
+
+ u8 *contents;
+} elf_section_t;
+
+typedef struct
+{
+ elf64_segment_header_t header;
+
+ /* Sections contained in this segment. */
+ uword *section_index_bitmap;
+
+ u32 index;
+
+ u8 *contents;
+} elf_segment_t;
+
+typedef struct
+{
+ u8 need_byte_swap;
+
+ u8 parsed_symbols;
+
+ char *file_name;
+
+ elf_first_header_t first_header;
+
+ elf64_file_header_t file_header;
+
+ elf_segment_t *segments;
+
+ elf_section_t *sections;
+
+ uword *section_by_name;
+ uword *section_by_start_address;
+
+ elf_symbol_table_t *symbol_tables;
+ elf_relocation_table_t *relocation_tables;
+
+ char *interpreter;
+
+ elf64_dynamic_entry_t *dynamic_entries;
+ u8 *dynamic_string_table;
+ u32 dynamic_string_table_section_index;
+ u32 dynamic_symbol_table_section_index;
+ u32 dynamic_symbol_table_index;
+ u32 dynamic_section_index;
+ u16 *versym;
+ u32 versym_section_index;
+ elf_dynamic_version_need_union_t *verneed;
+ u32 verneed_section_index;
+} elf_main_t;
+
+always_inline void
+elf_main_init (elf_main_t * em)
+{
+ memset (em, 0, sizeof (em[0]));
+}
+
+always_inline void
+elf_main_free (elf_main_t * em)
+{
+ uword i;
+
+ for (i = 0; i < vec_len (em->segments); i++)
+ vec_free (em->segments[i].contents);
+ vec_free (em->segments);
+
+ for (i = 0; i < vec_len (em->sections); i++)
+ vec_free (em->sections[i].contents);
+ vec_free (em->sections);
+
+ hash_free (em->section_by_name);
+ for (i = 0; i < vec_len (em->symbol_tables); i++)
+ elf_symbol_table_free (em->symbol_tables + i);
+ for (i = 0; i < vec_len (em->relocation_tables); i++)
+ elf_relocation_table_free (em->relocation_tables + i);
+
+ vec_free (em->dynamic_entries);
+ vec_free (em->interpreter);
+}
+
+always_inline void
+elf_get_segment_contents (elf_main_t * em, void *data, uword segment_index)
+{
+ elf_segment_t *g = vec_elt_at_index (em->segments, segment_index);
+ if (!g->contents)
+ vec_add (g->contents, data + g->header.file_offset,
+ g->header.memory_size);
+}
+
+always_inline void *
+elf_get_section_contents (elf_main_t * em,
+ uword section_index, uword elt_size)
+{
+ elf_section_t *s;
+ void *result;
+
+ s = vec_elt_at_index (em->sections, section_index);
+
+ result = 0;
+ if (vec_len (s->contents) > 0)
+ {
+ /* Make vector copy of contents with given element size. */
+ result = _vec_resize (result,
+ vec_len (s->contents) / elt_size,
+ vec_len (s->contents),
+ /* header_bytes */ 0,
+ /* align */ 0);
+ clib_memcpy (result, s->contents, vec_len (s->contents));
+ }
+
+ return result;
+}
+
+always_inline void
+elf_set_section_contents (elf_main_t * em,
+ uword section_index,
+ void *new_contents, uword n_content_bytes)
+{
+ elf_section_t *s;
+
+ s = vec_elt_at_index (em->sections, section_index);
+ vec_free (s->contents);
+ vec_add (s->contents, new_contents, n_content_bytes);
+}
+
+always_inline u8 *
+elf_section_name (elf_main_t * em, elf_section_t * s)
+{
+ elf_section_t *es =
+   vec_elt_at_index (em->sections,
+                     em->file_header.section_header_string_table_index);
+ return vec_elt_at_index (es->contents, s->header.name);
+}
+
+always_inline u8
+elf_swap_u8 (elf_main_t * em, u8 x)
+{
+ return x;
+}
+
+always_inline u16
+elf_swap_u16 (elf_main_t * em, u16 x)
+{
+ return em->need_byte_swap ? clib_byte_swap_u16 (x) : x;
+}
+
+always_inline u32
+elf_swap_u32 (elf_main_t * em, u32 x)
+{
+ return em->need_byte_swap ? clib_byte_swap_u32 (x) : x;
+}
+
+always_inline u64
+elf_swap_u64 (elf_main_t * em, u64 x)
+{
+ return em->need_byte_swap ? clib_byte_swap_u64 (x) : x;
+}
+
+#define FORMAT_ELF_MAIN_SYMBOLS (1 << 0)
+#define FORMAT_ELF_MAIN_RELOCATIONS (1 << 1)
+#define FORMAT_ELF_MAIN_DYNAMIC (1 << 2)
+
+format_function_t format_elf_main;
+format_function_t format_elf_symbol;
+
+clib_error_t *elf_read_file (elf_main_t * em, char *file_name);
+clib_error_t *elf_write_file (elf_main_t * em, char *file_name);
+clib_error_t *elf_delete_named_section (elf_main_t * em, char *section_name);
+clib_error_t *elf_parse (elf_main_t * em, void *data, uword data_bytes);
+void elf_parse_symbols (elf_main_t * em);
+
+clib_error_t *elf_get_section_by_name (elf_main_t * em, char *section_name,
+ elf_section_t ** result);
+clib_error_t *elf_get_section_by_start_address (elf_main_t * em,
+ uword start_address,
+ elf_section_t ** result);
+
+void
+elf_create_section_with_contents (elf_main_t * em,
+ char *section_name,
+ elf64_section_header_t * header,
+ void *contents, uword n_content_bytes);
+uword elf_delete_segment_with_type (elf_main_t * em,
+ elf_segment_type_t segment_type);
+void elf_set_dynamic_entries (elf_main_t * em);
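+
+/* Typical read/modify/write flow with this API (illustrative sketch;
+   file names are hypothetical and error handling is elided):
+
+     elf_main_t em;
+     elf_main_init (&em);
+     if (!elf_read_file (&em, "a.out"))
+       {
+         elf_delete_named_section (&em, ".comment");
+         elf_write_file (&em, "a.out.stripped");
+       }
+     elf_main_free (&em);
+*/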
+
+#endif /* included_clib_elf_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/elf_clib.c b/src/vppinfra/elf_clib.c
new file mode 100644
index 00000000..7bb72ee3
--- /dev/null
+++ b/src/vppinfra/elf_clib.c
@@ -0,0 +1,377 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vppinfra/elf_clib.h>
+
+#include <stdlib.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+
+typedef struct
+{
+ char **path;
+} path_search_t;
+
+always_inline void
+path_search_free (path_search_t * p)
+{
+ uword i;
+ for (i = 0; i < vec_len (p->path); i++)
+ vec_free (p->path[i]);
+ vec_free (p->path);
+}
+
+static char **
+split_string (char *string, u8 delimiter)
+{
+ char **result = 0;
+ char *p, *start, *s;
+
+ p = string;
+ while (1)
+ {
+ start = p;
+ while (*p != 0 && *p != delimiter)
+ p++;
+ s = 0;
+ vec_add (s, start, p - start);
+ vec_add1 (s, 0);
+ vec_add1 (result, s);
+ if (*p == 0)
+ break;
+ p++;
+ }
+
+ return result;
+}
+
+static int
+file_exists_and_is_executable (char *dir, char *file)
+{
+ char *path = (char *) format (0, "%s/%s%c", dir, file, 0);
+ struct stat s;
+ uword yes;
+
+ yes = (stat (path, &s) >= 0
+ && S_ISREG (s.st_mode)
+ && 0 != (s.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH)));
+
+ vec_free (path);
+
+ return yes;
+}
+
+static char *
+path_search (char *file)
+{
+ path_search_t ps;
+ uword i;
+ char *result;
+
+ /* Relative or absolute path. */
+ if (file[0] == '.' || file[0] == '/')
+ return file;
+
+ if (getenv ("PATH") == 0)
+ return file;
+
+ ps.path = split_string (getenv ("PATH"), ':');
+
+ for (i = 0; i < vec_len (ps.path); i++)
+ if (file_exists_and_is_executable (ps.path[i], file))
+ break;
+
+ result = 0;
+ if (i < vec_len (ps.path))
+ result = (char *) format (0, "%s/%s%c", ps.path[i], file, 0);
+
+ path_search_free (&ps);
+
+ return result;
+}
+
+static clib_error_t *
+clib_elf_parse_file (clib_elf_main_t * cem,
+ char *file_name, void *link_address)
+{
+ elf_main_t *em;
+ elf_section_t *s;
+ int fd;
+ struct stat fd_stat;
+ uword mmap_length = 0;
+ void *data = 0;
+ clib_error_t *error = 0;
+
+ vec_add2 (cem->elf_mains, em, 1);
+
+ fd = open (file_name, 0);
+ if (fd < 0)
+ {
+ error = clib_error_return_unix (0, "open `%s'", file_name);
+ goto done;
+ }
+
+ if (fstat (fd, &fd_stat) < 0)
+ {
+ error = clib_error_return_unix (0, "fstat `%s'", file_name);
+ goto done;
+ }
+ mmap_length = fd_stat.st_size;
+
+ data = mmap (0, mmap_length, PROT_READ, MAP_SHARED, fd, /* offset */ 0);
+ if (~pointer_to_uword (data) == 0)
+ {
+ error = clib_error_return_unix (0, "mmap `%s'", file_name);
+ goto done;
+ }
+
+ error = elf_parse (em, data, mmap_length);
+ if (error)
+ goto done;
+
+ /* Look for CLIB special sections. */
+ {
+ char *section_name_start = CLIB_ELF_SECTION_ADD_PREFIX ();
+ uword section_name_start_len = strlen (section_name_start);
+
+ vec_foreach (s, em->sections)
+ {
+ u8 *name = elf_section_name (em, s);
+ uword *p;
+ clib_elf_section_t *vs;
+ clib_elf_section_bounds_t *b;
+
+ /* Section name must begin with CLIB_ELF_SECTION key. */
+ if (memcmp (name, section_name_start, section_name_start_len))
+ continue;
+
+ name += section_name_start_len;
+ p = hash_get_mem (cem->section_by_name, name);
+ if (p)
+ vs = vec_elt_at_index (cem->sections, p[0]);
+ else
+ {
+ name = format (0, "%s%c", name, 0);
+ if (!cem->section_by_name)
+ cem->section_by_name = hash_create_string (0, sizeof (uword));
+ hash_set_mem (cem->section_by_name, name, vec_len (cem->sections));
+ vec_add2 (cem->sections, vs, 1);
+ vs->name = name;
+ }
+
+ vec_add2 (vs->bounds, b, 1);
+ b->lo = link_address + s->header.exec_address;
+ b->hi = b->lo + s->header.file_size;
+ }
+ }
+
+ /* Parse symbols for this file. */
+ {
+ elf_symbol_table_t *t;
+ elf64_symbol_t *s;
+
+ elf_parse_symbols (em);
+ vec_foreach (t, em->symbol_tables)
+ {
+ vec_foreach (s, t->symbols)
+ {
+ s->value += pointer_to_uword (link_address);
+ }
+ }
+ }
+
+ /* No need to keep section contents around. */
+ {
+ elf_section_t *s;
+ vec_foreach (s, em->sections)
+ {
+ if (s->header.type != ELF_SECTION_STRING_TABLE)
+ vec_free (s->contents);
+ }
+ }
+
+done:
+ if (error)
+ elf_main_free (em);
+ if (fd >= 0)
+ close (fd);
+ if (data)
+ munmap (data, mmap_length);
+ return error;
+}
+
+#define __USE_GNU
+#include <link.h>
+
+static int
+add_section (struct dl_phdr_info *info, size_t size, void *opaque)
+{
+ clib_elf_main_t *cem = opaque;
+ clib_error_t *error;
+ char *name = (char *) info->dlpi_name;
+ void *addr = (void *) info->dlpi_addr;
+ uword is_main;
+
+ is_main = strlen (name) == 0;
+ if (is_main)
+ {
+ static int done;
+
+ /* Only do main program once. */
+ if (done++)
+ return 0;
+
+ name = path_search (cem->exec_path);
+ if (!name)
+ {
+ clib_error ("failed to find %s on PATH", cem->exec_path);
+ return 0;
+ }
+ addr = 0;
+ }
+
+ error = clib_elf_parse_file (cem, name, addr);
+ if (error)
+ clib_error_report (error);
+
+ if (is_main && name != cem->exec_path)
+ vec_free (name);
+
+ return 0;
+}
+
+static clib_elf_main_t clib_elf_main;
+
+void
+clib_elf_main_init (char *exec_path)
+{
+ clib_elf_main_t *cem = &clib_elf_main;
+
+ cem->exec_path = exec_path;
+
+ dl_iterate_phdr (add_section, cem);
+}
+
+clib_elf_section_bounds_t *
+clib_elf_get_section_bounds (char *name)
+{
+ clib_elf_main_t *em = &clib_elf_main;
+ uword *p = hash_get (em->section_by_name, name);
+ return p ? vec_elt_at_index (em->sections, p[0])->bounds : 0;
+}
+
+static uword
+symbol_by_address_or_name (char *by_name,
+ uword by_address, clib_elf_symbol_t * s)
+{
+ clib_elf_main_t *cem = &clib_elf_main;
+ elf_main_t *em;
+
+ vec_foreach (em, cem->elf_mains)
+ {
+ elf_symbol_table_t *t;
+ s->elf_main_index = em - cem->elf_mains;
+ vec_foreach (t, em->symbol_tables)
+ {
+ s->symbol_table_index = t - em->symbol_tables;
+ if (by_name)
+ {
+ uword *p = hash_get (t->symbol_by_name, by_name);
+ if (p)
+ {
+ s->symbol = vec_elt (t->symbols, p[0]);
+ return 1;
+ }
+ }
+ else
+ {
+ elf64_symbol_t *x;
+ /* FIXME linear search. */
+ vec_foreach (x, t->symbols)
+ {
+ if (by_address >= x->value && by_address < x->value + x->size)
+ {
+ s->symbol = x[0];
+ return 1;
+ }
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+uword
+clib_elf_symbol_by_name (char *by_name, clib_elf_symbol_t * s)
+{
+ return symbol_by_address_or_name (by_name, /* by_address */ 0, s);
+}
+
+uword
+clib_elf_symbol_by_address (uword by_address, clib_elf_symbol_t * s)
+{
+ return symbol_by_address_or_name ( /* by_name */ 0, by_address, s);
+}
+
+u8 *
+format_clib_elf_symbol (u8 * s, va_list * args)
+{
+ clib_elf_main_t *cem = &clib_elf_main;
+ clib_elf_symbol_t *sym = va_arg (*args, clib_elf_symbol_t *);
+ elf_main_t *em;
+ elf_symbol_table_t *t;
+
+ if (!sym)
+ /* Just print table headings. */
+ return format (s, "%U", format_elf_symbol, 0, 0, 0);
+
+ else
+ {
+ em = vec_elt_at_index (cem->elf_mains, sym->elf_main_index);
+ t = vec_elt_at_index (em->symbol_tables, sym->symbol_table_index);
+ return format (s, "%U", format_elf_symbol, em, t, &sym->symbol);
+ }
+}
+
+u8 *
+format_clib_elf_symbol_with_address (u8 * s, va_list * args)
+{
+ uword address = va_arg (*args, uword);
+ clib_elf_main_t *cem = &clib_elf_main;
+ clib_elf_symbol_t sym;
+ elf_main_t *em;
+ elf_symbol_table_t *t;
+
+ if (clib_elf_symbol_by_address (address, &sym))
+ {
+ em = vec_elt_at_index (cem->elf_mains, sym.elf_main_index);
+ t = vec_elt_at_index (em->symbol_tables, sym.symbol_table_index);
+ s = format (s, "%s + 0x%wx",
+ elf_symbol_name (t, &sym.symbol),
+ address - sym.symbol.value);
+ }
+ else
+ s = format (s, "0x%wx", address);
+
+ return s;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/elf_clib.h b/src/vppinfra/elf_clib.h
new file mode 100644
index 00000000..25b928c2
--- /dev/null
+++ b/src/vppinfra/elf_clib.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2012 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_clib_elf_self_h
+#define included_clib_elf_self_h
+
+#include <vppinfra/elf.h>
+#include <vppinfra/hash.h>
+
+#define CLIB_ELF_SECTION_DATA_ALIGN 32
+
+#define CLIB_ELF_SECTION_ADD_PREFIX(n) "clib_elf_section_" n
+
+/* The 'used' attribute ensures that static registrations work even if
+   the variable is not otherwise referenced. */
+#define CLIB_ELF_SECTION(SECTION) \
+ __attribute__ ((used, \
+ aligned (CLIB_ELF_SECTION_DATA_ALIGN), \
+ section (CLIB_ELF_SECTION_ADD_PREFIX (SECTION))))
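+
+/* Usage sketch (type and section names are hypothetical): place static
+   registrations in a named section, then, after clib_elf_main_init
+   (argv[0]), walk every instance across the executable and its shared
+   objects using clib_elf_section_data_next (defined below):
+
+     static my_reg_t reg CLIB_ELF_SECTION ("my_regs") = { ... };
+
+     clib_elf_section_bounds_t *b, *bounds;
+     bounds = clib_elf_get_section_bounds ("my_regs");
+     vec_foreach (b, bounds)
+       {
+         my_reg_t *r;
+         for (r = b->lo; (void *) r < b->hi;
+              r = clib_elf_section_data_next (r, 0))
+           ... use r ...;
+       }
+
+   The prefix added by CLIB_ELF_SECTION_ADD_PREFIX is stripped before
+   sections are hashed, so bounds are looked up by the bare name. */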
+
+/* Given a pointer to the previous data A, get the next pointer. EXTRA
+   gives extra space beyond A + 1 used in the object. */
+#define clib_elf_section_data_next(a,extra) \
+ uword_to_pointer (round_pow2 (pointer_to_uword (a + 1) + (extra), \
+ CLIB_ELF_SECTION_DATA_ALIGN), \
+ void *)
+
+typedef struct
+{
+ void *lo, *hi;
+} clib_elf_section_bounds_t;
+
+typedef struct
+{
+ /* Vector of bounds for this section. Multiple shared objects may
+    have instances of the same section. */
+ clib_elf_section_bounds_t *bounds;
+
+ /* Name of ELF section (e.g. .text). */
+ u8 *name;
+} clib_elf_section_t;
+
+typedef struct
+{
+ /* Vector of sections. */
+ clib_elf_section_t *sections;
+
+ /* Hash map of name to section index. */
+ uword *section_by_name;
+
+ /* Unix path that we were exec()ed with. */
+ char *exec_path;
+
+ elf_main_t *elf_mains;
+} clib_elf_main_t;
+
+always_inline void
+clib_elf_main_free (clib_elf_main_t * m)
+{
+ clib_elf_section_t *s;
+ vec_foreach (s, m->sections)
+ {
+ vec_free (s->bounds);
+ vec_free (s->name);
+ }
+ vec_free (m->sections);
+ hash_free (m->section_by_name);
+
+ {
+ elf_main_t *em;
+ vec_foreach (em, m->elf_mains)
+ {
+ elf_main_free (em);
+ }
+ vec_free (m->elf_mains);
+ }
+}
+
+/* Call with exec_path equal to argv[0] from C main. */
+void clib_elf_main_init (char *exec_path);
+
+clib_elf_section_bounds_t *clib_elf_get_section_bounds (char *name);
+
+typedef struct
+{
+ /* The symbol. */
+ elf64_symbol_t symbol;
+
+ /* elf_main_t where symbol came from. */
+ u32 elf_main_index;
+
+ /* Symbol table in elf_main_t where this symbol came from. */
+ u32 symbol_table_index;
+} clib_elf_symbol_t;
+
+/* Returns 1 if found; otherwise zero. */
+uword clib_elf_symbol_by_name (char *name, clib_elf_symbol_t * result);
+uword clib_elf_symbol_by_address (uword address, clib_elf_symbol_t * result);
+
+format_function_t format_clib_elf_symbol, format_clib_elf_symbol_with_address;
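+
+/* Lookup sketch: after clib_elf_main_init (argv[0]), an address can be
+   resolved back to a symbol for display (the function name is
+   illustrative):
+
+     clib_elf_symbol_t sym;
+     if (clib_elf_symbol_by_address ((uword) some_function, &sym))
+       fformat (stdout, "%U\n", format_clib_elf_symbol, &sym);
+*/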
+
+#endif /* included_clib_elf_self_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/elog.c b/src/vppinfra/elog.c
new file mode 100644
index 00000000..182ca127
--- /dev/null
+++ b/src/vppinfra/elog.c
@@ -0,0 +1,1113 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005,2009 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/elog.h>
+#include <vppinfra/cache.h>
+#include <vppinfra/error.h>
+#include <vppinfra/format.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/math.h>
+
+static inline void
+elog_lock (elog_main_t * em)
+{
+ if (PREDICT_FALSE (em->lock != 0))
+ while (__sync_lock_test_and_set (em->lock, 1))
+ ;
+}
+
+static inline void
+elog_unlock (elog_main_t * em)
+{
+ if (PREDICT_FALSE (em->lock != 0))
+ {
+ CLIB_MEMORY_BARRIER ();
+ *em->lock = 0;
+ }
+}
+
+/* Non-inline version. */
+void *
+elog_event_data (elog_main_t * em,
+ elog_event_type_t * type, elog_track_t * track, u64 cpu_time)
+{
+ return elog_event_data_inline (em, type, track, cpu_time);
+}
+
+static void
+new_event_type (elog_main_t * em, uword i)
+{
+ elog_event_type_t *t = vec_elt_at_index (em->event_types, i);
+
+ if (!em->event_type_by_format)
+ em->event_type_by_format =
+ hash_create_vec ( /* size */ 0, sizeof (u8), sizeof (uword));
+
+ t->type_index_plus_one = i + 1;
+ hash_set_mem (em->event_type_by_format, t->format, i);
+}
+
+static uword
+find_or_create_type (elog_main_t * em, elog_event_type_t * t)
+{
+ uword *p = hash_get_mem (em->event_type_by_format, t->format);
+ uword i;
+
+ if (p)
+ i = p[0];
+ else
+ {
+ i = vec_len (em->event_types);
+ vec_add1 (em->event_types, t[0]);
+ new_event_type (em, i);
+ }
+
+ return i;
+}
+
+/* External function to register types. */
+word
+elog_event_type_register (elog_main_t * em, elog_event_type_t * t)
+{
+ elog_event_type_t *static_type = t;
+ word l;
+
+ elog_lock (em);
+
+ /* Already registered (possibly by a simultaneous attempt);
+    return the existing index. */
+ if (t->type_index_plus_one > 0)
+ {
+ elog_unlock (em);
+ return t->type_index_plus_one - 1;
+ }
+
+ l = vec_len (em->event_types);
+
+ t->type_index_plus_one = 1 + l;
+
+ ASSERT (t->format);
+
+ /* If format args are not specified, try to be smart about providing
+    defaults so that most of the time the user does not have to
+    specify them. */
+ if (!t->format_args)
+ {
+ uword i, l;
+ char *this_arg;
+
+ l = strlen (t->format);
+ for (i = 0; i < l; i++)
+ {
+ if (t->format[i] != '%')
+ continue;
+ if (i + 1 >= l)
+ continue;
+ if (t->format[i + 1] == '%') /* %% */
+ continue;
+
+ switch (t->format[i + 1])
+ {
+ default:
+ case 'd':
+ case 'x':
+ case 'u':
+ this_arg = "i4"; /* size of u32 */
+ break;
+ case 'f':
+ this_arg = "f8"; /* defaults to f64 */
+ break;
+ case 's':
+ this_arg = "s0"; /* defaults to null terminated string. */
+ break;
+ }
+
+ t->format_args =
+ (char *) format ((u8 *) t->format_args, "%s", this_arg);
+ }
+
+ /* Null terminate. */
+ vec_add1 (t->format_args, 0);
+ }
+
+ vec_add1 (em->event_types, t[0]);
+
+ t = em->event_types + l;
+
+ /* Make copies of strings for hashing etc. */
+ if (t->function)
+ t->format = (char *) format (0, "%s %s%c", t->function, t->format, 0);
+ else
+ t->format = (char *) format (0, "%s%c", t->format, 0);
+
+ t->format_args = (char *) format (0, "%s%c", t->format_args, 0);
+
+ /* Construct string table. */
+ {
+ uword i;
+ t->n_enum_strings = static_type->n_enum_strings;
+ for (i = 0; i < t->n_enum_strings; i++)
+ {
+ if (!static_type->enum_strings[i])
+ static_type->enum_strings[i] = "MISSING";
+ vec_add1 (t->enum_strings_vector,
+ (char *) format (0, "%s%c", static_type->enum_strings[i],
+ 0));
+ }
+ }
+
+ new_event_type (em, l);
+ elog_unlock (em);
+
+ return l;
+}
+
+word
+elog_track_register (elog_main_t * em, elog_track_t * t)
+{
+ word l;
+
+ elog_lock (em);
+
+ l = vec_len (em->tracks);
+
+ t->track_index_plus_one = 1 + l;
+
+ ASSERT (t->name);
+
+ vec_add1 (em->tracks, t[0]);
+
+ t = em->tracks + l;
+
+ t->name = (char *) format (0, "%s%c", t->name, 0);
+
+ elog_unlock (em);
+
+ return l;
+}
+
+static uword
+parse_2digit_decimal (char *p, uword * number)
+{
+ uword i = 0;
+ u8 digits[2];
+
+ digits[0] = digits[1] = 0;
+ while (p[i] >= '0' && p[i] <= '9')
+ {
+ if (i >= 2)
+ break;
+ digits[i] = p[i] - '0';
+ i++;
+ }
+
+ if (i >= 1 && i <= 2)
+ {
+ if (i == 1)
+ *number = digits[0];
+ else
+ *number = 10 * digits[0] + digits[1];
+ return i;
+ }
+ else
+ return 0;
+}
+
+static u8 *
+fixed_format (u8 * s, char *fmt, char *result, uword * result_len)
+{
+ char *f = fmt;
+ char *percent;
+ uword l = 0;
+
+ while (1)
+ {
+ if (f[0] == 0)
+ break;
+ if (f[0] == '%' && f[1] != '%')
+ break;
+ f++;
+ }
+ if (f > fmt)
+ vec_add (s, fmt, f - fmt);
+
+ if (f[0] != '%')
+ goto done;
+
+ /* Skip percent. */
+ percent = f++;
+
+ /* Skip possible +-= justification. */
+ f += f[0] == '+' || f[0] == '-' || f[0] == '=';
+
+ /* Skip possible X.Y width. */
+ while ((f[0] >= '0' && f[0] <= '9') || f[0] == '.')
+ f++;
+
+ /* Skip wlL as in e.g. %Ld. */
+ f += f[0] == 'w' || f[0] == 'l' || f[0] == 'L';
+
+ /* Finally skip format letter. */
+ f += f[0] != 0;
+
+ ASSERT (*result_len > f - percent);
+ l = clib_min (f - percent, *result_len - 1);
+ clib_memcpy (result, percent, l);
+ result[l] = 0;
+
+done:
+ *result_len = f - fmt;
+ return s;
+}
+
+u8 *
+format_elog_event (u8 * s, va_list * va)
+{
+ elog_main_t *em = va_arg (*va, elog_main_t *);
+ elog_event_t *e = va_arg (*va, elog_event_t *);
+ elog_event_type_t *t;
+ char *a, *f;
+ void *d = (u8 *) e->data;
+ char arg_format[64];
+
+ t = vec_elt_at_index (em->event_types, e->type);
+
+ f = t->format;
+ a = t->format_args;
+ while (1)
+ {
+ uword n_bytes = 0, n_digits, f_bytes = 0;
+
+ f_bytes = sizeof (arg_format);
+ s = fixed_format (s, f, arg_format, &f_bytes);
+ f += f_bytes;
+
+ if (a == 0 || a[0] == 0)
+ {
+ /* Format must also be at end. */
+ ASSERT (f[0] == 0);
+ break;
+ }
+
+ /* Don't go past end of event data. */
+ ASSERT (d < (void *) (e->data + sizeof (e->data)));
+
+ n_digits = parse_2digit_decimal (a + 1, &n_bytes);
+ switch (a[0])
+ {
+ case 'i':
+ case 't':
+ case 'T':
+ {
+ u32 i = 0;
+ u64 l = 0;
+
+ if (n_bytes == 1)
+ i = ((u8 *) d)[0];
+ else if (n_bytes == 2)
+ i = clib_mem_unaligned (d, u16);
+ else if (n_bytes == 4)
+ i = clib_mem_unaligned (d, u32);
+ else if (n_bytes == 8)
+ l = clib_mem_unaligned (d, u64);
+ else
+ ASSERT (0);
+ if (a[0] == 't')
+ {
+ char *e =
+ vec_elt (t->enum_strings_vector, n_bytes == 8 ? l : i);
+ s = format (s, arg_format, e);
+ }
+ else if (a[0] == 'T')
+ {
+ char *e =
+ vec_elt_at_index (em->string_table, n_bytes == 8 ? l : i);
+ s = format (s, arg_format, e);
+ }
+ else if (n_bytes == 8)
+ s = format (s, arg_format, l);
+ else
+ s = format (s, arg_format, i);
+ }
+ break;
+
+ case 'f':
+ {
+ f64 x = 0;
+ if (n_bytes == 4)
+ x = clib_mem_unaligned (d, f32);
+ else if (n_bytes == 8)
+ x = clib_mem_unaligned (d, f64);
+ else
+ ASSERT (0);
+ s = format (s, arg_format, x);
+ }
+ break;
+
+ case 's':
+ s = format (s, arg_format, d);
+ if (n_bytes == 0)
+ n_bytes = strlen (d) + 1;
+ break;
+
+ default:
+ ASSERT (0);
+ break;
+ }
+
+ ASSERT (n_digits > 0 && n_digits <= 2);
+ a += 1 + n_digits;
+ d += n_bytes;
+ }
+
+ return s;
+}
+
+u8 *
+format_elog_track (u8 * s, va_list * va)
+{
+ elog_main_t *em = va_arg (*va, elog_main_t *);
+ elog_event_t *e = va_arg (*va, elog_event_t *);
+ elog_track_t *t = vec_elt_at_index (em->tracks, e->track);
+ return format (s, "%s", t->name);
+}
+
+void
+elog_time_now (elog_time_stamp_t * et)
+{
+ u64 cpu_time_now, os_time_now_nsec;
+ struct timespec ts;
+
+#ifdef CLIB_UNIX
+ {
+#include <sys/syscall.h>
+ syscall (SYS_clock_gettime, CLOCK_REALTIME, &ts);
+ cpu_time_now = clib_cpu_time_now ();
+ /* Subtract seconds up to 3/30/2017 to retain precision */
+ os_time_now_nsec = 1e9 * (ts.tv_sec - 1490885108) + ts.tv_nsec;
+ }
+#else
+ cpu_time_now = clib_cpu_time_now ();
+ os_time_now_nsec = 0;
+#endif
+
+ et->cpu = cpu_time_now;
+ et->os_nsec = os_time_now_nsec;
+}
+
+always_inline i64
+elog_time_stamp_diff_os_nsec (elog_time_stamp_t * t1, elog_time_stamp_t * t2)
+{
+ return (i64) t1->os_nsec - (i64) t2->os_nsec;
+}
+
+always_inline i64
+elog_time_stamp_diff_cpu (elog_time_stamp_t * t1, elog_time_stamp_t * t2)
+{
+ return (i64) t1->cpu - (i64) t2->cpu;
+}
+
+always_inline f64
+elog_nsec_per_clock (elog_main_t * em)
+{
+ return ((f64) elog_time_stamp_diff_os_nsec (&em->serialize_time,
+ &em->init_time)
+ / (f64) elog_time_stamp_diff_cpu (&em->serialize_time,
+ &em->init_time));
+}
+
+void
+elog_alloc (elog_main_t * em, u32 n_events)
+{
+ if (em->event_ring)
+ vec_free (em->event_ring);
+
+ /* Ring size must be a power of 2. */
+ em->event_ring_size = n_events = max_pow2 (n_events);
+
+ /* Leave an empty event at the end so we can always speculatively
+    write an event there (possibly a long-form event). */
+ vec_resize_aligned (em->event_ring, n_events, CLIB_CACHE_LINE_BYTES);
+}
+
+void
+elog_init (elog_main_t * em, u32 n_events)
+{
+ memset (em, 0, sizeof (em[0]));
+
+ em->lock = 0;
+
+ if (n_events > 0)
+ elog_alloc (em, n_events);
+
+ clib_time_init (&em->cpu_timer);
+
+ em->n_total_events_disable_limit = ~0;
+
+ /* Make track 0. */
+ em->default_track.name = "default";
+ elog_track_register (em, &em->default_track);
+
+ elog_time_now (&em->init_time);
+}
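+
+/* Usage sketch (event format and datum are illustrative; the type is
+   registered on first use by elog_event_data_inline):
+
+     elog_main_t em;
+     static elog_event_type_t t =
+       { .format = "counter: %d", .format_args = "i4" };
+     u32 *data;
+
+     elog_init (&em, 4096);
+     data = elog_event_data (&em, &t, &em.default_track,
+                             clib_cpu_time_now ());
+     data[0] = 42;
+*/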
+
+/* Returns number of events in ring and start index. */
+static uword
+elog_event_range (elog_main_t * em, uword * lo)
+{
+ uword l = em->event_ring_size;
+ u64 i = em->n_total_events;
+
+ /* Ring never wrapped? */
+ if (i <= (u64) l)
+ {
+ if (lo)
+ *lo = 0;
+ return i;
+ }
+ else
+ {
+ if (lo)
+ *lo = i & (l - 1);
+ return l;
+ }
+}
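+
+/* Worked example: with event_ring_size = 8 and n_total_events = 11 the
+   ring has wrapped, so *lo = 11 & 7 = 3 and 8 events are valid,
+   occupying ring indices 3,4,...,7,0,1,2 in chronological order. */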
+
+elog_event_t *
+elog_peek_events (elog_main_t * em)
+{
+ elog_event_t *e, *f, *es = 0;
+ uword i, j, n;
+
+ n = elog_event_range (em, &j);
+ for (i = 0; i < n; i++)
+ {
+ vec_add2 (es, e, 1);
+ f = vec_elt_at_index (em->event_ring, j);
+ e[0] = f[0];
+
+ /* Convert absolute time from cycles to seconds from start. */
+ e->time =
+ (e->time_cycles -
+ em->init_time.cpu) * em->cpu_timer.seconds_per_clock;
+
+ j = (j + 1) & (em->event_ring_size - 1);
+ }
+
+ return es;
+}
+
+/* Add a formatted string to the string table. */
+u32
+elog_string (elog_main_t * em, char *fmt, ...)
+{
+ u32 offset;
+ va_list va;
+
+ va_start (va, fmt);
+ offset = vec_len (em->string_table);
+ em->string_table = (char *) va_format ((u8 *) em->string_table, fmt, &va);
+ va_end (va);
+
+ /* Null terminate string if it is not already. */
+ if (vec_end (em->string_table)[-1] != 0)
+ vec_add1 (em->string_table, 0);
+
+ return offset;
+}
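+
+/* Sketch of the 'T' format-arg convention (string contents are
+   illustrative): an event type whose format_args contain "T4" stores a
+   u32 string-table offset as its 4-byte datum, and elog_string
+   supplies that offset:
+
+     u32 offset = elog_string (&em, "eth0");
+     ... store offset in the event's data; format_elog_event's 'T'
+     case above prints the string found at that offset ...
+*/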
+
+elog_event_t *
+elog_get_events (elog_main_t * em)
+{
+ if (!em->events)
+ em->events = elog_peek_events (em);
+ return em->events;
+}
+
+static void
+maybe_fix_string_table_offset (elog_event_t * e,
+ elog_event_type_t * t, u32 offset)
+{
+ void *d = (u8 *) e->data;
+ char *a;
+
+ if (offset == 0)
+ return;
+
+ a = t->format_args;
+
+ while (1)
+ {
+ uword n_bytes = 0, n_digits;
+
+ if (a[0] == 0)
+ break;
+
+ /* Don't go past end of event data. */
+ ASSERT (d < (void *) (e->data + sizeof (e->data)));
+
+ n_digits = parse_2digit_decimal (a + 1, &n_bytes);
+ switch (a[0])
+ {
+ case 'T':
+ ASSERT (n_bytes == 4);
+ clib_mem_unaligned (d, u32) += offset;
+ break;
+
+ case 'i':
+ case 't':
+ case 'f':
+ case 's':
+ break;
+
+ default:
+ ASSERT (0);
+ break;
+ }
+
+ ASSERT (n_digits > 0 && n_digits <= 2);
+ a += 1 + n_digits;
+ d += n_bytes;
+ }
+}
+
+static int
+elog_cmp (void *a1, void *a2)
+{
+ elog_event_t *e1 = a1;
+ elog_event_t *e2 = a2;
+
+ if (e1->time < e2->time)
+ return -1;
+
+ if (e1->time > e2->time)
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Merge two event logs. Complicated and cranky.
+ */
+void
+elog_merge (elog_main_t * dst, u8 * dst_tag, elog_main_t * src, u8 * src_tag,
+ f64 align_tweak)
+{
+ elog_event_t *e;
+ uword l;
+ u32 string_table_offset_for_src_events;
+ u32 track_offset_for_src_tracks;
+ elog_track_t newt;
+ int i;
+
+ memset (&newt, 0, sizeof (newt));
+
+ /* Acquire src and dst events */
+ elog_get_events (src);
+ elog_get_events (dst);
+
+ string_table_offset_for_src_events = vec_len (dst->string_table);
+ vec_append (dst->string_table, src->string_table);
+
+ l = vec_len (dst->events);
+ vec_append (dst->events, src->events);
+
+ /* Prepend the supplied tag (if any) to all dst track names */
+ if (dst_tag)
+ {
+ for (i = 0; i < vec_len (dst->tracks); i++)
+ {
+ elog_track_t *t = vec_elt_at_index (dst->tracks, i);
+ char *new_name;
+
+ new_name = (char *) format (0, "%s:%s%c", dst_tag, t->name, 0);
+ vec_free (t->name);
+ t->name = new_name;
+ }
+ }
+
+ /*
+ * Remember where we started allocating new tracks while merging
+ */
+ track_offset_for_src_tracks = vec_len (dst->tracks);
+
+ /* Copy / tag source tracks */
+ for (i = 0; i < vec_len (src->tracks); i++)
+ {
+ elog_track_t *t = vec_elt_at_index (src->tracks, i);
+ if (src_tag)
+ newt.name = (char *) format (0, "%s:%s%c", src_tag, t->name, 0);
+ else
+ newt.name = (char *) format (0, "%s%c", t->name, 0);
+ (void) elog_track_register (dst, &newt);
+ vec_free (newt.name);
+ }
+
+ /* Across all (copied) src events... */
+ for (e = dst->events + l; e < vec_end (dst->events); e++)
+ {
+ elog_event_type_t *t = vec_elt_at_index (src->event_types, e->type);
+
+ /* Remap type from src -> dst. */
+ e->type = find_or_create_type (dst, t);
+
+ /* Remap string table offsets for 'T' format args */
+ maybe_fix_string_table_offset (e, t,
+ string_table_offset_for_src_events);
+
+ /* Remap track */
+ e->track += track_offset_for_src_tracks;
+ }
+
+ /* Adjust event times for relative starting times of event streams. */
+ {
+ f64 dt_event, dt_os_nsec, dt_clock_nsec;
+
+ /* Set clock parameters if dst was not generated by unserialize. */
+ if (dst->serialize_time.cpu == 0)
+ {
+ dst->init_time = src->init_time;
+ dst->serialize_time = src->serialize_time;
+ dst->nsec_per_cpu_clock = src->nsec_per_cpu_clock;
+ }
+
+ dt_os_nsec =
+ elog_time_stamp_diff_os_nsec (&src->init_time, &dst->init_time);
+
+ dt_event = dt_os_nsec;
+ dt_clock_nsec =
+ (elog_time_stamp_diff_cpu (&src->init_time, &dst->init_time) * .5 *
+ (dst->nsec_per_cpu_clock + src->nsec_per_cpu_clock));
+
+ /*
+ * Heuristic to see if src/dst came from same time source.
+ * If frequencies are "the same" and os clock and cpu clock agree
+ * to within 100e-9 secs about time difference between src/dst
+ * init_time, then we use cpu clock. Otherwise we use OS clock.
+ *
+ * When merging event logs from different systems, time paradoxes
+ * at the O(1ms) level are to be expected. Hence, the "align_tweak"
+ * parameter. If two events logged on different processors are known
+ * to occur in a specific order - and with a reasonably-estimated
+ * interval - supply a non-zero "align_tweak" parameter
+ */
+ if (fabs (src->nsec_per_cpu_clock - dst->nsec_per_cpu_clock) < 1e-2
+ && fabs (dt_os_nsec - dt_clock_nsec) < 100)
+ dt_event = dt_clock_nsec;
+
+ /* Convert to seconds. */
+ dt_event *= 1e-9;
+
+ /*
+ * Move the earlier set of events later, to avoid creating
+   * events which precede the Big Bang (aka have negative timestamps).
+ *
+   * Not drawn to any scale, we have something like the following picture:
+ *
+ * DST capture start point
+ * ^
+ * +--- dt_event --+
+ * v
+ * SRC capture start point
+ *
+ * In this case dt_event is positive, src started after dst,
+ * to put src events onto a common timebase we have to move them
+ * forward in time. Naturally, the opposite case is
+ * possible, too: dt_event will be negative, and so we have to
+   * move dst events forward in time by |dt_event|.
+ * In both cases, we add align_tweak.
+ */
+ if (dt_event > 0)
+ {
+ /* Src started after dst. */
+ for (e = dst->events + l; e < vec_end (dst->events); e++)
+ e->time += dt_event + align_tweak;
+ }
+ else
+ {
+ /* Dst started after src. */
+ dt_event = -dt_event;
+ for (e = dst->events + 0; e < dst->events + l; e++)
+ e->time += dt_event + align_tweak;
+ }
+ }
+
+ /* Sort events by increasing time. */
+ vec_sort_with_function (dst->events, elog_cmp);
+
+ dst->n_total_events = vec_len (dst->events);
+
+ /* Recreate the event ring or the results won't serialize */
+ {
+ int i;
+
+ ASSERT (dst->cpu_timer.seconds_per_clock);
+
+ elog_alloc (dst, vec_len (dst->events));
+ for (i = 0; i < vec_len (dst->events); i++)
+ {
+ elog_event_t *es, *ed;
+
+ es = dst->events + i;
+ ed = dst->event_ring + i;
+
+ ed[0] = es[0];
+ }
+ }
+}
+
+static void
+serialize_elog_event (serialize_main_t * m, va_list * va)
+{
+ elog_main_t *em = va_arg (*va, elog_main_t *);
+ elog_event_t *e = va_arg (*va, elog_event_t *);
+ elog_event_type_t *t = vec_elt_at_index (em->event_types, e->type);
+ u8 *d = e->data;
+ u8 *p = (u8 *) t->format_args;
+
+ serialize_integer (m, e->type, sizeof (e->type));
+ serialize_integer (m, e->track, sizeof (e->track));
+ serialize (m, serialize_f64, e->time);
+
+ while (*p)
+ {
+ uword n_digits, n_bytes = 0;
+
+ n_digits = parse_2digit_decimal ((char *) p + 1, &n_bytes);
+
+ switch (p[0])
+ {
+ case 'i':
+ case 't':
+ case 'T':
+ if (n_bytes == 1)
+ serialize_integer (m, d[0], sizeof (u8));
+ else if (n_bytes == 2)
+ serialize_integer (m, clib_mem_unaligned (d, u16), sizeof (u16));
+ else if (n_bytes == 4)
+ serialize_integer (m, clib_mem_unaligned (d, u32), sizeof (u32));
+ else if (n_bytes == 8)
+ serialize (m, serialize_64, clib_mem_unaligned (d, u64));
+ else
+ ASSERT (0);
+ break;
+
+ case 's':
+ serialize_cstring (m, (char *) d);
+ if (n_bytes == 0)
+ n_bytes = strlen ((char *) d) + 1;
+ break;
+
+ case 'f':
+ if (n_bytes == 4)
+ serialize (m, serialize_f32, clib_mem_unaligned (d, f32));
+ else if (n_bytes == 8)
+ serialize (m, serialize_f64, clib_mem_unaligned (d, f64));
+ else
+ ASSERT (0);
+ break;
+
+ default:
+ ASSERT (0);
+ break;
+ }
+
+ p += 1 + n_digits;
+ d += n_bytes;
+ }
+}
+
+static void
+unserialize_elog_event (serialize_main_t * m, va_list * va)
+{
+ elog_main_t *em = va_arg (*va, elog_main_t *);
+ elog_event_t *e = va_arg (*va, elog_event_t *);
+ elog_event_type_t *t;
+ u8 *p, *d;
+
+ {
+ u16 tmp[2];
+
+ unserialize_integer (m, &tmp[0], sizeof (e->type));
+ unserialize_integer (m, &tmp[1], sizeof (e->track));
+
+ e->type = tmp[0];
+ e->track = tmp[1];
+
+ /* Make sure it fits. */
+ ASSERT (e->type == tmp[0]);
+ ASSERT (e->track == tmp[1]);
+ }
+
+ t = vec_elt_at_index (em->event_types, e->type);
+
+ unserialize (m, unserialize_f64, &e->time);
+
+ d = e->data;
+ p = (u8 *) t->format_args;
+
+ while (p && *p)
+ {
+ uword n_digits, n_bytes = 0;
+ u32 tmp;
+
+ n_digits = parse_2digit_decimal ((char *) p + 1, &n_bytes);
+
+ switch (p[0])
+ {
+ case 'i':
+ case 't':
+ case 'T':
+ if (n_bytes == 1)
+ {
+ unserialize_integer (m, &tmp, sizeof (u8));
+ d[0] = tmp;
+ }
+ else if (n_bytes == 2)
+ {
+ unserialize_integer (m, &tmp, sizeof (u16));
+ clib_mem_unaligned (d, u16) = tmp;
+ }
+ else if (n_bytes == 4)
+ {
+ unserialize_integer (m, &tmp, sizeof (u32));
+ clib_mem_unaligned (d, u32) = tmp;
+ }
+ else if (n_bytes == 8)
+ {
+ u64 x;
+ unserialize (m, unserialize_64, &x);
+ clib_mem_unaligned (d, u64) = x;
+ }
+ else
+ ASSERT (0);
+ break;
+
+ case 's':
+ {
+ char *t;
+ unserialize_cstring (m, &t);
+ if (n_bytes == 0)
+ n_bytes = strlen (t) + 1;
+ clib_memcpy (d, t, clib_min (n_bytes, vec_len (t)));
+ vec_free (t);
+ break;
+ }
+
+ case 'f':
+ if (n_bytes == 4)
+ {
+ f32 x;
+ unserialize (m, unserialize_f32, &x);
+ clib_mem_unaligned (d, f32) = x;
+ }
+ else if (n_bytes == 8)
+ {
+ f64 x;
+ unserialize (m, unserialize_f64, &x);
+ clib_mem_unaligned (d, f64) = x;
+ }
+ else
+ ASSERT (0);
+ break;
+
+ default:
+ ASSERT (0);
+ break;
+ }
+
+ p += 1 + n_digits;
+ d += n_bytes;
+ }
+}
+
+static void
+serialize_elog_event_type (serialize_main_t * m, va_list * va)
+{
+ elog_event_type_t *t = va_arg (*va, elog_event_type_t *);
+ int n = va_arg (*va, int);
+ int i, j;
+ for (i = 0; i < n; i++)
+ {
+ serialize_cstring (m, t[i].format);
+ serialize_cstring (m, t[i].format_args);
+ serialize_integer (m, t[i].type_index_plus_one,
+ sizeof (t->type_index_plus_one));
+ serialize_integer (m, t[i].n_enum_strings,
+ sizeof (t[i].n_enum_strings));
+ for (j = 0; j < t[i].n_enum_strings; j++)
+ serialize_cstring (m, t[i].enum_strings_vector[j]);
+ }
+}
+
+static void
+unserialize_elog_event_type (serialize_main_t * m, va_list * va)
+{
+ elog_event_type_t *t = va_arg (*va, elog_event_type_t *);
+ int n = va_arg (*va, int);
+ int i, j;
+ for (i = 0; i < n; i++)
+ {
+ unserialize_cstring (m, &t[i].format);
+ unserialize_cstring (m, &t[i].format_args);
+ unserialize_integer (m, &t[i].type_index_plus_one,
+ sizeof (t->type_index_plus_one));
+ unserialize_integer (m, &t[i].n_enum_strings,
+ sizeof (t[i].n_enum_strings));
+ vec_resize (t[i].enum_strings_vector, t[i].n_enum_strings);
+ for (j = 0; j < t[i].n_enum_strings; j++)
+ unserialize_cstring (m, &t[i].enum_strings_vector[j]);
+ }
+}
+
+static void
+serialize_elog_track (serialize_main_t * m, va_list * va)
+{
+ elog_track_t *t = va_arg (*va, elog_track_t *);
+ int n = va_arg (*va, int);
+ int i;
+ for (i = 0; i < n; i++)
+ {
+ serialize_cstring (m, t[i].name);
+ }
+}
+
+static void
+unserialize_elog_track (serialize_main_t * m, va_list * va)
+{
+ elog_track_t *t = va_arg (*va, elog_track_t *);
+ int n = va_arg (*va, int);
+ int i;
+ for (i = 0; i < n; i++)
+ {
+ unserialize_cstring (m, &t[i].name);
+ }
+}
+
+static void
+serialize_elog_time_stamp (serialize_main_t * m, va_list * va)
+{
+ elog_time_stamp_t *st = va_arg (*va, elog_time_stamp_t *);
+ serialize (m, serialize_64, st->os_nsec);
+ serialize (m, serialize_64, st->cpu);
+}
+
+static void
+unserialize_elog_time_stamp (serialize_main_t * m, va_list * va)
+{
+ elog_time_stamp_t *st = va_arg (*va, elog_time_stamp_t *);
+ unserialize (m, unserialize_64, &st->os_nsec);
+ unserialize (m, unserialize_64, &st->cpu);
+}
+
+static char *elog_serialize_magic = "elog v0";
+
+void
+serialize_elog_main (serialize_main_t * m, va_list * va)
+{
+ elog_main_t *em = va_arg (*va, elog_main_t *);
+ int flush_ring = va_arg (*va, int);
+ elog_event_t *e;
+
+ serialize_magic (m, elog_serialize_magic, strlen (elog_serialize_magic));
+
+ serialize_integer (m, em->event_ring_size, sizeof (u32));
+
+ elog_time_now (&em->serialize_time);
+ serialize (m, serialize_elog_time_stamp, &em->serialize_time);
+ serialize (m, serialize_elog_time_stamp, &em->init_time);
+
+ vec_serialize (m, em->event_types, serialize_elog_event_type);
+ vec_serialize (m, em->tracks, serialize_elog_track);
+ vec_serialize (m, em->string_table, serialize_vec_8);
+
+ /* Free old events (cached) in case they have changed. */
+ if (flush_ring)
+ {
+ vec_free (em->events);
+ elog_get_events (em);
+ }
+
+ serialize_integer (m, vec_len (em->events), sizeof (u32));
+
+ /* SMP logs can easily have local time paradoxes... */
+ vec_sort_with_function (em->events, elog_cmp);
+
+ vec_foreach (e, em->events) serialize (m, serialize_elog_event, em, e);
+}
+
+void
+unserialize_elog_main (serialize_main_t * m, va_list * va)
+{
+ elog_main_t *em = va_arg (*va, elog_main_t *);
+ uword i;
+ u32 rs;
+
+ unserialize_check_magic (m, elog_serialize_magic,
+ strlen (elog_serialize_magic));
+
+ unserialize_integer (m, &rs, sizeof (u32));
+ em->event_ring_size = rs;
+ elog_init (em, em->event_ring_size);
+
+ unserialize (m, unserialize_elog_time_stamp, &em->serialize_time);
+ unserialize (m, unserialize_elog_time_stamp, &em->init_time);
+ em->nsec_per_cpu_clock = elog_nsec_per_clock (em);
+
+ vec_unserialize (m, &em->event_types, unserialize_elog_event_type);
+ for (i = 0; i < vec_len (em->event_types); i++)
+ new_event_type (em, i);
+
+ vec_unserialize (m, &em->tracks, unserialize_elog_track);
+ vec_unserialize (m, &em->string_table, unserialize_vec_8);
+
+ {
+ u32 ne;
+ elog_event_t *e;
+
+ unserialize_integer (m, &ne, sizeof (u32));
+ vec_resize (em->events, ne);
+ vec_foreach (e, em->events)
+ unserialize (m, unserialize_elog_event, em, e);
+ }
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/elog.h b/src/vppinfra/elog.h
new file mode 100644
index 00000000..05085b26
--- /dev/null
+++ b/src/vppinfra/elog.h
@@ -0,0 +1,567 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005,2009 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+/* High speed event logger */
+
+/** \file
+ The fine-grained event logger allows lightweight, thread-safe
+ event logging at minimum cost. In typical operation, logging
+ a single event costs around 80ns on x86_64. It's appropriate
+    for at-least-per-frame event logging in vector packet processing.
+
+ See https://wiki.fd.io/view/VPP/elog for more information.
+*/
+
+#ifndef included_clib_elog_h
+#define included_clib_elog_h
+
+#include <vppinfra/cache.h>
+#include <vppinfra/error.h> /* for ASSERT */
+#include <vppinfra/serialize.h>
+#include <vppinfra/time.h> /* for clib_cpu_time_now */
+#include <vppinfra/mhash.h>
+
+typedef struct
+{
+ union
+ {
+ /** Absolute time stamp in CPU clock cycles. */
+ u64 time_cycles;
+
+ /** Absolute time as floating point number in seconds. */
+ f64 time;
+ };
+
+ /** Event type index. */
+ u16 type;
+
+ /** Track for this event. Tracks allow events to be sorted and
+ displayed by track. Think of 2 dimensional display with time and
+ track being the x and y axes. */
+ u16 track;
+
+  /** 20 bytes of data follow, padding the event to 32 bytes. */
+ u8 data[20];
+} elog_event_t;
+
+typedef struct
+{
+ /** Type index plus one assigned to this type.
+ This is used to mark type as seen. */
+ u32 type_index_plus_one;
+
+ /** String table as a vector constructed when type is registered. */
+ char **enum_strings_vector;
+
+ /** Format string. (example: "my-event (%d,%d)"). */
+ char *format;
+
+  /** Specifies how arguments to format are parsed from event data.
+      Each argument is a type character followed by a 1- or 2-digit
+      decimal byte count (e.g. "i4" for a u32):
+      'i' - unsigned integer of 1, 2, 4 or 8 bytes,
+      't' - integer index into this type's enum string table,
+      'T' - u32 offset into the event-log string table,
+      's' - null-terminated C string,
+      'f' - float of 4 bytes or double of 8 bytes. */
+ char *format_args;
+
+ /** Function name generating event. */
+ char *function;
+
+ /** Number of elements in string enum table. */
+ u32 n_enum_strings;
+
+ /** String table for enum/number to string formatting. */
+ char *enum_strings[];
+} elog_event_type_t;
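+
+/* Illustrative pairing (hypothetical event type): format = "rx %d tx %d"
+   with format_args = "i4i4" consumes two u32 values from the event data,
+   matching the parse in serialize_elog_event(). */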
+
+typedef struct
+{
+ /** Track name vector. */
+ char *name;
+
+  /** Track index plus one; set (non-zero) when the track has been
+      added to the main structure. */
+ u32 track_index_plus_one;
+} elog_track_t;
+
+typedef struct
+{
+ /** CPU cycle counter. */
+ u64 cpu;
+
+  /** OS timer in nanoseconds since epoch 3/30/2017; see elog_time_now(). */
+ u64 os_nsec;
+} elog_time_stamp_t;
+
+typedef struct
+{
+ /** Total number of events in buffer. */
+ u32 n_total_events;
+
+  /** When the count reaches this limit, logging is disabled. This is
+      used for event triggers. */
+ u32 n_total_events_disable_limit;
+
+ /** Dummy event to use when logger is disabled. */
+ elog_event_t dummy_event;
+
+ /** Power of 2 number of elements in ring. */
+ uword event_ring_size;
+
+ /** Vector of events (circular buffer). Power of 2 size.
+ Used when events are being collected. */
+ elog_event_t *event_ring;
+
+ /** Vector of event types. */
+ elog_event_type_t *event_types;
+
+ /** Hash table mapping type format to type index. */
+ uword *event_type_by_format;
+
+ /** Events may refer to strings in string table. */
+ char *string_table;
+
+ /** Vector of tracks. */
+ elog_track_t *tracks;
+
+ /** Default track. */
+ elog_track_t default_track;
+
+  /** Placeholder for CPU clock frequency. */
+ clib_time_t cpu_timer;
+
+ /** Timestamps */
+ elog_time_stamp_t init_time, serialize_time;
+
+ /** SMP lock, non-zero means locking required */
+ uword *lock;
+
+  /** Use serialize_time and init_time to estimate the CPU clock
+      frequency (nanoseconds per clock). */
+ f64 nsec_per_cpu_clock;
+
+ /** Vector of events converted to generic form after collection. */
+ elog_event_t *events;
+} elog_main_t;
+
+/** @brief Return number of events in the event-log buffer
+ @param em elog_main_t *
+ @return number of events in the buffer
+*/
+
+always_inline uword
+elog_n_events_in_buffer (elog_main_t * em)
+{
+ return clib_min (em->n_total_events, em->event_ring_size);
+}
+
+/** @brief Return number of events which can fit in the event buffer
+ @param em elog_main_t *
+ @return number of events which can fit in the buffer
+*/
+always_inline uword
+elog_buffer_capacity (elog_main_t * em)
+{
+ return em->event_ring_size;
+}
+
+/** @brief Reset the event buffer
+ @param em elog_main_t *
+*/
+always_inline void
+elog_reset_buffer (elog_main_t * em)
+{
+ em->n_total_events = 0;
+ em->n_total_events_disable_limit = ~0;
+}
+
+/** @brief Enable or disable event logging
+ @param em elog_main_t *
+*/
+always_inline void
+elog_enable_disable (elog_main_t * em, int is_enabled)
+{
+ em->n_total_events = 0;
+ em->n_total_events_disable_limit = is_enabled ? ~0 : 0;
+}
+
+/** @brief Disable logging after a specified number of ievents have been logged.
+
+ This is used as a "debug trigger" when a certain event has occurred.
+ Events will be logged both before and after the "event" but the
+ event will not be lost as long as N < RING_SIZE.
+
+ @param em elog_main_t *
+ @param n uword number of events before disabling event logging
+*/
+always_inline void
+elog_disable_after_events (elog_main_t * em, uword n)
+{
+ em->n_total_events_disable_limit = em->n_total_events + n;
+}
+
+/** @brief Mid-buffer logic-analyzer trigger
+
+ Currently, only midpoint triggering is supported, but it's pretty obvious
+ how to generalize the scheme.
+ @param em elog_main_t *
+*/
+always_inline void
+elog_disable_trigger (elog_main_t * em)
+{
+ em->n_total_events_disable_limit =
+ em->n_total_events + vec_len (em->event_ring) / 2;
+}
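+
+/* Trigger usage sketch (hypothetical condition): when the anomaly is
+   detected, freeze capture via elog_disable_trigger() so the ring holds
+   events from both before and after it:
+
+     if (PREDICT_FALSE (saw_bad_thing))
+       elog_disable_trigger (&em);
+*/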
+
+/** @brief register an event type
+ @param em elog_main_t *
+ @param t elog_event_type_t * event to register
+ @return type index
+ @warning Typically not called directly
+*/
+
+word elog_event_type_register (elog_main_t * em, elog_event_type_t * t);
+
+/** @brief register an event track
+ @param em elog_main_t *
+ @param t elog_track_t * track to register
+ @return track index
+ @note this function is often called directly
+*/
+word elog_track_register (elog_main_t * em, elog_track_t * t);
+
+/** @brief event logging enabled predicate
+ @param em elog_main_t *
+ @return 1 if enabled, 0 if not enabled
+*/
+always_inline uword
+elog_is_enabled (elog_main_t * em)
+{
+ return em->n_total_events < em->n_total_events_disable_limit;
+}
+
+/** @brief Allocate an event to be filled in by the caller
+
+ Not normally called directly; this function underlies the
+ ELOG_DATA and ELOG_TRACK_DATA macros
+
+ @param em elog_main_t *
+ @param type elog_event_type_t * type
+ @param track elog_track_t * track
+ @param cpu_time u64 current cpu tick value
+ @returns event to be filled in
+*/
+always_inline void *
+elog_event_data_inline (elog_main_t * em,
+ elog_event_type_t * type,
+ elog_track_t * track, u64 cpu_time)
+{
+ elog_event_t *e;
+ uword ei;
+ word type_index, track_index;
+
+ /* Return the user dummy memory to scribble data into. */
+ if (PREDICT_FALSE (!elog_is_enabled (em)))
+ return em->dummy_event.data;
+
+ type_index = (word) type->type_index_plus_one - 1;
+ track_index = (word) track->track_index_plus_one - 1;
+ if (PREDICT_FALSE ((type_index | track_index) < 0))
+ {
+ if (type_index < 0)
+ type_index = elog_event_type_register (em, type);
+ if (track_index < 0)
+ track_index = elog_track_register (em, track);
+ }
+
+ ASSERT (track_index < vec_len (em->tracks));
+ ASSERT (is_pow2 (vec_len (em->event_ring)));
+
+ if (em->lock)
+ ei = clib_smp_atomic_add (&em->n_total_events, 1);
+ else
+ ei = em->n_total_events++;
+
+ ei &= em->event_ring_size - 1;
+ e = vec_elt_at_index (em->event_ring, ei);
+
+ e->time_cycles = cpu_time;
+ e->type = type_index;
+ e->track = track_index;
+
+ /* Return user data for caller to fill in. */
+ return e->data;
+}
+
+/* External version of inline. */
+void *elog_event_data (elog_main_t * em,
+ elog_event_type_t * type,
+ elog_track_t * track, u64 cpu_time);
+
+/** @brief Allocate an event to be filled in by the caller, non-inline
+
+ Not normally called directly; this function underlies the
+ ELOG_DATA and ELOG_TRACK_DATA macros
+
+ @param em elog_main_t *
+ @param type elog_event_type_t * type
+ @param track elog_track_t * track
+ @param cpu_time u64 current cpu tick value
+ @returns event to be filled in
+*/
+always_inline void *
+elog_event_data_not_inline (elog_main_t * em,
+ elog_event_type_t * type,
+ elog_track_t * track, u64 cpu_time)
+{
+ /* Return the user dummy memory to scribble data into. */
+ if (PREDICT_FALSE (!elog_is_enabled (em)))
+ return em->dummy_event.data;
+ return elog_event_data (em, type, track, cpu_time);
+}
+
+/** @brief Log a single-datum event
+ @param em elog_main_t *
+ @param type elog_event_type_t * type
+ @param data u32 single datum to capture
+*/
+always_inline void
+elog (elog_main_t * em, elog_event_type_t * type, u32 data)
+{
+ u32 *d = elog_event_data_not_inline (em,
+ type,
+ &em->default_track,
+ clib_cpu_time_now ());
+ d[0] = data;
+}
+
+/** @brief Log a single-datum event, inline version
+ @param em elog_main_t *
+ @param type elog_event_type_t * type
+ @param data u32 single datum to capture
+*/
+always_inline void
+elog_inline (elog_main_t * em, elog_event_type_t * type, u32 data)
+{
+ u32 *d = elog_event_data_inline (em,
+ type,
+ &em->default_track,
+ clib_cpu_time_now ());
+ d[0] = data;
+}
+
+/** @brief Log a single-datum event to a specific track, non-inline version
+ @param em elog_main_t *
+ @param type elog_event_type_t * type
+    @param track elog_track_t * track
+ @param data u32 single datum to capture
+*/
+always_inline void
+elog_track (elog_main_t * em, elog_event_type_t * type, elog_track_t * track,
+ u32 data)
+{
+ u32 *d = elog_event_data_not_inline (em,
+ type,
+ track,
+ clib_cpu_time_now ());
+ d[0] = data;
+}
+
+/** @brief Log a single-datum event to a specific track, inline version
+    @param em elog_main_t *
+    @param type elog_event_type_t * type
+    @param track elog_track_t * track
+ @param data u32 single datum to capture
+*/
+always_inline void
+elog_track_inline (elog_main_t * em, elog_event_type_t * type,
+ elog_track_t * track, u32 data)
+{
+ u32 *d = elog_event_data_inline (em,
+ type,
+ track,
+ clib_cpu_time_now ());
+ d[0] = data;
+}
+
+always_inline void *
+elog_data (elog_main_t * em, elog_event_type_t * type, elog_track_t * track)
+{
+ return elog_event_data_not_inline (em, type, track, clib_cpu_time_now ());
+}
+
+always_inline void *
+elog_data_inline (elog_main_t * em, elog_event_type_t * type,
+ elog_track_t * track)
+{
+ return elog_event_data_inline (em, type, track, clib_cpu_time_now ());
+}
+
+/* Macro shorthands for generating/declaring events. */
+#define __ELOG_TYPE_VAR(f) f
+#define __ELOG_TRACK_VAR(f) f
+
+#define ELOG_TYPE_DECLARE(f) static elog_event_type_t __ELOG_TYPE_VAR(f)
+
+#define ELOG_TYPE_INIT_FORMAT_AND_FUNCTION(fmt,func) \
+ { .format = fmt, .function = func, }
+
+#define ELOG_TYPE_INIT(fmt) \
+ ELOG_TYPE_INIT_FORMAT_AND_FUNCTION(fmt,(char *) __FUNCTION__)
+
+#define ELOG_TYPE_DECLARE_HELPER(f,fmt,func) \
+ static elog_event_type_t __ELOG_TYPE_VAR(f) = \
+ ELOG_TYPE_INIT_FORMAT_AND_FUNCTION (fmt, func)
+
+#define ELOG_TYPE_DECLARE_FORMAT_AND_FUNCTION(f,fmt) \
+ ELOG_TYPE_DECLARE_HELPER (f, fmt, (char *) __FUNCTION__)
+
+#define ELOG_TYPE_DECLARE_FORMAT(f,fmt) \
+ ELOG_TYPE_DECLARE_HELPER (f, fmt, 0)
+
+/* Shorthands with and without __FUNCTION__.
+ D for decimal; X for hex. F for __FUNCTION__. */
+#define ELOG_TYPE(f,fmt) ELOG_TYPE_DECLARE_FORMAT_AND_FUNCTION(f,fmt)
+#define ELOG_TYPE_D(f) ELOG_TYPE_DECLARE_FORMAT (f, #f " %d")
+#define ELOG_TYPE_X(f) ELOG_TYPE_DECLARE_FORMAT (f, #f " 0x%x")
+#define ELOG_TYPE_DF(f) ELOG_TYPE_DECLARE_FORMAT_AND_FUNCTION (f, #f " %d")
+#define ELOG_TYPE_XF(f) ELOG_TYPE_DECLARE_FORMAT_AND_FUNCTION (f, #f " 0x%x")
+#define ELOG_TYPE_FD(f) ELOG_TYPE_DECLARE_FORMAT_AND_FUNCTION (f, #f " %d")
+#define ELOG_TYPE_FX(f) ELOG_TYPE_DECLARE_FORMAT_AND_FUNCTION (f, #f " 0x%x")
+
+#define ELOG_TRACK_DECLARE(f) static elog_track_t __ELOG_TRACK_VAR(f)
+#define ELOG_TRACK(f) ELOG_TRACK_DECLARE(f) = { .name = #f, }
+
+/* Log 32 bits of data. */
+#define ELOG(em,f,data) elog ((em), &__ELOG_TYPE_VAR(f), data)
+#define ELOG_INLINE(em,f,data) elog_inline ((em), &__ELOG_TYPE_VAR(f), data)
+
+/* Return data pointer to fill in. */
+#define ELOG_TRACK_DATA(em,f,track) \
+ elog_data ((em), &__ELOG_TYPE_VAR(f), &__ELOG_TRACK_VAR(track))
+#define ELOG_TRACK_DATA_INLINE(em,f,track) \
+ elog_data_inline ((em), &__ELOG_TYPE_VAR(f), &__ELOG_TRACK_VAR(track))
+
+/* Shorthand with default track. */
+#define ELOG_DATA(em,f) elog_data ((em), &__ELOG_TYPE_VAR (f), &(em)->default_track)
+#define ELOG_DATA_INLINE(em,f) elog_data_inline ((em), &__ELOG_TYPE_VAR (f), &(em)->default_track)
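+
+/* Usage sketch (hypothetical names): declare a type once, then log u32
+   data on the default track:
+
+     ELOG_TYPE_DECLARE_FORMAT (my_counter, "counter: %d");
+     ...
+     ELOG (&em, my_counter, counter_value);
+
+   For multi-field events, take the data pointer instead and fill in the
+   event's 20-byte payload directly via ELOG_DATA / ELOG_TRACK_DATA. */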
+
+/** @brief add a string to the event-log string table
+
+ Often combined with hashing and the T4 elog format specifier to
+ display complex strings in offline tooling
+
+ @param em elog_main_t *
+ @param format char *
+ @param VARARGS
+ @return u32 index to add to event log
+*/
+u32 elog_string (elog_main_t * em, char *format, ...);
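+
+/* Sketch (hypothetical event type): pair elog_string() with a "T4"
+   format arg so the 4-byte string-table offset is remapped correctly by
+   elog_merge():
+
+     ELOG_TYPE_DECLARE (e) = { .format = "msg: %s", .format_args = "T4" };
+     u32 * d = ELOG_DATA (&em, e);
+     d[0] = elog_string (&em, "worker %d ready", worker_index);
+*/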
+
+void elog_time_now (elog_time_stamp_t * et);
+
+/** @brief Convert event-ring events to generic events; return them as a vector.
+ @param em elog_main_t *
+ @return event vector with timestamps in f64 seconds
+ @note sets em->events to resulting vector.
+*/
+elog_event_t *elog_get_events (elog_main_t * em);
+
+/** @brief Convert event-ring events to generic events; return them as a vector.
+ @param em elog_main_t *
+ @return event vector with timestamps in f64 seconds
+ @note no side effects
+*/
+elog_event_t *elog_peek_events (elog_main_t * em);
+
+/* Merge two logs, add supplied track tags. */
+void elog_merge (elog_main_t * dst, u8 * dst_tag,
+ elog_main_t * src, u8 * src_tag, f64 align_tweak);
+
+/* 2 arguments elog_main_t and elog_event_t to format event or track name. */
+u8 *format_elog_event (u8 * s, va_list * va);
+u8 *format_elog_track (u8 * s, va_list * va);
+
+void serialize_elog_main (serialize_main_t * m, va_list * va);
+void unserialize_elog_main (serialize_main_t * m, va_list * va);
+
+void elog_init (elog_main_t * em, u32 n_events);
+void elog_alloc (elog_main_t * em, u32 n_events);
+
+#ifdef CLIB_UNIX
+always_inline clib_error_t *
+elog_write_file (elog_main_t * em, char *unix_file, int flush_ring)
+{
+ serialize_main_t m;
+ clib_error_t *error;
+
+ error = serialize_open_unix_file (&m, unix_file);
+ if (error)
+ return error;
+ error = serialize (&m, serialize_elog_main, em, flush_ring);
+ if (!error)
+ serialize_close (&m);
+ return error;
+}
+
+always_inline clib_error_t *
+elog_read_file (elog_main_t * em, char *unix_file)
+{
+ serialize_main_t m;
+ clib_error_t *error;
+
+ error = unserialize_open_unix_file (&m, unix_file);
+ if (error)
+ return error;
+ error = unserialize (&m, unserialize_elog_main, em);
+ if (!error)
+ unserialize_close (&m);
+ return error;
+}
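+
+/* Round-trip sketch (hypothetical path): persist a log, then reload it
+   into a separate, zeroed elog_main_t for offline formatting:
+
+     clib_error_t * err = elog_write_file (&em, "/tmp/vpp.elog", 1);
+     ...
+     err = elog_read_file (&em2, "/tmp/vpp.elog");
+*/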
+
+#endif /* CLIB_UNIX */
+
+#endif /* included_clib_elog_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/error.c b/src/vppinfra/error.c
new file mode 100644
index 00000000..2722fb7b
--- /dev/null
+++ b/src/vppinfra/error.c
@@ -0,0 +1,292 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+/* Error reporting. */
+#include <stdarg.h>
+
+#include <vppinfra/clib.h> /* for HAVE_ERRNO */
+
+#ifdef CLIB_LINUX_KERNEL
+#include <linux/unistd.h> /* for write */
+#include <linux/kernel.h> /* for printk */
+#endif
+
+#ifdef CLIB_UNIX
+#include <unistd.h> /* for write */
+#include <stdio.h> /* for printf */
+#define HAVE_ERRNO
+#endif
+
+#ifdef CLIB_STANDALONE
+#include <vppinfra/standalone_stdio.h> /* for printf */
+#endif
+
+#include <vppinfra/string.h>
+#include <vppinfra/mem.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/format.h>
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/os.h> /* for os_panic/os_exit/os_puts */
+
+typedef struct
+{
+ clib_error_handler_func_t *func;
+ void *arg;
+} clib_error_handler_t;
+
+static clib_error_handler_t *handlers = 0;
+
+void
+clib_error_register_handler (clib_error_handler_func_t func, void *arg)
+{
+  clib_error_handler_t h = { .func = func, .arg = arg, };
+ vec_add1 (handlers, h);
+}
+
+static void
+debugger (void)
+{
+ os_panic ();
+}
+
+static void
+error_exit (int code)
+{
+ os_exit (code);
+}
+
+static u8 *
+dispatch_message (u8 * msg)
+{
+ word i;
+
+ if (!msg)
+ return msg;
+
+ for (i = 0; i < vec_len (handlers); i++)
+ handlers[i].func (handlers[i].arg, msg, vec_len (msg));
+
+ /* If no message handler is specified provide a default one. */
+ if (vec_len (handlers) == 0)
+ os_puts (msg, vec_len (msg), /* is_error */ 1);
+
+ return msg;
+}
+
+void
+_clib_error (int how_to_die,
+ char *function_name, uword line_number, char *fmt, ...)
+{
+ u8 *msg = 0;
+ va_list va;
+
+ if (function_name)
+ {
+ msg = format (msg, "%s:", function_name);
+ if (line_number > 0)
+ msg = format (msg, "%wd:", line_number);
+ msg = format (msg, " ");
+ }
+
+ va_start (va, fmt);
+ msg = va_format (msg, fmt, &va);
+ va_end (va);
+
+#ifdef HAVE_ERRNO
+ if (how_to_die & CLIB_ERROR_ERRNO_VALID)
+ msg = format (msg, ": %s (errno %d)", strerror (errno), errno);
+#endif
+
+ if (vec_end (msg)[-1] != '\n')
+ vec_add1 (msg, '\n');
+
+ msg = dispatch_message (msg);
+
+ vec_free (msg);
+
+ if (how_to_die & CLIB_ERROR_ABORT)
+ debugger ();
+ if (how_to_die & CLIB_ERROR_FATAL)
+ error_exit (1);
+}
+
+clib_error_t *
+_clib_error_return (clib_error_t * errors,
+ any code, uword flags, char *where, char *fmt, ...)
+{
+ clib_error_t *e;
+ va_list va;
+
+#ifdef HAVE_ERRNO
+ /* Save errno since it may be re-set before we'll need it. */
+ word errno_save = errno;
+#endif
+
+ va_start (va, fmt);
+ vec_add2 (errors, e, 1);
+ if (fmt)
+ e->what = va_format (0, fmt, &va);
+
+#ifdef HAVE_ERRNO
+ if (flags & CLIB_ERROR_ERRNO_VALID)
+ {
+ if (e->what)
+ e->what = format (e->what, ": ");
+ e->what = format (e->what, "%s", strerror (errno_save));
+ }
+#endif
+
+ e->where = (u8 *) where;
+ e->code = code;
+ e->flags = flags;
+ va_end (va);
+ return errors;
+}
+
+void *
+clib_error_free_vector (clib_error_t * errors)
+{
+ clib_error_t *e;
+ vec_foreach (e, errors) vec_free (e->what);
+ vec_free (errors);
+ return 0;
+}
+
+u8 *
+format_clib_error (u8 * s, va_list * va)
+{
+ clib_error_t *errors = va_arg (*va, clib_error_t *);
+ clib_error_t *e;
+
+ vec_foreach (e, errors)
+ {
+ if (!e->what)
+ continue;
+
+ if (e->where)
+ {
+ u8 *where = 0;
+
+ if (e > errors)
+ where = format (where, "from ");
+ where = format (where, "%s", e->where);
+
+ s = format (s, "%v: ", where);
+ vec_free (where);
+ }
+
+ s = format (s, "%v\n", e->what);
+ }
+
+ return s;
+}
+
+clib_error_t *
+_clib_error_report (clib_error_t * errors)
+{
+ if (errors)
+ {
+ u8 *msg = format (0, "%U", format_clib_error, errors);
+
+ msg = dispatch_message (msg);
+ vec_free (msg);
+
+ if (errors->flags & CLIB_ERROR_ABORT)
+ debugger ();
+ if (errors->flags & CLIB_ERROR_FATAL)
+ error_exit (1);
+
+ clib_error_free (errors);
+ }
+ return 0;
+}
+
+#ifdef TEST
+
+static error_t *
+foo1 (int x)
+{
+ return error_return (0, "x is odd %d", x);
+}
+
+static error_t *
+foo2 (int x)
+{
+ return error_return (0, "x is even %d", x);
+}
+
+static error_t *
+foo (int x)
+{
+ error_t *e;
+ if (x & 1)
+ e = foo1 (x);
+ else
+ e = foo2 (x);
+ if (e)
+ return error_return (e, 0);
+  return 0;
+}
+
+static void
+error_handler (void *arg, char *msg, int msg_len)
+{
+ write (2, msg, msg_len);
+}
+
+int
+main (int argc, char *argv[])
+{
+ error_t *e;
+
+ register_error_handler (error_handler, 0);
+
+ e = foo (getpid ());
+ if (e)
+ error_report (e);
+ return 0;
+}
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/error.h b/src/vppinfra/error.h
new file mode 100644
index 00000000..e0e2d472
--- /dev/null
+++ b/src/vppinfra/error.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_error_h
+#define included_error_h
+
+#include <vppinfra/clib.h> /* for CLIB_LINUX_KERNEL */
+#include <vppinfra/error_bootstrap.h>
+
+#ifdef CLIB_UNIX
+#include <errno.h>
+#endif
+
+#ifdef CLIB_LINUX_KERNEL
+#include <linux/errno.h>
+#endif
+
+#include <stdarg.h>
+#include <vppinfra/vec.h>
+
+/* Callback functions for error reporting. */
+typedef void clib_error_handler_func_t (void *arg, u8 * msg, int msg_len);
+void clib_error_register_handler (clib_error_handler_func_t func, void *arg);
+
+#define clib_warning(format,args...) \
+ _clib_error (CLIB_ERROR_WARNING, clib_error_function, __LINE__, format, ## args)
+
+#define clib_error(format,args...) \
+ _clib_error (CLIB_ERROR_FATAL, clib_error_function, __LINE__, format, ## args)
+
+#define clib_unix_error(format,args...) \
+ _clib_error (CLIB_ERROR_FATAL | CLIB_ERROR_ERRNO_VALID, clib_error_function, __LINE__, format, ## args)
+
+#define clib_unix_warning(format,args...) \
+ _clib_error (CLIB_ERROR_WARNING | CLIB_ERROR_ERRNO_VALID, clib_error_function, __LINE__, format, ## args)
+
+/* For programming errors and assert. */
+#define clib_panic(format,args...) \
+ _clib_error (CLIB_ERROR_ABORT, (char *) clib_error_function, __LINE__, format, ## args)
+
+#include <vppinfra/clib_error.h>
+
+#define clib_error_get_code(err) ((err) ? (err)->code : 0)
+#define clib_error_set_code(err, c) \
+do { \
+ if (err) \
+ (err)->code = (c); \
+} while (0)
+
+extern void *clib_error_free_vector (clib_error_t * errors);
+
+#define clib_error_free(e) e = clib_error_free_vector(e)
+
+extern clib_error_t *_clib_error_return (clib_error_t * errors,
+ any code,
+ uword flags,
+ char *where, char *fmt, ...);
+
+#define clib_error_return_code(e,code,flags,args...) \
+ _clib_error_return((e),(code),(flags),(char *)clib_error_function,args)
+
+#define clib_error_create(args...) \
+ clib_error_return_code(0,0,0,args)
+
+#define clib_error_return(e,args...) \
+ clib_error_return_code(e,0,0,args)
+
+#define clib_error_return_unix(e,args...) \
+ clib_error_return_code(e,errno,CLIB_ERROR_ERRNO_VALID,args)
+
+#define clib_error_return_fatal(e,args...) \
+ clib_error_return_code(e,0,CLIB_ERROR_FATAL,args)
+
+#define clib_error_return_unix_fatal(e,args...) \
+ clib_error_return_code(e,errno,CLIB_ERROR_ERRNO_VALID|CLIB_ERROR_FATAL,args)
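+
+/* Typical use (hypothetical caller): propagate a failed syscall with
+   errno decoded into the message:
+
+     if (fd < 0)
+       return clib_error_return_unix (0, "open '%s'", file_name);
+*/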
+
+extern clib_error_t *_clib_error_report (clib_error_t * errors);
+
+#define clib_error_report(e) do { (e) = _clib_error_report (e); } while (0)
+
+u8 *format_clib_error (u8 * s, va_list * va);
+
+always_inline word
+unix_error_is_fatal (word error)
+{
+#ifdef CLIB_UNIX
+ switch (error)
+ {
+ case EWOULDBLOCK:
+ case EINTR:
+ return 0;
+ }
+#endif
+ return 1;
+}
+
+#define IF_ERROR_IS_FATAL_RETURN_ELSE_FREE(e) \
+do { \
+ if (e) \
+ { \
+ if (unix_error_is_fatal (clib_error_get_code (e))) \
+ return (e); \
+ else \
+ clib_error_free (e); \
+ } \
+} while (0)
+
+#define ERROR_RETURN_IF(x) \
+do { \
+ clib_error_t * _error_return_if = (x); \
+ if (_error_return_if) \
+ return clib_error_return (_error_return_if, 0); \
+} while (0)
+
+#define ERROR_ASSERT(truth) \
+({ \
+ clib_error_t * _error_assert = 0; \
+ if (CLIB_DEBUG > 0 && ! (truth)) \
+ { \
+ _error_assert = clib_error_return_fatal \
+ (0, "%s:%d (%s) assertion `%s' fails", \
+ __FILE__, \
+ (uword) __LINE__, \
+ clib_error_function, \
+ # truth); \
+ } \
+ _error_assert; \
+})
+
+/* An assert that remains even if CLIB_DEBUG is set to 0. */
+#define CLIB_ERROR_ASSERT(truth) \
+({ \
+ clib_error_t * _error_assert = 0; \
+ if (! (truth)) \
+ { \
+ _error_assert = \
+ clib_error_return_fatal \
+ (0, "%s:%d (%s) assertion `%s' fails", \
+ __FILE__, \
+ (uword) __LINE__, \
+ clib_error_function, \
+ # truth); \
+ } \
+ _error_assert; \
+})
+
+/*
+ * If we're running under Coverity, don't die on
+ * failed static assertions.
+ */
+#ifdef __COVERITY__
+#ifndef _Static_assert
+#define _Static_assert(x,y)
+#endif
+#endif
+
+#endif /* included_error_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/error_bootstrap.h b/src/vppinfra/error_bootstrap.h
new file mode 100644
index 00000000..3416c2f9
--- /dev/null
+++ b/src/vppinfra/error_bootstrap.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_error_bootstrap_h
+#define included_error_bootstrap_h
+
+/* Bootstrap include so that #include <vppinfra/mem.h> can include e.g.
+ <vppinfra/mheap.h> which depends on <vppinfra/vec.h>. */
+
+#include <vppinfra/clib.h> /* for uword */
+
+enum
+{
+ CLIB_ERROR_FATAL = 1 << 0,
+ CLIB_ERROR_ABORT = 1 << 1,
+ CLIB_ERROR_WARNING = 1 << 2,
+ CLIB_ERROR_ERRNO_VALID = 1 << 16,
+ CLIB_ERROR_NO_RATE_LIMIT = 1 << 17,
+};
+
+/* Current function name. Need (char *) cast to silence gcc4 pointer signedness warning. */
+#define clib_error_function ((char *) __FUNCTION__)
+
+#ifndef CLIB_ASSERT_ENABLE
+#define CLIB_ASSERT_ENABLE (CLIB_DEBUG > 0)
+#endif
+
+/* Low level error reporting function.
+ Code specifies whether to call exit, abort or nothing at
+ all (for non-fatal warnings). */
+extern void _clib_error (int code,
+ char *function_name,
+ uword line_number, char *format, ...);
+
+#define ASSERT(truth) \
+do { \
+ if (CLIB_ASSERT_ENABLE && ! (truth)) \
+ { \
+ _clib_error (CLIB_ERROR_ABORT, 0, 0, \
+ "%s:%d (%s) assertion `%s' fails", \
+ __FILE__, \
+ (uword) __LINE__, \
+ clib_error_function, \
+ # truth); \
+ } \
+} while (0)
+
+#if defined(__clang__)
+#define STATIC_ASSERT(truth,...)
+#else
+#define STATIC_ASSERT(truth,...) _Static_assert(truth, __VA_ARGS__)
+#endif
+
+#define STATIC_ASSERT_SIZEOF(d, s) \
+ STATIC_ASSERT (sizeof (d) == s, "Size of " #d " must be " # s " bytes")
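+
+/* Example (hypothetical struct): pin down a wire-format layout at
+   compile time:
+
+     STATIC_ASSERT_SIZEOF (my_header_t, 16);
+*/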
+
+/* Assert without allocating memory. */
+#define ASSERT_AND_PANIC(truth) \
+do { \
+ if (CLIB_ASSERT_ENABLE && ! (truth)) \
+ os_panic (); \
+} while (0)
+
+#endif /* included_error_bootstrap_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/fheap.c b/src/vppinfra/fheap.c
new file mode 100644
index 00000000..13692456
--- /dev/null
+++ b/src/vppinfra/fheap.c
@@ -0,0 +1,473 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vppinfra/fheap.h>
+
+/* Fibonacci heaps. */
+always_inline fheap_node_t *
+fheap_get_node (fheap_t * f, u32 i)
+{
+ return i != ~0 ? vec_elt_at_index (f->nodes, i) : 0;
+}
+
+always_inline fheap_node_t *
+fheap_get_root (fheap_t * f)
+{
+ return fheap_get_node (f, f->min_root);
+}
+
+static void
+fheap_validate (fheap_t * f)
+{
+ fheap_node_t *n, *m;
+ uword ni, si;
+
+ if (!CLIB_DEBUG || !f->enable_validate)
+ return;
+
+ vec_foreach_index (ni, f->nodes)
+ {
+ n = vec_elt_at_index (f->nodes, ni);
+
+ if (!n->is_valid)
+ continue;
+
+ /* Min root must have minimal key. */
+ m = vec_elt_at_index (f->nodes, f->min_root);
+ ASSERT (n->key >= m->key);
+
+ /* Min root must have no parent. */
+ if (ni == f->min_root)
+ ASSERT (n->parent == ~0);
+
+ /* Check sibling linkages. */
+ if (n->next_sibling == ~0)
+ ASSERT (n->prev_sibling == ~0);
+ else if (n->prev_sibling == ~0)
+ ASSERT (n->next_sibling == ~0);
+ else
+ {
+ fheap_node_t *prev, *next;
+ u32 si = n->next_sibling, si_start = si;
+ do
+ {
+ m = vec_elt_at_index (f->nodes, si);
+ prev = vec_elt_at_index (f->nodes, m->prev_sibling);
+ next = vec_elt_at_index (f->nodes, m->next_sibling);
+ ASSERT (prev->next_sibling == si);
+ ASSERT (next->prev_sibling == si);
+ si = m->next_sibling;
+ }
+ while (si != si_start);
+ }
+
+ /* Loop through all siblings. */
+ {
+ u32 n_siblings = 0;
+
+	foreach_fheap_node_sibling (f, si, n->next_sibling, ({
+	  m = vec_elt_at_index (f->nodes, si);
+	  /* All siblings must have same parent. */
+	  ASSERT (m->parent == n->parent);
+	  n_siblings += 1;
+	}));
+
+ /* Either parent is non-empty or there are siblings present. */
+ if (n->parent == ~0 && ni != f->min_root)
+ ASSERT (n_siblings > 0);
+ }
+
+ /* Loop through all children. */
+ {
+ u32 found_first_child = n->first_child == ~0;
+ u32 n_children = 0;
+
+	foreach_fheap_node_sibling (f, si, n->first_child, ({
+	  m = vec_elt_at_index (f->nodes, si);
+	  /* Children must have larger keys than their parent. */
+	  ASSERT (m->key >= n->key);
+	  if (!found_first_child)
+	    found_first_child = si == n->first_child;
+	  n_children += 1;
+	}));
+
+ /* Check that first child is present on list. */
+ ASSERT (found_first_child);
+
+ /* Make sure rank is correct. */
+ ASSERT (n->rank == n_children);
+ }
+ }
+
+ /* Increment serial number for each successful validate.
+     Failure can be used as a condition for gdb breakpoints. */
+ f->validate_serial++;
+}
+
+always_inline void
+fheap_node_add_sibling (fheap_t * f, u32 ni, u32 ni_to_add)
+{
+ fheap_node_t *n = vec_elt_at_index (f->nodes, ni);
+ fheap_node_t *n_to_add = vec_elt_at_index (f->nodes, ni_to_add);
+ fheap_node_t *n_next = fheap_get_node (f, n->next_sibling);
+ fheap_node_t *parent;
+
+ /* Empty list? */
+ if (n->next_sibling == ~0)
+ {
+ ASSERT (n->prev_sibling == ~0);
+ n->next_sibling = n->prev_sibling = ni_to_add;
+ n_to_add->next_sibling = n_to_add->prev_sibling = ni;
+ }
+ else
+ {
+ /* Add node after existing node. */
+ n_to_add->prev_sibling = ni;
+ n_to_add->next_sibling = n->next_sibling;
+
+ n->next_sibling = ni_to_add;
+ n_next->prev_sibling = ni_to_add;
+ }
+
+ n_to_add->parent = n->parent;
+ parent = fheap_get_node (f, n->parent);
+ if (parent)
+ parent->rank += 1;
+}
+
+void
+fheap_add (fheap_t * f, u32 ni, u32 key)
+{
+ fheap_node_t *r, *n;
+ u32 ri;
+
+ n = vec_elt_at_index (f->nodes, ni);
+
+ memset (n, 0, sizeof (n[0]));
+ n->parent = n->first_child = n->next_sibling = n->prev_sibling = ~0;
+ n->key = key;
+
+ r = fheap_get_root (f);
+ ri = f->min_root;
+ if (!r)
+ {
+ /* No root? Add node as new root. */
+ f->min_root = ni;
+ }
+ else
+ {
+ /* Add node as sibling of current root. */
+ fheap_node_add_sibling (f, ri, ni);
+
+ /* New node may become new root. */
+ if (r->key > n->key)
+ f->min_root = ni;
+ }
+
+ fheap_validate (f);
+}
+
+always_inline u32
+fheap_node_remove_internal (fheap_t * f, u32 ni, u32 invalidate)
+{
+ fheap_node_t *n = vec_elt_at_index (f->nodes, ni);
+ u32 prev_ni = n->prev_sibling;
+ u32 next_ni = n->next_sibling;
+ u32 list_has_single_element = prev_ni == ni;
+ fheap_node_t *prev = fheap_get_node (f, prev_ni);
+ fheap_node_t *next = fheap_get_node (f, next_ni);
+ fheap_node_t *p = fheap_get_node (f, n->parent);
+
+ if (p)
+ {
+ ASSERT (p->rank > 0);
+ p->rank -= 1;
+ p->first_child = list_has_single_element ? ~0 : next_ni;
+ }
+
+ if (prev)
+ {
+ ASSERT (prev->next_sibling == ni);
+ prev->next_sibling = next_ni;
+ }
+ if (next)
+ {
+ ASSERT (next->prev_sibling == ni);
+ next->prev_sibling = prev_ni;
+ }
+
+ n->prev_sibling = n->next_sibling = ni;
+ n->parent = ~0;
+ n->is_valid = invalidate == 0;
+
+ return list_has_single_element ? ~0 : next_ni;
+}
+
+always_inline u32
+fheap_node_remove (fheap_t * f, u32 ni)
+{
+ return fheap_node_remove_internal (f, ni, /* invalidate */ 0);
+}
+
+always_inline u32
+fheap_node_remove_and_invalidate (fheap_t * f, u32 ni)
+{
+ return fheap_node_remove_internal (f, ni, /* invalidate */ 1);
+}
+
+static void
+fheap_link_root (fheap_t * f, u32 ni)
+{
+ fheap_node_t *n = vec_elt_at_index (f->nodes, ni);
+ fheap_node_t *r, *lo, *hi;
+ u32 ri, lo_i, hi_i, k;
+
+ while (1)
+ {
+ k = n->rank;
+ vec_validate_init_empty (f->root_list_by_rank, k, ~0);
+ ri = f->root_list_by_rank[k];
+ r = fheap_get_node (f, ri);
+ if (!r)
+ {
+ f->root_list_by_rank[k] = ni;
+ return;
+ }
+
+ f->root_list_by_rank[k] = ~0;
+
+ /* Sort n/r into lo/hi by their keys. */
+ lo = r, lo_i = ri;
+ hi = n, hi_i = ni;
+ if (hi->key < lo->key)
+ {
+ u32 ti;
+ fheap_node_t *tn;
+ ti = lo_i, tn = lo;
+ lo = hi, lo_i = hi_i;
+ hi = tn, hi_i = ti;
+ }
+
+ /* Remove larger key. */
+ fheap_node_remove (f, hi_i);
+
+ /* Add larger key as child of smaller one. */
+ if (lo->first_child == ~0)
+ {
+ hi->parent = lo_i;
+ lo->first_child = hi_i;
+ lo->rank = 1;
+ }
+ else
+ fheap_node_add_sibling (f, lo->first_child, hi_i);
+
+      /* Following Fredman & Tarjan: "When making a root node X a child
+         of another node in a linking step, we unmark X". */
+ hi->is_marked = 0;
+
+ ni = lo_i;
+ n = lo;
+ }
+}
+
+u32
+fheap_del_min (fheap_t * f, u32 * min_key)
+{
+ fheap_node_t *r = fheap_get_root (f);
+ u32 to_delete_min_ri = f->min_root;
+ u32 ri, ni;
+
+ /* Empty heap? */
+ if (!r)
+ return ~0;
+
+ /* Root's children become siblings. Call this step a; see below. */
+ if (r->first_child != ~0)
+ {
+ u32 ci, cni, rni;
+ fheap_node_t *c, *cn, *rn;
+
+ /* Splice child & root circular lists together. */
+ ci = r->first_child;
+ c = vec_elt_at_index (f->nodes, ci);
+
+ cni = c->next_sibling;
+ rni = r->next_sibling;
+ cn = vec_elt_at_index (f->nodes, cni);
+ rn = vec_elt_at_index (f->nodes, rni);
+
+ r->next_sibling = cni;
+ c->next_sibling = rni;
+ cn->prev_sibling = to_delete_min_ri;
+ rn->prev_sibling = ci;
+ }
+
+ /* Remove min root. */
+ ri = fheap_node_remove_and_invalidate (f, to_delete_min_ri);
+
+ /* Find new min root from among siblings including the ones we've just added. */
+ f->min_root = ~0;
+ if (ri != ~0)
+ {
+ u32 ri_last, ri_next, i, min_ds;
+
+ r = fheap_get_node (f, ri);
+ ri_last = r->prev_sibling;
+ while (1)
+ {
+ /* Step a above can put children (with r->parent != ~0) on root list. */
+ r->parent = ~0;
+
+ ri_next = r->next_sibling;
+ fheap_link_root (f, ri);
+ if (ri == ri_last)
+ break;
+ ri = ri_next;
+ r = fheap_get_node (f, ri);
+ }
+
+ min_ds = ~0;
+ vec_foreach_index (i, f->root_list_by_rank)
+ {
+ ni = f->root_list_by_rank[i];
+ if (ni == ~0)
+ continue;
+ f->root_list_by_rank[i] = ~0;
+ r = fheap_get_node (f, ni);
+ if (r->key < min_ds)
+ {
+ f->min_root = ni;
+ min_ds = r->key;
+ ASSERT (r->parent == ~0);
+ }
+ }
+ }
+
+ /* Return deleted min root. */
+ r = vec_elt_at_index (f->nodes, to_delete_min_ri);
+ if (min_key)
+ *min_key = r->key;
+
+ fheap_validate (f);
+
+ return to_delete_min_ri;
+}
+
+static void
+fheap_mark_parent (fheap_t * f, u32 pi)
+{
+ fheap_node_t *p = vec_elt_at_index (f->nodes, pi);
+
+ /* Parent is a root: do nothing. */
+ if (p->parent == ~0)
+ return;
+
+ /* If not marked, mark it. */
+ if (!p->is_marked)
+ {
+ p->is_marked = 1;
+ return;
+ }
+
+  /* It's a previously marked, non-root parent.
+ Cut edge to its parent and add to root list. */
+ fheap_node_remove (f, pi);
+ fheap_node_add_sibling (f, f->min_root, pi);
+
+  /* Unmark it since it's now a root node. */
+ p->is_marked = 0;
+
+ /* "Cascading cuts": check parent. */
+ if (p->parent != ~0)
+ fheap_mark_parent (f, p->parent);
+}
+
+/* Set key to new smaller value. */
+void
+fheap_decrease_key (fheap_t * f, u32 ni, u32 new_key)
+{
+ fheap_node_t *n = vec_elt_at_index (f->nodes, ni);
+ fheap_node_t *r = fheap_get_root (f);
+
+ n->key = new_key;
+
+ if (n->parent != ~0)
+ {
+ fheap_mark_parent (f, n->parent);
+
+ /* Remove node and add to root list. */
+ fheap_node_remove (f, ni);
+ fheap_node_add_sibling (f, f->min_root, ni);
+ }
+
+ if (n->key < r->key)
+ f->min_root = ni;
+
+ fheap_validate (f);
+}
+
+void
+fheap_del (fheap_t * f, u32 ni)
+{
+ fheap_node_t *n;
+
+ n = vec_elt_at_index (f->nodes, ni);
+
+ if (n->parent == ~0)
+ {
+ ASSERT (ni == f->min_root);
+ fheap_del_min (f, 0);
+ }
+ else
+ {
+ u32 ci;
+
+ fheap_mark_parent (f, n->parent);
+
+ /* Add children to root list. */
+      foreach_fheap_node_sibling (f, ci, n->first_child, ({
+	fheap_node_remove (f, ci);
+	fheap_node_add_sibling (f, f->min_root, ci);
+      }));
+
+ fheap_node_remove_and_invalidate (f, ni);
+ }
+
+ fheap_validate (f);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/fheap.h b/src/vppinfra/fheap.h
new file mode 100644
index 00000000..6d4965f1
--- /dev/null
+++ b/src/vppinfra/fheap.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef included_clib_fheap_h
+#define included_clib_fheap_h
+
+/* Fibonacci heaps: Fredman, M. L.; Tarjan, R. E. (1987),
+   "Fibonacci heaps and their uses in improved network optimization
+   algorithms". */
+
+#include <vppinfra/vec.h>
+
+typedef struct
+{
+ /* Node index of parent. */
+ u32 parent;
+
+ /* Node index of first child. */
+ u32 first_child;
+
+ /* Next and previous nodes in doubly linked list of siblings. */
+ u32 next_sibling, prev_sibling;
+
+  /* Key (distance) for this node. Parent always has key
+     <= keys of its children. */
+ u32 key;
+
+  /* Number of children (as opposed to descendants). */
+ u32 rank;
+
+ u32 is_marked;
+
+ /* Set to one when node is inserted; zero when deleted. */
+ u32 is_valid;
+} fheap_node_t;
+
+#define foreach_fheap_node_sibling(f,ni,first_ni,body) \
+do { \
+ u32 __fheap_foreach_first_ni = (first_ni); \
+ u32 __fheap_foreach_ni = __fheap_foreach_first_ni; \
+ u32 __fheap_foreach_next_ni; \
+ fheap_node_t * __fheap_foreach_n; \
+ if (__fheap_foreach_ni != ~0) \
+ while (1) \
+ { \
+ __fheap_foreach_n = fheap_get_node ((f), __fheap_foreach_ni); \
+ __fheap_foreach_next_ni = __fheap_foreach_n -> next_sibling; \
+ (ni) = __fheap_foreach_ni; \
+ \
+ body; \
+ \
+ /* End of circular list? */ \
+ if (__fheap_foreach_next_ni == __fheap_foreach_first_ni) \
+ break; \
+ \
+ __fheap_foreach_ni = __fheap_foreach_next_ni; \
+ \
+ } \
+} while (0)
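+
+/* Example (an illustrative sketch, not part of the original source):
+   visit every child of node pi and collect the child indices.
+
+     u32 ci, *children = 0;
+     fheap_node_t *p = fheap_get_node (f, pi);
+
+     foreach_fheap_node_sibling (f, ci, p->first_child, ({
+       vec_add1 (children, ci);
+     }));
+
+   The next sibling index is read before the body runs, so the body
+   may unlink ci from the list, as fheap_del does. */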
+
+typedef struct
+{
+ u32 min_root;
+
+ /* Vector of nodes. */
+ fheap_node_t *nodes;
+
+ u32 *root_list_by_rank;
+
+ u32 enable_validate;
+
+ u32 validate_serial;
+} fheap_t;
+
+/* Initialize empty heap. */
+always_inline void
+fheap_init (fheap_t * f, u32 n_nodes)
+{
+ fheap_node_t *save_nodes = f->nodes;
+ u32 *save_root_list = f->root_list_by_rank;
+
+ memset (f, 0, sizeof (f[0]));
+
+ f->nodes = save_nodes;
+ f->root_list_by_rank = save_root_list;
+
+ vec_validate (f->nodes, n_nodes - 1);
+ vec_reset_length (f->root_list_by_rank);
+
+ f->min_root = ~0;
+}
+
+always_inline void
+fheap_free (fheap_t * f)
+{
+ vec_free (f->nodes);
+ vec_free (f->root_list_by_rank);
+}
+
+always_inline u32
+fheap_find_min (fheap_t * f)
+{
+ return f->min_root;
+}
+
+always_inline u32
+fheap_is_empty (fheap_t * f)
+{
+ return f->min_root == ~0;
+}
+
+/* Add/delete nodes. */
+void fheap_add (fheap_t * f, u32 ni, u32 key);
+void fheap_del (fheap_t * f, u32 ni);
+
+/* Delete and return minimum. */
+u32 fheap_del_min (fheap_t * f, u32 * min_key);
+
+/* Change key value. */
+void fheap_decrease_key (fheap_t * f, u32 ni, u32 new_key);
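+
+/* Example usage (an illustrative sketch, not part of the original source):
+
+     fheap_t f = { 0 };
+     u32 min_ni, min_key;
+
+     fheap_init (&f, 3);                      // room for nodes 0..2
+     fheap_add (&f, 0, 30);
+     fheap_add (&f, 1, 20);
+     fheap_add (&f, 2, 10);
+     fheap_decrease_key (&f, 1, 5);           // node 1 is now the minimum
+     min_ni = fheap_del_min (&f, &min_key);   // min_ni == 1, min_key == 5
+     fheap_free (&f);
+*/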
+
+#endif /* included_clib_fheap_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/fifo.c b/src/vppinfra/fifo.c
new file mode 100644
index 00000000..5b4c76d1
--- /dev/null
+++ b/src/vppinfra/fifo.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/cache.h>
+#include <vppinfra/fifo.h>
+#include <vppinfra/error.h>
+#include <vppinfra/string.h>
+
+/*
+ General first in/first out queues.
+ FIFOs can have arbitrary size and type.
+ Let T be any type (i.e. char, int, struct foo, etc.).
+
+ A null fifo is initialized:
+
+ T * f = 0;
+
+ For example, typedef struct { int a, b; } T;
+
+ Elements can be added in 3 ways.
+
+ #1 1 element is added:
+ T x;
+ x.a = 10; x.b = 20;
+ fifo_add1 (f, x);
+
+ #2 n elements are added
+ T buf[10];
+ initialize buf[0] .. buf[9];
+ fifo_add (f, buf, 10);
+
+ #3 1 element is added, pointer is returned
+ T * x;
+ fifo_add2 (f, x);
+ x->a = 10;
+ x->b = 20;
+
+ Elements are removed 1 at a time:
+ T x;
+ fifo_sub1 (f, x);
+
+ fifo_free (f) frees fifo.
+*/
+
+void *
+_clib_fifo_resize (void *v_old, uword n_new_elts, uword elt_bytes)
+{
+ void *v_new, *end, *head;
+ uword n_old_elts, header_bytes;
+ uword n_copy_bytes, n_zero_bytes;
+ clib_fifo_header_t *f_new, *f_old;
+
+ n_old_elts = clib_fifo_elts (v_old);
+ n_new_elts += n_old_elts;
+ if (n_new_elts < 32)
+ n_new_elts = 32;
+ else
+ n_new_elts = max_pow2 (n_new_elts);
+
+ header_bytes = vec_header_bytes (sizeof (clib_fifo_header_t));
+
+ v_new = clib_mem_alloc_no_fail (n_new_elts * elt_bytes + header_bytes);
+ v_new += header_bytes;
+
+ f_new = clib_fifo_header (v_new);
+ f_new->head_index = 0;
+ f_new->tail_index = n_old_elts;
+ _vec_len (v_new) = n_new_elts;
+
+ /* Copy old -> new. */
+ n_copy_bytes = n_old_elts * elt_bytes;
+ if (n_copy_bytes > 0)
+ {
+ f_old = clib_fifo_header (v_old);
+ end = v_old + _vec_len (v_old) * elt_bytes;
+ head = v_old + f_old->head_index * elt_bytes;
+
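+      /* Valid data may wrap past the end of the old buffer.  If so,
+         copy the [head, end) segment first, then the wrapped remainder
+         from the start of the old buffer. */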
+ if (head + n_copy_bytes >= end)
+ {
+ uword n = end - head;
+ clib_memcpy (v_new, head, n);
+ clib_memcpy (v_new + n, v_old, n_copy_bytes - n);
+ }
+ else
+ clib_memcpy (v_new, head, n_copy_bytes);
+ }
+
+ /* Zero empty space. */
+ n_zero_bytes = (n_new_elts - n_old_elts) * elt_bytes;
+ memset (v_new + n_copy_bytes, 0, n_zero_bytes);
+
+ clib_fifo_free (v_old);
+
+ return v_new;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/fifo.h b/src/vppinfra/fifo.h
new file mode 100644
index 00000000..b0b35e25
--- /dev/null
+++ b/src/vppinfra/fifo.h
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_fifo_h
+#define included_fifo_h
+
+#include <vppinfra/cache.h>
+#include <vppinfra/error.h> /* for ASSERT */
+#include <vppinfra/vec.h>
+
+typedef struct
+{
+ /* First index of valid data in fifo. */
+ u32 head_index;
+
+ /* One beyond last index in fifo. */
+ u32 tail_index;
+} clib_fifo_header_t;
+
+always_inline clib_fifo_header_t *
+clib_fifo_header (void *f)
+{
+ return vec_header (f, sizeof (clib_fifo_header_t));
+}
+
+/* Aliases. */
+#define clib_fifo_len(v) vec_len(v)
+#define _clib_fifo_len(v) _vec_len(v)
+#define clib_fifo_end(v) vec_end(v)
+
+always_inline uword
+clib_fifo_elts (void *v)
+{
+ word l, r;
+ clib_fifo_header_t *f = clib_fifo_header (v);
+
+ if (!v)
+ return 0;
+
+ l = _clib_fifo_len (v);
+ r = (word) f->tail_index - (word) f->head_index;
+ r = r < 0 ? r + l : r;
+ ASSERT (r >= 0 && r <= l);
+ return r;
+}
+
+always_inline uword
+clib_fifo_free_elts (void *v)
+{
+ return clib_fifo_len (v) - clib_fifo_elts (v);
+}
+
+always_inline void
+clib_fifo_reset (void *v)
+{
+ clib_fifo_header_t *f = clib_fifo_header (v);
+ if (v)
+ {
+ f->head_index = f->tail_index = 0;
+ _vec_len (v) = 0;
+ }
+}
+
+/* External resize function. */
+void *_clib_fifo_resize (void *v, uword n_elts, uword elt_bytes);
+
+#define clib_fifo_resize(f,n_elts) \
+ f = _clib_fifo_resize ((f), (n_elts), sizeof ((f)[0]))
+
+always_inline void *
+_clib_fifo_validate (void *v, uword n_elts, uword elt_bytes)
+{
+ if (clib_fifo_free_elts (v) < n_elts)
+ v = _clib_fifo_resize (v, n_elts, elt_bytes);
+ return v;
+}
+
+#define clib_fifo_validate(f,n_elts) \
+ f = _clib_fifo_validate ((f), (n_elts), sizeof (f[0]))
+
+/* Advance tail pointer by N_ELTS which can be either positive or negative. */
+always_inline void *
+_clib_fifo_advance_tail (void *v, word n_elts, uword elt_bytes,
+ uword * tail_return)
+{
+ word i, l, n_free;
+ clib_fifo_header_t *f;
+
+ n_free = clib_fifo_free_elts (v);
+ if (n_free < n_elts)
+ {
+ v = _clib_fifo_resize (v, n_elts, elt_bytes);
+ n_free = clib_fifo_free_elts (v);
+ }
+
+ ASSERT (n_free >= n_elts);
+ n_free -= n_elts;
+
+ f = clib_fifo_header (v);
+ l = _clib_fifo_len (v);
+ i = f->tail_index;
+
+ if (n_free == 0)
+ {
+ /* Mark fifo full. */
+ f->tail_index = f->head_index + l;
+ }
+ else
+ {
+ word n = f->tail_index + n_elts;
+ if (n >= l)
+ n -= l;
+ else if (n < 0)
+ n += l;
+ ASSERT (n >= 0 && n < l);
+ f->tail_index = n;
+ }
+
+ ASSERT (clib_fifo_free_elts (v) == n_free);
+
+ if (tail_return)
+ *tail_return = n_elts > 0 ? i : f->tail_index;
+
+ return v;
+}
+
+#define clib_fifo_advance_tail(f,n_elts) \
+({ \
+ uword _i; \
+ (f) = _clib_fifo_advance_tail ((f), (n_elts), sizeof ((f)[0]), &_i); \
+ (f) + _i; \
+})
+
+always_inline uword
+clib_fifo_advance_head (void *v, uword n_elts)
+{
+ clib_fifo_header_t *f;
+ uword l, i, n;
+
+ ASSERT (clib_fifo_elts (v) >= n_elts);
+ f = clib_fifo_header (v);
+ l = _clib_fifo_len (v);
+
+ /* If fifo was full, restore tail pointer. */
+ if (f->tail_index == f->head_index + l)
+ f->tail_index = f->head_index;
+
+ n = i = f->head_index;
+ n += n_elts;
+ n = n >= l ? n - l : n;
+ ASSERT (n < l);
+ f->head_index = n;
+
+ return i;
+}
+
+/* Add given element to fifo. */
+#define clib_fifo_add1(f,e) \
+do { \
+ uword _i; \
+ (f) = _clib_fifo_advance_tail ((f), 1, sizeof ((f)[0]), &_i); \
+ (f)[_i] = (e); \
+} while (0)
+
+/* Add element to fifo; return pointer to new element. */
+#define clib_fifo_add2(f,p) \
+do { \
+ uword _i; \
+ (f) = _clib_fifo_advance_tail ((f), 1, sizeof ((f)[0]), &_i); \
+ (p) = (f) + _i; \
+} while (0)
+
+/* Add several elements to fifo. */
+#define clib_fifo_add(f,e,n) \
+do { \
+ uword _i, _l; word _n0, _n1; \
+ \
+ _n0 = (n); \
+ (f) = _clib_fifo_advance_tail ((f), _n0, sizeof ((f)[0]), &_i); \
+ _l = clib_fifo_len (f); \
+ _n1 = _i + _n0 - _l; \
+ _n1 = _n1 < 0 ? 0 : _n1; \
+ _n0 -= _n1; \
+ clib_memcpy ((f) + _i, (e), _n0 * sizeof ((f)[0])); \
+ if (_n1) \
+ clib_memcpy ((f) + 0, (e) + _n0, _n1 * sizeof ((f)[0])); \
+} while (0)
+
+/* Subtract element from fifo. */
+#define clib_fifo_sub1(f,e) \
+do { \
+ uword _i; \
+ ASSERT (clib_fifo_elts (f) >= 1); \
+ _i = clib_fifo_advance_head ((f), 1); \
+ (e) = (f)[_i]; \
+} while (0)
+
+#define clib_fifo_sub2(f,p) \
+do { \
+ uword _i; \
+ ASSERT (clib_fifo_elts (f) >= 1); \
+ _i = clib_fifo_advance_head ((f), 1); \
+ (p) = (f) + _i; \
+} while (0)
+
+always_inline uword
+clib_fifo_head_index (void *v)
+{
+ clib_fifo_header_t *f = clib_fifo_header (v);
+ return v ? f->head_index : 0;
+}
+
+always_inline uword
+clib_fifo_tail_index (void *v)
+{
+ clib_fifo_header_t *f = clib_fifo_header (v);
+ return v ? f->tail_index : 0;
+}
+
+#define clib_fifo_head(v) ((v) + clib_fifo_head_index (v))
+#define clib_fifo_tail(v) ((v) + clib_fifo_tail_index (v))
+
+#define clib_fifo_free(f) vec_free_h((f),sizeof(clib_fifo_header_t))
+
+always_inline uword
+clib_fifo_elt_index (void *v, uword i)
+{
+ clib_fifo_header_t *f = clib_fifo_header (v);
+ uword result = 0;
+
+ ASSERT (i < clib_fifo_elts (v));
+
+ if (v)
+ {
+ result = f->head_index + i;
+ if (result >= _vec_len (v))
+ result -= _vec_len (v);
+ }
+
+ return result;
+}
+
+#define clib_fifo_elt_at_index(v,i) ((v) + clib_fifo_elt_index (v, (i)))
+
+#define clib_fifo_foreach(v,f,body) \
+do { \
+ uword _i, _l, _n; \
+ \
+ _i = clib_fifo_head_index (f); \
+ _l = clib_fifo_len (f); \
+ _n = clib_fifo_elts (f); \
+ while (_n > 0) \
+ { \
+ (v) = (f) + _i; \
+ do { body; } while (0); \
+ _n--; \
+ _i++; \
+ _i = _i >= _l ? 0 : _i; \
+ } \
+} while (0)
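+
+/* Example (an illustrative sketch, not part of the original source):
+   a fifo of ints, drained oldest-first.
+
+     int *f = 0, *e, x;
+
+     clib_fifo_add1 (f, 1);
+     clib_fifo_add1 (f, 2);
+     clib_fifo_foreach (e, f, { fformat (stdout, "%d\n", e[0]); });
+     clib_fifo_sub1 (f, x);    // x == 1
+     clib_fifo_free (f);
+*/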
+
+#endif /* included_fifo_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/file.h b/src/vppinfra/file.h
new file mode 100644
index 00000000..69facea9
--- /dev/null
+++ b/src/vppinfra/file.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * file.h: unix file handling
+ *
+ * Copyright (c) 2008 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_clib_file_h
+#define included_clib_file_h
+
+#include <vppinfra/socket.h>
+#include <termios.h>
+
+
+struct clib_file;
+typedef clib_error_t *(clib_file_function_t) (struct clib_file * f);
+
+typedef struct clib_file
+{
+ /* Unix file descriptor from open/socket. */
+ u32 file_descriptor;
+
+ u32 flags;
+#define UNIX_FILE_DATA_AVAILABLE_TO_WRITE (1 << 0)
+#define UNIX_FILE_EVENT_EDGE_TRIGGERED (1 << 1)
+
+ /* Data available for function's use. */
+ uword private_data;
+
+ /* Functions to be called when read/write data becomes ready. */
+ clib_file_function_t *read_function, *write_function, *error_function;
+} clib_file_t;
+
+typedef enum
+{
+ UNIX_FILE_UPDATE_ADD,
+ UNIX_FILE_UPDATE_MODIFY,
+ UNIX_FILE_UPDATE_DELETE,
+} unix_file_update_type_t;
+
+typedef struct
+{
+ /* Pool of files to poll for input/output. */
+ clib_file_t *file_pool;
+
+ void (*file_update) (clib_file_t * file,
+ unix_file_update_type_t update_type);
+
+} clib_file_main_t;
+
+always_inline uword
+clib_file_add (clib_file_main_t * um, clib_file_t * template)
+{
+ clib_file_t *f;
+ pool_get (um->file_pool, f);
+ f[0] = template[0];
+ um->file_update (f, UNIX_FILE_UPDATE_ADD);
+ return f - um->file_pool;
+}
+
+always_inline void
+clib_file_del (clib_file_main_t * um, clib_file_t * f)
+{
+ um->file_update (f, UNIX_FILE_UPDATE_DELETE);
+ close (f->file_descriptor);
+ f->file_descriptor = ~0;
+ pool_put (um->file_pool, f);
+}
+
+always_inline void
+clib_file_del_by_index (clib_file_main_t * um, uword index)
+{
+ clib_file_t *uf;
+ uf = pool_elt_at_index (um->file_pool, index);
+ clib_file_del (um, uf);
+}
+
+always_inline uword
+clib_file_set_data_available_to_write (clib_file_main_t * um,
+ u32 clib_file_index,
+ uword is_available)
+{
+ clib_file_t *uf = pool_elt_at_index (um->file_pool, clib_file_index);
+ uword was_available = (uf->flags & UNIX_FILE_DATA_AVAILABLE_TO_WRITE);
+ if ((was_available != 0) != (is_available != 0))
+ {
+ uf->flags ^= UNIX_FILE_DATA_AVAILABLE_TO_WRITE;
+ um->file_update (uf, UNIX_FILE_UPDATE_MODIFY);
+ }
+ return was_available != 0;
+}
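+
+/* Example (an illustrative sketch, not part of the original source;
+   file_main, fd and my_read_ready are hypothetical names):
+
+     clib_file_t template = { 0 };
+     uword index;
+
+     template.file_descriptor = fd;
+     template.read_function = my_read_ready;
+     index = clib_file_add (&file_main, &template);
+     ...
+     clib_file_del_by_index (&file_main, index);
+
+   file_main stands for the application's clib_file_main_t; its
+   file_update callback must be set before any files are added. */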
+
+
+#endif /* included_clib_file_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/format.c b/src/vppinfra/format.c
new file mode 100644
index 00000000..70292c04
--- /dev/null
+++ b/src/vppinfra/format.c
@@ -0,0 +1,819 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*------------------------------------------------------------------
+ * format.c -- see notice below
+ *
+ * October 2003, Eliot Dresselhaus
+ *
+ * Modifications to this file Copyright (c) 2003 by cisco Systems, Inc.
+ * All rights reserved.
+ *------------------------------------------------------------------
+ */
+
+/*
+ Copyright (c) 2001, 2002, 2003, 2006 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <stdarg.h> /* va_start, etc */
+
+#ifdef CLIB_UNIX
+#include <unistd.h>
+#include <stdio.h>
+#endif
+
+#ifdef CLIB_STANDALONE
+#include <vppinfra/standalone_stdio.h>
+#endif
+
+#include <vppinfra/mem.h>
+#include <vppinfra/format.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/error.h>
+#include <vppinfra/string.h>
+#include <vppinfra/os.h> /* os_puts */
+#include <vppinfra/math.h>
+
+typedef struct
+{
+ /* Output number in this base. */
+ u8 base;
+
+  /* Number of bits of the 64 bit number to show. */
+ u8 n_bits;
+
+ /* Signed or unsigned. */
+ u8 is_signed;
+
+ /* Output digits uppercase (not lowercase) %X versus %x. */
+ u8 uppercase_digits;
+} format_integer_options_t;
+
+static u8 *format_integer (u8 * s, u64 number,
+ format_integer_options_t * options);
+static u8 *format_float (u8 * s, f64 x, uword n_digits_to_print,
+ uword output_style);
+
+typedef struct
+{
+ /* String justification: + => right, - => left, = => center. */
+ uword justify;
+
+ /* Width of string (before and after decimal point for numbers).
+ 0 => natural width. */
+ uword width[2];
+
+  /* Length modifier: 'l' => long, 'L' => long long, 0 => int. */
+ uword how_long;
+
+ /* Pad character. Defaults to space. */
+ uword pad_char;
+} format_info_t;
+
+static u8 *
+justify (u8 * s, format_info_t * fi, uword s_len_orig)
+{
+ uword i0, l0, l1;
+
+ i0 = s_len_orig;
+ l0 = i0 + fi->width[0];
+ l1 = vec_len (s);
+
+  /* If no width was specified, use the natural width of what was
+     formatted. */
+ if (l0 == i0)
+ l0 = l1;
+
+ if (l1 > l0)
+ _vec_len (s) = l0;
+ else if (l0 > l1)
+ {
+ uword n = l0 - l1;
+ uword n_left = 0, n_right = 0;
+
+ switch (fi->justify)
+ {
+ case '-':
+ n_right = n;
+ break;
+
+ case '+':
+ n_left = n;
+ break;
+
+ case '=':
+ n_right = n_left = n / 2;
+ if (n % 2)
+ n_left++;
+ break;
+ }
+ if (n_left > 0)
+ {
+ vec_insert (s, n_left, i0);
+ memset (s + i0, fi->pad_char, n_left);
+ l1 = vec_len (s);
+ }
+ if (n_right > 0)
+ {
+ vec_resize (s, n_right);
+ memset (s + l1, fi->pad_char, n_right);
+ }
+ }
+ return s;
+}
+
+static const u8 *
+do_percent (u8 ** _s, const u8 * fmt, va_list * va)
+{
+ u8 *s = *_s;
+ uword c;
+
+ const u8 *f = fmt;
+
+ format_info_t fi = {
+ .justify = '+',
+ .width = {0},
+ .pad_char = ' ',
+ .how_long = 0,
+ };
+
+ uword i;
+
+ ASSERT (f[0] == '%');
+
+ switch (c = *++f)
+ {
+ case '%':
+ /* %% => % */
+ vec_add1 (s, c);
+ f++;
+ goto done;
+
+ case '-':
+ case '+':
+ case '=':
+ fi.justify = c;
+ c = *++f;
+ break;
+ }
+
+ /* Parse width0 . width1. */
+ {
+ uword is_first_digit = 1;
+
+ fi.width[0] = fi.width[1] = 0;
+ for (i = 0; i < 2; i++)
+ {
+ if (c == '0' && i == 0 && is_first_digit)
+ fi.pad_char = '0';
+ is_first_digit = 0;
+ if (c == '*')
+ {
+ fi.width[i] = va_arg (*va, int);
+ c = *++f;
+ }
+ else
+ {
+ while (c >= '0' && c <= '9')
+ {
+ fi.width[i] = 10 * fi.width[i] + (c - '0');
+ c = *++f;
+ }
+ }
+ if (c != '.')
+ break;
+ c = *++f;
+ }
+ }
+
+ /* Parse %l* and %L* */
+ switch (c)
+ {
+ case 'w':
+ /* word format. */
+ fi.how_long = 'w';
+ c = *++f;
+ break;
+
+ case 'L':
+ case 'l':
+ fi.how_long = c;
+ c = *++f;
+ if (c == 'l' && *f == 'l')
+ {
+ fi.how_long = 'L';
+ c = *++f;
+ }
+ break;
+ }
+
+ /* Finally we are ready for format letter. */
+ if (c != 0)
+ {
+ uword s_initial_len = vec_len (s);
+ format_integer_options_t o = {
+ .is_signed = 0,
+ .base = 10,
+ .n_bits = BITS (uword),
+ .uppercase_digits = 0,
+ };
+
+ f++;
+
+ switch (c)
+ {
+ default:
+ {
+ /* Try to give a helpful error message. */
+ vec_free (s);
+ s = format (s, "**** CLIB unknown format `%%%c' ****", c);
+ goto done;
+ }
+
+ case 'c':
+ vec_add1 (s, va_arg (*va, int));
+ break;
+
+ case 'p':
+ vec_add1 (s, '0');
+ vec_add1 (s, 'x');
+
+ o.is_signed = 0;
+ o.n_bits = BITS (uword *);
+ o.base = 16;
+ o.uppercase_digits = 0;
+
+ s = format_integer (s, pointer_to_uword (va_arg (*va, void *)), &o);
+ break;
+
+ case 'x':
+ case 'X':
+ case 'u':
+ case 'd':
+ {
+ u64 number;
+
+ o.base = 10;
+ if (c == 'x' || c == 'X')
+ o.base = 16;
+ o.is_signed = c == 'd';
+ o.uppercase_digits = c == 'X';
+
+ switch (fi.how_long)
+ {
+ case 'L':
+ number = va_arg (*va, unsigned long long);
+ o.n_bits = BITS (unsigned long long);
+ break;
+
+ case 'l':
+ number = va_arg (*va, long);
+ o.n_bits = BITS (long);
+ break;
+
+ case 'w':
+ number = va_arg (*va, word);
+ o.n_bits = BITS (uword);
+ break;
+
+ default:
+ number = va_arg (*va, int);
+ o.n_bits = BITS (int);
+ break;
+ }
+
+ s = format_integer (s, number, &o);
+ }
+ break;
+
+ case 's':
+ case 'S':
+ {
+ char *cstring = va_arg (*va, char *);
+ uword len;
+
+ if (!cstring)
+ {
+ cstring = "(nil)";
+ len = 5;
+ }
+ else if (fi.width[1] != 0)
+ len = clib_min (strlen (cstring), fi.width[1]);
+ else
+ len = strlen (cstring);
+
+ /* %S => format string as C identifier (replace _ with space). */
+ if (c == 'S')
+ {
+ for (i = 0; i < len; i++)
+ vec_add1 (s, cstring[i] == '_' ? ' ' : cstring[i]);
+ }
+ else
+ vec_add (s, cstring, len);
+ }
+ break;
+
+ case 'v':
+ {
+ u8 *v = va_arg (*va, u8 *);
+ uword len;
+
+ if (fi.width[1] != 0)
+ len = clib_min (vec_len (v), fi.width[1]);
+ else
+ len = vec_len (v);
+
+ vec_add (s, v, len);
+ }
+ break;
+
+ case 'f':
+ case 'g':
+ case 'e':
+ /* Floating point. */
+ ASSERT (fi.how_long == 0 || fi.how_long == 'l');
+ s = format_float (s, va_arg (*va, double), fi.width[1], c);
+ break;
+
+ case 'U':
+ /* User defined function. */
+ {
+ typedef u8 *(user_func_t) (u8 * s, va_list * args);
+ user_func_t *u = va_arg (*va, user_func_t *);
+
+ s = (*u) (s, va);
+ }
+ break;
+ }
+
+ s = justify (s, &fi, s_initial_len);
+ }
+
+done:
+ *_s = s;
+ return f;
+}
+
+u8 *
+va_format (u8 * s, const char *fmt, va_list * va)
+{
+ const u8 *f = (u8 *) fmt, *g;
+ u8 c;
+
+ g = f;
+ while (1)
+ {
+ c = *f;
+
+ if (!c)
+ break;
+
+ if (c == '%')
+ {
+ if (f > g)
+ vec_add (s, g, f - g);
+ f = g = do_percent (&s, f, va);
+ }
+ else
+ {
+ f++;
+ }
+ }
+
+ if (f > g)
+ vec_add (s, g, f - g);
+
+ return s;
+}
+
+u8 *
+format (u8 * s, const char *fmt, ...)
+{
+ va_list va;
+ va_start (va, fmt);
+ s = va_format (s, fmt, &va);
+ va_end (va);
+ return s;
+}
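+
+/* Example (an illustrative sketch, not part of the original source):
+
+     u8 *s = 0;
+
+     s = format (s, "%d %s %.2f", 42, "fish", 3.14159);
+     // s is now a vector (not NUL terminated) holding "42 fish 3.14"
+     vec_free (s);
+*/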
+
+word
+va_fformat (FILE * f, char *fmt, va_list * va)
+{
+ word ret;
+ u8 *s;
+
+ s = va_format (0, fmt, va);
+
+#ifdef CLIB_UNIX
+ if (f)
+ {
+ ret = fwrite (s, vec_len (s), 1, f);
+ }
+ else
+#endif /* CLIB_UNIX */
+ {
+ ret = 0;
+ os_puts (s, vec_len (s), /* is_error */ 0);
+ }
+
+ vec_free (s);
+ return ret;
+}
+
+word
+fformat (FILE * f, char *fmt, ...)
+{
+ va_list va;
+ word ret;
+
+ va_start (va, fmt);
+ ret = va_fformat (f, fmt, &va);
+ va_end (va);
+
+ return (ret);
+}
+
+#ifdef CLIB_UNIX
+word
+fdformat (int fd, char *fmt, ...)
+{
+ word ret;
+ u8 *s;
+ va_list va;
+
+ va_start (va, fmt);
+ s = va_format (0, fmt, &va);
+ va_end (va);
+
+ ret = write (fd, s, vec_len (s));
+ vec_free (s);
+ return ret;
+}
+#endif
+
+/* Format integral type. */
+static u8 *
+format_integer (u8 * s, u64 number, format_integer_options_t * options)
+{
+ u64 q;
+ u32 r;
+ u8 digit_buffer[128];
+ u8 *d = digit_buffer + sizeof (digit_buffer);
+ word c, base;
+
+ if (options->is_signed && (i64) number < 0)
+ {
+ number = -number;
+ vec_add1 (s, '-');
+ }
+
+ if (options->n_bits < BITS (number))
+ number &= ((u64) 1 << options->n_bits) - 1;
+
+ base = options->base;
+
+ while (1)
+ {
+ q = number / base;
+ r = number % base;
+
+ if (r < 10 + 26 + 26)
+ {
+ if (r < 10)
+ c = '0' + r;
+ else if (r < 10 + 26)
+ c = 'a' + (r - 10);
+ else
+ c = 'A' + (r - 10 - 26);
+
+ if (options->uppercase_digits
+ && base <= 10 + 26 && c >= 'a' && c <= 'z')
+ c += 'A' - 'a';
+
+ *--d = c;
+ }
+ else /* will never happen, warning be gone */
+ {
+ *--d = '?';
+ }
+
+ if (q == 0)
+ break;
+
+ number = q;
+ }
+
+ vec_add (s, d, digit_buffer + sizeof (digit_buffer) - d);
+ return s;
+}
+
+/* Floating point formatting. */
+/* Deconstruct IEEE 64 bit number into sign exponent and fraction. */
+#define f64_down(f,sign,expon,fraction) \
+do { \
+ union { u64 u; f64 f; } _f64_down_tmp; \
+ _f64_down_tmp.f = (f); \
+ (sign) = (_f64_down_tmp.u >> 63); \
+ (expon) = ((_f64_down_tmp.u >> 52) & 0x7ff) - 1023; \
+ (fraction) = ((_f64_down_tmp.u << 12) >> 12) | ((u64) 1 << 52); \
+} while (0)
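+
+/* For example, f64_down (1.5, sign, expon, fraction) gives sign == 0,
+   expon == 0 and fraction == 3ULL << 51, i.e. 1.5 scaled by 2^52 with
+   the implicit leading one bit restored. */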
+
+/* Construct IEEE 64 bit number. */
+static f64
+f64_up (uword sign, word expon, u64 fraction)
+{
+ union
+ {
+ u64 u;
+ f64 f;
+ } tmp;
+
+ tmp.u = (u64) ((sign) != 0) << 63;
+
+ expon += 1023;
+ if (expon > 1023)
+ expon = 1023;
+ if (expon < 0)
+ expon = 0;
+ tmp.u |= (u64) expon << 52;
+
+ tmp.u |= fraction & (((u64) 1 << 52) - 1);
+
+ return tmp.f;
+}
+
+/* Returns approximate precision of number given its exponent. */
+static f64
+f64_precision (int base2_expon)
+{
+ static int n_bits = 0;
+
+ if (!n_bits)
+ {
+ /* Compute number of significant bits in floating point representation. */
+ f64 one = 0;
+ f64 small = 1;
+
+ while (one != 1)
+ {
+ small *= .5;
+ n_bits++;
+ one = 1 + small;
+ }
+ }
+
+ return f64_up (0, base2_expon - n_bits, 0);
+}
+
+/* Return x * 10^n. */
+static f64
+times_power_of_ten (f64 x, int n)
+{
+ if (n >= 0)
+ {
+ static f64 t[8] = { 1e+0, 1e+1, 1e+2, 1e+3, 1e+4, 1e+5, 1e+6, 1e+7, };
+ while (n >= 8)
+ {
+ x *= 1e+8;
+ n -= 8;
+ }
+ return x * t[n];
+ }
+ else
+ {
+ static f64 t[8] = { 1e-0, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7, };
+ while (n <= -8)
+ {
+ x *= 1e-8;
+ n += 8;
+ }
+ return x * t[-n];
+ }
+
+}
+
+/* Write x = y * 10^expon with 1 <= y < 10. */
+static f64
+normalize (f64 x, word * expon_return, f64 * prec_return)
+{
+ word expon2, expon10;
+ CLIB_UNUSED (u64 fraction);
+ CLIB_UNUSED (word sign);
+ f64 prec;
+
+ f64_down (x, sign, expon2, fraction);
+
+ expon10 =
+ .5 +
+ expon2 * .301029995663981195213738894724493 /* Log (2) / Log (10) */ ;
+
+ prec = f64_precision (expon2);
+ x = times_power_of_ten (x, -expon10);
+ prec = times_power_of_ten (prec, -expon10);
+
+ while (x < 1)
+ {
+ x *= 10;
+ prec *= 10;
+ expon10--;
+ }
+
+ while (x > 10)
+ {
+ x *= .1;
+ prec *= .1;
+ expon10++;
+ }
+
+ if (x + prec >= 10)
+ {
+ x = 1;
+ expon10++;
+ }
+
+ *expon_return = expon10;
+ *prec_return = prec;
+
+ return x;
+}
+
+static u8 *
+add_some_zeros (u8 * s, uword n_zeros)
+{
+ while (n_zeros > 0)
+ {
+ vec_add1 (s, '0');
+ n_zeros--;
+ }
+ return s;
+}
+
+/* Format a floating point number with the given number of fractional
+ digits (e.g. 1.2345 with 2 fraction digits yields "1.23") and output style. */
+static u8 *
+format_float (u8 * s, f64 x, uword n_fraction_digits, uword output_style)
+{
+ f64 prec;
+ word sign, expon, n_fraction_done, added_decimal_point;
+ /* Position of decimal point relative to where we are. */
+ word decimal_point;
+
+  /* Default number of digits to print when it's not specified. */
+ if (n_fraction_digits == ~0)
+ n_fraction_digits = 7;
+ n_fraction_done = 0;
+ decimal_point = 0;
+ added_decimal_point = 0;
+ sign = expon = 0;
+
+ /* Special case: zero. */
+ if (x == 0)
+ {
+ do_zero:
+ vec_add1 (s, '0');
+ goto done;
+ }
+
+ if (x < 0)
+ {
+ x = -x;
+ sign = 1;
+ }
+
+ /* Check for not-a-number. */
+ if (isnan (x))
+ return format (s, "%cNaN", sign ? '-' : '+');
+
+ /* Check for infinity. */
+ if (isinf (x))
+ return format (s, "%cinfinity", sign ? '-' : '+');
+
+ x = normalize (x, &expon, &prec);
+
+ /* Not enough digits to print anything: so just print 0 */
+ if ((word) - expon > (word) n_fraction_digits
+ && (output_style == 'f' || (output_style == 'g')))
+ goto do_zero;
+
+ if (sign)
+ vec_add1 (s, '-');
+
+ if (output_style == 'f'
+ || (output_style == 'g' && expon > -10 && expon < 10))
+ {
+ if (expon < 0)
+ {
+ /* Add decimal point and leading zeros. */
+ vec_add1 (s, '.');
+ n_fraction_done = clib_min (-(expon + 1), n_fraction_digits);
+ s = add_some_zeros (s, n_fraction_done);
+ decimal_point = -n_fraction_done;
+ added_decimal_point = 1;
+ }
+ else
+ decimal_point = expon + 1;
+ }
+ else
+ {
+ /* Exponential output style. */
+ decimal_point = 1;
+ output_style = 'e';
+ }
+
+ while (1)
+ {
+ uword digit;
+
+ /* Number is smaller than precision: call it zero. */
+ if (x < prec)
+ break;
+
+ digit = x;
+ x -= digit;
+ if (x + prec >= 1)
+ {
+ digit++;
+ x -= 1;
+ }
+
+ /* Round last printed digit. */
+ if (decimal_point <= 0
+ && n_fraction_done + 1 == n_fraction_digits && digit < 9)
+ digit += x >= .5;
+
+ vec_add1 (s, '0' + digit);
+
+ /* Move rightwards towards/away from decimal point. */
+ decimal_point--;
+
+ n_fraction_done += decimal_point < 0;
+ if (decimal_point <= 0 && n_fraction_done >= n_fraction_digits)
+ break;
+
+ if (decimal_point == 0 && x != 0)
+ {
+ vec_add1 (s, '.');
+ added_decimal_point = 1;
+ }
+
+ x *= 10;
+ prec *= 10;
+ }
+
+done:
+ if (decimal_point > 0)
+ {
+ s = add_some_zeros (s, decimal_point);
+ decimal_point = 0;
+ }
+
+ if (n_fraction_done < n_fraction_digits)
+ {
+ if (!added_decimal_point)
+ vec_add1 (s, '.');
+ s = add_some_zeros (s, n_fraction_digits - n_fraction_done);
+ }
+
+ if (output_style == 'e')
+ s = format (s, "e%wd", expon);
+
+ return s;
+}
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/format.h b/src/vppinfra/format.h
new file mode 100644
index 00000000..5b7023a3
--- /dev/null
+++ b/src/vppinfra/format.h
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_format_h
+#define included_format_h
+
+#include <stdarg.h>
+
+#include <vppinfra/clib.h> /* for CLIB_UNIX, etc. */
+#include <vppinfra/vec.h>
+#include <vppinfra/error.h> /* for ASSERT */
+#include <vppinfra/string.h>
+
+typedef u8 *(format_function_t) (u8 * s, va_list * args);
+
+u8 *va_format (u8 * s, const char *format, va_list * args);
+u8 *format (u8 * s, const char *format, ...);
+
+#ifdef CLIB_UNIX
+
+#include <stdio.h>
+
+#else /* ! CLIB_UNIX */
+
+/* We're not on Unix and don't have stdio.h. */
+#define FILE void
+#define stdin ((FILE *) 0)
+#define stdout ((FILE *) 1)
+#define stderr ((FILE *) 2)
+
+#endif
+
+word va_fformat (FILE * f, char *fmt, va_list * va);
+word fformat (FILE * f, char *fmt, ...);
+word fdformat (int fd, char *fmt, ...);
+
+always_inline uword
+format_get_indent (u8 * s)
+{
+ uword indent = 0;
+ u8 *nl;
+
+ if (!s)
+ return indent;
+
+ nl = vec_end (s) - 1;
+ while (nl >= s)
+ {
+ if (*nl-- == '\n')
+ break;
+ indent++;
+ }
+ return indent;
+}
+
+#define _(f) u8 * f (u8 * s, va_list * va)
+
+/* Standard user-defined formats. */
+_(format_vec32);
+_(format_vec_uword);
+_(format_ascii_bytes);
+_(format_hex_bytes);
+_(format_white_space);
+_(format_f64);
+_(format_time_interval);
+
+#ifdef CLIB_UNIX
+/* Unix specific formats. */
+_(format_address_family);
+_(format_unix_arphrd);
+_(format_unix_interface_flags);
+_(format_network_address);
+_(format_network_protocol);
+_(format_network_port);
+_(format_sockaddr);
+_(format_ip4_tos_byte);
+_(format_ip4_packet);
+_(format_icmp4_type_and_code);
+_(format_ethernet_packet);
+_(format_hostname);
+_(format_timeval);
+_(format_time_float);
+_(format_signal);
+_(format_ucontext_pc);
+#endif
+
+#undef _
+
+/* Unformat. */
+
+typedef struct _unformat_input_t
+{
+ /* Input buffer (vector). */
+ u8 *buffer;
+
+ /* Current index in input buffer. */
+ uword index;
+
+ /* Vector of buffer marks. Used to delineate pieces of the buffer
+ for error reporting and for parse recovery. */
+ uword *buffer_marks;
+
+  /* User's function (and argument) to fill the buffer when it's
+     empty. */
+ uword (*fill_buffer) (struct _unformat_input_t * i);
+
+  /* Return values for fill buffer function which indicate whether or
+     not input has been exhausted. */
+#define UNFORMAT_END_OF_INPUT (~0)
+#define UNFORMAT_MORE_INPUT 0
+
+ /* User controlled argument to fill buffer function. */
+ void *fill_buffer_arg;
+} unformat_input_t;
+
+always_inline void
+unformat_init (unformat_input_t * i,
+ uword (*fill_buffer) (unformat_input_t *),
+ void *fill_buffer_arg)
+{
+ memset (i, 0, sizeof (i[0]));
+ i->fill_buffer = fill_buffer;
+ i->fill_buffer_arg = fill_buffer_arg;
+}
+
+always_inline void
+unformat_free (unformat_input_t * i)
+{
+ vec_free (i->buffer);
+ vec_free (i->buffer_marks);
+ memset (i, 0, sizeof (i[0]));
+}
+
+always_inline uword
+unformat_check_input (unformat_input_t * i)
+{
+ /* Low level fill input function. */
+ extern uword _unformat_fill_input (unformat_input_t * i);
+
+ if (i->index >= vec_len (i->buffer) && i->index != UNFORMAT_END_OF_INPUT)
+ _unformat_fill_input (i);
+
+ return i->index;
+}
+
+/* Return true if input is exhausted */
+always_inline uword
+unformat_is_eof (unformat_input_t * input)
+{
+ return unformat_check_input (input) == UNFORMAT_END_OF_INPUT;
+}
+
+/* Return next element in input vector,
+ possibly calling fill input to get more. */
+always_inline uword
+unformat_get_input (unformat_input_t * input)
+{
+ uword i = unformat_check_input (input);
+ if (i < vec_len (input->buffer))
+ {
+ input->index = i + 1;
+ i = input->buffer[i];
+ }
+ return i;
+}
+
+/* Back up input pointer by one. */
+always_inline void
+unformat_put_input (unformat_input_t * input)
+{
+ input->index -= 1;
+}
+
+/* Peek current input character without advancing. */
+always_inline uword
+unformat_peek_input (unformat_input_t * input)
+{
+ uword c = unformat_get_input (input);
+ if (c != UNFORMAT_END_OF_INPUT)
+ unformat_put_input (input);
+ return c;
+}
+
+/* Skip current input line. */
+always_inline void
+unformat_skip_line (unformat_input_t * i)
+{
+ uword c;
+
+ while ((c = unformat_get_input (i)) != UNFORMAT_END_OF_INPUT && c != '\n')
+ ;
+}
+
+uword unformat_skip_white_space (unformat_input_t * input);
+
+/* Unformat function. */
+typedef uword (unformat_function_t) (unformat_input_t * input,
+ va_list * args);
+
+/* External functions. */
+
+/* General unformatting function with programmable input stream. */
+uword unformat (unformat_input_t * i, const char *fmt, ...);
+
+/* Call user defined parse function.
+ unformat_user (i, f, ...) is equivalent to unformat (i, "%U", f, ...) */
+uword unformat_user (unformat_input_t * input, unformat_function_t * func,
+ ...);
+
+/* Alternate version which allows for extensions. */
+uword va_unformat (unformat_input_t * i, const char *fmt, va_list * args);
+
+/* Setup for unformat of Unix style command line. */
+void unformat_init_command_line (unformat_input_t * input, char *argv[]);
+
+/* Setup for unformat of given string. */
+void unformat_init_string (unformat_input_t * input,
+ char *string, int string_len);
+
+always_inline void
+unformat_init_cstring (unformat_input_t * input, char *string)
+{
+ unformat_init_string (input, string, strlen (string));
+}
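+
+/* Example (an illustrative sketch, not part of the original source):
+
+     unformat_input_t in;
+     u32 a = 0, b = 0;
+
+     unformat_init_cstring (&in, "10 then 20");
+     if (unformat (&in, "%d then %d", &a, &b))
+       ;   // a == 10, b == 20
+     unformat_free (&in);
+*/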
+
+/* Setup for unformat of given vector string; vector will be freed by unformat_string. */
+void unformat_init_vector (unformat_input_t * input, u8 * vector_string);
+
+/* Format function for unformat input usable when an unformat error
+ has occurred. */
+u8 *format_unformat_error (u8 * s, va_list * va);
+
+#define unformat_parse_error(input) \
+ clib_error_return (0, "parse error `%U'", format_unformat_error, input)
+
+/* Print all input: not just error context. */
+u8 *format_unformat_input (u8 * s, va_list * va);
+
+/* Unformat (parse) function which reads a %s string and converts it
+   to an unformat_input_t. */
+unformat_function_t unformat_input;
+
+/* Parse a line ending with \n and return it. */
+unformat_function_t unformat_line;
+
+/* Parse a line ending with \n and return it as an unformat_input_t. */
+unformat_function_t unformat_line_input;
+
+/* Parse a token containing given set of characters. */
+unformat_function_t unformat_token;
+
+/* Parses a hex string into a vector of bytes. */
+unformat_function_t unformat_hex_string;
+
+/* Returns non-zero match if input is exhausted.
+ Useful to ensure that the entire input matches with no trailing junk. */
+unformat_function_t unformat_eof;
+
+/* Parse memory size e.g. 100, 100k, 100m, 100g. */
+unformat_function_t unformat_memory_size;
+
+/* Unparse memory size e.g. 100, 100k, 100m, 100g. */
+u8 *format_memory_size (u8 * s, va_list * va);
+
+/* Format C identifier: e.g. a_name -> "a name". */
+u8 *format_c_identifier (u8 * s, va_list * va);
+
+/* Format hexdump with both hex and printable chars - compatible with text2pcap */
+u8 *format_hexdump (u8 * s, va_list * va);
+
+/* Unix specific formats. */
+#ifdef CLIB_UNIX
+/* Setup input from Unix file. */
+void unformat_init_unix_file (unformat_input_t * input, int file_descriptor);
+
+/* Take input from Unix environment variable; returns
+   1 if the variable exists, zero otherwise. */
+uword unformat_init_unix_env (unformat_input_t * input, char *var);
+
+/* Unformat unix group id (gid) specified as integer or string */
+unformat_function_t unformat_unix_gid;
+#endif /* CLIB_UNIX */
+
+/* Test code. */
+int test_format_main (unformat_input_t * input);
+int test_unformat_main (unformat_input_t * input);
+
+/* This is not the right place for this, but putting it in vec.h
+created circular dependency problems. */
+int test_vec_main (unformat_input_t * input);
+
+#endif /* included_format_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/graph.c b/src/vppinfra/graph.c
new file mode 100644
index 00000000..98a29046
--- /dev/null
+++ b/src/vppinfra/graph.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vppinfra/graph.h>
+
+/* Set link distance, creating link if not found. */
+u32
+graph_set_link (graph_t * g, u32 src, u32 dst, u32 distance)
+{
+ graph_node_t *src_node, *dst_node;
+ graph_link_t *l;
+ u32 old_distance;
+
+ /* The following validate will not work if src or dst are on the
+ pool free list. */
+ if (src < vec_len (g->nodes))
+ ASSERT (!pool_is_free_index (g->nodes, src));
+ if (dst < vec_len (g->nodes))
+ ASSERT (!pool_is_free_index (g->nodes, dst));
+
+ /* Make new (empty) nodes to make src and dst valid. */
+ pool_validate_index (g->nodes, clib_max (src, dst));
+
+ src_node = pool_elt_at_index (g->nodes, src);
+ dst_node = pool_elt_at_index (g->nodes, dst);
+
+ l = graph_dir_get_link_to_node (&src_node->next, dst);
+ if (l)
+ {
+ old_distance = l->distance;
+ l->distance = distance;
+
+ l = graph_dir_get_link_to_node (&dst_node->prev, src);
+ l->distance = distance;
+ }
+ else
+ {
+ uword li_next, li_prev;
+
+ old_distance = ~0;
+
+ li_next = graph_dir_add_link (&src_node->next, dst, distance);
+ li_prev = graph_dir_add_link (&dst_node->prev, src, distance);
+
+ l = vec_elt_at_index (src_node->next.links, li_next);
+ l->link_to_self_index = li_prev;
+
+ l = vec_elt_at_index (dst_node->prev.links, li_prev);
+ l->link_to_self_index = li_next;
+ }
+
+ return old_distance;
+}
+
+void
+graph_del_link (graph_t * g, u32 src, u32 dst)
+{
+ graph_node_t *src_node, *dst_node;
+
+ src_node = pool_elt_at_index (g->nodes, src);
+ dst_node = pool_elt_at_index (g->nodes, dst);
+
+  graph_dir_del_link (&src_node->next, dst);
+  /* The reverse record for src -> dst lives in dst's prev list. */
+  graph_dir_del_link (&dst_node->prev, src);
+}
+
+/* Delete source node and all links from other nodes from/to source. */
+uword
+graph_del_node (graph_t * g, u32 src)
+{
+ graph_node_t *src_node, *n;
+ uword index;
+ graph_link_t *l;
+
+ src_node = pool_elt_at_index (g->nodes, src);
+
+ vec_foreach (l, src_node->next.links)
+ {
+ n = pool_elt_at_index (g->nodes, l->node_index);
+ graph_dir_del_link (&n->prev, src);
+ }
+
+ vec_foreach (l, src_node->prev.links)
+ {
+ n = pool_elt_at_index (g->nodes, l->node_index);
+ graph_dir_del_link (&n->next, src);
+ }
+
+ graph_dir_free (&src_node->next);
+ graph_dir_free (&src_node->prev);
+
+ index = src_node - g->nodes;
+ pool_put (g->nodes, src_node);
+ memset (src_node, ~0, sizeof (src_node[0]));
+
+ return index;
+}
+
+uword
+unformat_graph (unformat_input_t * input, va_list * args)
+{
+ graph_t *g = va_arg (*args, graph_t *);
+ typedef struct
+ {
+ u32 src, dst, distance;
+ } T;
+ T *links = 0, *l;
+ uword result;
+
+ while (1)
+ {
+ vec_add2 (links, l, 1);
+ if (!unformat (input, "%d%d%d", &l->src, &l->dst, &l->distance))
+ break;
+ }
+ _vec_len (links) -= 1;
+ result = vec_len (links) > 0;
+ vec_foreach (l, links)
+ {
+ graph_set_link (g, l->src, l->dst, l->distance);
+ graph_set_link (g, l->dst, l->src, l->distance);
+ }
+
+ vec_free (links);
+ return result;
+}
+
+u8 *
+format_graph_node (u8 * s, va_list * args)
+{
+ graph_t *g = va_arg (*args, graph_t *);
+ u32 node_index = va_arg (*args, u32);
+
+ if (g->format_node)
+ s = format (s, "%U", g->format_node, g, node_index);
+ else
+ s = format (s, "%d", node_index);
+
+ return s;
+}
+
+u8 *
+format_graph (u8 * s, va_list * args)
+{
+ graph_t *g = va_arg (*args, graph_t *);
+ graph_node_t *n;
+ graph_link_t *l;
+ uword indent = format_get_indent (s);
+
+ s = format (s, "graph %d nodes", pool_elts (g->nodes));
+ /* *INDENT-OFF* */
+ pool_foreach (n, g->nodes, ({
+ s = format (s, "\n%U", format_white_space, indent + 2);
+ s = format (s, "%U -> ", format_graph_node, g, n - g->nodes);
+ vec_foreach (l, n->next.links)
+ s = format (s, "%U (%d), ",
+ format_graph_node, g, l->node_index,
+ l->distance);
+ }));
+ /* *INDENT-ON* */
+
+ return s;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/graph.h b/src/vppinfra/graph.h
new file mode 100644
index 00000000..1c26118f
--- /dev/null
+++ b/src/vppinfra/graph.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef included_clib_graph_h
+#define included_clib_graph_h
+
+#include <vppinfra/format.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/pool.h>
+
+/* Generic graphs. */
+typedef struct
+{
+ /* Next node along this link. */
+ u32 node_index;
+
+ /* Other direction link index to reach back to current node. */
+ u32 link_to_self_index;
+
+ /* Distance to next node. */
+ u32 distance;
+} graph_link_t;
+
+/* Direction on graph: either next or previous. */
+typedef struct
+{
+ /* Vector of links. */
+ graph_link_t *links;
+
+ /* Hash mapping node index to link which visits this node. */
+ uword *link_index_by_node_index;
+} graph_dir_t;
+
+always_inline void
+graph_dir_free (graph_dir_t * d)
+{
+ vec_free (d->links);
+ hash_free (d->link_index_by_node_index);
+}
+
+always_inline graph_link_t *
+graph_dir_get_link_to_node (graph_dir_t * d, u32 node_index)
+{
+ uword *p = hash_get (d->link_index_by_node_index, node_index);
+ return p ? vec_elt_at_index (d->links, p[0]) : 0;
+}
+
+always_inline uword
+graph_dir_add_link (graph_dir_t * d, u32 node_index, u32 distance)
+{
+ graph_link_t *l;
+ ASSERT (!graph_dir_get_link_to_node (d, node_index));
+ vec_add2 (d->links, l, 1);
+ l->node_index = node_index;
+ l->distance = distance;
+ hash_set (d->link_index_by_node_index, node_index, l - d->links);
+ return l - d->links;
+}
+
+always_inline void
+graph_dir_del_link (graph_dir_t * d, u32 node_index)
+{
+ graph_link_t *l = graph_dir_get_link_to_node (d, node_index);
+ uword li = l - d->links;
+ uword n_links = vec_len (d->links);
+
+ ASSERT (l != 0);
+ hash_unset (d->link_index_by_node_index, node_index);
+ n_links -= 1;
+ if (li < n_links)
+ d->links[li] = d->links[n_links];
+ _vec_len (d->links) = n_links;
+}
+
+typedef struct
+{
+ /* Nodes we are connected to plus distances. */
+ graph_dir_t next, prev;
+} graph_node_t;
+
+typedef struct
+{
+ /* Pool of nodes. */
+ graph_node_t *nodes;
+
+ void *opaque;
+
+ format_function_t *format_node;
+} graph_t;
+
+/* Set link distance, creating link if not found. */
+u32 graph_set_link (graph_t * g, u32 src, u32 dst, u32 distance);
+
+always_inline void
+graph_set_bidirectional_link (graph_t * g, u32 src, u32 dst, u32 distance)
+{
+ graph_set_link (g, src, dst, distance);
+ graph_set_link (g, dst, src, distance);
+}
+
+void graph_del_link (graph_t * g, u32 src, u32 dst);
+uword graph_del_node (graph_t * g, u32 src);
+
+unformat_function_t unformat_graph;
+format_function_t format_graph;
+format_function_t format_graph_node;
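+
+/* Example (an illustrative sketch, not part of the original source):
+
+     graph_t g = { 0 };
+
+     graph_set_bidirectional_link (&g, 0, 1, 10);
+     graph_set_link (&g, 1, 2, 5);   // one-way link
+     fformat (stdout, "%U\n", format_graph, &g);
+     graph_del_node (&g, 1);
+*/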
+
+#endif /* included_clib_graph_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/hash.c b/src/vppinfra/hash.c
new file mode 100644
index 00000000..062ad882
--- /dev/null
+++ b/src/vppinfra/hash.c
@@ -0,0 +1,1095 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001-2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/hash.h>
+#include <vppinfra/error.h>
+#include <vppinfra/mem.h>
+#include <vppinfra/byte_order.h> /* for clib_arch_is_big_endian */
+
+always_inline void
+zero_pair (hash_t * h, hash_pair_t * p)
+{
+ memset (p, 0, hash_pair_bytes (h));
+}
+
+always_inline void
+init_pair (hash_t * h, hash_pair_t * p)
+{
+ memset (p->value, ~0, hash_value_bytes (h));
+}
+
+always_inline hash_pair_union_t *
+get_pair (void *v, uword i)
+{
+ hash_t *h = hash_header (v);
+ hash_pair_t *p;
+ ASSERT (i < vec_len (v));
+ p = v;
+ p += i << h->log2_pair_size;
+ return (hash_pair_union_t *) p;
+}
+
+always_inline void
+set_is_user (void *v, uword i, uword is_user)
+{
+ hash_t *h = hash_header (v);
+ uword i0 = i / BITS (h->is_user[0]);
+ uword i1 = (uword) 1 << (i % BITS (h->is_user[0]));
+ if (is_user)
+ h->is_user[i0] |= i1;
+ else
+ h->is_user[i0] &= ~i1;
+}
+
+static u8 *hash_format_pair_default (u8 * s, va_list * args);
+
+#if uword_bits == 64
+
+static inline u64
+zap64 (u64 x, word n)
+{
+#define _(n) (((u64) 1 << (u64) (8*(n))) - (u64) 1)
+ static u64 masks_little_endian[] = {
+ 0, _(1), _(2), _(3), _(4), _(5), _(6), _(7),
+ };
+ static u64 masks_big_endian[] = {
+ 0, ~_(7), ~_(6), ~_(5), ~_(4), ~_(3), ~_(2), ~_(1),
+ };
+#undef _
+ if (clib_arch_is_big_endian)
+ return x & masks_big_endian[n];
+ else
+ return x & masks_little_endian[n];
+}
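+
+/* zap64 keeps only the N lowest-addressed bytes of a partially valid
+   word (the low-order bytes on little-endian, the high-order bytes on
+   big-endian) and clears the rest, so trailing garbage past the end of
+   the key never affects the hash.  For example, on little-endian
+   zap64 (x, 2) == (x & 0xffff). */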
+
+static inline u64
+hash_memory64 (void *p, word n_bytes, u64 state)
+{
+ u64 *q = p;
+ u64 a, b, c, n;
+
+ a = b = 0x9e3779b97f4a7c13LL;
+ c = state;
+ n = n_bytes;
+
+ while (n >= 3 * sizeof (u64))
+ {
+ a += clib_mem_unaligned (q + 0, u64);
+ b += clib_mem_unaligned (q + 1, u64);
+ c += clib_mem_unaligned (q + 2, u64);
+ hash_mix64 (a, b, c);
+ n -= 3 * sizeof (u64);
+ q += 3;
+ }
+
+ c += n_bytes;
+ switch (n / sizeof (u64))
+ {
+ case 2:
+ a += clib_mem_unaligned (q + 0, u64);
+ b += clib_mem_unaligned (q + 1, u64);
+ if (n % sizeof (u64))
+ c += zap64 (clib_mem_unaligned (q + 2, u64), n % sizeof (u64)) << 8;
+ break;
+
+ case 1:
+ a += clib_mem_unaligned (q + 0, u64);
+ if (n % sizeof (u64))
+ b += zap64 (clib_mem_unaligned (q + 1, u64), n % sizeof (u64));
+ break;
+
+ case 0:
+ if (n % sizeof (u64))
+ a += zap64 (clib_mem_unaligned (q + 0, u64), n % sizeof (u64));
+ break;
+ }
+
+ hash_mix64 (a, b, c);
+
+ return c;
+}
+
+#else /* if uword_bits == 64 */
+
+static inline u32
+zap32 (u32 x, word n)
+{
+#define _(n) (((u32) 1 << (u32) (8*(n))) - (u32) 1)
+ static u32 masks_little_endian[] = {
+ 0, _(1), _(2), _(3),
+ };
+ static u32 masks_big_endian[] = {
+ 0, ~_(3), ~_(2), ~_(1),
+ };
+#undef _
+ if (clib_arch_is_big_endian)
+ return x & masks_big_endian[n];
+ else
+ return x & masks_little_endian[n];
+}
+
+static inline u32
+hash_memory32 (void *p, word n_bytes, u32 state)
+{
+ u32 *q = p;
+ u32 a, b, c, n;
+
+ a = b = 0x9e3779b9;
+ c = state;
+ n = n_bytes;
+
+ while (n >= 3 * sizeof (u32))
+ {
+ a += clib_mem_unaligned (q + 0, u32);
+ b += clib_mem_unaligned (q + 1, u32);
+ c += clib_mem_unaligned (q + 2, u32);
+ hash_mix32 (a, b, c);
+ n -= 3 * sizeof (u32);
+ q += 3;
+ }
+
+ c += n_bytes;
+ switch (n / sizeof (u32))
+ {
+ case 2:
+ a += clib_mem_unaligned (q + 0, u32);
+ b += clib_mem_unaligned (q + 1, u32);
+ if (n % sizeof (u32))
+ c += zap32 (clib_mem_unaligned (q + 2, u32), n % sizeof (u32)) << 8;
+ break;
+
+ case 1:
+ a += clib_mem_unaligned (q + 0, u32);
+ if (n % sizeof (u32))
+ b += zap32 (clib_mem_unaligned (q + 1, u32), n % sizeof (u32));
+ break;
+
+ case 0:
+ if (n % sizeof (u32))
+ a += zap32 (clib_mem_unaligned (q + 0, u32), n % sizeof (u32));
+ break;
+ }
+
+ hash_mix32 (a, b, c);
+
+ return c;
+}
+#endif
+
+uword
+hash_memory (void *p, word n_bytes, uword state)
+{
+ uword *q = p;
+
+#if uword_bits == 64
+ return hash_memory64 (q, n_bytes, state);
+#else
+ return hash_memory32 (q, n_bytes, state);
+#endif
+}
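+
+/* Example (an illustrative sketch, not part of the original source):
+
+     char key[] = "some key";
+     uword h = hash_memory (key, sizeof (key) - 1, 0);
+
+   The state argument seeds the hash; hashing the same bytes with
+   different states yields independent results. */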
+
+#if uword_bits == 64
+always_inline uword
+hash_uword (uword x)
+{
+ u64 a, b, c;
+
+ a = b = 0x9e3779b97f4a7c13LL;
+ c = 0;
+ a += x;
+ hash_mix64 (a, b, c);
+ return c;
+}
+#else
+always_inline uword
+hash_uword (uword x)
+{
+ u32 a, b, c;
+
+ a = b = 0x9e3779b9;
+ c = 0;
+ a += x;
+ hash_mix32 (a, b, c);
+ return c;
+}
+#endif
+
+/* Call the key sum function. The hash code is the sum function value
+ modulo the power-of-2 size of the hash table. */
+always_inline uword
+key_sum (hash_t * h, uword key)
+{
+ uword sum;
+ switch (pointer_to_uword ((void *) h->key_sum))
+ {
+ case KEY_FUNC_NONE:
+ sum = hash_uword (key);
+ break;
+
+ case KEY_FUNC_POINTER_UWORD:
+ sum = hash_uword (*uword_to_pointer (key, uword *));
+ break;
+
+ case KEY_FUNC_POINTER_U32:
+ sum = hash_uword (*uword_to_pointer (key, u32 *));
+ break;
+
+ case KEY_FUNC_STRING:
+ sum = string_key_sum (h, key);
+ break;
+
+ default:
+ sum = h->key_sum (h, key);
+ break;
+ }
+
+ return sum;
+}
+
+always_inline uword
+key_equal1 (hash_t * h, uword key1, uword key2, uword e)
+{
+ switch (pointer_to_uword ((void *) h->key_equal))
+ {
+ case KEY_FUNC_NONE:
+ break;
+
+ case KEY_FUNC_POINTER_UWORD:
+ e = (*uword_to_pointer (key1, uword *)
+ == *uword_to_pointer (key2, uword *));
+ break;
+
+ case KEY_FUNC_POINTER_U32:
+ e = *uword_to_pointer (key1, u32 *) == *uword_to_pointer (key2, u32 *);
+ break;
+
+ case KEY_FUNC_STRING:
+ e = string_key_equal (h, key1, key2);
+ break;
+
+ default:
+ e = h->key_equal (h, key1, key2);
+ break;
+ }
+ return e;
+}
+
+/* Compares two keys: returns 1 if equal, 0 if not. */
+always_inline uword
+key_equal (hash_t * h, uword key1, uword key2)
+{
+ uword e = key1 == key2;
+ if (CLIB_DEBUG > 0 && key1 == key2)
+ ASSERT (key_equal1 (h, key1, key2, e));
+ if (!e)
+ e = key_equal1 (h, key1, key2, e);
+ return e;
+}
+
+static hash_pair_union_t *
+get_indirect (void *v, hash_pair_indirect_t * pi, uword key)
+{
+ hash_t *h = hash_header (v);
+ hash_pair_t *p0, *p1;
+
+ p0 = p1 = pi->pairs;
+ if (h->log2_pair_size > 0)
+ p1 = hash_forward (h, p0, indirect_pair_get_len (pi));
+ else
+ p1 += vec_len (p0);
+
+ while (p0 < p1)
+ {
+ if (key_equal (h, p0->key, key))
+ return (hash_pair_union_t *) p0;
+ p0 = hash_forward1 (h, p0);
+ }
+
+ return (hash_pair_union_t *) 0;
+}
+
+static hash_pair_union_t *
+set_indirect_is_user (void *v, uword i, hash_pair_union_t * p, uword key)
+{
+ hash_t *h = hash_header (v);
+ hash_pair_t *q;
+ hash_pair_indirect_t *pi = &p->indirect;
+ uword log2_bytes = 0;
+
+ if (h->log2_pair_size == 0)
+ q = vec_new (hash_pair_t, 2);
+ else
+ {
+ log2_bytes = 1 + hash_pair_log2_bytes (h);
+ q = clib_mem_alloc (1ULL << log2_bytes);
+ }
+ clib_memcpy (q, &p->direct, hash_pair_bytes (h));
+
+ pi->pairs = q;
+ if (h->log2_pair_size > 0)
+ indirect_pair_set (pi, log2_bytes, 2);
+
+ set_is_user (v, i, 0);
+
+ /* First element is used by existing pair, second will be used by caller. */
+ q = hash_forward1 (h, q);
+ q->key = key;
+ init_pair (h, q);
+ return (hash_pair_union_t *) q;
+}
+
+static hash_pair_union_t *
+set_indirect (void *v, hash_pair_indirect_t * pi, uword key,
+ uword * found_key)
+{
+ hash_t *h = hash_header (v);
+ hash_pair_t *new_pair;
+ hash_pair_union_t *q;
+
+ q = get_indirect (v, pi, key);
+ if (q)
+ {
+ *found_key = 1;
+ return q;
+ }
+
+ if (h->log2_pair_size == 0)
+ vec_add2 (pi->pairs, new_pair, 1);
+ else
+ {
+ uword len, new_len, log2_bytes;
+
+ len = indirect_pair_get_len (pi);
+ log2_bytes = indirect_pair_get_log2_bytes (pi);
+
+ new_len = len + 1;
+ if (new_len * hash_pair_bytes (h) > (1ULL << log2_bytes))
+ {
+ pi->pairs = clib_mem_realloc (pi->pairs,
+ 1ULL << (log2_bytes + 1),
+ 1ULL << log2_bytes);
+ log2_bytes++;
+ }
+
+ indirect_pair_set (pi, log2_bytes, new_len);
+ new_pair = pi->pairs + (len << h->log2_pair_size);
+ }
+ new_pair->key = key;
+ init_pair (h, new_pair);
+ *found_key = 0;
+ return (hash_pair_union_t *) new_pair;
+}
+
+static void
+unset_indirect (void *v, uword i, hash_pair_t * q)
+{
+ hash_t *h = hash_header (v);
+ hash_pair_union_t *p = get_pair (v, i);
+ hash_pair_t *e;
+ hash_pair_indirect_t *pi = &p->indirect;
+ uword len, is_vec;
+
+ is_vec = h->log2_pair_size == 0;
+
+ ASSERT (!hash_is_user (v, i));
+ len = is_vec ? vec_len (pi->pairs) : indirect_pair_get_len (pi);
+ e = hash_forward (h, pi->pairs, len - 1);
+ ASSERT (q >= pi->pairs && q <= e);
+
+ /* We have two or fewer pairs and we are deleting one pair.
+ Make the indirect pointer direct and free the indirect memory. */
+ if (len <= 2)
+ {
+ hash_pair_t *r = pi->pairs;
+
+ if (len == 2)
+ {
+ clib_memcpy (p, q == r ? hash_forward1 (h, r) : r,
+ hash_pair_bytes (h));
+ set_is_user (v, i, 1);
+ }
+ else
+ zero_pair (h, &p->direct);
+
+ if (is_vec)
+ vec_free (r);
+ else if (r)
+ clib_mem_free (r);
+ }
+ else
+ {
+ /* If deleting a pair we need to keep non-null pairs together. */
+ if (q < e)
+ clib_memcpy (q, e, hash_pair_bytes (h));
+ else
+ zero_pair (h, q);
+ if (is_vec)
+ _vec_len (pi->pairs) -= 1;
+ else
+ indirect_pair_set (pi, indirect_pair_get_log2_bytes (pi), len - 1);
+ }
+}
+
+enum lookup_opcode
+{
+ GET = 1,
+ SET = 2,
+ UNSET = 3,
+};
+
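+/* Single worker for get/set/unset: hashes KEY to a bucket, then
+ dispatches on whether that bucket holds a direct pair or an indirect
+ pair array. Returns the pair operated on, or 0 (e.g. on a GET miss). */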
+static hash_pair_t *
+lookup (void *v, uword key, enum lookup_opcode op,
+ void *new_value, void *old_value)
+{
+ hash_t *h = hash_header (v);
+ hash_pair_union_t *p = 0;
+ uword found_key = 0;
+ uword i;
+
+ if (!v)
+ return 0;
+
+ i = key_sum (h, key) & (_vec_len (v) - 1);
+ p = get_pair (v, i);
+
+ if (hash_is_user (v, i))
+ {
+ found_key = key_equal (h, p->direct.key, key);
+ if (found_key)
+ {
+ if (op == UNSET)
+ {
+ set_is_user (v, i, 0);
+ if (old_value)
+ clib_memcpy (old_value, p->direct.value,
+ hash_value_bytes (h));
+ zero_pair (h, &p->direct);
+ }
+ }
+ else
+ {
+ if (op == SET)
+ p = set_indirect_is_user (v, i, p, key);
+ else
+ p = 0;
+ }
+ }
+ else
+ {
+ hash_pair_indirect_t *pi = &p->indirect;
+
+ if (op == SET)
+ {
+ if (!pi->pairs)
+ {
+ p->direct.key = key;
+ set_is_user (v, i, 1);
+ }
+ else
+ p = set_indirect (v, pi, key, &found_key);
+ }
+ else
+ {
+ p = get_indirect (v, pi, key);
+ found_key = p != 0;
+ if (found_key && op == UNSET)
+ {
+ if (old_value)
+ clib_memcpy (old_value, &p->direct.value,
+ hash_value_bytes (h));
+
+ unset_indirect (v, i, &p->direct);
+
+ /* Nullify p (since it's just been deleted).
+ Otherwise we might be tempted to play with it. */
+ p = 0;
+ }
+ }
+ }
+
+ if (op == SET && p != 0)
+ {
+ /* Save away old value for caller. */
+ if (old_value && found_key)
+ clib_memcpy (old_value, &p->direct.value, hash_value_bytes (h));
+ clib_memcpy (&p->direct.value, new_value, hash_value_bytes (h));
+ }
+
+ if (op == SET)
+ h->elts += !found_key;
+ if (op == UNSET)
+ h->elts -= found_key;
+
+ return &p->direct;
+}
+
+/* Fetch value of key. */
+uword *
+_hash_get (void *v, uword key)
+{
+ hash_t *h = hash_header (v);
+ hash_pair_t *p;
+
+ /* Don't even search the table if it's empty. */
+ if (!v || h->elts == 0)
+ return 0;
+
+ p = lookup (v, key, GET, 0, 0);
+ if (!p)
+ return 0;
+ if (h->log2_pair_size == 0)
+ return &p->key;
+ else
+ return &p->value[0];
+}
+
+hash_pair_t *
+_hash_get_pair (void *v, uword key)
+{
+ return lookup (v, key, GET, 0, 0);
+}
+
+hash_pair_t *
+hash_next (void *v, hash_next_t * hn)
+{
+ hash_t *h = hash_header (v);
+ hash_pair_t *p;
+
+ while (1)
+ {
+ if (hn->i == 0 && hn->j == 0)
+ {
+ /* Save flags. */
+ hn->f = h->flags;
+
+ /* Prevent others from re-sizing hash table. */
+ h->flags |=
+ (HASH_FLAG_NO_AUTO_GROW
+ | HASH_FLAG_NO_AUTO_SHRINK | HASH_FLAG_HASH_NEXT_IN_PROGRESS);
+ }
+ else if (hn->i >= hash_capacity (v))
+ {
+ /* Restore flags. */
+ h->flags = hn->f;
+ memset (hn, 0, sizeof (hn[0]));
+ return 0;
+ }
+
+ p = hash_forward (h, v, hn->i);
+ if (hash_is_user (v, hn->i))
+ {
+ hn->i++;
+ return p;
+ }
+ else
+ {
+ hash_pair_indirect_t *pi = (void *) p;
+ uword n;
+
+ if (h->log2_pair_size > 0)
+ n = indirect_pair_get_len (pi);
+ else
+ n = vec_len (pi->pairs);
+
+ if (hn->j >= n)
+ {
+ hn->i++;
+ hn->j = 0;
+ }
+ else
+ return hash_forward (h, pi->pairs, hn->j++);
+ }
+ }
+}
+
+/* Remove key from table. */
+void *
+_hash_unset (void *v, uword key, void *old_value)
+{
+ hash_t *h;
+
+ if (!v)
+ return v;
+
+ (void) lookup (v, key, UNSET, 0, old_value);
+
+ h = hash_header (v);
+ if (!(h->flags & HASH_FLAG_NO_AUTO_SHRINK))
+ {
+ /* Shrink by half when the table is less than 1/4 full. */
+ if (h->elts > 32 && 4 * (h->elts + 1) < vec_len (v))
+ v = hash_resize (v, vec_len (v) / 2);
+ }
+
+ return v;
+}
+
+void *
+_hash_create (uword elts, hash_t * h_user)
+{
+ hash_t *h;
+ uword log2_pair_size;
+ void *v;
+
+ /* Size of hash is a power of 2, >= ELTS and no smaller than the
+ number of bits in an is_user bitmap element. */
+ elts = clib_max (elts, BITS (h->is_user[0]));
+ elts = 1ULL << max_log2 (elts);
+
+ log2_pair_size = 1;
+ if (h_user)
+ log2_pair_size = h_user->log2_pair_size;
+
+ v = _vec_resize (0,
+ /* vec len: */ elts,
+ /* data bytes: */
+ (elts << log2_pair_size) * sizeof (hash_pair_t),
+ /* header bytes: */
+ sizeof (h[0]) +
+ (elts / BITS (h->is_user[0])) * sizeof (h->is_user[0]),
+ /* alignment */ sizeof (hash_pair_t));
+ h = hash_header (v);
+
+ if (h_user)
+ h[0] = h_user[0];
+
+ h->log2_pair_size = log2_pair_size;
+ h->elts = 0;
+
+ /* Default flags: never shrink hash tables.
+ Shrinking tables can cause "jackpot" cases. */
+ if (!h_user)
+ h->flags = HASH_FLAG_NO_AUTO_SHRINK;
+
+ if (!h->format_pair)
+ {
+ h->format_pair = hash_format_pair_default;
+ h->format_pair_arg = 0;
+ }
+
+ return v;
+}
+
+void *
+_hash_free (void *v)
+{
+ hash_t *h = hash_header (v);
+ hash_pair_union_t *p;
+ uword i;
+
+ if (!v)
+ return v;
+
+ /* We zero all freed memory in case the user is tempted to use it. */
+ for (i = 0; i < hash_capacity (v); i++)
+ {
+ if (hash_is_user (v, i))
+ continue;
+ p = get_pair (v, i);
+ if (h->log2_pair_size == 0)
+ vec_free (p->indirect.pairs);
+ else if (p->indirect.pairs)
+ clib_mem_free (p->indirect.pairs);
+ }
+
+ vec_free_header (h);
+
+ return 0;
+}
+
+static void *
+hash_resize_internal (void *old, uword new_size, uword free_old)
+{
+ void *new;
+ hash_pair_t *p;
+
+ new = 0;
+ if (new_size > 0)
+ {
+ hash_t *h = old ? hash_header (old) : 0;
+ new = _hash_create (new_size, h);
+ /* *INDENT-OFF* */
+ hash_foreach_pair (p, old, {
+ new = _hash_set3 (new, p->key, &p->value[0], 0);
+ });
+ /* *INDENT-ON* */
+ }
+
+ if (free_old)
+ hash_free (old);
+ return new;
+}
+
+void *
+hash_resize (void *old, uword new_size)
+{
+ return hash_resize_internal (old, new_size, 1);
+}
+
+void *
+hash_dup (void *old)
+{
+ return hash_resize_internal (old, vec_len (old), 0);
+}
+
+void *
+_hash_set3 (void *v, uword key, void *value, void *old_value)
+{
+ hash_t *h;
+
+ if (!v)
+ v = hash_create (0, sizeof (uword));
+
+ h = hash_header (v);
+ (void) lookup (v, key, SET, value, old_value);
+
+ if (!(h->flags & HASH_FLAG_NO_AUTO_GROW))
+ {
+ /* Double in size when the table is more than 3/4 full. */
+ if (4 * (h->elts + 1) > 3 * vec_len (v))
+ v = hash_resize (v, 2 * vec_len (v));
+ }
+
+ return v;
+}
+
+uword
+vec_key_sum (hash_t * h, uword key)
+{
+ void *v = uword_to_pointer (key, void *);
+ return hash_memory (v, vec_len (v) * h->user, 0);
+}
+
+uword
+vec_key_equal (hash_t * h, uword key1, uword key2)
+{
+ void *v1 = uword_to_pointer (key1, void *);
+ void *v2 = uword_to_pointer (key2, void *);
+ uword l1 = vec_len (v1);
+ uword l2 = vec_len (v2);
+ return l1 == l2 && 0 == memcmp (v1, v2, l1 * h->user);
+}
+
+u8 *
+vec_key_format_pair (u8 * s, va_list * args)
+{
+ void *CLIB_UNUSED (user_arg) = va_arg (*args, void *);
+ void *v = va_arg (*args, void *);
+ hash_pair_t *p = va_arg (*args, hash_pair_t *);
+ hash_t *h = hash_header (v);
+ void *u = uword_to_pointer (p->key, void *);
+ int i;
+
+ switch (h->user)
+ {
+ case 1:
+ s = format (s, "%v", u);
+ break;
+
+ case 2:
+ {
+ u16 *w = u;
+ for (i = 0; i < vec_len (w); i++)
+ s = format (s, "0x%x, ", w[i]);
+ break;
+ }
+
+ case 4:
+ {
+ u32 *w = u;
+ for (i = 0; i < vec_len (w); i++)
+ s = format (s, "0x%x, ", w[i]);
+ break;
+ }
+
+ case 8:
+ {
+ u64 *w = u;
+ for (i = 0; i < vec_len (w); i++)
+ s = format (s, "0x%Lx, ", w[i]);
+ break;
+ }
+
+ default:
+ s = format (s, "0x%U", format_hex_bytes, u, vec_len (u) * h->user);
+ break;
+ }
+
+ if (hash_value_bytes (h) > 0)
+ s = format (s, " -> 0x%wx", p->value[0]);
+
+ return s;
+}
+
+uword
+mem_key_sum (hash_t * h, uword key)
+{
+ uword *v = uword_to_pointer (key, void *);
+ return hash_memory (v, h->user, 0);
+}
+
+uword
+mem_key_equal (hash_t * h, uword key1, uword key2)
+{
+ void *v1 = uword_to_pointer (key1, void *);
+ void *v2 = uword_to_pointer (key2, void *);
+ return v1 && v2 && 0 == memcmp (v1, v2, h->user);
+}
+
+uword
+string_key_sum (hash_t * h, uword key)
+{
+ char *v = uword_to_pointer (key, char *);
+ return hash_memory (v, strlen (v), 0);
+}
+
+uword
+string_key_equal (hash_t * h, uword key1, uword key2)
+{
+ void *v1 = uword_to_pointer (key1, void *);
+ void *v2 = uword_to_pointer (key2, void *);
+ return v1 && v2 && 0 == strcmp (v1, v2);
+}
+
+u8 *
+string_key_format_pair (u8 * s, va_list * args)
+{
+ void *CLIB_UNUSED (user_arg) = va_arg (*args, void *);
+ void *v = va_arg (*args, void *);
+ hash_pair_t *p = va_arg (*args, hash_pair_t *);
+ hash_t *h = hash_header (v);
+ void *u = uword_to_pointer (p->key, void *);
+
+ s = format (s, "%s", u);
+
+ if (hash_value_bytes (h) > 0)
+ s = format (s, " -> 0x%8U", format_hex_bytes, &p->value[0],
+ hash_value_bytes (h));
+
+ return s;
+}
+
+static u8 *
+hash_format_pair_default (u8 * s, va_list * args)
+{
+ void *CLIB_UNUSED (user_arg) = va_arg (*args, void *);
+ void *v = va_arg (*args, void *);
+ hash_pair_t *p = va_arg (*args, hash_pair_t *);
+ hash_t *h = hash_header (v);
+
+ s = format (s, "0x%08x", p->key);
+ if (hash_value_bytes (h) > 0)
+ s = format (s, " -> 0x%8U", format_hex_bytes, &p->value[0],
+ hash_value_bytes (h));
+ return s;
+}
+
+uword
+hash_bytes (void *v)
+{
+ uword i, bytes;
+ hash_t *h = hash_header (v);
+
+ if (!v)
+ return 0;
+
+ bytes = vec_capacity (v, hash_header_bytes (v));
+
+ for (i = 0; i < hash_capacity (v); i++)
+ {
+ if (!hash_is_user (v, i))
+ {
+ hash_pair_union_t *p = get_pair (v, i);
+ if (h->log2_pair_size > 0)
+ bytes += 1 << indirect_pair_get_log2_bytes (&p->indirect);
+ else
+ bytes += vec_capacity (p->indirect.pairs, 0);
+ }
+ }
+ return bytes;
+}
+
+u8 *
+format_hash (u8 * s, va_list * va)
+{
+ void *v = va_arg (*va, void *);
+ int verbose = va_arg (*va, int);
+ hash_pair_t *p;
+ hash_t *h = hash_header (v);
+ uword i;
+
+ s = format (s, "hash %p, %wd elts, capacity %wd, %wd bytes used,\n",
+ v, hash_elts (v), hash_capacity (v), hash_bytes (v));
+
+ {
+ uword *occupancy = 0;
+
+ /* Count number of buckets with each occupancy. */
+ for (i = 0; i < hash_capacity (v); i++)
+ {
+ uword j;
+
+ if (hash_is_user (v, i))
+ {
+ j = 1;
+ }
+ else
+ {
+ hash_pair_union_t *p = get_pair (v, i);
+ if (h->log2_pair_size > 0)
+ j = indirect_pair_get_len (&p->indirect);
+ else
+ j = vec_len (p->indirect.pairs);
+ }
+
+ vec_validate (occupancy, j);
+ occupancy[j]++;
+ }
+
+ s = format (s, " profile ");
+ for (i = 0; i < vec_len (occupancy); i++)
+ s = format (s, "%wd%c", occupancy[i],
+ i + 1 == vec_len (occupancy) ? '\n' : ' ');
+
+ s = format (s, " lookup # of compares: ");
+ for (i = 1; i < vec_len (occupancy); i++)
+ s = format (s, "%wd: .%03d%c", i,
+ (1000 * i * occupancy[i]) / hash_elts (v),
+ i + 1 == vec_len (occupancy) ? '\n' : ' ');
+
+ vec_free (occupancy);
+ }
+
+ if (verbose)
+ {
+ /* *INDENT-OFF* */
+ hash_foreach_pair (p, v, {
+ s = format (s, " %U\n", h->format_pair, h->format_pair_arg, v, p);
+ });
+ /* *INDENT-ON* */
+ }
+
+ return s;
+}
+
+static uword
+unformat_hash_string_internal (unformat_input_t * input,
+ va_list * va, int is_vec)
+{
+ uword *hash = va_arg (*va, uword *);
+ int *result = va_arg (*va, int *);
+ u8 *string = 0;
+ uword *p;
+
+ if (!unformat (input, is_vec ? "%v%_" : "%s%_", &string))
+ return 0;
+
+ p = hash_get_mem (hash, string);
+ if (p)
+ *result = *p;
+
+ vec_free (string);
+ return p ? 1 : 0;
+}
+
+uword
+unformat_hash_vec_string (unformat_input_t * input, va_list * va)
+{
+ return unformat_hash_string_internal (input, va, /* is_vec */ 1);
+}
+
+uword
+unformat_hash_string (unformat_input_t * input, va_list * va)
+{
+ return unformat_hash_string_internal (input, va, /* is_vec */ 0);
+}
+
+clib_error_t *
+hash_validate (void *v)
+{
+ hash_t *h = hash_header (v);
+ uword i, j;
+ uword *keys = 0;
+ clib_error_t *error = 0;
+
+#define CHECK(x) if ((error = ERROR_ASSERT (x))) goto done;
+
+ for (i = 0; i < hash_capacity (v); i++)
+ {
+ hash_pair_union_t *pu = get_pair (v, i);
+
+ if (hash_is_user (v, i))
+ {
+ CHECK (pu->direct.key != 0);
+ vec_add1 (keys, pu->direct.key);
+ }
+ else
+ {
+ hash_pair_t *p;
+ hash_pair_indirect_t *pi = &pu->indirect;
+ uword n;
+
+ n = h->log2_pair_size > 0
+ ? indirect_pair_get_len (pi) : vec_len (pi->pairs);
+
+ for (p = pi->pairs; n-- > 0; p = hash_forward1 (h, p))
+ {
+ /* Assert key uniqueness. */
+ for (j = 0; j < vec_len (keys); j++)
+ CHECK (keys[j] != p->key);
+ vec_add1 (keys, p->key);
+ }
+ }
+ }
+
+ CHECK (vec_len (keys) == h->elts);
+
+ vec_free (keys);
+done:
+ return error;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/hash.h b/src/vppinfra/hash.h
new file mode 100644
index 00000000..4db5a576
--- /dev/null
+++ b/src/vppinfra/hash.h
@@ -0,0 +1,694 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001-2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_hash_h
+#define included_hash_h
+
+#include <vppinfra/error.h>
+#include <vppinfra/format.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/vector.h>
+
+struct hash_header;
+
+typedef uword (hash_key_sum_function_t) (struct hash_header *, uword key);
+typedef uword (hash_key_equal_function_t)
+ (struct hash_header *, uword key1, uword key2);
+
+/* Vector header for hash tables. */
+typedef struct hash_header
+{
+ /* Number of elements in hash table. */
+ uword elts;
+
+ /* Flags as follows. */
+ u32 flags;
+
+ /* Set if user does not want table to auto-resize when sufficiently full. */
+#define HASH_FLAG_NO_AUTO_GROW (1 << 0)
+ /* Set if user does not want table to auto-resize when sufficiently empty. */
+#define HASH_FLAG_NO_AUTO_SHRINK (1 << 1)
+ /* Set when hash_next is in the process of iterating through this hash table. */
+#define HASH_FLAG_HASH_NEXT_IN_PROGRESS (1 << 2)
+
+ u32 log2_pair_size;
+
+ /* Function to compute the "sum" of a hash key.
+ The hash function is this sum modulo the power-of-2 size of
+ the hash table (vec_len (v)). */
+ hash_key_sum_function_t *key_sum;
+
+ /* Special values for key_sum "function". */
+#define KEY_FUNC_NONE (0) /*< sum = key */
+#define KEY_FUNC_POINTER_UWORD (1) /*< sum = *(uword *) key */
+#define KEY_FUNC_POINTER_U32 (2) /*< sum = *(u32 *) key */
+#define KEY_FUNC_STRING (3) /*< sum = string_key_sum, etc. */
+
+ /* key comparison function */
+ hash_key_equal_function_t *key_equal;
+
+ /* Hook for user's data. Used to parameterize sum/equal functions. */
+ any user;
+
+ /* Format a (k,v) pair */
+ format_function_t *format_pair;
+
+ /* Format function arg */
+ void *format_pair_arg;
+
+ /* Bit i is set if pair i is a user object (as opposed to being
+ either zero or an indirect array of pairs). */
+ uword is_user[0];
+} hash_t;
+
+/* Hash header size in bytes */
+always_inline uword
+hash_header_bytes (void *v)
+{
+ hash_t *h;
+ uword is_user_bytes =
+ (sizeof (h->is_user[0]) * vec_len (v)) / BITS (h->is_user[0]);
+ return sizeof (h[0]) + is_user_bytes;
+}
+
+/* Returns a pointer to the hash header given the vector pointer */
+always_inline hash_t *
+hash_header (void *v)
+{
+ return vec_header (v, hash_header_bytes (v));
+}
+
+/* Number of elements in the hash table */
+always_inline uword
+hash_elts (void *v)
+{
+ hash_t *h = hash_header (v);
+ return v ? h->elts : 0;
+}
+
+/* Number of elements the hash table can hold */
+always_inline uword
+hash_capacity (void *v)
+{
+ return vec_len (v);
+}
+
+/* Returns 1 if the hash pair contains user data */
+always_inline uword
+hash_is_user (void *v, uword i)
+{
+ hash_t *h = hash_header (v);
+ uword i0 = i / BITS (h->is_user[0]);
+ uword i1 = i % BITS (h->is_user[0]);
+ return (h->is_user[i0] & ((uword) 1 << i1)) != 0;
+}
+
+/* Set the format function and format argument for a hash table */
+always_inline void
+hash_set_pair_format (void *v,
+ format_function_t * format_pair, void *format_pair_arg)
+{
+ hash_t *h = hash_header (v);
+ h->format_pair = format_pair;
+ h->format_pair_arg = format_pair_arg;
+}
+
+/* Set hash table flags */
+always_inline void
+hash_set_flags (void *v, uword flags)
+{
+ hash_header (v)->flags |= flags;
+}
+
+/* Key value pairs. */
+typedef struct
+{
+ /* The Key */
+ uword key;
+
+ /* The Value. Length is 2^log2_pair_size - 1. */
+ uword value[0];
+} hash_pair_t;
+
+/* The indirect pair structure
+
+ If log2_pair_size > 0 we overload hash pairs
+ with indirect pairs for buckets with more than one
+ pair. */
+typedef struct
+{
+ /* pair vector */
+ hash_pair_t *pairs;
+ /* padding */
+ u8 pad[sizeof (uword) - sizeof (hash_pair_t *)];
+ /* allocated length */
+ uword alloc_len;
+}
+hash_pair_indirect_t;
+
+/* Direct / Indirect pair union */
+typedef union
+{
+ hash_pair_t direct;
+ hash_pair_indirect_t indirect;
+} hash_pair_union_t;
+
+#define LOG2_ALLOC_BITS (5)
+#define PAIR_BITS (BITS (uword) - LOG2_ALLOC_BITS)
+
+/* Log2 number of bytes allocated in pairs array. */
+always_inline uword
+indirect_pair_get_log2_bytes (hash_pair_indirect_t * p)
+{
+ return p->alloc_len >> PAIR_BITS;
+}
+
+/* Get the length of an indirect pair */
+always_inline uword
+indirect_pair_get_len (hash_pair_indirect_t * p)
+{
+ if (!p->pairs)
+ return 0;
+ else
+ return p->alloc_len & (((uword) 1 << PAIR_BITS) - 1);
+}
+
+/* Set the length of an indirect pair */
+always_inline void
+indirect_pair_set (hash_pair_indirect_t * p, uword log2_alloc, uword len)
+{
+ ASSERT (len < ((uword) 1 << PAIR_BITS));
+ ASSERT (log2_alloc < ((uword) 1 << LOG2_ALLOC_BITS));
+ p->alloc_len = (log2_alloc << PAIR_BITS) | len;
+}
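+
+/* Worked example (illustrative): on a 64-bit platform PAIR_BITS is 59,
+ so an indirect bucket holding len == 3 pairs inside a 64-byte
+ allocation (log2_alloc == 6) stores alloc_len == ((uword) 6 << 59) | 3. */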
+
+/* internal routine to fetch value for given key */
+uword *_hash_get (void *v, uword key);
+
+/* internal routine to fetch value (key, value) pair for given key */
+hash_pair_t *_hash_get_pair (void *v, uword key);
+
+/* internal routine to unset a (key, value) pair */
+void *_hash_unset (void *v, uword key, void *old_value);
+
+/* internal routine to set a (key, value) pair, return the old value */
+void *_hash_set3 (void *v, uword key, void *value, void *old_value);
+
+/* Resize a hash table */
+void *hash_resize (void *old, uword new_size);
+
+/* duplicate a hash table */
+void *hash_dup (void *old);
+
+/* Returns the number of bytes used by a hash table */
+uword hash_bytes (void *v);
+
+/* Public macro to set a (key, value) pair, return the old value */
+#define hash_set3(h,key,value,old_value) \
+({ \
+ uword _v = (uword) (value); \
+ (h) = _hash_set3 ((h), (uword) (key), (void *) &_v, (old_value)); \
+})
+
+/* Public macro to fetch value for given key */
+#define hash_get(h,key) _hash_get ((h), (uword) (key))
+
+/* Public macro to fetch value (key, value) pair for given key */
+#define hash_get_pair(h,key) _hash_get_pair ((h), (uword) (key))
+
+/* Public macro to set a (key, value) pair */
+#define hash_set(h,key,value) hash_set3(h,key,value,0)
+
+/* Public macro to set (key, 0) pair */
+#define hash_set1(h,key) (h) = _hash_set3(h,(uword) (key),0,0)
+
+/* Public macro to unset a (key, value) pair */
+#define hash_unset(h,key) ((h) = _hash_unset ((h), (uword) (key),0))
+
+/* Public macro to unset a (key, value) pair, return the old value */
+#define hash_unset3(h,key,old_value) ((h) = _hash_unset ((h), (uword) (key), (void *) (old_value)))
+
+/* get/set/unset for pointer keys. */
+
+/* Public macro to fetch value for given pointer key */
+#define hash_get_mem(h,key) _hash_get ((h), pointer_to_uword (key))
+
+/* Public macro to fetch (key, value) for given pointer key */
+#define hash_get_pair_mem(h,key) _hash_get_pair ((h), pointer_to_uword (key))
+
+/* Public macro to set (key, value) for pointer key */
+#define hash_set_mem(h,key,value) hash_set3 (h, pointer_to_uword (key), (value), 0)
+
+/* Public macro to set (key, 0) for pointer key */
+#define hash_set1_mem(h,key) hash_set3 ((h), pointer_to_uword (key), 0, 0)
+
+/* Public macro to unset (key, value) for pointer key */
+#define hash_unset_mem(h,key) ((h) = _hash_unset ((h), pointer_to_uword (key),0))
+
+/* internal routine to free a hash table */
+extern void *_hash_free (void *v);
+
+/* Public macro to free a hash table */
+#define hash_free(h) (h) = _hash_free ((h))
+
+clib_error_t *hash_validate (void *v);
+
+/* Public inline function to get the number of value bytes for a hash table */
+always_inline uword
+hash_value_bytes (hash_t * h)
+{
+ hash_pair_t *p;
+ return (sizeof (p->value[0]) << h->log2_pair_size) - sizeof (p->key);
+}
+
+/* Public inline function to get log2(size of a (key,value) pair) */
+always_inline uword
+hash_pair_log2_bytes (hash_t * h)
+{
+ uword log2_bytes = h->log2_pair_size;
+ ASSERT (BITS (hash_pair_t) == 32 || BITS (hash_pair_t) == 64);
+ if (BITS (hash_pair_t) == 32)
+ log2_bytes += 2;
+ else if (BITS (hash_pair_t) == 64)
+ log2_bytes += 3;
+ return log2_bytes;
+}
+
+/* Public inline function to get size of a (key,value) pair */
+always_inline uword
+hash_pair_bytes (hash_t * h)
+{
+ return (uword) 1 << hash_pair_log2_bytes (h);
+}
+
+/* Public inline function to advance a pointer past one (key,value) pair */
+always_inline void *
+hash_forward1 (hash_t * h, void *v)
+{
+ return (u8 *) v + hash_pair_bytes (h);
+}
+
+/* Public inline function to advance a pointer past N (key,value) pairs */
+always_inline void *
+hash_forward (hash_t * h, void *v, uword n)
+{
+ return (u8 *) v + ((n * sizeof (hash_pair_t)) << h->log2_pair_size);
+}
+
+/** Iterate over hash pairs.
+
+ @param p The current (key,value) pair. This should be of type
+ <code>(hash_pair_t *)</code>.
+ @param v The hash table to iterate.
+ @param body The operation to perform on each (key,value) pair.
+
+ Executes the expression or code block @c body with each active hash pair.
+*/
+/* A previous version of this macro made use of the hash_pair_union_t
+ * structure; this version does not since that approach mightily upset
+ * the static analysis tool. In the rare chance someone is reading this
+ * code, pretend that _p below is of type hash_pair_union_t and that when
+ * used as an rvalue it's really using one of the union members as the
+ * rvalue. If you were confused before you might be marginally less
+ * confused after.
+ */
+#define hash_foreach_pair(p,v,body) \
+do { \
+ __label__ _hash_foreach_done; \
+ hash_t * _h = hash_header (v); \
+ void * _p; \
+ hash_pair_t * _q, * _q_end; \
+ uword _i, _i1, _id, _pair_increment; \
+ \
+ _p = (v); \
+ _i = 0; \
+ _pair_increment = 1; \
+ if ((v)) \
+ _pair_increment = 1 << _h->log2_pair_size; \
+ while (_i < hash_capacity (v)) \
+ { \
+ _id = _h->is_user[_i / BITS (_h->is_user[0])]; \
+ _i1 = _i + BITS (_h->is_user[0]); \
+ \
+ do { \
+ if (_id & 1) \
+ { \
+ _q = _p; \
+ _q_end = _q + _pair_increment; \
+ } \
+ else \
+ { \
+ hash_pair_indirect_t * _pi = _p; \
+ _q = _pi->pairs; \
+ if (_h->log2_pair_size > 0) \
+ _q_end = hash_forward (_h, _q, indirect_pair_get_len (_pi)); \
+ else \
+ _q_end = vec_end (_q); \
+ } \
+ \
+ /* Loop through all elements in bucket. \
+ Bucket may have 0, 1, or more (indirect case) pairs. */ \
+ while (_q < _q_end) \
+ { \
+ uword _break_in_body = 1; \
+ (p) = _q; \
+ do { \
+ body; \
+ _break_in_body = 0; \
+ } while (0); \
+ if (_break_in_body) \
+ goto _hash_foreach_done; \
+ _q += _pair_increment; \
+ } \
+ \
+ _p = (hash_pair_t *)_p + _pair_increment; \
+ _id = _id / 2; \
+ _i++; \
+ } while (_i < _i1); \
+ } \
+ _hash_foreach_done: \
+ /* Be silent Mr. Compiler-Warning. */ \
+ ; \
+ } while (0)
+
+/* Iterate over key/value pairs
+
+ @param key_var the current key
+ @param value_var the current value
+ @param h the hash table to iterate across
+ @param body the operation to perform on each (key_var,value_var) pair.
+
+ calls body with each active hash pair
+*/
+/* Iterate over key/value pairs. */
+#define hash_foreach(key_var,value_var,h,body) \
+do { \
+ hash_pair_t * _r; \
+ hash_foreach_pair (_r, (h), { \
+ (key_var) = (__typeof__ (key_var)) _r->key; \
+ (value_var) = (__typeof__ (value_var)) _r->value[0]; \
+ do { body; } while (0); \
+ }); \
+} while (0)
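+
+/* Minimal usage sketch for hash_foreach (illustrative; t is an existing
+ uword-keyed table, the other names are invented for the example):
+
+ uword key, value, total = 0;
+ hash_foreach (key, value, t, ({ total += value; }));
+*/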
+
+/* Iterate over key/value pairs for pointer key hash tables
+
+ @param key_var the current key
+ @param value_var the current value
+ @param h the hash table to iterate across
+ @param body the operation to perform on each (key_var,value_var) pair.
+
+ calls body with each active hash pair
+*/
+#define hash_foreach_mem(key_var,value_var,h,body) \
+do { \
+ hash_pair_t * _r; \
+ hash_foreach_pair (_r, (h), { \
+ (key_var) = (__typeof__ (key_var)) uword_to_pointer (_r->key, void *); \
+ (value_var) = (__typeof__ (value_var)) _r->value[0]; \
+ do { body; } while (0); \
+ }); \
+} while (0)
+
+/* Support for iteration through hash table. */
+
+/* This struct saves iteration state for hash_next.
+ None of these fields are meant to be visible to the user.
+ Hence, the cryptic short-hand names. */
+typedef struct
+{
+ uword i, j, f;
+} hash_next_t;
+
+hash_pair_t *hash_next (void *v, hash_next_t * hn);
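+
+/* Minimal usage sketch for hash_next (illustrative; t is an existing
+ table, consume is a hypothetical callback). The iterator must start
+ zeroed; hash_next returns 0 once the walk completes and restores the
+ table's auto-resize flags:
+
+ hash_next_t hn;
+ hash_pair_t *p;
+ memset (&hn, 0, sizeof (hn));
+ while ((p = hash_next (t, &hn)))
+ consume (p->key, p->value[0]);
+*/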
+
+void *_hash_create (uword elts, hash_t * h);
+
+always_inline void
+hash_set_value_bytes (hash_t * h, uword value_bytes)
+{
+ hash_pair_t *p;
+ h->log2_pair_size =
+ max_log2 ((sizeof (p->key) + value_bytes + sizeof (p->key) -
+ 1) / sizeof (p->key));
+}
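+
+/* E.g. (illustrative): on a 64-bit platform, value_bytes == 8 rounds up
+ to 2 key-sized words per pair (one key word plus one value word),
+ i.e. log2_pair_size == 1. */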
+
+#define hash_create2(_elts,_user,_value_bytes, \
+ _key_sum,_key_equal, \
+ _format_pair,_format_pair_arg) \
+({ \
+ hash_t _h; \
+ memset (&_h, 0, sizeof (_h)); \
+ _h.user = (_user); \
+ _h.key_sum = (hash_key_sum_function_t *) (_key_sum); \
+ _h.key_equal = (_key_equal); \
+ hash_set_value_bytes (&_h, (_value_bytes)); \
+ _h.format_pair = (format_function_t *) (_format_pair); \
+ _h.format_pair_arg = (_format_pair_arg); \
+ _hash_create ((_elts), &_h); \
+})
+
+/* Hash function based on that of Bob Jenkins (bob_jenkins@compuserve.com).
+ Public domain per: http://www.burtleburtle.net/bob/hash/doobs.html
+ Thanks, Bob. */
+
+#define hash_mix_step(a,b,c,s0,s1,s2) \
+do { \
+ (a) -= (b) + (c); (a) ^= (c) >> (s0); \
+ (b) -= (c) + (a); (b) ^= (a) << (s1); \
+ (c) -= (a) + (b); (c) ^= (b) >> (s2); \
+} while (0)
+
+#define hash_mix32_step_1(a,b,c) hash_mix_step(a,b,c,13,8,13)
+#define hash_mix32_step_2(a,b,c) hash_mix_step(a,b,c,12,16,5)
+#define hash_mix32_step_3(a,b,c) hash_mix_step(a,b,c,3,10,15)
+
+#define hash_mix64_step_1(a,b,c) hash_mix_step(a,b,c,43,9,8)
+#define hash_mix64_step_2(a,b,c) hash_mix_step(a,b,c,38,23,5)
+#define hash_mix64_step_3(a,b,c) hash_mix_step(a,b,c,35,49,11)
+#define hash_mix64_step_4(a,b,c) hash_mix_step(a,b,c,12,18,22)
+
+/* Hash function based on that of Bob Jenkins (bob_jenkins@compuserve.com).
+ Thanks, Bob. */
+#define hash_mix64(a0,b0,c0) \
+do { \
+ hash_mix64_step_1 (a0, b0, c0); \
+ hash_mix64_step_2 (a0, b0, c0); \
+ hash_mix64_step_3 (a0, b0, c0); \
+ hash_mix64_step_4 (a0, b0, c0); \
+} while (0) \
+
+#define hash_mix32(a0,b0,c0) \
+do { \
+ hash_mix32_step_1 (a0, b0, c0); \
+ hash_mix32_step_2 (a0, b0, c0); \
+ hash_mix32_step_3 (a0, b0, c0); \
+} while (0) \
+
+/* Finalize from Bob Jenkins lookup3.c */
+
+always_inline uword
+hash32_rotate_left (u32 x, u32 i)
+{
+ return (x << i) | (x >> (BITS (i) - i));
+}
+
+#define hash_v3_mix32(a,b,c) \
+do { \
+ (a) -= (c); (a) ^= hash32_rotate_left ((c), 4); (c) += (b); \
+ (b) -= (a); (b) ^= hash32_rotate_left ((a), 6); (a) += (c); \
+ (c) -= (b); (c) ^= hash32_rotate_left ((b), 8); (b) += (a); \
+ (a) -= (c); (a) ^= hash32_rotate_left ((c),16); (c) += (b); \
+ (b) -= (a); (b) ^= hash32_rotate_left ((a),19); (a) += (c); \
+ (c) -= (b); (c) ^= hash32_rotate_left ((b), 4); (b) += (a); \
+} while (0)
+
+#define hash_v3_finalize32(a,b,c) \
+do { \
+ (c) ^= (b); (c) -= hash32_rotate_left ((b), 14); \
+ (a) ^= (c); (a) -= hash32_rotate_left ((c), 11); \
+ (b) ^= (a); (b) -= hash32_rotate_left ((a), 25); \
+ (c) ^= (b); (c) -= hash32_rotate_left ((b), 16); \
+ (a) ^= (c); (a) -= hash32_rotate_left ((c), 4); \
+ (b) ^= (a); (b) -= hash32_rotate_left ((a), 14); \
+ (c) ^= (b); (c) -= hash32_rotate_left ((b), 24); \
+} while (0)
+
+/* 32 bit mixing/finalize in steps. */
+
+#define hash_v3_mix32_step1(a,b,c) \
+do { \
+ (a) -= (c); (a) ^= hash32_rotate_left ((c), 4); (c) += (b); \
+ (b) -= (a); (b) ^= hash32_rotate_left ((a), 6); (a) += (c); \
+} while (0)
+
+#define hash_v3_mix32_step2(a,b,c) \
+do { \
+ (c) -= (b); (c) ^= hash32_rotate_left ((b), 8); (b) += (a); \
+ (a) -= (c); (a) ^= hash32_rotate_left ((c),16); (c) += (b); \
+} while (0)
+
+#define hash_v3_mix32_step3(a,b,c) \
+do { \
+ (b) -= (a); (b) ^= hash32_rotate_left ((a),19); (a) += (c); \
+ (c) -= (b); (c) ^= hash32_rotate_left ((b), 4); (b) += (a); \
+} while (0)
+
+#define hash_v3_finalize32_step1(a,b,c) \
+do { \
+ (c) ^= (b); (c) -= hash32_rotate_left ((b), 14); \
+ (a) ^= (c); (a) -= hash32_rotate_left ((c), 11); \
+} while (0)
+
+#define hash_v3_finalize32_step2(a,b,c) \
+do { \
+ (b) ^= (a); (b) -= hash32_rotate_left ((a), 25); \
+ (c) ^= (b); (c) -= hash32_rotate_left ((b), 16); \
+} while (0)
+
+#define hash_v3_finalize32_step3(a,b,c) \
+do { \
+ (a) ^= (c); (a) -= hash32_rotate_left ((c), 4); \
+ (b) ^= (a); (b) -= hash32_rotate_left ((a), 14); \
+ (c) ^= (b); (c) -= hash32_rotate_left ((b), 24); \
+} while (0)
+
+/* Vector v3 mixing/finalize. */
+#define hash_v3_mix_step_1_u32x(a,b,c) \
+do { \
+ (a) -= (c); (a) ^= u32x_irotate_left ((c), 4); (c) += (b); \
+ (b) -= (a); (b) ^= u32x_irotate_left ((a), 6); (a) += (c); \
+ (c) -= (b); (c) ^= u32x_irotate_left ((b), 8); (b) += (a); \
+} while (0)
+
+#define hash_v3_mix_step_2_u32x(a,b,c) \
+do { \
+ (a) -= (c); (a) ^= u32x_irotate_left ((c),16); (c) += (b); \
+ (b) -= (a); (b) ^= u32x_irotate_left ((a),19); (a) += (c); \
+ (c) -= (b); (c) ^= u32x_irotate_left ((b), 4); (b) += (a); \
+} while (0)
+
+#define hash_v3_finalize_step_1_u32x(a,b,c) \
+do { \
+ (c) ^= (b); (c) -= u32x_irotate_left ((b), 14); \
+ (a) ^= (c); (a) -= u32x_irotate_left ((c), 11); \
+ (b) ^= (a); (b) -= u32x_irotate_left ((a), 25); \
+} while (0)
+
+#define hash_v3_finalize_step_2_u32x(a,b,c) \
+do { \
+ (c) ^= (b); (c) -= u32x_irotate_left ((b), 16); \
+ (a) ^= (c); (a) -= u32x_irotate_left ((c), 4); \
+ (b) ^= (a); (b) -= u32x_irotate_left ((a), 14); \
+ (c) ^= (b); (c) -= u32x_irotate_left ((b), 24); \
+} while (0)
+
+#define hash_v3_mix_u32x(a,b,c) \
+do { \
+ hash_v3_mix_step_1_u32x(a,b,c); \
+ hash_v3_mix_step_2_u32x(a,b,c); \
+} while (0)
+
+#define hash_v3_finalize_u32x(a,b,c) \
+do { \
+ hash_v3_finalize_step_1_u32x(a,b,c); \
+ hash_v3_finalize_step_2_u32x(a,b,c); \
+} while (0)
+
+extern uword hash_memory (void *p, word n_bytes, uword state);
+
+extern uword mem_key_sum (hash_t * h, uword key);
+extern uword mem_key_equal (hash_t * h, uword key1, uword key2);
+
+#define hash_create_mem(elts,key_bytes,value_bytes) \
+ hash_create2((elts),(key_bytes),(value_bytes),mem_key_sum,mem_key_equal,0,0)
+
+extern uword vec_key_sum (hash_t * h, uword key);
+extern uword vec_key_equal (hash_t * h, uword key1, uword key2);
+extern u8 *vec_key_format_pair (u8 * s, va_list * args);
+
+#define hash_create_vec(elts,key_bytes,value_bytes) \
+ hash_create2((elts),(key_bytes),(value_bytes),\
+ vec_key_sum,vec_key_equal,vec_key_format_pair,0)
+
+extern uword string_key_sum (hash_t * h, uword key);
+extern uword string_key_equal (hash_t * h, uword key1, uword key2);
+extern u8 *string_key_format_pair (u8 * s, va_list * args);
+
+#define hash_create_string(elts,value_bytes) \
+ hash_create2((elts),0,(value_bytes), \
+ (hash_key_sum_function_t *) KEY_FUNC_STRING, \
+ (hash_key_equal_function_t *)KEY_FUNC_STRING, \
+ 0, 0)
+
+#define hash_create(elts,value_bytes) \
+ hash_create2((elts),0,(value_bytes), \
+ (hash_key_sum_function_t *) KEY_FUNC_NONE, \
+ (hash_key_equal_function_t *) KEY_FUNC_NONE, \
+ 0,0)
+
+#define hash_create_uword(elts,value_bytes) \
+ hash_create2((elts),0,(value_bytes), \
+ (hash_key_sum_function_t *) KEY_FUNC_POINTER_UWORD, \
+ (hash_key_equal_function_t *) KEY_FUNC_POINTER_UWORD, \
+ 0,0)
+
+#define hash_create_u32(elts,value_bytes) \
+ hash_create2((elts),0,(value_bytes), \
+ (hash_key_sum_function_t *) KEY_FUNC_POINTER_U32, \
+ (hash_key_equal_function_t *) KEY_FUNC_POINTER_U32, \
+ 0,0)
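+
+/* Minimal life-cycle sketch using the uword-keyed hash_create variant
+ above (illustrative; names invented for the example). Note that the
+ set/unset/free macros reassign t, since the table may move when it
+ is resized:
+
+ uword *t = hash_create (0, sizeof (uword));
+ uword *p;
+ hash_set (t, 42, 99);
+ p = hash_get (t, 42);
+ ASSERT (p && p[0] == 99);
+ hash_unset (t, 42);
+ hash_free (t);
+*/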
+
+u8 *format_hash (u8 * s, va_list * va);
+
+/* Looks up input in a hash table indexed by either a vector string or
+ a C string (null-terminated). */
+unformat_function_t unformat_hash_vec_string;
+unformat_function_t unformat_hash_string;
+
+/* Main test routine. */
+int test_hash_main (unformat_input_t * input);
+
+#endif /* included_hash_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/heap.c b/src/vppinfra/heap.c
new file mode 100644
index 00000000..2a5fb5c8
--- /dev/null
+++ b/src/vppinfra/heap.c
@@ -0,0 +1,828 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/cache.h> /* for CLIB_CACHE_LINE_BYTES */
+#include <vppinfra/mem.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/heap.h>
+#include <vppinfra/error.h>
+
+always_inline heap_elt_t *
+elt_at (heap_header_t * h, uword i)
+{
+ ASSERT (i < vec_len (h->elts));
+ return h->elts + i;
+}
+
+always_inline heap_elt_t *
+last (heap_header_t * h)
+{
+ return elt_at (h, h->tail);
+}
+
+always_inline heap_elt_t *
+first (heap_header_t * h)
+{
+ return elt_at (h, h->head);
+}
+
+/* Object sizes are binned into HEAP_N_BINS bins.
+ Objects with size <= HEAP_SMALL_BINS have their own bins.
+ Larger objects are grouped together in power-of-2 sized
+ bins.
+
+ Sizes are in units of elt_bytes bytes. */
+
+/* Convert size to bin. */
+always_inline uword
+size_to_bin (uword size)
+{
+ uword bin;
+
+ ASSERT (size > 0);
+
+ if (size <= HEAP_SMALL_BINS)
+ {
+ bin = size - 1;
+ if (size == 0)
+ bin = 0;
+ }
+ else
+ {
+ bin = HEAP_SMALL_BINS + max_log2 (size) - (HEAP_LOG2_SMALL_BINS + 1);
+ if (bin >= HEAP_N_BINS)
+ bin = HEAP_N_BINS - 1;
+ }
+
+ return bin;
+}
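+
+/* Worked example (illustrative): with HEAP_SMALL_BINS == 32, sizes
+ 1..32 map one-to-one onto bins 0..31, sizes 33..64 share bin 32 and
+ sizes 65..128 share bin 33. */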
+
+/* Convert bin to size. */
+always_inline __attribute__ ((unused))
+ uword bin_to_size (uword bin)
+{
+ uword size;
+
+ if (bin <= HEAP_SMALL_BINS - 1)
+ size = bin + 1;
+ else
+ size = (uword) 1 << ((bin - HEAP_SMALL_BINS) + HEAP_LOG2_SMALL_BINS + 1);
+
+ return size;
+}
+
+static void
+elt_delete (heap_header_t * h, heap_elt_t * e)
+{
+ heap_elt_t *l = vec_end (h->elts) - 1;
+
+ ASSERT (e >= h->elts && e <= l);
+
+ /* Update doubly linked pointers. */
+ {
+ heap_elt_t *p = heap_prev (e);
+ heap_elt_t *n = heap_next (e);
+
+ if (p == e)
+ {
+ n->prev = 0;
+ h->head = n - h->elts;
+ }
+ else if (n == e)
+ {
+ p->next = 0;
+ h->tail = p - h->elts;
+ }
+ else
+ {
+ p->next = n - p;
+ n->prev = p - n;
+ }
+ }
+
+ /* Add to index free list or delete from end. */
+ if (e < l)
+ vec_add1 (h->free_elts, e - h->elts);
+ else
+ _vec_len (h->elts)--;
+}
+
+/*
+ Before: P ... E
+ After : P ... NEW ... E
+*/
+always_inline void
+elt_insert_before (heap_header_t * h, heap_elt_t * e, heap_elt_t * new)
+{
+ heap_elt_t *p = heap_prev (e);
+
+ if (p == e)
+ {
+ new->prev = 0;
+ new->next = e - new;
+ p->prev = new - p;
+ h->head = new - h->elts;
+ }
+ else
+ {
+ new->prev = p - new;
+ new->next = e - new;
+ e->prev = new - e;
+ p->next = new - p;
+ }
+}
+
+/*
+ Before: E ... N
+ After : E ... NEW ... N
+*/
+always_inline void
+elt_insert_after (heap_header_t * h, heap_elt_t * e, heap_elt_t * new)
+{
+ heap_elt_t *n = heap_next (e);
+
+ if (n == e)
+ {
+ new->next = 0;
+ new->prev = e - new;
+ e->next = new - e;
+ h->tail = new - h->elts;
+ }
+ else
+ {
+ new->prev = e - new;
+ new->next = n - new;
+ e->next = new - e;
+ n->prev = new - n;
+ }
+}
+
+always_inline heap_elt_t *
+elt_new (heap_header_t * h)
+{
+ heap_elt_t *e;
+ uword l;
+ if ((l = vec_len (h->free_elts)) > 0)
+ {
+ e = elt_at (h, h->free_elts[l - 1]);
+ _vec_len (h->free_elts) -= 1;
+ }
+ else
+ vec_add2 (h->elts, e, 1);
+ return e;
+}
+
+/* Return pointer to object at given offset.
+ Used to write free list index of free objects. */
+always_inline u32 *
+elt_data (void *v, heap_elt_t * e)
+{
+ heap_header_t *h = heap_header (v);
+ return v + heap_offset (e) * h->elt_bytes;
+}
+
+always_inline void
+set_free_elt (void *v, heap_elt_t * e, uword fi)
+{
+ heap_header_t *h = heap_header (v);
+
+ e->offset |= HEAP_ELT_FREE_BIT;
+ if (h->elt_bytes >= sizeof (u32))
+ {
+ *elt_data (v, e) = fi;
+ }
+ else
+ {
+ /* For elt_bytes < 4 we must store the free index in a
+ separate vector. */
+ uword elt_index = e - h->elts;
+ vec_validate (h->small_free_elt_free_index, elt_index);
+ h->small_free_elt_free_index[elt_index] = fi;
+ }
+}
+
+always_inline uword
+get_free_elt (void *v, heap_elt_t * e, uword * bin_result)
+{
+ heap_header_t *h = heap_header (v);
+ uword fb, fi;
+
+ ASSERT (heap_is_free (e));
+ fb = size_to_bin (heap_elt_size (v, e));
+
+ if (h->elt_bytes >= sizeof (u32))
+ {
+ fi = *elt_data (v, e);
+ }
+ else
+ {
+ uword elt_index = e - h->elts;
+ fi = vec_elt (h->small_free_elt_free_index, elt_index);
+ }
+
+ *bin_result = fb;
+ return fi;
+}
+
+always_inline void
+remove_free_block (void *v, uword b, uword i)
+{
+ heap_header_t *h = heap_header (v);
+ uword l;
+
+ ASSERT (b < vec_len (h->free_lists));
+ ASSERT (i < vec_len (h->free_lists[b]));
+
+ l = vec_len (h->free_lists[b]);
+
+ if (i < l - 1)
+ {
+ uword t = h->free_lists[b][l - 1];
+ h->free_lists[b][i] = t;
+ set_free_elt (v, elt_at (h, t), i);
+ }
+ _vec_len (h->free_lists[b]) = l - 1;
+}
+
+static heap_elt_t *
+search_free_list (void *v, uword size)
+{
+ heap_header_t *h = heap_header (v);
+ heap_elt_t *f, *u;
+ uword b, fb, f_size, f_index;
+ word s, l;
+
+ if (!v)
+ return 0;
+
+ /* Search free lists for bins >= given size. */
+ for (b = size_to_bin (size); b < vec_len (h->free_lists); b++)
+ if ((l = vec_len (h->free_lists[b])) > 0)
+ {
+ /* Find an object that is large enough.
+ Search list in reverse so that more recently freed objects will be
+ allocated again sooner. */
+ do
+ {
+ l--;
+ f_index = h->free_lists[b][l];
+ f = elt_at (h, f_index);
+ f_size = heap_elt_size (v, f);
+ if ((s = f_size - size) >= 0)
+ break;
+ }
+ while (l >= 0);
+
+ /* If we fail to find a large enough object, try the next larger size. */
+ if (l < 0)
+ continue;
+
+ ASSERT (heap_is_free (f));
+
+ /* Link in used object (u) after free object (f). */
+ if (s == 0)
+ {
+ u = f;
+ fb = HEAP_N_BINS;
+ }
+ else
+ {
+ u = elt_new (h);
+ f = elt_at (h, f_index);
+ elt_insert_after (h, f, u);
+ fb = size_to_bin (s);
+ }
+
+ u->offset = heap_offset (f) + s;
+
+ if (fb != b)
+ {
+ if (fb < HEAP_N_BINS)
+ {
+ uword i;
+ vec_validate (h->free_lists, fb);
+ i = vec_len (h->free_lists[fb]);
+ vec_add1 (h->free_lists[fb], f - h->elts);
+ set_free_elt (v, f, i);
+ }
+
+ remove_free_block (v, b, l);
+ }
+
+ return u;
+ }
+
+ return 0;
+}
+
+static void combine_free_blocks (void *v, heap_elt_t * e0, heap_elt_t * e1);
+
+static inline void
+dealloc_elt (void *v, heap_elt_t * e)
+{
+ heap_header_t *h = heap_header (v);
+ uword b, l;
+ heap_elt_t *n, *p;
+
+ b = size_to_bin (heap_elt_size (v, e));
+ vec_validate (h->free_lists, b);
+ l = vec_len (h->free_lists[b]);
+ vec_add1 (h->free_lists[b], e - h->elts);
+ set_free_elt (v, e, l);
+
+ /* See if we can combine the block we just freed with neighboring free blocks. */
+ p = heap_prev (e);
+ if (!heap_is_free (p))
+ p = e;
+
+ n = heap_next (e);
+ if (!heap_is_free (n))
+ n = e;
+
+ if (p != n)
+ combine_free_blocks (v, p, n);
+}
+
+void *
+_heap_alloc (void *v,
+ uword size,
+ uword align,
+ uword elt_bytes, uword * offset_return, uword * handle_return)
+{
+ uword offset = 0, align_size;
+ heap_header_t *h;
+ heap_elt_t *e;
+
+ if (size == 0)
+ goto error;
+
+ /* Round up alignment to power of 2. */
+ if (align <= 1)
+ {
+ align = 0;
+ align_size = size;
+ }
+ else
+ {
+ align = max_pow2 (align);
+ align_size = size + align - 1;
+ }
+
+ e = search_free_list (v, align_size);
+
+ /* If nothing found on free list, allocate object from end of vector. */
+ if (!e)
+ {
+ uword max_len;
+
+ offset = vec_len (v);
+ max_len = heap_get_max_len (v);
+
+ if (max_len && offset + align_size > max_len)
+ goto error;
+
+ h = heap_header (v);
+ if (!v || !(h->flags & HEAP_IS_STATIC))
+ v = _vec_resize (v,
+ align_size,
+ (offset + align_size) * elt_bytes,
+ sizeof (h[0]), HEAP_DATA_ALIGN);
+ else
+ _vec_len (v) += align_size;
+
+ if (offset == 0)
+ {
+ h = heap_header (v);
+ h->elt_bytes = elt_bytes;
+ }
+ }
+
+ h = heap_header (v);
+
+ /* Add new element to doubly linked chain of elements. */
+ if (!e)
+ {
+ e = elt_new (h);
+ e->offset = offset;
+ elt_insert_after (h, last (h), e);
+ }
+
+ if (align > 0)
+ {
+ uword e_index;
+ uword new_offset, old_offset;
+
+ old_offset = e->offset;
+ new_offset = (old_offset + align - 1) & ~(align - 1);
+ e->offset = new_offset;
+ e_index = e - h->elts;
+
+ /* Free fragments before and after aligned object. */
+ if (new_offset > old_offset)
+ {
+ heap_elt_t *before_e = elt_new (h);
+ before_e->offset = old_offset;
+ elt_insert_before (h, h->elts + e_index, before_e);
+ dealloc_elt (v, before_e);
+ }
+
+ if (new_offset + size < old_offset + align_size)
+ {
+ heap_elt_t *after_e = elt_new (h);
+ after_e->offset = new_offset + size;
+ elt_insert_after (h, h->elts + e_index, after_e);
+ dealloc_elt (v, after_e);
+ }
+
+ e = h->elts + e_index;
+ }
+
+ h->used_count++;
+
+ /* Keep track of used elements when debugging.
+ This allows deallocation to check that passed objects are valid. */
+ if (CLIB_DEBUG > 0)
+ {
+ uword handle = e - h->elts;
+ ASSERT (!clib_bitmap_get (h->used_elt_bitmap, handle));
+ h->used_elt_bitmap = clib_bitmap_ori (h->used_elt_bitmap, handle);
+ }
+
+ *offset_return = e->offset;
+ *handle_return = e - h->elts;
+ return v;
+
+error:
+ *offset_return = *handle_return = ~0;
+ return v;
+}
+
+void
+heap_dealloc (void *v, uword handle)
+{
+ heap_header_t *h = heap_header (v);
+ heap_elt_t *e;
+
+ ASSERT (handle < vec_len (h->elts));
+
+ /* For debugging we keep track of indices for valid objects.
+ We make sure the user is not trying to free an object with an invalid index. */
+ if (CLIB_DEBUG > 0)
+ {
+ ASSERT (clib_bitmap_get (h->used_elt_bitmap, handle));
+ h->used_elt_bitmap = clib_bitmap_andnoti (h->used_elt_bitmap, handle);
+ }
+
+ h->used_count--;
+
+ e = h->elts + handle;
+ ASSERT (!heap_is_free (e));
+
+ dealloc_elt (v, e);
+}
+
+/* While freeing the object at INDEX we noticed free blocks e0 <= index
+ and e1 >= index. We combine these two or three blocks into one big
+ free block. */
+static void
+combine_free_blocks (void *v, heap_elt_t * e0, heap_elt_t * e1)
+{
+ heap_header_t *h = heap_header (v);
+ uword total_size, i, b, tb, ti, i_last, g_offset;
+ heap_elt_t *e;
+
+ struct
+ {
+ u32 index;
+ u32 bin;
+ u32 bin_index;
+ } f[3], g;
+
+ /* Compute total size of free objects i0 through i1. */
+ total_size = 0;
+ for (i = 0, e = e0; 1; e = heap_next (e), i++)
+ {
+ ASSERT (i < ARRAY_LEN (f));
+
+ ti = get_free_elt (v, e, &tb);
+
+ ASSERT (tb < vec_len (h->free_lists));
+ ASSERT (ti < vec_len (h->free_lists[tb]));
+
+ f[i].index = h->free_lists[tb][ti];
+ f[i].bin = tb;
+ f[i].bin_index = ti;
+
+ total_size += heap_elt_size (v, elt_at (h, f[i].index));
+
+ if (e == e1)
+ {
+ i_last = i;
+ break;
+ }
+ }
+
+ /* Compute combined bin. See if all objects can be
+ combined into existing bin. */
+ b = size_to_bin (total_size);
+ g.index = g.bin_index = 0;
+ for (i = 0; i <= i_last; i++)
+ if (b == f[i].bin)
+ {
+ g = f[i];
+ break;
+ }
+
+ /* If no existing bin matched, allocate a new element for the combined block. */
+ if (i > i_last)
+ {
+ g.index = elt_new (h) - h->elts;
+ vec_validate (h->free_lists, b);
+ g.bin_index = vec_len (h->free_lists[b]);
+ vec_add1 (h->free_lists[b], g.index);
+ elt_insert_before (h, elt_at (h, f[0].index), elt_at (h, g.index));
+ }
+
+ g_offset = elt_at (h, f[0].index)->offset;
+
+ /* Delete unused bins. */
+ for (i = 0; i <= i_last; i++)
+ if (g.index != f[i].index)
+ {
+ ti = get_free_elt (v, elt_at (h, f[i].index), &tb);
+ remove_free_block (v, tb, ti);
+ elt_delete (h, elt_at (h, f[i].index));
+ }
+
+ /* Initialize new element. */
+ elt_at (h, g.index)->offset = g_offset;
+ set_free_elt (v, elt_at (h, g.index), g.bin_index);
+}
+
+uword
+heap_len (void *v, word handle)
+{
+ heap_header_t *h = heap_header (v);
+
+ if (CLIB_DEBUG > 0)
+ ASSERT (clib_bitmap_get (h->used_elt_bitmap, handle));
+ return heap_elt_size (v, elt_at (h, handle));
+}
+
+void *
+_heap_free (void *v)
+{
+ heap_header_t *h = heap_header (v);
+ uword b;
+
+ if (!v)
+ return v;
+
+ clib_bitmap_free (h->used_elt_bitmap);
+ for (b = 0; b < vec_len (h->free_lists); b++)
+ vec_free (h->free_lists[b]);
+ vec_free (h->free_lists);
+ vec_free (h->elts);
+ vec_free (h->free_elts);
+ vec_free (h->small_free_elt_free_index);
+ if (!(h->flags & HEAP_IS_STATIC))
+ vec_free_h (v, sizeof (h[0]));
+ return v;
+}
+
+uword
+heap_bytes (void *v)
+{
+ heap_header_t *h = heap_header (v);
+ uword bytes, b;
+
+ if (!v)
+ return 0;
+
+ bytes = sizeof (h[0]);
+ bytes += vec_len (v) * sizeof (h->elt_bytes);
+ for (b = 0; b < vec_len (h->free_lists); b++)
+ bytes += vec_capacity (h->free_lists[b], 0);
+ bytes += vec_bytes (h->free_lists);
+ bytes += vec_capacity (h->elts, 0);
+ bytes += vec_capacity (h->free_elts, 0);
+ bytes += vec_bytes (h->used_elt_bitmap);
+
+ return bytes;
+}
+
+static u8 *
+debug_elt (u8 * s, void *v, word i, word n)
+{
+ heap_elt_t *e, *e0, *e1;
+ heap_header_t *h = heap_header (v);
+ word j;
+
+ if (vec_len (h->elts) == 0)
+ return s;
+
+ if (i < 0)
+ e0 = first (h);
+ else
+ {
+ e0 = h->elts + i;
+ for (j = 0; j < n / 2; j++)
+ e0 = heap_prev (e0);
+ }
+
+ if (n < 0)
+ e1 = h->elts + h->tail;
+ else
+ {
+ e1 = h->elts + i;
+ for (j = 0; j < n / 2; j++)
+ e1 = heap_next (e1);
+ }
+
+ i = -n / 2;
+ for (e = e0; 1; e = heap_next (e))
+ {
+ if (heap_is_free (e))
+ s = format (s, "index %4d, free\n", e - h->elts);
+ else if (h->format_elt)
+ s = format (s, "%U", h->format_elt, v, elt_data (v, e));
+ else
+ s = format (s, "index %4d, used\n", e - h->elts);
+ i++;
+ if (e == e1)
+ break;
+ }
+
+ return s;
+}
+
+u8 *
+format_heap (u8 * s, va_list * va)
+{
+ void *v = va_arg (*va, void *);
+ uword verbose = va_arg (*va, uword);
+ heap_header_t *h = heap_header (v);
+ heap_header_t zero;
+
+ memset (&zero, 0, sizeof (zero));
+
+ if (!v)
+ h = &zero;
+
+ {
+ f64 elt_bytes = vec_len (v) * h->elt_bytes;
+ f64 overhead_bytes = heap_bytes (v);
+
+ s = format (s, "heap %p, %6d objects, size %.1fk + overhead %.1fk\n",
+ v, h->used_count, elt_bytes / 1024,
+ (overhead_bytes - elt_bytes) / 1024);
+ }
+
+ if (v && verbose)
+ s = debug_elt (s, v, -1, -1);
+
+ return s;
+}
+
+void
+heap_validate (void *v)
+{
+ heap_header_t *h = heap_header (v);
+ uword i, o, s;
+ u8 *free_map;
+ heap_elt_t *e, *n;
+
+ uword used_count, total_size;
+ uword free_count, free_size;
+
+ ASSERT (h->used_count == clib_bitmap_count_set_bits (h->used_elt_bitmap));
+
+ ASSERT (first (h)->prev == 0);
+ ASSERT (last (h)->next == 0);
+
+ /* Validate number of elements and size. */
+ free_size = free_count = 0;
+ for (i = 0; i < vec_len (h->free_lists); i++)
+ {
+ free_count += vec_len (h->free_lists[i]);
+ for (o = 0; o < vec_len (h->free_lists[i]); o++)
+ {
+ e = h->elts + h->free_lists[i][o];
+ s = heap_elt_size (v, e);
+ ASSERT (size_to_bin (s) == i);
+ ASSERT (heap_is_free (e));
+ free_size += s;
+ }
+ }
+
+ {
+ uword elt_free_size, elt_free_count;
+
+ used_count = total_size = elt_free_size = elt_free_count = 0;
+ for (e = first (h); 1; e = n)
+ {
+ int is_free = heap_is_free (e);
+ used_count++;
+ s = heap_elt_size (v, e);
+ total_size += s;
+ ASSERT (is_free ==
+ !clib_bitmap_get (h->used_elt_bitmap, e - h->elts));
+ if (is_free)
+ {
+ elt_free_count++;
+ elt_free_size += s;
+ }
+ n = heap_next (e);
+ if (e == n)
+ {
+ ASSERT (last (h) == n);
+ break;
+ }
+
+ /* We should never have two free adjacent elements. */
+ ASSERT (!(heap_is_free (e) && heap_is_free (n)));
+ }
+
+ ASSERT (free_count == elt_free_count);
+ ASSERT (free_size == elt_free_size);
+ ASSERT (used_count == h->used_count + free_count);
+ ASSERT (total_size == vec_len (v));
+ }
+
+ free_map = vec_new (u8, used_count);
+
+ e = first (h);
+ for (i = o = 0; 1; i++)
+ {
+ ASSERT (heap_offset (e) == o);
+ s = heap_elt_size (v, e);
+
+ if (heap_is_free (e))
+ {
+ uword fb, fi;
+
+ fi = get_free_elt (v, e, &fb);
+
+ ASSERT (fb < vec_len (h->free_lists));
+ ASSERT (fi < vec_len (h->free_lists[fb]));
+ ASSERT (h->free_lists[fb][fi] == e - h->elts);
+
+ ASSERT (!free_map[i]);
+ free_map[i] = 1;
+ }
+
+ n = heap_next (e);
+
+ if (e == n)
+ break;
+
+ ASSERT (heap_prev (n) == e);
+
+ o += s;
+ e = n;
+ }
+
+ vec_free (free_map);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/heap.h b/src/vppinfra/heap.h
new file mode 100644
index 00000000..8c1aae46
--- /dev/null
+++ b/src/vppinfra/heap.h
@@ -0,0 +1,357 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+/* Heaps of objects of type T (e.g. int, struct foo, ...).
+
+ Usage. To declare a null heap:
+
+ T * heap = 0;
+
+ To allocate:
+
+ offset = heap_alloc (heap, size, handle);
+
+   New object occupies heap[offset] ... heap[offset + size - 1].
+ Handle is used to free/query object.
+
+ To free object:
+
+ heap_dealloc (heap, handle);
+
+ To query the size of an object:
+
+   heap_len (heap, handle)
+
+*/
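+
+/* A minimal usage sketch (illustrative only; my_elt_t and heap_example
+   are hypothetical names, not part of this API):
+
+     #include <vppinfra/heap.h>
+
+     typedef struct { u32 a, b; } my_elt_t;
+
+     static void
+     heap_example (void)
+     {
+       my_elt_t *heap = 0;
+       uword offset, handle;
+
+       offset = heap_alloc (heap, 32, handle);  // room for 32 objects
+       heap[offset].a = 1;                      // heap[offset..offset+31]
+       heap_dealloc (heap, handle);             // free via handle
+       heap_free (heap);                        // release the whole heap
+     }
+*/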
+
+#ifndef included_heap_h
+#define included_heap_h
+
+#include <vppinfra/clib.h>
+#include <vppinfra/cache.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/format.h>
+#include <vppinfra/bitmap.h>
+
+/* Doubly linked list of elements. */
+typedef struct
+{
+ /* Offset of this element (plus free bit).
+ If element is free, data at offset contains pointer to free list. */
+ u32 offset;
+
+ /* Index of next and previous elements relative to current element. */
+ i32 next, prev;
+} heap_elt_t;
+
+/* Use high bit of offset as free bit. */
+#define HEAP_ELT_FREE_BIT (1 << 31)
+
+always_inline uword
+heap_is_free (heap_elt_t * e)
+{
+ return (e->offset & HEAP_ELT_FREE_BIT) != 0;
+}
+
+always_inline uword
+heap_offset (heap_elt_t * e)
+{
+ return e->offset & ~HEAP_ELT_FREE_BIT;
+}
+
+always_inline heap_elt_t *
+heap_next (heap_elt_t * e)
+{
+ return e + e->next;
+}
+
+always_inline heap_elt_t *
+heap_prev (heap_elt_t * e)
+{
+ return e + e->prev;
+}
+
+always_inline uword
+heap_elt_size (void *v, heap_elt_t * e)
+{
+ heap_elt_t *n = heap_next (e);
+ uword next_offset = n != e ? heap_offset (n) : vec_len (v);
+ return next_offset - heap_offset (e);
+}
+
+/* Sizes are binned. Sizes 1 to 2^log2_small_bins have their
+ own free lists. Larger sizes are grouped in powers of two. */
+#define HEAP_LOG2_SMALL_BINS (5)
+#define HEAP_SMALL_BINS (1 << HEAP_LOG2_SMALL_BINS)
+#define HEAP_N_BINS (2 * HEAP_SMALL_BINS)
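+
+/* Worked example (added for illustration): with HEAP_LOG2_SMALL_BINS = 5,
+   sizes 1..32 each get an exact-size free list (bins 0..31), while larger
+   requests share one bin per power-of-two range, e.g. a size-100 object
+   lands in the bin covering 65..128.  See size_to_bin() in heap.c for
+   the exact mapping. */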
+
+/* Header for heaps. */
+typedef struct
+{
+ /* Vector of used and free elements. */
+ heap_elt_t *elts;
+
+ /* For elt_bytes < sizeof (u32) we need some extra space
+ per elt to store free list index. */
+ u32 *small_free_elt_free_index;
+
+ /* Vector of free indices of elts array. */
+ u32 *free_elts;
+
+ /* Indices of free elts indexed by size bin. */
+ u32 **free_lists;
+
+ format_function_t *format_elt;
+
+  /* Used for validation/debugging. */
+ uword *used_elt_bitmap;
+
+ /* First and last element of doubly linked chain of elements. */
+ u32 head, tail;
+
+ u32 used_count, max_len;
+
+  /* Number of bytes in a heap element. */
+ u32 elt_bytes;
+
+ u32 flags;
+  /* Static heaps are made from external memory given to
+     us by the user and are not resizable vectors. */
+#define HEAP_IS_STATIC (1)
+} heap_header_t;
+
+/* Start of heap elements is always cache aligned. */
+#define HEAP_DATA_ALIGN (CLIB_CACHE_LINE_BYTES)
+
+always_inline heap_header_t *
+heap_header (void *v)
+{
+ return vec_header (v, sizeof (heap_header_t));
+}
+
+always_inline uword
+heap_header_bytes ()
+{
+ return vec_header_bytes (sizeof (heap_header_t));
+}
+
+always_inline void
+heap_dup_header (heap_header_t * old, heap_header_t * new)
+{
+ uword i;
+
+ new[0] = old[0];
+ new->elts = vec_dup (new->elts);
+ new->free_elts = vec_dup (new->free_elts);
+ new->free_lists = vec_dup (new->free_lists);
+ for (i = 0; i < vec_len (new->free_lists); i++)
+ new->free_lists[i] = vec_dup (new->free_lists[i]);
+ new->used_elt_bitmap = clib_bitmap_dup (new->used_elt_bitmap);
+ new->small_free_elt_free_index = vec_dup (new->small_free_elt_free_index);
+}
+
+/* Make a duplicate copy of a heap. */
+#define heap_dup(v) _heap_dup(v, vec_len (v) * sizeof (v[0]))
+
+always_inline void *
+_heap_dup (void *v_old, uword v_bytes)
+{
+ heap_header_t *h_old, *h_new;
+ void *v_new;
+
+ h_old = heap_header (v_old);
+
+ if (!v_old)
+ return v_old;
+
+ v_new = 0;
+ v_new =
+ _vec_resize (v_new, _vec_len (v_old), v_bytes, sizeof (heap_header_t),
+ HEAP_DATA_ALIGN);
+ h_new = heap_header (v_new);
+ heap_dup_header (h_old, h_new);
+ clib_memcpy (v_new, v_old, v_bytes);
+ return v_new;
+}
+
+always_inline uword
+heap_elts (void *v)
+{
+ heap_header_t *h = heap_header (v);
+ return h->used_count;
+}
+
+uword heap_bytes (void *v);
+
+always_inline void *
+_heap_new (u32 len, u32 n_elt_bytes)
+{
+ void *v = _vec_resize (0, len, (uword) len * n_elt_bytes,
+ sizeof (heap_header_t),
+ HEAP_DATA_ALIGN);
+ heap_header (v)->elt_bytes = n_elt_bytes;
+ return v;
+}
+
+#define heap_new(v) (v) = _heap_new (0, sizeof ((v)[0]))
+
+always_inline void
+heap_set_format (void *v, format_function_t * format_elt)
+{
+ ASSERT (v);
+ heap_header (v)->format_elt = format_elt;
+}
+
+always_inline void
+heap_set_max_len (void *v, uword max_len)
+{
+ ASSERT (v);
+ heap_header (v)->max_len = max_len;
+}
+
+always_inline uword
+heap_get_max_len (void *v)
+{
+ return v ? heap_header (v)->max_len : 0;
+}
+
+/* Create fixed size heap with given block of memory. */
+always_inline void *
+heap_create_from_memory (void *memory, uword max_len, uword elt_bytes)
+{
+ heap_header_t *h;
+ void *v;
+
+ if (max_len * elt_bytes < sizeof (h[0]))
+ return 0;
+
+ h = memory;
+ memset (h, 0, sizeof (h[0]));
+ h->max_len = max_len;
+ h->elt_bytes = elt_bytes;
+ h->flags = HEAP_IS_STATIC;
+
+ v = (void *) (memory + heap_header_bytes ());
+ _vec_len (v) = 0;
+ return v;
+}
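+
+/* Static-heap sketch (buffer size and element type are arbitrary
+   assumptions; the buffer must hold the header plus max_len elements):
+
+     static u8 buffer[64 << 10];
+     u32 *h;
+
+     h = heap_create_from_memory (buffer,
+                                  (sizeof (buffer) - heap_header_bytes ())
+                                  / sizeof (u32),
+                                  sizeof (u32));
+     // h == 0 means the buffer was too small for the heap header
+*/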
+
+/* Execute BODY for each allocated heap element. */
+#define heap_foreach(var,len,heap,body) \
+do { \
+ if (vec_len (heap) > 0) \
+ { \
+ heap_header_t * _h = heap_header (heap); \
+ heap_elt_t * _e = _h->elts + _h->head; \
+ heap_elt_t * _end = _h->elts + _h->tail; \
+ while (1) \
+ { \
+ if (! heap_is_free (_e)) \
+ { \
+ (var) = (heap) + heap_offset (_e); \
+ (len) = heap_elt_size ((heap), _e); \
+ do { body; } while (0); \
+ } \
+ if (_e == _end) \
+ break; \
+ _e = heap_next (_e); \
+ } \
+ } \
+} while (0)
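+
+/* Iteration sketch (heap, e, n and process() are placeholder names):
+
+     u32 *heap = ...;   // a heap of u32 elements
+     u32 *e;
+     uword n;
+
+     heap_foreach (e, n, heap, {
+       process (e, n);  // e points at the first of n live elements
+     });
+*/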
+
+#define heap_elt_at_index(v,index) vec_elt_at_index(v,index)
+
+always_inline heap_elt_t *
+heap_get_elt (void *v, uword handle)
+{
+ heap_header_t *h = heap_header (v);
+ heap_elt_t *e = vec_elt_at_index (h->elts, handle);
+ ASSERT (!heap_is_free (e));
+ return e;
+}
+
+#define heap_elt_with_handle(v,handle) \
+({ \
+ heap_elt_t * _e = heap_get_elt ((v), (handle)); \
+ (v) + heap_offset (_e); \
+})
+
+always_inline uword
+heap_is_free_handle (void *v, uword heap_handle)
+{
+ heap_header_t *h = heap_header (v);
+ heap_elt_t *e = vec_elt_at_index (h->elts, heap_handle);
+ return heap_is_free (e);
+}
+
+extern uword heap_len (void *v, word handle);
+
+/* Low level allocation call. */
+extern void *_heap_alloc (void *v, uword size, uword alignment,
+ uword elt_bytes, uword * offset, uword * handle);
+
+#define heap_alloc_aligned(v,size,align,handle) \
+({ \
+ uword _o, _h; \
+ uword _a = (align); \
+ uword _s = (size); \
+ (v) = _heap_alloc ((v), _s, _a, sizeof ((v)[0]), &_o, &_h); \
+ (handle) = _h; \
+ _o; \
+})
+
+#define heap_alloc(v,size,handle) heap_alloc_aligned((v),(size),0,(handle))
+
+extern void heap_dealloc (void *v, uword handle);
+extern void heap_validate (void *v);
+
+/* Format heap internal data structures as string. */
+extern u8 *format_heap (u8 * s, va_list * va);
+
+void *_heap_free (void *v);
+
+#define heap_free(v) (v)=_heap_free(v)
+
+#endif /* included_heap_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/linux/mem.c b/src/vppinfra/linux/mem.c
new file mode 100644
index 00000000..2d8f593d
--- /dev/null
+++ b/src/vppinfra/linux/mem.c
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define _GNU_SOURCE
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <sys/mount.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <linux/mempolicy.h>
+#include <linux/memfd.h>
+
+#include <vppinfra/clib.h>
+#include <vppinfra/mem.h>
+#include <vppinfra/format.h>
+#include <vppinfra/clib_error.h>
+#include <vppinfra/linux/syscall.h>
+#include <vppinfra/linux/sysfs.h>
+
+#ifndef F_LINUX_SPECIFIC_BASE
+#define F_LINUX_SPECIFIC_BASE 1024
+#endif
+
+#ifndef F_ADD_SEALS
+#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
+#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)
+
+#define F_SEAL_SEAL 0x0001 /* prevent further seals from being set */
+#define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */
+#define F_SEAL_GROW 0x0004 /* prevent file from growing */
+#define F_SEAL_WRITE 0x0008 /* prevent writes */
+#endif
+
+int
+clib_mem_vm_get_log2_page_size (int fd)
+{
+ struct stat st = { 0 };
+ if (fstat (fd, &st) == -1)
+ return 0;
+ return min_log2 (st.st_blksize);
+}
+
+clib_error_t *
+clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a)
+{
+ int fd = -1;
+ clib_error_t *err = 0;
+ void *addr = 0;
+ u8 *filename = 0;
+ int mmap_flags = MAP_SHARED;
+ int log2_page_size;
+ int n_pages;
+ int old_mpol = -1;
+ u64 old_mask[16] = { 0 };
+
+  /* Save the old NUMA memory policy if needed. */
+ if (a->flags & (CLIB_MEM_VM_F_NUMA_PREFER | CLIB_MEM_VM_F_NUMA_FORCE))
+ {
+ int rv;
+ rv =
+ get_mempolicy (&old_mpol, old_mask, sizeof (old_mask) * 8 + 1, 0, 0);
+
+ if (rv == -1)
+ {
+ if ((a->flags & CLIB_MEM_VM_F_NUMA_FORCE) != 0)
+ {
+ err = clib_error_return_unix (0, "get_mempolicy");
+ goto error;
+ }
+ else
+ old_mpol = -1;
+ }
+ }
+
+  /* If we are creating a shared segment, we need a file descriptor. */
+ if (a->flags & CLIB_MEM_VM_F_SHARED)
+ {
+      /* If hugepages are needed, we need to create a mount point. */
+ if (a->flags & CLIB_MEM_VM_F_HUGETLB)
+ {
+ char *mount_dir;
+ char template[] = "/tmp/hugepage_mount.XXXXXX";
+
+ mount_dir = mkdtemp (template);
+ if (mount_dir == 0)
+ return clib_error_return_unix (0, "mkdtemp \'%s\'", template);
+
+ if (mount ("none", (char *) mount_dir, "hugetlbfs", 0, NULL))
+ {
+ err = clib_error_return_unix (0, "mount hugetlb directory '%s'",
+ mount_dir);
+ goto error;
+ }
+
+ filename = format (0, "%s/%s%c", mount_dir, a->name, 0);
+
+ if ((fd = open ((char *) filename, O_CREAT | O_RDWR, 0755)) == -1)
+ {
+ err = clib_error_return_unix (0, "open");
+ goto error;
+ }
+ umount2 ((char *) mount_dir, MNT_DETACH);
+ rmdir ((char *) mount_dir);
+ mmap_flags |= MAP_LOCKED;
+ }
+ else
+ {
+ if ((fd = memfd_create (a->name, MFD_ALLOW_SEALING)) == -1)
+ {
+ err = clib_error_return_unix (0, "memfd_create");
+ goto error;
+ }
+
+ if ((fcntl (fd, F_ADD_SEALS, F_SEAL_SHRINK)) == -1)
+ {
+ err = clib_error_return_unix (0, "fcntl (F_ADD_SEALS)");
+ goto error;
+ }
+ }
+ log2_page_size = clib_mem_vm_get_log2_page_size (fd);
+
+ if (log2_page_size == 0)
+ {
+ err = clib_error_return_unix (0, "cannot determine page size");
+ goto error;
+ }
+ }
+ else /* not CLIB_MEM_VM_F_SHARED */
+ {
+ if (a->flags & CLIB_MEM_VM_F_HUGETLB)
+ {
+ mmap_flags |= MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS;
+ log2_page_size = 21;
+ }
+ else
+ {
+ mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;
+ log2_page_size = min_log2 (sysconf (_SC_PAGESIZE));
+ }
+ }
+
+ n_pages = ((a->size - 1) >> log2_page_size) + 1;
+
+
+ if (a->flags & CLIB_MEM_VM_F_HUGETLB_PREALLOC)
+ {
+ err = clib_sysfs_prealloc_hugepages (a->numa_node,
+ 1 << (log2_page_size - 10),
+ n_pages);
+ if (err)
+ goto error;
+
+ }
+
+ if (fd != -1)
+ if ((ftruncate (fd, a->size)) == -1)
+ {
+ err = clib_error_return_unix (0, "ftruncate");
+ goto error;
+ }
+
+ if (old_mpol != -1)
+ {
+ int rv;
+ u64 mask[16] = { 0 };
+      mask[0] = 1ULL << a->numa_node;
+ rv = set_mempolicy (MPOL_BIND, mask, sizeof (mask) * 8 + 1);
+ if (rv)
+ {
+ err = clib_error_return_unix (0, "set_mempolicy");
+ goto error;
+ }
+ }
+
+ addr = mmap (0, a->size, (PROT_READ | PROT_WRITE), mmap_flags, fd, 0);
+ if (addr == MAP_FAILED)
+ {
+ err = clib_error_return_unix (0, "mmap");
+ goto error;
+ }
+
+  /* Re-apply the old NUMA memory policy. */
+ if (old_mpol != -1 &&
+ set_mempolicy (old_mpol, old_mask, sizeof (old_mask) * 8 + 1) == -1)
+ {
+ err = clib_error_return_unix (0, "set_mempolicy");
+ goto error;
+ }
+
+ a->log2_page_size = log2_page_size;
+ a->n_pages = n_pages;
+ a->addr = addr;
+ a->fd = fd;
+ goto done;
+
+error:
+ if (fd != -1)
+ close (fd);
+
+done:
+ vec_free (filename);
+ return err;
+}
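+
+/* Caller sketch (the flag and size choices are arbitrary assumptions):
+
+     clib_mem_vm_alloc_t alloc = { 0 };
+     clib_error_t *err;
+
+     alloc.name = "example";
+     alloc.size = 64 << 20;
+     alloc.flags = CLIB_MEM_VM_F_SHARED | CLIB_MEM_VM_F_HUGETLB;
+     err = clib_mem_vm_ext_alloc (&alloc);
+     if (err == 0)
+       {
+         // alloc.addr, alloc.fd, alloc.n_pages and alloc.log2_page_size
+         // are now valid; munmap/close when finished
+       }
+*/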
+
+u64 *
+clib_mem_vm_get_paddr (void *mem, int log2_page_size, int n_pages)
+{
+ int pagesize = sysconf (_SC_PAGESIZE);
+ int fd;
+ int i;
+ u64 *r = 0;
+
+ if ((fd = open ((char *) "/proc/self/pagemap", O_RDONLY)) == -1)
+ return 0;
+
+ for (i = 0; i < n_pages; i++)
+ {
+ u64 seek, pagemap = 0;
+ uword vaddr = pointer_to_uword (mem) + (((u64) i) << log2_page_size);
+ seek = ((u64) vaddr / pagesize) * sizeof (u64);
+ if (lseek (fd, seek, SEEK_SET) != seek)
+ goto done;
+
+ if (read (fd, &pagemap, sizeof (pagemap)) != (sizeof (pagemap)))
+ goto done;
+
+ if ((pagemap & (1ULL << 63)) == 0)
+ goto done;
+
+ pagemap &= pow2_mask (55);
+ vec_add1 (r, pagemap * pagesize);
+ }
+
+done:
+ close (fd);
+ if (vec_len (r) != n_pages)
+ {
+ vec_free (r);
+ return 0;
+ }
+ return r;
+}
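+
+/* Each 8-byte /proc/self/pagemap entry carries a present bit (bit 63)
+   and the page frame number (low 55 bits); the loop above turns PFNs
+   into byte addresses.  Lookup sketch using the result of
+   clib_mem_vm_ext_alloc (illustrative):
+
+     u64 *pa = clib_mem_vm_get_paddr (alloc.addr, alloc.log2_page_size,
+                                      alloc.n_pages);
+     if (pa)        // pa[i] is the physical address backing page i
+       vec_free (pa);
+*/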
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/linux/syscall.h b/src/vppinfra/linux/syscall.h
new file mode 100644
index 00000000..f8ec5919
--- /dev/null
+++ b/src/vppinfra/linux/syscall.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_linux_syscall_h
+#define included_linux_syscall_h
+
+#include <unistd.h>
+#include <sys/syscall.h>
+
+static inline long
+set_mempolicy (int mode, const unsigned long *nodemask, unsigned long maxnode)
+{
+ return syscall (__NR_set_mempolicy, mode, nodemask, maxnode);
+}
+
+static inline int
+get_mempolicy (int *mode, unsigned long *nodemask, unsigned long maxnode,
+ void *addr, unsigned long flags)
+{
+ return syscall (__NR_get_mempolicy, mode, nodemask, maxnode, addr, flags);
+}
+
+static inline long
+move_pages (int pid, unsigned long count, void **pages, const int *nodes,
+ int *status, int flags)
+{
+ return syscall (__NR_move_pages, pid, count, pages, nodes, status, flags);
+}
+
+static inline int
+memfd_create (const char *name, unsigned int flags)
+{
+ return syscall (__NR_memfd_create, name, flags);
+}
+
+#endif /* included_linux_syscall_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/linux/sysfs.c b/src/vppinfra/linux/sysfs.c
new file mode 100644
index 00000000..5f611e6a
--- /dev/null
+++ b/src/vppinfra/linux/sysfs.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vppinfra/clib.h>
+#include <vppinfra/clib_error.h>
+#include <vppinfra/format.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <dirent.h>
+
+clib_error_t *
+clib_sysfs_write (char *file_name, char *fmt, ...)
+{
+ u8 *s;
+ int fd;
+ clib_error_t *error = 0;
+
+ fd = open (file_name, O_WRONLY);
+ if (fd < 0)
+ return clib_error_return_unix (0, "open `%s'", file_name);
+
+ va_list va;
+ va_start (va, fmt);
+ s = va_format (0, fmt, &va);
+ va_end (va);
+
+ if (write (fd, s, vec_len (s)) < 0)
+ error = clib_error_return_unix (0, "write `%s'", file_name);
+
+ vec_free (s);
+ close (fd);
+ return error;
+}
+
+clib_error_t *
+clib_sysfs_read (char *file_name, char *fmt, ...)
+{
+ unformat_input_t input;
+ u8 *s = 0;
+ int fd;
+ ssize_t sz;
+ uword result;
+
+ fd = open (file_name, O_RDONLY);
+ if (fd < 0)
+ return clib_error_return_unix (0, "open `%s'", file_name);
+
+ vec_validate (s, 4095);
+
+ sz = read (fd, s, vec_len (s));
+ if (sz < 0)
+ {
+ close (fd);
+ vec_free (s);
+ return clib_error_return_unix (0, "read `%s'", file_name);
+ }
+
+ _vec_len (s) = sz;
+ unformat_init_vector (&input, s);
+
+ va_list va;
+ va_start (va, fmt);
+ result = va_unformat (&input, fmt, &va);
+ va_end (va);
+
+ vec_free (s);
+ close (fd);
+
+ if (result == 0)
+ return clib_error_return (0, "unformat error");
+
+ return 0;
+}
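+
+/* Usage sketch; the path shown is an example target, mirroring the
+   hugepage files this file manipulates below:
+
+     int n = 0;
+     clib_error_t *err;
+
+     err = clib_sysfs_read ("/sys/kernel/mm/hugepages/"
+                            "hugepages-2048kB/nr_hugepages", "%d", &n);
+     if (err == 0)
+       err = clib_sysfs_write ("/sys/kernel/mm/hugepages/"
+                               "hugepages-2048kB/nr_hugepages", "%d", n + 1);
+*/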
+
+u8 *
+clib_sysfs_link_to_name (char *link)
+{
+ char *p, buffer[64];
+ unformat_input_t in;
+ u8 *s = 0;
+ int r;
+
+ r = readlink (link, buffer, sizeof (buffer) - 1);
+
+ if (r < 0)
+ return 0;
+
+ buffer[r] = 0;
+ p = strrchr (buffer, '/');
+
+ if (!p)
+ return 0;
+
+ unformat_init_string (&in, p + 1, strlen (p + 1));
+ if (unformat (&in, "%s", &s) != 1)
+ clib_unix_warning ("no string?");
+ unformat_free (&in);
+
+ return s;
+}
+
+clib_error_t *
+clib_sysfs_set_nr_hugepages (int numa_node, int page_size, int nr)
+{
+ clib_error_t *error = 0;
+ struct stat sb;
+ u8 *p = 0;
+
+ p = format (p, "/sys/devices/system/node/node%u%c", numa_node, 0);
+
+ if (stat ((char *) p, &sb) == 0)
+ {
+ if (S_ISDIR (sb.st_mode) == 0)
+ {
+          error = clib_error_return (0, "'%s' is not a directory", p);
+ goto done;
+ }
+ }
+ else if (numa_node == 0)
+ {
+ vec_reset_length (p);
+ p = format (p, "/sys/kernel/mm%c", 0);
+ if (stat ((char *) p, &sb) < 0 || S_ISDIR (sb.st_mode) == 0)
+ {
+          error = clib_error_return (0, "'%s' does not exist or is not "
+                                     "a directory", p);
+ goto done;
+ }
+ }
+ else
+ {
+ error = clib_error_return (0, "'%s' does not exist", p);
+ goto done;
+ }
+
+ _vec_len (p) -= 1;
+ p = format (p, "/hugepages/hugepages-%ukB/nr_hugepages%c", page_size, 0);
+  error = clib_sysfs_write ((char *) p, "%d", nr);
+
+done:
+ vec_free (p);
+ return error;
+}
+
+
+static clib_error_t *
+clib_sysfs_get_xxx_hugepages (char *type, int numa_node,
+ int page_size, int *val)
+{
+ clib_error_t *error = 0;
+ struct stat sb;
+ u8 *p = 0;
+
+ p = format (p, "/sys/devices/system/node/node%u%c", numa_node, 0);
+
+ if (stat ((char *) p, &sb) == 0)
+ {
+ if (S_ISDIR (sb.st_mode) == 0)
+ {
+          error = clib_error_return (0, "'%s' is not a directory", p);
+ goto done;
+ }
+ }
+ else if (numa_node == 0)
+ {
+ vec_reset_length (p);
+ p = format (p, "/sys/kernel/mm%c", 0);
+ if (stat ((char *) p, &sb) < 0 || S_ISDIR (sb.st_mode) == 0)
+ {
+          error = clib_error_return (0, "'%s' does not exist or is not "
+                                     "a directory", p);
+ goto done;
+ }
+ }
+ else
+ {
+ error = clib_error_return (0, "'%s' does not exist", p);
+ goto done;
+ }
+
+ _vec_len (p) -= 1;
+ p = format (p, "/hugepages/hugepages-%ukB/%s_hugepages%c", page_size,
+ type, 0);
+ error = clib_sysfs_read ((char *) p, "%d", val);
+
+done:
+ vec_free (p);
+ return error;
+}
+
+clib_error_t *
+clib_sysfs_get_free_hugepages (int numa_node, int page_size, int *v)
+{
+ return clib_sysfs_get_xxx_hugepages ("free", numa_node, page_size, v);
+}
+
+clib_error_t *
+clib_sysfs_get_nr_hugepages (int numa_node, int page_size, int *v)
+{
+ return clib_sysfs_get_xxx_hugepages ("nr", numa_node, page_size, v);
+}
+
+clib_error_t *
+clib_sysfs_get_surplus_hugepages (int numa_node, int page_size, int *v)
+{
+ return clib_sysfs_get_xxx_hugepages ("surplus", numa_node, page_size, v);
+}
+
+clib_error_t *
+clib_sysfs_prealloc_hugepages (int numa_node, int page_size, int nr)
+{
+ clib_error_t *error = 0;
+ int n, needed;
+ error = clib_sysfs_get_free_hugepages (numa_node, page_size, &n);
+ if (error)
+ return error;
+ needed = nr - n;
+ if (needed <= 0)
+ return 0;
+
+ error = clib_sysfs_get_nr_hugepages (numa_node, page_size, &n);
+ if (error)
+ return error;
+ clib_warning ("pre-allocating %u additional %uK hugepages on numa node %u",
+ needed, page_size, numa_node);
+ return clib_sysfs_set_nr_hugepages (numa_node, page_size, n + needed);
+}
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/linux/sysfs.h b/src/vppinfra/linux/sysfs.h
new file mode 100644
index 00000000..6c80cf95
--- /dev/null
+++ b/src/vppinfra/linux/sysfs.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_linux_sysfs_h
+#define included_linux_sysfs_h
+
+#include <vppinfra/error.h>
+
+clib_error_t *clib_sysfs_write (char *file_name, char *fmt, ...);
+
+clib_error_t *clib_sysfs_read (char *file_name, char *fmt, ...);
+
+u8 *clib_sysfs_link_to_name (char *link);
+
+clib_error_t *clib_sysfs_set_nr_hugepages (int numa_node,
+ int page_size, int nr);
+clib_error_t *clib_sysfs_get_nr_hugepages (int numa_node,
+ int page_size, int *v);
+clib_error_t *clib_sysfs_get_free_hugepages (int numa_node,
+ int page_size, int *v);
+clib_error_t *clib_sysfs_get_surplus_hugepages (int numa_node,
+ int page_size, int *v);
+clib_error_t *clib_sysfs_prealloc_hugepages (int numa_node,
+ int page_size, int nr);
+
+#endif /* included_linux_sysfs_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/lock.h b/src/vppinfra/lock.h
new file mode 100644
index 00000000..7d241675
--- /dev/null
+++ b/src/vppinfra/lock.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_clib_lock_h
+#define included_clib_lock_h
+
+#include <vppinfra/clib.h>
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ u32 lock;
+#if CLIB_DEBUG > 0
+ pid_t pid;
+ uword thread_index;
+ void *frame_address;
+#endif
+} *clib_spinlock_t;
+
+static inline void
+clib_spinlock_init (clib_spinlock_t * p)
+{
+ *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
+ memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
+}
+
+static inline void
+clib_spinlock_free (clib_spinlock_t * p)
+{
+ if (*p)
+ {
+ clib_mem_free ((void *) *p);
+ *p = 0;
+ }
+}
+
+static_always_inline void
+clib_spinlock_lock (clib_spinlock_t * p)
+{
+ while (__sync_lock_test_and_set (&(*p)->lock, 1))
+#if __x86_64__
+ __builtin_ia32_pause ()
+#endif
+ ;
+#if CLIB_DEBUG > 0
+ (*p)->frame_address = __builtin_frame_address (0);
+ (*p)->pid = getpid ();
+ (*p)->thread_index = os_get_thread_index ();
+#endif
+}
+
+static_always_inline void
+clib_spinlock_lock_if_init (clib_spinlock_t * p)
+{
+ if (PREDICT_FALSE (*p != 0))
+ clib_spinlock_lock (p);
+}
+
+static_always_inline void
+clib_spinlock_unlock (clib_spinlock_t * p)
+{
+#if CLIB_DEBUG > 0
+ (*p)->frame_address = 0;
+ (*p)->pid = 0;
+ (*p)->thread_index = 0;
+#endif
+ /* Make sure all writes are complete before releasing the lock */
+ CLIB_MEMORY_BARRIER ();
+ (*p)->lock = 0;
+}
+
+static_always_inline void
+clib_spinlock_unlock_if_init (clib_spinlock_t * p)
+{
+ if (PREDICT_FALSE (*p != 0))
+ clib_spinlock_unlock (p);
+}
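+
+/* Usage sketch (my_main_t and mm are hypothetical names):
+
+     typedef struct { clib_spinlock_t lock; u64 counter; } my_main_t;
+
+     my_main_t *mm = ...;
+     clib_spinlock_init (&mm->lock);
+
+     clib_spinlock_lock (&mm->lock);
+     mm->counter++;
+     clib_spinlock_unlock (&mm->lock);
+
+   The *_if_init variants are no-ops while the lock pointer is still zero,
+   so code shared with single-threaded configurations may skip
+   clib_spinlock_init entirely. */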
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/longjmp.S b/src/vppinfra/longjmp.S
new file mode 100644
index 00000000..d4dd4c7d
--- /dev/null
+++ b/src/vppinfra/longjmp.S
@@ -0,0 +1,690 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#if defined(__x86_64__)
+ .global clib_setjmp
+ .align 4
+ .type clib_setjmp, @function
+clib_setjmp:
+ movq %rbx, 8*0(%rdi)
+ movq %rbp, 8*1(%rdi)
+ movq %r12, 8*2(%rdi)
+ movq %r13, 8*3(%rdi)
+ movq %r14, 8*4(%rdi)
+ movq %r15, 8*5(%rdi)
+
+ /* Save SP after return. */
+ leaq 8(%rsp), %rdx
+ movq %rdx, 8*6(%rdi)
+
+ /* Save PC we are returning to from stack frame. */
+ movq 0(%rsp), %rax
+ movq %rax, 8*7(%rdi)
+
+ /* Give back user's return value. */
+ movq %rsi, %rax
+ ret
+
+ .global clib_longjmp
+ .align 4
+ .type clib_longjmp, @function
+clib_longjmp:
+ /* Restore regs. */
+ movq 8*0(%rdi), %rbx
+ movq 8*1(%rdi), %rbp
+ movq 8*2(%rdi), %r12
+ movq 8*3(%rdi), %r13
+ movq 8*4(%rdi), %r14
+ movq 8*5(%rdi), %r15
+ movq 8*6(%rdi), %rsp
+ movq 8*7(%rdi), %rdx
+
+ /* Give back user's return value. */
+ movq %rsi, %rax
+
+ /* Away we go. */
+ jmpq *%rdx
+
+ .global clib_calljmp
+ .align 4
+ .type clib_calljmp, @function
+clib_calljmp:
+ /* Make sure stack is 16-byte aligned. */
+ movq %rdx, %rax
+ andq $0xf, %rax
+ subq %rax, %rdx
+
+ /* Get return address. */
+ pop %rax
+
+ /* Switch to new stack. */
+ xchgq %rsp, %rdx
+
+ /* Save return address on new stack. */
+ push %rax
+
+ /* Save old stack pointer on new stack. */
+ push %rdx
+
+ /* Get function. */
+ movq %rdi, %rdx
+
+ /* Move argument into place. */
+ movq %rsi, %rdi
+
+ /* Away we go. */
+ callq *%rdx
+
+ /* Switch back to old stack. */
+ movq 8(%rsp), %rdx
+ movq 0(%rsp), %rcx
+ xchgq %rcx, %rsp
+
+ /* Return to caller. */
+ jmpq *%rdx
+
+#elif defined(i386)
+ .global clib_setjmp
+ .align 4
+ .type clib_setjmp, @function
+clib_setjmp:
+ movl 4(%esp), %ecx
+
+ movl %ebp, 4*0(%ecx)
+ movl %ebx, 4*1(%ecx)
+ movl %edi, 4*2(%ecx)
+ movl %esi, 4*3(%ecx)
+
+ /* Save SP after return. */
+ leal 4(%esp), %edx
+ movl %edx, 4*4(%ecx)
+
+ /* Save PC we are returning to from stack frame. */
+ movl 0(%esp), %eax
+ movl %eax, 4*5(%ecx)
+
+ /* Give back user's return value. */
+ movl 8(%esp), %eax
+ ret
+
+ .global clib_longjmp
+ .align 4
+ .type clib_longjmp, @function
+clib_longjmp:
+ movl 4(%esp), %ecx
+
+ /* Give back user's return value. */
+ movl 8(%esp), %eax
+
+ /* Restore regs. */
+ movl 4*0(%ecx), %ebp
+ movl 4*1(%ecx), %ebx
+ movl 4*2(%ecx), %edi
+ movl 4*3(%ecx), %esi
+ movl 4*4(%ecx), %esp
+ movl 4*5(%ecx), %edx
+
+ /* Away we go. */
+ jmp *%edx
+
+ .global clib_calljmp
+ .align 4
+ .type clib_calljmp, @function
+clib_calljmp:
+ /* Get new stack pointer. */
+ movl 12(%esp), %edx
+
+ /* Switch stacks. */
+ xchgl %esp, %edx
+
+ /* Save old stack pointer on new stack. */
+ sub $8, %esp
+ movl %edx, 4(%esp)
+
+ /* Put function argument in stack frame. */
+ movl 8(%edx), %eax
+ movl %eax, 0(%esp)
+
+ /* Get function. */
+ movl 4(%edx), %eax
+
+ /* Away we go. */
+ call *%eax
+
+ /* Switch back to old stack. */
+ movl 4(%esp), %edx
+ xchgl %edx, %esp
+
+ /* Return to caller. */
+ ret
+
+#elif defined(__SPU__)
+
+#elif defined(__powerpc64__)
+
+ .text
+
+#define _prologue(n) \
+ .align 2 ; \
+ .globl n, .##n ; \
+ .section ".opd", "aw" ; \
+ .align 3 ; \
+n: .quad .##n, .TOC.@tocbase, 0 ; \
+ .previous ; \
+ .size n, 24 ; \
+ .type .##n, @function ; \
+.##n:
+
+#define _foreach_14_31 \
+_ (14, 0) _ (15, 1) _ (16, 2) _ (17, 3) _ (18, 4) _ (19, 5) \
+_ (20, 6) _ (21, 7) _ (22, 8) _ (23, 9) _ (24, 10) _ (25, 11) \
+_ (26, 12) _ (27, 13) _ (28, 14) _ (29, 15) _ (30, 16) _ (31, 17)
+
+#define _foreach_20_31 \
+_ (20, 0) _ (21, 1) _ (22, 2) _ (23, 3) _ (24, 4) _ (25, 5) \
+_ (26, 6) _ (27, 7) _ (28, 8) _ (29, 9) _ (30, 10) _ (31, 11)
+
+#ifdef __ALTIVEC__
+#define CLIB_POWERPC_ALTIVEC_N_REGS 12
+#else
+#define CLIB_POWERPC_ALTIVEC_N_REGS 0
+#endif
+
+_prologue (clib_setjmp)
+ mflr 0
+ std 0, 8*0(3)
+ std 1, 8*1(3)
+ std 2, 8*2(3)
+ mfcr 0
+ std 0, 8*3(3)
+ mfspr 0, 256
+ stw 0, 8*4(3)
+
+ /* gprs 14 - 31 */
+#define _(a,b) std a, 8*((b) + 4 + 18*0)(3) ;
+ _foreach_14_31
+#undef _
+
+ /* fprs 14 - 31 */
+#define _(a,b) stfd a, 8*((b) + 4 + 18*1)(3) ;
+ _foreach_14_31
+#undef _
+
+#if CLIB_POWERPC_ALTIVEC_N_REGS > 0
+ /* vrs 20 - 31 */
+ li 5, 8*(4 + 18*2)
+#define _(a,b) stvx a, 5, 3 ; addi 5, 5, 16 ;
+ _foreach_20_31
+#undef _
+#endif /* CLIB_POWERPC_ALTIVEC_N_REGS > 0 */
+
+ /* Return value. */
+ mr 3, 4
+
+ blr
+
+_prologue (clib_longjmp)
+ ld 0, 8*0(3)
+ mtlr 0
+ ld 1, 8*1(3)
+ ld 2, 8*2(3)
+ ld 0, 8*3(3)
+ mtcrf 0xff, 0
+ lwz 0, 8*3(3)
+ mtspr 256, 0
+
+ /* gprs 14 - 31 */
+#define _(a,b) ld a, 8*((b) + 4 + 18*0)(3) ;
+ _foreach_14_31
+#undef _
+
+ /* fprs 14 - 31 */
+#define _(a,b) lfd a, 8*((b) + 4 + 18*1)(3) ;
+ _foreach_14_31
+#undef _
+
+#if CLIB_POWERPC_ALTIVEC_N_REGS > 0
+ /* vrs 20 - 31 */
+ li 5, 8*(4 + 18*2)
+#define _(a,b) lvx a, 5, 3 ; addi 5, 5, 16 ;
+ _foreach_20_31
+#undef _
+#endif /* CLIB_POWERPC_ALTIVEC_N_REGS > 0 */
+
+ /* Return value. */
+ mr 3, 4
+
+ blr
+
+ .globl clib_calljmp
+ .section ".opd","aw"
+ .align 3
+clib_calljmp:
+ .quad .L.clib_calljmp,.TOC.@tocbase,0
+ .previous
+ .type clib_calljmp, @function
+.L.clib_calljmp:
+ mflr 0
+ mr 9,3
+ std 0,16(1)
+ stdu 1,-112(1)
+#APP
+ std 1,-8(5)
+ addi 5,5,-256
+ mr 1,5
+#NO_APP
+ ld 10,0(9)
+ std 2,40(1)
+ mr 3,4
+ mtctr 10
+ ld 11,16(9)
+ ld 2,8(9)
+ bctrl
+ ld 2,40(1)
+#APP
+ addi 1,1,256
+ ld 1,-8(1)
+#NO_APP
+ addi 1,1,112
+ ld 0,16(1)
+ mtlr 0
+ blr
+ .long 0
+ .byte 0,0,0,1,128,0,0,0
+ .size clib_calljmp,.-.L.clib_calljmp
+
+#elif defined(__powerpc__)
+
+#define _foreach_14_31 \
+_ (14, 0) _ (15, 1) _ (16, 2) _ (17, 3) _ (18, 4) _ (19, 5) \
+_ (20, 6) _ (21, 7) _ (22, 8) _ (23, 9) _ (24, 10) _ (25, 11) \
+_ (26, 12) _ (27, 13) _ (28, 14) _ (29, 15) _ (30, 16) _ (31, 17)
+
+#define _foreach_20_31 \
+_ (20, 0) _ (21, 1) _ (22, 2) _ (23, 3) _ (24, 4) _ (25, 5) \
+_ (26, 6) _ (27, 7) _ (28, 8) _ (29, 9) _ (30, 10) _ (31, 11)
+
+#ifdef __ALTIVEC__
+#define CLIB_POWERPC_ALTIVEC_N_REGS 12
+#else
+#define CLIB_POWERPC_ALTIVEC_N_REGS 0
+#endif
+
+ .global clib_setjmp
+ .align 4
+ .type clib_setjmp, @function
+clib_setjmp:
+ mflr 0
+ stw 0, 4*0(3)
+ stw 1, 4*1(3)
+ mfcr 0
+ stw 0, 4*2(3)
+#if CLIB_POWERPC_ALTIVEC_N_REGS > 0
+ mfspr 0, 256
+#endif
+ stw 0, 4*3(3)
+
+#if CLIB_POWERPC_ALTIVEC_N_REGS > 0
+ li 5, 4*4
+#define _(a,b) stvx a, 3, 5 ; addi 5, 5, 16 ;
+ _foreach_20_31
+#undef _
+#endif /* CLIB_POWERPC_ALTIVEC_N_REGS > 0 */
+
+ /* gp 14 - 31 */
+#define _(a,b) stw a, 4*(1*(b) + 4 + 4*CLIB_POWERPC_ALTIVEC_N_REGS + 0*18)(3) ;
+ _foreach_14_31
+#undef _
+
+ /* fp 14 - 31 */
+#define _(a,b) stfd a, 4*(2*(b) + 4 + 4*CLIB_POWERPC_ALTIVEC_N_REGS + 1*18)(3) ;
+ _foreach_14_31
+#undef _
+
+ /* Return value. */
+ mr 3, 4
+
+ blr
+
+ .global clib_longjmp
+ .align 4
+ .type clib_longjmp, @function
+clib_longjmp:
+
+ lwz 0, 4*0(3)
+ mtlr 0
+ lwz 1, 4*1(3)
+ lwz 0, 4*2(3)
+ mtcr 0
+ lwz 0, 4*3(3)
+#if CLIB_POWERPC_ALTIVEC_N_REGS > 0
+ mtspr 256, 0
+#endif
+
+#if CLIB_POWERPC_ALTIVEC_N_REGS > 0
+ li 5, 4*4
+#define _(a,b) lvx a, 3, 5 ; addi 5, 5, 16 ;
+ _foreach_20_31
+#undef _
+#endif /* CLIB_POWERPC_ALTIVEC_N_REGS > 0 */
+
+ /* gp 14 - 31 */
+#define _(a,b) lwz a, 4*(1*(b) + 4 + 4*CLIB_POWERPC_ALTIVEC_N_REGS + 0*18)(3) ;
+ _foreach_14_31
+#undef _
+
+ /* fp 14 - 31 */
+#define _(a,b) lfd a, 4*(2*(b) + 4 + 4*CLIB_POWERPC_ALTIVEC_N_REGS + 1*18)(3) ;
+ _foreach_14_31
+#undef _
+
+ /* Return value. */
+ mr 3, 4
+
+ blr
+
+ .global clib_calljmp
+ .align 4
+ .type clib_calljmp, @function
+clib_calljmp:
+ /* Make sure stack is 16 byte aligned. */
+ andi. 0, 5, 0xf
+ sub 5, 5, 0
+ addi 5, 5, -16
+
+ /* Save old stack/link pointer on new stack. */
+ stw 1, 0(5)
+ mflr 0
+ stw 0, 4(5)
+
+ /* account for (sp, lr) tuple, and keep aligned */
+ addi 5, 5, -16
+
+ /* Switch stacks. */
+ mr 1, 5
+
+ /* Move argument into place. */
+ mtctr 3
+ mr 3, 4
+
+ /* Away we go. */
+ bctrl
+
+ /* back to our synthetic frame */
+ addi 1,1,16
+
+ /* Switch back to old stack. */
+ lwz 0, 4(1)
+ mtlr 0
+ lwz 0, 0(1)
+ mr 1, 0
+
+ /* Return to caller. */
+ blr
+
+#elif defined(__arm__)
+
+ .global clib_setjmp
+ .align 4
+ .type clib_setjmp, %function
+clib_setjmp:
+ mov ip, r0 /* jmp buffer */
+
+ /* Save integer registers */
+ stmia ip!, {v1-v6, sl, fp, sp, lr}
+
+#ifdef __IWMMXT__
+ /* Save the call-preserved iWMMXt registers. */
+ wstrd wr10, [ip], #8
+ wstrd wr11, [ip], #8
+ wstrd wr12, [ip], #8
+ wstrd wr13, [ip], #8
+ wstrd wr14, [ip], #8
+ wstrd wr15, [ip], #8
+#endif
+
+ /* Give back user's return value. */
+ mov r0, r1
+ bx lr
+
+ .global clib_longjmp
+ .align 4
+ .type clib_longjmp, %function
+clib_longjmp:
+ mov ip, r0 /* jmp buffer */
+
+ /* Restore integer registers. */
+ ldmia ip!, {v1-v6, sl, fp, sp, lr}
+
+#ifdef __IWMMXT__
+ /* Save the call-preserved iWMMXt registers. */
+ wldrd wr10, [ip], #8
+ wldrd wr11, [ip], #8
+ wldrd wr12, [ip], #8
+ wldrd wr13, [ip], #8
+ wldrd wr14, [ip], #8
+ wldrd wr15, [ip], #8
+#endif
+
+ /* Give back user's return value. */
+ mov r0, r1
+ bx lr
+
+ .global clib_calljmp
+ .align 4
+ .type clib_calljmp, %function
+clib_calljmp:
+ /* Make sure stack is 8 byte aligned. */
+ bic r2, r2, #7
+
+ /* Allocate space for stack/link pointer on new stack. */
+ sub r2, r2, #8
+
+ /* Save old stack/link pointer on new stack. */
+ str sp, [r2, #0]
+ str lr, [r2, #4]
+
+ /* Switch stacks. */
+ mov sp, r2
+
+ /* Save function to call. */
+ mov ip, r0
+
+ /* Move argument into place. */
+ mov r0, r1
+
+ /* Away we go. */
+ bx ip
+
+ /* Switch back to old stack. */
+ ldr lr, [sp, #4]
+ ldr ip, [sp, #0]
+ mov sp, ip
+
+ /* Return to caller. */
+ bx lr
+
+#elif defined(__xtensa__)
+
+ /* FIXME implement if needed. */
+ .global clib_setjmp
+ .align 4
+ .type clib_setjmp, %function
+clib_setjmp:
+1: j 1b
+
+ .global clib_longjmp
+ .align 4
+ .type clib_longjmp, @function
+clib_longjmp:
+1: j 1b
+
+ .global clib_calljmp
+ .align 4
+ .type clib_calljmp, %function
+clib_calljmp:
+1: j 1b
+
+#elif defined(__TMS320C6X__)
+
+ /* FIXME implement if needed. */
+ .global clib_setjmp
+ .align 4
+ .type clib_setjmp, %function
+clib_setjmp:
+1: B .S1 1b
+
+ .global clib_longjmp
+ .align 4
+ .type clib_longjmp, @function
+clib_longjmp:
+1: B .S1 1b
+
+ .global clib_calljmp
+ .align 4
+ .type clib_calljmp, %function
+clib_calljmp:
+1: B .S1 1b
+
+#elif defined (__aarch64__)
+/*
+ Copyright (c) 2011, 2012 ARM Ltd
+ All rights reserved.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ 3. The name of the company may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+ THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#define GPR_LAYOUT \
+ REG_PAIR (x19, x20, 0); \
+ REG_PAIR (x21, x22, 16); \
+ REG_PAIR (x23, x24, 32); \
+ REG_PAIR (x25, x26, 48); \
+ REG_PAIR (x27, x28, 64); \
+ REG_PAIR (x29, x30, 80); \
+ REG_ONE (x16, 96)
+#define FPR_LAYOUT \
+ REG_PAIR ( d8, d9, 112); \
+ REG_PAIR (d10, d11, 128); \
+ REG_PAIR (d12, d13, 144); \
+ REG_PAIR (d14, d15, 160);
+// int clib_setjmp (jmp_buf)
+ .global clib_setjmp
+ .type clib_setjmp, %function
+clib_setjmp:
+ mov x16, sp
+#define REG_PAIR(REG1, REG2, OFFS) stp REG1, REG2, [x0, OFFS]
+#define REG_ONE(REG1, OFFS) str REG1, [x0, OFFS]
+ GPR_LAYOUT
+ FPR_LAYOUT
+#undef REG_PAIR
+#undef REG_ONE
+ mov x0, x1
+ ret
+ .size clib_setjmp, .-clib_setjmp
+// void clib_longjmp (jmp_buf, int) __attribute__ ((noreturn))
+ .global clib_longjmp
+ .type clib_longjmp, %function
+clib_longjmp:
+#define REG_PAIR(REG1, REG2, OFFS) ldp REG1, REG2, [x0, OFFS]
+#define REG_ONE(REG1, OFFS) ldr REG1, [x0, OFFS]
+ GPR_LAYOUT
+ FPR_LAYOUT
+#undef REG_PAIR
+#undef REG_ONE
+ mov sp, x16
+ mov x0, x1
+ // cmp w1, #0
+ // cinc w0, w1, eq
+ // use br not ret, as ret is guaranteed to mispredict
+ br x30
+ .size clib_longjmp, .-clib_longjmp
+
+
+// void clib_calljmp (x0=function, x1=arg, x2=new_stack)
+ .global clib_calljmp
+ .type clib_calljmp, %function
+clib_calljmp:
+ // save fn ptr
+ mov x3, x0
+ // set up fn arg
+ mov x0, x1
+ // switch stacks
+ mov x4, sp
+
+ // space for saved sp, lr on new stack
+ sub x2, x2, #16
+ mov sp, x2
+
+ // save old sp and link register on new stack
+ str x4, [sp]
+ str x30,[sp,#8]
+ mov x4, sp
+
+ // go there
+ blr x3
+
+ // restore old sp and link register
+ mov x4, sp
+
+ ldr x3, [x4]
+ ldr x30,[x4, #8]
+ mov sp, x3
+ ret
+ .size clib_calljmp, .-clib_calljmp
+#else
+#error "unknown machine"
+#endif
+
+.section .note.GNU-stack,"",%progbits
diff --git a/src/vppinfra/longjmp.h b/src/vppinfra/longjmp.h
new file mode 100644
index 00000000..8d83203e
--- /dev/null
+++ b/src/vppinfra/longjmp.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_clib_longjmp_h
+#define included_clib_longjmp_h
+
+#include <vppinfra/types.h>
+
+#if defined(__x86_64__)
+/* rbx, rbp, r12, r13, r14, r15, rip, rsp */
+#define CLIB_ARCH_LONGJMP_REGS 8
+
+#elif defined(i386)
+/* ebx, ebp, esi, edi, eip, esp */
+#define CLIB_ARCH_LONGJMP_REGS 6
+
+#elif (defined(__powerpc64__) || defined(__powerpc__))
+
+#ifdef __ALTIVEC__
+#define CLIB_POWERPC_ALTIVEC_N_REGS 12
+#else
+#define CLIB_POWERPC_ALTIVEC_N_REGS 0
+#endif
+
+/* r1 r2 link condition+vrsave regs 14-31 fp regs 14-31 vector regs 20-31 */
+#define CLIB_ARCH_LONGJMP_REGS \
+ (/* r1 lr cr vrsave */ \
+ 4 \
+ /* gp */ \
+ + (31 - 14 + 1) \
+ /* fp */ \
+ + (sizeof (f64) / sizeof (uword)) * (31 - 14 + 1) \
+ /* vector regs */ \
+ + (16 / sizeof (uword)) * CLIB_POWERPC_ALTIVEC_N_REGS)
+
+#elif defined(__SPU__)
+/* FIXME */
+#define CLIB_ARCH_LONGJMP_REGS (10)
+
+#elif defined(__arm__)
+
+#ifndef __IWMMXT__
+/* v1-v6 sl fp sp lr */
+#define CLIB_ARCH_LONGJMP_REGS (10)
+#else
+/* For iwmmxt we save 6 extra 8 byte registers. */
+#define CLIB_ARCH_LONGJMP_REGS (10 + (6*2))
+#endif
+
+#elif defined(__xtensa__)
+
+/* setjmp/longjmp not supported for the moment. */
+#define CLIB_ARCH_LONGJMP_REGS 0
+
+#elif defined(__TMS320C6X__)
+
+/* setjmp/longjmp not supported for the moment. */
+#define CLIB_ARCH_LONGJMP_REGS 0
+
+#elif defined(__aarch64__)
+#define CLIB_ARCH_LONGJMP_REGS (22)
+#else
+#error "unknown machine"
+#endif
+
+typedef struct
+{
+ uword regs[CLIB_ARCH_LONGJMP_REGS];
+} clib_longjmp_t __attribute__ ((aligned (16)));
+
+/* Return given value to saved context. */
+void clib_longjmp (clib_longjmp_t * save, uword return_value);
+
+/* Save context. Returns given value if jump is not taken;
+ otherwise returns value from clib_longjmp if long jump is taken. */
+uword clib_setjmp (clib_longjmp_t * save, uword return_value_not_taken);
+
+/* Call function on given stack. */
+uword clib_calljmp (uword (*func) (uword func_arg),
+ uword func_arg, void *stack);
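+
+/* Sketch of running a function on a private stack (the 32 KB size is an
+   arbitrary choice; clib_mem_alloc comes from <vppinfra/mem.h>).  The
+   stack grows down, so the high end of the allocation is passed:
+
+     static uword
+     my_fn (uword arg)
+     {
+       return arg + 1;
+     }
+
+     u32 stack_bytes = 32 << 10;
+     u8 *stack = clib_mem_alloc (stack_bytes);
+     uword rv = clib_calljmp (my_fn, 41, stack + stack_bytes);  // rv == 42
+*/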
+
+#endif /* included_clib_longjmp_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/macros.c b/src/vppinfra/macros.c
new file mode 100644
index 00000000..ce4cc9bc
--- /dev/null
+++ b/src/vppinfra/macros.c
@@ -0,0 +1,266 @@
+/*
+ macros.c - a simple macro expander
+
+ Copyright (c) 2010, 2014 Cisco and/or its affiliates.
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+#include <vppinfra/macros.h>
+
+static inline int
+macro_isalnum (i8 c)
+{
+ if ((c >= 'A' && c <= 'Z')
+ || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || (c == '_'))
+ return 1;
+ return 0;
+}
+
+static i8 *
+builtin_eval (macro_main_t * mm, i8 * varname, i32 complain)
+{
+ uword *p;
+ i8 *(*fp) (macro_main_t *, i32);
+
+ p = hash_get_mem (mm->the_builtin_eval_hash, varname);
+ if (p == 0)
+ return 0;
+ fp = (void *) (p[0]);
+ return (*fp) (mm, complain);
+}
+
+int
+clib_macro_unset (macro_main_t * mm, char *name)
+{
+ hash_pair_t *p;
+ u8 *key, *value;
+
+ p = hash_get_pair (mm->the_value_table_hash, name);
+
+ if (p == 0)
+ return 1;
+
+ key = (u8 *) (p->key);
+ value = (u8 *) (p->value[0]);
+ hash_unset_mem (mm->the_value_table_hash, name);
+
+ vec_free (value);
+ vec_free (key);
+ return 0;
+}
+
+int
+clib_macro_set_value (macro_main_t * mm, char *name, char *value)
+{
+ u8 *key_copy, *value_copy;
+ int rv;
+
+ rv = clib_macro_unset (mm, name);
+
+ key_copy = format (0, "%s%c", name, 0);
+ value_copy = format (0, "%s%c", value, 0);
+
+ hash_set_mem (mm->the_value_table_hash, key_copy, value_copy);
+ return rv;
+}
+
+i8 *
+clib_macro_get_value (macro_main_t * mm, char *name)
+{
+ uword *p;
+
+ p = hash_get_mem (mm->the_value_table_hash, name);
+ if (p)
+ return (i8 *) (p[0]);
+ else
+ return 0;
+}
+
+/*
+ * eval: takes a string, returns a vector.
+ * looks up $foobar in the variable table.
+ */
+i8 *
+clib_macro_eval (macro_main_t * mm, i8 * s, i32 complain)
+{
+ i8 *rv = 0;
+ i8 *varname, *varvalue;
+ i8 *ts;
+
+ while (*s)
+ {
+ switch (*s)
+ {
+ case '\\':
+ s++;
+ /* fallthrough */
+
+ default:
+ vec_add1 (rv, *s);
+ s++;
+ break;
+
+ case '$':
+ s++;
+ varname = 0;
+ /*
+ * Make vector with variable name in it.
+ */
+ while (*s && (macro_isalnum (*s) || (*s == '_') || (*s == '(')))
+ {
+
+ /* handle $(foo) */
+ if (*s == '(')
+ {
+ s++; /* skip '(' */
+ while (*s && *s != ')')
+ {
+ vec_add1 (varname, *s);
+ s++;
+ }
+ if (*s)
+ s++; /* skip ')' */
+ break;
+ }
+ vec_add1 (varname, *s);
+ s++;
+ }
+ /* null terminate */
+ vec_add1 (varname, 0);
+ /* Look for a builtin, e.g. $my_hostname */
+ if (!(varvalue = builtin_eval (mm, varname, complain)))
+ {
+ /* Look in value table */
+ if (!varvalue)
+ {
+ char *tmp = clib_macro_get_value (mm, varname);
+ if (tmp)
+ varvalue = (i8 *) format (0, "%s%c", tmp, 0);
+ }
+#ifdef CLIB_UNIX
+ /* Look in environment. */
+ if (!varvalue)
+ {
+              char *tmp = getenv ((char *) varname);
+ if (tmp)
+ varvalue = (i8 *) format (0, "%s%c", tmp, 0);
+ }
+#endif /* CLIB_UNIX */
+ }
+ if (varvalue)
+ {
+ /* recursively evaluate */
+ ts = clib_macro_eval (mm, varvalue, complain);
+ vec_free (varvalue);
+ /* add results to answer */
+ vec_append (rv, ts);
+ /* Remove NULL termination or the results are sad */
+ _vec_len (rv) = vec_len (rv) - 1;
+ vec_free (ts);
+ }
+ else
+ {
+ if (complain)
+ clib_warning ("Undefined Variable Reference: %s\n", varname);
+ vec_append (rv, format (0, "UNSET "));
+ _vec_len (rv) = vec_len (rv) - 1;
+
+ }
+ vec_free (varname);
+ }
+ }
+ vec_add1 (rv, 0);
+ return (rv);
+}
+
+/*
+ * eval_dollar: takes a bare variable name, wraps it as $(name),
+ * and evaluates it, returning a vector.
+ */
+i8 *
+clib_macro_eval_dollar (macro_main_t * mm, i8 * s, i32 complain)
+{
+ i8 *s2;
+ i8 *rv;
+
+ s2 = (i8 *) format (0, "$(%s)%c", s, 0);
+ rv = clib_macro_eval (mm, s2, complain);
+ vec_free (s2);
+ return (rv);
+}
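+
+/* End-to-end sketch (the variable and value are illustrative):
+
+     macro_main_t mm = { 0 };
+     i8 *result;
+
+     clib_macro_init (&mm);
+     clib_macro_set_value (&mm, "greeting", "hello from $hostname");
+     result = clib_macro_eval_dollar (&mm, (i8 *) "greeting", 1);
+     // result is a NUL-terminated vector; expansion is recursive, so the
+     // $hostname builtin inside the value is expanded as well
+     vec_free (result);
+     clib_macro_free (&mm);
+*/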
+
+void
+clib_macro_add_builtin (macro_main_t * mm, char *name, void *eval_fn)
+{
+ hash_set_mem (mm->the_builtin_eval_hash, name, (uword) eval_fn);
+}
+
+#ifdef CLIB_UNIX
+static i8 *
+eval_hostname (macro_main_t * mm, i32 complain)
+{
+ char tmp[128];
+ if (gethostname (tmp, sizeof (tmp)))
+ return ((i8 *) format (0, "gethostname-error%c", 0));
+ return ((i8 *) format (0, "%s%c", tmp, 0));
+}
+#endif
+
+void
+clib_macro_init (macro_main_t * mm)
+{
+ if (mm->the_builtin_eval_hash != 0)
+ {
+ clib_warning ("mm %p already initialized", mm);
+ return;
+ }
+
+ mm->the_builtin_eval_hash = hash_create_string (0, sizeof (uword));
+ mm->the_value_table_hash = hash_create_string (0, sizeof (uword));
+
+#ifdef CLIB_UNIX
+ hash_set_mem (mm->the_builtin_eval_hash, "hostname", (uword) eval_hostname);
+#endif
+}
+
+void
+clib_macro_free (macro_main_t * mm)
+{
+ hash_pair_t *p;
+ u8 **strings_to_free = 0;
+ int i;
+
+ hash_free (mm->the_builtin_eval_hash);
+
+ /* *INDENT-OFF* */
+ hash_foreach_pair (p, mm->the_value_table_hash,
+ ({
+ vec_add1 (strings_to_free, (u8 *) (p->key));
+ vec_add1 (strings_to_free, (u8 *) (p->value[0]));
+ }));
+ /* *INDENT-ON* */
+
+ for (i = 0; i < vec_len (strings_to_free); i++)
+ vec_free (strings_to_free[i]);
+ vec_free (strings_to_free);
+ hash_free (mm->the_value_table_hash);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/macros.h b/src/vppinfra/macros.h
new file mode 100644
index 00000000..5c2e7033
--- /dev/null
+++ b/src/vppinfra/macros.h
@@ -0,0 +1,54 @@
+/*
+ macros.h - definitions for a simple macro expander
+
+ Copyright (c) 2010, 2014 Cisco and/or its affiliates.
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+#ifndef included_macros_h
+#define included_macros_h
+
+#include <vppinfra/vec.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/format.h>
+
+#ifdef CLIB_UNIX
+#include <stdlib.h>
+#include <unistd.h>
+#endif
+
+typedef struct
+{
+ uword *the_builtin_eval_hash;
+ uword *the_value_table_hash;
+} macro_main_t;
+
+int clib_macro_unset (macro_main_t * mm, char *name);
+int clib_macro_set_value (macro_main_t * mm, char *name, char *value);
+void clib_macro_add_builtin (macro_main_t * mm, char *name, void *eval_fn);
+i8 *clib_macro_get_value (macro_main_t * mm, char *name);
+i8 *clib_macro_eval (macro_main_t * mm, i8 * s, i32 complain);
+i8 *clib_macro_eval_dollar (macro_main_t * mm, i8 * s, i32 complain);
+void clib_macro_init (macro_main_t * mm);
+void clib_macro_free (macro_main_t * mm);
+
+#endif /* included_macros_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/math.h b/src/vppinfra/math.h
new file mode 100644
index 00000000..cafa1cb3
--- /dev/null
+++ b/src/vppinfra/math.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_math_h
+#define included_math_h
+
+#include <vppinfra/clib.h>
+
+always_inline f64
+sqrt (f64 x)
+{
+ return __builtin_sqrt (x);
+}
+
+always_inline f64
+fabs (f64 x)
+{
+ return __builtin_fabs (x);
+}
+
+#ifndef isnan
+#define isnan(x) __builtin_isnan(x)
+#endif
+
+#ifndef isinf
+#define isinf(x) __builtin_isinf(x)
+#endif
+
+#endif /* included_math_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/md5.c b/src/vppinfra/md5.c
new file mode 100644
index 00000000..9ac1efc7
--- /dev/null
+++ b/src/vppinfra/md5.c
@@ -0,0 +1,317 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/* MD5C.C - RSA Data Security, Inc., MD5 message-digest algorithm */
+
+/* Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All
+rights reserved.
+
+License to copy and use this software is granted provided that it
+is identified as the "RSA Data Security, Inc. MD5 Message-Digest
+Algorithm" in all material mentioning or referencing this software
+or this function.
+
+License is also granted to make and use derivative works provided
+that such works are identified as "derived from the RSA Data
+Security, Inc. MD5 Message-Digest Algorithm" in all material
+mentioning or referencing the derived work.
+
+RSA Data Security, Inc. makes no representations concerning either
+the merchantability of this software or the suitability of this
+software for any particular purpose. It is provided "as is"
+without express or implied warranty of any kind.
+
+These notices must be retained in any copies of any part of this
+documentation and/or software.
+ */
+
+#include <vppinfra/string.h> /* for memset */
+#include <vppinfra/byte_order.h>
+#include <vppinfra/md5.h>
+
+/* F, G, H and I are basic MD5 functions. */
+#define F(b, c, d) (d ^ (b & (c ^ d)))
+#define G(b, c, d) F (d, b, c)
+#define H(b, c, d) (b ^ c ^ d)
+#define I(b, c, d) (c ^ (b | ~d))
+
+/* ROTATE_LEFT rotates x left n bits. */
+#define ROTATE_LEFT(x,n) \
+ (((x) << (n)) | ((x) >> (32 - (n))))
+
+/* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4.
+ Rotation is separate from addition to prevent recomputation. */
+#define FF(a,b,c,d,x,s,ac) \
+do { \
+ a += F (b, c, d) + x + ac; \
+ a = ROTATE_LEFT (a, s); \
+ a += b; \
+} while (0)
+
+#define GG(a,b,c,d,x,s,ac) \
+do { \
+ a += G (b, c, d) + x + ac; \
+ a = ROTATE_LEFT (a, s); \
+ a += b; \
+} while (0)
+
+#define HH(a,b,c,d,x,s,ac) \
+do { \
+ a += H (b, c, d) + x + ac; \
+ a = ROTATE_LEFT (a, s); \
+ a += b; \
+} while (0)
+
+#define II(a,b,c,d,x,s,ac) \
+do { \
+ a += I (b, c, d) + x + ac; \
+ a = ROTATE_LEFT (a, s); \
+ a += b; \
+} while (0)
+
+#undef _
+
+/* MD5 basic transformation. Transforms state based on block. */
+static void
+md5_transform (md5_context_t * m, u32 * data, u32 * result, int zero_buffer)
+{
+ u32 a = m->state[0], b = m->state[1], c = m->state[2], d = m->state[3];
+ u32 *x = data;
+
+/* Constants for MD5Transform routine. */
+#define S11 7
+#define S12 12
+#define S13 17
+#define S14 22
+#define S21 5
+#define S22 9
+#define S23 14
+#define S24 20
+#define S31 4
+#define S32 11
+#define S33 16
+#define S34 23
+#define S41 6
+#define S42 10
+#define S43 15
+#define S44 21
+
+ /* Round 1 */
+ FF (a, b, c, d, clib_host_to_little_u32 (x[0]), S11, 0xd76aa478); /* 1 */
+ FF (d, a, b, c, clib_host_to_little_u32 (x[1]), S12, 0xe8c7b756); /* 2 */
+ FF (c, d, a, b, clib_host_to_little_u32 (x[2]), S13, 0x242070db); /* 3 */
+ FF (b, c, d, a, clib_host_to_little_u32 (x[3]), S14, 0xc1bdceee); /* 4 */
+ FF (a, b, c, d, clib_host_to_little_u32 (x[4]), S11, 0xf57c0faf); /* 5 */
+ FF (d, a, b, c, clib_host_to_little_u32 (x[5]), S12, 0x4787c62a); /* 6 */
+ FF (c, d, a, b, clib_host_to_little_u32 (x[6]), S13, 0xa8304613); /* 7 */
+ FF (b, c, d, a, clib_host_to_little_u32 (x[7]), S14, 0xfd469501); /* 8 */
+ FF (a, b, c, d, clib_host_to_little_u32 (x[8]), S11, 0x698098d8); /* 9 */
+ FF (d, a, b, c, clib_host_to_little_u32 (x[9]), S12, 0x8b44f7af); /* 10 */
+ FF (c, d, a, b, clib_host_to_little_u32 (x[10]), S13, 0xffff5bb1); /* 11 */
+ FF (b, c, d, a, clib_host_to_little_u32 (x[11]), S14, 0x895cd7be); /* 12 */
+ FF (a, b, c, d, clib_host_to_little_u32 (x[12]), S11, 0x6b901122); /* 13 */
+ FF (d, a, b, c, clib_host_to_little_u32 (x[13]), S12, 0xfd987193); /* 14 */
+ FF (c, d, a, b, clib_host_to_little_u32 (x[14]), S13, 0xa679438e); /* 15 */
+ FF (b, c, d, a, clib_host_to_little_u32 (x[15]), S14, 0x49b40821); /* 16 */
+
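+  /* Note: rounds 2-4 below reuse x[] without the byte swap applied in
+     round 1, so the transform assumes a little-endian host. */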
+ /* Round 2 */
+ GG (a, b, c, d, x[1], S21, 0xf61e2562); /* 17 */
+ GG (d, a, b, c, x[6], S22, 0xc040b340); /* 18 */
+ GG (c, d, a, b, x[11], S23, 0x265e5a51); /* 19 */
+ GG (b, c, d, a, x[0], S24, 0xe9b6c7aa); /* 20 */
+ GG (a, b, c, d, x[5], S21, 0xd62f105d); /* 21 */
+ GG (d, a, b, c, x[10], S22, 0x02441453); /* 22 */
+ GG (c, d, a, b, x[15], S23, 0xd8a1e681); /* 23 */
+ GG (b, c, d, a, x[4], S24, 0xe7d3fbc8); /* 24 */
+ GG (a, b, c, d, x[9], S21, 0x21e1cde6); /* 25 */
+ GG (d, a, b, c, x[14], S22, 0xc33707d6); /* 26 */
+ GG (c, d, a, b, x[3], S23, 0xf4d50d87); /* 27 */
+ GG (b, c, d, a, x[8], S24, 0x455a14ed); /* 28 */
+ GG (a, b, c, d, x[13], S21, 0xa9e3e905); /* 29 */
+ GG (d, a, b, c, x[2], S22, 0xfcefa3f8); /* 30 */
+ GG (c, d, a, b, x[7], S23, 0x676f02d9); /* 31 */
+ GG (b, c, d, a, x[12], S24, 0x8d2a4c8a); /* 32 */
+
+ /* Round 3 */
+ HH (a, b, c, d, x[5], S31, 0xfffa3942); /* 33 */
+ HH (d, a, b, c, x[8], S32, 0x8771f681); /* 34 */
+ HH (c, d, a, b, x[11], S33, 0x6d9d6122); /* 35 */
+ HH (b, c, d, a, x[14], S34, 0xfde5380c); /* 36 */
+ HH (a, b, c, d, x[1], S31, 0xa4beea44); /* 37 */
+ HH (d, a, b, c, x[4], S32, 0x4bdecfa9); /* 38 */
+ HH (c, d, a, b, x[7], S33, 0xf6bb4b60); /* 39 */
+ HH (b, c, d, a, x[10], S34, 0xbebfbc70); /* 40 */
+ HH (a, b, c, d, x[13], S31, 0x289b7ec6); /* 41 */
+ HH (d, a, b, c, x[0], S32, 0xeaa127fa); /* 42 */
+ HH (c, d, a, b, x[3], S33, 0xd4ef3085); /* 43 */
+ HH (b, c, d, a, x[6], S34, 0x04881d05); /* 44 */
+ HH (a, b, c, d, x[9], S31, 0xd9d4d039); /* 45 */
+ HH (d, a, b, c, x[12], S32, 0xe6db99e5); /* 46 */
+ HH (c, d, a, b, x[15], S33, 0x1fa27cf8); /* 47 */
+ HH (b, c, d, a, x[2], S34, 0xc4ac5665); /* 48 */
+
+ /* Round 4 */
+ II (a, b, c, d, x[0], S41, 0xf4292244); /* 49 */
+ II (d, a, b, c, x[7], S42, 0x432aff97); /* 50 */
+ II (c, d, a, b, x[14], S43, 0xab9423a7); /* 51 */
+ II (b, c, d, a, x[5], S44, 0xfc93a039); /* 52 */
+ II (a, b, c, d, x[12], S41, 0x655b59c3); /* 53 */
+ II (d, a, b, c, x[3], S42, 0x8f0ccc92); /* 54 */
+ II (c, d, a, b, x[10], S43, 0xffeff47d); /* 55 */
+ II (b, c, d, a, x[1], S44, 0x85845dd1); /* 56 */
+ II (a, b, c, d, x[8], S41, 0x6fa87e4f); /* 57 */
+ II (d, a, b, c, x[15], S42, 0xfe2ce6e0); /* 58 */
+ II (c, d, a, b, x[6], S43, 0xa3014314); /* 59 */
+ II (b, c, d, a, x[13], S44, 0x4e0811a1); /* 60 */
+ II (a, b, c, d, x[4], S41, 0xf7537e82); /* 61 */
+ II (d, a, b, c, x[11], S42, 0xbd3af235); /* 62 */
+ II (c, d, a, b, x[2], S43, 0x2ad7d2bb); /* 63 */
+ II (b, c, d, a, x[9], S44, 0xeb86d391); /* 64 */
+
+ a += m->state[0];
+ b += m->state[1];
+ c += m->state[2];
+ d += m->state[3];
+
+ if (result)
+ {
+ result[0] = clib_host_to_little_u32 (a);
+ result[1] = clib_host_to_little_u32 (b);
+ result[2] = clib_host_to_little_u32 (c);
+ result[3] = clib_host_to_little_u32 (d);
+ }
+ else
+ {
+ m->state[0] = a;
+ m->state[1] = b;
+ m->state[2] = c;
+ m->state[3] = d;
+ }
+
+ /* Zero sensitive information. */
+ if (result)
+ memset (m, ~0, sizeof (m[0]));
+ else if (zero_buffer)
+ memset (m->input_buffer.b8, 0, sizeof (m->input_buffer));
+}
+
+/* MD5 initialization. Begins an MD5 operation, writing a new context. */
+void
+md5_init (md5_context_t * c)
+{
+ memset (c, 0, sizeof (c[0]));
+
+ /* Load magic initialization constants. */
+ c->state[0] = 0x67452301;
+ c->state[1] = 0xefcdab89;
+ c->state[2] = 0x98badcfe;
+ c->state[3] = 0x10325476;
+}
+
+always_inline void __attribute__ ((unused))
+md5_fill_buffer_aligned (md5_context_t * c, u32 * d32)
+{
+ int i;
+ for (i = 0; i < ARRAY_LEN (c->input_buffer.b32); i++)
+ c->input_buffer.b32[i] = d32[i];
+}
+
+/* MD5 block update operation. Continues an MD5 message-digest
+ operation, processing another message block, and updating the
+ context.
+ */
+void
+md5_add (md5_context_t * c, void *data, int data_bytes)
+{
+ u32 data_bytes_left;
+ void *d;
+
+ if (data_bytes == 0)
+ return;
+
+ d = data;
+ data_bytes_left = data_bytes;
+
+ if ((pointer_to_uword (d) % sizeof (u32)) == 0
+ && (c->n_bits % BITS (c->input_buffer)) == 0
+ && data_bytes >= sizeof (c->input_buffer))
+ {
+ int is_last_iteration;
+ /* Fast aligned version. */
+ do
+ {
+ data_bytes_left -= sizeof (c->input_buffer);
+ is_last_iteration = data_bytes_left < sizeof (c->input_buffer);
+ md5_transform (c, d, /* result */ 0, /* zero_buffer */
+ is_last_iteration);
+ d += sizeof (c->input_buffer);
+ }
+ while (!is_last_iteration);
+ }
+
+ /* Slow unaligned version. */
+ {
+ int bi;
+ u8 *d8 = d;
+
+ bi = (c->n_bits / BITS (u8)) % ARRAY_LEN (c->input_buffer.b8);
+
+ while (data_bytes_left > 0)
+ {
+ c->input_buffer.b8[bi] = d8[0];
+ data_bytes_left -= 1;
+ d8++;
+ bi++;
+ if (bi == ARRAY_LEN (c->input_buffer.b8))
+ {
+ bi = 0;
+ md5_transform (c, c->input_buffer.b32,
+ /* result */ 0,
+ /* zero_buffer */ 1);
+ }
+ }
+ }
+
+ c->n_bits += data_bytes * BITS (u8);
+}
+
+void
+md5_finish (md5_context_t * c, u8 * digest)
+{
+ u64 n_bits_save;
+ int bi, n_pad;
+ static u8 padding[sizeof (c->input_buffer)] = { 0x80, 0, };
+
+ n_bits_save = c->n_bits;
+ bi = (n_bits_save / BITS (u8)) % ARRAY_LEN (c->input_buffer.b8);
+
+ n_pad = sizeof (c->input_buffer) - (bi + sizeof (u64));
+ if (n_pad <= 0)
+ n_pad += sizeof (c->input_buffer);
+ md5_add (c, padding, n_pad);
+
+ c->input_buffer.b64[ARRAY_LEN (c->input_buffer.b64) - 1]
+ = clib_host_to_little_u64 (n_bits_save);
+
+ md5_transform (c, c->input_buffer.b32, (u32 *) digest,
+ /* zero_buffer */ 1);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
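
The padding logic in md5_finish above implements the standard MD5 rule:
append 0x80 and zeros so that exactly 8 bytes remain for the bit count in
the final 64-byte block. A standalone sketch of the same arithmetic
(illustrative, not part of the patch):

    /* bytes of padding so that (len + pad + 8) is a multiple of 64 */
    static int
    md5_pad_bytes (unsigned long len)
    {
      int bi = len % 64;           /* bytes already in the current block */
      int n_pad = 64 - (bi + 8);   /* room left before the length field */
      if (n_pad <= 0)              /* length field spills into next block */
        n_pad += 64;
      return n_pad;
    }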
diff --git a/src/vppinfra/md5.h b/src/vppinfra/md5.h
new file mode 100644
index 00000000..52123886
--- /dev/null
+++ b/src/vppinfra/md5.h
@@ -0,0 +1,57 @@
+/*
+ Copyright (c) 2004 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_md5_h
+#define included_md5_h
+
+#include <vppinfra/clib.h>
+
+typedef struct
+{
+ u64 n_bits;
+
+ u32 state[4];
+
+ union
+ {
+ u64 b64[8];
+ u32 b32[16];
+ u8 b8[16 * 4];
+ } input_buffer;
+
+  /* No digest field here: md5_finish writes the resulting message
+     digest into a caller-supplied 16-byte buffer. */
+} md5_context_t;
+
+void md5_init (md5_context_t * c);
+void md5_add (md5_context_t * c, void *data, int data_bytes);
+void md5_finish (md5_context_t * c, u8 digest[16]);
+
+#endif /* included_md5_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
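
A usage sketch for the API declared above (illustrative, not part of the
patch; the helper name is hypothetical):

    #include <vppinfra/md5.h>

    static void
    digest_of (void *data, int len, u8 digest[16])
    {
      md5_context_t ctx;
      md5_init (&ctx);
      md5_add (&ctx, data, len);  /* may be called repeatedly to stream data */
      md5_finish (&ctx, digest);  /* writes 16 bytes, then scrubs the context */
    }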
diff --git a/src/vppinfra/mem.h b/src/vppinfra/mem.h
new file mode 100644
index 00000000..69ab8803
--- /dev/null
+++ b/src/vppinfra/mem.h
@@ -0,0 +1,365 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef _included_clib_mem_h
+#define _included_clib_mem_h
+
+#include <stdarg.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+#include <vppinfra/clib.h> /* uword, etc */
+#include <vppinfra/clib_error.h>
+#include <vppinfra/mheap_bootstrap.h>
+#include <vppinfra/os.h>
+#include <vppinfra/string.h> /* memcpy, memset */
+#include <vppinfra/valgrind.h>
+
+#define CLIB_MAX_MHEAPS 256
+
+/* Per CPU heaps. */
+extern void *clib_per_cpu_mheaps[CLIB_MAX_MHEAPS];
+
+always_inline void *
+clib_mem_get_per_cpu_heap (void)
+{
+ int cpu = os_get_thread_index ();
+ return clib_per_cpu_mheaps[cpu];
+}
+
+always_inline void *
+clib_mem_set_per_cpu_heap (u8 * new_heap)
+{
+ int cpu = os_get_thread_index ();
+ void *old = clib_per_cpu_mheaps[cpu];
+ clib_per_cpu_mheaps[cpu] = new_heap;
+ return old;
+}
+
+/* Memory allocator which may call os_out_of_memory() if it fails */
+always_inline void *
+clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
+ int os_out_of_memory_on_failure)
+{
+ void *heap, *p;
+ uword offset, cpu;
+
+ if (align_offset > align)
+ {
+ if (align > 0)
+ align_offset %= align;
+ else
+ align_offset = align;
+ }
+
+ cpu = os_get_thread_index ();
+ heap = clib_per_cpu_mheaps[cpu];
+ heap = mheap_get_aligned (heap, size, align, align_offset, &offset);
+ clib_per_cpu_mheaps[cpu] = heap;
+
+ if (offset != ~0)
+ {
+ p = heap + offset;
+#if CLIB_DEBUG > 0
+ VALGRIND_MALLOCLIKE_BLOCK (p, mheap_data_bytes (heap, offset), 0, 0);
+#endif
+ return p;
+ }
+ else
+ {
+ if (os_out_of_memory_on_failure)
+ os_out_of_memory ();
+ return 0;
+ }
+}
+
+/* Memory allocator which calls os_out_of_memory() when it fails */
+always_inline void *
+clib_mem_alloc (uword size)
+{
+ return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
+ /* align_offset */ 0,
+ /* os_out_of_memory */ 1);
+}
+
+always_inline void *
+clib_mem_alloc_aligned (uword size, uword align)
+{
+ return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
+ /* os_out_of_memory */ 1);
+}
+
+/* Memory allocator which returns null when it fails */
+always_inline void *
+clib_mem_alloc_or_null (uword size)
+{
+ return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
+ /* align_offset */ 0,
+ /* os_out_of_memory */ 0);
+}
+
+always_inline void *
+clib_mem_alloc_aligned_or_null (uword size, uword align)
+{
+ return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
+ /* os_out_of_memory */ 0);
+}
+
+
+
+/* Memory allocator which panics when it fails.
+ Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
+#define clib_mem_alloc_aligned_no_fail(size,align) \
+({ \
+ uword _clib_mem_alloc_size = (size); \
+ void * _clib_mem_alloc_p; \
+ _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align)); \
+ if (! _clib_mem_alloc_p) \
+ clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size); \
+ _clib_mem_alloc_p; \
+})
+
+#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)
+
+/* Alias to stack allocator for naming consistency. */
+#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)
+
+always_inline uword
+clib_mem_is_heap_object (void *p)
+{
+ void *heap = clib_mem_get_per_cpu_heap ();
+ uword offset = (uword) p - (uword) heap;
+ mheap_elt_t *e, *n;
+
+ if (offset >= vec_len (heap))
+ return 0;
+
+ e = mheap_elt_at_uoffset (heap, offset);
+ n = mheap_next_elt (e);
+
+ /* Check that heap forward and reverse pointers agree. */
+ return e->n_user_data == n->prev_n_user_data;
+}
+
+always_inline void
+clib_mem_free (void *p)
+{
+ u8 *heap = clib_mem_get_per_cpu_heap ();
+
+ /* Make sure object is in the correct heap. */
+ ASSERT (clib_mem_is_heap_object (p));
+
+ mheap_put (heap, (u8 *) p - heap);
+
+#if CLIB_DEBUG > 0
+ VALGRIND_FREELIKE_BLOCK (p, 0);
+#endif
+}
+
+always_inline void *
+clib_mem_realloc (void *p, uword new_size, uword old_size)
+{
+ /* By default use alloc, copy and free to emulate realloc. */
+ void *q = clib_mem_alloc (new_size);
+ if (q)
+ {
+ uword copy_size;
+ if (old_size < new_size)
+ copy_size = old_size;
+ else
+ copy_size = new_size;
+ clib_memcpy (q, p, copy_size);
+ clib_mem_free (p);
+ }
+ return q;
+}
+
+always_inline uword
+clib_mem_size (void *p)
+{
+ ASSERT (clib_mem_is_heap_object (p));
+ mheap_elt_t *e = mheap_user_pointer_to_elt (p);
+ return mheap_elt_data_bytes (e);
+}
+
+always_inline void *
+clib_mem_get_heap (void)
+{
+ return clib_mem_get_per_cpu_heap ();
+}
+
+always_inline void *
+clib_mem_set_heap (void *heap)
+{
+ return clib_mem_set_per_cpu_heap (heap);
+}
+
+void *clib_mem_init (void *heap, uword size);
+
+void clib_mem_exit (void);
+
+uword clib_mem_get_page_size (void);
+
+void clib_mem_validate (void);
+
+void clib_mem_trace (int enable);
+
+typedef struct
+{
+ /* Total number of objects allocated. */
+ uword object_count;
+
+ /* Total allocated bytes. Bytes used and free.
+ used + free = total */
+ uword bytes_total, bytes_used, bytes_free;
+
+ /* Number of bytes used by mheap data structure overhead
+ (e.g. free lists, mheap header). */
+ uword bytes_overhead;
+
+ /* Amount of free space returned to operating system. */
+ uword bytes_free_reclaimed;
+
+ /* For malloc which puts small objects in sbrk region and
+ large objects in mmap'ed regions. */
+ uword bytes_used_sbrk;
+ uword bytes_used_mmap;
+
+ /* Max. number of bytes in this heap. */
+ uword bytes_max;
+} clib_mem_usage_t;
+
+void clib_mem_usage (clib_mem_usage_t * usage);
+
+u8 *format_clib_mem_usage (u8 * s, va_list * args);
+
+/* Allocate virtual address space. */
+always_inline void *
+clib_mem_vm_alloc (uword size)
+{
+ void *mmap_addr;
+ uword flags = MAP_PRIVATE;
+
+#ifdef MAP_ANONYMOUS
+ flags |= MAP_ANONYMOUS;
+#endif
+
+ mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
+ if (mmap_addr == (void *) -1)
+ mmap_addr = 0;
+
+ return mmap_addr;
+}
+
+always_inline void
+clib_mem_vm_free (void *addr, uword size)
+{
+ munmap (addr, size);
+}
+
+always_inline void *
+clib_mem_vm_unmap (void *addr, uword size)
+{
+ void *mmap_addr;
+ uword flags = MAP_PRIVATE | MAP_FIXED;
+
+ /* To unmap we "map" with no protection. If we actually called
+ munmap then other callers could steal the address space. By
+ changing to PROT_NONE the kernel can free up the pages which is
+ really what we want "unmap" to mean. */
+ mmap_addr = mmap (addr, size, PROT_NONE, flags, -1, 0);
+ if (mmap_addr == (void *) -1)
+ mmap_addr = 0;
+
+ return mmap_addr;
+}
+
+always_inline void *
+clib_mem_vm_map (void *addr, uword size)
+{
+ void *mmap_addr;
+ uword flags = MAP_PRIVATE | MAP_FIXED;
+
+ mmap_addr = mmap (addr, size, (PROT_READ | PROT_WRITE), flags, -1, 0);
+ if (mmap_addr == (void *) -1)
+ mmap_addr = 0;
+
+ return mmap_addr;
+}
+
+typedef struct
+{
+#define CLIB_MEM_VM_F_SHARED (1 << 0)
+#define CLIB_MEM_VM_F_HUGETLB (1 << 1)
+#define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
+#define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
+#define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
+ u32 flags; /**< vm allocation flags:
+ <br> CLIB_MEM_VM_F_SHARED: request shared memory, file
+                          descriptor will be provided on successful allocation.
+ <br> CLIB_MEM_VM_F_HUGETLB: request hugepages.
+ <br> CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains valid
+ numa node preference.
+ <br> CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
+ <br> CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
+ number of available pages is not sufficient.
+ */
+ char *name; /**< Name for memory allocation, set by caller. */
+ uword size; /**< Allocation size, set by caller. */
+ int numa_node; /**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
+ void *addr; /**< Pointer to allocated memory, set on successful allocation. */
+  int fd;	/**< File descriptor, set on successful allocation if CLIB_MEM_VM_F_SHARED is set. */
+  int log2_page_size;	/**< Page size in log2 format, set on successful allocation. */
+  int n_pages;		/**< Number of pages. */
+} clib_mem_vm_alloc_t;
+
+clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
+int clib_mem_vm_get_log2_page_size (int fd);
+u64 *clib_mem_vm_get_paddr (void *mem, int log2_page_size, int n_pages);
+
+
+#include <vppinfra/error.h> /* clib_panic */
+
+#endif /* _included_clib_mem_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
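
A usage sketch for the per-thread heap allocators above (illustrative, not
part of the patch; sizes are arbitrary):

    #include <vppinfra/mem.h>

    static void
    alloc_example (void)
    {
      /* cache-line aligned allocation from this thread's mheap */
      u8 *p = clib_mem_alloc_aligned (128, 64);
      ASSERT (clib_mem_is_heap_object (p));
      clib_mem_free (p);

      /* _or_null variant returns 0 instead of calling os_out_of_memory() */
      u8 *q = clib_mem_alloc_or_null (1ULL << 40);
      if (q)
        clib_mem_free (q);
    }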
diff --git a/src/vppinfra/mem_mheap.c b/src/vppinfra/mem_mheap.c
new file mode 100644
index 00000000..9b2af520
--- /dev/null
+++ b/src/vppinfra/mem_mheap.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/format.h>
+#include <vppinfra/mheap.h>
+#include <vppinfra/os.h>
+
+/* Valgrind stuff. */
+#include <vppinfra/memcheck.h>
+#include <vppinfra/valgrind.h>
+
+void *clib_per_cpu_mheaps[CLIB_MAX_MHEAPS];
+
+void
+clib_mem_exit (void)
+{
+ u8 *heap = clib_mem_get_per_cpu_heap ();
+ if (heap)
+ mheap_free (heap);
+ clib_mem_set_per_cpu_heap (0);
+}
+
+/* Initialize the CLIB heap from user-supplied memory/size.
+   Pass memory = 0 and CLIB will try to allocate its own heap. */
+void *
+clib_mem_init (void *memory, uword memory_size)
+{
+ u8 *heap;
+
+ if (memory || memory_size)
+ heap = mheap_alloc (memory, memory_size);
+ else
+ {
+ /* Allocate lots of address space since this will limit
+ the amount of memory the program can allocate.
+ In the kernel we're more conservative since some architectures
+ (e.g. mips) have pretty small kernel virtual address spaces. */
+#ifdef __KERNEL__
+#define MAX_VM_MEG 64
+#else
+#define MAX_VM_MEG 1024
+#endif
+
+ uword alloc_size = MAX_VM_MEG << 20;
+ uword tries = 16;
+
+ while (1)
+ {
+ heap = mheap_alloc (0, alloc_size);
+ if (heap)
+ break;
+ alloc_size = (alloc_size * 3) / 4;
+ tries--;
+ if (tries == 0)
+ break;
+ }
+ }
+
+ clib_mem_set_heap (heap);
+
+ return heap;
+}
+
+#ifdef CLIB_LINUX_KERNEL
+#include <asm/page.h>
+
+uword
+clib_mem_get_page_size (void)
+{
+ return PAGE_SIZE;
+}
+#endif
+
+#ifdef CLIB_UNIX
+uword
+clib_mem_get_page_size (void)
+{
+ return getpagesize ();
+}
+#endif
+
+/* Make a guess for standalone. */
+#ifdef CLIB_STANDALONE
+uword
+clib_mem_get_page_size (void)
+{
+ return 4096;
+}
+#endif
+
+u8 *
+format_clib_mem_usage (u8 * s, va_list * va)
+{
+ int verbose = va_arg (*va, int);
+ return format (s, "%U", format_mheap, clib_mem_get_heap (), verbose);
+}
+
+void
+clib_mem_usage (clib_mem_usage_t * u)
+{
+ mheap_usage (clib_mem_get_heap (), u);
+}
+
+/* Call serial number for debugger breakpoints. */
+uword clib_mem_validate_serial = 0;
+
+void
+clib_mem_validate (void)
+{
+ if (MHEAP_HAVE_SMALL_OBJECT_CACHE)
+ clib_warning ("clib_mem_validate disabled (small object cache is ON)");
+ else
+ {
+ mheap_validate (clib_mem_get_heap ());
+ clib_mem_validate_serial++;
+ }
+}
+
+void
+clib_mem_trace (int enable)
+{
+ mheap_trace (clib_mem_get_heap (), enable);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
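
Typical bootstrap against the functions above (a sketch, not part of the
patch):

    #include <vppinfra/mem.h>

    int
    main (int argc, char *argv[])
    {
      clib_mem_init (0, 0);	/* 0/0: let CLIB size and allocate its own heap */
      /* ... application work: clib_mem_alloc / clib_mem_free ... */
      clib_mem_exit ();
      return 0;
    }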
diff --git a/src/vppinfra/memcheck.h b/src/vppinfra/memcheck.h
new file mode 100644
index 00000000..44db3a8a
--- /dev/null
+++ b/src/vppinfra/memcheck.h
@@ -0,0 +1,317 @@
+
+/*
+ ----------------------------------------------------------------
+
+ Notice that the following BSD-style license applies to this one
+ file (memcheck.h) only. The rest of Valgrind is licensed under the
+ terms of the GNU General Public License, version 2, unless
+ otherwise indicated. See the COPYING file in the source
+ distribution for details.
+
+ ----------------------------------------------------------------
+
+ This file is part of MemCheck, a heavyweight Valgrind tool for
+ detecting memory errors.
+
+ Copyright (C) 2000-2009 Julian Seward. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. The origin of this software must not be misrepresented; you must
+ not claim that you wrote the original software. If you use this
+ software in a product, an acknowledgment in the product
+ documentation would be appreciated but is not required.
+
+ 3. Altered source versions must be plainly marked as such, and must
+ not be misrepresented as being the original software.
+
+ 4. The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ----------------------------------------------------------------
+
+ Notice that the above BSD-style license applies to this one file
+ (memcheck.h) only. The entire rest of Valgrind is licensed under
+ the terms of the GNU General Public License, version 2. See the
+ COPYING file in the source distribution for details.
+
+ ----------------------------------------------------------------
+*/
+
+
+#ifndef __MEMCHECK_H
+#define __MEMCHECK_H
+
+
+/* This file is for inclusion into client (your!) code.
+
+ You can use these macros to manipulate and query memory permissions
+ inside your own programs.
+
+ See comment near the top of valgrind.h on how to use them.
+*/
+
+#include "valgrind.h"
+
+/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
+ This enum comprises an ABI exported by Valgrind to programs
+ which use client requests. DO NOT CHANGE THE ORDER OF THESE
+ ENTRIES, NOR DELETE ANY -- add new ones at the end. */
+typedef enum
+{
+ VG_USERREQ__MAKE_MEM_NOACCESS = VG_USERREQ_TOOL_BASE ('M', 'C'),
+ VG_USERREQ__MAKE_MEM_UNDEFINED,
+ VG_USERREQ__MAKE_MEM_DEFINED,
+ VG_USERREQ__DISCARD,
+ VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,
+ VG_USERREQ__CHECK_MEM_IS_DEFINED,
+ VG_USERREQ__DO_LEAK_CHECK,
+ VG_USERREQ__COUNT_LEAKS,
+
+ VG_USERREQ__GET_VBITS,
+ VG_USERREQ__SET_VBITS,
+
+ VG_USERREQ__CREATE_BLOCK,
+
+ VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE,
+
+ /* Not next to VG_USERREQ__COUNT_LEAKS because it was added later. */
+ VG_USERREQ__COUNT_LEAK_BLOCKS,
+
+ /* This is just for memcheck's internal use - don't use it */
+ _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR
+ = VG_USERREQ_TOOL_BASE ('M', 'C') + 256
+} Vg_MemCheckClientRequest;
+
+
+
+/* Client-code macros to manipulate the state of memory. */
+
+/* Mark memory at _qzz_addr as unaddressable for _qzz_len bytes. */
+#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr,_qzz_len) \
+ (__extension__({unsigned long _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \
+ VG_USERREQ__MAKE_MEM_NOACCESS, \
+ _qzz_addr, _qzz_len, 0, 0, 0); \
+ _qzz_res; \
+ }))
+
+/* Similarly, mark memory at _qzz_addr as addressable but undefined
+ for _qzz_len bytes. */
+#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr,_qzz_len) \
+ (__extension__({unsigned long _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \
+ VG_USERREQ__MAKE_MEM_UNDEFINED, \
+ _qzz_addr, _qzz_len, 0, 0, 0); \
+ _qzz_res; \
+ }))
+
+/* Similarly, mark memory at _qzz_addr as addressable and defined
+ for _qzz_len bytes. */
+#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr,_qzz_len) \
+ (__extension__({unsigned long _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \
+ VG_USERREQ__MAKE_MEM_DEFINED, \
+ _qzz_addr, _qzz_len, 0, 0, 0); \
+ _qzz_res; \
+ }))
+
+/* Similar to VALGRIND_MAKE_MEM_DEFINED except that addressability is
+ not altered: bytes which are addressable are marked as defined,
+ but those which are not addressable are left unchanged. */
+#define VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(_qzz_addr,_qzz_len) \
+ (__extension__({unsigned long _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \
+ VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE, \
+ _qzz_addr, _qzz_len, 0, 0, 0); \
+ _qzz_res; \
+ }))
+
+/* Create a block-description handle. The description is an ascii
+ string which is included in any messages pertaining to addresses
+ within the specified memory range. Has no other effect on the
+ properties of the memory range. */
+#define VALGRIND_CREATE_BLOCK(_qzz_addr,_qzz_len, _qzz_desc) \
+ (__extension__({unsigned long _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \
+ VG_USERREQ__CREATE_BLOCK, \
+ _qzz_addr, _qzz_len, _qzz_desc, \
+ 0, 0); \
+ _qzz_res; \
+ }))
+
+/* Discard a block-description-handle. Returns 1 for an
+ invalid handle, 0 for a valid handle. */
+#define VALGRIND_DISCARD(_qzz_blkindex) \
+ (__extension__ ({unsigned long _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \
+ VG_USERREQ__DISCARD, \
+ 0, _qzz_blkindex, 0, 0, 0); \
+ _qzz_res; \
+ }))
+
+
+/* Client-code macros to check the state of memory. */
+
+/* Check that memory at _qzz_addr is addressable for _qzz_len bytes.
+   If suitable addressability is not established, Valgrind prints an
+ error message and returns the address of the first offending byte.
+ Otherwise it returns zero. */
+#define VALGRIND_CHECK_MEM_IS_ADDRESSABLE(_qzz_addr,_qzz_len) \
+ (__extension__({unsigned long _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,\
+ _qzz_addr, _qzz_len, 0, 0, 0); \
+ _qzz_res; \
+ }))
+
+/* Check that memory at _qzz_addr is addressable and defined for
+   _qzz_len bytes.  If suitable addressability and definedness are not
+ established, Valgrind prints an error message and returns the
+ address of the first offending byte. Otherwise it returns zero. */
+#define VALGRIND_CHECK_MEM_IS_DEFINED(_qzz_addr,_qzz_len) \
+ (__extension__({unsigned long _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__CHECK_MEM_IS_DEFINED, \
+ _qzz_addr, _qzz_len, 0, 0, 0); \
+ _qzz_res; \
+ }))
+
+/* Use this macro to force the definedness and addressability of an
+   lvalue to be checked.  If suitable addressability and definedness
+ are not established, Valgrind prints an error message and returns
+ the address of the first offending byte. Otherwise it returns
+ zero. */
+#define VALGRIND_CHECK_VALUE_IS_DEFINED(__lvalue) \
+ VALGRIND_CHECK_MEM_IS_DEFINED( \
+ (volatile unsigned char *)&(__lvalue), \
+ (unsigned long)(sizeof (__lvalue)))
+
+
+/* Do a full memory leak check (like --leak-check=full) mid-execution. */
+#define VALGRIND_DO_LEAK_CHECK \
+ {unsigned long _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__DO_LEAK_CHECK, \
+ 0, 0, 0, 0, 0); \
+ }
+
+/* Do a summary memory leak check (like --leak-check=summary) mid-execution. */
+#define VALGRIND_DO_QUICK_LEAK_CHECK \
+ {unsigned long _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__DO_LEAK_CHECK, \
+ 1, 0, 0, 0, 0); \
+ }
+
+/* Return number of leaked, dubious, reachable and suppressed bytes found by
+ all previous leak checks. They must be lvalues. */
+#define VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed) \
+ /* For safety on 64-bit platforms we assign the results to private
+ unsigned long variables, then assign these to the lvalues the user
+ specified, which works no matter what type 'leaked', 'dubious', etc
+ are. We also initialise '_qzz_leaked', etc because
+ VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
+ defined. */ \
+ {unsigned long _qzz_res; \
+ unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \
+ unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__COUNT_LEAKS, \
+ &_qzz_leaked, &_qzz_dubious, \
+ &_qzz_reachable, &_qzz_suppressed, 0); \
+ leaked = _qzz_leaked; \
+ dubious = _qzz_dubious; \
+ reachable = _qzz_reachable; \
+ suppressed = _qzz_suppressed; \
+ }
+
+/* Return number of leaked, dubious, reachable and suppressed bytes found by
+ all previous leak checks. They must be lvalues. */
+#define VALGRIND_COUNT_LEAK_BLOCKS(leaked, dubious, reachable, suppressed) \
+ /* For safety on 64-bit platforms we assign the results to private
+ unsigned long variables, then assign these to the lvalues the user
+ specified, which works no matter what type 'leaked', 'dubious', etc
+ are. We also initialise '_qzz_leaked', etc because
+ VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
+ defined. */ \
+ {unsigned long _qzz_res; \
+ unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \
+ unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__COUNT_LEAK_BLOCKS, \
+ &_qzz_leaked, &_qzz_dubious, \
+ &_qzz_reachable, &_qzz_suppressed, 0); \
+ leaked = _qzz_leaked; \
+ dubious = _qzz_dubious; \
+ reachable = _qzz_reachable; \
+ suppressed = _qzz_suppressed; \
+ }
+
+
+/* Get the validity data for addresses [zza..zza+zznbytes-1] and copy it
+ into the provided zzvbits array. Return values:
+ 0 if not running on valgrind
+ 1 success
+ 2 [previously indicated unaligned arrays; these are now allowed]
+ 3 if any parts of zzsrc/zzvbits are not addressable.
+ The metadata is not copied in cases 0, 2 or 3 so it should be
+ impossible to segfault your system by using this call.
+*/
+#define VALGRIND_GET_VBITS(zza,zzvbits,zznbytes) \
+ (__extension__({unsigned long _qzz_res; \
+ char* czza = (char*)zza; \
+ char* czzvbits = (char*)zzvbits; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__GET_VBITS, \
+ czza, czzvbits, zznbytes, 0, 0 ); \
+ _qzz_res; \
+ }))
+
+/* Set the validity data for addresses [zza..zza+zznbytes-1], copying it
+ from the provided zzvbits array. Return values:
+ 0 if not running on valgrind
+ 1 success
+ 2 [previously indicated unaligned arrays; these are now allowed]
+ 3 if any parts of zza/zzvbits are not addressable.
+ The metadata is not copied in cases 0, 2 or 3 so it should be
+ impossible to segfault your system by using this call.
+*/
+#define VALGRIND_SET_VBITS(zza,zzvbits,zznbytes) \
+ (__extension__({unsigned int _qzz_res; \
+ char* czza = (char*)zza; \
+ char* czzvbits = (char*)zzvbits; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__SET_VBITS, \
+ czza, czzvbits, zznbytes, 0, 0 ); \
+ _qzz_res; \
+ }))
+
+#endif
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
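
A usage sketch for the client requests above (illustrative, not part of the
patch; the requests only take effect when the process runs under Valgrind
and are cheap no-ops otherwise):

    #include <vppinfra/memcheck.h>

    static void
    poison_example (char *buf, unsigned long len)
    {
      VALGRIND_MAKE_MEM_NOACCESS (buf, len);	/* any access is now an error */
      VALGRIND_MAKE_MEM_UNDEFINED (buf, len);	/* addressable, contents undefined */
      buf[0] = 1;
      VALGRIND_CHECK_MEM_IS_DEFINED (buf, 1);	/* ok: byte 0 was written */
    }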
diff --git a/src/vppinfra/memcpy_avx.h b/src/vppinfra/memcpy_avx.h
new file mode 100644
index 00000000..e987d044
--- /dev/null
+++ b/src/vppinfra/memcpy_avx.h
@@ -0,0 +1,296 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef included_clib_memcpy_avx_h
+#define included_clib_memcpy_avx_h
+
+#include <stdint.h>
+#include <x86intrin.h>
+
+static inline void
+clib_mov16 (u8 * dst, const u8 * src)
+{
+ __m128i xmm0;
+
+ xmm0 = _mm_loadu_si128 ((const __m128i *) src);
+ _mm_storeu_si128 ((__m128i *) dst, xmm0);
+}
+
+static inline void
+clib_mov32 (u8 * dst, const u8 * src)
+{
+ __m256i ymm0;
+
+ ymm0 = _mm256_loadu_si256 ((const __m256i *) src);
+ _mm256_storeu_si256 ((__m256i *) dst, ymm0);
+}
+
+static inline void
+clib_mov64 (u8 * dst, const u8 * src)
+{
+ clib_mov32 ((u8 *) dst + 0 * 32, (const u8 *) src + 0 * 32);
+ clib_mov32 ((u8 *) dst + 1 * 32, (const u8 *) src + 1 * 32);
+}
+
+static inline void
+clib_mov128 (u8 * dst, const u8 * src)
+{
+ clib_mov64 ((u8 *) dst + 0 * 64, (const u8 *) src + 0 * 64);
+ clib_mov64 ((u8 *) dst + 1 * 64, (const u8 *) src + 1 * 64);
+}
+
+static inline void
+clib_mov256 (u8 * dst, const u8 * src)
+{
+ clib_mov128 ((u8 *) dst + 0 * 128, (const u8 *) src + 0 * 128);
+ clib_mov128 ((u8 *) dst + 1 * 128, (const u8 *) src + 1 * 128);
+}
+
+static inline void
+clib_mov64blocks (u8 * dst, const u8 * src, size_t n)
+{
+ __m256i ymm0, ymm1;
+
+ while (n >= 64)
+ {
+ ymm0 =
+ _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 0 * 32));
+ n -= 64;
+ ymm1 =
+ _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 1 * 32));
+ src = (const u8 *) src + 64;
+ _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 0 * 32), ymm0);
+ _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 1 * 32), ymm1);
+ dst = (u8 *) dst + 64;
+ }
+}
+
+static inline void
+clib_mov256blocks (u8 * dst, const u8 * src, size_t n)
+{
+ __m256i ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7;
+
+ while (n >= 256)
+ {
+ ymm0 =
+ _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 0 * 32));
+ n -= 256;
+ ymm1 =
+ _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 1 * 32));
+ ymm2 =
+ _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 2 * 32));
+ ymm3 =
+ _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 3 * 32));
+ ymm4 =
+ _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 4 * 32));
+ ymm5 =
+ _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 5 * 32));
+ ymm6 =
+ _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 6 * 32));
+ ymm7 =
+ _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 7 * 32));
+ src = (const u8 *) src + 256;
+ _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 0 * 32), ymm0);
+ _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 1 * 32), ymm1);
+ _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 2 * 32), ymm2);
+ _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 3 * 32), ymm3);
+ _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 4 * 32), ymm4);
+ _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 5 * 32), ymm5);
+ _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 6 * 32), ymm6);
+ _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 7 * 32), ymm7);
+ dst = (u8 *) dst + 256;
+ }
+}
+
+static inline void *
+clib_memcpy (void *dst, const void *src, size_t n)
+{
+ uword dstu = (uword) dst;
+ uword srcu = (uword) src;
+ void *ret = dst;
+ size_t dstofss;
+ size_t bits;
+
+ /**
+ * Copy less than 16 bytes
+ */
+ if (n < 16)
+ {
+ if (n & 0x01)
+ {
+ *(u8 *) dstu = *(const u8 *) srcu;
+ srcu = (uword) ((const u8 *) srcu + 1);
+ dstu = (uword) ((u8 *) dstu + 1);
+ }
+ if (n & 0x02)
+ {
+ *(uint16_t *) dstu = *(const uint16_t *) srcu;
+ srcu = (uword) ((const uint16_t *) srcu + 1);
+ dstu = (uword) ((uint16_t *) dstu + 1);
+ }
+ if (n & 0x04)
+ {
+ *(uint32_t *) dstu = *(const uint32_t *) srcu;
+ srcu = (uword) ((const uint32_t *) srcu + 1);
+ dstu = (uword) ((uint32_t *) dstu + 1);
+ }
+ if (n & 0x08)
+ {
+ *(uint64_t *) dstu = *(const uint64_t *) srcu;
+ }
+ return ret;
+ }
+
+ /**
+ * Fast way when copy size doesn't exceed 512 bytes
+ */
+ if (n <= 32)
+ {
+ clib_mov16 ((u8 *) dst, (const u8 *) src);
+ clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
+ return ret;
+ }
+ if (n <= 64)
+ {
+ clib_mov32 ((u8 *) dst, (const u8 *) src);
+ clib_mov32 ((u8 *) dst - 32 + n, (const u8 *) src - 32 + n);
+ return ret;
+ }
+ if (n <= 512)
+ {
+ if (n >= 256)
+ {
+ n -= 256;
+ clib_mov256 ((u8 *) dst, (const u8 *) src);
+ src = (const u8 *) src + 256;
+ dst = (u8 *) dst + 256;
+ }
+ if (n >= 128)
+ {
+ n -= 128;
+ clib_mov128 ((u8 *) dst, (const u8 *) src);
+ src = (const u8 *) src + 128;
+ dst = (u8 *) dst + 128;
+ }
+ if (n >= 64)
+ {
+ n -= 64;
+ clib_mov64 ((u8 *) dst, (const u8 *) src);
+ src = (const u8 *) src + 64;
+ dst = (u8 *) dst + 64;
+ }
+ COPY_BLOCK_64_BACK31:
+ if (n > 32)
+ {
+ clib_mov32 ((u8 *) dst, (const u8 *) src);
+ clib_mov32 ((u8 *) dst - 32 + n, (const u8 *) src - 32 + n);
+ return ret;
+ }
+ if (n > 0)
+ {
+ clib_mov32 ((u8 *) dst - 32 + n, (const u8 *) src - 32 + n);
+ }
+ return ret;
+ }
+
+ /**
+ * Make store aligned when copy size exceeds 512 bytes
+ */
+ dstofss = (uword) dst & 0x1F;
+ if (dstofss > 0)
+ {
+ dstofss = 32 - dstofss;
+ n -= dstofss;
+ clib_mov32 ((u8 *) dst, (const u8 *) src);
+ src = (const u8 *) src + dstofss;
+ dst = (u8 *) dst + dstofss;
+ }
+
+ /**
+ * Copy 256-byte blocks.
+ * Use copy block function for better instruction order control,
+ * which is important when load is unaligned.
+ */
+ clib_mov256blocks ((u8 *) dst, (const u8 *) src, n);
+ bits = n;
+ n = n & 255;
+ bits -= n;
+ src = (const u8 *) src + bits;
+ dst = (u8 *) dst + bits;
+
+ /**
+ * Copy 64-byte blocks.
+ * Use copy block function for better instruction order control,
+ * which is important when load is unaligned.
+ */
+ if (n >= 64)
+ {
+ clib_mov64blocks ((u8 *) dst, (const u8 *) src, n);
+ bits = n;
+ n = n & 63;
+ bits -= n;
+ src = (const u8 *) src + bits;
+ dst = (u8 *) dst + bits;
+ }
+
+ /**
+   * Copy whatever is left
+ */
+ goto COPY_BLOCK_64_BACK31;
+}
+
+
+#endif /* included_clib_memcpy_avx_h */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
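
The small-size paths in clib_memcpy above avoid per-byte branching by
issuing two overlapping vector moves; a minimal sketch of that trick for
17-32 byte copies (illustrative, not part of the patch):

    static inline void
    copy_17_to_32 (u8 * dst, const u8 * src, size_t n)
    {
      /* head and tail stores overlap by (32 - n) bytes, which is
         harmless under memcpy's no-overlap contract */
      clib_mov16 (dst, src);
      clib_mov16 (dst + n - 16, src + n - 16);
    }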
diff --git a/src/vppinfra/memcpy_sse3.h b/src/vppinfra/memcpy_sse3.h
new file mode 100644
index 00000000..f61396c8
--- /dev/null
+++ b/src/vppinfra/memcpy_sse3.h
@@ -0,0 +1,356 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef included_clib_memcpy_sse3_h
+#define included_clib_memcpy_sse3_h
+
+#include <stdint.h>
+#include <x86intrin.h>
+
+static inline void
+clib_mov16 (u8 * dst, const u8 * src)
+{
+ __m128i xmm0;
+
+ xmm0 = _mm_loadu_si128 ((const __m128i *) src);
+ _mm_storeu_si128 ((__m128i *) dst, xmm0);
+}
+
+static inline void
+clib_mov32 (u8 * dst, const u8 * src)
+{
+ clib_mov16 ((u8 *) dst + 0 * 16, (const u8 *) src + 0 * 16);
+ clib_mov16 ((u8 *) dst + 1 * 16, (const u8 *) src + 1 * 16);
+}
+
+static inline void
+clib_mov64 (u8 * dst, const u8 * src)
+{
+ clib_mov32 ((u8 *) dst + 0 * 32, (const u8 *) src + 0 * 32);
+ clib_mov32 ((u8 *) dst + 1 * 32, (const u8 *) src + 1 * 32);
+}
+
+static inline void
+clib_mov128 (u8 * dst, const u8 * src)
+{
+ clib_mov64 ((u8 *) dst + 0 * 64, (const u8 *) src + 0 * 64);
+ clib_mov64 ((u8 *) dst + 1 * 64, (const u8 *) src + 1 * 64);
+}
+
+static inline void
+clib_mov256 (u8 * dst, const u8 * src)
+{
+ clib_mov128 ((u8 *) dst + 0 * 128, (const u8 *) src + 0 * 128);
+ clib_mov128 ((u8 *) dst + 1 * 128, (const u8 *) src + 1 * 128);
+}
+
+/**
+ * Macro for copying unaligned block from one location to another with constant load offset,
+ * 47 bytes leftover maximum,
+ * locations should not overlap.
+ * Requirements:
+ * - Store is aligned
+ * - Load offset is <offset>, which must be an immediate value within [1, 15]
+ * - For <src>, make sure <offset> bytes backwards & <16 - offset> bytes forwards are available for loading
+ * - <dst>, <src>, <len> must be variables
+ * - __m128i <xmm0> ~ <xmm8> must be pre-defined
+ */
+#define CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, offset) \
+({ \
+ int tmp; \
+ while (len >= 128 + 16 - offset) { \
+ xmm0 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 0 * 16)); \
+ len -= 128; \
+ xmm1 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 1 * 16)); \
+ xmm2 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 2 * 16)); \
+ xmm3 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 3 * 16)); \
+ xmm4 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 4 * 16)); \
+ xmm5 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 5 * 16)); \
+ xmm6 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 6 * 16)); \
+ xmm7 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 7 * 16)); \
+ xmm8 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 8 * 16)); \
+ src = (const u8 *)src + 128; \
+ _mm_storeu_si128((__m128i *)((u8 *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
+ _mm_storeu_si128((__m128i *)((u8 *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
+ _mm_storeu_si128((__m128i *)((u8 *)dst + 2 * 16), _mm_alignr_epi8(xmm3, xmm2, offset)); \
+ _mm_storeu_si128((__m128i *)((u8 *)dst + 3 * 16), _mm_alignr_epi8(xmm4, xmm3, offset)); \
+ _mm_storeu_si128((__m128i *)((u8 *)dst + 4 * 16), _mm_alignr_epi8(xmm5, xmm4, offset)); \
+ _mm_storeu_si128((__m128i *)((u8 *)dst + 5 * 16), _mm_alignr_epi8(xmm6, xmm5, offset)); \
+ _mm_storeu_si128((__m128i *)((u8 *)dst + 6 * 16), _mm_alignr_epi8(xmm7, xmm6, offset)); \
+ _mm_storeu_si128((__m128i *)((u8 *)dst + 7 * 16), _mm_alignr_epi8(xmm8, xmm7, offset)); \
+ dst = (u8 *)dst + 128; \
+ } \
+ tmp = len; \
+ len = ((len - 16 + offset) & 127) + 16 - offset; \
+ tmp -= len; \
+ src = (const u8 *)src + tmp; \
+ dst = (u8 *)dst + tmp; \
+ if (len >= 32 + 16 - offset) { \
+ while (len >= 32 + 16 - offset) { \
+ xmm0 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 0 * 16)); \
+ len -= 32; \
+ xmm1 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 1 * 16)); \
+ xmm2 = _mm_loadu_si128((const __m128i *)((const u8 *)src - offset + 2 * 16)); \
+ src = (const u8 *)src + 32; \
+ _mm_storeu_si128((__m128i *)((u8 *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset)); \
+ _mm_storeu_si128((__m128i *)((u8 *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset)); \
+ dst = (u8 *)dst + 32; \
+ } \
+ tmp = len; \
+ len = ((len - 16 + offset) & 31) + 16 - offset; \
+ tmp -= len; \
+ src = (const u8 *)src + tmp; \
+ dst = (u8 *)dst + tmp; \
+ } \
+})
+
+/**
+ * Macro for copying unaligned block from one location to another,
+ * 47 bytes leftover maximum,
+ * locations should not overlap.
+ * Use switch here because the aligning instruction requires an immediate value for the shift count.
+ * Requirements:
+ * - Store is aligned
+ * - Load offset is <offset>, which must be within [1, 15]
+ * - For <src>, make sure <offset> bytes backwards & <16 - offset> bytes forwards are available for loading
+ * - <dst>, <src>, <len> must be variables
+ * - __m128i <xmm0> ~ <xmm8> used in CLIB_MVUNALIGN_LEFT47_IMM must be pre-defined
+ */
+#define CLIB_MVUNALIGN_LEFT47(dst, src, len, offset) \
+({ \
+ switch (offset) { \
+  case 0x01: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x01); break;    \
+  case 0x02: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x02); break;    \
+  case 0x03: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x03); break;    \
+  case 0x04: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x04); break;    \
+  case 0x05: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x05); break;    \
+  case 0x06: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x06); break;    \
+  case 0x07: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x07); break;    \
+  case 0x08: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x08); break;    \
+  case 0x09: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x09); break;    \
+  case 0x0A: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0A); break;    \
+  case 0x0B: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0B); break;    \
+  case 0x0C: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0C); break;    \
+  case 0x0D: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0D); break;    \
+  case 0x0E: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0E); break;    \
+  case 0x0F: CLIB_MVUNALIGN_LEFT47_IMM(dst, src, len, 0x0F); break;    \
+ default:; \
+ } \
+})
+
+static inline void *
+clib_memcpy (void *dst, const void *src, size_t n)
+{
+ __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
+ uword dstu = (uword) dst;
+ uword srcu = (uword) src;
+ void *ret = dst;
+ size_t dstofss;
+ size_t srcofs;
+
+ /**
+ * Copy less than 16 bytes
+ */
+ if (n < 16)
+ {
+ if (n & 0x01)
+ {
+ *(u8 *) dstu = *(const u8 *) srcu;
+ srcu = (uword) ((const u8 *) srcu + 1);
+ dstu = (uword) ((u8 *) dstu + 1);
+ }
+ if (n & 0x02)
+ {
+ *(u16 *) dstu = *(const u16 *) srcu;
+ srcu = (uword) ((const u16 *) srcu + 1);
+ dstu = (uword) ((u16 *) dstu + 1);
+ }
+ if (n & 0x04)
+ {
+ *(u32 *) dstu = *(const u32 *) srcu;
+ srcu = (uword) ((const u32 *) srcu + 1);
+ dstu = (uword) ((u32 *) dstu + 1);
+ }
+ if (n & 0x08)
+ {
+ *(u64 *) dstu = *(const u64 *) srcu;
+ }
+ return ret;
+ }
+
+ /**
+ * Fast way when copy size doesn't exceed 512 bytes
+ */
+ if (n <= 32)
+ {
+ clib_mov16 ((u8 *) dst, (const u8 *) src);
+ clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
+ return ret;
+ }
+ if (n <= 48)
+ {
+ clib_mov32 ((u8 *) dst, (const u8 *) src);
+ clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
+ return ret;
+ }
+ if (n <= 64)
+ {
+ clib_mov32 ((u8 *) dst, (const u8 *) src);
+ clib_mov16 ((u8 *) dst + 32, (const u8 *) src + 32);
+ clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
+ return ret;
+ }
+ if (n <= 128)
+ {
+ goto COPY_BLOCK_128_BACK15;
+ }
+ if (n <= 512)
+ {
+ if (n >= 256)
+ {
+ n -= 256;
+ clib_mov128 ((u8 *) dst, (const u8 *) src);
+ clib_mov128 ((u8 *) dst + 128, (const u8 *) src + 128);
+ src = (const u8 *) src + 256;
+ dst = (u8 *) dst + 256;
+ }
+ COPY_BLOCK_255_BACK15:
+ if (n >= 128)
+ {
+ n -= 128;
+ clib_mov128 ((u8 *) dst, (const u8 *) src);
+ src = (const u8 *) src + 128;
+ dst = (u8 *) dst + 128;
+ }
+ COPY_BLOCK_128_BACK15:
+ if (n >= 64)
+ {
+ n -= 64;
+ clib_mov64 ((u8 *) dst, (const u8 *) src);
+ src = (const u8 *) src + 64;
+ dst = (u8 *) dst + 64;
+ }
+ COPY_BLOCK_64_BACK15:
+ if (n >= 32)
+ {
+ n -= 32;
+ clib_mov32 ((u8 *) dst, (const u8 *) src);
+ src = (const u8 *) src + 32;
+ dst = (u8 *) dst + 32;
+ }
+ if (n > 16)
+ {
+ clib_mov16 ((u8 *) dst, (const u8 *) src);
+ clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
+ return ret;
+ }
+ if (n > 0)
+ {
+ clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
+ }
+ return ret;
+ }
+
+ /**
+ * Make store aligned when copy size exceeds 512 bytes,
+ * and make sure the first 15 bytes are copied, because
+ * unaligned copy functions require up to 15 bytes
+ * backwards access.
+ */
+ dstofss = 16 - ((uword) dst & 0x0F) + 16;
+ n -= dstofss;
+ clib_mov32 ((u8 *) dst, (const u8 *) src);
+ src = (const u8 *) src + dstofss;
+ dst = (u8 *) dst + dstofss;
+ srcofs = ((uword) src & 0x0F);
+
+ /**
+ * For aligned copy
+ */
+ if (srcofs == 0)
+ {
+ /**
+ * Copy 256-byte blocks
+ */
+ for (; n >= 256; n -= 256)
+ {
+ clib_mov256 ((u8 *) dst, (const u8 *) src);
+ dst = (u8 *) dst + 256;
+ src = (const u8 *) src + 256;
+ }
+
+ /**
+      * Copy whatever is left
+ */
+ goto COPY_BLOCK_255_BACK15;
+ }
+
+ /**
+ * For copy with unaligned load
+ */
+ CLIB_MVUNALIGN_LEFT47 (dst, src, n, srcofs);
+
+ /**
+   * Copy whatever is left
+ */
+ goto COPY_BLOCK_64_BACK15;
+}
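+
+/* Illustrative usage (a sketch, not from the original sources): clib_memcpy
+   is a drop-in memcpy replacement; the tiered paths above are selected at
+   run time from n and the source/destination alignment.
+
+     u8 a[256], b[256];
+     clib_memcpy (b, a, 7);     // n < 16: scalar byte/word tail path
+     clib_memcpy (b, a, 200);   // 64 < n <= 512: block path with back-15 tail
+*/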
+
+
+#undef CLIB_MVUNALIGN_LEFT47_IMM
+#undef CLIB_MVUNALIGN_LEFT47
+
+#endif /* included_clib_memcpy_sse3_h */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/mhash.c b/src/vppinfra/mhash.c
new file mode 100644
index 00000000..00b67c49
--- /dev/null
+++ b/src/vppinfra/mhash.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2010 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/mhash.h>
+
+always_inline u32
+load_partial_u32 (void *d, uword n)
+{
+ if (n == 4)
+ return ((u32 *) d)[0];
+ if (n == 3)
+ return ((u16 *) d)[0] | (((u8 *) d)[2] << 16);
+ if (n == 2)
+ return ((u16 *) d)[0];
+ if (n == 1)
+ return ((u8 *) d)[0];
+ ASSERT (0);
+ return 0;
+}
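+
+/* Worked example (assuming little-endian byte order, as the code above
+   does): for n == 3 and bytes { 0xAA, 0xBB, 0xCC } the result is
+   0x00CCBBAA -- the low u16 (0xBBAA) OR'ed with the third byte shifted
+   left by 16. */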
+
+always_inline u32
+mhash_key_sum_inline (void *data, uword n_data_bytes, u32 seed)
+{
+ u32 *d32 = data;
+ u32 a, b, c, n_left;
+
+ a = b = c = seed;
+ n_left = n_data_bytes;
+ a ^= n_data_bytes;
+
+ while (n_left > 12)
+ {
+ a += d32[0];
+ b += d32[1];
+ c += d32[2];
+ hash_v3_mix32 (a, b, c);
+ n_left -= 12;
+ d32 += 3;
+ }
+
+ if (n_left > 8)
+ {
+ c += load_partial_u32 (d32 + 2, n_left - 8);
+ n_left = 8;
+ }
+ if (n_left > 4)
+ {
+ b += load_partial_u32 (d32 + 1, n_left - 4);
+ n_left = 4;
+ }
+ if (n_left > 0)
+ a += load_partial_u32 (d32 + 0, n_left - 0);
+
+ hash_v3_finalize32 (a, b, c);
+
+ return c;
+}
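+
+/* The mixer above is the Jenkins-style 3x32-bit hash used elsewhere in
+   vppinfra (hash_v3_mix32 / hash_v3_finalize32): full 12-byte blocks are
+   mixed in the loop, and the 1..12 tail bytes are folded in via
+   load_partial_u32 before finalization. A 20-byte key, for example, takes
+   one mix round (bytes 0-11), then bytes 12-15 go into a and bytes 16-19
+   into b. */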
+
+#define foreach_mhash_key_size \
+ _ (2) _ (3) _ (4) _ (5) _ (6) _ (7) \
+ _ (8) _ (12) _ (16) _ (20) \
+ _ (24) _ (28) _ (32) _ (36) \
+ _ (40) _ (44) _ (48) _ (52) \
+ _ (56) _ (60) _ (64)
+
+#define _(N_KEY_BYTES) \
+ static uword \
+ mhash_key_sum_##N_KEY_BYTES (hash_t * h, uword key) \
+ { \
+ mhash_t * hv = uword_to_pointer (h->user, mhash_t *); \
+ return mhash_key_sum_inline (mhash_key_to_mem (hv, key), \
+ (N_KEY_BYTES), \
+ hv->hash_seed); \
+ } \
+ \
+ static uword \
+ mhash_key_equal_##N_KEY_BYTES (hash_t * h, uword key1, uword key2) \
+ { \
+ mhash_t * hv = uword_to_pointer (h->user, mhash_t *); \
+ void * k1 = mhash_key_to_mem (hv, key1); \
+ void * k2 = mhash_key_to_mem (hv, key2); \
+ return ! memcmp (k1, k2, (N_KEY_BYTES)); \
+ }
+
+foreach_mhash_key_size
+#undef _
+static uword
+mhash_key_sum_c_string (hash_t * h, uword key)
+{
+ mhash_t *hv = uword_to_pointer (h->user, mhash_t *);
+ void *k = mhash_key_to_mem (hv, key);
+ return mhash_key_sum_inline (k, strlen (k), hv->hash_seed);
+}
+
+static uword
+mhash_key_equal_c_string (hash_t * h, uword key1, uword key2)
+{
+ mhash_t *hv = uword_to_pointer (h->user, mhash_t *);
+ void *k1 = mhash_key_to_mem (hv, key1);
+ void *k2 = mhash_key_to_mem (hv, key2);
+ return strcmp (k1, k2) == 0;
+}
+
+static uword
+mhash_key_sum_vec_string (hash_t * h, uword key)
+{
+ mhash_t *hv = uword_to_pointer (h->user, mhash_t *);
+ void *k = mhash_key_to_mem (hv, key);
+ return mhash_key_sum_inline (k, vec_len (k), hv->hash_seed);
+}
+
+static uword
+mhash_key_equal_vec_string (hash_t * h, uword key1, uword key2)
+{
+ mhash_t *hv = uword_to_pointer (h->user, mhash_t *);
+ void *k1 = mhash_key_to_mem (hv, key1);
+ void *k2 = mhash_key_to_mem (hv, key2);
+ return vec_len (k1) == vec_len (k2) && memcmp (k1, k2, vec_len (k1)) == 0;
+}
+
+/* The CLIB hash user pointer must always point to a valid mhash_t.
+   However, the address of an mhash_t can change (think vec_resize),
+   so before every hash operation we refresh the pointer to the
+   current address. */
+always_inline void
+mhash_sanitize_hash_user (mhash_t * mh)
+{
+ uword *hash = mh->hash;
+ hash_t *h = hash_header (hash);
+ h->user = pointer_to_uword (mh);
+}
+
+void
+mhash_init (mhash_t * h, uword n_value_bytes, uword n_key_bytes)
+{
+ static struct
+ {
+ hash_key_sum_function_t *key_sum;
+ hash_key_equal_function_t *key_equal;
+ } t[] =
+ {
+#define _(N_KEY_BYTES) \
+ [N_KEY_BYTES] = { \
+ .key_sum = mhash_key_sum_##N_KEY_BYTES, \
+ .key_equal = mhash_key_equal_##N_KEY_BYTES, \
+ },
+
+ foreach_mhash_key_size
+#undef _
+    [MHASH_C_STRING_KEY] = {
+      .key_sum = mhash_key_sum_c_string,
+      .key_equal = mhash_key_equal_c_string,
+    },
+    [MHASH_VEC_STRING_KEY] = {
+      .key_sum = mhash_key_sum_vec_string,
+      .key_equal = mhash_key_equal_vec_string,
+    },
+  };
+
+ if (mhash_key_vector_is_heap (h))
+ heap_free (h->key_vector_or_heap);
+ else
+ vec_free (h->key_vector_or_heap);
+ vec_free (h->key_vector_free_indices);
+ {
+ int i;
+ for (i = 0; i < vec_len (h->key_tmps); i++)
+ vec_free (h->key_tmps[i]);
+ }
+ vec_free (h->key_tmps);
+ hash_free (h->hash);
+
+ memset (h, 0, sizeof (h[0]));
+ h->n_key_bytes = n_key_bytes;
+
+#if 0
+ if (h->n_key_bytes > 0)
+ {
+ vec_validate (h->key_tmp, h->n_key_bytes - 1);
+ _vec_len (h->key_tmp) = 0;
+ }
+#endif
+
+ ASSERT (n_key_bytes < ARRAY_LEN (t));
+ h->hash = hash_create2 ( /* elts */ 0,
+ /* user */ pointer_to_uword (h),
+ /* value_bytes */ n_value_bytes,
+ t[n_key_bytes].key_sum, t[n_key_bytes].key_equal,
+ /* format pair/arg */
+ 0, 0);
+}
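+
+/* Illustrative usage (a sketch, not from the original sources): fixed-size
+   4-byte keys mapping to uword-sized values. mhash_init frees any existing
+   state, so the struct must be zeroed before first use.
+
+     mhash_t mh;
+     u32 key = 42;
+     uword old_value;
+     memset (&mh, 0, sizeof (mh));
+     mhash_init (&mh, sizeof (uword), sizeof (u32));
+     mhash_set (&mh, &key, 17, &old_value);
+     uword *p = mhash_get (&mh, &key);   // p != 0, *p == 17
+     mhash_free (&mh);
+*/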
+
+static uword
+mhash_set_tmp_key (mhash_t * h, const void *key)
+{
+ u8 *key_tmp;
+ int my_cpu = os_get_thread_index ();
+
+ vec_validate (h->key_tmps, my_cpu);
+ key_tmp = h->key_tmps[my_cpu];
+
+ vec_reset_length (key_tmp);
+
+ if (mhash_key_vector_is_heap (h))
+ {
+ uword is_c_string = h->n_key_bytes == MHASH_C_STRING_KEY;
+
+ if (is_c_string)
+ vec_add (key_tmp, key, strlen (key) + 1);
+ else
+ vec_add (key_tmp, key, vec_len (key));
+ }
+ else
+ vec_add (key_tmp, key, h->n_key_bytes);
+
+ h->key_tmps[my_cpu] = key_tmp;
+
+ return ~0;
+}
+
+hash_pair_t *
+mhash_get_pair (mhash_t * h, const void *key)
+{
+ uword ikey;
+ mhash_sanitize_hash_user (h);
+ ikey = mhash_set_tmp_key (h, key);
+ return hash_get_pair (h->hash, ikey);
+}
+
+typedef struct
+{
+ u32 heap_handle;
+
+  /* Must coincide with vec_header. */
+ vec_header_t vec;
+} mhash_string_key_t;
+
+uword
+mhash_set_mem (mhash_t * h, void *key, uword * new_value, uword * old_value)
+{
+ u8 *k;
+ uword ikey, i, l = 0, n_key_bytes, old_n_elts, key_alloc_from_free_list = 0;
+
+ mhash_sanitize_hash_user (h);
+
+ if (mhash_key_vector_is_heap (h))
+ {
+ mhash_string_key_t *sk;
+ uword is_c_string = h->n_key_bytes == MHASH_C_STRING_KEY;
+ uword handle;
+
+ n_key_bytes = is_c_string ? (strlen (key) + 1) : vec_len (key);
+ i =
+ heap_alloc (h->key_vector_or_heap, n_key_bytes + sizeof (sk[0]),
+ handle);
+
+ sk = (void *) (h->key_vector_or_heap + i);
+ sk->heap_handle = handle;
+ sk->vec.len = n_key_bytes;
+ clib_memcpy (sk->vec.vector_data, key, n_key_bytes);
+
+ /* Advance key past vector header. */
+ i += sizeof (sk[0]);
+ }
+ else
+ {
+      l = vec_len (h->key_vector_free_indices);
+      key_alloc_from_free_list = l > 0;
+ if (key_alloc_from_free_list)
+ {
+ i = h->key_vector_free_indices[l - 1];
+ k = vec_elt_at_index (h->key_vector_or_heap, i);
+ _vec_len (h->key_vector_free_indices) = l - 1;
+ }
+ else
+ {
+ vec_add2 (h->key_vector_or_heap, k, h->n_key_bytes);
+ i = k - h->key_vector_or_heap;
+ }
+
+ n_key_bytes = h->n_key_bytes;
+ clib_memcpy (k, key, n_key_bytes);
+ }
+ ikey = i;
+
+ old_n_elts = hash_elts (h->hash);
+ h->hash = _hash_set3 (h->hash, ikey, new_value, old_value);
+
+ /* If element already existed remove duplicate key. */
+ if (hash_elts (h->hash) == old_n_elts)
+ {
+ hash_pair_t *p;
+
+ /* Fetch old key for return value. */
+ p = hash_get_pair (h->hash, ikey);
+ ikey = p->key;
+
+ /* Remove duplicate key. */
+ if (mhash_key_vector_is_heap (h))
+ {
+ mhash_string_key_t *sk;
+ sk = (void *) (h->key_vector_or_heap + i - sizeof (sk[0]));
+ heap_dealloc (h->key_vector_or_heap, sk->heap_handle);
+ }
+ else
+ {
+ if (key_alloc_from_free_list)
+ {
+	      /* Push the popped index back onto the free list
+	         (mirrors the pop above, which consumed entry l - 1). */
+	      h->key_vector_free_indices[l - 1] = i;
+	      _vec_len (h->key_vector_free_indices) = l;
+ }
+ else
+ _vec_len (h->key_vector_or_heap) -= h->n_key_bytes;
+ }
+ }
+
+ return ikey;
+}
+
+uword
+mhash_unset (mhash_t * h, void *key, uword * old_value)
+{
+ hash_pair_t *p;
+ uword i;
+
+ mhash_sanitize_hash_user (h);
+ i = mhash_set_tmp_key (h, key);
+
+ p = hash_get_pair (h->hash, i);
+ if (!p)
+ return 0;
+
+ ASSERT (p->key != ~0);
+ i = p->key;
+
+ if (mhash_key_vector_is_heap (h))
+ {
+ mhash_string_key_t *sk;
+ sk = (void *) (h->key_vector_or_heap + i) - sizeof (sk[0]);
+ heap_dealloc (h->key_vector_or_heap, sk->heap_handle);
+ }
+ else
+ vec_add1 (h->key_vector_free_indices, i);
+
+ hash_unset3 (h->hash, i, old_value);
+ return 1;
+}
+
+u8 *
+format_mhash_key (u8 * s, va_list * va)
+{
+ mhash_t *h = va_arg (*va, mhash_t *);
+ u32 ki = va_arg (*va, u32);
+ void *k = mhash_key_to_mem (h, ki);
+
+ if (mhash_key_vector_is_heap (h))
+ {
+ uword is_c_string = h->n_key_bytes == MHASH_C_STRING_KEY;
+ u32 l = is_c_string ? strlen (k) : vec_len (k);
+ vec_add (s, k, l);
+ }
+ else if (h->format_key)
+ s = format (s, "%U", h->format_key, k);
+ else
+ s = format (s, "%U", format_hex_bytes, k, h->n_key_bytes);
+
+ return s;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/mhash.h b/src/vppinfra/mhash.h
new file mode 100644
index 00000000..7eb19183
--- /dev/null
+++ b/src/vppinfra/mhash.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef included_clib_mhash_h
+#define included_clib_mhash_h
+
+/*
+ Copyright (c) 2010 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/format.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/heap.h>
+
+/* Hash table plus vector of keys. */
+typedef struct
+{
+ /* Vector or heap used to store keys. Hash table stores keys as byte
+ offsets into this vector. */
+ u8 *key_vector_or_heap;
+
+ /* Byte offsets of free keys in vector (used to store free keys when
+ n_key_bytes > 1). */
+ u32 *key_vector_free_indices;
+
+ u8 **key_tmps;
+
+  /* Possibly fixed size of key.
+     0 means keys are u8 vectors;
+     1 means keys are null-terminated C strings. */
+#define MHASH_VEC_STRING_KEY 0
+#define MHASH_C_STRING_KEY 1
+ u32 n_key_bytes;
+
+ /* Seed value for Jenkins hash. */
+ u32 hash_seed;
+
+ /* Hash table mapping key -> value. */
+ uword *hash;
+
+ /* Format function for keys. */
+ format_function_t *format_key;
+} mhash_t;
+
+void mhash_init (mhash_t * h, uword n_value_bytes, uword n_key_bytes);
+
+always_inline void
+mhash_init_c_string (mhash_t * h, uword n_value_bytes)
+{
+ mhash_init (h, n_value_bytes, MHASH_C_STRING_KEY);
+}
+
+always_inline void
+mhash_init_vec_string (mhash_t * h, uword n_value_bytes)
+{
+ mhash_init (h, n_value_bytes, MHASH_VEC_STRING_KEY);
+}
+
+always_inline void *
+mhash_key_to_mem (mhash_t * h, uword key)
+{
+ if (key == ~0)
+ {
+ u8 *key_tmp;
+
+ int my_cpu = os_get_thread_index ();
+ vec_validate (h->key_tmps, my_cpu);
+ key_tmp = h->key_tmps[my_cpu];
+ return key_tmp;
+ }
+ return vec_elt_at_index (h->key_vector_or_heap, key);
+}
+
+hash_pair_t *mhash_get_pair (mhash_t * h, const void *key);
+uword mhash_set_mem (mhash_t * h, void *key, uword * new_value,
+ uword * old_value);
+uword mhash_unset (mhash_t * h, void *key, uword * old_value);
+
+always_inline uword *
+mhash_get (mhash_t * h, const void *key)
+{
+ hash_pair_t *p = mhash_get_pair (h, key);
+ return p ? &p->value[0] : 0;
+}
+
+always_inline uword
+mhash_set (mhash_t * h, void *key, uword new_value, uword * old_value)
+{
+ return mhash_set_mem (h, key, &new_value, old_value);
+}
+
+always_inline uword
+mhash_unset_key (mhash_t * h, uword key, uword * old_value)
+{
+ void *k = mhash_key_to_mem (h, key);
+ return mhash_unset (h, k, old_value);
+}
+
+always_inline uword
+mhash_value_bytes (mhash_t * m)
+{
+ hash_t *h = hash_header (m->hash);
+ return hash_value_bytes (h);
+}
+
+always_inline uword
+mhash_elts (mhash_t * m)
+{
+ return hash_elts (m->hash);
+}
+
+always_inline uword
+mhash_key_vector_is_heap (mhash_t * h)
+{
+ return h->n_key_bytes <= 1;
+}
+
+always_inline void
+mhash_free (mhash_t * h)
+{
+ if (mhash_key_vector_is_heap (h))
+ heap_free (h->key_vector_or_heap);
+ else
+ vec_free (h->key_vector_or_heap);
+ vec_free (h->key_vector_free_indices);
+ hash_free (h->hash);
+}
+
+#define mhash_foreach(k,v,mh,body) \
+do { \
+ hash_pair_t * _mhash_foreach_p; \
+ hash_foreach_pair (_mhash_foreach_p, (mh)->hash, ({ \
+ (k) = mhash_key_to_mem ((mh), _mhash_foreach_p->key); \
+ (v) = &_mhash_foreach_p->value[0]; \
+ body; \
+ })); \
+} while (0)
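+
+/* Illustrative usage (a sketch): iterate all pairs; k receives a pointer to
+   the key bytes, v a pointer to the value. process () is a hypothetical
+   helper, not part of this API.
+
+     void *k;
+     uword *v;
+     mhash_foreach (k, v, &mh, ({
+       process (k, v[0]);   // hypothetical helper
+     }));
+*/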
+
+format_function_t format_mhash_key;
+
+#endif /* included_clib_mhash_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/mheap.c b/src/vppinfra/mheap.c
new file mode 100644
index 00000000..5bbbc65f
--- /dev/null
+++ b/src/vppinfra/mheap.c
@@ -0,0 +1,1643 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/bitops.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/format.h>
+#include <vppinfra/mheap.h>
+#include <vppinfra/os.h>
+#include <vppinfra/time.h>
+
+#ifdef CLIB_UNIX
+#include <vppinfra/elf_clib.h>
+#endif
+
+static void mheap_get_trace (void *v, uword offset, uword size);
+static void mheap_put_trace (void *v, uword offset, uword size);
+static int mheap_trace_sort (const void *t1, const void *t2);
+
+always_inline void
+mheap_maybe_lock (void *v)
+{
+ mheap_t *h = mheap_header (v);
+ if (v && (h->flags & MHEAP_FLAG_THREAD_SAFE))
+ {
+ u32 my_cpu = os_get_thread_index ();
+ if (h->owner_cpu == my_cpu)
+ {
+ h->recursion_count++;
+ return;
+ }
+
+ while (__sync_lock_test_and_set (&h->lock, 1))
+ ;
+
+ h->owner_cpu = my_cpu;
+ h->recursion_count = 1;
+ }
+}
+
+always_inline void
+mheap_maybe_unlock (void *v)
+{
+ mheap_t *h = mheap_header (v);
+ if (v && h->flags & MHEAP_FLAG_THREAD_SAFE)
+ {
+ ASSERT (os_get_thread_index () == h->owner_cpu);
+ if (--h->recursion_count == 0)
+ {
+ h->owner_cpu = ~0;
+ CLIB_MEMORY_BARRIER ();
+ h->lock = 0;
+ }
+ }
+}
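+
+/* The lock above is a recursive spinlock: the owning thread may re-enter
+   (recursion_count), __sync_lock_test_and_set provides the acquire barrier
+   on entry, and CLIB_MEMORY_BARRIER orders the owner_cpu reset before the
+   release store to h->lock. */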
+
+/* Find bin for objects with size at least n_user_data_bytes. */
+always_inline uword
+user_data_size_to_bin_index (uword n_user_data_bytes)
+{
+ uword n_user_data_words;
+ word small_bin, large_bin;
+
+ /* User size must be at least big enough to hold free elt. */
+ n_user_data_bytes = clib_max (n_user_data_bytes, MHEAP_MIN_USER_DATA_BYTES);
+
+ /* Round to words. */
+ n_user_data_words =
+ (round_pow2 (n_user_data_bytes, MHEAP_USER_DATA_WORD_BYTES) /
+ MHEAP_USER_DATA_WORD_BYTES);
+
+ ASSERT (n_user_data_words > 0);
+ small_bin =
+ n_user_data_words -
+ (MHEAP_MIN_USER_DATA_BYTES / MHEAP_USER_DATA_WORD_BYTES);
+ ASSERT (small_bin >= 0);
+
+ large_bin =
+ MHEAP_N_SMALL_OBJECT_BINS + max_log2 (n_user_data_bytes) -
+ MHEAP_LOG2_N_SMALL_OBJECT_BINS;
+
+ return small_bin < MHEAP_N_SMALL_OBJECT_BINS ? small_bin : large_bin;
+}
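+
+/* Sketch of the bin layout implied above: requests up to
+   MHEAP_N_SMALL_OBJECT_BINS words land in exact-size small bins (one bin
+   per word of user data, starting at MHEAP_MIN_USER_DATA_BYTES); larger
+   requests fall into logarithmic bins indexed by max_log2 of the byte
+   size, so each large bin covers a power-of-two size range. */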
+
+always_inline uword
+mheap_elt_size_to_user_n_bytes (uword n_bytes)
+{
+ ASSERT (n_bytes >= sizeof (mheap_elt_t));
+ return (n_bytes - STRUCT_OFFSET_OF (mheap_elt_t, user_data));
+}
+
+always_inline uword __attribute__ ((unused))
+mheap_elt_size_to_user_n_words (uword n_bytes)
+{
+ ASSERT (n_bytes % MHEAP_USER_DATA_WORD_BYTES == 0);
+ return mheap_elt_size_to_user_n_bytes (n_bytes) /
+ MHEAP_USER_DATA_WORD_BYTES;
+}
+
+always_inline void
+mheap_elt_set_size (void *v,
+ uword uoffset, uword n_user_data_bytes, uword is_free)
+{
+ mheap_elt_t *e, *n;
+
+ e = mheap_elt_at_uoffset (v, uoffset);
+
+ ASSERT (n_user_data_bytes % MHEAP_USER_DATA_WORD_BYTES == 0);
+
+ e->n_user_data = n_user_data_bytes / MHEAP_USER_DATA_WORD_BYTES;
+ e->is_free = is_free;
+ ASSERT (e->prev_n_user_data * sizeof (e->user_data[0]) >=
+ MHEAP_MIN_USER_DATA_BYTES);
+
+ n = mheap_next_elt (e);
+ n->prev_n_user_data = e->n_user_data;
+ n->prev_is_free = is_free;
+}
+
+always_inline void
+set_first_free_elt_offset (mheap_t * h, uword bin, uword uoffset)
+{
+ uword i0, i1;
+
+ h->first_free_elt_uoffset_by_bin[bin] = uoffset;
+
+ i0 = bin / BITS (h->non_empty_free_elt_heads[0]);
+ i1 = (uword) 1 << (uword) (bin % BITS (h->non_empty_free_elt_heads[0]));
+
+ ASSERT (i0 < ARRAY_LEN (h->non_empty_free_elt_heads));
+ if (h->first_free_elt_uoffset_by_bin[bin] == MHEAP_GROUNDED)
+ h->non_empty_free_elt_heads[i0] &= ~i1;
+ else
+ h->non_empty_free_elt_heads[i0] |= i1;
+}
+
+always_inline void
+set_free_elt (void *v, uword uoffset, uword n_user_data_bytes)
+{
+ mheap_t *h = mheap_header (v);
+ mheap_elt_t *e = mheap_elt_at_uoffset (v, uoffset);
+ mheap_elt_t *n = mheap_next_elt (e);
+ uword bin = user_data_size_to_bin_index (n_user_data_bytes);
+
+ ASSERT (n->prev_is_free);
+ ASSERT (e->is_free);
+
+ e->free_elt.prev_uoffset = MHEAP_GROUNDED;
+ e->free_elt.next_uoffset = h->first_free_elt_uoffset_by_bin[bin];
+
+ /* Fill in next free elt's previous pointer. */
+ if (e->free_elt.next_uoffset != MHEAP_GROUNDED)
+ {
+ mheap_elt_t *nf = mheap_elt_at_uoffset (v, e->free_elt.next_uoffset);
+ ASSERT (nf->is_free);
+ nf->free_elt.prev_uoffset = uoffset;
+ }
+
+ set_first_free_elt_offset (h, bin, uoffset);
+}
+
+always_inline void
+new_free_elt (void *v, uword uoffset, uword n_user_data_bytes)
+{
+ mheap_elt_set_size (v, uoffset, n_user_data_bytes, /* is_free */ 1);
+ set_free_elt (v, uoffset, n_user_data_bytes);
+}
+
+always_inline void
+remove_free_elt (void *v, mheap_elt_t * e, uword bin)
+{
+ mheap_t *h = mheap_header (v);
+ mheap_elt_t *p, *n;
+#if CLIB_VEC64 > 0
+ u64 no, po;
+#else
+ u32 no, po;
+#endif
+
+ no = e->free_elt.next_uoffset;
+
+ n = no != MHEAP_GROUNDED ? mheap_elt_at_uoffset (v, no) : 0;
+ po = e->free_elt.prev_uoffset;
+ p = po != MHEAP_GROUNDED ? mheap_elt_at_uoffset (v, po) : 0;
+
+ if (!p)
+ set_first_free_elt_offset (h, bin, no);
+ else
+ p->free_elt.next_uoffset = no;
+
+ if (n)
+ n->free_elt.prev_uoffset = po;
+}
+
+always_inline void
+remove_free_elt2 (void *v, mheap_elt_t * e)
+{
+ uword bin;
+ bin = user_data_size_to_bin_index (mheap_elt_data_bytes (e));
+ remove_free_elt (v, e, bin);
+}
+
+#define MHEAP_VM_MAP (1 << 0)
+#define MHEAP_VM_UNMAP (1 << 1)
+#define MHEAP_VM_NOMAP (0 << 1)
+#define MHEAP_VM_ROUND (1 << 2)
+#define MHEAP_VM_ROUND_UP MHEAP_VM_ROUND
+#define MHEAP_VM_ROUND_DOWN (0 << 2)
+
+static uword mheap_page_size;
+
+static_always_inline uword
+mheap_page_round (uword addr)
+{
+ return (addr + mheap_page_size - 1) & ~(mheap_page_size - 1);
+}
+
+static_always_inline uword
+mheap_page_truncate (uword addr)
+{
+ return addr & ~(mheap_page_size - 1);
+}
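+
+/* Example with a 4 KiB page size: mheap_page_round (5000) == 8192 and
+   mheap_page_truncate (5000) == 4096; both are identities on addresses
+   that are already page aligned. */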
+
+static_always_inline uword
+mheap_vm (void *v, uword flags, clib_address_t start_addr, uword size)
+{
+ mheap_t *h = mheap_header (v);
+ clib_address_t start_page, end_page, end_addr;
+ uword mapped_bytes;
+
+ ASSERT (!(h->flags & MHEAP_FLAG_DISABLE_VM));
+
+ end_addr = start_addr + size;
+
+ /* Round start/end address up to page boundary. */
+ start_page = mheap_page_round (start_addr);
+
+ if ((flags & MHEAP_VM_ROUND) == MHEAP_VM_ROUND_UP)
+ end_page = mheap_page_round (end_addr);
+ else
+ end_page = mheap_page_truncate (end_addr);
+
+ mapped_bytes = 0;
+ if (end_page > start_page)
+ {
+ mapped_bytes = end_page - start_page;
+ if (flags & MHEAP_VM_MAP)
+ clib_mem_vm_map ((void *) start_page, end_page - start_page);
+ else if (flags & MHEAP_VM_UNMAP)
+ clib_mem_vm_unmap ((void *) start_page, end_page - start_page);
+ }
+
+ return mapped_bytes;
+}
+
+static_always_inline uword
+mheap_vm_elt (void *v, uword flags, uword offset)
+{
+ mheap_elt_t *e;
+ clib_address_t start_addr, end_addr;
+
+ e = mheap_elt_at_uoffset (v, offset);
+ start_addr = (clib_address_t) ((void *) e->user_data);
+ end_addr = (clib_address_t) mheap_next_elt (e);
+ return mheap_vm (v, flags, start_addr, end_addr - start_addr);
+}
+
+always_inline uword
+mheap_small_object_cache_mask (mheap_small_object_cache_t * c, uword bin)
+{
+ uword mask;
+
+/* $$$$ ELIOT FIXME: add Altivec version of this routine */
+#if !defined (CLIB_HAVE_VEC128) || defined (__ALTIVEC__) || defined (__i386__)
+ mask = 0;
+#else
+ u8x16 b = u8x16_splat (bin);
+
+ ASSERT (bin < 256);
+
+#define _(i) ((uword) u8x16_compare_byte_mask (u8x16_is_equal (b, c->bins.as_u8x16[i])) << (uword) ((i)*16))
+ mask = _(0) | _(1);
+ if (BITS (uword) > 32)
+ mask |= _(2) | _(3);
+#undef _
+
+#endif
+ return mask;
+}
+
+always_inline uword
+mheap_get_small_object (mheap_t * h, uword bin)
+{
+ mheap_small_object_cache_t *c = &h->small_object_cache;
+ uword mask = mheap_small_object_cache_mask (c, bin + 1);
+ uword offset = MHEAP_GROUNDED;
+
+ if (mask)
+ {
+ uword i = min_log2 (mask);
+ uword o = c->offsets[i];
+ ASSERT (o != MHEAP_GROUNDED);
+ c->bins.as_u8[i] = 0;
+ offset = o;
+ }
+
+ return offset;
+}
+
+always_inline uword
+mheap_put_small_object (mheap_t * h, uword bin, uword offset)
+{
+ mheap_small_object_cache_t *c = &h->small_object_cache;
+ uword free_mask = mheap_small_object_cache_mask (c, 0);
+ uword b = bin + 1;
+ uword i;
+
+ if (free_mask != 0)
+ {
+ i = min_log2 (free_mask);
+ c->bins.as_u8[i] = b;
+ c->offsets[i] = offset;
+ return 0;
+ }
+ else
+ /* Nothing free with right size: cyclic replacement. */
+ {
+ uword old_offset;
+
+ i = c->replacement_index++;
+ i %= BITS (uword);
+ c->bins.as_u8[i] = b;
+ old_offset = c->offsets[i];
+ c->offsets[i] = offset;
+
+ /* Return old offset so it can be freed. */
+ return old_offset;
+ }
+}
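+
+/* The cache above holds up to BITS (uword) recently freed small objects.
+   Bin numbers are stored as bytes (bin + 1, with 0 meaning empty) so that
+   mheap_small_object_cache_mask can probe all slots with one 16-byte
+   vector compare per u8x16 lane instead of a scalar scan. */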
+
+static uword
+mheap_get_search_free_bin (void *v,
+ uword bin,
+ uword * n_user_data_bytes_arg,
+ uword align, uword align_offset)
+{
+ mheap_t *h = mheap_header (v);
+ mheap_elt_t *e;
+
+ /* Free object is at offset f0 ... f1;
+     Allocated object is at offset o0 ... o1. */
+ word o0, o1, f0, f1, search_n_user_data_bytes;
+ word lo_free_usize, hi_free_usize;
+
+ ASSERT (h->first_free_elt_uoffset_by_bin[bin] != MHEAP_GROUNDED);
+ e = mheap_elt_at_uoffset (v, h->first_free_elt_uoffset_by_bin[bin]);
+
+ search_n_user_data_bytes = *n_user_data_bytes_arg;
+
+ /* Silence compiler warning. */
+ o0 = o1 = f0 = f1 = 0;
+
+ h->stats.free_list.n_search_attempts += 1;
+
+ /* Find an object that is large enough with correct alignment at given alignment offset. */
+ while (1)
+ {
+ uword this_object_n_user_data_bytes = mheap_elt_data_bytes (e);
+
+ ASSERT (e->is_free);
+ if (bin < MHEAP_N_SMALL_OBJECT_BINS)
+ ASSERT (this_object_n_user_data_bytes >= search_n_user_data_bytes);
+
+ h->stats.free_list.n_objects_searched += 1;
+
+ if (this_object_n_user_data_bytes < search_n_user_data_bytes)
+ goto next;
+
+ /* Bounds of free object: from f0 to f1. */
+ f0 = ((void *) e->user_data - v);
+ f1 = f0 + this_object_n_user_data_bytes;
+
+ /* Place candidate object at end of free block and align as requested. */
+ o0 = ((f1 - search_n_user_data_bytes) & ~(align - 1)) - align_offset;
+ while (o0 < f0)
+ o0 += align;
+
+ /* Make sure that first free fragment is either empty or
+ large enough to be valid. */
+ while (1)
+ {
+ lo_free_usize = o0 != f0 ? o0 - f0 - MHEAP_ELT_OVERHEAD_BYTES : 0;
+ if (o0 <= f0 || lo_free_usize >= (word) MHEAP_MIN_USER_DATA_BYTES)
+ break;
+ o0 -= align;
+ }
+
+ o1 = o0 + search_n_user_data_bytes;
+
+ /* Does it fit? */
+ if (o0 >= f0 && o1 <= f1)
+ goto found;
+
+ next:
+ /* Reached end of free list without finding large enough object. */
+ if (e->free_elt.next_uoffset == MHEAP_GROUNDED)
+ return MHEAP_GROUNDED;
+
+ /* Otherwise keep searching for large enough object. */
+ e = mheap_elt_at_uoffset (v, e->free_elt.next_uoffset);
+ }
+
+found:
+ /* Free fragment at end. */
+ hi_free_usize = f1 != o1 ? f1 - o1 - MHEAP_ELT_OVERHEAD_BYTES : 0;
+
+ /* If fragment at end is too small to be a new object,
+ give user's object a bit more space than requested. */
+ if (hi_free_usize < (word) MHEAP_MIN_USER_DATA_BYTES)
+ {
+ search_n_user_data_bytes += f1 - o1;
+ o1 = f1;
+ hi_free_usize = 0;
+ }
+
+ /* Need to make sure that relevant memory areas are mapped. */
+ if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
+ {
+ mheap_elt_t *f0_elt = mheap_elt_at_uoffset (v, f0);
+ mheap_elt_t *f1_elt = mheap_elt_at_uoffset (v, f1);
+ mheap_elt_t *o0_elt = mheap_elt_at_uoffset (v, o0);
+ mheap_elt_t *o1_elt = mheap_elt_at_uoffset (v, o1);
+
+ uword f0_page_start, f0_page_end;
+ uword o0_page_start, o0_page_end;
+
+ /* Free elt is mapped. Addresses after that may not be mapped. */
+ f0_page_start = mheap_page_round (pointer_to_uword (f0_elt->user_data));
+ f0_page_end = mheap_page_truncate (pointer_to_uword (f1_elt));
+
+ o0_page_start = mheap_page_truncate (pointer_to_uword (o0_elt));
+ o0_page_end = mheap_page_round (pointer_to_uword (o1_elt->user_data));
+
+ if (o0_page_start < f0_page_start)
+ o0_page_start = f0_page_start;
+ if (o0_page_end > f0_page_end)
+ o0_page_end = f0_page_end;
+
+ if (o0_page_end > o0_page_start)
+ clib_mem_vm_map (uword_to_pointer (o0_page_start, void *),
+ o0_page_end - o0_page_start);
+ }
+
+ /* Remove free object from free list. */
+ remove_free_elt (v, e, bin);
+
+  /* Free fragment at beginning. */
+ if (lo_free_usize > 0)
+ {
+ ASSERT (lo_free_usize >= (word) MHEAP_MIN_USER_DATA_BYTES);
+ mheap_elt_set_size (v, f0, lo_free_usize, /* is_free */ 1);
+ new_free_elt (v, f0, lo_free_usize);
+ }
+
+ mheap_elt_set_size (v, o0, search_n_user_data_bytes, /* is_free */ 0);
+
+ if (hi_free_usize > 0)
+ {
+ uword uo = o1 + MHEAP_ELT_OVERHEAD_BYTES;
+ mheap_elt_set_size (v, uo, hi_free_usize, /* is_free */ 1);
+ new_free_elt (v, uo, hi_free_usize);
+ }
+
+ /* Return actual size of block. */
+ *n_user_data_bytes_arg = search_n_user_data_bytes;
+
+ h->stats.free_list.n_objects_found += 1;
+
+ return o0;
+}
+
+/* Search free lists for object with given size and alignment. */
+static uword
+mheap_get_search_free_list (void *v,
+ uword * n_user_bytes_arg,
+ uword align, uword align_offset)
+{
+ mheap_t *h = mheap_header (v);
+ uword bin, n_user_bytes, i, bi;
+
+ n_user_bytes = *n_user_bytes_arg;
+ bin = user_data_size_to_bin_index (n_user_bytes);
+
+ if (MHEAP_HAVE_SMALL_OBJECT_CACHE
+ && (h->flags & MHEAP_FLAG_SMALL_OBJECT_CACHE)
+ && bin < 255
+ && align == STRUCT_SIZE_OF (mheap_elt_t, user_data[0])
+ && align_offset == 0)
+ {
+ uword r = mheap_get_small_object (h, bin);
+ h->stats.n_small_object_cache_attempts += 1;
+ if (r != MHEAP_GROUNDED)
+ {
+ h->stats.n_small_object_cache_hits += 1;
+ return r;
+ }
+ }
+
+ for (i = bin / BITS (uword); i < ARRAY_LEN (h->non_empty_free_elt_heads);
+ i++)
+ {
+ uword non_empty_bin_mask = h->non_empty_free_elt_heads[i];
+
+ /* No need to search smaller bins. */
+ if (i == bin / BITS (uword))
+ non_empty_bin_mask &= ~pow2_mask (bin % BITS (uword));
+
+ /* Search each occupied free bin which is large enough. */
+ /* *INDENT-OFF* */
+ foreach_set_bit (bi, non_empty_bin_mask,
+ ({
+ uword r =
+ mheap_get_search_free_bin (v, bi + i * BITS (uword),
+ n_user_bytes_arg,
+ align,
+ align_offset);
+ if (r != MHEAP_GROUNDED) return r;
+ }));
+ /* *INDENT-ON* */
+ }
+
+ return MHEAP_GROUNDED;
+}
+
+static never_inline void *
+mheap_get_extend_vector (void *v,
+ uword n_user_data_bytes,
+ uword align,
+ uword align_offset, uword * offset_return)
+{
+ /* Bounds of free and allocated objects (as above). */
+ uword f0, f1, o0, o1;
+ word free_size;
+ mheap_t *h = mheap_header (v);
+ mheap_elt_t *e;
+
+ if (_vec_len (v) == 0)
+ {
+ _vec_len (v) = MHEAP_ELT_OVERHEAD_BYTES;
+
+ /* Create first element of heap. */
+ e = mheap_elt_at_uoffset (v, _vec_len (v));
+ e->prev_n_user_data = MHEAP_N_USER_DATA_INVALID;
+ }
+
+ f0 = _vec_len (v);
+
+ o0 = round_pow2 (f0, align) - align_offset;
+ while (1)
+ {
+ free_size = o0 - f0 - MHEAP_ELT_OVERHEAD_BYTES;
+ if (o0 == f0 || free_size >= (word) sizeof (mheap_elt_t))
+ break;
+
+ o0 += align;
+ }
+
+ o1 = o0 + n_user_data_bytes;
+ f1 = o1 + MHEAP_ELT_OVERHEAD_BYTES;
+
+ ASSERT (v != 0);
+ h = mheap_header (v);
+
+ /* Make sure we have space for object plus overhead. */
+ if (f1 > h->max_size)
+ {
+ *offset_return = MHEAP_GROUNDED;
+ return v;
+ }
+
+ _vec_len (v) = f1;
+
+ if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
+ {
+ mheap_elt_t *f0_elt = mheap_elt_at_uoffset (v, f0);
+ mheap_elt_t *f1_elt = mheap_elt_at_uoffset (v, f1);
+
+ uword f0_page = mheap_page_round (pointer_to_uword (f0_elt->user_data));
+ uword f1_page = mheap_page_round (pointer_to_uword (f1_elt->user_data));
+
+ if (f1_page > f0_page)
+ mheap_vm (v, MHEAP_VM_MAP, f0_page, f1_page - f0_page);
+ }
+
+ if (free_size > 0)
+ new_free_elt (v, f0, free_size);
+
+ mheap_elt_set_size (v, o0, n_user_data_bytes, /* is_free */ 0);
+
+ /* Mark last element. */
+ e = mheap_elt_at_uoffset (v, f1);
+ e->n_user_data = MHEAP_N_USER_DATA_INVALID;
+
+ *offset_return = o0;
+
+ return v;
+}
+
+void *
+mheap_get_aligned (void *v,
+ uword n_user_data_bytes,
+ uword align, uword align_offset, uword * offset_return)
+{
+ mheap_t *h;
+ uword offset;
+ u64 cpu_times[2];
+
+ cpu_times[0] = clib_cpu_time_now ();
+
+ align = clib_max (align, STRUCT_SIZE_OF (mheap_elt_t, user_data[0]));
+ align = max_pow2 (align);
+
+ /* Correct align offset to be smaller than alignment. */
+ align_offset &= (align - 1);
+
+  /* Align offset must be a multiple of minimum object size. */
+ if (align_offset % STRUCT_SIZE_OF (mheap_elt_t, user_data[0]) != 0)
+ {
+ *offset_return = MHEAP_GROUNDED;
+ return v;
+ }
+
+ /* Round requested size. */
+ n_user_data_bytes = clib_max (n_user_data_bytes, MHEAP_MIN_USER_DATA_BYTES);
+ n_user_data_bytes =
+ round_pow2 (n_user_data_bytes,
+ STRUCT_SIZE_OF (mheap_elt_t, user_data[0]));
+
+ if (!v)
+ v = mheap_alloc (0, 64 << 20);
+
+ mheap_maybe_lock (v);
+
+ h = mheap_header (v);
+
+ if (h->flags & MHEAP_FLAG_VALIDATE)
+ mheap_validate (v);
+
+ /* First search free lists for object. */
+ offset =
+ mheap_get_search_free_list (v, &n_user_data_bytes, align, align_offset);
+
+ h = mheap_header (v);
+
+ /* If that fails allocate object at end of heap by extending vector. */
+ if (offset == MHEAP_GROUNDED && _vec_len (v) < h->max_size)
+ {
+ v =
+ mheap_get_extend_vector (v, n_user_data_bytes, align, align_offset,
+ &offset);
+ h = mheap_header (v);
+ h->stats.n_vector_expands += offset != MHEAP_GROUNDED;
+ }
+
+ *offset_return = offset;
+ if (offset != MHEAP_GROUNDED)
+ {
+ h->n_elts += 1;
+
+ if (h->flags & MHEAP_FLAG_TRACE)
+ {
+	  /* Recursion block for case when we are tracing the main clib heap. */
+ h->flags &= ~MHEAP_FLAG_TRACE;
+
+ mheap_get_trace (v, offset, n_user_data_bytes);
+
+ h->flags |= MHEAP_FLAG_TRACE;
+ }
+ }
+
+ if (h->flags & MHEAP_FLAG_VALIDATE)
+ mheap_validate (v);
+
+ mheap_maybe_unlock (v);
+
+ cpu_times[1] = clib_cpu_time_now ();
+ h->stats.n_clocks_get += cpu_times[1] - cpu_times[0];
+ h->stats.n_gets += 1;
+
+ return v;
+}
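+
+/* Illustrative usage (a sketch): allocate 128 cache-line-aligned bytes.
+   The heap vector may move, so the returned vector must replace v.
+
+     uword offset;
+     v = mheap_get_aligned (v, 128, 64, 0, &offset);
+     if (offset != ~0)
+       {
+         void *obj = (u8 *) v + offset;
+         // ... use obj ...
+       }
+*/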
+
+static void
+free_last_elt (void *v, mheap_elt_t * e)
+{
+ mheap_t *h = mheap_header (v);
+
+  /* Possibly delete preceding free element also. */
+ if (e->prev_is_free)
+ {
+ e = mheap_prev_elt (e);
+ remove_free_elt2 (v, e);
+ }
+
+ if (e->prev_n_user_data == MHEAP_N_USER_DATA_INVALID)
+ {
+ if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
+ mheap_vm_elt (v, MHEAP_VM_UNMAP, mheap_elt_uoffset (v, e));
+ _vec_len (v) = 0;
+ }
+ else
+ {
+ uword uo = mheap_elt_uoffset (v, e);
+ if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
+ mheap_vm_elt (v, MHEAP_VM_UNMAP, uo);
+ e->n_user_data = MHEAP_N_USER_DATA_INVALID;
+ _vec_len (v) = uo;
+ }
+}
+
+void
+mheap_put (void *v, uword uoffset)
+{
+ mheap_t *h;
+ uword n_user_data_bytes, bin;
+ mheap_elt_t *e, *n;
+ uword trace_uoffset, trace_n_user_data_bytes;
+ u64 cpu_times[2];
+
+ cpu_times[0] = clib_cpu_time_now ();
+
+ h = mheap_header (v);
+
+ mheap_maybe_lock (v);
+
+ if (h->flags & MHEAP_FLAG_VALIDATE)
+ mheap_validate (v);
+
+ ASSERT (h->n_elts > 0);
+ h->n_elts--;
+ h->stats.n_puts += 1;
+
+ e = mheap_elt_at_uoffset (v, uoffset);
+ n = mheap_next_elt (e);
+ n_user_data_bytes = mheap_elt_data_bytes (e);
+
+ trace_uoffset = uoffset;
+ trace_n_user_data_bytes = n_user_data_bytes;
+
+ bin = user_data_size_to_bin_index (n_user_data_bytes);
+ if (MHEAP_HAVE_SMALL_OBJECT_CACHE
+ && bin < 255 && (h->flags & MHEAP_FLAG_SMALL_OBJECT_CACHE))
+ {
+ uoffset = mheap_put_small_object (h, bin, uoffset);
+ if (uoffset == 0)
+ goto done;
+
+ e = mheap_elt_at_uoffset (v, uoffset);
+ n = mheap_next_elt (e);
+ n_user_data_bytes = mheap_elt_data_bytes (e);
+ }
+
+ /* Assert that forward and back pointers are equal. */
+ if (e->n_user_data != n->prev_n_user_data)
+ os_panic ();
+
+ /* Forward and backwards is_free must agree. */
+ if (e->is_free != n->prev_is_free)
+ os_panic ();
+
+ /* Object was already freed. */
+ if (e->is_free)
+ os_panic ();
+
+ /* Special case: delete last element in heap. */
+ if (n->n_user_data == MHEAP_N_USER_DATA_INVALID)
+ free_last_elt (v, e);
+
+ else
+ {
+ uword f0, f1, n_combine;
+
+ f0 = uoffset;
+ f1 = f0 + n_user_data_bytes;
+ n_combine = 0;
+
+ if (e->prev_is_free)
+ {
+ mheap_elt_t *p = mheap_prev_elt (e);
+ f0 = mheap_elt_uoffset (v, p);
+ remove_free_elt2 (v, p);
+ n_combine++;
+ }
+
+ if (n->is_free)
+ {
+ mheap_elt_t *m = mheap_next_elt (n);
+ f1 = (void *) m - v;
+ remove_free_elt2 (v, n);
+ n_combine++;
+ }
+
+ if (n_combine)
+ mheap_elt_set_size (v, f0, f1 - f0, /* is_free */ 1);
+ else
+ e->is_free = n->prev_is_free = 1;
+ set_free_elt (v, f0, f1 - f0);
+
+ if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
+ mheap_vm_elt (v, MHEAP_VM_UNMAP, f0);
+ }
+
+done:
+ h = mheap_header (v);
+
+ if (h->flags & MHEAP_FLAG_TRACE)
+ {
+      /* Recursion block for case when we are tracing the main clib heap. */
+ h->flags &= ~MHEAP_FLAG_TRACE;
+
+ mheap_put_trace (v, trace_uoffset, trace_n_user_data_bytes);
+
+ h->flags |= MHEAP_FLAG_TRACE;
+ }
+
+ if (h->flags & MHEAP_FLAG_VALIDATE)
+ mheap_validate (v);
+
+ mheap_maybe_unlock (v);
+
+ cpu_times[1] = clib_cpu_time_now ();
+ h->stats.n_clocks_put += cpu_times[1] - cpu_times[0];
+}
+
+void *
+mheap_alloc_with_flags (void *memory, uword memory_size, uword flags)
+{
+ mheap_t *h;
+ void *v;
+ uword size;
+
+ if (!mheap_page_size)
+ mheap_page_size = clib_mem_get_page_size ();
+
+ if (!memory)
+ {
+ /* No memory given, try to VM allocate some. */
+ memory = clib_mem_vm_alloc (memory_size);
+ if (!memory)
+ return 0;
+
+ /* No memory region implies we have virtual memory. */
+ flags &= ~MHEAP_FLAG_DISABLE_VM;
+ }
+
+ /* Make sure that given memory is page aligned. */
+ {
+ uword am, av, ah;
+
+ am = pointer_to_uword (memory);
+ av = mheap_page_round (am);
+ v = uword_to_pointer (av, void *);
+ h = mheap_header (v);
+ ah = pointer_to_uword (h);
+ while (ah < am)
+ ah += mheap_page_size;
+
+ h = uword_to_pointer (ah, void *);
+ v = mheap_vector (h);
+
+ if (PREDICT_FALSE (memory + memory_size < v))
+ {
+ /*
+ * This will happen when the requested memory_size is too
+ * small to cope with the heap header and/or memory alignment.
+ */
+ clib_mem_vm_free (memory, memory_size);
+ return 0;
+ }
+
+ size = memory + memory_size - v;
+ }
+
+ /* VM map header so we can use memory. */
+ if (!(flags & MHEAP_FLAG_DISABLE_VM))
+ clib_mem_vm_map (h, sizeof (h[0]));
+
+ /* Zero vector header: both heap header and vector length. */
+ memset (h, 0, sizeof (h[0]));
+ _vec_len (v) = 0;
+
+ h->vm_alloc_offset_from_header = (void *) h - memory;
+ h->vm_alloc_size = memory_size;
+
+ h->max_size = size;
+ h->owner_cpu = ~0;
+
+ /* Set flags based on those given less builtin-flags. */
+ h->flags |= (flags & ~MHEAP_FLAG_TRACE);
+
+  /* Unmap remainder of heap until we are ready to use it. */
+ if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
+ mheap_vm (v, MHEAP_VM_UNMAP | MHEAP_VM_ROUND_UP,
+ (clib_address_t) v, h->max_size);
+
+ /* Initialize free list heads to empty. */
+ memset (h->first_free_elt_uoffset_by_bin, 0xFF,
+ sizeof (h->first_free_elt_uoffset_by_bin));
+
+ return v;
+}
+
+void *
+mheap_alloc (void *memory, uword size)
+{
+ uword flags = 0;
+
+ if (memory != 0)
+ flags |= MHEAP_FLAG_DISABLE_VM;
+
+#ifdef CLIB_HAVE_VEC128
+ flags |= MHEAP_FLAG_SMALL_OBJECT_CACHE;
+#endif
+
+ return mheap_alloc_with_flags (memory, size, flags);
+}
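+
+/* Illustrative lifecycle (a sketch, assuming a VM-backed heap):
+
+     void *heap = mheap_alloc (0, 16 << 20);   // 16 MB, VM allocated
+     uword offset;
+     heap = mheap_get (heap, 512, &offset);    // declared in mheap.h
+     if (offset != ~0)
+       mheap_put (heap, offset);               // free the object again
+     mheap_free (heap);
+*/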
+
+void *
+_mheap_free (void *v)
+{
+ mheap_t *h = mheap_header (v);
+
+ if (v)
+ clib_mem_vm_free ((void *) h - h->vm_alloc_offset_from_header,
+ h->vm_alloc_size);
+
+ return 0;
+}
+
+/* Call user's function with each object in heap. */
+void
+mheap_foreach (void *v,
+ uword (*func) (void *arg, void *v, void *elt_data,
+ uword elt_size), void *arg)
+{
+ mheap_elt_t *e;
+ u8 *stack_heap, *clib_mem_mheap_save;
+ u8 tmp_heap_memory[16 * 1024];
+
+ mheap_maybe_lock (v);
+
+ if (vec_len (v) == 0)
+ goto done;
+
+ clib_mem_mheap_save = 0;
+ stack_heap = 0;
+
+ /* Allocate a new temporary heap on the stack.
+ This is so that our hash table & user's callback function can
+ themselves allocate memory somewhere without getting in the way
+ of the heap we are looking at. */
+ if (v == clib_mem_get_heap ())
+ {
+ stack_heap = mheap_alloc (tmp_heap_memory, sizeof (tmp_heap_memory));
+ clib_mem_mheap_save = v;
+ clib_mem_set_heap (stack_heap);
+ }
+
+ for (e = v;
+ e->n_user_data != MHEAP_N_USER_DATA_INVALID; e = mheap_next_elt (e))
+ {
+ void *p = mheap_elt_data (v, e);
+ if (e->is_free)
+ continue;
+ if ((*func) (arg, v, p, mheap_elt_data_bytes (e)))
+ break;
+ }
+
+ /* Restore main CLIB heap. */
+ if (clib_mem_mheap_save)
+ clib_mem_set_heap (clib_mem_mheap_save);
+
+done:
+ mheap_maybe_unlock (v);
+}
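+
+/* Illustrative callback (a sketch): count live objects. Returning nonzero
+   from the callback stops the walk early.
+
+     static uword
+     count_one (void *arg, void *v, void *elt_data, uword elt_size)
+     {
+       (*(uword *) arg)++;
+       return 0;   // keep walking
+     }
+
+     uword n = 0;
+     mheap_foreach (heap, count_one, &n);
+*/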
+
+/* Bytes in mheap header overhead not including data bytes. */
+always_inline uword
+mheap_bytes_overhead (void *v)
+{
+ mheap_t *h = mheap_header (v);
+ return v ? sizeof (h[0]) + h->n_elts * sizeof (mheap_elt_t) : 0;
+}
+
+/* Total number of bytes including both data and overhead. */
+uword
+mheap_bytes (void *v)
+{
+ return mheap_bytes_overhead (v) + vec_bytes (v);
+}
+
+static void
+mheap_usage_no_lock (void *v, clib_mem_usage_t * usage)
+{
+ mheap_t *h = mheap_header (v);
+ uword used = 0, free = 0, free_vm_unmapped = 0;
+
+ if (vec_len (v) > 0)
+ {
+ mheap_elt_t *e;
+
+ for (e = v;
+ e->n_user_data != MHEAP_N_USER_DATA_INVALID;
+ e = mheap_next_elt (e))
+ {
+ uword size = mheap_elt_data_bytes (e);
+ if (e->is_free)
+ {
+ free += size;
+ if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
+ free_vm_unmapped +=
+ mheap_vm_elt (v, MHEAP_VM_NOMAP, mheap_elt_uoffset (v, e));
+ }
+ else
+ used += size;
+ }
+ }
+
+ usage->object_count = mheap_elts (v);
+ usage->bytes_total = mheap_bytes (v);
+ usage->bytes_overhead = mheap_bytes_overhead (v);
+ usage->bytes_max = mheap_max_size (v);
+ usage->bytes_used = used;
+ usage->bytes_free = free;
+ usage->bytes_free_reclaimed = free_vm_unmapped;
+}
+
+void
+mheap_usage (void *v, clib_mem_usage_t * usage)
+{
+ mheap_maybe_lock (v);
+ mheap_usage_no_lock (v, usage);
+ mheap_maybe_unlock (v);
+}
+
+static u8 *
+format_mheap_byte_count (u8 * s, va_list * va)
+{
+ uword n_bytes = va_arg (*va, uword);
+ if (n_bytes < 1024)
+ return format (s, "%wd", n_bytes);
+ else
+ return format (s, "%wdk", n_bytes / 1024);
+}
+
+/* Returns first corrupt heap element. */
+static mheap_elt_t *
+mheap_first_corrupt (void *v)
+{
+ mheap_elt_t *e, *n;
+
+ if (vec_len (v) == 0)
+ return 0;
+
+ e = v;
+ while (1)
+ {
+ if (e->n_user_data == MHEAP_N_USER_DATA_INVALID)
+ break;
+
+ n = mheap_next_elt (e);
+
+ if (e->n_user_data != n->prev_n_user_data)
+ return e;
+
+ if (e->is_free != n->prev_is_free)
+ return e;
+
+ e = n;
+ }
+
+ return 0;
+}
+
+static u8 *
+format_mheap_stats (u8 * s, va_list * va)
+{
+ mheap_t *h = va_arg (*va, mheap_t *);
+ mheap_stats_t *st = &h->stats;
+ uword indent = format_get_indent (s);
+
+ s =
+ format (s,
+ "alloc. from small object cache: %Ld hits %Ld attempts (%.2f%%) replacements %d",
+ st->n_small_object_cache_hits, st->n_small_object_cache_attempts,
+ (st->n_small_object_cache_attempts !=
+ 0 ? 100. * (f64) st->n_small_object_cache_hits /
+ (f64) st->n_small_object_cache_attempts : 0.),
+ h->small_object_cache.replacement_index);
+
+ s =
+ format (s,
+ "\n%Ualloc. from free-list: %Ld attempts, %Ld hits (%.2f%%), %Ld considered (per-attempt %.2f)",
+ format_white_space, indent, st->free_list.n_search_attempts,
+ st->free_list.n_objects_found,
+ (st->free_list.n_search_attempts !=
+ 0 ? 100. * (f64) st->free_list.n_objects_found /
+ (f64) st->free_list.n_search_attempts : 0.),
+ st->free_list.n_objects_searched,
+ (st->free_list.n_search_attempts !=
+ 0 ? (f64) st->free_list.n_objects_searched /
+ (f64) st->free_list.n_search_attempts : 0.));
+
+ s = format (s, "\n%Ualloc. from vector-expand: %Ld",
+ format_white_space, indent, st->n_vector_expands);
+
+ s = format (s, "\n%Uallocs: %Ld %.2f clocks/call",
+ format_white_space, indent,
+ st->n_gets, (f64) st->n_clocks_get / (f64) st->n_gets);
+
+ s = format (s, "\n%Ufrees: %Ld %.2f clocks/call",
+ format_white_space, indent,
+ st->n_puts, (f64) st->n_clocks_put / (f64) st->n_puts);
+
+ return s;
+}
+
+u8 *
+format_mheap (u8 * s, va_list * va)
+{
+ void *v = va_arg (*va, u8 *);
+ int verbose = va_arg (*va, int);
+
+ mheap_t *h;
+ uword i, size, indent;
+ clib_mem_usage_t usage;
+ mheap_elt_t *first_corrupt;
+
+ mheap_maybe_lock (v);
+
+ h = mheap_header (v);
+
+ mheap_usage_no_lock (v, &usage);
+
+ indent = format_get_indent (s);
+
+ s =
+ format (s,
+ "%d objects, %U of %U used, %U free, %U reclaimed, %U overhead",
+ usage.object_count, format_mheap_byte_count, usage.bytes_used,
+ format_mheap_byte_count, usage.bytes_total,
+ format_mheap_byte_count, usage.bytes_free,
+ format_mheap_byte_count, usage.bytes_free_reclaimed,
+ format_mheap_byte_count, usage.bytes_overhead);
+
+ if (usage.bytes_max != ~0)
+ s = format (s, ", %U capacity", format_mheap_byte_count, usage.bytes_max);
+
+ /* Show histogram of sizes. */
+ if (verbose > 1)
+ {
+ uword hist[MHEAP_N_BINS];
+ mheap_elt_t *e;
+ uword i, n_hist;
+
+ memset (hist, 0, sizeof (hist));
+
+ n_hist = 0;
+ for (e = v;
+ e->n_user_data != MHEAP_N_USER_DATA_INVALID;
+ e = mheap_next_elt (e))
+ {
+ uword n_user_data_bytes = mheap_elt_data_bytes (e);
+ uword bin = user_data_size_to_bin_index (n_user_data_bytes);
+ if (!e->is_free)
+ {
+ hist[bin] += 1;
+ n_hist += 1;
+ }
+ }
+
+ s = format (s, "\n%U%=12s%=12s%=16s",
+ format_white_space, indent + 2,
+ "Size", "Count", "Fraction");
+
+ for (i = 0; i < ARRAY_LEN (hist); i++)
+ {
+ if (hist[i] == 0)
+ continue;
+ s = format (s, "\n%U%12d%12wd%16.4f",
+ format_white_space, indent + 2,
+ MHEAP_MIN_USER_DATA_BYTES +
+ i * MHEAP_USER_DATA_WORD_BYTES, hist[i],
+ (f64) hist[i] / (f64) n_hist);
+ }
+ }
+
+ if (verbose)
+ s = format (s, "\n%U%U",
+ format_white_space, indent + 2, format_mheap_stats, h);
+
+ if ((h->flags & MHEAP_FLAG_TRACE) && vec_len (h->trace_main.traces) > 0)
+ {
+ /* Make a copy of traces since we'll be sorting them. */
+ mheap_trace_t *t, *traces_copy;
+ uword indent, total_objects_traced;
+
+ traces_copy = vec_dup (h->trace_main.traces);
+ qsort (traces_copy, vec_len (traces_copy), sizeof (traces_copy[0]),
+ mheap_trace_sort);
+
+ total_objects_traced = 0;
+ s = format (s, "\n");
+ vec_foreach (t, traces_copy)
+ {
+ /* Skip over free elements. */
+ if (t->n_allocations == 0)
+ continue;
+
+ total_objects_traced += t->n_allocations;
+
+	  /* When not verbose, only report allocations of more than 1k. */
+ if (!verbose && t->n_bytes < 1024)
+ continue;
+
+ if (t == traces_copy)
+ s = format (s, "%=9s%=9s %=10s Traceback\n", "Bytes", "Count",
+ "Sample");
+ s = format (s, "%9d%9d %p", t->n_bytes, t->n_allocations,
+ t->offset + v);
+ indent = format_get_indent (s);
+ for (i = 0; i < ARRAY_LEN (t->callers) && t->callers[i]; i++)
+ {
+ if (i > 0)
+ s = format (s, "%U", format_white_space, indent);
+#ifdef CLIB_UNIX
+ s =
+ format (s, " %U\n", format_clib_elf_symbol_with_address,
+ t->callers[i]);
+#else
+ s = format (s, " %p\n", t->callers[i]);
+#endif
+ }
+ }
+
+ s = format (s, "%d total traced objects\n", total_objects_traced);
+
+ vec_free (traces_copy);
+ }
+
+ first_corrupt = mheap_first_corrupt (v);
+ if (first_corrupt)
+ {
+ size = mheap_elt_data_bytes (first_corrupt);
+ s = format (s, "\n first corrupt object: %p, size %wd\n %U",
+ first_corrupt, size, format_hex_bytes, first_corrupt, size);
+ }
+
+ /* FIXME. This output could be wrong in the unlikely case that format
+ uses the same mheap as we are currently inspecting. */
+ if (verbose > 1)
+ {
+ mheap_elt_t *e;
+ uword i, o;
+
+ s = format (s, "\n");
+
+ e = mheap_elt_at_uoffset (v, 0);
+ i = 0;
+ while (1)
+ {
+ if ((i % 8) == 0)
+ s = format (s, "%8d: ", i);
+
+ o = mheap_elt_uoffset (v, e);
+
+ if (e->is_free)
+ s = format (s, "(%8d) ", o);
+ else
+ s = format (s, " %8d ", o);
+
+	  if ((i % 8) == 7 || (i + 1) >= h->n_elts)
+	    s = format (s, "\n");
+
+	  /* Advance to the next element; the sentinel terminates the walk. */
+	  e = mheap_next_elt (e);
+	  i++;
+	  if (e->n_user_data == MHEAP_N_USER_DATA_INVALID)
+	    break;
+	}
+ }
+
+ mheap_maybe_unlock (v);
+
+ return s;
+}
+
+void
+dmh (void *v)
+{
+ fformat (stderr, "%U", format_mheap, v, 1);
+}
+
+static void
+mheap_validate_breakpoint ()
+{
+ os_panic ();
+}
+
+void
+mheap_validate (void *v)
+{
+ mheap_t *h = mheap_header (v);
+ uword i, s;
+
+ uword elt_count, elt_size;
+ uword free_count_from_free_lists, free_size_from_free_lists;
+ uword small_elt_free_count, small_elt_free_size;
+
+#define CHECK(x) if (! (x)) { mheap_validate_breakpoint (); os_panic (); }
+
+ if (vec_len (v) == 0)
+ return;
+
+ mheap_maybe_lock (v);
+
+ /* Validate number of elements and size. */
+ free_size_from_free_lists = free_count_from_free_lists = 0;
+ for (i = 0; i < ARRAY_LEN (h->first_free_elt_uoffset_by_bin); i++)
+ {
+ mheap_elt_t *e, *n;
+ uword is_first;
+
+ CHECK ((h->first_free_elt_uoffset_by_bin[i] != MHEAP_GROUNDED)
+ ==
+ ((h->non_empty_free_elt_heads[i /
+ BITS (uword)] & ((uword) 1 <<
+ (uword) (i %
+ BITS
+ (uword))))
+ != 0));
+
+ if (h->first_free_elt_uoffset_by_bin[i] == MHEAP_GROUNDED)
+ continue;
+
+ e = mheap_elt_at_uoffset (v, h->first_free_elt_uoffset_by_bin[i]);
+ is_first = 1;
+ while (1)
+ {
+ uword s;
+
+ n = mheap_next_elt (e);
+
+ /* Object must be marked free. */
+ CHECK (e->is_free);
+
+ /* Next object's previous free bit must also be set. */
+ CHECK (n->prev_is_free);
+
+ if (is_first)
+ CHECK (e->free_elt.prev_uoffset == MHEAP_GROUNDED);
+ is_first = 0;
+
+ s = mheap_elt_data_bytes (e);
+ CHECK (user_data_size_to_bin_index (s) == i);
+
+ free_count_from_free_lists += 1;
+ free_size_from_free_lists += s;
+
+ if (e->free_elt.next_uoffset == MHEAP_GROUNDED)
+ break;
+
+ n = mheap_elt_at_uoffset (v, e->free_elt.next_uoffset);
+
+ /* Check free element linkages. */
+ CHECK (n->free_elt.prev_uoffset == mheap_elt_uoffset (v, e));
+
+ e = n;
+ }
+ }
+
+ /* Go through small object cache. */
+ small_elt_free_count = small_elt_free_size = 0;
+ for (i = 0; i < ARRAY_LEN (h->small_object_cache.bins.as_u8); i++)
+ {
+ if (h->small_object_cache.bins.as_u8[i] != 0)
+ {
+ mheap_elt_t *e;
+ uword b = h->small_object_cache.bins.as_u8[i] - 1;
+ uword o = h->small_object_cache.offsets[i];
+ uword s;
+
+ e = mheap_elt_at_uoffset (v, o);
+
+ /* Object must be allocated. */
+ CHECK (!e->is_free);
+
+ s = mheap_elt_data_bytes (e);
+ CHECK (user_data_size_to_bin_index (s) == b);
+
+ small_elt_free_count += 1;
+ small_elt_free_size += s;
+ }
+ }
+
+ {
+ mheap_elt_t *e, *n;
+ uword elt_free_size, elt_free_count;
+
+ elt_count = elt_size = elt_free_size = elt_free_count = 0;
+ for (e = v; e->n_user_data != MHEAP_N_USER_DATA_INVALID; e = n)
+ {
+ if (e->prev_n_user_data != MHEAP_N_USER_DATA_INVALID)
+ CHECK (e->prev_n_user_data * sizeof (e->user_data[0]) >=
+ MHEAP_MIN_USER_DATA_BYTES);
+
+ CHECK (e->n_user_data * sizeof (e->user_data[0]) >=
+ MHEAP_MIN_USER_DATA_BYTES);
+
+ n = mheap_next_elt (e);
+
+ CHECK (e->is_free == n->prev_is_free);
+
+ elt_count++;
+ s = mheap_elt_data_bytes (e);
+ elt_size += s;
+
+ if (e->is_free)
+ {
+ elt_free_count++;
+ elt_free_size += s;
+ }
+
+ /* Consecutive free objects should have been combined. */
+ CHECK (!(e->prev_is_free && n->prev_is_free));
+ }
+
+ CHECK (free_count_from_free_lists == elt_free_count);
+ CHECK (free_size_from_free_lists == elt_free_size);
+ CHECK (elt_count == h->n_elts + elt_free_count + small_elt_free_count);
+ CHECK (elt_size + (elt_count + 1) * MHEAP_ELT_OVERHEAD_BYTES ==
+ vec_len (v));
+ }
+
+ {
+ mheap_elt_t *e, *n;
+
+    for (e = v; e->n_user_data != MHEAP_N_USER_DATA_INVALID; e = n)
+ {
+ n = mheap_next_elt (e);
+ CHECK (e->n_user_data == n->prev_n_user_data);
+ }
+ }
+
+#undef CHECK
+
+ mheap_maybe_unlock (v);
+
+ h->validate_serial += 1;
+}
+
+static void
+mheap_get_trace (void *v, uword offset, uword size)
+{
+ mheap_t *h;
+ mheap_trace_main_t *tm;
+ mheap_trace_t *t;
+ uword i, n_callers, trace_index, *p;
+ mheap_trace_t trace;
+
+ /* Spurious Coverity warnings be gone. */
+ memset (&trace, 0, sizeof (trace));
+
+ n_callers = clib_backtrace (trace.callers, ARRAY_LEN (trace.callers),
+ /* Skip mheap_get_aligned's frame */ 1);
+ if (n_callers == 0)
+ return;
+
+ for (i = n_callers; i < ARRAY_LEN (trace.callers); i++)
+ trace.callers[i] = 0;
+
+ h = mheap_header (v);
+ tm = &h->trace_main;
+
+ if (!tm->trace_by_callers)
+ tm->trace_by_callers =
+ hash_create_mem (0, sizeof (trace.callers), sizeof (uword));
+
+ p = hash_get_mem (tm->trace_by_callers, &trace.callers);
+ if (p)
+ {
+ trace_index = p[0];
+ t = tm->traces + trace_index;
+ }
+ else
+ {
+ i = vec_len (tm->trace_free_list);
+ if (i > 0)
+ {
+ trace_index = tm->trace_free_list[i - 1];
+ _vec_len (tm->trace_free_list) = i - 1;
+ }
+ else
+ {
+ mheap_trace_t *old_start = tm->traces;
+ mheap_trace_t *old_end = vec_end (tm->traces);
+
+ vec_add2 (tm->traces, t, 1);
+
+ if (tm->traces != old_start)
+ {
+ hash_pair_t *p;
+ mheap_trace_t *q;
+ /* *INDENT-OFF* */
+ hash_foreach_pair (p, tm->trace_by_callers,
+ ({
+ q = uword_to_pointer (p->key, mheap_trace_t *);
+ ASSERT (q >= old_start && q < old_end);
+ p->key = pointer_to_uword (tm->traces + (q - old_start));
+ }));
+ /* *INDENT-ON* */
+ }
+ trace_index = t - tm->traces;
+ }
+
+ t = tm->traces + trace_index;
+ t[0] = trace;
+ t->n_allocations = 0;
+ t->n_bytes = 0;
+ hash_set_mem (tm->trace_by_callers, t->callers, trace_index);
+ }
+
+ t->n_allocations += 1;
+ t->n_bytes += size;
+ t->offset = offset; /* keep a sample to autopsy */
+ hash_set (tm->trace_index_by_offset, offset, t - tm->traces);
+}
+
+static void
+mheap_put_trace (void *v, uword offset, uword size)
+{
+ mheap_t *h;
+ mheap_trace_main_t *tm;
+ mheap_trace_t *t;
+ uword trace_index, *p;
+
+ h = mheap_header (v);
+ tm = &h->trace_main;
+ p = hash_get (tm->trace_index_by_offset, offset);
+ if (!p)
+ return;
+
+ trace_index = p[0];
+ hash_unset (tm->trace_index_by_offset, offset);
+ ASSERT (trace_index < vec_len (tm->traces));
+
+ t = tm->traces + trace_index;
+ ASSERT (t->n_allocations > 0);
+ ASSERT (t->n_bytes >= size);
+ t->n_allocations -= 1;
+ t->n_bytes -= size;
+ if (t->n_allocations == 0)
+ {
+ hash_unset_mem (tm->trace_by_callers, t->callers);
+ vec_add1 (tm->trace_free_list, trace_index);
+ memset (t, 0, sizeof (t[0]));
+ }
+}
+
+static int
+mheap_trace_sort (const void *_t1, const void *_t2)
+{
+ const mheap_trace_t *t1 = _t1;
+ const mheap_trace_t *t2 = _t2;
+ word cmp;
+
+ cmp = (word) t2->n_bytes - (word) t1->n_bytes;
+ if (!cmp)
+ cmp = (word) t2->n_allocations - (word) t1->n_allocations;
+ return cmp;
+}
+
+always_inline void
+mheap_trace_main_free (mheap_trace_main_t * tm)
+{
+ vec_free (tm->traces);
+ vec_free (tm->trace_free_list);
+ hash_free (tm->trace_by_callers);
+ hash_free (tm->trace_index_by_offset);
+}
+
+void
+mheap_trace (void *v, int enable)
+{
+ mheap_t *h;
+
+ h = mheap_header (v);
+
+ if (enable)
+ {
+ h->flags |= MHEAP_FLAG_TRACE;
+ }
+ else
+ {
+ mheap_trace_main_free (&h->trace_main);
+ h->flags &= ~MHEAP_FLAG_TRACE;
+ }
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/mheap.h b/src/vppinfra/mheap.h
new file mode 100644
index 00000000..5b7cdfba
--- /dev/null
+++ b/src/vppinfra/mheap.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_mheap_h
+#define included_mheap_h
+
+#include <vppinfra/vec.h>
+#include <vppinfra/error.h> /* clib_error_t */
+#include <vppinfra/mem.h> /* clib_mem_usage_t */
+#include <vppinfra/format.h> /* for unformat_input_t */
+
+/* Allocate size bytes. New heap and offset are returned.
+ offset == ~0 means allocation failed. */
+always_inline void *
+mheap_get (void *v, uword size, uword * offset_return)
+{
+ return mheap_get_aligned (v, size, 0, 0, offset_return);
+}
+
+/* Create allocation heap of given size.
+ * The actual usable size is smaller than the requested size.
+ * memory_bytes must be greater than mheap_page_size + sizeof (mheap_t) + 16.
+ * Otherwise, allocation may fail and return 0.
+ */
+void *mheap_alloc (void *memory, uword memory_bytes);
+void *mheap_alloc_with_flags (void *memory, uword memory_bytes, uword flags);
+
+#define mheap_free(v) (v) = _mheap_free(v)
+void *_mheap_free (void *v);
+
+void mheap_foreach (void *v,
+ uword (*func) (void *arg, void *v, void *elt_data,
+ uword elt_size), void *arg);
+
+/* Format mheap data structures as string. */
+u8 *format_mheap (u8 * s, va_list * va);
+
+/* Validate internal consistency. */
+void mheap_validate (void *h);
+
+/* Query bytes used. */
+uword mheap_bytes (void *v);
+
+void mheap_usage (void *v, clib_mem_usage_t * usage);
+
+/* Enable/disable tracing. */
+void mheap_trace (void *v, int enable);
+
+/* Test routine. */
+int test_mheap_main (unformat_input_t * input);
+
+#endif /* included_mheap_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
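A minimal sketch of the API above, assuming a null memory pointer makes
mheap_alloc map its own backing store (sizes and names are illustrative):

    #include <string.h>
    #include <vppinfra/mheap.h>

    static void
    mheap_example (void)
    {
      void *heap;
      uword offset;

      heap = mheap_alloc (0, 1 << 20);	/* 1 MB private heap */

      /* Allocate 128 bytes; offset == ~0 signals failure. */
      heap = mheap_get (heap, 128, &offset);
      if (offset != ~0)
	{
	  u8 *obj = (u8 *) heap + offset;	/* user data starts here */
	  memset (obj, 0, 128);
	  mheap_put (heap, offset);	/* return it to the heap */
	}

      mheap_free (heap);
    }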
diff --git a/src/vppinfra/mheap_bootstrap.h b/src/vppinfra/mheap_bootstrap.h
new file mode 100644
index 00000000..38f0ac84
--- /dev/null
+++ b/src/vppinfra/mheap_bootstrap.h
@@ -0,0 +1,374 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_mem_mheap_h
+#define included_mem_mheap_h
+
+/* Bootstrap include so that #include <vppinfra/mem.h> can include e.g.
+ <vppinfra/mheap.h> which depends on <vppinfra/vec.h>. */
+
+#include <vppinfra/vec_bootstrap.h>
+#include <vppinfra/error_bootstrap.h>
+#include <vppinfra/os.h>
+#include <vppinfra/vector.h>
+
+/* Each element in heap is immediately followed by this struct. */
+typedef struct
+{
+ /* Number of mheap_size_t words of user data in previous object.
+ Used to find mheap_elt_t for previous object. */
+#if CLIB_VEC64 > 0
+ u64 prev_n_user_data:63;
+
+  /* Used to mark end/start of doubly-linked list of mheap_elt_t's. */
+#define MHEAP_N_USER_DATA_INVALID (0x7fffffffffffffffULL)
+#define MHEAP_GROUNDED (~0ULL)
+
+ /* Set if previous object is free. */
+ u64 prev_is_free:1;
+
+ /* Number of mheap_size_t words of user data that follow this object. */
+ u64 n_user_data:63;
+
+ /* Set if this object is on free list (and therefore following free_elt
+ is valid). */
+ u64 is_free:1;
+
+#else
+ u32 prev_n_user_data:31;
+
+  /* Used to mark end/start of doubly-linked list of mheap_elt_t's. */
+#define MHEAP_N_USER_DATA_INVALID (0x7fffffff)
+#define MHEAP_GROUNDED (~0)
+
+ /* Set if previous object is free. */
+ u32 prev_is_free:1;
+
+ /* Number of mheap_size_t words of user data that follow this object. */
+ u32 n_user_data:31;
+
+ /* Set if this object is on free list (and therefore following free_elt
+ is valid). */
+ u32 is_free:1;
+#endif
+
+ union
+ {
+#if CLIB_VEC64 > 0
+ /* For allocated objects: user data follows.
+ User data is allocated in units of typeof (user_data[0]). */
+ u64 user_data[0];
+
+ /* For free objects, offsets of next and previous free objects of this size;
+ ~0 means end of doubly-linked list.
+ This is stored in user data (guaranteed to be at least 8 bytes)
+ but only for *free* objects. */
+ struct
+ {
+ u64 next_uoffset, prev_uoffset;
+ } free_elt;
+#else
+ /* For allocated objects: user data follows.
+ User data is allocated in units of typeof (user_data[0]). */
+ u32 user_data[0];
+
+ /* For free objects, offsets of next and previous free objects of this size;
+ ~0 means end of doubly-linked list.
+ This is stored in user data (guaranteed to be at least 8 bytes)
+ but only for *free* objects. */
+ struct
+ {
+ u32 next_uoffset, prev_uoffset;
+ } free_elt;
+#endif
+ };
+} mheap_elt_t;
+
+/* Number of bytes of "overhead": i.e., not user data. */
+#define MHEAP_ELT_OVERHEAD_BYTES (sizeof (mheap_elt_t) - STRUCT_OFFSET_OF (mheap_elt_t, user_data))
+
+/* User objects must be large enough to hold 2 x u32 free offsets in free elt. */
+#define MHEAP_MIN_USER_DATA_BYTES MHEAP_ELT_OVERHEAD_BYTES
+
+/* Number of bytes in a user data "word". */
+#define MHEAP_USER_DATA_WORD_BYTES STRUCT_SIZE_OF (mheap_elt_t, user_data[0])
+
+typedef struct
+{
+ /* Address of callers: outer first, inner last. */
+ uword callers[12];
+
+ /* Count of allocations with this traceback. */
+#if CLIB_VEC64 > 0
+ u64 n_allocations;
+#else
+ u32 n_allocations;
+#endif
+
+ /* Count of bytes allocated with this traceback. */
+ u32 n_bytes;
+
+ /* Offset of this item */
+ uword offset;
+} mheap_trace_t;
+
+typedef struct
+{
+ mheap_trace_t *traces;
+
+ /* Indices of free traces. */
+ u32 *trace_free_list;
+
+ /* Hash table mapping callers to trace index. */
+ uword *trace_by_callers;
+
+ /* Hash table mapping mheap offset to trace index. */
+ uword *trace_index_by_offset;
+} mheap_trace_main_t;
+
+/* Without vector instructions, don't bother with the small object cache. */
+#ifdef CLIB_HAVE_VEC128
+#define MHEAP_HAVE_SMALL_OBJECT_CACHE 1
+#else
+#define MHEAP_HAVE_SMALL_OBJECT_CACHE 0
+#endif
+
+ /* Small object bin i is for objects with
+ user_size > sizeof (mheap_elt_t) + sizeof (mheap_elt_t) * (i - 1)
+ user_size <= sizeof (mheap_elt_t) + sizeof (mheap_size_t) * i. */
+#if MHEAP_HAVE_SMALL_OBJECT_CACHE > 0
+#define MHEAP_LOG2_N_SMALL_OBJECT_BINS 8
+#define MHEAP_N_SMALL_OBJECT_BINS (1 << MHEAP_LOG2_N_SMALL_OBJECT_BINS)
+#else
+#define MHEAP_LOG2_N_SMALL_OBJECT_BINS 0
+#define MHEAP_N_SMALL_OBJECT_BINS 0
+#endif
+
+#define MHEAP_N_BINS \
+ (MHEAP_N_SMALL_OBJECT_BINS \
+ + (STRUCT_BITS_OF (mheap_elt_t, user_data[0]) - MHEAP_LOG2_N_SMALL_OBJECT_BINS))
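The bin-selection routine itself lives in mheap.c, not in this header;
purely to illustrate the comment above, a hypothetical mapping consistent
with the MHEAP_N_BINS formula might look like:

    /* Hypothetical sketch, not the library's actual routine: the first
       MHEAP_N_SMALL_OBJECT_BINS bins are linear in user-data words, the
       remaining bins are log2-spaced. */
    static inline uword
    sketch_user_data_words_to_bin (uword n_words)
    {
      if (n_words <= MHEAP_N_SMALL_OBJECT_BINS)
	return n_words ? n_words - 1 : 0;
      return MHEAP_N_SMALL_OBJECT_BINS
	+ (max_log2 (n_words) - MHEAP_LOG2_N_SMALL_OBJECT_BINS);
    }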
+
+typedef struct
+{
+ struct
+ {
+ u64 n_search_attempts;
+ u64 n_objects_searched;
+ u64 n_objects_found;
+ } free_list;
+
+ u64 n_vector_expands;
+
+ u64 n_small_object_cache_hits;
+ u64 n_small_object_cache_attempts;
+
+ u64 n_gets, n_puts;
+ u64 n_clocks_get, n_clocks_put;
+} mheap_stats_t;
+
+/* For objects with align == 4 and align_offset == 0 (e.g. vector strings). */
+typedef struct
+{
+ union
+ {
+#ifdef CLIB_HAVE_VEC128
+ u8x16 as_u8x16[BITS (uword) / 16];
+#endif
+
+ /* Store bin + 1; zero means unused. */
+ u8 as_u8[BITS (uword)];
+ } bins;
+
+ uword offsets[BITS (uword)];
+
+ u32 replacement_index;
+} mheap_small_object_cache_t;
+
+/* Vec header for heaps. */
+typedef struct
+{
+ /* User offsets for head of doubly-linked list of free objects of this size. */
+#if CLIB_VEC64 > 0
+ u64 first_free_elt_uoffset_by_bin[MHEAP_N_BINS];
+#else
+ u32 first_free_elt_uoffset_by_bin[MHEAP_N_BINS];
+#endif
+
+ /* Bitmap of non-empty free list bins. */
+ uword non_empty_free_elt_heads[(MHEAP_N_BINS + BITS (uword) - 1) /
+ BITS (uword)];
+
+ mheap_small_object_cache_t small_object_cache;
+
+ u32 flags;
+#define MHEAP_FLAG_TRACE (1 << 0)
+#define MHEAP_FLAG_DISABLE_VM (1 << 1)
+#define MHEAP_FLAG_THREAD_SAFE (1 << 2)
+#define MHEAP_FLAG_SMALL_OBJECT_CACHE (1 << 3)
+#define MHEAP_FLAG_VALIDATE (1 << 4)
+
+  /* Lock used when MHEAP_FLAG_THREAD_SAFE is set. */
+ volatile u32 lock;
+ volatile u32 owner_cpu;
+ int recursion_count;
+
+ /* Number of allocated objects. */
+ u64 n_elts;
+
+ /* Maximum size (in bytes) this heap is allowed to grow to.
+ Set to ~0 to grow heap (via vec_resize) arbitrarily. */
+ u64 max_size;
+
+ uword vm_alloc_offset_from_header;
+ uword vm_alloc_size;
+
+ /* Each successful mheap_validate call increments this serial number.
+ Used to debug heap corruption problems. GDB breakpoints can be
+ made conditional on validate_serial. */
+ u64 validate_serial;
+
+ mheap_trace_main_t trace_main;
+
+ mheap_stats_t stats;
+} mheap_t;
+
+always_inline mheap_t *
+mheap_header (u8 * v)
+{
+ return vec_aligned_header (v, sizeof (mheap_t), 16);
+}
+
+always_inline u8 *
+mheap_vector (mheap_t * h)
+{
+ return vec_aligned_header_end (h, sizeof (mheap_t), 16);
+}
+
+always_inline uword
+mheap_elt_uoffset (void *v, mheap_elt_t * e)
+{
+ return (uword) e->user_data - (uword) v;
+}
+
+always_inline mheap_elt_t *
+mheap_user_pointer_to_elt (void *v)
+{
+ return v - STRUCT_OFFSET_OF (mheap_elt_t, user_data);
+}
+
+/* For debugging we keep track of offsets for valid objects.
+   We make sure the user is not trying to free an object with an invalid offset. */
+always_inline uword
+mheap_offset_is_valid (void *v, uword uo)
+{
+ return uo >= MHEAP_ELT_OVERHEAD_BYTES && uo <= vec_len (v);
+}
+
+always_inline mheap_elt_t *
+mheap_elt_at_uoffset (void *v, uword uo)
+{
+ ASSERT (mheap_offset_is_valid (v, uo));
+ return (mheap_elt_t *) (v + uo - STRUCT_OFFSET_OF (mheap_elt_t, user_data));
+}
+
+always_inline void *
+mheap_elt_data (void *v, mheap_elt_t * e)
+{
+ return v + mheap_elt_uoffset (v, e);
+}
+
+always_inline uword
+mheap_elt_data_bytes (mheap_elt_t * e)
+{
+ return e->n_user_data * sizeof (e->user_data[0]);
+}
+
+always_inline uword
+mheap_data_bytes (void *v, uword uo)
+{
+ mheap_elt_t *e = mheap_elt_at_uoffset (v, uo);
+ return mheap_elt_data_bytes (e);
+}
+
+#define mheap_len(v,d) (mheap_data_bytes((v),(void *) (d) - (void *) (v)) / sizeof ((d)[0]))
+
+always_inline mheap_elt_t *
+mheap_next_elt (mheap_elt_t * e)
+{
+ ASSERT (e->n_user_data < MHEAP_N_USER_DATA_INVALID);
+ return (mheap_elt_t *) (e->user_data + e->n_user_data);
+}
+
+always_inline mheap_elt_t *
+mheap_prev_elt (mheap_elt_t * e)
+{
+ ASSERT (e->prev_n_user_data < MHEAP_N_USER_DATA_INVALID);
+ return ((void *) e
+ - e->prev_n_user_data * sizeof (e->user_data[0])
+ - MHEAP_ELT_OVERHEAD_BYTES);
+}
+
+/* Exported operations. */
+
+always_inline uword
+mheap_elts (void *v)
+{
+ return v ? mheap_header (v)->n_elts : 0;
+}
+
+always_inline uword
+mheap_max_size (void *v)
+{
+ return v ? mheap_header (v)->max_size : ~0;
+}
+
+/* Free previously allocated offset. */
+void mheap_put (void *v, uword offset);
+
+/* Allocate object from mheap. */
+void *mheap_get_aligned (void *v, uword size, uword align, uword align_offset,
+ uword * offset_return);
+
+#endif /* included_mem_mheap_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
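The boundary-tag accessors above suffice to walk a heap end to end. A
hedged sketch, assuming the first element sits at user offset
MHEAP_ELT_OVERHEAD_BYTES and the chain ends at an element whose
n_user_data equals MHEAP_N_USER_DATA_INVALID:

    #include <vppinfra/format.h>	/* fformat */

    static void
    sketch_walk_heap (void *v)
    {
      mheap_elt_t *e = mheap_elt_at_uoffset (v, MHEAP_ELT_OVERHEAD_BYTES);

      while (e->n_user_data != MHEAP_N_USER_DATA_INVALID)
	{
	  if (!e->is_free)
	    fformat (stdout, "object at offset %wd, %wd bytes\n",
		     mheap_elt_uoffset (v, e), mheap_elt_data_bytes (e));
	  e = mheap_next_elt (e);
	}
    }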
diff --git a/src/vppinfra/mod_test_hash.c b/src/vppinfra/mod_test_hash.c
new file mode 100644
index 00000000..b3fa676d
--- /dev/null
+++ b/src/vppinfra/mod_test_hash.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vppinfra/linux_kernel_init.h>
+#include <vppinfra/hash.h>
+
+CLIB_LINUX_KERNEL_MODULE ("test_hash", test_hash_main,
+ /* kernel-thread flags */ 0 & CLONE_KERNEL);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/os.h b/src/vppinfra/os.h
new file mode 100644
index 00000000..33300716
--- /dev/null
+++ b/src/vppinfra/os.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001-2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_os_h
+#define included_os_h
+
+#include <vppinfra/clib.h>
+#include <vppinfra/types.h>
+
+/* External panic function. */
+void os_panic (void);
+
+/* External exit function analogous to unix exit. */
+void os_exit (int code);
+
+/* External function to print a line. */
+void os_puts (u8 * string, uword length, uword is_error);
+
+/* External function to handle out of memory. */
+void os_out_of_memory (void);
+
+/* Estimate, measure or divine CPU timestamp clock frequency. */
+f64 os_cpu_clock_frequency (void);
+
+extern __thread uword __os_thread_index;
+
+static_always_inline uword
+os_get_thread_index (void)
+{
+ return __os_thread_index;
+}
+
+static_always_inline uword
+os_get_cpu_number (void) __attribute__ ((deprecated));
+
+static_always_inline uword
+os_get_cpu_number (void)
+{
+ return __os_thread_index;
+}
+
+uword os_get_nthreads (void);
+
+#include <vppinfra/smp.h>
+
+#endif /* included_os_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/pfhash.c b/src/vppinfra/pfhash.c
new file mode 100644
index 00000000..3b9fa8f3
--- /dev/null
+++ b/src/vppinfra/pfhash.c
@@ -0,0 +1,689 @@
+/*
+ Copyright (c) 2013 Cisco and/or its affiliates.
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+#include <vppinfra/pfhash.h>
+#include <vppinfra/format.h>
+
+/* This is incredibly handy when debugging */
+u32 vl (void *v) __attribute__ ((weak));
+u32
+vl (void *v)
+{
+ return vec_len (v);
+}
+
+#if defined(CLIB_HAVE_VEC128) && ! defined (__ALTIVEC__)
+
+typedef struct
+{
+  u8 key[16];
+ u64 value;
+} pfhash_show_t;
+
+static int
+sh_compare (pfhash_show_t * sh0, pfhash_show_t * sh1)
+{
+ return ((i32) (sh0->value) - ((i32) sh1->value));
+}
+
+u8 *
+format_pfhash (u8 * s, va_list * args)
+{
+ pfhash_t *p = va_arg (*args, pfhash_t *);
+ int verbose = va_arg (*args, int);
+
+ if (p == 0 || p->overflow_hash == 0 || p->buckets == 0)
+ {
+ s = format (s, "*** uninitialized ***");
+ return s;
+ }
+
+ s = format (s, "Prefetch hash '%s'\n", p->name);
+ s =
+    format (s, " %d buckets, %u bucket overflows, %.1f%% bucket overflow\n",
+ vec_len (p->buckets), p->overflow_count,
+ 100.0 * ((f64) p->overflow_count) / ((f64) vec_len (p->buckets)));
+ if (p->nitems)
+ s =
+ format (s,
+ " %u items, %u items in overflow, %.1f%% items in overflow\n",
+ p->nitems, p->nitems_in_overflow,
+ 100.0 * ((f64) p->nitems_in_overflow) / ((f64) p->nitems));
+
+ if (verbose)
+ {
+ pfhash_show_t *shs = 0, *sh;
+ hash_pair_t *hp;
+ int i, j;
+
+ for (i = 0; i < vec_len (p->buckets); i++)
+ {
+ pfhash_kv_t *kv;
+ pfhash_kv_16_t *kv16;
+ pfhash_kv_8_t *kv8;
+ pfhash_kv_8v8_t *kv8v8;
+ pfhash_kv_4_t *kv4;
+
+ if (p->buckets[i] == 0 || p->buckets[i] == PFHASH_BUCKET_OVERFLOW)
+ continue;
+
+ kv = pool_elt_at_index (p->kvp, p->buckets[i]);
+
+ switch (p->key_size)
+ {
+ case 16:
+ kv16 = &kv->kv16;
+ for (j = 0; j < 3; j++)
+ {
+ if (kv16->values[j] != (u32) ~ 0)
+ {
+ vec_add2 (shs, sh, 1);
+ clib_memcpy (sh->key, &kv16->kb.k_u32x4[j],
+ p->key_size);
+ sh->value = kv16->values[j];
+ }
+ }
+ break;
+ case 8:
+ if (p->value_size == 4)
+ {
+ kv8 = &kv->kv8;
+ for (j = 0; j < 5; j++)
+ {
+ if (kv8->values[j] != (u32) ~ 0)
+ {
+ vec_add2 (shs, sh, 1);
+ clib_memcpy (sh->key, &kv8->kb.k_u64[j],
+ p->key_size);
+ sh->value = kv8->values[j];
+ }
+ }
+ }
+ else
+ {
+ kv8v8 = &kv->kv8v8;
+ for (j = 0; j < 4; j++)
+ {
+ if (kv8v8->values[j] != (u64) ~ 0)
+ {
+ vec_add2 (shs, sh, 1);
+ clib_memcpy (sh->key, &kv8v8->kb.k_u64[j],
+ p->key_size);
+ sh->value = kv8v8->values[j];
+ }
+ }
+
+ }
+ break;
+ case 4:
+ kv4 = &kv->kv4;
+ for (j = 0; j < 8; j++)
+ {
+ if (kv4->values[j] != (u32) ~ 0)
+ {
+ vec_add2 (shs, sh, 1);
+ clib_memcpy (sh->key, &kv4->kb.kb[j], p->key_size);
+ sh->value = kv4->values[j];
+ }
+ }
+ break;
+ }
+ }
+
+ /* *INDENT-OFF* */
+ hash_foreach_pair (hp, p->overflow_hash,
+ ({
+ vec_add2 (shs, sh, 1);
+ clib_memcpy (sh->key, (u8 *)hp->key, p->key_size);
+ sh->value = hp->value[0];
+ }));
+ /* *INDENT-ON* */
+
+ vec_sort_with_function (shs, sh_compare);
+
+ for (i = 0; i < vec_len (shs); i++)
+ {
+ sh = vec_elt_at_index (shs, i);
+ s = format (s, " %U value %u\n", format_hex_bytes, sh->key,
+ p->key_size, sh->value);
+ }
+ vec_free (shs);
+ }
+ return s;
+}
+
+
+void abort (void);
+
+void
+pfhash_init (pfhash_t * p, char *name, u32 key_size, u32 value_size,
+ u32 nbuckets)
+{
+ pfhash_kv_t *kv;
+ memset (p, 0, sizeof (*p));
+ u32 key_bytes;
+
+ switch (key_size)
+ {
+ case 4:
+ key_bytes = 4;
+ break;
+ case 8:
+ key_bytes = 8;
+ break;
+ case 16:
+ key_bytes = 16;
+ break;
+ default:
+ ASSERT (0);
+ abort ();
+ }
+
+ switch (value_size)
+ {
+ case 4:
+ case 8:
+ break;
+ default:
+ ASSERT (0);
+ abort ();
+ }
+
+
+ p->name = format (0, "%s", name);
+ vec_add1 (p->name, 0);
+ p->overflow_hash = hash_create_mem (0, key_bytes, sizeof (uword));
+
+ nbuckets = 1 << (max_log2 (nbuckets));
+
+ /* This sets the entire bucket array to zero */
+ vec_validate (p->buckets, nbuckets - 1);
+ p->key_size = key_size;
+ p->value_size = value_size;
+
+ /*
+ * Unset buckets implicitly point at the 0th pool elt.
+ * All search routines will return ~0 if they go there.
+ */
+ pool_get_aligned (p->kvp, kv, 16);
+ memset (kv, 0xff, sizeof (*kv));
+}
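A hedged end-to-end sketch of the API (the bucket hash below is an
illustrative caller choice, not part of the library):

    static void
    sketch_pfhash_use (void)
    {
      pfhash_t ph;
      u64 key = 0x1234567890abcdefULL;
      u32 bucket;

      pfhash_init (&ph, "sketch", 8 /* key bytes */, 4 /* value bytes */,
		   1024 /* buckets */);

      /* Bucket selection is the caller's job; any decent hash of the
	 key, masked to the bucket count, will do. */
      bucket = (u32) (key ^ (key >> 32)) & (vec_len (ph.buckets) - 1);

      pfhash_set (&ph, bucket, &key, (void *) (u64) 42);
      ASSERT (pfhash_get (&ph, bucket, &key) == 42);

      pfhash_unset (&ph, bucket, &key);
      pfhash_free (&ph);
    }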
+
+static pfhash_kv_16_t *
+pfhash_get_kv_16 (pfhash_t * p, u32 bucket_contents,
+ u32x4 * key, u32 * match_index)
+{
+ u32x4 diff[3];
+ u32 is_equal[3];
+ pfhash_kv_16_t *kv = 0;
+
+ *match_index = (u32) ~ 0;
+
+ kv = &p->kvp[bucket_contents].kv16;
+
+ diff[0] = u32x4_sub (kv->kb.k_u32x4[0], key[0]);
+ diff[1] = u32x4_sub (kv->kb.k_u32x4[1], key[0]);
+ diff[2] = u32x4_sub (kv->kb.k_u32x4[2], key[0]);
+
+ is_equal[0] = u32x4_zero_byte_mask (diff[0]) == 0xffff;
+ is_equal[1] = u32x4_zero_byte_mask (diff[1]) == 0xffff;
+ is_equal[2] = u32x4_zero_byte_mask (diff[2]) == 0xffff;
+
+ if (is_equal[0])
+ *match_index = 0;
+ if (is_equal[1])
+ *match_index = 1;
+ if (is_equal[2])
+ *match_index = 2;
+
+ return kv;
+}
+
+static pfhash_kv_8_t *
+pfhash_get_kv_8 (pfhash_t * p, u32 bucket_contents,
+ u64 * key, u32 * match_index)
+{
+ pfhash_kv_8_t *kv;
+
+ *match_index = (u32) ~ 0;
+
+ kv = &p->kvp[bucket_contents].kv8;
+
+ if (kv->kb.k_u64[0] == key[0])
+ *match_index = 0;
+ if (kv->kb.k_u64[1] == key[0])
+ *match_index = 1;
+ if (kv->kb.k_u64[2] == key[0])
+ *match_index = 2;
+ if (kv->kb.k_u64[3] == key[0])
+ *match_index = 3;
+ if (kv->kb.k_u64[4] == key[0])
+ *match_index = 4;
+
+ return kv;
+}
+
+static pfhash_kv_8v8_t *
+pfhash_get_kv_8v8 (pfhash_t * p,
+ u32 bucket_contents, u64 * key, u32 * match_index)
+{
+ pfhash_kv_8v8_t *kv;
+
+ *match_index = (u32) ~ 0;
+
+ kv = &p->kvp[bucket_contents].kv8v8;
+
+ if (kv->kb.k_u64[0] == key[0])
+ *match_index = 0;
+ if (kv->kb.k_u64[1] == key[0])
+ *match_index = 1;
+ if (kv->kb.k_u64[2] == key[0])
+ *match_index = 2;
+ if (kv->kb.k_u64[3] == key[0])
+ *match_index = 3;
+
+ return kv;
+}
+
+static pfhash_kv_4_t *
+pfhash_get_kv_4 (pfhash_t * p, u32 bucket_contents,
+ u32 * key, u32 * match_index)
+{
+ u32x4 vector_key;
+ u32x4 is_equal[2];
+ u32 zbm[2], winner_index;
+ pfhash_kv_4_t *kv;
+
+ *match_index = (u32) ~ 0;
+
+ kv = &p->kvp[bucket_contents].kv4;
+
+ vector_key = u32x4_splat (key[0]);
+
+ is_equal[0] = u32x4_is_equal (kv->kb.k_u32x4[0], vector_key);
+ is_equal[1] = u32x4_is_equal (kv->kb.k_u32x4[1], vector_key);
+ zbm[0] = ~u32x4_zero_byte_mask (is_equal[0]) & 0xFFFF;
+ zbm[1] = ~u32x4_zero_byte_mask (is_equal[1]) & 0xFFFF;
+
+ if (PREDICT_FALSE ((zbm[0] == 0) && (zbm[1] == 0)))
+ return kv;
+
+ winner_index = min_log2 (zbm[0]) >> 2;
+ winner_index = zbm[1] ? (4 + (min_log2 (zbm[1]) >> 2)) : winner_index;
+
+ *match_index = winner_index;
+ return kv;
+}
+
+static pfhash_kv_t *
+pfhash_get_internal (pfhash_t * p, u32 bucket_contents,
+ void *key, u32 * match_index)
+{
+ pfhash_kv_t *kv = 0;
+
+ switch (p->key_size)
+ {
+ case 16:
+ kv =
+ (pfhash_kv_t *) pfhash_get_kv_16 (p, bucket_contents, key,
+ match_index);
+ break;
+ case 8:
+ if (p->value_size == 4)
+ kv = (pfhash_kv_t *) pfhash_get_kv_8 (p, bucket_contents,
+ key, match_index);
+ else
+ kv = (pfhash_kv_t *) pfhash_get_kv_8v8 (p, bucket_contents,
+ key, match_index);
+ break;
+ case 4:
+ kv =
+ (pfhash_kv_t *) pfhash_get_kv_4 (p, bucket_contents, key,
+ match_index);
+ break;
+ default:
+ ASSERT (0);
+ }
+ return kv;
+}
+
+u64
+pfhash_get (pfhash_t * p, u32 bucket, void *key)
+{
+ pfhash_kv_t *kv;
+ u32 match_index = ~0;
+ pfhash_kv_16_t *kv16;
+ pfhash_kv_8_t *kv8;
+ pfhash_kv_8v8_t *kv8v8;
+ pfhash_kv_4_t *kv4;
+
+ u32 bucket_contents = pfhash_read_bucket_prefetch_kv (p, bucket);
+
+ if (bucket_contents == PFHASH_BUCKET_OVERFLOW)
+ {
+ uword *hp;
+
+ hp = hash_get_mem (p->overflow_hash, key);
+ if (hp)
+ return hp[0];
+ return (u64) ~ 0;
+ }
+
+ kv = pfhash_get_internal (p, bucket_contents, key, &match_index);
+ if (match_index == (u32) ~ 0)
+ return (u64) ~ 0;
+
+ kv16 = (void *) kv;
+ kv8 = (void *) kv;
+ kv4 = (void *) kv;
+ kv8v8 = (void *) kv;
+
+ switch (p->key_size)
+ {
+ case 16:
+ return (kv16->values[match_index] == (u32) ~ 0)
+ ? (u64) ~ 0 : (u64) kv16->values[match_index];
+ case 8:
+ if (p->value_size == 4)
+ return (kv8->values[match_index] == (u32) ~ 0)
+ ? (u64) ~ 0 : (u64) kv8->values[match_index];
+ else
+ return kv8v8->values[match_index];
+ case 4:
+ return (kv4->values[match_index] == (u32) ~ 0)
+ ? (u64) ~ 0 : (u64) kv4->values[match_index];
+ default:
+ ASSERT (0);
+ }
+ return (u64) ~ 0;
+}
+
+void
+pfhash_set (pfhash_t * p, u32 bucket, void *key, void *value)
+{
+ u32 bucket_contents = pfhash_read_bucket_prefetch_kv (p, bucket);
+ u32 match_index = (u32) ~ 0;
+ pfhash_kv_t *kv;
+ pfhash_kv_16_t *kv16;
+ pfhash_kv_8_t *kv8;
+ pfhash_kv_8v8_t *kv8v8;
+ pfhash_kv_4_t *kv4;
+ int i;
+ u8 *kcopy;
+
+ if (bucket_contents == PFHASH_BUCKET_OVERFLOW)
+ {
+ hash_pair_t *hp;
+ hp = hash_get_pair_mem (p->overflow_hash, key);
+ if (hp)
+ {
+ clib_warning ("replace value 0x%08x with value 0x%08x",
+ hp->value[0], (u64) value);
+ hp->value[0] = (u64) value;
+ return;
+ }
+ kcopy = clib_mem_alloc (p->key_size);
+ clib_memcpy (kcopy, key, p->key_size);
+ hash_set_mem (p->overflow_hash, kcopy, value);
+ p->nitems++;
+ p->nitems_in_overflow++;
+ return;
+ }
+
+ if (bucket_contents == 0)
+ {
+ pool_get_aligned (p->kvp, kv, 16);
+ memset (kv, 0xff, sizeof (*kv));
+ p->buckets[bucket] = kv - p->kvp;
+ }
+ else
+ kv = pfhash_get_internal (p, bucket_contents, key, &match_index);
+
+ kv16 = (void *) kv;
+ kv8 = (void *) kv;
+ kv8v8 = (void *) kv;
+ kv4 = (void *) kv;
+
+ p->nitems++;
+
+ if (match_index != (u32) ~ 0)
+ {
+ switch (p->key_size)
+ {
+ case 16:
+ kv16->values[match_index] = (u32) (u64) value;
+ return;
+
+ case 8:
+ if (p->value_size == 4)
+ kv8->values[match_index] = (u32) (u64) value;
+ else
+ kv8v8->values[match_index] = (u64) value;
+ return;
+
+ case 4:
+	  kv4->values[match_index] = (u32) (u64) value;
+ return;
+
+ default:
+ ASSERT (0);
+ }
+ }
+
+ switch (p->key_size)
+ {
+ case 16:
+ for (i = 0; i < 3; i++)
+ {
+ if (kv16->values[i] == (u32) ~ 0)
+ {
+ clib_memcpy (&kv16->kb.k_u32x4[i], key, p->key_size);
+ kv16->values[i] = (u32) (u64) value;
+ return;
+ }
+ }
+ /* copy bucket contents to overflow hash tbl */
+ for (i = 0; i < 3; i++)
+ {
+ kcopy = clib_mem_alloc (p->key_size);
+ clib_memcpy (kcopy, &kv16->kb.k_u32x4[i], p->key_size);
+ hash_set_mem (p->overflow_hash, kcopy, kv16->values[i]);
+ p->nitems_in_overflow++;
+ }
+ /* Add new key to overflow */
+ kcopy = clib_mem_alloc (p->key_size);
+ clib_memcpy (kcopy, key, p->key_size);
+ hash_set_mem (p->overflow_hash, kcopy, value);
+ p->buckets[bucket] = PFHASH_BUCKET_OVERFLOW;
+ p->overflow_count++;
+ p->nitems_in_overflow++;
+ return;
+
+ case 8:
+ if (p->value_size == 4)
+ {
+ for (i = 0; i < 5; i++)
+ {
+ if (kv8->values[i] == (u32) ~ 0)
+ {
+ clib_memcpy (&kv8->kb.k_u64[i], key, 8);
+ kv8->values[i] = (u32) (u64) value;
+ return;
+ }
+ }
+ /* copy bucket contents to overflow hash tbl */
+ for (i = 0; i < 5; i++)
+ {
+ kcopy = clib_mem_alloc (p->key_size);
+ clib_memcpy (kcopy, &kv8->kb.k_u64[i], 8);
+ hash_set_mem (p->overflow_hash, kcopy, kv8->values[i]);
+ p->nitems_in_overflow++;
+ }
+ }
+ else
+ {
+ for (i = 0; i < 4; i++)
+ {
+ if (kv8v8->values[i] == (u64) ~ 0)
+ {
+ clib_memcpy (&kv8v8->kb.k_u64[i], key, 8);
+ kv8v8->values[i] = (u64) value;
+ return;
+ }
+ }
+ /* copy bucket contents to overflow hash tbl */
+ for (i = 0; i < 4; i++)
+ {
+ kcopy = clib_mem_alloc (p->key_size);
+ clib_memcpy (kcopy, &kv8v8->kb.k_u64[i], 8);
+ hash_set_mem (p->overflow_hash, kcopy, kv8v8->values[i]);
+ p->nitems_in_overflow++;
+ }
+
+ }
+ /* Add new key to overflow */
+ kcopy = clib_mem_alloc (p->key_size);
+ clib_memcpy (kcopy, key, p->key_size);
+ hash_set_mem (p->overflow_hash, kcopy, value);
+ p->buckets[bucket] = PFHASH_BUCKET_OVERFLOW;
+ p->overflow_count++;
+ p->nitems_in_overflow++;
+ return;
+
+ case 4:
+ for (i = 0; i < 8; i++)
+ {
+ if (kv4->values[i] == (u32) ~ 0)
+ {
+ clib_memcpy (&kv4->kb.kb[i], key, 4);
+ kv4->values[i] = (u32) (u64) value;
+ return;
+ }
+ }
+ /* copy bucket contents to overflow hash tbl */
+ for (i = 0; i < 8; i++)
+ {
+ kcopy = clib_mem_alloc (p->key_size);
+ clib_memcpy (kcopy, &kv4->kb.kb[i], 4);
+ hash_set_mem (p->overflow_hash, kcopy, kv4->values[i]);
+ p->nitems_in_overflow++;
+ }
+ /* Add new key to overflow */
+ kcopy = clib_mem_alloc (p->key_size);
+ clib_memcpy (kcopy, key, p->key_size);
+ hash_set_mem (p->overflow_hash, kcopy, value);
+ p->buckets[bucket] = PFHASH_BUCKET_OVERFLOW;
+ p->overflow_count++;
+ p->nitems_in_overflow++;
+ return;
+
+ default:
+ ASSERT (0);
+ }
+}
+
+void
+pfhash_unset (pfhash_t * p, u32 bucket, void *key)
+{
+ u32 bucket_contents = pfhash_read_bucket_prefetch_kv (p, bucket);
+ u32 match_index = (u32) ~ 0;
+ pfhash_kv_t *kv;
+ pfhash_kv_16_t *kv16;
+ pfhash_kv_8_t *kv8;
+ pfhash_kv_8v8_t *kv8v8;
+ pfhash_kv_4_t *kv4;
+ void *oldkey;
+
+ if (bucket_contents == PFHASH_BUCKET_OVERFLOW)
+ {
+ hash_pair_t *hp;
+ hp = hash_get_pair_mem (p->overflow_hash, key);
+ if (hp)
+ {
+ oldkey = (void *) hp->key;
+ hash_unset_mem (p->overflow_hash, key);
+ clib_mem_free (oldkey);
+ p->nitems--;
+ p->nitems_in_overflow--;
+ }
+ return;
+ }
+
+ kv = pfhash_get_internal (p, bucket_contents, key, &match_index);
+ if (match_index == (u32) ~ 0)
+ return;
+
+ p->nitems--;
+
+ kv16 = (void *) kv;
+ kv8 = (void *) kv;
+ kv8v8 = (void *) kv;
+ kv4 = (void *) kv;
+
+ switch (p->key_size)
+ {
+ case 16:
+ kv16->values[match_index] = (u32) ~ 0;
+ return;
+
+ case 8:
+ if (p->value_size == 4)
+ kv8->values[match_index] = (u32) ~ 0;
+ else
+ kv8v8->values[match_index] = (u64) ~ 0;
+ return;
+
+ case 4:
+ kv4->values[match_index] = (u32) ~ 0;
+ return;
+
+ default:
+ ASSERT (0);
+ }
+}
+
+void
+pfhash_free (pfhash_t * p)
+{
+ hash_pair_t *hp;
+ int i;
+ u8 **keys = 0;
+
+ vec_free (p->name);
+
+ pool_free (p->kvp);
+
+ /* *INDENT-OFF* */
+ hash_foreach_pair (hp, p->overflow_hash,
+ ({
+ vec_add1 (keys, (u8 *)hp->key);
+ }));
+ /* *INDENT-ON* */
+ hash_free (p->overflow_hash);
+ for (i = 0; i < vec_len (keys); i++)
+ vec_free (keys[i]);
+ vec_free (keys);
+}
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/pfhash.h b/src/vppinfra/pfhash.h
new file mode 100644
index 00000000..e054c668
--- /dev/null
+++ b/src/vppinfra/pfhash.h
@@ -0,0 +1,276 @@
+/*
+ Copyright (c) 2013 Cisco and/or its affiliates.
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+#ifndef included_clib_pfhash_h
+#define included_clib_pfhash_h
+
+
+#include <vppinfra/clib.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/pool.h>
+
+#if defined(CLIB_HAVE_VEC128) && ! defined (__ALTIVEC__)
+
+typedef struct
+{
+ /* 3 x 16 = 48 key bytes */
+ union
+ {
+ u32x4 k_u32x4[3];
+ u64 k_u64[6];
+ } kb;
+ /* 3 x 4 = 12 value bytes */
+ u32 values[3];
+ u32 pad;
+} pfhash_kv_16_t;
+
+typedef struct
+{
+ /* 5 x 8 = 40 key bytes */
+ union
+ {
+ u64 k_u64[5];
+ } kb;
+
+ /* 5 x 4 = 20 value bytes */
+ u32 values[5];
+ u32 pad;
+} pfhash_kv_8_t;
+
+typedef struct
+{
+ /* 4 x 8 = 32 key bytes */
+ union
+ {
+ u64 k_u64[4];
+ } kb;
+
+ /* 4 x 8 = 32 value bytes */
+ u64 values[4];
+} pfhash_kv_8v8_t;
+
+typedef struct
+{
+ /* 8 x 4 = 32 key bytes */
+ union
+ {
+ u32x4 k_u32x4[2];
+ u32 kb[8];
+ } kb;
+
+ /* 8 x 4 = 32 value bytes */
+ u32 values[8];
+} pfhash_kv_4_t;
+
+typedef union
+{
+ pfhash_kv_16_t kv16;
+ pfhash_kv_8_t kv8;
+ pfhash_kv_8v8_t kv8v8;
+ pfhash_kv_4_t kv4;
+} pfhash_kv_t;
+
+typedef struct
+{
+ /* Bucket vector */
+ u32 *buckets;
+#define PFHASH_BUCKET_OVERFLOW (u32)~0
+
+ /* Pool of key/value pairs */
+ pfhash_kv_t *kvp;
+
+ /* overflow plain-o-hash */
+ uword *overflow_hash;
+
+ /* Pretty-print name */
+ u8 *name;
+
+ u32 key_size;
+ u32 value_size;
+
+ u32 overflow_count;
+ u32 nitems;
+ u32 nitems_in_overflow;
+} pfhash_t;
+
+void pfhash_init (pfhash_t * p, char *name, u32 key_size, u32 value_size,
+ u32 nbuckets);
+void pfhash_free (pfhash_t * p);
+u64 pfhash_get (pfhash_t * p, u32 bucket, void *key);
+void pfhash_set (pfhash_t * p, u32 bucket, void *key, void *value);
+void pfhash_unset (pfhash_t * p, u32 bucket, void *key);
+
+format_function_t format_pfhash;
+
+static inline void
+pfhash_prefetch_bucket (pfhash_t * p, u32 bucket)
+{
+ CLIB_PREFETCH (&p->buckets[bucket], CLIB_CACHE_LINE_BYTES, LOAD);
+}
+
+static inline u32
+pfhash_read_bucket_prefetch_kv (pfhash_t * p, u32 bucket)
+{
+ u32 bucket_contents = p->buckets[bucket];
+ if (PREDICT_TRUE ((bucket_contents & PFHASH_BUCKET_OVERFLOW) == 0))
+ CLIB_PREFETCH (&p->kvp[bucket_contents], CLIB_CACHE_LINE_BYTES, LOAD);
+ return bucket_contents;
+}
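These two helpers are meant to be issued ahead of the actual search so
the bucket and kv cache lines are resident by the time they are read. A
hedged sketch of that software pipeline for 8-byte keys with 4-byte
values, using pfhash_search_kv_8 () defined below (buckets[] is assumed
to be precomputed by the caller):

    static void
    sketch_lookup_pipeline (pfhash_t * p, u32 * buckets, u64 * keys,
			    u32 * results, int n)
    {
      int i;
      u32 bucket_contents;

      for (i = 0; i < n; i++)
	{
	  /* Stage 1: warm the *next* bucket while finishing this one. */
	  if (i + 1 < n)
	    pfhash_prefetch_bucket (p, buckets[i + 1]);

	  /* Stage 2: read this bucket; prefetches its kv pool element. */
	  bucket_contents = pfhash_read_bucket_prefetch_kv (p, buckets[i]);

	  /* Stage 3: search the now (hopefully) resident kv element. */
	  results[i] = pfhash_search_kv_8 (p, bucket_contents, &keys[i]);
	}
    }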
+
+/*
+ * pfhash_search_kv_16
+ * See if the supplied 16-byte key matches one of three 16-byte (key,value) pairs.
+ * Return the indicated value, or ~0 if no match
+ *
+ * Note: including the overflow test, the fast path is 35 instrs
+ * on x86_64. Elves will steal your keyboard in the middle of the night if
+ * you "improve" it without checking the generated code!
+ */
+static inline u32
+pfhash_search_kv_16 (pfhash_t * p, u32 bucket_contents, u32x4 * key)
+{
+ u32x4 diff0, diff1, diff2;
+ u32 is_equal0, is_equal1, is_equal2;
+ u32 no_match;
+ pfhash_kv_16_t *kv;
+ u32 rv;
+
+ if (PREDICT_FALSE (bucket_contents == PFHASH_BUCKET_OVERFLOW))
+ {
+ uword *hp;
+ hp = hash_get_mem (p->overflow_hash, key);
+ if (hp)
+ return hp[0];
+ return (u32) ~ 0;
+ }
+
+ kv = &p->kvp[bucket_contents].kv16;
+
+ diff0 = u32x4_sub (kv->kb.k_u32x4[0], key[0]);
+ diff1 = u32x4_sub (kv->kb.k_u32x4[1], key[0]);
+ diff2 = u32x4_sub (kv->kb.k_u32x4[2], key[0]);
+
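+  /* A full 16-byte match makes u32x4_zero_byte_mask () return 0xffff;
+     the cast through i16 sign-extends that to an all-ones u32, so each
+     is_equalN can mask values[N] below. */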
+ no_match = is_equal0 = (i16) u32x4_zero_byte_mask (diff0);
+ is_equal1 = (i16) u32x4_zero_byte_mask (diff1);
+ no_match |= is_equal1;
+ is_equal2 = (i16) u32x4_zero_byte_mask (diff2);
+ no_match |= is_equal2;
+ /* If any of the three items matched, no_match will be zero after this line */
+ no_match = ~no_match;
+
+ rv = (is_equal0 & kv->values[0])
+ | (is_equal1 & kv->values[1]) | (is_equal2 & kv->values[2]) | no_match;
+
+ return rv;
+}
+
+static inline u32
+pfhash_search_kv_8 (pfhash_t * p, u32 bucket_contents, u64 * key)
+{
+ pfhash_kv_8_t *kv;
+ u32 rv = (u32) ~ 0;
+
+ if (PREDICT_FALSE (bucket_contents == PFHASH_BUCKET_OVERFLOW))
+ {
+ uword *hp;
+ hp = hash_get_mem (p->overflow_hash, key);
+ if (hp)
+ return hp[0];
+ return (u32) ~ 0;
+ }
+
+ kv = &p->kvp[bucket_contents].kv8;
+
+ rv = (kv->kb.k_u64[0] == key[0]) ? kv->values[0] : rv;
+ rv = (kv->kb.k_u64[1] == key[0]) ? kv->values[1] : rv;
+ rv = (kv->kb.k_u64[2] == key[0]) ? kv->values[2] : rv;
+ rv = (kv->kb.k_u64[3] == key[0]) ? kv->values[3] : rv;
+ rv = (kv->kb.k_u64[4] == key[0]) ? kv->values[4] : rv;
+
+ return rv;
+}
+
+static inline u64
+pfhash_search_kv_8v8 (pfhash_t * p, u32 bucket_contents, u64 * key)
+{
+ pfhash_kv_8v8_t *kv;
+ u64 rv = (u64) ~ 0;
+
+ if (PREDICT_FALSE (bucket_contents == PFHASH_BUCKET_OVERFLOW))
+ {
+ uword *hp;
+ hp = hash_get_mem (p->overflow_hash, key);
+ if (hp)
+ return hp[0];
+ return (u64) ~ 0;
+ }
+
+ kv = &p->kvp[bucket_contents].kv8v8;
+
+ rv = (kv->kb.k_u64[0] == key[0]) ? kv->values[0] : rv;
+ rv = (kv->kb.k_u64[1] == key[0]) ? kv->values[1] : rv;
+ rv = (kv->kb.k_u64[2] == key[0]) ? kv->values[2] : rv;
+ rv = (kv->kb.k_u64[3] == key[0]) ? kv->values[3] : rv;
+
+ return rv;
+}
+
+static inline u32
+pfhash_search_kv_4 (pfhash_t * p, u32 bucket_contents, u32 * key)
+{
+ u32x4 vector_key;
+ u32x4 is_equal[2];
+ u32 zbm[2], winner_index;
+ pfhash_kv_4_t *kv;
+
+ if (PREDICT_FALSE (bucket_contents == PFHASH_BUCKET_OVERFLOW))
+ {
+ uword *hp;
+ hp = hash_get_mem (p->overflow_hash, key);
+ if (hp)
+ return hp[0];
+ return (u32) ~ 0;
+ }
+
+ kv = &p->kvp[bucket_contents].kv4;
+
+ vector_key = u32x4_splat (key[0]);
+
+ is_equal[0] = u32x4_is_equal (kv->kb.k_u32x4[0], vector_key);
+ is_equal[1] = u32x4_is_equal (kv->kb.k_u32x4[1], vector_key);
+ zbm[0] = ~u32x4_zero_byte_mask (is_equal[0]) & 0xFFFF;
+ zbm[1] = ~u32x4_zero_byte_mask (is_equal[1]) & 0xFFFF;
+
+ if (PREDICT_FALSE ((zbm[0] == 0) && (zbm[1] == 0)))
+ return (u32) ~ 0;
+
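+  /* Each matching 32-bit lane sets four adjacent bits in zbm;
+     min_log2 () returns the highest set bit, and >> 2 turns that bit
+     position back into a lane index. */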
+ winner_index = min_log2 (zbm[0]) >> 2;
+ winner_index = zbm[1] ? (4 + (min_log2 (zbm[1]) >> 2)) : winner_index;
+
+ return kv->values[winner_index];
+}
+
+#endif /* CLIB_HAVE_VEC128 */
+
+#endif /* included_clib_pfhash_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/phash.c b/src/vppinfra/phash.c
new file mode 100644
index 00000000..14da5225
--- /dev/null
+++ b/src/vppinfra/phash.c
@@ -0,0 +1,1017 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+/* This is all stolen from Bob Jenkins and reworked for clib. Thanks
+ once again Bob for the great work. */
+
+/*
+------------------------------------------------------------------------------
+perfect.c: code to generate code for a hash for perfect hashing.
+(c) Bob Jenkins, September 1996, December 1999
+You may use this code in any way you wish, and it is free. No warranty.
+I hereby place this in the public domain.
+Source is http://burtleburtle.net/bob/c/perfect.c
+
+This generates a minimal perfect hash function. That means, given a
+set of n keys, this determines a hash function that maps each of
+those keys into a value in 0..n-1 with no collisions.
+
+The perfect hash function first uses a normal hash function on the key
+to determine (a,b) such that the pair (a,b) is distinct for all
+keys, then it computes a^scramble[tab[b]] to get the final perfect hash.
+tab[] is an array of 1-byte values and scramble[] is a 256-term array of
+2-byte or 4-byte values. If there are n keys, the length of tab[] is a
+power of two between n/3 and n.
+
+I found the idea of computing distinct (a,b) values in "Practical minimal
+perfect hash functions for large databases", Fox, Heath, Chen, and Daoud,
+Communications of the ACM, January 1992. They found the idea in Cichelli
+(CACM Jan 1980). Beyond that, our methods differ.
+
+The key is hashed to a pair (a,b) where a in 0..*alen*-1 and b in
+0..*blen*-1. A fast hash function determines both a and b
+simultaneously. Any decent hash function is likely to produce
+hashes so that (a,b) is distinct for all pairs. I try the hash
+using different values of *salt* until all pairs are distinct.
+
+The final hash is (a XOR scramble[tab[b]]). *scramble* is a
+predetermined mapping of 0..255 into 0..smax-1. *tab* is an
+array that we fill in in such a way as to make the hash perfect.
+
+First we fill in all values of *tab* that are used by more than one
+key. We try all possible values for each position until one works.
+
+This leaves m unmapped keys and m values that something could hash to.
+If you treat unmapped keys as lefthand nodes and unused hash values
+as righthand nodes, and draw a line connecting each key to each hash
+value it could map to, you get a bipartite graph. We attempt to
+find a perfect matching in this graph. If we succeed, we have
+determined a perfect hash for the whole set of keys.
+
+*scramble* is used because (a^tab[i]) clusters keys around *a*.
+------------------------------------------------------------------------------
+*/
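Before the builder itself, a hedged sketch of the lookup side that a
consumer of the finished tables would run; seed, a_shift, b_mask, tab
and scramble stand in for the parameters this file computes (the mixing
step mirrors init_keys_direct_u32 () below):

    #include <vppinfra/hash.h>	/* hash_mix32 */

    static u32
    sketch_perfect_lookup (u32 key, u32 seed, u32 a_shift, u32 b_mask,
			   u8 * tab, u32 * scramble)
    {
      u32 x, y, z, a, b;

      x = y = z = seed;
      x += key;
      hash_mix32 (x, y, z);

      a = z >> a_shift;
      b = z & b_mask;
      return a ^ scramble[tab[b]];	/* the perfect hash value */
    }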
+
+#include <vppinfra/bitmap.h>
+#include <vppinfra/format.h>
+#include <vppinfra/phash.h>
+#include <vppinfra/random.h>
+
+static void
+init_keys_direct_u32 (phash_main_t * pm)
+{
+ int n_keys_left, b_mask, a_shift;
+ u32 seed;
+ phash_key_t *k;
+
+ seed = pm->hash_seed;
+ b_mask = (1 << pm->b_bits) - 1;
+ a_shift = BITS (seed) - pm->a_bits;
+
+ k = pm->keys;
+ n_keys_left = vec_len (pm->keys);
+
+ while (n_keys_left >= 2)
+ {
+ u32 x0, y0, z0;
+ u32 x1, y1, z1;
+
+ x0 = y0 = z0 = seed;
+ x1 = y1 = z1 = seed;
+ x0 += (u32) k[0].key;
+ x1 += (u32) k[1].key;
+
+ hash_mix32 (x0, y0, z0);
+ hash_mix32 (x1, y1, z1);
+
+ k[0].b = z0 & b_mask;
+ k[1].b = z1 & b_mask;
+ k[0].a = z0 >> a_shift;
+ k[1].a = z1 >> a_shift;
+ if (PREDICT_FALSE (a_shift >= BITS (z0)))
+ k[0].a = k[1].a = 0;
+
+ k += 2;
+ n_keys_left -= 2;
+ }
+
+ if (n_keys_left >= 1)
+ {
+ u32 x0, y0, z0;
+
+ x0 = y0 = z0 = seed;
+ x0 += k[0].key;
+
+ hash_mix32 (x0, y0, z0);
+
+ k[0].b = z0 & b_mask;
+ k[0].a = z0 >> a_shift;
+ if (PREDICT_FALSE (a_shift >= BITS (z0)))
+ k[0].a = 0;
+
+ k += 1;
+ n_keys_left -= 1;
+ }
+}
+
+static void
+init_keys_direct_u64 (phash_main_t * pm)
+{
+ int n_keys_left, b_mask, a_shift;
+ u64 seed;
+ phash_key_t *k;
+
+ seed = pm->hash_seed;
+ b_mask = (1 << pm->b_bits) - 1;
+ a_shift = BITS (seed) - pm->a_bits;
+
+ k = pm->keys;
+ n_keys_left = vec_len (pm->keys);
+
+ while (n_keys_left >= 2)
+ {
+ u64 x0, y0, z0;
+ u64 x1, y1, z1;
+
+ x0 = y0 = z0 = seed;
+ x1 = y1 = z1 = seed;
+ x0 += (u64) k[0].key;
+ x1 += (u64) k[1].key;
+
+ hash_mix64 (x0, y0, z0);
+ hash_mix64 (x1, y1, z1);
+
+ k[0].b = z0 & b_mask;
+ k[1].b = z1 & b_mask;
+ k[0].a = z0 >> a_shift;
+ k[1].a = z1 >> a_shift;
+ if (PREDICT_FALSE (a_shift >= BITS (z0)))
+ k[0].a = k[1].a = 0;
+
+ k += 2;
+ n_keys_left -= 2;
+ }
+
+ if (n_keys_left >= 1)
+ {
+ u64 x0, y0, z0;
+
+ x0 = y0 = z0 = seed;
+ x0 += k[0].key;
+
+ hash_mix64 (x0, y0, z0);
+
+ k[0].b = z0 & b_mask;
+ k[0].a = z0 >> a_shift;
+ if (PREDICT_FALSE (a_shift >= BITS (z0)))
+ k[0].a = 0;
+
+ k += 1;
+ n_keys_left -= 1;
+ }
+}
+
+static void
+init_keys_indirect_u32 (phash_main_t * pm)
+{
+ int n_keys_left, b_mask, a_shift;
+ u32 seed;
+ phash_key_t *k;
+
+ seed = pm->hash_seed;
+ b_mask = (1 << pm->b_bits) - 1;
+ a_shift = BITS (seed) - pm->a_bits;
+
+ k = pm->keys;
+ n_keys_left = vec_len (pm->keys);
+
+ while (n_keys_left >= 2)
+ {
+ u32 xyz[6];
+ u32 x0, y0, z0;
+ u32 x1, y1, z1;
+
+ pm->key_seed2 (pm->private, k[0].key, k[1].key, &xyz);
+
+ x0 = y0 = z0 = seed;
+ x1 = y1 = z1 = seed;
+ x0 += xyz[0];
+ y0 += xyz[1];
+ z0 += xyz[2];
+ x1 += xyz[3];
+ y1 += xyz[4];
+ z1 += xyz[5];
+
+ hash_mix32 (x0, y0, z0);
+ hash_mix32 (x1, y1, z1);
+
+ k[0].b = z0 & b_mask;
+ k[1].b = z1 & b_mask;
+ k[0].a = z0 >> a_shift;
+ k[1].a = z1 >> a_shift;
+ if (PREDICT_FALSE (a_shift >= BITS (z0)))
+ k[0].a = k[1].a = 0;
+
+ k += 2;
+ n_keys_left -= 2;
+ }
+
+ if (n_keys_left >= 1)
+ {
+ u32 xyz[3];
+ u32 x0, y0, z0;
+
+ pm->key_seed1 (pm->private, k[0].key, &xyz);
+
+ x0 = y0 = z0 = seed;
+ x0 += xyz[0];
+ y0 += xyz[1];
+ z0 += xyz[2];
+
+ hash_mix32 (x0, y0, z0);
+
+ k[0].b = z0 & b_mask;
+ k[0].a = z0 >> a_shift;
+ if (PREDICT_FALSE (a_shift >= BITS (z0)))
+ k[0].a = 0;
+
+ k += 1;
+ n_keys_left -= 1;
+ }
+}
+
+static void
+init_keys_indirect_u64 (phash_main_t * pm)
+{
+ int n_keys_left, b_mask, a_shift;
+ u64 seed;
+ phash_key_t *k;
+
+ seed = pm->hash_seed;
+ b_mask = (1 << pm->b_bits) - 1;
+ a_shift = BITS (seed) - pm->a_bits;
+
+ k = pm->keys;
+ n_keys_left = vec_len (pm->keys);
+
+ while (n_keys_left >= 2)
+ {
+ u64 xyz[6];
+ u64 x0, y0, z0;
+ u64 x1, y1, z1;
+
+ pm->key_seed2 (pm->private, k[0].key, k[1].key, &xyz);
+
+ x0 = y0 = z0 = seed;
+ x1 = y1 = z1 = seed;
+ x0 += xyz[0];
+ y0 += xyz[1];
+ z0 += xyz[2];
+ x1 += xyz[3];
+ y1 += xyz[4];
+ z1 += xyz[5];
+
+ hash_mix64 (x0, y0, z0);
+ hash_mix64 (x1, y1, z1);
+
+ k[0].b = z0 & b_mask;
+ k[1].b = z1 & b_mask;
+ k[0].a = z0 >> a_shift;
+ k[1].a = z1 >> a_shift;
+ if (PREDICT_FALSE (a_shift >= BITS (z0)))
+ k[0].a = k[1].a = 0;
+
+ k += 2;
+ n_keys_left -= 2;
+ }
+
+ if (n_keys_left >= 1)
+ {
+ u64 xyz[3];
+ u64 x0, y0, z0;
+
+ pm->key_seed1 (pm->private, k[0].key, &xyz);
+
+ x0 = y0 = z0 = seed;
+ x0 += xyz[0];
+ y0 += xyz[1];
+ z0 += xyz[2];
+
+ hash_mix64 (x0, y0, z0);
+
+ k[0].b = z0 & b_mask;
+ k[0].a = z0 >> a_shift;
+ if (PREDICT_FALSE (a_shift >= BITS (z0)))
+ k[0].a = 0;
+
+ k += 1;
+ n_keys_left -= 1;
+ }
+}
+
+/*
+ * insert keys into table according to key->b
+ * check if the initial hash might work
+ */
+static int
+init_tabb (phash_main_t * pm)
+{
+ int no_collisions;
+ phash_tabb_t *tb;
+ phash_key_t *k, *l;
+
+ if (pm->key_seed1)
+ {
+ if (pm->flags & PHASH_FLAG_MIX64)
+ init_keys_indirect_u64 (pm);
+ else
+ init_keys_indirect_u32 (pm);
+ }
+ else
+ {
+ if (pm->flags & PHASH_FLAG_MIX64)
+ init_keys_direct_u64 (pm);
+ else
+ init_keys_direct_u32 (pm);
+ }
+
+ if (!pm->tabb)
+ vec_resize (pm->tabb, 1 << pm->b_bits);
+ else
+ vec_foreach (tb, pm->tabb) phash_tabb_free (tb);
+
+  /* Two keys with the same (a,b) guarantee a collision */
+ no_collisions = 1;
+ vec_foreach (k, pm->keys)
+ {
+ u32 i, *ki;
+
+ tb = pm->tabb + k->b;
+ ki = tb->keys;
+ for (i = 0; i < vec_len (ki); i++)
+ {
+ l = pm->keys + ki[i];
+ if (k->a == l->a)
+ {
+ /* Given keys are supposed to be unique. */
+ if (pm->key_is_equal
+ && pm->key_is_equal (pm->private, l->key, k->key))
+ clib_error ("duplicate keys");
+ no_collisions = 0;
+ goto done;
+ }
+ }
+
+ vec_add1 (tb->keys, k - pm->keys);
+ }
+
+done:
+ return no_collisions;
+}
+
+/* Try to apply an augmenting list */
+static int
+apply (phash_main_t * pm, u32 tail, u32 rollback)
+{
+ phash_key_t *k;
+ phash_tabb_t *pb;
+ phash_tabq_t *q_child, *q_parent;
+ u32 ki, i, hash, child, parent;
+ u32 stabb; /* scramble[tab[b]] */
+ int no_collision;
+
+ no_collision = 1;
+
+ /* Walk from child to parent until root is reached. */
+ for (child = tail - 1; child; child = parent)
+ {
+ q_child = &pm->tabq[child];
+ parent = q_child->parent_q;
+ q_parent = &pm->tabq[parent];
+
+ /* find parent's list of siblings */
+ ASSERT (q_parent->b_q < vec_len (pm->tabb));
+ pb = pm->tabb + q_parent->b_q;
+
+ /* erase old hash values */
+ stabb = pm->scramble[pb->val_b];
+ for (i = 0; i < vec_len (pb->keys); i++)
+ {
+ ki = pb->keys[i];
+ k = pm->keys + ki;
+ hash = k->a ^ stabb;
+
+ /* Erase hash for all of child's siblings. */
+ if (ki == pm->tabh[hash])
+ pm->tabh[hash] = ~0;
+ }
+
+ /* change pb->val_b, which will change the hashes of all parent siblings */
+ pb->val_b = rollback ? q_child->oldval_q : q_child->newval_q;
+
+ /* set new hash values */
+ stabb = pm->scramble[pb->val_b];
+ for (i = 0; i < vec_len (pb->keys); i++)
+ {
+ ki = pb->keys[i];
+ k = pm->keys + ki;
+
+ hash = k->a ^ stabb;
+ if (rollback)
+ {
+ if (parent == 0)
+ continue; /* root never had a hash */
+ }
+ else if (pm->tabh[hash] != ~0)
+ {
+ /* Very rare case: roll back any changes. */
+ apply (pm, tail, /* rollback changes */ 1);
+ no_collision = 0;
+ goto done;
+ }
+ pm->tabh[hash] = ki;
+ }
+ }
+
+done:
+ return no_collision;
+}
+
+
+/*
+-------------------------------------------------------------------------------
+augment(): Add item to the mapping.
+
+Construct a spanning tree of *b*s with *item* as root, where each
+parent can have all its hashes changed (by some new val_b) with
+at most one collision, and each child is the b of that collision.
+
+I got this from Tarjan's "Data Structures and Network Algorithms". The
+path from *item* to a *b* that can be remapped with no collision is
+an "augmenting path". Change values of tab[b] along the path so that
+the unmapped key gets mapped and the unused hash value gets used.
+
+Assuming 1 key per b, if m out of n hash values are still unused,
+you should expect the transitive closure to cover n/m nodes before
+an unused node is found. Sum(i=1..n)(n/i) is about nlogn, so expect
+this approach to take about nlogn time to map all single-key b's.
+-------------------------------------------------------------------------------
+
+high_water: a value higher than any now in tabb[].water_b.
+*/
+static int
+augment (phash_main_t * pm, u32 b_root, u32 high_water)
+{
+ u32 q; /* current position walking through the queue */
+ u32 tail; /* tail of the queue. 0 is the head of the queue. */
+ phash_tabb_t *tb_parent, *tb_child, *tb_hit;
+ phash_key_t *k_parent, *k_child;
+  u32 v, v_limit;		/* possible value for tb_parent->val_b */
+ u32 i, ki, hash;
+
+ v_limit =
+ 1 << ((pm->flags & PHASH_FLAG_USE_SCRAMBLE) ? pm->s_bits : BITS (u8));
+
+ /* Initialize the root of the spanning tree. */
+ pm->tabq[0].b_q = b_root;
+ tail = 1;
+
+ /* construct the spanning tree by walking the queue, add children to tail */
+ for (q = 0; q < tail; q++)
+ {
+ if ((pm->flags & PHASH_FLAG_FAST_MODE)
+ && !(pm->flags & PHASH_FLAG_MINIMAL) && q == 1)
+ break; /* don't do transitive closure */
+
+ tb_parent = pm->tabb + pm->tabq[q].b_q; /* the b for this node */
+
+ for (v = 0; v < v_limit; v++)
+ {
+ tb_child = 0;
+
+ for (i = 0; i < vec_len (tb_parent->keys); i++)
+ {
+ ki = tb_parent->keys[i];
+ k_parent = pm->keys + ki;
+
+ hash = k_parent->a ^ pm->scramble[v];
+ if (hash >= pm->hash_max)
+ goto try_next_v; /* hash code out of bounds => we can't use this v */
+
+ ki = pm->tabh[hash];
+ if (ki == ~0)
+ continue;
+
+ k_child = pm->keys + ki;
+ tb_hit = pm->tabb + k_child->b;
+
+ if (tb_child)
+ {
+ /* Hit at most one child b. */
+ if (tb_child == tb_hit)
+ goto try_next_v;
+ }
+ else
+ {
+ /* Remember this as child b. */
+ tb_child = tb_hit;
+ if (tb_hit->water_b == high_water)
+ goto try_next_v; /* already explored */
+ }
+ }
+
+ /* tb_parent with v has either one or zero collisions. */
+
+ /* add childb to the queue of reachable things */
+ if (tb_child)
+ tb_child->water_b = high_water;
+ pm->tabq[tail].b_q = tb_child ? tb_child - pm->tabb : ~0;
+	  pm->tabq[tail].newval_q = v;	/* how to make parent (tb_parent) use this hash */
+ pm->tabq[tail].oldval_q = tb_parent->val_b; /* need this for rollback */
+ pm->tabq[tail].parent_q = q;
+ ++tail;
+
+ /* Found a v with no collisions? */
+ if (!tb_child)
+ {
+ /* Try to apply the augmenting path. */
+ if (apply (pm, tail, /* rollback */ 0))
+ return 1; /* success, item was added to the perfect hash */
+ --tail; /* don't know how to handle such a child! */
+ }
+
+ try_next_v:
+ ;
+ }
+ }
+ return 0;
+}
+
+
+static phash_tabb_t *sort_tabb;
+
+static int
+phash_tabb_compare (void *a1, void *a2)
+{
+ u32 *b1 = a1;
+ u32 *b2 = a2;
+ phash_tabb_t *tb1, *tb2;
+
+ tb1 = sort_tabb + b1[0];
+ tb2 = sort_tabb + b2[0];
+
+ return ((int) vec_len (tb2->keys) - (int) vec_len (tb1->keys));
+}
+
+/* find a mapping that makes this a perfect hash */
+static int
+perfect (phash_main_t * pm)
+{
+ u32 i;
+
+ /* clear any state from previous attempts */
+ if (vec_bytes (pm->tabh))
+ memset (pm->tabh, ~0, vec_bytes (pm->tabh));
+
+ vec_validate (pm->tabb_sort, vec_len (pm->tabb) - 1);
+ for (i = 0; i < vec_len (pm->tabb_sort); i++)
+ pm->tabb_sort[i] = i;
+
+ sort_tabb = pm->tabb;
+
+ vec_sort_with_function (pm->tabb_sort, phash_tabb_compare);
+
+ /* In descending order by number of keys, map all *b*s */
+ for (i = 0; i < vec_len (pm->tabb_sort); i++)
+ {
+ if (!augment (pm, pm->tabb_sort[i], i + 1))
+ return 0;
+ }
+
+ /* Success! We found a perfect hash of all keys into 0..nkeys-1. */
+ return 1;
+}
+
+
+/*
+ * Find initial a_bits = log2 (a_max), b_bits = log2 (b_max).
+ * Initial a_max and b_max values were found empirically. Some factors:
+ *
+ * If s_max<256 there is no scramble, so tab[b] needs to cover 0..s_max-1.
+ *
+ * a_max and b_max must be powers of 2 because the values in 0..a_max-1 and
+ * 0..b_max-1 are produced by applying a bitmask to the initial hash function.
+ *
+ * a_max must be less than s_max, in fact less than n_keys, because otherwise
+ * there would often be no i such that a^scramble[i] is in 0..n_keys-1 for
+ * all the *a*s associated with a given *b*, so there would be no legal
+ * value to assign to tab[b]. This only matters when we're doing a minimal
+ * perfect hash.
+ *
+ * It takes around 800 trials to find distinct (a,b) with nkey=s_max*(5/8)
+ * and a_max*b_max = s_max*s_max/32.
+ *
+ * Values of b_max less than s_max/4 never work, and s_max/2 always works.
+ *
+ * We want b_max as small as possible because it is the number of bytes in
+ * the huge array we must create for the perfect hash.
+ *
+ * When nkey <= s_max*(5/8), b_max=s_max/4 works much more often with
+ * a_max=s_max/8 than with a_max=s_max/4. Above s_max*(5/8), b_max=s_max/4
+ * doesn't seem to care whether a_max=s_max/8 or a_max=s_max/4. I think it
+ * has something to do with 5/8 = 1/8 * 5. For example examine 80000,
+ * 85000, and 90000 keys with different values of a_max. This only matters
+ * if we're doing a minimal perfect hash.
+ *
+ * When a_max*b_max <= 1<<U32BITS, the initial hash must produce one integer.
+ * Bigger than that it must produce two integers, which increases the
+ * cost of the hash per character hashed.
+ */
+static void
+guess_initial_parameters (phash_main_t * pm)
+{
+ u32 s_bits, s_max, a_max, b_max, n_keys;
+ int is_minimal, is_fast_mode;
+ const u32 b_max_use_scramble_threshold = 4096;
+
+ is_minimal = (pm->flags & PHASH_FLAG_MINIMAL) != 0;
+ is_fast_mode = (pm->flags & PHASH_FLAG_FAST_MODE) != 0;
+
+ n_keys = vec_len (pm->keys);
+ s_bits = max_log2 (n_keys);
+ s_max = 1 << s_bits;
+ a_max = 0;
+
+ if (is_minimal)
+ {
+ switch (s_bits)
+ {
+ case 0:
+ a_max = 1;
+	  b_max = 1;
+	  break;		/* falling through would zero a_max/b_max */
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ case 8:
+	  /*
+	   * Was: a_max = is_minimal ? s_max / 2 : s_max;
+	   * Since is_minimal is known to be true on this path, the
+	   * true arm of the ternary was always taken.
+	   */
+ a_max = s_max / 2;
+ b_max = s_max / 2;
+ break;
+ case 9:
+ case 10:
+ case 11:
+ case 12:
+ case 13:
+ case 14:
+ case 15:
+ case 16:
+ case 17:
+ if (is_fast_mode)
+ {
+ a_max = s_max / 2;
+ b_max = s_max / 4;
+ }
+ else if (s_max / 4 < b_max_use_scramble_threshold)
+ {
+ if (n_keys <= s_max * 0.52)
+ a_max = b_max = s_max / 8;
+ else
+ a_max = b_max = s_max / 4;
+ }
+ else
+ {
+ a_max = ((n_keys <= s_max * (5.0 / 8.0)) ? s_max / 8 :
+ (n_keys <=
+ s_max * (3.0 / 4.0)) ? s_max / 4 : s_max / 2);
+ b_max = s_max / 4; /* always give the small size a shot */
+ }
+ break;
+ case 18:
+ if (is_fast_mode)
+ a_max = b_max = s_max / 2;
+ else
+ {
+ a_max = s_max / 8; /* never require the multiword hash */
+ b_max = (n_keys <= s_max * (5.0 / 8.0)) ? s_max / 4 : s_max / 2;
+ }
+ break;
+ case 19:
+ case 20:
+ a_max = (n_keys <= s_max * (5.0 / 8.0)) ? s_max / 8 : s_max / 2;
+ b_max = (n_keys <= s_max * (5.0 / 8.0)) ? s_max / 4 : s_max / 2;
+ break;
+ default:
+	  /* Just find a hash as quickly as possible.
+	     We'll be thrashing virtual memory at this size. */
+ a_max = b_max = s_max / 2;
+ break;
+ }
+ }
+ else
+ {
+ /* Non-minimal perfect hash. */
+ if (is_fast_mode && n_keys > s_max * 0.8)
+ {
+ s_max *= 2;
+ s_bits += 1;
+ }
+
+ if (s_max / 4 <= (1 << 14))
+ b_max = ((n_keys <= s_max * 0.56) ? s_max / 32 :
+ (n_keys <= s_max * 0.74) ? s_max / 16 : s_max / 8);
+ else
+ b_max = ((n_keys <= s_max * 0.6) ? s_max / 16 :
+ (n_keys <= s_max * 0.8) ? s_max / 8 : s_max / 4);
+
+ if (is_fast_mode && b_max < s_max / 8)
+ b_max = s_max / 8;
+
+ if (a_max < 1)
+ a_max = 1;
+ if (b_max < 1)
+ b_max = 1;
+ }
+
+ ASSERT (s_max == (1 << s_bits));
+ ASSERT (is_pow2 (a_max));
+ ASSERT (is_pow2 (b_max));
+ pm->s_bits = s_bits;
+ pm->a_bits = min_log2 (a_max);
+ pm->b_bits = min_log2 (b_max);
+ if (b_max >= b_max_use_scramble_threshold)
+ pm->flags |= PHASH_FLAG_USE_SCRAMBLE;
+}
+
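+/*
+ * Worked example (illustrative): for a minimal, non-fast-mode hash of
+ * n_keys = 1000, s_bits = max_log2 (1000) = 10 and s_max = 1024.
+ * Since s_max / 4 = 256 is below the scramble threshold and
+ * 1000 > 1024 * 0.52, the case 9..17 arm chooses
+ * a_max = b_max = s_max / 4 = 256, i.e. a_bits = b_bits = 8, and
+ * PHASH_FLAG_USE_SCRAMBLE stays clear.
+ */
+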
+/* compute p(x), where p is a permutation of 0..(1<<nbits)-1 */
+/* permute(0)=0. This is intended and useful. */
+always_inline u32
+scramble_permute (u32 x, u32 nbits)
+{
+ int i;
+ int mask = (1 << nbits) - 1;
+ int const2 = 1 + nbits / 2;
+ int const3 = 1 + nbits / 3;
+ int const4 = 1 + nbits / 4;
+ int const5 = 1 + nbits / 5;
+ for (i = 0; i < 20; i++)
+ {
+ x = (x + (x << const2)) & mask;
+ x = (x ^ (x >> const3));
+ x = (x + (x << const4)) & mask;
+ x = (x ^ (x >> const5));
+ }
+ return x;
+}
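+
+/*
+ * Note: each round above is invertible: x += x << c (mod 2^nbits) is
+ * multiplication by the odd constant 1 + 2^c, and x ^= x >> c is a
+ * bijection, so the composition really is a permutation of
+ * 0..(1<<nbits)-1.
+ */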
+
+/* initialize scramble[] with distinct random values in 0..smax-1 */
+static void
+scramble_init (phash_main_t * pm)
+{
+ u32 i;
+
+ /* fill scramble[] with distinct random integers in 0..smax-1 */
+ vec_validate (pm->scramble, (1 << (pm->s_bits < 8 ? 8 : pm->s_bits)) - 1);
+ for (i = 0; i < vec_len (pm->scramble); i++)
+ pm->scramble[i] = scramble_permute (i, pm->s_bits);
+}
+
+/* Try to find a perfect hash function. */
+clib_error_t *
+phash_find_perfect_hash (phash_main_t * pm)
+{
+ clib_error_t *error = 0;
+ u32 max_a_bits, n_tries_this_a_b, want_minimal;
+
+ /* guess initial values for s_max, a_max and b_max */
+ guess_initial_parameters (pm);
+
+ want_minimal = pm->flags & PHASH_FLAG_MINIMAL;
+
+new_s:
+ if (pm->b_bits == 0)
+ pm->a_bits = pm->s_bits;
+
+ max_a_bits = pm->s_bits - want_minimal;
+ if (max_a_bits < 1)
+ max_a_bits = 1;
+
+ pm->hash_max = want_minimal ? vec_len (pm->keys) : (1 << pm->s_bits);
+
+ scramble_init (pm);
+
+ /* Allocate working memory. */
+ vec_free (pm->tabh);
+ vec_validate_init_empty (pm->tabh, pm->hash_max - 1, ~0);
+ vec_free (pm->tabq);
+ vec_validate (pm->tabq, 1 << pm->b_bits);
+
+ /* Actually find the perfect hash */
+ n_tries_this_a_b = 0;
+ while (1)
+ {
+ /* Choose random hash seeds until keys become unique. */
+ pm->hash_seed = random_u64 (&pm->random_seed);
+ pm->n_seed_trials++;
+ if (init_tabb (pm))
+ {
+ /* Found unique (A, B). */
+
+ /* Hash may already be perfect. */
+ if (pm->b_bits == 0)
+ goto done;
+
+ pm->n_perfect_calls++;
+ if (perfect (pm))
+ goto done;
+
+ goto increase_b;
+ }
+
+ /* Keep trying with different seed value. */
+ n_tries_this_a_b++;
+ if (n_tries_this_a_b < 2048)
+ continue;
+
+ /* Try to put more bits in (A,B) to make distinct (A,B) more likely */
+ if (pm->a_bits < max_a_bits)
+ pm->a_bits++;
+ else if (pm->b_bits < pm->s_bits)
+ {
+ increase_b:
+ vec_resize (pm->tabb, vec_len (pm->tabb));
+ vec_resize (pm->tabq, vec_len (pm->tabq));
+ pm->b_bits++;
+ }
+ else
+ {
+ /* Can't increase (A, B) any more, so try increasing S. */
+ goto new_s;
+ }
+ }
+
+done:
+ /* Construct mapping table for hash lookups. */
+ if (!error)
+ {
+ u32 b, v;
+
+ pm->a_shift = ((pm->flags & PHASH_FLAG_MIX64) ? 64 : 32) - pm->a_bits;
+ pm->b_mask = (1 << pm->b_bits) - 1;
+
+ vec_resize (pm->tab, vec_len (pm->tabb));
+ for (b = 0; b < vec_len (pm->tabb); b++)
+ {
+ v = pm->tabb[b].val_b;
+
+	  /* Fold the scramble into tab[] now when b_bits is small enough;
+	     otherwise scramble[] is applied at lookup time. */
+ if (!(pm->flags & PHASH_FLAG_USE_SCRAMBLE))
+ v = pm->scramble[v];
+
+ pm->tab[b] = v;
+ }
+ }
+
+ /* Free working memory. */
+ phash_main_free_working_memory (pm);
+
+ return error;
+}
+
+/* Slow hash computation for general keys. */
+uword
+phash_hash_slow (phash_main_t * pm, uword key)
+{
+ u32 a, b, v;
+
+ if (pm->flags & PHASH_FLAG_MIX64)
+ {
+ u64 x0, y0, z0;
+
+ x0 = y0 = z0 = pm->hash_seed;
+
+ if (pm->key_seed1)
+ {
+ u64 xyz[3];
+ pm->key_seed1 (pm->private, key, &xyz);
+ x0 += xyz[0];
+ y0 += xyz[1];
+ z0 += xyz[2];
+ }
+ else
+ x0 += key;
+
+ hash_mix64 (x0, y0, z0);
+
+ a = z0 >> pm->a_shift;
+ b = z0 & pm->b_mask;
+ }
+ else
+ {
+ u32 x0, y0, z0;
+
+ x0 = y0 = z0 = pm->hash_seed;
+
+ if (pm->key_seed1)
+ {
+ u32 xyz[3];
+ pm->key_seed1 (pm->private, key, &xyz);
+ x0 += xyz[0];
+ y0 += xyz[1];
+ z0 += xyz[2];
+ }
+ else
+ x0 += key;
+
+ hash_mix32 (x0, y0, z0);
+
+ a = z0 >> pm->a_shift;
+ b = z0 & pm->b_mask;
+ }
+
+ v = pm->tab[b];
+ if (pm->flags & PHASH_FLAG_USE_SCRAMBLE)
+ v = pm->scramble[v];
+ return a ^ v;
+}
+
+/* Verify that perfect hash is perfect. */
+clib_error_t *
+phash_validate (phash_main_t * pm)
+{
+ phash_key_t *k;
+ uword *unique_bitmap = 0;
+ clib_error_t *error = 0;
+
+ vec_foreach (k, pm->keys)
+ {
+ uword h = phash_hash_slow (pm, k->key);
+
+ if (h >= pm->hash_max)
+ {
+ error = clib_error_return (0, "hash out of range %wd", h);
+ goto done;
+ }
+
+ if (clib_bitmap_get (unique_bitmap, h))
+ {
+ error = clib_error_return (0, "hash non-unique");
+ goto done;
+ }
+
+ unique_bitmap = clib_bitmap_ori (unique_bitmap, h);
+ }
+
+done:
+ clib_bitmap_free (unique_bitmap);
+ return error;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/phash.h b/src/vppinfra/phash.h
new file mode 100644
index 00000000..746a0fdd
--- /dev/null
+++ b/src/vppinfra/phash.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_phash_h
+#define included_phash_h
+
+#include <vppinfra/hash.h> /* for Bob's mixing functions */
+
+typedef struct
+{
+  /* May be either a pointer to a vector or an inline word. */
+ uword key;
+
+ /* Hash code (A, B). */
+ u32 a, b;
+} phash_key_t;
+
+/* Table indexed by B. */
+typedef struct
+{
+ /* Vector of key indices with this same value of B. */
+ u32 *keys;
+
+ /* hash=a^tabb[b].val_b */
+ u32 val_b;
+
+ /* High watermark of who has visited this map node. */
+ u32 water_b;
+} phash_tabb_t;
+
+always_inline void
+phash_tabb_free (phash_tabb_t * b)
+{
+ vec_free (b->keys);
+ b->val_b = b->water_b = 0;
+}
+
+typedef struct
+{
+ /* b that currently occupies this hash */
+ u32 b_q;
+
+ /* Queue position of parent that could use this hash. */
+ u32 parent_q;
+
+ /* What to change parent tab[b] to use this hash. */
+ u32 newval_q;
+
+ /* Original value of tab[b]. */
+ u32 oldval_q;
+} phash_tabq_t;
+
+typedef struct
+{
+ u8 a_bits, b_bits, s_bits, a_shift;
+ u32 b_mask;
+ u32 *tab;
+ u32 *scramble;
+
+ /* Seed value for hash mixer. */
+ u64 hash_seed;
+
+ u32 flags;
+
+ /* Key functions want 64 bit keys.
+ Use hash_mix64 rather than hash_mix32. */
+#define PHASH_FLAG_MIX64 (1 << 0)
+#define PHASH_FLAG_MIX32 (0 << 0)
+
+ /* When b_bits is large enough (>= 12) we scramble. */
+#define PHASH_FLAG_USE_SCRAMBLE (1 << 1)
+
+ /* Slow mode gives smaller tables but at the expense of more run time. */
+#define PHASH_FLAG_SLOW_MODE (0 << 2)
+#define PHASH_FLAG_FAST_MODE (1 << 2)
+
+ /* Generate minimal perfect hash instead of perfect hash. */
+#define PHASH_FLAG_NON_MINIMAL (0 << 3)
+#define PHASH_FLAG_MINIMAL (1 << 3)
+
+ /* vec_len (keys) for minimal hash;
+ 1 << s_bits for non-minimal hash. */
+ u32 hash_max;
+
+ /* Vector of keys. */
+ phash_key_t *keys;
+
+ /* Used by callbacks to identify keys. */
+ void *private;
+
+ /* Key comparison callback. */
+ int (*key_is_equal) (void *private, uword key1, uword key2);
+
+ /* Callback to reduce single key -> hash seeds. */
+ void (*key_seed1) (void *private, uword key, void *seed);
+
+  /* Callback to reduce two keys -> hash seeds. */
+ void (*key_seed2) (void *private, uword key1, uword key2, void *seed);
+
+ /* Stuff used to compute perfect hash. */
+ u32 random_seed;
+
+ /* Stuff indexed by B. */
+ phash_tabb_t *tabb;
+
+ /* Table of B ordered by number of keys in tabb[b]. */
+ u32 *tabb_sort;
+
+ /* Unique key (or ~0 if none) for a given hash
+ H = A ^ scramble[tab[B].val_b]. */
+ u32 *tabh;
+
+ /* Stuff indexed by q. */
+ phash_tabq_t *tabq;
+
+ /* Stats. */
+ u32 n_seed_trials, n_perfect_calls;
+} phash_main_t;
+
+always_inline void
+phash_main_free_working_memory (phash_main_t * pm)
+{
+ vec_free (pm->tabb);
+ vec_free (pm->tabq);
+ vec_free (pm->tabh);
+ vec_free (pm->tabb_sort);
+ if (!(pm->flags & PHASH_FLAG_USE_SCRAMBLE))
+ vec_free (pm->scramble);
+}
+
+always_inline void
+phash_main_free (phash_main_t * pm)
+{
+ phash_main_free_working_memory (pm);
+ vec_free (pm->tab);
+ vec_free (pm->keys);
+ memset (pm, 0, sizeof (pm[0]));
+}
+
+/* Slow hash computation for general keys. */
+uword phash_hash_slow (phash_main_t * pm, uword key);
+
+/* Main routine to compute perfect hash. */
+clib_error_t *phash_find_perfect_hash (phash_main_t * pm);
+
+/* Validates that hash is indeed perfect. */
+clib_error_t *phash_validate (phash_main_t * pm);
+
+/* Unit test. */
+int phash_test_main (unformat_input_t * input);
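+
+/*
+ * Usage sketch (illustrative; assumes inline uword keys, for which
+ * phash_hash_slow mixes the key value directly and the seed callbacks
+ * may be left null):
+ *
+ *   phash_main_t pm = { 0 };
+ *   phash_key_t *k;
+ *   uword i;
+ *
+ *   for (i = 0; i < 100; i++)
+ *     {
+ *       vec_add2 (pm.keys, k, 1);
+ *       k->key = 3 * i + 1;   // keys must be distinct
+ *     }
+ *   pm.flags = PHASH_FLAG_MINIMAL;
+ *
+ *   if (phash_find_perfect_hash (&pm) == 0)   // 0 means success
+ *     {
+ *       // phash_hash_slow (&pm, key) now maps each key to a distinct
+ *       // slot in 0..vec_len (pm.keys)-1; phash_validate (&pm)
+ *       // double-checks this.
+ *     }
+ *   phash_main_free (&pm);
+ */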
+
+#endif /* included_phash_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/pipeline.h b/src/vppinfra/pipeline.h
new file mode 100644
index 00000000..5a9799b4
--- /dev/null
+++ b/src/vppinfra/pipeline.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * pipeline.h: software pipeline infrastructure
+ *
+ * Copyright (c) 2010 Eliot Dresselhaus
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef included_clib_pipeline_h
+#define included_clib_pipeline_h
+
+#define clib_pipeline_stage(F,TYPE,ARG,I,BODY) \
+ always_inline void F##_inline (void * _, u32 I) \
+ { TYPE ARG = _; { BODY; } } \
+ never_inline void F##_no_inline (TYPE ARG, u32 I) \
+ { F##_inline (ARG, I); }
+
+#define clib_pipeline_stage_static(F,TYPE,ARG,I,BODY) \
+ static_always_inline void F##_inline (void * _, u32 I) \
+ { TYPE ARG = _; { BODY; } } \
+ never_inline void F##_no_inline (TYPE ARG, u32 I) \
+ { F##_inline (ARG, I); }
+
+#define clib_pipeline_stage_no_inline(F,TYPE,ARG,I,BODY) \
+ never_inline void F##_no_inline (void * _, u32 I) \
+ { TYPE ARG = _; { BODY; } } \
+ never_inline void F##_inline (TYPE ARG, u32 I) \
+ { F##_no_inline (ARG, I); }
+
+#define _clib_pipeline_var(v) _clib_pipeline_##v
+
+#define clib_pipeline_stage_execute(F,A,I,S) \
+ F##_##S (A, _clib_pipeline_var(i) - (I))
+
+#define clib_pipeline_main_stage(F,A,I) \
+ clib_pipeline_stage_execute (F, A, I, inline)
+#define clib_pipeline_init_stage(F,A,I) \
+ if (_clib_pipeline_var(i) >= (I)) clib_pipeline_stage_execute (F, A, I, no_inline)
+#define clib_pipeline_exit_stage(F,A,I) \
+ if (_clib_pipeline_var(i) >= (I) && _clib_pipeline_var(i) - (I) < _clib_pipeline_var(n_vectors)) \
+ clib_pipeline_stage_execute (F, A, I, no_inline)
+
+#define clib_pipeline_init_loop \
+ for (_clib_pipeline_var(i) = 0; \
+ _clib_pipeline_var(i) < \
+ clib_min (_clib_pipeline_var(n_stages) - 1, \
+ _clib_pipeline_var(n_vectors)); \
+ _clib_pipeline_var(i)++)
+
+#define clib_pipeline_main_loop \
+ for (; _clib_pipeline_var(i) < _clib_pipeline_var(n_vectors); \
+ _clib_pipeline_var(i)++)
+
+#define clib_pipeline_exit_loop \
+ for (; _clib_pipeline_var(i) < (_clib_pipeline_var(n_vectors) \
+ + _clib_pipeline_var(n_stages) - 1); \
+ _clib_pipeline_var(i)++)
+
+#define clib_pipeline_run_2_stage(N,ARG,STAGE0,STAGE1) \
+do { \
+ uword _clib_pipeline_var(n_vectors) = (N); \
+ uword _clib_pipeline_var(n_stages) = 2; \
+ uword _clib_pipeline_var(i); \
+ \
+ clib_pipeline_init_loop \
+ { \
+ clib_pipeline_init_stage (STAGE0, ARG, 0); \
+ } \
+ \
+ clib_pipeline_main_loop \
+ { \
+ clib_pipeline_main_stage (STAGE0, ARG, 0); \
+ clib_pipeline_main_stage (STAGE1, ARG, 1); \
+ } \
+ \
+ clib_pipeline_exit_loop \
+ { \
+ clib_pipeline_exit_stage (STAGE1, ARG, 1); \
+ } \
+} while (0)
+
+#define clib_pipeline_run_3_stage(N,ARG,STAGE0,STAGE1,STAGE2) \
+do { \
+ uword _clib_pipeline_var(n_vectors) = (N); \
+ uword _clib_pipeline_var(n_stages) = 3; \
+ uword _clib_pipeline_var(i); \
+ \
+ clib_pipeline_init_loop \
+ { \
+ clib_pipeline_init_stage (STAGE0, ARG, 0); \
+ clib_pipeline_init_stage (STAGE1, ARG, 1); \
+ } \
+ \
+ clib_pipeline_main_loop \
+ { \
+ clib_pipeline_main_stage (STAGE0, ARG, 0); \
+ clib_pipeline_main_stage (STAGE1, ARG, 1); \
+ clib_pipeline_main_stage (STAGE2, ARG, 2); \
+ } \
+ \
+ clib_pipeline_exit_loop \
+ { \
+ clib_pipeline_exit_stage (STAGE1, ARG, 1); \
+ clib_pipeline_exit_stage (STAGE2, ARG, 2); \
+ } \
+} while (0)
+
+#define clib_pipeline_run_4_stage(N,ARG,STAGE0,STAGE1,STAGE2,STAGE3) \
+do { \
+ uword _clib_pipeline_var(n_vectors) = (N); \
+ uword _clib_pipeline_var(n_stages) = 4; \
+ uword _clib_pipeline_var(i); \
+ \
+ clib_pipeline_init_loop \
+ { \
+ clib_pipeline_init_stage (STAGE0, ARG, 0); \
+ clib_pipeline_init_stage (STAGE1, ARG, 1); \
+ clib_pipeline_init_stage (STAGE2, ARG, 2); \
+ } \
+ \
+ clib_pipeline_main_loop \
+ { \
+ clib_pipeline_main_stage (STAGE0, ARG, 0); \
+ clib_pipeline_main_stage (STAGE1, ARG, 1); \
+ clib_pipeline_main_stage (STAGE2, ARG, 2); \
+ clib_pipeline_main_stage (STAGE3, ARG, 3); \
+ } \
+ \
+ clib_pipeline_exit_loop \
+ { \
+ clib_pipeline_exit_stage (STAGE1, ARG, 1); \
+ clib_pipeline_exit_stage (STAGE2, ARG, 2); \
+ clib_pipeline_exit_stage (STAGE3, ARG, 3); \
+ } \
+} while (0)
+
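+/*
+ * Usage sketch (illustrative; my_ctx_t and the stage bodies are
+ * hypothetical): process a vector of u32s in a two-stage software
+ * pipeline, prefetching in stage 0 while stage 1 works on the element
+ * prefetched on the previous iteration.
+ *
+ *   typedef struct { u32 *data; } my_ctx_t;
+ *
+ *   clib_pipeline_stage (my_prefetch, my_ctx_t *, ctx, i,
+ *   {
+ *     CLIB_PREFETCH (ctx->data + i, CLIB_CACHE_LINE_BYTES, LOAD);
+ *   });
+ *
+ *   clib_pipeline_stage (my_process, my_ctx_t *, ctx, i,
+ *   {
+ *     ctx->data[i] += 1;
+ *   });
+ *
+ *   // then, with ctx.data holding n elements:
+ *   clib_pipeline_run_2_stage (n, &ctx, my_prefetch, my_process);
+ */
+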
+#endif /* included_clib_pipeline_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/pool.c b/src/vppinfra/pool.c
new file mode 100644
index 00000000..ed83b41a
--- /dev/null
+++ b/src/vppinfra/pool.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003, 2004 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/pool.h>
+
+void
+_pool_init_fixed (void **pool_ptr, u32 elt_size, u32 max_elts)
+{
+ u8 *mmap_base;
+ u64 vector_size;
+ u64 free_index_size;
+ u64 total_size;
+ u64 page_size;
+ pool_header_t *fh;
+ vec_header_t *vh;
+ u8 *v;
+ u32 *fi;
+ u32 i;
+ u32 set_bits;
+
+ ASSERT (elt_size);
+ ASSERT (max_elts);
+
+ vector_size = pool_aligned_header_bytes + vec_header_bytes (0)
+ + (u64) elt_size *max_elts;
+
+ free_index_size = vec_header_bytes (0) + sizeof (u32) * max_elts;
+
+ /* Round up to a cache line boundary */
+ vector_size = (vector_size + CLIB_CACHE_LINE_BYTES - 1)
+ & ~(CLIB_CACHE_LINE_BYTES - 1);
+
+ free_index_size = (free_index_size + CLIB_CACHE_LINE_BYTES - 1)
+ & ~(CLIB_CACHE_LINE_BYTES - 1);
+
+ total_size = vector_size + free_index_size;
+
+ /* Round up to an even number of pages */
+ page_size = clib_mem_get_page_size ();
+ total_size = (total_size + page_size - 1) & ~(page_size - 1);
+
+ /* mmap demand zero memory */
+
+ mmap_base = mmap (0, total_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+  if (mmap_base == MAP_FAILED)
+    {
+      clib_unix_warning ("mmap");
+      *pool_ptr = 0;
+      return;	/* the code below would dereference MAP_FAILED */
+    }
+
+ /* First comes the pool header */
+ fh = (pool_header_t *) mmap_base;
+ /* Find the user vector pointer */
+ v = (u8 *) (mmap_base + pool_aligned_header_bytes);
+ /* Finally, the vector header */
+ vh = _vec_find (v);
+
+ fh->free_bitmap = 0; /* No free elts (yet) */
+ fh->max_elts = max_elts;
+ fh->mmap_base = mmap_base;
+ fh->mmap_size = total_size;
+
+ vh->len = max_elts;
+
+ /* Build the free-index vector */
+ vh = (vec_header_t *) (v + vector_size);
+ vh->len = max_elts;
+ fi = (u32 *) (vh + 1);
+
+ fh->free_indices = fi;
+
+ /* Set the entire free bitmap */
+ clib_bitmap_alloc (fh->free_bitmap, max_elts);
+ memset (fh->free_bitmap, 0xff, vec_len (fh->free_bitmap) * sizeof (uword));
+
+ /* Clear any extraneous set bits */
+ set_bits = vec_len (fh->free_bitmap) * BITS (uword);
+
+ for (i = max_elts; i < set_bits; i++)
+ fh->free_bitmap = clib_bitmap_set (fh->free_bitmap, i, 0);
+
+ /* Create the initial free vector */
+ for (i = 0; i < max_elts; i++)
+ fi[i] = (max_elts - 1) - i;
+
+ *pool_ptr = v;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/pool.h b/src/vppinfra/pool.h
new file mode 100644
index 00000000..62d5b54e
--- /dev/null
+++ b/src/vppinfra/pool.h
@@ -0,0 +1,519 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003, 2004 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+/** @file
+ * @brief Fixed length block allocator.
+ Pools are built from clib vectors and bitmaps. Use pools when
+ repeatedly allocating and freeing fixed-size data. Pools are
+ fast, and avoid memory fragmentation.
+ */
+
+#ifndef included_pool_h
+#define included_pool_h
+
+#include <vppinfra/bitmap.h>
+#include <vppinfra/error.h>
+#include <vppinfra/mheap.h>
+
+
+typedef struct
+{
+ /** Bitmap of indices of free objects. */
+ uword *free_bitmap;
+
+ /** Vector of free indices. One element for each set bit in bitmap. */
+ u32 *free_indices;
+
+ /* The following fields are set for fixed-size, preallocated pools */
+
+ /** Maximum size of the pool, in elements */
+ u32 max_elts;
+
+ /** mmap segment info: base + length */
+ u8 *mmap_base;
+ u64 mmap_size;
+
+} pool_header_t;
+
+/** Align pool header so that pointers are naturally aligned. */
+#define pool_aligned_header_bytes \
+ vec_aligned_header_bytes (sizeof (pool_header_t), sizeof (void *))
+
+/** Get pool header from user pool pointer */
+always_inline pool_header_t *
+pool_header (void *v)
+{
+ return vec_aligned_header (v, sizeof (pool_header_t), sizeof (void *));
+}
+
+extern void _pool_init_fixed (void **, u32, u32);
+extern void fpool_free (void *);
+
+/** initialize a fixed-size, preallocated pool */
+#define pool_init_fixed(pool,max_elts) \
+{ \
+ _pool_init_fixed((void **)&(pool),sizeof(pool[0]),max_elts); \
+}
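+
+/*
+ * Example (illustrative; session_t is a hypothetical application type):
+ *
+ *   session_t *sessions = 0;
+ *   pool_init_fixed (sessions, 1024);
+ *   // pool_get () will now never move the pool, and warns (then calls
+ *   // os_out_of_memory()) once all 1024 elements are in use.
+ */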
+
+/** Validate a pool */
+always_inline void
+pool_validate (void *v)
+{
+ pool_header_t *p = pool_header (v);
+ uword i, n_free_bitmap;
+
+ if (!v)
+ return;
+
+ n_free_bitmap = clib_bitmap_count_set_bits (p->free_bitmap);
+ ASSERT (n_free_bitmap == vec_len (p->free_indices));
+ for (i = 0; i < vec_len (p->free_indices); i++)
+ ASSERT (clib_bitmap_get (p->free_bitmap, p->free_indices[i]) == 1);
+}
+
+always_inline void
+pool_header_validate_index (void *v, uword index)
+{
+ pool_header_t *p = pool_header (v);
+
+ if (v)
+ vec_validate (p->free_bitmap, index / BITS (uword));
+}
+
+#define pool_validate_index(v,i) \
+do { \
+ uword __pool_validate_index = (i); \
+ vec_validate_ha ((v), __pool_validate_index, \
+ pool_aligned_header_bytes, /* align */ 0); \
+ pool_header_validate_index ((v), __pool_validate_index); \
+} while (0)
+
+/** Number of active elements in a pool.
+ * @return Number of active elements in a pool
+ */
+always_inline uword
+pool_elts (void *v)
+{
+ uword ret = vec_len (v);
+ if (v)
+ ret -= vec_len (pool_header (v)->free_indices);
+ return ret;
+}
+
+/** Number of elements in pool vector.
+
+ @note You probably want to call pool_elts() instead.
+*/
+#define pool_len(p) vec_len(p)
+
+/** Number of elements in pool vector (usable as an lvalue)
+
+ @note You probably don't want to use this macro.
+*/
+#define _pool_len(p) _vec_len(p)
+
+/** Memory usage of pool header. */
+always_inline uword
+pool_header_bytes (void *v)
+{
+ pool_header_t *p = pool_header (v);
+
+ if (!v)
+ return 0;
+
+ return vec_bytes (p->free_bitmap) + vec_bytes (p->free_indices);
+}
+
+/** Memory usage of pool. */
+#define pool_bytes(P) (vec_bytes (P) + pool_header_bytes (P))
+
+/** Local variable naming macro. */
+#define _pool_var(v) _pool_##v
+
+/** Number of free elements in pool: free-list entries plus unused
+    capacity at the end of the pool vector. */
+always_inline uword
+pool_free_elts (void *v)
+{
+ pool_header_t *p = pool_header (v);
+ uword n_free = 0;
+
+ if (v)
+ {
+ n_free += vec_len (p->free_indices);
+
+ /* Space left at end of vector? */
+ n_free += vec_capacity (v, sizeof (p[0])) - vec_len (v);
+ }
+
+ return n_free;
+}
+
+/** Allocate an object E from a pool P (general version).
+
+ First search free list. If nothing is free extend vector of objects.
+*/
+#define pool_get_aligned(P,E,A) \
+do { \
+ pool_header_t * _pool_var (p) = pool_header (P); \
+ uword _pool_var (l); \
+ \
+ _pool_var (l) = 0; \
+ if (P) \
+ _pool_var (l) = vec_len (_pool_var (p)->free_indices); \
+ \
+ if (_pool_var (l) > 0) \
+ { \
+ /* Return free element from free list. */ \
+ uword _pool_var (i) = _pool_var (p)->free_indices[_pool_var (l) - 1]; \
+ (E) = (P) + _pool_var (i); \
+ _pool_var (p)->free_bitmap = \
+ clib_bitmap_andnoti (_pool_var (p)->free_bitmap, _pool_var (i)); \
+ _vec_len (_pool_var (p)->free_indices) = _pool_var (l) - 1; \
+ } \
+ else \
+ { \
+ /* fixed-size, preallocated pools cannot expand */ \
+ if ((P) && _pool_var(p)->max_elts) \
+ { \
+ clib_warning ("can't expand fixed-size pool"); \
+ os_out_of_memory(); \
+ } \
+ /* Nothing on free list, make a new element and return it. */ \
+ P = _vec_resize (P, \
+ /* length_increment */ 1, \
+ /* new size */ (vec_len (P) + 1) * sizeof (P[0]), \
+ pool_aligned_header_bytes, \
+ /* align */ (A)); \
+ E = vec_end (P) - 1; \
+ } \
+} while (0)
+
+/** Allocate an object E from a pool P (unspecified alignment). */
+#define pool_get(P,E) pool_get_aligned(P,E,0)
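+
+/*
+ * Usage sketch (illustrative; my_elt_t is a hypothetical type):
+ *
+ *   my_elt_t *pool = 0, *e;
+ *
+ *   pool_get (pool, e);            // allocate
+ *   e->value = 42;
+ *   u32 index = e - pool;          // indices are stable while allocated
+ *   ...
+ *   pool_put_index (pool, index);  // free (see pool_put_index below)
+ */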
+
+/** See if pool_get will expand the pool or not */
+#define pool_get_aligned_will_expand(P,YESNO,A) \
+do { \
+ pool_header_t * _pool_var (p) = pool_header (P); \
+ uword _pool_var (l); \
+ \
+ _pool_var (l) = 0; \
+ if (P) \
+ { \
+      /* fixed-size pools never expand */ \
+      if (_pool_var (p)->max_elts) \
+        { \
+          YESNO = 0; \
+          break; \
+        } \
+ _pool_var (l) = vec_len (_pool_var (p)->free_indices); \
+ } \
+ \
+ /* Free elements, certainly won't expand */ \
+ if (_pool_var (l) > 0) \
+ YESNO=0; \
+ else \
+ { \
+ /* Nothing on free list, make a new element and return it. */ \
+ YESNO = _vec_resize_will_expand \
+ (P, \
+ /* length_increment */ 1, \
+ /* new size */ (vec_len (P) + 1) * sizeof (P[0]), \
+ pool_aligned_header_bytes, \
+ /* align */ (A)); \
+ } \
+} while (0)
+
+#define pool_get_will_expand(P,YESNO) pool_get_aligned_will_expand(P,YESNO,0)
+
+/** Use free bitmap to query whether given element is free. */
+#define pool_is_free(P,E) \
+({ \
+ pool_header_t * _pool_var (p) = pool_header (P); \
+ uword _pool_var (i) = (E) - (P); \
+  (_pool_var (i) < vec_len (P)) ? clib_bitmap_get (_pool_var (p)->free_bitmap, _pool_var (i)) : 1; \
+})
+
+/** Use free bitmap to query whether given index is free */
+#define pool_is_free_index(P,I) pool_is_free((P),(P)+(I))
+
+/** Free an object E in pool P. */
+#define pool_put(P,E) \
+do { \
+ pool_header_t * _pool_var (p) = pool_header (P); \
+ uword _pool_var (l) = (E) - (P); \
+ ASSERT (vec_is_member (P, E)); \
+ ASSERT (! pool_is_free (P, E)); \
+ \
+ /* Add element to free bitmap and to free list. */ \
+ _pool_var (p)->free_bitmap = \
+ clib_bitmap_ori (_pool_var (p)->free_bitmap, _pool_var (l)); \
+ /* Preallocated pool? */ \
+ if (_pool_var (p)->max_elts) \
+ { \
+ ASSERT(_pool_var(l) < _pool_var (p)->max_elts); \
+ _pool_var(p)->free_indices[_vec_len(_pool_var(p)->free_indices)] = \
+ _pool_var(l); \
+ _vec_len(_pool_var(p)->free_indices) += 1; \
+ } \
+ else \
+ vec_add1 (_pool_var (p)->free_indices, _pool_var (l)); \
+} while (0)
+
+/** Free pool element with given index. */
+#define pool_put_index(p,i) \
+do { \
+ typeof (p) _e = (p) + (i); \
+ pool_put (p, _e); \
+} while (0)
+
+/** Allocate N more free elements to pool (general version). */
+#define pool_alloc_aligned(P,N,A) \
+do { \
+ pool_header_t * _p; \
+ \
+ if ((P)) \
+ { \
+ _p = pool_header (P); \
+ if (_p->max_elts) \
+ { \
+ clib_warning ("Can't expand fixed-size pool"); \
+ os_out_of_memory(); \
+ } \
+ } \
+ \
+ (P) = _vec_resize ((P), 0, (vec_len (P) + (N)) * sizeof (P[0]), \
+ pool_aligned_header_bytes, \
+ (A)); \
+ _p = pool_header (P); \
+ vec_resize (_p->free_indices, (N)); \
+ _vec_len (_p->free_indices) -= (N); \
+} while (0)
+
+/** Allocate N more free elements to pool (unspecified alignment). */
+#define pool_alloc(P,N) pool_alloc_aligned(P,N,0)
+
+/** Low-level free pool operator (do not call directly). */
+always_inline void *
+_pool_free (void *v)
+{
+ pool_header_t *p = pool_header (v);
+ if (!v)
+ return v;
+ clib_bitmap_free (p->free_bitmap);
+
+ if (p->max_elts)
+ {
+ int rv;
+
+ rv = munmap (p->mmap_base, p->mmap_size);
+ if (rv)
+ clib_unix_warning ("munmap");
+ }
+ else
+ {
+ vec_free (p->free_indices);
+ vec_free_h (v, pool_aligned_header_bytes);
+ }
+ return 0;
+}
+
+/** Free a pool. */
+#define pool_free(p) (p) = _pool_free(p)
+
+/** Optimized iteration through pool.
+
+ @param LO pointer to first element in chunk
+ @param HI pointer to last element in chunk
+ @param POOL pool to iterate across
+ @param BODY operation to perform
+
+   Optimized version which assumes that BODY is smart enough to
+   process multiple (LO,HI) chunks. See also pool_foreach().
+ */
+#define pool_foreach_region(LO,HI,POOL,BODY) \
+do { \
+ uword _pool_var (i), _pool_var (lo), _pool_var (hi), _pool_var (len); \
+ uword _pool_var (bl), * _pool_var (b); \
+ pool_header_t * _pool_var (p); \
+ \
+ _pool_var (p) = pool_header (POOL); \
+ _pool_var (b) = (POOL) ? _pool_var (p)->free_bitmap : 0; \
+ _pool_var (bl) = vec_len (_pool_var (b)); \
+ _pool_var (len) = vec_len (POOL); \
+ _pool_var (lo) = 0; \
+ \
+ for (_pool_var (i) = 0; \
+ _pool_var (i) <= _pool_var (bl); \
+ _pool_var (i)++) \
+ { \
+ uword _pool_var (m), _pool_var (f); \
+ _pool_var (m) = (_pool_var (i) < _pool_var (bl) \
+ ? _pool_var (b) [_pool_var (i)] \
+ : 1); \
+ while (_pool_var (m) != 0) \
+ { \
+ _pool_var (f) = first_set (_pool_var (m)); \
+ _pool_var (hi) = (_pool_var (i) * BITS (_pool_var (b)[0]) \
+ + min_log2 (_pool_var (f))); \
+ _pool_var (hi) = (_pool_var (i) < _pool_var (bl) \
+ ? _pool_var (hi) : _pool_var (len)); \
+ _pool_var (m) ^= _pool_var (f); \
+ if (_pool_var (hi) > _pool_var (lo)) \
+ { \
+ (LO) = _pool_var (lo); \
+ (HI) = _pool_var (hi); \
+ do { BODY; } while (0); \
+ } \
+ _pool_var (lo) = _pool_var (hi) + 1; \
+ } \
+ } \
+} while (0)
+
+/** Iterate through pool.
+
+ @param VAR A variable of same type as pool vector to be used as an
+ iterator.
+ @param POOL The pool to iterate across.
+ @param BODY The operation to perform, typically a code block. See
+ the example below.
+
+ This macro will call @c BODY with each active pool element.
+
+ It is a bad idea to allocate or free pool element from within
+ @c pool_foreach. Build a vector of indices and dispose of them later.
+ Or call pool_flush.
+
+
+ @par Example
+ @code{.c}
+ proc_t *procs; // a pool of processes.
+ proc_t *proc; // pointer to one process; used as the iterator.
+
+ pool_foreach (proc, procs, ({
+ if (proc->state != PROC_STATE_RUNNING)
+ continue;
+
+ // check a running proc in some way
+ ...
+ }));
+ @endcode
+
+ @warning Because @c pool_foreach is a macro, syntax errors can be
+ difficult to find inside @c BODY, let alone actual code bugs. One
+ can temporarily split a complex @c pool_foreach into a trivial
+ @c pool_foreach which builds a vector of active indices, and a
+ vec_foreach() (or plain for-loop) to walk the active index vector.
+ */
+#define pool_foreach(VAR,POOL,BODY) \
+do { \
+ uword _pool_foreach_lo, _pool_foreach_hi; \
+ pool_foreach_region (_pool_foreach_lo, _pool_foreach_hi, (POOL), \
+ ({ \
+ for ((VAR) = (POOL) + _pool_foreach_lo; \
+ (VAR) < (POOL) + _pool_foreach_hi; \
+ (VAR)++) \
+ do { BODY; } while (0); \
+ })); \
+} while (0)
+
+/** Returns pointer to element at given index.
+
+ ASSERTs that the supplied index is valid.
+ Even though one can write correct code of the form
+ @code
+ p = pool_base + index;
+ @endcode
+ use of @c pool_elt_at_index is strongly suggested.
+ */
+#define pool_elt_at_index(p,i) \
+({ \
+ typeof (p) _e = (p) + (i); \
+ ASSERT (! pool_is_free (p, _e)); \
+ _e; \
+})
+
+/** Return next occupied pool index after @c i, useful for safe iteration. */
+#define pool_next_index(P,I) \
+({ \
+ pool_header_t * _pool_var (p) = pool_header (P); \
+ uword _pool_var (rv) = (I) + 1; \
+ \
+ _pool_var(rv) = \
+ (_pool_var (rv) < vec_len (P) ? \
+ clib_bitmap_next_clear (_pool_var (p)->free_bitmap, _pool_var(rv)) \
+ : ~0); \
+ _pool_var(rv); \
+})
+
+/** Iterate pool by index. */
+#define pool_foreach_index(i,v,body) \
+ for ((i) = 0; (i) < vec_len (v); (i)++) \
+ { \
+ if (! pool_is_free_index ((v), (i))) \
+ do { body; } while (0); \
+ }
+
+/**
+ * @brief Remove all elements from a pool in a safe way
+ *
+ * @param VAR each element in the pool
+ * @param POOL The pool to flush
+ * @param BODY The actions to perform on each element before it is returned to
+ * the pool. i.e. before it is 'freed'
+ */
+#define pool_flush(VAR, POOL, BODY) \
+{ \
+ uword *_pool_var(ii), *_pool_var(dv) = NULL; \
+ \
+ pool_foreach((VAR), (POOL), \
+ ({ \
+ vec_add1(_pool_var(dv), (VAR) - (POOL)); \
+ })); \
+ vec_foreach(_pool_var(ii), _pool_var(dv)) \
+ { \
+ (VAR) = pool_elt_at_index((POOL), *_pool_var(ii)); \
+ do { BODY; } while (0); \
+ pool_put((POOL), (VAR)); \
+ } \
+ vec_free(_pool_var(dv)); \
+}
+
+#endif /* included_pool_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/ptclosure.c b/src/vppinfra/ptclosure.c
new file mode 100644
index 00000000..cda873ef
--- /dev/null
+++ b/src/vppinfra/ptclosure.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vppinfra/ptclosure.h>
+
+u8 **
+clib_ptclosure_alloc (int n)
+{
+ u8 **rv = 0;
+ u8 *row;
+ int i;
+
+ ASSERT (n > 0);
+
+ vec_validate (rv, n - 1);
+ for (i = 0; i < n; i++)
+ {
+ row = 0;
+ vec_validate (row, n - 1);
+
+ rv[i] = row;
+ }
+ return rv;
+}
+
+void
+clib_ptclosure_free (u8 ** ptc)
+{
+ u8 *row;
+ int n = vec_len (ptc);
+ int i;
+
+ ASSERT (n > 0);
+
+ for (i = 0; i < n; i++)
+ {
+ row = ptc[i];
+ vec_free (row);
+ }
+ vec_free (ptc);
+}
+
+void
+clib_ptclosure_copy (u8 ** dst, u8 ** src)
+{
+ int i, n;
+ u8 *src_row, *dst_row;
+
+ n = vec_len (dst);
+
+ for (i = 0; i < vec_len (dst); i++)
+ {
+ src_row = src[i];
+ dst_row = dst[i];
+ clib_memcpy (dst_row, src_row, n);
+ }
+}
+
+/*
+ * compute the positive transitive closure
+ * of a relation via Warshall's algorithm.
+ *
+ * Ref:
+ * Warshall, Stephen (January 1962). "A theorem on Boolean matrices".
+ * Journal of the ACM 9 (1): 11–12.
+ *
+ * foo[i][j] = 1 means that item i
+ * "bears the relation" to item j.
+ *
+ * For example: "item i must be before item j"
+ *
+ * You could use a bitmap, but since the algorithm is
+ * O(n**3) in the first place, large N is inadvisable...
+ *
+ */
+
+u8 **
+clib_ptclosure (u8 ** orig)
+{
+ int i, j, k;
+ int n;
+ u8 **prev, **cur;
+
+ n = vec_len (orig);
+ prev = clib_ptclosure_alloc (n);
+ cur = clib_ptclosure_alloc (n);
+
+ clib_ptclosure_copy (prev, orig);
+
+ for (k = 0; k < n; k++)
+ {
+ for (i = 0; i < n; i++)
+ {
+ for (j = 0; j < n; j++)
+ {
+ cur[i][j] = prev[i][j] || (prev[i][k] && prev[k][j]);
+ }
+ }
+ clib_ptclosure_copy (prev, cur);
+ }
+ clib_ptclosure_free (prev);
+ return cur;
+}
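+
+/*
+ * Example (illustrative): with n = 3 and the relation "must precede",
+ *
+ *   u8 **r = clib_ptclosure_alloc (3);
+ *   r[0][1] = 1;                 // 0 before 1
+ *   r[1][2] = 1;                 // 1 before 2
+ *   u8 **closure = clib_ptclosure (r);
+ *   // closure[0][2] == 1: the 0 -> 2 ordering is implied
+ *   clib_ptclosure_free (r);
+ *   clib_ptclosure_free (closure);
+ */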
+
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/ptclosure.h b/src/vppinfra/ptclosure.h
new file mode 100644
index 00000000..ee1609a1
--- /dev/null
+++ b/src/vppinfra/ptclosure.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef included_clib_ptclosure_h
+#define included_clib_ptclosure_h
+
+#include <vppinfra/vec.h>
+#include <vppinfra/format.h>
+#include <vppinfra/error.h>
+
+/*
+ * set r[i][j] if item i "bears the relation to" item j
+ *
+ */
+
+u8 **clib_ptclosure_alloc (int n);
+void clib_ptclosure_free (u8 ** ptc);
+void clib_ptclosure_copy (u8 ** dst, u8 ** src);
+u8 **clib_ptclosure (u8 ** orig);
+
+#endif /* included_clib_ptclosure_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/qhash.c b/src/vppinfra/qhash.c
new file mode 100644
index 00000000..f4e38c4a
--- /dev/null
+++ b/src/vppinfra/qhash.c
@@ -0,0 +1,858 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2006 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/qhash.h>
+
+#define QHASH_ALL_VALID ((1 << QHASH_KEYS_PER_BUCKET) - 1)
+
+void *
+_qhash_resize (void *v, uword length, uword elt_bytes)
+{
+ qhash_t *h;
+ uword l;
+
+ l = clib_max (max_log2 (length), 2 + QHASH_LOG2_KEYS_PER_BUCKET);
+
+ /* Round up if less than 1/2 full. */
+ l += ((f64) length / (f64) (1 << l)) < .5;
+
+ v = _vec_resize (0, 1 << l, elt_bytes << l, sizeof (h[0]),
+ /* align */ sizeof (uword));
+
+ h = qhash_header (v);
+ h->n_elts = 0;
+ h->log2_hash_size = l;
+ h->hash_keys =
+ clib_mem_alloc_aligned_no_fail (sizeof (h->hash_keys[0]) << l,
+ CLIB_CACHE_LINE_BYTES);
+ vec_resize (h->hash_key_valid_bitmap,
+ 1 << (l - QHASH_LOG2_KEYS_PER_BUCKET));
+ memset (v, ~0, elt_bytes << l);
+
+ return v;
+}
+
+static u8 min_log2_table[256];
+
+static inline uword
+qhash_min_log2 (uword x)
+{
+ ASSERT (is_pow2 (x));
+ ASSERT (x < 256);
+ return min_log2_table[x];
+}
+
+static void
+qhash_min_log2_init ()
+{
+ int i;
+ for (i = 0; i < 256; i++)
+ min_log2_table[i] = min_log2 (i);
+}
+
+always_inline uword
+qhash_get_valid_elt_mask (qhash_t * h, uword i)
+{
+ return h->hash_key_valid_bitmap[i / QHASH_KEYS_PER_BUCKET];
+}
+
+always_inline void
+qhash_set_valid_elt_mask (qhash_t * h, uword i, uword mask)
+{
+ h->hash_key_valid_bitmap[i / QHASH_KEYS_PER_BUCKET] = mask;
+}
+
+always_inline uword
+qhash_search_bucket (uword * hash_keys, uword search_key, uword m)
+{
+ uword t;
+#define _(i) ((hash_keys[i] == search_key) << i)
+ t = (_(0) | _(1) | _(2) | _(3));
+ if (QHASH_KEYS_PER_BUCKET > 4)
+ t |= (_(4) | _(5) | _(6) | _(7));
+ if (QHASH_KEYS_PER_BUCKET > 8)
+ t |= (_(8) | _(9) | _(10) | _(11) | _(12) | _(13) | _(14) | _(15));
+#undef _
+ return m & t;
+}
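+
+/*
+ * Example (illustrative, 4 keys per bucket): with hash_keys =
+ * { 7, 9, 7, 3 }, search_key = 7 and valid mask m = 0xb (slots 0, 1
+ * and 3 valid), t = 0x5 (slots 0 and 2 hold a 7) and the result
+ * m & t = 0x1: only the valid slot 0 counts as a match.
+ */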
+
+/* Lookup multiple keys in the same hash table. */
+void
+qhash_get_multiple (void *v,
+ uword * search_keys,
+ uword n_search_keys, u32 * result_indices)
+{
+ qhash_t *h = qhash_header (v);
+ uword *k, *hash_keys;
+ uword n_left, bucket_mask;
+ u32 *r;
+
+ if (!v)
+ {
+ memset (result_indices, ~0, sizeof (result_indices[0]) * n_search_keys);
+ return;
+ }
+
+ bucket_mask = pow2_mask (h->log2_hash_size) & ~(QHASH_KEYS_PER_BUCKET - 1);
+
+ k = search_keys;
+ n_left = n_search_keys;
+ hash_keys = h->hash_keys;
+ r = result_indices;
+
+ while (n_left >= 2)
+ {
+ u32 a0, b0, c0, bi0, valid0, match0;
+ u32 a1, b1, c1, bi1, valid1, match1;
+ uword k0, k1, *h0, *h1;
+
+ k0 = k[0];
+ k1 = k[1];
+ n_left -= 2;
+ k += 2;
+
+ a0 = a1 = h->hash_seeds[0];
+ b0 = b1 = h->hash_seeds[1];
+ c0 = c1 = h->hash_seeds[2];
+ a0 ^= k0;
+ a1 ^= k1;
+#if uword_bits == 64
+ b0 ^= k0 >> 32;
+ b1 ^= k1 >> 32;
+#endif
+
+ hash_mix32_step_1 (a0, b0, c0);
+ hash_mix32_step_1 (a1, b1, c1);
+ hash_mix32_step_2 (a0, b0, c0);
+ hash_mix32_step_2 (a1, b1, c1);
+ hash_mix32_step_3 (a0, b0, c0);
+ hash_mix32_step_3 (a1, b1, c1);
+
+ bi0 = c0 & bucket_mask;
+ bi1 = c1 & bucket_mask;
+
+ h0 = hash_keys + bi0;
+ h1 = hash_keys + bi1;
+
+ /* Search two buckets. */
+ valid0 = qhash_get_valid_elt_mask (h, bi0);
+ valid1 = qhash_get_valid_elt_mask (h, bi1);
+
+ match0 = qhash_search_bucket (h0, k0, valid0);
+ match1 = qhash_search_bucket (h1, k1, valid1);
+
+ bi0 += qhash_min_log2 (match0);
+ bi1 += qhash_min_log2 (match1);
+
+ r[0] = match0 ? bi0 : ~0;
+ r[1] = match1 ? bi1 : ~0;
+ r += 2;
+
+ /* Full buckets trigger search of overflow hash. */
+ if (PREDICT_FALSE (!match0 && valid0 == QHASH_ALL_VALID))
+ {
+ uword *p = hash_get (h->overflow_hash, k0);
+ r[-2] = p ? p[0] : ~0;
+ }
+
+ /* Full buckets trigger search of overflow hash. */
+ if (PREDICT_FALSE (!match1 && valid1 == QHASH_ALL_VALID))
+ {
+ uword *p = hash_get (h->overflow_hash, k1);
+ r[-1] = p ? p[0] : ~0;
+ }
+ }
+
+ while (n_left >= 1)
+ {
+ u32 a0, b0, c0, bi0, valid0, match0;
+ uword k0, *h0;
+
+ k0 = k[0];
+ n_left -= 1;
+ k += 1;
+
+ a0 = h->hash_seeds[0];
+ b0 = h->hash_seeds[1];
+ c0 = h->hash_seeds[2];
+ a0 ^= k0;
+#if uword_bits == 64
+ b0 ^= k0 >> 32;
+#endif
+
+ hash_mix32 (a0, b0, c0);
+
+ bi0 = c0 & bucket_mask;
+
+ h0 = hash_keys + bi0;
+
+ /* Search one bucket. */
+ valid0 = qhash_get_valid_elt_mask (h, bi0);
+ match0 = qhash_search_bucket (h0, k0, valid0);
+
+ bi0 += qhash_min_log2 (match0);
+
+ r[0] = match0 ? bi0 : ~0;
+ r += 1;
+
+ /* Full buckets trigger search of overflow hash. */
+ if (PREDICT_FALSE (!match0 && valid0 == QHASH_ALL_VALID))
+ {
+ uword *p = hash_get (h->overflow_hash, k0);
+ r[-1] = p ? p[0] : ~0;
+ }
+ }
+}
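+
+/*
+ * Usage sketch (illustrative), given a qhash vector v populated
+ * elsewhere:
+ *
+ *   uword keys[2] = { k0, k1 };
+ *   u32 results[2];
+ *
+ *   qhash_get_multiple (v, keys, 2, results);
+ *   // results[i] is the element index for keys[i], or ~0 if absent
+ */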
+
+/* Lookup multiple keys in the same hash table.
+ Returns index of first matching key. */
+u32
+qhash_get_first_match (void *v,
+ uword * search_keys,
+ uword n_search_keys, uword * matching_key)
+{
+ qhash_t *h = qhash_header (v);
+ uword *k, *hash_keys;
+ uword n_left, match_mask, bucket_mask;
+
+ if (!v)
+ return ~0;
+
+ match_mask = 0;
+ bucket_mask = pow2_mask (h->log2_hash_size) & ~(QHASH_KEYS_PER_BUCKET - 1);
+
+ k = search_keys;
+ n_left = n_search_keys;
+ hash_keys = h->hash_keys;
+ while (n_left >= 2)
+ {
+ u32 a0, b0, c0, bi0, valid0;
+ u32 a1, b1, c1, bi1, valid1;
+ uword k0, k1, *h0, *h1;
+
+ k0 = k[0];
+ k1 = k[1];
+ n_left -= 2;
+ k += 2;
+
+ a0 = a1 = h->hash_seeds[0];
+ b0 = b1 = h->hash_seeds[1];
+ c0 = c1 = h->hash_seeds[2];
+ a0 ^= k0;
+ a1 ^= k1;
+#if uword_bits == 64
+ b0 ^= k0 >> 32;
+ b1 ^= k1 >> 32;
+#endif
+
+ hash_mix32_step_1 (a0, b0, c0);
+ hash_mix32_step_1 (a1, b1, c1);
+ hash_mix32_step_2 (a0, b0, c0);
+ hash_mix32_step_2 (a1, b1, c1);
+ hash_mix32_step_3 (a0, b0, c0);
+ hash_mix32_step_3 (a1, b1, c1);
+
+ bi0 = c0 & bucket_mask;
+ bi1 = c1 & bucket_mask;
+
+ h0 = hash_keys + bi0;
+ h1 = hash_keys + bi1;
+
+ /* Search two buckets. */
+ valid0 = qhash_get_valid_elt_mask (h, bi0);
+ valid1 = qhash_get_valid_elt_mask (h, bi1);
+ match_mask = qhash_search_bucket (h0, k0, valid0);
+ match_mask |= (qhash_search_bucket (h1, k1, valid1)
+ << QHASH_KEYS_PER_BUCKET);
+ if (match_mask)
+ {
+ uword bi, is_match1;
+
+ bi = qhash_min_log2 (match_mask);
+ is_match1 = bi >= QHASH_KEYS_PER_BUCKET;
+
+ bi += ((is_match1 ? bi1 : bi0)
+ - (is_match1 << QHASH_LOG2_KEYS_PER_BUCKET));
+ *matching_key = (k - 2 - search_keys) + is_match1;
+ return bi;
+ }
+
+ /* Full buckets trigger search of overflow hash. */
+ if (PREDICT_FALSE (valid0 == QHASH_ALL_VALID
+ || valid1 == QHASH_ALL_VALID))
+ {
+ uword *p = 0;
+ uword ki = k - 2 - search_keys;
+
+ if (valid0 == QHASH_ALL_VALID)
+ p = hash_get (h->overflow_hash, k0);
+
+ if (!p && valid1 == QHASH_ALL_VALID)
+ {
+ p = hash_get (h->overflow_hash, k1);
+ ki++;
+ }
+
+ if (p)
+ {
+ *matching_key = ki;
+ return p[0];
+ }
+ }
+ }
+
+ while (n_left >= 1)
+ {
+ u32 a0, b0, c0, bi0, valid0;
+ uword k0, *h0;
+
+ k0 = k[0];
+ n_left -= 1;
+ k += 1;
+
+ a0 = h->hash_seeds[0];
+ b0 = h->hash_seeds[1];
+ c0 = h->hash_seeds[2];
+ a0 ^= k0;
+#if uword_bits == 64
+ b0 ^= k0 >> 32;
+#endif
+
+ hash_mix32 (a0, b0, c0);
+
+ bi0 = c0 & bucket_mask;
+
+ h0 = hash_keys + bi0;
+
+ /* Search one bucket. */
+ valid0 = qhash_get_valid_elt_mask (h, bi0);
+ match_mask = qhash_search_bucket (h0, k0, valid0);
+ if (match_mask)
+ {
+ uword bi;
+ bi = bi0 + qhash_min_log2 (match_mask);
+ *matching_key = (k - 1 - search_keys);
+ return bi;
+ }
+
+ /* Full buckets trigger search of overflow hash. */
+ if (PREDICT_FALSE (valid0 == QHASH_ALL_VALID))
+ {
+ uword *p = hash_get (h->overflow_hash, k0);
+ if (p)
+ {
+ *matching_key = (k - 1 - search_keys);
+ return p[0];
+ }
+ }
+ }
+
+ return ~0;
+}
+
+static void *
+qhash_set_overflow (void *v, uword elt_bytes,
+ uword key, uword bi, uword * n_elts, u32 * result)
+{
+ qhash_t *h = qhash_header (v);
+ uword *p = hash_get (h->overflow_hash, key);
+ uword i;
+
+ bi /= QHASH_KEYS_PER_BUCKET;
+
+ if (p)
+ i = p[0];
+ else
+ {
+ uword l = vec_len (h->overflow_free_indices);
+ if (l > 0)
+ {
+ i = h->overflow_free_indices[l - 1];
+ _vec_len (h->overflow_free_indices) = l - 1;
+ }
+ else
+ i = (1 << h->log2_hash_size) + hash_elts (h->overflow_hash);
+ hash_set (h->overflow_hash, key, i);
+ vec_validate (h->overflow_counts, bi);
+ h->overflow_counts[bi] += 1;
+ *n_elts += 1;
+
+ l = vec_len (v);
+ if (i >= l)
+ {
+ uword dl = round_pow2 (1 + i - l, 8);
+ v = _vec_resize (v, dl, (l + dl) * elt_bytes, sizeof (h[0]),
+ /* align */ sizeof (uword));
+ memset (v + l * elt_bytes, ~0, dl * elt_bytes);
+ }
+ }
+
+ *result = i;
+
+ return v;
+}
+
+static uword
+qhash_unset_overflow (void *v, uword key, uword bi, uword * n_elts)
+{
+ qhash_t *h = qhash_header (v);
+ uword *p = hash_get (h->overflow_hash, key);
+ uword result;
+
+ bi /= QHASH_KEYS_PER_BUCKET;
+
+ if (p)
+ {
+ result = p[0];
+ hash_unset (h->overflow_hash, key);
+ ASSERT (bi < vec_len (h->overflow_counts));
+ ASSERT (h->overflow_counts[bi] > 0);
+ ASSERT (*n_elts > 0);
+ vec_add1 (h->overflow_free_indices, result);
+ h->overflow_counts[bi] -= 1;
+ *n_elts -= 1;
+ }
+ else
+ result = ~0;
+
+ return result;
+}
+
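+/* Return a one-bit mask selecting the first free (invalid) slot of a
+   bucket.  The I argument (a preferred start offset) is currently
+   unused. */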
+always_inline uword
+qhash_find_free (uword i, uword valid_mask)
+{
+ return first_set (~valid_mask & pow2_mask (QHASH_KEYS_PER_BUCKET));
+}
+
+void *
+_qhash_set_multiple (void *v,
+ uword elt_bytes,
+ uword * search_keys,
+ uword n_search_keys, u32 * result_indices)
+{
+ qhash_t *h = qhash_header (v);
+ uword *k, *hash_keys;
+ uword n_left, n_elts, bucket_mask;
+ u32 *r;
+
+ if (vec_len (v) < n_search_keys)
+ v = _qhash_resize (v, n_search_keys, elt_bytes);
+
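+  /* One-time lazy init: min_log2_table[] is all zeros until filled in,
+     so qhash_min_log2 (2) != 1 exactly once. */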
+ if (qhash_min_log2 (2) != 1)
+ {
+ qhash_min_log2_init ();
+ ASSERT (qhash_min_log2 (2) == 1);
+ }
+
+ ASSERT (v != 0);
+
+ bucket_mask = pow2_mask (h->log2_hash_size) & ~(QHASH_KEYS_PER_BUCKET - 1);
+
+ hash_keys = h->hash_keys;
+ k = search_keys;
+ r = result_indices;
+ n_left = n_search_keys;
+ n_elts = h->n_elts;
+
+ while (n_left >= 2)
+ {
+ u32 a0, b0, c0, bi0, match0, valid0, free0;
+ u32 a1, b1, c1, bi1, match1, valid1, free1;
+ uword k0, *h0;
+ uword k1, *h1;
+
+ k0 = k[0];
+ k1 = k[1];
+
+ /* Keys must be unique. */
+ ASSERT (k0 != k1);
+
+ n_left -= 2;
+ k += 2;
+
+ a0 = a1 = h->hash_seeds[0];
+ b0 = b1 = h->hash_seeds[1];
+ c0 = c1 = h->hash_seeds[2];
+ a0 ^= k0;
+ a1 ^= k1;
+#if uword_bits == 64
+ b0 ^= k0 >> 32;
+ b1 ^= k1 >> 32;
+#endif
+
+ hash_mix32_step_1 (a0, b0, c0);
+ hash_mix32_step_1 (a1, b1, c1);
+ hash_mix32_step_2 (a0, b0, c0);
+ hash_mix32_step_2 (a1, b1, c1);
+ hash_mix32_step_3 (a0, b0, c0);
+ hash_mix32_step_3 (a1, b1, c1);
+
+ bi0 = c0 & bucket_mask;
+ bi1 = c1 & bucket_mask;
+
+ h0 = hash_keys + bi0;
+ h1 = hash_keys + bi1;
+
+ /* Search two buckets. */
+ valid0 = qhash_get_valid_elt_mask (h, bi0);
+ valid1 = qhash_get_valid_elt_mask (h, bi1);
+
+ match0 = qhash_search_bucket (h0, k0, valid0);
+ match1 = qhash_search_bucket (h1, k1, valid1);
+
+ /* Find first free element starting at hash offset into bucket. */
+ free0 = qhash_find_free (c0 & (QHASH_KEYS_PER_BUCKET - 1), valid0);
+
+ valid1 = valid1 | (bi0 == bi1 ? free0 : 0);
+ free1 = qhash_find_free (c1 & (QHASH_KEYS_PER_BUCKET - 1), valid1);
+
+ n_elts += (match0 == 0) + (match1 == 0);
+
+ match0 = match0 ? match0 : free0;
+ match1 = match1 ? match1 : free1;
+
+ valid0 |= match0;
+ valid1 |= match1;
+
+ h0 += qhash_min_log2 (match0);
+ h1 += qhash_min_log2 (match1);
+
+ if (PREDICT_FALSE (!match0 || !match1))
+ goto slow_path2;
+
+ h0[0] = k0;
+ h1[0] = k1;
+ r[0] = h0 - hash_keys;
+ r[1] = h1 - hash_keys;
+ r += 2;
+ qhash_set_valid_elt_mask (h, bi0, valid0);
+ qhash_set_valid_elt_mask (h, bi1, valid1);
+ continue;
+
+ slow_path2:
+ if (!match0)
+ {
+ n_elts -= 1;
+ v = qhash_set_overflow (v, elt_bytes, k0, bi0, &n_elts, &r[0]);
+ }
+ else
+ {
+ h0[0] = k0;
+ r[0] = h0 - hash_keys;
+ qhash_set_valid_elt_mask (h, bi0, valid0);
+ }
+ if (!match1)
+ {
+ n_elts -= 1;
+ v = qhash_set_overflow (v, elt_bytes, k1, bi1, &n_elts, &r[1]);
+ }
+ else
+ {
+ h1[0] = k1;
+ r[1] = h1 - hash_keys;
+ qhash_set_valid_elt_mask (h, bi1, valid1);
+ }
+ r += 2;
+ }
+
+ while (n_left >= 1)
+ {
+ u32 a0, b0, c0, bi0, match0, valid0, free0;
+ uword k0, *h0;
+
+ k0 = k[0];
+ n_left -= 1;
+ k += 1;
+
+ a0 = h->hash_seeds[0];
+ b0 = h->hash_seeds[1];
+ c0 = h->hash_seeds[2];
+ a0 ^= k0;
+#if uword_bits == 64
+ b0 ^= k0 >> 32;
+#endif
+
+ hash_mix32 (a0, b0, c0);
+
+ bi0 = c0 & bucket_mask;
+
+ h0 = hash_keys + bi0;
+
+ valid0 = qhash_get_valid_elt_mask (h, bi0);
+
+ /* Find first free element starting at hash offset into bucket. */
+ free0 = qhash_find_free (c0 & (QHASH_KEYS_PER_BUCKET - 1), valid0);
+
+ match0 = qhash_search_bucket (h0, k0, valid0);
+
+ n_elts += (match0 == 0);
+
+ match0 = match0 ? match0 : free0;
+
+ valid0 |= match0;
+
+ h0 += qhash_min_log2 (match0);
+
+ if (PREDICT_FALSE (!match0))
+ goto slow_path1;
+
+ h0[0] = k0;
+ r[0] = h0 - hash_keys;
+ r += 1;
+ qhash_set_valid_elt_mask (h, bi0, valid0);
+ continue;
+
+ slow_path1:
+ n_elts -= 1;
+ v = qhash_set_overflow (v, elt_bytes, k0, bi0, &n_elts, &r[0]);
+ r += 1;
+ }
+
+ h = qhash_header (v);
+ h->n_elts = n_elts;
+
+ return v;
+}
+
+static uword
+unset_slow_path (void *v, uword elt_bytes,
+ uword k0, uword bi0, uword valid0, uword match0,
+ uword * n_elts)
+{
+ qhash_t *h = qhash_header (v);
+ uword i, j = 0, k, l, t = ~0;
+ hash_pair_t *p, *found;
+
+ if (!match0)
+ {
+ if (valid0 == QHASH_ALL_VALID)
+ t = qhash_unset_overflow (v, k0, bi0, n_elts);
+ return t;
+ }
+
+ i = bi0 / QHASH_KEYS_PER_BUCKET;
+ t = bi0 + qhash_min_log2 (match0);
+
+ if (valid0 == QHASH_ALL_VALID
+ && i < vec_len (h->overflow_counts) && h->overflow_counts[i] > 0)
+ {
+ found = 0;
+ /* *INDENT-OFF* */
+ hash_foreach_pair (p, h->overflow_hash, ({
+ j = qhash_hash_mix (h, p->key) / QHASH_KEYS_PER_BUCKET;
+ if (j == i)
+ {
+ found = p;
+ break;
+ }
+ }));
+ /* *INDENT-ON* */
+ ASSERT (found != 0);
+ ASSERT (j == i);
+
+ l = found->value[0];
+ k = found->key;
+ hash_unset3 (h->overflow_hash, k, &j);
+ vec_add1 (h->overflow_free_indices, j);
+ h->overflow_counts[i] -= 1;
+
+ qhash_set_valid_elt_mask (h, bi0, valid0);
+
+ h->hash_keys[t] = k;
+ clib_memswap (v + t * elt_bytes, v + l * elt_bytes, elt_bytes);
+ t = l;
+ }
+ else
+ qhash_set_valid_elt_mask (h, bi0, valid0 ^ match0);
+
+ return t;
+}
+
+void
+_qhash_unset_multiple (void *v,
+ uword elt_bytes,
+ uword * search_keys,
+ uword n_search_keys, u32 * result_indices)
+{
+ qhash_t *h = qhash_header (v);
+ uword *k, *hash_keys;
+ uword n_left, n_elts, bucket_mask;
+ u32 *r;
+
+  if (!v)
+    {
+      uword i;
+      for (i = 0; i < n_search_keys; i++)
+	result_indices[i] = ~0;
+      /* Nothing to unset in an empty table; return before touching
+         the (nonexistent) header below. */
+      return;
+    }
+
+ bucket_mask = pow2_mask (h->log2_hash_size) & ~(QHASH_KEYS_PER_BUCKET - 1);
+
+ hash_keys = h->hash_keys;
+ k = search_keys;
+ r = result_indices;
+ n_left = n_search_keys;
+ n_elts = h->n_elts;
+
+ while (n_left >= 2)
+ {
+ u32 a0, b0, c0, bi0, match0, valid0;
+ u32 a1, b1, c1, bi1, match1, valid1;
+ uword k0, *h0;
+ uword k1, *h1;
+
+ k0 = k[0];
+ k1 = k[1];
+
+ /* Keys must be unique. */
+ ASSERT (k0 != k1);
+
+ n_left -= 2;
+ k += 2;
+
+ a0 = a1 = h->hash_seeds[0];
+ b0 = b1 = h->hash_seeds[1];
+ c0 = c1 = h->hash_seeds[2];
+ a0 ^= k0;
+ a1 ^= k1;
+#if uword_bits == 64
+ b0 ^= k0 >> 32;
+ b1 ^= k1 >> 32;
+#endif
+
+ hash_mix32_step_1 (a0, b0, c0);
+ hash_mix32_step_1 (a1, b1, c1);
+ hash_mix32_step_2 (a0, b0, c0);
+ hash_mix32_step_2 (a1, b1, c1);
+ hash_mix32_step_3 (a0, b0, c0);
+ hash_mix32_step_3 (a1, b1, c1);
+
+ bi0 = c0 & bucket_mask;
+ bi1 = c1 & bucket_mask;
+
+ h0 = hash_keys + bi0;
+ h1 = hash_keys + bi1;
+
+ /* Search two buckets. */
+ valid0 = qhash_get_valid_elt_mask (h, bi0);
+ valid1 = qhash_get_valid_elt_mask (h, bi1);
+
+ match0 = qhash_search_bucket (h0, k0, valid0);
+ match1 = qhash_search_bucket (h1, k1, valid1);
+
+ n_elts -= (match0 != 0) + (match1 != 0);
+
+ if (PREDICT_FALSE (valid0 == QHASH_ALL_VALID
+ || valid1 == QHASH_ALL_VALID))
+ goto slow_path2;
+
+ valid0 ^= match0;
+ qhash_set_valid_elt_mask (h, bi0, valid0);
+
+ valid1 = bi0 == bi1 ? valid0 : valid1;
+ valid1 ^= match1;
+
+ qhash_set_valid_elt_mask (h, bi1, valid1);
+
+ r[0] = match0 ? bi0 + qhash_min_log2 (match0) : ~0;
+ r[1] = match1 ? bi1 + qhash_min_log2 (match1) : ~0;
+ r += 2;
+ continue;
+
+ slow_path2:
+ r[0] = unset_slow_path (v, elt_bytes, k0, bi0, valid0, match0, &n_elts);
+ if (bi0 == bi1)
+ {
+ /* Search again in same bucket to test new overflow element. */
+ valid1 = qhash_get_valid_elt_mask (h, bi0);
+ if (!match1)
+ {
+ match1 = qhash_search_bucket (h1, k1, valid1);
+ n_elts -= (match1 != 0);
+ }
+ }
+ r[1] = unset_slow_path (v, elt_bytes, k1, bi1, valid1, match1, &n_elts);
+ r += 2;
+ }
+
+ while (n_left >= 1)
+ {
+ u32 a0, b0, c0, bi0, match0, valid0;
+ uword k0, *h0;
+
+ k0 = k[0];
+ n_left -= 1;
+ k += 1;
+
+ a0 = h->hash_seeds[0];
+ b0 = h->hash_seeds[1];
+ c0 = h->hash_seeds[2];
+ a0 ^= k0;
+#if uword_bits == 64
+ b0 ^= k0 >> 32;
+#endif
+
+ hash_mix32 (a0, b0, c0);
+
+ bi0 = c0 & bucket_mask;
+
+ h0 = hash_keys + bi0;
+
+ valid0 = qhash_get_valid_elt_mask (h, bi0);
+
+ match0 = qhash_search_bucket (h0, k0, valid0);
+ n_elts -= (match0 != 0);
+ qhash_set_valid_elt_mask (h, bi0, valid0 ^ match0);
+
+ r[0] = match0 ? bi0 + qhash_min_log2 (match0) : ~0;
+ r += 1;
+
+ if (PREDICT_FALSE (valid0 == QHASH_ALL_VALID))
+ r[-1] = unset_slow_path (v, elt_bytes, k0, bi0, valid0, match0,
+ &n_elts);
+ }
+
+ h->n_elts = n_elts;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/qhash.h b/src/vppinfra/qhash.h
new file mode 100644
index 00000000..9dbbd971
--- /dev/null
+++ b/src/vppinfra/qhash.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2006 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_qhash_h
+#define included_qhash_h
+
+#include <vppinfra/cache.h>
+#include <vppinfra/hash.h>
+
+/* Word hash tables. */
+typedef struct
+{
+ /* Number of elements in hash. */
+ u32 n_elts;
+
+ u32 log2_hash_size;
+
+ /* Jenkins hash seeds. */
+ u32 hash_seeds[3];
+
+  /* Fallback CLIB hash for keys that overflow the fixed-size buckets. */
+ uword *overflow_hash;
+
+ u32 *overflow_counts, *overflow_free_indices;
+
+ u8 *hash_key_valid_bitmap;
+
+ uword *hash_keys;
+} qhash_t;
+
+always_inline qhash_t *
+qhash_header (void *v)
+{
+ return vec_header (v, sizeof (qhash_t));
+}
+
+always_inline uword
+qhash_elts (void *v)
+{
+ return v ? qhash_header (v)->n_elts : 0;
+}
+
+always_inline uword
+qhash_n_overflow (void *v)
+{
+ return v ? hash_elts (qhash_header (v)->overflow_hash) : 0;
+}
+
+#define QHASH_LOG2_KEYS_PER_BUCKET 2
+#define QHASH_KEYS_PER_BUCKET (1 << QHASH_LOG2_KEYS_PER_BUCKET)
+
+always_inline uword
+qhash_hash_mix (qhash_t * h, uword key)
+{
+ u32 a, b, c;
+
+ a = h->hash_seeds[0];
+ b = h->hash_seeds[1];
+ c = h->hash_seeds[2];
+
+ a ^= key;
+#if uword_bits == 64
+ b ^= key >> 32;
+#endif
+
+ hash_mix32 (a, b, c);
+
+ return c & pow2_mask (h->log2_hash_size);
+}
+
+#define qhash_resize(v,n) (v) = _qhash_resize ((v), (n), sizeof ((v)[0]))
+
+#define qhash_foreach(var,v,body)
+
+#define qhash_set_multiple(v,keys,n,results) \
+ (v) = _qhash_set_multiple ((v), sizeof ((v)[0]), (keys), (n), (results))
+
+#define qhash_unset_multiple(v,keys,n,results) \
+ _qhash_unset_multiple ((v), sizeof ((v)[0]), (keys), (n), (results))
+
+#define qhash_get(v,key) \
+({ \
+ uword _qhash_get_k = (key); \
+ qhash_get_first_match ((v), &_qhash_get_k, 1, &_qhash_get_k); \
+})
+
+#define qhash_set(v,k) \
+({ \
+ uword _qhash_set_k = (k); \
+ qhash_set_multiple ((v), &_qhash_set_k, 1, &_qhash_set_k); \
+ _qhash_set_k; \
+})
+
+#define qhash_unset(v,k) \
+({ \
+ uword _qhash_unset_k = (k); \
+ qhash_unset_multiple ((v), &_qhash_unset_k, 1, &_qhash_unset_k); \
+ _qhash_unset_k; \
+})
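+
+/* Illustrative usage sketch (names like 'table' are hypothetical;
+   qhash stores values in the vector and hands back value indices):
+
+     u32 *table = 0;
+     qhash_resize (table, 64);            // room for 64 values
+     u32 i = qhash_set (table, 0x1234);   // value index for this key
+     table[i] = 42;
+     i = qhash_get (table, 0x1234);       // ~0 if not present
+     if (i != (u32) ~0)
+       ASSERT (table[i] == 42);
+     qhash_unset (table, 0x1234);
+*/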
+
+void *_qhash_resize (void *v, uword length, uword elt_bytes);
+
+/* Lookup multiple keys in the same hash table. */
+void
+qhash_get_multiple (void *v,
+ uword * search_keys,
+ uword n_search_keys, u32 * result_indices);
+
+/* Lookup multiple keys in the same hash table.
+ Returns index of first matching key. */
+u32
+qhash_get_first_match (void *v,
+ uword * search_keys,
+ uword n_search_keys, uword * matching_key);
+
+/* Set/unset helper functions. */
+void *_qhash_set_multiple (void *v,
+ uword elt_bytes,
+ uword * search_keys,
+ uword n_search_keys, u32 * result_indices);
+void
+_qhash_unset_multiple (void *v,
+ uword elt_bytes,
+ uword * search_keys,
+ uword n_search_keys, u32 * result_indices);
+
+#endif /* included_qhash_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/qsort.c b/src/vppinfra/qsort.c
new file mode 100644
index 00000000..2faa5897
--- /dev/null
+++ b/src/vppinfra/qsort.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Imported into CLIB by Eliot Dresselhaus from:
+ *
+ * This file is part of
+ * MakeIndex - A formatter and format independent index processor
+ *
+ * This file is public domain software donated by
+ * Nelson Beebe (beebe@science.utah.edu).
+ *
+ * modifications copyright (c) 2003 Cisco Systems, Inc.
+ */
+
+#include <vppinfra/clib.h>
+
+/*
+ * qsort.c: Our own version of the system qsort routine which is faster by an
+ * average of 25%, with lows and highs of 10% and 50%. The THRESHold below is
+ * the insertion sort threshold, and has been adjusted for records of size 48
+ * bytes. The MTHREShold is where we stop finding a better median.
+ */
+
+#define THRESH 4 /* threshold for insertion */
+#define MTHRESH 6 /* threshold for median */
+
+typedef struct
+{
+ word qsz; /* size of each record */
+ word thresh; /* THRESHold in chars */
+ word mthresh; /* MTHRESHold in chars */
+ int (*qcmp) (const void *, const void *); /* the comparison routine */
+} qst_t;
+
+static void qst (qst_t * q, char *base, char *max);
+
+/*
+ * qsort: First, set up the sort parameters for qst() to share.
+ * Then, quicksort with qst(), and finish with a cleanup insertion
+ * sort of our own.
+ * Sound simple? It's not...
+ */
+
+void
+qsort (void *base, uword n, uword size,
+ int (*compar) (const void *, const void *))
+{
+ char *i;
+ char *j;
+ char *lo;
+ char *hi;
+ char *min;
+ char c;
+ char *max;
+ qst_t _q, *q = &_q;
+
+ if (n <= 1)
+ return;
+
+ q->qsz = size;
+ q->qcmp = compar;
+ q->thresh = q->qsz * THRESH;
+ q->mthresh = q->qsz * MTHRESH;
+ max = base + n * q->qsz;
+ if (n >= THRESH)
+ {
+ qst (q, base, max);
+ hi = base + q->thresh;
+ }
+ else
+ {
+ hi = max;
+ }
+ /*
+ * First put smallest element, which must be in the first THRESH, in the
+ * first position as a sentinel. This is done just by searching the
+ * first THRESH elements (or the first n if n < THRESH), finding the min,
+ * and swapping it into the first position.
+ */
+ for (j = lo = base; (lo += q->qsz) < hi;)
+ {
+ if ((*compar) (j, lo) > 0)
+ j = lo;
+ }
+ if (j != base)
+ { /* swap j into place */
+ for (i = base, hi = base + q->qsz; i < hi;)
+ {
+ c = *j;
+ *j++ = *i;
+ *i++ = c;
+ }
+ }
+ /*
+ * With our sentinel in place, we now run the following hyper-fast
+ * insertion sort. For each remaining element, min, from [1] to [n-1],
+ * set hi to the index of the element AFTER which this one goes. Then, do
+ * the standard insertion sort shift on a character at a time basis for
+ * each element in the frob.
+ */
+ for (min = base; (hi = min += q->qsz) < max;)
+ {
+ while ((*q->qcmp) (hi -= q->qsz, min) > 0);
+ if ((hi += q->qsz) != min)
+ {
+ for (lo = min + q->qsz; --lo >= min;)
+ {
+ c = *lo;
+ for (i = j = lo; (j -= q->qsz) >= hi; i = j)
+ *i = *j;
+ *i = c;
+ }
+ }
+ }
+}
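+
+/* Illustrative use (a sketch; 'cmp_int' is a hypothetical comparator):
+
+     static int
+     cmp_int (const void *a, const void *b)
+     {
+       const int *x = a, *y = b;
+       return (*x > *y) - (*x < *y);
+     }
+
+     int v[] = { 3, 1, 2 };
+     qsort (v, ARRAY_LEN (v), sizeof (v[0]), cmp_int);
+*/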
+
+
+
+/*
+ * qst: Do a quicksort. First, find the median element, and put that one in
+ * the first place as the discriminator. (This "median" is just the median
+ * of the first, last and middle elements). (Using this median instead of
+ * the first element is a big win). Then, the usual partitioning/swapping,
+ * followed by moving the discriminator into the right place. Then, figure
+ * out the sizes of the two partitions, do the smaller one recursively and the
+ * larger one via a repeat of this code. Stopping when there are fewer than
+ * THRESH elements in a partition and cleaning up with an insertion sort (in
+ * our caller) is a huge win. All data swaps are done in-line, which is
+ * space-losing but time-saving. (And there are only three places where this
+ * is done).
+ */
+
+static void
+qst (qst_t * q, char *base, char *max)
+{
+ char *i;
+ char *j;
+ char *jj;
+ char *mid;
+ int ii;
+ char c;
+ char *tmp;
+ int lo;
+ int hi;
+ int qsz = q->qsz;
+
+ lo = (int) (max - base); /* number of elements as chars */
+ do
+ {
+ /*
+ * At the top here, lo is the number of characters of elements in the
+ * current partition. (Which should be max - base). Find the median
+ * of the first, last, and middle element and make that the middle
+ * element. Set j to largest of first and middle. If max is larger
+ * than that guy, then it's that guy, else compare max with loser of
+ * first and take larger. Things are set up to prefer the middle,
+ * then the first in case of ties.
+ */
+ mid = i = base + qsz * ((unsigned) (lo / qsz) >> 1);
+ if (lo >= q->mthresh)
+ {
+ j = ((*q->qcmp) ((jj = base), i) > 0 ? jj : i);
+ if ((*q->qcmp) (j, (tmp = max - qsz)) > 0)
+ {
+ /* switch to first loser */
+ j = (j == jj ? i : jj);
+ if ((*q->qcmp) (j, tmp) < 0)
+ j = tmp;
+ }
+ if (j != i)
+ {
+ ii = qsz;
+ do
+ {
+ c = *i;
+ *i++ = *j;
+ *j++ = c;
+ }
+ while (--ii);
+ }
+ }
+ /* Semi-standard quicksort partitioning/swapping */
+ for (i = base, j = max - qsz;;)
+ {
+ while (i < mid && (*q->qcmp) (i, mid) <= 0)
+ i += qsz;
+ while (j > mid)
+ {
+ if ((*q->qcmp) (mid, j) <= 0)
+ {
+ j -= qsz;
+ continue;
+ }
+ tmp = i + qsz; /* value of i after swap */
+ if (i == mid)
+ { /* j <-> mid, new mid is j */
+ mid = jj = j;
+ }
+ else
+ { /* i <-> j */
+ jj = j;
+ j -= qsz;
+ }
+ goto swap;
+ }
+ if (i == mid)
+ {
+ break;
+ }
+ else
+ { /* i <-> mid, new mid is i */
+ jj = mid;
+ tmp = mid = i; /* value of i after swap */
+ j -= qsz;
+ }
+ swap:
+ ii = qsz;
+ do
+ {
+ c = *i;
+ *i++ = *jj;
+ *jj++ = c;
+ }
+ while (--ii);
+ i = tmp;
+ }
+ /*
+ * Look at sizes of the two partitions, do the smaller one first by
+ * recursion, then do the larger one by making sure lo is its size,
+ * base and max are updated correctly, and branching back. But only
+ * repeat (recursively or by branching) if the partition is of at
+ * least size THRESH.
+ */
+ i = (j = mid) + qsz;
+ if ((lo = (int) (j - base)) <= (hi = (int) (max - i)))
+ {
+ if (lo >= q->thresh)
+ qst (q, base, j);
+ base = i;
+ lo = hi;
+ }
+ else
+ {
+ if (hi >= q->thresh)
+ qst (q, i, max);
+ max = j;
+ }
+ }
+ while (lo >= q->thresh);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/random.c b/src/vppinfra/random.c
new file mode 100644
index 00000000..fa5bcc8c
--- /dev/null
+++ b/src/vppinfra/random.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/random.h>
+
+/* Default random seed for the standalone version of the library.
+   The value can be overridden by platform code, e.g. from the
+   machine's clock count register. */
+u32 standalone_random_default_seed = 1;
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/random.h b/src/vppinfra/random.h
new file mode 100644
index 00000000..5c139d05
--- /dev/null
+++ b/src/vppinfra/random.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_random_h
+#define included_random_h
+
+#include <vppinfra/clib.h>
+#include <vppinfra/vec.h> /* for vec_resize */
+#include <vppinfra/format.h> /* for unformat_input_t */
+
+/** \file
+ Linear Congruential Random Number Generator
+
+ This specific random number generator is described in
+ "Numerical Recipes in C", 2nd edition, page 284. If you need
+ random numbers with really excellent statistics, take a look
+ at Chapter 7...
+
+ By definition, a linear congruential random number generator
+ is of the form: rand[i+1] = a*rand[i] + c (mod m) for specific
+ values of (a,c,m).
+
+   In this case, choose m = 2**32 and use the low-order 32 bits of
+   the 64-bit product a*rand[i]. Knuth suggests a=1664525, and
+   H.W. Lewis has tested c=1013904223 extensively. This routine is
+   reputedly as good as any 32-bit LCRNG, and costs only a single
+   multiply-add.
+
+ Several variants: 32/64-bit, machine word width,
+ f64 on the closed interval [0,1].
+*/
+
+/** \brief 32-bit random number generator */
+always_inline u32
+random_u32 (u32 * seed)
+{
+ *seed = (1664525 * *seed) + 1013904223;
+ return *seed;
+}
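+
+/* Example (a sketch): the seed is caller-owned state and advances on
+   every call.
+
+     u32 seed = random_default_seed ();
+     u32 r0 = random_u32 (&seed);
+     u32 r1 = random_u32 (&seed);   // r1 != r0 with high probability
+*/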
+
+/* External test routine. */
+int test_random_main (unformat_input_t * input);
+
+/** \brief Maximum value returned by random_u32() */
+always_inline u32
+random_u32_max (void)
+{
+ return 0xffffffff;
+}
+
+#ifdef CLIB_UNIX
+
+#include <unistd.h> /* for getpid */
+
+/** \brief Default random seed (unix/linux user-mode) */
+always_inline uword
+random_default_seed (void)
+{
+ return getpid ();
+}
+
+#endif
+
+#ifdef CLIB_LINUX_KERNEL
+
+#include <linux/sched.h> /* for jiffies */
+
+/** \brief Default random seed (Linux kernel) */
+always_inline uword
+random_default_seed (void)
+{
+ return jiffies;
+}
+
+#endif
+
+#ifdef CLIB_STANDALONE
+extern u32 standalone_random_default_seed;
+
+always_inline u32
+random_default_seed (void)
+{
+ return standalone_random_default_seed;
+}
+#endif
+
+/** \brief 64-bit random number generator
+ * Again, constants courtesy of Donald Knuth.
+ *
+ */
+always_inline u64
+random_u64 (u64 * seed)
+{
+ *seed = 6364136223846793005ULL * *seed + 1442695040888963407ULL;
+ return *seed;
+}
+
+/** \brief Machine-word-sized random number generator.
+    Note: on 64-bit platforms the seed is read and written as a u64,
+    so the caller must supply at least 64 bits of seed storage. */
+
+always_inline uword
+random_uword (u32 * seed)
+{
+ if (sizeof (uword) == sizeof (u64))
+ return random_u64 ((u64 *) seed);
+ else
+ return random_u32 (seed);
+}
+
+/** \brief Generate f64 random number in the interval [0,1] */
+always_inline f64
+random_f64 (u32 * seed)
+{
+ return (f64) random_u32 (seed) / (f64) random_u32_max ();
+}
+
+/** \brief Generate random character vector
+
+ From the alphabet a-z, lower case.
+ Returns a vector of the supplied length which is NOT guaranteed to be
+ NULL-terminated. FIXME?
+*/
+always_inline u8 *
+random_string (u32 * seed, uword len)
+{
+ u8 *alphabet = (u8 *) "abcdefghijklmnopqrstuvwxyz";
+ u8 *s = 0;
+ word i;
+
+ vec_resize (s, len);
+ for (i = 0; i < len; i++)
+ s[i] = alphabet[random_u32 (seed) % 26];
+
+ return s;
+}
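+
+/* Example (a sketch):
+
+     u32 seed = random_default_seed ();
+     u8 *name = random_string (&seed, 8);   // 8 letters, no NUL
+     ...
+     vec_free (name);
+*/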
+
+#endif /* included_random_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/random_buffer.c b/src/vppinfra/random_buffer.c
new file mode 100644
index 00000000..df036980
--- /dev/null
+++ b/src/vppinfra/random_buffer.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/random_buffer.h>
+
+/* Fill random buffer. */
+void
+clib_random_buffer_fill (clib_random_buffer_t * b, uword n_words)
+{
+ uword *w, n = n_words;
+
+ if (n < 256)
+ n = 256;
+
+ n = round_pow2 (n, 2 << ISAAC_LOG2_SIZE);
+
+ vec_add2 (b->buffer, w, n);
+ do
+ {
+ isaac2 (b->ctx, w);
+ w += 2 * ISAAC_SIZE;
+ n -= 2 * ISAAC_SIZE;
+ }
+ while (n > 0);
+}
+
+void
+clib_random_buffer_init (clib_random_buffer_t * b, uword seed)
+{
+ uword i, j;
+
+ memset (b, 0, sizeof (b[0]));
+
+ /* Seed ISAAC. */
+ for (i = 0; i < ARRAY_LEN (b->ctx); i++)
+ {
+ uword s[ISAAC_SIZE];
+
+ for (j = 0; j < ARRAY_LEN (s); j++)
+ s[j] = ARRAY_LEN (b->ctx) * (seed + j) + i;
+
+ isaac_init (&b->ctx[i], s);
+ }
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/random_buffer.h b/src/vppinfra/random_buffer.h
new file mode 100644
index 00000000..eb318548
--- /dev/null
+++ b/src/vppinfra/random_buffer.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_clib_random_buffer_h
+#define included_clib_random_buffer_h
+
+#include <vppinfra/clib.h>
+#include <vppinfra/random_isaac.h>
+
+typedef struct
+{
+ /* Two parallel ISAAC contexts for speed. */
+ isaac_t ctx[2];
+
+ /* Random buffer. */
+ uword *buffer;
+
+  /* Cache up to one word's worth of bytes, for requests of
+     random data smaller than one word. */
+ uword n_cached_bytes;
+
+ union
+ {
+ u8 cached_bytes[sizeof (uword)];
+ uword cached_word;
+ };
+}
+clib_random_buffer_t;
+
+always_inline void
+clib_random_buffer_free (clib_random_buffer_t * b)
+{
+ vec_free (b->buffer);
+}
+
+/* Fill random buffer. */
+void clib_random_buffer_fill (clib_random_buffer_t * b, uword n_words);
+
+/* Initialize random buffer. */
+void clib_random_buffer_init (clib_random_buffer_t * b, uword seed);
+
+/* Returns word aligned random data, possibly filling buffer. */
+always_inline void *
+clib_random_buffer_get_data (clib_random_buffer_t * b, uword n_bytes)
+{
+ uword n_words, i, l;
+
+ l = b->n_cached_bytes;
+ if (n_bytes <= l)
+ {
+      b->n_cached_bytes = l - n_bytes;
+      /* The l remaining bytes live at the tail of cached_word;
+         consume them front to back so successive requests return
+         distinct, in-bounds bytes (indexing by l alone could run
+         past the end of cached_bytes). */
+      return &b->cached_bytes[sizeof (b->cached_bytes) - l];
+ }
+
+ n_words = n_bytes / sizeof (uword);
+ if (n_bytes % sizeof (uword))
+ n_words++;
+
+ /* Enough random words left? */
+ if (PREDICT_FALSE (n_words > vec_len (b->buffer)))
+ clib_random_buffer_fill (b, n_words);
+
+ i = vec_len (b->buffer) - n_words;
+ _vec_len (b->buffer) = i;
+
+ if (n_bytes < sizeof (uword))
+ {
+ b->cached_word = b->buffer[i];
+ b->n_cached_bytes = sizeof (uword) - n_bytes;
+ return b->cached_bytes;
+ }
+ else
+ return b->buffer + i;
+}
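+
+/* Example (a sketch): draw 16 random bytes. The returned pointer
+   aliases internal storage, so copy or consume it before the next
+   call, which may refill and reallocate the buffer.
+
+     clib_random_buffer_t rb;
+     clib_random_buffer_init (&rb, 0x12345678);
+     u8 *bytes = clib_random_buffer_get_data (&rb, 16);
+     ...
+     clib_random_buffer_free (&rb);
+*/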
+
+#endif /* included_clib_random_buffer_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/random_isaac.c b/src/vppinfra/random_isaac.c
new file mode 100644
index 00000000..6f00fc32
--- /dev/null
+++ b/src/vppinfra/random_isaac.c
@@ -0,0 +1,434 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ ------------------------------------------------------------------------------
+ By Bob Jenkins, 1996, Public Domain
+ MODIFIED:
+ 960327: Creation (addition of randinit, really)
+ 970719: use context, not global variables, for internal state
+ 980324: renamed seed to flag
+ 980605: recommend ISAAC_LOG2_SIZE=4 for noncryptography.
+ 010626: note this is public domain
+ ------------------------------------------------------------------------------
+
+ Modified for CLIB by Eliot Dresselhaus.
+ Dear Bob, Thanks for all the great work. - Eliot
+
+ modifications copyright (c) 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+/* ISAAC is Bob Jenkins' random number generator.
+ http://burtleburtle.net/bob/rand/isaacafa.html */
+
+#include <vppinfra/random_isaac.h>
+
+#if uword_bits != 32 && uword_bits != 64
+#error "isaac only works for 32 or 64 bit words"
+#endif
+
+#if uword_bits == 32
+
+#define ind32(mm,x) (*(u32 *)((u8 *)(mm) + ((x) & ((ISAAC_SIZE-1)<<2))))
+#define rngstep32(mix,a,b,mm,m,m2,r,x,y) \
+{ \
+ x = *m; \
+ a = (a^(mix)) + *(m2++); \
+ *(m++) = y = ind32(mm,x) + a + b; \
+ *(r++) = b = ind32(mm,y>>ISAAC_LOG2_SIZE) + x; \
+}
+
+void
+isaac (isaac_t * ctx, uword * results)
+{
+ u32 a, b, c, x, y, *m, *mm, *m2, *r, *mend;
+
+ mm = ctx->memory;
+ r = results;
+ a = ctx->a;
+ b = ctx->b;
+ c = ctx->c;
+
+ b += ++c;
+ mend = m2 = mm + ARRAY_LEN (ctx->memory) / 2;
+ m = mm;
+ while (m < mend)
+ {
+ rngstep32 (a << 13, a, b, mm, m, m2, r, x, y);
+ rngstep32 (a >> 6, a, b, mm, m, m2, r, x, y);
+ rngstep32 (a << 2, a, b, mm, m, m2, r, x, y);
+ rngstep32 (a >> 16, a, b, mm, m, m2, r, x, y);
+ }
+
+ m2 = mm;
+ while (m2 < mend)
+ {
+ rngstep32 (a << 13, a, b, mm, m, m2, r, x, y);
+ rngstep32 (a >> 6, a, b, mm, m, m2, r, x, y);
+ rngstep32 (a << 2, a, b, mm, m, m2, r, x, y);
+ rngstep32 (a >> 16, a, b, mm, m, m2, r, x, y);
+ }
+
+ ctx->a = a;
+ ctx->b = b;
+ ctx->c = c;
+}
+
+/* Perform 2 isaac runs with different contexts simultaneously. */
+void
+isaac2 (isaac_t * ctx, uword * results)
+{
+#define _(n) \
+ u32 a##n, b##n, c##n, x##n, y##n, * m##n, * mm##n, * m2##n, * r##n, * mend##n
+
+ _(0);
+ _(1);
+ (void) mend1; /* "set but unused variable" error on mend1 with gcc 4.9 */
+#undef _
+
+#define _(n) \
+do { \
+ mm##n = ctx[(n)].memory; \
+ r##n = results + (n) * ISAAC_SIZE; \
+ a##n = ctx[(n)].a; \
+ b##n = ctx[(n)].b; \
+ c##n = ctx[(n)].c; \
+ b##n += ++c##n; \
+ mend##n = m2##n = mm##n + ARRAY_LEN (ctx[(n)].memory) / 2; \
+ m##n = mm##n; \
+} while (0)
+
+ _(0);
+ _(1);
+
+#undef _
+
+ while (m0 < mend0)
+ {
+ rngstep32 (a0 << 13, a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep32 (a1 << 13, a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep32 (a0 >> 6, a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep32 (a1 >> 6, a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep32 (a0 << 2, a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep32 (a1 << 2, a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep32 (a0 >> 16, a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep32 (a1 >> 16, a1, b1, mm1, m1, m21, r1, x1, y1);
+ }
+
+ m20 = mm0;
+ m21 = mm1;
+ while (m20 < mend0)
+ {
+ rngstep32 (a0 << 13, a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep32 (a1 << 13, a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep32 (a0 >> 6, a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep32 (a1 >> 6, a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep32 (a0 << 2, a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep32 (a1 << 2, a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep32 (a0 >> 16, a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep32 (a1 >> 16, a1, b1, mm1, m1, m21, r1, x1, y1);
+ }
+
+ ctx[0].a = a0;
+ ctx[0].b = b0;
+ ctx[0].c = c0;
+ ctx[1].a = a1;
+ ctx[1].b = b1;
+ ctx[1].c = c1;
+}
+
+#define mix32(a,b,c,d,e,f,g,h) \
+{ \
+ a^=b<<11; d+=a; b+=c; \
+ b^=c>>2; e+=b; c+=d; \
+ c^=d<<8; f+=c; d+=e; \
+ d^=e>>16; g+=d; e+=f; \
+ e^=f<<10; h+=e; f+=g; \
+ f^=g>>4; a+=f; g+=h; \
+ g^=h<<8; b+=g; h+=a; \
+ h^=a>>9; c+=h; a+=b; \
+}
+
+void
+isaac_init (isaac_t * ctx, uword * seeds)
+{
+ word i;
+ u32 a, b, c, d, e, f, g, h, *m, *r;
+
+ ctx->a = ctx->b = ctx->c = 0;
+ m = ctx->memory;
+ r = seeds;
+
+ a = b = c = d = e = f = g = h = 0x9e3779b9; /* the golden ratio */
+
+ for (i = 0; i < 4; ++i) /* scramble it */
+ mix32 (a, b, c, d, e, f, g, h);
+
+ /* initialize using the contents of r[] as the seed */
+ for (i = 0; i < ISAAC_SIZE; i += 8)
+ {
+ a += r[i];
+ b += r[i + 1];
+ c += r[i + 2];
+ d += r[i + 3];
+ e += r[i + 4];
+ f += r[i + 5];
+ g += r[i + 6];
+ h += r[i + 7];
+ mix32 (a, b, c, d, e, f, g, h);
+ m[i] = a;
+ m[i + 1] = b;
+ m[i + 2] = c;
+ m[i + 3] = d;
+ m[i + 4] = e;
+ m[i + 5] = f;
+ m[i + 6] = g;
+ m[i + 7] = h;
+ }
+
+ /* do a second pass to make all of the seed affect all of m */
+ for (i = 0; i < ISAAC_SIZE; i += 8)
+ {
+ a += m[i];
+ b += m[i + 1];
+ c += m[i + 2];
+ d += m[i + 3];
+ e += m[i + 4];
+ f += m[i + 5];
+ g += m[i + 6];
+ h += m[i + 7];
+ mix32 (a, b, c, d, e, f, g, h);
+ m[i] = a;
+ m[i + 1] = b;
+ m[i + 2] = c;
+ m[i + 3] = d;
+ m[i + 4] = e;
+ m[i + 5] = f;
+ m[i + 6] = g;
+ m[i + 7] = h;
+ }
+}
+#endif /* uword_bits == 32 */
+
+#if uword_bits == 64
+
+#define ind64(mm,x) (*(u64 *)((u8 *)(mm) + ((x) & ((ISAAC_SIZE-1)<<3))))
+#define rngstep64(mix,a,b,mm,m,m2,r,x,y) \
+{ \
+ x = *m; \
+ a = (mix) + *(m2++); \
+ *(m++) = y = ind64(mm,x) + a + b; \
+ *(r++) = b = ind64(mm,y>>ISAAC_LOG2_SIZE) + x; \
+}
+
+void
+isaac (isaac_t * ctx, uword * results)
+{
+ u64 a, b, c, x, y, *m, *mm, *m2, *r, *mend;
+
+ mm = ctx->memory;
+ r = results;
+ a = ctx->a;
+ b = ctx->b;
+ c = ctx->c;
+
+ b += ++c;
+ mend = m2 = mm + ARRAY_LEN (ctx->memory) / 2;
+ m = mm;
+ while (m < mend)
+ {
+ rngstep64 (~(a ^ (a << 21)), a, b, mm, m, m2, r, x, y);
+ rngstep64 (a ^ (a >> 5), a, b, mm, m, m2, r, x, y);
+ rngstep64 (a ^ (a << 12), a, b, mm, m, m2, r, x, y);
+ rngstep64 (a ^ (a >> 33), a, b, mm, m, m2, r, x, y);
+ }
+
+ m2 = mm;
+ while (m2 < mend)
+ {
+ rngstep64 (~(a ^ (a << 21)), a, b, mm, m, m2, r, x, y);
+ rngstep64 (a ^ (a >> 5), a, b, mm, m, m2, r, x, y);
+ rngstep64 (a ^ (a << 12), a, b, mm, m, m2, r, x, y);
+ rngstep64 (a ^ (a >> 33), a, b, mm, m, m2, r, x, y);
+ }
+
+ ctx->a = a;
+ ctx->b = b;
+ ctx->c = c;
+}
+
+/* Perform 2 isaac runs with different contexts simultaneously. */
+void
+isaac2 (isaac_t * ctx, uword * results)
+{
+#define _(n) \
+ u64 a##n, b##n, c##n, x##n, y##n, * m##n, * mm##n, * m2##n, * r##n, * mend##n
+
+ _(0);
+ _(1);
+
+#undef _
+
+#define _(n) \
+do { \
+ mm##n = ctx[(n)].memory; \
+ r##n = results + (n) * ISAAC_SIZE; \
+ a##n = ctx[(n)].a; \
+ b##n = ctx[(n)].b; \
+ c##n = ctx[(n)].c; \
+ b##n += ++c##n; \
+ mend##n = m2##n = mm##n + ARRAY_LEN (ctx[(n)].memory) / 2; \
+ m##n = mm##n; \
+} while (0)
+
+ _(0);
+ _(1);
+
+#undef _
+
+ (void) mend1; /* compiler warning */
+
+ while (m0 < mend0)
+ {
+ rngstep64 (~(a0 ^ (a0 << 21)), a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep64 (~(a1 ^ (a1 << 21)), a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep64 (a0 ^ (a0 >> 5), a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep64 (a1 ^ (a1 >> 5), a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep64 (a0 ^ (a0 << 12), a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep64 (a1 ^ (a1 << 12), a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep64 (a0 ^ (a0 >> 33), a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep64 (a1 ^ (a1 >> 33), a1, b1, mm1, m1, m21, r1, x1, y1);
+ }
+
+ m20 = mm0;
+ m21 = mm1;
+ while (m20 < mend0)
+ {
+ rngstep64 (~(a0 ^ (a0 << 21)), a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep64 (~(a1 ^ (a1 << 21)), a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep64 (a0 ^ (a0 >> 5), a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep64 (a1 ^ (a1 >> 5), a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep64 (a0 ^ (a0 << 12), a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep64 (a1 ^ (a1 << 12), a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep64 (a0 ^ (a0 >> 33), a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep64 (a1 ^ (a1 >> 33), a1, b1, mm1, m1, m21, r1, x1, y1);
+ }
+
+ ctx[0].a = a0;
+ ctx[0].b = b0;
+ ctx[0].c = c0;
+ ctx[1].a = a1;
+ ctx[1].b = b1;
+ ctx[1].c = c1;
+}
+
+#define mix64(a,b,c,d,e,f,g,h) \
+{ \
+ a-=e; f^=h>>9; h+=a; \
+ b-=f; g^=a<<9; a+=b; \
+ c-=g; h^=b>>23; b+=c; \
+ d-=h; a^=c<<15; c+=d; \
+ e-=a; b^=d>>14; d+=e; \
+ f-=b; c^=e<<20; e+=f; \
+ g-=c; d^=f>>17; f+=g; \
+ h-=d; e^=g<<14; g+=h; \
+}
+
+void
+isaac_init (isaac_t * ctx, uword * seeds)
+{
+ word i;
+ u64 a, b, c, d, e, f, g, h, *m, *r;
+
+ ctx->a = ctx->b = ctx->c = 0;
+ m = ctx->memory;
+ r = seeds;
+
+ a = b = c = d = e = f = g = h = 0x9e3779b97f4a7c13LL; /* the golden ratio */
+
+ for (i = 0; i < 4; ++i) /* scramble it */
+ mix64 (a, b, c, d, e, f, g, h);
+
+ for (i = 0; i < ISAAC_SIZE; i += 8) /* fill in mm[] with messy stuff */
+ {
+ a += r[i];
+ b += r[i + 1];
+ c += r[i + 2];
+ d += r[i + 3];
+ e += r[i + 4];
+ f += r[i + 5];
+ g += r[i + 6];
+ h += r[i + 7];
+ mix64 (a, b, c, d, e, f, g, h);
+ m[i] = a;
+ m[i + 1] = b;
+ m[i + 2] = c;
+ m[i + 3] = d;
+ m[i + 4] = e;
+ m[i + 5] = f;
+ m[i + 6] = g;
+ m[i + 7] = h;
+ }
+
+ /* do a second pass to make all of the seed affect all of mm */
+ for (i = 0; i < ISAAC_SIZE; i += 8)
+ {
+ a += m[i];
+ b += m[i + 1];
+ c += m[i + 2];
+ d += m[i + 3];
+ e += m[i + 4];
+ f += m[i + 5];
+ g += m[i + 6];
+ h += m[i + 7];
+ mix64 (a, b, c, d, e, f, g, h);
+ m[i] = a;
+ m[i + 1] = b;
+ m[i + 2] = c;
+ m[i + 3] = d;
+ m[i + 4] = e;
+ m[i + 5] = f;
+ m[i + 6] = g;
+ m[i + 7] = h;
+ }
+}
+#endif /* uword_bits == 64 */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/random_isaac.h b/src/vppinfra/random_isaac.h
new file mode 100644
index 00000000..803fbd62
--- /dev/null
+++ b/src/vppinfra/random_isaac.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ ------------------------------------------------------------------------------
+ By Bob Jenkins, 1996, Public Domain
+ MODIFIED:
+ 960327: Creation (addition of randinit, really)
+ 970719: use context, not global variables, for internal state
+ 980324: renamed seed to flag
+ 980605: recommend ISAAC_LOG2_SIZE=4 for noncryptography.
+ 010626: note this is public domain
+ ------------------------------------------------------------------------------
+
+ Modified for CLIB by Eliot Dresselhaus.
+ Dear Bob, Thanks for all the great work. - Eliot
+
+ modifications copyright (c) 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_random_isaac_h
+#define included_random_isaac_h
+
+#include <vppinfra/clib.h> /* for u32/u64 */
+#include <vppinfra/format.h> /* for unformat_input_t */
+
+/* Bob recommends 8 for crypto, 4 for simulations */
+#define ISAAC_LOG2_SIZE (4)
+#define ISAAC_SIZE (1 << ISAAC_LOG2_SIZE)
+
+typedef struct
+{
+ uword memory[ISAAC_SIZE];
+ uword a, b, c;
+} isaac_t;
+
+void isaac (isaac_t * ctx, uword * results);
+void isaac2 (isaac_t * ctx, uword * results);
+void isaac_init (isaac_t * ctx, uword * seeds);
+
+int test_isaac_main (unformat_input_t * input);
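+
+/* Example (a sketch): one batch of ISAAC_SIZE random words.
+
+     isaac_t ctx;
+     uword seeds[ISAAC_SIZE] = { 1, 2, 3 };   (remaining seeds are zero)
+     uword results[ISAAC_SIZE];
+
+     isaac_init (&ctx, seeds);
+     isaac (&ctx, results);   (fills results[0] .. results[ISAAC_SIZE-1])
+*/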
+
+#endif /* included_random_isaac_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/serialize.c b/src/vppinfra/serialize.c
new file mode 100644
index 00000000..5d401a08
--- /dev/null
+++ b/src/vppinfra/serialize.c
@@ -0,0 +1,1254 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+/* Turn data structures into byte streams for saving or transport. */
+
+#include <vppinfra/heap.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/serialize.h>
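+
+/* Example (a sketch, using the serialize_open_vector /
+   serialize_close_vector helpers declared in serialize.h):
+
+     serialize_main_t m;
+     u8 *buf;
+
+     serialize_open_vector (&m, 0);
+     serialize (&m, serialize_32, (u32) 0xdeadbeef);
+     buf = serialize_close_vector (&m);   // vector holding the bytes
+*/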
+
+void
+serialize_64 (serialize_main_t * m, va_list * va)
+{
+ u64 x = va_arg (*va, u64);
+ u32 lo, hi;
+ lo = x;
+ hi = x >> 32;
+ serialize_integer (m, lo, sizeof (lo));
+ serialize_integer (m, hi, sizeof (hi));
+}
+
+void
+serialize_32 (serialize_main_t * m, va_list * va)
+{
+ u32 x = va_arg (*va, u32);
+ serialize_integer (m, x, sizeof (x));
+}
+
+void
+serialize_16 (serialize_main_t * m, va_list * va)
+{
+ u32 x = va_arg (*va, u32);
+ serialize_integer (m, x, sizeof (u16));
+}
+
+void
+serialize_8 (serialize_main_t * m, va_list * va)
+{
+ u32 x = va_arg (*va, u32);
+ serialize_integer (m, x, sizeof (u8));
+}
+
+void
+unserialize_64 (serialize_main_t * m, va_list * va)
+{
+ u64 *x = va_arg (*va, u64 *);
+ u32 lo, hi;
+ unserialize_integer (m, &lo, sizeof (lo));
+ unserialize_integer (m, &hi, sizeof (hi));
+ *x = ((u64) hi << 32) | (u64) lo;
+}
+
+void
+unserialize_32 (serialize_main_t * m, va_list * va)
+{
+ u32 *x = va_arg (*va, u32 *);
+ unserialize_integer (m, x, sizeof (x[0]));
+}
+
+void
+unserialize_16 (serialize_main_t * m, va_list * va)
+{
+ u16 *x = va_arg (*va, u16 *);
+ u32 t;
+ unserialize_integer (m, &t, sizeof (x[0]));
+ x[0] = t;
+}
+
+void
+unserialize_8 (serialize_main_t * m, va_list * va)
+{
+ u8 *x = va_arg (*va, u8 *);
+ u32 t;
+ unserialize_integer (m, &t, sizeof (x[0]));
+ x[0] = t;
+}
+
+void
+serialize_f64 (serialize_main_t * m, va_list * va)
+{
+ f64 x = va_arg (*va, f64);
+ union
+ {
+ f64 f;
+ u64 i;
+ } y;
+ y.f = x;
+ serialize (m, serialize_64, y.i);
+}
+
+void
+serialize_f32 (serialize_main_t * m, va_list * va)
+{
+ f32 x = va_arg (*va, f64);
+ union
+ {
+ f32 f;
+ u32 i;
+ } y;
+ y.f = x;
+ serialize_integer (m, y.i, sizeof (y.i));
+}
+
+void
+unserialize_f64 (serialize_main_t * m, va_list * va)
+{
+ f64 *x = va_arg (*va, f64 *);
+ union
+ {
+ f64 f;
+ u64 i;
+ } y;
+ unserialize (m, unserialize_64, &y.i);
+ *x = y.f;
+}
+
+void
+unserialize_f32 (serialize_main_t * m, va_list * va)
+{
+ f32 *x = va_arg (*va, f32 *);
+ union
+ {
+ f32 f;
+ u32 i;
+ } y;
+ unserialize_integer (m, &y.i, sizeof (y.i));
+ *x = y.f;
+}
+
+void
+serialize_cstring (serialize_main_t * m, char *s)
+{
+ u32 len = s ? strlen (s) : 0;
+ void *p;
+
+ serialize_likely_small_unsigned_integer (m, len);
+ if (len > 0)
+ {
+ p = serialize_get (m, len);
+ clib_memcpy (p, s, len);
+ }
+}
+
+void
+unserialize_cstring (serialize_main_t * m, char **s)
+{
+ char *p, *r = 0;
+ u32 len;
+
+ len = unserialize_likely_small_unsigned_integer (m);
+
+ /*
+ * Given broken enough data, we could get len = 0xFFFFFFFF.
+ * Add one, it overflows, we call vec_new (char, 0), then
+ * memcpy until we bus error.
+ */
+ if (len > 0 && len != 0xFFFFFFFF)
+ {
+ r = vec_new (char, len + 1);
+ p = unserialize_get (m, len);
+ clib_memcpy (r, p, len);
+
+ /* Null terminate. */
+ r[len] = 0;
+ }
+ *s = r;
+}
+
+/* vec_serialize/vec_unserialize helper functions for basic vector types. */
+void
+serialize_vec_8 (serialize_main_t * m, va_list * va)
+{
+ u8 *s = va_arg (*va, u8 *);
+ u32 n = va_arg (*va, u32);
+ u8 *p = serialize_get (m, n * sizeof (u8));
+ clib_memcpy (p, s, n * sizeof (u8));
+}
+
+void
+unserialize_vec_8 (serialize_main_t * m, va_list * va)
+{
+ u8 *s = va_arg (*va, u8 *);
+ u32 n = va_arg (*va, u32);
+ u8 *p = unserialize_get (m, n);
+ clib_memcpy (s, p, n);
+}
+
+#define _(n_bits) \
+ void serialize_vec_##n_bits (serialize_main_t * m, va_list * va) \
+ { \
+ u##n_bits * s = va_arg (*va, u##n_bits *); \
+ u32 n = va_arg (*va, u32); \
+ u##n_bits * p = serialize_get (m, n * sizeof (s[0])); \
+ \
+ while (n >= 4) \
+ { \
+ p[0] = clib_host_to_net_u##n_bits (s[0]); \
+ p[1] = clib_host_to_net_u##n_bits (s[1]); \
+ p[2] = clib_host_to_net_u##n_bits (s[2]); \
+ p[3] = clib_host_to_net_u##n_bits (s[3]); \
+ s += 4; \
+ p += 4; \
+ n -= 4; \
+ } \
+ \
+ while (n >= 1) \
+ { \
+ p[0] = clib_host_to_net_u##n_bits (s[0]); \
+ s += 1; \
+ p += 1; \
+ n -= 1; \
+ } \
+ } \
+ \
+ void unserialize_vec_##n_bits (serialize_main_t * m, va_list * va) \
+ { \
+ u##n_bits * s = va_arg (*va, u##n_bits *); \
+ u32 n = va_arg (*va, u32); \
+ u##n_bits * p = unserialize_get (m, n * sizeof (s[0])); \
+ \
+ while (n >= 4) \
+ { \
+ s[0] = clib_net_to_host_mem_u##n_bits (&p[0]); \
+ s[1] = clib_net_to_host_mem_u##n_bits (&p[1]); \
+ s[2] = clib_net_to_host_mem_u##n_bits (&p[2]); \
+ s[3] = clib_net_to_host_mem_u##n_bits (&p[3]); \
+ s += 4; \
+ p += 4; \
+ n -= 4; \
+ } \
+ \
+ while (n >= 1) \
+ { \
+ s[0] = clib_net_to_host_mem_u##n_bits (&p[0]); \
+ s += 1; \
+ p += 1; \
+ n -= 1; \
+ } \
+ }
+
+_(16);
+_(32);
+_(64);
+
+#undef _
+
+#define SERIALIZE_VECTOR_CHUNK_SIZE 64
+
+void
+serialize_vector (serialize_main_t * m, va_list * va)
+{
+ void *vec = va_arg (*va, void *);
+ u32 elt_bytes = va_arg (*va, u32);
+ serialize_function_t *f = va_arg (*va, serialize_function_t *);
+ u32 l = vec_len (vec);
+ void *p = vec;
+
+ serialize_integer (m, l, sizeof (l));
+
+ /* Serialize vector in chunks for cache locality. */
+ while (l != 0)
+ {
+ u32 n = clib_min (SERIALIZE_VECTOR_CHUNK_SIZE, l);
+ serialize (m, f, p, n);
+ l -= n;
+ p += SERIALIZE_VECTOR_CHUNK_SIZE * elt_bytes;
+ }
+}
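+
+/* Example (a sketch): round-trip a u32 vector with the chunked
+   helpers above, via the vec_serialize / vec_unserialize macros from
+   serialize.h:
+
+     u32 *v = 0, *w = 0;
+     vec_add1 (v, 42);
+     vec_serialize (m, v, serialize_vec_32);
+     ...
+     vec_unserialize (m, &w, unserialize_vec_32);
+*/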
+
+void *
+unserialize_vector_ha (serialize_main_t * m,
+ u32 elt_bytes,
+ u32 header_bytes,
+ u32 align, u32 max_length, serialize_function_t * f)
+{
+ void *v, *p;
+ u32 l;
+
+ unserialize_integer (m, &l, sizeof (l));
+ if (l > max_length)
+ serialize_error (&m->header,
+ clib_error_create ("bad vector length %d", l));
+ p = v = _vec_resize (0, l, (uword) l * elt_bytes, header_bytes,
+ /* align */ align);
+
+ while (l != 0)
+ {
+ u32 n = clib_min (SERIALIZE_VECTOR_CHUNK_SIZE, l);
+ unserialize (m, f, p, n);
+ l -= n;
+ p += SERIALIZE_VECTOR_CHUNK_SIZE * elt_bytes;
+ }
+ return v;
+}
+
+void
+unserialize_aligned_vector (serialize_main_t * m, va_list * va)
+{
+ void **vec = va_arg (*va, void **);
+ u32 elt_bytes = va_arg (*va, u32);
+ serialize_function_t *f = va_arg (*va, serialize_function_t *);
+ u32 align = va_arg (*va, u32);
+
+ *vec = unserialize_vector_ha (m, elt_bytes,
+ /* header_bytes */ 0,
+ /* align */ align,
+ /* max_length */ ~0,
+ f);
+}
+
+void
+unserialize_vector (serialize_main_t * m, va_list * va)
+{
+ void **vec = va_arg (*va, void **);
+ u32 elt_bytes = va_arg (*va, u32);
+ serialize_function_t *f = va_arg (*va, serialize_function_t *);
+
+ *vec = unserialize_vector_ha (m, elt_bytes,
+ /* header_bytes */ 0,
+ /* align */ 0,
+ /* max_length */ ~0,
+ f);
+}
+
+void
+serialize_bitmap (serialize_main_t * m, uword * b)
+{
+ u32 l, i, n_u32s;
+
+ l = vec_len (b);
+ n_u32s = l * sizeof (b[0]) / sizeof (u32);
+ serialize_integer (m, n_u32s, sizeof (n_u32s));
+
+ /* Send 32 bit words, low-order word first on 64 bit. */
+ for (i = 0; i < l; i++)
+ {
+ serialize_integer (m, b[i], sizeof (u32));
+ if (BITS (uword) == 64)
+ serialize_integer (m, (u64) b[i] >> (u64) 32, sizeof (u32));
+ }
+}
+
+uword *
+unserialize_bitmap (serialize_main_t * m)
+{
+ uword *b = 0;
+ u32 i, n_u32s;
+
+ unserialize_integer (m, &n_u32s, sizeof (n_u32s));
+ if (n_u32s == 0)
+ return b;
+
+ i = (n_u32s * sizeof (u32) + sizeof (b[0]) - 1) / sizeof (b[0]);
+ vec_resize (b, i);
+ for (i = 0; i < n_u32s; i++)
+ {
+ u32 data;
+ unserialize_integer (m, &data, sizeof (u32));
+
+ /* Low-word is first on 64 bit. */
+ if (BITS (uword) == 64)
+ {
+ if ((i % 2) == 0)
+ b[i / 2] |= (u64) data << (u64) 0;
+ else
+ b[i / 2] |= (u64) data << (u64) 32;
+ }
+ else
+ {
+ b[i] = data;
+ }
+ }
+
+ return b;
+}
+
+void
+serialize_pool (serialize_main_t * m, va_list * va)
+{
+ void *pool = va_arg (*va, void *);
+ u32 elt_bytes = va_arg (*va, u32);
+ serialize_function_t *f = va_arg (*va, serialize_function_t *);
+ u32 l, lo, hi;
+ pool_header_t *p;
+
+ l = vec_len (pool);
+ serialize_integer (m, l, sizeof (u32));
+ if (l == 0)
+ return;
+ p = pool_header (pool);
+
+ /* No need to send free bitmap. Need to send index vector
+ to guarantee that unserialized pool will be identical. */
+ vec_serialize (m, p->free_indices, serialize_vec_32);
+
+ pool_foreach_region (lo, hi, pool,
+ serialize (m, f, pool + lo * elt_bytes, hi - lo));
+}
+
+static void *
+unserialize_pool_helper (serialize_main_t * m,
+ u32 elt_bytes, u32 align, serialize_function_t * f)
+{
+ void *v;
+ u32 i, l, lo, hi;
+ pool_header_t *p;
+
+ unserialize_integer (m, &l, sizeof (l));
+ if (l == 0)
+ {
+ return 0;
+ }
+
+ v = _vec_resize (0, l, (uword) l * elt_bytes, sizeof (p[0]), align);
+ p = pool_header (v);
+
+ vec_unserialize (m, &p->free_indices, unserialize_vec_32);
+
+ /* Construct free bitmap. */
+ p->free_bitmap = 0;
+ for (i = 0; i < vec_len (p->free_indices); i++)
+ p->free_bitmap = clib_bitmap_ori (p->free_bitmap, p->free_indices[i]);
+
+ pool_foreach_region (lo, hi, v,
+ unserialize (m, f, v + lo * elt_bytes, hi - lo));
+
+ return v;
+}
+
+void
+unserialize_pool (serialize_main_t * m, va_list * va)
+{
+ void **result = va_arg (*va, void **);
+ u32 elt_bytes = va_arg (*va, u32);
+ serialize_function_t *f = va_arg (*va, serialize_function_t *);
+ *result = unserialize_pool_helper (m, elt_bytes, /* align */ 0, f);
+}
+
+void
+unserialize_aligned_pool (serialize_main_t * m, va_list * va)
+{
+ void **result = va_arg (*va, void **);
+ u32 elt_bytes = va_arg (*va, u32);
+ u32 align = va_arg (*va, u32);
+ serialize_function_t *f = va_arg (*va, serialize_function_t *);
+ *result = unserialize_pool_helper (m, elt_bytes, align, f);
+}
+
+static void
+serialize_vec_heap_elt (serialize_main_t * m, va_list * va)
+{
+ heap_elt_t *e = va_arg (*va, heap_elt_t *);
+ u32 i, n = va_arg (*va, u32);
+ for (i = 0; i < n; i++)
+ {
+ serialize_integer (m, e[i].offset, sizeof (e[i].offset));
+ serialize_integer (m, e[i].next, sizeof (e[i].next));
+ serialize_integer (m, e[i].prev, sizeof (e[i].prev));
+ }
+}
+
+static void
+unserialize_vec_heap_elt (serialize_main_t * m, va_list * va)
+{
+ heap_elt_t *e = va_arg (*va, heap_elt_t *);
+ u32 i, n = va_arg (*va, u32);
+ for (i = 0; i < n; i++)
+ {
+ unserialize_integer (m, &e[i].offset, sizeof (e[i].offset));
+ unserialize_integer (m, &e[i].next, sizeof (e[i].next));
+ unserialize_integer (m, &e[i].prev, sizeof (e[i].prev));
+ }
+}
+
+void
+serialize_heap (serialize_main_t * m, va_list * va)
+{
+ void *heap = va_arg (*va, void *);
+ serialize_function_t *f = va_arg (*va, serialize_function_t *);
+ u32 i, l;
+ heap_header_t *h;
+
+ l = vec_len (heap);
+ serialize_integer (m, l, sizeof (u32));
+ if (l == 0)
+ return;
+
+ h = heap_header (heap);
+
+#define foreach_serialize_heap_header_integer \
+ _ (head) _ (tail) _ (used_count) _ (max_len) _ (flags) _ (elt_bytes)
+
+#define _(f) serialize_integer (m, h->f, sizeof (h->f));
+ foreach_serialize_heap_header_integer;
+#undef _
+
+ serialize_integer (m, vec_len (h->free_lists), sizeof (u32));
+ for (i = 0; i < vec_len (h->free_lists); i++)
+ vec_serialize (m, h->free_lists[i], serialize_vec_32);
+
+ vec_serialize (m, h->elts, serialize_vec_heap_elt);
+ vec_serialize (m, h->small_free_elt_free_index, serialize_vec_32);
+ vec_serialize (m, h->free_elts, serialize_vec_32);
+
+ /* Serialize data in heap. */
+ {
+ heap_elt_t *e, *end;
+ e = h->elts + h->head;
+ end = h->elts + h->tail;
+ while (1)
+ {
+ if (!heap_is_free (e))
+ {
+ void *v = heap + heap_offset (e) * h->elt_bytes;
+ u32 n = heap_elt_size (heap, e);
+ serialize (m, f, v, n);
+ }
+ if (e == end)
+ break;
+ e = heap_next (e);
+ }
+ }
+}
+
+void
+unserialize_heap (serialize_main_t * m, va_list * va)
+{
+ void **result = va_arg (*va, void **);
+ serialize_function_t *f = va_arg (*va, serialize_function_t *);
+ u32 i, vl, fl;
+ heap_header_t h;
+ void *heap;
+
+ unserialize_integer (m, &vl, sizeof (u32));
+ if (vl == 0)
+ {
+ *result = 0;
+ return;
+ }
+
+ memset (&h, 0, sizeof (h));
+#define _(f) unserialize_integer (m, &h.f, sizeof (h.f));
+ foreach_serialize_heap_header_integer;
+#undef _
+
+ unserialize_integer (m, &fl, sizeof (u32));
+ vec_resize (h.free_lists, fl);
+
+ for (i = 0; i < vec_len (h.free_lists); i++)
+ vec_unserialize (m, &h.free_lists[i], unserialize_vec_32);
+
+ vec_unserialize (m, &h.elts, unserialize_vec_heap_elt);
+ vec_unserialize (m, &h.small_free_elt_free_index, unserialize_vec_32);
+ vec_unserialize (m, &h.free_elts, unserialize_vec_32);
+
+ /* Re-construct used elt bitmap. */
+ if (CLIB_DEBUG > 0)
+ {
+ heap_elt_t *e;
+ vec_foreach (e, h.elts)
+ {
+ if (!heap_is_free (e))
+ h.used_elt_bitmap = clib_bitmap_ori (h.used_elt_bitmap, e - h.elts);
+ }
+ }
+
+ heap = *result = _heap_new (vl, h.elt_bytes);
+ heap_header (heap)[0] = h;
+
+ /* Unserialize data in heap. */
+ {
+ heap_elt_t *e, *end;
+ e = h.elts + h.head;
+ end = h.elts + h.tail;
+ while (1)
+ {
+ if (!heap_is_free (e))
+ {
+ void *v = heap + heap_offset (e) * h.elt_bytes;
+ u32 n = heap_elt_size (heap, e);
+ unserialize (m, f, v, n);
+ }
+ if (e == end)
+ break;
+ e = heap_next (e);
+ }
+ }
+}
+
+void
+serialize_magic (serialize_main_t * m, void *magic, u32 magic_bytes)
+{
+ void *p;
+ serialize_integer (m, magic_bytes, sizeof (magic_bytes));
+ p = serialize_get (m, magic_bytes);
+ clib_memcpy (p, magic, magic_bytes);
+}
+
+void
+unserialize_check_magic (serialize_main_t * m, void *magic, u32 magic_bytes)
+{
+ u32 l;
+ void *d;
+
+ unserialize_integer (m, &l, sizeof (l));
+ if (l != magic_bytes)
+ {
+ bad:
+ serialize_error_return (m, "bad magic number");
+ }
+ d = serialize_get (m, magic_bytes);
+ if (memcmp (magic, d, magic_bytes))
+ goto bad;
+}
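+
+/* Editor's sketch (not part of the original patch): framing a stream with a
+ * magic tag so a reader can reject mismatched data. The tag value here is
+ * hypothetical:
+ *
+ *   static char my_magic[] = "mydb-v1";
+ *
+ *   serialize_magic (m, my_magic, sizeof (my_magic));
+ *   // ... serialize the payload ...
+ *
+ * On the read side, before unserializing the payload:
+ *
+ *   unserialize_check_magic (m, my_magic, sizeof (my_magic));
+ *
+ * A mismatch longjmps out of the stream with "bad magic number".
+ */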
+
+clib_error_t *
+va_serialize (serialize_main_t * sm, va_list * va)
+{
+ serialize_main_header_t *m = &sm->header;
+ serialize_function_t *f = va_arg (*va, serialize_function_t *);
+ clib_error_t *error = 0;
+
+ m->recursion_level += 1;
+ if (m->recursion_level == 1)
+ {
+ uword r = clib_setjmp (&m->error_longjmp, 0);
+ error = uword_to_pointer (r, clib_error_t *);
+ }
+
+ if (!error)
+ f (sm, va);
+
+ m->recursion_level -= 1;
+ return error;
+}
+
+clib_error_t *
+serialize (serialize_main_t * m, ...)
+{
+ clib_error_t *error;
+ va_list va;
+
+ va_start (va, m);
+ error = va_serialize (m, &va);
+ va_end (va);
+ return error;
+}
+
+clib_error_t *
+unserialize (serialize_main_t * m, ...)
+{
+ clib_error_t *error;
+ va_list va;
+
+ va_start (va, m);
+ error = va_serialize (m, &va);
+ va_end (va);
+ return error;
+}
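+
+/* Editor's sketch (not part of the original patch): the top-level entry
+ * points take a serialize_function_t followed by its arguments; errors
+ * raised via serialize_error () come back as a return value rather than
+ * unwinding the caller. Given an open stream sm, and assuming serialize_32
+ * consumes a single u32 argument:
+ *
+ *   clib_error_t *error;
+ *
+ *   error = serialize (sm, serialize_32, (u32) 42);
+ *   if (error)
+ *     clib_error_report (error);
+ */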
+
+static void *
+serialize_write_not_inline (serialize_main_header_t * m,
+ serialize_stream_t * s,
+ uword n_bytes_to_write, uword flags)
+{
+ uword cur_bi, n_left_b, n_left_o;
+
+ ASSERT (s->current_buffer_index <= s->n_buffer_bytes);
+ cur_bi = s->current_buffer_index;
+ n_left_b = s->n_buffer_bytes - cur_bi;
+ n_left_o = vec_len (s->overflow_buffer);
+
+ /* Flush any pending overflow data into the main buffer before writing new data. */
+ do
+ {
+ if (n_left_o > 0 && n_left_b > 0)
+ {
+ uword n = clib_min (n_left_b, n_left_o);
+ clib_memcpy (s->buffer + cur_bi, s->overflow_buffer, n);
+ cur_bi += n;
+ n_left_b -= n;
+ n_left_o -= n;
+ if (n_left_o == 0)
+ _vec_len (s->overflow_buffer) = 0;
+ else
+ vec_delete (s->overflow_buffer, n, 0);
+ }
+
+ /* Call the data function when the buffer is full. The data function
+ should dispatch the current buffer and give us a new one to write
+ more data into. */
+ if (n_left_b == 0)
+ {
+ s->current_buffer_index = cur_bi;
+ m->data_function (m, s);
+ cur_bi = s->current_buffer_index;
+ n_left_b = s->n_buffer_bytes - cur_bi;
+ }
+ }
+ while (n_left_o > 0);
+
+ if (n_left_o > 0 || n_left_b < n_bytes_to_write)
+ {
+ u8 *r;
+ vec_add2 (s->overflow_buffer, r, n_bytes_to_write);
+ return r;
+ }
+ else
+ {
+ s->current_buffer_index = cur_bi + n_bytes_to_write;
+ return s->buffer + cur_bi;
+ }
+}
+
+static void *
+serialize_read_not_inline (serialize_main_header_t * m,
+ serialize_stream_t * s,
+ uword n_bytes_to_read, uword flags)
+{
+ uword cur_bi, cur_oi, n_left_b, n_left_o, n_left_to_read;
+
+ ASSERT (s->current_buffer_index <= s->n_buffer_bytes);
+
+ cur_bi = s->current_buffer_index;
+ cur_oi = s->current_overflow_index;
+
+ n_left_b = s->n_buffer_bytes - cur_bi;
+ n_left_o = vec_len (s->overflow_buffer) - cur_oi;
+
+ /* Read from overflow? */
+ if (n_left_o >= n_bytes_to_read)
+ {
+ s->current_overflow_index = cur_oi + n_bytes_to_read;
+ return vec_elt_at_index (s->overflow_buffer, cur_oi);
+ }
+
+ /* Reset overflow buffer. */
+ if (n_left_o == 0 && s->overflow_buffer)
+ {
+ s->current_overflow_index = 0;
+ _vec_len (s->overflow_buffer) = 0;
+ }
+
+ n_left_to_read = n_bytes_to_read;
+ while (n_left_to_read > 0)
+ {
+ uword n;
+
+ /* If the overflow and normal buffers together don't hold enough
+ data, call the read function. */
+ if (n_left_o + n_left_b < n_bytes_to_read)
+ {
+ /* Save any left over buffer in overflow vector. */
+ if (n_left_b > 0)
+ {
+ vec_add (s->overflow_buffer, s->buffer + cur_bi, n_left_b);
+ n_left_o += n_left_b;
+ n_left_to_read -= n_left_b;
+ /* Advance buffer to the end, even if
+ SERIALIZE_FLAG_NO_ADVANCE_CURRENT_BUFFER_INDEX is set. */
+ cur_bi = s->n_buffer_bytes;
+ n_left_b = 0;
+ }
+
+ if (m->data_function)
+ {
+ m->data_function (m, s);
+ cur_bi = s->current_buffer_index;
+ n_left_b = s->n_buffer_bytes - cur_bi;
+ }
+ }
+
+ /* On the first pass through the loop, return directly if the normal
+ buffer holds enough data and the overflow vector is empty. */
+ if (n_left_o == 0
+ && n_left_to_read == n_bytes_to_read && n_left_b >= n_left_to_read)
+ {
+ s->current_buffer_index = cur_bi + n_bytes_to_read;
+ return s->buffer + cur_bi;
+ }
+
+ if (!m->data_function || serialize_stream_is_end_of_stream (s))
+ {
+ /* This can happen for a peek at end of file.
+ Pad overflow buffer with 0s. */
+ vec_resize (s->overflow_buffer, n_left_to_read);
+ n_left_o += n_left_to_read;
+ n_left_to_read = 0;
+ }
+ else
+ {
+ /* Copy from buffer to overflow vector. */
+ n = clib_min (n_left_to_read, n_left_b);
+ vec_add (s->overflow_buffer, s->buffer + cur_bi, n);
+ cur_bi += n;
+ n_left_b -= n;
+ n_left_o += n;
+ n_left_to_read -= n;
+ }
+ }
+
+ s->current_buffer_index = cur_bi;
+ s->current_overflow_index = cur_oi + n_bytes_to_read;
+ return vec_elt_at_index (s->overflow_buffer, cur_oi);
+}
+
+void *
+serialize_read_write_not_inline (serialize_main_header_t * m,
+ serialize_stream_t * s,
+ uword n_bytes, uword flags)
+{
+ return (((flags & SERIALIZE_FLAG_IS_READ) ? serialize_read_not_inline :
+ serialize_write_not_inline) (m, s, n_bytes, flags));
+}
+
+static void
+serialize_read_write_close (serialize_main_header_t * m,
+ serialize_stream_t * s, uword flags)
+{
+ if (serialize_stream_is_end_of_stream (s))
+ return;
+
+ if (flags & SERIALIZE_FLAG_IS_WRITE)
+ /* "Write" 0 bytes to flush overflow vector. */
+ serialize_write_not_inline (m, s, /* n bytes */ 0, flags);
+
+ serialize_stream_set_end_of_stream (s);
+
+ /* Call it one last time to flush buffer and close. */
+ m->data_function (m, s);
+
+ vec_free (s->overflow_buffer);
+}
+
+void
+serialize_close (serialize_main_t * m)
+{
+ serialize_read_write_close (&m->header, &m->stream,
+ SERIALIZE_FLAG_IS_WRITE);
+}
+
+void
+unserialize_close (serialize_main_t * m)
+{
+ serialize_read_write_close (&m->header, &m->stream, SERIALIZE_FLAG_IS_READ);
+}
+
+void
+serialize_open_data (serialize_main_t * m, u8 * data, uword n_data_bytes)
+{
+ memset (m, 0, sizeof (m[0]));
+ m->stream.buffer = data;
+ m->stream.n_buffer_bytes = n_data_bytes;
+}
+
+void
+unserialize_open_data (serialize_main_t * m, u8 * data, uword n_data_bytes)
+{
+ serialize_open_data (m, data, n_data_bytes);
+}
+
+static void
+serialize_vector_write (serialize_main_header_t * m, serialize_stream_t * s)
+{
+ if (!serialize_stream_is_end_of_stream (s))
+ {
+ /* Double the buffer size (start at 64 bytes). */
+ uword l = vec_len (s->buffer);
+ vec_resize (s->buffer, l > 0 ? l : 64);
+ s->n_buffer_bytes = vec_len (s->buffer);
+ }
+}
+
+void
+serialize_open_vector (serialize_main_t * m, u8 * vector)
+{
+ memset (m, 0, sizeof (m[0]));
+ m->header.data_function = serialize_vector_write;
+ m->stream.buffer = vector;
+ m->stream.current_buffer_index = 0;
+ m->stream.n_buffer_bytes = vec_len (vector);
+}
+
+void *
+serialize_close_vector (serialize_main_t * m)
+{
+ serialize_stream_t *s = &m->stream;
+ void *result;
+
+ serialize_close (m); /* frees overflow buffer */
+
+ if (s->buffer)
+ _vec_len (s->buffer) = s->current_buffer_index;
+ result = s->buffer;
+ memset (m, 0, sizeof (m[0]));
+ return result;
+}
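+
+/* Editor's sketch (not part of the original patch): a vector-backed stream
+ * grows on demand and hands the finished buffer back to the caller:
+ *
+ *   serialize_main_t sm;
+ *   u8 *buf;
+ *
+ *   serialize_open_vector (&sm, 0);      // start with an empty vector
+ *   serialize_integer (&sm, 0x12345678, sizeof (u32));
+ *   buf = serialize_close_vector (&sm);  // vec_len (buf) is now 4
+ *   vec_free (buf);
+ *
+ * The bytes land in network order: 0x12 0x34 0x56 0x78.
+ */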
+
+void
+serialize_multiple_1 (serialize_main_t * m,
+ void *data, uword data_stride, uword n_data)
+{
+ u8 *d = data;
+ u8 *p;
+ uword n_left = n_data;
+
+ while (n_left >= 4)
+ {
+ p = serialize_get (m, 4 * sizeof (d[0]));
+ p[0] = d[0 * data_stride];
+ p[1] = d[1 * data_stride];
+ p[2] = d[2 * data_stride];
+ p[3] = d[3 * data_stride];
+ n_left -= 4;
+ d += 4 * data_stride;
+ }
+
+ if (n_left > 0)
+ {
+ p = serialize_get (m, n_left * sizeof (p[0]));
+ while (n_left > 0)
+ {
+ p[0] = d[0];
+ p += 1;
+ d += 1 * data_stride;
+ n_left -= 1;
+ }
+ }
+}
+
+void
+serialize_multiple_2 (serialize_main_t * m,
+ void *data, uword data_stride, uword n_data)
+{
+ void *d = data;
+ u16 *p;
+ uword n_left = n_data;
+
+ while (n_left >= 4)
+ {
+ p = serialize_get (m, 4 * sizeof (p[0]));
+ clib_mem_unaligned (p + 0, u16) =
+ clib_host_to_net_mem_u16 (d + 0 * data_stride);
+ clib_mem_unaligned (p + 1, u16) =
+ clib_host_to_net_mem_u16 (d + 1 * data_stride);
+ clib_mem_unaligned (p + 2, u16) =
+ clib_host_to_net_mem_u16 (d + 2 * data_stride);
+ clib_mem_unaligned (p + 3, u16) =
+ clib_host_to_net_mem_u16 (d + 3 * data_stride);
+ n_left -= 4;
+ d += 4 * data_stride;
+ }
+
+ if (n_left > 0)
+ {
+ p = serialize_get (m, n_left * sizeof (p[0]));
+ while (n_left > 0)
+ {
+ clib_mem_unaligned (p + 0, u16) =
+ clib_host_to_net_mem_u16 (d + 0 * data_stride);
+ p += 1;
+ d += 1 * data_stride;
+ n_left -= 1;
+ }
+ }
+}
+
+void
+serialize_multiple_4 (serialize_main_t * m,
+ void *data, uword data_stride, uword n_data)
+{
+ void *d = data;
+ u32 *p;
+ uword n_left = n_data;
+
+ while (n_left >= 4)
+ {
+ p = serialize_get (m, 4 * sizeof (p[0]));
+ clib_mem_unaligned (p + 0, u32) =
+ clib_host_to_net_mem_u32 (d + 0 * data_stride);
+ clib_mem_unaligned (p + 1, u32) =
+ clib_host_to_net_mem_u32 (d + 1 * data_stride);
+ clib_mem_unaligned (p + 2, u32) =
+ clib_host_to_net_mem_u32 (d + 2 * data_stride);
+ clib_mem_unaligned (p + 3, u32) =
+ clib_host_to_net_mem_u32 (d + 3 * data_stride);
+ n_left -= 4;
+ d += 4 * data_stride;
+ }
+
+ if (n_left > 0)
+ {
+ p = serialize_get (m, n_left * sizeof (p[0]));
+ while (n_left > 0)
+ {
+ clib_mem_unaligned (p + 0, u32) =
+ clib_host_to_net_mem_u32 (d + 0 * data_stride);
+ p += 1;
+ d += 1 * data_stride;
+ n_left -= 1;
+ }
+ }
+}
+
+void
+unserialize_multiple_1 (serialize_main_t * m,
+ void *data, uword data_stride, uword n_data)
+{
+ u8 *d = data;
+ u8 *p;
+ uword n_left = n_data;
+
+ while (n_left >= 4)
+ {
+ p = unserialize_get (m, 4 * sizeof (d[0]));
+ d[0 * data_stride] = p[0];
+ d[1 * data_stride] = p[1];
+ d[2 * data_stride] = p[2];
+ d[3 * data_stride] = p[3];
+ n_left -= 4;
+ d += 4 * data_stride;
+ }
+
+ if (n_left > 0)
+ {
+ p = unserialize_get (m, n_left * sizeof (p[0]));
+ while (n_left > 0)
+ {
+ d[0] = p[0];
+ p += 1;
+ d += 1 * data_stride;
+ n_left -= 1;
+ }
+ }
+}
+
+void
+unserialize_multiple_2 (serialize_main_t * m,
+ void *data, uword data_stride, uword n_data)
+{
+ void *d = data;
+ u16 *p;
+ uword n_left = n_data;
+
+ while (n_left >= 4)
+ {
+ p = unserialize_get (m, 4 * sizeof (p[0]));
+ clib_mem_unaligned (d + 0 * data_stride, u16) =
+ clib_net_to_host_mem_u16 (p + 0);
+ clib_mem_unaligned (d + 1 * data_stride, u16) =
+ clib_net_to_host_mem_u16 (p + 1);
+ clib_mem_unaligned (d + 2 * data_stride, u16) =
+ clib_net_to_host_mem_u16 (p + 2);
+ clib_mem_unaligned (d + 3 * data_stride, u16) =
+ clib_net_to_host_mem_u16 (p + 3);
+ n_left -= 4;
+ d += 4 * data_stride;
+ }
+
+ if (n_left > 0)
+ {
+ p = unserialize_get (m, n_left * sizeof (p[0]));
+ while (n_left > 0)
+ {
+ clib_mem_unaligned (d + 0 * data_stride, u16) =
+ clib_net_to_host_mem_u16 (p + 0);
+ p += 1;
+ d += 1 * data_stride;
+ n_left -= 1;
+ }
+ }
+}
+
+void
+unserialize_multiple_4 (serialize_main_t * m,
+ void *data, uword data_stride, uword n_data)
+{
+ void *d = data;
+ u32 *p;
+ uword n_left = n_data;
+
+ while (n_left >= 4)
+ {
+ p = unserialize_get (m, 4 * sizeof (p[0]));
+ clib_mem_unaligned (d + 0 * data_stride, u32) =
+ clib_net_to_host_mem_u32 (p + 0);
+ clib_mem_unaligned (d + 1 * data_stride, u32) =
+ clib_net_to_host_mem_u32 (p + 1);
+ clib_mem_unaligned (d + 2 * data_stride, u32) =
+ clib_net_to_host_mem_u32 (p + 2);
+ clib_mem_unaligned (d + 3 * data_stride, u32) =
+ clib_net_to_host_mem_u32 (p + 3);
+ n_left -= 4;
+ d += 4 * data_stride;
+ }
+
+ if (n_left > 0)
+ {
+ p = unserialize_get (m, n_left * sizeof (p[0]));
+ while (n_left > 0)
+ {
+ clib_mem_unaligned (d + 0 * data_stride, u32) =
+ clib_net_to_host_mem_u32 (p + 0);
+ p += 1;
+ d += 1 * data_stride;
+ n_left -= 1;
+ }
+ }
+}
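+
+/* Editor's sketch (not part of the original patch): the *_multiple routines
+ * gather one fixed-size field out of an array of structures, using the
+ * structure size as the stride. rec_t is a hypothetical record type:
+ *
+ *   typedef struct { u32 id; u16 port; } rec_t;
+ *   rec_t recs[16];
+ *
+ *   // serialize all 16 'port' fields, sizeof (rec_t) bytes apart
+ *   serialize_multiple (m, &recs[0].port, sizeof (recs[0].port),
+ *                       sizeof (rec_t), ARRAY_LEN (recs));
+ */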
+
+#ifdef CLIB_UNIX
+
+#include <unistd.h>
+#include <fcntl.h>
+
+static void
+unix_file_write (serialize_main_header_t * m, serialize_stream_t * s)
+{
+ int fd, n;
+
+ fd = s->data_function_opaque;
+ n = write (fd, s->buffer, s->current_buffer_index);
+ if (n < 0)
+ {
+ if (!unix_error_is_fatal (errno))
+ n = 0;
+ else
+ serialize_error (m, clib_error_return_unix (0, "write"));
+ }
+ if (n == s->current_buffer_index)
+ _vec_len (s->buffer) = 0;
+ else
+ vec_delete (s->buffer, n, 0);
+ s->current_buffer_index = vec_len (s->buffer);
+}
+
+static void
+unix_file_read (serialize_main_header_t * m, serialize_stream_t * s)
+{
+ int fd, n;
+
+ fd = s->data_function_opaque;
+ n = read (fd, s->buffer, vec_len (s->buffer));
+ if (n < 0)
+ {
+ if (!unix_error_is_fatal (errno))
+ n = 0;
+ else
+ serialize_error (m, clib_error_return_unix (0, "read"));
+ }
+ else if (n == 0)
+ serialize_stream_set_end_of_stream (s);
+ s->current_buffer_index = 0;
+ s->n_buffer_bytes = n;
+}
+
+static void
+serialize_open_unix_file_descriptor_helper (serialize_main_t * m, int fd,
+ uword is_read)
+{
+ memset (m, 0, sizeof (m[0]));
+ vec_resize (m->stream.buffer, 4096);
+
+ if (!is_read)
+ {
+ m->stream.n_buffer_bytes = vec_len (m->stream.buffer);
+ _vec_len (m->stream.buffer) = 0;
+ }
+
+ m->header.data_function = is_read ? unix_file_read : unix_file_write;
+ m->stream.data_function_opaque = fd;
+}
+
+void
+serialize_open_unix_file_descriptor (serialize_main_t * m, int fd)
+{
+ serialize_open_unix_file_descriptor_helper (m, fd, /* is_read */ 0);
+}
+
+void
+unserialize_open_unix_file_descriptor (serialize_main_t * m, int fd)
+{
+ serialize_open_unix_file_descriptor_helper (m, fd, /* is_read */ 1);
+}
+
+static clib_error_t *
+serialize_open_unix_file_helper (serialize_main_t * m, char *file,
+ uword is_read)
+{
+ int fd, mode;
+
+ mode = is_read ? O_RDONLY : O_RDWR | O_CREAT | O_TRUNC;
+ fd = open (file, mode, 0666);
+ if (fd < 0)
+ return clib_error_return_unix (0, "open `%s'", file);
+
+ serialize_open_unix_file_descriptor_helper (m, fd, is_read);
+ return 0;
+}
+
+clib_error_t *
+serialize_open_unix_file (serialize_main_t * m, char *file)
+{
+ return serialize_open_unix_file_helper (m, file, /* is_read */ 0);
+}
+
+clib_error_t *
+unserialize_open_unix_file (serialize_main_t * m, char *file)
+{
+ return serialize_open_unix_file_helper (m, file, /* is_read */ 1);
+}
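+
+/* Editor's sketch (not part of the original patch): a file-backed stream;
+ * serialize_close () flushes the 4096-byte write buffer and should be
+ * paired with every successful open. Assuming serialize_32 consumes a
+ * single u32 argument:
+ *
+ *   serialize_main_t sm;
+ *   clib_error_t *error;
+ *
+ *   error = serialize_open_unix_file (&sm, "/tmp/state");
+ *   if (!error)
+ *     {
+ *       error = serialize (&sm, serialize_32, (u32) 7);
+ *       serialize_close (&sm);
+ *     }
+ */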
+
+#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/serialize.h b/src/vppinfra/serialize.h
new file mode 100644
index 00000000..6cc2372e
--- /dev/null
+++ b/src/vppinfra/serialize.h
@@ -0,0 +1,443 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_clib_serialize_h
+#define included_clib_serialize_h
+
+#include <stdarg.h>
+#include <vppinfra/byte_order.h>
+#include <vppinfra/types.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/longjmp.h>
+
+struct serialize_main_header_t;
+struct serialize_stream_t;
+
+typedef void (serialize_data_function_t) (struct serialize_main_header_t * h,
+ struct serialize_stream_t * s);
+
+typedef struct serialize_stream_t
+{
+ /* Current data buffer being serialized/unserialized. */
+ u8 *buffer;
+
+ /* Size of buffer in bytes. */
+ u32 n_buffer_bytes;
+
+ /* Current index into buffer. */
+ u32 current_buffer_index;
+
+ /* Overflow buffer for when there is not enough room at the end of
+ buffer to hold serialized/unserialized data. */
+ u8 *overflow_buffer;
+
+ /* Current index in overflow buffer for reads. */
+ u32 current_overflow_index;
+
+ u32 flags;
+#define SERIALIZE_END_OF_STREAM (1 << 0)
+
+ uword data_function_opaque;
+
+ u32 opaque[64 - 4 * sizeof (u32) - 1 * sizeof (uword) -
+ 2 * sizeof (void *)];
+} serialize_stream_t;
+
+always_inline void
+serialize_stream_set_end_of_stream (serialize_stream_t * s)
+{
+ s->flags |= SERIALIZE_END_OF_STREAM;
+}
+
+always_inline uword
+serialize_stream_is_end_of_stream (serialize_stream_t * s)
+{
+ return (s->flags & SERIALIZE_END_OF_STREAM) != 0;
+}
+
+typedef struct serialize_main_header_t
+{
+ u32 recursion_level;
+
+ /* Data callback function and opaque data. */
+ serialize_data_function_t *data_function;
+
+ /* Error if signaled by data function. */
+ clib_error_t *error;
+
+ /* Exit unwind point if error occurs. */
+ clib_longjmp_t error_longjmp;
+} serialize_main_header_t;
+
+always_inline void
+serialize_error (serialize_main_header_t * m, clib_error_t * error)
+{
+ clib_longjmp (&m->error_longjmp, pointer_to_uword (error));
+}
+
+#define serialize_error_return(m,args...) \
+ serialize_error (&(m)->header, clib_error_return (0, args))
+
+void *serialize_read_write_not_inline (serialize_main_header_t * m,
+ serialize_stream_t * s,
+ uword n_bytes, uword flags);
+
+#define SERIALIZE_FLAG_IS_READ (1 << 0)
+#define SERIALIZE_FLAG_IS_WRITE (1 << 1)
+
+always_inline void *
+serialize_stream_read_write (serialize_main_header_t * header,
+ serialize_stream_t * s,
+ uword n_bytes, uword flags)
+{
+ uword i, j, l;
+
+ l = vec_len (s->overflow_buffer);
+ i = s->current_buffer_index;
+ j = i + n_bytes;
+ s->current_buffer_index = j;
+ if (l == 0 && j <= s->n_buffer_bytes)
+ {
+ return s->buffer + i;
+ }
+ else
+ {
+ s->current_buffer_index = i;
+ return serialize_read_write_not_inline (header, s, n_bytes, flags);
+ }
+}
+
+typedef struct
+{
+ serialize_main_header_t header;
+ serialize_stream_t stream;
+} serialize_main_t;
+
+always_inline void
+serialize_set_end_of_stream (serialize_main_t * m)
+{
+ serialize_stream_set_end_of_stream (&m->stream);
+}
+
+always_inline uword
+serialize_is_end_of_stream (serialize_main_t * m)
+{
+ return serialize_stream_is_end_of_stream (&m->stream);
+}
+
+typedef struct
+{
+ serialize_main_header_t header;
+ serialize_stream_t *streams;
+} serialize_multiple_main_t;
+
+typedef void (serialize_function_t) (serialize_main_t * m, va_list * va);
+
+always_inline void *
+unserialize_get (serialize_main_t * m, uword n_bytes)
+{
+ return serialize_stream_read_write (&m->header, &m->stream, n_bytes,
+ SERIALIZE_FLAG_IS_READ);
+}
+
+always_inline void *
+serialize_get (serialize_main_t * m, uword n_bytes)
+{
+ return serialize_stream_read_write (&m->header, &m->stream, n_bytes,
+ SERIALIZE_FLAG_IS_WRITE);
+}
+
+always_inline void
+serialize_integer (serialize_main_t * m, u64 x, u32 n_bytes)
+{
+ u8 *p = serialize_get (m, n_bytes);
+ if (n_bytes == 1)
+ p[0] = x;
+ else if (n_bytes == 2)
+ clib_mem_unaligned (p, u16) = clib_host_to_net_u16 (x);
+ else if (n_bytes == 4)
+ clib_mem_unaligned (p, u32) = clib_host_to_net_u32 (x);
+ else if (n_bytes == 8)
+ clib_mem_unaligned (p, u64) = clib_host_to_net_u64 (x);
+ else
+ ASSERT (0);
+}
+
+always_inline void
+unserialize_integer (serialize_main_t * m, void *x, u32 n_bytes)
+{
+ u8 *p = unserialize_get (m, n_bytes);
+ if (n_bytes == 1)
+ *(u8 *) x = p[0];
+ else if (n_bytes == 2)
+ *(u16 *) x = clib_net_to_host_unaligned_mem_u16 ((u16 *) p);
+ else if (n_bytes == 4)
+ *(u32 *) x = clib_net_to_host_unaligned_mem_u32 ((u32 *) p);
+ else if (n_bytes == 8)
+ *(u64 *) x = clib_net_to_host_unaligned_mem_u64 ((u64 *) p);
+ else
+ ASSERT (0);
+}
+
+/* As above but tries to be more compact. */
+always_inline void
+serialize_likely_small_unsigned_integer (serialize_main_t * m, u64 x)
+{
+ u64 r = x;
+ u8 *p;
+
+ /* Low bit set means it fits into 1 byte. */
+ if (r < (1 << 7))
+ {
+ p = serialize_get (m, 1);
+ p[0] = 1 + 2 * r;
+ return;
+ }
+
+ /* Low 2 bits == binary 10 means it fits into 2 bytes. */
+ r -= (1 << 7);
+ if (r < (1 << 14))
+ {
+ p = serialize_get (m, 2);
+ clib_mem_unaligned (p, u16) = clib_host_to_little_u16 (4 * r + 2);
+ return;
+ }
+
+ r -= (1 << 14);
+ if (r < (1 << 29))
+ {
+ p = serialize_get (m, 4);
+ clib_mem_unaligned (p, u32) = clib_host_to_little_u32 (8 * r + 4);
+ return;
+ }
+
+ p = serialize_get (m, 9);
+ p[0] = 0; /* Only low 3 bits are used. */
+ clib_mem_unaligned (p + 1, u64) = clib_host_to_little_u64 (x);
+}
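+
+/* Editor's note, worked examples of the encoding above:
+ *   x = 3              -> 1 byte:  1 + 2*3 = 0x07
+ *   x = 200            -> 2 bytes: 4*(200 - 128) + 2 = 0x0122, little-endian
+ *   x = 20000          -> 4 bytes: 8*(20000 - 128 - 16384) + 4, tag bits 100
+ *   x >= 2^29+2^14+2^7 -> 9 bytes: a zero tag byte, then x little-endian
+ */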
+
+always_inline u64
+unserialize_likely_small_unsigned_integer (serialize_main_t * m)
+{
+ u8 *p = unserialize_get (m, 1);
+ u64 r;
+ u32 y = p[0];
+
+ if (y & 1)
+ return y / 2;
+
+ r = 1 << 7;
+ if (y & 2)
+ {
+ p = unserialize_get (m, 1);
+ r += (y / 4) + (p[0] << 6);
+ return r;
+ }
+
+ r += 1 << 14;
+ if (y & 4)
+ {
+ p = unserialize_get (m, 3);
+ r += ((y / 8)
+ + (p[0] << (5 + 8 * 0))
+ + (p[1] << (5 + 8 * 1)) + (p[2] << (5 + 8 * 2)));
+ return r;
+ }
+
+ p = unserialize_get (m, 8);
+ r = clib_mem_unaligned (p, u64);
+ r = clib_little_to_host_u64 (r);
+
+ return r;
+}
+
+always_inline void
+serialize_likely_small_signed_integer (serialize_main_t * m, i64 s)
+{
+ u64 u = s < 0 ? -(2 * s + 1) : 2 * s;
+ serialize_likely_small_unsigned_integer (m, u);
+}
+
+always_inline i64
+unserialize_likely_small_signed_integer (serialize_main_t * m)
+{
+ u64 u = unserialize_likely_small_unsigned_integer (m);
+ i64 s = u / 2;
+ return (u & 1) ? -(s + 1) : s;
+}
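+
+/* Editor's note: the signed variants zig-zag values so small magnitudes
+ * encode small: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, ... */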
+
+void
+serialize_multiple_1 (serialize_main_t * m,
+ void *data, uword data_stride, uword n_data);
+void
+serialize_multiple_2 (serialize_main_t * m,
+ void *data, uword data_stride, uword n_data);
+void
+serialize_multiple_4 (serialize_main_t * m,
+ void *data, uword data_stride, uword n_data);
+
+void
+unserialize_multiple_1 (serialize_main_t * m,
+ void *data, uword data_stride, uword n_data);
+void
+unserialize_multiple_2 (serialize_main_t * m,
+ void *data, uword data_stride, uword n_data);
+void
+unserialize_multiple_4 (serialize_main_t * m,
+ void *data, uword data_stride, uword n_data);
+
+always_inline void
+serialize_multiple (serialize_main_t * m,
+ void *data,
+ uword n_data_bytes, uword data_stride, uword n_data)
+{
+ if (n_data_bytes == 1)
+ serialize_multiple_1 (m, data, data_stride, n_data);
+ else if (n_data_bytes == 2)
+ serialize_multiple_2 (m, data, data_stride, n_data);
+ else if (n_data_bytes == 4)
+ serialize_multiple_4 (m, data, data_stride, n_data);
+ else
+ ASSERT (0);
+}
+
+always_inline void
+unserialize_multiple (serialize_main_t * m,
+ void *data,
+ uword n_data_bytes, uword data_stride, uword n_data)
+{
+ if (n_data_bytes == 1)
+ unserialize_multiple_1 (m, data, data_stride, n_data);
+ else if (n_data_bytes == 2)
+ unserialize_multiple_2 (m, data, data_stride, n_data);
+ else if (n_data_bytes == 4)
+ unserialize_multiple_4 (m, data, data_stride, n_data);
+ else
+ ASSERT (0);
+}
+
+/* Basic types. */
+serialize_function_t serialize_64, unserialize_64;
+serialize_function_t serialize_32, unserialize_32;
+serialize_function_t serialize_16, unserialize_16;
+serialize_function_t serialize_8, unserialize_8;
+serialize_function_t serialize_f64, unserialize_f64;
+serialize_function_t serialize_f32, unserialize_f32;
+
+/* Basic vector types. */
+serialize_function_t serialize_vec_8, unserialize_vec_8;
+serialize_function_t serialize_vec_16, unserialize_vec_16;
+serialize_function_t serialize_vec_32, unserialize_vec_32;
+serialize_function_t serialize_vec_64, unserialize_vec_64;
+
+/* Serialize generic vectors. */
+serialize_function_t serialize_vector, unserialize_vector,
+ unserialize_aligned_vector;
+
+#define vec_serialize(m,v,f) \
+ serialize ((m), serialize_vector, (v), sizeof ((v)[0]), (f))
+
+#define vec_unserialize(m,v,f) \
+ unserialize ((m), unserialize_vector, (v), sizeof ((*(v))[0]), (f))
+
+#define vec_unserialize_aligned(m,v,f) \
+ unserialize ((m), unserialize_aligned_vector, (v), sizeof ((*(v))[0]), (f))
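+
+/* Editor's sketch (not part of the original patch): serializing a vector of
+ * u32s with the stock element functions declared above:
+ *
+ *   u32 *v = 0, *w = 0;
+ *
+ *   vec_add1 (v, 1);
+ *   vec_add1 (v, 2);
+ *   vec_serialize (m, v, serialize_vec_32);
+ *
+ *   // on the read side; the vector is allocated for you
+ *   vec_unserialize (m, &w, unserialize_vec_32);
+ */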
+
+/* Serialize pools. */
+serialize_function_t serialize_pool, unserialize_pool,
+ unserialize_aligned_pool;
+
+#define pool_serialize(m,v,f) \
+ serialize ((m), serialize_pool, (v), sizeof ((v)[0]), (f))
+
+#define pool_unserialize(m,v,f) \
+ unserialize ((m), unserialize_pool, (v), sizeof ((*(v))[0]), (f))
+
+#define pool_unserialize_aligned(m,v,a,f) \
+ unserialize ((m), unserialize_aligned_pool, (v), sizeof ((*(v))[0]), (a), (f))
+
+/* Serialize heaps. */
+serialize_function_t serialize_heap, unserialize_heap;
+
+void serialize_bitmap (serialize_main_t * m, uword * b);
+uword *unserialize_bitmap (serialize_main_t * m);
+
+void serialize_cstring (serialize_main_t * m, char *string);
+void unserialize_cstring (serialize_main_t * m, char **string);
+
+void serialize_close (serialize_main_t * m);
+void unserialize_close (serialize_main_t * m);
+
+void serialize_open_data (serialize_main_t * m, u8 * data,
+ uword n_data_bytes);
+void unserialize_open_data (serialize_main_t * m, u8 * data,
+ uword n_data_bytes);
+
+/* Starts serialization with expanding vector as buffer. */
+void serialize_open_vector (serialize_main_t * m, u8 * vector);
+
+/* Serialization is done: returns vector buffer to caller. */
+void *serialize_close_vector (serialize_main_t * m);
+
+void unserialize_open_vector (serialize_main_t * m, u8 * vector);
+
+#ifdef CLIB_UNIX
+clib_error_t *serialize_open_unix_file (serialize_main_t * m, char *file);
+clib_error_t *unserialize_open_unix_file (serialize_main_t * m, char *file);
+
+void serialize_open_unix_file_descriptor (serialize_main_t * m, int fd);
+void unserialize_open_unix_file_descriptor (serialize_main_t * m, int fd);
+#endif /* CLIB_UNIX */
+
+/* Main routines. */
+clib_error_t *serialize (serialize_main_t * m, ...);
+clib_error_t *unserialize (serialize_main_t * m, ...);
+clib_error_t *va_serialize (serialize_main_t * m, va_list * va);
+
+void serialize_magic (serialize_main_t * m, void *magic, u32 magic_bytes);
+void unserialize_check_magic (serialize_main_t * m, void *magic,
+ u32 magic_bytes);
+
+#endif /* included_clib_serialize_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/slist.c b/src/vppinfra/slist.c
new file mode 100644
index 00000000..892517bb
--- /dev/null
+++ b/src/vppinfra/slist.c
@@ -0,0 +1,336 @@
+/*
+ Copyright (c) 2012 Cisco and/or its affiliates.
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+#include <vppinfra/slist.h>
+
+/*
+ * skip-list implementation
+ *
+ * Good news / bad news. As alternatives to balanced binary trees go,
+ * this one seems pretty fast and is reasonably simple. There's a very
+ * limited amount that can be done to mitigate SDRAM read latency.
+ *
+ * Each active clib_slist_elt_t appears on between 1 and N lists. Each
+ * active element is always on the "level-0" list. Since most elements are
+ * *only* on level 0, we keep the level-0 (and level-1) next indices in the
+ * element itself. For elements on more than two lists, we switch to a
+ * vector. Hence, the "n" union in clib_slist_elt_t.
+ *
+ * The low-order bit of elt->n.next0[0] is 1 for inlined next indices,
+ * 0 when n.nexts holds a vector pointer (the allocator always aligns to
+ * at least a 4-byte boundary, so the pointer's low bit is 0). We can only
+ * represent 2e9 items, but since the
+ * practical performance limit is O(1e7), it doesn't matter.
+ *
+ * We create a "head" element which (by construction) is always
+ * lexically lighter than any other element. This makes a large number
+ * of irritating special cases go away.
+ *
+ * User code is in charge of comparing a supplied key with the key
+ * component of a user pool element. The user asks this code to add
+ * (opaque key, 32-bit index) pairs to the skip-list, or to delete them.
+ *
+ * The algorithm adds new elements to one or more lists.
+ * For levels greater than zero, the probability of a new element landing
+ * on the level-N list is branching_factor**N. branching_factor = 0.2 seems
+ * to work
+ * OK, yielding about 50 compares per search at O(1e7) items.
+ */
+
+clib_error_t *
+clib_slist_init (clib_slist_t * sp, f64 branching_factor,
+ clib_slist_key_compare_function_t compare,
+ format_function_t format_user_element)
+{
+ clib_slist_elt_t *head;
+ memset (sp, 0, sizeof (sp[0]));
+ sp->branching_factor = branching_factor;
+ sp->format_user_element = format_user_element;
+ sp->compare = compare;
+ sp->seed = 0xdeaddabe;
+ pool_get (sp->elts, head);
+ vec_add1 (head->n.nexts, (u32) ~ 0);
+ head->user_pool_index = (u32) ~ 0;
+ vec_validate (sp->path, 1);
+ vec_validate (sp->occupancy, 0);
+
+ return 0;
+}
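+
+/* Editor's sketch (not part of the original patch): wiring a skip-list to a
+ * caller-owned pool. my_elt_t, my_pool and my_compare are hypothetical:
+ *
+ *   typedef struct { u64 key; } my_elt_t;
+ *   static my_elt_t *my_pool;
+ *   static clib_slist_t sl;
+ *
+ *   static word
+ *   my_compare (void *key, u32 elt_index)
+ *   {
+ *     u64 k = *(u64 *) key;
+ *     my_elt_t *e = pool_elt_at_index (my_pool, elt_index);
+ *     return k < e->key ? -1 : (k > e->key ? +1 : 0);
+ *   }
+ *
+ *   u64 key = 99;
+ *   u32 index;
+ *
+ *   clib_slist_init (&sl, 0.2, my_compare, 0);
+ *   clib_slist_add (&sl, &key, 3);            // 3 = index into my_pool
+ *   index = clib_slist_search (&sl, &key, 0);
+ */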
+
+/*
+ * slist_search_internal
+ */
+static inline clib_slist_search_result_t
+slist_search_internal (clib_slist_t * sp, void *key, int need_full_path)
+{
+ int level, comp_result;
+ clib_slist_elt_t *search_elt, *head_elt;
+
+ sp->ncompares = 0;
+ /*
+ * index 0 is the magic listhead element which is
+ * lexically lighter than / to the left of every element
+ */
+ search_elt = head_elt = pool_elt_at_index (sp->elts, 0);
+
+ /*
+ * Initial negotiating position, only the head_elt is
+ * lighter than the supplied key
+ */
+ memset (sp->path, 0, vec_len (head_elt->n.nexts) * sizeof (u32));
+
+ /* Walk the fastest lane first */
+ level = vec_len (head_elt->n.nexts) - 1;
+ _vec_len (sp->path) = level + 1;
+
+ while (1)
+ {
+ u32 next_index_this_level;
+ clib_slist_elt_t *prefetch_elt;
+
+ /*
+ * Prefetching the next element at this level makes a measurable
+ * difference, but doesn't fix the dependent read stall problem
+ */
+ prefetch_elt = sp->elts +
+ clib_slist_get_next_at_level (search_elt, level);
+
+ CLIB_PREFETCH (prefetch_elt, CLIB_CACHE_LINE_BYTES, READ);
+
+ /* Compare the key with the current element */
+ comp_result = (search_elt == head_elt) ? 1 :
+ sp->compare (key, search_elt->user_pool_index);
+
+ sp->ncompares++;
+ /* key "lighter" than this element */
+ if (comp_result < 0)
+ {
+ /*
+ * Back up to previous item on this list
+ * and search the next finer-grained list
+ * starting there.
+ */
+ search_elt = pool_elt_at_index (sp->elts, sp->path[level]);
+ next_list:
+ if (level > 0)
+ {
+ level--;
+ continue;
+ }
+ else
+ {
+ return CLIB_SLIST_NO_MATCH;
+ }
+ }
+ /* Match */
+ if (comp_result == 0)
+ {
+ /*
+ * If we're trying to delete an element, we need to
+ * track down all of the elements which point at it.
+ * Otherwise, don't bother with it
+ */
+ if (need_full_path && level > 0)
+ {
+ search_elt = pool_elt_at_index (sp->elts, sp->path[level]);
+ level--;
+ continue;
+ }
+ level = vec_len (head_elt->n.nexts);
+ sp->path[level] = search_elt - sp->elts;
+ _vec_len (sp->path) = level + 1;
+ return CLIB_SLIST_MATCH;
+ }
+ /*
+ * comp_result positive, key is to the right of
+ * this element
+ */
+ sp->path[level] = search_elt - sp->elts;
+
+ /* Out of list at this level? */
+ next_index_this_level =
+ clib_slist_get_next_at_level (search_elt, level);
+ if (next_index_this_level == (u32) ~ 0)
+ goto next_list;
+
+ /* No, try the next element */
+ search_elt = pool_elt_at_index (sp->elts, next_index_this_level);
+ }
+ return 0; /* notreached */
+}
+
+u32
+clib_slist_search (clib_slist_t * sp, void *key, u32 * ncompares)
+{
+ clib_slist_search_result_t rv;
+
+ rv = slist_search_internal (sp, key, 0 /* dont need full path */ );
+ if (rv == CLIB_SLIST_MATCH)
+ {
+ clib_slist_elt_t *elt;
+ elt = pool_elt_at_index (sp->elts, sp->path[vec_len (sp->path) - 1]);
+ if (ncompares)
+ *ncompares = sp->ncompares;
+ return elt->user_pool_index;
+ }
+ return (u32) ~ 0;
+}
+
+void
+clib_slist_add (clib_slist_t * sp, void *key, u32 user_pool_index)
+{
+ clib_slist_elt_t *new_elt;
+ clib_slist_search_result_t search_result;
+ int level;
+
+ search_result = slist_search_internal (sp, key,
+ 0 /* don't need full path */ );
+
+ /* Special case: key exists, just replace user_pool_index */
+ if (PREDICT_FALSE (search_result == CLIB_SLIST_MATCH))
+ {
+ clib_slist_elt_t *elt;
+ elt = pool_elt_at_index (sp->elts, sp->path[0]);
+ elt->user_pool_index = user_pool_index;
+ return;
+ }
+
+ pool_get (sp->elts, new_elt);
+ new_elt->n.nexts = 0;
+ new_elt->user_pool_index = user_pool_index;
+
+ /* sp->path lists elements to the left of key, by level */
+ for (level = 0; level < vec_len (sp->path); level++)
+ {
+ clib_slist_elt_t *prev_elt_this_level;
+ u32 prev_elt_next_index_this_level;
+
+ /* Add to list at the current level */
+ prev_elt_this_level = pool_elt_at_index (sp->elts, sp->path[level]);
+ prev_elt_next_index_this_level = clib_slist_get_next_at_level
+ (prev_elt_this_level, level);
+
+ clib_slist_set_next_at_level (new_elt, prev_elt_next_index_this_level,
+ level);
+
+ clib_slist_set_next_at_level (prev_elt_this_level, new_elt - sp->elts,
+ level);
+ sp->occupancy[level]++;
+
+ /* Randomly add to the next-higher level */
+ if (random_f64 (&sp->seed) > sp->branching_factor)
+ break;
+ }
+ {
+ /* Time to add a new ply? */
+ clib_slist_elt_t *head_elt = pool_elt_at_index (sp->elts, 0);
+ int top_level = vec_len (head_elt->n.nexts) - 1;
+ if (((f64) sp->occupancy[top_level]) * sp->branching_factor > 1.0)
+ {
+ vec_add1 (sp->occupancy, 0);
+ vec_add1 (head_elt->n.nexts, (u32) ~ 0);
+ /* the full-match search path stores n+1 entries */
+ vec_validate (sp->path, vec_len (head_elt->n.nexts));
+ }
+ }
+}
+
+clib_slist_search_result_t
+clib_slist_del (clib_slist_t * sp, void *key)
+{
+ clib_slist_search_result_t search_result;
+ clib_slist_elt_t *del_elt;
+ int level;
+
+ search_result = slist_search_internal (sp, key, 1 /* need full path */ );
+
+ if (PREDICT_FALSE (search_result == CLIB_SLIST_NO_MATCH))
+ return search_result;
+
+ del_elt = pool_elt_at_index (sp->elts, sp->path[vec_len (sp->path) - 1]);
+ ASSERT (vec_len (sp->path) > 1);
+
+ for (level = 0; level < vec_len (sp->path) - 1; level++)
+ {
+ clib_slist_elt_t *path_elt;
+ u32 path_elt_next_index;
+
+ path_elt = pool_elt_at_index (sp->elts, sp->path[level]);
+ path_elt_next_index = clib_slist_get_next_at_level (path_elt, level);
+
+ /* Splice the victim out of this level's list if path_elt points at it */
+ if (path_elt_next_index == del_elt - sp->elts)
+ {
+ sp->occupancy[level]--;
+ path_elt_next_index = clib_slist_get_next_at_level (del_elt, level);
+ clib_slist_set_next_at_level (path_elt, path_elt_next_index, level);
+ }
+ }
+
+ /* If this element is on more than two lists it has a vector of nexts */
+ if (!(del_elt->n.next0[0] & 1))
+ vec_free (del_elt->n.nexts);
+ pool_put (sp->elts, del_elt);
+ return CLIB_SLIST_MATCH;
+}
+
+u8 *
+format_slist (u8 * s, va_list * args)
+{
+ clib_slist_t *sl = va_arg (*args, clib_slist_t *);
+ int verbose = va_arg (*args, int);
+ int i;
+ clib_slist_elt_t *head_elt, *elt;
+
+ s = format (s, "slist 0x%x, %u items, branching_factor %.2f\n", sl,
+ sl->occupancy ? sl->occupancy[0] : 0, sl->branching_factor);
+
+ if (pool_elts (sl->elts) == 0)
+ return s;
+
+ head_elt = pool_elt_at_index (sl->elts, 0);
+
+ for (i = 0; i < vec_len (head_elt->n.nexts); i++)
+ {
+ s = format (s, "level %d: %d elts\n", i,
+ sl->occupancy ? sl->occupancy[i] : 0);
+
+ if (verbose && head_elt->n.nexts[i] != (u32) ~ 0)
+ {
+ elt = pool_elt_at_index (sl->elts, head_elt->n.nexts[i]);
+ while (elt)
+ {
+ u32 next_index;
+ s = format (s, "%U(%d) ", sl->format_user_element,
+ elt->user_pool_index, elt - sl->elts);
+ next_index = clib_slist_get_next_at_level (elt, i);
+ ASSERT (next_index != 0x7fffffff);
+ if (next_index == (u32) ~ 0)
+ break;
+ else
+ elt = pool_elt_at_index (sl->elts, next_index);
+ }
+ }
+ s = format (s, "\n");
+ }
+ return s;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/slist.h b/src/vppinfra/slist.h
new file mode 100644
index 00000000..a7c77e27
--- /dev/null
+++ b/src/vppinfra/slist.h
@@ -0,0 +1,145 @@
+/*
+ Copyright (c) 2012 Cisco and/or its affiliates.
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+#ifndef included_slist_h
+#define included_slist_h
+
+#include <stdarg.h>
+#include <vppinfra/clib.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/error.h>
+#include <vppinfra/format.h>
+#include <vppinfra/cache.h>
+
+typedef word (clib_slist_key_compare_function_t)
+ (void *key, u32 elt_pool_index);
+
+typedef enum
+{
+ CLIB_SLIST_MATCH = 0,
+ CLIB_SLIST_NO_MATCH
+} clib_slist_search_result_t;
+
+typedef struct
+{
+ /* Vector of next elements. Every valid instance has at least one */
+ union
+ {
+ u32 next0[2];
+ u32 *nexts;
+ } n;
+
+ /* Index of item in user's pool */
+ u32 user_pool_index;
+ /* $$$ pad to even divisor of cache line */
+} clib_slist_elt_t;
+
+static inline u32
+clib_slist_get_next_at_level (clib_slist_elt_t * elt, int level)
+{
+ if (elt->n.next0[0] & 1)
+ {
+ ASSERT (level < 2);
+ if (level == 1)
+ return elt->n.next0[1];
+ /* preserve ~0 (end of list) */
+ return (elt->n.next0[0] == (u32) ~ 0) ? elt->n.next0[0] :
+ (elt->n.next0[0] >> 1);
+ }
+ else
+ {
+ ASSERT (level < vec_len (elt->n.nexts));
+ return elt->n.nexts[level];
+ }
+}
+
+static inline void
+clib_slist_set_next_at_level (clib_slist_elt_t * elt, u32 index, int level)
+{
+ u32 old_level0_value[2];
+ /* level0 and not a vector */
+ if (level < 2 && (elt->n.next0[0] == 0 || elt->n.next0[0] & 1))
+ {
+ if (level == 0)
+ {
+ elt->n.next0[0] = (index << 1) | 1;
+ return;
+ }
+ elt->n.next0[1] = index;
+ return;
+ }
+ /* Inlined values present? Save them before switching to a vector. */
+ if (elt->n.next0[0] & 1)
+ {
+ old_level0_value[0] = (elt->n.next0[0] == (u32) ~ 0) ?
+ elt->n.next0[0] : elt->n.next0[0] >> 1;
+ old_level0_value[1] = elt->n.next0[1];
+ elt->n.nexts = 0;
+ vec_add1 (elt->n.nexts, old_level0_value[0]);
+ vec_add1 (elt->n.nexts, old_level0_value[1]);
+ }
+ vec_validate (elt->n.nexts, level);
+ elt->n.nexts[level] = index;
+}
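+
+/* Editor's note, a worked example of the inline encoding above: setting the
+ * level-0 next index to 5 on a fresh element stores (5 << 1) | 1 = 11 in
+ * next0[0]; reading it back shifts the tag bit away (11 >> 1 == 5). Once a
+ * level beyond 1 is needed, both inlined values move into the n.nexts
+ * vector, whose pointer has a 0 low bit thanks to allocator alignment.
+ */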
+
+
+typedef struct
+{
+ /* pool of skip-list elements */
+ clib_slist_elt_t *elts;
+
+ /* last search path */
+ u32 *path;
+
+ /* last search number of compares */
+ u32 ncompares;
+
+ /* occupancy stats */
+ u32 *occupancy;
+
+ /* Comparison function */
+ clib_slist_key_compare_function_t *compare;
+
+ /* Format function */
+ format_function_t *format_user_element;
+
+ /* items appear in successive plies with Pr (1 / branching_factor) */
+ f64 branching_factor;
+
+ /* random seed */
+ u32 seed;
+} clib_slist_t;
+
+clib_error_t *clib_slist_init (clib_slist_t * sp, f64 branching_factor,
+ clib_slist_key_compare_function_t compare,
+ format_function_t format_user_element);
+
+format_function_t format_slist;
+
+void clib_slist_add (clib_slist_t * sp, void *key, u32 user_pool_index);
+clib_slist_search_result_t clib_slist_del (clib_slist_t * sp, void *key);
+u32 clib_slist_search (clib_slist_t * sp, void *key, u32 * ncompares);
+
+#endif /* included_slist_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/smp.c b/src/vppinfra/smp.c
new file mode 100644
index 00000000..f603283e
--- /dev/null
+++ b/src/vppinfra/smp.c
@@ -0,0 +1,325 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/longjmp.h>
+#include <vppinfra/mheap.h>
+#include <vppinfra/os.h>
+
+void
+clib_smp_free (clib_smp_main_t * m)
+{
+ clib_mem_vm_free (m->vm_base,
+ (uword) ((1 + m->n_cpus) << m->log2_n_per_cpu_vm_bytes));
+}
+
+static uword
+allocate_per_cpu_mheap (uword cpu)
+{
+ clib_smp_main_t *m = &clib_smp_main;
+ void *heap;
+ uword vm_size, stack_size, mheap_flags;
+
+ ASSERT (os_get_thread_index () == cpu);
+
+ vm_size = (uword) 1 << m->log2_n_per_cpu_vm_bytes;
+ stack_size = (uword) 1 << m->log2_n_per_cpu_stack_bytes;
+
+ mheap_flags = MHEAP_FLAG_SMALL_OBJECT_CACHE;
+
+ /* Heap extends up to start of stack. */
+ heap = mheap_alloc_with_flags (clib_smp_vm_base_for_cpu (m, cpu),
+ vm_size - stack_size, mheap_flags);
+ clib_mem_set_heap (heap);
+
+ if (cpu == 0)
+ {
+ /* Now that we have a heap, allocate main structure on cpu 0. */
+ vec_resize (m->per_cpu_mains, m->n_cpus);
+
+ /* Allocate shared global heap (thread safe). */
+ m->global_heap =
+ mheap_alloc_with_flags (clib_smp_vm_base_for_cpu (m, cpu + m->n_cpus),
+ vm_size,
+ mheap_flags | MHEAP_FLAG_THREAD_SAFE);
+ }
+
+ m->per_cpu_mains[cpu].heap = heap;
+ return 0;
+}
+
+void
+clib_smp_init (void)
+{
+ clib_smp_main_t *m = &clib_smp_main;
+ uword cpu;
+
+ m->vm_base =
+ clib_mem_vm_alloc ((uword) (m->n_cpus + 1) << m->log2_n_per_cpu_vm_bytes);
+ if (!m->vm_base)
+ clib_error ("error allocating virtual memory");
+
+ for (cpu = 0; cpu < m->n_cpus; cpu++)
+ clib_calljmp (allocate_per_cpu_mheap, cpu,
+ clib_smp_stack_top_for_cpu (m, cpu));
+}
+
+void
+clib_smp_lock_init (clib_smp_lock_t ** pl)
+{
+ clib_smp_lock_t *l;
+ uword i, n_bytes, n_fifo_elts;
+
+ /* No locking necessary if n_cpus <= 1.
+ Null means no locking is necessary. */
+ if (clib_smp_main.n_cpus < 2)
+ {
+ *pl = 0;
+ return;
+ }
+
+ /* Need n_cpus - 1 elts in waiting fifo. One CPU holds lock
+ and others could potentially be waiting. */
+ n_fifo_elts = clib_smp_main.n_cpus - 1;
+
+ n_bytes = sizeof (l[0]) + n_fifo_elts * sizeof (l->waiting_fifo[0]);
+ ASSERT_AND_PANIC (n_bytes % CLIB_CACHE_LINE_BYTES == 0);
+
+ l = clib_mem_alloc_aligned (n_bytes, CLIB_CACHE_LINE_BYTES);
+
+ memset (l, 0, n_bytes);
+ l->n_waiting_fifo_elts = n_fifo_elts;
+
+ for (i = 0; i < l->n_waiting_fifo_elts; i++)
+ l->waiting_fifo[i].wait_type = CLIB_SMP_LOCK_WAIT_EMPTY;
+
+ *pl = l;
+}
+
+void
+clib_smp_lock_free (clib_smp_lock_t ** pl)
+{
+ if (*pl)
+ clib_mem_free (*pl);
+ *pl = 0;
+}
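+
+/* Editor's sketch (not part of the original patch): locks are allocated
+ * lazily, and a null pointer means "uniprocessor, no locking needed":
+ *
+ *   clib_smp_lock_t *l;
+ *
+ *   clib_smp_lock_init (&l);   // l stays 0 when n_cpus < 2
+ *
+ *   clib_smp_lock_free (&l);   // safe on a null lock; resets l to 0
+ */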
+
+void
+clib_smp_lock_slow_path (clib_smp_lock_t * l,
+ uword my_cpu,
+ clib_smp_lock_header_t h0, clib_smp_lock_type_t type)
+{
+ clib_smp_lock_header_t h1, h2, h3;
+ uword is_reader = type == CLIB_SMP_LOCK_TYPE_READER;
+ uword n_fifo_elts = l->n_waiting_fifo_elts;
+ uword my_tail;
+
+ /* Atomically advance waiting FIFO tail pointer; my_tail will point
+ to entry where we can insert ourselves to wait for lock to be granted. */
+ while (1)
+ {
+ h1 = h0;
+ my_tail = h1.waiting_fifo.head_index + h1.waiting_fifo.n_elts;
+ my_tail = my_tail >= n_fifo_elts ? my_tail - n_fifo_elts : my_tail;
+ h1.waiting_fifo.n_elts += 1;
+ h1.request_cpu = my_cpu;
+
+ ASSERT_AND_PANIC (h1.waiting_fifo.n_elts <= n_fifo_elts);
+ ASSERT_AND_PANIC (my_tail >= 0 && my_tail < n_fifo_elts);
+
+ h2 = clib_smp_lock_set_header (l, h1, h0);
+
+ /* Tail successfully advanced? */
+ if (clib_smp_lock_header_is_equal (h0, h2))
+ break;
+
+ /* The CPU holding the lock may have released it while we spun; for plain spin locks, try to take it directly. */
+ else if (type == CLIB_SMP_LOCK_TYPE_SPIN)
+ {
+ while (!h2.writer_has_lock)
+ {
+ ASSERT_AND_PANIC (h2.waiting_fifo.n_elts == 0);
+ h1 = h2;
+ h1.request_cpu = my_cpu;
+ h1.writer_has_lock = 1;
+
+ h3 = clib_smp_lock_set_header (l, h1, h2);
+
+ /* Got it? */
+ if (clib_smp_lock_header_is_equal (h2, h3))
+ return;
+
+ h2 = h3;
+ }
+ }
+
+ /* Try to advance tail again. */
+ h0 = h2;
+ }
+
+ {
+ clib_smp_lock_waiting_fifo_elt_t *w;
+
+ w = l->waiting_fifo + my_tail;
+
+ while (w->wait_type != CLIB_SMP_LOCK_WAIT_EMPTY)
+ clib_smp_pause ();
+
+ w->wait_type = (is_reader
+ ? CLIB_SMP_LOCK_WAIT_READER : CLIB_SMP_LOCK_WAIT_WRITER);
+
+ /* Wait until CPU holding the lock grants us the lock. */
+ while (w->wait_type != CLIB_SMP_LOCK_WAIT_DONE)
+ clib_smp_pause ();
+
+ w->wait_type = CLIB_SMP_LOCK_WAIT_EMPTY;
+ }
+}
+
+void
+clib_smp_unlock_slow_path (clib_smp_lock_t * l,
+ uword my_cpu,
+ clib_smp_lock_header_t h0,
+ clib_smp_lock_type_t type)
+{
+ clib_smp_lock_header_t h1, h2;
+ clib_smp_lock_waiting_fifo_elt_t *head;
+ clib_smp_lock_wait_type_t head_wait_type;
+ uword is_reader = type == CLIB_SMP_LOCK_TYPE_READER;
+ uword n_fifo_elts = l->n_waiting_fifo_elts;
+ uword head_index, must_wait_for_readers;
+
+ while (1)
+ {
+ /* Advance waiting fifo giving lock to first waiter. */
+ while (1)
+ {
+ ASSERT_AND_PANIC (h0.waiting_fifo.n_elts != 0);
+
+ h1 = h0;
+
+ head_index = h1.waiting_fifo.head_index;
+ head = l->waiting_fifo + head_index;
+ if (is_reader)
+ {
+ ASSERT_AND_PANIC (h1.n_readers_with_lock > 0);
+ h1.n_readers_with_lock -= 1;
+ }
+ else
+ {
+ /* Writer will already have lock. */
+ ASSERT_AND_PANIC (h1.writer_has_lock);
+ }
+
+ while ((head_wait_type =
+ head->wait_type) == CLIB_SMP_LOCK_WAIT_EMPTY)
+ clib_smp_pause ();
+
+ /* Don't advance FIFO to writer unless all readers have unlocked. */
+ must_wait_for_readers =
+ (type != CLIB_SMP_LOCK_TYPE_SPIN
+ && head_wait_type == CLIB_SMP_LOCK_WAIT_WRITER
+ && h1.n_readers_with_lock != 0);
+
+ if (!must_wait_for_readers)
+ {
+ head_index += 1;
+ h1.waiting_fifo.n_elts -= 1;
+ if (type != CLIB_SMP_LOCK_TYPE_SPIN)
+ {
+ if (head_wait_type == CLIB_SMP_LOCK_WAIT_WRITER)
+ h1.writer_has_lock = h1.n_readers_with_lock == 0;
+ else
+ {
+ h1.writer_has_lock = 0;
+ h1.n_readers_with_lock += 1;
+ }
+ }
+ }
+
+ h1.waiting_fifo.head_index =
+ head_index == n_fifo_elts ? 0 : head_index;
+ h1.request_cpu = my_cpu;
+
+ ASSERT_AND_PANIC (h1.waiting_fifo.head_index >= 0
+ && h1.waiting_fifo.head_index < n_fifo_elts);
+ ASSERT_AND_PANIC (h1.waiting_fifo.n_elts >= 0
+ && h1.waiting_fifo.n_elts <= n_fifo_elts);
+
+ h2 = clib_smp_lock_set_header (l, h1, h0);
+
+ if (clib_smp_lock_header_is_equal (h2, h0))
+ break;
+
+ h0 = h2;
+
+ if (h0.waiting_fifo.n_elts == 0)
+ return clib_smp_unlock_inline (l, type);
+ }
+
+ if (must_wait_for_readers)
+ return;
+
+ /* Wake up head of waiting fifo. */
+ {
+ uword done_waking;
+
+ /* Shift lock to first thread waiting in fifo. */
+ head->wait_type = CLIB_SMP_LOCK_WAIT_DONE;
+
+ /* For read locks we may be able to wake multiple readers. */
+ done_waking = 1;
+ if (head_wait_type == CLIB_SMP_LOCK_WAIT_READER)
+ {
+ uword hi = h0.waiting_fifo.head_index;
+ if (h0.waiting_fifo.n_elts != 0
+ && l->waiting_fifo[hi].wait_type == CLIB_SMP_LOCK_WAIT_READER)
+ done_waking = 0;
+ }
+
+ if (done_waking)
+ break;
+ }
+ }
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/smp.h b/src/vppinfra/smp.h
new file mode 100644
index 00000000..7e703b3d
--- /dev/null
+++ b/src/vppinfra/smp.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001-2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_clib_smp_h
+#define included_clib_smp_h
+
+#include <vppinfra/cache.h>
+#include <vppinfra/os.h> /* for os_panic */
+
+#define clib_smp_compare_and_swap(addr,new,old) __sync_val_compare_and_swap(addr,old,new)
+#define clib_smp_swap(addr,new) __sync_lock_test_and_set(addr,new)
+#define clib_smp_atomic_add(addr,increment) __sync_fetch_and_add(addr,increment)
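+
+/* Editor's sketch (not part of the original patch): the wrappers map onto
+ * GCC __sync builtins; note that clib_smp_compare_and_swap takes (addr,
+ * new, old) while the underlying builtin takes (addr, old, new). new_val
+ * and expected are hypothetical:
+ *
+ *   static u32 counter;
+ *
+ *   u32 old = clib_smp_atomic_add (&counter, 1);  // returns value before add
+ *
+ *   // returns the value observed at *addr; equals 'expected' iff the swap won
+ *   u32 seen = clib_smp_compare_and_swap (&counter, new_val, expected);
+ */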
+
+#if defined (i386) || defined (__x86_64__)
+#define clib_smp_pause() do { asm volatile ("pause"); } while (0)
+#endif
+
+#ifndef clib_smp_pause
+#define clib_smp_pause() do { } while (0)
+#endif
+
+#ifdef CLIB_UNIX
+#include <sched.h>
+
+always_inline void
+os_sched_yield (void)
+{
+ sched_yield ();
+}
+#else
+always_inline void
+os_sched_yield (void)
+{
+ clib_smp_pause ();
+}
+#endif
+
+
+#endif /* included_clib_smp_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/smp_fifo.c b/src/vppinfra/smp_fifo.c
new file mode 100644
index 00000000..bb74064d
--- /dev/null
+++ b/src/vppinfra/smp_fifo.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2012 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/smp_fifo.h>
+#include <vppinfra/mem.h>
+
+clib_smp_fifo_t *
+clib_smp_fifo_init (uword max_n_elts, uword n_bytes_per_elt)
+{
+ clib_smp_fifo_t *f;
+ uword n_bytes_per_elt_cache_aligned;
+
+ f = clib_mem_alloc_aligned (sizeof (f[0]), CLIB_CACHE_LINE_BYTES);
+
+ memset (f, 0, sizeof (f[0]));
+
+ max_n_elts = max_n_elts ? max_n_elts : 32;
+ f->log2_max_n_elts = max_log2 (max_n_elts);
+ f->max_n_elts_less_one = (1 << f->log2_max_n_elts) - 1;
+
+ n_bytes_per_elt_cache_aligned =
+ clib_smp_fifo_round_elt_bytes (n_bytes_per_elt);
+ clib_exec_on_global_heap ({
+ f->data = clib_mem_alloc_aligned (n_bytes_per_elt_cache_aligned
+ << f->log2_max_n_elts,
+ CLIB_CACHE_LINE_BYTES);
+ });
+
+ /* Zero all data and mark all elements free. */
+ {
+ uword i;
+ for (i = 0; i <= f->max_n_elts_less_one; i++)
+ {
+ void *d = clib_smp_fifo_elt_at_index (f, n_bytes_per_elt, i);
+ clib_smp_fifo_data_footer_t *t;
+
+ memset (d, 0, n_bytes_per_elt_cache_aligned);
+
+ t = clib_smp_fifo_get_data_footer (d, n_bytes_per_elt);
+ clib_smp_fifo_data_footer_set_state (t,
+ CLIB_SMP_FIFO_DATA_STATE_free);
+ }
+ }
+
+ return f;
+}
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/smp_fifo.h b/src/vppinfra/smp_fifo.h
new file mode 100644
index 00000000..c74a77c8
--- /dev/null
+++ b/src/vppinfra/smp_fifo.h
@@ -0,0 +1,313 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2012 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_clib_smp_vec_h
+#define included_clib_smp_vec_h
+
+#include <vppinfra/smp.h>
+
+#define foreach_clib_smp_fifo_data_state \
+ _ (free) \
+ _ (write_alloc) \
+ _ (write_done) \
+ _ (read_fetch)
+
+typedef enum
+{
+#define _(f) CLIB_SMP_FIFO_DATA_STATE_##f,
+ foreach_clib_smp_fifo_data_state
+#undef _
+ CLIB_SMP_FIFO_N_DATA_STATE,
+} clib_smp_fifo_data_state_t;
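+
+/* Element life cycle, as implemented by the helpers below:
+     free -> write_alloc -> write_done -> read_fetch -> free.
+   Each element carries a footer encoding CLIB_SMP_DATA_FOOTER_MAGIC plus
+   its current state; writers and readers poll that footer, so an element's
+   data is never exposed to a reader before its state reaches write_done. */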
+
+/* Footer at end of each data element. */
+typedef struct
+{
+  /* Magic number marking a valid footer, with the element state encoded in the low bits. */
+ u32 magic_state;
+} clib_smp_fifo_data_footer_t;
+
+#define CLIB_SMP_DATA_FOOTER_MAGIC 0xfafbfcf0
+
+always_inline clib_smp_fifo_data_state_t
+clib_smp_fifo_data_footer_get_state (clib_smp_fifo_data_footer_t * f)
+{
+ u32 s = f->magic_state - CLIB_SMP_DATA_FOOTER_MAGIC;
+
+ /* Check that magic number plus state is still valid. */
+ if (s >= CLIB_SMP_FIFO_N_DATA_STATE)
+ os_panic ();
+
+ return s;
+}
+
+always_inline void
+clib_smp_fifo_data_footer_set_state (clib_smp_fifo_data_footer_t * f,
+ clib_smp_fifo_data_state_t s)
+{
+ f->magic_state = CLIB_SMP_DATA_FOOTER_MAGIC + s;
+}
+
+typedef struct
+{
+  /* Read and write indices, atomically incremented on each
+     successful read or write. */
+ u32 read_index, write_index;
+
+ /* Power of 2 number of elements in fifo less one. */
+ u32 max_n_elts_less_one;
+
+ /* Log2 of above. */
+ u32 log2_max_n_elts;
+
+ /* Cache aligned data. */
+ void *data;
+} clib_smp_fifo_t;
+
+/* External functions. */
+clib_smp_fifo_t *clib_smp_fifo_init (uword max_n_elts, uword n_bytes_per_elt);
+
+/* Element sizes are always rounded up to a whole number of cache lines,
+   to avoid SMP cache-line thrashing. */
+always_inline uword
+clib_smp_fifo_round_elt_bytes (uword n_bytes_per_elt)
+{
+ return round_pow2 (n_bytes_per_elt, CLIB_CACHE_LINE_BYTES);
+}
+
+always_inline uword
+clib_smp_fifo_n_elts (clib_smp_fifo_t * f)
+{
+ uword n = f->write_index - f->read_index;
+ ASSERT (n <= f->max_n_elts_less_one + 1);
+ return n;
+}
+
+always_inline clib_smp_fifo_data_footer_t *
+clib_smp_fifo_get_data_footer (void *d, uword n_bytes_per_elt)
+{
+ clib_smp_fifo_data_footer_t *f;
+ f = d + clib_smp_fifo_round_elt_bytes (n_bytes_per_elt) - sizeof (f[0]);
+ return f;
+}
+
+always_inline void *
+clib_smp_fifo_elt_at_index (clib_smp_fifo_t * f, uword n_bytes_per_elt,
+ uword i)
+{
+ uword n_bytes_per_elt_cache_aligned;
+
+ ASSERT (i <= f->max_n_elts_less_one);
+
+ n_bytes_per_elt_cache_aligned =
+ clib_smp_fifo_round_elt_bytes (n_bytes_per_elt);
+
+ return f->data + i * n_bytes_per_elt_cache_aligned;
+}
+
+always_inline void *
+clib_smp_fifo_write_alloc (clib_smp_fifo_t * f, uword n_bytes_per_elt)
+{
+ void *d;
+ clib_smp_fifo_data_footer_t *t;
+ clib_smp_fifo_data_state_t s;
+ u32 wi0, wi1;
+
+ wi0 = f->write_index;
+
+ /* Fifo full? */
+ if (wi0 - f->read_index > f->max_n_elts_less_one)
+ return 0;
+
+ while (1)
+ {
+ wi1 = wi0 + 1;
+
+ d =
+ clib_smp_fifo_elt_at_index (f, n_bytes_per_elt,
+ wi0 & f->max_n_elts_less_one);
+ t = clib_smp_fifo_get_data_footer (d, n_bytes_per_elt);
+
+ s = clib_smp_fifo_data_footer_get_state (t);
+ if (s != CLIB_SMP_FIFO_DATA_STATE_free)
+ {
+ d = 0;
+ break;
+ }
+
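+      /* Try to atomically advance write_index from wi0 to wi1; the CAS
+         returns the previous value, so a result equal to wi0 means this
+         cpu owns slot wi0. */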
+ wi1 = clib_smp_compare_and_swap (&f->write_index, wi1, wi0);
+
+ if (wi1 == wi0)
+ {
+ clib_smp_fifo_data_footer_set_state (t,
+ CLIB_SMP_FIFO_DATA_STATE_write_alloc);
+ break;
+ }
+
+ /* Other cpu wrote write index first: try again. */
+ wi0 = wi1;
+ }
+
+ return d;
+}
+
+always_inline void
+clib_smp_fifo_write_done (clib_smp_fifo_t * f, void *d, uword n_bytes_per_elt)
+{
+ clib_smp_fifo_data_footer_t *t;
+
+ /* Flush out pending writes before we change state to write_done.
+ This will hold off readers until data is flushed. */
+ CLIB_MEMORY_BARRIER ();
+
+ t = clib_smp_fifo_get_data_footer (d, n_bytes_per_elt);
+
+ ASSERT (clib_smp_fifo_data_footer_get_state (t) ==
+ CLIB_SMP_FIFO_DATA_STATE_write_alloc);
+ clib_smp_fifo_data_footer_set_state (t,
+ CLIB_SMP_FIFO_DATA_STATE_write_done);
+}
+
+always_inline void *
+clib_smp_fifo_read_fetch (clib_smp_fifo_t * f, uword n_bytes_per_elt)
+{
+ void *d;
+ clib_smp_fifo_data_footer_t *t;
+ clib_smp_fifo_data_state_t s;
+ u32 ri0, ri1;
+
+ ri0 = f->read_index;
+
+ /* Fifo empty? */
+ if (f->write_index - ri0 == 0)
+ return 0;
+
+ while (1)
+ {
+ ri1 = ri0 + 1;
+
+ d =
+ clib_smp_fifo_elt_at_index (f, n_bytes_per_elt,
+ ri0 & f->max_n_elts_less_one);
+ t = clib_smp_fifo_get_data_footer (d, n_bytes_per_elt);
+
+ s = clib_smp_fifo_data_footer_get_state (t);
+ if (s != CLIB_SMP_FIFO_DATA_STATE_write_done)
+ {
+ d = 0;
+ break;
+ }
+
+ ri1 = clib_smp_compare_and_swap (&f->read_index, ri1, ri0);
+ if (ri1 == ri0)
+ {
+ clib_smp_fifo_data_footer_set_state (t,
+ CLIB_SMP_FIFO_DATA_STATE_read_fetch);
+ break;
+ }
+
+ ri0 = ri1;
+ }
+
+ return d;
+}
+
+always_inline void
+clib_smp_fifo_read_done (clib_smp_fifo_t * f, void *d, uword n_bytes_per_elt)
+{
+ clib_smp_fifo_data_footer_t *t;
+
+ t = clib_smp_fifo_get_data_footer (d, n_bytes_per_elt);
+
+ ASSERT (clib_smp_fifo_data_footer_get_state (t) ==
+ CLIB_SMP_FIFO_DATA_STATE_read_fetch);
+ clib_smp_fifo_data_footer_set_state (t, CLIB_SMP_FIFO_DATA_STATE_free);
+}
+
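+/* Word-at-a-time copy.  Note the loops below round n_bytes up to a whole
+   number of uwords; that is safe here because fifo element sizes are
+   rounded to a multiple of the cache-line size. */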
+always_inline void
+clib_smp_fifo_memcpy (uword * dst, uword * src, uword n_bytes)
+{
+ word n_bytes_left = n_bytes;
+
+ while (n_bytes_left >= 4 * sizeof (uword))
+ {
+ dst[0] = src[0];
+ dst[1] = src[1];
+ dst[2] = src[2];
+ dst[3] = src[3];
+ dst += 4;
+ src += 4;
+ n_bytes_left -= 4 * sizeof (dst[0]);
+ }
+
+ while (n_bytes_left > 0)
+ {
+ dst[0] = src[0];
+ dst += 1;
+ src += 1;
+ n_bytes_left -= 1 * sizeof (dst[0]);
+ }
+}
+
+always_inline void
+clib_smp_fifo_write_inline (clib_smp_fifo_t * f, void *elt_to_write,
+ uword n_bytes_per_elt)
+{
+ uword *dst;
+ dst = clib_smp_fifo_write_alloc (f, n_bytes_per_elt);
+ clib_smp_fifo_memcpy (dst, elt_to_write, n_bytes_per_elt);
+ clib_smp_fifo_write_done (f, dst, n_bytes_per_elt);
+}
+
+always_inline void
+clib_smp_fifo_read_inline (clib_smp_fifo_t * f, void *elt_to_read,
+ uword n_bytes_per_elt)
+{
+ uword *src;
+ src = clib_smp_fifo_read_fetch (f, n_bytes_per_elt);
+ clib_smp_fifo_memcpy (elt_to_read, src, n_bytes_per_elt);
+ clib_smp_fifo_read_done (f, src, n_bytes_per_elt);
+}
+
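+/* Minimal usage sketch (illustrative; the element type and fifo size are
+   hypothetical):
+
+     typedef struct { u64 seq; } my_elt_t;
+     clib_smp_fifo_t *f = clib_smp_fifo_init (64, sizeof (my_elt_t));
+
+     my_elt_t e = { .seq = 1 };
+     clib_smp_fifo_write_inline (f, &e, sizeof (e));   // producer side
+
+     my_elt_t r;
+     clib_smp_fifo_read_inline (f, &r, sizeof (r));    // consumer side
+
+   Note that clib_smp_fifo_write_alloc returns 0 when the fifo is full and
+   clib_smp_fifo_read_fetch returns 0 when it is empty; the *_inline
+   helpers above do not check for that, so callers must know that space
+   (or data) is available before using them. */
+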
+#endif /* included_clib_smp_vec_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/socket.c b/src/vppinfra/socket.c
new file mode 100644
index 00000000..87a9333f
--- /dev/null
+++ b/src/vppinfra/socket.c
@@ -0,0 +1,559 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003, 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <stdio.h>
+#include <string.h>		/* strcmp, strlen */
+#define __USE_GNU
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <sys/stat.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <vppinfra/mem.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/socket.h>
+#include <vppinfra/format.h>
+#include <vppinfra/error.h>
+
+void
+clib_socket_tx_add_formatted (clib_socket_t * s, char *fmt, ...)
+{
+ va_list va;
+ va_start (va, fmt);
+ clib_socket_tx_add_va_formatted (s, fmt, &va);
+ va_end (va);
+}
+
+/* Bind the socket to an unused port and return that port. */
+static word
+find_free_port (word sock)
+{
+ word port;
+
+ for (port = IPPORT_USERRESERVED; port < 1 << 16; port++)
+ {
+ struct sockaddr_in a;
+
+ memset (&a, 0, sizeof (a)); /* Warnings be gone */
+
+ a.sin_family = PF_INET;
+ a.sin_addr.s_addr = INADDR_ANY;
+ a.sin_port = htons (port);
+
+ if (bind (sock, (struct sockaddr *) &a, sizeof (a)) >= 0)
+ break;
+ }
+
+ return port < 1 << 16 ? port : -1;
+}
+
+/* Convert a config string to a struct sockaddr and length for use
+ with bind or connect. */
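+/* Config forms accepted by the parser below (example values are
+   illustrative):
+     "/run/example.sock"   PF_LOCAL socket; config is the path
+     "localhost:5002"      PF_INET host and decimal port
+     "localhost:0x1389"    PF_INET host and hex port
+     "example.com"         PF_INET host only; port left as 0
+     ""                    PF_INET default address; port left as 0 */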
+static clib_error_t *
+socket_config (char *config,
+ void *addr, socklen_t * addr_len, u32 ip4_default_address)
+{
+ clib_error_t *error = 0;
+
+ if (!config)
+ config = "";
+
+ /* Anything that begins with a / is a local PF_LOCAL socket. */
+ if (config[0] == '/')
+ {
+ struct sockaddr_un *su = addr;
+ su->sun_family = PF_LOCAL;
+ clib_memcpy (&su->sun_path, config,
+ clib_min (sizeof (su->sun_path), 1 + strlen (config)));
+ *addr_len = sizeof (su[0]);
+ }
+
+ /* Hostname or hostname:port or port. */
+ else
+ {
+ char *host_name;
+ int port = -1;
+ struct sockaddr_in *sa = addr;
+
+ host_name = 0;
+ port = -1;
+ if (config[0] != 0)
+ {
+ unformat_input_t i;
+
+ unformat_init_string (&i, config, strlen (config));
+ if (unformat (&i, "%s:%d", &host_name, &port)
+ || unformat (&i, "%s:0x%x", &host_name, &port))
+ ;
+ else if (unformat (&i, "%s", &host_name))
+ ;
+ else
+ error = clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, &i);
+ unformat_free (&i);
+
+ if (error)
+ goto done;
+ }
+
+ sa->sin_family = PF_INET;
+ *addr_len = sizeof (sa[0]);
+ if (port != -1)
+ sa->sin_port = htons (port);
+ else
+ sa->sin_port = 0;
+
+ if (host_name)
+ {
+ struct in_addr host_addr;
+
+	  /* Recognize localhost to avoid a host lookup in the most common case. */
+ if (!strcmp (host_name, "localhost"))
+ sa->sin_addr.s_addr = htonl (INADDR_LOOPBACK);
+
+ else if (inet_aton (host_name, &host_addr))
+ sa->sin_addr = host_addr;
+
+ else if (host_name && strlen (host_name) > 0)
+ {
+ struct hostent *host = gethostbyname (host_name);
+ if (!host)
+ error = clib_error_return (0, "unknown host `%s'", config);
+ else
+ clib_memcpy (&sa->sin_addr.s_addr, host->h_addr_list[0],
+ host->h_length);
+ }
+
+ else
+ sa->sin_addr.s_addr = htonl (ip4_default_address);
+
+ vec_free (host_name);
+ if (error)
+ goto done;
+ }
+ }
+
+done:
+ return error;
+}
+
+static clib_error_t *
+default_socket_write (clib_socket_t * s)
+{
+ clib_error_t *err = 0;
+ word written = 0;
+ word fd = 0;
+ word tx_len;
+
+ fd = s->fd;
+
+ /* Map standard input to standard output.
+ Typically, fd is a socket for which read/write both work. */
+ if (fd == 0)
+ fd = 1;
+
+ tx_len = vec_len (s->tx_buffer);
+ written = write (fd, s->tx_buffer, tx_len);
+
+ /* Ignore certain errors. */
+ if (written < 0 && !unix_error_is_fatal (errno))
+ written = 0;
+
+ /* A "real" error occurred. */
+ if (written < 0)
+ {
+ err = clib_error_return_unix (0, "write %wd bytes (fd %d, '%s')",
+ tx_len, s->fd, s->config);
+ vec_free (s->tx_buffer);
+ goto done;
+ }
+
+ /* Reclaim the transmitted part of the tx buffer on successful writes. */
+ else if (written > 0)
+ {
+ if (written == tx_len)
+ _vec_len (s->tx_buffer) = 0;
+ else
+ vec_delete (s->tx_buffer, written, 0);
+ }
+
+ /* If a non-fatal error occurred AND
+ the buffer is full, then we must free it. */
+ else if (written == 0 && tx_len > 64 * 1024)
+ {
+ vec_free (s->tx_buffer);
+ }
+
+done:
+ return err;
+}
+
+static clib_error_t *
+default_socket_read (clib_socket_t * sock, int n_bytes)
+{
+ word fd, n_read;
+ u8 *buf;
+
+ /* RX side of socket is down once end of file is reached. */
+ if (sock->flags & CLIB_SOCKET_F_RX_END_OF_FILE)
+ return 0;
+
+ fd = sock->fd;
+
+ n_bytes = clib_max (n_bytes, 4096);
+ vec_add2 (sock->rx_buffer, buf, n_bytes);
+
+ if ((n_read = read (fd, buf, n_bytes)) < 0)
+ {
+ n_read = 0;
+
+ /* Ignore certain errors. */
+ if (!unix_error_is_fatal (errno))
+ goto non_fatal;
+
+ return clib_error_return_unix (0, "read %d bytes (fd %d, '%s')",
+ n_bytes, sock->fd, sock->config);
+ }
+
+ /* Other side closed the socket. */
+ if (n_read == 0)
+ sock->flags |= CLIB_SOCKET_F_RX_END_OF_FILE;
+
+non_fatal:
+ _vec_len (sock->rx_buffer) += n_read - n_bytes;
+
+ return 0;
+}
+
+static clib_error_t *
+default_socket_close (clib_socket_t * s)
+{
+ if (close (s->fd) < 0)
+ return clib_error_return_unix (0, "close (fd %d, %s)", s->fd, s->config);
+ return 0;
+}
+
+static clib_error_t *
+default_socket_sendmsg (clib_socket_t * s, void *msg, int msglen,
+ int fds[], int num_fds)
+{
+ struct msghdr mh = { 0 };
+ struct iovec iov[1];
+ char ctl[CMSG_SPACE (sizeof (int)) * num_fds];
+ int rv;
+
+ iov[0].iov_base = msg;
+ iov[0].iov_len = msglen;
+ mh.msg_iov = iov;
+ mh.msg_iovlen = 1;
+
+ if (num_fds > 0)
+ {
+ struct cmsghdr *cmsg;
+ memset (&ctl, 0, sizeof (ctl));
+ mh.msg_control = ctl;
+ mh.msg_controllen = sizeof (ctl);
+ cmsg = CMSG_FIRSTHDR (&mh);
+ cmsg->cmsg_len = CMSG_LEN (sizeof (int) * num_fds);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ memcpy (CMSG_DATA (cmsg), fds, sizeof (int) * num_fds);
+ }
+ rv = sendmsg (s->fd, &mh, 0);
+ if (rv < 0)
+ return clib_error_return_unix (0, "sendmsg");
+ return 0;
+}
+
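+/* Illustrative fd-passing call (descriptor and message are hypothetical):
+
+     int fds[1] = { some_fd };
+     u8 msg[16] = { 0 };
+     clib_error_t *e = clib_socket_sendmsg (s, msg, sizeof (msg), fds, 1);
+
+   The descriptors travel in an SCM_RIGHTS control message alongside the
+   payload; the peer recovers both via clib_socket_recvmsg. */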
+
+static clib_error_t *
+default_socket_recvmsg (clib_socket_t * s, void *msg, int msglen,
+ int fds[], int num_fds)
+{
+ char ctl[CMSG_SPACE (sizeof (int) * num_fds) +
+ CMSG_SPACE (sizeof (struct ucred))];
+ struct msghdr mh = { 0 };
+ struct iovec iov[1];
+ ssize_t size;
+ struct ucred *cr = 0;
+ struct cmsghdr *cmsg;
+
+ iov[0].iov_base = msg;
+ iov[0].iov_len = msglen;
+ mh.msg_iov = iov;
+ mh.msg_iovlen = 1;
+ mh.msg_control = ctl;
+ mh.msg_controllen = sizeof (ctl);
+
+ memset (ctl, 0, sizeof (ctl));
+
+ /* receive the incoming message */
+ size = recvmsg (s->fd, &mh, 0);
+ if (size != msglen)
+ {
+ return (size == 0) ? clib_error_return (0, "disconnected") :
+ clib_error_return_unix (0, "recvmsg: malformed message (fd %d, '%s')",
+ s->fd, s->config);
+ }
+
+ cmsg = CMSG_FIRSTHDR (&mh);
+ while (cmsg)
+ {
+ if (cmsg->cmsg_level == SOL_SOCKET)
+ {
+ if (cmsg->cmsg_type == SCM_CREDENTIALS)
+ {
+ cr = (struct ucred *) CMSG_DATA (cmsg);
+ s->uid = cr->uid;
+ s->gid = cr->gid;
+ s->pid = cr->pid;
+ }
+ else if (cmsg->cmsg_type == SCM_RIGHTS)
+ {
+ clib_memcpy (fds, CMSG_DATA (cmsg), num_fds * sizeof (int));
+ }
+ }
+ cmsg = CMSG_NXTHDR (&mh, cmsg);
+ }
+ return 0;
+}
+
+static void
+socket_init_funcs (clib_socket_t * s)
+{
+ if (!s->write_func)
+ s->write_func = default_socket_write;
+ if (!s->read_func)
+ s->read_func = default_socket_read;
+ if (!s->close_func)
+ s->close_func = default_socket_close;
+ if (!s->sendmsg_func)
+ s->sendmsg_func = default_socket_sendmsg;
+ if (!s->recvmsg_func)
+ s->recvmsg_func = default_socket_recvmsg;
+}
+
+clib_error_t *
+clib_socket_init (clib_socket_t * s)
+{
+ union
+ {
+ struct sockaddr sa;
+ struct sockaddr_un su;
+ } addr;
+ socklen_t addr_len = 0;
+ int socket_type;
+ clib_error_t *error = 0;
+ word port;
+
+ error = socket_config (s->config, &addr.sa, &addr_len,
+ (s->flags & CLIB_SOCKET_F_IS_SERVER
+ ? INADDR_LOOPBACK : INADDR_ANY));
+ if (error)
+ goto done;
+
+ socket_init_funcs (s);
+
+ socket_type = s->flags & CLIB_SOCKET_F_SEQPACKET ?
+ SOCK_SEQPACKET : SOCK_STREAM;
+
+ s->fd = socket (addr.sa.sa_family, socket_type, 0);
+ if (s->fd < 0)
+ {
+ error = clib_error_return_unix (0, "socket (fd %d, '%s')",
+ s->fd, s->config);
+ goto done;
+ }
+
+ port = 0;
+ if (addr.sa.sa_family == PF_INET)
+ port = ((struct sockaddr_in *) &addr)->sin_port;
+
+ if (s->flags & CLIB_SOCKET_F_IS_SERVER)
+ {
+ uword need_bind = 1;
+
+ if (addr.sa.sa_family == PF_INET)
+ {
+ if (port == 0)
+ {
+ port = find_free_port (s->fd);
+ if (port < 0)
+ {
+ error = clib_error_return (0, "no free port (fd %d, '%s')",
+ s->fd, s->config);
+ goto done;
+ }
+ need_bind = 0;
+ }
+ }
+ if (addr.sa.sa_family == PF_LOCAL)
+ unlink (((struct sockaddr_un *) &addr)->sun_path);
+
+ /* Make address available for multiple users. */
+ {
+ int v = 1;
+ if (setsockopt (s->fd, SOL_SOCKET, SO_REUSEADDR, &v, sizeof (v)) < 0)
+ clib_unix_warning ("setsockopt SO_REUSEADDR fails");
+ }
+
+ if (addr.sa.sa_family == PF_LOCAL && s->flags & CLIB_SOCKET_F_PASSCRED)
+ {
+ int x = 1;
+ if (setsockopt (s->fd, SOL_SOCKET, SO_PASSCRED, &x, sizeof (x)) < 0)
+ {
+ error = clib_error_return_unix (0, "setsockopt (SO_PASSCRED, "
+ "fd %d, '%s')", s->fd,
+ s->config);
+ goto done;
+ }
+ }
+
+ if (need_bind && bind (s->fd, &addr.sa, addr_len) < 0)
+ {
+ error = clib_error_return_unix (0, "bind (fd %d, '%s')",
+ s->fd, s->config);
+ goto done;
+ }
+
+ if (listen (s->fd, 5) < 0)
+ {
+ error = clib_error_return_unix (0, "listen (fd %d, '%s')",
+ s->fd, s->config);
+ goto done;
+ }
+ if (addr.sa.sa_family == PF_LOCAL
+ && s->flags & CLIB_SOCKET_F_ALLOW_GROUP_WRITE)
+ {
+ struct stat st = { 0 };
+ if (stat (((struct sockaddr_un *) &addr)->sun_path, &st) < 0)
+ {
+ error = clib_error_return_unix (0, "stat (fd %d, '%s')",
+ s->fd, s->config);
+ goto done;
+ }
+ st.st_mode |= S_IWGRP;
+ if (chmod (((struct sockaddr_un *) &addr)->sun_path, st.st_mode) <
+ 0)
+ {
+ error =
+ clib_error_return_unix (0, "chmod (fd %d, '%s', mode %o)",
+ s->fd, s->config, st.st_mode);
+ goto done;
+ }
+ }
+ }
+ else
+ {
+ if ((s->flags & CLIB_SOCKET_F_NON_BLOCKING_CONNECT)
+ && fcntl (s->fd, F_SETFL, O_NONBLOCK) < 0)
+ {
+ error = clib_error_return_unix (0, "fcntl NONBLOCK (fd %d, '%s')",
+ s->fd, s->config);
+ goto done;
+ }
+
+ if (connect (s->fd, &addr.sa, addr_len) < 0
+ && !((s->flags & CLIB_SOCKET_F_NON_BLOCKING_CONNECT) &&
+ errno == EINPROGRESS))
+ {
+ error = clib_error_return_unix (0, "connect (fd %d, '%s')",
+ s->fd, s->config);
+ goto done;
+ }
+ }
+
+ return error;
+
+done:
+ if (s->fd > 0)
+ close (s->fd);
+ return error;
+}
+
+clib_error_t *
+clib_socket_accept (clib_socket_t * server, clib_socket_t * client)
+{
+ clib_error_t *err = 0;
+ socklen_t len = 0;
+
+ memset (client, 0, sizeof (client[0]));
+
+ /* Accept the new socket connection. */
+ client->fd = accept (server->fd, 0, 0);
+ if (client->fd < 0)
+ return clib_error_return_unix (0, "accept (fd %d, '%s')",
+ server->fd, server->config);
+
+ /* Set the new socket to be non-blocking. */
+ if (fcntl (client->fd, F_SETFL, O_NONBLOCK) < 0)
+ {
+ err = clib_error_return_unix (0, "fcntl O_NONBLOCK (fd %d)",
+ client->fd);
+ goto close_client;
+ }
+
+ /* Get peer info. */
+ len = sizeof (client->peer);
+ if (getpeername (client->fd, (struct sockaddr *) &client->peer, &len) < 0)
+ {
+ err = clib_error_return_unix (0, "getpeername (fd %d)", client->fd);
+ goto close_client;
+ }
+
+ client->flags = CLIB_SOCKET_F_IS_CLIENT;
+
+ socket_init_funcs (client);
+ return 0;
+
+close_client:
+ close (client->fd);
+ return err;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/socket.h b/src/vppinfra/socket.h
new file mode 100644
index 00000000..4f9e9509
--- /dev/null
+++ b/src/vppinfra/socket.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef _clib_included_socket_h
+#define _clib_included_socket_h
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+
+#include <vppinfra/clib.h>
+#include <vppinfra/error.h>
+#include <vppinfra/format.h>
+
+typedef struct _socket_t
+{
+ /* File descriptor. */
+ i32 fd;
+
+ /* Config string for socket HOST:PORT or just HOST. */
+ char *config;
+
+ u32 flags;
+#define CLIB_SOCKET_F_IS_SERVER (1 << 0)
+#define CLIB_SOCKET_F_IS_CLIENT (0 << 0)
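+/* Note: IS_CLIENT is zero, i.e. simply the absence of IS_SERVER; test it
+   with clib_socket_is_client (), not with a bitwise AND. */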
+#define CLIB_SOCKET_F_RX_END_OF_FILE (1 << 2)
+#define CLIB_SOCKET_F_NON_BLOCKING_CONNECT (1 << 3)
+#define CLIB_SOCKET_F_ALLOW_GROUP_WRITE (1 << 4)
+#define CLIB_SOCKET_F_SEQPACKET (1 << 5)
+#define CLIB_SOCKET_F_PASSCRED (1 << 6)
+
+ /* Transmit buffer. Holds data waiting to be written. */
+ u8 *tx_buffer;
+
+ /* Receive buffer. Holds data read from socket. */
+ u8 *rx_buffer;
+
+ /* Peer socket we are connected to. */
+ struct sockaddr_in peer;
+
+ /* Credentials, populated if CLIB_SOCKET_F_PASSCRED is set */
+ pid_t pid;
+ uid_t uid;
+ gid_t gid;
+
+ clib_error_t *(*write_func) (struct _socket_t * sock);
+ clib_error_t *(*read_func) (struct _socket_t * sock, int min_bytes);
+ clib_error_t *(*close_func) (struct _socket_t * sock);
+ clib_error_t *(*recvmsg_func) (struct _socket_t * s, void *msg, int msglen,
+ int fds[], int num_fds);
+ clib_error_t *(*sendmsg_func) (struct _socket_t * s, void *msg, int msglen,
+ int fds[], int num_fds);
+ uword private_data;
+} clib_socket_t;
+
+/* Socket config is either a PF_LOCAL path (begins with '/') or host:port.
+   An unspecified port causes a free one to be chosen starting
+   from IPPORT_USERRESERVED (5000). */
+clib_error_t *clib_socket_init (clib_socket_t * socket);
+
+clib_error_t *clib_socket_accept (clib_socket_t * server,
+ clib_socket_t * client);
+
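+/* Minimal server-side usage sketch (the config string is hypothetical):
+
+     clib_socket_t s = { 0 };
+     s.config = "localhost:5002";
+     s.flags = CLIB_SOCKET_F_IS_SERVER;
+     clib_error_t *e = clib_socket_init (&s);
+     if (e == 0)
+       {
+         clib_socket_t client;
+         e = clib_socket_accept (&s, &client);
+       }
+*/
+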
+always_inline uword
+clib_socket_is_server (clib_socket_t * sock)
+{
+ return (sock->flags & CLIB_SOCKET_F_IS_SERVER) != 0;
+}
+
+always_inline uword
+clib_socket_is_client (clib_socket_t * s)
+{
+ return !clib_socket_is_server (s);
+}
+
+always_inline uword
+clib_socket_is_connected (clib_socket_t * sock)
+{
+ return sock->fd > 0;
+}
+
+
+always_inline int
+clib_socket_rx_end_of_file (clib_socket_t * s)
+{
+ return s->flags & CLIB_SOCKET_F_RX_END_OF_FILE;
+}
+
+always_inline void *
+clib_socket_tx_add (clib_socket_t * s, int n_bytes)
+{
+ u8 *result;
+ vec_add2 (s->tx_buffer, result, n_bytes);
+ return result;
+}
+
+always_inline void
+clib_socket_tx_add_va_formatted (clib_socket_t * s, char *fmt, va_list * va)
+{
+ s->tx_buffer = va_format (s->tx_buffer, fmt, va);
+}
+
+always_inline clib_error_t *
+clib_socket_tx (clib_socket_t * s)
+{
+ return s->write_func (s);
+}
+
+always_inline clib_error_t *
+clib_socket_rx (clib_socket_t * s, int n_bytes)
+{
+ return s->read_func (s, n_bytes);
+}
+
+always_inline clib_error_t *
+clib_socket_sendmsg (clib_socket_t * s, void *msg, int msglen,
+ int fds[], int num_fds)
+{
+ return s->sendmsg_func (s, msg, msglen, fds, num_fds);
+}
+
+always_inline clib_error_t *
+clib_socket_recvmsg (clib_socket_t * s, void *msg, int msglen,
+ int fds[], int num_fds)
+{
+ return s->recvmsg_func (s, msg, msglen, fds, num_fds);
+}
+
+always_inline void
+clib_socket_free (clib_socket_t * s)
+{
+ vec_free (s->tx_buffer);
+ vec_free (s->rx_buffer);
+ if (clib_mem_is_heap_object (s->config))
+ vec_free (s->config);
+ memset (s, 0, sizeof (s[0]));
+}
+
+always_inline clib_error_t *
+clib_socket_close (clib_socket_t * sock)
+{
+ clib_error_t *err;
+ err = (*sock->close_func) (sock);
+ return err;
+}
+
+void clib_socket_tx_add_formatted (clib_socket_t * s, char *fmt, ...);
+
+#endif /* _clib_included_socket_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/sparse_vec.h b/src/vppinfra/sparse_vec.h
new file mode 100644
index 00000000..ec8f0a1c
--- /dev/null
+++ b/src/vppinfra/sparse_vec.h
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_sparse_vec_h
+#define included_sparse_vec_h
+
+#include <vppinfra/vec.h>
+#include <vppinfra/bitops.h>
+
+/* Sparsely indexed vectors.  Basic idea taken from Hacker's Delight;
+   Eliot added ranges. */
+typedef struct
+{
+  /* Bitmap with one bit per possible sparse index. */
+ uword *is_member_bitmap;
+
+  /* member_counts[i] = total number of members in bitmap words 0..i-1. */
+ u16 *member_counts;
+
+#define SPARSE_VEC_IS_RANGE (1 << 0)
+#define SPARSE_VEC_IS_VALID_RANGE (1 << 1)
+ u8 *range_flags;
+} sparse_vec_header_t;
+
+always_inline sparse_vec_header_t *
+sparse_vec_header (void *v)
+{
+ return vec_header (v, sizeof (sparse_vec_header_t));
+}
+
+/* Index 0 is always used to mark indices that are not valid in the
+   sparse vector.  For example, if you look up V[0x1234] and 0x1234 is
+   not a member, you get 0 back as the dense index. */
+#define SPARSE_VEC_INVALID_INDEX (0)
+
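+/* Dense-index computation, worked example: with 64-bit uwords, sparse
+   index 70 lives in bitmap word i = 70/64 = 1 at bit 70%64 = 6.  Its dense
+   index is member_counts[1] (members in word 0) plus the number of set
+   bits below bit 6 in word 1, plus 1 to skip the reserved invalid slot. */
+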
+always_inline void *
+sparse_vec_new (uword elt_bytes, uword sparse_index_bits)
+{
+ void *v;
+ sparse_vec_header_t *h;
+ word n;
+
+ ASSERT (sparse_index_bits <= 16);
+
+ v = _vec_resize (0,
+ /* length increment */ 8,
+ /* data bytes */ 8 * elt_bytes,
+ /* header bytes */ sizeof (h[0]),
+ /* data align */ 0);
+
+ /* Make space for invalid entry (entry 0). */
+ _vec_len (v) = 1;
+
+ h = sparse_vec_header (v);
+
+ n = sparse_index_bits - min_log2 (BITS (uword));
+ if (n < 0)
+ n = 0;
+ n = 1ULL << n;
+ vec_resize (h->is_member_bitmap, n);
+ vec_resize (h->member_counts, n);
+
+ return v;
+}
+
+always_inline uword
+sparse_vec_index_internal (void *v,
+ uword sparse_index,
+ uword maybe_range, u32 * insert)
+{
+ sparse_vec_header_t *h;
+ uword i, b, d, w;
+ u8 is_member;
+
+ h = sparse_vec_header (v);
+ i = sparse_index / BITS (h->is_member_bitmap[0]);
+ b = (uword) 1 << (uword) (sparse_index % BITS (h->is_member_bitmap[0]));
+
+ ASSERT (i < vec_len (h->is_member_bitmap));
+ ASSERT (i < vec_len (h->member_counts));
+
+ w = h->is_member_bitmap[i];
+ d = h->member_counts[i] + count_set_bits (w & (b - 1));
+
+ is_member = (w & b) != 0;
+ if (maybe_range)
+ {
+ u8 r = h->range_flags[d];
+ u8 is_range, is_valid_range;
+
+ is_range = maybe_range & (r & SPARSE_VEC_IS_RANGE);
+ is_valid_range = (r & SPARSE_VEC_IS_VALID_RANGE) != 0;
+
+ is_member = is_range ? is_valid_range : is_member;
+ }
+
+ if (insert)
+ {
+ *insert = !is_member;
+ if (!is_member)
+ {
+ uword j;
+ w |= b;
+ h->is_member_bitmap[i] = w;
+ for (j = i + 1; j < vec_len (h->member_counts); j++)
+ h->member_counts[j] += 1;
+ }
+
+ return 1 + d;
+ }
+
+ d = is_member ? d : 0;
+
+ return is_member + d;
+}
+
+always_inline uword
+sparse_vec_index (void *v, uword sparse_index)
+{
+ return sparse_vec_index_internal (v, sparse_index,
+ /* maybe range */ 0,
+ /* insert? */ 0);
+}
+
+always_inline void
+sparse_vec_index2 (void *v,
+ u32 si0, u32 si1, u32 * i0_return, u32 * i1_return)
+{
+ sparse_vec_header_t *h;
+ uword b0, b1, w0, w1, v0, v1;
+ u32 i0, i1, d0, d1;
+ u8 is_member0, is_member1;
+
+ h = sparse_vec_header (v);
+
+ i0 = si0 / BITS (h->is_member_bitmap[0]);
+ i1 = si1 / BITS (h->is_member_bitmap[0]);
+
+ b0 = (uword) 1 << (uword) (si0 % BITS (h->is_member_bitmap[0]));
+ b1 = (uword) 1 << (uword) (si1 % BITS (h->is_member_bitmap[0]));
+
+ ASSERT (i0 < vec_len (h->is_member_bitmap));
+ ASSERT (i1 < vec_len (h->is_member_bitmap));
+
+ ASSERT (i0 < vec_len (h->member_counts));
+ ASSERT (i1 < vec_len (h->member_counts));
+
+ w0 = h->is_member_bitmap[i0];
+ w1 = h->is_member_bitmap[i1];
+
+ v0 = w0 & (b0 - 1);
+ v1 = w1 & (b1 - 1);
+
+  /* Speculate that masks will have zero or one bit set. */
+ d0 = h->member_counts[i0] + (v0 != 0);
+ d1 = h->member_counts[i1] + (v1 != 0);
+
+ /* Validate speculation. */
+ if (PREDICT_FALSE (!is_pow2 (v0) || !is_pow2 (v1)))
+ {
+ d0 += count_set_bits (v0) - (v0 != 0);
+ d1 += count_set_bits (v1) - (v1 != 0);
+ }
+
+ is_member0 = (w0 & b0) != 0;
+ is_member1 = (w1 & b1) != 0;
+
+ d0 = is_member0 ? d0 : 0;
+ d1 = is_member1 ? d1 : 0;
+
+ *i0_return = is_member0 + d0;
+ *i1_return = is_member1 + d1;
+}
+
+#define sparse_vec_free(v) vec_free(v)
+
+#define sparse_vec_elt_at_index(v,i) \
+ vec_elt_at_index ((v), sparse_vec_index ((v), (i)))
+
+#define sparse_vec_validate(v,i) \
+({ \
+ uword _i; \
+ u32 _insert; \
+ \
+ if (! (v)) \
+ (v) = sparse_vec_new (sizeof ((v)[0]), BITS (u16)); \
+ \
+ _i = sparse_vec_index_internal ((v), (i), \
+ /* maybe range */ 0, \
+ /* insert? */ &_insert); \
+ if (_insert) \
+ vec_insert_ha ((v), 1, _i, \
+ /* header size */ sizeof (sparse_vec_header_t), \
+ /* align */ 0); \
+ \
+ /* Invalid index is 0. */ \
+ ASSERT (_i > 0); \
+ \
+ (v) + _i; \
+})
+
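+/* Typical usage sketch (element type and indices are hypothetical):
+
+     u16 *sv = 0;
+     u16 *e = sparse_vec_validate (sv, 0x1234);  // insert and get element
+     e[0] = 42;
+     u16 v = *sparse_vec_elt_at_index (sv, 0x1234);
+     ASSERT (sparse_vec_index (sv, 0x9999) == SPARSE_VEC_INVALID_INDEX);
+*/
+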
+#endif /* included_sparse_vec_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/std-formats.c b/src/vppinfra/std-formats.c
new file mode 100644
index 00000000..ac98f999
--- /dev/null
+++ b/src/vppinfra/std-formats.c
@@ -0,0 +1,330 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/format.h>
+#include <ctype.h>
+
+/* Format vectors. */
+u8 *
+format_vec32 (u8 * s, va_list * va)
+{
+ u32 *v = va_arg (*va, u32 *);
+ char *fmt = va_arg (*va, char *);
+ uword i;
+ for (i = 0; i < vec_len (v); i++)
+ {
+ if (i > 0)
+ s = format (s, ", ");
+ s = format (s, fmt, v[i]);
+ }
+ return s;
+}
+
+u8 *
+format_vec_uword (u8 * s, va_list * va)
+{
+ uword *v = va_arg (*va, uword *);
+ char *fmt = va_arg (*va, char *);
+ uword i;
+ for (i = 0; i < vec_len (v); i++)
+ {
+ if (i > 0)
+ s = format (s, ", ");
+ s = format (s, fmt, v[i]);
+ }
+ return s;
+}
+
+/* Format an ASCII buffer of given length. */
+u8 *
+format_ascii_bytes (u8 * s, va_list * va)
+{
+ u8 *v = va_arg (*va, u8 *);
+ uword n_bytes = va_arg (*va, uword);
+ vec_add (s, v, n_bytes);
+ return s;
+}
+
+/* Format hex dump. */
+u8 *
+format_hex_bytes (u8 * s, va_list * va)
+{
+ u8 *bytes = va_arg (*va, u8 *);
+ int n_bytes = va_arg (*va, int);
+ uword i;
+
+ /* Print short or long form depending on byte count. */
+ uword short_form = n_bytes <= 32;
+ uword indent = format_get_indent (s);
+
+ if (n_bytes == 0)
+ return s;
+
+ for (i = 0; i < n_bytes; i++)
+ {
+ if (!short_form && (i % 32) == 0)
+ s = format (s, "%08x: ", i);
+
+ s = format (s, "%02x", bytes[i]);
+
+ if (!short_form && ((i + 1) % 32) == 0 && (i + 1) < n_bytes)
+ s = format (s, "\n%U", format_white_space, indent);
+ }
+
+ return s;
+}
+
+/* Add variable number of spaces. */
+u8 *
+format_white_space (u8 * s, va_list * va)
+{
+ uword n = va_arg (*va, uword);
+ while (n-- > 0)
+ vec_add1 (s, ' ');
+ return s;
+}
+
+u8 *
+format_time_interval (u8 * s, va_list * args)
+{
+ u8 *fmt = va_arg (*args, u8 *);
+ f64 t = va_arg (*args, f64);
+ u8 *f;
+
+ const f64 seconds_per_minute = 60;
+ const f64 seconds_per_hour = 60 * seconds_per_minute;
+ const f64 seconds_per_day = 24 * seconds_per_hour;
+ uword days, hours, minutes, secs, msecs, usecs;
+
+ days = t / seconds_per_day;
+ t -= days * seconds_per_day;
+
+ hours = t / seconds_per_hour;
+ t -= hours * seconds_per_hour;
+
+ minutes = t / seconds_per_minute;
+ t -= minutes * seconds_per_minute;
+
+ secs = t;
+ t -= secs;
+
+ msecs = 1e3 * t;
+ usecs = 1e6 * t;
+
+ for (f = fmt; *f; f++)
+ {
+ uword what, c;
+ char *what_fmt = "%d";
+
+ switch (c = *f)
+ {
+ default:
+ vec_add1 (s, c);
+ continue;
+
+ case 'd':
+ what = days;
+ what_fmt = "%d";
+ break;
+ case 'h':
+ what = hours;
+ what_fmt = "%02d";
+ break;
+ case 'm':
+ what = minutes;
+ what_fmt = "%02d";
+ break;
+ case 's':
+ what = secs;
+ what_fmt = "%02d";
+ break;
+ case 'f':
+ what = msecs;
+ what_fmt = "%03d";
+ break;
+ case 'u':
+ what = usecs;
+ what_fmt = "%06d";
+ break;
+ }
+
+ s = format (s, what_fmt, what);
+ }
+
+ return s;
+}
+
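+/* Example (illustrative): format (0, "%U", format_time_interval, "h:m:s",
+   (f64) 3723.0) yields "01:02:03"; characters outside the d/h/m/s/f/u set
+   are copied through literally. */
+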
+/* Unparse memory size e.g. 100, 100k, 100m, 100g. */
+u8 *
+format_memory_size (u8 * s, va_list * va)
+{
+ uword size = va_arg (*va, uword);
+ uword l, u, log_u;
+
+ l = size > 0 ? min_log2 (size) : 0;
+ if (l < 10)
+ log_u = 0;
+ else if (l < 20)
+ log_u = 10;
+ else if (l < 30)
+ log_u = 20;
+ else
+ log_u = 30;
+
+ u = (uword) 1 << log_u;
+ if (size & (u - 1))
+ s = format (s, "%.2f", (f64) size / (f64) u);
+ else
+ s = format (s, "%d", size >> log_u);
+
+ if (log_u != 0)
+ s = format (s, "%c", " kmg"[log_u / 10]);
+
+ return s;
+}
+
+/* Parse memory size e.g. 100, 100k, 100m, 100g. */
+uword
+unformat_memory_size (unformat_input_t * input, va_list * va)
+{
+ uword amount, shift, c;
+ uword *result = va_arg (*va, uword *);
+
+ if (!unformat (input, "%wd%_", &amount))
+ return 0;
+
+ c = unformat_get_input (input);
+ switch (c)
+ {
+ case 'k':
+ case 'K':
+ shift = 10;
+ break;
+ case 'm':
+ case 'M':
+ shift = 20;
+ break;
+ case 'g':
+ case 'G':
+ shift = 30;
+ break;
+ default:
+ shift = 0;
+ unformat_put_input (input);
+ break;
+ }
+
+ *result = amount << shift;
+ return 1;
+}
+
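+/* Round-trip example (illustrative): formatting (uword) (3 << 20) with
+   format_memory_size yields "3m"; unformat_memory_size parses "3m" back
+   to 3145728. */
+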
+/* Format a C identifier: e.g. a_name -> "a name".
+   Works for both vectors and null-terminated C strings. */
+u8 *
+format_c_identifier (u8 * s, va_list * va)
+{
+ u8 *id = va_arg (*va, u8 *);
+ uword i, l;
+
+ l = ~0;
+ if (clib_mem_is_vec (id))
+ l = vec_len (id);
+
+ if (id)
+ for (i = 0; id[i] != 0 && i < l; i++)
+ {
+ u8 c = id[i];
+
+ if (c == '_')
+ c = ' ';
+ vec_add1 (s, c);
+ }
+
+ return s;
+}
+
+u8 *
+format_hexdump (u8 * s, va_list * args)
+{
+ u8 *data = va_arg (*args, u8 *);
+ uword len = va_arg (*args, uword);
+ int i, index = 0;
+ const int line_len = 16;
+ u8 *line_hex = 0;
+ u8 *line_str = 0;
+ uword indent = format_get_indent (s);
+
+ if (!len)
+ return s;
+
+ for (i = 0; i < len; i++)
+ {
+ line_hex = format (line_hex, "%02x ", data[i]);
+ line_str = format (line_str, "%c", isprint (data[i]) ? data[i] : '.');
+ if (!((i + 1) % line_len))
+ {
+ s = format (s, "%U%05x: %v[%v]",
+ format_white_space, index ? indent : 0,
+ index, line_hex, line_str);
+ if (i < len - 1)
+ s = format (s, "\n");
+ index = i + 1;
+ vec_reset_length (line_hex);
+ vec_reset_length (line_str);
+ }
+ }
+
+ while (i++ % line_len)
+ line_hex = format (line_hex, " ");
+
+ if (vec_len (line_hex))
+ s = format (s, "%U%05x: %v[%v]",
+ format_white_space, indent, index, line_hex, line_str);
+
+ vec_free (line_hex);
+ vec_free (line_str);
+
+ return s;
+}
+
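+/* Example output shape (illustrative), 16 bytes per line:
+
+     00000: 64 65 61 64 62 65 65 66 00 01 02 03 04 05 06 07 [deadbeef........]
+*/
+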
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/string.c b/src/vppinfra/string.c
new file mode 100644
index 00000000..ba21e7b3
--- /dev/null
+++ b/src/vppinfra/string.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2006 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/string.h>
+#include <vppinfra/error.h>
+
+/* Exchanges source and destination. */
+void
+clib_memswap (void *_a, void *_b, uword bytes)
+{
+ uword pa = pointer_to_uword (_a);
+ uword pb = pointer_to_uword (_b);
+
+#define _(TYPE) \
+ if (0 == ((pa | pb) & (sizeof (TYPE) - 1))) \
+ { \
+ TYPE * a = uword_to_pointer (pa, TYPE *); \
+ TYPE * b = uword_to_pointer (pb, TYPE *); \
+ \
+ while (bytes >= 2*sizeof (TYPE)) \
+ { \
+ TYPE a0, a1, b0, b1; \
+ bytes -= 2*sizeof (TYPE); \
+ a += 2; \
+ b += 2; \
+ a0 = a[-2]; a1 = a[-1]; \
+ b0 = b[-2]; b1 = b[-1]; \
+ a[-2] = b0; a[-1] = b1; \
+ b[-2] = a0; b[-1] = a1; \
+ } \
+ pa = pointer_to_uword (a); \
+ pb = pointer_to_uword (b); \
+ }
+
+ if (BITS (uword) == BITS (u64))
+ _(u64);
+ _(u32);
+ _(u16);
+ _(u8);
+
+#undef _
+
+ ASSERT (bytes < 2);
+ if (bytes)
+ {
+ u8 *a = uword_to_pointer (pa, u8 *);
+ u8 *b = uword_to_pointer (pb, u8 *);
+ u8 a0 = a[0], b0 = b[0];
+ a[0] = b0;
+ b[0] = a0;
+ }
+}
+
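+/* The macro above swaps using the widest unit both pointers are aligned
+   to (u64 on 64-bit targets, then u32, u16, u8), falling through to
+   narrower types for any tail bytes, so no temporary buffer is needed. */
+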
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/string.h b/src/vppinfra/string.h
new file mode 100644
index 00000000..69a99a3f
--- /dev/null
+++ b/src/vppinfra/string.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_clib_string_h
+#define included_clib_string_h
+
+#include <vppinfra/clib.h> /* for CLIB_LINUX_KERNEL */
+#include <vppinfra/vector.h>
+
+#ifdef CLIB_LINUX_KERNEL
+#include <linux/string.h>
+#endif
+
+#ifdef CLIB_UNIX
+#include <string.h>
+#endif
+
+#ifdef CLIB_STANDALONE
+#include <vppinfra/standalone_string.h>
+#endif
+
+/* Exchanges source and destination. */
+void clib_memswap (void *_a, void *_b, uword bytes);
+
+/*
+ * the vector unit memcpy variants confuse coverity
+ * so don't let it anywhere near them.
+ */
+#ifndef __COVERITY__
+#if __AVX__
+#include <vppinfra/memcpy_avx.h>
+#elif __SSSE3__
+#include <vppinfra/memcpy_sse3.h>
+#else
+#define clib_memcpy(a,b,c) memcpy(a,b,c)
+#endif
+#else /* __COVERITY__ */
+#define clib_memcpy(a,b,c) memcpy(a,b,c)
+#endif
+
+#endif /* included_clib_string_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_bihash_template.c b/src/vppinfra/test_bihash_template.c
new file mode 100644
index 00000000..589c815d
--- /dev/null
+++ b/src/vppinfra/test_bihash_template.c
@@ -0,0 +1,369 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vppinfra/time.h>
+#include <vppinfra/cache.h>
+#include <vppinfra/error.h>
+
+#include <vppinfra/bihash_8_8.h>
+#include <vppinfra/bihash_template.h>
+
+#include <vppinfra/bihash_template.c>
+
+typedef struct
+{
+ u64 seed;
+ u32 nbuckets;
+ u32 nitems;
+ u32 search_iter;
+ int careful_delete_tests;
+ int verbose;
+ int non_random_keys;
+ uword *key_hash;
+ u64 *keys;
+ BVT (clib_bihash) hash;
+ clib_time_t clib_time;
+
+ unformat_input_t *input;
+
+} test_main_t;
+
+test_main_t test_main;
+
+uword
+vl (void *v)
+{
+ return vec_len (v);
+}
+
+static clib_error_t *
+test_bihash_vec64 (test_main_t * tm)
+{
+ u32 user_buckets = 1228800;
+ u32 user_memory_size = 209715200;
+ BVT (clib_bihash_kv) kv;
+ int i, j;
+ f64 before;
+ f64 *cum_times = 0;
+ BVT (clib_bihash) * h;
+
+ h = &tm->hash;
+
+ BV (clib_bihash_init) (h, "test", user_buckets, user_memory_size);
+
+ before = clib_time_now (&tm->clib_time);
+
+ for (j = 0; j < 10; j++)
+ {
+ for (i = 1; i <= j * 1000 + 1; i++)
+ {
+ kv.key = i;
+ kv.value = 1;
+
+ BV (clib_bihash_add_del) (h, &kv, 1 /* is_add */ );
+ }
+
+ vec_add1 (cum_times, clib_time_now (&tm->clib_time) - before);
+ }
+
+ for (j = 0; j < vec_len (cum_times); j++)
+ fformat (stdout, "Cum time for %d: %.4f (us)\n", (j + 1) * 1000,
+ cum_times[j] * 1e6);
+
+ return 0;
+}
+
+static clib_error_t *
+test_bihash (test_main_t * tm)
+{
+ int i, j;
+ uword *p;
+ uword total_searches;
+ f64 before, delta;
+ BVT (clib_bihash) * h;
+ BVT (clib_bihash_kv) kv;
+
+ h = &tm->hash;
+
+ BV (clib_bihash_init) (h, "test", tm->nbuckets, 3ULL << 30);
+
+ fformat (stdout, "Pick %lld unique %s keys...\n",
+ tm->nitems, tm->non_random_keys ? "non-random" : "random");
+
+ for (i = 0; i < tm->nitems; i++)
+ {
+ u64 rndkey;
+
+ if (tm->non_random_keys == 0)
+ {
+
+ again:
+ rndkey = random_u64 (&tm->seed);
+
+ p = hash_get (tm->key_hash, rndkey);
+ if (p)
+ goto again;
+ }
+ else
+ rndkey = (u64) (i + 1) << 16;
+
+ hash_set (tm->key_hash, rndkey, i + 1);
+ vec_add1 (tm->keys, rndkey);
+ }
+
+ fformat (stdout, "Add items...\n");
+ for (i = 0; i < tm->nitems; i++)
+ {
+ kv.key = tm->keys[i];
+ kv.value = i + 1;
+
+ BV (clib_bihash_add_del) (h, &kv, 1 /* is_add */ );
+
+ if (tm->verbose > 1)
+ {
+ fformat (stdout, "--------------------\n");
+ fformat (stdout, "After adding key %llu value %lld...\n",
+ tm->keys[i], (u64) (i + 1));
+ fformat (stdout, "%U", BV (format_bihash), h,
+ 2 /* very verbose */ );
+ }
+ }
+
+ fformat (stdout, "%U", BV (format_bihash), h, 0 /* very verbose */ );
+
+ fformat (stdout, "Search for items %d times...\n", tm->search_iter);
+
+ before = clib_time_now (&tm->clib_time);
+
+ for (j = 0; j < tm->search_iter; j++)
+ {
+ for (i = 0; i < tm->nitems; i++)
+ {
+ kv.key = tm->keys[i];
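+	  /* The search below is issued twice; the duplicate presumably
+	     eases setting a breakpoint on a failed lookup. */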
+ if (BV (clib_bihash_search) (h, &kv, &kv) < 0)
+ if (BV (clib_bihash_search) (h, &kv, &kv) < 0)
+ clib_warning ("[%d] search for key %lld failed unexpectedly\n",
+ i, tm->keys[i]);
+ if (kv.value != (u64) (i + 1))
+ clib_warning
+ ("[%d] search for key %lld returned %lld, not %lld\n", i,
+	       tm->keys[i], kv.value, (u64) (i + 1));
+ }
+ }
+
+ delta = clib_time_now (&tm->clib_time) - before;
+ total_searches = (uword) tm->search_iter * (uword) tm->nitems;
+
+ if (delta > 0)
+ fformat (stdout, "%.f searches per second\n",
+ ((f64) total_searches) / delta);
+
+ fformat (stdout, "%lld searches in %.6f seconds\n", total_searches, delta);
+
+ fformat (stdout, "Standard E-hash search for items %d times...\n",
+ tm->search_iter);
+
+ before = clib_time_now (&tm->clib_time);
+
+ for (j = 0; j < tm->search_iter; j++)
+ {
+ for (i = 0; i < tm->nitems; i++)
+ {
+ p = hash_get (tm->key_hash, tm->keys[i]);
+ if (p == 0 || p[0] != (uword) (i + 1))
+ clib_warning ("ugh, couldn't find %lld\n", tm->keys[i]);
+ }
+ }
+
+ delta = clib_time_now (&tm->clib_time) - before;
+ total_searches = (uword) tm->search_iter * (uword) tm->nitems;
+
+ fformat (stdout, "%lld searches in %.6f seconds\n", total_searches, delta);
+
+ if (delta > 0)
+ fformat (stdout, "%.f searches per second\n",
+ ((f64) total_searches) / delta);
+
+ fformat (stdout, "Delete items...\n");
+
+ for (i = 0; i < tm->nitems; i++)
+ {
+ int j;
+ int rv;
+
+ kv.key = tm->keys[i];
+ kv.value = (u64) (i + 1);
+ rv = BV (clib_bihash_add_del) (h, &kv, 0 /* is_add */ );
+
+ if (rv < 0)
+ clib_warning ("delete key %lld not ok but should be", tm->keys[i]);
+
+ if (tm->careful_delete_tests)
+ {
+ for (j = 0; j < tm->nitems; j++)
+ {
+ kv.key = tm->keys[j];
+ rv = BV (clib_bihash_search) (h, &kv, &kv);
+ if (j <= i && rv >= 0)
+ {
+ clib_warning
+ ("i %d j %d search ok but should not be, value %lld",
+ i, j, kv.value);
+ }
+ if (j > i && rv < 0)
+ {
+ clib_warning ("i %d j %d search not ok but should be",
+ i, j);
+ }
+ }
+ }
+ }
+
+ fformat (stdout, "After deletions, should be empty...\n");
+
+ fformat (stdout, "%U", BV (format_bihash), h, 0 /* very verbose */ );
+ return 0;
+}
+
+clib_error_t *
+test_bihash_cache (test_main_t * tm)
+{
+ u32 lru;
+ BVT (clib_bihash_bucket) _b, *b = &_b;
+
+ BV (clib_bihash_reset_cache) (b);
+
+ fformat (stdout, "Initial LRU config: %U\n", BV (format_bihash_lru), b);
+
+ BV (clib_bihash_update_lru_not_inline) (b, 3);
+
+ fformat (stdout, "use slot 3, LRU config: %U\n", BV (format_bihash_lru), b);
+
+ BV (clib_bihash_update_lru) (b, 1);
+
+ fformat (stdout, "use slot 1 LRU config: %U\n", BV (format_bihash_lru), b);
+
+ lru = BV (clib_bihash_get_lru) (b);
+
+ fformat (stdout, "least-recently-used is %d\n", lru);
+
+ BV (clib_bihash_update_lru) (b, 4);
+
+ fformat (stdout, "use slot 4 LRU config: %U\n", BV (format_bihash_lru), b);
+
+ lru = BV (clib_bihash_get_lru) (b);
+
+ fformat (stdout, "least-recently-used is %d\n", lru);
+
+ return 0;
+}
+
+clib_error_t *
+test_bihash_main (test_main_t * tm)
+{
+ unformat_input_t *i = tm->input;
+ clib_error_t *error;
+ int which = 0;
+
+ while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (i, "seed %u", &tm->seed))
+ ;
+
+ else if (unformat (i, "nbuckets %d", &tm->nbuckets))
+ ;
+ else if (unformat (i, "non-random-keys"))
+ tm->non_random_keys = 1;
+ else if (unformat (i, "nitems %d", &tm->nitems))
+ ;
+ else if (unformat (i, "careful %d", &tm->careful_delete_tests))
+ ;
+ else if (unformat (i, "verbose %d", &tm->verbose))
+ ;
+ else if (unformat (i, "search %d", &tm->search_iter))
+ ;
+ else if (unformat (i, "vec64"))
+ which = 1;
+ else if (unformat (i, "cache"))
+ which = 2;
+
+ else if (unformat (i, "verbose"))
+ tm->verbose = 1;
+ else
+ return clib_error_return (0, "unknown input '%U'",
+ format_unformat_error, i);
+ }
+
+ switch (which)
+ {
+ case 0:
+ error = test_bihash (tm);
+ break;
+
+ case 1:
+ error = test_bihash_vec64 (tm);
+ break;
+
+ case 2:
+ error = test_bihash_cache (tm);
+ break;
+
+ default:
+ return clib_error_return (0, "no such test?");
+ }
+
+ return error;
+}
+
+#ifdef CLIB_UNIX
+int
+main (int argc, char *argv[])
+{
+ unformat_input_t i;
+ clib_error_t *error;
+ test_main_t *tm = &test_main;
+
+ clib_mem_init (0, 3ULL << 30);
+
+ tm->input = &i;
+ tm->seed = 0xdeaddabe;
+
+ tm->nbuckets = 2;
+ tm->nitems = 5;
+ tm->verbose = 1;
+ tm->search_iter = 1;
+ tm->careful_delete_tests = 0;
+ tm->key_hash = hash_create (0, sizeof (uword));
+ clib_time_init (&tm->clib_time);
+
+ unformat_init_command_line (&i, argv);
+ error = test_bihash_main (tm);
+ unformat_free (&i);
+
+ if (error)
+ {
+ clib_error_report (error);
+ return 1;
+ }
+ return 0;
+}
+#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_dlist.c b/src/vppinfra/test_dlist.c
new file mode 100644
index 00000000..c5535c85
--- /dev/null
+++ b/src/vppinfra/test_dlist.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vppinfra/dlist.h>
+
+typedef struct
+{
+ dlist_elt_t *test_pool;
+ u32 head_index;
+} test_main_t;
+
+test_main_t test_main;
+
+int
+test_dlist_main (unformat_input_t * input)
+{
+ test_main_t *tm = &test_main;
+ dlist_elt_t *head, *elt;
+ u32 elt_index, head_index;
+ u32 value;
+ int i;
+
+ pool_get (tm->test_pool, head);
+ head_index = head - tm->test_pool;
+ clib_dlist_init (tm->test_pool, head - tm->test_pool);
+
+ for (i = 1; i <= 3; i++)
+ {
+ pool_get (tm->test_pool, elt);
+ elt_index = elt - tm->test_pool;
+
+ clib_dlist_init (tm->test_pool, elt_index);
+ elt->value = i;
+ clib_dlist_addtail (tm->test_pool, head_index, elt_index);
+ }
+
+ head = pool_elt_at_index (tm->test_pool, head_index);
+
+ fformat (stdout, "Dump forward links\n");
+ elt_index = head->next;
+ i = 1;
+ value = 0;
+ while (value != ~0)
+ {
+ elt = pool_elt_at_index (tm->test_pool, elt_index);
+ fformat (stdout, "elt %d value %d\n", i++, elt->value);
+ elt_index = elt->next;
+ value = elt->value;
+ }
+
+ fformat (stdout, "Dump reverse links\n");
+ elt_index = head->prev;
+ i = 1;
+ value = 0;
+ while (value != ~0)
+ {
+ elt = pool_elt_at_index (tm->test_pool, elt_index);
+ fformat (stdout, "elt %d value %d\n", i++, elt->value);
+ elt_index = elt->prev;
+ value = elt->value;
+ }
+
+ fformat (stdout, "remove first element\n");
+
+ elt_index = clib_dlist_remove_head (tm->test_pool, head_index);
+ elt = pool_elt_at_index (tm->test_pool, elt_index);
+
+ fformat (stdout, "removed index %d value %d\n", elt_index, elt->value);
+
+ head = pool_elt_at_index (tm->test_pool, head_index);
+
+ fformat (stdout, "Dump forward links\n");
+ elt_index = head->next;
+ i = 1;
+ value = 0;
+ while (value != ~0)
+ {
+ elt = pool_elt_at_index (tm->test_pool, elt_index);
+ fformat (stdout, "elt %d value %d\n", i++, elt->value);
+ elt_index = elt->next;
+ value = elt->value;
+ }
+
+ fformat (stdout, "Dump reverse links\n");
+ elt_index = head->prev;
+ i = 1;
+ value = 0;
+ while (value != ~0)
+ {
+ elt = pool_elt_at_index (tm->test_pool, elt_index);
+ fformat (stdout, "elt %d value %d\n", i++, elt->value);
+ elt_index = elt->prev;
+ value = elt->value;
+ }
+
+ fformat (stdout, "re-insert index %d value %d at head\n", 1, 1);
+
+ clib_dlist_addhead (tm->test_pool, head_index, 1);
+
+ fformat (stdout, "Dump forward links\n");
+ elt_index = head->next;
+ i = 1;
+ value = 0;
+ while (value != ~0)
+ {
+ elt = pool_elt_at_index (tm->test_pool, elt_index);
+ fformat (stdout, "elt %d value %d\n", i++, elt->value);
+ elt_index = elt->next;
+ value = elt->value;
+ }
+
+ fformat (stdout, "Dump reverse links\n");
+ elt_index = head->prev;
+ i = 1;
+ value = 0;
+ while (value != ~0)
+ {
+ elt = pool_elt_at_index (tm->test_pool, elt_index);
+ fformat (stdout, "elt %d value %d\n", i++, elt->value);
+ elt_index = elt->prev;
+ value = elt->value;
+ }
+
+ fformat (stdout, "Remove middle element\n");
+
+ clib_dlist_remove (tm->test_pool, 2);
+ elt = pool_elt_at_index (tm->test_pool, 2);
+
+ fformat (stdout, "removed index %d value %d\n", elt_index, elt->value);
+
+ fformat (stdout, "Dump forward links\n");
+ elt_index = head->next;
+ i = 1;
+ value = 0;
+ while (value != ~0)
+ {
+ elt = pool_elt_at_index (tm->test_pool, elt_index);
+ fformat (stdout, "elt %d value %d\n", i++, elt->value);
+ elt_index = elt->next;
+ value = elt->value;
+ }
+
+ fformat (stdout, "Dump reverse links\n");
+ elt_index = head->prev;
+ i = 1;
+ value = 0;
+ while (value != ~0)
+ {
+ elt = pool_elt_at_index (tm->test_pool, elt_index);
+ fformat (stdout, "elt %d value %d\n", i++, elt->value);
+ elt_index = elt->prev;
+ value = elt->value;
+ }
+
+ return 0;
+}
+
+#ifdef CLIB_UNIX
+int
+main (int argc, char *argv[])
+{
+ unformat_input_t i;
+ int ret;
+
+ clib_mem_init (0, 3ULL << 30);
+
+ unformat_init_command_line (&i, argv);
+ ret = test_dlist_main (&i);
+ unformat_free (&i);
+
+ return ret;
+}
+#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_elf.c b/src/vppinfra/test_elf.c
new file mode 100644
index 00000000..84fe0776
--- /dev/null
+++ b/src/vppinfra/test_elf.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2008 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/elf.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#ifndef CLIB_UNIX
+#error "unix only"
+#endif
+
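+/* Minimal patchelf-style exerciser: read an ELF file, optionally
+ rewrite the PT_INTERP string in place (the new interpreter must fit
+ in the existing segment), strip DT_RPATH/DT_RUNPATH entries, and
+ write the result back out. */
+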
+static clib_error_t *
+elf_set_interpreter (elf_main_t * em, char *interp)
+{
+ elf_segment_t *g;
+ elf_section_t *s;
+ clib_error_t *error;
+
+ vec_foreach (g, em->segments)
+ {
+ if (g->header.type == ELF_SEGMENT_INTERP)
+ break;
+ }
+
+ if (g >= vec_end (em->segments))
+ return clib_error_return (0, "interpreter not found");
+
+ if (g->header.memory_size < 1 + strlen (interp))
+ return clib_error_return (0,
+ "given interpreter does not fit; must be less than %d bytes (`%s' given)",
+ g->header.memory_size, interp);
+
+ error =
+ elf_get_section_by_start_address (em, g->header.virtual_address, &s);
+ if (error)
+ return error;
+
+ /* Put in new null terminated string. */
+ memset (s->contents, 0, vec_len (s->contents));
+ clib_memcpy (s->contents, interp, strlen (interp));
+
+ return 0;
+}
+
+static void
+delete_dynamic_rpath_entries_from_section (elf_main_t * em, elf_section_t * s)
+{
+ elf64_dynamic_entry_t *e;
+ elf64_dynamic_entry_t *new_es = 0;
+
+ vec_foreach (e, em->dynamic_entries)
+ {
+ switch (e->type)
+ {
+ case ELF_DYNAMIC_ENTRY_RPATH:
+ case ELF_DYNAMIC_ENTRY_RUN_PATH:
+ break;
+
+ default:
+ vec_add1 (new_es, e[0]);
+ break;
+ }
+ }
+
+ /* Pad so as to keep section size constant. */
+ {
+ elf64_dynamic_entry_t e_end;
+ e_end.type = ELF_DYNAMIC_ENTRY_END;
+ e_end.data = 0;
+ while (vec_len (new_es) < vec_len (em->dynamic_entries))
+ vec_add1 (new_es, e_end);
+ }
+
+ /* Install the filtered entry vector before rewriting the section. */
+ vec_free (em->dynamic_entries);
+ em->dynamic_entries = new_es;
+
+ elf_set_dynamic_entries (em);
+}
+
+static void
+elf_delete_dynamic_rpath_entries (elf_main_t * em)
+{
+ elf_section_t *s;
+
+ vec_foreach (s, em->sections)
+ {
+ switch (s->header.type)
+ {
+ case ELF_SECTION_DYNAMIC:
+ delete_dynamic_rpath_entries_from_section (em, s);
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
+typedef struct
+{
+ elf_main_t elf_main;
+ char *input_file;
+ char *output_file;
+ char *set_interpreter;
+ int verbose;
+} elf_test_main_t;
+
+int
+main (int argc, char *argv[])
+{
+ elf_test_main_t _tm, *tm = &_tm;
+ elf_main_t *em = &tm->elf_main;
+ unformat_input_t i;
+ clib_error_t *error = 0;
+
+ memset (tm, 0, sizeof (tm[0]));
+
+ unformat_init_command_line (&i, argv);
+ while (unformat_check_input (&i) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (&i, "in %s", &tm->input_file))
+ ;
+ else if (unformat (&i, "out %s", &tm->output_file))
+ ;
+ else if (unformat (&i, "set-interpreter %s", &tm->set_interpreter))
+ ;
+ else if (unformat (&i, "verbose"))
+ tm->verbose = ~0;
+ else if (unformat (&i, "verbose-symbols"))
+ tm->verbose |= FORMAT_ELF_MAIN_SYMBOLS;
+ else if (unformat (&i, "verbose-relocations"))
+ tm->verbose |= FORMAT_ELF_MAIN_RELOCATIONS;
+ else if (unformat (&i, "verbose-dynamic"))
+ tm->verbose |= FORMAT_ELF_MAIN_DYNAMIC;
+ else
+ {
+ error = unformat_parse_error (&i);
+ goto done;
+ }
+ }
+
+ if (!tm->input_file)
+ {
+ clib_warning ("No input file! Using test_bihash_template");
+ tm->input_file = "test_bihash_template";
+ }
+
+ error = elf_read_file (em, tm->input_file);
+ if (error)
+ goto done;
+
+ if (tm->set_interpreter)
+ {
+ error = elf_set_interpreter (em, tm->set_interpreter);
+ if (error)
+ goto done;
+ elf_delete_dynamic_rpath_entries (em);
+ }
+
+ if (tm->verbose)
+ fformat (stdout, "%U", format_elf_main, em, tm->verbose);
+
+ if (tm->output_file)
+ error = elf_write_file (em, tm->output_file);
+
+ elf_main_free (em);
+
+done:
+ if (error)
+ {
+ clib_error_report (error);
+ return 1;
+ }
+ else
+ return 0;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_elog.c b/src/vppinfra/test_elog.c
new file mode 100644
index 00000000..1cf5ba1f
--- /dev/null
+++ b/src/vppinfra/test_elog.c
@@ -0,0 +1,315 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/elog.h>
+#include <vppinfra/error.h>
+#include <vppinfra/format.h>
+#include <vppinfra/random.h>
+#include <vppinfra/serialize.h>
+#include <vppinfra/unix.h>
+
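+/* Event-logger exerciser. The default mode generates synthetic events
+ of several flavors (enum + float data, per-track bytes, fixed-size
+ strings, string-table offsets); "dump", "load", and "merge" exercise
+ serialization to and from files. */
+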
+int
+test_elog_main (unformat_input_t * input)
+{
+ clib_error_t *error = 0;
+ u32 i, n_iter, seed, max_events;
+ elog_main_t _em, *em = &_em;
+ u32 verbose;
+ f64 min_sample_time;
+ char *dump_file, *load_file, *merge_file, **merge_files;
+ u8 *tag, **tags;
+ f64 align_tweak;
+ f64 *align_tweaks;
+
+ n_iter = 100;
+ max_events = 100000;
+ seed = 1;
+ verbose = 0;
+ dump_file = 0;
+ load_file = 0;
+ merge_files = 0;
+ tags = 0;
+ align_tweaks = 0;
+ min_sample_time = 2;
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "iter %d", &n_iter))
+ ;
+ else if (unformat (input, "seed %d", &seed))
+ ;
+ else if (unformat (input, "dump %s", &dump_file))
+ ;
+ else if (unformat (input, "load %s", &load_file))
+ ;
+ else if (unformat (input, "tag %s", &tag))
+ vec_add1 (tags, tag);
+ else if (unformat (input, "merge %s", &merge_file))
+ vec_add1 (merge_files, merge_file);
+
+ else if (unformat (input, "verbose %=", &verbose, 1))
+ ;
+ else if (unformat (input, "max-events %d", &max_events))
+ ;
+ else if (unformat (input, "sample-time %f", &min_sample_time))
+ ;
+ else if (unformat (input, "align-tweak %f", &align_tweak))
+ vec_add1 (align_tweaks, align_tweak);
+ else
+ {
+ error = clib_error_create ("unknown input `%U'\n",
+ format_unformat_error, input);
+ goto done;
+ }
+ }
+
+#ifdef CLIB_UNIX
+ if (load_file)
+ {
+ if ((error = elog_read_file (em, load_file)))
+ goto done;
+ }
+
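+ /* Merge mode: read every file, then fold files 1..n-1 into the
+ first. Each "align-tweak" value adjusts the time alignment of the
+ corresponding merged log (see elog_merge ()). */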
+ else if (merge_files)
+ {
+ uword i;
+ elog_main_t *ems;
+ vec_clone (ems, merge_files);
+
+ /* Supply default tags as needed */
+ if (vec_len (tags) < vec_len (ems))
+ {
+ for (i = vec_len (tags); i < vec_len (ems); i++)
+ vec_add1 (tags, format (0, "F%d%c", i, 0));
+ }
+
+ elog_init (em, max_events);
+ for (i = 0; i < vec_len (ems); i++)
+ {
+ if ((error =
+ elog_read_file (i == 0 ? em : &ems[i], merge_files[i])))
+ goto done;
+ if (i > 0)
+ {
+ align_tweak = 0.0;
+ if (i <= vec_len (align_tweaks))
+ align_tweak = align_tweaks[i - 1];
+ elog_merge (em, tags[0], &ems[i], tags[i], align_tweak);
+ tags[0] = 0;
+ }
+ }
+ }
+
+ else
+#endif /* CLIB_UNIX */
+ {
+ f64 t[2];
+
+ elog_init (em, max_events);
+ elog_enable_disable (em, 1);
+ t[0] = unix_time_now ();
+
+ for (i = 0; i < n_iter; i++)
+ {
+ u32 j, n, sum;
+
+ n = 1 + (random_u32 (&seed) % 128);
+ sum = 0;
+ for (j = 0; j < n; j++)
+ sum += random_u32 (&seed);
+
+ {
+ ELOG_TYPE_XF (e);
+ ELOG (em, e, sum);
+ }
+
+ {
+ ELOG_TYPE_XF (e);
+ ELOG (em, e, sum + 1);
+ }
+
+ {
+ struct
+ {
+ u32 string_index;
+ f32 f;
+ } *d;
+ ELOG_TYPE_DECLARE (e) = {
+ .format = "fumble %s %.9f",
+ .format_args = "t4f4",
+ .n_enum_strings = 4,
+ .enum_strings = { "string0", "string1", "string2", "string3", },
+ };
+
+ d = ELOG_DATA (em, e);
+
+ d->string_index = sum & 3;
+ d->f = (sum & 0xff) / 128.;
+ }
+
+ {
+ ELOG_TYPE_DECLARE (e) = {
+ .format = "bar %d.%d.%d.%d",
+ .format_args = "i1i1i1i1",
+ };
+ ELOG_TRACK (my_track);
+ u8 *d = ELOG_TRACK_DATA (em, e, my_track);
+ d[0] = i + 0;
+ d[1] = i + 1;
+ d[2] = i + 2;
+ d[3] = i + 3;
+ }
+
+ {
+ ELOG_TYPE_DECLARE (e) = {
+ .format = "bar `%s'",
+ .format_args = "s20",
+ };
+ struct
+ {
+ char s[20];
+ } *d;
+ u8 *v;
+
+ d = ELOG_DATA (em, e);
+ v = format (0, "foo %d%c", i, 0);
+ clib_memcpy (d->s, v, clib_min (vec_len (v), sizeof (d->s)));
+ }
+
+ {
+ ELOG_TYPE_DECLARE (e) = {
+ .format = "bar `%s'",
+ .format_args = "T4",
+ };
+ struct
+ {
+ u32 offset;
+ } *d;
+
+ d = ELOG_DATA (em, e);
+ d->offset = elog_string (em, "string table %d", i);
+ }
+ }
+
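+ /* Spin until at least min_sample_time has elapsed since the first
+ event, so the elapsed-time baseline used to scale timestamps is
+ long enough to be meaningful. */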
+ do
+ {
+ t[1] = unix_time_now ();
+ }
+ while (t[1] - t[0] < min_sample_time);
+ }
+
+#ifdef CLIB_UNIX
+ if (dump_file)
+ {
+ if ((error =
+ elog_write_file (em, dump_file, 0 /* do not flush ring */ )))
+ goto done;
+ }
+#endif
+
+ if (verbose)
+ {
+ elog_event_t *e, *es;
+ es = elog_get_events (em);
+ vec_foreach (e, es)
+ {
+ clib_warning ("%18.9f: %12U %U\n", e->time,
+ format_elog_track, em, e, format_elog_event, em, e);
+ }
+ }
+
+done:
+ if (error)
+ {
+ clib_error_report (error);
+ return 1;
+ }
+ return 0;
+}
+
+#ifdef CLIB_UNIX
+int
+main (int argc, char *argv[])
+{
+ unformat_input_t i;
+ int r;
+
+ clib_mem_init (0, 3ULL << 30);
+
+ unformat_init_command_line (&i, argv);
+ r = test_elog_main (&i);
+ unformat_free (&i);
+ return r;
+}
+#endif
+
+/**
+ * @brief GDB-callable function: vl - return the length of a vector
+ *
+ * @param p address of the vector
+ *
+ * @return vector length (u32)
+ */
+u32
+vl (void *p)
+{
+ return vec_len (p);
+}
+
+/**
+ * @brief GDB-callable function: pe - return pool_elts (), the number
+ * of elements in use in a pool
+ *
+ * @param v address of the pool
+ *
+ * @return element count (uword)
+ */
+#include <vppinfra/pool.h>
+uword
+pe (void *v)
+{
+ return (pool_elts (v));
+}
+
+#include <vppinfra/hash.h>
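+/* GDB-callable function: he - return hash_elts (), the number of
+ entries in a hash table. */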
+uword
+he (void *v)
+{
+ return (hash_elts (v));
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_fifo.c b/src/vppinfra/test_fifo.c
new file mode 100644
index 00000000..45392bc3
--- /dev/null
+++ b/src/vppinfra/test_fifo.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/fifo.h>
+#include <vppinfra/random.h>
+
+typedef struct
+{
+ int a, b, c;
+} A;
+
+always_inline void
+A_set (A * a, int k)
+{
+ a->a = 1 * k;
+ a->b = 2 * k;
+ a->c = 3 * k;
+}
+
+always_inline int
+A_is_valid (A * a, int k)
+{
+ return a->a == 1 * k && a->b == 2 * k && a->c == 3 * k;
+}
+
+int
+test_fifo_main (unformat_input_t * input)
+{
+ u32 n_added, n_removed, i, j, n_iter, seed, verbose;
+ A *as = 0, *a;
+
+ n_iter = 1000;
+ seed = random_default_seed ();
+ verbose = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "iter %d", &n_iter))
+ ;
+ else if (unformat (input, "seed %d", &seed))
+ ;
+ else if (unformat (input, "verbose %=", &verbose, 1))
+ ;
+ else
+ {
+ clib_warning ("unknown input `%U'\n", format_unformat_error, input);
+ return 1;
+ }
+ }
+
+ if (verbose)
+ clib_warning ("iter %d seed %d\n", n_iter, seed);
+
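+ /* Random add/remove walk. Elements are enqueued with sequential
+ patterns, so FIFO order guarantees the next element removed
+ carries pattern n_removed, and clib_fifo_elts () must always
+ equal n_added - n_removed. */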
+ n_added = n_removed = 0;
+ for (i = 0; i < n_iter; i++)
+ {
+ if (clib_fifo_elts (as) > 0 && (random_u32 (&seed) & 1))
+ {
+ A tmp;
+ clib_fifo_sub1 (as, tmp);
+ ASSERT (A_is_valid (&tmp, n_removed));
+ n_removed++;
+ }
+ else
+ {
+ clib_fifo_add2 (as, a);
+ A_set (a, n_added);
+ n_added++;
+ }
+
+ ASSERT (clib_fifo_elts (as) == n_added - n_removed);
+
+ j = 0;
+ /* *INDENT-OFF* */
+ clib_fifo_foreach (a, as, {
+ ASSERT (A_is_valid (a, n_removed + j));
+ j++;
+ });
+ /* *INDENT-ON* */
+
+ ASSERT (j == clib_fifo_elts (as));
+ }
+
+ clib_fifo_free (as);
+
+ return 0;
+}
+
+#ifdef CLIB_UNIX
+int
+main (int argc, char *argv[])
+{
+ unformat_input_t i;
+ int r;
+
+ clib_mem_init (0, 3ULL << 30);
+
+ unformat_init_command_line (&i, argv);
+ r = test_fifo_main (&i);
+ unformat_free (&i);
+
+ return r;
+}
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_format.c b/src/vppinfra/test_format.c
new file mode 100644
index 00000000..cc95a00e
--- /dev/null
+++ b/src/vppinfra/test_format.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/format.h>
+
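+/* clib-specific conversions exercised below: %U calls a user-supplied
+ format function, %v prints a vector, %=10s centers within the field,
+ %S prints a C string with '_' mapped to space, and %wd takes a
+ word-sized (pointer-width) integer. */
+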
+static int verbose;
+static u8 *test_vec;
+
+static u8 *
+format_test1 (u8 * s, va_list * va)
+{
+ uword x = va_arg (*va, uword);
+ f64 y = va_arg (*va, f64);
+ return format (s, "%12d %12f%12.4e", x, y, y);
+}
+
+static int
+expectation (const char *exp, char *fmt, ...)
+{
+ int ret = 0;
+
+ va_list va;
+ va_start (va, fmt);
+ test_vec = va_format (test_vec, fmt, &va);
+ va_end (va);
+
+ vec_add1 (test_vec, 0);
+ if (strcmp (exp, (char *) test_vec))
+ {
+ fformat (stdout, "FAIL: %s (expected vs. result)\n\"%s\"\n\"%v\"\n",
+ fmt, exp, test_vec);
+ ret = 1;
+ }
+ else if (verbose)
+ fformat (stdout, "PASS: %s\n", fmt);
+ vec_delete (test_vec, vec_len (test_vec), 0);
+ return ret;
+}
+
+int
+test_format_main (unformat_input_t * input)
+{
+ int ret = 0;
+ u8 *food = format (0, "food");
+
+ ret |= expectation ("foo", "foo");
+ ret |= expectation ("foo", "%s", "foo");
+ ret |= expectation ("9876", "%d", 9876);
+ ret |= expectation ("-9876", "%wd", (word) - 9876);
+ ret |= expectation ("98765432", "%u", 98765432);
+ ret |= expectation ("1200ffee", "%x", 0x1200ffee);
+ ret |= expectation ("BABEBABE", "%X", 0xbabebabe);
+ ret |= expectation ("10%a", "%d%%%c", 10, 'a');
+ ret |= expectation ("123456789abcdef0", "%016Lx", 0x123456789abcdef0LL);
+ ret |= expectation ("00000123", "%08x", 0x123);
+ ret |= expectation (" 23 23 2.3037e1",
+ "%40U", format_test1, 23, 23.0367);
+ ret |= expectation ("left ", "%-10s", "left");
+ ret |= expectation (" center ", "%=10s", "center");
+ ret |= expectation (" right", "%+10s", "right");
+ ret |= expectation ("123456", "%.0f", 123456.);
+ ret |= expectation ("1234567.0", "%.1f", 1234567.);
+ ret |= expectation ("foo", "%.*s", 3, "food");
+ ret |= expectation ("food ", "%.*s", 10, "food ");
+ ret |= expectation ("(nil)", "%.*s", 3, (void *) 0);
+ ret |= expectation ("foo", "%.*v", 3, food);
+ ret |= expectation ("foobar", "%.*v%s", 3, food, "bar");
+ ret |= expectation ("foo bar", "%S", "foo_bar");
+ vec_free (food);
+ vec_free (test_vec);
+ return ret;
+}
+
+typedef struct
+{
+ int a, b;
+} foo_t;
+
+static u8 *
+format_foo (u8 * s, va_list * va)
+{
+ foo_t *foo = va_arg (*va, foo_t *);
+ return format (s, "{a %d, b %d}", foo->a, foo->b);
+}
+
+static uword
+unformat_foo (unformat_input_t * i, va_list * va)
+{
+ foo_t *foo = va_arg (*va, foo_t *);
+ return unformat (i, "{%D,%D}",
+ sizeof (foo->a), &foo->a, sizeof (foo->b), &foo->b);
+}
+
+int
+test_unformat_main (unformat_input_t * input)
+{
+ u32 v[8];
+ long l;
+ long long ll;
+ f64 f;
+ u8 *s;
+ foo_t foo = { .a = ~0, .b = ~0 };
+
+ v[0] = v[1] = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "01 %d %d", &v[0], &v[1]))
+ fformat (stdout, "got 01 %d %d\n", v[0], v[1]);
+ else if (unformat (input, "d %d", &v[0]))
+ fformat (stdout, "got it d %d\n", v[0]);
+ else if (unformat (input, "ld %ld", &l))
+ fformat (stdout, "got it ld %ld\n", l);
+ else if (unformat (input, "lld %lld", &ll))
+ fformat (stdout, "got it lld %lld\n", ll);
+ else if (unformat (input, "string %s", &s))
+ fformat (stdout, "got string `%s'\n", s);
+ else if (unformat (input, "float %f", &f))
+ fformat (stdout, "got float `%.4f'\n", f);
+ else if (unformat (input, "foo %U", unformat_foo, &foo))
+ fformat (stdout, "got a foo `%U'\n", format_foo, &foo);
+ else if (unformat (input, "ignore-me1"))
+ fformat (stdout, "got an `ignore-me1'\n");
+ else if (unformat (input, "ignore-me2"))
+ fformat (stdout, "got an `ignore-me2'\n");
+ else if (unformat (input, "gi%d_%d@-", &v[0], &v[1]))
+ fformat (stdout, "got `gi%d_%d@-'\n", v[0], v[1]);
+ else if (unformat (input, "%_%d.%d.%d.%d%_->%_%d.%d.%d.%d%_",
+ &v[0], &v[1], &v[2], &v[3],
+ &v[4], &v[5], &v[6], &v[7]))
+ fformat (stdout, "got %d.%d.%d.%d -> %d.%d.%d.%d",
+ v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);
+ else
+ {
+ clib_warning ("unknown input `%U'\n", format_unformat_error, input);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+#ifdef CLIB_UNIX
+int
+main (int argc, char *argv[])
+{
+ unformat_input_t i;
+
+ clib_mem_init (0, 3ULL << 30);
+
+ verbose = (argc > 1);
+ unformat_init_command_line (&i, argv);
+
+ if (unformat (&i, "unformat"))
+ return test_unformat_main (&i);
+ else
+ return test_format_main (&i);
+}
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_fpool.c b/src/vppinfra/test_fpool.c
new file mode 100644
index 00000000..e2d67f16
--- /dev/null
+++ b/src/vppinfra/test_fpool.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+#include <vppinfra/pool.h>
+
+/* can be a very large size */
+#define NELTS 1024
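+
+/* Fixed-size pool: pool_init_fixed () preallocates element and
+ free-list storage up front, so pool_get () never reallocates and
+ element pointers stay valid for the life of the pool. */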
+
+int
+main (int argc, char *argv[])
+{
+ u32 *junk = 0;
+ int i;
+ u32 *tp = 0;
+ u32 *indices = 0;
+
+ clib_mem_init (0, 3ULL << 30);
+
+ vec_validate (indices, NELTS - 1);
+ _vec_len (indices) = 0;
+
+ pool_init_fixed (tp, NELTS);
+
+ for (i = 0; i < NELTS; i++)
+ {
+ pool_get (tp, junk);
+ vec_add1 (indices, junk - tp);
+ *junk = i;
+ }
+
+ for (i = 0; i < NELTS; i++)
+ {
+ junk = pool_elt_at_index (tp, indices[i]);
+ ASSERT (*junk == i);
+ }
+
+ fformat (stdout, "%d pool elts before deletes\n", pool_elts (tp));
+
+ pool_put_index (tp, indices[12]);
+ pool_put_index (tp, indices[43]);
+
+ fformat (stdout, "%d pool elts after deletes\n", pool_elts (tp));
+
+ pool_validate (tp);
+
+ pool_free (tp);
+ return 0;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_hash.c b/src/vppinfra/test_hash.c
new file mode 100644
index 00000000..94110ab6
--- /dev/null
+++ b/src/vppinfra/test_hash.c
@@ -0,0 +1,458 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifdef CLIB_LINUX_KERNEL
+#include <linux/unistd.h>
+#endif
+
+#ifdef CLIB_UNIX
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <vppinfra/time.h>
+#endif
+
+#include <vppinfra/random.h>
+#include <vppinfra/mem.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/error.h>
+#include <vppinfra/format.h>
+#include <vppinfra/bitmap.h>
+
+static int verbose;
+#define if_verbose(format,args...) \
+ if (verbose) { clib_warning(format, ## args); }
+
+typedef struct
+{
+ int n_iterations;
+
+ int n_iterations_per_print;
+
+ /* Number of pairs to insert into hash. */
+ int n_pairs;
+
+ /* Validate the hash and all pairs every this many iterations
+ (zero disables validation). */
+ int n_iterations_per_validate;
+
+ /* Non-zero: create the hash with this fixed size and disable
+ automatic grow/shrink. */
+ int fixed_hash_size;
+
+ /* Verbosity level for hash formats. */
+ int verbose;
+
+ /* Random number seed. */
+ u32 seed;
+} hash_test_t;
+
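+/* Check that hash_next () enumerates exactly the pairs that
+ hash_foreach_pair visits, then reports end-of-table. */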
+static clib_error_t *
+hash_next_test (word * h)
+{
+ hash_next_t hn = { 0 };
+ hash_pair_t *p0, *p1;
+ clib_error_t *error = 0;
+
+ /* *INDENT-OFF* */
+ hash_foreach_pair (p0, h, {
+ p1 = hash_next (h, &hn);
+ error = CLIB_ERROR_ASSERT (p0 == p1);
+ if (error)
+ break;
+ });
+ /* *INDENT-ON* */
+
+ if (!error)
+ error = CLIB_ERROR_ASSERT (!hash_next (h, &hn));
+
+ return error;
+}
+
+static u8 *
+test1_format (u8 * s, va_list * args)
+{
+ void *CLIB_UNUSED (user_arg) = va_arg (*args, void *);
+ void *v = va_arg (*args, void *);
+ hash_pair_t *p = va_arg (*args, hash_pair_t *);
+ hash_t *h = hash_header (v);
+
+ return format (s, "0x%8U -> 0x%8U",
+ format_hex_bytes, &p->key, sizeof (p->key),
+ format_hex_bytes, &p->value[0], hash_value_bytes (h));
+}
+
+static clib_error_t *
+test_word_key (hash_test_t * ht)
+{
+ word *h = 0;
+ word i, j;
+
+ word *keys = 0, *vals = 0;
+ uword *is_inserted = 0;
+
+ clib_error_t *error = 0;
+
+ vec_resize (keys, ht->n_pairs);
+ vec_resize (vals, vec_len (keys));
+
+ h = hash_create (ht->fixed_hash_size, sizeof (vals[0]));
+
+ hash_set_pair_format (h, test1_format, 0);
+ if (ht->fixed_hash_size)
+ hash_set_flags (h, HASH_FLAG_NO_AUTO_GROW | HASH_FLAG_NO_AUTO_SHRINK);
+
+ {
+ uword *unique = 0;
+ u32 k;
+
+ for (i = 0; i < vec_len (keys); i++)
+ {
+ do
+ {
+ k = random_u32 (&ht->seed) & 0xfffff;
+ }
+ while (clib_bitmap_get (unique, k));
+ unique = clib_bitmap_ori (unique, k);
+ keys[i] = k;
+ vals[i] = i;
+ }
+
+ clib_bitmap_free (unique);
+ }
+
+ for (i = 0; i < ht->n_iterations; i++)
+ {
+ u32 vi = random_u32 (&ht->seed) % vec_len (keys);
+
+ if (clib_bitmap_get (is_inserted, vi))
+ hash_unset (h, keys[vi]);
+ else
+ hash_set (h, keys[vi], vals[vi]);
+
+ is_inserted = clib_bitmap_xori (is_inserted, vi);
+
+ if (ht->n_iterations_per_print > 0
+ && ((i + 1) % ht->n_iterations_per_print) == 0)
+ if_verbose ("iteration %d\n %U", i + 1, format_hash, h, ht->verbose);
+
+ if (ht->n_iterations_per_validate == 0
+ || (i + 1) % ht->n_iterations_per_validate)
+ continue;
+
+ {
+ hash_pair_t *p;
+ uword ki;
+
+ /* *INDENT-OFF* */
+ hash_foreach_pair (p, h, {
+ ki = p->value[0];
+ ASSERT (keys[ki] == p->key);
+ });
+ /* *INDENT-ON* */
+ }
+
+ clib_mem_validate ();
+
+ if ((error = hash_validate (h)))
+ goto done;
+
+ for (j = 0; j < vec_len (keys); j++)
+ {
+ uword *v;
+ v = hash_get (h, keys[j]);
+ if ((error =
+ CLIB_ERROR_ASSERT (clib_bitmap_get (is_inserted, j) ==
+ (v != 0))))
+ goto done;
+ if (v)
+ {
+ if ((error = CLIB_ERROR_ASSERT (v[0] == vals[j])))
+ goto done;
+ }
+ }
+ }
+
+ if ((error = hash_next_test (h)))
+ goto done;
+
+ if_verbose ("%U", format_hash, h, ht->verbose);
+
+ for (i = 0; i < vec_len (keys); i++)
+ {
+ if (!clib_bitmap_get (is_inserted, i))
+ continue;
+
+ hash_unset (h, keys[i]);
+ is_inserted = clib_bitmap_xori (is_inserted, i);
+
+ if (ht->n_iterations_per_validate == 0
+ || (i + 1) % ht->n_iterations_per_validate)
+ continue;
+
+ clib_mem_validate ();
+
+ if ((error = hash_validate (h)))
+ goto done;
+
+ for (j = 0; j < vec_len (keys); j++)
+ {
+ uword *v;
+ v = hash_get (h, keys[j]);
+ if ((error =
+ CLIB_ERROR_ASSERT (clib_bitmap_get (is_inserted, j) ==
+ (v != 0))))
+ goto done;
+ if (v)
+ {
+ if ((error = CLIB_ERROR_ASSERT (v[0] == vals[j])))
+ goto done;
+ }
+ }
+ }
+
+done:
+ hash_free (h);
+ vec_free (keys);
+ vec_free (vals);
+ clib_bitmap_free (is_inserted);
+
+ if (verbose)
+ fformat (stderr, "%U\n", format_clib_mem_usage, /* verbose */ 0);
+
+ return error;
+}
+
+static u8 *
+test2_format (u8 * s, va_list * args)
+{
+ void *CLIB_UNUSED (user_arg) = va_arg (*args, void *);
+ void *v = va_arg (*args, void *);
+ hash_pair_t *p = va_arg (*args, hash_pair_t *);
+ hash_t *h = hash_header (v);
+
+ return format (s, "0x%8U <- %v",
+ format_hex_bytes, &p->value[0], hash_value_bytes (h),
+ p->key);
+}
+
+static clib_error_t *
+test_string_key (hash_test_t * ht)
+{
+ word i, j;
+
+ u8 **keys = 0;
+ word *vals = 0;
+ uword *is_inserted = 0;
+
+ word *h = 0;
+
+ clib_error_t *error = 0;
+
+ vec_resize (keys, ht->n_pairs);
+ vec_resize (vals, vec_len (keys));
+
+ h =
+ hash_create_vec (ht->fixed_hash_size, sizeof (keys[0][0]),
+ sizeof (uword));
+ hash_set_pair_format (h, test2_format, 0);
+ if (ht->fixed_hash_size)
+ hash_set_flags (h, HASH_FLAG_NO_AUTO_SHRINK | HASH_FLAG_NO_AUTO_GROW);
+
+ for (i = 0; i < vec_len (keys); i++)
+ {
+ keys[i] = random_string (&ht->seed, 5 + (random_u32 (&ht->seed) & 0xf));
+ keys[i] = format (keys[i], "%x", i);
+ vals[i] = random_u32 (&ht->seed);
+ }
+
+ for (i = 0; i < ht->n_iterations; i++)
+ {
+ u32 vi = random_u32 (&ht->seed) % vec_len (keys);
+
+ if (clib_bitmap_get (is_inserted, vi))
+ hash_unset_mem (h, keys[vi]);
+ else
+ hash_set_mem (h, keys[vi], vals[vi]);
+
+ is_inserted = clib_bitmap_xori (is_inserted, vi);
+
+ if (ht->n_iterations_per_print > 0
+ && ((i + 1) % ht->n_iterations_per_print) == 0)
+ if_verbose ("iteration %d\n %U", i + 1, format_hash, h, ht->verbose);
+
+ if (ht->n_iterations_per_validate == 0
+ || (i + 1) % ht->n_iterations_per_validate)
+ continue;
+
+ clib_mem_validate ();
+
+ if ((error = hash_validate (h)))
+ goto done;
+
+ for (j = 0; j < vec_len (keys); j++)
+ {
+ uword *v;
+ v = hash_get_mem (h, keys[j]);
+ if ((error =
+ CLIB_ERROR_ASSERT (clib_bitmap_get (is_inserted, j) ==
+ (v != 0))))
+ goto done;
+ if (v)
+ {
+ if ((error = CLIB_ERROR_ASSERT (v[0] == vals[j])))
+ goto done;
+ }
+ }
+ }
+
+ if ((error = hash_next_test (h)))
+ goto done;
+
+ if_verbose ("%U", format_hash, h, ht->verbose);
+
+ for (i = 0; i < vec_len (keys); i++)
+ {
+ if (!clib_bitmap_get (is_inserted, i))
+ continue;
+
+ hash_unset_mem (h, keys[i]);
+ is_inserted = clib_bitmap_xori (is_inserted, i);
+
+ if (ht->n_iterations_per_validate == 0
+ || (i + 1) % ht->n_iterations_per_validate)
+ continue;
+
+ clib_mem_validate ();
+
+ if ((error = hash_validate (h)))
+ goto done;
+
+ for (j = 0; j < vec_len (keys); j++)
+ {
+ uword *v;
+ v = hash_get_mem (h, keys[j]);
+ if ((error =
+ CLIB_ERROR_ASSERT (clib_bitmap_get (is_inserted, j) ==
+ (v != 0))))
+ goto done;
+ if (v)
+ {
+ if ((error = CLIB_ERROR_ASSERT (v[0] == vals[j])))
+ goto done;
+ }
+ }
+ }
+
+done:
+ hash_free (h);
+ vec_free (vals);
+ clib_bitmap_free (is_inserted);
+
+ for (i = 0; i < vec_len (keys); i++)
+ vec_free (keys[i]);
+ vec_free (keys);
+
+ if (verbose)
+ fformat (stderr, "%U\n", format_clib_mem_usage, /* verbose */ 0);
+
+ return error;
+}
+
+int
+test_hash_main (unformat_input_t * input)
+{
+ hash_test_t _ht = { 0 }, *ht = &_ht;
+ clib_error_t *error;
+
+ ht->n_iterations = 100;
+ ht->n_pairs = 10;
+ ht->fixed_hash_size = 0; /* zero means non-fixed size */
+
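+ /* Each unformat () consumes its pattern on success, so this
+ &&-chain tries every keyword and falls into the error body only
+ when none of them matched. */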
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (0 == unformat (input, "iter %d", &ht->n_iterations)
+ && 0 == unformat (input, "print %d", &ht->n_iterations_per_print)
+ && 0 == unformat (input, "elts %d", &ht->n_pairs)
+ && 0 == unformat (input, "size %d", &ht->fixed_hash_size)
+ && 0 == unformat (input, "seed %d", &ht->seed)
+ && 0 == unformat (input, "verbose %=", &ht->verbose, 1)
+ && 0 == unformat (input, "valid %d",
+ &ht->n_iterations_per_validate))
+ {
+ clib_warning ("unknown input `%U'", format_unformat_error, input);
+ return 1;
+ }
+ }
+
+ if (!ht->seed)
+ ht->seed = random_default_seed ();
+
+ if_verbose ("testing %d iterations, seed %d", ht->n_iterations, ht->seed);
+
+ error = test_word_key (ht);
+ if (error)
+ {
+ clib_error_report (error);
+ return 1;
+ }
+
+ error = test_string_key (ht);
+ if (error)
+ {
+ clib_error_report (error);
+ return 1;
+ }
+
+ return 0;
+}
+
+#ifdef CLIB_UNIX
+int
+main (int argc, char *argv[])
+{
+ unformat_input_t i;
+ int ret;
+
+ clib_mem_init (0, 3ULL << 30);
+
+ verbose = (argc > 1);
+ unformat_init_command_line (&i, argv);
+ ret = test_hash_main (&i);
+ unformat_free (&i);
+
+ return ret;
+}
+#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_heap.c b/src/vppinfra/test_heap.c
new file mode 100644
index 00000000..3d5171bf
--- /dev/null
+++ b/src/vppinfra/test_heap.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <unistd.h>
+#include <stdlib.h>
+
+#include <vppinfra/mem.h>
+#include <vppinfra/heap.h>
+#include <vppinfra/format.h>
+
+static int verbose;
+#define if_verbose(format,args...) \
+ if (verbose) { clib_warning(format, ## args); }
+
+int
+main (int argc, char *argv[])
+{
+ word i, j, k, n, check_mask;
+ u32 seed;
+ u32 *h = 0;
+ uword *objects = 0;
+ uword *handles = 0;
+ uword objects_used;
+ uword align, fixed_size;
+
+ clib_mem_init (0, 3ULL << 30);
+
+ n = 10;
+ seed = (u32) getpid ();
+ check_mask = 0;
+ fixed_size = 0;
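+ /* fixed_size is not settable from the command line; flip it by hand
+ to exercise heap_create_from_memory () instead of an auto-grown
+ heap. */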
+
+ if (argc > 1)
+ {
+ n = atoi (argv[1]);
+ verbose = 1;
+ }
+ if (argc > 2)
+ {
+ word i = atoi (argv[2]);
+ if (i)
+ seed = i;
+ }
+ if (argc > 3)
+ check_mask = atoi (argv[3]);
+
+ align = 0;
+ if (argc > 4)
+ align = 1 << atoi (argv[4]);
+
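+ /* check_mask bits: 1 => heap_validate () every iteration,
+ 2 => fill objects with a test pattern and verify it,
+ 4 => heap_dup () / heap_free () the whole heap every iteration. */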
+ if_verbose ("testing %wd iterations seed %wd\n", n, seed);
+
+ if (verbose)
+ fformat (stderr, "%U\n", format_clib_mem_usage, /* verbose */ 0);
+
+ vec_resize (objects, 1000);
+ if (vec_bytes (objects)) /* stupid warning be gone */
+ memset (objects, ~0, vec_bytes (objects));
+ vec_resize (handles, vec_len (objects));
+
+ objects_used = 0;
+
+ if (fixed_size)
+ {
+ uword max_len = 1024 * 1024;
+ void *memory = clib_mem_alloc (max_len * sizeof (h[0]));
+ h = heap_create_from_memory (memory, max_len, sizeof (h[0]));
+ }
+
+ for (i = 0; i < n; i++)
+ {
+ while (1)
+ {
+ j = random_u32 (&seed) % vec_len (objects);
+ if (objects[j] != ~0 || i + objects_used < n)
+ break;
+ }
+
+ if (objects[j] != ~0)
+ {
+ heap_dealloc (h, handles[j]);
+ objects_used--;
+ objects[j] = ~0;
+ }
+ else
+ {
+ u32 *data;
+ uword size;
+
+ size = 1 + (random_u32 (&seed) % 100);
+ objects[j] = heap_alloc_aligned (h, size, align, handles[j]);
+ objects_used++;
+
+ if (align)
+ ASSERT (0 == (objects[j] & (align - 1)));
+ ASSERT (objects[j] < vec_len (h));
+ ASSERT (size <= heap_len (h, handles[j]));
+
+ /* Set newly allocated object with test data. */
+ if (check_mask & 2)
+ {
+ data = h + objects[j];
+
+ for (k = 0; k < size; k++)
+ data[k] = objects[j] + k;
+ }
+ }
+
+ if (check_mask & 1)
+ heap_validate (h);
+
+ if (check_mask & 4)
+ {
+ /* Duplicate heap at each iteration. */
+ u32 *h1 = heap_dup (h);
+ heap_free (h);
+ h = h1;
+ }
+
+ /* Verify that all used objects have correct test data. */
+ if (check_mask & 2)
+ {
+ for (j = 0; j < vec_len (objects); j++)
+ if (objects[j] != ~0)
+ {
+ u32 *data = h + objects[j];
+ for (k = 0; k < heap_len (h, handles[j]); k++)
+ ASSERT (data[k] == objects[j] + k);
+ }
+ }
+ }
+
+ if (verbose)
+ fformat (stderr, "%U\n", format_heap, h, 1);
+
+ {
+ u32 *h1 = heap_dup (h);
+ if (verbose)
+ fformat (stderr, "%U\n", format_heap, h1, 1);
+ heap_free (h1);
+ }
+
+ heap_free (h);
+ if (verbose)
+ fformat (stderr, "%U\n", format_heap, h, 1);
+ ASSERT (objects_used == 0);
+
+ vec_free (objects);
+ vec_free (handles);
+
+ if (fixed_size)
+ vec_free_h (h, sizeof (heap_header_t));
+
+ if (verbose)
+ fformat (stderr, "%U\n", format_clib_mem_usage, /* verbose */ 0);
+
+ return 0;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_longjmp.c b/src/vppinfra/test_longjmp.c
new file mode 100644
index 00000000..2415c4f0
--- /dev/null
+++ b/src/vppinfra/test_longjmp.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/clib.h>
+#include <vppinfra/longjmp.h>
+#include <vppinfra/format.h>
+
+static void test_calljmp (unformat_input_t * input);
+
+static int i;
+
+static int verbose;
+#define if_verbose(format,args...) \
+ if (verbose) { clib_warning(format, ## args); }
+
+static never_inline void
+f2 (clib_longjmp_t * env)
+{
+ i++;
+ clib_longjmp (env, 1);
+}
+
+static never_inline void
+f1 (clib_longjmp_t * env)
+{
+ i++;
+ f2 (env);
+}
+
+int
+test_longjmp_main (unformat_input_t * input)
+{
+ clib_longjmp_t env;
+
+ i = 0;
+ if (clib_setjmp (&env, 0) == 0)
+ {
+ if_verbose ("calling long jumper %d", i);
+ f1 (&env);
+ }
+ if_verbose ("back from long jump %d", i);
+
+ test_calljmp (input);
+
+ return 0;
+}
+
+static uword
+f3 (uword arg)
+{
+ uword i, j, array[10];
+
+ for (i = 0; i < ARRAY_LEN (array); i++)
+ array[i] = arg + i;
+
+ j = 0;
+ for (i = 0; i < ARRAY_LEN (array); i++)
+ j ^= array[i];
+
+ return j;
+}
+
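+/* clib_calljmp () runs f3 on the supplied private stack. Stacks grow
+ down on the supported architectures, so the address just past the
+ end of the buffer is passed as the initial stack pointer. */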
+static void
+test_calljmp (unformat_input_t * input)
+{
+ static u8 stack[32 * 1024] __attribute__ ((aligned (16)));
+ uword v;
+
+ v = clib_calljmp (f3, 0, stack + sizeof (stack));
+ ASSERT (v == f3 (0));
+ if_verbose ("calljump ok");
+}
+
+#ifdef CLIB_UNIX
+int
+main (int argc, char *argv[])
+{
+ unformat_input_t i;
+ int res;
+
+ clib_mem_init (0, 3ULL << 30);
+
+ verbose = (argc > 1);
+ unformat_init_command_line (&i, argv);
+ res = test_longjmp_main (&i);
+ unformat_free (&i);
+ return res;
+}
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_macros.c b/src/vppinfra/test_macros.c
new file mode 100644
index 00000000..de8f2c49
--- /dev/null
+++ b/src/vppinfra/test_macros.c
@@ -0,0 +1,64 @@
+/*
+ Copyright (c) 2014 Cisco and/or its affiliates.
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+#include <vppinfra/macros.h>
+
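+/* Macro evaluator test: $(name) references expand recursively, so
+ evaluating "$(foo)" below also expands the $(bar) nested inside
+ foo's definition. */
+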
+macro_main_t macro_main;
+
+int
+test_macros_main (unformat_input_t * input)
+{
+ macro_main_t *mm = &macro_main;
+
+ clib_macro_init (mm);
+
+ fformat (stdout, "hostname: %s\n",
+ clib_macro_eval_dollar (mm, "hostname", 1 /* complain */ ));
+
+ clib_macro_set_value (mm, "foo", "this is foo which contains $(bar)");
+ clib_macro_set_value (mm, "bar", "bar");
+
+ fformat (stdout, "evaluate: %s\n",
+ clib_macro_eval (mm, "returns '$(foo)'", 1 /* complain */ ));
+
+ clib_macro_free (mm);
+
+ return 0;
+}
+
+#ifdef CLIB_UNIX
+int
+main (int argc, char *argv[])
+{
+ unformat_input_t i;
+ int ret;
+
+ clib_mem_init (0, 3ULL << 30);
+
+ unformat_init_command_line (&i, argv);
+ ret = test_macros_main (&i);
+ unformat_free (&i);
+
+ return ret;
+}
+#endif /* CLIB_UNIX */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_md5.c b/src/vppinfra/test_md5.c
new file mode 100644
index 00000000..4be6f964
--- /dev/null
+++ b/src/vppinfra/test_md5.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2004 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/vec.h>
+#include <vppinfra/format.h>
+#include <vppinfra/error.h>
+#include <vppinfra/md5.h>
+
+#include <fcntl.h>
+#include <unistd.h>
+
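+/* With no arguments, run the RFC 1321 test vectors; with file
+ arguments, behave like md5sum and print one digest per file. */
+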
+static clib_error_t *md5_test_suite (void);
+
+int
+main (int argc, char *argv[])
+{
+ int i;
+
+ clib_mem_init (0, 3ULL << 30);
+
+ if (argc == 1)
+ {
+ clib_error_t *e;
+ e = md5_test_suite ();
+ if (e)
+ {
+ clib_error_report (e);
+ exit (1);
+ }
+ }
+
+ for (i = 1; i < argc; i++)
+ {
+ md5_context_t m;
+ u8 digest[16];
+ u8 buffer[64 * 1024];
+ int fd, n;
+
+ fd = open (argv[i], O_RDONLY);
+ if (fd < 0)
+ clib_unix_error ("can't open %s", argv[i]);
+
+ md5_init (&m);
+ while ((n = read (fd, buffer, sizeof (buffer))) > 0)
+ md5_add (&m, buffer, n);
+ close (fd);
+ md5_finish (&m, digest);
+ fformat (stdout, "%U %s\n",
+ format_hex_bytes, digest, sizeof (digest), argv[i]);
+ }
+
+ return 0;
+}
+
+static clib_error_t *
+md5_test_suite (void)
+{
+ typedef struct
+ {
+ char *input;
+ char *output;
+ } md5_test_t;
+
+ static md5_test_t tests[] = {
+ {.input = "",
+ .output = "d41d8cd98f00b204e9800998ecf8427e",},
+ {.input = "a",
+ .output = "0cc175b9c0f1b6a831c399e269772661",},
+ {.input = "abc",
+ .output = "900150983cd24fb0d6963f7d28e17f72",},
+ {.input = "message digest",
+ .output = "f96b697d7cb7938d525a2f31aaf161d0",},
+ {.input = "abcdefghijklmnopqrstuvwxyz",
+ .output = "c3fcd3d76192e4007dfb496cca67e13b",},
+ {.input =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
+ .output = "d174ab98d277d9f5a5611c2c9f419d9f",},
+ {.input =
+ "12345678901234567890123456789012345678901234567890123456789012345678901234567890",
+ .output = "57edf4a22be3c955ac49da2e2107b67a",},
+ };
+
+ int i;
+ u8 *s;
+ md5_context_t m;
+ u8 digest[16];
+
+ for (i = 0; i < ARRAY_LEN (tests); i++)
+ {
+ md5_init (&m);
+ md5_add (&m, tests[i].input, strlen (tests[i].input));
+ md5_finish (&m, digest);
+ s = format (0, "%U", format_hex_bytes, digest, sizeof (digest));
+ if (memcmp (s, tests[i].output, 2 * sizeof (digest)))
+ return clib_error_return
+ (0, "%s -> %v expected %s", tests[i].input, s, tests[i].output);
+ vec_free (s);
+ }
+
+ return 0;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_mheap.c b/src/vppinfra/test_mheap.c
new file mode 100644
index 00000000..6bc36b89
--- /dev/null
+++ b/src/vppinfra/test_mheap.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifdef CLIB_LINUX_KERNEL
+#include <linux/unistd.h>
+#endif
+
+#ifdef CLIB_UNIX
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h> /* scanf */
+#endif
+
+#include <vppinfra/mheap.h>
+#include <vppinfra/format.h>
+#include <vppinfra/random.h>
+
+static int verbose = 0;
+#define if_verbose(format,args...) \
+ if (verbose) { clib_warning(format, ## args); }
+
+int
+test_mheap_main (unformat_input_t * input)
+{
+ int i, j, k, n_iterations;
+ void *h, *h_mem;
+ uword *objects = 0;
+ u32 objects_used, really_verbose, n_objects, max_object_size;
+ u32 check_mask, seed, trace, use_vm;
+ u32 print_every = 0;
+ u32 *data;
+ mheap_t *mh;
+
+ /* Validation flags. */
+ check_mask = 0;
+#define CHECK_VALIDITY 1
+#define CHECK_DATA 2
+#define CHECK_ALIGN 4
+
+ n_iterations = 10;
+ seed = 0;
+ max_object_size = 100;
+ n_objects = 1000;
+ trace = 0;
+ really_verbose = 0;
+ use_vm = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (0 == unformat (input, "iter %d", &n_iterations)
+ && 0 == unformat (input, "count %d", &n_objects)
+ && 0 == unformat (input, "size %d", &max_object_size)
+ && 0 == unformat (input, "seed %d", &seed)
+ && 0 == unformat (input, "print %d", &print_every)
+ && 0 == unformat (input, "validdata %|",
+ &check_mask, CHECK_DATA | CHECK_VALIDITY)
+ && 0 == unformat (input, "valid %|",
+ &check_mask, CHECK_VALIDITY)
+ && 0 == unformat (input, "verbose %=", &really_verbose, 1)
+ && 0 == unformat (input, "trace %=", &trace, 1)
+ && 0 == unformat (input, "vm %=", &use_vm, 1)
+ && 0 == unformat (input, "align %|", &check_mask, CHECK_ALIGN))
+ {
+ clib_warning ("unknown input `%U'", format_unformat_error, input);
+ return 1;
+ }
+ }
+
+ /* Zero seed means use default. */
+ if (!seed)
+ seed = random_default_seed ();
+
+ if_verbose
+ ("testing %d iterations, %d %saligned objects, max. size %d, seed %d",
+ n_iterations, n_objects, (check_mask & CHECK_ALIGN) ? "randomly " : "un",
+ max_object_size, seed);
+
+ vec_resize (objects, n_objects);
+ if (vec_bytes (objects)) /* stupid warning be gone */
+ memset (objects, ~0, vec_bytes (objects));
+ objects_used = 0;
+
+ /* Allocate initial heap. */
+ {
+ uword size =
+ max_pow2 (2 * n_objects * max_object_size * sizeof (data[0]));
+
+ h_mem = clib_mem_alloc (size);
+ if (!h_mem)
+ return 1;
+
+ h = mheap_alloc (h_mem, size);
+ }
+
+ if (trace)
+ mheap_trace (h, trace);
+
+ mh = mheap_header (h);
+
+ if (use_vm)
+ mh->flags &= ~MHEAP_FLAG_DISABLE_VM;
+ else
+ mh->flags |= MHEAP_FLAG_DISABLE_VM;
+
+ if (check_mask & CHECK_VALIDITY)
+ mh->flags |= MHEAP_FLAG_VALIDATE;
+
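+  /*
+   * Main loop: each iteration either frees a random in-use object or
+   * allocates a new one of random size (and random alignment when
+   * CHECK_ALIGN is set).  The selection loop below only accepts a free
+   * slot while the iteration budget allows, so the workload shifts
+   * toward frees as it winds down.
+   */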
+ for (i = 0; i < n_iterations; i++)
+ {
+ while (1)
+ {
+ j = random_u32 (&seed) % vec_len (objects);
+ if (objects[j] != ~0 || i + objects_used < n_iterations)
+ break;
+ }
+
+ if (objects[j] != ~0)
+ {
+ mheap_put (h, objects[j]);
+ objects_used--;
+ objects[j] = ~0;
+ }
+ else
+ {
+ uword size, align, align_offset;
+
+ size = (random_u32 (&seed) % max_object_size) * sizeof (data[0]);
+ align = align_offset = 0;
+ if (check_mask & CHECK_ALIGN)
+ {
+ align = 1 << (random_u32 (&seed) % 10);
+ align_offset = round_pow2 (random_u32 (&seed) & (align - 1),
+ sizeof (u32));
+ }
+
+ h = mheap_get_aligned (h, size, align, align_offset, &objects[j]);
+
+ if (align > 0)
+ ASSERT (0 == ((objects[j] + align_offset) & (align - 1)));
+
+ ASSERT (objects[j] != ~0);
+ objects_used++;
+
+ /* Set newly allocated object with test data. */
+ if (check_mask & CHECK_DATA)
+ {
+ uword len;
+
+ data = (void *) h + objects[j];
+ len = mheap_len (h, data);
+
+ ASSERT (size <= mheap_data_bytes (h, objects[j]));
+
+ data[0] = len;
+ for (k = 1; k < len; k++)
+ data[k] = objects[j] + k;
+ }
+ }
+
+ /* Verify that all used objects have correct test data. */
+      if (check_mask & CHECK_DATA)
+ {
+ for (j = 0; j < vec_len (objects); j++)
+ if (objects[j] != ~0)
+ {
+ u32 *data = h + objects[j];
+ uword len = data[0];
+ for (k = 1; k < len; k++)
+ ASSERT (data[k] == objects[j] + k);
+ }
+ }
+ if (print_every != 0 && i > 0 && (i % print_every) == 0)
+ fformat (stderr, "iteration %d: %U\n", i, format_mheap, h,
+ really_verbose);
+ }
+
+ if (verbose)
+ fformat (stderr, "%U\n", format_mheap, h, really_verbose);
+ mheap_free (h);
+ clib_mem_free (h_mem);
+ vec_free (objects);
+
+ return 0;
+}
+
+#ifdef CLIB_UNIX
+int
+main (int argc, char *argv[])
+{
+ unformat_input_t i;
+ int ret;
+
+ verbose = (argc > 1);
+ unformat_init_command_line (&i, argv);
+ ret = test_mheap_main (&i);
+ unformat_free (&i);
+
+ return ret;
+}
+#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_pfhash.c b/src/vppinfra/test_pfhash.c
new file mode 100644
index 00000000..ddbdbb34
--- /dev/null
+++ b/src/vppinfra/test_pfhash.c
@@ -0,0 +1,322 @@
+/*
+ Copyright (c) 2013 Cisco and/or its affiliates.
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+#include <vppinfra/pfhash.h>
+#include <vppinfra/format.h>
+#include <vppinfra/random.h>
+
+#if defined(CLIB_HAVE_VEC128) && ! defined (__ALTIVEC__)
+
+int verbose = 0;
+
+always_inline u8 *
+random_aligned_string (u32 * seed, uword len)
+{
+ u8 *alphabet = (u8 *) "abcdefghijklmnopqrstuvwxyz";
+ u8 *s = 0;
+ word i;
+
+ vec_resize_aligned (s, len, 16);
+ for (i = 0; i < len; i++)
+ s[i] = alphabet[random_u32 (seed) % 26];
+
+ return s;
+}
+
+void exit (int);
+
+int
+test_pfhash_main (unformat_input_t * input)
+{
+ u32 seed = 0xdeaddabe;
+ int i, iter;
+ u32 nkeys = 4;
+ u32 niter = 1;
+ u32 nbuckets = 1;
+ u32 bucket;
+ u32 sizes[3] = { 16, 8, 4 }, this_size, size;
+ u8 **keys = 0;
+ pfhash_t _rec, *p = &_rec;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "seed %d", &seed))
+ ;
+ else if (unformat (input, "niter %d", &niter))
+ ;
+ else if (unformat (input, "nkeys %d", &nkeys))
+ ;
+ else if (unformat (input, "nbuckets %d", &nbuckets))
+ ;
+ else if (unformat (input, "verbose %d", &verbose))
+ ;
+ else if (unformat (input, "verbose"))
+ verbose = 1;
+ else
+ clib_error ("unknown input `%U'", format_unformat_error, input);
+ }
+
+ vec_validate (keys, nkeys - 1);
+
+ for (i = 0; i < nkeys; i++)
+ {
+ int j, k;
+
+ again:
+ keys[i] = random_aligned_string (&seed, 16);
+      for (j = 0; j < i; j++)	/* check all previously chosen keys */
+ {
+ /* Make sure we don't have a dup key in the min key size */
+ for (k = 0; k < 4; k++)
+ {
+ if (keys[i][k] != keys[j][k])
+ goto check_next_key;
+ }
+ vec_free (keys[i]);
+ goto again;
+ check_next_key:
+ ;
+ }
+ }
+
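+  /*
+   * Note: the tests below number buckets starting at 1 and size the
+   * table with nbuckets + 1, so bucket 0 is never used.
+   */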
+ /* test 8 byte key, 8 byte value case separately */
+
+ for (size = 8; size < 9; size++)
+ {
+ this_size = 8;
+
+      fformat (stdout, "%d-byte key, 8-byte value test\n", this_size);
+
+ pfhash_init (p, "test", 8 /* key size */ , 8 /* value size */ ,
+ nbuckets + 1);
+
+ for (iter = 0; iter < niter; iter++)
+ {
+ bucket = 0;
+ for (i = 0; i < nkeys; i++)
+ {
+ bucket = (i % nbuckets) + 1;
+	      pfhash_set (p, bucket, keys[i],
+			  (void *) ((u64) 0x100000000ULL + i + 1));
+ }
+
+ for (i = 0; i < nkeys; i++)
+ {
+ bucket = (i % nbuckets) + 1;
+ if (pfhash_get (p, bucket, keys[i])
+ != (u64) 0x100000000ULL + i + 1)
+ {
+ clib_warning ("key %d bucket %d lookup FAIL\n", i, bucket);
+ (void) pfhash_get (p, bucket, keys[i]);
+ }
+ }
+
+	  /* Exercise the inline lookup path: prefetch the bucket, read
+	     its contents, then search the key/value pairs in place. */
+ for (i = 0; i < nkeys; i++)
+ {
+ u32 bucket_contents;
+ u64 value = 0xdeadbeef;
+ bucket = (i % nbuckets) + 1;
+
+ pfhash_prefetch_bucket (p, bucket);
+ bucket_contents = pfhash_read_bucket_prefetch_kv (p, bucket);
+
+ value = pfhash_search_kv_8v8 (p, bucket_contents,
+ (u64 *) keys[i]);
+ if (value != (u64) 0x100000000ULL + i + 1)
+ clib_warning ("key %d bucket %d lookup FAIL\n", i, bucket);
+ }
+
+ if (verbose)
+ fformat (stdout, "%U\n", format_pfhash, p, verbose > 1);
+
+ for (i = 0; i < nkeys; i++)
+ {
+ bucket = (i % nbuckets) + 1;
+ pfhash_unset (p, bucket, keys[i]);
+ }
+
+ for (i = 0; i < nkeys; i++)
+ {
+ bucket = (i % nbuckets) + 1;
+ if (pfhash_get (p, bucket, keys[i]) != (u64) ~ 0)
+ {
+ clib_warning ("key %d bucket %d lookup FAIL\n", i, bucket);
+ (void) pfhash_get (p, bucket, keys[i]);
+ }
+ }
+ /* test inline functions */
+ for (i = 0; i < nkeys; i++)
+ {
+ u32 bucket_contents;
+ u64 value = 0xdeadbeef;
+ bucket = (i % nbuckets) + 1;
+
+ pfhash_prefetch_bucket (p, bucket);
+ bucket_contents = pfhash_read_bucket_prefetch_kv (p, bucket);
+
+ value = pfhash_search_kv_8v8 (p, bucket_contents,
+ (u64 *) keys[i]);
+
+ if (value != (u64) ~ 0)
+ clib_warning ("key %d bucket %d lookup FAIL\n", i, bucket);
+ }
+ }
+ pfhash_free (p);
+ }
+
+ /* test other cases */
+
+ for (size = 0; size < ARRAY_LEN (sizes); size++)
+ {
+ this_size = sizes[size];
+
+ fformat (stdout, "%d-byte key test\n", this_size);
+
+ pfhash_init (p, "test", this_size, 4 /* value size */ , nbuckets + 1);
+
+ for (iter = 0; iter < niter; iter++)
+ {
+ bucket = 0;
+ for (i = 0; i < nkeys; i++)
+ {
+ bucket = (i % nbuckets) + 1;
+ pfhash_set (p, bucket, keys[i], (void *) (u64) i + 1);
+ }
+
+ for (i = 0; i < nkeys; i++)
+ {
+ bucket = (i % nbuckets) + 1;
+ if (pfhash_get (p, bucket, keys[i]) != i + 1)
+ {
+ clib_warning ("key %d bucket %d lookup FAIL\n", i, bucket);
+ (void) pfhash_get (p, bucket, keys[i]);
+ }
+ }
+
+ /* test inline functions */
+ for (i = 0; i < nkeys; i++)
+ {
+ u32 bucket_contents;
+ u32 value = 0xdeadbeef;
+ bucket = (i % nbuckets) + 1;
+
+ pfhash_prefetch_bucket (p, bucket);
+ bucket_contents = pfhash_read_bucket_prefetch_kv (p, bucket);
+ switch (p->key_size)
+ {
+ case 16:
+ value =
+ pfhash_search_kv_16 (p, bucket_contents,
+ (u32x4 *) keys[i]);
+ break;
+ case 8:
+ value =
+ pfhash_search_kv_8 (p, bucket_contents, (u64 *) keys[i]);
+ break;
+ case 4:
+ value =
+ pfhash_search_kv_4 (p, bucket_contents, (u32 *) keys[i]);
+ break;
+ }
+
+ if (value != (i + 1))
+ clib_warning ("key %d bucket %d lookup FAIL\n", i, bucket);
+ }
+
+ if (verbose)
+ fformat (stdout, "%U\n", format_pfhash, p, verbose > 1);
+
+ for (i = 0; i < nkeys; i++)
+ {
+ bucket = (i % nbuckets) + 1;
+ pfhash_unset (p, bucket, keys[i]);
+ }
+
+ for (i = 0; i < nkeys; i++)
+ {
+ bucket = (i % nbuckets) + 1;
+ if (pfhash_get (p, bucket, keys[i]) != (u64) ~ 0)
+ {
+ clib_warning ("key %d bucket %d lookup FAIL\n", i, bucket);
+ (void) pfhash_get (p, bucket, keys[i]);
+ }
+ }
+ /* test inline functions */
+ for (i = 0; i < nkeys; i++)
+ {
+ u32 bucket_contents;
+ u32 value = 0xdeadbeef;
+ bucket = (i % nbuckets) + 1;
+
+ pfhash_prefetch_bucket (p, bucket);
+ bucket_contents = pfhash_read_bucket_prefetch_kv (p, bucket);
+ switch (p->key_size)
+ {
+ case 16:
+ value =
+ pfhash_search_kv_16 (p, bucket_contents,
+ (u32x4 *) keys[i]);
+ break;
+ case 8:
+ value =
+ pfhash_search_kv_8 (p, bucket_contents, (u64 *) keys[i]);
+ break;
+ case 4:
+ value =
+ pfhash_search_kv_4 (p, bucket_contents, (u32 *) keys[i]);
+ break;
+ }
+ if (value != (u32) ~ 0)
+ clib_warning ("key %d bucket %d lookup FAIL\n", i, bucket);
+ }
+ }
+ pfhash_free (p);
+ }
+
+ exit (0);
+}
+#else
+int
+test_pfhash_main (unformat_input_t * input)
+{
+  clib_warning ("128-bit vector unit not available");
+ return 0;
+}
+#endif
+
+#ifdef CLIB_UNIX
+int
+main (int argc, char *argv[])
+{
+ unformat_input_t i;
+ int ret;
+
+ unformat_init_command_line (&i, argv);
+ ret = test_pfhash_main (&i);
+ unformat_free (&i);
+
+ return ret;
+}
+#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_phash.c b/src/vppinfra/test_phash.c
new file mode 100644
index 00000000..9ed2ac7b
--- /dev/null
+++ b/src/vppinfra/test_phash.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/phash.h>
+#include <vppinfra/format.h>
+#include <vppinfra/random.h>
+
+static int verbose;
+#define if_verbose(format,args...) \
+ if (verbose) { clib_warning(format, ## args); }
+
+int
+test_phash_main (unformat_input_t * input)
+{
+ phash_main_t _pm = { 0 }, *pm = &_pm;
+ int n_keys, random_keys;
+ u32 seed;
+ clib_error_t *error;
+
+ random_keys = 1;
+ n_keys = 1000;
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (0 == unformat (input, "keys %d", &n_keys)
+ && 0 == unformat (input, "verbose %=", &verbose, 1)
+ && 0 == unformat (input, "random-keys %=", &random_keys, 1)
+ && 0 == unformat (input, "sequential-keys %=", &random_keys, 0)
+ && 0 == unformat (input, "seed %d", &pm->random_seed)
+ && 0 == unformat (input, "64-bit %|", &pm->flags, PHASH_FLAG_MIX64)
+ && 0 == unformat (input, "32-bit %|", &pm->flags, PHASH_FLAG_MIX32)
+ && 0 == unformat (input, "fast %|", &pm->flags,
+ PHASH_FLAG_FAST_MODE)
+ && 0 == unformat (input, "slow %|", &pm->flags,
+ PHASH_FLAG_SLOW_MODE)
+ && 0 == unformat (input, "minimal %|", &pm->flags,
+ PHASH_FLAG_MINIMAL)
+ && 0 == unformat (input, "non-minimal %|", &pm->flags,
+ PHASH_FLAG_NON_MINIMAL))
+ clib_error ("unknown input `%U'", format_unformat_error, input);
+ }
+
+ if (!pm->random_seed)
+ pm->random_seed = random_default_seed ();
+
+ if_verbose
+ ("%d %d-bit keys, random seed %d, %s mode, looking for %sminimal hash",
+ n_keys, (pm->flags & PHASH_FLAG_MIX64) ? 64 : 32, pm->random_seed,
+ (pm->flags & PHASH_FLAG_FAST_MODE) ? "fast" : "slow",
+ (pm->flags & PHASH_FLAG_MINIMAL) ? "" : "non-");
+
+ seed = pm->random_seed;
+
+ /* Initialize random keys. */
+ {
+ phash_key_t *k;
+
+ vec_resize (pm->keys, n_keys);
+ vec_foreach (k, pm->keys)
+ {
+ k->key = k - pm->keys;
+ if (random_keys)
+ {
+ if (pm->flags & PHASH_FLAG_MIX64)
+ k->key = random_u64 (&seed);
+ else
+ k->key = random_u32 (&seed);
+ }
+ }
+ }
+
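+  /* Search for a (possibly minimal) perfect hash over the keys, then
+     verify that every key maps to a distinct slot. */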
+ error = phash_find_perfect_hash (pm);
+ if (error)
+ {
+ clib_error_report (error);
+ return 1;
+ }
+ else
+ {
+ if_verbose ("(%d,%d) (a,b) bits, %d seeds tried, %d tree walks",
+ pm->a_bits, pm->b_bits,
+ pm->n_seed_trials, pm->n_perfect_calls);
+
+ error = phash_validate (pm);
+ if (error)
+ {
+ clib_error_report (error);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+#ifdef CLIB_UNIX
+int
+main (int argc, char *argv[])
+{
+ unformat_input_t i;
+ int res;
+
+ verbose = (argc > 1);
+ unformat_init_command_line (&i, argv);
+ res = test_phash_main (&i);
+ unformat_free (&i);
+ return res;
+}
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_pool.c b/src/vppinfra/test_pool.c
new file mode 100644
index 00000000..67a5e50a
--- /dev/null
+++ b/src/vppinfra/test_pool.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/mem.h>
+#include <vppinfra/pool.h>
+
+#ifdef __KERNEL__
+#include <linux/unistd.h>
+#else
+#include <unistd.h>
+#include <stdlib.h>		/* atoi, srandom, random */
+#endif
+
+int
+main (int argc, char *argv[])
+{
+ int i, n, seed;
+
+ int *p = 0, *e, j, *o = 0;
+
+  n = argc > 1 ? atoi (argv[1]) : 1000;
+  seed = getpid ();
+  srandom (seed);
+
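+  /* Randomly grow and shrink the pool; the vector o mirrors the live
+     pool indices so a random free always targets a live element. */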
+ for (i = 0; i < n; i++)
+ {
+ if (vec_len (o) < 10 || (random () & 1))
+ {
+ pool_get (p, e);
+ j = e - p;
+ *e = j;
+ vec_add1 (o, j);
+ }
+ else
+ {
+ j = random () % vec_len (o);
+ e = p + j;
+ pool_put (p, e);
+ vec_delete (o, 1, j);
+ }
+ }
+ p = pool_free (p);
+ vec_free (o);
+ return 0;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_pool_iterate.c b/src/vppinfra/test_pool_iterate.c
new file mode 100644
index 00000000..27ce4bb3
--- /dev/null
+++ b/src/vppinfra/test_pool_iterate.c
@@ -0,0 +1,59 @@
+/*
+ Copyright (c) 2011 Cisco and/or its affiliates.
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+#include <vppinfra/mem.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/format.h>	/* fformat */
+
+#ifdef __KERNEL__
+#include <linux/unistd.h>
+#else
+#include <unistd.h>
+#endif
+
+int
+main (int argc, char *argv[])
+{
+ int i;
+ uword next;
+ u32 *tp = 0;
+ u32 *junk;
+
+ for (i = 0; i < 70; i++)
+ pool_get (tp, junk);
+
+ (void) junk; /* compiler warning */
+
+ pool_put_index (tp, 1);
+ pool_put_index (tp, 65);
+
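+  /* Walk the sparse pool: pool_next_index takes ~0 to mean "start at
+     the beginning" and returns ~0 when the pool is exhausted, so the
+     two indices just freed should not appear. */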
+ next = ~0;
+ do
+ {
+ next = pool_next_index (tp, next);
+ fformat (stdout, "next index %d\n", next);
+ }
+ while (next != ~0);
+
+ return 0;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_ptclosure.c b/src/vppinfra/test_ptclosure.c
new file mode 100644
index 00000000..be7d51df
--- /dev/null
+++ b/src/vppinfra/test_ptclosure.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vppinfra/ptclosure.h>
+#include <vppinfra/hash.h>
+
+typedef struct
+{
+ uword *index_by_name;
+ u8 *items;
+} test_main_t;
+
+test_main_t test_main;
+
+static char *items[] = {
+ "d",
+ "a",
+ "b",
+ "c",
+};
+
+char *constraints[] = {
+ "a,b",
+ "b,c",
+ "d,b",
+ // "c,a", /* no partial order possible */
+};
+
+u32
+vl (void *p)
+{
+ return vec_len (p);
+}
+
+static void
+dump_closure (test_main_t * tm, char *s, u8 ** orig)
+{
+ int i, j;
+
+ fformat (stdout, "--------- %s --------------\n", s);
+ for (i = 0; i < vec_len (orig); i++)
+ {
+ for (j = 0; j < vec_len (orig); j++)
+ if (orig[i][j])
+ {
+ fformat (stdout, "%s <before> %s\n", items[i], items[j]);
+ }
+ }
+}
+
+int
+comma_split (u8 * s, u8 ** a, u8 ** b)
+{
+ *a = s;
+
+ while (*s && *s != ',')
+ s++;
+
+ if (*s == ',')
+ *s = 0;
+ else
+ return 1;
+
+ *b = (u8 *) (s + 1);
+ return 0;
+}
+
+int
+test_ptclosure_main (unformat_input_t * input)
+{
+ test_main_t *tm = &test_main;
+ u8 *item_name;
+ int i, j;
+ u8 **orig;
+ u8 **closure;
+ u8 *a_name, *b_name;
+ int a_index, b_index;
+ uword *p;
+ u8 *this_constraint;
+ int n;
+ u32 *result = 0;
+
+ tm->index_by_name = hash_create_string (0, sizeof (uword));
+
+ n = ARRAY_LEN (items);
+
+ for (i = 0; i < n; i++)
+ {
+ item_name = (u8 *) items[i];
+ hash_set_mem (tm->index_by_name, item_name, i);
+ }
+
+ orig = clib_ptclosure_alloc (n);
+
+ for (i = 0; i < ARRAY_LEN (constraints); i++)
+ {
+ this_constraint = format (0, "%s%c", constraints[i], 0);
+
+ if (comma_split (this_constraint, &a_name, &b_name))
+ {
+ clib_warning ("couldn't split '%s'", constraints[i]);
+ return 1;
+ }
+
+ p = hash_get_mem (tm->index_by_name, a_name);
+ if (p == 0)
+ {
+ clib_warning ("couldn't find '%s'", a_name);
+ return 1;
+ }
+ a_index = p[0];
+
+ p = hash_get_mem (tm->index_by_name, b_name);
+ if (p == 0)
+ {
+ clib_warning ("couldn't find '%s'", b_name);
+ return 1;
+ }
+ b_index = p[0];
+
+ orig[a_index][b_index] = 1;
+ vec_free (this_constraint);
+ }
+
+ dump_closure (tm, "original relation", orig);
+
+ closure = clib_ptclosure (orig);
+
+ dump_closure (tm, "closure", closure);
+
+  /*
+   * Output a partial order: repeatedly pick an item whose closure row
+   * is empty (it precedes nothing still pending), emit it, and clear
+   * its column so its predecessors become eligible; closure[i][i] is
+   * set so an emitted item is never picked again.  The result vector
+   * is built back-to-front, hence the reversed print loop below.
+   */
+
+again:
+ for (i = 0; i < n; i++)
+ {
+ for (j = 0; j < n; j++)
+ {
+ if (closure[i][j])
+ goto item_constrained;
+ }
+ /* Item i can be output */
+ vec_add1 (result, i);
+ {
+ int k;
+ for (k = 0; k < n; k++)
+ closure[k][i] = 0;
+ /* "Magic" a before a, to keep from ever outputting it again */
+ closure[i][i] = 1;
+ goto again;
+ }
+ item_constrained:
+ ;
+ }
+
+ if (vec_len (result) != n)
+ {
+ clib_warning ("no partial order exists");
+ exit (1);
+ }
+
+ fformat (stdout, "Partial order:\n");
+
+ for (i = vec_len (result) - 1; i >= 0; i--)
+ {
+ fformat (stdout, "%s\n", items[result[i]]);
+ }
+
+ vec_free (result);
+ clib_ptclosure_free (orig);
+ clib_ptclosure_free (closure);
+
+ return 0;
+}
+
+#ifdef CLIB_UNIX
+int
+main (int argc, char *argv[])
+{
+ unformat_input_t i;
+ int ret;
+
+ clib_mem_init (0, 3ULL << 30);
+
+ unformat_init_command_line (&i, argv);
+ ret = test_ptclosure_main (&i);
+ unformat_free (&i);
+
+ return ret;
+}
+#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_qhash.c b/src/vppinfra/test_qhash.c
new file mode 100644
index 00000000..fdbf0bbe
--- /dev/null
+++ b/src/vppinfra/test_qhash.c
@@ -0,0 +1,333 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vppinfra/bitmap.h>
+#include <vppinfra/os.h>
+#include <vppinfra/qhash.h>
+#include <vppinfra/random.h>
+#include <vppinfra/time.h>
+
+typedef struct
+{
+ u32 n_iter, seed, n_keys, n_hash_keys, verbose;
+
+ u32 max_vector;
+
+ uword *hash;
+
+ uword *keys_in_hash_bitmap;
+
+ u32 *qhash;
+
+ uword *keys;
+
+ uword *lookup_keys;
+ uword *lookup_key_indices;
+ u32 *lookup_results;
+
+ u32 *get_multiple_results;
+
+ clib_time_t time;
+
+ f64 overflow_fraction, ave_elts;
+ f64 get_time, hash_get_time;
+ f64 set_time, set_count;
+ f64 unset_time, unset_count;
+ f64 hash_set_time, hash_unset_time;
+} test_qhash_main_t;
+
+clib_error_t *
+test_qhash_main (unformat_input_t * input)
+{
+ clib_error_t *error = 0;
+ test_qhash_main_t _tm, *tm = &_tm;
+ uword i, iter;
+
+ memset (tm, 0, sizeof (tm[0]));
+ tm->n_iter = 10;
+ tm->seed = 1;
+ tm->n_keys = 10;
+ tm->max_vector = 1;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "iter %d", &tm->n_iter))
+ ;
+ else if (unformat (input, "seed %d", &tm->seed))
+ ;
+ else if (unformat (input, "keys %d", &tm->n_keys))
+ ;
+ else if (unformat (input, "size %d", &tm->n_hash_keys))
+ ;
+ else if (unformat (input, "vector %d", &tm->max_vector))
+ ;
+ else if (unformat (input, "verbose"))
+ tm->verbose = 1;
+ else
+ {
+ error = clib_error_create ("unknown input `%U'\n",
+ format_unformat_error, input);
+ goto done;
+ }
+ }
+
+ if (!tm->seed)
+ tm->seed = random_default_seed ();
+
+ clib_time_init (&tm->time);
+
+ clib_warning ("iter %d, seed %u, keys %d, max vector %d, ",
+ tm->n_iter, tm->seed, tm->n_keys, tm->max_vector);
+
+ vec_resize (tm->keys, tm->n_keys);
+ vec_resize (tm->get_multiple_results, tm->n_keys);
+ for (i = 0; i < vec_len (tm->keys); i++)
+ tm->keys[i] = random_uword (&tm->seed);
+
+ if (!tm->n_hash_keys)
+ tm->n_hash_keys = 2 * max_pow2 (tm->n_keys);
+ tm->n_hash_keys = clib_max (tm->n_keys, tm->n_hash_keys);
+ qhash_resize (tm->qhash, tm->n_hash_keys);
+
+ {
+ qhash_t *h = qhash_header (tm->qhash);
+ int i;
+ for (i = 0; i < ARRAY_LEN (h->hash_seeds); i++)
+ h->hash_seeds[i] = random_uword (&tm->seed);
+ }
+
+ vec_resize (tm->lookup_keys, tm->max_vector);
+ vec_resize (tm->lookup_key_indices, tm->max_vector);
+ vec_resize (tm->lookup_results, tm->max_vector);
+
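+  /*
+   * Each iteration applies one random batch of sets or unsets to both
+   * the qhash under test and a plain hash used as a reference model,
+   * then cross-checks element counts, internal bookkeeping and every
+   * lookup result against the model.
+   */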
+ for (iter = 0; iter < tm->n_iter; iter++)
+ {
+ uword *p, j, n, is_set;
+
+ n = tm->max_vector;
+
+ is_set = random_u32 (&tm->seed) & 1;
+ is_set |= hash_elts (tm->hash) < (tm->n_keys / 4);
+ if (hash_elts (tm->hash) > (3 * tm->n_keys) / 4)
+ is_set = 0;
+
+ _vec_len (tm->lookup_keys) = n;
+ _vec_len (tm->lookup_key_indices) = n;
+ j = 0;
+ while (j < n)
+ {
+ i = random_u32 (&tm->seed) % vec_len (tm->keys);
+ if (clib_bitmap_get (tm->keys_in_hash_bitmap, i) != is_set)
+ {
+ f64 t[2];
+ tm->lookup_key_indices[j] = i;
+ tm->lookup_keys[j] = tm->keys[i];
+ t[0] = clib_time_now (&tm->time);
+ if (is_set)
+ hash_set (tm->hash, tm->keys[i], i);
+ else
+ hash_unset (tm->hash, tm->keys[i]);
+ t[1] = clib_time_now (&tm->time);
+ if (is_set)
+ tm->hash_set_time += t[1] - t[0];
+ else
+ tm->hash_unset_time += t[1] - t[0];
+ tm->keys_in_hash_bitmap
+ = clib_bitmap_set (tm->keys_in_hash_bitmap, i, is_set);
+ j++;
+ }
+ }
+
+ {
+ f64 t[2];
+
+ if (is_set)
+ {
+ t[0] = clib_time_now (&tm->time);
+ qhash_set_multiple (tm->qhash,
+ tm->lookup_keys,
+ vec_len (tm->lookup_keys),
+ tm->lookup_results);
+ t[1] = clib_time_now (&tm->time);
+ tm->set_time += t[1] - t[0];
+ tm->set_count += vec_len (tm->lookup_keys);
+ for (i = 0; i < vec_len (tm->lookup_keys); i++)
+ {
+ uword r = tm->lookup_results[i];
+ *vec_elt_at_index (tm->qhash, r) = tm->lookup_key_indices[i];
+ }
+ }
+ else
+ {
+ t[0] = clib_time_now (&tm->time);
+ qhash_unset_multiple (tm->qhash,
+ tm->lookup_keys,
+ vec_len (tm->lookup_keys),
+ tm->lookup_results);
+ t[1] = clib_time_now (&tm->time);
+ tm->unset_time += t[1] - t[0];
+ tm->unset_count += vec_len (tm->lookup_keys);
+
+ for (i = 0; i < vec_len (tm->lookup_keys); i++)
+ {
+ uword r = tm->lookup_results[i];
+ *vec_elt_at_index (tm->qhash, r) = ~0;
+ }
+ }
+ }
+
+ if (qhash_elts (tm->qhash) != hash_elts (tm->hash))
+ os_panic ();
+
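+      /* Cross-check qhash internals: bits set in the valid bitmaps
+         plus overflow-hash entries must equal the element count, and
+         the per-bucket overflow counts must match a recount from the
+         overflow hash itself. */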
+ {
+ qhash_t *h;
+ uword i, k, l, count;
+
+ h = qhash_header (tm->qhash);
+
+ for (i = k = 0; k < vec_len (h->hash_key_valid_bitmap); k++)
+ i += count_set_bits (h->hash_key_valid_bitmap[k]);
+ k = hash_elts (h->overflow_hash);
+ l = qhash_elts (tm->qhash);
+ if (i + k != l)
+ os_panic ();
+
+ count = hash_elts (h->overflow_hash);
+ for (i = 0; i < (1 << h->log2_hash_size); i++)
+ count += tm->qhash[i] != ~0;
+ if (count != qhash_elts (tm->qhash))
+ os_panic ();
+
+ {
+ u32 *tmp = 0;
+
+ /* *INDENT-OFF* */
+ hash_foreach (k, l, h->overflow_hash, ({
+ j = qhash_hash_mix (h, k) / QHASH_KEYS_PER_BUCKET;
+ vec_validate (tmp, j);
+ tmp[j] += 1;
+ }));
+ /* *INDENT-ON* */
+
+ for (k = 0; k < vec_len (tmp); k++)
+ {
+ if (k >= vec_len (h->overflow_counts))
+ os_panic ();
+ if (h->overflow_counts[k] != tmp[k])
+ os_panic ();
+ }
+ for (; k < vec_len (h->overflow_counts); k++)
+ if (h->overflow_counts[k] != 0)
+ os_panic ();
+
+ vec_free (tmp);
+ }
+ }
+
+ {
+ f64 t[2];
+
+ t[0] = clib_time_now (&tm->time);
+ qhash_get_multiple (tm->qhash, tm->keys, vec_len (tm->keys),
+ tm->get_multiple_results);
+ t[1] = clib_time_now (&tm->time);
+ tm->get_time += t[1] - t[0];
+
+ for (i = 0; i < vec_len (tm->keys); i++)
+ {
+ u32 r;
+
+ t[0] = clib_time_now (&tm->time);
+ p = hash_get (tm->hash, tm->keys[i]);
+ t[1] = clib_time_now (&tm->time);
+ tm->hash_get_time += t[1] - t[0];
+
+ r = qhash_get (tm->qhash, tm->keys[i]);
+ if (p)
+ {
+ if (p[0] != i)
+ os_panic ();
+ if (*vec_elt_at_index (tm->qhash, r) != i)
+ os_panic ();
+ }
+ else
+ {
+ if (r != ~0)
+ os_panic ();
+ }
+ if (r != tm->get_multiple_results[i])
+ os_panic ();
+ }
+ }
+
+ tm->overflow_fraction +=
+ ((f64) qhash_n_overflow (tm->qhash) / qhash_elts (tm->qhash));
+ tm->ave_elts += qhash_elts (tm->qhash);
+ }
+
+ fformat (stderr, "%d iter %.6e overflow, %.4f ave. elts\n",
+ tm->n_iter,
+ tm->overflow_fraction / tm->n_iter, tm->ave_elts / tm->n_iter);
+
+ tm->get_time /= tm->n_iter * vec_len (tm->keys);
+ tm->hash_get_time /= tm->n_iter * vec_len (tm->keys);
+
+ tm->set_time /= tm->set_count;
+ tm->unset_time /= tm->unset_count;
+ tm->hash_set_time /= tm->set_count;
+ tm->hash_unset_time /= tm->unset_count;
+
+ fformat (stderr,
+ "get/set/unset clocks %.2e %.2e %.2e clib %.2e %.2e %.2e ratio %.2f %.2f %.2f\n",
+ tm->get_time * tm->time.clocks_per_second,
+ tm->set_time * tm->time.clocks_per_second,
+ tm->unset_time * tm->time.clocks_per_second,
+ tm->hash_get_time * tm->time.clocks_per_second,
+ tm->hash_set_time * tm->time.clocks_per_second,
+ tm->hash_unset_time * tm->time.clocks_per_second,
+ tm->hash_get_time / tm->get_time, tm->hash_set_time / tm->set_time,
+ tm->hash_unset_time / tm->unset_time);
+
+
+done:
+ return error;
+}
+
+#ifdef CLIB_UNIX
+int
+main (int argc, char *argv[])
+{
+ unformat_input_t i;
+ clib_error_t *error;
+
+ unformat_init_command_line (&i, argv);
+ error = test_qhash_main (&i);
+ unformat_free (&i);
+ if (error)
+ {
+ clib_error_report (error);
+ return 1;
+ }
+ else
+ return 0;
+}
+#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_random.c b/src/vppinfra/test_random.c
new file mode 100644
index 00000000..49759eac
--- /dev/null
+++ b/src/vppinfra/test_random.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/format.h>
+#include <vppinfra/bitmap.h>
+
+static u32 known_random_sequence[] = {
+ 0x00000000, 0x3c6ef35f, 0x47502932, 0xd1ccf6e9,
+ 0xaaf95334, 0x6252e503, 0x9f2ec686, 0x57fe6c2d,
+ 0xa3d95fa8, 0x81fdbee7, 0x94f0af1a, 0xcbf633b1,
+};
+
+
+int
+test_random_main (unformat_input_t * input)
+{
+ uword n_iterations;
+ uword i, repeat_count;
+ uword *bitmap = 0;
+ uword print;
+ u32 seed;
+ u32 *seedp = &seed;
+
+ /* first, check known sequence from Numerical Recipes in C, 2nd ed.
+ page 284 */
+ seed = known_random_sequence[0];
+  for (i = 0; i < ARRAY_LEN (known_random_sequence) - 1; i++)
+    {
+      u32 rv;
+      rv = random_u32 (seedp);
+      if (rv != known_random_sequence[i + 1])
+	{
+	  fformat (stderr, "known sequence check FAILS at index %d\n", i + 1);
+	  break;
+	}
+    }
+
+  /* Only report success if the whole sequence matched. */
+  if (i == ARRAY_LEN (known_random_sequence) - 1)
+    clib_warning ("known sequence check passes");
+
+ n_iterations = 1000;
+ seed = 0;
+ print = 1 << 24;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (0 == unformat (input, "iter %d", &n_iterations)
+ && 0 == unformat (input, "print %d", &print)
+ && 0 == unformat (input, "seed %d", &seed))
+ clib_error ("unknown input `%U'", format_unformat_error, input);
+ }
+
+ if (!seed)
+ seed = random_default_seed ();
+
+ if (n_iterations == 0)
+ n_iterations = random_u32_max ();
+
+ clib_warning ("%d iterations, seed %d\n", n_iterations, seed);
+
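+  /* Record every generated value in a bitmap so repeats in the
+     generator's output stream can be detected and counted. */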
+ repeat_count = 0;
+ for (i = 0; i < n_iterations; i++)
+ {
+ uword r = random_u32 (&seed);
+ uword b, ri, rj;
+
+ ri = r / BITS (bitmap[0]);
+ rj = (uword) 1 << (r % BITS (bitmap[0]));
+
+ vec_validate (bitmap, ri);
+ b = bitmap[ri];
+
+ if (b & rj)
+ goto repeat;
+ b |= rj;
+ bitmap[ri] = b;
+
+ if (0 == (i & (print - 1)))
+ fformat (stderr, "0x%08x iterations %d repeats\n", i, repeat_count);
+ continue;
+
+ repeat:
+ fformat (stderr, "repeat found at iteration %d/%d\n", i, n_iterations);
+ repeat_count++;
+ continue;
+ }
+
+ return 0;
+}
+
+#ifdef CLIB_UNIX
+int
+main (int argc, char *argv[])
+{
+ unformat_input_t i;
+ int ret;
+
+ clib_mem_init (0, 3ULL << 30);
+
+ unformat_init_command_line (&i, argv);
+ ret = test_random_main (&i);
+ unformat_free (&i);
+
+ return ret;
+}
+#endif /* CLIB_UNIX */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_random_isaac.c b/src/vppinfra/test_random_isaac.c
new file mode 100644
index 00000000..337d30dd
--- /dev/null
+++ b/src/vppinfra/test_random_isaac.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/format.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/random.h>
+#include <vppinfra/random_isaac.h>
+
+static int verbose;
+#define if_verbose(format,args...) \
+ if (verbose) { clib_warning(format, ## args); }
+
+int
+test_isaac_main (unformat_input_t * input)
+{
+ uword n_iterations, seed;
+ uword i, repeat_count;
+ uword *hash = 0;
+ uword print;
+ isaac_t ctx;
+ uword results[ISAAC_SIZE] = { 0 };
+ uword n_results;
+
+ n_iterations = 1000;
+ seed = 0;
+ print = 1 << 24;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (0 == unformat (input, "iter %d", &n_iterations)
+ && 0 == unformat (input, "print %d", &print)
+ && 0 == unformat (input, "seed %d", &seed))
+ clib_error ("unknown input `%U'", format_unformat_error, input);
+ }
+
+ if (!seed)
+ seed = random_default_seed ();
+
+ results[0] = seed;
+
+ if (n_iterations == 0)
+ n_iterations = ~0;
+
+ if_verbose ("%d iterations, seed %d\n", n_iterations, seed);
+
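+  /* ISAAC yields ISAAC_SIZE words per crank; refill the results block
+     when it is exhausted, and detect repeats with a hash table that is
+     periodically flushed to bound memory. */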
+ repeat_count = 0;
+ isaac_init (&ctx, results);
+ isaac (&ctx, results);
+ n_results = 0;
+ for (i = 0; i < n_iterations; i++)
+ {
+ uword r = results[n_results++];
+
+ if (!hash)
+ hash = hash_create (0, /* value bytes */ 0);
+
+ if (hash_get (hash, r))
+ goto repeat;
+
+ hash_set1 (hash, r);
+
+ if (n_results >= ARRAY_LEN (results))
+ {
+ isaac (&ctx, results);
+ n_results = 0;
+ }
+
+ if (verbose && 0 == (i & (print - 1)))
+ fformat (stderr, "0x%08x iterations %d repeats\n", i, repeat_count);
+
+ if (hash_elts (hash) > 0x100000)
+ hash_free (hash);
+
+ continue;
+
+ repeat:
+ fformat (stderr, "repeat found at iteration %d/%d\n", i, n_iterations);
+ repeat_count++;
+ continue;
+ }
+
+ return repeat_count > 0 ? 1 : 0;
+}
+
+#ifdef CLIB_UNIX
+int
+main (int argc, char *argv[])
+{
+ unformat_input_t i;
+ int ret;
+
+ verbose = (argc > 1);
+ unformat_init_command_line (&i, argv);
+ ret = test_isaac_main (&i);
+ unformat_free (&i);
+
+ return ret;
+}
+#endif /* CLIB_UNIX */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_serialize.c b/src/vppinfra/test_serialize.c
new file mode 100644
index 00000000..e00eec32
--- /dev/null
+++ b/src/vppinfra/test_serialize.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/format.h>
+#include <vppinfra/random.h>
+#include <vppinfra/serialize.h>
+#include <vppinfra/os.h>
+
+#define foreach_my_vector_type \
+ _ (u8, a8) \
+ _ (u16, a16) \
+ _ (u32, a32)
+
+typedef struct
+{
+#define _(t,f) t f;
+ foreach_my_vector_type
+#undef _
+} my_vector_type_t;
+
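+/*
+ * Two equivalent serializer pairs: the "single" variants walk the
+ * elements and serialize each field one integer at a time, while the
+ * "multiple" variants serialize each field as a strided run across all
+ * n elements in a single call.
+ */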
+static void
+serialize_my_vector_type_single (serialize_main_t * m, va_list * va)
+{
+ my_vector_type_t *v = va_arg (*va, my_vector_type_t *);
+ u32 n = va_arg (*va, u32);
+ u32 i;
+
+ for (i = 0; i < n; i++)
+ {
+#define _(t,f) serialize_integer (m, v[i].f, sizeof (v[i].f));
+ foreach_my_vector_type;
+ }
+#undef _
+}
+
+static void
+unserialize_my_vector_type_single (serialize_main_t * m, va_list * va)
+{
+ my_vector_type_t *v = va_arg (*va, my_vector_type_t *);
+ u32 n = va_arg (*va, u32);
+ u32 i;
+
+ for (i = 0; i < n; i++)
+ {
+#define _(t,f) { u32 tmp; unserialize_integer (m, &tmp, sizeof (v[i].f)); v[i].f = tmp; }
+ foreach_my_vector_type;
+#undef _
+ }
+}
+
+static void
+serialize_my_vector_type_multiple (serialize_main_t * m, va_list * va)
+{
+ my_vector_type_t *v = va_arg (*va, my_vector_type_t *);
+ u32 n = va_arg (*va, u32);
+
+#define _(t,f) \
+ serialize_multiple \
+ (m, \
+ &v[0].f, \
+ STRUCT_SIZE_OF (my_vector_type_t, f), \
+ STRUCT_STRIDE_OF (my_vector_type_t, f), \
+ n);
+
+ foreach_my_vector_type;
+
+#undef _
+}
+
+static void
+unserialize_my_vector_type_multiple (serialize_main_t * m, va_list * va)
+{
+ my_vector_type_t *v = va_arg (*va, my_vector_type_t *);
+ u32 n = va_arg (*va, u32);
+
+#define _(t,f) \
+ unserialize_multiple \
+ (m, \
+ &v[0].f, \
+ STRUCT_SIZE_OF (my_vector_type_t, f), \
+ STRUCT_STRIDE_OF (my_vector_type_t, f), \
+ n);
+
+ foreach_my_vector_type;
+
+#undef _
+}
+
+typedef struct
+{
+ u32 n_iter;
+ u32 seed;
+ u32 verbose;
+ u32 multiple;
+ u32 max_len;
+
+ my_vector_type_t **test_vectors;
+
+ char *dump_file;
+
+ serialize_main_t serialize_main;
+ serialize_main_t unserialize_main;
+} test_serialize_main_t;
+
+int
+test_serialize_main (unformat_input_t * input)
+{
+ clib_error_t *error = 0;
+ test_serialize_main_t _tm, *tm = &_tm;
+ serialize_main_t *sm = &tm->serialize_main;
+ serialize_main_t *um = &tm->unserialize_main;
+ uword i;
+
+ memset (tm, 0, sizeof (tm[0]));
+ tm->n_iter = 100;
+ tm->seed = 1;
+ tm->max_len = 128;
+ tm->verbose = 0;
+ tm->multiple = 1;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "iter %d", &tm->n_iter))
+ ;
+ else if (unformat (input, "seed %d", &tm->seed))
+ ;
+ else if (unformat (input, "file %s", &tm->dump_file))
+ ;
+ else if (unformat (input, "max-len %d", &tm->max_len))
+ ;
+ else if (unformat (input, "multiple %=", &tm->multiple, 1))
+ ;
+ else if (unformat (input, "single %=", &tm->multiple, 0))
+ ;
+ else if (unformat (input, "verbose %=", &tm->verbose, 1))
+ ;
+ else
+ {
+ error = clib_error_create ("unknown input `%U'\n",
+ format_unformat_error, input);
+ goto done;
+ }
+ }
+
+ if (tm->seed == 0)
+ tm->seed = random_default_seed ();
+
+ clib_warning ("iter %d seed %d max-len %d", tm->n_iter, tm->seed,
+ tm->max_len);
+
+#ifdef CLIB_UNIX
+ if (tm->dump_file)
+ serialize_open_unix_file (sm, tm->dump_file);
+ else
+#endif
+ serialize_open_vector (sm, 0);
+
+ vec_resize (tm->test_vectors, tm->n_iter);
+ for (i = 0; i < tm->n_iter; i++)
+ {
+ uword l = 1 + (random_u32 (&tm->seed) % tm->max_len);
+ my_vector_type_t *mv;
+
+ vec_resize (tm->test_vectors[i], l);
+ vec_foreach (mv, tm->test_vectors[i])
+ {
+#define _(t,f) mv->f = random_u32 (&tm->seed) & pow2_mask (31);
+ foreach_my_vector_type;
+#undef _
+ }
+
+ vec_serialize (sm, tm->test_vectors[i],
+ tm->multiple ? serialize_my_vector_type_multiple :
+ serialize_my_vector_type_single);
+ }
+
+ if (tm->verbose)
+ clib_warning ("overflow vector max bytes %d",
+ vec_max_len (sm->stream.overflow_buffer));
+
+ serialize_close (sm);
+
+#ifdef CLIB_UNIX
+ if (tm->dump_file)
+ {
+ if ((error = unserialize_open_unix_file (um, tm->dump_file)))
+ goto done;
+ }
+ else
+#endif
+ {
+ u8 *v = serialize_close_vector (sm);
+ unserialize_open_data (um, v, vec_len (v));
+ }
+
+ for (i = 0; i < tm->n_iter; i++)
+ {
+ my_vector_type_t *mv0;
+ my_vector_type_t *mv1;
+
+ vec_unserialize (um, &mv0,
+ tm->multiple ? unserialize_my_vector_type_multiple :
+ unserialize_my_vector_type_single);
+ mv1 = tm->test_vectors[i];
+
+ if (vec_len (mv0) != vec_len (mv1))
+ os_panic ();
+ if (memcmp (mv0, mv1, vec_len (mv0) * sizeof (mv0[0])))
+ os_panic ();
+
+ vec_free (mv0);
+ }
+
+done:
+ if (error)
+ clib_error_report (error);
+  return error ? 1 : 0;
+}
+
+#ifdef CLIB_UNIX
+int
+main (int argc, char *argv[])
+{
+ unformat_input_t i;
+ int r;
+
+ unformat_init_command_line (&i, argv);
+ r = test_serialize_main (&i);
+ unformat_free (&i);
+ return r;
+}
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_slist.c b/src/vppinfra/test_slist.c
new file mode 100644
index 00000000..3c3cbf73
--- /dev/null
+++ b/src/vppinfra/test_slist.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifdef CLIB_UNIX
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#endif
+
+#include <vppinfra/slist.h>
+
+typedef struct
+{
+ u32 *random_pool;
+ u32 seed;
+ u32 iter;
+ u32 verbose;
+ f64 branching_factor;
+ clib_slist_t slist;
+} test_main_t;
+
+test_main_t test_main;
+
+#define foreach_simple_test \
+_(2) \
+_(4) \
+_(3) \
+_(1)
+
+
+void
+run_test (test_main_t * tm)
+{
+ int i;
+ u32 *tv;
+ u32 ncompares;
+ u64 total_compares = 0;
+
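+  /* The constant condition selects the random workload; flip it to 0
+     to run the small hand-built foreach_simple_test sequence in the
+     else branch instead. */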
+ if (1)
+ {
+ /*
+ * Add a bunch of random numbers to the skip-list,
+ * sorting them.
+ */
+ for (i = 0; i < tm->iter; i++)
+ {
+ pool_get (tm->random_pool, tv);
+ *tv = random_u32 (&tm->seed);
+ clib_slist_add (&tm->slist, tv, tv - tm->random_pool);
+ }
+ /* make sure we can find each one */
+ for (i = 0; i < tm->iter; i++)
+ {
+ u32 search_result;
+ tv = pool_elt_at_index (tm->random_pool, i);
+
+ search_result = clib_slist_search (&tm->slist, tv, &ncompares);
+ ASSERT (search_result == i);
+
+ total_compares += ncompares;
+ }
+
+ fformat (stdout, "%.2f avg compares/search\n",
+ (f64) total_compares / (f64) i);
+
+ fformat (stdout, "%U\n", format_slist, &tm->slist,
+ tm->iter < 1000 /* verbose */ );
+
+ /* delete half of them */
+ for (i = tm->iter / 2; i < tm->iter; i++)
+ {
+ tv = pool_elt_at_index (tm->random_pool, i);
+ (void) clib_slist_del (&tm->slist, tv);
+ }
+
+ /* make sure we can find the set we should find, and no others */
+ for (i = 0; i < tm->iter; i++)
+ {
+ u32 search_result;
+ tv = pool_elt_at_index (tm->random_pool, i);
+
+ search_result = clib_slist_search (&tm->slist, tv, &ncompares);
+ if (i >= tm->iter / 2)
+ ASSERT (search_result == (u32) ~ 0);
+ else
+ ASSERT (search_result == i);
+
+ }
+
+ fformat (stdout, "%U\n", format_slist, &tm->slist,
+ tm->iter < 1000 /* verbose */ );
+
+ /* delete the rest */
+ for (i = 0; i < tm->iter; i++)
+ {
+ tv = pool_elt_at_index (tm->random_pool, i);
+
+ (void) clib_slist_del (&tm->slist, tv);
+ }
+
+ fformat (stdout, "%U\n", format_slist, &tm->slist,
+ tm->iter < 1000 /* verbose */ );
+ }
+ else
+ {
+
+#define _(n) \
+ do { \
+ pool_get (tm->random_pool, tv); \
+ *tv = n; \
+ clib_slist_add (&tm->slist, tv, tv - tm->random_pool); \
+ fformat(stdout, "%U\n", format_slist, &tm->slist, 1 /* verbose */); \
+ } while (0);
+ foreach_simple_test;
+#undef _
+ }
+
+ return;
+}
+
+word
+test_compare (void *key, u32 elt_index)
+{
+ u32 *k = (u32 *) key;
+ u32 elt = test_main.random_pool[elt_index];
+
+ if (*k < elt)
+ return -1;
+ if (*k > elt)
+ return 1;
+ return 0;
+}
+
+u8 *
+test_format (u8 * s, va_list * args)
+{
+ u32 elt_index = va_arg (*args, u32);
+ u32 elt = test_main.random_pool[elt_index];
+
+ return format (s, "%u", elt);
+}
+
+void
+initialize_slist (test_main_t * tm)
+{
+ clib_slist_init (&tm->slist, tm->branching_factor,
+ test_compare, test_format);
+}
+
+int
+test_slist_main (unformat_input_t * input)
+{
+ test_main_t *tm = &test_main;
+ u32 tmp;
+
+ tm->seed = 0xbabeb00b;
+ tm->iter = 100000;
+ tm->verbose = 1;
+ tm->branching_factor = 1.0 / 5.0;
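+  /* The branching factor is (roughly) the probability that an element
+     is promoted to the next skip-list level; "branch N" below sets it
+     to 1/N. */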
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "seed %d", &tm->seed))
+ continue;
+ else if (unformat (input, "iter %d", &tm->iter))
+ continue;
+ else if (unformat (input, "verbose"))
+ tm->verbose = 1;
+ else if (unformat (input, "branch %d", &tmp))
+ {
+ if (tmp > 0)
+ tm->branching_factor = 1.0 / (f64) tmp;
+ else
+ fformat (stderr, "warning: branch = 0, ignored\n");
+ }
+ else
+ {
+ clib_error ("unknown input `%U'", format_unformat_error, input);
+ goto usage;
+ }
+ }
+ initialize_slist (tm);
+ run_test (tm);
+
+ return 0;
+
+usage:
+ fformat (stderr, "usage: test_slist seed <seed> iter <iter> [verbose]\n");
+ return 1;
+
+}
+
+#ifdef CLIB_UNIX
+int
+main (int argc, char *argv[])
+{
+ unformat_input_t i;
+ int ret;
+
+ clib_mem_init (0, (u64) 4 << 30);
+
+ unformat_init_command_line (&i, argv);
+ ret = test_slist_main (&i);
+ unformat_free (&i);
+
+ return ret;
+}
+#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_socket.c b/src/vppinfra/test_socket.c
new file mode 100644
index 00000000..2f25eccd
--- /dev/null
+++ b/src/vppinfra/test_socket.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/format.h>
+#include <vppinfra/socket.h>
+
+static int verbose;
+#define if_verbose(format,args...) \
+ if (verbose) { clib_warning(format, ## args); }
+
+int
+test_socket_main (unformat_input_t * input)
+{
+ clib_socket_t _s = { 0 }, *s = &_s;
+ char *config;
+ clib_error_t *error;
+
+ s->config = "localhost:22";
+ s->flags = CLIB_SOCKET_F_IS_CLIENT;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+      if (unformat (input, "server %s %=", &config,
+		    &s->flags, CLIB_SOCKET_F_IS_SERVER))
+	s->config = config;
+      else if (unformat (input, "client %s %=", &config,
+			 &s->flags, CLIB_SOCKET_F_IS_CLIENT))
+	s->config = config;
+ else
+ {
+ error = clib_error_create ("unknown input `%U'\n",
+ format_unformat_error, input);
+ goto done;
+ }
+ }
+
+ error = clib_socket_init (s);
+ if (error)
+ goto done;
+
+ if (0)
+ {
+ struct
+ {
+ int a, b;
+ } *msg;
+ msg = clib_socket_tx_add (s, sizeof (msg[0]));
+ msg->a = 99;
+ msg->b = 100;
+ }
+ else
+ clib_socket_tx_add_formatted (s, "hello there mr server %d\n", 99);
+
+ error = clib_socket_tx (s);
+ if (error)
+ goto done;
+
+ while (1)
+ {
+ error = clib_socket_rx (s, 100);
+ if (error)
+ break;
+
+ if (clib_socket_rx_end_of_file (s))
+ break;
+
+ if_verbose ("%v", s->rx_buffer);
+ _vec_len (s->rx_buffer) = 0;
+ }
+
+ error = clib_socket_close (s);
+
+done:
+ if (error)
+ clib_error_report (error);
+  return error ? 1 : 0;
+}
+
+#ifdef CLIB_UNIX
+int
+main (int argc, char *argv[])
+{
+ unformat_input_t i;
+ int r;
+
+ verbose = (argc > 1);
+ unformat_init_command_line (&i, argv);
+ r = test_socket_main (&i);
+ unformat_free (&i);
+ return r;
+}
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_time.c b/src/vppinfra/test_time.c
new file mode 100644
index 00000000..63cfeac5
--- /dev/null
+++ b/src/vppinfra/test_time.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/format.h>
+#include <vppinfra/time.h>
+#include <vppinfra/math.h> /* for sqrt */
+
+static int verbose;
+#define if_verbose(format,args...) \
+ if (verbose) { clib_warning(format, ## args); }
+
+static int
+test_time_main (unformat_input_t * input)
+{
+ f64 wait, error;
+ f64 t, tu[3], ave, rms;
+ clib_time_t c;
+ int i, n, j;
+
+ clib_time_init (&c);
+ wait = 1e-3;
+ n = 1000;
+ unformat (input, "%f %d", &wait, &n);
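+  /* Busy-wait for `wait' seconds, n times, counting spin iterations
+     per wait; report the mean and RMS of the counts plus a per-wait
+     timing error estimated from the unix_time_now() samples. */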
+ ave = rms = 0;
+ tu[0] = unix_time_now ();
+ tu[1] = unix_time_now ();
+ for (i = 0; i < n; i++)
+ {
+ j = 0;
+ t = clib_time_now (&c);
+ while (clib_time_now (&c) < t + wait)
+ j++;
+ t = j;
+ ave += t;
+ rms += t * t;
+ }
+ tu[2] = unix_time_now ();
+ ave /= n;
+ rms = sqrt (rms / n - ave * ave);
+
+ error = ((tu[2] - tu[1]) - 2 * (tu[1] - tu[0]) - n * wait) / n;
+ if_verbose ("tested %d x %.6e sec waits, error %.6e loops %.6e +- %.6e\n",
+ n, wait, error, ave, rms);
+
+ return 0;
+}
+
+#ifdef CLIB_UNIX
+int
+main (int argc, char *argv[])
+{
+ unformat_input_t i;
+ int ret;
+
+ verbose = (argc > 1);
+ unformat_init_command_line (&i, argv);
+ ret = test_time_main (&i);
+ unformat_free (&i);
+
+ return ret;
+}
+#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_timing_wheel.c b/src/vppinfra/test_timing_wheel.c
new file mode 100644
index 00000000..0ce15ad8
--- /dev/null
+++ b/src/vppinfra/test_timing_wheel.c
@@ -0,0 +1,389 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vppinfra/bitmap.h>
+#include <vppinfra/error.h>
+#include <vppinfra/format.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/random.h>
+#include <vppinfra/time.h>
+#include <vppinfra/timing_wheel.h>
+#include <vppinfra/zvec.h>
+
+#include <vppinfra/math.h>
+
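+/* Note: on pre-GCC-4 compilers SQRT degenerates to the identity, so the
+   "rms" figures reported there are really variances; presumably a
+   workaround for old toolchains rather than a numerical choice. */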
+#if __GNUC__ < 4
+#define SQRT(a) a
+#else
+#define SQRT(a) sqrt(a)
+#endif
+
+typedef struct
+{
+ uword n_iter;
+
+ u32 n_events;
+ u32 seed;
+ u32 verbose;
+
+  /* Time is "synthetic", i.e. not taken from the CPU timer. */
+ u32 synthetic_time;
+
+ clib_time_t time;
+ timing_wheel_t timing_wheel;
+
+ u64 *events;
+
+ f64 max_time;
+ f64 wait_time;
+
+ f64 total_iterate_time;
+ f64 time_iterate_start;
+
+ f64 time_per_status_update;
+ f64 time_next_status_update;
+} test_timing_wheel_main_t;
+
+typedef struct
+{
+ f64 dt;
+ f64 fraction;
+ u64 count;
+} test_timing_wheel_tmp_t;
+
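+/* Schedule event i at a pseudo-random future time.  In synthetic-time
+   mode the offset is up to n_iter CPU clocks past the wheel's current
+   bin; in real-time mode it is a random fraction of max_time seconds
+   converted to CPU clocks. */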
+static void
+set_event (test_timing_wheel_main_t * tm, uword i)
+{
+ timing_wheel_t *w = &tm->timing_wheel;
+ u64 cpu_time;
+
+ cpu_time = w->current_time_index << w->log2_clocks_per_bin;
+ if (tm->synthetic_time)
+ cpu_time += random_u32 (&tm->seed) % tm->n_iter;
+ else
+ cpu_time +=
+ random_f64 (&tm->seed) * tm->max_time * tm->time.clocks_per_second;
+
+ timing_wheel_insert (w, cpu_time, i);
+ timing_wheel_validate (w);
+ tm->events[i] = cpu_time;
+}
+
+static int
+test_timing_wheel_tmp_cmp (void *a1, void *a2)
+{
+ test_timing_wheel_tmp_t *f1 = a1;
+ test_timing_wheel_tmp_t *f2 = a2;
+
+ return f1->dt < f2->dt ? -1 : (f1->dt > f2->dt ? +1 : 0);
+}
+
+clib_error_t *
+test_timing_wheel_main (unformat_input_t * input)
+{
+ clib_error_t *error = 0;
+ test_timing_wheel_main_t _tm, *tm = &_tm;
+ timing_wheel_t *w = &tm->timing_wheel;
+ uword iter, i;
+
+ memset (tm, 0, sizeof (tm[0]));
+ tm->n_iter = 10;
+ tm->time_per_status_update = 0;
+ tm->n_events = 100;
+ tm->seed = 1;
+ tm->synthetic_time = 1;
+ tm->max_time = 1;
+ tm->wait_time = 1e-3;
+
+ w->validate = 0;
+ w->n_wheel_elt_time_bits = 32;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "iter %wd", &tm->n_iter))
+ ;
+ else if (unformat (input, "events %d", &tm->n_events))
+ ;
+ else
+ if (unformat (input, "elt-time-bits %d", &w->n_wheel_elt_time_bits))
+ ;
+ else if (unformat (input, "seed %d", &tm->seed))
+ ;
+ else if (unformat (input, "verbose"))
+ tm->verbose = 1;
+ else if (unformat (input, "validate"))
+ w->validate = 1;
+
+ else if (unformat (input, "real-time"))
+ tm->synthetic_time = 0;
+ else if (unformat (input, "synthetic-time"))
+ tm->synthetic_time = 1;
+ else if (unformat (input, "max-time %f", &tm->max_time))
+ ;
+ else if (unformat (input, "wait-time %f", &tm->wait_time))
+ ;
+ else if (unformat (input, "iter-time %f", &tm->total_iterate_time))
+ ;
+ else if (unformat (input, "print %f", &tm->time_per_status_update))
+ ;
+
+ else
+ {
+ error = clib_error_create ("unknown input `%U'\n",
+ format_unformat_error, input);
+ goto done;
+ }
+ }
+
+ if (!tm->seed)
+ tm->seed = random_default_seed ();
+
+ clib_time_init (&tm->time);
+
+ if (tm->synthetic_time)
+ {
+ w->min_sched_time = tm->time.seconds_per_clock;
+ w->max_sched_time = w->min_sched_time * 256;
+ timing_wheel_init (w, 0, tm->time.clocks_per_second);
+ }
+ else
+ {
+ timing_wheel_init (w, clib_cpu_time_now (), tm->time.clocks_per_second);
+ }
+
+ clib_warning ("iter %wd, events %d, seed %u, %U",
+ tm->n_iter, tm->n_events, tm->seed,
+ format_timing_wheel, &tm->timing_wheel, /* verbose */ 0);
+
+ /* Make some events. */
+ vec_resize (tm->events, tm->n_events);
+ for (i = 0; i < vec_len (tm->events); i++)
+ set_event (tm, i);
+
+ {
+ u32 *expired = 0;
+ f64 ave_error = 0;
+ f64 rms_error = 0;
+ f64 max_error = 0, min_error = 1e30;
+ u32 *error_hist = 0;
+ uword n_expired = 0;
+ uword *expired_bitmap[2] = { 0 };
+ uword n_events_in_wheel = vec_len (tm->events);
+
+ vec_resize (expired, 32);
+ vec_resize (error_hist, 1024);
+
+ tm->time_iterate_start = clib_time_now (&tm->time);
+ tm->time_next_status_update =
+ tm->time_iterate_start + tm->time_per_status_update;
+
+ if (tm->total_iterate_time != 0)
+ tm->n_iter = ~0;
+
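+    /* Main loop: advance the wheel one bin per iteration (or to the
+       real CPU time), collect expired events, and cross-check the
+       wheel's answers against a brute-force computation: bitmap [0]
+       holds what *should* have expired by now, bitmap [1] what the
+       wheel actually reported, and min_next_time[1] recomputes the
+       earliest pending event by linear scan. */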
+ for (iter = 0; iter < tm->n_iter || n_events_in_wheel > 0; iter++)
+ {
+ u64 cpu_time, min_next_time[2];
+
+ if (tm->synthetic_time)
+ cpu_time = iter << w->log2_clocks_per_bin;
+ else
+ cpu_time = clib_cpu_time_now ();
+
+ _vec_len (expired) = 0;
+ expired =
+ timing_wheel_advance (w, cpu_time, expired, &min_next_time[0]);
+ timing_wheel_validate (w);
+
+ /* Update bitmap of expired events. */
+ if (w->validate)
+ {
+ for (i = 0; i < vec_len (tm->events); i++)
+ {
+ uword is_expired;
+
+ is_expired =
+ (cpu_time >> w->log2_clocks_per_bin) >=
+ (tm->events[i] >> w->log2_clocks_per_bin);
+ expired_bitmap[0] =
+ clib_bitmap_set (expired_bitmap[0], i, is_expired);
+
+ /* Validate min next time. */
+ if (is_expired)
+ ASSERT (min_next_time[0] > tm->events[i]);
+ else
+ ASSERT (min_next_time[0] <= tm->events[i]);
+ }
+ }
+
+ n_expired += vec_len (expired);
+ for (i = 0; i < vec_len (expired); i++)
+ {
+ word j, idt;
+ i64 dt_cpu;
+ f64 fdt_cpu;
+
+ j = expired[i];
+ expired_bitmap[1] = clib_bitmap_ori (expired_bitmap[1], j);
+
+ dt_cpu = cpu_time - tm->events[j];
+
+ /* Event must be scheduled in correct bin. */
+ if (tm->synthetic_time)
+ ASSERT (dt_cpu >= 0 && dt_cpu <= (1 << w->log2_clocks_per_bin));
+
+ fdt_cpu = dt_cpu * tm->time.seconds_per_clock;
+
+ ave_error += fdt_cpu;
+ rms_error += fdt_cpu * fdt_cpu;
+
+ if (fdt_cpu > max_error)
+ max_error = fdt_cpu;
+ if (fdt_cpu < min_error)
+ min_error = fdt_cpu;
+
+ idt =
+ (cpu_time >> w->log2_clocks_per_bin) -
+ (tm->events[j] >> w->log2_clocks_per_bin);
+ idt = zvec_signed_to_unsigned (idt);
+ vec_validate (error_hist, idt);
+ error_hist[idt] += 1;
+ }
+
+ if (w->validate)
+ for (i = 0; i < vec_len (tm->events); i++)
+ {
+ int is_expired = clib_bitmap_get (expired_bitmap[0], i);
+ int is_expired_w = clib_bitmap_get (expired_bitmap[1], i);
+ ASSERT (is_expired == is_expired_w);
+ }
+
+ min_next_time[1] = ~0;
+ for (i = 0; i < vec_len (tm->events); i++)
+ {
+ if (!clib_bitmap_get (expired_bitmap[1], i))
+ min_next_time[1] = clib_min (min_next_time[1], tm->events[i]);
+ }
+ if (min_next_time[0] != min_next_time[1])
+ clib_error ("min next time wrong 0x%Lx != 0x%Lx", min_next_time[0],
+ min_next_time[1]);
+
+ if (tm->time_per_status_update != 0
+ && clib_time_now (&tm->time) >= tm->time_next_status_update)
+ {
+ f64 ave = 0, rms = 0;
+
+ tm->time_next_status_update += tm->time_per_status_update;
+ if (n_expired > 0)
+ {
+ ave = ave_error / n_expired;
+ rms = SQRT (rms_error / n_expired - ave * ave);
+ }
+
+ clib_warning
+ ("%12wd iter done %10wd expired; ave. error %.4e +- %.4e, range %.4e %.4e",
+ iter, n_expired, ave, rms, min_error, max_error);
+ }
+
+ if (tm->total_iterate_time != 0
+ && (clib_time_now (&tm->time) - tm->time_iterate_start
+ >= tm->total_iterate_time))
+ tm->n_iter = iter;
+
+ /* Add new events to wheel to replace expired ones. */
+ n_events_in_wheel -= vec_len (expired);
+ if (iter < tm->n_iter)
+ {
+ for (i = 0; i < vec_len (expired); i++)
+ {
+ uword j = expired[i];
+ set_event (tm, j);
+ expired_bitmap[1] =
+ clib_bitmap_andnoti (expired_bitmap[1], j);
+ }
+ n_events_in_wheel += vec_len (expired);
+ }
+ }
+
+ ave_error /= n_expired;
+ rms_error = SQRT (rms_error / n_expired - ave_error * ave_error);
+
+ clib_warning
+ ("%wd iter done %wd expired; ave. error %.4e +- %.4e, range %.4e %.4e",
+ 1 + iter, n_expired, ave_error, rms_error, min_error, max_error);
+
+ {
+ test_timing_wheel_tmp_t *fs, *f;
+ f64 total_fraction;
+
+ fs = 0;
+ for (i = 0; i < vec_len (error_hist); i++)
+ {
+ if (error_hist[i] == 0)
+ continue;
+ vec_add2 (fs, f, 1);
+ f->dt =
+ (((i64) zvec_unsigned_to_signed (i) << w->log2_clocks_per_bin) *
+ tm->time.seconds_per_clock);
+ f->fraction = (f64) error_hist[i] / (f64) n_expired;
+ f->count = error_hist[i];
+ }
+
+ vec_sort_with_function (fs, test_timing_wheel_tmp_cmp);
+
+ total_fraction = 0;
+ vec_foreach (f, fs)
+ {
+ total_fraction += f->fraction;
+ if (f == fs)
+ fformat (stdout, "%=12s %=16s %=16s %s\n", "Error max", "Fraction",
+ "Total", "Count");
+ fformat (stdout, "%12.4e %16.4f%% %16.4f%% %Ld\n", f->dt,
+ f->fraction * 100, total_fraction * 100, f->count);
+ }
+ }
+
+ clib_warning ("%U", format_timing_wheel, w, /* verbose */ 1);
+ }
+
+done:
+ return error;
+}
+
+#ifdef CLIB_UNIX
+int
+main (int argc, char *argv[])
+{
+ unformat_input_t i;
+ clib_error_t *error;
+
+ unformat_init_command_line (&i, argv);
+ error = test_timing_wheel_main (&i);
+ unformat_free (&i);
+ if (error)
+ {
+ clib_error_report (error);
+ return 1;
+ }
+ else
+ return 0;
+}
+#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_tw_timer.c b/src/vppinfra/test_tw_timer.c
new file mode 100644
index 00000000..ec0baa07
--- /dev/null
+++ b/src/vppinfra/test_tw_timer.c
@@ -0,0 +1,1275 @@
+#include <vppinfra/time.h>
+#include <vppinfra/cache.h>
+#include <vppinfra/error.h>
+#include <vppinfra/tw_timer_2t_1w_2048sl.h>
+#include <vppinfra/tw_timer_16t_2w_512sl.h>
+#include <vppinfra/tw_timer_4t_3w_256sl.h>
+#include <vppinfra/tw_timer_1t_3w_1024sl_ov.h>
+
+typedef struct
+{
+ /** Handle returned from tw_start_timer */
+ u32 stop_timer_handle;
+
+ /** Test item should expire at this clock tick */
+ u64 expected_to_expire;
+} tw_timer_test_elt_t;
+
+typedef struct
+{
+ /** Pool of test objects */
+ tw_timer_test_elt_t *test_elts;
+
+ /** The single-wheel */
+ tw_timer_wheel_2t_1w_2048sl_t single_wheel;
+
+ /** The double-wheel */
+ tw_timer_wheel_16t_2w_512sl_t double_wheel;
+
+  /** The triple wheel */
+ tw_timer_wheel_4t_3w_256sl_t triple_wheel;
+
+  /** The triple wheel with overflow vector */
+ tw_timer_wheel_1t_3w_1024sl_ov_t triple_ov_wheel;
+
+ /** random number seed */
+ u64 seed;
+
+ /** number of timers */
+ u32 ntimers;
+
+ /** number of "churn" iterations */
+ u32 niter;
+
+ /** number of clock ticks per churn iteration */
+ u32 ticks_per_iter;
+
+ /** cpu timer */
+ clib_time_t clib_time;
+} tw_timer_test_main_t;
+
+tw_timer_test_main_t tw_timer_test_main;
+
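+/* Advance a wheel by n_ticks ticks.  The timer interval is 1.0 second,
+   so stepping 'now' by 1.01 guarantees each expire call crosses exactly
+   one tick boundary despite floating-point rounding. */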
+static void
+run_single_wheel (tw_timer_wheel_2t_1w_2048sl_t * tw, u32 n_ticks)
+{
+ u32 i;
+ f64 now = tw->last_run_time + 1.01;
+
+ for (i = 0; i < n_ticks; i++)
+ {
+ tw_timer_expire_timers_2t_1w_2048sl (tw, now);
+ now += 1.01;
+ }
+}
+
+static void
+run_double_wheel (tw_timer_wheel_16t_2w_512sl_t * tw, u32 n_ticks)
+{
+ u32 i;
+ f64 now = tw->last_run_time + 1.01;
+
+ for (i = 0; i < n_ticks; i++)
+ {
+ tw_timer_expire_timers_16t_2w_512sl (tw, now);
+ now += 1.01;
+ }
+}
+
+static void
+run_triple_wheel (tw_timer_wheel_4t_3w_256sl_t * tw, u32 n_ticks)
+{
+ u32 i;
+ f64 now = tw->last_run_time + 1.01;
+
+ for (i = 0; i < n_ticks; i++)
+ {
+ tw_timer_expire_timers_4t_3w_256sl (tw, now);
+ now += 1.01;
+ }
+}
+
+static void
+run_triple_ov_wheel (tw_timer_wheel_1t_3w_1024sl_ov_t * tw, u32 n_ticks)
+{
+ u32 i;
+ f64 now = tw->last_run_time + 1.01;
+
+ for (i = 0; i < n_ticks; i++)
+ {
+ tw_timer_expire_timers_1t_3w_1024sl_ov (tw, now);
+ now += 1.01;
+ }
+}
+
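+/* Expiry callbacks.  A timer handle packs (pool_index, timer_id) into a
+   u32: with 2 timers per object the id is the top bit (mask 0x7FFFFFFF),
+   with 4 timers the top 2 bits (mask 0x3FFFFFFF), and with 16 timers the
+   top 4 bits (mask 0x0FFFFFFF).  Each callback checks that the timer
+   expired exactly on its expected tick, then recycles the test element. */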
+static void
+expired_timer_single_callback (u32 * expired_timers)
+{
+ int i;
+ u32 pool_index, timer_id;
+ tw_timer_test_elt_t *e;
+ tw_timer_test_main_t *tm = &tw_timer_test_main;
+
+ for (i = 0; i < vec_len (expired_timers); i++)
+ {
+ pool_index = expired_timers[i] & 0x7FFFFFFF;
+ timer_id = expired_timers[i] >> 31;
+
+ ASSERT (timer_id == 1);
+
+ e = pool_elt_at_index (tm->test_elts, pool_index);
+
+ if (e->expected_to_expire != tm->single_wheel.current_tick)
+ {
+ fformat (stdout, "[%d] expired at %lld not %lld\n",
+ e - tm->test_elts, tm->single_wheel.current_tick,
+ e->expected_to_expire);
+ }
+ pool_put (tm->test_elts, e);
+ }
+}
+
+static void
+expired_timer_double_callback (u32 * expired_timers)
+{
+ int i;
+ u32 pool_index, timer_id;
+ tw_timer_test_elt_t *e;
+ tw_timer_test_main_t *tm = &tw_timer_test_main;
+
+ for (i = 0; i < vec_len (expired_timers); i++)
+ {
+ pool_index = expired_timers[i] & 0x0FFFFFFF;
+ timer_id = expired_timers[i] >> 28;
+
+ ASSERT (timer_id == 14);
+
+ e = pool_elt_at_index (tm->test_elts, pool_index);
+
+ if (e->expected_to_expire != tm->double_wheel.current_tick)
+ {
+ fformat (stdout, "[%d] expired at %lld not %lld\n",
+ e - tm->test_elts, tm->double_wheel.current_tick,
+ e->expected_to_expire);
+ }
+ pool_put (tm->test_elts, e);
+ }
+}
+
+static void
+expired_timer_triple_callback (u32 * expired_timers)
+{
+ int i;
+ u32 pool_index, timer_id;
+ tw_timer_test_elt_t *e;
+ tw_timer_test_main_t *tm = &tw_timer_test_main;
+
+ for (i = 0; i < vec_len (expired_timers); i++)
+ {
+ pool_index = expired_timers[i] & 0x3FFFFFFF;
+ timer_id = expired_timers[i] >> 30;
+
+ ASSERT (timer_id == 3);
+
+ e = pool_elt_at_index (tm->test_elts, pool_index);
+
+ if (e->expected_to_expire != tm->triple_wheel.current_tick)
+ {
+ fformat (stdout, "[%d] expired at %lld not %lld\n",
+ e - tm->test_elts, tm->triple_wheel.current_tick,
+ e->expected_to_expire);
+ }
+ pool_put (tm->test_elts, e);
+ }
+}
+
+static void
+expired_timer_triple_ov_callback (u32 * expired_timers)
+{
+ int i;
+ u32 pool_index;
+ tw_timer_test_elt_t *e;
+ tw_timer_test_main_t *tm = &tw_timer_test_main;
+
+ for (i = 0; i < vec_len (expired_timers); i++)
+ {
+ pool_index = expired_timers[i];
+
+ e = pool_elt_at_index (tm->test_elts, pool_index);
+
+ if (e->expected_to_expire != tm->triple_ov_wheel.current_tick)
+ {
+ fformat (stdout, "[%d] expired at %lld not %lld\n",
+ e - tm->test_elts, tm->triple_ov_wheel.current_tick,
+ e->expected_to_expire);
+ }
+ pool_put (tm->test_elts, e);
+ }
+}
+
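+/* test2_*: "churn" tests.  Prime the wheel with ntimers random timers,
+   then for each of niter iterations: run ticks_per_iter ticks, stop and
+   delete a quarter of the surviving timers, and start replacements with
+   fresh random expirations.  Finally run the wheel past the largest
+   expiration time; the element pool should drain to empty. */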
+static clib_error_t *
+test2_single (tw_timer_test_main_t * tm)
+{
+ u32 i, j;
+ tw_timer_test_elt_t *e;
+ u32 initial_wheel_offset;
+ u64 expiration_time;
+ u32 max_expiration_time = 0;
+ u32 *deleted_indices = 0;
+ u32 adds = 0, deletes = 0;
+ f64 before, after;
+
+ clib_time_init (&tm->clib_time);
+
+ tw_timer_wheel_init_2t_1w_2048sl (&tm->single_wheel,
+ expired_timer_single_callback,
+ 1.0 /* timer interval */ , ~0);
+
+ /* Prime offset */
+ initial_wheel_offset = 757;
+
+ run_single_wheel (&tm->single_wheel, initial_wheel_offset);
+
+ fformat (stdout, "initial wheel time %d, fast index %d\n",
+ tm->single_wheel.current_tick,
+ tm->single_wheel.current_index[TW_TIMER_RING_FAST]);
+
+ initial_wheel_offset = tm->single_wheel.current_tick;
+
+ fformat (stdout,
+ "test %d timers, %d iter, %d ticks per iter, 0x%llx seed\n",
+ tm->ntimers, tm->niter, tm->ticks_per_iter, tm->seed);
+
+ before = clib_time_now (&tm->clib_time);
+
+ /* Prime the pump */
+ for (i = 0; i < tm->ntimers; i++)
+ {
+ pool_get (tm->test_elts, e);
+ memset (e, 0, sizeof (*e));
+
+ do
+ {
+ expiration_time = random_u64 (&tm->seed) & (2047);
+ }
+ while (expiration_time == 0);
+
+ if (expiration_time > max_expiration_time)
+ max_expiration_time = expiration_time;
+
+ e->expected_to_expire = expiration_time + initial_wheel_offset;
+ e->stop_timer_handle =
+ tw_timer_start_2t_1w_2048sl (&tm->single_wheel, e - tm->test_elts,
+ 1 /* timer id */ ,
+ expiration_time);
+ }
+
+ adds += i;
+
+ for (i = 0; i < tm->niter; i++)
+ {
+ run_single_wheel (&tm->single_wheel, tm->ticks_per_iter);
+
+ j = 0;
+ vec_reset_length (deleted_indices);
+ /* *INDENT-OFF* */
+ pool_foreach (e, tm->test_elts,
+ ({
+ tw_timer_stop_2t_1w_2048sl (&tm->single_wheel, e->stop_timer_handle);
+ vec_add1 (deleted_indices, e - tm->test_elts);
+ if (++j >= tm->ntimers / 4)
+ goto del_and_re_add;
+ }));
+ /* *INDENT-ON* */
+
+ del_and_re_add:
+ for (j = 0; j < vec_len (deleted_indices); j++)
+ {
+ pool_put_index (tm->test_elts, deleted_indices[j]);
+ }
+
+ deletes += j;
+
+ for (j = 0; j < tm->ntimers / 4; j++)
+ {
+ pool_get (tm->test_elts, e);
+ memset (e, 0, sizeof (*e));
+
+ do
+ {
+ expiration_time = random_u64 (&tm->seed) & (2047);
+ }
+ while (expiration_time == 0);
+
+ if (expiration_time > max_expiration_time)
+ max_expiration_time = expiration_time;
+
+ e->expected_to_expire =
+ expiration_time + tm->single_wheel.current_tick;
+ e->stop_timer_handle = tw_timer_start_2t_1w_2048sl
+ (&tm->single_wheel, e - tm->test_elts, 1 /* timer id */ ,
+ expiration_time);
+ }
+ adds += j;
+ }
+
+ vec_free (deleted_indices);
+
+ run_single_wheel (&tm->single_wheel, max_expiration_time + 1);
+
+ after = clib_time_now (&tm->clib_time);
+
+ fformat (stdout, "%d adds, %d deletes, %d ticks\n", adds, deletes,
+ tm->single_wheel.current_tick);
+ fformat (stdout, "test ran %.2f seconds, %.2f ops/second\n",
+ (after - before),
+ ((f64) adds + (f64) deletes +
+ (f64) tm->single_wheel.current_tick) / (after - before));
+
+ if (pool_elts (tm->test_elts))
+ fformat (stdout, "Note: %d elements remain in pool\n",
+ pool_elts (tm->test_elts));
+
+ /* *INDENT-OFF* */
+ pool_foreach (e, tm->test_elts,
+ ({
+ fformat (stdout, "[%d] expected to expire %d\n",
+ e - tm->test_elts,
+ e->expected_to_expire);
+ }));
+ /* *INDENT-ON* */
+
+ pool_free (tm->test_elts);
+ tw_timer_wheel_free_2t_1w_2048sl (&tm->single_wheel);
+ return 0;
+}
+
+static clib_error_t *
+test2_double (tw_timer_test_main_t * tm)
+{
+ u32 i, j;
+ tw_timer_test_elt_t *e;
+ u32 initial_wheel_offset;
+ u32 expiration_time;
+ u32 max_expiration_time = 0;
+ u32 *deleted_indices = 0;
+ u32 adds = 0, deletes = 0;
+ f64 before, after;
+
+ clib_time_init (&tm->clib_time);
+
+ tw_timer_wheel_init_16t_2w_512sl (&tm->double_wheel,
+ expired_timer_double_callback,
+ 1.0 /* timer interval */ , ~0);
+
+ /* Prime offset */
+ initial_wheel_offset = 7577;
+
+ run_double_wheel (&tm->double_wheel, initial_wheel_offset);
+
+ fformat (stdout, "initial wheel time %d, fast index %d slow index %d\n",
+ tm->double_wheel.current_tick,
+ tm->double_wheel.current_index[TW_TIMER_RING_FAST],
+ tm->double_wheel.current_index[TW_TIMER_RING_SLOW]);
+
+ initial_wheel_offset = tm->double_wheel.current_tick;
+
+ fformat (stdout,
+ "test %d timers, %d iter, %d ticks per iter, 0x%llx seed\n",
+ tm->ntimers, tm->niter, tm->ticks_per_iter, tm->seed);
+
+ before = clib_time_now (&tm->clib_time);
+
+ /* Prime the pump */
+ for (i = 0; i < tm->ntimers; i++)
+ {
+ pool_get (tm->test_elts, e);
+ memset (e, 0, sizeof (*e));
+
+ do
+ {
+ expiration_time = random_u64 (&tm->seed) & ((1 << 17) - 1);
+ }
+ while (expiration_time == 0);
+
+ if (expiration_time > max_expiration_time)
+ max_expiration_time = expiration_time;
+
+ e->expected_to_expire = expiration_time + initial_wheel_offset;
+
+ e->stop_timer_handle =
+ tw_timer_start_16t_2w_512sl (&tm->double_wheel, e - tm->test_elts,
+ 14 /* timer id */ ,
+ expiration_time);
+ }
+
+ adds += i;
+
+ for (i = 0; i < tm->niter; i++)
+ {
+ run_double_wheel (&tm->double_wheel, tm->ticks_per_iter);
+
+ j = 0;
+ vec_reset_length (deleted_indices);
+ /* *INDENT-OFF* */
+ pool_foreach (e, tm->test_elts,
+ ({
+ tw_timer_stop_16t_2w_512sl (&tm->double_wheel, e->stop_timer_handle);
+ vec_add1 (deleted_indices, e - tm->test_elts);
+ if (++j >= tm->ntimers / 4)
+ goto del_and_re_add;
+ }));
+ /* *INDENT-ON* */
+
+ del_and_re_add:
+ for (j = 0; j < vec_len (deleted_indices); j++)
+ pool_put_index (tm->test_elts, deleted_indices[j]);
+
+ deletes += j;
+
+ for (j = 0; j < tm->ntimers / 4; j++)
+ {
+ pool_get (tm->test_elts, e);
+ memset (e, 0, sizeof (*e));
+
+ do
+ {
+ expiration_time = random_u64 (&tm->seed) & ((1 << 17) - 1);
+ }
+ while (expiration_time == 0);
+
+ if (expiration_time > max_expiration_time)
+ max_expiration_time = expiration_time;
+
+ e->expected_to_expire = expiration_time +
+ tm->double_wheel.current_tick;
+
+ e->stop_timer_handle = tw_timer_start_16t_2w_512sl
+ (&tm->double_wheel, e - tm->test_elts, 14 /* timer id */ ,
+ expiration_time);
+ }
+ adds += j;
+ }
+
+ vec_free (deleted_indices);
+
+ run_double_wheel (&tm->double_wheel, max_expiration_time + 1);
+
+ after = clib_time_now (&tm->clib_time);
+
+ fformat (stdout, "%d adds, %d deletes, %d ticks\n", adds, deletes,
+ tm->double_wheel.current_tick);
+ fformat (stdout, "test ran %.2f seconds, %.2f ops/second\n",
+ (after - before),
+ ((f64) adds + (f64) deletes +
+ (f64) tm->double_wheel.current_tick) / (after - before));
+
+ if (pool_elts (tm->test_elts))
+ fformat (stdout, "Note: %d elements remain in pool\n",
+ pool_elts (tm->test_elts));
+
+ /* *INDENT-OFF* */
+ pool_foreach (e, tm->test_elts,
+ ({
+ fformat (stdout, "[%d] expected to expire %d\n",
+ e - tm->test_elts,
+ e->expected_to_expire);
+ }));
+ /* *INDENT-ON* */
+
+ pool_free (tm->test_elts);
+ tw_timer_wheel_free_16t_2w_512sl (&tm->double_wheel);
+ return 0;
+}
+
+static clib_error_t *
+test2_triple (tw_timer_test_main_t * tm)
+{
+ u32 i, j;
+ tw_timer_test_elt_t *e;
+ u32 initial_wheel_offset = 0;
+ u32 expiration_time;
+ u32 max_expiration_time = 0;
+ u32 *deleted_indices = 0;
+ u32 adds = 0, deletes = 0;
+ f64 before, after;
+
+ clib_time_init (&tm->clib_time);
+
+ tw_timer_wheel_init_4t_3w_256sl (&tm->triple_wheel,
+ expired_timer_triple_callback,
+ 1.0 /* timer interval */ , ~0);
+
+
+ /* Prime offset */
+ initial_wheel_offset = 75700;
+ run_triple_wheel (&tm->triple_wheel, initial_wheel_offset);
+
+ fformat (stdout,
+ "initial wheel time %d, fi %d si %d gi %d\n",
+ tm->triple_wheel.current_tick,
+ tm->triple_wheel.current_index[TW_TIMER_RING_FAST],
+ tm->triple_wheel.current_index[TW_TIMER_RING_SLOW],
+ tm->triple_wheel.current_index[TW_TIMER_RING_GLACIER]);
+
+ initial_wheel_offset = tm->triple_wheel.current_tick;
+
+ fformat (stdout,
+ "test %d timers, %d iter, %d ticks per iter, 0x%llx seed\n",
+ tm->ntimers, tm->niter, tm->ticks_per_iter, tm->seed);
+
+ before = clib_time_now (&tm->clib_time);
+
+ /* Prime the pump */
+ for (i = 0; i < tm->ntimers; i++)
+ {
+ pool_get (tm->test_elts, e);
+ memset (e, 0, sizeof (*e));
+
+ do
+ {
+ expiration_time = random_u64 (&tm->seed) & ((1 << 17) - 1);
+ }
+ while (expiration_time == 0);
+
+ if (expiration_time > max_expiration_time)
+ max_expiration_time = expiration_time;
+
+ e->expected_to_expire = expiration_time + initial_wheel_offset;
+
+ e->stop_timer_handle =
+ tw_timer_start_4t_3w_256sl (&tm->triple_wheel, e - tm->test_elts,
+ 3 /* timer id */ ,
+ expiration_time);
+ }
+
+ adds += i;
+
+ for (i = 0; i < tm->niter; i++)
+ {
+ run_triple_wheel (&tm->triple_wheel, tm->ticks_per_iter);
+
+ j = 0;
+ vec_reset_length (deleted_indices);
+ /* *INDENT-OFF* */
+ pool_foreach (e, tm->test_elts,
+ ({
+ tw_timer_stop_4t_3w_256sl (&tm->triple_wheel, e->stop_timer_handle);
+ vec_add1 (deleted_indices, e - tm->test_elts);
+ if (++j >= tm->ntimers / 4)
+ goto del_and_re_add;
+ }));
+ /* *INDENT-ON* */
+
+ del_and_re_add:
+ for (j = 0; j < vec_len (deleted_indices); j++)
+ pool_put_index (tm->test_elts, deleted_indices[j]);
+
+ deletes += j;
+
+ for (j = 0; j < tm->ntimers / 4; j++)
+ {
+ pool_get (tm->test_elts, e);
+ memset (e, 0, sizeof (*e));
+
+ do
+ {
+ expiration_time = random_u64 (&tm->seed) & ((1 << 17) - 1);
+ }
+ while (expiration_time == 0);
+
+ if (expiration_time > max_expiration_time)
+ max_expiration_time = expiration_time;
+
+ e->expected_to_expire = expiration_time +
+ tm->triple_wheel.current_tick;
+
+ e->stop_timer_handle = tw_timer_start_4t_3w_256sl
+ (&tm->triple_wheel, e - tm->test_elts, 3 /* timer id */ ,
+ expiration_time);
+ }
+ adds += j;
+ }
+
+ vec_free (deleted_indices);
+
+ run_triple_wheel (&tm->triple_wheel, max_expiration_time + 1);
+
+ after = clib_time_now (&tm->clib_time);
+
+ fformat (stdout, "%d adds, %d deletes, %d ticks\n", adds, deletes,
+ tm->triple_wheel.current_tick);
+ fformat (stdout, "test ran %.2f seconds, %.2f ops/second\n",
+ (after - before),
+ ((f64) adds + (f64) deletes +
+ (f64) tm->triple_wheel.current_tick) / (after - before));
+
+ if (pool_elts (tm->test_elts))
+ fformat (stdout, "Note: %d elements remain in pool\n",
+ pool_elts (tm->test_elts));
+
+ /* *INDENT-OFF* */
+ pool_foreach (e, tm->test_elts,
+ ({
+ fformat (stdout, "[%d] expected to expire %d\n",
+ e - tm->test_elts,
+ e->expected_to_expire);
+ }));
+ /* *INDENT-ON* */
+
+ pool_free (tm->test_elts);
+ tw_timer_wheel_free_4t_3w_256sl (&tm->triple_wheel);
+ return 0;
+}
+
+static clib_error_t *
+test2_triple_ov (tw_timer_test_main_t * tm)
+{
+ u32 i, j;
+ tw_timer_test_elt_t *e;
+ u32 initial_wheel_offset = 0;
+ u32 expiration_time;
+ u32 max_expiration_time = 0;
+ u32 *deleted_indices = 0;
+ u32 adds = 0, deletes = 0;
+ f64 before, after;
+
+ clib_time_init (&tm->clib_time);
+
+ tw_timer_wheel_init_1t_3w_1024sl_ov (&tm->triple_ov_wheel,
+ expired_timer_triple_ov_callback,
+ 1.0 /* timer interval */ , ~0);
+
+
+ /* Prime offset */
+ initial_wheel_offset = 75700;
+ run_triple_ov_wheel (&tm->triple_ov_wheel, initial_wheel_offset);
+
+ fformat (stdout,
+ "initial wheel time %d, fi %d si %d gi %d\n",
+ tm->triple_ov_wheel.current_tick,
+ tm->triple_ov_wheel.current_index[TW_TIMER_RING_FAST],
+ tm->triple_ov_wheel.current_index[TW_TIMER_RING_SLOW],
+ tm->triple_ov_wheel.current_index[TW_TIMER_RING_GLACIER]);
+
+ initial_wheel_offset = tm->triple_ov_wheel.current_tick;
+
+ fformat (stdout,
+ "test %d timers, %d iter, %d ticks per iter, 0x%llx seed\n",
+ tm->ntimers, tm->niter, tm->ticks_per_iter, tm->seed);
+
+ before = clib_time_now (&tm->clib_time);
+
+ /* Prime the pump */
+ for (i = 0; i < tm->ntimers; i++)
+ {
+ pool_get (tm->test_elts, e);
+ memset (e, 0, sizeof (*e));
+
+ do
+ {
+ expiration_time = random_u64 (&tm->seed) & ((1 << 17) - 1);
+ }
+ while (expiration_time == 0);
+
+ if (expiration_time > max_expiration_time)
+ max_expiration_time = expiration_time;
+
+ e->expected_to_expire = expiration_time + initial_wheel_offset;
+
+ e->stop_timer_handle =
+ tw_timer_start_1t_3w_1024sl_ov (&tm->triple_ov_wheel,
+ e - tm->test_elts, 0 /* timer id */ ,
+ expiration_time);
+ }
+
+ adds += i;
+
+ for (i = 0; i < tm->niter; i++)
+ {
+ run_triple_ov_wheel (&tm->triple_ov_wheel, tm->ticks_per_iter);
+
+ j = 0;
+ vec_reset_length (deleted_indices);
+ /* *INDENT-OFF* */
+ pool_foreach (e, tm->test_elts,
+ ({
+ tw_timer_stop_1t_3w_1024sl_ov (&tm->triple_ov_wheel,
+ e->stop_timer_handle);
+ vec_add1 (deleted_indices, e - tm->test_elts);
+ if (++j >= tm->ntimers / 4)
+ goto del_and_re_add;
+ }));
+ /* *INDENT-ON* */
+
+ del_and_re_add:
+ for (j = 0; j < vec_len (deleted_indices); j++)
+ pool_put_index (tm->test_elts, deleted_indices[j]);
+
+ deletes += j;
+
+ for (j = 0; j < tm->ntimers / 4; j++)
+ {
+ pool_get (tm->test_elts, e);
+ memset (e, 0, sizeof (*e));
+
+ do
+ {
+ expiration_time = random_u64 (&tm->seed) & ((1 << 17) - 1);
+ }
+ while (expiration_time == 0);
+
+ if (expiration_time > max_expiration_time)
+ max_expiration_time = expiration_time;
+
+ e->expected_to_expire = expiration_time +
+ tm->triple_ov_wheel.current_tick;
+
+ e->stop_timer_handle = tw_timer_start_1t_3w_1024sl_ov
+ (&tm->triple_ov_wheel, e - tm->test_elts, 0 /* timer id */ ,
+ expiration_time);
+ }
+ adds += j;
+ }
+
+ vec_free (deleted_indices);
+
+ run_triple_ov_wheel (&tm->triple_ov_wheel, max_expiration_time + 1);
+
+ after = clib_time_now (&tm->clib_time);
+
+ fformat (stdout, "%d adds, %d deletes, %d ticks\n", adds, deletes,
+ tm->triple_ov_wheel.current_tick);
+ fformat (stdout, "test ran %.2f seconds, %.2f ops/second\n",
+ (after - before),
+ ((f64) adds + (f64) deletes +
+ (f64) tm->triple_ov_wheel.current_tick) / (after - before));
+
+ if (pool_elts (tm->test_elts))
+ fformat (stdout, "Note: %d elements remain in pool\n",
+ pool_elts (tm->test_elts));
+
+ /* *INDENT-OFF* */
+ pool_foreach (e, tm->test_elts,
+ ({
+ TWT (tw_timer) * t;
+
+ fformat (stdout, "[%d] expected to expire %d\n",
+ e - tm->test_elts,
+ e->expected_to_expire);
+ t = pool_elt_at_index (tm->triple_ov_wheel.timers, e->stop_timer_handle);
+ fformat (stdout, " expiration_time %lld\n", t->expiration_time);
+ }));
+ /* *INDENT-ON* */
+
+ pool_free (tm->test_elts);
+ tw_timer_wheel_free_1t_3w_1024sl_ov (&tm->triple_ov_wheel);
+ return 0;
+}
+
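+/* test1_*: deterministic coverage test.  Timer i is started with a
+   known offset, so every expiration tick is predictable; after running
+   the wheel a few ticks past the last timer the pool should be empty. */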
+static clib_error_t *
+test1_single (tw_timer_test_main_t * tm)
+{
+ u32 i;
+ tw_timer_test_elt_t *e;
+ u32 offset;
+
+ tw_timer_wheel_init_2t_1w_2048sl (&tm->single_wheel,
+ expired_timer_single_callback,
+ 1.0 /* timer interval */ , ~0);
+
+ /*
+ * Prime offset, to make sure that the wheel starts in a
+ * non-trivial position
+ */
+ offset = 123;
+
+ run_single_wheel (&tm->single_wheel, offset);
+
+ fformat (stdout, "initial wheel time %d, fast index %d\n",
+ tm->single_wheel.current_tick,
+ tm->single_wheel.current_index[TW_TIMER_RING_FAST]);
+
+ offset = tm->single_wheel.current_tick;
+
+ for (i = 0; i < tm->ntimers; i++)
+ {
+ u32 expected_to_expire;
+ u32 timer_arg;
+
+ timer_arg = 1 + i;
+ timer_arg &= 2047;
+ if (timer_arg == 0)
+ timer_arg = 1;
+
+ expected_to_expire = timer_arg + offset;
+
+ pool_get (tm->test_elts, e);
+ memset (e, 0, sizeof (*e));
+ e->expected_to_expire = expected_to_expire;
+ e->stop_timer_handle = tw_timer_start_2t_1w_2048sl
+ (&tm->single_wheel, e - tm->test_elts, 1 /* timer id */ ,
+ timer_arg);
+ }
+ run_single_wheel (&tm->single_wheel, tm->ntimers + 3);
+
+ if (pool_elts (tm->test_elts))
+ fformat (stdout, "Note: %d elements remain in pool\n",
+ pool_elts (tm->test_elts));
+
+ /* *INDENT-OFF* */
+ pool_foreach (e, tm->test_elts,
+ ({
+ fformat(stdout, "[%d] expected to expire %d\n",
+ e - tm->test_elts,
+ e->expected_to_expire);
+ }));
+ /* *INDENT-ON* */
+
+ fformat (stdout,
+ "final wheel time %d, fast index %d\n",
+ tm->single_wheel.current_tick,
+ tm->single_wheel.current_index[TW_TIMER_RING_FAST]);
+
+ pool_free (tm->test_elts);
+ tw_timer_wheel_free_2t_1w_2048sl (&tm->single_wheel);
+ return 0;
+}
+
+static clib_error_t *
+test1_double (tw_timer_test_main_t * tm)
+{
+ u32 i;
+ tw_timer_test_elt_t *e;
+ u32 offset;
+
+ tw_timer_wheel_init_16t_2w_512sl (&tm->double_wheel,
+ expired_timer_double_callback,
+ 1.0 /* timer interval */ , ~0);
+
+ /*
+ * Prime offset, to make sure that the wheel starts in a
+ * non-trivial position
+ */
+ offset = 227989;
+
+ run_double_wheel (&tm->double_wheel, offset);
+
+ fformat (stdout, "initial wheel time %d, fast index %d\n",
+ tm->double_wheel.current_tick,
+ tm->double_wheel.current_index[TW_TIMER_RING_FAST]);
+
+ for (i = 0; i < tm->ntimers; i++)
+ {
+ pool_get (tm->test_elts, e);
+ memset (e, 0, sizeof (*e));
+
+ e->expected_to_expire = i + offset + 1;
+ e->stop_timer_handle = tw_timer_start_16t_2w_512sl
+ (&tm->double_wheel, e - tm->test_elts, 14 /* timer id */ ,
+ i + 1);
+ }
+ run_double_wheel (&tm->double_wheel, tm->ntimers + 3);
+
+ if (pool_elts (tm->test_elts))
+ fformat (stdout, "Note: %d elements remain in pool\n",
+ pool_elts (tm->test_elts));
+
+ /* *INDENT-OFF* */
+ pool_foreach (e, tm->test_elts,
+ ({
+ fformat(stdout, "[%d] expected to expire %d\n",
+ e - tm->test_elts,
+ e->expected_to_expire);
+ }));
+ /* *INDENT-ON* */
+
+ fformat (stdout,
+ "final wheel time %d, fast index %d\n",
+ tm->double_wheel.current_tick,
+ tm->double_wheel.current_index[TW_TIMER_RING_FAST]);
+
+ pool_free (tm->test_elts);
+ tw_timer_wheel_free_16t_2w_512sl (&tm->double_wheel);
+ return 0;
+}
+
+static clib_error_t *
+test3_triple_double (tw_timer_test_main_t * tm)
+{
+ tw_timer_test_elt_t *e;
+ u32 initial_wheel_offset = 0;
+ u32 expiration_time;
+ u32 max_expiration_time = 0;
+ u32 adds = 0, deletes = 0;
+ f64 before, after;
+
+ clib_time_init (&tm->clib_time);
+
+ tw_timer_wheel_init_4t_3w_256sl (&tm->triple_wheel,
+ expired_timer_triple_callback,
+ 1.0 /* timer interval */ , ~0);
+
+ initial_wheel_offset = 0;
+ run_triple_wheel (&tm->triple_wheel, initial_wheel_offset);
+
+ fformat (stdout,
+ "initial wheel time %d, fi %d si %d gi %d\n",
+ tm->triple_wheel.current_tick,
+ tm->triple_wheel.current_index[TW_TIMER_RING_FAST],
+ tm->triple_wheel.current_index[TW_TIMER_RING_SLOW],
+ tm->triple_wheel.current_index[TW_TIMER_RING_GLACIER]);
+
+ initial_wheel_offset = tm->triple_wheel.current_tick;
+
+ fformat (stdout, "Create a timer which expires at wheel-time (1, 0, 0)\n");
+
+ before = clib_time_now (&tm->clib_time);
+
+ /* Prime the pump */
+ pool_get (tm->test_elts, e);
+ memset (e, 0, sizeof (*e));
+
+ /* 1 glacier ring tick from now */
+ expiration_time = TW_SLOTS_PER_RING * TW_SLOTS_PER_RING;
+ e->expected_to_expire = expiration_time + initial_wheel_offset;
+ max_expiration_time = expiration_time;
+
+ e->stop_timer_handle =
+ tw_timer_start_4t_3w_256sl (&tm->triple_wheel, e - tm->test_elts,
+ 3 /* timer id */ ,
+ expiration_time);
+
+ run_triple_wheel (&tm->triple_wheel, max_expiration_time + 1);
+
+ after = clib_time_now (&tm->clib_time);
+
+ fformat (stdout, "%d adds, %d deletes, %d ticks\n", adds, deletes,
+ tm->triple_wheel.current_tick);
+ fformat (stdout, "test ran %.2f seconds, %.2f ops/second\n",
+ (after - before),
+ ((f64) adds + (f64) deletes +
+ (f64) tm->triple_wheel.current_tick) / (after - before));
+
+ if (pool_elts (tm->test_elts))
+ fformat (stdout, "Note: %d elements remain in pool\n",
+ pool_elts (tm->test_elts));
+
+ /* *INDENT-OFF* */
+ pool_foreach (e, tm->test_elts,
+ ({
+ fformat (stdout, "[%d] expected to expire %d\n",
+ e - tm->test_elts,
+ e->expected_to_expire);
+ }));
+ /* *INDENT-ON* */
+
+ pool_free (tm->test_elts);
+ tw_timer_wheel_free_4t_3w_256sl (&tm->triple_wheel);
+ return 0;
+}
+
+static clib_error_t *
+test4_double_double (tw_timer_test_main_t * tm)
+{
+ u32 i;
+ tw_timer_test_elt_t *e;
+ u32 initial_wheel_offset;
+ u32 expiration_time;
+ u32 max_expiration_time = 0;
+ u32 *deleted_indices = 0;
+ u32 adds = 0, deletes = 0;
+ f64 before, after;
+
+ clib_time_init (&tm->clib_time);
+
+ tw_timer_wheel_init_16t_2w_512sl (&tm->double_wheel,
+ expired_timer_double_callback,
+ 1.0 /* timer interval */ , ~0);
+ /* Prime offset */
+ initial_wheel_offset = 0;
+
+ run_double_wheel (&tm->double_wheel, initial_wheel_offset);
+
+ fformat (stdout, "initial wheel time %d, fast index %d slow index %d\n",
+ tm->double_wheel.current_tick,
+ tm->double_wheel.current_index[TW_TIMER_RING_FAST],
+ tm->double_wheel.current_index[TW_TIMER_RING_SLOW]);
+
+ initial_wheel_offset = tm->double_wheel.current_tick;
+
+ fformat (stdout, "test timer which expires at 512 ticks\n");
+
+ before = clib_time_now (&tm->clib_time);
+
+ /* Prime the pump */
+ for (i = 0; i < tm->ntimers; i++)
+ {
+ pool_get (tm->test_elts, e);
+ memset (e, 0, sizeof (*e));
+
+ expiration_time = 512;
+
+ if (expiration_time > max_expiration_time)
+ max_expiration_time = expiration_time;
+
+ e->expected_to_expire = expiration_time + initial_wheel_offset;
+ e->stop_timer_handle =
+ tw_timer_start_16t_2w_512sl (&tm->double_wheel, e - tm->test_elts,
+ 14 /* timer id */ ,
+ expiration_time);
+ }
+
+  adds += i;
+
+ vec_free (deleted_indices);
+
+ run_double_wheel (&tm->double_wheel, max_expiration_time + 1);
+
+ after = clib_time_now (&tm->clib_time);
+
+ fformat (stdout, "%d adds, %d deletes, %d ticks\n", adds, deletes,
+ tm->double_wheel.current_tick);
+ fformat (stdout, "test ran %.2f seconds, %.2f ops/second\n",
+ (after - before),
+ ((f64) adds + (f64) deletes +
+ (f64) tm->double_wheel.current_tick) / (after - before));
+
+ if (pool_elts (tm->test_elts))
+ fformat (stdout, "Note: %d elements remain in pool\n",
+ pool_elts (tm->test_elts));
+
+ /* *INDENT-OFF* */
+ pool_foreach (e, tm->test_elts,
+ ({
+ fformat (stdout, "[%d] expected to expire %d\n",
+ e - tm->test_elts,
+ e->expected_to_expire);
+ }));
+ /* *INDENT-ON* */
+
+ pool_free (tm->test_elts);
+ tw_timer_wheel_free_16t_2w_512sl (&tm->double_wheel);
+ return 0;
+}
+
+static clib_error_t *
+test5_double (tw_timer_test_main_t * tm)
+{
+ u32 i;
+ tw_timer_test_elt_t *e;
+ u32 initial_wheel_offset;
+ u32 expiration_time;
+ u32 max_expiration_time = 0;
+ u32 adds = 0, deletes = 0;
+ f64 before, after;
+
+ clib_time_init (&tm->clib_time);
+
+ tw_timer_wheel_init_16t_2w_512sl (&tm->double_wheel,
+ expired_timer_double_callback,
+ 1.0 /* timer interval */ , ~0);
+
+ /* Prime offset */
+ initial_wheel_offset = 7567;
+
+ run_double_wheel (&tm->double_wheel, initial_wheel_offset);
+
+ fformat (stdout, "initial wheel time %d, fast index %d slow index %d\n",
+ tm->double_wheel.current_tick,
+ tm->double_wheel.current_index[TW_TIMER_RING_FAST],
+ tm->double_wheel.current_index[TW_TIMER_RING_SLOW]);
+
+ initial_wheel_offset = tm->double_wheel.current_tick;
+
+ fformat (stdout,
+ "test %d timers, %d iter, %d ticks per iter, 0x%llx seed\n",
+ tm->ntimers, tm->niter, tm->ticks_per_iter, tm->seed);
+
+ before = clib_time_now (&tm->clib_time);
+
+ /* Prime the pump */
+ for (i = 0; i < tm->ntimers; i++)
+ {
+ pool_get (tm->test_elts, e);
+ memset (e, 0, sizeof (*e));
+
+ expiration_time = i + 1;
+
+ if (expiration_time > max_expiration_time)
+ max_expiration_time = expiration_time;
+
+ e->expected_to_expire = expiration_time + initial_wheel_offset;
+ e->stop_timer_handle =
+ tw_timer_start_16t_2w_512sl (&tm->double_wheel, e - tm->test_elts,
+ 14 /* timer id */ ,
+ expiration_time);
+ }
+
+ adds += i;
+
+ run_double_wheel (&tm->double_wheel, max_expiration_time + 1);
+
+ after = clib_time_now (&tm->clib_time);
+
+ fformat (stdout, "%d adds, %d deletes, %d ticks\n", adds, deletes,
+ tm->double_wheel.current_tick);
+ fformat (stdout, "test ran %.2f seconds, %.2f ops/second\n",
+ (after - before),
+ ((f64) adds + (f64) deletes +
+ (f64) tm->double_wheel.current_tick) / (after - before));
+
+ if (pool_elts (tm->test_elts))
+ fformat (stdout, "Note: %d elements remain in pool\n",
+ pool_elts (tm->test_elts));
+
+ /* *INDENT-OFF* */
+ pool_foreach (e, tm->test_elts,
+ ({
+ fformat (stdout, "[%d] expected to expire %d\n",
+ e - tm->test_elts,
+ e->expected_to_expire);
+ }));
+ /* *INDENT-ON* */
+
+ pool_free (tm->test_elts);
+ tw_timer_wheel_free_16t_2w_512sl (&tm->double_wheel);
+ return 0;
+}
+
+static clib_error_t *
+timer_test_command_fn (tw_timer_test_main_t * tm, unformat_input_t * input)
+{
+
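+  /* CLI keywords: "test1" = deterministic test, "test2" = churn test
+     (with "wheels 1|2|3" and optional "overflow"), "lebron" = test3
+     glacier-ring rollover, "wilt" = test4 fixed 512-tick expiration,
+     "linear" = test5 linearly increasing expirations. */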
+ int is_test1 = 0;
+ int num_wheels = 1;
+ int is_test2 = 0;
+ int is_test3 = 0;
+ int is_test4 = 0;
+ int is_test5 = 0;
+ int overflow = 0;
+
+ memset (tm, 0, sizeof (*tm));
+ /* Default values */
+ tm->ntimers = 100000;
+ tm->seed = 0xDEADDABEB00BFACE;
+ tm->niter = 1000;
+ tm->ticks_per_iter = 727;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "seed %lld", &tm->seed))
+ ;
+ else if (unformat (input, "test1"))
+ is_test1 = 1;
+ else if (unformat (input, "test2"))
+ is_test2 = 1;
+ else if (unformat (input, "overflow"))
+ overflow = 1;
+ else if (unformat (input, "lebron"))
+ is_test3 = 1;
+ else if (unformat (input, "wilt"))
+ is_test4 = 1;
+ else if (unformat (input, "linear"))
+ is_test5 = 1;
+ else if (unformat (input, "wheels %d", &num_wheels))
+ ;
+ else if (unformat (input, "ntimers %d", &tm->ntimers))
+ ;
+ else if (unformat (input, "niter %d", &tm->niter))
+ ;
+ else if (unformat (input, "ticks_per_iter %d", &tm->ticks_per_iter))
+ ;
+ else
+ break;
+ }
+
+ if (is_test1 + is_test2 + is_test3 + is_test4 + is_test5 == 0)
+ return clib_error_return (0, "No test specified [test1..n]");
+
+ if (num_wheels < 1 || num_wheels > 3)
+    return clib_error_return (0, "unsupported... 1, 2 or 3 wheels only");
+
+ if (is_test1)
+ {
+ if (num_wheels == 1)
+ return test1_single (tm);
+ else
+ return test1_double (tm);
+ }
+ if (is_test2)
+ {
+ if (num_wheels == 1)
+ return test2_single (tm);
+ else if (num_wheels == 2)
+ return test2_double (tm);
+ else if (num_wheels == 3)
+ {
+ if (overflow == 0)
+ return test2_triple (tm);
+ else
+ return test2_triple_ov (tm);
+ }
+ }
+ if (is_test3)
+ return test3_triple_double (tm);
+
+ if (is_test4)
+ return test4_double_double (tm);
+
+ if (is_test5)
+ return test5_double (tm);
+
+ /* NOTREACHED */
+ return 0;
+}
+
+#ifdef CLIB_UNIX
+int
+main (int argc, char *argv[])
+{
+ unformat_input_t i;
+ clib_error_t *error;
+ tw_timer_test_main_t *tm = &tw_timer_test_main;
+
+ clib_mem_init (0, 3ULL << 30);
+
+ unformat_init_command_line (&i, argv);
+ error = timer_test_command_fn (tm, &i);
+ unformat_free (&i);
+
+ if (error)
+ {
+ clib_error_report (error);
+ return 1;
+ }
+ return 0;
+}
+#endif /* CLIB_UNIX */
+
+/* For debugging... */
+int
+pifi (void *p, u32 index)
+{
+ return pool_is_free_index (p, index);
+}
+
+u32
+vl (void *p)
+{
+ return vec_len (p);
+}
+
+uword
+pe (void *v)
+{
+ return (pool_elts (v));
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_vec.c b/src/vppinfra/test_vec.c
new file mode 100644
index 00000000..f0497ac6
--- /dev/null
+++ b/src/vppinfra/test_vec.c
@@ -0,0 +1,1159 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+ Written by Fred Delley <fdelley@cisco.com> .
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifdef CLIB_LINUX_KERNEL
+#include <linux/unistd.h>
+#endif
+
+#ifdef CLIB_UNIX
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#endif
+
+#include <vppinfra/clib.h>
+#include <vppinfra/mheap.h>
+#include <vppinfra/format.h>
+#include <vppinfra/error.h>
+#include <vppinfra/random.h>
+#include <vppinfra/time.h>
+
+#include "test_vec.h"
+
+static int verbose;
+#define if_verbose(format,args...) \
+ if (verbose) { clib_warning(format, ## args); }
+
+#define MAX_CHANGE 100
+
+
+typedef enum
+{
+ /* Values have to be sequential and start with 0. */
+ OP_IS_VEC_RESIZE = 0,
+ OP_IS_VEC_ADD1,
+ OP_IS_VEC_ADD2,
+ OP_IS_VEC_ADD,
+ OP_IS_VEC_INSERT,
+ OP_IS_VEC_INSERT_ELTS,
+ OP_IS_VEC_DELETE,
+ OP_IS_VEC_DUP,
+ OP_IS_VEC_IS_EQUAL,
+ OP_IS_VEC_ZERO,
+ OP_IS_VEC_SET,
+ OP_IS_VEC_VALIDATE,
+ OP_IS_VEC_FREE,
+ OP_IS_VEC_INIT,
+ OP_IS_VEC_CLONE,
+ OP_IS_VEC_APPEND,
+ OP_IS_VEC_PREPEND,
+ /* Operations on vectors with custom headers. */
+ OP_IS_VEC_INIT_H,
+ OP_IS_VEC_RESIZE_H,
+ OP_IS_VEC_FREE_H,
+ OP_MAX,
+} op_t;
+
+#define FIRST_VEC_OP OP_IS_VEC_RESIZE
+#define LAST_VEC_OP OP_IS_VEC_PREPEND
+#define FIRST_VEC_HDR_OP OP_IS_VEC_INIT_H
+#define LAST_VEC_HDR_OP OP_IS_VEC_FREE_H
+
+uword g_prob_ratio[] = {
+ [OP_IS_VEC_RESIZE] = 5,
+ [OP_IS_VEC_ADD1] = 5,
+ [OP_IS_VEC_ADD2] = 5,
+ [OP_IS_VEC_ADD] = 5,
+ [OP_IS_VEC_INSERT] = 5,
+ [OP_IS_VEC_INSERT_ELTS] = 5,
+ [OP_IS_VEC_DELETE] = 30,
+ [OP_IS_VEC_DUP] = 5,
+ [OP_IS_VEC_IS_EQUAL] = 5,
+ [OP_IS_VEC_ZERO] = 2,
+ [OP_IS_VEC_SET] = 3,
+ [OP_IS_VEC_VALIDATE] = 5,
+ [OP_IS_VEC_FREE] = 5,
+ [OP_IS_VEC_INIT] = 5,
+ [OP_IS_VEC_CLONE] = 5,
+ [OP_IS_VEC_APPEND] = 5,
+ [OP_IS_VEC_PREPEND] = 5,
+ /* Operations on vectors with custom headers. */
+ [OP_IS_VEC_INIT_H] = 5,
+ [OP_IS_VEC_RESIZE_H] = 5,
+ [OP_IS_VEC_FREE_H] = 5,
+};
+
+op_t *g_prob;
+op_t *g_prob_wh;
+
+uword g_call_stats[OP_MAX];
+
+
+/* A structure used for both vector headers and vector elements might be
+   useful to uncover potential alignment issues. */
+
+typedef struct
+{
+ u8 field1[4];
+ CLIB_PACKED (u32 field2);
+} hdr_t;
+
+typedef struct
+{
+ u8 field1[3];
+ CLIB_PACKED (u32 field2);
+} elt_t;
+
+#ifdef CLIB_UNIX
+u32 g_seed = 0xdeadbabe;
+uword g_verbose = 1;
+#endif
+
+op_t *g_op_prob;
+uword g_set_verbose_at = ~0;
+uword g_dump_period = ~0;
+
+
+static u8 *
+format_vec_op_type (u8 * s, va_list * args)
+{
+ op_t op = va_arg (*args, int);
+
+ switch (op)
+ {
+#define _(n) \
+ case OP_IS_##n: \
+ s = format (s, "OP_IS_" #n); \
+ break;
+
+ _(VEC_RESIZE);
+ _(VEC_ADD1);
+ _(VEC_ADD2);
+ _(VEC_ADD);
+ _(VEC_INSERT);
+ _(VEC_INSERT_ELTS);
+ _(VEC_DELETE);
+ _(VEC_DUP);
+ _(VEC_IS_EQUAL);
+ _(VEC_ZERO);
+ _(VEC_SET);
+ _(VEC_VALIDATE);
+ _(VEC_FREE);
+ _(VEC_INIT);
+ _(VEC_CLONE);
+ _(VEC_APPEND);
+ _(VEC_PREPEND);
+ _(VEC_INIT_H);
+ _(VEC_RESIZE_H);
+ _(VEC_FREE_H);
+
+ default:
+ s = format (s, "Unknown vec op (%d)", op);
+ break;
+ }
+
+#undef _
+
+ return s;
+}
+
+static void
+dump_call_stats (uword * stats)
+{
+ uword i;
+
+ fformat (stdout, "Call Stats\n----------\n");
+
+ for (i = 0; i < OP_MAX; i++)
+ fformat (stdout, "%-8d %U\n", stats[i], format_vec_op_type, i);
+}
+
+
+/* XXX - Purposely low value for debugging the validator. Will be set to a
+   more sensible value later. */
+#define MAX_VEC_LEN 10
+
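+/* Statement-expression macro: builds a vector of 'len' random elements
+   (len == ~0 picks a random length up to MAX_VEC_LEN) with 'hdr_bytes'
+   of random header data.  The _v() wrapper mangles local names so the
+   macro's temporaries cannot capture the caller's identifiers. */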
+#define create_random_vec_wh(elt_type, len, hdr_bytes, seed) \
+({ \
+ elt_type * _v(v) = NULL; \
+ uword _v(l) = (len); \
+ uword _v(h) = (hdr_bytes); \
+ u8 * _v(hdr); \
+ \
+ if (_v(l) == 0) \
+ goto __done__; \
+ \
+ /* ~0 means select random length between 0 and MAX_VEC_LEN. */ \
+ if (_v(l) == ~0) \
+ _v(l) = bounded_random_u32 (&(seed), 0, MAX_VEC_LEN); \
+ \
+ _v(v) = _vec_resize (NULL, _v(l), _v(l) * sizeof (elt_type), _v(h), 0); \
+ fill_with_random_data (_v(v), vec_bytes (_v(v)), (seed)); \
+ \
+ /* Fill header with random data as well. */ \
+ if (_v(h) > 0) \
+ { \
+ _v(hdr) = vec_header (_v(v), _v(h)); \
+ fill_with_random_data (_v(hdr), _v(h), (seed)); \
+ } \
+ \
+__done__: \
+ _v(v); \
+})
+
+#define create_random_vec(elt_type, len, seed) \
+create_random_vec_wh (elt_type, len, 0, seed)
+
+#define compute_vec_hash(hash, vec) \
+({ \
+ u8 * _v(v) = (u8 *) (vec); \
+ uword _v(n) = vec_len (vec) * sizeof ((vec)[0]); \
+ u8 _v(hh) = (u8) (hash); \
+ \
+ compute_mem_hash (_v(hh), _v(v), _v(n)); \
+})
+
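+/* The validators rely on the hash being an involution: h =
+   compute_vec_hash (0, vec) taken before an operation satisfies
+   compute_vec_hash (h, vec) == 0 afterwards iff the vector contents
+   survived intact.  (compute_mem_hash lives in test_vec.h; the
+   hash ^= hash_del usage in validate_vec_delete() below indicates an
+   xor-folding scheme.) */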
+static elt_t *
+validate_vec_free (elt_t * vec)
+{
+ vec_free (vec);
+ ASSERT (vec == NULL);
+ return vec;
+}
+
+static elt_t *
+validate_vec_free_h (elt_t * vec, uword hdr_bytes)
+{
+ vec_free_h (vec, hdr_bytes);
+ ASSERT (vec == NULL);
+ return vec;
+}
+
+static void
+validate_vec_hdr (elt_t * vec, uword hdr_bytes)
+{
+ u8 *hdr;
+ u8 *hdr_end;
+ vec_header_t *vh;
+
+ if (!vec)
+ return;
+
+ vh = _vec_find (vec);
+ hdr = vec_header (vec, hdr_bytes);
+ hdr_end = vec_header_end (hdr, hdr_bytes);
+
+ ASSERT (hdr_end == (u8 *) vec);
+ ASSERT ((u8 *) vh - (u8 *) hdr >= hdr_bytes);
+}
+
+static void
+validate_vec_len (elt_t * vec)
+{
+ u8 *ptr;
+ u8 *end;
+ uword len;
+ uword bytes;
+ uword i;
+ elt_t *elt;
+
+ if (!vec)
+ return;
+
+ ptr = (u8 *) vec;
+ end = (u8 *) vec_end (vec);
+ len = vec_len (vec);
+ bytes = sizeof (vec[0]) * len;
+
+ ASSERT (bytes == vec_bytes (vec));
+ ASSERT ((ptr + bytes) == end);
+
+ i = 0;
+
+  /* XXX - TODO: confirm that auto-incrementing the pointer inside the
+     vec_is_member() call would not have the expected result
+     (vec_is_member() is a macro and may evaluate its arguments more
+     than once). */
+ while (vec_is_member (vec, (__typeof__ (vec[0]) *) ptr))
+ {
+ ptr++;
+ i++;
+ }
+
+ ASSERT (ptr == end);
+ ASSERT (i == bytes);
+
+ i = 0;
+
+ vec_foreach (elt, vec) i++;
+
+ ASSERT (i == len);
+}
+
+static void
+validate_vec (elt_t * vec, uword hdr_bytes)
+{
+ validate_vec_hdr (vec, hdr_bytes);
+ validate_vec_len (vec);
+
+ if (!vec || vec_len (vec) == 0)
+ {
+ VERBOSE3 ("Vector at %p has zero elements.\n\n", vec);
+ }
+ else
+ {
+ if (hdr_bytes > 0)
+ VERBOSE3 ("Header: %U\n",
+ format_hex_bytes, vec_header (vec, sizeof (vec[0])),
+ sizeof (vec[0]));
+
+ VERBOSE3 ("%U\n\n",
+ format_hex_bytes, vec, vec_len (vec) * sizeof (vec[0]));
+ }
+}
+
+static elt_t *
+validate_vec_resize (elt_t * vec, uword num_elts)
+{
+ uword len1 = vec_len (vec);
+ uword len2;
+ u8 hash = compute_vec_hash (0, vec);
+
+ vec_resize (vec, num_elts);
+ len2 = vec_len (vec);
+
+ ASSERT (len2 == len1 + num_elts);
+ ASSERT (compute_vec_hash (hash, vec) == 0);
+ validate_vec (vec, 0);
+ return vec;
+}
+
+static elt_t *
+validate_vec_resize_h (elt_t * vec, uword num_elts, uword hdr_bytes)
+{
+ uword len1, len2;
+ u8 *end1, *end2;
+ u8 *hdr = NULL;
+ u8 hash, hdr_hash;
+
+ len1 = vec_len (vec);
+
+ if (vec)
+ hdr = vec_header (vec, hdr_bytes);
+
+ hash = compute_vec_hash (0, vec);
+ hdr_hash = compute_mem_hash (0, hdr, hdr_bytes);
+
+ vec_resize_ha (vec, num_elts, hdr_bytes, 0);
+ len2 = vec_len (vec);
+
+ ASSERT (len2 == len1 + num_elts);
+
+ end1 = (u8 *) (vec + len1);
+ end2 = (u8 *) vec_end (vec);
+
+ while (end1 != end2)
+ {
+ ASSERT (*end1 == 0);
+ end1++;
+ }
+
+ if (vec)
+ hdr = vec_header (vec, hdr_bytes);
+
+ ASSERT (compute_vec_hash (hash, vec) == 0);
+ ASSERT (compute_mem_hash (hdr_hash, hdr, hdr_bytes) == 0);
+ validate_vec (vec, 1);
+ return vec;
+}
+
+static elt_t *
+generic_validate_vec_add (elt_t * vec, uword num_elts, uword is_add2)
+{
+ uword len1 = vec_len (vec);
+ uword len2;
+ u8 hash = compute_vec_hash (0, vec);
+ elt_t *new;
+
+ if (is_add2)
+ {
+ vec_add2 (vec, new, num_elts);
+ }
+ else
+ {
+ new = create_random_vec (elt_t, num_elts, g_seed);
+
+ VERBOSE3 ("%U\n", format_hex_bytes, new,
+ vec_len (new) * sizeof (new[0]));
+
+ /* Add the hash value of the new elements to that of the old vector. */
+ hash = compute_vec_hash (hash, new);
+
+ if (num_elts == 1)
+ vec_add1 (vec, new[0]);
+ else if (num_elts > 1)
+ vec_add (vec, new, num_elts);
+
+ vec_free (new);
+ }
+
+ len2 = vec_len (vec);
+ ASSERT (len2 == len1 + num_elts);
+
+ ASSERT (compute_vec_hash (hash, vec) == 0);
+ validate_vec (vec, 0);
+ return vec;
+}
+
+static elt_t *
+validate_vec_add1 (elt_t * vec)
+{
+ return generic_validate_vec_add (vec, 1, 0);
+}
+
+static elt_t *
+validate_vec_add2 (elt_t * vec, uword num_elts)
+{
+ return generic_validate_vec_add (vec, num_elts, 1);
+}
+
+static elt_t *
+validate_vec_add (elt_t * vec, uword num_elts)
+{
+ return generic_validate_vec_add (vec, num_elts, 0);
+}
+
+static elt_t *
+validate_vec_insert (elt_t * vec, uword num_elts, uword start_elt)
+{
+ uword len1 = vec_len (vec);
+ uword len2;
+ u8 hash;
+
+ /* vec_insert() would not handle it properly. */
+ if (start_elt > len1 || num_elts == 0)
+ return vec;
+
+ hash = compute_vec_hash (0, vec);
+ vec_insert (vec, num_elts, start_elt);
+ len2 = vec_len (vec);
+
+ ASSERT (len2 == len1 + num_elts);
+ ASSERT (compute_vec_hash (hash, vec) == 0);
+ validate_vec (vec, 0);
+ return vec;
+}
+
+static elt_t *
+validate_vec_insert_elts (elt_t * vec, uword num_elts, uword start_elt)
+{
+ uword len1 = vec_len (vec);
+ uword len2;
+ elt_t *new;
+ u8 hash;
+
+ /* vec_insert_elts() would not handle it properly. */
+ if (start_elt > len1 || num_elts == 0)
+ return vec;
+
+ new = create_random_vec (elt_t, num_elts, g_seed);
+
+ VERBOSE3 ("%U\n", format_hex_bytes, new, vec_len (new) * sizeof (new[0]));
+
+ /* Add the hash value of the new elements to that of the old vector. */
+ hash = compute_vec_hash (0, vec);
+ hash = compute_vec_hash (hash, new);
+
+ vec_insert_elts (vec, new, num_elts, start_elt);
+ len2 = vec_len (vec);
+
+ vec_free (new);
+
+ ASSERT (len2 == len1 + num_elts);
+ ASSERT (compute_vec_hash (hash, vec) == 0);
+ validate_vec (vec, 0);
+ return vec;
+}
+
+static elt_t *
+validate_vec_delete (elt_t * vec, uword num_elts, uword start_elt)
+{
+ uword len1 = vec_len (vec);
+ uword len2;
+ u8 *start;
+ u8 hash;
+ u8 hash_del;
+
+ /* vec_delete() would not handle it properly. */
+ if (start_elt + num_elts > len1)
+ return vec;
+
+ start = (u8 *) vec + (start_elt * sizeof (vec[0]));
+
+ hash = compute_vec_hash (0, vec);
+ hash_del = compute_mem_hash (0, start, num_elts * sizeof (vec[0]));
+ hash ^= hash_del;
+
+ vec_delete (vec, num_elts, start_elt);
+ len2 = vec_len (vec);
+
+ ASSERT (len2 == len1 - num_elts);
+ ASSERT (compute_vec_hash (hash, vec) == 0);
+ validate_vec (vec, 0);
+ return vec;
+}
+
+static elt_t *
+validate_vec_dup (elt_t * vec)
+{
+ elt_t *new;
+ u8 hash;
+
+ hash = compute_vec_hash (0, vec);
+ new = vec_dup (vec);
+
+ ASSERT (compute_vec_hash (hash, new) == 0);
+
+ validate_vec (new, 0);
+ return new;
+}
+
+static elt_t *
+validate_vec_zero (elt_t * vec)
+{
+ u8 *ptr;
+ u8 *end;
+
+ vec_zero (vec);
+
+ ptr = (u8 *) vec;
+ end = (u8 *) (vec + vec_len (vec));
+
+ while (ptr != end)
+ {
+ ASSERT (ptr < (u8 *) vec_end (vec));
+ ASSERT (ptr[0] == 0);
+ ptr++;
+ }
+
+ validate_vec (vec, 0);
+ return vec;
+}
+
+static void
+validate_vec_is_equal (elt_t * vec)
+{
+ elt_t *new = NULL;
+
+ if (vec_len (vec) <= 0)
+ return;
+
+ new = vec_dup (vec);
+ ASSERT (vec_is_equal (new, vec));
+ vec_free (new);
+}
+
+static elt_t *
+validate_vec_set (elt_t * vec)
+{
+ uword i;
+ uword len = vec_len (vec);
+ elt_t *new;
+
+ if (!vec)
+ return NULL;
+
+ new = create_random_vec (elt_t, 1, g_seed);
+
+ VERBOSE3 ("%U\n", format_hex_bytes, new, vec_len (new) * sizeof (new[0]));
+
+ vec_set (vec, new[0]);
+
+ for (i = 0; i < len; i++)
+ ASSERT (memcmp (&vec[i], &new[0], sizeof (vec[0])) == 0);
+
+ vec_free (new);
+ validate_vec (vec, 0);
+ return vec;
+}
+
+static elt_t *
+validate_vec_validate (elt_t * vec, uword index)
+{
+ uword len = vec_len (vec);
+ word num_new = index - len + 1;
+ u8 *ptr;
+ u8 *end;
+ u8 hash = compute_vec_hash (0, vec);
+
+ if (num_new < 0)
+ num_new = 0;
+
+ vec_validate (vec, index);
+
+ /* Old len but new vec pointer! */
+ ptr = (u8 *) (vec + len);
+ end = (u8 *) (vec + len + num_new);
+
+ ASSERT (len + num_new == vec_len (vec));
+ ASSERT (compute_vec_hash (hash, vec) == 0);
+
+ while (ptr != end)
+ {
+ ASSERT (ptr < (u8 *) vec_end (vec));
+ ASSERT (ptr[0] == 0);
+ ptr++;
+ }
+
+ validate_vec (vec, 0);
+ return vec;
+}
+
+static elt_t *
+validate_vec_init (uword num_elts)
+{
+ u8 *ptr;
+ u8 *end;
+ uword len;
+ elt_t *new;
+
+ new = vec_new (elt_t, num_elts);
+ len = vec_len (new);
+
+ ASSERT (len == num_elts);
+
+ ptr = (u8 *) new;
+ end = (u8 *) (new + len);
+
+ while (ptr != end)
+ {
+ ASSERT (ptr < (u8 *) vec_end (new));
+ ASSERT (ptr[0] == 0);
+ ptr++;
+ }
+
+ validate_vec (new, 0);
+ return new;
+}
+
+static elt_t *
+validate_vec_init_h (uword num_elts, uword hdr_bytes)
+{
+ uword i = 0;
+ u8 *ptr;
+ u8 *end;
+ uword len;
+ elt_t *new;
+
+ new = vec_new_ha (elt_t, num_elts, hdr_bytes, 0);
+ len = vec_len (new);
+
+ ASSERT (len == num_elts);
+
+ /* We have 2 zero-regions to check: header & vec data (skip _VEC struct). */
+ for (i = 0; i < 2; i++)
+ {
+ if (i == 0)
+ {
+ ptr = (u8 *) vec_header (new, hdr_bytes);
+ end = ptr + hdr_bytes;
+ }
+ else
+ {
+ ptr = (u8 *) new;
+ end = (u8 *) (new + len);
+ }
+
+ while (ptr != end)
+ {
+ ASSERT (ptr < (u8 *) vec_end (new));
+ ASSERT (ptr[0] == 0);
+ ptr++;
+ }
+ }
+
+ validate_vec (new, 1);
+ return new;
+}
+
+/* XXX - I don't understand the purpose of the vec_clone() call. */
+static elt_t *
+validate_vec_clone (elt_t * vec)
+{
+ elt_t *new;
+
+ vec_clone (new, vec);
+
+ ASSERT (vec_len (new) == vec_len (vec));
+ ASSERT (compute_vec_hash (0, new) == 0);
+ validate_vec (new, 0);
+ return new;
+}
+
+static elt_t *
+validate_vec_append (elt_t * vec)
+{
+ elt_t *new;
+ uword num_elts = bounded_random_u32 (&g_seed, 0, MAX_CHANGE);
+ uword len;
+ u8 hash = 0;
+
+ new = create_random_vec (elt_t, num_elts, g_seed);
+
+ len = vec_len (vec) + vec_len (new);
+ hash = compute_vec_hash (0, vec);
+ hash = compute_vec_hash (hash, new);
+
+ vec_append (vec, new);
+ vec_free (new);
+
+ ASSERT (vec_len (vec) == len);
+ ASSERT (compute_vec_hash (hash, vec) == 0);
+ validate_vec (vec, 0);
+ return vec;
+}
+
+static elt_t *
+validate_vec_prepend (elt_t * vec)
+{
+ elt_t *new;
+ uword num_elts = bounded_random_u32 (&g_seed, 0, MAX_CHANGE);
+ uword len;
+ u8 hash = 0;
+
+ new = create_random_vec (elt_t, num_elts, g_seed);
+
+ len = vec_len (vec) + vec_len (new);
+ hash = compute_vec_hash (0, vec);
+ hash = compute_vec_hash (hash, new);
+
+ vec_prepend (vec, new);
+ vec_free (new);
+
+ ASSERT (vec_len (vec) == len);
+ ASSERT (compute_vec_hash (hash, vec) == 0);
+ validate_vec (vec, 0);
+ return vec;
+}
+
+static void
+run_validator_wh (uword iter)
+{
+ elt_t *vec;
+ uword i;
+ uword op;
+ uword num_elts;
+ uword len;
+ uword dump_time;
+ f64 time[3]; /* [0]: start, [1]: last, [2]: current */
+
+ vec = create_random_vec_wh (elt_t, ~0, sizeof (hdr_t), g_seed);
+ validate_vec (vec, sizeof (hdr_t));
+ VERBOSE2 ("Start with len %d\n", vec_len (vec));
+
+ time[0] = unix_time_now ();
+ time[1] = time[0];
+ dump_time = g_dump_period;
+
+ for (i = 1; i <= iter; i++)
+ {
+ if (i >= g_set_verbose_at)
+ g_verbose = 2;
+
+ op = bounded_random_u32 (&g_seed, 0, vec_len (g_prob_wh) - 1);
+ op = g_prob_wh[op];
+
+ switch (op)
+ {
+ case OP_IS_VEC_INIT_H:
+ num_elts = bounded_random_u32 (&g_seed, 0, MAX_CHANGE);
+ vec_free_h (vec, sizeof (hdr_t));
+ VERBOSE2 ("vec_init_h(), new elts %d\n", num_elts);
+ vec = validate_vec_init_h (num_elts, sizeof (hdr_t));
+ break;
+
+ case OP_IS_VEC_RESIZE_H:
+ len = vec_len (vec);
+ num_elts = bounded_random_u32 (&g_seed, len, len + MAX_CHANGE);
+ VERBOSE2 ("vec_resize_h(), %d new elts.\n", num_elts);
+ vec = validate_vec_resize_h (vec, num_elts, sizeof (hdr_t));
+ break;
+
+ case OP_IS_VEC_FREE_H:
+ VERBOSE2 ("vec_free_h()\n");
+ vec = validate_vec_free_h (vec, sizeof (hdr_t));
+ break;
+
+ default:
+ ASSERT (0);
+ break;
+ }
+
+ g_call_stats[op]++;
+
+ if (i == dump_time)
+ {
+ time[2] = unix_time_now ();
+ VERBOSE1 ("%d vec ops in %f secs. (last %d in %f secs.).\n",
+ i, time[2] - time[0], g_dump_period, time[2] - time[1]);
+ time[1] = time[2];
+ dump_time += g_dump_period;
+
+ VERBOSE1 ("vec len %d\n", vec_len (vec));
+ VERBOSE2 ("%U\n\n",
+ format_hex_bytes, vec, vec_len (vec) * sizeof (vec[0]));
+ }
+
+ VERBOSE2 ("len %d\n", vec_len (vec));
+ }
+
+ validate_vec (vec, sizeof (hdr_t));
+ vec_free_h (vec, sizeof (hdr_t));
+}
+
+static void
+run_validator (uword iter)
+{
+ elt_t *vec;
+ elt_t *new;
+ uword i;
+ uword op;
+ uword num_elts;
+ uword index;
+ uword len;
+ uword dump_time;
+ f64 time[3]; /* [0]: start, [1]: last, [2]: current */
+
+ vec = create_random_vec (elt_t, ~0, g_seed);
+ validate_vec (vec, 0);
+ VERBOSE2 ("Start with len %d\n", vec_len (vec));
+
+ time[0] = unix_time_now ();
+ time[1] = time[0];
+ dump_time = g_dump_period;
+
+ for (i = 1; i <= iter; i++)
+ {
+ if (i >= g_set_verbose_at)
+ g_verbose = 2;
+
+ op = bounded_random_u32 (&g_seed, 0, vec_len (g_prob) - 1);
+ op = g_prob[op];
+
+ switch (op)
+ {
+ case OP_IS_VEC_RESIZE:
+ len = vec_len (vec);
+ num_elts = bounded_random_u32 (&g_seed, len, len + MAX_CHANGE);
+ VERBOSE2 ("vec_resize(), %d new elts.\n", num_elts);
+ vec = validate_vec_resize (vec, num_elts);
+ break;
+
+ case OP_IS_VEC_ADD1:
+ VERBOSE2 ("vec_add1()\n");
+ vec = validate_vec_add1 (vec);
+ break;
+
+ case OP_IS_VEC_ADD2:
+ num_elts = bounded_random_u32 (&g_seed, 0, MAX_CHANGE);
+ VERBOSE2 ("vec_add2(), %d new elts.\n", num_elts);
+ vec = validate_vec_add2 (vec, num_elts);
+ break;
+
+ case OP_IS_VEC_ADD:
+ num_elts = bounded_random_u32 (&g_seed, 0, MAX_CHANGE);
+ VERBOSE2 ("vec_add(), %d new elts.\n", num_elts);
+ vec = validate_vec_add (vec, num_elts);
+ break;
+
+ case OP_IS_VEC_INSERT:
+ len = vec_len (vec);
+ num_elts = bounded_random_u32 (&g_seed, 0, MAX_CHANGE);
+ index = bounded_random_u32 (&g_seed, 0,
+ (len > 0) ? (len - 1) : (0));
+ VERBOSE2 ("vec_insert(), %d new elts, index %d.\n", num_elts,
+ index);
+ vec = validate_vec_insert (vec, num_elts, index);
+ break;
+
+ case OP_IS_VEC_INSERT_ELTS:
+ len = vec_len (vec);
+ num_elts = bounded_random_u32 (&g_seed, 0, MAX_CHANGE);
+ index = bounded_random_u32 (&g_seed, 0,
+ (len > 0) ? (len - 1) : (0));
+ VERBOSE2 ("vec_insert_elts(), %d new elts, index %d.\n",
+ num_elts, index);
+ vec = validate_vec_insert_elts (vec, num_elts, index);
+ break;
+
+ case OP_IS_VEC_DELETE:
+ len = vec_len (vec);
+ index = bounded_random_u32 (&g_seed, 0, len - 1);
+ num_elts = bounded_random_u32 (&g_seed, 0,
+ (len > index) ? (len - index) : (0));
+ VERBOSE2 ("vec_delete(), %d elts, index %d.\n", num_elts, index);
+ vec = validate_vec_delete (vec, num_elts, index);
+ break;
+
+ case OP_IS_VEC_DUP:
+ VERBOSE2 ("vec_dup()\n");
+ new = validate_vec_dup (vec);
+ vec_free (new);
+ break;
+
+ case OP_IS_VEC_IS_EQUAL:
+ VERBOSE2 ("vec_is_equal()\n");
+ validate_vec_is_equal (vec);
+ break;
+
+ case OP_IS_VEC_ZERO:
+ VERBOSE2 ("vec_zero()\n");
+ vec = validate_vec_zero (vec);
+ break;
+
+ case OP_IS_VEC_SET:
+ VERBOSE2 ("vec_set()\n");
+ vec = validate_vec_set (vec);
+ break;
+
+ case OP_IS_VEC_VALIDATE:
+ len = vec_len (vec);
+ index = bounded_random_u32 (&g_seed, 0, len - 1 + MAX_CHANGE);
+ VERBOSE2 ("vec_validate(), index %d\n", index);
+ vec = validate_vec_validate (vec, index);
+ break;
+
+ case OP_IS_VEC_FREE:
+ VERBOSE2 ("vec_free()\n");
+ vec = validate_vec_free (vec);
+ break;
+
+ case OP_IS_VEC_INIT:
+ num_elts = bounded_random_u32 (&g_seed, 0, MAX_CHANGE);
+ vec_free (vec);
+ VERBOSE2 ("vec_init(), new elts %d\n", num_elts);
+ vec = validate_vec_init (num_elts);
+ break;
+
+ case OP_IS_VEC_CLONE:
+ VERBOSE2 ("vec_clone()\n");
+ new = validate_vec_clone (vec);
+ vec_free (new);
+ break;
+
+ case OP_IS_VEC_APPEND:
+ VERBOSE2 ("vec_append()\n");
+ vec = validate_vec_append (vec);
+ break;
+
+ case OP_IS_VEC_PREPEND:
+ VERBOSE2 ("vec_prepend()\n");
+ vec = validate_vec_prepend (vec);
+ break;
+
+ default:
+ ASSERT (0);
+ break;
+ }
+
+ g_call_stats[op]++;
+
+ if (i == dump_time)
+ {
+ time[2] = unix_time_now ();
+ VERBOSE1 ("%d vec ops in %f secs. (last %d in %f secs.).\n",
+ i, time[2] - time[0], g_dump_period, time[2] - time[1]);
+ time[1] = time[2];
+ dump_time += g_dump_period;
+
+ VERBOSE1 ("vec len %d\n", vec_len (vec));
+ VERBOSE2 ("%U\n\n",
+ format_hex_bytes, vec, vec_len (vec) * sizeof (vec[0]));
+ }
+
+ VERBOSE2 ("len %d\n", vec_len (vec));
+ }
+
+ validate_vec (vec, 0);
+ vec_free (vec);
+}
+
+static void
+prob_init (void)
+{
+ uword i, j, ratio, len, index;
+
+ /* Create the vector that implements the statistical profile: each op
+    index is repeated g_prob_ratio[op] times, e.g.
+    vec [ op1 op1 op1 op2 op3 op3 op3 op4 op4 .... ] */
+ for (i = FIRST_VEC_OP; i <= LAST_VEC_OP; i++)
+ {
+ ratio = g_prob_ratio[i];
+ if (ratio <= 0)
+ continue;
+
+ len = vec_len (g_prob);
+ index = len - 1 + ratio;
+ ASSERT (index >= 0);
+
+ /* Pre-allocate new elements. */
+ vec_validate (g_prob, index);
+
+ for (j = len; j <= index; j++)
+ g_prob[j] = i;
+ }
+
+ /* Operations on vectors with headers. */
+ for (i = FIRST_VEC_HDR_OP; i <= LAST_VEC_HDR_OP; i++)
+ {
+ ratio = g_prob_ratio[i];
+ if (ratio <= 0)
+ continue;
+
+ len = vec_len (g_prob_wh);
+ index = len - 1 + ratio;
+ ASSERT (index >= 0);
+
+ /* Pre-allocate new elements. */
+ vec_validate (g_prob_wh, index);
+
+ for (j = len; j <= index; j++)
+ g_prob_wh[j] = i;
+ }
+
+ VERBOSE3 ("prob_vec, len %d\n%U\n", vec_len (g_prob),
+ format_hex_bytes, g_prob, vec_len (g_prob) * sizeof (g_prob[0]));
+ VERBOSE3 ("prob_vec_wh, len %d\n%U\n", vec_len (g_prob_wh),
+ format_hex_bytes, g_prob_wh,
+ vec_len (g_prob_wh) * sizeof (g_prob_wh[0]));
+}
+
+static void
+prob_free (void)
+{
+ vec_free (g_prob);
+ vec_free (g_prob_wh);
+}
+
+int
+test_vec_main (unformat_input_t * input)
+{
+ uword iter = 1000;
+ uword help = 0;
+ uword big = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (0 == unformat (input, "iter %d", &iter)
+ && 0 == unformat (input, "seed %d", &g_seed)
+ && 0 == unformat (input, "verbose %d", &g_verbose)
+ && 0 == unformat (input, "set %d", &g_set_verbose_at)
+ && 0 == unformat (input, "dump %d", &g_dump_period)
+ && 0 == unformat (input, "help %=", &help, 1)
+ && 0 == unformat (input, "big %=", &big, 1))
+ {
+ clib_error ("unknown input `%U'", format_unformat_error, input);
+ goto usage;
+ }
+ }
+
+ if (big)
+ {
+ u8 *bigboy = 0;
+ u64 one_gig = (1 << 30);
+ u64 size;
+ u64 index;
+
+ fformat (stdout, "giant vector test...");
+ size = 5ULL * one_gig;
+
+ vec_validate (bigboy, size);
+
+ /* index is unsigned: loop down to zero and break explicitly to
+    avoid wrap-around at index == 0. */
+ for (index = size; ; index--)
+ {
+ bigboy[index] = index & 0xff;
+ if (index == 0)
+ break;
+ }
+ return 0;
+ }
+
+
+ if (help)
+ goto usage;
+
+ prob_init ();
+ run_validator (iter);
+ run_validator_wh (iter);
+ if (verbose)
+ dump_call_stats (g_call_stats);
+ prob_free ();
+
+ if (verbose)
+ {
+ memory_snap ();
+ }
+ return 0;
+
+usage:
+ fformat (stdout, "Usage: test_vec iter <N> seed <N> verbose <N> "
+ "set <N> dump <N>\n");
+ if (help)
+ return 0;
+
+ return -1;
+}
+
+#ifdef CLIB_UNIX
+int
+main (int argc, char *argv[])
+{
+ unformat_input_t i;
+ int ret;
+
+ mheap_alloc (0, (uword) 10ULL << 30);
+
+ verbose = (argc > 1);
+ unformat_init_command_line (&i, argv);
+ ret = test_vec_main (&i);
+ unformat_free (&i);
+
+ return ret;
+}
+#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_vec.h b/src/vppinfra/test_vec.h
new file mode 100644
index 00000000..28e8e2a0
--- /dev/null
+++ b/src/vppinfra/test_vec.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_test_vec_h
+#define included_test_vec_h
+
+
+#include <vppinfra/clib.h>
+#include <vppinfra/mem.h>
+#include <vppinfra/format.h>
+#include <vppinfra/error.h>
+
+
+extern uword g_verbose;
+extern u32 g_seed;
+
+always_inline u8 *
+format_u32_binary (u8 * s, va_list * va)
+{
+ u32 val = va_arg (*va, u32);
+ word i = 0;
+
+ for (i = BITS (val) - 1; i >= 0; i--)
+ {
+ if (val & (1 << i))
+ s = format (s, "1");
+ else
+ s = format (s, "0");
+ }
+
+ return s;
+}
+
+#define VERBOSE1(fmt, args...) \
+do { \
+ if (g_verbose >= 1) \
+ fformat (stdout, fmt, ## args); \
+} while (0)
+
+#define VERBOSE2(fmt, args...) \
+do { \
+ if (g_verbose >= 2) \
+ fformat (stdout, fmt, ## args); \
+} while (0)
+
+#define VERBOSE3(fmt, args...) \
+do { \
+ if (g_verbose >= 3) \
+ fformat (stdout, fmt, ## args); \
+} while (0)
+
+#define clib_mem_free_safe(p) \
+do { \
+ if (p) \
+ { \
+ clib_mem_free (p); \
+ (p) = NULL; \
+ } \
+} while (0)
+
+/* XXX - calling random_u32() from <vppinfra/random.h> gives an
+   undefined symbol in this standalone test, so use a local generator. */
+/* Park-Miller "minimal standard" generator (a = 16807, m = 2^31 - 1,
+   computed with Schrage's method to avoid overflow); period 2^31 - 2. */
+static u32
+my_random_u32 (u32 * seed_return)
+{
+ /* Unlikely mask value to XOR into seed.
+ Otherwise small seed values would give
+ non-random seeming smallish numbers. */
+ const u32 mask = 0x12345678;
+ u32 seed, a, b, result;
+
+ seed = *seed_return;
+ seed ^= mask;
+
+ a = seed / 127773;
+ b = seed % 127773;
+ seed = 16807 * b - 2836 * a;
+
+ if ((i32) seed < 0)
+ seed += ((u32) 1 << 31) - 1;
+
+ result = seed;
+
+ *seed_return = seed ^ mask;
+
+ return result;
+}
+
+static u32
+bounded_random_u32 (u32 * seed, uword lo, uword hi)
+{
+ if (lo == hi)
+ return lo;
+
+ ASSERT (lo < hi);
+
+ return ((my_random_u32 (seed) % (hi - lo + ((hi != ~0) ? (1) : (0)))) + lo);
+}
+
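+/* Usage sketch (illustration only): hi is inclusive, so
+     bounded_random_u32 (&seed, 0, 255)
+   returns a value in [0, 255].  The modulus is (hi - lo + 1), except
+   when hi == ~0, where adding 1 would overflow to 0. */
+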
+#define fill_with_random_data(ptr, bytes, seed) \
+do { \
+ u8 * _v(p) = (u8 *) (ptr); \
+ uword _v(b) = (bytes); \
+ uword _v(i); \
+ \
+ for (_v(i) = 0; _v(i) < _v(b); _v(i)++) \
+ _v(p)[_v(i)] = (u8) bounded_random_u32 (&(seed), 0, 255); \
+ \
+} while (0)
+
+#define compute_mem_hash(hash, ptr, bytes) \
+({ \
+ u8 * _v(p) = (u8 *) (ptr); \
+ uword _v(b) = (uword) (bytes); \
+ uword _v(i); \
+ uword _v(h) = (u8) (hash); \
+ \
+ if (_v(p) && _v(b) > 0) \
+ { \
+ for (_v(i) = 0; _v(i) < _v(b); _v(i)++) \
+ _v(h) ^= _v(p)[_v(i)]; \
+ } \
+ \
+ _v(h); \
+})
+
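+/* The XOR hash is an involution: re-hashing unchanged data with a
+   previously computed hash as the seed yields 0.  This is the property
+   behind the ASSERT (compute_vec_hash (hash, vec) == 0) checks in
+   test_vec.c. */
+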
+#define log2_align_down(value, align) \
+({ \
+ uword _v = (uword) (value); \
+ uword _a = (uword) (align); \
+ uword _m = (1 << _a) - 1; \
+ \
+ _v = _v & ~_m; \
+})
+
+#define log2_align_up(value, align) \
+({ \
+ uword _v = (uword) (value); \
+ uword _a = (uword) (align); \
+ uword _m = (1 << _a) - 1; \
+ \
+ _v = (_v + _m) & ~_m; \
+})
+
+#define log2_align_ptr_down(ptr, align) \
+uword_to_pointer (log2_align_down (pointer_to_uword (ptr), align), void *)
+
+#define log2_align_ptr_up(ptr, align) \
+uword_to_pointer (log2_align_up (pointer_to_uword (ptr), align), void *)
+
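+/* Example: with align = 3 (an 8-byte boundary), log2_align_down (13, 3)
+   yields 8 and log2_align_up (13, 3) yields 16. */
+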
+#define MAX_LOG2_ALIGN 6
+#define MAX_UNALIGN_OFFSET ((1 << MAX_LOG2_ALIGN) - 1)
+
+/* Allocate memory aligned to a 2^log2_align byte boundary.  If
+   ptr_to_free is non-NULL it receives the raw pointer to hand back to
+   clib_mem_free(). */
+always_inline void *
+alloc_aligned (uword size, uword log2_align, void **ptr_to_free)
+{
+ void *p;
+
+ if (size <= 0)
+ return NULL;
+
+ p = (void *) clib_mem_alloc (size + (1 << log2_align) - 1);
+
+ if (ptr_to_free)
+ *ptr_to_free = p;
+
+ return (p) ? log2_align_ptr_up (p, log2_align) : (NULL);
+}
+
+/* Allocate memory whose address is a MAX_LOG2_ALIGN-aligned address
+   plus <offset> bytes. */
+always_inline void *
+alloc_unaligned (uword size, uword offset, void **ptr_to_free)
+{
+ void *p;
+
+ if (size <= 0)
+ return NULL;
+
+ ASSERT (offset <= MAX_UNALIGN_OFFSET);
+
+ p =
+ alloc_aligned (size + (1 << MAX_LOG2_ALIGN), MAX_LOG2_ALIGN, ptr_to_free);
+
+ if (!p)
+ return NULL;
+
+ /* offset <= MAX_UNALIGN_OFFSET is asserted above, and alloc_aligned()
+    over-allocates by 1 << MAX_LOG2_ALIGN bytes, so adding the full
+    offset stays in bounds. */
+ return (void *) ((u8 *) p + offset);
+}
+
+#define memory_snap() \
+do { \
+ clib_mem_usage_t _usage = { 0 }; \
+ clib_mem_usage (&_usage); \
+ fformat (stdout, "%U\n", format_clib_mem_usage, _usage, 0); \
+} while (0)
+
+
+#endif /* included_test_vec_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_vhash.c b/src/vppinfra/test_vhash.c
new file mode 100644
index 00000000..7293fdde
--- /dev/null
+++ b/src/vppinfra/test_vhash.c
@@ -0,0 +1,757 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2010 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#if 0
+#ifdef __OPTIMIZE__
+#undef CLIB_DEBUG
+#endif
+#endif
+
+#include <vppinfra/bitmap.h>
+#include <vppinfra/error.h>
+#include <vppinfra/os.h>
+#include <vppinfra/random.h>
+#include <vppinfra/time.h>
+#include <vppinfra/vhash.h>
+
+#ifdef CLIB_HAVE_VEC128
+
+typedef struct
+{
+ u32 n_iter;
+ u32 seed;
+ u32 verbose;
+ u32 n_keys;
+ u32 log2_size;
+ u32 n_key_u32;
+
+ u32 n_vectors_div_4;
+ u32 n_vectors_mod_4;
+
+ u32 *keys;
+ u32 *results;
+
+ u32 *vhash_get_key_indices;
+ u32 *vhash_get_results;
+
+ u32 *vhash_key_indices;
+ u32 *vhash_results;
+
+ vhash_t vhash;
+
+ uword **key_hash;
+
+ struct
+ {
+ u64 n_clocks;
+ u64 n_vectors;
+ u64 n_calls;
+ } get_stats, set_stats, unset_stats;
+} test_vhash_main_t;
+
+always_inline u32
+test_vhash_key_gather (void *_tm, u32 vi, u32 wi, u32 n_key_u32s)
+{
+ test_vhash_main_t *tm = _tm;
+ ASSERT (n_key_u32s == tm->n_key_u32);
+ ASSERT (wi < n_key_u32s);
+ vi = vec_elt (tm->vhash_key_indices, vi);
+ return vec_elt (tm->keys, vi * n_key_u32s + wi);
+}
+
+always_inline u32x4
+test_vhash_4key_gather (void *_tm, u32 vi, u32 wi, u32 n_key_u32s)
+{
+ test_vhash_main_t *tm = _tm;
+ u32 *p;
+ u32x4_union_t x;
+
+ ASSERT (n_key_u32s == tm->n_key_u32);
+ ASSERT (wi < n_key_u32s);
+
+ p = vec_elt_at_index (tm->vhash_key_indices, vi + 0);
+ x.as_u32[0] = tm->keys[p[0] * n_key_u32s + wi];
+ x.as_u32[1] = tm->keys[p[1] * n_key_u32s + wi];
+ x.as_u32[2] = tm->keys[p[2] * n_key_u32s + wi];
+ x.as_u32[3] = tm->keys[p[3] * n_key_u32s + wi];
+ return x.as_u32x4;
+}
+
+always_inline u32
+test_vhash_get_result (void *_tm,
+ u32 vector_index, u32 result_index, u32 n_key_u32s)
+{
+ test_vhash_main_t *tm = _tm;
+ u32 *p = vec_elt_at_index (tm->vhash_results, vector_index);
+ p[0] = result_index;
+ return result_index;
+}
+
+always_inline u32x4
+test_vhash_get_4result (void *_tm,
+ u32 vector_index, u32x4 results, u32 n_key_u32s)
+{
+ test_vhash_main_t *tm = _tm;
+ u32 *p = vec_elt_at_index (tm->vhash_results, vector_index);
+ *(u32x4 *) p = results;
+ return results;
+}
+
+always_inline u32
+test_vhash_set_result (void *_tm,
+ u32 vector_index, u32 old_result, u32 n_key_u32s)
+{
+ test_vhash_main_t *tm = _tm;
+ u32 *p = vec_elt_at_index (tm->vhash_results, vector_index);
+ u32 new_result = p[0];
+ p[0] = old_result;
+ return new_result;
+}
+
+always_inline u32
+test_vhash_unset_result (void *_tm, u32 i, u32 old_result, u32 n_key_u32s)
+{
+ test_vhash_main_t *tm = _tm;
+ u32 *p = vec_elt_at_index (tm->vhash_results, i);
+ p[0] = old_result;
+ return 0;
+}
+
+#define _(N_KEY_U32) \
+ always_inline u32 \
+ test_vhash_key_gather_##N_KEY_U32 (void * _tm, u32 vi, u32 i) \
+ { return test_vhash_key_gather (_tm, vi, i, N_KEY_U32); } \
+ \
+ always_inline u32x4 \
+ test_vhash_key_gather_4_##N_KEY_U32 (void * _tm, u32 vi, u32 i) \
+ { return test_vhash_4key_gather (_tm, vi, i, N_KEY_U32); } \
+ \
+ clib_pipeline_stage \
+ (test_vhash_gather_keys_stage_##N_KEY_U32, \
+ test_vhash_main_t *, tm, i, \
+ { \
+ vhash_gather_4key_stage \
+ (&tm->vhash, \
+ /* vector_index */ i, \
+ test_vhash_key_gather_4_##N_KEY_U32, \
+ tm, \
+ N_KEY_U32); \
+ }) \
+ \
+ clib_pipeline_stage_no_inline \
+ (test_vhash_gather_keys_mod_stage_##N_KEY_U32, \
+ test_vhash_main_t *, tm, i, \
+ { \
+ vhash_gather_key_stage \
+ (&tm->vhash, \
+ /* vector_index */ tm->n_vectors_div_4, \
+ /* n_vectors */ tm->n_vectors_mod_4, \
+ test_vhash_key_gather_##N_KEY_U32, \
+ tm, \
+ N_KEY_U32); \
+ }) \
+ \
+ clib_pipeline_stage \
+ (test_vhash_hash_finalize_stage_##N_KEY_U32, \
+ test_vhash_main_t *, tm, i, \
+ { \
+ vhash_finalize_stage (&tm->vhash, i, N_KEY_U32); \
+ }) \
+ \
+ clib_pipeline_stage_no_inline \
+ (test_vhash_hash_finalize_mod_stage_##N_KEY_U32, \
+ test_vhash_main_t *, tm, i, \
+ { \
+ vhash_finalize_stage (&tm->vhash, tm->n_vectors_div_4, N_KEY_U32); \
+ }) \
+ \
+ clib_pipeline_stage \
+ (test_vhash_get_stage_##N_KEY_U32, \
+ test_vhash_main_t *, tm, i, \
+ { \
+ vhash_get_4_stage (&tm->vhash, \
+ /* vector_index */ i, \
+ test_vhash_get_4result, \
+ tm, N_KEY_U32); \
+ }) \
+ \
+ clib_pipeline_stage_no_inline \
+ (test_vhash_get_mod_stage_##N_KEY_U32, \
+ test_vhash_main_t *, tm, i, \
+ { \
+ vhash_get_stage (&tm->vhash, \
+ /* vector_index */ tm->n_vectors_div_4, \
+ /* n_vectors */ tm->n_vectors_mod_4, \
+ test_vhash_get_result, \
+ tm, N_KEY_U32); \
+ }) \
+ \
+ clib_pipeline_stage \
+ (test_vhash_set_stage_##N_KEY_U32, \
+ test_vhash_main_t *, tm, i, \
+ { \
+ vhash_set_stage (&tm->vhash, \
+ /* vector_index */ i, \
+ /* n_vectors */ VECTOR_WORD_TYPE_LEN (u32), \
+ test_vhash_set_result, \
+ tm, N_KEY_U32); \
+ }) \
+ \
+ clib_pipeline_stage_no_inline \
+ (test_vhash_set_mod_stage_##N_KEY_U32, \
+ test_vhash_main_t *, tm, i, \
+ { \
+ vhash_set_stage (&tm->vhash, \
+ /* vector_index */ tm->n_vectors_div_4, \
+ /* n_vectors */ tm->n_vectors_mod_4, \
+ test_vhash_set_result, \
+ tm, N_KEY_U32); \
+ }) \
+ \
+ clib_pipeline_stage \
+ (test_vhash_unset_stage_##N_KEY_U32, \
+ test_vhash_main_t *, tm, i, \
+ { \
+ vhash_unset_stage (&tm->vhash, \
+ /* vector_index */ i, \
+ /* n_vectors */ VECTOR_WORD_TYPE_LEN (u32), \
+ test_vhash_unset_result, \
+ tm, N_KEY_U32); \
+ }) \
+ \
+ clib_pipeline_stage_no_inline \
+ (test_vhash_unset_mod_stage_##N_KEY_U32, \
+ test_vhash_main_t *, tm, i, \
+ { \
+ vhash_unset_stage (&tm->vhash, \
+ /* vector_index */ tm->n_vectors_div_4, \
+ /* n_vectors */ tm->n_vectors_mod_4, \
+ test_vhash_unset_result, \
+ tm, N_KEY_U32); \
+ })
+
+_(1);
+_(2);
+_(3);
+_(4);
+_(5);
+_(6);
+
+#undef _
+
+#define _(N_KEY_U32) \
+ clib_pipeline_stage \
+ (test_vhash_hash_mix_stage_##N_KEY_U32, \
+ test_vhash_main_t *, tm, i, \
+ { \
+ vhash_mix_stage (&tm->vhash, i, N_KEY_U32); \
+ }) \
+ \
+ clib_pipeline_stage_no_inline \
+ (test_vhash_hash_mix_mod_stage_##N_KEY_U32, \
+ test_vhash_main_t *, tm, i, \
+ { \
+ vhash_mix_stage (&tm->vhash, tm->n_vectors_div_4, N_KEY_U32); \
+ })
+
+_(4);
+_(5);
+_(6);
+
+#undef _
+
+typedef enum
+{
+ GET, SET, UNSET,
+} test_vhash_op_t;
+
+static void
+test_vhash_op (test_vhash_main_t * tm,
+ u32 * key_indices,
+ u32 * results, uword n_keys, test_vhash_op_t op)
+{
+ vhash_validate_sizes (&tm->vhash, tm->n_key_u32, n_keys);
+
+ tm->vhash_results = results;
+ tm->vhash_key_indices = key_indices;
+ tm->n_vectors_div_4 = n_keys / 4;
+ tm->n_vectors_mod_4 = n_keys % 4;
+
+ if (tm->n_vectors_div_4 > 0)
+ {
+ switch (tm->n_key_u32)
+ {
+ default:
+ ASSERT (0);
+ break;
+
+#define _(N_KEY_U32) \
+ case N_KEY_U32: \
+ if (op == GET) \
+ clib_pipeline_run_3_stage \
+ (tm->n_vectors_div_4, \
+ tm, \
+ test_vhash_gather_keys_stage_##N_KEY_U32, \
+ test_vhash_hash_finalize_stage_##N_KEY_U32, \
+ test_vhash_get_stage_##N_KEY_U32); \
+ else if (op == SET) \
+ clib_pipeline_run_3_stage \
+ (tm->n_vectors_div_4, \
+ tm, \
+ test_vhash_gather_keys_stage_##N_KEY_U32, \
+ test_vhash_hash_finalize_stage_##N_KEY_U32, \
+ test_vhash_set_stage_##N_KEY_U32); \
+ else \
+ clib_pipeline_run_3_stage \
+ (tm->n_vectors_div_4, \
+ tm, \
+ test_vhash_gather_keys_stage_##N_KEY_U32, \
+ test_vhash_hash_finalize_stage_##N_KEY_U32, \
+ test_vhash_unset_stage_##N_KEY_U32); \
+ break;
+
+ _(1);
+ _(2);
+ _(3);
+
+#undef _
+
+#define _(N_KEY_U32) \
+ case N_KEY_U32: \
+ if (op == GET) \
+ clib_pipeline_run_4_stage \
+ (tm->n_vectors_div_4, \
+ tm, \
+ test_vhash_gather_keys_stage_##N_KEY_U32, \
+ test_vhash_hash_mix_stage_##N_KEY_U32, \
+ test_vhash_hash_finalize_stage_##N_KEY_U32, \
+ test_vhash_get_stage_##N_KEY_U32); \
+ else if (op == SET) \
+ clib_pipeline_run_4_stage \
+ (tm->n_vectors_div_4, \
+ tm, \
+ test_vhash_gather_keys_stage_##N_KEY_U32, \
+ test_vhash_hash_mix_stage_##N_KEY_U32, \
+ test_vhash_hash_finalize_stage_##N_KEY_U32, \
+ test_vhash_set_stage_##N_KEY_U32); \
+ else \
+ clib_pipeline_run_4_stage \
+ (tm->n_vectors_div_4, \
+ tm, \
+ test_vhash_gather_keys_stage_##N_KEY_U32, \
+ test_vhash_hash_mix_stage_##N_KEY_U32, \
+ test_vhash_hash_finalize_stage_##N_KEY_U32, \
+ test_vhash_unset_stage_##N_KEY_U32); \
+ break;
+
+ _(4);
+ _(5);
+ _(6);
+
+#undef _
+ }
+ }
+
+
+ if (tm->n_vectors_mod_4 > 0)
+ {
+ switch (tm->n_key_u32)
+ {
+ default:
+ ASSERT (0);
+ break;
+
+#define _(N_KEY_U32) \
+ case N_KEY_U32: \
+ if (op == GET) \
+ clib_pipeline_run_3_stage \
+ (1, \
+ tm, \
+ test_vhash_gather_keys_mod_stage_##N_KEY_U32, \
+ test_vhash_hash_finalize_mod_stage_##N_KEY_U32, \
+ test_vhash_get_mod_stage_##N_KEY_U32); \
+ else if (op == SET) \
+ clib_pipeline_run_3_stage \
+ (1, \
+ tm, \
+ test_vhash_gather_keys_mod_stage_##N_KEY_U32, \
+ test_vhash_hash_finalize_mod_stage_##N_KEY_U32, \
+ test_vhash_set_mod_stage_##N_KEY_U32); \
+ else \
+ clib_pipeline_run_3_stage \
+ (1, \
+ tm, \
+ test_vhash_gather_keys_mod_stage_##N_KEY_U32, \
+ test_vhash_hash_finalize_mod_stage_##N_KEY_U32, \
+ test_vhash_unset_mod_stage_##N_KEY_U32); \
+ break;
+
+ _(1);
+ _(2);
+ _(3);
+
+#undef _
+
+#define _(N_KEY_U32) \
+ case N_KEY_U32: \
+ if (op == GET) \
+ clib_pipeline_run_4_stage \
+ (1, \
+ tm, \
+ test_vhash_gather_keys_mod_stage_##N_KEY_U32, \
+ test_vhash_hash_mix_mod_stage_##N_KEY_U32, \
+ test_vhash_hash_finalize_mod_stage_##N_KEY_U32, \
+ test_vhash_get_mod_stage_##N_KEY_U32); \
+ else if (op == SET) \
+ clib_pipeline_run_4_stage \
+ (1, \
+ tm, \
+ test_vhash_gather_keys_mod_stage_##N_KEY_U32, \
+ test_vhash_hash_mix_mod_stage_##N_KEY_U32, \
+ test_vhash_hash_finalize_mod_stage_##N_KEY_U32, \
+ test_vhash_set_mod_stage_##N_KEY_U32); \
+ else \
+ clib_pipeline_run_4_stage \
+ (1, \
+ tm, \
+ test_vhash_gather_keys_mod_stage_##N_KEY_U32, \
+ test_vhash_hash_mix_mod_stage_##N_KEY_U32, \
+ test_vhash_hash_finalize_mod_stage_##N_KEY_U32, \
+ test_vhash_unset_mod_stage_##N_KEY_U32); \
+ break;
+
+ _(4);
+ _(5);
+ _(6);
+
+#undef _
+ }
+ }
+}
+
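+/* Note on pipeline depth: keys of 1..3 u32s are hashed with gather and
+   finalize stages only (3-stage pipelines), while keys of 4..6 u32s
+   need an extra mix stage (4-stage pipelines).  The *_mod_* stages
+   handle the n_keys % 4 remainder in a single scalar pass. */
+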
+int
+test_vhash_main (unformat_input_t * input)
+{
+ clib_error_t *error = 0;
+ test_vhash_main_t _tm, *tm = &_tm;
+ vhash_t *vh = &tm->vhash;
+ uword i, j;
+
+ memset (tm, 0, sizeof (tm[0]));
+ tm->n_iter = 100;
+ tm->seed = 1;
+ tm->n_keys = 1;
+ tm->n_key_u32 = 1;
+ tm->log2_size = 8;
+ tm->verbose = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "iter %d", &tm->n_iter))
+ ;
+ else if (unformat (input, "seed %d", &tm->seed))
+ ;
+ else if (unformat (input, "n-keys %d", &tm->n_keys))
+ ;
+ else if (unformat (input, "log2-size %d", &tm->log2_size))
+ ;
+ else if (unformat (input, "key-words %d", &tm->n_key_u32))
+ ;
+ else if (unformat (input, "verbose %=", &tm->verbose, 1))
+ ;
+ else
+ {
+ error = clib_error_create ("unknown input `%U'\n",
+ format_unformat_error, input);
+ goto done;
+ }
+ }
+
+ if (tm->seed == 0)
+ tm->seed = random_default_seed ();
+
+ clib_warning ("iter %d seed %d n-keys %d log2-size %d key-words %d",
+ tm->n_iter, tm->seed, tm->n_keys, tm->log2_size,
+ tm->n_key_u32);
+
+ {
+ u32 seeds[3];
+ seeds[0] = seeds[1] = seeds[2] = 0xdeadbeef;
+ vhash_init (vh, tm->log2_size, tm->n_key_u32, seeds);
+ }
+
+ /* Choose unique keys. */
+ vec_resize (tm->keys, tm->n_keys * tm->n_key_u32);
+ vec_resize (tm->key_hash, tm->n_key_u32);
+ for (i = j = 0; i < vec_len (tm->keys); i++, j++)
+ {
+ j = j == tm->n_key_u32 ? 0 : j;
+ do
+ {
+ tm->keys[i] = random_u32 (&tm->seed);
+ }
+ while (hash_get (tm->key_hash[j], tm->keys[i]));
+ hash_set (tm->key_hash[j], tm->keys[i], 0);
+ }
+
+ vec_resize (tm->results, tm->n_keys);
+ for (i = 0; i < vec_len (tm->results); i++)
+ {
+ do
+ {
+ tm->results[i] = random_u32 (&tm->seed);
+ }
+ while (tm->results[i] == ~0);
+ }
+
+ vec_resize_aligned (tm->vhash_get_results, tm->n_keys,
+ CLIB_CACHE_LINE_BYTES);
+ vec_clone (tm->vhash_get_key_indices, tm->results);
+ for (i = 0; i < vec_len (tm->vhash_get_key_indices); i++)
+ tm->vhash_get_key_indices[i] = i;
+
+ {
+ uword *is_set_bitmap = 0;
+ uword *to_set_bitmap = 0;
+ uword *to_unset_bitmap = 0;
+ u32 *to_set = 0, *to_unset = 0;
+ u32 *to_set_results = 0, *to_unset_results = 0;
+ u64 t[2];
+
+ for (i = 0; i < tm->n_iter; i++)
+ {
+ vec_reset_length (to_set);
+ vec_reset_length (to_unset);
+ vec_reset_length (to_set_results);
+ vec_reset_length (to_unset_results);
+
+ do
+ {
+ to_set_bitmap = clib_bitmap_random (to_set_bitmap,
+ tm->n_keys, &tm->seed);
+ }
+ while (clib_bitmap_is_zero (to_set_bitmap));
+ to_unset_bitmap = clib_bitmap_dup_and (to_set_bitmap, is_set_bitmap);
+ to_set_bitmap = clib_bitmap_andnot (to_set_bitmap, to_unset_bitmap);
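+ /* Keys drawn this iteration that are already present get unset; the
+    remainder get set.  is_set_bitmap thus stays an exact model of the
+    vhash contents for the checks below. */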
+
+ /* *INDENT-OFF* */
+ clib_bitmap_foreach (j, to_set_bitmap, ({
+ vec_add1 (to_set, j);
+ vec_add1 (to_set_results, tm->results[j]);
+ }));
+ /* *INDENT-ON* */
+ /* *INDENT-OFF* */
+ clib_bitmap_foreach (j, to_unset_bitmap, ({
+ vec_add1 (to_unset, j);
+ vec_add1 (to_unset_results, 0xdeadbeef);
+ }));
+ /* *INDENT-ON* */
+
+ if (vec_len (to_set) > 0)
+ {
+ t[0] = clib_cpu_time_now ();
+ test_vhash_op (tm, to_set, to_set_results, vec_len (to_set), SET);
+ t[1] = clib_cpu_time_now ();
+ tm->set_stats.n_clocks += t[1] - t[0];
+ tm->set_stats.n_vectors += vec_len (to_set);
+ tm->set_stats.n_calls += 1;
+ is_set_bitmap = clib_bitmap_or (is_set_bitmap, to_set_bitmap);
+ }
+
+ t[0] = clib_cpu_time_now ();
+ test_vhash_op (tm, tm->vhash_get_key_indices,
+ tm->vhash_get_results,
+ vec_len (tm->vhash_get_key_indices), GET);
+ t[1] = clib_cpu_time_now ();
+ tm->get_stats.n_clocks += t[1] - t[0];
+ tm->get_stats.n_vectors += vec_len (tm->vhash_get_key_indices);
+ tm->get_stats.n_calls += 1;
+
+ for (j = 0; j < vec_len (tm->vhash_get_results); j++)
+ {
+ u32 r0 = tm->vhash_get_results[j];
+ u32 r1 = tm->results[j];
+ if (clib_bitmap_get (is_set_bitmap, j))
+ {
+ if (r0 != r1)
+ os_panic ();
+ }
+ else
+ {
+ if (r0 != ~0)
+ os_panic ();
+ }
+ }
+
+ if (vh->n_elts != clib_bitmap_count_set_bits (is_set_bitmap))
+ os_panic ();
+
+ if (vec_len (to_unset) > 0)
+ {
+ t[0] = clib_cpu_time_now ();
+ test_vhash_op (tm, to_unset, to_unset_results,
+ vec_len (to_unset), UNSET);
+ t[1] = clib_cpu_time_now ();
+ tm->unset_stats.n_clocks += t[1] - t[0];
+ tm->unset_stats.n_vectors += vec_len (to_unset);
+ tm->unset_stats.n_calls += 1;
+ is_set_bitmap =
+ clib_bitmap_andnot (is_set_bitmap, to_unset_bitmap);
+ }
+
+ t[0] = clib_cpu_time_now ();
+ test_vhash_op (tm, tm->vhash_get_key_indices,
+ tm->vhash_get_results,
+ vec_len (tm->vhash_get_key_indices), GET);
+ t[1] = clib_cpu_time_now ();
+ tm->get_stats.n_clocks += t[1] - t[0];
+ tm->get_stats.n_vectors += vec_len (tm->vhash_get_key_indices);
+ tm->get_stats.n_calls += 1;
+
+ for (j = 0; j < vec_len (tm->vhash_get_results); j++)
+ {
+ u32 r0 = tm->vhash_get_results[j];
+ u32 r1 = tm->results[j];
+ if (clib_bitmap_get (is_set_bitmap, j))
+ {
+ if (r0 != r1)
+ os_panic ();
+ }
+ else
+ {
+ if (r0 != ~0)
+ os_panic ();
+ }
+ }
+
+ if (vh->n_elts != clib_bitmap_count_set_bits (is_set_bitmap))
+ os_panic ();
+ }
+
+ vhash_resize (vh, tm->log2_size + 1);
+
+ test_vhash_op (tm, tm->vhash_get_key_indices,
+ tm->vhash_get_results,
+ vec_len (tm->vhash_get_key_indices), GET);
+
+ for (j = 0; j < vec_len (tm->vhash_get_results); j++)
+ {
+ u32 r0 = tm->vhash_get_results[j];
+ u32 r1 = tm->results[j];
+ if (clib_bitmap_get (is_set_bitmap, j))
+ {
+ if (r0 != r1)
+ os_panic ();
+ }
+ else
+ {
+ if (r0 != ~0)
+ os_panic ();
+ }
+ }
+
+ if (vh->n_elts != clib_bitmap_count_set_bits (is_set_bitmap))
+ os_panic ();
+ }
+
+ {
+ clib_time_t ct;
+
+ clib_time_init (&ct);
+
+ clib_warning ("%.4e clocks/get %.4e gets/call %.4e gets/sec",
+ (f64) tm->get_stats.n_clocks /
+ (f64) tm->get_stats.n_vectors,
+ (f64) tm->get_stats.n_vectors / (f64) tm->get_stats.n_calls,
+ (f64) tm->get_stats.n_vectors /
+ (f64) (tm->get_stats.n_clocks * ct.seconds_per_clock));
+ if (tm->set_stats.n_calls > 0)
+ clib_warning ("%.4e clocks/set %.4e sets/call %.4e sets/sec",
+ (f64) tm->set_stats.n_clocks /
+ (f64) tm->set_stats.n_vectors,
+ (f64) tm->set_stats.n_vectors /
+ (f64) tm->set_stats.n_calls,
+ (f64) tm->set_stats.n_vectors /
+ (f64) (tm->set_stats.n_clocks * ct.seconds_per_clock));
+ if (tm->unset_stats.n_calls > 0)
+ clib_warning ("%.4e clocks/unset %.4e unsets/call %.4e unsets/sec",
+ (f64) tm->unset_stats.n_clocks /
+ (f64) tm->unset_stats.n_vectors,
+ (f64) tm->unset_stats.n_vectors /
+ (f64) tm->unset_stats.n_calls,
+ (f64) tm->unset_stats.n_vectors /
+ (f64) (tm->unset_stats.n_clocks * ct.seconds_per_clock));
+ }
+
+done:
+ if (error)
+ clib_error_report (error);
+ return 0;
+}
+
+#endif /* CLIB_HAVE_VEC128 */
+
+#ifndef CLIB_HAVE_VEC128
+int
+test_vhash_main (unformat_input_t * input)
+{
+ clib_error ("compiled without vector support");
+ return 0;
+}
+#endif
+
+#ifdef CLIB_UNIX
+int
+main (int argc, char *argv[])
+{
+ unformat_input_t i;
+ int r;
+
+ unformat_init_command_line (&i, argv);
+ r = test_vhash_main (&i);
+ unformat_free (&i);
+ return r;
+}
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/test_zvec.c b/src/vppinfra/test_zvec.c
new file mode 100644
index 00000000..874fdefa
--- /dev/null
+++ b/src/vppinfra/test_zvec.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/zvec.h>
+#include <vppinfra/format.h>
+#include <vppinfra/random.h>
+
+static int verbose;
+#define if_verbose(format,args...) \
+ if (verbose) { clib_warning(format, ## args); }
+
+int
+test_zvec_main (unformat_input_t * input)
+{
+ uword n_iterations;
+ uword i;
+ u32 seed;
+
+ n_iterations = 1024;
+ seed = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (0 == unformat (input, "iter %d", &n_iterations)
+ && 0 == unformat (input, "seed %d", &seed))
+ clib_error ("unknown input `%U'", format_unformat_error, input);
+ }
+
+ if_verbose ("%d iterations, seed %d\n", n_iterations, seed);
+
+ for (i = 0; i < n_iterations; i++)
+ {
+ uword coding, data, d[2], limit, n_zdata_bits[2];
+
+ if (seed)
+ coding = random_u32 (&seed);
+ else
+ coding = i;
+
+ limit = coding - 1;
+ if (limit > (1 << 16))
+ limit = 1 << 16;
+ for (data = 0; data <= limit; data++)
+ {
+ d[0] = zvec_encode (coding, data, &n_zdata_bits[0]);
+
+ if (coding != 0)
+ ASSERT ((d[0] >> n_zdata_bits[0]) == 0);
+
+ d[1] = zvec_decode (coding, d[0], &n_zdata_bits[1]);
+ ASSERT (data == d[1]);
+
+ ASSERT (n_zdata_bits[0] == n_zdata_bits[1]);
+ }
+ }
+
+ return 0;
+}
+
+#ifdef CLIB_UNIX
+int
+main (int argc, char *argv[])
+{
+ unformat_input_t i;
+ int ret;
+
+ verbose = (argc > 1);
+ unformat_init_command_line (&i, argv);
+ ret = test_zvec_main (&i);
+ unformat_free (&i);
+
+ return ret;
+}
+#endif /* CLIB_UNIX */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/time.c b/src/vppinfra/time.c
new file mode 100644
index 00000000..168d7375
--- /dev/null
+++ b/src/vppinfra/time.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/os.h>
+#include <vppinfra/time.h>
+#include <vppinfra/format.h>
+#include <vppinfra/cpu.h>
+
+#ifdef CLIB_UNIX
+
+#include <math.h>
+#include <sys/time.h>
+#include <fcntl.h>
+
+/* A not-very-accurate way of determining the CPU clock frequency on
+   unix: busy-wait a fixed interval and count TSC ticks.  Better to use
+   /proc/cpuinfo on Linux. */
+static f64
+estimate_clock_frequency (f64 sample_time)
+{
+ /* Round to nearest 10MHz (100e5 Hz). */
+ const f64 round_to_units = 100e5;
+
+ f64 time_now, time_start, time_limit, freq;
+ u64 ifreq, t[2];
+
+ time_start = time_now = unix_time_now ();
+ time_limit = time_now + sample_time;
+ t[0] = clib_cpu_time_now ();
+ while (time_now < time_limit)
+ time_now = unix_time_now ();
+ t[1] = clib_cpu_time_now ();
+
+ freq = (t[1] - t[0]) / (time_now - time_start);
+ ifreq = flt_round_nearest (freq / round_to_units);
+ freq = ifreq * round_to_units;
+
+ return freq;
+}
+
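+/* Worked example: estimate_clock_frequency (1e-3) busy-waits ~1 ms; on
+   a 3 GHz part that is ~3e6 TSC ticks, so freq ~= 3e6 / 1e-3 = 3e9 Hz,
+   which rounding to the nearest 10 MHz leaves unchanged. */
+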
+/* Fetch the cpu frequency by parsing /proc/cpuinfo.
+   Only works for Linux. */
+static f64
+clock_frequency_from_proc_filesystem (void)
+{
+ f64 cpu_freq = 0;
+ f64 ppc_timebase = 0; /* overrides cpu_freq on PPC */
+ int fd;
+ unformat_input_t input;
+
+/* $$$$ aarch64 kernel doesn't report "cpu MHz" */
+#if defined(__aarch64__)
+ return 0.0;
+#endif
+
+ fd = open ("/proc/cpuinfo", 0);
+ if (fd < 0)
+ return cpu_freq;
+
+ unformat_init_unix_file (&input, fd);
+
+ while (unformat_check_input (&input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (&input, "cpu MHz : %f", &cpu_freq))
+ cpu_freq *= 1e6;
+ else if (unformat (&input, "timebase : %f", &ppc_timebase))
+ ;
+ else
+ unformat_skip_line (&input);
+ }
+
+ unformat_free (&input);
+
+ close (fd);
+
+ /* Override CPU frequency with time base for PPC. */
+ if (ppc_timebase != 0)
+ cpu_freq = ppc_timebase;
+
+ return cpu_freq;
+}
+
+/* Fetch the cpu frequency by reading
+   /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq.
+   Only works for Linux. */
+static f64
+clock_frequency_from_sys_filesystem (void)
+{
+ f64 cpu_freq;
+ int fd;
+ unformat_input_t input;
+
+ /* Time stamp always runs at max frequency. */
+ cpu_freq = 0;
+ fd = open ("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", 0);
+ if (fd < 0)
+ goto done;
+
+ unformat_init_unix_file (&input, fd);
+ unformat (&input, "%f", &cpu_freq);
+ cpu_freq *= 1e3; /* measured in kHz */
+ unformat_free (&input);
+ close (fd);
+done:
+ return cpu_freq;
+}
+
+f64
+os_cpu_clock_frequency (void)
+{
+ f64 cpu_freq;
+
+ if (clib_cpu_supports_invariant_tsc ())
+ return estimate_clock_frequency (1e-3);
+
+#if defined (__aarch64__)
+ u64 tsc;
+ asm volatile ("mrs %0, CNTFRQ_EL0":"=r" (tsc));
+ return (f64) tsc;
+#endif
+
+ /* First try /sys version. */
+ cpu_freq = clock_frequency_from_sys_filesystem ();
+ if (cpu_freq != 0)
+ return cpu_freq;
+
+ /* Next try /proc version. */
+ cpu_freq = clock_frequency_from_proc_filesystem ();
+ if (cpu_freq != 0)
+ return cpu_freq;
+
+ /* If /proc/cpuinfo fails (e.g. not running on Linux), fall back to an
+    estimate from a timed busy-wait. */
+ return estimate_clock_frequency (1e-3);
+}
+
+#endif /* CLIB_UNIX */
+
+/* Initialize time. */
+void
+clib_time_init (clib_time_t * c)
+{
+ memset (c, 0, sizeof (c[0]));
+ c->clocks_per_second = os_cpu_clock_frequency ();
+ c->seconds_per_clock = 1 / c->clocks_per_second;
+ c->log2_clocks_per_second = min_log2_u64 ((u64) c->clocks_per_second);
+
+ /* Initially verify frequency every sec */
+ c->log2_clocks_per_frequency_verify = c->log2_clocks_per_second;
+
+ c->last_verify_reference_time = unix_time_now ();
+ c->last_cpu_time = clib_cpu_time_now ();
+ c->init_cpu_time = c->last_verify_cpu_time = c->last_cpu_time;
+}
+
+void
+clib_time_verify_frequency (clib_time_t * c)
+{
+ f64 now_reference = unix_time_now ();
+ f64 dtr = now_reference - c->last_verify_reference_time;
+ f64 dtr_max;
+ u64 dtc = c->last_cpu_time - c->last_verify_cpu_time;
+ f64 round_units = 100e5;
+
+ c->last_verify_cpu_time = c->last_cpu_time;
+ c->last_verify_reference_time = now_reference;
+
+ /*
+ * Is the reported reference interval non-positive,
+ * or off by a factor of two - or 8 seconds - whichever is larger?
+ * Someone reset the clock behind our back.
+ */
+ dtr_max = (f64) (2ULL << c->log2_clocks_per_frequency_verify) /
+ (f64) (1ULL << c->log2_clocks_per_second);
+ dtr_max = dtr_max > 8.0 ? dtr_max : 8.0;
+
+ if (dtr <= 0.0 || dtr > dtr_max)
+ {
+ c->log2_clocks_per_frequency_verify = c->log2_clocks_per_second;
+ return;
+ }
+
+ c->clocks_per_second =
+ flt_round_nearest ((f64) dtc / (dtr * round_units)) * round_units;
+ c->seconds_per_clock = 1 / c->clocks_per_second;
+
+ /* Double time between verifies; max at 64 secs ~ 1 minute. */
+ if (c->log2_clocks_per_frequency_verify < c->log2_clocks_per_second + 6)
+ c->log2_clocks_per_frequency_verify += 1;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/time.h b/src/vppinfra/time.h
new file mode 100644
index 00000000..3fdc7d43
--- /dev/null
+++ b/src/vppinfra/time.h
@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_time_h
+#define included_time_h
+
+#include <vppinfra/clib.h>
+
+typedef struct
+{
+ /* Total run time in clock cycles
+ since clib_time_init call. */
+ u64 total_cpu_time;
+
+ /* Last recorded time stamp. */
+ u64 last_cpu_time;
+
+ /* CPU clock frequency. */
+ f64 clocks_per_second;
+
+ /* 1 / cpu clock frequency: conversion factor
+ from clock cycles into seconds. */
+ f64 seconds_per_clock;
+
+ /* Time stamp of call to clib_time_init. */
+ u64 init_cpu_time;
+
+ u64 last_verify_cpu_time;
+
+ /* Same but for reference time (if present). */
+ f64 last_verify_reference_time;
+
+ u32 log2_clocks_per_second, log2_clocks_per_frequency_verify;
+} clib_time_t;
+
+/* Return CPU time stamp as 64bit number. */
+#if defined(__x86_64__) || defined(i386)
+always_inline u64
+clib_cpu_time_now (void)
+{
+ u32 a, d;
+ asm volatile ("rdtsc":"=a" (a), "=d" (d));
+ return (u64) a + ((u64) d << (u64) 32);
+}
+
+#elif defined (__powerpc64__)
+
+always_inline u64
+clib_cpu_time_now (void)
+{
+ u64 t;
+ asm volatile ("mftb %0":"=r" (t));
+ return t;
+}
+
+#elif defined (__SPU__)
+
+always_inline u64
+clib_cpu_time_now (void)
+{
+#ifdef _XLC
+ return spu_rdch (0x8);
+#else
+ return 0 /* __builtin_si_rdch (0x8) FIXME */ ;
+#endif
+}
+
+#elif defined (__powerpc__)
+
+always_inline u64
+clib_cpu_time_now (void)
+{
+ u32 hi1, hi2, lo;
+ asm volatile ("1:\n"
+ "mftbu %[hi1]\n"
+ "mftb %[lo]\n"
+ "mftbu %[hi2]\n"
+ "cmpw %[hi1],%[hi2]\n"
+ "bne 1b\n":[hi1] "=r" (hi1),[hi2] "=r" (hi2),[lo] "=r" (lo));
+ return (u64) lo + ((u64) hi2 << (u64) 32);
+}
+
+#elif defined (__arm__)
+#if defined(__ARM_ARCH_8A__)
+always_inline u64
+clib_cpu_time_now (void) /* We may run arm64 in aarch32 mode, to leverage 64bit counter */
+{
+ u64 tsc;
+ asm volatile ("mrrc p15, 0, %Q0, %R0, c9":"=r" (tsc));
+ return tsc;
+}
+#elif defined(__ARM_ARCH_7A__)
+always_inline u64
+clib_cpu_time_now (void)
+{
+ u32 tsc;
+ asm volatile ("mrc p15, 0, %0, c9, c13, 0":"=r" (tsc));
+ return (u64) tsc;
+}
+#else
+always_inline u64
+clib_cpu_time_now (void)
+{
+ u32 lo;
+ asm volatile ("mrc p15, 0, %[lo], c15, c12, 1":[lo] "=r" (lo));
+ return (u64) lo;
+}
+#endif
+
+#elif defined (__xtensa__)
+
+/* Stub for now. */
+always_inline u64
+clib_cpu_time_now (void)
+{
+ return 0;
+}
+
+#elif defined (__TMS320C6X__)
+
+always_inline u64
+clib_cpu_time_now (void)
+{
+ u32 l, h;
+
+ asm volatile (" dint\n"
+ " mvc .s2 TSCL,%0\n"
+ " mvc .s2 TSCH,%1\n" " rint\n":"=b" (l), "=b" (h));
+
+ return ((u64) h << 32) | l;
+}
+
+#elif defined (__aarch64__)
+always_inline u64
+clib_cpu_time_now (void)
+{
+ u64 tsc;
+
+ /* Works on Cavium ThunderX. Other platforms: YMMV */
+ asm volatile ("mrs %0, cntvct_el0":"=r" (tsc));
+
+ return tsc;
+}
+
+#else
+#error "don't know how to read CPU time stamp"
+
+#endif
+
+void clib_time_verify_frequency (clib_time_t * c);
+
+always_inline f64
+clib_time_now_internal (clib_time_t * c, u64 n)
+{
+ u64 l = c->last_cpu_time;
+ u64 t = c->total_cpu_time;
+ t += n - l;
+ c->total_cpu_time = t;
+ c->last_cpu_time = n;
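+ /* (x >> k) is non-zero iff x >= 2^k: re-check the frequency once
+    2^log2_clocks_per_frequency_verify clocks have elapsed since the
+    last verify. */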
+ if (PREDICT_FALSE
+ ((c->last_cpu_time -
+ c->last_verify_cpu_time) >> c->log2_clocks_per_frequency_verify))
+ clib_time_verify_frequency (c);
+ return t * c->seconds_per_clock;
+}
+
+always_inline f64
+clib_time_now (clib_time_t * c)
+{
+ return clib_time_now_internal (c, clib_cpu_time_now ());
+}
+
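+/* Usage sketch (illustration only; the clib_time_t is caller-owned):
+
+     clib_time_t ct;
+     clib_time_init (&ct);
+     f64 t0 = clib_time_now (&ct);
+     ... do work ...
+     f64 elapsed = clib_time_now (&ct) - t0;
+*/
+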
+always_inline void
+clib_cpu_time_wait (u64 dt)
+{
+ u64 t_end = clib_cpu_time_now () + dt;
+ while (clib_cpu_time_now () < t_end)
+ ;
+}
+
+void clib_time_init (clib_time_t * c);
+
+#ifdef CLIB_UNIX
+
+#include <time.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+
+/* Use 64bit floating point to represent time offset from epoch. */
+always_inline f64
+unix_time_now (void)
+{
+ /* clock_gettime without indirect syscall uses GLIBC wrappers which
+ we don't want. Just the bare metal, please. */
+ struct timespec ts;
+ syscall (SYS_clock_gettime, CLOCK_REALTIME, &ts);
+ return ts.tv_sec + 1e-9 * ts.tv_nsec;
+}
+
+/* As above but integer number of nano-seconds. */
+always_inline u64
+unix_time_now_nsec (void)
+{
+ struct timespec ts;
+ syscall (SYS_clock_gettime, CLOCK_REALTIME, &ts);
+ /* Integer arithmetic: an f64 cannot represent the current
+    nanoseconds-since-epoch exactly. */
+ return (u64) ts.tv_sec * 1000000000ULL + ts.tv_nsec;
+}
+
+always_inline void
+unix_time_now_nsec_fraction (u32 * sec, u32 * nsec)
+{
+ struct timespec ts;
+ syscall (SYS_clock_gettime, CLOCK_REALTIME, &ts);
+ *sec = ts.tv_sec;
+ *nsec = ts.tv_nsec;
+}
+
+always_inline f64
+unix_usage_now (void)
+{
+ struct rusage u;
+ getrusage (RUSAGE_SELF, &u);
+ return u.ru_utime.tv_sec + 1e-6 * u.ru_utime.tv_usec
+ + u.ru_stime.tv_sec + 1e-6 * u.ru_stime.tv_usec;
+}
+
+always_inline void
+unix_sleep (f64 dt)
+{
+ struct timespec t;
+ t.tv_sec = dt;
+ /* tv_nsec holds only the fractional second; nanosleep rejects
+    tv_nsec >= 1e9. */
+ t.tv_nsec = 1e9 * (dt - (f64) t.tv_sec);
+ nanosleep (&t, 0);
+}
+
+#else /* ! CLIB_UNIX */
+
+always_inline f64
+unix_time_now (void)
+{
+ return 0;
+}
+
+always_inline u64
+unix_time_now_nsec (void)
+{
+ return 0;
+}
+
+always_inline void
+unix_time_now_nsec_fraction (u32 * sec, u32 * nsec)
+{
+}
+
+always_inline f64
+unix_usage_now (void)
+{
+ return 0;
+}
+
+always_inline void
+unix_sleep (f64 dt)
+{
+}
+
+#endif
+
+#endif /* included_time_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/timer.c b/src/vppinfra/timer.c
new file mode 100644
index 00000000..0221cb74
--- /dev/null
+++ b/src/vppinfra/timer.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/param.h>
+
+#include <vppinfra/vec.h>
+#include <vppinfra/time.h>
+#include <vppinfra/timer.h>
+#include <vppinfra/error.h>
+
+typedef struct
+{
+ f64 time;
+ timer_func_t *func;
+ any arg;
+} timer_callback_t;
+
+/* Vector of currently unexpired timers. */
+static timer_callback_t *timers;
+
+/* Convert time from 64bit floating format to struct timeval. */
+always_inline void
+f64_to_tv (f64 t, struct timeval *tv)
+{
+ tv->tv_sec = t;
+ tv->tv_usec = 1e6 * (t - tv->tv_sec);
+ while (tv->tv_usec >= 1000000)
+ {
+ tv->tv_usec -= 1000000;
+ tv->tv_sec += 1;
+ }
+}
+
+/* Sort timers so that timer soonest to expire is at end. */
+static int
+timer_compare (const void *_a, const void *_b)
+{
+ const timer_callback_t *a = _a;
+ const timer_callback_t *b = _b;
+ f64 dt = b->time - a->time;
+ return dt < 0 ? -1 : (dt > 0 ? +1 : 0);
+}
+
+static inline void
+sort_timers (timer_callback_t * timers)
+{
+ qsort (timers, vec_len (timers), sizeof (timers[0]), timer_compare);
+}
+
+#define TIMER_SIGNAL SIGALRM
+
+/* Don't bother setting the timer if the time difference is less than
+   this value. */
+/* We would like to initialize this to 0.75 / (f64) HZ,
+ * but HZ may not be a compile-time constant on some systems,
+ * so instead we do the initialization before first use.
+ */
+static f64 time_resolution;
+
+/* Interrupt handler. Call functions for all expired timers.
+ Set time for next timer interrupt. */
+static void
+timer_interrupt (int signum)
+{
+ f64 now = unix_time_now ();
+ f64 dt;
+ timer_callback_t *t;
+
+ while (1)
+ {
+ if (vec_len (timers) <= 0)
+ return;
+
+ /* Consider last (earliest) timer in reverse sorted
+ vector of pending timers. */
+ t = vec_end (timers) - 1;
+
+ ASSERT (now >= 0 && finite (now));
+
+ /* Time difference between when timer goes off and now. */
+ dt = t->time - now;
+
+ /* If timer is within threshold of going off
+ call user's callback. */
+ if (dt <= time_resolution && finite (dt))
+ {
+ _vec_len (timers) -= 1;
+ (*t->func) (t->arg, -dt);
+ }
+ else
+ {
+ /* Set timer to go off in the future. */
+ struct itimerval itv;
+ memset (&itv, 0, sizeof (itv));
+ f64_to_tv (dt, &itv.it_value);
+ if (setitimer (ITIMER_REAL, &itv, 0) < 0)
+ clib_unix_error ("sititmer");
+ return;
+ }
+ }
+}
+
+void
+timer_block (sigset_t * save)
+{
+ sigset_t block_timer;
+
+ memset (&block_timer, 0, sizeof (block_timer));
+ sigaddset (&block_timer, TIMER_SIGNAL);
+ sigprocmask (SIG_BLOCK, &block_timer, save);
+}
+
+void
+timer_unblock (sigset_t * save)
+{
+ sigprocmask (SIG_SETMASK, save, 0);
+}
+
+/* Arrange for function to be called some time,
+ roughly equal to dt seconds, in the future. */
+void
+timer_call (timer_func_t * func, any arg, f64 dt)
+{
+ timer_callback_t *t;
+ sigset_t save;
+
+ /* Install signal handler on first call. */
+ static word signal_installed = 0;
+
+ if (!signal_installed)
+ {
+ struct sigaction sa;
+
+ /* Initialize time_resolution before first call to timer_interrupt */
+ time_resolution = 0.75 / (f64) HZ;
+
+ memset (&sa, 0, sizeof (sa));
+ sa.sa_handler = timer_interrupt;
+
+ if (sigaction (TIMER_SIGNAL, &sa, 0) < 0)
+ clib_panic ("sigaction");
+
+ signal_installed = 1;
+ }
+
+ timer_block (&save);
+
+ /* Add new timer. */
+ vec_add2 (timers, t, 1);
+
+ t->time = unix_time_now () + dt;
+ t->func = func;
+ t->arg = arg;
+
+ {
+ word reset_timer = vec_len (timers) == 1;
+
+ if (_vec_len (timers) > 1)
+ {
+ reset_timer += t->time < (t - 1)->time;
+ sort_timers (timers);
+ }
+
+ if (reset_timer)
+ timer_interrupt (TIMER_SIGNAL);
+ }
+
+ timer_unblock (&save);
+}
+
+#ifdef TEST
+
+#include <sched.h>		/* for sched_yield () used in main () */
+#include <vppinfra/random.h>
+
+/* Compute average delay of function calls to foo.
+ If this is a small number over a lot of iterations we know
+ the code is working. */
+
+static f64 ave_delay = 0;
+static word ave_delay_count = 0;
+
+always_inline void
+update (f64 delay)
+{
+ ave_delay += delay;
+ ave_delay_count += 1;
+}
+
+typedef struct
+{
+ f64 time_requested, time_called;
+} foo_t;
+
+static f64 foo_base_time = 0;
+static foo_t *foos = 0;
+
+void
+foo (any arg, f64 delay)
+{
+ foos[arg].time_called = unix_time_now () - foo_base_time;
+ update (delay);
+}
+
+typedef struct
+{
+ word count;
+ word limit;
+} bar_t;
+
+void
+bar (any arg, f64 delay)
+{
+ bar_t *b = (bar_t *) arg;
+
+ fformat (stdout, "bar %d delay %g\n", b->count++, delay);
+
+ update (delay);
+ if (b->count < b->limit)
+ timer_call (bar, arg, random_f64 ());
+}
+
+int
+main (int argc, char *argv[])
+{
+ word i, n = atoi (argv[1]);
+ word run_foo = argc > 2;
+  bar_t b = { .limit = 10 };
+
+ if (run_foo)
+ {
+ f64 time_limit;
+
+ time_limit = atof (argv[2]);
+
+ vec_resize (foos, n);
+ for (i = 0; i < n; i++)
+ {
+ foos[i].time_requested = time_limit * random_f64 ();
+ foos[i].time_called = 1e100;
+ }
+
+ foo_base_time = unix_time_now ();
+ for (i = 0; i < n; i++)
+ timer_call (foo, i, foos[i].time_requested);
+ }
+ else
+ timer_call (bar, (any) & b, random_f64 ());
+
+ while (vec_len (timers) > 0)
+ sched_yield ();
+
+ if (vec_len (foos) > 0)
+ {
+ f64 min = 1e100, max = -min;
+ f64 ave = 0, rms = 0;
+
+ for (i = 0; i < n; i++)
+ {
+ f64 dt = foos[i].time_requested - foos[i].time_called;
+ if (dt < min)
+ min = dt;
+ if (dt > max)
+ max = dt;
+ ave += dt;
+ rms += dt * dt;
+ }
+ ave /= n;
+ rms = sqrt (rms / n - ave * ave);
+ fformat (stdout, "error min %g max %g ave %g +- %g\n", min, max, ave,
+ rms);
+ }
+
+ fformat (stdout, "%d function calls, ave. timer delay %g secs\n",
+ ave_delay_count, ave_delay / ave_delay_count);
+
+ return 0;
+}
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/timer.h b/src/vppinfra/timer.h
new file mode 100644
index 00000000..764103f7
--- /dev/null
+++ b/src/vppinfra/timer.h
@@ -0,0 +1,46 @@
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_timer_h
+#define included_timer_h
+
+#include <signal.h>
+
+typedef void (timer_func_t) (any arg, f64 delay);
+
+/* Arrange for function to be called after time interval (in seconds) has elapsed. */
+extern void timer_call (timer_func_t * func, any arg, f64 time_interval);
+
+/* Block/unblock timer interrupts. */
+extern void timer_block (sigset_t * save);
+extern void timer_unblock (sigset_t * save);
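+
+/* Usage sketch (illustrative only; my_expiry is a hypothetical
+   application callback, not part of this library):
+
+     static void my_expiry (any arg, f64 delay)
+     {
+       arg is the value passed to timer_call; delay is roughly how
+       late (in seconds) the callback fired relative to the request
+     }
+
+     timer_call (my_expiry, (any) 0, 1.5);
+*/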
+
+#endif /* included_timer_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/timing_wheel.c b/src/vppinfra/timing_wheel.c
new file mode 100644
index 00000000..064171ab
--- /dev/null
+++ b/src/vppinfra/timing_wheel.c
@@ -0,0 +1,759 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vppinfra/bitmap.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/timing_wheel.h>
+
+void
+timing_wheel_init (timing_wheel_t * w, u64 current_cpu_time,
+ f64 cpu_clocks_per_second)
+{
+ if (w->max_sched_time <= w->min_sched_time)
+ {
+ w->min_sched_time = 1e-6;
+ w->max_sched_time = 1e-3;
+ }
+
+ w->cpu_clocks_per_second = cpu_clocks_per_second;
+ w->log2_clocks_per_bin =
+ max_log2 (w->cpu_clocks_per_second * w->min_sched_time);
+ w->log2_bins_per_wheel =
+ max_log2 (w->cpu_clocks_per_second * w->max_sched_time);
+ w->log2_bins_per_wheel -= w->log2_clocks_per_bin;
+ w->log2_clocks_per_wheel = w->log2_bins_per_wheel + w->log2_clocks_per_bin;
+ w->bins_per_wheel = 1 << w->log2_bins_per_wheel;
+ w->bins_per_wheel_mask = w->bins_per_wheel - 1;
+
+ w->current_time_index = current_cpu_time >> w->log2_clocks_per_bin;
+
+ if (w->n_wheel_elt_time_bits <= 0 ||
+ w->n_wheel_elt_time_bits >= STRUCT_BITS_OF (timing_wheel_elt_t,
+ cpu_time_relative_to_base))
+ w->n_wheel_elt_time_bits =
+ STRUCT_BITS_OF (timing_wheel_elt_t, cpu_time_relative_to_base) - 1;
+
+ w->cpu_time_base = current_cpu_time;
+ w->time_index_next_cpu_time_base_update
+ =
+ w->current_time_index +
+ ((u64) 1 << (w->n_wheel_elt_time_bits - w->log2_clocks_per_bin));
+}
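+
+/* Example of the geometry computed by timing_wheel_init (illustrative
+   numbers only): with cpu_clocks_per_second = 3e9, min_sched_time = 1e-6
+   and max_sched_time = 1e-3, log2_clocks_per_bin = max_log2 (3e3) = 12
+   and max_log2 (3e6) = 22, so log2_bins_per_wheel = 22 - 12 = 10:
+   each wheel level has 1024 bins of 4096 clocks (~1.4 usec) each. */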
+
+always_inline uword
+get_level_and_relative_time (timing_wheel_t * w, u64 cpu_time,
+ uword * rtime_result)
+{
+ u64 dt, rtime;
+ uword level_index;
+
+ dt = (cpu_time >> w->log2_clocks_per_bin);
+
+ /* Time should always move forward. */
+ ASSERT (dt >= w->current_time_index);
+
+ dt -= w->current_time_index;
+
+ /* Find level and offset within level. Level i has bins of size 2^((i+1)*M) */
+ rtime = dt;
+ for (level_index = 0; (rtime >> w->log2_bins_per_wheel) != 0; level_index++)
+ rtime = (rtime >> w->log2_bins_per_wheel) - 1;
+
+ /* Return offset within level and level index. */
+ ASSERT (rtime < w->bins_per_wheel);
+ *rtime_result = rtime;
+ return level_index;
+}
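+
+/* Example for get_level_and_relative_time (illustrative, assuming
+   log2_bins_per_wheel = 10): a timer dt = 500 bins in the future stays
+   on level 0 with rtime = 500; dt = 5000 gives 5000 >> 10 = 4, so it
+   lands on level 1 with rtime = 4 - 1 = 3. */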
+
+always_inline uword
+time_index_to_wheel_index (timing_wheel_t * w, uword level_index, u64 ti)
+{
+ return (ti >> (level_index * w->log2_bins_per_wheel)) &
+ w->bins_per_wheel_mask;
+}
+
+/* Find current time on this level. */
+always_inline uword
+current_time_wheel_index (timing_wheel_t * w, uword level_index)
+{
+ return time_index_to_wheel_index (w, level_index, w->current_time_index);
+}
+
+/* Circular wheel indexing. */
+always_inline uword
+wheel_add (timing_wheel_t * w, word x)
+{
+ return x & w->bins_per_wheel_mask;
+}
+
+always_inline uword
+rtime_to_wheel_index (timing_wheel_t * w, uword level_index, uword rtime)
+{
+ uword t = current_time_wheel_index (w, level_index);
+ return wheel_add (w, t + rtime);
+}
+
+static clib_error_t *
+validate_level (timing_wheel_t * w, uword level_index, uword * n_elts)
+{
+ timing_wheel_level_t *level;
+ timing_wheel_elt_t *e;
+ uword wi;
+ clib_error_t *error = 0;
+
+#define _(x) \
+ do { \
+ error = CLIB_ERROR_ASSERT (x); \
+ ASSERT (! error); \
+ if (error) return error; \
+ } while (0)
+
+ level = vec_elt_at_index (w->levels, level_index);
+ for (wi = 0; wi < vec_len (level->elts); wi++)
+ {
+ /* Validate occupancy bitmap. */
+ _(clib_bitmap_get_no_check (level->occupancy_bitmap, wi) ==
+ (vec_len (level->elts[wi]) > 0));
+
+ *n_elts += vec_len (level->elts[wi]);
+
+ vec_foreach (e, level->elts[wi])
+ {
+ /* Validate time bin and level. */
+ u64 e_time;
+ uword e_ti, e_li, e_wi;
+
+ e_time = e->cpu_time_relative_to_base + w->cpu_time_base;
+ e_li = get_level_and_relative_time (w, e_time, &e_ti);
+ e_wi = rtime_to_wheel_index (w, level_index, e_ti);
+
+ if (e_li == level_index - 1)
+ /* If this element was scheduled on the previous level
+ it must be wrapped. */
+ _(e_ti + current_time_wheel_index (w, level_index - 1)
+ >= w->bins_per_wheel);
+ else
+ {
+ _(e_li == level_index);
+ if (e_li == 0)
+ _(e_wi == wi);
+ else
+ _(e_wi == wi || e_wi + 1 == wi || e_wi - 1 == wi);
+ }
+ }
+ }
+
+#undef _
+
+ return error;
+}
+
+void
+timing_wheel_validate (timing_wheel_t * w)
+{
+ uword l;
+ clib_error_t *error = 0;
+ uword n_elts;
+
+ if (!w->validate)
+ return;
+
+ n_elts = pool_elts (w->overflow_pool);
+ for (l = 0; l < vec_len (w->levels); l++)
+ {
+ error = validate_level (w, l, &n_elts);
+ if (error)
+ clib_error_report (error);
+ }
+}
+
+always_inline void
+free_elt_vector (timing_wheel_t * w, timing_wheel_elt_t * ev)
+{
+ /* Poison free elements so we never use them by mistake. */
+ if (CLIB_DEBUG > 0)
+ memset (ev, ~0, vec_len (ev) * sizeof (ev[0]));
+ _vec_len (ev) = 0;
+ vec_add1 (w->free_elt_vectors, ev);
+}
+
+static timing_wheel_elt_t *
+insert_helper (timing_wheel_t * w, uword level_index, uword rtime)
+{
+ timing_wheel_level_t *level;
+ timing_wheel_elt_t *e;
+ uword wheel_index;
+
+ /* Circular buffer. */
+ vec_validate (w->levels, level_index);
+ level = vec_elt_at_index (w->levels, level_index);
+
+ if (PREDICT_FALSE (!level->elts))
+ {
+ uword max = w->bins_per_wheel - 1;
+ clib_bitmap_validate (level->occupancy_bitmap, max);
+ vec_validate (level->elts, max);
+ }
+
+ wheel_index = rtime_to_wheel_index (w, level_index, rtime);
+
+ level->occupancy_bitmap =
+ clib_bitmap_ori (level->occupancy_bitmap, wheel_index);
+
+ /* Allocate an elt vector from free list if there is one. */
+ if (!level->elts[wheel_index] && vec_len (w->free_elt_vectors))
+ level->elts[wheel_index] = vec_pop (w->free_elt_vectors);
+
+ /* Add element to vector for this time bin. */
+ vec_add2 (level->elts[wheel_index], e, 1);
+
+ return e;
+}
+
+/* Insert user data on wheel at given CPU time stamp. */
+static void
+timing_wheel_insert_helper (timing_wheel_t * w, u64 insert_cpu_time,
+ u32 user_data)
+{
+ timing_wheel_elt_t *e;
+ u64 dt;
+ uword rtime, level_index;
+
+ level_index = get_level_and_relative_time (w, insert_cpu_time, &rtime);
+
+ dt = insert_cpu_time - w->cpu_time_base;
+ if (PREDICT_TRUE (0 == (dt >> BITS (e->cpu_time_relative_to_base))))
+ {
+ e = insert_helper (w, level_index, rtime);
+ e->user_data = user_data;
+ e->cpu_time_relative_to_base = dt;
+ if (insert_cpu_time < w->cached_min_cpu_time_on_wheel)
+ w->cached_min_cpu_time_on_wheel = insert_cpu_time;
+ }
+ else
+ {
+ /* Time too far in the future: add to overflow vector. */
+ timing_wheel_overflow_elt_t *oe;
+ pool_get (w->overflow_pool, oe);
+ oe->user_data = user_data;
+ oe->cpu_time = insert_cpu_time;
+ }
+}
+
+always_inline uword
+elt_is_deleted (timing_wheel_t * w, u32 user_data)
+{
+ return (hash_elts (w->deleted_user_data_hash) > 0
+ && hash_get (w->deleted_user_data_hash, user_data));
+}
+
+static timing_wheel_elt_t *
+delete_user_data (timing_wheel_elt_t * elts, u32 user_data)
+{
+ uword found_match;
+ timing_wheel_elt_t *e, *new_elts;
+
+ /* Quickly scan to see if there are any elements to delete
+ in this bucket. */
+ found_match = 0;
+ vec_foreach (e, elts)
+ {
+ found_match = e->user_data == user_data;
+ if (found_match)
+ break;
+ }
+ if (!found_match)
+ return elts;
+
+ /* Re-scan to build vector of new elts with matching user_data deleted. */
+ new_elts = 0;
+ vec_foreach (e, elts)
+ {
+ if (e->user_data != user_data)
+ vec_add1 (new_elts, e[0]);
+ }
+
+ vec_free (elts);
+ return new_elts;
+}
+
+/* Insert user data on wheel at given CPU time stamp. */
+void
+timing_wheel_insert (timing_wheel_t * w, u64 insert_cpu_time, u32 user_data)
+{
+ /* Remove previously deleted elements. */
+ if (elt_is_deleted (w, user_data))
+ {
+ timing_wheel_level_t *l;
+ uword wi;
+
+ /* Delete elts with given user data so that stale events don't expire. */
+ vec_foreach (l, w->levels)
+ {
+ /* *INDENT-OFF* */
+ clib_bitmap_foreach (wi, l->occupancy_bitmap, ({
+ l->elts[wi] = delete_user_data (l->elts[wi], user_data);
+ if (vec_len (l->elts[wi]) == 0)
+ l->occupancy_bitmap = clib_bitmap_andnoti (l->occupancy_bitmap, wi);
+ }));
+ /* *INDENT-ON* */
+ }
+
+ {
+ timing_wheel_overflow_elt_t *oe;
+ /* *INDENT-OFF* */
+ pool_foreach (oe, w->overflow_pool, ({
+ if (oe->user_data == user_data)
+ pool_put (w->overflow_pool, oe);
+ }));
+ /* *INDENT-ON* */
+ }
+
+ hash_unset (w->deleted_user_data_hash, user_data);
+ }
+
+ timing_wheel_insert_helper (w, insert_cpu_time, user_data);
+}
+
+void
+timing_wheel_delete (timing_wheel_t * w, u32 user_data)
+{
+ if (!w->deleted_user_data_hash)
+ w->deleted_user_data_hash =
+ hash_create ( /* capacity */ 0, /* value bytes */ 0);
+
+ hash_set1 (w->deleted_user_data_hash, user_data);
+}
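+
+/* Note that deletion is lazy: the handle is only recorded in
+   deleted_user_data_hash. The element itself is dropped when its bin
+   expires or when the same user_data is re-inserted (see
+   timing_wheel_insert above). */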
+
+/* Returns time of next expiring element. */
+u64
+timing_wheel_next_expiring_elt_time (timing_wheel_t * w)
+{
+ timing_wheel_level_t *l;
+ timing_wheel_elt_t *e;
+ uword li, wi, wi0;
+ u32 min_dt;
+ u64 min_t;
+ uword wrapped = 0;
+
+ min_dt = ~0;
+ min_t = ~0ULL;
+ vec_foreach (l, w->levels)
+ {
+ if (!l->occupancy_bitmap)
+ continue;
+
+ li = l - w->levels;
+ wi0 = wi = current_time_wheel_index (w, li);
+ wrapped = 0;
+ while (1)
+ {
+ if (clib_bitmap_get_no_check (l->occupancy_bitmap, wi))
+ {
+ vec_foreach (e, l->elts[wi])
+ min_dt = clib_min (min_dt, e->cpu_time_relative_to_base);
+
+ if (wrapped && li + 1 < vec_len (w->levels))
+ {
+ uword wi1 = current_time_wheel_index (w, li + 1);
+ if (l[1].occupancy_bitmap
+ && clib_bitmap_get_no_check (l[1].occupancy_bitmap, wi1))
+ {
+ vec_foreach (e, l[1].elts[wi1])
+ {
+ min_dt =
+ clib_min (min_dt, e->cpu_time_relative_to_base);
+ }
+ }
+ }
+
+ min_t = w->cpu_time_base + min_dt;
+ goto done;
+ }
+
+ wi = wheel_add (w, wi + 1);
+ if (wi == wi0)
+ break;
+
+ /* Note when the scan wraps past the end of this level. */
+ wrapped = wi < wi0;
+ }
+ }
+
+ {
+ timing_wheel_overflow_elt_t *oe;
+
+ if (min_dt != ~0)
+ min_t = w->cpu_time_base + min_dt;
+
+ /* *INDENT-OFF* */
+ pool_foreach (oe, w->overflow_pool,
+ ({ min_t = clib_min (min_t, oe->cpu_time); }));
+ /* *INDENT-ON* */
+
+ done:
+ return min_t;
+ }
+}
+
+static inline void
+insert_elt (timing_wheel_t * w, timing_wheel_elt_t * e)
+{
+ u64 t = w->cpu_time_base + e->cpu_time_relative_to_base;
+ timing_wheel_insert_helper (w, t, e->user_data);
+}
+
+always_inline u64
+elt_cpu_time (timing_wheel_t * w, timing_wheel_elt_t * e)
+{
+ return w->cpu_time_base + e->cpu_time_relative_to_base;
+}
+
+always_inline void
+validate_expired_elt (timing_wheel_t * w, timing_wheel_elt_t * e,
+ u64 current_cpu_time)
+{
+ if (CLIB_DEBUG > 0)
+ {
+ u64 e_time = elt_cpu_time (w, e);
+
+ /* Verify that element is actually expired. */
+ ASSERT ((e_time >> w->log2_clocks_per_bin)
+ <= (current_cpu_time >> w->log2_clocks_per_bin));
+ }
+}
+
+static u32 *
+expire_bin (timing_wheel_t * w,
+ uword level_index,
+ uword wheel_index, u64 advance_cpu_time, u32 * expired_user_data)
+{
+ timing_wheel_level_t *level = vec_elt_at_index (w->levels, level_index);
+ timing_wheel_elt_t *e;
+ u32 *x;
+ uword i, j, e_len;
+
+ e = vec_elt (level->elts, wheel_index);
+ e_len = vec_len (e);
+
+ vec_add2 (expired_user_data, x, e_len);
+ for (i = j = 0; i < e_len; i++)
+ {
+ validate_expired_elt (w, &e[i], advance_cpu_time);
+ x[j] = e[i].user_data;
+
+ /* Only advance if elt is not to be deleted. */
+ j += !elt_is_deleted (w, e[i].user_data);
+ }
+
+ /* Adjust for deleted elts. */
+ if (j < e_len)
+ _vec_len (expired_user_data) -= e_len - j;
+
+ free_elt_vector (w, e);
+
+ level->elts[wheel_index] = 0;
+ clib_bitmap_set_no_check (level->occupancy_bitmap, wheel_index, 0);
+
+ return expired_user_data;
+}
+
+/* Called rarely. 32 bit times should only overflow every 4 seconds or so on a fast machine. */
+static u32 *
+advance_cpu_time_base (timing_wheel_t * w, u32 * expired_user_data)
+{
+ timing_wheel_level_t *l;
+ timing_wheel_elt_t *e;
+ u64 delta;
+
+ w->stats.cpu_time_base_advances++;
+ delta = ((u64) 1 << w->n_wheel_elt_time_bits);
+ w->cpu_time_base += delta;
+ w->time_index_next_cpu_time_base_update += delta >> w->log2_clocks_per_bin;
+
+ vec_foreach (l, w->levels)
+ {
+ uword wi;
+ /* *INDENT-OFF* */
+ clib_bitmap_foreach (wi, l->occupancy_bitmap, ({
+ vec_foreach (e, l->elts[wi])
+ {
+ /* This should always be true since otherwise we would have already expired
+ this element. Note that in the second half of this function we need
+ to take care not to place the expired elements ourselves. */
+ ASSERT (e->cpu_time_relative_to_base >= delta);
+ e->cpu_time_relative_to_base -= delta;
+ }
+ }));
+ /* *INDENT-ON* */
+ }
+
+ /* See which overflow elements fit now. */
+ {
+ timing_wheel_overflow_elt_t *oe;
+ /* *INDENT-OFF* */
+ pool_foreach (oe, w->overflow_pool, ({
+ /* It fits now into 32 bits. */
+ if (0 == ((oe->cpu_time - w->cpu_time_base) >> BITS (e->cpu_time_relative_to_base)))
+ {
+ u64 ti = oe->cpu_time >> w->log2_clocks_per_bin;
+ if (ti <= w->current_time_index)
+ {
+ /* This can happen when timing wheel is not advanced for a long time
+ (for example when at a gdb breakpoint for a while). */
+ /* Note: the ti == w->current_time_index means it is also an expired timer */
+ if (! elt_is_deleted (w, oe->user_data))
+ vec_add1 (expired_user_data, oe->user_data);
+ }
+ else
+ timing_wheel_insert_helper (w, oe->cpu_time, oe->user_data);
+ pool_put (w->overflow_pool, oe);
+ }
+ }));
+ /* *INDENT-ON* */
+ }
+ return expired_user_data;
+}
+
+static u32 *
+refill_level (timing_wheel_t * w,
+ uword level_index,
+ u64 advance_cpu_time,
+ uword from_wheel_index,
+ uword to_wheel_index, u32 * expired_user_data)
+{
+ timing_wheel_level_t *level;
+ timing_wheel_elt_t *to_insert = w->unexpired_elts_pending_insert;
+ u64 advance_time_index = advance_cpu_time >> w->log2_clocks_per_bin;
+
+ vec_validate (w->stats.refills, level_index);
+ w->stats.refills[level_index] += 1;
+
+ if (level_index + 1 >= vec_len (w->levels))
+ goto done;
+
+ level = vec_elt_at_index (w->levels, level_index + 1);
+ if (!level->occupancy_bitmap)
+ goto done;
+
+ while (1)
+ {
+ timing_wheel_elt_t *e, *es;
+
+ if (clib_bitmap_get_no_check
+ (level->occupancy_bitmap, from_wheel_index))
+ {
+ es = level->elts[from_wheel_index];
+ level->elts[from_wheel_index] = 0;
+ clib_bitmap_set_no_check (level->occupancy_bitmap, from_wheel_index,
+ 0);
+
+ vec_foreach (e, es)
+ {
+ u64 e_time = elt_cpu_time (w, e);
+ u64 ti = e_time >> w->log2_clocks_per_bin;
+ if (ti <= advance_time_index)
+ {
+ validate_expired_elt (w, e, advance_cpu_time);
+ if (!elt_is_deleted (w, e->user_data))
+ vec_add1 (expired_user_data, e->user_data);
+ }
+ else
+ vec_add1 (to_insert, e[0]);
+ }
+ free_elt_vector (w, es);
+ }
+
+ if (from_wheel_index == to_wheel_index)
+ break;
+
+ from_wheel_index = wheel_add (w, from_wheel_index + 1);
+ }
+
+ timing_wheel_validate (w);
+done:
+ w->unexpired_elts_pending_insert = to_insert;
+ return expired_user_data;
+}
+
+/* Advance wheel and return any expired user data in vector. */
+u32 *
+timing_wheel_advance (timing_wheel_t * w, u64 advance_cpu_time,
+ u32 * expired_user_data,
+ u64 * next_expiring_element_cpu_time)
+{
+ timing_wheel_level_t *level;
+ uword level_index, advance_rtime, advance_level_index, advance_wheel_index;
+ uword n_expired_user_data_before;
+ u64 current_time_index, advance_time_index;
+
+ n_expired_user_data_before = vec_len (expired_user_data);
+
+ /* Re-fill lower levels when time wraps. */
+ current_time_index = w->current_time_index;
+ advance_time_index = advance_cpu_time >> w->log2_clocks_per_bin;
+
+ {
+ u64 current_ti, advance_ti;
+
+ current_ti = current_time_index >> w->log2_bins_per_wheel;
+ advance_ti = advance_time_index >> w->log2_bins_per_wheel;
+
+ if (PREDICT_FALSE (current_ti != advance_ti))
+ {
+ if (w->unexpired_elts_pending_insert)
+ _vec_len (w->unexpired_elts_pending_insert) = 0;
+
+ level_index = 0;
+ while (current_ti != advance_ti)
+ {
+ uword c, a;
+ c = current_ti & (w->bins_per_wheel - 1);
+ a = advance_ti & (w->bins_per_wheel - 1);
+ if (c != a)
+ expired_user_data = refill_level (w,
+ level_index,
+ advance_cpu_time,
+ c, a, expired_user_data);
+ current_ti >>= w->log2_bins_per_wheel;
+ advance_ti >>= w->log2_bins_per_wheel;
+ level_index++;
+ }
+ }
+ }
+
+ advance_level_index =
+ get_level_and_relative_time (w, advance_cpu_time, &advance_rtime);
+ advance_wheel_index =
+ rtime_to_wheel_index (w, advance_level_index, advance_rtime);
+
+ /* Empty all occupied bins for entire levels that we advance past. */
+ for (level_index = 0; level_index < advance_level_index; level_index++)
+ {
+ uword wi;
+
+ if (level_index >= vec_len (w->levels))
+ break;
+
+ level = vec_elt_at_index (w->levels, level_index);
+ /* *INDENT-OFF* */
+ clib_bitmap_foreach (wi, level->occupancy_bitmap, ({
+ expired_user_data = expire_bin (w, level_index, wi, advance_cpu_time,
+ expired_user_data);
+ }));
+ /* *INDENT-ON* */
+ }
+
+ if (PREDICT_TRUE (level_index < vec_len (w->levels)))
+ {
+ uword wi;
+ level = vec_elt_at_index (w->levels, level_index);
+ wi = current_time_wheel_index (w, level_index);
+ if (level->occupancy_bitmap)
+ while (1)
+ {
+ if (clib_bitmap_get_no_check (level->occupancy_bitmap, wi))
+ expired_user_data =
+ expire_bin (w, advance_level_index, wi, advance_cpu_time,
+ expired_user_data);
+
+ /* When we jump out, we have already expired the bin
+    corresponding to advance_wheel_index. */
+ if (wi == advance_wheel_index)
+ break;
+
+ wi = wheel_add (w, wi + 1);
+ }
+ }
+
+ /* Advance current time index. */
+ w->current_time_index = advance_time_index;
+
+ if (vec_len (w->unexpired_elts_pending_insert) > 0)
+ {
+ timing_wheel_elt_t *e;
+ vec_foreach (e, w->unexpired_elts_pending_insert) insert_elt (w, e);
+ _vec_len (w->unexpired_elts_pending_insert) = 0;
+ }
+
+ /* Don't advance until necessary. */
+ /* However, if the timing_wheel_advance() hasn't been called for some time,
+ the while() loop will ensure multiple calls to advance_cpu_time_base()
+ in a row until the w->cpu_time_base is fresh enough. */
+ while (PREDICT_FALSE
+ (advance_time_index >= w->time_index_next_cpu_time_base_update))
+ expired_user_data = advance_cpu_time_base (w, expired_user_data);
+
+ if (next_expiring_element_cpu_time)
+ {
+ u64 min_t;
+
+ /* Anything expired? If so we need to recompute next expiring elt time. */
+ if (vec_len (expired_user_data) == n_expired_user_data_before
+ && w->cached_min_cpu_time_on_wheel != 0ULL)
+ min_t = w->cached_min_cpu_time_on_wheel;
+ else
+ {
+ min_t = timing_wheel_next_expiring_elt_time (w);
+ w->cached_min_cpu_time_on_wheel = min_t;
+ }
+
+ *next_expiring_element_cpu_time = min_t;
+ }
+
+ return expired_user_data;
+}
+
+u8 *
+format_timing_wheel (u8 * s, va_list * va)
+{
+ timing_wheel_t *w = va_arg (*va, timing_wheel_t *);
+ int verbose = va_arg (*va, int);
+ uword indent = format_get_indent (s);
+
+ s = format (s, "level 0: %.4e - %.4e secs, 2^%d - 2^%d clocks",
+ (f64) (1 << w->log2_clocks_per_bin) / w->cpu_clocks_per_second,
+ (f64) (1 << w->log2_clocks_per_wheel) /
+ w->cpu_clocks_per_second, w->log2_clocks_per_bin,
+ w->log2_clocks_per_wheel);
+
+ if (verbose)
+ {
+ int l;
+
+ s = format (s, "\n%Utime base advances %Ld, every %.4e secs",
+ format_white_space, indent + 2,
+ w->stats.cpu_time_base_advances,
+ (f64) ((u64) 1 << w->n_wheel_elt_time_bits) /
+ w->cpu_clocks_per_second);
+
+ for (l = 0; l < vec_len (w->levels); l++)
+ s = format (s, "\n%Ulevel %d: refills %Ld",
+ format_white_space, indent + 2,
+ l,
+ l < vec_len (w->stats.refills)
+ ? w->stats.refills[l] : (u64) 0);
+ }
+
+ return s;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/timing_wheel.h b/src/vppinfra/timing_wheel.h
new file mode 100644
index 00000000..7daea994
--- /dev/null
+++ b/src/vppinfra/timing_wheel.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef included_clib_timing_wheel_h
+#define included_clib_timing_wheel_h
+
+#include <vppinfra/format.h>
+
+typedef struct
+{
+ /* Time of this element in units of cpu clock ticks relative to time
+    base. 32 bits should be large enough for several kilo-seconds
+    to elapse before we have to re-set the time base. */
+ u32 cpu_time_relative_to_base;
+
+ /* User data to store in this bin. */
+ u32 user_data;
+} timing_wheel_elt_t;
+
+/* Overflow wheel elements where time does not fit into 32 bits. */
+typedef struct
+{
+ /* Absolute time of this element. */
+ u64 cpu_time;
+
+ /* User data to store in this bin. */
+ u32 user_data;
+
+ u32 pad;
+} timing_wheel_overflow_elt_t;
+
+typedef struct
+{
+ /* 2^M bits: 1 means vector is non-zero else zero. */
+ uword *occupancy_bitmap;
+
+ /* 2^M element table of element vectors, one for each time bin. */
+ timing_wheel_elt_t **elts;
+} timing_wheel_level_t;
+
+typedef struct
+{
+ /* Vector of refill counts per level. */
+ u64 *refills;
+
+ /* Number of times cpu time base was rescaled. */
+ u64 cpu_time_base_advances;
+} timing_wheel_stats_t;
+
+typedef struct
+{
+ /* Each bin spans 2^N clock ticks, with N chosen so that
+    2^N >= min_sched_time * cpu_clocks_per_second. */
+ u8 log2_clocks_per_bin;
+
+ /* Each wheel level has 2^M bins, with M chosen so that
+    2^(N+M) >= max_sched_time * cpu_clocks_per_second. */
+ u8 log2_bins_per_wheel;
+
+ /* N + M. */
+ u8 log2_clocks_per_wheel;
+
+ /* Number of bits to use in cpu_time_relative_to_base field
+ of timing_wheel_elt_t. */
+ u8 n_wheel_elt_time_bits;
+
+ /* 2^M. */
+ u32 bins_per_wheel;
+
+ /* 2^M - 1. */
+ u32 bins_per_wheel_mask;
+
+ timing_wheel_level_t *levels;
+
+ timing_wheel_overflow_elt_t *overflow_pool;
+
+ /* Free list of element vectors so we can recycle previously allocated vectors. */
+ timing_wheel_elt_t **free_elt_vectors;
+
+ timing_wheel_elt_t *unexpired_elts_pending_insert;
+
+ /* Hash table of user data values which have been deleted but not yet re-inserted. */
+ uword *deleted_user_data_hash;
+
+ /* Enable validation for debugging. */
+ u32 validate;
+
+ /* Time index. Measures time in units of 2^N clock ticks from
+ when wheel starts. */
+ u64 current_time_index;
+
+ /* All times are 32 bit numbers relative to cpu_time_base.
+    So, roughly every 2^32 clocks we'll need to subtract from
+    all timing_wheel_elt_t times to make sure they never overflow. */
+ u64 cpu_time_base;
+
+ /* When current_time_index is >= this we update cpu_time_base
+ to avoid overflowing 32 bit cpu_time_relative_to_base
+ in timing_wheel_elt_t. */
+ u64 time_index_next_cpu_time_base_update;
+
+ /* Cached earliest element on wheel; 0 if not valid. */
+ u64 cached_min_cpu_time_on_wheel;
+
+ f64 min_sched_time, max_sched_time, cpu_clocks_per_second;
+
+ timing_wheel_stats_t stats;
+} timing_wheel_t;
+
+/* Initialization function. */
+void timing_wheel_init (timing_wheel_t * w,
+ u64 current_cpu_time, f64 cpu_clocks_per_second);
+
+/* Insert user data on wheel at given CPU time stamp. */
+void timing_wheel_insert (timing_wheel_t * w, u64 insert_cpu_time,
+ u32 user_data);
+
+/* Delete user data from wheel (until it is again inserted). */
+void timing_wheel_delete (timing_wheel_t * w, u32 user_data);
+
+/* Advance wheel and return any expired user data in a vector. If
+   min_next_expiring_element_cpu_time is non-null, it is set to a cpu
+   time stamp before which no element on the wheel can expire. */
+u32 *timing_wheel_advance (timing_wheel_t * w, u64 advance_cpu_time,
+ u32 * expired_user_data,
+ u64 * min_next_expiring_element_cpu_time);
+
+/* Returns absolute time in clock cycles of next expiring element. */
+u64 timing_wheel_next_expiring_elt_time (timing_wheel_t * w);
+
+/* Format a timing wheel. */
+format_function_t format_timing_wheel;
+
+/* Testing function to validate wheel. */
+void timing_wheel_validate (timing_wheel_t * w);
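+
+/* Usage sketch (illustrative only; clib_cpu_time_now () from
+   vppinfra/time.h is the usual cpu time stamp source):
+
+     timing_wheel_t w = { 0 };
+     u32 *expired = 0;
+
+     w.min_sched_time = 10e-6;
+     w.max_sched_time = 1.0;
+     timing_wheel_init (&w, clib_cpu_time_now (), 3e9);
+     timing_wheel_insert (&w, clib_cpu_time_now () + 30000, 0x1234);
+     expired = timing_wheel_advance (&w, clib_cpu_time_now (),
+                                     expired, 0);
+*/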
+
+#endif /* included_clib_timing_wheel_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/tw_timer_16t_1w_2048sl.c b/src/vppinfra/tw_timer_16t_1w_2048sl.c
new file mode 100644
index 00000000..3f342045
--- /dev/null
+++ b/src/vppinfra/tw_timer_16t_1w_2048sl.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vppinfra/error.h>
+#include "tw_timer_16t_1w_2048sl.h"
+#include "tw_timer_template.c"
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/tw_timer_16t_1w_2048sl.h b/src/vppinfra/tw_timer_16t_1w_2048sl.h
new file mode 100644
index 00000000..761646b3
--- /dev/null
+++ b/src/vppinfra/tw_timer_16t_1w_2048sl.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __included_tw_timer_16t_1w_2048sl_h__
+#define __included_tw_timer_16t_1w_2048sl_h__
+
+/* ... So that a client app can create multiple wheel geometries */
+#undef TW_TIMER_WHEELS
+#undef TW_SLOTS_PER_RING
+#undef TW_RING_SHIFT
+#undef TW_RING_MASK
+#undef TW_TIMERS_PER_OBJECT
+#undef LOG2_TW_TIMERS_PER_OBJECT
+#undef TW_SUFFIX
+#undef TW_OVERFLOW_VECTOR
+#undef TW_FAST_WHEEL_BITMAP
+#undef TW_TIMER_ALLOW_DUPLICATE_STOP
+#undef TW_START_STOP_TRACE_SIZE
+
+#define TW_TIMER_WHEELS 1
+#define TW_SLOTS_PER_RING 2048
+#define TW_RING_SHIFT 11
+#define TW_RING_MASK (TW_SLOTS_PER_RING -1)
+#define TW_TIMERS_PER_OBJECT 16
+#define LOG2_TW_TIMERS_PER_OBJECT 4
+#define TW_SUFFIX _16t_1w_2048sl
+#define TW_FAST_WHEEL_BITMAP 0
+#define TW_TIMER_ALLOW_DUPLICATE_STOP 0
+
+#include <vppinfra/tw_timer_template.h>
+
+#endif /* __included_tw_timer_16t_1w_2048sl_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/tw_timer_16t_2w_512sl.c b/src/vppinfra/tw_timer_16t_2w_512sl.c
new file mode 100644
index 00000000..ad1b9a4a
--- /dev/null
+++ b/src/vppinfra/tw_timer_16t_2w_512sl.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vppinfra/error.h>
+#include "tw_timer_16t_2w_512sl.h"
+#include "tw_timer_template.c"
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/tw_timer_16t_2w_512sl.h b/src/vppinfra/tw_timer_16t_2w_512sl.h
new file mode 100644
index 00000000..029f529d
--- /dev/null
+++ b/src/vppinfra/tw_timer_16t_2w_512sl.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __included_tw_timer_16t_2w_512sl_h__
+#define __included_tw_timer_16t_2w_512sl_h__
+
+/* ... So that a client app can create multiple wheel geometries */
+#undef TW_TIMER_WHEELS
+#undef TW_SLOTS_PER_RING
+#undef TW_RING_SHIFT
+#undef TW_RING_MASK
+#undef TW_TIMERS_PER_OBJECT
+#undef LOG2_TW_TIMERS_PER_OBJECT
+#undef TW_SUFFIX
+#undef TW_OVERFLOW_VECTOR
+#undef TW_FAST_WHEEL_BITMAP
+#undef TW_TIMER_ALLOW_DUPLICATE_STOP
+#undef TW_START_STOP_TRACE_SIZE
+
+#define TW_TIMER_WHEELS 2
+#define TW_SLOTS_PER_RING 512
+#define TW_RING_SHIFT 9
+#define TW_RING_MASK (TW_SLOTS_PER_RING -1)
+#define TW_TIMERS_PER_OBJECT 16
+#define LOG2_TW_TIMERS_PER_OBJECT 4
+#define TW_SUFFIX _16t_2w_512sl
+#define TW_FAST_WHEEL_BITMAP 0
+#define TW_TIMER_ALLOW_DUPLICATE_STOP 1
+
+#include <vppinfra/tw_timer_template.h>
+
+#endif /* __included_tw_timer_16t_2w_512sl_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/tw_timer_1t_3w_1024sl_ov.c b/src/vppinfra/tw_timer_1t_3w_1024sl_ov.c
new file mode 100644
index 00000000..8a65752c
--- /dev/null
+++ b/src/vppinfra/tw_timer_1t_3w_1024sl_ov.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vppinfra/error.h>
+#include "tw_timer_1t_3w_1024sl_ov.h"
+#include "tw_timer_template.c"
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/tw_timer_1t_3w_1024sl_ov.h b/src/vppinfra/tw_timer_1t_3w_1024sl_ov.h
new file mode 100644
index 00000000..0b455e02
--- /dev/null
+++ b/src/vppinfra/tw_timer_1t_3w_1024sl_ov.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __included_tw_timer_1t_3w_1024sl_ov_h__
+#define __included_tw_timer_1t_3w_1024sl_ov_h__
+
+/* ... So that a client app can create multiple wheel geometries */
+#undef TW_TIMER_WHEELS
+#undef TW_SLOTS_PER_RING
+#undef TW_RING_SHIFT
+#undef TW_RING_MASK
+#undef TW_TIMERS_PER_OBJECT
+#undef LOG2_TW_TIMERS_PER_OBJECT
+#undef TW_SUFFIX
+#undef TW_OVERFLOW_VECTOR
+#undef TW_FAST_WHEEL_BITMAP
+#undef TW_TIMER_ALLOW_DUPLICATE_STOP
+#undef TW_START_STOP_TRACE_SIZE
+
+#define TW_TIMER_WHEELS 3
+#define TW_SLOTS_PER_RING 1024
+#define TW_RING_SHIFT 10
+#define TW_RING_MASK (TW_SLOTS_PER_RING -1)
+#define TW_TIMERS_PER_OBJECT 1
+#define LOG2_TW_TIMERS_PER_OBJECT 0
+#define TW_SUFFIX _1t_3w_1024sl_ov
+#define TW_OVERFLOW_VECTOR 1
+#define TW_FAST_WHEEL_BITMAP 1
+#define TW_TIMER_ALLOW_DUPLICATE_STOP 1
+
+#include <vppinfra/tw_timer_template.h>
+
+#endif /* __included_tw_timer_1t_3w_1024sl_ov_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/tw_timer_2t_1w_2048sl.c b/src/vppinfra/tw_timer_2t_1w_2048sl.c
new file mode 100644
index 00000000..79d293e1
--- /dev/null
+++ b/src/vppinfra/tw_timer_2t_1w_2048sl.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vppinfra/error.h>
+#include "tw_timer_2t_1w_2048sl.h"
+#include "tw_timer_template.c"
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/tw_timer_2t_1w_2048sl.h b/src/vppinfra/tw_timer_2t_1w_2048sl.h
new file mode 100644
index 00000000..6ae86688
--- /dev/null
+++ b/src/vppinfra/tw_timer_2t_1w_2048sl.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __included_tw_timer_2t_1w_2048sl_h__
+#define __included_tw_timer_2t_1w_2048sl_h__
+
+/* ... So that a client app can create multiple wheel geometries */
+#undef TW_TIMER_WHEELS
+#undef TW_SLOTS_PER_RING
+#undef TW_RING_SHIFT
+#undef TW_RING_MASK
+#undef TW_TIMERS_PER_OBJECT
+#undef LOG2_TW_TIMERS_PER_OBJECT
+#undef TW_SUFFIX
+#undef TW_OVERFLOW_VECTOR
+#undef TW_FAST_WHEEL_BITMAP
+#undef TW_TIMER_ALLOW_DUPLICATE_STOP
+#undef TW_START_STOP_TRACE_SIZE
+
+#define TW_TIMER_WHEELS 1
+#define TW_SLOTS_PER_RING 2048
+#define TW_RING_SHIFT 11
+#define TW_RING_MASK (TW_SLOTS_PER_RING -1)
+#define TW_TIMERS_PER_OBJECT 2
+#define LOG2_TW_TIMERS_PER_OBJECT 1
+#define TW_SUFFIX _2t_1w_2048sl
+#define TW_FAST_WHEEL_BITMAP 0
+#define TW_TIMER_ALLOW_DUPLICATE_STOP 0
+
+#include <vppinfra/tw_timer_template.h>
+
+#endif /* __included_tw_timer_2t_1w_2048sl_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/tw_timer_4t_3w_256sl.c b/src/vppinfra/tw_timer_4t_3w_256sl.c
new file mode 100644
index 00000000..73bb34b2
--- /dev/null
+++ b/src/vppinfra/tw_timer_4t_3w_256sl.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vppinfra/error.h>
+#include "tw_timer_4t_3w_256sl.h"
+#include "tw_timer_template.c"
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/tw_timer_4t_3w_256sl.h b/src/vppinfra/tw_timer_4t_3w_256sl.h
new file mode 100644
index 00000000..16c41bcd
--- /dev/null
+++ b/src/vppinfra/tw_timer_4t_3w_256sl.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __included_tw_timer_4t_3w_256sl_h__
+#define __included_tw_timer_4t_3w_256sl_h__
+
+/* ... So that a client app can create multiple wheel geometries */
+#undef TW_TIMER_WHEELS
+#undef TW_SLOTS_PER_RING
+#undef TW_RING_SHIFT
+#undef TW_RING_MASK
+#undef TW_TIMERS_PER_OBJECT
+#undef LOG2_TW_TIMERS_PER_OBJECT
+#undef TW_SUFFIX
+#undef TW_OVERFLOW_VECTOR
+#undef TW_FAST_WHEEL_BITMAP
+#undef TW_TIMER_ALLOW_DUPLICATE_STOP
+#undef TW_START_STOP_TRACE_SIZE
+
+#define TW_TIMER_WHEELS 3
+#define TW_SLOTS_PER_RING 256
+#define TW_RING_SHIFT 8
+#define TW_RING_MASK (TW_SLOTS_PER_RING -1)
+#define TW_TIMERS_PER_OBJECT 4
+#define LOG2_TW_TIMERS_PER_OBJECT 2
+#define TW_SUFFIX _4t_3w_256sl
+#define TW_FAST_WHEEL_BITMAP 0
+#define TW_TIMER_ALLOW_DUPLICATE_STOP 0
+
+#include <vppinfra/tw_timer_template.h>
+
+#endif /* __included_tw_timer_4t_3w_256sl_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/tw_timer_4t_3w_4sl_ov.c b/src/vppinfra/tw_timer_4t_3w_4sl_ov.c
new file mode 100644
index 00000000..e2af7b5d
--- /dev/null
+++ b/src/vppinfra/tw_timer_4t_3w_4sl_ov.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This wheel geometry is not prima facie useful, except for testing
+ */
+
+#if TW_TIMER_TEST_GEOMETRY > 0
+#include <vppinfra/error.h>
+#include "tw_timer_4t_3w_4sl_ov.h"
+#include "tw_timer_template.c"
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/tw_timer_4t_3w_4sl_ov.h b/src/vppinfra/tw_timer_4t_3w_4sl_ov.h
new file mode 100644
index 00000000..845ffeac
--- /dev/null
+++ b/src/vppinfra/tw_timer_4t_3w_4sl_ov.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __included_tw_timer_4t_3w_4sl_ov_h__
+#define __included_tw_timer_4t_3w_4sl_ov_h__
+
+/* ... So that a client app can create multiple wheel geometries */
+#undef TW_TIMER_WHEELS
+#undef TW_SLOTS_PER_RING
+#undef TW_RING_SHIFT
+#undef TW_RING_MASK
+#undef TW_TIMERS_PER_OBJECT
+#undef LOG2_TW_TIMERS_PER_OBJECT
+#undef TW_SUFFIX
+#undef TW_OVERFLOW_VECTOR
+#undef TW_FAST_WHEEL_BITMAP
+#undef TW_TIMER_ALLOW_DUPLICATE_STOP
+#undef TW_START_STOP_TRACE_SIZE
+
+#define TW_TIMER_WHEELS 3
+#define TW_SLOTS_PER_RING 4
+#define TW_RING_SHIFT 2
+#define TW_RING_MASK (TW_SLOTS_PER_RING -1)
+#define TW_TIMERS_PER_OBJECT 4
+#define LOG2_TW_TIMERS_PER_OBJECT 2
+#define TW_SUFFIX _4t_3w_4sl_ov
+#define TW_OVERFLOW_VECTOR 1
+#define TW_FAST_WHEEL_BITMAP 0
+#define TW_TIMER_ALLOW_DUPLICATE_STOP 0
+
+#include <vppinfra/tw_timer_template.h>
+
+#endif /* __included_tw_timer_4t_3w_4sl_ov_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/tw_timer_template.c b/src/vppinfra/tw_timer_template.c
new file mode 100644
index 00000000..abad3718
--- /dev/null
+++ b/src/vppinfra/tw_timer_template.c
@@ -0,0 +1,832 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** @file
+ * @brief TW timer implementation TEMPLATE ONLY, do not compile directly
+ */
+#if TW_START_STOP_TRACE_SIZE > 0
+
+void TW (tw_timer_trace) (TWT (tw_timer_wheel) * tw, u32 timer_id,
+ u32 pool_index, u32 handle)
+{
+ TWT (trace) * t = &tw->traces[tw->trace_index];
+
+ t->timer_id = timer_id;
+ t->pool_index = pool_index;
+ t->handle = handle;
+
+ tw->trace_index++;
+ if (tw->trace_index == TW_START_STOP_TRACE_SIZE)
+ {
+ tw->trace_index = 0;
+ tw->trace_wrapped++;
+ }
+}
+
+void TW (tw_search_trace) (TWT (tw_timer_wheel) * tw, u32 handle)
+{
+ u32 i, start_pos;
+ TWT (trace) * t;
+ char *s = "bogus!";
+
+ /* reverse search for the supplied handle */
+
+ start_pos = tw->trace_index;
+ if (start_pos == 0)
+ start_pos = TW_START_STOP_TRACE_SIZE - 1;
+ else
+ start_pos--;
+
+ for (i = start_pos; i > 0; i--)
+ {
+ t = &tw->traces[i];
+ if (t->handle == handle)
+ {
+ switch (t->timer_id)
+ {
+ case 0xFF:
+ s = "stopped";
+ break;
+ case 0xFE:
+ s = "expired";
+ break;
+ default:
+ s = "started";
+ break;
+ }
+ fformat (stderr, "handle 0x%x (%d) %s at trace %d\n",
+ handle, handle, s, i);
+ }
+ }
+ if (tw->trace_wrapped > 0)
+ {
+ /* Scan from the last valid trace slot to avoid reading past the array. */
+ for (i = TW_START_STOP_TRACE_SIZE - 1; i >= tw->trace_index; i--)
+ {
+ t = &tw->traces[i];
+ if (t->handle == handle)
+ {
+ switch (t->timer_id)
+ {
+ case 0xFF:
+ s = "stopped";
+ break;
+ case 0xFE:
+ s = "expired";
+ break;
+ default:
+ s = "started";
+ break;
+ }
+ fformat (stderr, "handle 0x%x (%d) %s at trace %d\n",
+ handle, handle, s, i);
+ }
+ }
+ }
+}
+#endif /* TW_START_STOP_TRACE_SIZE > 0 */
+
+static inline u32
+TW (make_internal_timer_handle) (u32 pool_index, u32 timer_id)
+{
+ u32 handle;
+
+ ASSERT (timer_id < TW_TIMERS_PER_OBJECT);
+#if LOG2_TW_TIMERS_PER_OBJECT > 0
+ ASSERT (pool_index < (1 << (32 - LOG2_TW_TIMERS_PER_OBJECT)));
+
+ handle = (timer_id << (32 - LOG2_TW_TIMERS_PER_OBJECT)) | (pool_index);
+#else
+ handle = pool_index;
+#endif
+ return handle;
+}
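+
+/* Example for make_internal_timer_handle (illustrative): with
+   LOG2_TW_TIMERS_PER_OBJECT = 4, pool_index 7 and timer_id 3 pack
+   into 0x30000007: the timer id occupies the top 4 bits, the pool
+   index the remaining 28. */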
+
+static inline void
+timer_addhead (TWT (tw_timer) * pool, u32 head_index, u32 new_index)
+{
+ TWT (tw_timer) * head = pool_elt_at_index (pool, head_index);
+ TWT (tw_timer) * old_first;
+ u32 old_first_index;
+ TWT (tw_timer) * new;
+
+ new = pool_elt_at_index (pool, new_index);
+
+ if (PREDICT_FALSE (head->next == head_index))
+ {
+ head->next = head->prev = new_index;
+ new->next = new->prev = head_index;
+ return;
+ }
+
+ old_first_index = head->next;
+ old_first = pool_elt_at_index (pool, old_first_index);
+
+ new->next = old_first_index;
+ new->prev = old_first->prev;
+ old_first->prev = new_index;
+ head->next = new_index;
+}
+
+static inline void
+timer_remove (TWT (tw_timer) * pool, u32 index)
+{
+ TWT (tw_timer) * elt = pool_elt_at_index (pool, index);
+ TWT (tw_timer) * next_elt, *prev_elt;
+
+ ASSERT (elt->user_handle != ~0);
+
+ next_elt = pool_elt_at_index (pool, elt->next);
+ prev_elt = pool_elt_at_index (pool, elt->prev);
+
+ next_elt->prev = elt->prev;
+ prev_elt->next = elt->next;
+
+ elt->prev = elt->next = ~0;
+}
+
+/**
+ * @brief Start a Tw Timer
+ * @param tw_timer_wheel_t * tw timer wheel object pointer
+ * @param u32 pool_index user pool index, presumably for a tw session
+ * @param u32 timer_id app-specific timer ID. 4 bits.
+ * @param u64 interval timer interval in ticks
+ * @returns handle needed to cancel the timer
+ */
+u32
+TW (tw_timer_start) (TWT (tw_timer_wheel) * tw, u32 pool_index, u32 timer_id,
+ u64 interval)
+{
+#if TW_TIMER_WHEELS > 1
+ u16 slow_ring_offset;
+ u32 carry;
+#endif
+#if TW_TIMER_WHEELS > 2
+ u16 glacier_ring_offset;
+#endif
+#if TW_OVERFLOW_VECTOR > 0
+ u64 interval_plus_time_to_wrap, triple_wrap_mask;
+#endif
+ u16 fast_ring_offset;
+ tw_timer_wheel_slot_t *ts;
+ TWT (tw_timer) * t;
+
+ ASSERT (interval);
+
+ pool_get (tw->timers, t);
+ memset (t, 0xff, sizeof (*t));
+
+ t->user_handle = TW (make_internal_timer_handle) (pool_index, timer_id);
+
+ /* Factor interval into 1..3 wheel offsets */
+#if TW_TIMER_WHEELS > 2
+#if TW_OVERFLOW_VECTOR > 0
+ /*
+ * This is tricky. Put a timer onto the overflow
+ * vector if the interval PLUS the time
+ * until the next triple-wrap exceeds one full revolution
+ * of all three wheels.
+ */
+ triple_wrap_mask = (1 << (3 * TW_RING_SHIFT)) - 1;
+ interval_plus_time_to_wrap =
+ interval + (tw->current_tick & triple_wrap_mask);
+ if ((interval_plus_time_to_wrap >= 1 << (3 * TW_RING_SHIFT)))
+ {
+ t->expiration_time = tw->current_tick + interval;
+ ts = &tw->overflow;
+ timer_addhead (tw->timers, ts->head_index, t - tw->timers);
+#if TW_START_STOP_TRACE_SIZE > 0
+ TW (tw_timer_trace) (tw, timer_id, pool_index, t - tw->timers);
+#endif
+ return t - tw->timers;
+ }
+#endif
+
+ glacier_ring_offset = interval >> (2 * TW_RING_SHIFT);
+ ASSERT ((u64) glacier_ring_offset < TW_SLOTS_PER_RING);
+ interval -= (((u64) glacier_ring_offset) << (2 * TW_RING_SHIFT));
+#endif
+#if TW_TIMER_WHEELS > 1
+ slow_ring_offset = interval >> TW_RING_SHIFT;
+ ASSERT ((u64) slow_ring_offset < TW_SLOTS_PER_RING);
+ interval -= (((u64) slow_ring_offset) << TW_RING_SHIFT);
+#endif
+ fast_ring_offset = interval & TW_RING_MASK;
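+
+ /* Worked example (illustrative, 3 wheels, TW_RING_SHIFT = 8,
+    ignoring the current wheel positions added below): interval =
+    70000 ticks factors as glacier = 70000 >> 16 = 1, leaving 4464;
+    then slow = 4464 >> 8 = 17, leaving fast = 112. */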
+
+ /*
+ * Account for the current wheel positions(s)
+ * This is made slightly complicated by the fact that the current
+ * index vector will contain (TW_SLOTS_PER_RING, ...) when
+ * the actual position is (0, ...)
+ */
+
+ fast_ring_offset += tw->current_index[TW_TIMER_RING_FAST] & TW_RING_MASK;
+
+#if TW_TIMER_WHEELS > 1
+ carry = fast_ring_offset >= TW_SLOTS_PER_RING ? 1 : 0;
+ fast_ring_offset %= TW_SLOTS_PER_RING;
+ slow_ring_offset += (tw->current_index[TW_TIMER_RING_SLOW] & TW_RING_MASK)
+ + carry;
+ carry = slow_ring_offset >= TW_SLOTS_PER_RING ? 1 : 0;
+ slow_ring_offset %= TW_SLOTS_PER_RING;
+#endif
+
+#if TW_TIMER_WHEELS > 2
+ glacier_ring_offset +=
+ (tw->current_index[TW_TIMER_RING_GLACIER] & TW_RING_MASK) + carry;
+ glacier_ring_offset %= TW_SLOTS_PER_RING;
+#endif
+
+#if TW_TIMER_WHEELS > 2
+ if (glacier_ring_offset !=
+ (tw->current_index[TW_TIMER_RING_GLACIER] & TW_RING_MASK))
+ {
+ /* We'll need slow and fast ring offsets later */
+ t->slow_ring_offset = slow_ring_offset;
+ t->fast_ring_offset = fast_ring_offset;
+
+ ts = &tw->w[TW_TIMER_RING_GLACIER][glacier_ring_offset];
+
+ timer_addhead (tw->timers, ts->head_index, t - tw->timers);
+#if TW_START_STOP_TRACE_SIZE > 0
+ TW (tw_timer_trace) (tw, timer_id, pool_index, t - tw->timers);
+#endif
+ return t - tw->timers;
+ }
+#endif
+
+#if TW_TIMER_WHEELS > 1
+ /* Timer expires more than 51.2 seconds from now? */
+ if (slow_ring_offset !=
+ (tw->current_index[TW_TIMER_RING_SLOW] & TW_RING_MASK))
+ {
+ /* We'll need the fast ring offset later... */
+ t->fast_ring_offset = fast_ring_offset;
+
+ ts = &tw->w[TW_TIMER_RING_SLOW][slow_ring_offset];
+
+ timer_addhead (tw->timers, ts->head_index, t - tw->timers);
+#if TW_START_STOP_TRACE_SIZE > 0
+ TW (tw_timer_trace) (tw, timer_id, pool_index, t - tw->timers);
+#endif
+ return t - tw->timers;
+ }
+#else
+ fast_ring_offset %= TW_SLOTS_PER_RING;
+#endif
+
+ /* Timer expires less than one fast-ring revolution from now */
+ ts = &tw->w[TW_TIMER_RING_FAST][fast_ring_offset];
+
+ timer_addhead (tw->timers, ts->head_index, t - tw->timers);
+
+#if TW_FAST_WHEEL_BITMAP
+ tw->fast_slot_bitmap = clib_bitmap_set (tw->fast_slot_bitmap,
+ fast_ring_offset, 1);
+#endif
+#if TW_START_STOP_TRACE_SIZE > 0
+ TW (tw_timer_trace) (tw, timer_id, pool_index, t - tw->timers);
+#endif
+ return t - tw->timers;
+}
+
+#if TW_TIMER_SCAN_FOR_HANDLE > 0
+int TW (scan_for_handle) (TWT (tw_timer_wheel) * tw, u32 handle)
+{
+ int i, j;
+ tw_timer_wheel_slot_t *ts;
+ TWT (tw_timer) * t, *head;
+ u32 next_index;
+ int rv = 0;
+
+ for (i = 0; i < TW_TIMER_WHEELS; i++)
+ {
+ for (j = 0; j < TW_SLOTS_PER_RING; j++)
+ {
+ ts = &tw->w[i][j];
+ head = pool_elt_at_index (tw->timers, ts->head_index);
+ next_index = head->next;
+
+ while (next_index != ts->head_index)
+ {
+ t = pool_elt_at_index (tw->timers, next_index);
+ if (next_index == handle)
+ {
+ clib_warning ("handle %d found in ring %d slot %d",
+ handle, i, j);
+ clib_warning ("user handle 0x%x", t->user_handle);
+ rv = 1;
+ }
+ next_index = t->next;
+ }
+ }
+ }
+ return rv;
+}
+#endif /* TW_TIMER_SCAN_FOR_HANDLE */
+
+/**
+ * @brief Stop a tw timer
+ * @param tw_timer_wheel_t * tw timer wheel object pointer
+ * @param u32 handle timer cancellation returned by tw_timer_start
+ */
+void TW (tw_timer_stop) (TWT (tw_timer_wheel) * tw, u32 handle)
+{
+ TWT (tw_timer) * t;
+
+#if TW_TIMER_ALLOW_DUPLICATE_STOP
+ /*
+ * A vlib process may have its timer expire, and receive
+ * an event before the expiration is processed.
+ * That results in a duplicate tw_timer_stop.
+ */
+ if (pool_is_free_index (tw->timers, handle))
+ return;
+#endif
+#if TW_START_STOP_TRACE_SIZE > 0
+ TW (tw_timer_trace) (tw, ~0, ~0, handle);
+#endif
+
+ t = pool_elt_at_index (tw->timers, handle);
+
+ /* in case of idiotic handle (e.g. passing a listhead index) */
+ ASSERT (t->user_handle != ~0);
+
+ timer_remove (tw->timers, handle);
+
+ pool_put_index (tw->timers, handle);
+}
+
+/**
+ * @brief Initialize a tw timer wheel template instance
+ * @param tw_timer_wheel_t * tw timer wheel object pointer
+ * @param void * expired_timer_callback. Passed a u32 * vector of
+ * expired timer handles. The callback is optional.
+ * @param f64 timer_interval_in_seconds
+ */
+void
+TW (tw_timer_wheel_init) (TWT (tw_timer_wheel) * tw,
+ void *expired_timer_callback,
+ f64 timer_interval_in_seconds, u32 max_expirations)
+{
+ int ring, slot;
+ tw_timer_wheel_slot_t *ts;
+ TWT (tw_timer) * t;
+ memset (tw, 0, sizeof (*tw));
+ tw->expired_timer_callback = expired_timer_callback;
+ tw->max_expirations = max_expirations;
+ if (timer_interval_in_seconds == 0.0)
+ {
+ clib_warning ("timer interval is zero");
+ abort ();
+ }
+ tw->timer_interval = timer_interval_in_seconds;
+ tw->ticks_per_second = 1.0 / timer_interval_in_seconds;
+ tw->first_expires_tick = ~0ULL;
+
+ vec_validate (tw->expired_timer_handles, 0);
+ _vec_len (tw->expired_timer_handles) = 0;
+
+ for (ring = 0; ring < TW_TIMER_WHEELS; ring++)
+ {
+ for (slot = 0; slot < TW_SLOTS_PER_RING; slot++)
+ {
+ ts = &tw->w[ring][slot];
+ pool_get (tw->timers, t);
+ memset (t, 0xff, sizeof (*t));
+ t->next = t->prev = t - tw->timers;
+ ts->head_index = t - tw->timers;
+ }
+ }
+
+#if TW_OVERFLOW_VECTOR > 0
+ ts = &tw->overflow;
+ pool_get (tw->timers, t);
+ memset (t, 0xff, sizeof (*t));
+ t->next = t->prev = t - tw->timers;
+ ts->head_index = t - tw->timers;
+#endif
+}
+
+/**
+ * @brief Free a tw timer wheel template instance
+ * @param tw_timer_wheel_t * tw timer wheel object pointer
+ */
+void TW (tw_timer_wheel_free) (TWT (tw_timer_wheel) * tw)
+{
+ int i, j;
+ tw_timer_wheel_slot_t *ts;
+ TWT (tw_timer) * head, *t;
+ u32 next_index;
+
+ for (i = 0; i < TW_TIMER_WHEELS; i++)
+ {
+ for (j = 0; j < TW_SLOTS_PER_RING; j++)
+ {
+ ts = &tw->w[i][j];
+ head = pool_elt_at_index (tw->timers, ts->head_index);
+ next_index = head->next;
+
+ while (next_index != ts->head_index)
+ {
+ t = pool_elt_at_index (tw->timers, next_index);
+ next_index = t->next;
+ pool_put (tw->timers, t);
+ }
+ pool_put (tw->timers, head);
+ }
+ }
+
+#if TW_OVERFLOW_VECTOR > 0
+ ts = &tw->overflow;
+ head = pool_elt_at_index (tw->timers, ts->head_index);
+ next_index = head->next;
+
+ while (next_index != ts->head_index)
+ {
+ t = pool_elt_at_index (tw->timers, next_index);
+ next_index = t->next;
+ pool_put (tw->timers, t);
+ }
+ pool_put (tw->timers, head);
+#endif
+
+ memset (tw, 0, sizeof (*tw));
+}
+
+/**
+ * @brief Advance a tw timer wheel. Calls the expired timer callback
+ * as needed. This routine should be called once every timer_interval seconds
+ * @param tw_timer_wheel_t * tw timer wheel template instance pointer
+ * @param f64 now the current time, e.g. from vlib_time_now(vm)
+ * @returns u32 * vector of expired user handles
+ */
+static inline
+ u32 * TW (tw_timer_expire_timers_internal) (TWT (tw_timer_wheel) * tw,
+ f64 now,
+ u32 * callback_vector_arg)
+{
+ u32 nticks, i;
+ tw_timer_wheel_slot_t *ts;
+ TWT (tw_timer) * t, *head;
+ u32 *callback_vector;
+ u32 fast_wheel_index;
+ u32 next_index;
+ u32 slow_wheel_index __attribute__ ((unused));
+ u32 glacier_wheel_index __attribute__ ((unused));
+
+ /* Shouldn't happen */
+ if (PREDICT_FALSE (now < tw->next_run_time))
+ return callback_vector_arg;
+
+ /* Number of ticks which have occurred */
+ nticks = tw->ticks_per_second * (now - tw->last_run_time);
+ if (nticks == 0)
+ return callback_vector_arg;
+
+ /* Remember when we ran, compute next runtime */
+ tw->next_run_time = (now + tw->timer_interval);
+
+ if (callback_vector_arg == 0)
+ {
+ _vec_len (tw->expired_timer_handles) = 0;
+ callback_vector = tw->expired_timer_handles;
+ }
+ else
+ callback_vector = callback_vector_arg;
+
+ for (i = 0; i < nticks; i++)
+ {
+ fast_wheel_index = tw->current_index[TW_TIMER_RING_FAST];
+ if (TW_TIMER_WHEELS > 1)
+ slow_wheel_index = tw->current_index[TW_TIMER_RING_SLOW];
+ if (TW_TIMER_WHEELS > 2)
+ glacier_wheel_index = tw->current_index[TW_TIMER_RING_GLACIER];
+
+#if TW_OVERFLOW_VECTOR > 0
+ /* Triple odometer-click? Process the overflow vector... */
+ if (PREDICT_FALSE (fast_wheel_index == TW_SLOTS_PER_RING
+ && slow_wheel_index == TW_SLOTS_PER_RING
+ && glacier_wheel_index == TW_SLOTS_PER_RING))
+ {
+ u64 interval;
+ u32 new_glacier_ring_offset, new_slow_ring_offset;
+ u32 new_fast_ring_offset;
+
+ ts = &tw->overflow;
+ head = pool_elt_at_index (tw->timers, ts->head_index);
+ next_index = head->next;
+
+ /* Make slot empty */
+ head->next = head->prev = ts->head_index;
+
+ /* traverse slot, place timers wherever they go */
+ while (next_index != head - tw->timers)
+ {
+ t = pool_elt_at_index (tw->timers, next_index);
+ next_index = t->next;
+
+ /* Remove from the overflow vector (hammer) */
+ t->next = t->prev = ~0;
+
+ ASSERT (t->expiration_time >= tw->current_tick);
+
+ interval = t->expiration_time - tw->current_tick;
+
+ /* Right back onto the overflow vector? */
+ if (interval >= (1 << (3 * TW_RING_SHIFT)))
+ {
+ ts = &tw->overflow;
+ timer_addhead (tw->timers, ts->head_index, t - tw->timers);
+ continue;
+ }
+ /* Compute ring offsets */
+ new_glacier_ring_offset = interval >> (2 * TW_RING_SHIFT);
+
+ interval -= (new_glacier_ring_offset << (2 * TW_RING_SHIFT));
+
+ /* Note: the wheels are at (0,0,0), no add-with-carry needed */
+ new_slow_ring_offset = interval >> TW_RING_SHIFT;
+ interval -= (new_slow_ring_offset << TW_RING_SHIFT);
+ new_fast_ring_offset = interval & TW_RING_MASK;
+ t->slow_ring_offset = new_slow_ring_offset;
+ t->fast_ring_offset = new_fast_ring_offset;
+
+ /* Timer expires Right Now */
+ if (PREDICT_FALSE (t->slow_ring_offset == 0 &&
+ t->fast_ring_offset == 0 &&
+ new_glacier_ring_offset == 0))
+ {
+ vec_add1 (callback_vector, t->user_handle);
+#if TW_START_STOP_TRACE_SIZE > 0
+ TW (tw_timer_trace) (tw, 0xfe, t->user_handle,
+ t - tw->timers);
+#endif
+ pool_put (tw->timers, t);
+ }
+ /* Timer moves to the glacier ring */
+ else if (new_glacier_ring_offset)
+ {
+ ts = &tw->w[TW_TIMER_RING_GLACIER][new_glacier_ring_offset];
+ timer_addhead (tw->timers, ts->head_index, t - tw->timers);
+ }
+ /* Timer moves to the slow ring */
+ else if (t->slow_ring_offset)
+ {
+ /* Add to slow ring */
+ ts = &tw->w[TW_TIMER_RING_SLOW][t->slow_ring_offset];
+ timer_addhead (tw->timers, ts->head_index, t - tw->timers);
+ }
+	      /* Timer moves to the fast ring */
+ else
+ {
+ ts = &tw->w[TW_TIMER_RING_FAST][t->fast_ring_offset];
+ timer_addhead (tw->timers, ts->head_index, t - tw->timers);
+#if TW_FAST_WHEEL_BITMAP
+ tw->fast_slot_bitmap =
+ clib_bitmap_set (tw->fast_slot_bitmap,
+ t->fast_ring_offset, 1);
+#endif
+ }
+ }
+ }
+#endif
+
+#if TW_TIMER_WHEELS > 2
+ /*
+ * Double odometer-click? Process one slot in the glacier ring...
+ */
+ if (PREDICT_FALSE (fast_wheel_index == TW_SLOTS_PER_RING
+ && slow_wheel_index == TW_SLOTS_PER_RING))
+ {
+ glacier_wheel_index %= TW_SLOTS_PER_RING;
+ ts = &tw->w[TW_TIMER_RING_GLACIER][glacier_wheel_index];
+
+ head = pool_elt_at_index (tw->timers, ts->head_index);
+ next_index = head->next;
+
+ /* Make slot empty */
+ head->next = head->prev = ts->head_index;
+
+ /* traverse slot, deal timers into slow ring */
+ while (next_index != head - tw->timers)
+ {
+ t = pool_elt_at_index (tw->timers, next_index);
+ next_index = t->next;
+
+ /* Remove from glacier ring slot (hammer) */
+ t->next = t->prev = ~0;
+
+ /* Timer expires Right Now */
+ if (PREDICT_FALSE (t->slow_ring_offset == 0 &&
+ t->fast_ring_offset == 0))
+ {
+ vec_add1 (callback_vector, t->user_handle);
+#if TW_START_STOP_TRACE_SIZE > 0
+ TW (tw_timer_trace) (tw, 0xfe, t->user_handle,
+ t - tw->timers);
+#endif
+ pool_put (tw->timers, t);
+ }
+ /* Timer expires during slow-wheel tick 0 */
+ else if (PREDICT_FALSE (t->slow_ring_offset == 0))
+ {
+ ts = &tw->w[TW_TIMER_RING_FAST][t->fast_ring_offset];
+ timer_addhead (tw->timers, ts->head_index, t - tw->timers);
+#if TW_FAST_WHEEL_BITMAP
+ tw->fast_slot_bitmap =
+ clib_bitmap_set (tw->fast_slot_bitmap,
+ t->fast_ring_offset, 1);
+#endif
+ }
+ else /* typical case */
+ {
+ /* Add to slow ring */
+ ts = &tw->w[TW_TIMER_RING_SLOW][t->slow_ring_offset];
+ timer_addhead (tw->timers, ts->head_index, t - tw->timers);
+ }
+ }
+ }
+#endif
+
+#if TW_TIMER_WHEELS > 1
+ /*
+ * Single odometer-click? Process a slot in the slow ring,
+ */
+ if (PREDICT_FALSE (fast_wheel_index == TW_SLOTS_PER_RING))
+ {
+ slow_wheel_index %= TW_SLOTS_PER_RING;
+ ts = &tw->w[TW_TIMER_RING_SLOW][slow_wheel_index];
+
+ head = pool_elt_at_index (tw->timers, ts->head_index);
+ next_index = head->next;
+
+ /* Make slot empty */
+ head->next = head->prev = ts->head_index;
+
+ /* traverse slot, deal timers into fast ring */
+ while (next_index != head - tw->timers)
+ {
+ t = pool_elt_at_index (tw->timers, next_index);
+ next_index = t->next;
+
+	      /* Remove from slow ring slot (hammer) */
+ t->next = t->prev = ~0;
+
+ /* Timer expires Right Now */
+ if (PREDICT_FALSE (t->fast_ring_offset == 0))
+ {
+ vec_add1 (callback_vector, t->user_handle);
+#if TW_START_STOP_TRACE_SIZE > 0
+ TW (tw_timer_trace) (tw, 0xfe, t->user_handle,
+ t - tw->timers);
+#endif
+ pool_put (tw->timers, t);
+ }
+ else /* typical case */
+ {
+ /* Add to fast ring */
+ ts = &tw->w[TW_TIMER_RING_FAST][t->fast_ring_offset];
+ timer_addhead (tw->timers, ts->head_index, t - tw->timers);
+#if TW_FAST_WHEEL_BITMAP
+ tw->fast_slot_bitmap =
+ clib_bitmap_set (tw->fast_slot_bitmap,
+ t->fast_ring_offset, 1);
+#endif
+ }
+ }
+ }
+#endif
+
+ /* Handle the fast ring */
+ fast_wheel_index %= TW_SLOTS_PER_RING;
+ ts = &tw->w[TW_TIMER_RING_FAST][fast_wheel_index];
+
+ head = pool_elt_at_index (tw->timers, ts->head_index);
+ next_index = head->next;
+
+ /* Make slot empty */
+ head->next = head->prev = ts->head_index;
+
+ /* Construct vector of expired timer handles to give the user */
+ while (next_index != ts->head_index)
+ {
+ t = pool_elt_at_index (tw->timers, next_index);
+ next_index = t->next;
+ vec_add1 (callback_vector, t->user_handle);
+#if TW_START_STOP_TRACE_SIZE > 0
+ TW (tw_timer_trace) (tw, 0xfe, t->user_handle, t - tw->timers);
+#endif
+ pool_put (tw->timers, t);
+ }
+
+ /* If any timers expired, tell the user */
+ if (callback_vector_arg == 0 && vec_len (callback_vector))
+ {
+ /* The callback is optional. We return the u32 * handle vector */
+ if (tw->expired_timer_callback)
+ tw->expired_timer_callback (callback_vector);
+ tw->expired_timer_handles = callback_vector;
+ }
+
+#if TW_FAST_WHEEL_BITMAP
+ tw->fast_slot_bitmap = clib_bitmap_set (tw->fast_slot_bitmap,
+ fast_wheel_index, 0);
+#endif
+
+ tw->current_tick++;
+ fast_wheel_index++;
+ tw->current_index[TW_TIMER_RING_FAST] = fast_wheel_index;
+
+#if TW_TIMER_WHEELS > 1
+ if (PREDICT_FALSE (fast_wheel_index == TW_SLOTS_PER_RING))
+ slow_wheel_index++;
+ tw->current_index[TW_TIMER_RING_SLOW] = slow_wheel_index;
+#endif
+
+#if TW_TIMER_WHEELS > 2
+ if (PREDICT_FALSE (slow_wheel_index == TW_SLOTS_PER_RING))
+ glacier_wheel_index++;
+ tw->current_index[TW_TIMER_RING_GLACIER] = glacier_wheel_index;
+#endif
+
+ if (vec_len (callback_vector) >= tw->max_expirations)
+ break;
+ }
+
+ if (callback_vector_arg == 0)
+ tw->expired_timer_handles = callback_vector;
+
+ tw->last_run_time += i * tw->timer_interval;
+ return callback_vector;
+}
+
+u32 *TW (tw_timer_expire_timers) (TWT (tw_timer_wheel) * tw, f64 now)
+{
+ return TW (tw_timer_expire_timers_internal) (tw, now, 0 /* no vector */ );
+}
+
+u32 *TW (tw_timer_expire_timers_vec) (TWT (tw_timer_wheel) * tw, f64 now,
+ u32 * vec)
+{
+ return TW (tw_timer_expire_timers_internal) (tw, now, vec);
+}
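+
+/*
+ * Polling sketch ("tm", "vm" and the geometry suffix are
+ * illustrative). Expired handles may be consumed either via the
+ * registered callback or from the returned vector:
+ *
+ *   u32 *expired, i;
+ *
+ *   expired = tw_timer_expire_timers_2t_1w_2048sl (&tm->wheel,
+ *                                                  vlib_time_now (vm));
+ *   for (i = 0; i < vec_len (expired); i++)
+ *     ... process the timeout for expired[i] ...
+ */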
+
+#if TW_FAST_WHEEL_BITMAP
+/** Returns an approximation to the first timer expiration in
+ * timer-ticks from "now". To avoid wasting an unjustifiable
+ * amount of time on the problem, we maintain an approximate fast-wheel slot
+ * occupancy bitmap. We don't worry about clearing fast wheel bits
+ * when timers are removed from fast wheel slots.
+ */
+
+u32 TW (tw_timer_first_expires_in_ticks) (TWT (tw_timer_wheel) * tw)
+{
+ u32 first_expiring_index, fast_ring_index;
+ i32 delta;
+
+ if (clib_bitmap_is_zero (tw->fast_slot_bitmap))
+ return TW_SLOTS_PER_RING;
+
+ fast_ring_index = tw->current_index[TW_TIMER_RING_FAST];
+ if (fast_ring_index == TW_SLOTS_PER_RING)
+ fast_ring_index = 0;
+
+ first_expiring_index = clib_bitmap_next_set (tw->fast_slot_bitmap,
+ fast_ring_index);
+ if (first_expiring_index == ~0 && fast_ring_index != 0)
+ first_expiring_index = clib_bitmap_first_set (tw->fast_slot_bitmap);
+
+ ASSERT (first_expiring_index != ~0);
+
+ delta = (i32) first_expiring_index - (i32) fast_ring_index;
+ if (delta < 0)
+ delta += TW_SLOTS_PER_RING;
+
+ ASSERT (delta >= 0);
+
+ return (u32) delta;
+}
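+
+/*
+ * Usage sketch ("tw" is a wheel instance from a geometry compiled
+ * with TW_FAST_WHEEL_BITMAP; the suffixed name is illustrative).
+ * Stale bits are never cleared, so the reported tick count is never
+ * later than the true first expiration, making it a safe upper bound
+ * on an event-loop sleep:
+ *
+ *   u32 ticks = tw_timer_first_expires_in_ticks_2t_1w_2048sl (&tw);
+ *   f64 max_sleep = (f64) ticks * tw.timer_interval;
+ */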
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/tw_timer_template.h b/src/vppinfra/tw_timer_template.h
new file mode 100644
index 00000000..0217644d
--- /dev/null
+++ b/src/vppinfra/tw_timer_template.h
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TW_SUFFIX
+#error do not include tw_timer_template.h directly
+#endif
+
+#include <vppinfra/clib.h>
+#include <vppinfra/pool.h>
+#include <vppinfra/bitmap.h>
+
+#ifndef _twt
+#define _twt(a,b) a##b##_t
+#define __twt(a,b) _twt(a,b)
+#define TWT(a) __twt(a,TW_SUFFIX)
+
+#define _tw(a,b) a##b
+#define __tw(a,b) _tw(a,b)
+#define TW(a) __tw(a,TW_SUFFIX)
+#endif
+
+/** @file
+ @brief TW timer template header file, do not compile directly
+
+Instantiation of tw_timer_template.h generates named structures to
+implement specific timer wheel geometries. Choices include: number of
+timer wheels (currently 1, 2, or 3), number of slots per ring (a power of
+two), and the number of timers per "object handle".
+
+Internally, user object/timer handles are 32-bit integers, so if one
+selects 16 timers/object (4 bits), the resulting timer wheel handle is
+limited to 2**28 objects.
+
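+For example, with 2 timers per object the timer id occupies the top
+bit of the 32-bit handle; the expired-timer callback example below
+recovers both parts:
+
+    pool_index = handle & 0x7FFFFFFF;
+    timer_id = handle >> 31;
+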
+Here are the specific settings required to generate a single 2048 slot
+wheel which supports 2 timers per object:
+
+ #define TW_TIMER_WHEELS 1
+ #define TW_SLOTS_PER_RING 2048
+ #define TW_RING_SHIFT 11
+ #define TW_RING_MASK (TW_SLOTS_PER_RING -1)
+ #define TW_TIMERS_PER_OBJECT 2
+ #define LOG2_TW_TIMERS_PER_OBJECT 1
+ #define TW_SUFFIX _2t_1w_2048sl
+
+See tw_timer_2t_1w_2048sl.h for a complete
+example.
+
+tw_timer_template.h is not intended to be #included directly. Client
+code can include multiple timer geometry header files, although
+extreme caution would be required to use the TW and TWT macros in
+such a case.
+
+API usage example:
+
+Initialize a two-timer, single 2048-slot wheel w/ a 1-second
+timer granularity:
+
+ tw_timer_wheel_init_2t_1w_2048sl (&tm->single_wheel,
+ expired_timer_single_callback,
+ 1.0 / * timer interval * / );
+
+Start a timer:
+
+ handle = tw_timer_start_2t_1w_2048sl (&tm->single_wheel, elt_index,
+ [0 | 1] / * timer id * / ,
+ expiration_time_in_u32_ticks);
+
+Stop a timer:
+
+ tw_timer_stop_2t_1w_2048sl (&tm->single_wheel, handle);
+
+Expired timer callback:
+
+ static void
+ expired_timer_single_callback (u32 * expired_timers)
+ {
+ int i;
+ u32 pool_index, timer_id;
+ tw_timer_test_elt_t *e;
+ tw_timer_test_main_t *tm = &tw_timer_test_main;
+
+  for (i = 0; i < vec_len (expired_timers); i++)
+ {
+ pool_index = expired_timers[i] & 0x7FFFFFFF;
+ timer_id = expired_timers[i] >> 31;
+
+ ASSERT (timer_id == 1);
+
+ e = pool_elt_at_index (tm->test_elts, pool_index);
+
+ if (e->expected_to_expire != tm->single_wheel.current_tick)
+ {
+ fformat (stdout, "[%d] expired at %d not %d\n",
+ e - tm->test_elts, tm->single_wheel.current_tick,
+ e->expected_to_expire);
+ }
+ pool_put (tm->test_elts, e);
+ }
+ }
+ */
+
+#if (TW_TIMER_WHEELS != 1 && TW_TIMER_WHEELS != 2 && TW_TIMER_WHEELS != 3)
+#error TW_TIMER_WHEELS must be 1, 2 or 3
+#endif
+
+typedef struct
+{
+ /** next, previous pool indices */
+ u32 next;
+ u32 prev;
+
+ union
+ {
+ struct
+ {
+#if (TW_TIMER_WHEELS == 3)
+ /** fast ring offset, only valid in the slow ring */
+ u16 fast_ring_offset;
+ /** slow ring offset, only valid in the glacier ring */
+ u16 slow_ring_offset;
+#endif
+#if (TW_TIMER_WHEELS == 2)
+ /** fast ring offset, only valid in the slow ring */
+ u16 fast_ring_offset;
+      /** padding */
+ u16 pad;
+#endif
+ };
+
+#if (TW_OVERFLOW_VECTOR > 0)
+ u64 expiration_time;
+#endif
+ };
+
+ /** user timer handle */
+ u32 user_handle;
+} TWT (tw_timer);
+
+/*
+ * These structures are used by all geometries,
+ * so they need a private #include block...
+ */
+#ifndef __defined_tw_timer_wheel_slot__
+#define __defined_tw_timer_wheel_slot__
+typedef struct
+{
+ /** Listhead of timers which expire in this interval */
+ u32 head_index;
+} tw_timer_wheel_slot_t;
+typedef enum
+{
+ /** Fast timer ring ID */
+ TW_TIMER_RING_FAST,
+ /** Slow timer ring ID */
+ TW_TIMER_RING_SLOW,
+ /** Glacier ring ID */
+ TW_TIMER_RING_GLACIER,
+} tw_ring_index_t;
+#endif /* __defined_tw_timer_wheel_slot__ */
+
+typedef CLIB_PACKED (struct
+ {
+ u8 timer_id;
+ u32 pool_index;
+ u32 handle;
+ }) TWT (trace);
+
+typedef struct
+{
+ /** Timer pool */
+ TWT (tw_timer) * timers;
+
+ /** Next time the wheel should run */
+ f64 next_run_time;
+
+ /** Last time the wheel ran */
+ f64 last_run_time;
+
+ /** Timer ticks per second */
+ f64 ticks_per_second;
+
+ /** Timer interval, also needed to avoid fp divide in speed path */
+ f64 timer_interval;
+
+ /** current tick */
+ u64 current_tick;
+
+ /** first expiration time */
+ u64 first_expires_tick;
+
+ /** current wheel indices */
+ u32 current_index[TW_TIMER_WHEELS];
+
+ /** wheel arrays */
+ tw_timer_wheel_slot_t w[TW_TIMER_WHEELS][TW_SLOTS_PER_RING];
+
+#if TW_OVERFLOW_VECTOR > 0
+ tw_timer_wheel_slot_t overflow;
+#endif
+
+#if TW_FAST_WHEEL_BITMAP > 0
+ /** Fast wheel slot occupancy bitmap */
+ uword *fast_slot_bitmap;
+#endif
+
+ /** expired timer callback, receives a vector of handles */
+ void (*expired_timer_callback) (u32 * expired_timer_handles);
+
+ /** vectors of expired timers */
+ u32 *expired_timer_handles;
+
+ /** maximum expirations */
+ u32 max_expirations;
+
+#if TW_START_STOP_TRACE_SIZE > 0
+  /** Start/stop/expire tracing: current trace index */
+  u32 trace_index;
+  /** nonzero once the trace buffer has wrapped */
+  u32 trace_wrapped;
+  TWT (trace) traces[TW_START_STOP_TRACE_SIZE];
+#endif
+
+} TWT (tw_timer_wheel);
+
+u32 TW (tw_timer_start) (TWT (tw_timer_wheel) * tw,
+ u32 pool_index, u32 timer_id, u64 interval);
+
+void TW (tw_timer_stop) (TWT (tw_timer_wheel) * tw, u32 handle);
+
+void TW (tw_timer_wheel_init) (TWT (tw_timer_wheel) * tw,
+ void *expired_timer_callback,
+ f64 timer_interval, u32 max_expirations);
+
+void TW (tw_timer_wheel_free) (TWT (tw_timer_wheel) * tw);
+
+u32 *TW (tw_timer_expire_timers) (TWT (tw_timer_wheel) * tw, f64 now);
+u32 *TW (tw_timer_expire_timers_vec) (TWT (tw_timer_wheel) * tw, f64 now,
+ u32 * vec);
+#if TW_FAST_WHEEL_BITMAP
+u32 TW (tw_timer_first_expires_in_ticks) (TWT (tw_timer_wheel) * tw);
+#endif
+
+#if TW_START_STOP_TRACE_SIZE > 0
+void TW (tw_search_trace) (TWT (tw_timer_wheel) * tw, u32 handle);
+void TW (tw_timer_trace) (TWT (tw_timer_wheel) * tw, u32 timer_id,
+ u32 pool_index, u32 handle);
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/types.h b/src/vppinfra/types.h
new file mode 100644
index 00000000..f87bb48c
--- /dev/null
+++ b/src/vppinfra/types.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001-2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_clib_types_h
+#define included_clib_types_h
+
+/* Standard CLIB types. */
+
+/* Define signed and unsigned 8, 16, 32, and 64 bit types
+ and machine signed/unsigned word for all architectures. */
+typedef char i8;
+typedef short i16;
+
+/* Avoid conflicts with Linux asm/types.h when __KERNEL__ */
+#if defined(CLIB_LINUX_KERNEL)
+/* Linux also defines u8/u16/u32/u64 types. */
+#include <asm/types.h>
+#define CLIB_AVOID_CLASH_WITH_LINUX_TYPES
+
+#else /* ! CLIB_LINUX_KERNEL */
+
+typedef unsigned char u8;
+typedef unsigned short u16;
+#endif /* ! CLIB_LINUX_KERNEL */
+
+#if defined (__x86_64__)
+#ifndef __COVERITY__
+typedef int i128 __attribute__ ((mode (TI)));
+typedef unsigned int u128 __attribute__ ((mode (TI)));
+#endif
+#endif
+
+#if (defined(i386) || defined(_mips) || defined(powerpc) || defined (__SPU__) || defined(__sparc__) || defined(__arm__) || defined (__xtensa__) || defined(__TMS320C6X__))
+typedef int i32;
+typedef long long i64;
+
+#ifndef CLIB_AVOID_CLASH_WITH_LINUX_TYPES
+typedef unsigned int u32;
+typedef unsigned long long u64;
+#endif /* CLIB_AVOID_CLASH_WITH_LINUX_TYPES */
+
+#elif defined(_mips) && __mips == 64
+#define log2_uword_bits 6
+#define clib_address_bits _MIPS_SZPTR
+
+#elif defined(alpha) || defined(__x86_64__) || defined (__powerpc64__) || defined (__aarch64__)
+typedef int i32;
+typedef long i64;
+
+#define log2_uword_bits 6
+#define clib_address_bits 64
+
+#ifndef CLIB_AVOID_CLASH_WITH_LINUX_TYPES
+typedef unsigned int u32;
+typedef unsigned long u64;
+#endif /* CLIB_AVOID_CLASH_WITH_LINUX_TYPES */
+
+#else
+#error "can't define types"
+#endif
+
+/* Default to 32 bit machines with 32 bit addresses. */
+#ifndef log2_uword_bits
+#define log2_uword_bits 5
+#endif
+
+/* #ifdef's above define log2_uword_bits. */
+#define uword_bits (1 << log2_uword_bits)
+
+#ifndef clib_address_bits
+#define clib_address_bits 32
+#endif
+
+/* Word types. */
+#if uword_bits == 64
+/* 64 bit word machines. */
+typedef i64 word;
+typedef u64 uword;
+#else
+/* 32 bit word machines. */
+typedef i32 word;
+typedef u32 uword;
+#endif
+
+/* integral type of a pointer (used to cast pointers). */
+#if clib_address_bits == 64
+typedef u64 clib_address_t;
+#else
+typedef u32 clib_address_t;
+#endif
+
+/* These are needed to convert between pointers and machine words.
+ MIPS is currently the only machine that can have different sized
+ pointers and machine words (but only when compiling with 64 bit
+ registers and 32 bit pointers). */
+static inline __attribute__ ((always_inline)) uword
+pointer_to_uword (const void *p)
+{
+ return (uword) (clib_address_t) p;
+}
+
+#define uword_to_pointer(u,type) ((type) (clib_address_t) (u))
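+
+/* Round-trip sketch: these are commonly used to pass small scalars
+   (e.g. a file descriptor) through a void * argument, as unformat's
+   Unix file support does:
+
+       void *arg = uword_to_pointer (fd, void *);
+       int fd2 = pointer_to_uword (arg);
+*/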
+
+/* Any type: can be either word or pointer. */
+typedef word any;
+
+/* Floating point types. */
+typedef double f64;
+typedef float f32;
+
+typedef __complex__ float cf32;
+typedef __complex__ double cf64;
+
+/* Floating point word size. */
+typedef f64 fword;
+
+/* Can be used as either {r,l}value, e.g. these both work
+ clib_mem_unaligned (p, u64) = 99
+ clib_mem_unaligned (p, u64) += 99 */
+
+#define clib_mem_unaligned(pointer,type) \
+ (((struct { CLIB_PACKED (type _data); } *) (pointer))->_data)
+
+/* Access memory with specified alignment depending on align argument.
+ As with clib_mem_unaligned, may be used as {r,l}value. */
+#define clib_mem_aligned(addr,type,align) \
+ (((struct { \
+ type _data \
+ __attribute__ ((aligned (align), packed)); \
+ } *) (addr))->_data)
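+
+/* Example (a sketch): read a u32 from "addr", promising 4-byte
+   alignment to the compiler:
+
+       u32 v = clib_mem_aligned (addr, u32, 4);
+*/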
+
+#endif /* included_clib_types_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/unformat.c b/src/vppinfra/unformat.c
new file mode 100644
index 00000000..5b17562f
--- /dev/null
+++ b/src/vppinfra/unformat.c
@@ -0,0 +1,1083 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/format.h>
+
+/* Call user's function to fill input buffer. */
+uword
+_unformat_fill_input (unformat_input_t * i)
+{
+ uword l, first_mark;
+
+ if (i->index == UNFORMAT_END_OF_INPUT)
+ return i->index;
+
+ first_mark = l = vec_len (i->buffer);
+ if (vec_len (i->buffer_marks) > 0)
+ first_mark = i->buffer_marks[0];
+
+ /* Re-use buffer when no marks. */
+ if (first_mark > 0)
+ vec_delete (i->buffer, first_mark, 0);
+
+ i->index = vec_len (i->buffer);
+ for (l = 0; l < vec_len (i->buffer_marks); l++)
+ i->buffer_marks[l] -= first_mark;
+
+ /* Call user's function to fill the buffer. */
+ if (i->fill_buffer)
+ i->index = i->fill_buffer (i);
+
+ /* If input pointer is still beyond end of buffer even after
+ fill then we've run out of input. */
+ if (i->index >= vec_len (i->buffer))
+ i->index = UNFORMAT_END_OF_INPUT;
+
+ return i->index;
+}
+
+always_inline uword
+is_white_space (uword c)
+{
+ switch (c)
+ {
+ case ' ':
+ case '\t':
+ case '\n':
+ case '\r':
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+/* Format function for dumping input stream. */
+u8 *
+format_unformat_error (u8 * s, va_list * va)
+{
+ unformat_input_t *i = va_arg (*va, unformat_input_t *);
+ uword l = vec_len (i->buffer);
+
+ /* Only show so much of the input buffer (it could be really large). */
+ uword n_max = 30;
+
+ if (i->index < l)
+ {
+ uword n = l - i->index;
+ u8 *p, *p_end;
+
+ p = i->buffer + i->index;
+ p_end = p + (n > n_max ? n_max : n);
+
+ /* Skip white space at end. */
+ if (n <= n_max)
+ {
+ while (p_end > p && is_white_space (p_end[-1]))
+ p_end--;
+ }
+
+ while (p < p_end)
+ {
+ switch (*p)
+ {
+ case '\r':
+ vec_add (s, "\\r", 2);
+ break;
+ case '\n':
+ vec_add (s, "\\n", 2);
+ break;
+ case '\t':
+ vec_add (s, "\\t", 2);
+ break;
+ default:
+ vec_add1 (s, *p);
+ break;
+ }
+ p++;
+ }
+
+ if (n > n_max)
+ vec_add (s, "...", 3);
+ }
+
+ return s;
+}
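+
+/* Usage sketch: report a parse failure together with the offending
+   input context:
+
+       clib_warning ("parse error: %U", format_unformat_error, &input);
+*/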
+
+/* Print everything: not just error context. */
+u8 *
+format_unformat_input (u8 * s, va_list * va)
+{
+ unformat_input_t *i = va_arg (*va, unformat_input_t *);
+ uword l, n;
+
+ if (i->index == UNFORMAT_END_OF_INPUT)
+ s = format (s, "{END_OF_INPUT}");
+ else
+ {
+ l = vec_len (i->buffer);
+ n = l - i->index;
+ if (n > 0)
+ vec_add (s, i->buffer + i->index, n);
+ }
+
+ return s;
+}
+
+#if CLIB_DEBUG > 0
+void
+di (unformat_input_t * i)
+{
+ fformat (stderr, "%U\n", format_unformat_input, i);
+}
+#endif
+
+/* Parse a delimited vector string. If the string starts with { then it
+   is delimited by balanced braces; otherwise it is delimited by white
+   space. {} were chosen since they are special to the shell. */
+static uword
+unformat_string (unformat_input_t * input,
+ uword delimiter_character,
+ uword format_character, va_list * va)
+{
+ u8 **string_return = va_arg (*va, u8 **);
+ u8 *s = 0;
+ word paren = 0;
+ word is_paren_delimited = 0;
+ word backslash = 0;
+ uword c;
+
+ switch (delimiter_character)
+ {
+ case '%':
+ case ' ':
+ case '\t':
+ delimiter_character = 0;
+ break;
+ }
+
+ while ((c = unformat_get_input (input)) != UNFORMAT_END_OF_INPUT)
+ {
+ word add_to_vector;
+
+ /* Null return string means to skip over delimited input. */
+ add_to_vector = string_return != 0;
+
+ if (backslash)
+ backslash = 0;
+ else
+ switch (c)
+ {
+ case '\\':
+ backslash = 1;
+ add_to_vector = 0;
+ break;
+
+ case '{':
+ if (paren == 0 && vec_len (s) == 0)
+ {
+ is_paren_delimited = 1;
+ add_to_vector = 0;
+ }
+ paren++;
+ break;
+
+ case '}':
+ paren--;
+ if (is_paren_delimited && paren == 0)
+ goto done;
+ break;
+
+ case ' ':
+ case '\t':
+ case '\n':
+ case '\r':
+ if (!is_paren_delimited)
+ {
+ unformat_put_input (input);
+ goto done;
+ }
+ break;
+
+ default:
+ if (!is_paren_delimited && c == delimiter_character)
+ {
+ unformat_put_input (input);
+ goto done;
+ }
+ }
+
+ if (add_to_vector)
+ vec_add1 (s, c);
+ }
+
+done:
+ if (string_return)
+ {
+      /* Match the input "{" followed by end-of-input as a single brace. */
+ if (c == UNFORMAT_END_OF_INPUT && vec_len (s) == 0 && paren == 1)
+ vec_add1 (s, '{');
+
+ /* Don't match null string. */
+ if (c == UNFORMAT_END_OF_INPUT && vec_len (s) == 0)
+ return 0;
+
+ /* Null terminate C string. */
+ if (format_character == 's')
+ vec_add1 (s, 0);
+
+ *string_return = s;
+ }
+ else
+ vec_free (s); /* just to make sure */
+
+ return 1;
+}
+
+uword
+unformat_hex_string (unformat_input_t * input, va_list * va)
+{
+ u8 **hexstring_return = va_arg (*va, u8 **);
+ u8 *s;
+ uword n, d, c;
+
+ n = 0;
+ d = 0;
+ s = 0;
+ while ((c = unformat_get_input (input)) != UNFORMAT_END_OF_INPUT)
+ {
+ if (c >= '0' && c <= '9')
+ d = 16 * d + c - '0';
+ else if (c >= 'a' && c <= 'f')
+ d = 16 * d + 10 + c - 'a';
+ else if (c >= 'A' && c <= 'F')
+ d = 16 * d + 10 + c - 'A';
+ else
+ {
+ unformat_put_input (input);
+ break;
+ }
+ n++;
+
+ if (n == 2)
+ {
+ vec_add1 (s, d);
+ n = d = 0;
+ }
+ }
+
+ /* Hex string must have even number of digits. */
+ if (n % 2)
+ {
+ vec_free (s);
+ return 0;
+ }
+ /* Make sure something was processed. */
+ else if (s == 0)
+ {
+ return 0;
+ }
+
+ *hexstring_return = s;
+ return 1;
+}
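+
+/* Usage sketch, via the %U hook ("input" and "bytes" are
+   illustrative): parsing "deadbeef" yields the 4-byte vector
+   0xde 0xad 0xbe 0xef:
+
+       u8 *bytes = 0;
+       if (unformat (&input, "%U", unformat_hex_string, &bytes))
+         {
+           ... use bytes ...
+           vec_free (bytes);
+         }
+*/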
+
+/* unformat (input "foo%U", unformat_eof) matches terminal foo only */
+uword
+unformat_eof (unformat_input_t * input, va_list * va)
+{
+ return unformat_check_input (input) == UNFORMAT_END_OF_INPUT;
+}
+
+/* Parse a token containing given set of characters. */
+uword
+unformat_token (unformat_input_t * input, va_list * va)
+{
+ u8 *token_chars = va_arg (*va, u8 *);
+ u8 **string_return = va_arg (*va, u8 **);
+ u8 *s, map[256];
+ uword i, c;
+
+ if (!token_chars)
+ token_chars = (u8 *) "a-zA-Z0-9_";
+
+ memset (map, 0, sizeof (map));
+ for (s = token_chars; *s;)
+ {
+ /* Parse range. */
+ if (s[0] < s[2] && s[1] == '-')
+ {
+ for (i = s[0]; i <= s[2]; i++)
+ map[i] = 1;
+ s = s + 3;
+ }
+ else
+ {
+ map[s[0]] = 1;
+ s = s + 1;
+ }
+ }
+
+ s = 0;
+ while ((c = unformat_get_input (input)) != UNFORMAT_END_OF_INPUT)
+ {
+ if (!map[c])
+ {
+ unformat_put_input (input);
+ break;
+ }
+
+ vec_add1 (s, c);
+ }
+
+ if (vec_len (s) == 0)
+ return 0;
+
+ *string_return = s;
+ return 1;
+}
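+
+/* Usage sketch: collect an identifier-like token, passing the
+   default character set explicitly:
+
+       u8 *name = 0;
+       if (unformat (&input, "%U", unformat_token,
+                     (u8 *) "a-zA-Z0-9_", &name))
+         ... use name, then vec_free (name) ...
+*/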
+
+/* Unformat (parse) function which reads a %s string and converts it
+   to an unformat_input_t. */
+uword
+unformat_input (unformat_input_t * i, va_list * args)
+{
+ unformat_input_t *sub_input = va_arg (*args, unformat_input_t *);
+ u8 *s;
+
+ if (unformat (i, "%v", &s))
+ {
+ unformat_init_vector (sub_input, s);
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Parse a line ending with \n and return it. */
+uword
+unformat_line (unformat_input_t * i, va_list * va)
+{
+ u8 *line = 0, **result = va_arg (*va, u8 **);
+ uword c;
+
+ while ((c = unformat_get_input (i)) != '\n' && c != UNFORMAT_END_OF_INPUT)
+ {
+ vec_add1 (line, c);
+ }
+
+ *result = line;
+ return vec_len (line);
+}
+
+/* Parse a line ending with \n and return it as an unformat_input_t. */
+uword
+unformat_line_input (unformat_input_t * i, va_list * va)
+{
+ unformat_input_t *result = va_arg (*va, unformat_input_t *);
+ u8 *line;
+ if (!unformat_user (i, unformat_line, &line))
+ return 0;
+ unformat_init_vector (result, line);
+ return 1;
+}
+
+/* Values for is_signed. */
+#define UNFORMAT_INTEGER_SIGNED 1
+#define UNFORMAT_INTEGER_UNSIGNED 0
+
+static uword
+unformat_integer (unformat_input_t * input,
+ va_list * va, uword base, uword is_signed, uword data_bytes)
+{
+ uword c, digit;
+ uword value = 0;
+ uword n_digits = 0;
+ uword n_input = 0;
+ uword sign = 0;
+
+ /* We only support bases <= 64. */
+ if (base < 2 || base > 64)
+ goto error;
+
+ while ((c = unformat_get_input (input)) != UNFORMAT_END_OF_INPUT)
+ {
+ switch (c)
+ {
+ case '-':
+ if (n_input == 0)
+ {
+ if (is_signed)
+ {
+ sign = 1;
+ goto next_digit;
+ }
+ else
+ /* Leading sign for unsigned number. */
+ goto error;
+ }
+ /* Sign after input (e.g. 100-200). */
+ goto put_input_done;
+
+ case '+':
+ if (n_input > 0)
+ goto put_input_done;
+ sign = 0;
+ goto next_digit;
+
+ case '0' ... '9':
+ digit = c - '0';
+ break;
+
+ case 'a' ... 'z':
+ digit = 10 + (c - 'a');
+ break;
+
+ case 'A' ... 'Z':
+ digit = 10 + (base >= 36 ? 26 : 0) + (c - 'A');
+ break;
+
+ case '/':
+ digit = 62;
+ break;
+
+ case '?':
+ digit = 63;
+ break;
+
+ default:
+ goto put_input_done;
+ }
+
+ if (digit >= base)
+ {
+ put_input_done:
+ unformat_put_input (input);
+ goto done;
+ }
+
+ {
+ uword new_value = base * value + digit;
+
+ /* Check for overflow. */
+ if (new_value < value)
+ goto error;
+ value = new_value;
+ }
+ n_digits += 1;
+
+ next_digit:
+ n_input++;
+ }
+
+done:
+ if (sign)
+ value = -value;
+
+ if (n_digits > 0)
+ {
+ void *v = va_arg (*va, void *);
+
+ if (data_bytes == ~0)
+ data_bytes = sizeof (int);
+
+ switch (data_bytes)
+ {
+ case 1:
+ *(u8 *) v = value;
+ break;
+ case 2:
+ *(u16 *) v = value;
+ break;
+ case 4:
+ *(u32 *) v = value;
+ break;
+ case 8:
+ *(u64 *) v = value;
+ break;
+ default:
+ goto error;
+ }
+
+ return 1;
+ }
+
+error:
+ return 0;
+}
+
+/* Return x 10^n */
+static f64
+times_power_of_ten (f64 x, int n)
+{
+ if (n >= 0)
+ {
+ static f64 t[8] = { 1e+0, 1e+1, 1e+2, 1e+3, 1e+4, 1e+5, 1e+6, 1e+7, };
+ while (n >= 8)
+ {
+ x *= 1e+8;
+ n -= 8;
+ }
+ return x * t[n];
+ }
+ else
+ {
+ static f64 t[8] = { 1e-0, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7, };
+ while (n <= -8)
+ {
+ x *= 1e-8;
+ n += 8;
+ }
+ return x * t[-n];
+ }
+
+}
+
+static uword
+unformat_float (unformat_input_t * input, va_list * va)
+{
+ uword c;
+ u64 values[3];
+ uword n_digits[3], value_index = 0;
+ uword signs[2], sign_index = 0;
+ uword n_input = 0;
+
+ memset (values, 0, sizeof (values));
+ memset (n_digits, 0, sizeof (n_digits));
+ memset (signs, 0, sizeof (signs));
+
+ while ((c = unformat_get_input (input)) != UNFORMAT_END_OF_INPUT)
+ {
+ switch (c)
+ {
+ case '-':
+ if (value_index == 2 && n_digits[2] == 0)
+ /* sign of exponent: it's ok. */ ;
+
+ else if (value_index < 2 && n_digits[0] > 0)
+ {
+ /* 123- */
+ unformat_put_input (input);
+ goto done;
+ }
+
+ else if (n_input > 0)
+ goto error;
+
+ signs[sign_index++] = 1;
+ goto next_digit;
+
+ case '+':
+ if (value_index == 2 && n_digits[2] == 0)
+ /* sign of exponent: it's ok. */ ;
+
+ else if (value_index < 2 && n_digits[0] > 0)
+ {
+ /* 123+ */
+ unformat_put_input (input);
+ goto done;
+ }
+
+ else if (n_input > 0)
+ goto error;
+ signs[sign_index++] = 0;
+ goto next_digit;
+
+ case 'e':
+ case 'E':
+ if (n_input == 0)
+ goto error;
+ value_index = 2;
+ sign_index = 1;
+ break;
+
+ case '.':
+ if (value_index > 0)
+ goto error;
+ value_index = 1;
+ break;
+
+ case '0' ... '9':
+ {
+ u64 tmp;
+
+ tmp = values[value_index] * 10 + c - '0';
+
+ /* Check for overflow. */
+ if (tmp < values[value_index])
+ goto error;
+ values[value_index] = tmp;
+ n_digits[value_index] += 1;
+ }
+ break;
+
+ default:
+ unformat_put_input (input);
+ goto done;
+ }
+
+ next_digit:
+ n_input++;
+ }
+
+done:
+ {
+ f64 f_values[2], *value_return;
+ word expon;
+
+ /* Must have either whole or fraction digits. */
+ if (n_digits[0] + n_digits[1] <= 0)
+ goto error;
+
+ f_values[0] = values[0];
+ if (signs[0])
+ f_values[0] = -f_values[0];
+
+ f_values[1] = values[1];
+ f_values[1] = times_power_of_ten (f_values[1], -n_digits[1]);
+
+ f_values[0] += f_values[1];
+
+ expon = values[2];
+ if (signs[1])
+ expon = -expon;
+
+ f_values[0] = times_power_of_ten (f_values[0], expon);
+
+ value_return = va_arg (*va, f64 *);
+ *value_return = f_values[0];
+ return 1;
+ }
+
+error:
+ return 0;
+}
+
+static const char *
+match_input_with_format (unformat_input_t * input, const char *f)
+{
+ uword cf, ci;
+
+ ASSERT (*f != 0);
+
+ while (1)
+ {
+ cf = *f;
+ if (cf == 0 || cf == '%' || cf == ' ')
+ break;
+ f++;
+
+ ci = unformat_get_input (input);
+
+ if (cf != ci)
+ return 0;
+ }
+ return f;
+}
+
+static const char *
+do_percent (unformat_input_t * input, va_list * va, const char *f)
+{
+ uword cf, n, data_bytes = ~0;
+
+ cf = *f++;
+
+ switch (cf)
+ {
+ default:
+ break;
+
+ case 'w':
+ /* Word types. */
+ cf = *f++;
+ data_bytes = sizeof (uword);
+ break;
+
+ case 'l':
+ cf = *f++;
+ if (cf == 'l')
+ {
+ cf = *f++;
+ data_bytes = sizeof (long long);
+ }
+ else
+ {
+ data_bytes = sizeof (long);
+ }
+ break;
+
+ case 'L':
+ cf = *f++;
+ data_bytes = sizeof (long long);
+ break;
+ }
+
+ n = 0;
+ switch (cf)
+ {
+ case 'D':
+      data_bytes = va_arg (*va, int);
+      /* fall through */
+ case 'd':
+ n = unformat_integer (input, va, 10,
+ UNFORMAT_INTEGER_SIGNED, data_bytes);
+ break;
+
+ case 'u':
+ n = unformat_integer (input, va, 10,
+ UNFORMAT_INTEGER_UNSIGNED, data_bytes);
+ break;
+
+ case 'b':
+ n = unformat_integer (input, va, 2,
+ UNFORMAT_INTEGER_UNSIGNED, data_bytes);
+ break;
+
+ case 'o':
+ n = unformat_integer (input, va, 8,
+ UNFORMAT_INTEGER_UNSIGNED, data_bytes);
+ break;
+
+ case 'X':
+      data_bytes = va_arg (*va, int);
+      /* fall through */
+ case 'x':
+ n = unformat_integer (input, va, 16,
+ UNFORMAT_INTEGER_UNSIGNED, data_bytes);
+ break;
+
+ case 'f':
+ n = unformat_float (input, va);
+ break;
+
+ case 's':
+ case 'v':
+ n = unformat_string (input, f[0], cf, va);
+ break;
+
+ case 'U':
+ {
+ unformat_function_t *f = va_arg (*va, unformat_function_t *);
+ n = f (input, va);
+ }
+ break;
+
+ case '=':
+ case '|':
+ {
+ int *var = va_arg (*va, int *);
+ uword val = va_arg (*va, int);
+
+ if (cf == '|')
+ val |= *var;
+ *var = val;
+ n = 1;
+ }
+ break;
+ }
+
+ return n ? f : 0;
+}
+
+uword
+unformat_skip_white_space (unformat_input_t * input)
+{
+ uword n = 0;
+ uword c;
+
+ while ((c = unformat_get_input (input)) != UNFORMAT_END_OF_INPUT)
+ {
+ if (!is_white_space (c))
+ {
+ unformat_put_input (input);
+ break;
+ }
+ n++;
+ }
+ return n;
+}
+
+uword
+va_unformat (unformat_input_t * input, const char *fmt, va_list * va)
+{
+ const char *f;
+ uword input_matches_format;
+ uword default_skip_input_white_space;
+ uword n_input_white_space_skipped;
+ uword last_non_white_space_match_percent;
+ uword last_non_white_space_match_format;
+
+ vec_add1_aligned (input->buffer_marks, input->index,
+ sizeof (input->buffer_marks[0]));
+
+ f = fmt;
+ default_skip_input_white_space = 1;
+ input_matches_format = 0;
+ last_non_white_space_match_percent = 0;
+ last_non_white_space_match_format = 0;
+
+ while (1)
+ {
+ char cf;
+ uword is_percent, skip_input_white_space;
+
+ cf = *f;
+ is_percent = 0;
+
+ /* Always skip input white space at start of format string.
+ Otherwise use default skip value which can be changed by %_
+ (see below). */
+ skip_input_white_space = f == fmt || default_skip_input_white_space;
+
+ /* Spaces in format request skipping input white space. */
+ if (is_white_space (cf))
+ {
+ skip_input_white_space = 1;
+
+ /* Multiple format spaces are equivalent to a single white
+ space. */
+ while (is_white_space (*++f))
+ ;
+ }
+ else if (cf == '%')
+ {
+ /* %_ toggles whether or not to skip input white space. */
+ switch (*++f)
+ {
+ case '_':
+ default_skip_input_white_space =
+ !default_skip_input_white_space;
+ f++;
+ /* For transition from skip to no-skip in middle of format
+ string, skip input white space. For example, the following matches:
+ fmt = "%_%d.%d%_->%_%d.%d%_"
+ input "1.2 -> 3.4"
+ Without this the space after -> does not get skipped. */
+ if (!default_skip_input_white_space
+ && !(f == fmt + 2 || *f == 0))
+ unformat_skip_white_space (input);
+ continue;
+
+ /* %% means match % */
+ case '%':
+ break;
+
+ /* % at end of format string. */
+ case 0:
+ goto parse_fail;
+
+ default:
+ is_percent = 1;
+ break;
+ }
+ }
+
+ n_input_white_space_skipped = 0;
+ if (skip_input_white_space)
+ n_input_white_space_skipped = unformat_skip_white_space (input);
+
+ /* End of format string. */
+ if (cf == 0)
+ {
+ /* Force parse error when format string ends and input is
+ not white or at end. As an example, this is to prevent
+ format "foo" from matching input "food".
+ The last_non_white_space_match_percent is to make
+ "foo %d" match input "foo 10,bletch" with %d matching 10. */
+ if (skip_input_white_space
+ && !last_non_white_space_match_percent
+ && !last_non_white_space_match_format
+ && n_input_white_space_skipped == 0
+ && input->index != UNFORMAT_END_OF_INPUT)
+ goto parse_fail;
+ break;
+ }
+
+ last_non_white_space_match_percent = is_percent;
+ last_non_white_space_match_format = 0;
+
+ /* Explicit spaces in format must match input white space. */
+ if (cf == ' ' && !default_skip_input_white_space)
+ {
+ if (n_input_white_space_skipped == 0)
+ goto parse_fail;
+ }
+
+ else if (is_percent)
+ {
+ if (!(f = do_percent (input, va, f)))
+ goto parse_fail;
+ }
+
+ else
+ {
+ const char *g = match_input_with_format (input, f);
+ if (!g)
+ goto parse_fail;
+ last_non_white_space_match_format = g > f;
+ f = g;
+ }
+ }
+
+ input_matches_format = 1;
+parse_fail:
+
+ /* Rewind buffer marks. */
+ {
+ uword l = vec_len (input->buffer_marks);
+
+    /* If we did not match, back up the buffer to the last mark. */
+ if (!input_matches_format)
+ input->index = input->buffer_marks[l - 1];
+
+ _vec_len (input->buffer_marks) = l - 1;
+ }
+
+ return input_matches_format;
+}
+
+uword
+unformat (unformat_input_t * input, const char *fmt, ...)
+{
+ va_list va;
+ uword result;
+ va_start (va, fmt);
+ result = va_unformat (input, fmt, &va);
+ va_end (va);
+ return result;
+}
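+
+/* End-to-end sketch ("str" is illustrative; unformat_free comes from
+   format.h): parse a dotted quad and a port from a string:
+
+       unformat_input_t in;
+       u32 a, b, c, d, port;
+
+       unformat_init_string (&in, str, strlen (str));
+       if (unformat (&in, "ip %d.%d.%d.%d port %d",
+                     &a, &b, &c, &d, &port))
+         ... matched ...
+       unformat_free (&in);
+*/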
+
+uword
+unformat_user (unformat_input_t * input, unformat_function_t * func, ...)
+{
+ va_list va;
+ uword result, l;
+
+ /* Save place in input buffer in case parse fails. */
+ l = vec_len (input->buffer_marks);
+ vec_add1_aligned (input->buffer_marks, input->index,
+ sizeof (input->buffer_marks[0]));
+
+ va_start (va, func);
+ result = func (input, &va);
+ va_end (va);
+
+ if (!result && input->index != UNFORMAT_END_OF_INPUT)
+ input->index = input->buffer_marks[l];
+
+ _vec_len (input->buffer_marks) = l;
+
+ return result;
+}
+
+/* Setup for unformat of Unix style command line. */
+void
+unformat_init_command_line (unformat_input_t * input, char *argv[])
+{
+ uword i;
+
+ unformat_init (input, 0, 0);
+
+ /* Concatenate argument strings with space in between. */
+ for (i = 1; argv[i]; i++)
+ {
+ vec_add (input->buffer, argv[i], strlen (argv[i]));
+ if (argv[i + 1])
+ vec_add1 (input->buffer, ' ');
+ }
+}
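+
+/* Usage sketch for a main()-style argument parser ("verbose" is
+   illustrative):
+
+       unformat_input_t in;
+
+       unformat_init_command_line (&in, argv);
+       while (unformat_check_input (&in) != UNFORMAT_END_OF_INPUT)
+         {
+           if (unformat (&in, "verbose"))
+             verbose = 1;
+           else
+             break;
+         }
+*/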
+
+void
+unformat_init_string (unformat_input_t * input, char *string, int string_len)
+{
+ unformat_init (input, 0, 0);
+ if (string_len > 0)
+ vec_add (input->buffer, string, string_len);
+}
+
+void
+unformat_init_vector (unformat_input_t * input, u8 * vector_string)
+{
+ unformat_init (input, 0, 0);
+ input->buffer = vector_string;
+}
+
+#ifdef CLIB_UNIX
+
+static uword
+unix_file_fill_buffer (unformat_input_t * input)
+{
+ int fd = pointer_to_uword (input->fill_buffer_arg);
+  uword l;
+  word n;			/* read () may return -1, so n must be signed */
+
+ l = vec_len (input->buffer);
+ vec_resize (input->buffer, 4096);
+ n = read (fd, input->buffer + l, 4096);
+ if (n > 0)
+ _vec_len (input->buffer) = l + n;
+
+ if (n <= 0)
+ return UNFORMAT_END_OF_INPUT;
+ else
+ return input->index;
+}
+
+void
+unformat_init_unix_file (unformat_input_t * input, int file_descriptor)
+{
+ unformat_init (input, unix_file_fill_buffer,
+ uword_to_pointer (file_descriptor, void *));
+}
+
+/* Take input from Unix environment variable. */
+uword
+unformat_init_unix_env (unformat_input_t * input, char *var)
+{
+ char *val = getenv (var);
+ if (val)
+ unformat_init_string (input, val, strlen (val));
+ return val != 0;
+}
+
+#endif /* CLIB_UNIX */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/unix-formats.c b/src/vppinfra/unix-formats.c
new file mode 100644
index 00000000..b09433c9
--- /dev/null
+++ b/src/vppinfra/unix-formats.c
@@ -0,0 +1,956 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifdef __KERNEL__
+
+# include <linux/unistd.h>
+# include <linux/signal.h>
+
+#else /* ! __KERNEL__ */
+
+#define _GNU_SOURCE /* to get REG_* in ucontext.h */
+#include <ucontext.h>
+#undef _GNU_SOURCE
+#undef __USE_GNU
+
+#include <unistd.h>
+#include <signal.h>
+#include <grp.h>
+
+#include <time.h>
+#include <sys/socket.h>
+#include <netdb.h>
+#include <math.h>
+
+#include <vppinfra/time.h>
+
+#ifdef AF_NETLINK
+#include <linux/types.h>
+#include <linux/netlink.h>
+#endif
+
+#endif /* ! __KERNEL__ */
+
+
+#ifdef __KERNEL__
+# include <linux/socket.h>
+# include <linux/in.h>
+# include <linux/ip.h>
+# include <linux/tcp.h>
+# include <linux/udp.h>
+# include <linux/icmp.h>
+# include <linux/if_ether.h>
+# include <linux/if_arp.h>
+#else
+# include <net/if.h> /* struct ifnet may live here */
+# include <netinet/in.h>
+# include <netinet/ip.h>
+# include <netinet/tcp.h>
+# include <netinet/udp.h>
+# include <netinet/ip_icmp.h>
+# include <netinet/if_ether.h>
+#endif /* __KERNEL__ */
+
+#include <vppinfra/bitops.h> /* foreach_set_bit */
+#include <vppinfra/format.h>
+#include <vppinfra/error.h>
+
+/* Format unix network address family (e.g. AF_INET). */
+u8 * format_address_family (u8 * s, va_list * va)
+{
+ uword family = va_arg (*va, uword);
+ u8 * t = (u8 *) "UNKNOWN";
+ switch (family)
+ {
+#define _(x) case PF_##x: t = (u8 *) #x; break
+ _ (UNSPEC);
+ _ (UNIX); /* Unix domain sockets */
+ _ (INET); /* Internet IP Protocol */
+#ifdef PF_AX25
+ _ (AX25); /* Amateur Radio AX.25 */
+#endif
+#ifdef PF_IPX
+ _ (IPX); /* Novell IPX */
+#endif
+#ifdef PF_APPLETALK
+ _ (APPLETALK); /* AppleTalk DDP */
+#endif
+#ifdef PF_NETROM
+ _ (NETROM); /* Amateur Radio NET/ROM */
+#endif
+#ifdef PF_BRIDGE
+ _ (BRIDGE); /* Multiprotocol bridge */
+#endif
+#ifdef PF_ATMPVC
+ _ (ATMPVC); /* ATM PVCs */
+#endif
+#ifdef PF_X25
+ _ (X25); /* Reserved for X.25 project */
+#endif
+#ifdef PF_INET6
+ _ (INET6); /* IP version 6 */
+#endif
+#ifdef PF_ROSE
+ _ (ROSE); /* Amateur Radio X.25 PLP */
+#endif
+#ifdef PF_DECnet
+ _ (DECnet); /* Reserved for DECnet project */
+#endif
+#ifdef PF_NETBEUI
+ _ (NETBEUI); /* Reserved for 802.2LLC project*/
+#endif
+#ifdef PF_SECURITY
+ _ (SECURITY); /* Security callback pseudo AF */
+#endif
+#ifdef PF_KEY
+ _ (KEY); /* PF_KEY key management API */
+#endif
+#ifdef PF_NETLINK
+ _ (NETLINK);
+#endif
+#ifdef PF_PACKET
+ _ (PACKET); /* Packet family */
+#endif
+#ifdef PF_ASH
+ _ (ASH); /* Ash */
+#endif
+#ifdef PF_ECONET
+ _ (ECONET); /* Acorn Econet */
+#endif
+#ifdef PF_ATMSVC
+ _ (ATMSVC); /* ATM SVCs */
+#endif
+#ifdef PF_SNA
+ _ (SNA); /* Linux SNA Project */
+#endif
+#ifdef PF_IRDA
+ _ (IRDA); /* IRDA sockets */
+#endif
+#undef _
+ }
+ vec_add (s, t, strlen ((char *) t));
+ return s;
+}
+
+u8 * format_network_protocol (u8 * s, va_list * args)
+{
+ uword family = va_arg (*args, uword);
+ uword protocol = va_arg (*args, uword);
+
+#ifndef __KERNEL__
+ struct protoent * p = getprotobynumber (protocol);
+
+ ASSERT (family == AF_INET);
+ if (p)
+ return format (s, "%s", p->p_name);
+ else
+ return format (s, "%d", protocol);
+#else
+ return format (s, "%d/%d", family, protocol);
+#endif
+}
+
+u8 * format_network_port (u8 * s, va_list * args)
+{
+ uword proto = va_arg (*args, uword);
+ uword port = va_arg (*args, uword);
+
+#ifndef __KERNEL__
+ struct servent * p = getservbyport (port, proto == IPPROTO_UDP ? "udp" : "tcp");
+
+ if (p)
+ return format (s, "%s", p->s_name);
+ else
+ return format (s, "%d", port);
+#else
+ return format (s, "%s/%d", proto == IPPROTO_UDP ? "udp" : "tcp", port);
+#endif
+}
+
+/* Format generic network address: takes two arguments family and address.
+ Assumes network byte order. */
+u8 * format_network_address (u8 * s, va_list * args)
+{
+ uword family = va_arg (*args, uword);
+ u8 * addr = va_arg (*args, u8 *);
+
+ switch (family)
+ {
+ case AF_INET:
+ s = format (s, "%d.%d.%d.%d", addr[0], addr[1], addr[2], addr[3]);
+ break;
+
+ case AF_UNSPEC:
+ /* We use AF_UNSPEC for ethernet addresses. */
+ s = format (s, "%02x:%02x:%02x:%02x:%02x:%02x",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+ break;
+
+ default:
+ clib_error ("unsupported address family %d", family);
+ }
+
+ return s;
+}
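+
+/* Usage sketch: the formatters compose via %U ("addr" is a 4-byte
+   IPv4 address in network byte order):
+
+       u8 *s = format (0, "%U %U",
+                       format_address_family, AF_INET,
+                       format_network_address, AF_INET, addr);
+*/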
+
+u8 * format_sockaddr (u8 * s, va_list * args)
+{
+ void * v = va_arg (*args, void *);
+ struct sockaddr * sa = v;
+ static u32 local_counter;
+
+ switch (sa->sa_family)
+ {
+ case AF_INET:
+ {
+ struct sockaddr_in * i = v;
+ s = format (s, "%U:%U",
+ format_network_address, AF_INET, &i->sin_addr.s_addr,
+ format_network_port, IPPROTO_TCP, ntohs (i->sin_port));
+ }
+ break;
+
+ case AF_LOCAL:
+ {
+ /*
+ * There isn't anything useful to print.
+ * The unix cli world uses the output to make a node name,
+ * so we need to return a unique name.
+ */
+ s = format (s, "local:%u", local_counter++);
+ }
+ break;
+
+#ifndef __KERNEL__
+#ifdef AF_NETLINK
+ case AF_NETLINK:
+ {
+ struct sockaddr_nl * n = v;
+ s = format (s, "KERNEL-NETLINK");
+ if (n->nl_groups)
+ s = format (s, " (groups 0x%x)", n->nl_groups);
+ break;
+ }
+#endif
+#endif
+
+ default:
+ s = format (s, "sockaddr family %d", sa->sa_family);
+ break;
+ }
+
+ return s;
+}
+
+u8 * format_tcp4_packet (u8 * s, va_list * args)
+{
+ u8 * p = va_arg (*args, u8 *);
+ struct iphdr * ip = (void *) p;
+ struct tcphdr * tcp = (void *) (ip + 1);
+
+ s = format (s, "tcp %U:%U -> %U:%U",
+ format_network_address, AF_INET, &ip->saddr,
+ format_network_port, IPPROTO_TCP, ntohs (tcp->source),
+ format_network_address, AF_INET, &ip->daddr,
+ format_network_port, IPPROTO_TCP, ntohs (tcp->dest));
+
+ s = format (s, ", seq 0x%08x -> 0x%08x", tcp->seq, tcp->ack_seq);
+#define _(f) if (tcp->f) s = format (s, ", " #f);
+ _ (syn); _ (ack); _ (fin); _ (rst); _ (psh); _ (urg);
+#undef _
+
+ if (tcp->window)
+ s = format (s, ", window 0x%04x", tcp->window);
+ if (tcp->urg)
+ s = format (s, ", urg 0x%04x", tcp->urg_ptr);
+
+ return s;
+}
+
+u8 * format_udp4_packet (u8 * s, va_list * args)
+{
+ u8 * p = va_arg (*args, u8 *);
+ struct iphdr * ip = (void *) p;
+ struct udphdr * udp = (void *) (ip + 1);
+
+ s = format (s, "udp %U:%U -> %U:%U",
+ format_network_address, AF_INET, &ip->saddr,
+ format_network_port, IPPROTO_UDP, ntohs (udp->source),
+ format_network_address, AF_INET, &ip->daddr,
+ format_network_port, IPPROTO_UDP, ntohs (udp->dest));
+
+ return s;
+}
+
+u8 * format_icmp4_type_and_code (u8 * s, va_list * args)
+{
+ uword icmp_type = va_arg (*args, uword);
+ uword icmp_code = va_arg (*args, uword);
+
+ switch (icmp_type)
+ {
+#define _(f,str) case ICMP_##f: s = format (s, str); break;
+ _ (ECHOREPLY, "echo reply");
+ _ (DEST_UNREACH, "unreachable");
+ _ (SOURCE_QUENCH, "source quench");
+ _ (REDIRECT, "redirect");
+ _ (ECHO, "echo request");
+ _ (TIME_EXCEEDED, "time exceeded");
+ _ (PARAMETERPROB, "parameter problem");
+ _ (TIMESTAMP, "timestamp request");
+ _ (TIMESTAMPREPLY, "timestamp reply");
+ _ (INFO_REQUEST, "information request");
+ _ (INFO_REPLY, "information reply");
+ _ (ADDRESS, "address mask request");
+ _ (ADDRESSREPLY, "address mask reply");
+#undef _
+ default:
+ s = format (s, "unknown type 0x%x", icmp_type);
+ }
+
+ if (icmp_type == ICMP_DEST_UNREACH)
+ {
+ switch (icmp_code)
+ {
+#define _(f,str) case ICMP_##f: s = format (s, " " str); break;
+ _ (NET_UNREACH, "network");
+ _ (HOST_UNREACH, "host");
+ _ (PROT_UNREACH, "protocol");
+ _ (PORT_UNREACH, "port");
+ _ (FRAG_NEEDED, ": fragmentation needed/DF set");
+ _ (SR_FAILED, "source route failed");
+ _ (NET_UNKNOWN, "network unknown");
+ _ (HOST_UNKNOWN, "host unknown");
+ _ (HOST_ISOLATED, "host isolated");
+ _ (NET_ANO, "network: admin. prohibited");
+ _ (HOST_ANO, "host: admin. prohibited");
+ _ (NET_UNR_TOS, "network for type-of-service");
+ _ (HOST_UNR_TOS, "host for type-of-service");
+ _ (PKT_FILTERED, ": packet filtered");
+ _ (PREC_VIOLATION, "precedence violation");
+ _ (PREC_CUTOFF, "precedence cut off");
+#undef _
+ default:
+ s = format (s, "unknown code 0x%x", icmp_code);
+ }
+ }
+ else if (icmp_type == ICMP_REDIRECT)
+ {
+ switch (icmp_code)
+ {
+#define _(f,str) case ICMP_##f: s = format (s, " " str); break;
+ _ (REDIR_NET, "network");
+ _ (REDIR_HOST, "host");
+ _ (REDIR_NETTOS, "network for type-of-service");
+ _ (REDIR_HOSTTOS, "host for type-of-service");
+#undef _
+ default:
+ s = format (s, "unknown code 0x%x", icmp_code);
+ }
+ }
+ else if (icmp_type == ICMP_TIME_EXCEEDED)
+ {
+ switch (icmp_code)
+ {
+#define _(f,str) case ICMP_##f: s = format (s, " " str); break;
+ _ (EXC_TTL, "time-to-live zero in transit");
+ _ (EXC_FRAGTIME, "time-to-live zero during reassembly");
+#undef _
+ default:
+ s = format (s, "unknown code 0x%x", icmp_code);
+ }
+ }
+
+ return s;
+}
+
+typedef struct {
+ u8 type;
+ u8 code;
+ u16 checksum;
+} icmp4_t;
+
+u8 * format_icmp4_packet (u8 * s, va_list * args)
+{
+ u8 * p = va_arg (*args, u8 *);
+ struct iphdr * ip = (void *) p;
+ icmp4_t * icmp = (void *) (ip + 1);
+ s = format (s, "icmp %U %U -> %U",
+ format_icmp4_type_and_code, icmp->type, icmp->code,
+ format_network_address, AF_INET, &ip->saddr,
+ format_network_address, AF_INET, &ip->daddr);
+
+ return s;
+}
+
+u8 * format_ip4_tos_byte (u8 * s, va_list * args)
+{
+ uword tos = va_arg (*args, uword);
+
+ if (tos & IPTOS_LOWDELAY)
+ s = format (s, "minimize-delay, ");
+ if (tos & IPTOS_MINCOST)
+ s = format (s, "minimize-cost, ");
+ if (tos & IPTOS_THROUGHPUT)
+ s = format (s, "maximize-throughput, ");
+ if (tos & IPTOS_RELIABILITY)
+ s = format (s, "maximize-reliability, ");
+
+ switch (IPTOS_PREC (tos))
+ {
+#define _(x,y) case IPTOS_PREC_##x: s = format (s, y); break
+ _ (NETCONTROL, "network");
+ _ (INTERNETCONTROL, "internet");
+ _ (CRITIC_ECP, "critical");
+ _ (FLASH, "flash");
+ _ (FLASHOVERRIDE, "flash-override");
+ _ (IMMEDIATE, "immediate");
+ _ (PRIORITY, "priority");
+ _ (ROUTINE, "routine");
+#undef _
+ }
+
+ return s;
+}
+
+u8 * format_ip4_packet (u8 * s, va_list * args)
+{
+ u8 * p = va_arg (*args, u8 *);
+ struct iphdr * ip = (void *) p;
+
+ static format_function_t * f[256];
+
+ if (! f[IPPROTO_TCP])
+ {
+ f[IPPROTO_TCP] = format_tcp4_packet;
+ f[IPPROTO_UDP] = format_udp4_packet;
+ f[IPPROTO_ICMP] = format_icmp4_packet;
+ }
+
+ if (f[ip->protocol])
+ return format (s, "%U", f[ip->protocol], p);
+
+ s = format (s, "%U: %U -> %U",
+ format_network_protocol, AF_INET, ip->protocol,
+ format_network_address, AF_INET, &ip->saddr,
+ format_network_address, AF_INET, &ip->daddr);
+
+ return s;
+}
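+
+/* Usage sketch (illustrative, not part of the original file): the
+ clib "%U" directive dispatches to a format_function_t, so a caller
+ holding a pointer to a raw IPv4 header (here the assumed variable
+ pkt_data) can render a packet as:
+
+ u8 *s = format (0, "%U", format_ip4_packet, pkt_data);
+ fformat (stdout, "%v\n", s);
+ vec_free (s);
+*/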
+
+#define foreach_unix_arphrd_type \
+ _ (NETROM, 0) \
+ _ (ETHER, 1) \
+ _ (EETHER, 2) \
+ _ (AX25, 3) \
+ _ (PRONET, 4) \
+ _ (CHAOS, 5) \
+ _ (IEEE802, 6) \
+ _ (ARCNET, 7) \
+ _ (APPLETLK, 8) \
+ _ (DLCI, 15) \
+ _ (ATM, 19) \
+ _ (METRICOM, 23) \
+ _ (IEEE1394, 24) \
+ _ (EUI64, 27) \
+ _ (INFINIBAND, 32) \
+ _ (SLIP, 256) \
+ _ (CSLIP, 257) \
+ _ (SLIP6, 258) \
+ _ (CSLIP6, 259) \
+ _ (RSRVD, 260) \
+ _ (ADAPT, 264) \
+ _ (ROSE, 270) \
+ _ (X25, 271) \
+ _ (HWX25, 272) \
+ _ (PPP, 512) \
+ _ (HDLC, 513) \
+ _ (LAPB, 516) \
+ _ (DDCMP, 517) \
+ _ (RAWHDLC, 518) \
+ _ (TUNNEL, 768) \
+ _ (TUNNEL6, 769) \
+ _ (FRAD, 770) \
+ _ (SKIP, 771) \
+ _ (LOOPBACK, 772) \
+ _ (LOCALTLK, 773) \
+ _ (FDDI, 774) \
+ _ (BIF, 775) \
+ _ (SIT, 776) \
+ _ (IPDDP, 777) \
+ _ (IPGRE, 778) \
+ _ (PIMREG, 779) \
+ _ (HIPPI, 780) \
+ _ (ASH, 781) \
+ _ (ECONET, 782) \
+ _ (IRDA, 783) \
+ _ (FCPP, 784) \
+ _ (FCAL, 785) \
+ _ (FCPL, 786) \
+ _ (FCFABRIC, 787) \
+ _ (IEEE802_TR, 800) \
+ _ (IEEE80211, 801) \
+ _ (IEEE80211_PRISM, 802) \
+ _ (IEEE80211_RADIOTAP, 803) \
+ _ (VOID, 0xFFFF) \
+ _ (NONE, 0xFFFE)
+
+u8 * format_unix_arphrd (u8 * s, va_list * args)
+{
+#ifndef __COVERITY__ /* doesn't understand this at all... */
+ u32 x = va_arg (*args, u32);
+ char * t;
+ switch (x)
+ {
+#define _(f,n) case ARPHRD_##f: t = #f; break;
+ foreach_unix_arphrd_type
+#undef _
+ default:
+ t = 0;
+ break;
+ }
+
+ if (t)
+ s = format (s, "%s", t);
+ else
+ s = format (s, "unknown 0x%x", x);
+#endif
+ return s;
+}
+
+#define foreach_unix_interface_flag \
+ _ (up) \
+ _ (broadcast) \
+ _ (debug) \
+ _ (loopback) \
+ _ (pointopoint) \
+ _ (notrailers) \
+ _ (running) \
+ _ (noarp) \
+ _ (promisc) \
+ _ (allmulti) \
+ _ (master) \
+ _ (slave) \
+ _ (multicast) \
+ _ (portsel) \
+ _ (automedia) \
+ _ (dynamic) \
+ _ (lower_up) \
+ _ (dormant) \
+ _ (echo)
+
+static char * unix_interface_flag_names[] = {
+#define _(f) #f,
+ foreach_unix_interface_flag
+#undef _
+};
+
+u8 * format_unix_interface_flags (u8 * s, va_list * args)
+{
+ u32 x = va_arg (*args, u32);
+ u32 i;
+
+ if (x == 0)
+ s = format (s, "none");
+ else foreach_set_bit (i, x, ({
+ if (i < ARRAY_LEN (unix_interface_flag_names))
+ s = format (s, "%s", unix_interface_flag_names[i]);
+ else
+ s = format (s, "unknown %d", i);
+ if (x >> (i + 1))
+ s = format (s, ", ");
+ }));
+ return s;
+}
+
+typedef struct {
+ u16 ar_hrd; /* format of hardware address */
+ u16 ar_pro; /* format of protocol address */
+ u8 ar_hln; /* length of hardware address */
+ u8 ar_pln; /* length of protocol address */
+ u16 ar_op; /* ARP opcode (command) */
+ u8 ar_sha[6]; /* sender hardware address */
+ u8 ar_spa[4]; /* sender IP address */
+ u8 ar_tha[6]; /* target hardware address */
+ u8 ar_tpa[4]; /* target IP address */
+} arp_ether_ip4_t;
+
+u8 * format_arp_packet (u8 * s, va_list * args)
+{
+ arp_ether_ip4_t * a = va_arg (*args, arp_ether_ip4_t *);
+ char * op = "unknown";
+
+ if (a->ar_pro != ETH_P_IP ||
+ a->ar_hrd != ARPHRD_ETHER)
+ return s;
+
+ switch (a->ar_op)
+ {
+#define _(f) case ARPOP_##f: op = #f; break;
+ _ (REQUEST);
+ _ (REPLY);
+ _ (RREQUEST);
+ _ (RREPLY);
+#undef _
+ }
+
+ s = format (s, "%s %U %U -> %U %U",
+ op,
+ format_network_address, AF_INET, a->ar_spa,
+ format_network_address, AF_UNSPEC, a->ar_sha,
+ format_network_address, AF_INET, a->ar_tpa,
+ format_network_address, AF_UNSPEC, a->ar_tha);
+ return s;
+}
+
+u8 * format_ethernet_proto (u8 * s, va_list * args)
+{
+ uword type = va_arg (*args, uword);
+ char * t = 0;
+
+ switch (type)
+ {
+ case 0: t = "BPDU"; break;
+#define _(f) case ETH_P_##f: t = #f; break;
+ _ (LOOP);
+ _ (PUP);
+#ifdef ETH_P_PUPAT
+ _ (PUPAT);
+#endif
+ _ (IP);
+ _ (X25);
+ _ (ARP);
+ _ (BPQ);
+#ifdef ETH_P_PUPAT
+ _ (IEEEPUP);
+ _ (IEEEPUPAT);
+#endif
+ _ (DEC);
+ _ (DNA_DL);
+ _ (DNA_RC);
+ _ (DNA_RT);
+ _ (LAT);
+ _ (DIAG);
+ _ (CUST);
+ _ (SCA);
+ _ (RARP);
+ _ (ATALK);
+ _ (AARP);
+ _ (IPX);
+ _ (IPV6);
+#ifdef ETH_P_PPP_DISC
+ _ (PPP_DISC);
+ _ (PPP_SES);
+#endif
+#ifdef ETH_P_ATMMPOA
+ _ (ATMMPOA);
+ _ (ATMFATE);
+#endif
+ _ (802_3);
+ _ (AX25);
+ _ (ALL);
+ _ (802_2);
+ _ (SNAP);
+ _ (DDCMP);
+ _ (WAN_PPP);
+ _ (PPP_MP);
+ _ (LOCALTALK);
+ _ (PPPTALK);
+ _ (TR_802_2);
+ _ (MOBITEX);
+ _ (CONTROL);
+ _ (IRDA);
+#ifdef ETH_P_ECONET
+ _ (ECONET);
+#endif
+#undef _
+ }
+
+ if (t)
+ vec_add (s, t, strlen (t));
+ else
+ s = format (s, "ether-type 0x%x", type);
+ return s;
+}
+
+u8 * format_ethernet_packet (u8 * s, va_list * args)
+{
+ struct ethhdr * h = va_arg (*args, struct ethhdr *);
+ uword proto = h->h_proto;
+ u8 * payload = (void *) (h + 1);
+ uword indent;
+
+ /* Check for 802.2/802.3 encapsulation. */
+ if (proto < ETH_DATA_LEN)
+ {
+ typedef struct {
+ u8 dsap, ssap, control;
+ u8 orig_code[3];
+ u16 proto;
+ } ethhdr_802_t;
+ ethhdr_802_t * h1 = (void *) (h + 1);
+ proto = h1->proto;
+ payload = (void *) (h1 + 1);
+ }
+
+ indent = format_get_indent (s);
+
+ s = format (s, "%U: %U -> %U",
+ format_ethernet_proto, proto,
+ format_network_address, AF_UNSPEC, h->h_source,
+ format_network_address, AF_UNSPEC, h->h_dest);
+
+ switch (proto)
+ {
+ case ETH_P_ARP:
+ s = format (s, "\n%U%U",
+ format_white_space, indent,
+ format_arp_packet, payload);
+ break;
+ }
+
+ return s;
+}
+
+#ifndef __KERNEL__
+u8 * format_hostname (u8 * s, va_list * args)
+{
+ char buffer[1024];
+ char * b = buffer;
+ if (gethostname (b, sizeof (buffer)) < 0)
+ b = "noname";
+ return format (s, "%s", b);
+}
+#endif
+
+#ifndef __KERNEL__
+u8 * format_timeval (u8 * s, va_list * args)
+{
+ char * fmt = va_arg (*args, char *);
+ struct timeval * tv = va_arg (*args, struct timeval *);
+ struct tm * tm;
+ word msec;
+ char * f, c;
+
+ if (! fmt)
+ fmt = "y/m/d H:M:S:F";
+
+ if (! tv)
+ {
+ static struct timeval now;
+ gettimeofday (&now, 0);
+ tv = &now;
+ }
+
+ msec = flt_round_nearest (1e-3 * tv->tv_usec);
+ if (msec >= 1000)
+ { msec = 0; tv->tv_sec++; }
+
+ {
+ time_t t = tv->tv_sec;
+ tm = localtime (&t);
+ }
+
+ for (f = fmt; *f; f++)
+ {
+ uword what;
+ char * what_fmt = "%d";
+
+ switch (c = *f)
+ {
+ default:
+ vec_add1 (s, c);
+ continue;
+
+ case 'y':
+ what = 1900 + tm->tm_year;
+ what_fmt = "%4d";
+ break;
+ case 'm':
+ what = tm->tm_mon + 1;
+ what_fmt = "%2d";
+ break;
+ case 'd':
+ what = tm->tm_mday;
+ what_fmt = "%2d";
+ break;
+ case 'H':
+ what = tm->tm_hour;
+ what_fmt = "%02d";
+ break;
+ case 'M':
+ what = tm->tm_min;
+ what_fmt = "%02d";
+ break;
+ case 'S':
+ what = tm->tm_sec;
+ what_fmt = "%02d";
+ break;
+ case 'F':
+ what = msec;
+ what_fmt = "%03d";
+ break;
+ }
+
+ s = format (s, what_fmt, what);
+ }
+
+ return s;
+}
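+
+/* Usage sketch (illustrative, not part of the original file): a null
+ fmt selects the default "y/m/d H:M:S:F" layout and a null tv formats
+ the current time of day:
+
+ u8 *s = format (0, "%U", format_timeval,
+ (char *) 0, (struct timeval *) 0);
+*/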
+
+u8 * format_time_float (u8 * s, va_list * args)
+{
+ u8 * fmt = va_arg (*args, u8 *);
+ f64 t = va_arg (*args, f64);
+ struct timeval tv;
+ if (t <= 0)
+ t = unix_time_now ();
+ tv.tv_sec = t;
+ tv.tv_usec = 1e6*(t - tv.tv_sec);
+ return format (s, "%U", format_timeval, fmt, &tv);
+}
+
+u8 * format_signal (u8 * s, va_list * args)
+{
+ uword signum = va_arg (*args, uword);
+ char * t = 0;
+ switch (signum)
+ {
+#define _(x) case x: t = #x; break;
+ _ (SIGHUP);
+ _ (SIGINT);
+ _ (SIGQUIT);
+ _ (SIGILL);
+ _ (SIGTRAP);
+ _ (SIGABRT);
+ _ (SIGBUS);
+ _ (SIGFPE);
+ _ (SIGKILL);
+ _ (SIGUSR1);
+ _ (SIGSEGV);
+ _ (SIGUSR2);
+ _ (SIGPIPE);
+ _ (SIGALRM);
+ _ (SIGTERM);
+#ifdef SIGSTKFLT
+ _ (SIGSTKFLT);
+#endif
+ _ (SIGCHLD);
+ _ (SIGCONT);
+ _ (SIGSTOP);
+ _ (SIGTSTP);
+ _ (SIGTTIN);
+ _ (SIGTTOU);
+ _ (SIGURG);
+ _ (SIGXCPU);
+ _ (SIGXFSZ);
+ _ (SIGVTALRM);
+ _ (SIGPROF);
+ _ (SIGWINCH);
+ _ (SIGIO);
+ _ (SIGPWR);
+#ifdef SIGSYS
+ _ (SIGSYS);
+#endif
+#undef _
+ default:
+ return format (s, "unknown %d", signum);
+ }
+
+ vec_add (s, t, strlen (t));
+ return s;
+}
+
+u8 * format_ucontext_pc (u8 * s, va_list * args)
+{
+ ucontext_t * uc __attribute__((unused));
+ unsigned long * regs = 0;
+ uword reg_no = 0;
+
+ uc = va_arg (*args, ucontext_t *);
+
+#if defined (powerpc)
+ regs = &uc->uc_mcontext.uc_regs->gregs[0];
+#elif defined (powerpc64)
+ regs = &uc->uc_mcontext.uc_regs->gp_regs[0];
+#elif defined (i386) || defined (__x86_64__)
+ regs = (void *) &uc->uc_mcontext.gregs[0];
+#endif
+
+#if defined (powerpc) || defined (powerpc64)
+ reg_no = PT_NIP;
+#elif defined (i386)
+ reg_no = REG_EIP;
+#elif defined (__x86_64__)
+ reg_no = REG_RIP;
+#else
+ reg_no = 0;
+ regs = 0;
+#endif
+
+ if (! regs)
+ return format (s, "unsupported");
+ else
+ return format (s, "%p", regs[reg_no]);
+}
+
+uword
+unformat_unix_gid (unformat_input_t * input, va_list * args)
+{
+ gid_t *gid = va_arg (*args, gid_t *);
+ struct group *grp = 0;
+ int r;
+ u8 *s;
+
+ if (unformat (input, "%d", &r))
+ {
+ grp = getgrgid (r);
+ }
+ else if (unformat (input, "%s", &s))
+ {
+ grp = getgrnam ((char *) s);
+ vec_free (s);
+ }
+ if (grp)
+ {
+ *gid = grp->gr_gid;
+ return 1;
+ }
+ return 0;
+}
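+
+/* Usage sketch (illustrative, not part of the original file;
+ use_gid is a placeholder): an unformat_function_t is driven through
+ the "%U" directive, so this routine accepts either a numeric gid or
+ a group name:
+
+ gid_t gid;
+ if (unformat (input, "gid %U", unformat_unix_gid, &gid))
+ use_gid (gid);
+*/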
+
+#endif /* __KERNEL__ */
diff --git a/src/vppinfra/unix-kelog.c b/src/vppinfra/unix-kelog.c
new file mode 100644
index 00000000..88428ee8
--- /dev/null
+++ b/src/vppinfra/unix-kelog.c
@@ -0,0 +1,415 @@
+/*
+ * Copyright (c) 2010 Cisco and/or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+#include <vppinfra/error.h>
+#include <vppinfra/unix.h>
+#include <vppinfra/elog.h>
+#include <vppinfra/format.h>
+#include <vppinfra/os.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <time.h>
+
+typedef enum
+{
+ RUNNING = 0,
+ WAKEUP,
+} sched_event_type_t;
+
+typedef struct
+{
+ u32 cpu;
+ u8 *task;
+ u32 pid;
+ f64 timestamp;
+ sched_event_type_t type;
+} sched_event_t;
+
+void
+kelog_init (elog_main_t * em, char *kernel_tracer, u32 n_events)
+{
+ int enable_fd, current_tracer_fd, data_fd;
+ int len;
+ struct timespec ts, ts2;
+ char *trace_enable = "/debug/tracing/tracing_enabled";
+ char *current_tracer = "/debug/tracing/current_tracer";
+ char *trace_data = "/debug/tracing/trace";
+ f64 realtime, monotonic;
+ f64 freq, secs_per_clock;
+
+ ASSERT (kernel_tracer);
+
+ /*$$$$ fixme */
+ n_events = 1 << 18;
+
+ /* init first so we won't hurt ourselves if we bail */
+ elog_init (em, n_events);
+
+ enable_fd = open (trace_enable, O_RDWR);
+ if (enable_fd < 0)
+ {
+ clib_warning ("Couldn't open %s", trace_enable);
+ return;
+ }
+ /* disable kernel tracing */
+ if (write (enable_fd, "0\n", 2) != 2)
+ {
+ clib_unix_warning ("disable tracing");
+ close (enable_fd);
+ return;
+ }
+
+ /*
+ * open + clear the data buffer.
+ * see .../linux/kernel/trace/trace.c:tracing_open()
+ */
+ data_fd = open (trace_data, O_RDWR | O_TRUNC);
+ if (data_fd < 0)
+ {
+ clib_warning ("Couldn't open+clear %s", trace_data);
+ return;
+ }
+ close (data_fd);
+
+ /* configure tracing */
+ current_tracer_fd = open (current_tracer, O_RDWR);
+
+ if (current_tracer_fd < 0)
+ {
+ clib_warning ("Couldn't open %s", current_tracer);
+ close (enable_fd);
+ return;
+ }
+
+ len = strlen (kernel_tracer);
+
+ if (write (current_tracer_fd, kernel_tracer, len) != len)
+ {
+ clib_unix_warning ("configure trace");
+ close (current_tracer_fd);
+ close (enable_fd);
+ return;
+ }
+
+ close (current_tracer_fd);
+
+ /*
+ * The kernel event log uses CLOCK_MONOTONIC timestamps,
+ * not CLOCK_REALTIME timestamps. These differ by a constant
+ * but the constant is not available in user mode.
+ * This estimate will be off by one syscall round-trip.
+ */
+ clib_time_init (&em->cpu_timer);
+ em->init_time.cpu = em->cpu_timer.init_cpu_time;
+ syscall (SYS_clock_gettime, CLOCK_MONOTONIC, &ts);
+
+ /* enable kernel tracing */
+ if (write (enable_fd, "1\n", 2) != 2)
+ {
+ clib_unix_warning ("enable tracing");
+ close (enable_fd);
+ return;
+ }
+
+ close (enable_fd);
+}
+
+
+u8 *
+format_sched_event (u8 * s, va_list * va)
+{
+ sched_event_t *e = va_arg (*va, sched_event_t *);
+
+ s = format (s, "cpu %d task %10s type %s timestamp %12.6f\n",
+ e->cpu, e->task, e->type ? "WAKEUP " : "RUNNING", e->timestamp);
+
+ return s;
+}
+
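+/* Parse one record from the ftrace sched-switch text buffer. Comment
+ added for clarity; the exact kernel trace layout is version
+ dependent. The parser skips '#' header lines, scans past the
+ "[cpu]" bracket, reads a secs.usecs timestamp, then looks just past
+ the third ':' for '>' (switch-to, RUNNING) or '+' (wakeup, WAKEUP),
+ followed by the cpu, pid and task-name fields. Returns a pointer to
+ a static event, or 0 at end of buffer or on a malformed line. */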
+sched_event_t *
+parse_sched_switch_trace (u8 * tdata, u32 * index)
+{
+ u8 *cp = tdata + *index;
+ u8 *limit = tdata + vec_len (tdata);
+ int colons;
+ static sched_event_t event;
+ sched_event_t *e = &event;
+ static u8 *task_name;
+ u32 secs, usecs;
+ int i;
+
+again:
+ /* eat leading w/s */
+ while (cp < limit && (*cp == ' ' || *cp == '\t'))
+ cp++;
+ if (cp == limit)
+ return 0;
+
+ /* header line */
+ if (*cp == '#')
+ {
+ while (cp < limit && (*cp != '\n'))
+ cp++;
+ if (*cp == '\n')
+ {
+ cp++;
+ goto again;
+ }
+ clib_warning ("bugger 0");
+ return 0;
+ }
+
+ while (cp < limit && *cp != ']')
+ cp++;
+
+ if (cp == limit)
+ return 0;
+
+ if (*cp != ']')
+ {
+ clib_warning ("bugger 0.1");
+ return 0;
+ }
+
+ cp++;
+ while (cp < limit && (*cp == ' ' || *cp == '\t'))
+ cp++;
+ if (cp == limit)
+ {
+ clib_warning ("bugger 0.2");
+ return 0;
+ }
+
+ secs = atoi (cp);
+
+ while (cp < limit && (*cp != '.'))
+ cp++;
+
+ if (cp == limit)
+ {
+ clib_warning ("bugger 0.3");
+ return 0;
+ }
+
+ cp++;
+
+ usecs = atoi (cp);
+
+ e->timestamp = ((f64) secs) + ((f64) usecs) * 1e-6;
+
+ /* eat up to third colon */
+ for (i = 0; i < 3; i++)
+ {
+ while (cp < limit && *cp != ':')
+ cp++;
+ cp++;
+ }
+ --cp;
+ if (*cp != ':')
+ {
+ clib_warning ("bugger 1");
+ return 0;
+ }
+ /* aim at '>' (switch-to) / '+' (wakeup) */
+ cp += 5;
+ if (cp >= limit)
+ {
+ clib_warning ("bugger 2");
+ return 0;
+ }
+ if (*cp == '>')
+ e->type = RUNNING;
+ else if (*cp == '+')
+ e->type = WAKEUP;
+ else
+ {
+ clib_warning ("bugger 3");
+ return 0;
+ }
+
+ cp += 3;
+ if (cp >= limit)
+ {
+ clib_warning ("bugger 4");
+ return 0;
+ }
+
+ e->cpu = atoi (cp);
+ cp += 4;
+
+ if (cp >= limit)
+ {
+ clib_warning ("bugger 4.5");
+ return 0;
+ }
+ while (cp < limit && (*cp == ' ' || *cp == '\t'))
+ cp++;
+
+ e->pid = atoi (cp);
+
+ for (i = 0; i < 2; i++)
+ {
+ while (cp < limit && *cp != ':')
+ cp++;
+ cp++;
+ }
+ --cp;
+ if (*cp != ':')
+ {
+ clib_warning ("bugger 5");
+ return 0;
+ }
+
+ cp += 3;
+ if (cp >= limit)
+ {
+ clib_warning ("bugger 6");
+ return 0;
+ }
+ while (cp < limit && (*cp != ' ' && *cp != '\n'))
+ {
+ vec_add1 (task_name, *cp);
+ cp++;
+ }
+ vec_add1 (task_name, 0);
+ /* _vec_len() = 0 in caller */
+ e->task = task_name;
+
+ if (cp < limit)
+ cp++;
+
+ *index = cp - tdata;
+ return e;
+}
+
+static u32
+elog_id_for_pid (elog_main_t * em, u8 * name, u32 pid)
+{
+ uword *p, r;
+ mhash_t *h = &em->string_table_hash;
+
+ if (!em->string_table_hash.hash)
+ mhash_init (h, sizeof (uword), sizeof (pid));
+
+ p = mhash_get (h, &pid);
+ if (p)
+ return p[0];
+ r = elog_string (em, "%s(%d)", name, pid);
+ mhash_set (h, &pid, r, /* old_value */ 0);
+ return r;
+}
+
+void
+kelog_collect_sched_switch_trace (elog_main_t * em)
+{
+ int enable_fd, data_fd;
+ char *trace_enable = "/debug/tracing/tracing_enabled";
+ char *trace_data = "/debug/tracing/trace";
+ u8 *data = 0;
+ u8 *dp;
+ int bytes, total_bytes;
+ u32 pos;
+ sched_event_t *evt;
+ u64 nsec_to_add;
+ u32 index;
+ f64 clocks_per_sec;
+
+ enable_fd = open (trace_enable, O_RDWR);
+ if (enable_fd < 0)
+ {
+ clib_warning ("Couldn't open %s", trace_enable);
+ return;
+ }
+ /* disable kernel tracing */
+ if (write (enable_fd, "0\n", 2) != 2)
+ {
+ clib_unix_warning ("disable tracing");
+ close (enable_fd);
+ return;
+ }
+ close (enable_fd);
+
+ /* Read the trace data */
+ data_fd = open (trace_data, O_RDWR);
+ if (data_fd < 0)
+ {
+ clib_warning ("Couldn't open %s", trace_data);
+ return;
+ }
+
+ /*
+ * Extract trace into a vector. Note that seq_printf() [kernel]
+ * is not guaranteed to produce 4096 bytes at a time.
+ */
+ vec_validate (data, 4095);
+ total_bytes = 0;
+ pos = 0;
+ while (1)
+ {
+ bytes = read (data_fd, data + pos, 4096);
+ if (bytes <= 0)
+ break;
+
+ total_bytes += bytes;
+ _vec_len (data) = total_bytes;
+
+ pos = vec_len (data);
+ vec_validate (data, vec_len (data) + 4095);
+ }
+ vec_add1 (data, 0);
+
+ /* Synthesize events */
+ em->is_enabled = 1;
+
+ index = 0;
+ while ((evt = parse_sched_switch_trace (data, &index)))
+ {
+ u64 fake_cpu_clock;
+
+ fake_cpu_clock = evt->timestamp * em->cpu_timer.clocks_per_second;
+ {
+ ELOG_TYPE_DECLARE (e) = {
+ .format = "%d: %s %s",
+ .format_args = "i4T4t4",
+ .n_enum_strings = 2,
+ .enum_strings = { "running", "wakeup" },
+ };
+ struct
+ {
+ u32 cpu, string_table_offset, which;
+ } *ed;
+
+ ed = elog_event_data_not_inline (em, &__ELOG_TYPE_VAR (e),
+ &em->default_track, fake_cpu_clock);
+ ed->cpu = evt->cpu;
+ ed->string_table_offset = elog_id_for_pid (em, evt->task, evt->pid);
+ ed->which = evt->type;
+ }
+ _vec_len (evt->task) = 0;
+ }
+ em->is_enabled = 0;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/unix-misc.c b/src/vppinfra/unix-misc.c
new file mode 100644
index 00000000..361015b4
--- /dev/null
+++ b/src/vppinfra/unix-misc.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/error.h>
+#include <vppinfra/os.h>
+#include <vppinfra/unix.h>
+
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/uio.h> /* writev */
+#include <fcntl.h>
+#include <stdio.h> /* for sprintf */
+
+__thread uword __os_thread_index = 0;
+
+clib_error_t *
+unix_file_n_bytes (char *file, uword * result)
+{
+ struct stat s;
+
+ if (stat (file, &s) < 0)
+ return clib_error_return_unix (0, "stat `%s'", file);
+
+ if (S_ISREG (s.st_mode))
+ *result = s.st_size;
+ else
+ *result = 0;
+
+ return /* no error */ 0;
+}
+
+clib_error_t *
+unix_file_read_contents (char *file, u8 * result, uword n_bytes)
+{
+ int fd = -1;
+ uword n_done, n_left;
+ clib_error_t *error = 0;
+ u8 *v = result;
+
+ if ((fd = open (file, 0)) < 0)
+ return clib_error_return_unix (0, "open `%s'", file);
+
+ n_left = n_bytes;
+ n_done = 0;
+ while (n_left > 0)
+ {
+ int n_read;
+ if ((n_read = read (fd, v + n_done, n_left)) < 0)
+ {
+ error = clib_error_return_unix (0, "open `%s'", file);
+ goto done;
+ }
+
+ /* End of file. */
+ if (n_read == 0)
+ break;
+
+ n_left -= n_read;
+ n_done += n_read;
+ }
+
+ if (n_left > 0)
+ {
+ error =
+ clib_error_return (0,
+ " `%s' expected to read %wd bytes; read only %wd",
+ file, n_bytes, n_bytes - n_left);
+ goto done;
+ }
+
+done:
+ close (fd);
+ return error;
+}
+
+clib_error_t *
+unix_file_contents (char *file, u8 ** result)
+{
+ uword n_bytes;
+ clib_error_t *error = 0;
+ u8 *v;
+
+ if ((error = unix_file_n_bytes (file, &n_bytes)))
+ return error;
+
+ v = 0;
+ vec_resize (v, n_bytes);
+
+ error = unix_file_read_contents (file, v, n_bytes);
+
+ if (error)
+ vec_free (v);
+ else
+ *result = v;
+
+ return error;
+}
+
+clib_error_t *
+unix_proc_file_contents (char *file, u8 ** result)
+{
+ u8 *rv = 0;
+ uword pos;
+ int bytes, fd;
+
+ /* Unfortunately, stat(/proc/XXX) returns zero... */
+ fd = open (file, O_RDONLY);
+
+ if (fd < 0)
+ return clib_error_return_unix (0, "open `%s'", file);
+
+ vec_validate (rv, 4095);
+ pos = 0;
+ while (1)
+ {
+ bytes = read (fd, rv + pos, 4096);
+ if (bytes < 0)
+ {
+ close (fd);
+ vec_free (rv);
+ return clib_error_return_unix (0, "read '%s'", file);
+ }
+
+ if (bytes == 0)
+ {
+ _vec_len (rv) = pos;
+ break;
+ }
+ pos += bytes;
+ vec_validate (rv, pos + 4095);
+ }
+ *result = rv;
+ close (fd);
+ return 0;
+}
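+
+/* Usage sketch (illustrative, not part of the original file):
+
+ u8 *cmdline = 0;
+ if (!unix_proc_file_contents ("/proc/self/cmdline", &cmdline))
+ {
+ ... consume the vector ...
+ vec_free (cmdline);
+ }
+*/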
+
+void os_panic (void) __attribute__ ((weak));
+
+void
+os_panic (void)
+{
+ abort ();
+}
+
+void os_exit (int) __attribute__ ((weak));
+
+void
+os_exit (int code)
+{
+ exit (code);
+}
+
+void os_puts (u8 * string, uword string_length, uword is_error)
+ __attribute__ ((weak));
+
+void
+os_puts (u8 * string, uword string_length, uword is_error)
+{
+ int cpu = os_get_thread_index ();
+ int nthreads = os_get_nthreads ();
+ char buf[64];
+ int fd = is_error ? 2 : 1;
+ struct iovec iovs[2];
+ int n_iovs = 0;
+
+ if (nthreads > 1)
+ {
+ snprintf (buf, sizeof (buf), "%d: ", cpu);
+
+ iovs[n_iovs].iov_base = buf;
+ iovs[n_iovs].iov_len = strlen (buf);
+ n_iovs++;
+ }
+
+ iovs[n_iovs].iov_base = string;
+ iovs[n_iovs].iov_len = string_length;
+ n_iovs++;
+
+ /* Best-effort write; nothing useful to do on failure. */
+ if (writev (fd, iovs, n_iovs) < 0)
+ ;
+}
+
+void os_out_of_memory (void) __attribute__ ((weak));
+void
+os_out_of_memory (void)
+{
+ os_panic ();
+}
+
+uword os_get_nthreads (void) __attribute__ ((weak));
+uword
+os_get_nthreads (void)
+{
+ return 1;
+}
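+
+/* The definitions above are weak so that an application linked
+ against vppinfra can substitute its own; e.g. (hypothetical
+ override, flush_app_logs is a placeholder):
+
+ void os_panic (void)
+ {
+ flush_app_logs ();
+ abort ();
+ }
+*/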
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/unix.h b/src/vppinfra/unix.h
new file mode 100644
index 00000000..29114cfe
--- /dev/null
+++ b/src/vppinfra/unix.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_clib_unix_h
+#define included_clib_unix_h
+
+#include <vppinfra/error.h>
+
+/* Number of bytes in a Unix file. */
+clib_error_t *unix_file_n_bytes (char *file, uword * result);
+
+/* Read file contents into given buffer. */
+clib_error_t *unix_file_read_contents (char *file, u8 * result,
+ uword n_bytes);
+
+/* Read and return contents of Unix file. */
+clib_error_t *unix_file_contents (char *file, u8 ** result);
+
+/* As above but for /proc file system on Linux. */
+clib_error_t *unix_proc_file_contents (char *file, u8 ** result);
+
+#endif /* included_clib_unix_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/unix_error.def b/src/vppinfra/unix_error.def
new file mode 100644
index 00000000..76633dbb
--- /dev/null
+++ b/src/vppinfra/unix_error.def
@@ -0,0 +1,145 @@
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+_ (EPERM, "Operation not permitted")
+_ (ENOENT, "No such file or directory")
+_ (ESRCH, "No such process")
+_ (EINTR, "Interrupted system call")
+_ (EIO, "I/O error")
+_ (ENXIO, "No such device or address")
+_ (E2BIG, "Arg list too long")
+_ (ENOEXEC, "Exec format error")
+_ (EBADF, "Bad file number")
+_ (ECHILD, "No child processes")
+_ (ENOMEM, "Out of memory")
+_ (EACCES, "Permission denied")
+_ (EFAULT, "Bad address")
+_ (ENOTBLK, "Block device required")
+_ (EBUSY, "Device or resource busy")
+_ (EEXIST, "File exists")
+_ (EXDEV, "Cross-device link")
+_ (ENODEV, "No such device")
+_ (ENOTDIR, "Not a directory")
+_ (EISDIR, "Is a directory")
+_ (EINVAL, "Invalid argument")
+_ (ENFILE, "File table overflow")
+_ (EMFILE, "Too many open files")
+_ (ENOTTY, "Not a typewriter")
+_ (ETXTBSY, "Text file busy")
+_ (EFBIG, "File too large")
+_ (ENOSPC, "No space left on device")
+_ (ESPIPE, "Illegal seek")
+_ (EROFS, "Read-only file system")
+_ (EMLINK, "Too many links")
+_ (EPIPE, "Broken pipe")
+_ (EDOM, "Math argument out of domain of func")
+_ (ERANGE, "Math result not representable")
+_ (EDEADLK, "Resource deadlock would occur")
+_ (ENAMETOOLONG, "File name too long")
+_ (ENOLCK, "No record locks available")
+_ (ENOSYS, "Function not implemented")
+_ (ENOTEMPTY, "Directory not empty")
+_ (ELOOP, "Too many symbolic links encountered")
+_ (EWOULDBLOCK, "Operation would block")
+_ (ENOMSG, "No message of desired type")
+_ (EIDRM, "Identifier removed")
+_ (ECHRNG, "Channel number out of range")
+_ (EL2NSYNC, "Level 2 not synchronized")
+_ (EL3HLT, "Level 3 halted")
+_ (EL3RST, "Level 3 reset")
+_ (ELNRNG, "Link number out of range")
+_ (EUNATCH, "Protocol driver not attached")
+_ (ENOCSI, "No CSI structure available")
+_ (EL2HLT, "Level 2 halted")
+_ (EBADE, "Invalid exchange")
+_ (EBADR, "Invalid request descriptor")
+_ (EXFULL, "Exchange full")
+_ (ENOANO, "No anode")
+_ (EBADRQC, "Invalid request code")
+_ (EBADSLT, "Invalid slot")
+_ (EBFONT, "Bad font file format")
+_ (ENOSTR, "Device not a stream")
+_ (ENODATA, "No data available")
+_ (ETIME, "Timer expired")
+_ (ENOSR, "Out of streams resources")
+_ (ENONET, "Machine is not on the network")
+_ (ENOPKG, "Package not installed")
+_ (EREMOTE, "Object is remote")
+_ (ENOLINK, "Link has been severed")
+_ (EADV, "Advertise error")
+_ (ESRMNT, "Srmount error")
+_ (ECOMM, "Communication error on send")
+_ (EPROTO, "Protocol error")
+_ (EMULTIHOP, "Multihop attempted")
+_ (EDOTDOT, "RFS specific error")
+_ (EBADMSG, "Not a data message")
+_ (EOVERFLOW, "Value too large for defined data type")
+_ (ENOTUNIQ, "Name not unique on network")
+_ (EBADFD, "File descriptor in bad state")
+_ (EREMCHG, "Remote address changed")
+_ (ELIBACC, "Can not access a needed shared library")
+_ (ELIBBAD, "Accessing a corrupted shared library")
+_ (ELIBSCN, "lib section in a.out corrupted")
+_ (ELIBMAX, "Attempting to link in too many shared libraries")
+_ (ELIBEXEC, "Cannot exec a shared library directly")
+_ (EILSEQ, "Illegal byte sequence")
+_ (ERESTART, "Interrupted system call should be restarted")
+_ (ESTRPIPE, "Streams pipe error")
+_ (EUSERS, "Too many users")
+_ (ENOTSOCK, "Socket operation on non-socket")
+_ (EDESTADDRREQ, "Destination address required")
+_ (EMSGSIZE, "Message too long")
+_ (EPROTOTYPE, "Protocol wrong type for socket")
+_ (ENOPROTOOPT, "Protocol not available")
+_ (EPROTONOSUPPORT, "Protocol not supported")
+_ (ESOCKTNOSUPPORT, "Socket type not supported")
+_ (EOPNOTSUPP, "Operation not supported on transport endpoint")
+_ (EPFNOSUPPORT, "Protocol family not supported")
+_ (EAFNOSUPPORT, "Address family not supported by protocol")
+_ (EADDRINUSE, "Address already in use")
+_ (EADDRNOTAVAIL, "Cannot assign requested address")
+_ (ENETDOWN, "Network is down")
+_ (ENETUNREACH, "Network is unreachable")
+_ (ENETRESET, "Network dropped connection because of reset")
+_ (ECONNABORTED, "Software caused connection abort")
+_ (ECONNRESET, "Connection reset by peer")
+_ (ENOBUFS, "No buffer space available")
+_ (EISCONN, "Transport endpoint is already connected")
+_ (ENOTCONN, "Transport endpoint is not connected")
+_ (ESHUTDOWN, "Cannot send after transport endpoint shutdown")
+_ (ETOOMANYREFS, "Too many references: cannot splice")
+_ (ETIMEDOUT, "Connection timed out")
+_ (ECONNREFUSED, "Connection refused")
+_ (EHOSTDOWN, "Host is down")
+_ (EHOSTUNREACH, "No route to host")
+_ (EALREADY, "Operation already in progress")
+_ (EINPROGRESS, "Operation now in progress")
+_ (ESTALE, "Stale NFS file handle")
+_ (EUCLEAN, "Structure needs cleaning")
+_ (ENOTNAM, "Not a XENIX named type file")
+_ (ENAVAIL, "No XENIX semaphores available")
+_ (EISNAM, "Is a named type file")
+_ (EREMOTEIO, "Remote I/O error")
+_ (EDQUOT, "Quota exceeded")
+_ (ENOMEDIUM, "No medium found")
+_ (EMEDIUMTYPE, "Wrong medium type")
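+
+/* Consumption sketch (illustrative, not part of the original file):
+ a client defines _() before including this .def, e.g. to build an
+ errno-indexed description table:
+
+ #define _(e, s) [e] = s,
+ static char *unix_error_strings[] = {
+ #include <vppinfra/unix_error.def>
+ };
+ #undef _
+*/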
diff --git a/src/vppinfra/valgrind.h b/src/vppinfra/valgrind.h
new file mode 100644
index 00000000..e74d7e82
--- /dev/null
+++ b/src/vppinfra/valgrind.h
@@ -0,0 +1,4030 @@
+/* -*- c -*-
+ ----------------------------------------------------------------
+
+ Notice that the following BSD-style license applies to this one
+ file (valgrind.h) only. The rest of Valgrind is licensed under the
+ terms of the GNU General Public License, version 2, unless
+ otherwise indicated. See the COPYING file in the source
+ distribution for details.
+
+ ----------------------------------------------------------------
+
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. The origin of this software must not be misrepresented; you must
+ not claim that you wrote the original software. If you use this
+ software in a product, an acknowledgment in the product
+ documentation would be appreciated but is not required.
+
+ 3. Altered source versions must be plainly marked as such, and must
+ not be misrepresented as being the original software.
+
+ 4. The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ----------------------------------------------------------------
+
+ Notice that the above BSD-style license applies to this one file
+ (valgrind.h) only. The entire rest of Valgrind is licensed under
+ the terms of the GNU General Public License, version 2. See the
+ COPYING file in the source distribution for details.
+
+ ----------------------------------------------------------------
+*/
+
+
+/* This file is for inclusion into client (your!) code.
+
+ You can use these macros to manipulate and query Valgrind's
+ execution inside your own programs.
+
+ The resulting executables will still run without Valgrind, just a
+ little bit more slowly than they otherwise would, but otherwise
+ unchanged. When not running on valgrind, each client request
+ consumes very few (e.g. 7) instructions, so the resulting performance
+ loss is negligible unless you plan to execute client requests
+ millions of times per second. Nevertheless, if that is still a
+ problem, you can compile with the NVALGRIND symbol defined (gcc
+ -DNVALGRIND) so that client requests are not even compiled in. */
+
+#ifndef __VALGRIND_H
+#define __VALGRIND_H
+
+#include <stdarg.h>
+
+/* Nb: this file might be included in a file compiled with -ansi. So
+ we can't use C++ style "//" comments nor the "asm" keyword (instead
+ use "__asm__"). */
+
+/* Derive some tags indicating what the target platform is. Note
+ that in this file we're using the compiler's CPP symbols for
+ identifying architectures, which are different to the ones we use
+ within the rest of Valgrind. Note, __powerpc__ is active for both
+ 32 and 64-bit PPC, whereas __powerpc64__ is only active for the
+ latter (on Linux, that is). */
+#undef PLAT_x86_linux
+#undef PLAT_amd64_linux
+#undef PLAT_ppc32_linux
+#undef PLAT_ppc64_linux
+#undef PLAT_ppc32_aix5
+#undef PLAT_ppc64_aix5
+
+
+#if defined(_AIX) && defined(__64BIT__)
+#define PLAT_ppc64_aix5 1
+#elif defined(_AIX) && !defined(__64BIT__)
+#define PLAT_ppc32_aix5 1
+#elif defined(__APPLE__) && defined(__i386__)
+#define PLAT_x86_darwin 1
+#elif defined(__APPLE__) && defined(__x86_64__)
+#define PLAT_amd64_darwin 1
+#elif defined(__i386__)
+#define PLAT_x86_linux 1
+#elif defined(__x86_64__)
+#define PLAT_amd64_linux 1
+#elif defined(__powerpc__) && !defined(__powerpc64__)
+#define PLAT_ppc32_linux 1
+#elif defined(__powerpc__) && defined(__powerpc64__)
+#define PLAT_ppc64_linux 1
+#else
+/* If we're not compiling for our target platform, don't generate
+ any inline asms. */
+#if !defined(NVALGRIND)
+#define NVALGRIND 1
+#endif
+#endif
+
+
+/* ------------------------------------------------------------------ */
+/* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS. There is nothing */
+/* in here of use to end-users -- skip to the next section. */
+/* ------------------------------------------------------------------ */
+
+#if defined(NVALGRIND)
+
+/* Define NVALGRIND to completely remove the Valgrind magic sequence
+ from the compiled code (analogous to NDEBUG's effects on
+ assert()) */
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ { \
+ (_zzq_rlval) = (_zzq_default); \
+ }
+
+#else /* ! NVALGRIND */
+
+/* The following defines the magic code sequences which the JITter
+ spots and handles magically. Don't look too closely at them as
+ they will rot your brain.
+
+ The assembly code sequences for all architectures are in this one
+ file. This is because this file must be stand-alone, and we don't
+ want to have multiple files.
+
+ For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default
+ value gets put in the return slot, so that everything works when
+ this is executed not under Valgrind. Args are passed in a memory
+ block, and so there's no intrinsic limit to the number that could
+ be passed, but it's currently five.
+
+ The macro args are:
+ _zzq_rlval result lvalue
+ _zzq_default default value (result returned when running on real CPU)
+ _zzq_request request code
+ _zzq_arg1..5 request params
+
+ The other two macros are used to support function wrapping, and are
+ a lot simpler. VALGRIND_GET_NR_CONTEXT returns the value of the
+ guest's NRADDR pseudo-register and whatever other information is
+ needed to safely run the call original from the wrapper: on
+ ppc64-linux, the R2 value at the divert point is also needed. This
+ information is abstracted into a user-visible type, OrigFn.
+
+ VALGRIND_CALL_NOREDIR_* behaves the same as the following on the
+ guest, but guarantees that the branch instruction will not be
+ redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64:
+ branch-and-link-to-r11. VALGRIND_CALL_NOREDIR is just text, not a
+ complete inline asm, since it needs to be combined with more magic
+ inline asm stuff to be useful.
+*/
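+
+/* Illustrative use (a sketch, not part of the original header; REQ
+ and the args are placeholders for a request code and its
+ parameters): the default is returned when the program runs on a
+ real CPU, and Valgrind's answer otherwise:
+
+ unsigned int rc;
+ VALGRIND_DO_CLIENT_REQUEST (rc, 0, REQ, arg1, arg2, 0, 0, 0);
+*/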
+
+/* ------------------------- x86-{linux,darwin} ---------------- */
+
+#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin)
+
+typedef struct
+{
+ unsigned int nraddr; /* where's the code? */
+}
+OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "roll $3, %%edi ; roll $13, %%edi\n\t" \
+ "roll $29, %%edi ; roll $19, %%edi\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ { volatile unsigned int _zzq_args[6]; \
+ volatile unsigned int _zzq_result; \
+ _zzq_args[0] = (unsigned int)(_zzq_request); \
+ _zzq_args[1] = (unsigned int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned int)(_zzq_arg5); \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %EDX = client_request ( %EAX ) */ \
+ "xchgl %%ebx,%%ebx" \
+ : "=d" (_zzq_result) \
+ : "a" (&_zzq_args[0]), "0" (_zzq_default) \
+ : "cc", "memory" \
+ ); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ volatile unsigned int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %EAX = guest_NRADDR */ \
+ "xchgl %%ecx,%%ecx" \
+ : "=a" (__addr) \
+ : \
+ : "cc", "memory" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_CALL_NOREDIR_EAX \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* call-noredir *%EAX */ \
+ "xchgl %%edx,%%edx\n\t"
+#endif /* PLAT_x86_linux || PLAT_x86_darwin */
+
+/* ------------------------ amd64-{linux,darwin} --------------- */
+
+#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin)
+
+typedef struct
+{
+ unsigned long long int nraddr; /* where's the code? */
+}
+OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \
+ "rolq $61, %%rdi ; rolq $51, %%rdi\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ { volatile unsigned long long int _zzq_args[6]; \
+ volatile unsigned long long int _zzq_result; \
+ _zzq_args[0] = (unsigned long long int)(_zzq_request); \
+ _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %RDX = client_request ( %RAX ) */ \
+ "xchgq %%rbx,%%rbx" \
+ : "=d" (_zzq_result) \
+ : "a" (&_zzq_args[0]), "0" (_zzq_default) \
+ : "cc", "memory" \
+ ); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ volatile unsigned long long int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %RAX = guest_NRADDR */ \
+ "xchgq %%rcx,%%rcx" \
+ : "=a" (__addr) \
+ : \
+ : "cc", "memory" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_CALL_NOREDIR_RAX \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* call-noredir *%RAX */ \
+ "xchgq %%rdx,%%rdx\n\t"
+#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
+
+/* ------------------------ ppc32-linux ------------------------ */
+
+#if defined(PLAT_ppc32_linux)
+
+typedef struct
+{
+ unsigned int nraddr; /* where's the code? */
+}
+OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \
+ "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ \
+ { unsigned int _zzq_args[6]; \
+ unsigned int _zzq_result; \
+ unsigned int* _zzq_ptr; \
+ _zzq_args[0] = (unsigned int)(_zzq_request); \
+ _zzq_args[1] = (unsigned int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned int)(_zzq_arg5); \
+ _zzq_ptr = _zzq_args; \
+ __asm__ volatile("mr 3,%1\n\t" /*default*/ \
+ "mr 4,%2\n\t" /*ptr*/ \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = client_request ( %R4 ) */ \
+ "or 1,1,1\n\t" \
+ "mr %0,3" /*result*/ \
+ : "=b" (_zzq_result) \
+ : "b" (_zzq_default), "b" (_zzq_ptr) \
+ : "cc", "memory", "r3", "r4"); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ unsigned int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR */ \
+ "or 2,2,2\n\t" \
+ "mr %0,3" \
+ : "=b" (__addr) \
+ : \
+ : "cc", "memory", "r3" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* branch-and-link-to-noredir *%R11 */ \
+ "or 3,3,3\n\t"
+#endif /* PLAT_ppc32_linux */
+
+/* ------------------------ ppc64-linux ------------------------ */
+
+#if defined(PLAT_ppc64_linux)
+
+typedef struct
+{
+ unsigned long long int nraddr; /* where's the code? */
+ unsigned long long int r2; /* what tocptr do we need? */
+}
+OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
+ "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ \
+ { unsigned long long int _zzq_args[6]; \
+ register unsigned long long int _zzq_result __asm__("r3"); \
+ register unsigned long long int* _zzq_ptr __asm__("r4"); \
+ _zzq_args[0] = (unsigned long long int)(_zzq_request); \
+ _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
+ _zzq_ptr = _zzq_args; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = client_request ( %R4 ) */ \
+ "or 1,1,1" \
+ : "=r" (_zzq_result) \
+ : "0" (_zzq_default), "r" (_zzq_ptr) \
+ : "cc", "memory"); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ register unsigned long long int __addr __asm__("r3"); \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR */ \
+ "or 2,2,2" \
+ : "=r" (__addr) \
+ : \
+ : "cc", "memory" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR_GPR2 */ \
+ "or 4,4,4" \
+ : "=r" (__addr) \
+ : \
+ : "cc", "memory" \
+ ); \
+ _zzq_orig->r2 = __addr; \
+ }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* branch-and-link-to-noredir *%R11 */ \
+ "or 3,3,3\n\t"
+
+#endif /* PLAT_ppc64_linux */
+
+/* ------------------------ ppc32-aix5 ------------------------- */
+
+#if defined(PLAT_ppc32_aix5)
+
+typedef struct
+{
+ unsigned int nraddr; /* where's the code? */
+ unsigned int r2; /* what tocptr do we need? */
+}
+OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \
+ "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ \
+ { unsigned int _zzq_args[7]; \
+ register unsigned int _zzq_result; \
+ register unsigned int* _zzq_ptr; \
+ _zzq_args[0] = (unsigned int)(_zzq_request); \
+ _zzq_args[1] = (unsigned int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned int)(_zzq_arg5); \
+ _zzq_args[6] = (unsigned int)(_zzq_default); \
+ _zzq_ptr = _zzq_args; \
+ __asm__ volatile("mr 4,%1\n\t" \
+ "lwz 3, 24(4)\n\t" \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = client_request ( %R4 ) */ \
+ "or 1,1,1\n\t" \
+ "mr %0,3" \
+ : "=b" (_zzq_result) \
+ : "b" (_zzq_ptr) \
+ : "r3", "r4", "cc", "memory"); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ register unsigned int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR */ \
+ "or 2,2,2\n\t" \
+ "mr %0,3" \
+ : "=b" (__addr) \
+ : \
+ : "r3", "cc", "memory" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR_GPR2 */ \
+ "or 4,4,4\n\t" \
+ "mr %0,3" \
+ : "=b" (__addr) \
+ : \
+ : "r3", "cc", "memory" \
+ ); \
+ _zzq_orig->r2 = __addr; \
+ }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* branch-and-link-to-noredir *%R11 */ \
+ "or 3,3,3\n\t"
+
+#endif /* PLAT_ppc32_aix5 */
+
+/* ------------------------ ppc64-aix5 ------------------------- */
+
+#if defined(PLAT_ppc64_aix5)
+
+typedef struct
+{
+ unsigned long long int nraddr; /* where's the code? */
+ unsigned long long int r2; /* what tocptr do we need? */
+}
+OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
+ "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ \
+ { unsigned long long int _zzq_args[7]; \
+ register unsigned long long int _zzq_result; \
+ register unsigned long long int* _zzq_ptr; \
+ _zzq_args[0] = (unsigned long long int)(_zzq_request); \
+ _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
+ _zzq_args[6] = (unsigned long long int)(_zzq_default); \
+ _zzq_ptr = _zzq_args; \
+ __asm__ volatile("mr 4,%1\n\t" \
+ "ld 3, 48(4)\n\t" \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = client_request ( %R4 ) */ \
+ "or 1,1,1\n\t" \
+ "mr %0,3" \
+ : "=b" (_zzq_result) \
+ : "b" (_zzq_ptr) \
+ : "r3", "r4", "cc", "memory"); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ register unsigned long long int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR */ \
+ "or 2,2,2\n\t" \
+ "mr %0,3" \
+ : "=b" (__addr) \
+ : \
+ : "r3", "cc", "memory" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR_GPR2 */ \
+ "or 4,4,4\n\t" \
+ "mr %0,3" \
+ : "=b" (__addr) \
+ : \
+ : "r3", "cc", "memory" \
+ ); \
+ _zzq_orig->r2 = __addr; \
+ }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* branch-and-link-to-noredir *%R11 */ \
+ "or 3,3,3\n\t"
+
+#endif /* PLAT_ppc64_aix5 */
+
+/* Insert assembly code for other platforms here... */
+
+#endif /* NVALGRIND */
+
+
+/* ------------------------------------------------------------------ */
+/* PLATFORM SPECIFICS for FUNCTION WRAPPING. This is all very */
+/* ugly. It's the least-worst tradeoff I can think of. */
+/* ------------------------------------------------------------------ */
+
+/* This section defines magic (a.k.a. appalling-hack) macros for doing
+ guaranteed-no-redirection macros, so as to get from function
+ wrappers to the functions they are wrapping. The whole point is to
+ construct standard call sequences, but to do the call itself with a
+ special no-redirect call pseudo-instruction that the JIT
+ understands and handles specially. This section is long and
+ repetitious, and I can't see a way to make it shorter.
+
+ The naming scheme is as follows:
+
+ CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc}
+
+ 'W' stands for "word" and 'v' for "void". Hence there are
+ different macros for calling arity 0, 1, 2, 3, 4, etc, functions,
+ and for each, the possibility of returning a word-typed result, or
+ no result.
+*/
+
+/* Use these to write the name of your wrapper. NOTE: duplicates
+ VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. */
+
+/* Use an extra level of macroisation so as to ensure the soname/fnname
+ args are fully macro-expanded before pasting them together. */
+#define VG_CONCAT4(_aa,_bb,_cc,_dd) _aa##_bb##_cc##_dd
+
+#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname) \
+ VG_CONCAT4(_vgwZU_,soname,_,fnname)
+
+#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname) \
+ VG_CONCAT4(_vgwZZ_,soname,_,fnname)
+
+/* Use this macro from within a wrapper function to collect the
+ context (address and possibly other info) of the original function.
+ Once you have that you can then use it in one of the CALL_FN_
+ macros. The type of the argument _lval is OrigFn. */
+#define VALGRIND_GET_ORIG_FN(_lval) VALGRIND_GET_NR_CONTEXT(_lval)
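+
+/* Putting the pieces together, along the lines of the Valgrind
+ manual (a sketch, not part of this header): a wrapper for a
+ function "int foo (int)" in an object with no soname:
+
+ int I_WRAP_SONAME_FNNAME_ZU (NONE, foo) (int x)
+ {
+ int result;
+ OrigFn fn;
+ VALGRIND_GET_ORIG_FN (fn);
+ CALL_FN_W_W (result, fn, x);
+ return result;
+ }
+*/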
+
+/* Derivatives of the main macros below, for calling functions
+ returning void. */
+
+#define CALL_FN_v_v(fnptr) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_v(_junk,fnptr); } while (0)
+
+#define CALL_FN_v_W(fnptr, arg1) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_W(_junk,fnptr,arg1); } while (0)
+
+#define CALL_FN_v_WW(fnptr, arg1,arg2) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0)
+
+#define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0)
+
+#define CALL_FN_v_WWWW(fnptr, arg1,arg2,arg3,arg4) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_WWWW(_junk,fnptr,arg1,arg2,arg3,arg4); } while (0)
+
+#define CALL_FN_v_5W(fnptr, arg1,arg2,arg3,arg4,arg5) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_5W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5); } while (0)
+
+#define CALL_FN_v_6W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_6W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6); } while (0)
+
+#define CALL_FN_v_7W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6,arg7) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_7W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6,arg7); } while (0)
+
+/* ------------------------- x86-{linux,darwin} ---------------- */
+
+#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin)
+
+/* These regs are trashed by the hidden call. No need to mention eax
+ as gcc can already see that, plus causes gcc to bomb. */
+#define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx"
+
+/* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned
+ long) == 4. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ __asm__ volatile( \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $4, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ __asm__ volatile( \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $8, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ __asm__ volatile( \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $12, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ __asm__ volatile( \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $16, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ __asm__ volatile( \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $20, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ __asm__ volatile( \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $24, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ __asm__ volatile( \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $28, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ __asm__ volatile( \
+ "pushl 32(%%eax)\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $32, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ __asm__ volatile( \
+ "pushl 36(%%eax)\n\t" \
+ "pushl 32(%%eax)\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $36, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ __asm__ volatile( \
+ "pushl 40(%%eax)\n\t" \
+ "pushl 36(%%eax)\n\t" \
+ "pushl 32(%%eax)\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $40, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ __asm__ volatile( \
+ "pushl 44(%%eax)\n\t" \
+ "pushl 40(%%eax)\n\t" \
+ "pushl 36(%%eax)\n\t" \
+ "pushl 32(%%eax)\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $44, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ _argvec[12] = (unsigned long)(arg12); \
+ __asm__ volatile( \
+ "pushl 48(%%eax)\n\t" \
+ "pushl 44(%%eax)\n\t" \
+ "pushl 40(%%eax)\n\t" \
+ "pushl 36(%%eax)\n\t" \
+ "pushl 32(%%eax)\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $48, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_x86_linux || PLAT_x86_darwin */
+
+/* ------------------------ amd64-{linux,darwin} --------------- */
+
+#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin)
+
+/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", \
+ "rdi", "r8", "r9", "r10", "r11"
+
+/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
+ long) == 8. */
+
+/* NB 9 Sept 07. There is a nasty kludge here in all these CALL_FN_
+ macros. In order not to trash the stack redzone, we need to drop
+ %rsp by 128 before the hidden call, and restore it afterwards. The
+ nastiness is that it is only by luck that the stack still appears
+ to be unwindable during the hidden call, since the behaviour of
+ any routine using this macro then does not match what the CFI data
+ says. Sigh.
+
+ Why is this important? Imagine that a wrapper has a stack-allocated
+ local, and passes a pointer to it to the hidden call. Because gcc
+ does not know about the hidden call, it may allocate that local in
+ the redzone. Unfortunately the hidden call may then trash it before
+ it comes to use it. So we must step clear of the redzone, for the
+ duration of the hidden call, to make it safe.
+
+ Probably the same problem afflicts the other redzone-style ABIs too
+ (ppc64-linux, ppc32-aix5, ppc64-aix5); but for those, the stack is
+ self-describing (none of this CFI nonsense), so at least messing
+ with the stack pointer doesn't create a danger of a non-unwindable
+ stack. */
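+
+/* As a sketch of how these macros are meant to be used (the wrapper
+ name encoding below follows the function-wrapping scheme described
+ in the Valgrind manual; the choice of malloc as the wrapped
+ function is purely illustrative):
+
+ void* I_WRAP_SONAME_FN_ZU(libcZdsoZa,malloc) ( size_t n )
+ {
+ void* result;
+ OrigFn fn;
+ VALGRIND_GET_ORIG_FN(fn); // locate the wrapped function
+ CALL_FN_W_W(result, fn, n); // hidden, non-redirected call
+ return result;
+ }
+*/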
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ "addq $128,%%rsp\n\t" \
+ VALGRIND_CALL_NOREDIR_RAX \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $8, %%rsp\n" \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $16, %%rsp\n" \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $24, %%rsp\n" \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "pushq 80(%%rax)\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $32, %%rsp\n" \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "pushq 88(%%rax)\n\t" \
+ "pushq 80(%%rax)\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $40, %%rsp\n" \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ _argvec[12] = (unsigned long)(arg12); \
+ __asm__ volatile( \
+ "subq $128,%%rsp\n\t" \
+ "pushq 96(%%rax)\n\t" \
+ "pushq 88(%%rax)\n\t" \
+ "pushq 80(%%rax)\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ "addq $48, %%rsp\n" \
+ "addq $128,%%rsp\n\t" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
+
+/* ------------------------ ppc32-linux ------------------------ */
+
+#if defined(PLAT_ppc32_linux)
+
+/* The following test functions are useful for finding out how
+ arguments beyond the eight register slots are laid out on the stack:
+
+ extern int f9 ( int,int,int,int,int,int,int,int,int );
+ extern int f10 ( int,int,int,int,int,int,int,int,int,int );
+ extern int f11 ( int,int,int,int,int,int,int,int,int,int,int );
+ extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int );
+
+ int g9 ( void ) {
+ return f9(11,22,33,44,55,66,77,88,99);
+ }
+ int g10 ( void ) {
+ return f10(11,22,33,44,55,66,77,88,99,110);
+ }
+ int g11 ( void ) {
+ return f11(11,22,33,44,55,66,77,88,99,110,121);
+ }
+ int g12 ( void ) {
+ return f12(11,22,33,44,55,66,77,88,99,110,121,132);
+ }
+*/
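+
+/* (With the ABI used here, compiling g9..g12 shows args 1-8 going to
+ r3..r10 and arg9 landing at 8(r1), arg10 at 12(r1), arg11 at
+ 16(r1) and arg12 at 20(r1); the CALL_FN_W_9W .. CALL_FN_W_12W
+ macros below store to exactly those slots.) */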
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS \
+ "lr", "ctr", "xer", \
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
+ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
+ "r11", "r12", "r13"
+
+/* These CALL_FN_ macros assume that on ppc32-linux,
+ sizeof(unsigned long) == 4. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "addi 1,1,-16\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,8(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "addi 1,1,16\n\t" \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "addi 1,1,-16\n\t" \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,12(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,8(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "addi 1,1,16\n\t" \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
+ _argvec[11] = (unsigned long)arg11; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "addi 1,1,-32\n\t" \
+ /* arg11 */ \
+ "lwz 3,44(11)\n\t" \
+ "stw 3,16(1)\n\t" \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,12(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,8(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "addi 1,1,32\n\t" \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
+ _argvec[11] = (unsigned long)arg11; \
+ _argvec[12] = (unsigned long)arg12; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "addi 1,1,-32\n\t" \
+ /* arg12 */ \
+ "lwz 3,48(11)\n\t" \
+ "stw 3,20(1)\n\t" \
+ /* arg11 */ \
+ "lwz 3,44(11)\n\t" \
+ "stw 3,16(1)\n\t" \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,12(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,8(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "addi 1,1,32\n\t" \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_ppc32_linux */
+
+/* ------------------------ ppc64-linux ------------------------ */
+
+#if defined(PLAT_ppc64_linux)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS \
+ "lr", "ctr", "xer", \
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
+ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
+ "r11", "r12", "r13"
+
+/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
+ long) == 8. */
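+
+/* Layout used by all the ppc64 variants below: r11 points at
+ &_argvec[2], so _argvec[2] (the entry address) is 0(11), _argvec[1]
+ (the callee's TOC pointer, taken from _orig.r2) is -8(11), and
+ _argvec[0] (scratch, holding the caller's r2 across the call) is
+ -16(11). */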
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+0]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+1]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+2]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+3]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+4]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+5]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+6]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+7]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+8]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)" /* restore tocptr */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+9]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "addi 1,1,-128\n\t" /* expand stack frame */ \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ "addi 1,1,128" /* restore frame */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+10]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "addi 1,1,-128\n\t" /* expand stack frame */ \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ "addi 1,1,128" /* restore frame */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+11]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "addi 1,1,-144\n\t" /* expand stack frame */ \
+ /* arg11 */ \
+ "ld 3,88(11)\n\t" \
+ "std 3,128(1)\n\t" \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ "addi 1,1,144" /* restore frame */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+12]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ _argvec[2+12] = (unsigned long)arg12; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "addi 1,1,-144\n\t" /* expand stack frame */ \
+ /* arg12 */ \
+ "ld 3,96(11)\n\t" \
+ "std 3,136(1)\n\t" \
+ /* arg11 */ \
+ "ld 3,88(11)\n\t" \
+ "std 3,128(1)\n\t" \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ "addi 1,1,144" /* restore frame */ \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_ppc64_linux */
+
+/* ------------------------ ppc32-aix5 ------------------------- */
+
+#if defined(PLAT_ppc32_aix5)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS \
+ "lr", "ctr", "xer", \
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
+ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
+ "r11", "r12", "r13"
+
+/* Expand the stack frame, copying enough info that unwinding
+ still works. Trashes r3. */
+
+#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr) \
+ "addi 1,1,-" #_n_fr "\n\t" \
+ "lwz 3," #_n_fr "(1)\n\t" \
+ "stw 3,0(1)\n\t"
+
+#define VG_CONTRACT_FRAME_BY(_n_fr) \
+ "addi 1,1," #_n_fr "\n\t"
+
+/* These CALL_FN_ macros assume that on ppc32-aix5, sizeof(unsigned
+ long) == 4. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+0]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+1]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+2]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+3]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+4]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+5]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+6]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+7]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+8]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+9]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(64) \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,56(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(64) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+10]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(64) \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,60(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,56(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(64) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+11]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(72) \
+ /* arg11 */ \
+ "lwz 3,44(11)\n\t" \
+ "stw 3,64(1)\n\t" \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,60(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,56(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(72) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+12]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ _argvec[2+12] = (unsigned long)arg12; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "stw 2,-8(11)\n\t" /* save tocptr */ \
+ "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(72) \
+ /* arg12 */ \
+ "lwz 3,48(11)\n\t" \
+ "stw 3,68(1)\n\t" \
+ /* arg11 */ \
+ "lwz 3,44(11)\n\t" \
+ "stw 3,64(1)\n\t" \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,60(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,56(1)\n\t" \
+ /* args1-8 */ \
+ "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
+ "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
+ "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
+ "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
+ "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
+ "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
+ "lwz 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "lwz 2,-8(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(72) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_ppc32_aix5 */
+
+/* ------------------------ ppc64-aix5 ------------------------- */
+
+#if defined(PLAT_ppc64_aix5)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS \
+ "lr", "ctr", "xer", \
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
+ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
+ "r11", "r12", "r13"
+
+/* Expand the stack frame, copying enough info that unwinding
+ still works. Trashes r3. */
+
+#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr) \
+ "addi 1,1,-" #_n_fr "\n\t" \
+ "ld 3," #_n_fr "(1)\n\t" \
+ "std 3,0(1)\n\t"
+
+#define VG_CONTRACT_FRAME_BY(_n_fr) \
+ "addi 1,1," #_n_fr "\n\t"
+
+/* These CALL_FN_ macros assume that on ppc64-aix5, sizeof(unsigned
+ long) == 8. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+0]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+1]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+2]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+3]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+4]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+5]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+6]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+7]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+8]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+9]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(128) \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(128) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+10]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(128) \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(128) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+11]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(144) \
+ /* arg11 */ \
+ "ld 3,88(11)\n\t" \
+ "std 3,128(1)\n\t" \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(144) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3+12]; \
+ volatile unsigned long _res; \
+ /* _argvec[0] holds current r2 across the call */ \
+ _argvec[1] = (unsigned long)_orig.r2; \
+ _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[2+11] = (unsigned long)arg11; \
+ _argvec[2+12] = (unsigned long)arg12; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ VG_EXPAND_FRAME_BY_trashes_r3(512) \
+ "std 2,-16(11)\n\t" /* save tocptr */ \
+ "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
+ VG_EXPAND_FRAME_BY_trashes_r3(144) \
+ /* arg12 */ \
+ "ld 3,96(11)\n\t" \
+ "std 3,136(1)\n\t" \
+ /* arg11 */ \
+ "ld 3,88(11)\n\t" \
+ "std 3,128(1)\n\t" \
+ /* arg10 */ \
+ "ld 3,80(11)\n\t" \
+ "std 3,120(1)\n\t" \
+ /* arg9 */ \
+ "ld 3,72(11)\n\t" \
+ "std 3,112(1)\n\t" \
+ /* args1-8 */ \
+ "ld 3, 8(11)\n\t" /* arg1->r3 */ \
+ "ld 4, 16(11)\n\t" /* arg2->r4 */ \
+ "ld 5, 24(11)\n\t" /* arg3->r5 */ \
+ "ld 6, 32(11)\n\t" /* arg4->r6 */ \
+ "ld 7, 40(11)\n\t" /* arg5->r7 */ \
+ "ld 8, 48(11)\n\t" /* arg6->r8 */ \
+ "ld 9, 56(11)\n\t" /* arg7->r9 */ \
+ "ld 10, 64(11)\n\t" /* arg8->r10 */ \
+ "ld 11, 0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr 11,%1\n\t" \
+ "mr %0,3\n\t" \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VG_CONTRACT_FRAME_BY(144) \
+ VG_CONTRACT_FRAME_BY(512) \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[2]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_ppc64_aix5 */
+
+
+/* ------------------------------------------------------------------ */
+/* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS. */
+/* */
+/* ------------------------------------------------------------------ */
+
+/* Some request codes. There are many more of these, but most are not
+ exposed to end-user view. These are the public ones, all of the
+ form 0x1000 + small_number.
+
+ Core ones are in the range 0x00000000--0x0000ffff. The non-public
+ ones start at 0x2000.
+*/
+
+/* These macros are used by tools -- they must be public, but don't
+ embed them into other programs. */
+#define VG_USERREQ_TOOL_BASE(a,b) \
+ ((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16))
+#define VG_IS_TOOL_USERREQ(a, b, v) \
+ (VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000))
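+
+/* For illustration, a minimal sketch of how a tool derives its
+   private request base from the first two letters of its name
+   (Memcheck is believed to use 'M','C'):
+
+     unsigned int base = VG_USERREQ_TOOL_BASE ('M', 'C');
+     // VG_IS_TOOL_USERREQ ('M', 'C', req) then recognises its requests
+*/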
+
+/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
+ This enum comprises an ABI exported by Valgrind to programs
+ which use client requests. DO NOT CHANGE THE ORDER OF THESE
+ ENTRIES, NOR DELETE ANY -- add new ones at the end. */
+typedef enum
+{ VG_USERREQ__RUNNING_ON_VALGRIND = 0x1001,
+ VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002,
+
+ /* These allow any function to be called from the simulated
+ CPU but run on the real CPU. Nb: the first arg passed to
+ the function is always the ThreadId of the running
+ thread! So CLIENT_CALL0 actually requires a 1 arg
+ function, etc. */
+ VG_USERREQ__CLIENT_CALL0 = 0x1101,
+ VG_USERREQ__CLIENT_CALL1 = 0x1102,
+ VG_USERREQ__CLIENT_CALL2 = 0x1103,
+ VG_USERREQ__CLIENT_CALL3 = 0x1104,
+
+ /* Can be useful in regression testing suites -- eg. can
+ send Valgrind's output to /dev/null and still count
+ errors. */
+ VG_USERREQ__COUNT_ERRORS = 0x1201,
+
+ /* These are useful and can be interpreted by any tool that
+ tracks malloc() et al, by using vg_replace_malloc.c. */
+ VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
+ VG_USERREQ__FREELIKE_BLOCK = 0x1302,
+ /* Memory pool support. */
+ VG_USERREQ__CREATE_MEMPOOL = 0x1303,
+ VG_USERREQ__DESTROY_MEMPOOL = 0x1304,
+ VG_USERREQ__MEMPOOL_ALLOC = 0x1305,
+ VG_USERREQ__MEMPOOL_FREE = 0x1306,
+ VG_USERREQ__MEMPOOL_TRIM = 0x1307,
+ VG_USERREQ__MOVE_MEMPOOL = 0x1308,
+ VG_USERREQ__MEMPOOL_CHANGE = 0x1309,
+ VG_USERREQ__MEMPOOL_EXISTS = 0x130a,
+
+ /* Allow printfs to valgrind log. */
+ VG_USERREQ__PRINTF = 0x1401,
+ VG_USERREQ__PRINTF_BACKTRACE = 0x1402,
+
+ /* Stack support. */
+ VG_USERREQ__STACK_REGISTER = 0x1501,
+ VG_USERREQ__STACK_DEREGISTER = 0x1502,
+ VG_USERREQ__STACK_CHANGE = 0x1503,
+
+ /* Wine support */
+ VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601
+} Vg_ClientRequest;
+
+#if !defined(__GNUC__)
+#define __extension__ /* */
+#endif
+
+/* Returns the number of Valgrinds this code is running under. That
+ is, 0 if running natively, 1 if running under Valgrind, 2 if
+ running under Valgrind which is running under another Valgrind,
+ etc. */
+#define RUNNING_ON_VALGRIND __extension__ \
+ ({unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* if not */, \
+ VG_USERREQ__RUNNING_ON_VALGRIND, \
+ 0, 0, 0, 0, 0); \
+ _qzz_res; \
+ })
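+
+/* For illustration, a minimal sketch of branching on the nesting
+   depth returned above (the fprintf is just an example):
+
+     unsigned int depth = (unsigned int) RUNNING_ON_VALGRIND;
+     if (depth > 0)
+       fprintf (stderr, "running under %u Valgrind(s)\n", depth);
+*/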
+
+
+/* Discard translation of code in the range [_qzz_addr .. _qzz_addr +
+ _qzz_len - 1]. Useful if you are debugging a JITter or some such,
+ since it provides a way to make sure valgrind will retranslate the
+ invalidated area. Returns no value. */
+#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__DISCARD_TRANSLATIONS, \
+ _qzz_addr, _qzz_len, 0, 0, 0); \
+ }
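+
+/* For illustration, the JIT use case mentioned above in a minimal
+   sketch; patch_code_in_place(), code and code_len are hypothetical:
+
+     patch_code_in_place (code, code_len);            // rewrite buffer
+     VALGRIND_DISCARD_TRANSLATIONS (code, code_len);  // force retranslation
+*/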
+
+
+/* These requests are for getting Valgrind itself to print something.
+ Possibly with a backtrace. This is a really ugly hack. The return value
+ is the number of characters printed, excluding the "**<pid>** " part at the
+ start and the backtrace (if present). */
+
+#if defined(NVALGRIND)
+
+#define VALGRIND_PRINTF(...)
+#define VALGRIND_PRINTF_BACKTRACE(...)
+
+#else /* NVALGRIND */
+
+/* Modern GCC will optimize the static routine out if unused,
+   and the unused attribute suppresses warnings about it. */
+static int VALGRIND_PRINTF (const char *format, ...)
+ __attribute__ ((format (__printf__, 1, 2), __unused__));
+static int
+VALGRIND_PRINTF (const char *format, ...)
+{
+ unsigned long _qzz_res;
+ va_list vargs;
+ va_start (vargs, format);
+ VALGRIND_DO_CLIENT_REQUEST (_qzz_res, 0, VG_USERREQ__PRINTF,
+ (unsigned long) format, (unsigned long) vargs,
+ 0, 0, 0);
+ va_end (vargs);
+ return (int) _qzz_res;
+}
+
+static int VALGRIND_PRINTF_BACKTRACE (const char *format, ...)
+ __attribute__ ((format (__printf__, 1, 2), __unused__));
+static int
+VALGRIND_PRINTF_BACKTRACE (const char *format, ...)
+{
+ unsigned long _qzz_res;
+ va_list vargs;
+ va_start (vargs, format);
+ VALGRIND_DO_CLIENT_REQUEST (_qzz_res, 0, VG_USERREQ__PRINTF_BACKTRACE,
+ (unsigned long) format, (unsigned long) vargs,
+ 0, 0, 0);
+ va_end (vargs);
+ return (int) _qzz_res;
+}
+
+#endif /* NVALGRIND */
+
+
+/* These requests allow control to move from the simulated CPU to the
+   real CPU, calling an arbitrary function.
+
+ Note that the current ThreadId is inserted as the first argument.
+ So this call:
+
+ VALGRIND_NON_SIMD_CALL2(f, arg1, arg2)
+
+ requires f to have this signature:
+
+ Word f(Word tid, Word arg1, Word arg2)
+
+ where "Word" is a word-sized type.
+
+ Note that these client requests are not entirely reliable. For example,
+ if you call a function with them that subsequently calls printf(),
+ there's a high chance Valgrind will crash. Generally, your prospects of
+ these working are made higher if the called function does not refer to
+ any global variables, and does not refer to any libc or other functions
+ (printf et al). Any kind of entanglement with libc or dynamic linking is
+ likely to have a bad outcome, for tricky reasons which we've grappled
+ with a lot in the past.
+*/
+#define VALGRIND_NON_SIMD_CALL0(_qyy_fn) \
+ __extension__ \
+ ({unsigned long _qyy_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL0, \
+ _qyy_fn, \
+ 0, 0, 0, 0); \
+ _qyy_res; \
+ })
+
+#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \
+ __extension__ \
+ ({unsigned long _qyy_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL1, \
+ _qyy_fn, \
+ _qyy_arg1, 0, 0, 0); \
+ _qyy_res; \
+ })
+
+#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \
+ __extension__ \
+ ({unsigned long _qyy_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL2, \
+ _qyy_fn, \
+ _qyy_arg1, _qyy_arg2, 0, 0); \
+ _qyy_res; \
+ })
+
+#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \
+ __extension__ \
+ ({unsigned long _qyy_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL3, \
+ _qyy_fn, \
+ _qyy_arg1, _qyy_arg2, \
+ _qyy_arg3, 0); \
+ _qyy_res; \
+ })
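+
+/* For illustration, the signature rule above in a minimal sketch: a
+   2-argument call site needs a 3-argument function, because the
+   ThreadId is prepended ('add2' is hypothetical; 'long' stands in
+   for Word on LP64):
+
+     static long add2 (long tid, long a, long b) { (void) tid; return a + b; }
+     ...
+     long sum = VALGRIND_NON_SIMD_CALL2 (add2, 20, 22);  // sum == 42
+*/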
+
+
+/* Counts the number of errors that have been recorded by a tool. Nb:
+ the tool must record the errors with VG_(maybe_record_error)() or
+ VG_(unique_error)() for them to be counted. */
+#define VALGRIND_COUNT_ERRORS \
+ __extension__ \
+ ({unsigned int _qyy_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ VG_USERREQ__COUNT_ERRORS, \
+ 0, 0, 0, 0, 0); \
+ _qyy_res; \
+ })
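+
+/* For illustration, the regression-testing idiom mentioned above in
+   a minimal sketch; run_test_case() is hypothetical:
+
+     unsigned int before = VALGRIND_COUNT_ERRORS;
+     run_test_case ();
+     if (VALGRIND_COUNT_ERRORS != before)
+       abort ();   // the test introduced new tool-detected errors
+*/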
+
+/* Several Valgrind tools (Memcheck, Massif, Helgrind, DRD) rely on knowing
+ when heap blocks are allocated in order to give accurate results. This
+ happens automatically for the standard allocator functions such as
+ malloc(), calloc(), realloc(), memalign(), new, new[], free(), delete,
+ delete[], etc.
+
+ But if your program uses a custom allocator, this doesn't automatically
+ happen, and Valgrind will not do as well. For example, if you allocate
+   superblocks with mmap() and then allocate chunks of the superblocks, all
+ Valgrind's observations will be at the mmap() level and it won't know that
+ the chunks should be considered separate entities. In Memcheck's case,
+ that means you probably won't get heap block overrun detection (because
+ there won't be redzones marked as unaddressable) and you definitely won't
+ get any leak detection.
+
+ The following client requests allow a custom allocator to be annotated so
+ that it can be handled accurately by Valgrind.
+
+ VALGRIND_MALLOCLIKE_BLOCK marks a region of memory as having been allocated
+ by a malloc()-like function. For Memcheck (an illustrative case), this
+ does two things:
+
+ - It records that the block has been allocated. This means any addresses
+ within the block mentioned in error messages will be
+ identified as belonging to the block. It also means that if the block
+ isn't freed it will be detected by the leak checker.
+
+ - It marks the block as being addressable and undefined (if 'is_zeroed' is
+ not set), or addressable and defined (if 'is_zeroed' is set). This
+ controls how accesses to the block by the program are handled.
+
+ 'addr' is the start of the usable block (ie. after any
+ redzone), 'sizeB' is its size. 'rzB' is the redzone size if the allocator
+ can apply redzones -- these are blocks of padding at the start and end of
+ each block. Adding redzones is recommended as it makes it much more likely
+ Valgrind will spot block overruns. `is_zeroed' indicates if the memory is
+ zeroed (or filled with another predictable value), as is the case for
+ calloc().
+
+ VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a
+ heap block -- that will be used by the client program -- is allocated.
+ It's best to put it at the outermost level of the allocator if possible;
+ for example, if you have a function my_alloc() which calls
+ internal_alloc(), and the client request is put inside internal_alloc(),
+ stack traces relating to the heap block will contain entries for both
+ my_alloc() and internal_alloc(), which is probably not what you want.
+
+ For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out
+ custom blocks from within a heap block, B, that has been allocated with
+ malloc/calloc/new/etc, then block B will be *ignored* during leak-checking
+ -- the custom blocks will take precedence.
+
+ VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK. For
+ Memcheck, it does two things:
+
+ - It records that the block has been deallocated. This assumes that the
+ block was annotated as having been allocated via
+ VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued.
+
+ - It marks the block as being unaddressable.
+
+ VALGRIND_FREELIKE_BLOCK should be put immediately after the point where a
+ heap block is deallocated.
+
+ In many cases, these two client requests will not be enough to get your
+ allocator working well with Memcheck. More specifically, if your allocator
+ writes to freed blocks in any way then a VALGRIND_MAKE_MEM_UNDEFINED call
+ will be necessary to mark the memory as addressable just before the zeroing
+ occurs, otherwise you'll get a lot of invalid write errors. For example,
+ you'll need to do this if your allocator recycles freed blocks, but it
+ zeroes them before handing them back out (via VALGRIND_MALLOCLIKE_BLOCK).
+ Alternatively, if your allocator reuses freed blocks for allocator-internal
+ data structures, VALGRIND_MAKE_MEM_UNDEFINED calls will also be necessary.
+
+ Really, what's happening is a blurring of the lines between the client
+ program and the allocator... after VALGRIND_FREELIKE_BLOCK is called, the
+ memory should be considered unaddressable to the client program, but the
+ allocator knows more than the rest of the client program and so may be able
+ to safely access it. Extra client requests are necessary for Valgrind to
+ understand the distinction between the allocator and the rest of the
+ program.
+
+ Note: there is currently no VALGRIND_REALLOCLIKE_BLOCK client request; it
+ has to be emulated with MALLOCLIKE/FREELIKE and memory copying.
+
+ Ignored if addr == 0.
+*/
+#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MALLOCLIKE_BLOCK, \
+ addr, sizeB, rzB, is_zeroed, 0); \
+ (void) _qzz_res; /* compiler warning */ \
+ }
+
+/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
+ Ignored if addr == 0.
+*/
+#define VALGRIND_FREELIKE_BLOCK(addr, rzB) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__FREELIKE_BLOCK, \
+ addr, rzB, 0, 0, 0); \
+ (void) _qzz_res; /* compiler warning */ \
+ }
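+
+/* For illustration, a minimal sketch of annotating a custom allocator
+   with the two requests above.  The redzone size and the superblock
+   helpers are hypothetical:
+
+     enum { RZ = 16 };                           // illustrative redzone size
+     void *my_alloc (size_t n)
+     {
+       char *p = superblock_carve (n + 2 * RZ);  // hypothetical carve step
+       VALGRIND_MALLOCLIKE_BLOCK (p + RZ, n, RZ, 0);  // is_zeroed == 0
+       return p + RZ;
+     }
+     void my_free (void *q)
+     {
+       VALGRIND_FREELIKE_BLOCK (q, RZ);
+       superblock_return ((char *) q - RZ);      // hypothetical return step
+     }
+*/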
+
+/* Create a memory pool. */
+#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__CREATE_MEMPOOL, \
+ pool, rzB, is_zeroed, 0, 0); \
+ }
+
+/* Destroy a memory pool. */
+#define VALGRIND_DESTROY_MEMPOOL(pool) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__DESTROY_MEMPOOL, \
+ pool, 0, 0, 0, 0); \
+ }
+
+/* Associate a piece of memory with a memory pool. */
+#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_ALLOC, \
+ pool, addr, size, 0, 0); \
+ }
+
+/* Disassociate a piece of memory from a memory pool. */
+#define VALGRIND_MEMPOOL_FREE(pool, addr) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_FREE, \
+ pool, addr, 0, 0, 0); \
+ }
+
+/* Disassociate any pieces outside a particular range. */
+#define VALGRIND_MEMPOOL_TRIM(pool, addr, size) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_TRIM, \
+ pool, addr, size, 0, 0); \
+ }
+
+/* Tell Valgrind the pool previously anchored at poolA has moved to poolB. */
+#define VALGRIND_MOVE_MEMPOOL(poolA, poolB) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MOVE_MEMPOOL, \
+ poolA, poolB, 0, 0, 0); \
+ }
+
+/* Resize and/or move a piece associated with a memory pool. */
+#define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_CHANGE, \
+ pool, addrA, addrB, size, 0); \
+ }
+
+/* Return 1 if a mempool exists, else 0. */
+#define VALGRIND_MEMPOOL_EXISTS(pool) \
+ __extension__ \
+ ({unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_EXISTS, \
+ pool, 0, 0, 0, 0); \
+ _qzz_res; \
+ })
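+
+/* For illustration, the typical mempool lifecycle in a minimal
+   sketch; pool, chunk and sz are hypothetical:
+
+     VALGRIND_CREATE_MEMPOOL (pool, 0, 0);       // no redzones, not zeroed
+     VALGRIND_MEMPOOL_ALLOC (pool, chunk, sz);   // chunk handed to client
+     VALGRIND_MEMPOOL_FREE (pool, chunk);        // chunk handed back
+     VALGRIND_DESTROY_MEMPOOL (pool);            // whole pool torn down
+*/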
+
+/* Mark a piece of memory as being a stack. Returns a stack id. */
+#define VALGRIND_STACK_REGISTER(start, end) \
+ __extension__ \
+ ({unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__STACK_REGISTER, \
+ start, end, 0, 0, 0); \
+ _qzz_res; \
+ })
+
+/* Unmark the piece of memory associated with a stack id as being a
+ stack. */
+#define VALGRIND_STACK_DEREGISTER(id) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__STACK_DEREGISTER, \
+ id, 0, 0, 0, 0); \
+ }
+
+/* Change the start and end address of the stack id. */
+#define VALGRIND_STACK_CHANGE(id, start, end) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__STACK_CHANGE, \
+ id, start, end, 0, 0); \
+ }
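+
+/* For illustration, a minimal sketch of registering a coroutine
+   stack; STACK_SIZE and the malloc'd buffer are hypothetical, with
+   the lower address passed first:
+
+     char *stk = malloc (STACK_SIZE);
+     unsigned int id = VALGRIND_STACK_REGISTER (stk, stk + STACK_SIZE);
+     // ... run code on the new stack ...
+     VALGRIND_STACK_DEREGISTER (id);
+*/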
+
+/* Load PDB debug info for Wine PE image_map. */
+#define VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, delta) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__LOAD_PDB_DEBUGINFO, \
+ fd, ptr, total_size, delta, 0); \
+ }
+
+
+#undef PLAT_x86_linux
+#undef PLAT_amd64_linux
+#undef PLAT_ppc32_linux
+#undef PLAT_ppc64_linux
+#undef PLAT_ppc32_aix5
+#undef PLAT_ppc64_aix5
+
+#endif /* __VALGRIND_H */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/vec.c b/src/vppinfra/vec.c
new file mode 100644
index 00000000..2d7ae1d4
--- /dev/null
+++ b/src/vppinfra/vec.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/vec.h>
+#include <vppinfra/mem.h>
+
+/* Vector resize operator. Called as needed by various macros such as
+ vec_add1() when we need to allocate memory. */
+void *
+vec_resize_allocate_memory (void *v,
+ word length_increment,
+ uword data_bytes,
+ uword header_bytes, uword data_align)
+{
+ vec_header_t *vh = _vec_find (v);
+ uword old_alloc_bytes, new_alloc_bytes;
+ void *old, *new;
+
+ header_bytes = vec_header_bytes (header_bytes);
+
+ data_bytes += header_bytes;
+
+ if (!v)
+ {
+      new = clib_mem_alloc_aligned_at_offset (data_bytes, data_align,
+					       header_bytes,
+					       1 /* yes, call os_out_of_memory */ );
+ data_bytes = clib_mem_size (new);
+ memset (new, 0, data_bytes);
+ v = new + header_bytes;
+ _vec_len (v) = length_increment;
+ return v;
+ }
+
+ vh->len += length_increment;
+ old = v - header_bytes;
+
+ /* Vector header must start heap object. */
+ ASSERT (clib_mem_is_heap_object (old));
+
+ old_alloc_bytes = clib_mem_size (old);
+
+ /* Need to resize? */
+ if (data_bytes <= old_alloc_bytes)
+ return v;
+
+ new_alloc_bytes = (old_alloc_bytes * 3) / 2;
+ if (new_alloc_bytes < data_bytes)
+ new_alloc_bytes = data_bytes;
+
+ new =
+ clib_mem_alloc_aligned_at_offset (new_alloc_bytes, data_align,
+ header_bytes,
+ 1 /* yes, call os_out_of_memory */ );
+
+ /* FIXME fail gracefully. */
+ if (!new)
+ clib_panic
+ ("vec_resize fails, length increment %d, data bytes %d, alignment %d",
+ length_increment, data_bytes, data_align);
+
+ clib_memcpy (new, old, old_alloc_bytes);
+ clib_mem_free (old);
+ v = new;
+
+ /* Allocator may give a bit of extra room. */
+ new_alloc_bytes = clib_mem_size (v);
+
+ /* Zero new memory. */
+ memset (v + old_alloc_bytes, 0, new_alloc_bytes - old_alloc_bytes);
+
+ return v + header_bytes;
+}
+
+uword
+clib_mem_is_vec_h (void *v, uword header_bytes)
+{
+ return clib_mem_is_heap_object (vec_header (v, header_bytes));
+}
+
+/** \cond */
+
+#ifdef TEST
+
+#include <stdio.h>
+#include <stdlib.h>		/* atoi, exit */
+
+int
+main (int argc, char *argv[])
+{
+ word n = atoi (argv[1]);
+ word i, *x = 0;
+
+ typedef struct
+ {
+ word x, y, z;
+ } FOO;
+
+ FOO *foos = vec_init (FOO, 10), *f;
+
+ vec_validate (foos, 100);
+ foos[100].x = 99;
+
+ _vec_len (foos) = 0;
+ for (i = 0; i < n; i++)
+ {
+ vec_add1 (x, i);
+ vec_add2 (foos, f, 1);
+ f->x = 2 * i;
+ f->y = 3 * i;
+ f->z = 4 * i;
+ }
+
+ {
+ word n = 2;
+ word m = 42;
+ vec_delete (foos, n, m);
+ }
+
+ {
+ word n = 2;
+ word m = 42;
+ vec_insert (foos, n, m);
+ }
+
+ vec_free (x);
+ vec_free (foos);
+ exit (0);
+}
+#endif
+/** \endcond */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/vec.h b/src/vppinfra/vec.h
new file mode 100644
index 00000000..e2cb24c5
--- /dev/null
+++ b/src/vppinfra/vec.h
@@ -0,0 +1,1009 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_vec_h
+#define included_vec_h
+
+#include <vppinfra/clib.h> /* word, etc */
+#include <vppinfra/mem.h> /* clib_mem_free */
+#include <vppinfra/string.h> /* memcpy, memmove */
+#include <vppinfra/vec_bootstrap.h>
+
+/** \file
+
+ CLIB vectors are ubiquitous dynamically resized arrays with
+ user-defined "headers". Many CLIB data structures (e.g. hash, heap,
+ pool) are vectors with various different headers.
+
+ The memory layout looks like this:
+
+~~~~~~~~
+ user header (aligned to uword boundary)
+ vector length: number of elements
+ user's pointer-> vector element #0
+ vector element #1
+ ...
+~~~~~~~~
+
+ The user pointer contains the address of vector element #0. NULL
+ pointer vectors are valid: they denote zero-length vectors.
+
+ You can reset the length of an allocated vector to zero via the
+ vec_reset_length(v) macro, or by setting the vector length field to
+ zero (e.g. _vec_len (v) = 0). vec_reset_length(v) is preferred: it
+ understands NULL pointers.
+
+ Typically, the header is not present. Headers allow for other
+ data structures to be built atop CLIB vectors.
+
+ Users may specify the alignment for data elements via the
+ vec_*_aligned macros.
+
+ Vectors elements can be any C type e.g. (int, double, struct bar).
+ This is also true for data types built atop vectors (e.g. heap,
+ pool, etc.).
+
+ Many macros have _a variants supporting alignment of vector data
+ and _h variants supporting non-zero-length vector headers.
+ The _ha variants support both.
+
+ Standard programming error: memorize a pointer to the ith element
+ of a vector, then expand the vector. Vectors expand by 3/2, so such
+ code may appear to work for a period of time. Instead, memorize
+ vector indices, which are invariant across resizes.
+ */
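+
+/* A minimal usage sketch (illustrative only; assumes a clib heap has
+ already been initialized):
+
+ u32 *v = 0; // NULL is a valid zero-length vector
+ vec_add1 (v, 42); // append one element; v may move
+ vec_validate (v, 9); // indices 0..9 now valid, zero-filled
+ vec_free (v); // frees storage, resets v to NULL
+*/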
+
+/** \brief Low-level resize allocation function, usually not called directly
+
+ @param v pointer to a vector
+ @param length_increment length increment in elements
+ @param data_bytes requested size in bytes
+ @param header_bytes header size in bytes (may be zero)
+ @param data_align alignment (may be zero)
+ @return v_prime pointer to resized vector, may or may not equal v
+*/
+void *vec_resize_allocate_memory (void *v,
+ word length_increment,
+ uword data_bytes,
+ uword header_bytes, uword data_align);
+
+/** \brief Low-level vector resize function, usually not called directly
+
+ @param v pointer to a vector
+ @param length_increment length increment in elements
+ @param data_bytes requested size in bytes
+ @param header_bytes header size in bytes (may be zero)
+ @param data_align alignment (may be zero)
+ @return v_prime pointer to resized vector, may or may not equal v
+*/
+
+always_inline void *
+_vec_resize (void *v,
+ word length_increment,
+ uword data_bytes, uword header_bytes, uword data_align)
+{
+ vec_header_t *vh = _vec_find (v);
+ uword new_data_bytes, aligned_header_bytes;
+
+ aligned_header_bytes = vec_header_bytes (header_bytes);
+
+ new_data_bytes = data_bytes + aligned_header_bytes;
+
+ if (PREDICT_TRUE (v != 0))
+ {
+ void *p = v - aligned_header_bytes;
+
+ /* Vector header must start heap object. */
+ ASSERT (clib_mem_is_heap_object (p));
+
+ /* Typically we'll not need to resize. */
+ if (new_data_bytes <= clib_mem_size (p))
+ {
+ vh->len += length_increment;
+ return v;
+ }
+ }
+
+ /* Slow path: call helper function. */
+ return vec_resize_allocate_memory (v, length_increment, data_bytes,
+ header_bytes,
+ clib_max (sizeof (vec_header_t),
+ data_align));
+}
+
+/** \brief Determine if vector will resize with next allocation
+
+ @param v pointer to a vector
+ @param length_increment length increment in elements
+ @param data_bytes requested size in bytes
+ @param header_bytes header size in bytes (may be zero)
+ @param data_align alignment (may be zero)
+ @return 1 if the vector will resize, 0 otherwise
+*/
+
+always_inline int
+_vec_resize_will_expand (void *v,
+ word length_increment,
+ uword data_bytes, uword header_bytes,
+ uword data_align)
+{
+ uword new_data_bytes, aligned_header_bytes;
+
+ aligned_header_bytes = vec_header_bytes (header_bytes);
+
+ new_data_bytes = data_bytes + aligned_header_bytes;
+
+ if (PREDICT_TRUE (v != 0))
+ {
+ void *p = v - aligned_header_bytes;
+
+ /* Vector header must start heap object. */
+ ASSERT (clib_mem_is_heap_object (p));
+
+ /* Typically we'll not need to resize. */
+ if (new_data_bytes <= clib_mem_size (p))
+ return 0;
+ }
+ return 1;
+}
+
+/** \brief Predicate function, says whether the supplied vector is a clib heap
+ object (general version).
+
+ @param v pointer to a vector
+ @param header_bytes vector header size in bytes (may be zero)
+ @return 0 or 1
+*/
+uword clib_mem_is_vec_h (void *v, uword header_bytes);
+
+
+/** \brief Predicate function, says whether the supplied vector is a clib heap
+ object
+
+ @param v pointer to a vector
+ @return 0 or 1
+*/
+always_inline uword
+clib_mem_is_vec (void *v)
+{
+ return clib_mem_is_vec_h (v, 0);
+}
+
+/* Local variable naming macro (prevents collisions with other macro naming). */
+#define _v(var) _vec_##var
+
+/** \brief Resize a vector (general version).
+ Add N elements to end of given vector V, return pointer to start of vector.
+ Vector will have room for H header bytes and will have user's data aligned
+ at alignment A (rounded to next power of 2).
+
+ @param V pointer to a vector
+ @param N number of elements to add
+ @param H header size in bytes (may be zero)
+ @param A alignment (may be zero)
+ @return V (value-result macro parameter)
+*/
+
+#define vec_resize_ha(V,N,H,A) \
+do { \
+ word _v(n) = (N); \
+ word _v(l) = vec_len (V); \
+ V = _vec_resize ((V), _v(n), (_v(l) + _v(n)) * sizeof ((V)[0]), (H), (A)); \
+} while (0)
+
+/** \brief Resize a vector (no header, unspecified alignment).
+ Add N elements to end of given vector V, return pointer to start of vector.
+
+ @param V pointer to a vector
+ @param N number of elements to add
+ @return V (value-result macro parameter)
+*/
+#define vec_resize(V,N) vec_resize_ha(V,N,0,0)
+
+/** \brief Resize a vector (no header, alignment specified).
+ Add N elements to end of given vector V, return pointer to start of vector.
+ User's data will be aligned at alignment A (rounded to next power of 2).
+
+ @param V pointer to a vector
+ @param N number of elements to add
+ @param A alignment (may be zero)
+ @return V (value-result macro parameter)
+*/
+
+#define vec_resize_aligned(V,N,A) vec_resize_ha(V,N,0,A)
+
+/** \brief Allocate space for N more elements
+
+ @param V pointer to a vector
+ @param N number of elements to add
+ @param H header size in bytes (may be zero)
+ @param A alignment (may be zero)
+ @return V (value-result macro parameter)
+*/
+
+#define vec_alloc_ha(V,N,H,A) \
+do { \
+ uword _v(l) = vec_len (V); \
+ vec_resize_ha (V, N, H, A); \
+ _vec_len (V) = _v(l); \
+} while (0)
+
+/** \brief Allocate space for N more elements
+ (no header, unspecified alignment)
+
+ @param V pointer to a vector
+ @param N number of elements to add
+ @return V (value-result macro parameter)
+*/
+#define vec_alloc(V,N) vec_alloc_ha(V,N,0,0)
+
+/** \brief Allocate space for N more elements (no header, given alignment)
+ @param V pointer to a vector
+ @param N number of elements to add
+ @param A alignment (may be zero)
+ @return V (value-result macro parameter)
+*/
+
+#define vec_alloc_aligned(V,N,A) vec_alloc_ha(V,N,0,A)
+
+/** \brief Create new vector of given type and length (general version).
+ @param T type of elements in new vector
+ @param N number of elements to add
+ @param H header size in bytes (may be zero)
+ @param A alignment (may be zero)
+ @return V new vector
+*/
+#define vec_new_ha(T,N,H,A) \
+({ \
+ word _v(n) = (N); \
+ _vec_resize ((T *) 0, _v(n), _v(n) * sizeof (T), (H), (A)); \
+})
+
+/** \brief Create new vector of given type and length
+ (unspecified alignment, no header).
+
+ @param T type of elements in new vector
+ @param N number of elements to add
+ @return V new vector
+*/
+#define vec_new(T,N) vec_new_ha(T,N,0,0)
+/** \brief Create new vector of given type and length
+ (alignment specified, no header).
+
+ @param T type of elements in new vector
+ @param N number of elements to add
+ @param A alignment (may be zero)
+ @return V new vector
+*/
+#define vec_new_aligned(T,N,A) vec_new_ha(T,N,0,A)
+
+/** \brief Free vector's memory (general version)
+
+ @param V pointer to a vector
+ @param H size of header in bytes
+ @return V (value-result parameter, V=0)
+*/
+#define vec_free_h(V,H) \
+do { \
+ if (V) \
+ { \
+ clib_mem_free (vec_header ((V), (H))); \
+ V = 0; \
+ } \
+} while (0)
+
+/** \brief Free vector's memory (no header).
+ @param V pointer to a vector
+ @return V (value-result parameter, V=0)
+*/
+#define vec_free(V) vec_free_h(V,0)
+
+/**\brief Free vector user header (syntactic sugar)
+ @param h vector header
+ @return void
+*/
+#define vec_free_header(h) clib_mem_free (h)
+
+/** \brief Return copy of vector (general version).
+
+ @param V pointer to a vector
+ @param H size of header in bytes
+ @param A alignment (may be zero)
+
+ @return Vdup copy of vector
+*/
+
+#define vec_dup_ha(V,H,A) \
+({ \
+ __typeof__ ((V)[0]) * _v(v) = 0; \
+ uword _v(l) = vec_len (V); \
+ if (_v(l) > 0) \
+ { \
+ vec_resize_ha (_v(v), _v(l), (H), (A)); \
+ clib_memcpy (_v(v), (V), _v(l) * sizeof ((V)[0]));\
+ } \
+ _v(v); \
+})
+
+/** \brief Return copy of vector (no header, no alignment)
+
+ @param V pointer to a vector
+ @return Vdup copy of vector
+*/
+#define vec_dup(V) vec_dup_ha(V,0,0)
+
+/** \brief Return copy of vector (no header, alignment specified).
+
+ @param V pointer to a vector
+ @param A alignment (may be zero)
+
+ @return Vdup copy of vector
+*/
+#define vec_dup_aligned(V,A) vec_dup_ha(V,0,A)
+
+/** \brief Copy a vector (memcpy wrapper). Copies vec_len(DST) elements,
+ assuming sizeof(SRC[0]) == sizeof(DST[0]) and that SRC holds at least
+ vec_len(DST) elements.
+
+ @param DST destination
+ @param SRC source
+*/
+#define vec_copy(DST,SRC) clib_memcpy (DST, SRC, vec_len (DST) * \
+ sizeof ((DST)[0]))
+
+/** \brief Clone a vector. Make a new vector with the
+ same size as a given vector but possibly with a different type.
+
+ @param NEW_V pointer to new vector
+ @param OLD_V pointer to old vector
+*/
+#define vec_clone(NEW_V,OLD_V) \
+do { \
+ (NEW_V) = 0; \
+ (NEW_V) = _vec_resize ((NEW_V), vec_len (OLD_V), \
+ vec_len (OLD_V) * sizeof ((NEW_V)[0]), (0), (0)); \
+} while (0)
+
+/** \brief Make sure vector is long enough for given index (general version).
+
+ @param V (possibly NULL) pointer to a vector.
+ @param I vector index which will be valid upon return
+ @param H header size in bytes (may be zero)
+ @param A alignment (may be zero)
+ @return V (value-result macro parameter)
+*/
+
+#define vec_validate_ha(V,I,H,A) \
+do { \
+ word _v(i) = (I); \
+ word _v(l) = vec_len (V); \
+ if (_v(i) >= _v(l)) \
+ { \
+ vec_resize_ha ((V), 1 + (_v(i) - _v(l)), (H), (A)); \
+ /* Must zero new space since user may have previously \
+ used e.g. _vec_len (v) -= 10 */ \
+ memset ((V) + _v(l), 0, (1 + (_v(i) - _v(l))) * sizeof ((V)[0])); \
+ } \
+} while (0)
+
+/** \brief Make sure vector is long enough for given index
+ (no header, unspecified alignment)
+
+ @param V (possibly NULL) pointer to a vector.
+ @param I vector index which will be valid upon return
+ @return V (value-result macro parameter)
+*/
+#define vec_validate(V,I) vec_validate_ha(V,I,0,0)
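+
+/* Sketch: vec_validate() grows a (possibly NULL) vector on demand and
+ zero-fills the new space; counter_t and if_index are hypothetical:
+
+ counter_t *ctrs = 0;
+ vec_validate (ctrs, if_index); // ctrs[0..if_index] valid, zeroed
+ ctrs[if_index].packets += 1;
+*/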
+
+/** \brief Make sure vector is long enough for given index
+ (no header, specified alignment)
+
+ @param V (possibly NULL) pointer to a vector.
+ @param I vector index which will be valid upon return
+ @param A alignment (may be zero)
+ @return V (value-result macro parameter)
+*/
+
+#define vec_validate_aligned(V,I,A) vec_validate_ha(V,I,0,A)
+
+/** \brief Make sure vector is long enough for given index
+ and initialize empty space (general version)
+
+ @param V (possibly NULL) pointer to a vector.
+ @param I vector index which will be valid upon return
+ @param INIT initial value (can be a complex expression!)
+ @param H header size in bytes (may be zero)
+ @param A alignment (may be zero)
+ @return V (value-result macro parameter)
+*/
+#define vec_validate_init_empty_ha(V,I,INIT,H,A) \
+do { \
+ word _v(i) = (I); \
+ word _v(l) = vec_len (V); \
+ if (_v(i) >= _v(l)) \
+ { \
+ vec_resize_ha ((V), 1 + (_v(i) - _v(l)), (H), (A)); \
+ while (_v(l) <= _v(i)) \
+ { \
+ (V)[_v(l)] = (INIT); \
+ _v(l)++; \
+ } \
+ } \
+} while (0)
+
+/** \brief Make sure vector is long enough for given index
+ and initialize empty space (no header, unspecified alignment)
+
+ @param V (possibly NULL) pointer to a vector.
+ @param I vector index which will be valid upon return
+ @param INIT initial value (can be a complex expression!)
+ @return V (value-result macro parameter)
+*/
+
+#define vec_validate_init_empty(V,I,INIT) \
+ vec_validate_init_empty_ha(V,I,INIT,0,0)
+
+/** \brief Make sure vector is long enough for given index
+ and initialize empty space (no header, specified alignment)
+
+ @param V (possibly NULL) pointer to a vector.
+ @param I vector index which will be valid upon return
+ @param INIT initial value (can be a complex expression!)
+ @param A alignment (may be zero)
+ @return V (value-result macro parameter)
+*/
+#define vec_validate_init_empty_aligned(V,I,INIT,A) \
+ vec_validate_init_empty_ha(V,I,INIT,0,A)
+
+/** \brief Add 1 element to end of vector (general version).
+
+ @param V pointer to a vector
+ @param E element to add
+ @param H header size in bytes (may be zero)
+ @param A alignment (may be zero)
+ @return V (value-result macro parameter)
+*/
+#define vec_add1_ha(V,E,H,A) \
+do { \
+ word _v(l) = vec_len (V); \
+ V = _vec_resize ((V), 1, (_v(l) + 1) * sizeof ((V)[0]), (H), (A)); \
+ (V)[_v(l)] = (E); \
+} while (0)
+
+/** \brief Add 1 element to end of vector (unspecified alignment).
+
+ @param V pointer to a vector
+ @param E element to add
+ @return V (value-result macro parameter)
+*/
+#define vec_add1(V,E) vec_add1_ha(V,E,0,0)
+
+/** \brief Add 1 element to end of vector (alignment specified).
+
+ @param V pointer to a vector
+ @param E element to add
+ @param A alignment (may be zero)
+ @return V (value-result macro parameter)
+*/
+#define vec_add1_aligned(V,E,A) vec_add1_ha(V,E,0,A)
+
+/** \brief Add N elements to end of vector V,
+ return pointer to new elements in P. (general version)
+
+ @param V pointer to a vector
+ @param P pointer to new vector element(s)
+ @param N number of elements to add
+ @param H header size in bytes (may be zero)
+ @param A alignment (may be zero)
+ @return V and P (value-result macro parameters)
+*/
+#define vec_add2_ha(V,P,N,H,A) \
+do { \
+ word _v(n) = (N); \
+ word _v(l) = vec_len (V); \
+ V = _vec_resize ((V), _v(n), (_v(l) + _v(n)) * sizeof ((V)[0]), (H), (A)); \
+ P = (V) + _v(l); \
+} while (0)
+
+/** \brief Add N elements to end of vector V,
+ return pointer to new elements in P. (no header, unspecified alignment)
+
+ @param V pointer to a vector
+ @param P pointer to new vector element(s)
+ @param N number of elements to add
+ @return V and P (value-result macro parameters)
+*/
+
+#define vec_add2(V,P,N) vec_add2_ha(V,P,N,0,0)
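+
+/* Sketch contrasting vec_add1() and vec_add2(); msg_t and its fields
+ are hypothetical:
+
+ msg_t *msgs = 0, *m;
+ vec_add1 (msgs, msg0); // copy an existing element in
+ vec_add2 (msgs, m, 1); // or reserve fresh space...
+ m->type = 1; // ...and fill it in place
+*/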
+
+/** \brief Add N elements to end of vector V,
+ return pointer to new elements in P. (no header, alignment specified)
+
+ @param V pointer to a vector
+ @param P pointer to new vector element(s)
+ @param N number of elements to add
+ @param A alignment (may be zero)
+ @return V and P (value-result macro parameters)
+*/
+
+#define vec_add2_aligned(V,P,N,A) vec_add2_ha(V,P,N,0,A)
+
+/** \brief Add N elements to end of vector V (general version)
+
+ @param V pointer to a vector
+ @param E pointer to element(s) to add
+ @param N number of elements to add
+ @param H header size in bytes (may be zero)
+ @param A alignment (may be zero)
+ @return V (value-result macro parameter)
+*/
+#define vec_add_ha(V,E,N,H,A) \
+do { \
+ word _v(n) = (N); \
+ word _v(l) = vec_len (V); \
+ V = _vec_resize ((V), _v(n), (_v(l) + _v(n)) * sizeof ((V)[0]), (H), (A)); \
+ clib_memcpy ((V) + _v(l), (E), _v(n) * sizeof ((V)[0])); \
+} while (0)
+
+/** \brief Add N elements to end of vector V (no header, unspecified alignment)
+
+ @param V pointer to a vector
+ @param E pointer to element(s) to add
+ @param N number of elements to add
+ @return V (value-result macro parameter)
+*/
+#define vec_add(V,E,N) vec_add_ha(V,E,N,0,0)
+
+/** \brief Add N elements to end of vector V (no header, specified alignment)
+
+ @param V pointer to a vector
+ @param E pointer to element(s) to add
+ @param N number of elements to add
+ @param A alignment (may be zero)
+ @return V (value-result macro parameter)
+*/
+#define vec_add_aligned(V,E,N,A) vec_add_ha(V,E,N,0,A)
+
+/** \brief Returns last element of a vector and decrements its length
+
+ @param V pointer to a vector
+ @return E element removed from the end of the vector
+*/
+#define vec_pop(V) \
+({ \
+ uword _v(l) = vec_len (V); \
+ ASSERT (_v(l) > 0); \
+ _v(l) -= 1; \
+ _vec_len (V) = _v (l); \
+ (V)[_v(l)]; \
+})
+
+/** \brief Set E to the last element of a vector, decrement vector length
+ @param V pointer to a vector
+ @param E set to the element removed from the end of the vector
+ (value-result macro parameter)
+ @return 1 if an element was popped (vector non-empty), 0 otherwise
+*/
+
+#define vec_pop2(V,E) \
+({ \
+ uword _v(l) = vec_len (V); \
+ if (_v(l) > 0) (E) = vec_pop (V); \
+ _v(l) > 0; \
+})
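+
+/* Sketch: draining a vector from the end; process() is hypothetical:
+
+ u32 e;
+ while (vec_pop2 (stack, e)) // stack is a u32 * vector
+ process (e);
+*/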
+
+/** \brief Insert N vector elements starting at element M,
+ initialize new elements (general version).
+
+ @param V (possibly NULL) pointer to a vector.
+ @param N number of elements to insert
+ @param M insertion point
+ @param INIT initial value (can be a complex expression!)
+ @param H header size in bytes (may be zero)
+ @param A alignment (may be zero)
+ @return V (value-result macro parameter)
+*/
+#define vec_insert_init_empty_ha(V,N,M,INIT,H,A) \
+do { \
+ word _v(l) = vec_len (V); \
+ word _v(n) = (N); \
+ word _v(m) = (M); \
+ V = _vec_resize ((V), \
+ _v(n), \
+ (_v(l) + _v(n))*sizeof((V)[0]), \
+ (H), (A)); \
+ ASSERT (_v(m) <= _v(l)); \
+ memmove ((V) + _v(m) + _v(n), \
+ (V) + _v(m), \
+ (_v(l) - _v(m)) * sizeof ((V)[0])); \
+ memset ((V) + _v(m), INIT, _v(n) * sizeof ((V)[0])); \
+} while (0)
+
+/** \brief Insert N vector elements starting at element M,
+ initialize new elements to zero (general version)
+
+ @param V (possibly NULL) pointer to a vector.
+ @param N number of elements to insert
+ @param M insertion point
+ @param H header size in bytes (may be zero)
+ @param A alignment (may be zero)
+ @return V (value-result macro parameter)
+*/
+#define vec_insert_ha(V,N,M,H,A) vec_insert_init_empty_ha(V,N,M,0,H,A)
+
+/** \brief Insert N vector elements starting at element M,
+ initialize new elements to zero (no header, unspecified alignment)
+
+ @param V (possibly NULL) pointer to a vector.
+ @param N number of elements to insert
+ @param M insertion point
+ @return V (value-result macro parameter)
+*/
+#define vec_insert(V,N,M) vec_insert_ha(V,N,M,0,0)
+
+/** \brief Insert N vector elements starting at element M,
+ initialize new elements to zero (no header, alignment specified)
+
+ @param V (possibly NULL) pointer to a vector.
+ @param N number of elements to insert
+ @param M insertion point
+ @param A alignment (may be zero)
+ @return V (value-result macro parameter)
+*/
+#define vec_insert_aligned(V,N,M,A) vec_insert_ha(V,N,M,0,A)
+
+/** \brief Insert N vector elements starting at element M,
+ initialize new elements (no header, unspecified alignment)
+
+ @param V (possibly NULL) pointer to a vector.
+ @param N number of elements to insert
+ @param M insertion point
+ @param INIT initial value (can be a complex expression!)
+ @return V (value-result macro parameter)
+*/
+
+#define vec_insert_init_empty(V,N,M,INIT) \
+ vec_insert_init_empty_ha(V,N,M,INIT,0,0)
+
+/** \brief Insert N vector elements starting at element M,
+ initialize new elements (no header, specified alignment)
+
+ @param V (possibly NULL) pointer to a vector.
+ @param N number of elements to insert
+ @param M insertion point
+ @param INIT initial value (can be a complex expression!)
+ @param A alignment (may be zero)
+ @return V (value-result macro parameter)
+*/
+#define vec_insert_init_empty_aligned(V,N,M,INIT,A) \
+ vec_insert_init_empty_ha(V,N,M,INIT,0,A)
+
+/** \brief Insert N vector elements starting at element M,
+ insert given elements (general version)
+
+ @param V (possibly NULL) pointer to a vector.
+ @param E element(s) to insert
+ @param N number of elements to insert
+ @param M insertion point
+ @param H header size in bytes (may be zero)
+ @param A alignment (may be zero)
+ @return V (value-result macro parameter)
+*/
+
+#define vec_insert_elts_ha(V,E,N,M,H,A) \
+do { \
+ word _v(l) = vec_len (V); \
+ word _v(n) = (N); \
+ word _v(m) = (M); \
+ V = _vec_resize ((V), \
+ _v(n), \
+ (_v(l) + _v(n))*sizeof((V)[0]), \
+ (H), (A)); \
+ ASSERT (_v(m) <= _v(l)); \
+ memmove ((V) + _v(m) + _v(n), \
+ (V) + _v(m), \
+ (_v(l) - _v(m)) * sizeof ((V)[0])); \
+ clib_memcpy ((V) + _v(m), (E), \
+ _v(n) * sizeof ((V)[0])); \
+} while (0)
+
+/** \brief Insert N vector elements starting at element M,
+ insert given elements (no header, unspecified alignment)
+
+ @param V (possibly NULL) pointer to a vector.
+ @param E element(s) to insert
+ @param N number of elements to insert
+ @param M insertion point
+ @return V (value-result macro parameter)
+*/
+#define vec_insert_elts(V,E,N,M) vec_insert_elts_ha(V,E,N,M,0,0)
+
+/** \brief Insert N vector elements starting at element M,
+ insert given elements (no header, specified alignment)
+
+ @param V (possibly NULL) pointer to a vector.
+ @param E element(s) to insert
+ @param N number of elements to insert
+ @param M insertion point
+ @param A alignment (may be zero)
+ @return V (value-result macro parameter)
+*/
+#define vec_insert_elts_aligned(V,E,N,M,A) vec_insert_elts_ha(V,E,N,M,0,A)
+
+/** \brief Delete N elements starting at element M
+
+ @param V pointer to a vector
+ @param N number of elements to delete
+ @param M first element to delete
+ @return V (value-result macro parameter)
+*/
+#define vec_delete(V,N,M) \
+do { \
+ word _v(l) = vec_len (V); \
+ word _v(n) = (N); \
+ word _v(m) = (M); \
+ /* Copy over deleted elements. */ \
+ if (_v(l) - _v(n) - _v(m) > 0) \
+ memmove ((V) + _v(m), (V) + _v(m) + _v(n), \
+ (_v(l) - _v(n) - _v(m)) * sizeof ((V)[0])); \
+ /* Zero empty space at end (for future re-allocation). */ \
+ if (_v(n) > 0) \
+ memset ((V) + _v(l) - _v(n), 0, _v(n) * sizeof ((V)[0])); \
+ _vec_len (V) -= _v(n); \
+} while (0)
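+
+/* Sketch: vec_insert() and vec_delete() shift elements and preserve
+ order (illustrative):
+
+ // v = {0,1,2,3,4}
+ vec_insert (v, 2, 1); // v = {0,0,0,1,2,3,4}
+ vec_delete (v, 2, 1); // v = {0,1,2,3,4} again
+*/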
+
+/** \brief Delete the element at index I by copying the last element
+ into its place (does not preserve element order)
+
+ @param V pointer to a vector
+ @param I index to delete
+*/
+#define vec_del1(v,i) \
+do { \
+ uword _vec_del_l = _vec_len (v) - 1; \
+ uword _vec_del_i = (i); \
+ if (_vec_del_i < _vec_del_l) \
+ (v)[_vec_del_i] = (v)[_vec_del_l]; \
+ _vec_len (v) = _vec_del_l; \
+} while (0)
+
+/** \brief Append v2 after v1. Result in v1.
+ @param V1 target vector
+ @param V2 vector to append
+*/
+
+#define vec_append(v1,v2) \
+do { \
+ uword _v(l1) = vec_len (v1); \
+ uword _v(l2) = vec_len (v2); \
+ \
+ v1 = _vec_resize ((v1), _v(l2), \
+ (_v(l1) + _v(l2)) * sizeof ((v1)[0]), 0, 0); \
+ clib_memcpy ((v1) + _v(l1), (v2), _v(l2) * sizeof ((v2)[0])); \
+} while (0)
+
+/** \brief Append v2 after v1. Result in v1. Specified alignment.
+ @param V1 target vector
+ @param V2 vector to append
+ @param align required alignment
+*/
+
+#define vec_append_aligned(v1,v2,align) \
+do { \
+ uword _v(l1) = vec_len (v1); \
+ uword _v(l2) = vec_len (v2); \
+ \
+ v1 = _vec_resize ((v1), _v(l2), \
+ (_v(l1) + _v(l2)) * sizeof ((v1)[0]), 0, align); \
+ clib_memcpy ((v1) + _v(l1), (v2), _v(l2) * sizeof ((v2)[0])); \
+} while (0)
+
+/** \brief Prepend v2 before v1. Result in v1.
+ @param V1 target vector
+ @param V2 vector to prepend
+*/
+
+#define vec_prepend(v1,v2) \
+do { \
+ uword _v(l1) = vec_len (v1); \
+ uword _v(l2) = vec_len (v2); \
+ \
+ v1 = _vec_resize ((v1), _v(l2), \
+ (_v(l1) + _v(l2)) * sizeof ((v1)[0]), 0, 0); \
+ memmove ((v1) + _v(l2), (v1), _v(l1) * sizeof ((v1)[0])); \
+ clib_memcpy ((v1), (v2), _v(l2) * sizeof ((v2)[0])); \
+} while (0)
+
+/** \brief Prepend v2 before v1. Result in v1. Specified alignment
+ @param V1 target vector
+ @param V2 vector to prepend
+ @param align required alignment
+*/
+
+#define vec_prepend_aligned(v1,v2,align) \
+do { \
+ uword _v(l1) = vec_len (v1); \
+ uword _v(l2) = vec_len (v2); \
+ \
+ v1 = _vec_resize ((v1), _v(l2), \
+ (_v(l1) + _v(l2)) * sizeof ((v1)[0]), 0, align); \
+ memmove ((v1) + _v(l2), (v1), _v(l1) * sizeof ((v1)[0])); \
+ clib_memcpy ((v1), (v2), _v(l2) * sizeof ((v2)[0])); \
+} while (0)
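+
+/* Sketch: concatenating u8 vectors such as those returned by format()
+ (illustrative):
+
+ u8 *a = format (0, "hello ");
+ u8 *b = format (0, "world");
+ vec_append (a, b); // a now holds "hello world"
+ vec_free (b);
+*/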
+
+
+/** \brief Zero all vector elements. Null-pointer tolerant.
+ @param var Vector to zero
+*/
+#define vec_zero(var) \
+do { \
+ if (var) \
+ memset ((var), 0, vec_len (var) * sizeof ((var)[0])); \
+} while (0)
+
+/** \brief Set all vector elements to given value. Null-pointer tolerant.
+ @param v vector to set
+ @param val value for each vector element
+*/
+#define vec_set(v,val) \
+do { \
+ word _v(i); \
+ __typeof__ ((v)[0]) _val = (val); \
+ for (_v(i) = 0; _v(i) < vec_len (v); _v(i)++) \
+ (v)[_v(i)] = _val; \
+} while (0)
+
+#ifdef CLIB_UNIX
+#include <stdlib.h> /* for qsort */
+#endif
+
+/** \brief Compare two vectors, not NULL-pointer tolerant
+
+ @param v1 Pointer to a vector
+ @param v2 Pointer to a vector
+ @return 1 if equal, 0 if unequal
+*/
+#define vec_is_equal(v1,v2) \
+ (vec_len (v1) == vec_len (v2) && ! memcmp ((v1), (v2), vec_len (v1) * sizeof ((v1)[0])))
+
+/** \brief Compare two vectors (only applicable to vectors of signed numbers).
+ Used in qsort compare functions.
+
+ @param v1 Pointer to a vector
+ @param v2 Pointer to a vector
+ @return -1, 0, +1
+*/
+#define vec_cmp(v1,v2) \
+({ \
+ word _v(i), _v(cmp), _v(l); \
+ _v(l) = clib_min (vec_len (v1), vec_len (v2)); \
+ _v(cmp) = 0; \
+ for (_v(i) = 0; _v(i) < _v(l); _v(i)++) { \
+ _v(cmp) = (v1)[_v(i)] - (v2)[_v(i)]; \
+ if (_v(cmp)) \
+ break; \
+ } \
+ if (_v(cmp) == 0 && _v(l) > 0) \
+ _v(cmp) = vec_len(v1) - vec_len(v2); \
+ (_v(cmp) < 0 ? -1 : (_v(cmp) > 0 ? +1 : 0)); \
+})
+
+/** \brief Search a vector for the index of the entry that matches.
+
+ @param v Pointer to a vector
+ @param E Entry to match
+ @return index of match or ~0
+*/
+#define vec_search(v,E) \
+({ \
+ word _v(i) = 0; \
+ while (_v(i) < vec_len(v)) \
+ { \
+ if ((v)[_v(i)] == E) \
+ break; \
+ _v(i)++; \
+ } \
+ if (_v(i) == vec_len(v)) \
+ _v(i) = ~0; \
+ _v(i); \
+})
+
+/** \brief Sort a vector using the supplied element comparison function
+
+ @param vec vector to sort
+ @param f comparison function
+*/
+#define vec_sort_with_function(vec,f) \
+do { \
+ qsort (vec, vec_len (vec), sizeof (vec[0]), (void *) (f)); \
+} while (0)
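+
+/* Sketch: sorting with a qsort-style comparison function
+ (illustrative):
+
+ static int
+ u32_compare (const void *a, const void *b)
+ {
+ const u32 *x = a, *y = b;
+ return *x < *y ? -1 : *x > *y;
+ }
+
+ vec_sort_with_function (v, u32_compare);
+*/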
+
+/** \brief Make a vector containing a NULL terminated c-string.
+
+ @param V (possibly NULL) pointer to a vector.
+ @param S pointer to string buffer.
+ @param L string length (NOT including the terminating NULL; a la strlen())
+*/
+#define vec_validate_init_c_string(V, S, L) \
+ do { \
+ vec_reset_length (V); \
+ vec_validate ((V), (L)); \
+ if ((S) && (L)) \
+ clib_memcpy ((V), (S), (L)); \
+ (V)[(L)] = 0; \
+ } while (0)
+
+
+/** \brief Test whether a vector is a NULL terminated c-string.
+
+ @param V (possibly NULL) pointer to a vector.
+ @return BOOLEAN indicating if the vector c-string is null terminated.
+*/
+#define vec_c_string_is_terminated(V) \
+ (((V) != 0) && (vec_len (V) != 0) && ((V)[vec_len ((V)) - 1] == 0))
+
+/** \brief (If necessary) NULL terminate a vector containing a c-string.
+
+ @param V (possibly NULL) pointer to a vector.
+ @return V (value-result macro parameter)
+*/
+#define vec_terminate_c_string(V) \
+ do { \
+ u32 vl = vec_len ((V)); \
+ if (!vec_c_string_is_terminated(V)) \
+ { \
+ vec_validate ((V), vl); \
+ (V)[vl] = 0; \
+ } \
+ } while (0)
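+
+/* Sketch: building and checking a NUL-terminated c-string vector
+ (illustrative):
+
+ u8 *s = 0;
+ vec_validate_init_c_string (s, "eth0", 4);
+ ASSERT (vec_c_string_is_terminated (s));
+ // s is now safe to pass to %s-style formatting
+*/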
+
+#endif /* included_vec_h */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/vec_bootstrap.h b/src/vppinfra/vec_bootstrap.h
new file mode 100644
index 00000000..3b8c7707
--- /dev/null
+++ b/src/vppinfra/vec_bootstrap.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_clib_vec_bootstrap_h
+#define included_clib_vec_bootstrap_h
+
+/** \file
+ Vector bootstrap header file
+*/
+
+/* Bootstrap include so that #include <vppinfra/mem.h> can include e.g.
+ <vppinfra/mheap.h> which depends on <vppinfra/vec.h>. */
+
+/** \brief vector header structure
+
+ Bookkeeping header preceding vector elements in memory.
+ User header information may precede the standard vec header.
+ If you change u32 len -> u64 len, single vectors can
+ exceed 2**32 elements. Clib heaps are vectors. */
+
+typedef struct
+{
+#if CLIB_VEC64 > 0
+ u64 len;
+#else
+ u32 len; /**< Number of elements in vector (NOT its allocated length). */
+#endif
+ u8 vector_data[0]; /**< Vector data. */
+} vec_header_t;
+
+/** \brief Find the vector header
+
+ Given the user's pointer to a vector, find the corresponding
+ vector header
+
+ @param v pointer to a vector
+ @return pointer to the vector's vec_header_t
+*/
+#define _vec_find(v) ((vec_header_t *) (v) - 1)
+
+#define _vec_round_size(s) \
+ (((s) + sizeof (uword) - 1) &~ (sizeof (uword) - 1))
+
+always_inline uword
+vec_header_bytes (uword header_bytes)
+{
+ return round_pow2 (header_bytes + sizeof (vec_header_t),
+ sizeof (vec_header_t));
+}
+
+/** \brief Find a user vector header
+
+ Finds the user header of a vector with unspecified alignment given
+ the user pointer to the vector.
+*/
+
+always_inline void *
+vec_header (void *v, uword header_bytes)
+{
+ return v - vec_header_bytes (header_bytes);
+}
+
+/** \brief Find the end of user vector header
+
+ Finds the end of the user header of a vector with unspecified
+ alignment given the user pointer to the vector.
+*/
+
+always_inline void *
+vec_header_end (void *v, uword header_bytes)
+{
+ return v + vec_header_bytes (header_bytes);
+}
+
+always_inline uword
+vec_aligned_header_bytes (uword header_bytes, uword align)
+{
+ return round_pow2 (header_bytes + sizeof (vec_header_t), align);
+}
+
+always_inline void *
+vec_aligned_header (void *v, uword header_bytes, uword align)
+{
+ return v - vec_aligned_header_bytes (header_bytes, align);
+}
+
+always_inline void *
+vec_aligned_header_end (void *v, uword header_bytes, uword align)
+{
+ return v + vec_aligned_header_bytes (header_bytes, align);
+}
+
+
+/** \brief Number of elements in vector (lvalue-capable)
+
+ _vec_len (v) does not check for null, but can be used as an lvalue
+ (e.g. _vec_len (v) = 99).
+*/
+
+#define _vec_len(v) (_vec_find(v)->len)
+
+/** \brief Number of elements in vector (rvalue-only, NULL tolerant)
+
+ vec_len (v) checks for NULL, but cannot be used as an lvalue.
+ If in doubt, use vec_len...
+*/
+
+#define vec_len(v) ((v) ? _vec_len(v) : 0)
+
+/** \brief Reset vector length to zero
+ NULL-pointer tolerant
+*/
+
+#define vec_reset_length(v) do { if (v) _vec_len (v) = 0; } while (0)
+
+/** \brief Number of data bytes in vector. */
+
+#define vec_bytes(v) (vec_len (v) * sizeof (v[0]))
+
+/** \brief Total number of bytes that can fit in vector with current allocation. */
+
+#define vec_capacity(v,b) \
+({ \
+ void * _vec_capacity_v = (void *) (v); \
+ uword _vec_capacity_b = (b); \
+ _vec_capacity_b = sizeof (vec_header_t) + _vec_round_size (_vec_capacity_b); \
+ _vec_capacity_v ? clib_mem_size (_vec_capacity_v - _vec_capacity_b) : 0; \
+})
+
+/** \brief Total number of elements that can fit into vector. */
+#define vec_max_len(v) (vec_capacity(v,0) / sizeof (v[0]))
+
+/** \brief End (last data address) of vector. */
+#define vec_end(v) ((v) + vec_len (v))
+
+/** \brief True if given pointer is within given vector. */
+#define vec_is_member(v,e) ((e) >= (v) && (e) < vec_end (v))
+
+/** \brief Get vector value at index i checking that i is in bounds. */
+#define vec_elt_at_index(v,i) \
+({ \
+ ASSERT ((i) < vec_len (v)); \
+ (v) + (i); \
+})
+
+/** \brief Get vector value at index i */
+#define vec_elt(v,i) (vec_elt_at_index(v,i))[0]
+
+/** \brief Vector iterator */
+#define vec_foreach(var,vec) for (var = (vec); var < vec_end (vec); var++)
+
+/** \brief Vector iterator (reverse) */
+#define vec_foreach_backwards(var,vec) \
+for (var = vec_end (vec) - 1; var >= (vec); var--)
+
+/** \brief Iterate over vector indices. */
+#define vec_foreach_index(var,v) for ((var) = 0; (var) < vec_len (v); (var)++)
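+
+/* Sketch: the two iteration styles (illustrative):
+
+ u32 *v, *elt, sum = 0;
+ word i;
+ vec_foreach (elt, v) sum += elt[0]; // by element pointer
+ vec_foreach_index (i, v) sum += v[i]; // by index
+*/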
+
+#endif /* included_clib_vec_bootstrap_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/vector.c b/src/vppinfra/vector.c
new file mode 100644
index 00000000..68b4fdc2
--- /dev/null
+++ b/src/vppinfra/vector.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/types.h>
+
+#if defined (__SSE2__)
+u8 u32x4_compare_word_mask_table[256] = {
+ [0xf0] = (1 << 1),
+ [0x0f] = (1 << 0),
+ [0xff] = (1 << 0) | (1 << 1),
+};
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/vector.h b/src/vppinfra/vector.h
new file mode 100644
index 00000000..491e7cfe
--- /dev/null
+++ b/src/vppinfra/vector.h
@@ -0,0 +1,268 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_clib_vector_h
+#define included_clib_vector_h
+
+#include <vppinfra/clib.h>
+
+/* Vector types. */
+
+#if defined (__MMX__) || defined (__IWMMXT__) || defined (__aarch64__)
+#define CLIB_HAVE_VEC64
+#endif
+
+#if defined (__SSE2__) && __GNUC__ >= 4
+#define CLIB_HAVE_VEC128
+#endif
+
+#if defined (__ALTIVEC__)
+#define CLIB_HAVE_VEC128
+#endif
+
+/* 128 implies 64 */
+#ifdef CLIB_HAVE_VEC128
+#define CLIB_HAVE_VEC64
+#endif
+
+#define _vector_size(n) __attribute__ ((vector_size (n)))
+
+#if defined (__aarch64__) || defined (__arm__)
+typedef u8 u8x16 _vector_size (16);
+typedef u16 u16x8 _vector_size (16);
+typedef u32 u32x4 _vector_size (16);
+typedef u64 u64x2 _vector_size (16);
+#endif
+
+#ifdef CLIB_HAVE_VEC64
+/* Signed 64 bit. */
+typedef char i8x8 _vector_size (8);
+typedef short i16x4 _vector_size (8);
+typedef int i32x2 _vector_size (8);
+
+/* Unsigned 64 bit. */
+typedef unsigned char u8x8 _vector_size (8);
+typedef unsigned short u16x4 _vector_size (8);
+typedef unsigned int u32x2 _vector_size (8);
+
+/* Floating point 64 bit. */
+typedef float f32x2 _vector_size (8);
+#endif /* CLIB_HAVE_VEC64 */
+
+#ifdef CLIB_HAVE_VEC128
+/* Signed 128 bit. */
+typedef i8 i8x16 _vector_size (16);
+typedef i16 i16x8 _vector_size (16);
+typedef i32 i32x4 _vector_size (16);
+typedef long long i64x2 _vector_size (16);
+
+/* Unsigned 128 bit. */
+typedef u8 u8x16 _vector_size (16);
+typedef u16 u16x8 _vector_size (16);
+typedef u32 u32x4 _vector_size (16);
+typedef u64 u64x2 _vector_size (16);
+
+typedef f32 f32x4 _vector_size (16);
+typedef f64 f64x2 _vector_size (16);
+
+/* Signed 256 bit. */
+typedef i8 i8x32 _vector_size (32);
+typedef i16 i16x16 _vector_size (32);
+typedef i32 i32x8 _vector_size (32);
+typedef long long i64x4 _vector_size (32);
+
+/* Unsigned 256 bit. */
+typedef u8 u8x32 _vector_size (32);
+typedef u16 u16x16 _vector_size (32);
+typedef u32 u32x8 _vector_size (32);
+typedef u64 u64x4 _vector_size (32);
+
+typedef f32 f32x8 _vector_size (32);
+typedef f64 f64x4 _vector_size (32);
+#endif /* CLIB_HAVE_VEC128 */
+
+/* Vector word sized types. */
+#ifndef CLIB_VECTOR_WORD_BITS
+#ifdef CLIB_HAVE_VEC128
+#define CLIB_VECTOR_WORD_BITS 128
+#else
+#define CLIB_VECTOR_WORD_BITS 64
+#endif
+#endif /* CLIB_VECTOR_WORD_BITS */
+
+/* Vector word sized types. */
+#if CLIB_VECTOR_WORD_BITS == 128
+typedef i8 i8x _vector_size (16);
+typedef i16 i16x _vector_size (16);
+typedef i32 i32x _vector_size (16);
+typedef i64 i64x _vector_size (16);
+typedef u8 u8x _vector_size (16);
+typedef u16 u16x _vector_size (16);
+typedef u32 u32x _vector_size (16);
+typedef u64 u64x _vector_size (16);
+#endif
+#if CLIB_VECTOR_WORD_BITS == 64
+typedef i8 i8x _vector_size (8);
+typedef i16 i16x _vector_size (8);
+typedef i32 i32x _vector_size (8);
+typedef i64 i64x _vector_size (8);
+typedef u8 u8x _vector_size (8);
+typedef u16 u16x _vector_size (8);
+typedef u32 u32x _vector_size (8);
+typedef u64 u64x _vector_size (8);
+#endif
+
+#undef _vector_size
+
+#define VECTOR_WORD_TYPE(t) t##x
+#define VECTOR_WORD_TYPE_LEN(t) (sizeof (VECTOR_WORD_TYPE(t)) / sizeof (t))
+
+/* Union types. */
+#if (defined(CLIB_HAVE_VEC128) || defined(CLIB_HAVE_VEC64))
+
+#define _(t) \
+ typedef union { \
+ t##x as_##t##x; \
+ t as_##t[VECTOR_WORD_TYPE_LEN (t)]; \
+ } t##x##_union_t;
+
+_(u8);
+_(u16);
+_(u32);
+_(u64);
+_(i8);
+_(i16);
+_(i32);
+_(i64);
+
+#undef _
+
+#endif
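+
+/* Sketch: union types give lane-wise access to a vector register;
+ x is a hypothetical u32x4 value:
+
+ u32x4_union_t u;
+ u.as_u32x4 = x;
+ u32 lane0 = u.as_u32[0];
+*/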
+
+#ifdef CLIB_HAVE_VEC64
+
+#define _(t,n) \
+ typedef union { \
+ t##x##n as_##t##x##n; \
+ t as_##t[n]; \
+ } t##x##n##_union_t; \
+
+_(u8, 8);
+_(u16, 4);
+_(u32, 2);
+_(i8, 8);
+_(i16, 4);
+_(i32, 2);
+
+#undef _
+
+#endif
+
+#ifdef CLIB_HAVE_VEC128
+
+#define _(t,n) \
+ typedef union { \
+ t##x##n as_##t##x##n; \
+ t as_##t[n]; \
+ } t##x##n##_union_t; \
+
+_(u8, 16);
+_(u16, 8);
+_(u32, 4);
+_(u64, 2);
+_(i8, 16);
+_(i16, 8);
+_(i32, 4);
+_(i64, 2);
+_(f32, 4);
+_(f64, 2);
+
+#undef _
+
+#endif
+
+/* When we don't have vector types, still define e.g. u32x4_union_t but as an array. */
+#if !defined(CLIB_HAVE_VEC128) && !defined(CLIB_HAVE_VEC64)
+
+#define _(t,n) \
+ typedef union { \
+ t as_##t[n]; \
+ } t##x##n##_union_t; \
+
+_(u8, 16);
+_(u16, 8);
+_(u32, 4);
+_(u64, 2);
+_(i8, 16);
+_(i16, 8);
+_(i32, 4);
+_(i64, 2);
+
+#undef _
+
+#endif
+
+#if defined (__SSE2__) && __GNUC__ >= 4
+#include <vppinfra/vector_sse2.h>
+#endif
+
+#if defined (__ALTIVEC__)
+#include <vppinfra/vector_altivec.h>
+#endif
+
+#if defined (__IWMMXT__)
+#include <vppinfra/vector_iwmmxt.h>
+#endif
+
+#if defined (__aarch64__)
+#include <vppinfra/vector_neon.h>
+#endif
+
+#if (defined(CLIB_HAVE_VEC128) || defined(CLIB_HAVE_VEC64))
+#include <vppinfra/vector_funcs.h>
+#endif
+
+#endif /* included_clib_vector_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/vector_altivec.h b/src/vppinfra/vector_altivec.h
new file mode 100644
index 00000000..0e9de820
--- /dev/null
+++ b/src/vppinfra/vector_altivec.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2009 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_vector_altivec_h
+#define included_vector_altivec_h
+
+/* Splats. */
+#define _(t,n,ti,fi,tr,fr) \
+ always_inline t##x##n t##x##n##_splat (t v) \
+ { return (t##x##n) __builtin_altivec_##fi ((ti) v); } \
+ \
+ always_inline t##x##n t##x##n##_splat_word (t##x##n x, int word_index) \
+ { return (t##x##n) __builtin_altivec_##fr ((tr) x, word_index); }
+
+#define u16x8_splat(i) ((u16x8) __builtin_altivec_vspltish (i))
+#define i16x8_splat(i) ((i16x8) __builtin_altivec_vspltish (i))
+#define u32x4_splat(i) ((u32x4) __builtin_altivec_vspltisw (i))
+#define i32x4_splat(i) ((i32x4) __builtin_altivec_vspltisw (i))
+
+#define u16x8_splat_word(x,i) ((u16x8) __builtin_altivec_vsplth ((i16x8) (x), (i)))
+#define i16x8_splat_word(x,i) ((i16x8) __builtin_altivec_vsplth ((i16x8) (x), (i)))
+#define u32x4_splat_word(x,i) ((u32x4) __builtin_altivec_vspltw ((i32x4) (x), (i)))
+#define i32x4_splat_word(x,i) ((i32x4) __builtin_altivec_vspltw ((i32x4) (x), (i)))
+
+#undef _
+
+/* 128 bit shifts. */
+#define _(t,ti,lr,f) \
+ always_inline t t##_##lr (t x, t y) \
+ { return (t) __builtin_altivec_##f ((ti) x, (ti) y); } \
+ \
+ always_inline t t##_i##lr (t x, int i) \
+ { \
+ t j = {i,i,i,i}; \
+ return t##_##lr (x, j); \
+ }
+
+_(u16x8, i16x8, shift_left, vslh);
+_(u32x4, i32x4, shift_left, vslw);
+_(u16x8, i16x8, shift_right, vsrh);
+_(u32x4, i32x4, shift_right, vsrw);
+_(i16x8, i16x8, shift_right, vsrah);
+_(i32x4, i32x4, shift_right, vsraw);
+_(u16x8, i16x8, rotate_left, vrlh);
+_(i16x8, i16x8, rotate_left, vrlh);
+_(u32x4, i32x4, rotate_left, vrlw);
+_(i32x4, i32x4, rotate_left, vrlw);
+
+#undef _
+
+#define _(t,it,lr,f) \
+ always_inline t t##_word_shift_##lr (t x, int n_words) \
+ { \
+ i32x4 n_bits = {0,0,0,n_words * BITS (it)}; \
+ return (t) __builtin_altivec_##f ((i32x4) x, n_bits); \
+ }
+
+_(u32x4, u32, left, vslo)
+_(i32x4, i32, left, vslo)
+_(u32x4, u32, right, vsro)
+_(i32x4, i32, right, vsro)
+_(u16x8, u16, left, vslo)
+_(i16x8, i16, left, vslo)
+_(u16x8, u16, right, vsro)
+_(i16x8, i16, right, vsro)
+#undef _
+
+always_inline u32
+u32x4_get0 (u32x4 x)
+{
+ u32x4_union_t y;
+ y.as_u32x4 = x;
+ return y.as_u32[3];
+}
+
+/* Interleave. */
+#define _(t,it,lh,f) \
+ always_inline t t##_interleave_##lh (t x, t y) \
+ { return (t) __builtin_altivec_##f ((it) x, (it) y); }
+
+_(u32x4, i32x4, lo, vmrglw)
+_(i32x4, i32x4, lo, vmrglw)
+_(u16x8, i16x8, lo, vmrglh)
+_(i16x8, i16x8, lo, vmrglh)
+_(u32x4, i32x4, hi, vmrghw)
+_(i32x4, i32x4, hi, vmrghw)
+_(u16x8, i16x8, hi, vmrghh)
+_(i16x8, i16x8, hi, vmrghh)
+#undef _
+/* Unaligned loads/stores. */
+#ifndef __cplusplus
+#define _(t) \
+ always_inline void t##_store_unaligned (t x, t * a) \
+ { clib_mem_unaligned (a, t) = x; } \
+ always_inline t t##_load_unaligned (t * a) \
+ { return clib_mem_unaligned (a, t); }
+ _(u8x16) _(u16x8) _(u32x4) _(u64x2) _(i8x16) _(i16x8) _(i32x4) _(i64x2)
+#undef _
+#endif
+#define _signed_binop(n,m,f,g) \
+ /* Unsigned */ \
+ always_inline u##n##x##m \
+ u##n##x##m##_##f (u##n##x##m x, u##n##x##m y) \
+ { return (u##n##x##m) __builtin_altivec_##g ((i##n##x##m) x, (i##n##x##m) y); } \
+ \
+ /* Signed */ \
+ always_inline i##n##x##m \
+ i##n##x##m##_##f (i##n##x##m x, i##n##x##m y) \
+ { return (i##n##x##m) __builtin_altivec_##g ((i##n##x##m) x, (i##n##x##m) y); }
+/* Compare operations. */
+_signed_binop (16, 8, is_equal, vcmpequh)
+_signed_binop (32, 4, is_equal, vcmpequw)
+#undef _signed_binop
+
+always_inline u16x8
+u16x8_is_zero (u16x8 x)
+{
+ u16x8 zero = { 0 };
+ return u16x8_is_equal (x, zero);
+}
+
+always_inline u32x4
+u32x4_is_zero (u32x4 x)
+{
+ u32x4 zero = { 0 };
+ return u32x4_is_equal (x, zero);
+}
+
+always_inline u32
+u32x4_zero_byte_mask (u32x4 x)
+{
+ u32x4 cmp = u32x4_is_zero (x);
+ u32x4 tmp = { 0x000f, 0x00f0, 0x0f00, 0xf000, };
+ cmp &= tmp;
+ cmp |= u32x4_word_shift_right (cmp, 2);
+ cmp |= u32x4_word_shift_right (cmp, 1);
+ return u32x4_get0 (cmp);
+}
+
+#endif /* included_vector_altivec_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/vector_funcs.h b/src/vppinfra/vector_funcs.h
new file mode 100644
index 00000000..db09de0f
--- /dev/null
+++ b/src/vppinfra/vector_funcs.h
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2008 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_vector_funcs_h
+#define included_vector_funcs_h
+
+#include <vppinfra/byte_order.h>
+
+/* Addition/subtraction. */
+#if CLIB_VECTOR_WORD_BITS == 128
+#define u8x_add u8x16_add
+#define u16x_add u16x8_add
+#define u32x_add u32x4_add
+#define u64x_add u64x2_add
+#define i8x_add i8x16_add
+#define i16x_add i16x8_add
+#define i32x_add i32x4_add
+#define i64x_add i64x2_add
+#define u8x_sub u8x16_sub
+#define u16x_sub u16x8_sub
+#define u32x_sub u32x4_sub
+#define u64x_sub u64x2_sub
+#define i8x_sub i8x16_sub
+#define i16x_sub i16x8_sub
+#define i32x_sub i32x4_sub
+#define i64x_sub i64x2_sub
+#endif
+
+#if CLIB_VECTOR_WORD_BITS == 64
+#define u8x_add u8x8_add
+#define u16x_add u16x4_add
+#define u32x_add u32x2_add
+#define i8x_add i8x8_add
+#define i16x_add i16x4_add
+#define i32x_add i32x2_add
+#define u8x_sub u8x8_sub
+#define u16x_sub u16x4_sub
+#define u32x_sub u32x2_sub
+#define i8x_sub i8x8_sub
+#define i16x_sub i16x4_sub
+#define i32x_sub i32x2_sub
+#endif
+
+/* Saturating addition/subtraction. */
+#if CLIB_VECTOR_WORD_BITS == 128
+#define u8x_add_saturate u8x16_add_saturate
+#define u16x_add_saturate u16x8_add_saturate
+#define i8x_add_saturate i8x16_add_saturate
+#define i16x_add_saturate i16x8_add_saturate
+#define u8x_sub_saturate u8x16_sub_saturate
+#define u16x_sub_saturate u16x8_sub_saturate
+#define i8x_sub_saturate i8x16_sub_saturate
+#define i16x_sub_saturate i16x8_sub_saturate
+#endif
+
+#if CLIB_VECTOR_WORD_BITS == 64
+#define u8x_add_saturate u8x8_add_saturate
+#define u16x_add_saturate u16x4_add_saturate
+#define i8x_add_saturate i8x8_add_saturate
+#define i16x_add_saturate i16x4_add_saturate
+#define u8x_sub_saturate u8x8_sub_saturate
+#define u16x_sub_saturate u16x4_sub_saturate
+#define i8x_sub_saturate i8x8_sub_saturate
+#define i16x_sub_saturate i16x4_sub_saturate
+#endif
+
+#define _vector_interleave(a,b,t) \
+do { \
+ t _tmp_lo = t##_interleave_lo (a, b); \
+ t _tmp_hi = t##_interleave_hi (a, b); \
+ if (CLIB_ARCH_IS_LITTLE_ENDIAN) \
+ (a) = _tmp_lo, (b) = _tmp_hi; \
+ else \
+ (a) = _tmp_hi, (b) = _tmp_lo; \
+} while (0)
+
+/* 128 bit interleaves. */
+#define u8x16_interleave(a,b) _vector_interleave(a,b,u8x16)
+#define i8x16_interleave(a,b) _vector_interleave(a,b,i8x16)
+#define u16x8_interleave(a,b) _vector_interleave(a,b,u16x8)
+#define i16x8_interleave(a,b) _vector_interleave(a,b,i16x8)
+#define u32x4_interleave(a,b) _vector_interleave(a,b,u32x4)
+#define i32x4_interleave(a,b) _vector_interleave(a,b,i32x4)
+#define u64x2_interleave(a,b) _vector_interleave(a,b,u64x2)
+#define i64x2_interleave(a,b) _vector_interleave(a,b,i64x2)
+
+/* 64 bit interleaves. */
+#define u8x8_interleave(a,b) _vector_interleave(a,b,u8x8)
+#define i8x8_interleave(a,b) _vector_interleave(a,b,i8x8)
+#define u16x4_interleave(a,b) _vector_interleave(a,b,u16x4)
+#define i16x4_interleave(a,b) _vector_interleave(a,b,i16x4)
+#define u32x2_interleave(a,b) _vector_interleave(a,b,u32x2)
+#define i32x2_interleave(a,b) _vector_interleave(a,b,i32x2)
+
+/* Word sized interleaves. */
+#if CLIB_VECTOR_WORD_BITS == 128
+#define u8x_interleave u8x16_interleave
+#define u16x_interleave u16x8_interleave
+#define u32x_interleave u32x4_interleave
+#define u64x_interleave u64x2_interleave
+#endif
+
+#if CLIB_VECTOR_WORD_BITS == 64
+#define u8x_interleave u8x8_interleave
+#define u16x_interleave u16x4_interleave
+#define u32x_interleave u32x2_interleave
+#define u64x_interleave(a,b) /* do nothing */
+#endif
+
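+/* Usage sketch (illustrative, editor's example): the interleave macros
+   update both arguments in place and already compensate for byte order
+   via CLIB_ARCH_IS_LITTLE_ENDIAN. */
+always_inline void
+example_zip_bytes (u8x16 * a, u8x16 * b)
+{
+  /* On return *a holds the low-half interleave of the two inputs
+     and *b the high half, in host lane order. */
+  u8x16_interleave (*a, *b);
+}
+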
+/* Vector word sized shifts. */
+#if CLIB_VECTOR_WORD_BITS == 128
+#define u8x_shift_left u8x16_shift_left
+#define i8x_shift_left i8x16_shift_left
+#define u16x_shift_left u16x8_shift_left
+#define i16x_shift_left i16x8_shift_left
+#define u32x_shift_left u32x4_shift_left
+#define i32x_shift_left i32x4_shift_left
+#define u64x_shift_left u64x2_shift_left
+#define i64x_shift_left i64x2_shift_left
+#define u8x_shift_right u8x16_shift_right
+#define i8x_shift_right i8x16_shift_right
+#define u16x_shift_right u16x8_shift_right
+#define i16x_shift_right i16x8_shift_right
+#define u32x_shift_right u32x4_shift_right
+#define i32x_shift_right i32x4_shift_right
+#define u64x_shift_right u64x2_shift_right
+#define i64x_shift_right i64x2_shift_right
+#define u8x_rotate_left u8x16_rotate_left
+#define i8x_rotate_left i8x16_rotate_left
+#define u16x_rotate_left u16x8_rotate_left
+#define i16x_rotate_left i16x8_rotate_left
+#define u32x_rotate_left u32x4_rotate_left
+#define i32x_rotate_left i32x4_rotate_left
+#define u64x_rotate_left u64x2_rotate_left
+#define i64x_rotate_left i64x2_rotate_left
+#define u8x_rotate_right u8x16_rotate_right
+#define i8x_rotate_right i8x16_rotate_right
+#define u16x_rotate_right u16x8_rotate_right
+#define i16x_rotate_right i16x8_rotate_right
+#define u32x_rotate_right u32x4_rotate_right
+#define i32x_rotate_right i32x4_rotate_right
+#define u64x_rotate_right u64x2_rotate_right
+#define i64x_rotate_right i64x2_rotate_right
+#define u8x_ishift_left u8x16_ishift_left
+#define i8x_ishift_left i8x16_ishift_left
+#define u16x_ishift_left u16x8_ishift_left
+#define i16x_ishift_left i16x8_ishift_left
+#define u32x_ishift_left u32x4_ishift_left
+#define i32x_ishift_left i32x4_ishift_left
+#define u64x_ishift_left u64x2_ishift_left
+#define i64x_ishift_left i64x2_ishift_left
+#define u8x_ishift_right u8x16_ishift_right
+#define i8x_ishift_right i8x16_ishift_right
+#define u16x_ishift_right u16x8_ishift_right
+#define i16x_ishift_right i16x8_ishift_right
+#define u32x_ishift_right u32x4_ishift_right
+#define i32x_ishift_right i32x4_ishift_right
+#define u64x_ishift_right u64x2_ishift_right
+#define i64x_ishift_right i64x2_ishift_right
+#define u8x_irotate_left u8x16_irotate_left
+#define i8x_irotate_left i8x16_irotate_left
+#define u16x_irotate_left u16x8_irotate_left
+#define i16x_irotate_left i16x8_irotate_left
+#define u32x_irotate_left u32x4_irotate_left
+#define i32x_irotate_left i32x4_irotate_left
+#define u64x_irotate_left u64x2_irotate_left
+#define i64x_irotate_left i64x2_irotate_left
+#define u8x_irotate_right u8x16_irotate_right
+#define i8x_irotate_right i8x16_irotate_right
+#define u16x_irotate_right u16x8_irotate_right
+#define i16x_irotate_right i16x8_irotate_right
+#define u32x_irotate_right u32x4_irotate_right
+#define i32x_irotate_right i32x4_irotate_right
+#define u64x_irotate_right u64x2_irotate_right
+#define i64x_irotate_right i64x2_irotate_right
+#endif
+
+#if CLIB_VECTOR_WORD_BITS == 64
+#define u8x_shift_left u8x8_shift_left
+#define i8x_shift_left i8x8_shift_left
+#define u16x_shift_left u16x4_shift_left
+#define i16x_shift_left i16x4_shift_left
+#define u32x_shift_left u32x2_shift_left
+#define i32x_shift_left i32x2_shift_left
+#define u8x_shift_right u8x8_shift_right
+#define i8x_shift_right i8x8_shift_right
+#define u16x_shift_right u16x4_shift_right
+#define i16x_shift_right i16x4_shift_right
+#define u32x_shift_right u32x2_shift_right
+#define i32x_shift_right i32x2_shift_right
+#define u8x_rotate_left u8x8_rotate_left
+#define i8x_rotate_left i8x8_rotate_left
+#define u16x_rotate_left u16x4_rotate_left
+#define i16x_rotate_left i16x4_rotate_left
+#define u32x_rotate_left u32x2_rotate_left
+#define i32x_rotate_left i32x2_rotate_left
+#define u8x_rotate_right u8x8_rotate_right
+#define i8x_rotate_right i8x8_rotate_right
+#define u16x_rotate_right u16x4_rotate_right
+#define i16x_rotate_right i16x4_rotate_right
+#define u32x_rotate_right u32x2_rotate_right
+#define i32x_rotate_right i32x2_rotate_right
+#define u8x_ishift_left u8x8_ishift_left
+#define i8x_ishift_left i8x8_ishift_left
+#define u16x_ishift_left u16x4_ishift_left
+#define i16x_ishift_left i16x4_ishift_left
+#define u32x_ishift_left u32x2_ishift_left
+#define i32x_ishift_left i32x2_ishift_left
+#define u8x_ishift_right u8x8_ishift_right
+#define i8x_ishift_right i8x8_ishift_right
+#define u16x_ishift_right u16x4_ishift_right
+#define i16x_ishift_right i16x4_ishift_right
+#define u32x_ishift_right u32x2_ishift_right
+#define i32x_ishift_right i32x2_ishift_right
+#define u8x_irotate_left u8x8_irotate_left
+#define i8x_irotate_left i8x8_irotate_left
+#define u16x_irotate_left u16x4_irotate_left
+#define i16x_irotate_left i16x4_irotate_left
+#define u32x_irotate_left u32x2_irotate_left
+#define i32x_irotate_left i32x2_irotate_left
+#define u8x_irotate_right u8x8_irotate_right
+#define i8x_irotate_right i8x8_irotate_right
+#define u16x_irotate_right u16x4_irotate_right
+#define i16x_irotate_right i16x4_irotate_right
+#define u32x_irotate_right u32x2_irotate_right
+#define i32x_irotate_right i32x2_irotate_right
+#endif
+
+#if CLIB_VECTOR_WORD_BITS == 128
+#define u8x_splat u8x16_splat
+#define i8x_splat i8x16_splat
+#define u16x_splat u16x8_splat
+#define i16x_splat i16x8_splat
+#define u32x_splat u32x4_splat
+#define i32x_splat i32x4_splat
+#define u64x_splat u64x2_splat
+#define i64x_splat i64x2_splat
+#endif
+
+#if CLIB_VECTOR_WORD_BITS == 64
+#define u8x_splat u8x8_splat
+#define i8x_splat i8x8_splat
+#define u16x_splat u16x4_splat
+#define i16x_splat i16x4_splat
+#define u32x_splat u32x2_splat
+#define i32x_splat i32x2_splat
+#endif
+
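+/* Usage sketch (illustrative, editor's example): the width-generic
+   u8x_/u16x_/... aliases above, together with the word-sized vector
+   types from <vppinfra/vector.h>, let a routine compile for whichever
+   CLIB_VECTOR_WORD_BITS the target provides. */
+always_inline u8x
+example_add_saturate_1 (u8x x)
+{
+  /* Add 1 to every byte lane, clamping at the lane maximum instead
+     of wrapping. */
+  return u8x_add_saturate (x, u8x_splat (1));
+}
+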
+#define u32x4_transpose_step(x,y) \
+do { \
+ u32x4 _x = (x); \
+ u32x4 _y = (y); \
+ (x) = u32x4_interleave_lo (_x, _y); \
+ (y) = u32x4_interleave_hi (_x, _y); \
+} while (0)
+
+/* 4x4 transpose: x_ij -> x_ji */
+#define u32x4_transpose(x0,x1,x2,x3) \
+do { \
+ u32x4 _x0 = (u32x4) (x0); \
+ u32x4 _x1 = (u32x4) (x1); \
+ u32x4 _x2 = (u32x4) (x2); \
+ u32x4 _x3 = (u32x4) (x3); \
+ u32x4_transpose_step (_x0, _x2); \
+ u32x4_transpose_step (_x1, _x3); \
+ u32x4_transpose_step (_x0, _x1); \
+ u32x4_transpose_step (_x2, _x3); \
+ (x0) = (u32x4) _x0; \
+ (x1) = (u32x4) _x1; \
+ (x2) = (u32x4) _x2; \
+ (x3) = (u32x4) _x3; \
+} while (0)
+
+#define i32x4_transpose(x0,x1,x2,x3) \
+do { \
+ u32x4 _x0 = (u32x4) (x0); \
+ u32x4 _x1 = (u32x4) (x1); \
+ u32x4 _x2 = (u32x4) (x2); \
+ u32x4 _x3 = (u32x4) (x3); \
+ u32x4_transpose_step (_x0, _x2); \
+ u32x4_transpose_step (_x1, _x3); \
+ u32x4_transpose_step (_x0, _x1); \
+ u32x4_transpose_step (_x2, _x3); \
+ (x0) = (i32x4) _x0; \
+ (x1) = (i32x4) _x1; \
+ (x2) = (i32x4) _x2; \
+ (x3) = (i32x4) _x3; \
+} while (0)
+
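+/* Worked example (illustrative): treating the four arguments as rows
+   of a 4x4 matrix, on a little-endian target
+
+     x0 = {0,1,2,3}   x1 = {4,5,6,7}   x2 = {8,9,10,11}  x3 = {12,13,14,15}
+
+   becomes, after u32x4_transpose (x0, x1, x2, x3),
+
+     x0 = {0,4,8,12}  x1 = {1,5,9,13}  x2 = {2,6,10,14}  x3 = {3,7,11,15}. */
+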
+#undef _
+
+#endif /* included_vector_funcs_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/vector_iwmmxt.h b/src/vppinfra/vector_iwmmxt.h
new file mode 100644
index 00000000..8e662045
--- /dev/null
+++ b/src/vppinfra/vector_iwmmxt.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2008 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_vector_iwmmxt_h
+#define included_vector_iwmmxt_h
+
+#include <vppinfra/error.h> /* for ASSERT */
+
+/* 64 bit interleaves. */
+always_inline u8x8
+u8x8_interleave_hi (u8x8 a, u8x8 b)
+{
+ return __builtin_arm_wunpckihb (a, b);
+}
+
+always_inline u8x8
+u8x8_interleave_lo (u8x8 a, u8x8 b)
+{
+ return __builtin_arm_wunpckilb (a, b);
+}
+
+always_inline u16x4
+u16x4_interleave_hi (u16x4 a, u16x4 b)
+{
+ return __builtin_arm_wunpckihh (a, b);
+}
+
+always_inline u16x4
+u16x4_interleave_lo (u16x4 a, u16x4 b)
+{
+ return __builtin_arm_wunpckilh (a, b);
+}
+
+always_inline u32x2
+u32x2_interleave_hi (u32x2 a, u32x2 b)
+{
+ return __builtin_arm_wunpckihw (a, b);
+}
+
+always_inline u32x2
+u32x2_interleave_lo (u32x2 a, u32x2 b)
+{
+ return __builtin_arm_wunpckilw (a, b);
+}
+
+always_inline u32x2
+u32x2_splat (u32 a)
+{
+ u32x2 x = { a };
+ x = u32x2_interleave_lo (x, x);
+ return x;
+}
+
+always_inline u16x4
+u16x4_splat (u16 a)
+{
+ u32 t = (u32) a | ((u32) a << 16);
+ return u32x2_splat (t);
+}
+
+always_inline u8x8
+u8x8_splat (u8 a)
+{
+ u32 t = (u32) a | ((u32) a << 8);
+ t |= t << 16;
+ return u32x2_splat (t);
+}
+
+#define i32x2_splat u32x2_splat
+#define i16x4_splat u16x4_splat
+#define i8x8_splat u8x8_splat
+
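+/* Worked example (illustrative): u8x8_splat (0xab) widens the scalar,
+   0xab -> 0xabab -> 0xabababab, then splats that u32 into both 32-bit
+   lanes, so one interleave replaces eight byte inserts. */
+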
+/* 64 bit shifts. */
+
+/* As of July 2008 the __builtin_arm shifts cause gcc-4.3.1 to crash,
+   so we use asm versions instead. */
+#define _(t,u,lr,f) \
+  always_inline t \
+  t##_##lr (t x, int i) \
+  { \
+    t y; \
+    asm (#f " %[y], %[x], %[shift]" \
+	 : [y] "=y" (y) \
+	 : [x] "y" (x), [shift] "i" (i * u)); \
+    return y; \
+  }
+
+_(u16x4, 1, shift_left, wsllhi)
+_(u32x2, 1, shift_left, wsllwi)
+_(u16x4, 1, shift_right, wsrlhi)
+_(u32x2, 1, shift_right, wsrlwi)
+_(i16x4, 1, shift_left, wsllhi)
+_(i32x2, 1, shift_left, wsllwi)
+_(i16x4, 1, shift_right, wsrahi)
+_(i32x2, 1, shift_right, wsrawi)
+
+/* Word shifts. */
+_(u8x8, 8, word_shift_left, wslldi)
+_(u16x4, 16, word_shift_left, wslldi)
+_(u32x2, 32, word_shift_left, wslldi)
+_(u8x8, 8, word_shift_right, wsrldi)
+_(u16x4, 16, word_shift_right, wsrldi)
+_(u32x2, 32, word_shift_right, wsrldi)
+_(i8x8, 8, word_shift_left, wslldi)
+_(i16x4, 16, word_shift_left, wslldi)
+_(i32x2, 32, word_shift_left, wslldi)
+_(i8x8, 8, word_shift_right, wsrldi)
+_(i16x4, 16, word_shift_right, wsrldi)
+_(i32x2, 32, word_shift_right, wsrldi)
+#undef _
+#endif /* included_vector_iwmmxt_h */
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/vector_neon.h b/src/vppinfra/vector_neon.h
new file mode 100644
index 00000000..cea52759
--- /dev/null
+++ b/src/vppinfra/vector_neon.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_vector_neon_h
+#define included_vector_neon_h
+#include <arm_neon.h>
+
+/* Splats. */
+
+#define u8x16_splat(i) vdupq_n_u8(i)
+#define u16x8_splat(i) vdupq_n_u16(i)
+#define i16x8_splat(i) vdupq_n_s16(i)
+#define u32x4_splat(i) vdupq_n_u32(i)
+#define i32x4_splat(i) vdupq_n_s32(i)
+
+/* Arithmetic */
+#define u16x8_add(a,b) vaddq_u16(a,b)
+#define i16x8_add(a,b) vaddq_s16(a,b)
+#define u16x8_sub_saturate(a,b) vqsubq_u16(a,b)
+#define i16x8_sub_saturate(a,b) vqsubq_s16(a,b)
+
+#define u16x8_is_equal(a,b) vceqq_u16(a,b)
+#define i16x8_is_equal(a,b) vceqq_s16(a,b)
+
+always_inline u32
+u16x8_zero_byte_mask (u16x8 input)
+{
+  u8x16 all_zero = vdupq_n_u8 (0);
+ u8x16 res_values = { 0x01, 0x02, 0x04, 0x08,
+ 0x10, 0x20, 0x40, 0x80,
+ 0x01, 0x02, 0x04, 0x08,
+ 0x10, 0x20, 0x40, 0x80
+ };
+
+ /* input --> [0x80, 0x40, 0x01, 0xf0, ... ] */
+  u8x16 test_result =
+    vreinterpretq_u8_u16 (vceqq_u16 (input, vreinterpretq_u16_u8 (all_zero)));
+ u8x16 before_merge = vminq_u8 (test_result, res_values);
+ /*before_merge--> [0x80, 0x00, 0x00, 0x10, ... ] */
+ /* u8x16 --> [a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p] */
+ /* pair add until we have 2 uint64_t */
+ u16x8 merge1 = vpaddlq_u8 (before_merge);
+ /* u16x8--> [a+b,c+d, e+f,g+h, i+j,k+l, m+n,o+p] */
+ u32x4 merge2 = vpaddlq_u16 (merge1);
+ /* u32x4--> [a+b+c+d, e+f+g+h, i+j+k+l, m+n+o+p] */
+ u64x2 merge3 = vpaddlq_u32 (merge2);
+ /* u64x2--> [a+b+c+d+e+f+g+h, i+j+k+l+m+n+o+p] */
+ return (u32) (vgetq_lane_u64 (merge3, 1) << 8) + vgetq_lane_u64 (merge3, 0);
+}
+
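+/* Worked example (illustrative): each zero u16 lane sets its two flag
+   bytes, so input = {0,1,2,3,4,5,6,0} collects 0x01 | 0x02 = 0x03 from
+   lane 0 (bytes 0-1) and (0x40 | 0x80) << 8 = 0xc000 from lane 7
+   (bytes 14-15), returning 0xc003. */
+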
+#endif /* included_vector_neon_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/vector_sse2.h b/src/vppinfra/vector_sse2.h
new file mode 100644
index 00000000..6830d5c6
--- /dev/null
+++ b/src/vppinfra/vector_sse2.h
@@ -0,0 +1,705 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_vector_sse2_h
+#define included_vector_sse2_h
+
+#include <vppinfra/error_bootstrap.h> /* for ASSERT */
+#include <x86intrin.h>
+
+/* 128 bit interleaves. */
+always_inline u8x16
+u8x16_interleave_hi (u8x16 a, u8x16 b)
+{
+ return (u8x16) _mm_unpackhi_epi8 ((__m128i) a, (__m128i) b);
+}
+
+always_inline u8x16
+u8x16_interleave_lo (u8x16 a, u8x16 b)
+{
+ return (u8x16) _mm_unpacklo_epi8 ((__m128i) a, (__m128i) b);
+}
+
+always_inline u16x8
+u16x8_interleave_hi (u16x8 a, u16x8 b)
+{
+ return (u16x8) _mm_unpackhi_epi16 ((__m128i) a, (__m128i) b);
+}
+
+always_inline u16x8
+u16x8_interleave_lo (u16x8 a, u16x8 b)
+{
+ return (u16x8) _mm_unpacklo_epi16 ((__m128i) a, (__m128i) b);
+}
+
+always_inline u32x4
+u32x4_interleave_hi (u32x4 a, u32x4 b)
+{
+ return (u32x4) _mm_unpackhi_epi32 ((__m128i) a, (__m128i) b);
+}
+
+always_inline u32x4
+u32x4_interleave_lo (u32x4 a, u32x4 b)
+{
+ return (u32x4) _mm_unpacklo_epi32 ((__m128i) a, (__m128i) b);
+}
+
+always_inline u64x2
+u64x2_interleave_hi (u64x2 a, u64x2 b)
+{
+ return (u64x2) _mm_unpackhi_epi64 ((__m128i) a, (__m128i) b);
+}
+
+always_inline u64x2
+u64x2_interleave_lo (u64x2 a, u64x2 b)
+{
+ return (u64x2) _mm_unpacklo_epi64 ((__m128i) a, (__m128i) b);
+}
+
+/* 64 bit interleaves. */
+always_inline u8x8
+u8x8_interleave_hi (u8x8 a, u8x8 b)
+{
+ return (u8x8) _m_punpckhbw ((__m64) a, (__m64) b);
+}
+
+always_inline u8x8
+u8x8_interleave_lo (u8x8 a, u8x8 b)
+{
+ return (u8x8) _m_punpcklbw ((__m64) a, (__m64) b);
+}
+
+always_inline u16x4
+u16x4_interleave_hi (u16x4 a, u16x4 b)
+{
+ return (u16x4) _m_punpckhwd ((__m64) a, (__m64) b);
+}
+
+always_inline u16x4
+u16x4_interleave_lo (u16x4 a, u16x4 b)
+{
+ return (u16x4) _m_punpcklwd ((__m64) a, (__m64) b);
+}
+
+always_inline u32x2
+u32x2_interleave_hi (u32x2 a, u32x2 b)
+{
+ return (u32x2) _m_punpckhdq ((__m64) a, (__m64) b);
+}
+
+always_inline u32x2
+u32x2_interleave_lo (u32x2 a, u32x2 b)
+{
+ return (u32x2) _m_punpckldq ((__m64) a, (__m64) b);
+}
+
+/* 128 bit packs. */
+always_inline u8x16
+u16x8_pack (u16x8 lo, u16x8 hi)
+{
+ return (u8x16) _mm_packus_epi16 ((__m128i) lo, (__m128i) hi);
+}
+
+always_inline i8x16
+i16x8_pack (i16x8 lo, i16x8 hi)
+{
+ return (i8x16) _mm_packs_epi16 ((__m128i) lo, (__m128i) hi);
+}
+
+always_inline u16x8
+u32x4_pack (u32x4 lo, u32x4 hi)
+{
+ return (u16x8) _mm_packs_epi32 ((__m128i) lo, (__m128i) hi);
+}
+
+/* 64 bit packs. */
+always_inline u8x8
+u16x4_pack (u16x4 lo, u16x4 hi)
+{
+ return (u8x8) _m_packuswb ((__m64) lo, (__m64) hi);
+}
+
+always_inline i8x8
+i16x4_pack (i16x4 lo, i16x4 hi)
+{
+ return (i8x8) _m_packsswb ((__m64) lo, (__m64) hi);
+}
+
+always_inline u16x4
+u32x2_pack (u32x2 lo, u32x2 hi)
+{
+ return (u16x4) _m_packssdw ((__m64) lo, (__m64) hi);
+}
+
+always_inline i16x4
+i32x2_pack (i32x2 lo, i32x2 hi)
+{
+ return (i16x4) _m_packssdw ((__m64) lo, (__m64) hi);
+}
+
+/* Splats: replicate scalar value into vector. */
+always_inline u64x2
+u64x2_splat (u64 a)
+{
+ u64x2 x = { a, a };
+ return x;
+}
+
+always_inline u32x4
+u32x4_splat (u32 a)
+{
+ u32x4 x = { a, a, a, a };
+ return x;
+}
+
+always_inline u16x8
+u16x8_splat (u16 a)
+{
+ u16x8 x = { a, a, a, a, a, a, a, a };
+ return x;
+}
+
+always_inline u8x16
+u8x16_splat (u8 a)
+{
+ u8x16 x = { a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a };
+ return x;
+}
+
+always_inline u32x2
+u32x2_splat (u32 a)
+{
+ u32x2 x = { a, a };
+ return x;
+}
+
+always_inline u16x4
+u16x4_splat (u16 a)
+{
+ u16x4 x = { a, a, a, a };
+ return x;
+}
+
+always_inline u8x8
+u8x8_splat (u8 a)
+{
+ u8x8 x = { a, a, a, a, a, a, a, a };
+ return x;
+}
+
+#define i64x2_splat u64x2_splat
+#define i32x4_splat u32x4_splat
+#define i16x8_splat u16x8_splat
+#define i8x16_splat u8x16_splat
+#define i32x2_splat u32x2_splat
+#define i16x4_splat u16x4_splat
+#define i8x8_splat u8x8_splat
+
+#ifndef __ICC
+always_inline u64x2
+u64x2_read_lo (u64x2 x, u64 * a)
+{
+ return (u64x2) _mm_loadl_pi ((__m128) x, (__m64 *) a);
+}
+
+always_inline u64x2
+u64x2_read_hi (u64x2 x, u64 * a)
+{
+ return (u64x2) _mm_loadh_pi ((__m128) x, (__m64 *) a);
+}
+
+always_inline void
+u64x2_write_lo (u64x2 x, u64 * a)
+{
+ _mm_storel_pi ((__m64 *) a, (__m128) x);
+}
+
+always_inline void
+u64x2_write_hi (u64x2 x, u64 * a)
+{
+ _mm_storeh_pi ((__m64 *) a, (__m128) x);
+}
+#endif
+
+/* Unaligned loads/stores. */
+
+#define _(t) \
+ always_inline void t##_store_unaligned (t x, t * a) \
+ { _mm_storeu_si128 ((__m128i *) a, (__m128i) x); } \
+ always_inline t t##_load_unaligned (t * a) \
+ { return (t) _mm_loadu_si128 ((__m128i *) a); }
+
+_(u8x16) _(u16x8) _(u32x4) _(u64x2) _(i8x16) _(i16x8) _(i32x4) _(i64x2)
+#undef _
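+
+/* Usage sketch (illustrative, editor's example): the unaligned
+   accessors make it safe to move 16 bytes between arbitrarily
+   aligned buffers. */
+always_inline void
+example_copy16 (u8 * dst, u8 * src)
+{
+  u8x16_store_unaligned (u8x16_load_unaligned ((u8x16 *) src),
+			 (u8x16 *) dst);
+}
+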
+#define _signed_binop(n,m,f,g) \
+ /* Unsigned */ \
+ always_inline u##n##x##m \
+ u##n##x##m##_##f (u##n##x##m x, u##n##x##m y) \
+ { return (u##n##x##m) _mm_##g##n ((__m128i) x, (__m128i) y); } \
+ \
+ /* Signed */ \
+ always_inline i##n##x##m \
+ i##n##x##m##_##f (i##n##x##m x, i##n##x##m y) \
+ { return (i##n##x##m) _mm_##g##n ((__m128i) x, (__m128i) y); }
+/* Addition/subtraction. */
+_signed_binop (8, 16, add, add_epi)
+_signed_binop (16, 8, add, add_epi)
+_signed_binop (32, 4, add, add_epi)
+_signed_binop (64, 2, add, add_epi)
+_signed_binop (8, 16, sub, sub_epi)
+_signed_binop (16, 8, sub, sub_epi)
+_signed_binop (32, 4, sub, sub_epi)
+_signed_binop (64, 2, sub, sub_epi)
+
+/* Addition/subtraction with saturation.  Signed and unsigned lanes
+   map to different SSE2 opcodes (e.g. paddsb vs paddusb), so they
+   cannot share _signed_binop's single intrinsic suffix. */
+#define _saturating_binop(n,m,f,gu,gs) \
+  always_inline u##n##x##m \
+  u##n##x##m##_##f (u##n##x##m x, u##n##x##m y) \
+  { return (u##n##x##m) _mm_##gu##n ((__m128i) x, (__m128i) y); } \
+  \
+  always_inline i##n##x##m \
+  i##n##x##m##_##f (i##n##x##m x, i##n##x##m y) \
+  { return (i##n##x##m) _mm_##gs##n ((__m128i) x, (__m128i) y); }
+
+_saturating_binop (8, 16, add_saturate, adds_epu, adds_epi)
+_saturating_binop (16, 8, add_saturate, adds_epu, adds_epi)
+_saturating_binop (8, 16, sub_saturate, subs_epu, subs_epi)
+_saturating_binop (16, 8, sub_saturate, subs_epu, subs_epi)
+#undef _saturating_binop
+
+/* Multiplication. */
+always_inline i16x8
+i16x8_mul_lo (i16x8 x, i16x8 y)
+{
+  return (i16x8) _mm_mullo_epi16 ((__m128i) x, (__m128i) y);
+}
+
+always_inline u16x8
+u16x8_mul_lo (u16x8 x, u16x8 y)
+{
+ return (u16x8) _mm_mullo_epi16 ((__m128i) x, (__m128i) y);
+}
+
+always_inline i16x8
+i16x8_mul_hi (i16x8 x, i16x8 y)
+{
+  return (i16x8) _mm_mulhi_epi16 ((__m128i) x, (__m128i) y);
+}
+
+always_inline u16x8
+u16x8_mul_hi (u16x8 x, u16x8 y)
+{
+ return (u16x8) _mm_mulhi_epu16 ((__m128i) x, (__m128i) y);
+}
+
+/* 128 bit shifts. */
+
+#define _(p,a,b,c,f) \
+ always_inline p##a##x##b p##a##x##b##_ishift_##c (p##a##x##b x, int i) \
+ { return (p##a##x##b) _mm_##f##i_epi##a ((__m128i) x, i); } \
+ \
+ always_inline p##a##x##b p##a##x##b##_shift_##c (p##a##x##b x, p##a##x##b y) \
+ { return (p##a##x##b) _mm_##f##_epi##a ((__m128i) x, (__m128i) y); }
+
+_(u, 16, 8, left, sll)
+_(u, 32, 4, left, sll)
+_(u, 64, 2, left, sll)
+_(u, 16, 8, right, srl)
+_(u, 32, 4, right, srl)
+_(u, 64, 2, right, srl)
+_(i, 16, 8, left, sll)
+_(i, 32, 4, left, sll)
+_(i, 64, 2, left, sll)
+_(i, 16, 8, right, sra)
+_(i, 32, 4, right, sra)
+#undef _
+
+/* 64 bit shifts. */
+always_inline u16x4
+u16x4_shift_left (u16x4 x, u16x4 i)
+{
+  return (u16x4) _m_psllw ((__m64) x, (__m64) i);
+}
+
+always_inline u32x2
+u32x2_shift_left (u32x2 x, u32x2 i)
+{
+  return (u32x2) _m_pslld ((__m64) x, (__m64) i);
+}
+
+always_inline u16x4
+u16x4_shift_right (u16x4 x, u16x4 i)
+{
+  return (u16x4) _m_psrlw ((__m64) x, (__m64) i);
+}
+
+always_inline u32x2
+u32x2_shift_right (u32x2 x, u32x2 i)
+{
+  return (u32x2) _m_psrld ((__m64) x, (__m64) i);
+}
+
+always_inline i16x4
+i16x4_shift_left (i16x4 x, i16x4 i)
+{
+  return (i16x4) _m_psllw ((__m64) x, (__m64) i);
+}
+
+always_inline i32x2
+i32x2_shift_left (i32x2 x, i32x2 i)
+{
+  return (i32x2) _m_pslld ((__m64) x, (__m64) i);
+}
+
+always_inline i16x4
+i16x4_shift_right (i16x4 x, i16x4 i)
+{
+  return (i16x4) _m_psraw ((__m64) x, (__m64) i);
+}
+
+always_inline i32x2
+i32x2_shift_right (i32x2 x, i32x2 i)
+{
+  return (i32x2) _m_psrad ((__m64) x, (__m64) i);
+}
+
+#define u8x16_word_shift_left(a,n) (u8x16) _mm_slli_si128((__m128i) a, n)
+#define u8x16_word_shift_right(a,n) (u8x16) _mm_srli_si128((__m128i) a, n)
+
+#define i8x16_word_shift_left(a,n) \
+ ((i8x16) u8x16_word_shift_left((u8x16) (a), (n)))
+#define i8x16_word_shift_right(a,n) \
+ ((i8x16) u8x16_word_shift_right((u8x16) (a), (n)))
+
+#define u16x8_word_shift_left(a,n) \
+ ((u16x8) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u16)))
+#define i16x8_word_shift_left(a,n) \
+ ((u16x8) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u16)))
+#define u16x8_word_shift_right(a,n) \
+ ((u16x8) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u16)))
+#define i16x8_word_shift_right(a,n) \
+ ((i16x8) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u16)))
+
+#define u32x4_word_shift_left(a,n) \
+ ((u32x4) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u32)))
+#define i32x4_word_shift_left(a,n) \
+ ((u32x4) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u32)))
+#define u32x4_word_shift_right(a,n) \
+ ((u32x4) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u32)))
+#define i32x4_word_shift_right(a,n) \
+ ((i32x4) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u32)))
+
+#define u64x2_word_shift_left(a,n) \
+ ((u64x2) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u64)))
+#define i64x2_word_shift_left(a,n) \
+ ((u64x2) u8x16_word_shift_left((u8x16) (a), (n) * sizeof (u64)))
+#define u64x2_word_shift_right(a,n) \
+ ((u64x2) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u64)))
+#define i64x2_word_shift_right(a,n) \
+ ((i64x2) u8x16_word_shift_right((u8x16) (a), (n) * sizeof (u64)))
+
+/* SSE2 has no rotate instructions: use shifts to simulate them. */
+#define _(t,n,lr1,lr2) \
+ always_inline t##x##n \
+ t##x##n##_irotate_##lr1 (t##x##n w, int i) \
+ { \
+ ASSERT (i >= 0 && i <= BITS (t)); \
+ return (t##x##n##_ishift_##lr1 (w, i) \
+ | t##x##n##_ishift_##lr2 (w, BITS (t) - i)); \
+ } \
+ \
+ always_inline t##x##n \
+ t##x##n##_rotate_##lr1 (t##x##n w, t##x##n i) \
+ { \
+ t##x##n j = t##x##n##_splat (BITS (t)); \
+ return (t##x##n##_shift_##lr1 (w, i) \
+ | t##x##n##_shift_##lr2 (w, j - i)); \
+ }
+
+_(u16, 8, left, right);
+_(u16, 8, right, left);
+_(u32, 4, left, right);
+_(u32, 4, right, left);
+_(u64, 2, left, right);
+_(u64, 2, right, left);
+
+#undef _
+
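+/* For example, u16x8_irotate_left (w, 3) expands to
+   u16x8_ishift_left (w, 3) | u16x8_ishift_right (w, 13), the usual
+   rotate-from-two-shifts identity for 16-bit lanes. */
+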
+#ifndef __clang__
+#define _(t,n,lr1,lr2) \
+ always_inline t##x##n \
+ t##x##n##_word_rotate2_##lr1 (t##x##n w0, t##x##n w1, int i) \
+ { \
+ int m = sizeof (t##x##n) / sizeof (t); \
+ ASSERT (i >= 0 && i < m); \
+ return (t##x##n##_word_shift_##lr1 (w0, i) \
+ | t##x##n##_word_shift_##lr2 (w1, m - i)); \
+ } \
+ \
+ always_inline t##x##n \
+ t##x##n##_word_rotate_##lr1 (t##x##n w0, int i) \
+ { return t##x##n##_word_rotate2_##lr1 (w0, w0, i); }
+
+_(u8, 16, left, right);
+_(u8, 16, right, left);
+_(u16, 8, left, right);
+_(u16, 8, right, left);
+_(u32, 4, left, right);
+_(u32, 4, right, left);
+_(u64, 2, left, right);
+_(u64, 2, right, left);
+
+#undef _
+#endif
+
+/* Compare operations. */
+always_inline u8x16
+u8x16_is_equal (u8x16 x, u8x16 y)
+{
+ return (u8x16) _mm_cmpeq_epi8 ((__m128i) x, (__m128i) y);
+}
+
+always_inline i8x16
+i8x16_is_equal (i8x16 x, i8x16 y)
+{
+ return (i8x16) _mm_cmpeq_epi8 ((__m128i) x, (__m128i) y);
+}
+
+always_inline u16x8
+u16x8_is_equal (u16x8 x, u16x8 y)
+{
+ return (u16x8) _mm_cmpeq_epi16 ((__m128i) x, (__m128i) y);
+}
+
+always_inline i16x8
+i16x8_is_equal (i16x8 x, i16x8 y)
+{
+ return (i16x8) _mm_cmpeq_epi16 ((__m128i) x, (__m128i) y);
+}
+
+always_inline u32x4
+u32x4_is_equal (u32x4 x, u32x4 y)
+{
+ return (u32x4) _mm_cmpeq_epi32 ((__m128i) x, (__m128i) y);
+}
+
+always_inline i32x4
+i32x4_is_equal (i32x4 x, i32x4 y)
+{
+ return (i32x4) _mm_cmpeq_epi32 ((__m128i) x, (__m128i) y);
+}
+
+always_inline u8x16
+i8x16_is_greater (i8x16 x, i8x16 y)
+{
+ return (u8x16) _mm_cmpgt_epi8 ((__m128i) x, (__m128i) y);
+}
+
+always_inline u16x8
+i16x8_is_greater (i16x8 x, i16x8 y)
+{
+ return (u16x8) _mm_cmpgt_epi16 ((__m128i) x, (__m128i) y);
+}
+
+always_inline u32x4
+i32x4_is_greater (i32x4 x, i32x4 y)
+{
+ return (u32x4) _mm_cmpgt_epi32 ((__m128i) x, (__m128i) y);
+}
+
+always_inline u8x16
+u8x16_is_zero (u8x16 x)
+{
+ u8x16 zero = { 0 };
+ return u8x16_is_equal (x, zero);
+}
+
+always_inline u16x8
+u16x8_is_zero (u16x8 x)
+{
+ u16x8 zero = { 0 };
+ return u16x8_is_equal (x, zero);
+}
+
+always_inline u32x4
+u32x4_is_zero (u32x4 x)
+{
+ u32x4 zero = { 0 };
+ return u32x4_is_equal (x, zero);
+}
+
+#define u32x4_select(A,MASK) \
+({ \
+ u32x4 _x, _y; \
+ _x = (A); \
+ asm volatile ("pshufd %[mask], %[x], %[y]" \
+ : /* outputs */ [y] "=x" (_y) \
+ : /* inputs */ [x] "x" (_x), [mask] "i" (MASK)); \
+ _y; \
+})
+
+#define u32x4_splat_word(x,i) \
+ u32x4_select ((x), (((i) << (2*0)) \
+ | ((i) << (2*1)) \
+ | ((i) << (2*2)) \
+ | ((i) << (2*3))))
+
+/* Extract low order 32 bit word. */
+always_inline u32
+u32x4_get0 (u32x4 x)
+{
+ u32 result;
+ asm volatile ("movd %[x], %[result]": /* outputs */ [result] "=r" (result)
+ : /* inputs */ [x] "x" (x));
+ return result;
+}
+
+always_inline u32x4
+u32x4_set0 (u32 x)
+{
+ u32x4 result;
+ asm volatile ("movd %[x], %[result]": /* outputs */ [result] "=x" (result)
+ : /* inputs */ [x] "r" (x));
+ return result;
+}
+
+always_inline i32x4
+i32x4_set0 (i32 x)
+{
+ return (i32x4) u32x4_set0 ((u32) x);
+}
+
+always_inline i32
+i32x4_get0 (i32x4 x)
+{
+ return (i32) u32x4_get0 ((u32x4) x);
+}
+
+/* Converts all ones/zeros compare mask to bitmap. */
+always_inline u32
+u8x16_compare_byte_mask (u8x16 x)
+{
+ return _mm_movemask_epi8 ((__m128i) x);
+}
+
+extern u8 u32x4_compare_word_mask_table[256];
+
+always_inline u32
+u32x4_compare_word_mask (u32x4 x)
+{
+ u32 m = u8x16_compare_byte_mask ((u8x16) x);
+ return (u32x4_compare_word_mask_table[(m >> 0) & 0xff]
+ | (u32x4_compare_word_mask_table[(m >> 8) & 0xff] << 2));
+}
+
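+/* Worked example (illustrative, assuming the table maps each 8-bit
+   byte-mask pattern to its 2-bit word mask): if words 0 and 3 compare
+   equal, u8x16_compare_byte_mask returns m = 0xf00f, and the two
+   lookups yield (1 << 0) | (1 << 3) = 0x9. */
+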
+always_inline u32
+u8x16_zero_byte_mask (u8x16 x)
+{
+ u8x16 zero = { 0 };
+ return u8x16_compare_byte_mask (u8x16_is_equal (x, zero));
+}
+
+always_inline u32
+u16x8_zero_byte_mask (u16x8 x)
+{
+ u16x8 zero = { 0 };
+ return u8x16_compare_byte_mask ((u8x16) u16x8_is_equal (x, zero));
+}
+
+always_inline u32
+u32x4_zero_byte_mask (u32x4 x)
+{
+ u32x4 zero = { 0 };
+ return u8x16_compare_byte_mask ((u8x16) u32x4_is_equal (x, zero));
+}
+
+always_inline u8x16
+u8x16_max (u8x16 x, u8x16 y)
+{
+ return (u8x16) _mm_max_epu8 ((__m128i) x, (__m128i) y);
+}
+
+always_inline u32
+u8x16_max_scalar (u8x16 x)
+{
+ x = u8x16_max (x, u8x16_word_shift_right (x, 8));
+ x = u8x16_max (x, u8x16_word_shift_right (x, 4));
+ x = u8x16_max (x, u8x16_word_shift_right (x, 2));
+ x = u8x16_max (x, u8x16_word_shift_right (x, 1));
+ return _mm_extract_epi16 ((__m128i) x, 0) & 0xff;
+}
+
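+/* The reduction above (and the min and i16x8 variants below) halves
+   the candidate set each step: shifting by 8, 4, 2 and then 1 byte
+   folds all 16 lanes into lane 0 in log2(16) = 4 vector max
+   operations. */
+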
+always_inline u8x16
+u8x16_min (u8x16 x, u8x16 y)
+{
+ return (u8x16) _mm_min_epu8 ((__m128i) x, (__m128i) y);
+}
+
+always_inline u8
+u8x16_min_scalar (u8x16 x)
+{
+ x = u8x16_min (x, u8x16_word_shift_right (x, 8));
+ x = u8x16_min (x, u8x16_word_shift_right (x, 4));
+ x = u8x16_min (x, u8x16_word_shift_right (x, 2));
+ x = u8x16_min (x, u8x16_word_shift_right (x, 1));
+ return _mm_extract_epi16 ((__m128i) x, 0) & 0xff;
+}
+
+always_inline i16x8
+i16x8_max (i16x8 x, i16x8 y)
+{
+ return (i16x8) _mm_max_epi16 ((__m128i) x, (__m128i) y);
+}
+
+always_inline i16
+i16x8_max_scalar (i16x8 x)
+{
+ x = i16x8_max (x, i16x8_word_shift_right (x, 4));
+ x = i16x8_max (x, i16x8_word_shift_right (x, 2));
+ x = i16x8_max (x, i16x8_word_shift_right (x, 1));
+ return _mm_extract_epi16 ((__m128i) x, 0);
+}
+
+always_inline i16x8
+i16x8_min (i16x8 x, i16x8 y)
+{
+ return (i16x8) _mm_min_epi16 ((__m128i) x, (__m128i) y);
+}
+
+always_inline i16
+i16x8_min_scalar (i16x8 x)
+{
+ x = i16x8_min (x, i16x8_word_shift_right (x, 4));
+ x = i16x8_min (x, i16x8_word_shift_right (x, 2));
+ x = i16x8_min (x, i16x8_word_shift_right (x, 1));
+ return _mm_extract_epi16 ((__m128i) x, 0);
+}
+
+#undef _signed_binop
+
+#endif /* included_vector_sse2_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/vhash.c b/src/vppinfra/vhash.c
new file mode 100644
index 00000000..f9dac0d9
--- /dev/null
+++ b/src/vppinfra/vhash.c
@@ -0,0 +1,772 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2010 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/vhash.h>
+
+#ifdef CLIB_HAVE_VEC128
+
+/* Overflow search buckets have an extra u32x4 for saving key_hash data.
+   This makes it easier to refill the main search bucket from the
+   overflow vector. */
+typedef struct
+{
+ /* 4 results for this bucket. */
+ u32x4_union_t result;
+
+ /* 4 hash codes for this bucket. These are used to refill main
+ search buckets from overflow buckets when space becomes available. */
+ u32x4_union_t key_hash;
+
+ /* n_key_u32s u32x4s of key data follow. */
+ u32x4_union_t key[0];
+} vhash_overflow_search_bucket_t;
+
+always_inline void
+set_overflow_result (vhash_overflow_search_bucket_t * b,
+ u32 i, u32 result, u32 key_hash)
+{
+ b->result.as_u32[i] = result;
+ b->key_hash.as_u32[i] = key_hash;
+}
+
+always_inline void
+free_overflow_bucket (vhash_overflow_buckets_t * ob,
+ vhash_overflow_search_bucket_t * b, u32 i)
+{
+ u32 o = (u32x4_union_t *) b - ob->search_buckets;
+ ASSERT (o < vec_len (ob->search_buckets));
+ vec_add1 (ob->free_indices, 4 * o + i);
+}
+
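+/* Worked example (illustrative): free indices encode bucket and slot
+   as 4 * o + i.  With n_key_u32s = 2 a bucket spans 4 u32x4s, so the
+   second bucket starts at u32x4 offset o = 4 and its slot-3 entry gets
+   free index 4 * 4 + 3 = 19; vhash_set_overflow recovers the bucket
+   with index / 4 and the slot with index & 3. */
+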
+always_inline vhash_overflow_search_bucket_t *
+get_overflow_search_bucket (vhash_overflow_buckets_t * obs, u32 i,
+ u32 n_key_u32s)
+{
+ return ((vhash_overflow_search_bucket_t *)
+ vec_elt_at_index (obs->search_buckets, i));
+}
+
+always_inline vhash_overflow_search_bucket_t *
+next_overflow_bucket (vhash_overflow_search_bucket_t * b, u32 n_key_u32s)
+{
+ return (vhash_overflow_search_bucket_t *) & b->key[n_key_u32s];
+}
+
+#define foreach_vhash_overflow_bucket(b,ob,n_key_u32s) \
+ for ((b) = (vhash_overflow_search_bucket_t *) ob->search_buckets; \
+ (u32x4_union_t *) (b) < vec_end (ob->search_buckets); \
+ b = next_overflow_bucket (b, n_key_u32s))
+
+u32
+vhash_get_overflow (vhash_t * h, u32 key_hash, u32 vi, u32 n_key_u32s)
+{
+ vhash_overflow_buckets_t *ob = vhash_get_overflow_buckets (h, key_hash);
+ vhash_overflow_search_bucket_t *b;
+ u32 i, result = 0;
+
+ foreach_vhash_overflow_bucket (b, ob, n_key_u32s)
+ {
+ u32x4 r = b->result.as_u32x4;
+
+ for (i = 0; i < n_key_u32s; i++)
+ r &= vhash_bucket_compare (h, &b->key[0], i, vi);
+
+ result = vhash_merge_results (r);
+ if (result)
+ break;
+ }
+
+ return result;
+}
+
+u32
+vhash_set_overflow (vhash_t * h,
+ u32 key_hash, u32 vi, u32 new_result, u32 n_key_u32s)
+{
+ vhash_overflow_buckets_t *ob = vhash_get_overflow_buckets (h, key_hash);
+ vhash_overflow_search_bucket_t *b;
+ u32 i_set, i, old_result;
+
+ foreach_vhash_overflow_bucket (b, ob, n_key_u32s)
+ {
+ u32x4 r;
+
+ r = b->result.as_u32x4;
+ for (i = 0; i < n_key_u32s; i++)
+ r &= vhash_bucket_compare (h, &b->key[0], i, vi);
+
+ old_result = vhash_merge_results (r);
+ if (old_result)
+ {
+ i_set = vhash_non_empty_result_index (r);
+ set_overflow_result (b, i_set, new_result, key_hash);
+ return old_result;
+ }
+ }
+
+ /* Check free list. */
+ if (vec_len (ob->free_indices) == 0)
+ {
+ /* Out of free overflow buckets. Resize. */
+ u32 j, *p;
+ i = vec_len (ob->search_buckets);
+ vec_resize_aligned (ob->search_buckets,
+ sizeof (b[0]) / sizeof (u32x4) + n_key_u32s,
+ CLIB_CACHE_LINE_BYTES);
+ vec_add2 (ob->free_indices, p, 4);
+ for (j = 0; j < 4; j++)
+ p[j] = 4 * i + j;
+ }
+
+ i = vec_pop (ob->free_indices);
+
+ i_set = i & 3;
+ b = ((vhash_overflow_search_bucket_t *)
+ vec_elt_at_index (ob->search_buckets, i / 4));
+
+ /* Insert result. */
+ set_overflow_result (b, i_set, new_result, key_hash);
+
+ /* Insert key. */
+ for (i = 0; i < n_key_u32s; i++)
+ b->key[i].as_u32[i_set] = vhash_get_key_word (h, i, vi);
+
+ ob->n_overflow++;
+ h->n_elts++;
+
+ return /* old result was invalid */ 0;
+}
+
+u32
+vhash_unset_overflow (vhash_t * h, u32 key_hash, u32 vi, u32 n_key_u32s)
+{
+ vhash_overflow_buckets_t *ob = vhash_get_overflow_buckets (h, key_hash);
+ vhash_overflow_search_bucket_t *b;
+ u32 i_set, i, old_result;
+
+ foreach_vhash_overflow_bucket (b, ob, n_key_u32s)
+ {
+ u32x4 r;
+
+ r = b->result.as_u32x4;
+ for (i = 0; i < n_key_u32s; i++)
+ r &= vhash_bucket_compare (h, &b->key[0], i, vi);
+
+ old_result = vhash_merge_results (r);
+ if (old_result)
+ {
+ i_set = vhash_non_empty_result_index (r);
+
+	  /* Invalidate result and invert key hash so that this will
+	     never match, since all keys in this overflow bucket have
+	     matching key hashes. */
+ set_overflow_result (b, i_set, 0, ~key_hash);
+
+ free_overflow_bucket (ob, b, i_set);
+
+ ASSERT (ob->n_overflow > 0);
+ ob->n_overflow--;
+ h->n_elts--;
+ return old_result;
+ }
+ }
+
+ /* Could not find key. */
+ return 0;
+}
+
+void
+vhash_unset_refill_from_overflow (vhash_t * h,
+ vhash_search_bucket_t * sb,
+ u32 key_hash, u32 n_key_u32s)
+{
+ vhash_overflow_buckets_t *obs = vhash_get_overflow_buckets (h, key_hash);
+ vhash_overflow_search_bucket_t *ob;
+ u32 i, j, i_refill, bucket_mask = h->bucket_mask.as_u32[0];
+
+ /* Find overflow element with matching key hash. */
+ foreach_vhash_overflow_bucket (ob, obs, n_key_u32s)
+ {
+ for (i = 0; i < 4; i++)
+ {
+ if (!ob->result.as_u32[i])
+ continue;
+ if ((ob->key_hash.as_u32[i] & bucket_mask)
+ != (key_hash & bucket_mask))
+ continue;
+
+ i_refill = vhash_empty_result_index (sb->result.as_u32x4);
+ sb->result.as_u32[i_refill] = ob->result.as_u32[i];
+ for (j = 0; j < n_key_u32s; j++)
+ sb->key[j].as_u32[i_refill] = ob->key[j].as_u32[i];
+ set_overflow_result (ob, i, 0, ~key_hash);
+ free_overflow_bucket (obs, ob, i);
+ return;
+ }
+ }
+}
+
+void
+vhash_init (vhash_t * h, u32 log2_n_keys, u32 n_key_u32, u32 * hash_seeds)
+{
+ uword i, j, m;
+ vhash_search_bucket_t *b;
+
+ memset (h, 0, sizeof (h[0]));
+
+ /* Must have at least 4 keys (e.g. one search bucket). */
+ log2_n_keys = clib_max (log2_n_keys, 2);
+
+ h->log2_n_keys = log2_n_keys;
+ h->n_key_u32 = n_key_u32;
+ m = pow2_mask (h->log2_n_keys) & ~3;
+ for (i = 0; i < VECTOR_WORD_TYPE_LEN (u32); i++)
+ h->bucket_mask.as_u32[i] = m;
+
+ /* Allocate and zero search buckets. */
+ i = (sizeof (b[0]) / sizeof (u32x4) + n_key_u32) << (log2_n_keys - 2);
+ vec_validate_aligned (h->search_buckets, i - 1, CLIB_CACHE_LINE_BYTES);
+
+ for (i = 0; i < ARRAY_LEN (h->find_first_zero_table); i++)
+ h->find_first_zero_table[i] = min_log2 (first_set (~i));
+
+ for (i = 0; i < ARRAY_LEN (h->hash_seeds); i++)
+ for (j = 0; j < VECTOR_WORD_TYPE_LEN (u32); j++)
+ h->hash_seeds[i].as_u32[j] = hash_seeds[i];
+}
+
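+/* Sizing example (illustrative): with log2_n_keys = 10 and
+   n_key_u32 = 2, a search bucket occupies 1 + 2 = 3 u32x4s and there
+   are 2^10 / 4 = 256 buckets, so search_buckets is validated to
+   3 << 8 = 768 u32x4s. */
+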
+static_always_inline u32
+vhash_main_key_gather (void *_vm, u32 vi, u32 wi, u32 n_key_u32)
+{
+ vhash_main_t *vm = _vm;
+ return vec_elt (vm->keys, vi * n_key_u32 + wi);
+}
+
+static_always_inline u32x4
+vhash_main_4key_gather (void *_vm, u32 vi, u32 wi, u32 n_key_u32s)
+{
+ vhash_main_t *vm = _vm;
+ u32x4_union_t x;
+
+ ASSERT (n_key_u32s == vm->n_key_u32);
+ ASSERT (wi < n_key_u32s);
+
+ x.as_u32[0] = vec_elt (vm->keys, (vi + 0) * n_key_u32s + wi);
+ x.as_u32[1] = vec_elt (vm->keys, (vi + 1) * n_key_u32s + wi);
+ x.as_u32[2] = vec_elt (vm->keys, (vi + 2) * n_key_u32s + wi);
+ x.as_u32[3] = vec_elt (vm->keys, (vi + 3) * n_key_u32s + wi);
+ return x.as_u32x4;
+}
+
+static_always_inline u32
+vhash_main_set_result (void *_vm, u32 vi, u32 old_result, u32 n_key_u32)
+{
+ vhash_main_t *vm = _vm;
+ u32 *p = vec_elt_at_index (vm->results, vi);
+ u32 new_result = p[0];
+ p[0] = old_result;
+ return new_result;
+}
+
+static_always_inline u32
+vhash_main_get_result (void *_vm, u32 vi, u32 old_result, u32 n_key_u32)
+{
+ vhash_main_t *vm = _vm;
+ vec_elt (vm->results, vi) = old_result;
+ return old_result;
+}
+
+static_always_inline u32x4
+vhash_main_get_4result (void *_vm, u32 vi, u32x4 old_result, u32 n_key_u32)
+{
+ vhash_main_t *vm = _vm;
+ u32x4 *p = (u32x4 *) vec_elt_at_index (vm->results, vi);
+ p[0] = old_result;
+ return old_result;
+}
+
+#define _(N_KEY_U32) \
+ static_always_inline u32 \
+ vhash_main_key_gather_##N_KEY_U32 (void * _vm, u32 vi, u32 i) \
+ { return vhash_main_key_gather (_vm, vi, i, N_KEY_U32); } \
+ \
+ static_always_inline u32x4 \
+ vhash_main_4key_gather_##N_KEY_U32 (void * _vm, u32 vi, u32 i) \
+ { return vhash_main_4key_gather (_vm, vi, i, N_KEY_U32); } \
+ \
+ clib_pipeline_stage_static \
+ (vhash_main_gather_keys_stage_##N_KEY_U32, \
+ vhash_main_t *, vm, i, \
+ { \
+ vhash_gather_4key_stage \
+ (vm->vhash, \
+ /* vector_index */ i, \
+ vhash_main_4key_gather_##N_KEY_U32, \
+ vm, \
+ N_KEY_U32); \
+ }) \
+ \
+ clib_pipeline_stage_no_inline \
+ (vhash_main_gather_keys_mod_stage_##N_KEY_U32, \
+ vhash_main_t *, vm, i, \
+ { \
+ vhash_gather_key_stage \
+ (vm->vhash, \
+ /* vector_index */ vm->n_vectors_div_4, \
+ /* n_vectors */ vm->n_vectors_mod_4, \
+ vhash_main_key_gather_##N_KEY_U32, \
+ vm, \
+ N_KEY_U32); \
+ }) \
+ \
+ clib_pipeline_stage \
+ (vhash_main_hash_finalize_stage_##N_KEY_U32, \
+ vhash_main_t *, vm, i, \
+ { \
+ vhash_finalize_stage (vm->vhash, i, N_KEY_U32); \
+ }) \
+ \
+ clib_pipeline_stage_no_inline \
+ (vhash_main_hash_finalize_mod_stage_##N_KEY_U32, \
+ vhash_main_t *, vm, i, \
+ { \
+ vhash_finalize_stage (vm->vhash, vm->n_vectors_div_4, N_KEY_U32); \
+ }) \
+ \
+ clib_pipeline_stage_static \
+ (vhash_main_get_stage_##N_KEY_U32, \
+ vhash_main_t *, vm, i, \
+ { \
+ vhash_get_4_stage (vm->vhash, \
+ /* vector_index */ i, \
+ vhash_main_get_4result, \
+ vm, N_KEY_U32); \
+ }) \
+ \
+ clib_pipeline_stage_no_inline \
+ (vhash_main_get_mod_stage_##N_KEY_U32, \
+ vhash_main_t *, vm, i, \
+ { \
+ vhash_get_stage (vm->vhash, \
+ /* vector_index */ vm->n_vectors_div_4, \
+ /* n_vectors */ vm->n_vectors_mod_4, \
+ vhash_main_get_result, \
+ vm, N_KEY_U32); \
+ }) \
+ \
+ clib_pipeline_stage_static \
+ (vhash_main_set_stage_##N_KEY_U32, \
+ vhash_main_t *, vm, i, \
+ { \
+ vhash_set_stage (vm->vhash, \
+ /* vector_index */ i, \
+ /* n_vectors */ VECTOR_WORD_TYPE_LEN (u32), \
+ vhash_main_set_result, \
+ vm, N_KEY_U32); \
+ }) \
+ \
+ clib_pipeline_stage_no_inline \
+ (vhash_main_set_mod_stage_##N_KEY_U32, \
+ vhash_main_t *, vm, i, \
+ { \
+ vhash_set_stage (vm->vhash, \
+ /* vector_index */ vm->n_vectors_div_4, \
+ /* n_vectors */ vm->n_vectors_mod_4, \
+ vhash_main_set_result, \
+ vm, N_KEY_U32); \
+ }) \
+ \
+ clib_pipeline_stage_static \
+ (vhash_main_unset_stage_##N_KEY_U32, \
+ vhash_main_t *, vm, i, \
+ { \
+ vhash_unset_stage (vm->vhash, \
+ /* vector_index */ i, \
+ /* n_vectors */ VECTOR_WORD_TYPE_LEN (u32), \
+ vhash_main_get_result, \
+ vm, N_KEY_U32); \
+ }) \
+ \
+ clib_pipeline_stage_no_inline \
+ (vhash_main_unset_mod_stage_##N_KEY_U32, \
+ vhash_main_t *, vm, i, \
+ { \
+ vhash_unset_stage (vm->vhash, \
+ /* vector_index */ vm->n_vectors_div_4, \
+ /* n_vectors */ vm->n_vectors_mod_4, \
+ vhash_main_get_result, \
+ vm, N_KEY_U32); \
+ })
+
+_(1);
+_(2);
+_(3);
+_(4);
+_(5);
+_(6);
+
+#undef _
+
+#define _(N_KEY_U32) \
+ clib_pipeline_stage \
+ (vhash_main_hash_mix_stage_##N_KEY_U32, \
+ vhash_main_t *, vm, i, \
+ { \
+ vhash_mix_stage (vm->vhash, i, N_KEY_U32); \
+ }) \
+ \
+ clib_pipeline_stage_no_inline \
+ (vhash_main_hash_mix_mod_stage_##N_KEY_U32, \
+ vhash_main_t *, vm, i, \
+ { \
+ vhash_mix_stage (vm->vhash, vm->n_vectors_div_4, N_KEY_U32); \
+ })
+
+_(4);
+_(5);
+_(6);
+
+#undef _
+
+typedef enum
+{
+ GET, SET, UNSET,
+} vhash_main_op_t;
+
+static void
+vhash_main_op (vhash_main_t * vm, vhash_main_op_t op)
+{
+ u32 n_keys = vec_len (vm->results);
+
+ vm->n_key_u32 = vm->vhash->n_key_u32;
+
+ vhash_validate_sizes (vm->vhash, vm->n_key_u32, n_keys);
+
+ vm->n_vectors_div_4 = n_keys / 4;
+ vm->n_vectors_mod_4 = n_keys % 4;
+
+ if (vm->n_vectors_div_4 > 0)
+ {
+ switch (vm->n_key_u32)
+ {
+ default:
+ ASSERT (0);
+ break;
+
+#define _(N_KEY_U32) \
+ case N_KEY_U32: \
+ if (op == GET) \
+ clib_pipeline_run_3_stage \
+ (vm->n_vectors_div_4, \
+ vm, \
+ vhash_main_gather_keys_stage_##N_KEY_U32, \
+ vhash_main_hash_finalize_stage_##N_KEY_U32, \
+ vhash_main_get_stage_##N_KEY_U32); \
+ else if (op == SET) \
+ clib_pipeline_run_3_stage \
+ (vm->n_vectors_div_4, \
+ vm, \
+ vhash_main_gather_keys_stage_##N_KEY_U32, \
+ vhash_main_hash_finalize_stage_##N_KEY_U32, \
+ vhash_main_set_stage_##N_KEY_U32); \
+ else \
+ clib_pipeline_run_3_stage \
+ (vm->n_vectors_div_4, \
+ vm, \
+ vhash_main_gather_keys_stage_##N_KEY_U32, \
+ vhash_main_hash_finalize_stage_##N_KEY_U32, \
+ vhash_main_unset_stage_##N_KEY_U32); \
+ break;
+
+ _(1);
+ _(2);
+ _(3);
+
+#undef _
+
+#define _(N_KEY_U32) \
+ case N_KEY_U32: \
+ if (op == GET) \
+ clib_pipeline_run_4_stage \
+ (vm->n_vectors_div_4, \
+ vm, \
+ vhash_main_gather_keys_stage_##N_KEY_U32, \
+ vhash_main_hash_mix_stage_##N_KEY_U32, \
+ vhash_main_hash_finalize_stage_##N_KEY_U32, \
+ vhash_main_get_stage_##N_KEY_U32); \
+ else if (op == SET) \
+ clib_pipeline_run_4_stage \
+ (vm->n_vectors_div_4, \
+ vm, \
+ vhash_main_gather_keys_stage_##N_KEY_U32, \
+ vhash_main_hash_mix_stage_##N_KEY_U32, \
+ vhash_main_hash_finalize_stage_##N_KEY_U32, \
+ vhash_main_set_stage_##N_KEY_U32); \
+ else \
+ clib_pipeline_run_4_stage \
+ (vm->n_vectors_div_4, \
+ vm, \
+ vhash_main_gather_keys_stage_##N_KEY_U32, \
+ vhash_main_hash_mix_stage_##N_KEY_U32, \
+ vhash_main_hash_finalize_stage_##N_KEY_U32, \
+ vhash_main_unset_stage_##N_KEY_U32); \
+ break;
+
+ _(4);
+ _(5);
+ _(6);
+
+#undef _
+ }
+ }
+
+
+ if (vm->n_vectors_mod_4 > 0)
+ {
+ switch (vm->n_key_u32)
+ {
+ default:
+ ASSERT (0);
+ break;
+
+#define _(N_KEY_U32) \
+ case N_KEY_U32: \
+ if (op == GET) \
+ clib_pipeline_run_3_stage \
+ (1, \
+ vm, \
+ vhash_main_gather_keys_mod_stage_##N_KEY_U32, \
+ vhash_main_hash_finalize_mod_stage_##N_KEY_U32, \
+ vhash_main_get_mod_stage_##N_KEY_U32); \
+ else if (op == SET) \
+ clib_pipeline_run_3_stage \
+ (1, \
+ vm, \
+ vhash_main_gather_keys_mod_stage_##N_KEY_U32, \
+ vhash_main_hash_finalize_mod_stage_##N_KEY_U32, \
+ vhash_main_set_mod_stage_##N_KEY_U32); \
+ else \
+ clib_pipeline_run_3_stage \
+ (1, \
+ vm, \
+ vhash_main_gather_keys_mod_stage_##N_KEY_U32, \
+ vhash_main_hash_finalize_mod_stage_##N_KEY_U32, \
+ vhash_main_unset_mod_stage_##N_KEY_U32); \
+ break;
+
+ _(1);
+ _(2);
+ _(3);
+
+#undef _
+
+#define _(N_KEY_U32) \
+ case N_KEY_U32: \
+ if (op == GET) \
+ clib_pipeline_run_4_stage \
+ (1, \
+ vm, \
+ vhash_main_gather_keys_mod_stage_##N_KEY_U32, \
+ vhash_main_hash_mix_mod_stage_##N_KEY_U32, \
+ vhash_main_hash_finalize_mod_stage_##N_KEY_U32, \
+ vhash_main_get_mod_stage_##N_KEY_U32); \
+ else if (op == SET) \
+ clib_pipeline_run_4_stage \
+ (1, \
+ vm, \
+ vhash_main_gather_keys_mod_stage_##N_KEY_U32, \
+ vhash_main_hash_mix_mod_stage_##N_KEY_U32, \
+ vhash_main_hash_finalize_mod_stage_##N_KEY_U32, \
+ vhash_main_set_mod_stage_##N_KEY_U32); \
+ else \
+ clib_pipeline_run_4_stage \
+ (1, \
+ vm, \
+ vhash_main_gather_keys_mod_stage_##N_KEY_U32, \
+ vhash_main_hash_mix_mod_stage_##N_KEY_U32, \
+ vhash_main_hash_finalize_mod_stage_##N_KEY_U32, \
+ vhash_main_unset_mod_stage_##N_KEY_U32); \
+ break;
+
+ _(4);
+ _(5);
+ _(6);
+
+#undef _
+ }
+ }
+}
+
+void
+vhash_main_get (vhash_main_t * vm)
+{
+ vhash_main_op (vm, GET);
+}
+
+void
+vhash_main_set (vhash_main_t * vm)
+{
+ vhash_main_op (vm, SET);
+}
+
+void
+vhash_main_unset (vhash_main_t * vm)
+{
+ vhash_main_op (vm, UNSET);
+}
+
+u32
+vhash_resize_incremental (vhash_resize_t * vr, u32 vector_index,
+ u32 n_keys_this_call)
+{
+ vhash_t *old = vr->old;
+ vhash_main_t *vm = &vr->new;
+ vhash_t *new = vm->vhash;
+ uword i, j, n_key_u32;
+
+ n_key_u32 = old->n_key_u32;
+
+ if (vector_index == 0)
+ {
+ u32 hash_seeds[3];
+ hash_seeds[0] = old->hash_seeds[0].as_u32[0];
+ hash_seeds[1] = old->hash_seeds[1].as_u32[0];
+ hash_seeds[2] = old->hash_seeds[2].as_u32[0];
+ vhash_init (new, old->log2_n_keys + 1, n_key_u32, hash_seeds);
+ }
+
+ vec_reset_length (vm->keys);
+ vec_reset_length (vm->results);
+
+ if (0 == (vector_index >> old->log2_n_keys))
+ {
+ for (i = vector_index; 0 == (i >> (old->log2_n_keys - 2)); i++)
+ {
+ vhash_search_bucket_t *b =
+ vhash_get_search_bucket_with_index (old, 4 * i, n_key_u32);
+ u32 r, *k;
+
+#define _(I) \
+ if ((r = b->result.as_u32[I]) != 0) \
+ { \
+ vec_add1 (vm->results, r - 1); \
+ vec_add2 (vm->keys, k, n_key_u32); \
+ for (j = 0; j < n_key_u32; j++) \
+ k[j] = b->key[j].as_u32[I]; \
+ }
+
+ _(0);
+ _(1);
+ _(2);
+ _(3);
+
+#undef _
+
+ if (vec_len (vm->results) >= n_keys_this_call)
+ {
+ vhash_main_op (vm, SET);
+ return i;
+ }
+ }
+ }
+
+ /* Add overflow buckets. */
+ {
+ vhash_overflow_buckets_t *ob;
+ vhash_overflow_search_bucket_t *b;
+
+ for (ob = old->overflow_buckets;
+ ob < old->overflow_buckets + ARRAY_LEN (old->overflow_buckets); ob++)
+ {
+ foreach_vhash_overflow_bucket (b, ob, old->n_key_u32)
+ {
+ u32 r, *k;
+
+#define _(I) \
+ if ((r = b->result.as_u32[I]) != 0) \
+ { \
+ vec_add1 (vm->results, r - 1); \
+ vec_add2 (vm->keys, k, n_key_u32); \
+ for (j = 0; j < n_key_u32; j++) \
+ k[j] = b->key[j].as_u32[I]; \
+ }
+
+ _(0);
+ _(1);
+ _(2);
+ _(3);
+
+#undef _
+ }
+ }
+ }
+
+ vhash_main_op (vm, SET);
+
+ /* Let caller know we are done. */
+ return ~0;
+}
+
+void
+vhash_resize (vhash_t * old, u32 log2_n_keys)
+{
+ static vhash_resize_t vr;
+ vhash_t new;
+ u32 i = 0;
+
+ vr.old = old;
+ vr.new.vhash = &new;
+
+ while (1)
+ {
+ i = vhash_resize_incremental (&vr, i, 1024);
+ if (i == ~0)
+ break;
+ }
+
+ vhash_free (old);
+ *old = new;
+}
+
+#endif /* CLIB_HAVE_VEC128 */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/vhash.h b/src/vppinfra/vhash.h
new file mode 100644
index 00000000..5ab42292
--- /dev/null
+++ b/src/vppinfra/vhash.h
@@ -0,0 +1,850 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2010 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_clib_vhash_h
+#define included_clib_vhash_h
+
+#include <vppinfra/vector.h>
+
+#ifdef CLIB_HAVE_VEC128
+
+#include <vppinfra/cache.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/pipeline.h>
+
+/* Gathers 32 bits worth of key with given index. */
+typedef u32 (vhash_key_function_t) (void *state, u32 vector_index,
+ u32 key_word_index);
+typedef u32x4 (vhash_4key_function_t) (void *state, u32 vector_index,
+ u32 key_word_index);
+/* Sets/gets result of hash lookup. */
+typedef u32 (vhash_result_function_t) (void *state, u32 vector_index,
+ u32 result, u32 n_key_u32);
+typedef u32x4 (vhash_4result_function_t) (void *state, u32 vector_index,
+ u32x4 results, u32 n_key_u32);
+
+typedef struct
+{
+ u32x4_union_t hashed_key[3];
+} vhash_hashed_key_t;
+
+/* Search buckets are really this structure. */
+typedef struct
+{
+  /* 4 results for this bucket.
+     Zero marks an empty result.  Internal results are stored as
+     user result + 1, so the user result ~0 cannot be used. */
+ u32x4_union_t result;
+
+ /* n_key_u32s u32x4s of key data follow. */
+ u32x4_union_t key[0];
+} vhash_search_bucket_t;
+
+typedef struct
+{
+ u32x4_union_t *search_buckets;
+
+ /* Vector of bucket free indices. */
+ u32 *free_indices;
+
+ /* Number of entries in this overflow bucket. */
+ u32 n_overflow;
+} vhash_overflow_buckets_t;
+
+typedef struct
+{
+ /* 2^log2_n_keys keys grouped in groups of 4.
+ Each bucket contains 4 results plus 4 keys for a
+ total of (1 + n_key_u32) u32x4s. */
+ u32x4_union_t *search_buckets;
+
+  /* When a bucket of 4 results/keys is full we search the
+     overflow buckets.  The key hash selects which overflow
+     bucket to use. */
+ vhash_overflow_buckets_t overflow_buckets[16];
+
+ /* Total count of occupied elements in hash table. */
+ u32 n_elts;
+
+ u32 log2_n_keys;
+
+ /* Number of 32 bit words in a hash key. */
+ u32 n_key_u32;
+
+ u32x4_union_t bucket_mask;
+
+ /* table[i] = min_log2 (first_set (~i)). */
+ u8 find_first_zero_table[16];
+
+ /* Hash seeds for Jenkins hash. */
+ u32x4_union_t hash_seeds[3];
+
+ /* Key work space is a vector of length
+ n_key_u32s << log2_n_key_word_len_u32x. */
+ u32 log2_n_key_word_len_u32x;
+
+ /* Work space to store keys between pipeline stages. */
+ u32x4_union_t *key_work_space;
+
+ /* Hash work space to store Jenkins hash values between
+ pipeline stages. */
+ vhash_hashed_key_t *hash_work_space;
+} vhash_t;
+
+always_inline vhash_overflow_buckets_t *
+vhash_get_overflow_buckets (vhash_t * h, u32 key)
+{
+ u32 i = (((key & h->bucket_mask.as_u32[0]) >> 2) & 0xf);
+ ASSERT (i < ARRAY_LEN (h->overflow_buckets));
+ return h->overflow_buckets + i;
+}
+
+always_inline uword
+vhash_is_non_empty_overflow_bucket (vhash_t * h, u32 key)
+{
+ u32 i = (((key & h->bucket_mask.as_u32[0]) >> 2) & 0xf);
+ ASSERT (i < ARRAY_LEN (h->overflow_buckets));
+ return h->overflow_buckets[i].n_overflow > 0;
+}
+
+always_inline void
+vhash_free_overflow_buckets (vhash_overflow_buckets_t * obs)
+{
+ vec_free (obs->search_buckets);
+ vec_free (obs->free_indices);
+}
+
+always_inline void
+vhash_free (vhash_t * h)
+{
+ uword i;
+ for (i = 0; i < ARRAY_LEN (h->overflow_buckets); i++)
+ vhash_free_overflow_buckets (&h->overflow_buckets[i]);
+ vec_free (h->search_buckets);
+ vec_free (h->key_work_space);
+ vec_free (h->hash_work_space);
+}
+
+always_inline void
+vhash_set_key_word (vhash_t * h, u32 wi, u32 vi, u32 value)
+{
+ u32 i0 = (wi << h->log2_n_key_word_len_u32x) + (vi / 4);
+ u32 i1 = vi % 4;
+ vec_elt (h->key_work_space, i0).as_u32[i1] = value;
+}
+
+always_inline void
+vhash_set_key_word_u32x (vhash_t * h, u32 wi, u32 vi, u32x value)
+{
+ u32 i0 = (wi << h->log2_n_key_word_len_u32x) + (vi / 4);
+ vec_elt (h->key_work_space, i0).as_u32x4 = value;
+}
+
+always_inline u32
+vhash_get_key_word (vhash_t * h, u32 wi, u32 vi)
+{
+ u32 i0 = (wi << h->log2_n_key_word_len_u32x) + (vi / 4);
+ u32 i1 = vi % 4;
+ return vec_elt (h->key_work_space, i0).as_u32[i1];
+}
+
+always_inline u32x
+vhash_get_key_word_u32x (vhash_t * h, u32 wi, u32 vi)
+{
+ u32 i0 = (wi << h->log2_n_key_word_len_u32x) + vi;
+ return vec_elt (h->key_work_space, i0).as_u32x4;
+}
+
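+/* Layout example (illustrative): with log2_n_key_word_len_u32x = 3,
+   key word wi = 2 of vector element vi = 5 lives at
+   key_work_space[(2 << 3) + 5 / 4].as_u32[5 % 4], i.e.
+   key_work_space[17].as_u32[1]. */
+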
+always_inline void
+vhash_validate_sizes (vhash_t * h, u32 n_key_u32, u32 n_vectors)
+{
+ u32 n, l;
+
+ n = max_pow2 (n_vectors) / 4;
+ n = clib_max (n, 8);
+
+ h->log2_n_key_word_len_u32x = l = min_log2 (n);
+ vec_validate_aligned (h->key_work_space, (n_key_u32 << l) - 1,
+ CLIB_CACHE_LINE_BYTES);
+ vec_validate_aligned (h->hash_work_space, n - 1, CLIB_CACHE_LINE_BYTES);
+}
+
+always_inline void
+vhash_gather_key_stage (vhash_t * h,
+ u32 vector_index,
+ u32 n_vectors,
+ vhash_key_function_t key_function,
+ void *state, u32 n_key_u32s)
+{
+ u32 i, j, vi;
+
+  /* Gather keys for 4 packets (for a 128-bit vector length, e.g. u32x4). */
+ for (i = 0; i < n_vectors; i++)
+ {
+ vi = vector_index * 4 + i;
+ for (j = 0; j < n_key_u32s; j++)
+ vhash_set_key_word (h, j, vi, key_function (state, vi, j));
+ }
+}
+
+always_inline void
+vhash_gather_4key_stage (vhash_t * h,
+ u32 vector_index,
+ vhash_4key_function_t key_function,
+ void *state, u32 n_key_u32s)
+{
+ u32 j, vi;
+ vi = vector_index * 4;
+ for (j = 0; j < n_key_u32s; j++)
+ vhash_set_key_word_u32x (h, j, vi, key_function (state, vi, j));
+}
+
+always_inline void
+vhash_mix_stage (vhash_t * h, u32 vector_index, u32 n_key_u32s)
+{
+ i32 i, n_left;
+ u32x a, b, c;
+
+ /* Only need to do this for keys longer than 12 bytes. */
+ ASSERT (n_key_u32s > 3);
+
+ a = h->hash_seeds[0].as_u32x4;
+ b = h->hash_seeds[1].as_u32x4;
+ c = h->hash_seeds[2].as_u32x4;
+ for (i = 0, n_left = n_key_u32s - 3; n_left > 0; n_left -= 3, i += 3)
+ {
+ a +=
+ vhash_get_key_word_u32x (h, n_key_u32s - 1 - (i + 0), vector_index);
+ if (n_left > 1)
+ b +=
+ vhash_get_key_word_u32x (h, n_key_u32s - 1 - (i + 1), vector_index);
+ if (n_left > 2)
+ c +=
+ vhash_get_key_word_u32x (h, n_key_u32s - 1 - (i + 2), vector_index);
+
+ hash_v3_mix_u32x (a, b, c);
+ }
+
+ /* Save away a, b, c for later finalize. */
+ {
+ vhash_hashed_key_t *hk =
+ vec_elt_at_index (h->hash_work_space, vector_index);
+ hk->hashed_key[0].as_u32x4 = a;
+ hk->hashed_key[1].as_u32x4 = b;
+ hk->hashed_key[2].as_u32x4 = c;
+ }
+}
+
+always_inline vhash_search_bucket_t *
+vhash_get_search_bucket_with_index (vhash_t * h, u32 i, u32 n_key_u32s)
+{
+ return ((vhash_search_bucket_t *)
+ vec_elt_at_index (h->search_buckets,
+ (i / 4) *
+ ((sizeof (vhash_search_bucket_t) /
+ sizeof (u32x4)) + n_key_u32s)));
+}
+
+always_inline vhash_search_bucket_t *
+vhash_get_search_bucket (vhash_t * h, u32 key_hash, u32 n_key_u32s)
+{
+ u32 i = key_hash & h->bucket_mask.as_u32[0];
+ return vhash_get_search_bucket_with_index (h, i, n_key_u32s);
+}
+
+always_inline u32x4
+vhash_get_4_search_bucket_byte_offsets (vhash_t * h, u32x4 key_hash,
+ u32 n_key_u32s)
+{
+ vhash_search_bucket_t *b;
+ u32 n_bytes_per_bucket = sizeof (b[0]) + n_key_u32s * sizeof (b->key[0]);
+ u32x4 r = key_hash & h->bucket_mask.as_u32x4;
+
+ /* Multiply with shifts and adds to get bucket byte offset. */
+#define _(x) u32x4_ishift_left (r, (x) - 2)
+ if (n_bytes_per_bucket == (1 << 5))
+ r = _(5);
+ else if (n_bytes_per_bucket == ((1 << 5) + (1 << 4)))
+ r = _(5) + _(4);
+ else if (n_bytes_per_bucket == (1 << 6))
+ r = _(6);
+ else if (n_bytes_per_bucket == ((1 << 6) + (1 << 4)))
+ r = _(6) + _(4);
+ else if (n_bytes_per_bucket == ((1 << 6) + (1 << 5)))
+ r = _(6) + _(5);
+ else if (n_bytes_per_bucket == ((1 << 6) + (1 << 5) + (1 << 4)))
+ r = _(6) + _(5) + _(4);
+ else
+ ASSERT (0);
+#undef _
+ return r;
+}
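+
+/* Worked example (editor's note, assuming bucket_mask keeps its low 2
+   bits clear since keys are grouped 4 per bucket): with n_key_u32s == 2
+   a bucket is one result u32x4 plus two key u32x4s, i.e. 48 ==
+   (1 << 5) + (1 << 4) bytes, so the byte offset of bucket i / 4 is
+   (i / 4) * 48 == _(5) + _(4). */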
+
+always_inline void
+vhash_finalize_stage (vhash_t * h, u32 vector_index, u32 n_key_u32s)
+{
+ i32 n_left;
+ u32x a, b, c;
+ vhash_hashed_key_t *hk =
+ vec_elt_at_index (h->hash_work_space, vector_index);
+
+ if (n_key_u32s <= 3)
+ {
+ a = h->hash_seeds[0].as_u32x4;
+ b = h->hash_seeds[1].as_u32x4;
+ c = h->hash_seeds[2].as_u32x4;
+ n_left = n_key_u32s;
+ }
+ else
+ {
+ a = hk->hashed_key[0].as_u32x4;
+ b = hk->hashed_key[1].as_u32x4;
+ c = hk->hashed_key[2].as_u32x4;
+ n_left = 3;
+ }
+
+ if (n_left > 0)
+ a += vhash_get_key_word_u32x (h, 0, vector_index);
+ if (n_left > 1)
+ b += vhash_get_key_word_u32x (h, 1, vector_index);
+ if (n_left > 2)
+ c += vhash_get_key_word_u32x (h, 2, vector_index);
+
+ hash_v3_finalize_u32x (a, b, c);
+
+ /* Only save away last 32 bits of hash code. */
+ hk->hashed_key[2].as_u32x4 = c;
+
+ /* Prefetch buckets. This costs a bit for small tables but saves
+ big for large ones. */
+ {
+ vhash_search_bucket_t *b0, *b1, *b2, *b3;
+ u32x4_union_t kh;
+
+ kh.as_u32x4 = vhash_get_4_search_bucket_byte_offsets (h, c, n_key_u32s);
+ hk->hashed_key[1].as_u32x4 = kh.as_u32x4;
+
+ b0 = (void *) h->search_buckets + kh.as_u32[0];
+ b1 = (void *) h->search_buckets + kh.as_u32[1];
+ b2 = (void *) h->search_buckets + kh.as_u32[2];
+ b3 = (void *) h->search_buckets + kh.as_u32[3];
+
+ CLIB_PREFETCH (b0, sizeof (b0[0]) + n_key_u32s * sizeof (b0->key[0]),
+ READ);
+ CLIB_PREFETCH (b1, sizeof (b1[0]) + n_key_u32s * sizeof (b1->key[0]),
+ READ);
+ CLIB_PREFETCH (b2, sizeof (b2[0]) + n_key_u32s * sizeof (b2->key[0]),
+ READ);
+ CLIB_PREFETCH (b3, sizeof (b3[0]) + n_key_u32s * sizeof (b3->key[0]),
+ READ);
+ }
+}
+
+always_inline u32
+vhash_merge_results (u32x4 r)
+{
+ r = r | u32x4_word_shift_right (r, 2);
+ r = r | u32x4_word_shift_right (r, 1);
+ return u32x4_get0 (r);
+}
+
+/* Bucket is full if none of its 4 results are 0. */
+always_inline u32
+vhash_search_bucket_is_full (u32x4 r)
+{
+ return u32x4_zero_byte_mask (r) == 0;
+}
+
+always_inline u32
+vhash_non_empty_result_index (u32x4 x)
+{
+ u32 empty_mask = u32x4_zero_byte_mask (x);
+ ASSERT (empty_mask != 0xffff);
+ return min_log2 (0xffff & ~empty_mask) / 4;
+}
+
+always_inline u32
+vhash_empty_result_index (u32x4 x)
+{
+ u32 empty_mask = u32x4_zero_byte_mask (x);
+ ASSERT (empty_mask != 0);
+ return min_log2 (0xffff & empty_mask) / 4;
+}
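+
+/* (Editor's note: u32x4_zero_byte_mask returns one bit per byte of the
+   vector, so dividing a bit index by 4 in the two functions above
+   converts it to a u32 lane index.) */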
+
+always_inline u32x4
+vhash_bucket_compare (vhash_t * h,
+ u32x4_union_t * bucket, u32 key_word_index, u32 vi)
+{
+ u32 k = vhash_get_key_word (h, key_word_index, vi);
+ u32x4 x = { k, k, k, k };
+ return u32x4_is_equal (bucket[key_word_index].as_u32x4, x);
+}
+
+#define vhash_bucket_compare_4(h,wi,vi,b0,b1,b2,b3,cmp0,cmp1,cmp2,cmp3) \
+do { \
+ u32x4 _k4 = vhash_get_key_word_u32x ((h), (wi), (vi)); \
+ u32x4 _k0 = u32x4_splat_word (_k4, 0); \
+ u32x4 _k1 = u32x4_splat_word (_k4, 1); \
+ u32x4 _k2 = u32x4_splat_word (_k4, 2); \
+ u32x4 _k3 = u32x4_splat_word (_k4, 3); \
+ \
+ cmp0 = u32x4_is_equal (b0->key[wi].as_u32x4, _k0); \
+ cmp1 = u32x4_is_equal (b1->key[wi].as_u32x4, _k1); \
+ cmp2 = u32x4_is_equal (b2->key[wi].as_u32x4, _k2); \
+ cmp3 = u32x4_is_equal (b3->key[wi].as_u32x4, _k3); \
+} while (0)
+
+u32 vhash_get_overflow (vhash_t * h, u32 key_hash, u32 vi, u32 n_key_u32s);
+
+always_inline void
+vhash_get_stage (vhash_t * h,
+ u32 vector_index,
+ u32 n_vectors,
+ vhash_result_function_t result_function,
+ void *state, u32 n_key_u32s)
+{
+ u32 i, j;
+ vhash_hashed_key_t *hk =
+ vec_elt_at_index (h->hash_work_space, vector_index);
+ vhash_search_bucket_t *b;
+
+ for (i = 0; i < n_vectors; i++)
+ {
+ u32 vi = vector_index * 4 + i;
+ u32 key_hash = hk->hashed_key[2].as_u32[i];
+ u32 result;
+ u32x4 r, r0;
+
+ b = vhash_get_search_bucket (h, key_hash, n_key_u32s);
+
+ r = r0 = b->result.as_u32x4;
+ for (j = 0; j < n_key_u32s; j++)
+ r &= vhash_bucket_compare (h, &b->key[0], j, vi);
+
+      /* At this point only one of the 4 results should be non-zero,
+         so we can OR all 4 together to get the valid result (if there is one). */
+ result = vhash_merge_results (r);
+
+ if (!result && vhash_search_bucket_is_full (r0))
+ result = vhash_get_overflow (h, key_hash, vi, n_key_u32s);
+
+ result_function (state, vi, result - 1, n_key_u32s);
+ }
+}
+
+always_inline void
+vhash_get_4_stage (vhash_t * h,
+ u32 vector_index,
+ vhash_4result_function_t result_function,
+ void *state, u32 n_key_u32s)
+{
+ u32 i, vi;
+ vhash_hashed_key_t *hk =
+ vec_elt_at_index (h->hash_work_space, vector_index);
+ vhash_search_bucket_t *b0, *b1, *b2, *b3;
+ u32x4 r0, r1, r2, r3, r0_before, r1_before, r2_before, r3_before;
+ u32x4_union_t kh;
+
+ kh.as_u32x4 = hk->hashed_key[1].as_u32x4;
+
+ b0 = (void *) h->search_buckets + kh.as_u32[0];
+ b1 = (void *) h->search_buckets + kh.as_u32[1];
+ b2 = (void *) h->search_buckets + kh.as_u32[2];
+ b3 = (void *) h->search_buckets + kh.as_u32[3];
+
+ r0 = r0_before = b0->result.as_u32x4;
+ r1 = r1_before = b1->result.as_u32x4;
+ r2 = r2_before = b2->result.as_u32x4;
+ r3 = r3_before = b3->result.as_u32x4;
+
+ vi = vector_index * 4;
+
+ for (i = 0; i < n_key_u32s; i++)
+ {
+ u32x4 c0, c1, c2, c3;
+ vhash_bucket_compare_4 (h, i, vector_index,
+ b0, b1, b2, b3, c0, c1, c2, c3);
+ r0 &= c0;
+ r1 &= c1;
+ r2 &= c2;
+ r3 &= c3;
+ }
+
+ u32x4_transpose (r0, r1, r2, r3);
+
+ /* Gather together 4 results. */
+ {
+ u32x4_union_t r;
+ u32x4 ones = { 1, 1, 1, 1 };
+ u32 not_found_mask;
+
+ r.as_u32x4 = r0 | r1 | r2 | r3;
+ not_found_mask = u32x4_zero_byte_mask (r.as_u32x4);
+ not_found_mask &= ((vhash_search_bucket_is_full (r0_before) << (4 * 0))
+ | (vhash_search_bucket_is_full (r1_before) << (4 * 1))
+ | (vhash_search_bucket_is_full (r2_before) << (4 * 2))
+ | (vhash_search_bucket_is_full (r3_before) <<
+ (4 * 3)));
+ if (not_found_mask)
+ {
+ u32x4_union_t key_hash;
+
+ key_hash.as_u32x4 =
+ hk->hashed_key[2].as_u32x4 & h->bucket_mask.as_u32x4;
+
+	  /* Slow path: one of the buckets may have been full, so we also need to search the overflow buckets. */
+ if (not_found_mask & (1 << (4 * 0)))
+ r.as_u32[0] = vhash_get_overflow (h, key_hash.as_u32[0],
+ vi + 0, n_key_u32s);
+ if (not_found_mask & (1 << (4 * 1)))
+ r.as_u32[1] = vhash_get_overflow (h, key_hash.as_u32[1],
+ vi + 1, n_key_u32s);
+ if (not_found_mask & (1 << (4 * 2)))
+ r.as_u32[2] = vhash_get_overflow (h, key_hash.as_u32[2],
+ vi + 2, n_key_u32s);
+ if (not_found_mask & (1 << (4 * 3)))
+ r.as_u32[3] = vhash_get_overflow (h, key_hash.as_u32[3],
+ vi + 3, n_key_u32s);
+ }
+
+ result_function (state, vi, r.as_u32x4 - ones, n_key_u32s);
+ }
+}
+
+u32
+vhash_set_overflow (vhash_t * h,
+ u32 key_hash, u32 vi, u32 new_result, u32 n_key_u32s);
+
+always_inline void
+vhash_set_stage (vhash_t * h,
+ u32 vector_index,
+ u32 n_vectors,
+ vhash_result_function_t result_function,
+ void *state, u32 n_key_u32s)
+{
+ u32 i, j, n_new_elts = 0;
+ vhash_hashed_key_t *hk =
+ vec_elt_at_index (h->hash_work_space, vector_index);
+ vhash_search_bucket_t *b;
+
+ for (i = 0; i < n_vectors; i++)
+ {
+ u32 vi = vector_index * 4 + i;
+ u32 key_hash = hk->hashed_key[2].as_u32[i];
+ u32 old_result, new_result;
+ u32 i_set;
+ u32x4 r, r0, cmp;
+
+ b = vhash_get_search_bucket (h, key_hash, n_key_u32s);
+
+ cmp = vhash_bucket_compare (h, &b->key[0], 0, vi);
+ for (j = 1; j < n_key_u32s; j++)
+ cmp &= vhash_bucket_compare (h, &b->key[0], j, vi);
+
+ r0 = b->result.as_u32x4;
+ r = r0 & cmp;
+
+      /* At this point only one of the 4 results should be non-zero,
+         so we can OR all 4 together to get the valid result (if there is one). */
+ old_result = vhash_merge_results (r);
+
+ if (!old_result && vhash_search_bucket_is_full (r0))
+ old_result = vhash_get_overflow (h, key_hash, vi, n_key_u32s);
+
+ /* Get new result; possibly do something with old result. */
+ new_result = result_function (state, vi, old_result - 1, n_key_u32s);
+
+ /* User cannot use ~0 as a hash result since a result of 0 is
+ used to mark unused bucket entries. */
+ ASSERT (new_result + 1 != 0);
+ new_result += 1;
+
+      /* Set overwrites the existing result. */
+ if (old_result)
+ {
+ i_set = vhash_non_empty_result_index (r);
+ b->result.as_u32[i_set] = new_result;
+ }
+ else
+ {
+ /* Set allocates new result. */
+ u32 valid_mask;
+
+ valid_mask = (((b->result.as_u32[0] != 0) << 0)
+ | ((b->result.as_u32[1] != 0) << 1)
+ | ((b->result.as_u32[2] != 0) << 2)
+ | ((b->result.as_u32[3] != 0) << 3));
+
+ /* Rotate 4 bit valid mask so that key_hash corresponds to bit 0. */
+ i_set = key_hash & 3;
+ valid_mask =
+ ((valid_mask >> i_set) | (valid_mask << (4 - i_set))) & 0xf;
+
+	  /* Insert at the first empty position in the bucket at or after slot key_hash & 3. */
+ i_set = (i_set + h->find_first_zero_table[valid_mask]) & 3;
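+	  /* Worked example (editor's note): key_hash & 3 == 2 with
+	     valid_mask 0b0110 rotates to 0b1001, whose first zero is
+	     bit 1, giving i_set == (2 + 1) & 3 == 3: the first free
+	     slot at or after slot 2. */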
+
+ if (valid_mask != 0xf)
+ {
+ n_new_elts += 1;
+
+ b->result.as_u32[i_set] = new_result;
+
+ /* Insert new key into search bucket. */
+ for (j = 0; j < n_key_u32s; j++)
+ b->key[j].as_u32[i_set] = vhash_get_key_word (h, j, vi);
+ }
+ else
+ vhash_set_overflow (h, key_hash, vi, new_result, n_key_u32s);
+ }
+ }
+
+ h->n_elts += n_new_elts;
+}
+
+u32 vhash_unset_overflow (vhash_t * h, u32 key_hash, u32 vi, u32 n_key_u32s);
+
+void
+vhash_unset_refill_from_overflow (vhash_t * h,
+ vhash_search_bucket_t * b,
+ u32 key_hash, u32 n_key_u32s);
+
+/* Note: Eliot tried doing 4 unsets at once but could not get a speedup,
+   so vhash_unset_4_stage was abandoned. */
+always_inline void
+vhash_unset_stage (vhash_t * h,
+ u32 vector_index,
+ u32 n_vectors,
+ vhash_result_function_t result_function,
+ void *state, u32 n_key_u32s)
+{
+ u32 i, j, n_elts_unset = 0;
+ vhash_hashed_key_t *hk =
+ vec_elt_at_index (h->hash_work_space, vector_index);
+ vhash_search_bucket_t *b;
+
+ for (i = 0; i < n_vectors; i++)
+ {
+ u32 vi = vector_index * 4 + i;
+ u32 key_hash = hk->hashed_key[2].as_u32[i];
+ u32 old_result;
+ u32x4 cmp, r0;
+
+ b = vhash_get_search_bucket (h, key_hash, n_key_u32s);
+
+ cmp = vhash_bucket_compare (h, &b->key[0], 0, vi);
+ for (j = 1; j < n_key_u32s; j++)
+ cmp &= vhash_bucket_compare (h, &b->key[0], j, vi);
+
+ r0 = b->result.as_u32x4;
+
+      /* At this point cmp is all ones in the lane whose key matches and
+         all zeros elsewhere, so this clears the matching key's result
+         and leaves the others untouched. */
+ b->result.as_u32x4 = r0 & ~cmp;
+
+ old_result = vhash_merge_results (r0 & cmp);
+
+ n_elts_unset += old_result != 0;
+
+ if (vhash_search_bucket_is_full (r0))
+ {
+ if (old_result)
+ vhash_unset_refill_from_overflow (h, b, key_hash, n_key_u32s);
+ else
+ old_result = vhash_unset_overflow (h, key_hash, vi, n_key_u32s);
+ }
+
+ result_function (state, vi, old_result - 1, n_key_u32s);
+ }
+ ASSERT (h->n_elts >= n_elts_unset);
+ h->n_elts -= n_elts_unset;
+}
+
+void vhash_init (vhash_t * h, u32 log2_n_keys, u32 n_key_u32,
+ u32 * hash_seeds);
+
+void vhash_resize (vhash_t * old, u32 log2_n_keys);
+
+typedef struct
+{
+ vhash_t *vhash;
+
+ union
+ {
+ struct
+ {
+ u32 *keys;
+ u32 *results;
+ };
+
+ /* Vector layout for get keys. */
+ struct
+ {
+ u32x4_union_t *get_keys;
+ u32x4_union_t *get_results;
+ };
+ };
+
+ u32 n_vectors_div_4;
+ u32 n_vectors_mod_4;
+
+ u32 n_key_u32;
+
+ u32 n_keys;
+} vhash_main_t;
+
+always_inline u32
+vhash_get_alloc_keys (vhash_main_t * vm, u32 n_keys, u32 n_key_u32)
+{
+ u32 i, n;
+
+ i = vm->n_keys;
+ vm->n_keys = i + n_keys;
+
+ n = (round_pow2 (vm->n_keys, 4) / 4) * n_key_u32;
+
+ vec_validate_aligned (vm->get_keys, n - 1, sizeof (vm->get_keys[0]));
+ vec_validate_aligned (vm->get_results, n - 1, sizeof (vm->get_results[0]));
+
+ return i;
+}
+
+always_inline void
+vhash_get_set_key_word (vhash_main_t * vm, u32 vi, u32 wi, u32 n_key_u32,
+ u32 value)
+{
+ u32x4_union_t *k = vec_elt_at_index (vm->get_keys, (vi / 4) * n_key_u32);
+ ASSERT (wi < n_key_u32);
+ k[wi].as_u32[vi % 4] = value;
+}
+
+always_inline u32
+vhash_get_fetch_result (vhash_main_t * vm, u32 vi)
+{
+ u32x4_union_t *r = vec_elt_at_index (vm->get_results, vi / 4);
+ return r->as_u32[vi % 4];
+}
+
+void vhash_main_get (vhash_main_t * vm);
+
+always_inline u32
+vhash_set_alloc_keys (vhash_main_t * vm, u32 n_keys, u32 n_key_u32)
+{
+ u32 i;
+
+ i = vm->n_keys;
+ vm->n_keys = i + n_keys;
+
+ vec_resize (vm->keys, n_keys * n_key_u32);
+ vec_resize (vm->results, n_keys);
+
+ return i;
+}
+
+always_inline void
+vhash_set_set_key_word (vhash_main_t * vm, u32 vi, u32 wi, u32 n_key_u32,
+ u32 value)
+{
+ u32 *k = vec_elt_at_index (vm->keys, vi * n_key_u32);
+ ASSERT (wi < n_key_u32);
+ k[wi] = value;
+}
+
+always_inline void
+vhash_set_set_result (vhash_main_t * vm, u32 vi, u32 result)
+{
+ u32 *r = vec_elt_at_index (vm->results, vi);
+ r[0] = result;
+}
+
+always_inline u32
+vhash_set_fetch_old_result (vhash_main_t * vm, u32 vi)
+{
+ u32 *r = vec_elt_at_index (vm->results, vi);
+ return r[0];
+}
+
+void vhash_main_set (vhash_main_t * vm);
+
+always_inline u32
+vhash_unset_alloc_keys (vhash_main_t * vm, u32 n_keys, u32 n_key_u32)
+{
+ return vhash_set_alloc_keys (vm, n_keys, n_key_u32);
+}
+
+always_inline void
+vhash_unset_set_key_word (vhash_main_t * vm, u32 vi, u32 wi, u32 n_key_u32,
+ u32 value)
+{
+ vhash_set_set_key_word (vm, vi, wi, n_key_u32, value);
+}
+
+always_inline void
+vhash_unset_set_result (vhash_main_t * vm, u32 vi, u32 result)
+{
+ vhash_set_set_result (vm, vi, result);
+}
+
+always_inline u32
+vhash_unset_fetch_old_result (vhash_main_t * vm, u32 vi)
+{
+ return vhash_set_fetch_old_result (vm, vi);
+}
+
+void vhash_main_unset (vhash_main_t * vm);
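+
+/* Usage sketch (editor's illustration; assumes the caller sets
+   n_key_u32 and that a fetched result of ~0 means "not found";
+   example_* names are hypothetical): batch lookup of n single-u32
+   keys through vhash_main_t. */
+always_inline void
+example_vhash_batch_get (vhash_t * h, u32 * keys, u32 * results, u32 n)
+{
+  vhash_main_t vm = { 0 };
+  u32 i, i0;
+
+  vm.vhash = h;
+  vm.n_key_u32 = 1;
+  i0 = vhash_get_alloc_keys (&vm, n, /* n_key_u32 */ 1);
+  for (i = 0; i < n; i++)
+    vhash_get_set_key_word (&vm, i0 + i, /* wi */ 0, /* n_key_u32 */ 1,
+			    keys[i]);
+  vhash_main_get (&vm);
+  for (i = 0; i < n; i++)
+    results[i] = vhash_get_fetch_result (&vm, i0 + i);
+}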
+
+typedef struct
+{
+ vhash_main_t new;
+
+ vhash_t *old;
+} vhash_resize_t;
+
+u32 vhash_resize_incremental (vhash_resize_t * vr, u32 vector_index,
+ u32 n_vectors);
+
+#endif /* CLIB_HAVE_VEC128 */
+
+#endif /* included_clib_vhash_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/xxhash.h b/src/vppinfra/xxhash.h
new file mode 100644
index 00000000..ea1e21bf
--- /dev/null
+++ b/src/vppinfra/xxhash.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Original license for the code used to construct
+ clib_xxhash(...).
+
+ xxHash - Fast Hash algorithm
+ Copyright (C) 2012-2014, Yann Collet.
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef __included_xxhash_h__
+#define __included_xxhash_h__
+
+#define PRIME64_1 11400714785074694791ULL
+#define PRIME64_2 14029467366897019727ULL
+#define PRIME64_3 1609587929392839161ULL
+#define PRIME64_4 9650029242287828579ULL
+#define PRIME64_5 2870177450012600261ULL
+#define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
+
+static inline u64
+clib_xxhash (u64 key)
+{
+ u64 k1, h64;
+
+ k1 = key;
+ h64 = 0x9e3779b97f4a7c13LL + PRIME64_5 + 8;
+ k1 *= PRIME64_2;
+ k1 = XXH_rotl64 (k1, 31);
+ k1 *= PRIME64_1;
+ h64 ^= k1;
+ h64 = XXH_rotl64 (h64, 27) * PRIME64_1 + PRIME64_4;
+
+ h64 ^= h64 >> 33;
+ h64 *= PRIME64_2;
+ h64 ^= h64 >> 29;
+ h64 *= PRIME64_3;
+ h64 ^= h64 >> 32;
+ return h64;
+}
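+
+/* Usage sketch (editor's illustration; the bucket reduction is an
+   assumption, not part of this header): hash a 64-bit key down to a
+   power-of-two table index. */
+static inline u64
+clib_xxhash_example_bucket (u64 key, u32 log2_n_buckets)
+{
+  return clib_xxhash (key) & (((u64) 1 << log2_n_buckets) - 1);
+}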
+
+#endif /* __included_xxhash_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/xy.h b/src/vppinfra/xy.h
new file mode 100644
index 00000000..fb562161
--- /dev/null
+++ b/src/vppinfra/xy.h
@@ -0,0 +1,56 @@
+/* (X,Y) coordinates. */
+
+/*
+ Copyright (c) 2008 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_clib_xy_h
+#define included_clib_xy_h
+
+#include <vppinfra/types.h>
+
+/* Basic definitions: coordinates and points. */
+typedef double xy_float_t;
+typedef __complex__ double xy_t;
+typedef __complex__ int ixy_t;
+
+typedef __complex__ char i8xy_t;
+typedef __complex__ short i16xy_t;
+typedef __complex__ int i32xy_t;
+
+/* X/Y components of a point: can be used as either rvalue or lvalue. */
+#define xy_x(x) __real__ (x)
+#define xy_y(x) __imag__ (x)
+
+/* Unit vectors in x/y directions. */
+#define xy_x_unit_vector (1)
+#define xy_y_unit_vector (1I)
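+
+/* Usage sketch (editor's illustration): build a point from its
+   components; xy_x / xy_y read them back. */
+static inline xy_t
+xy_example_point (xy_float_t x, xy_float_t y)
+{
+  return x * xy_x_unit_vector + y * xy_y_unit_vector;
+}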
+
+#endif /* included_clib_xy_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/zvec.c b/src/vppinfra/zvec.c
new file mode 100644
index 00000000..d062e5f7
--- /dev/null
+++ b/src/vppinfra/zvec.c
@@ -0,0 +1,442 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003, 2005 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/bitmap.h>
+#include <vppinfra/bitops.h> /* for next_with_same_number_of_set_bits */
+#include <vppinfra/error.h> /* for ASSERT */
+#include <vppinfra/mem.h>
+#include <vppinfra/os.h> /* for os_panic */
+#include <vppinfra/vec.h>
+#include <vppinfra/zvec.h>
+
+/* Consider coding as bitmap, coding = 2^c_0 + 2^c_1 + ... + 2^c_n
+ With c_0 < c_1 < ... < c_n. coding == 0 represents c_n = BITS (uword).
+
+   Unsigned integers i = 0 ... are represented as follows,
+   with i counted from the start of its range in each row:
+
+     0 <= i < 2^c_0               (i << 1) | (1 << 0)   binary: i 1
+     2^c_0 <= i < 2^c_0 + 2^c_1   (i << 2) | (1 << 1)   binary: i 1 0
+     ...                                                binary: i 0 ... 0
+
+   Smaller numbers use fewer bits. The coding is chosen so that
+   encoding a given histogram of typical values takes the smallest
+   number of bits: the number and position of the coding bits c_i
+   are fitted to that histogram.
+*/
+
+/* Decode given compressed data. Return number of compressed data
+ bits used. */
+uword
+zvec_decode (uword coding, uword zdata, uword * n_zdata_bits)
+{
+ uword c, d, result, n_bits;
+ uword explicit_end, implicit_end;
+
+ result = 0;
+ n_bits = 0;
+ while (1)
+ {
+ c = first_set (coding);
+ implicit_end = c == coding;
+ explicit_end = (zdata & 1) & ~implicit_end;
+ d = (zdata >> explicit_end) & (c - 1);
+ if (explicit_end | implicit_end)
+ {
+ result += d;
+ n_bits += min_log2 (c) + explicit_end;
+ break;
+ }
+ n_bits += 1;
+ result += c;
+ coding ^= c;
+ zdata >>= 1;
+ }
+
+ if (coding == 0)
+ n_bits = BITS (uword);
+
+ *n_zdata_bits = n_bits;
+ return result;
+}
+
+uword
+zvec_encode (uword coding, uword data, uword * n_result_bits)
+{
+ uword c, shift, result;
+ uword explicit_end, implicit_end;
+
+  /* Data must be in range. Note that for the special coding == 0,
+     coding - 1 is ~0, so any data passes this check. */
+ ASSERT (data <= coding - 1);
+
+ shift = 0;
+ while (1)
+ {
+ c = first_set (coding);
+ implicit_end = c == coding;
+ explicit_end = ((data & (c - 1)) == data);
+ if (explicit_end | implicit_end)
+ {
+ uword t = explicit_end & ~implicit_end;
+ result = ((data << t) | t) << shift;
+ *n_result_bits =
+ /* data bits */ (c == 0 ? BITS (uword) : min_log2 (c))
+ /* shift bits */ + shift + t;
+ return result;
+ }
+ data -= c;
+ coding ^= c;
+ shift++;
+ }
+
+ /* Never reached. */
+ ASSERT (0);
+ return ~0;
+}
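+
+/* Worked example (editor's sketch): with coding 0xa == 2^1 + 2^3,
+   values 0..9 are representable; 0 and 1 cost 2 bits, 2..9 cost 4
+   bits. */
+always_inline void
+zvec_encode_decode_example (void)
+{
+  uword n_bits, z;
+
+  z = zvec_encode (0xa, 5, &n_bits);	/* z == 6, n_bits == 4 */
+  ASSERT (zvec_decode (0xa, z, &n_bits) == 5);
+  ASSERT (n_bits == 4);
+}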
+
+always_inline uword
+get_data (void *data, uword data_bytes, uword is_signed)
+{
+ if (data_bytes == 1)
+ return is_signed ? zvec_signed_to_unsigned (*(i8 *) data) : *(u8 *) data;
+ else if (data_bytes == 2)
+ return is_signed ? zvec_signed_to_unsigned (*(i16 *) data) : *(u16 *)
+ data;
+ else if (data_bytes == 4)
+ return is_signed ? zvec_signed_to_unsigned (*(i32 *) data) : *(u32 *)
+ data;
+ else if (data_bytes == 8)
+ return is_signed ? zvec_signed_to_unsigned (*(i64 *) data) : *(u64 *)
+ data;
+ else
+ {
+ os_panic ();
+ return ~0;
+ }
+}
+
+always_inline void
+put_data (void *data, uword data_bytes, uword is_signed, uword x)
+{
+ if (data_bytes == 1)
+ {
+ if (is_signed)
+ *(i8 *) data = zvec_unsigned_to_signed (x);
+ else
+ *(u8 *) data = x;
+ }
+ else if (data_bytes == 2)
+ {
+ if (is_signed)
+ *(i16 *) data = zvec_unsigned_to_signed (x);
+ else
+ *(u16 *) data = x;
+ }
+ else if (data_bytes == 4)
+ {
+ if (is_signed)
+ *(i32 *) data = zvec_unsigned_to_signed (x);
+ else
+ *(u32 *) data = x;
+ }
+ else if (data_bytes == 8)
+ {
+ if (is_signed)
+ *(i64 *) data = zvec_unsigned_to_signed (x);
+ else
+ *(u64 *) data = x;
+ }
+ else
+ {
+ os_panic ();
+ }
+}
+
+always_inline uword *
+zvec_encode_inline (uword * zvec,
+ uword * zvec_n_bits,
+ uword coding,
+ void *data,
+ uword data_stride,
+ uword n_data, uword data_bytes, uword is_signed)
+{
+ uword i;
+
+ i = *zvec_n_bits;
+ while (n_data >= 1)
+ {
+ uword d0, z0, l0;
+
+ d0 = get_data (data + 0 * data_stride, data_bytes, is_signed);
+ data += 1 * data_stride;
+ n_data -= 1;
+
+ z0 = zvec_encode (coding, d0, &l0);
+ zvec = clib_bitmap_set_multiple (zvec, i, z0, l0);
+ i += l0;
+ }
+
+ *zvec_n_bits = i;
+ return zvec;
+}
+
+#define _(TYPE,IS_SIGNED) \
+ uword * zvec_encode_##TYPE (uword * zvec, \
+ uword * zvec_n_bits, \
+ uword coding, \
+ void * data, \
+ uword data_stride, \
+ uword n_data) \
+ { \
+ return zvec_encode_inline (zvec, zvec_n_bits, \
+ coding, \
+ data, data_stride, n_data, \
+ /* data_bytes */ sizeof (TYPE), \
+ /* is_signed */ IS_SIGNED); \
+ }
+
+_(u8, /* is_signed */ 0);
+_(u16, /* is_signed */ 0);
+_(u32, /* is_signed */ 0);
+_(u64, /* is_signed */ 0);
+_(i8, /* is_signed */ 1);
+_(i16, /* is_signed */ 1);
+_(i32, /* is_signed */ 1);
+_(i64, /* is_signed */ 1);
+
+#undef _
+
+always_inline uword
+coding_max_n_bits (uword coding)
+{
+ uword n_bits;
+ (void) zvec_decode (coding, 0, &n_bits);
+ return n_bits;
+}
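+
+/* (Editor's note: an all-zero zdata walks every range of the coding,
+   so the decode above returns the longest possible code length.) */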
+
+always_inline void
+zvec_decode_inline (uword * zvec,
+ uword * zvec_n_bits,
+ uword coding,
+ void *data,
+ uword data_stride,
+ uword n_data, uword data_bytes, uword is_signed)
+{
+ uword i, n_max;
+
+ i = *zvec_n_bits;
+ n_max = coding_max_n_bits (coding);
+ while (n_data >= 1)
+ {
+ uword d0, z0, l0;
+
+ z0 = clib_bitmap_get_multiple (zvec, i, n_max);
+ d0 = zvec_decode (coding, z0, &l0);
+ i += l0;
+ put_data (data + 0 * data_stride, data_bytes, is_signed, d0);
+ data += 1 * data_stride;
+ n_data -= 1;
+ }
+ *zvec_n_bits = i;
+}
+
+#define _(TYPE,IS_SIGNED) \
+ void zvec_decode_##TYPE (uword * zvec, \
+ uword * zvec_n_bits, \
+ uword coding, \
+ void * data, \
+ uword data_stride, \
+ uword n_data) \
+ { \
+ return zvec_decode_inline (zvec, zvec_n_bits, \
+ coding, \
+ data, data_stride, n_data, \
+ /* data_bytes */ sizeof (TYPE), \
+ /* is_signed */ IS_SIGNED); \
+ }
+
+_(u8, /* is_signed */ 0);
+_(u16, /* is_signed */ 0);
+_(u32, /* is_signed */ 0);
+_(u64, /* is_signed */ 0);
+_(i8, /* is_signed */ 1);
+_(i16, /* is_signed */ 1);
+_(i32, /* is_signed */ 1);
+_(i64, /* is_signed */ 1);
+
+#undef _
+
+/* Compute number of bits needed to encode given histogram. */
+static uword
+zvec_coding_bits (uword coding, uword * histogram_counts, uword min_bits)
+{
+ uword n_type_bits, n_bits;
+ uword this_count, last_count, max_count_index;
+ uword i, b, l;
+
+ n_bits = 0;
+ n_type_bits = 1;
+ last_count = 0;
+ max_count_index = vec_len (histogram_counts) - 1;
+
+ /* Coding is not large enough to encode given data. */
+ if (coding <= max_count_index)
+ return ~0;
+
+ i = 0;
+ while (coding != 0)
+ {
+ b = first_set (coding);
+ l = min_log2 (b);
+ i += b;
+
+ this_count =
+ histogram_counts[i > max_count_index ? max_count_index : i - 1];
+
+ /* No more data to encode? */
+ if (this_count == last_count)
+ break;
+
+ /* Last coding is i 0 ... 0 so we don't need an extra type bit. */
+ if (coding == b)
+ n_type_bits--;
+
+ n_bits += (this_count - last_count) * (n_type_bits + l);
+
+ /* This coding cannot be minimal: so return. */
+ if (n_bits >= min_bits)
+ return ~0;
+
+ last_count = this_count;
+ coding ^= b;
+ n_type_bits++;
+ }
+
+ return n_bits;
+}
+
+uword
+_zvec_coding_from_histogram (void *histogram,
+ uword histogram_len,
+ uword histogram_elt_count_offset,
+ uword histogram_elt_bytes,
+ uword max_value_to_encode,
+ zvec_coding_info_t * coding_return)
+{
+ uword coding, min_coding;
+ uword min_coding_bits, coding_bits;
+ uword i, n_bits_set, total_count;
+ uword *counts;
+ zvec_histogram_count_t *h_count = histogram + histogram_elt_count_offset;
+
+ if (histogram_len < 1)
+ {
+ coding_return->coding = 0;
+ coding_return->min_coding_bits = 0;
+ coding_return->n_data = 0;
+ coding_return->n_codes = 0;
+ coding_return->ave_coding_bits = 0;
+ return 0;
+ }
+
+ total_count = 0;
+ counts = vec_new (uword, histogram_len);
+ for (i = 0; i < histogram_len; i++)
+ {
+ zvec_histogram_count_t this_count = h_count[0];
+ total_count += this_count;
+ counts[i] = total_count;
+ h_count =
+ (zvec_histogram_count_t *) ((void *) h_count + histogram_elt_bytes);
+ }
+
+ min_coding = 0;
+ min_coding_bits = ~0;
+
+ {
+ uword base_coding =
+ max_value_to_encode !=
+ ~0 ? (1 + max_value_to_encode) : vec_len (counts);
+ uword max_coding = max_pow2 (2 * base_coding);
+
+ for (n_bits_set = 1; n_bits_set <= 8; n_bits_set++)
+ {
+ for (coding = pow2_mask (n_bits_set);
+ coding < max_coding;
+ coding = next_with_same_number_of_set_bits (coding))
+ {
+ coding_bits = zvec_coding_bits (coding, counts, min_coding_bits);
+ if (coding_bits >= min_coding_bits)
+ continue;
+ min_coding_bits = coding_bits;
+ min_coding = coding;
+ }
+ }
+ }
+
+ if (coding_return)
+ {
+ coding_return->coding = min_coding;
+ coding_return->min_coding_bits = min_coding_bits;
+ coding_return->n_data = total_count;
+ coding_return->n_codes = vec_len (counts);
+ coding_return->ave_coding_bits =
+ (f64) min_coding_bits / (f64) total_count;
+ }
+
+ vec_free (counts);
+
+ return min_coding;
+}
+
+u8 *
+format_zvec_coding (u8 * s, va_list * args)
+{
+ zvec_coding_info_t *c = va_arg (*args, zvec_coding_info_t *);
+ return format (s,
+ "zvec coding 0x%x, %d elts, %d codes, %d bits total, %.4f ave bits/code",
+ c->coding, c->n_data, c->n_codes, c->min_coding_bits,
+ c->ave_coding_bits);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vppinfra/zvec.h b/src/vppinfra/zvec.h
new file mode 100644
index 00000000..7d35a3fe
--- /dev/null
+++ b/src/vppinfra/zvec.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef included_zvec_h
+#define included_zvec_h
+
+#include <vppinfra/clib.h>
+#include <vppinfra/error.h> /* for ASSERT */
+#include <vppinfra/format.h>
+
+/* zvec: compressed vectors.
+
+ Data is entropy coded with 32 bit "codings".
+
+ Consider coding as bitmap, coding = 2^c_0 + 2^c_1 + ... + 2^c_n
+ With c_0 < c_1 < ... < c_n. coding == 0 represents c_n = BITS (uword).
+
+   Unsigned integers i = 0 ... are represented as follows,
+   with i counted from the start of its range in each row:
+
+     0 <= i < 2^c_0               (i << 1) | (1 << 0)   binary: i 1
+     2^c_0 <= i < 2^c_0 + 2^c_1   (i << 2) | (1 << 1)   binary: i 1 0
+     ...                                                binary: i 0 ... 0
+
+   Smaller numbers use fewer bits. The coding is chosen so that
+   encoding a given histogram of typical values takes the smallest
+   number of bits: the number and position of the coding bits c_i
+   are fitted to that histogram.
+*/
+
+typedef struct
+{
+ /* Smallest coding for given histogram of typical data. */
+ u32 coding;
+
+ /* Number of data in histogram. */
+ u32 n_data;
+
+ /* Number of codes (unique values) in histogram. */
+ u32 n_codes;
+
+ /* Number of bits in smallest coding of data. */
+ u32 min_coding_bits;
+
+ /* Average number of bits per code. */
+ f64 ave_coding_bits;
+} zvec_coding_info_t;
+
+/* Encode/decode data. */
+uword zvec_encode (uword coding, uword data, uword * n_result_bits);
+uword zvec_decode (uword coding, uword zdata, uword * n_zdata_bits);
+
+format_function_t format_zvec_coding;
+
+typedef u32 zvec_histogram_count_t;
+
+#define zvec_coding_from_histogram(h,count_field,len,max_value_to_encode,zc) \
+ _zvec_coding_from_histogram ((h), (len), \
+ STRUCT_OFFSET_OF_VAR (h, count_field), \
+ sizeof (h[0]), \
+ max_value_to_encode, \
+ (zc))
+
+uword
+_zvec_coding_from_histogram (void *_histogram,
+ uword histogram_len,
+ uword histogram_elt_count_offset,
+ uword histogram_elt_bytes,
+ uword max_value_to_encode,
+ zvec_coding_info_t * coding_info_return);
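+
+/* Usage sketch (editor's illustration; zvec_example_hist_t is a
+   hypothetical caller-side type where hist[v].count is the number of
+   occurrences of value v in the data to be encoded). */
+typedef struct { u32 count; } zvec_example_hist_t;
+
+always_inline uword
+zvec_example_choose_coding (zvec_example_hist_t * hist,
+			    zvec_coding_info_t * zc)
+{
+  return zvec_coding_from_histogram (hist, count, vec_len (hist),
+				     /* max_value_to_encode */ ~0, zc);
+}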
+
+#define _(TYPE,IS_SIGNED) \
+ uword * zvec_encode_##TYPE (uword * zvec, uword * zvec_n_bits, uword coding, \
+ void * data, uword data_stride, uword n_data);
+
+_(u8, /* is_signed */ 0);
+_(u16, /* is_signed */ 0);
+_(u32, /* is_signed */ 0);
+_(u64, /* is_signed */ 0);
+_(i8, /* is_signed */ 1);
+_(i16, /* is_signed */ 1);
+_(i32, /* is_signed */ 1);
+_(i64, /* is_signed */ 1);
+
+#undef _
+
+#define _(TYPE,IS_SIGNED) \
+ void zvec_decode_##TYPE (uword * zvec, \
+ uword * zvec_n_bits, \
+ uword coding, \
+ void * data, \
+ uword data_stride, \
+ uword n_data)
+
+_(u8, /* is_signed */ 0);
+_(u16, /* is_signed */ 0);
+_(u32, /* is_signed */ 0);
+_(u64, /* is_signed */ 0);
+_(i8, /* is_signed */ 1);
+_(i16, /* is_signed */ 1);
+_(i32, /* is_signed */ 1);
+_(i64, /* is_signed */ 1);
+
+#undef _
+
+/* Signed <=> unsigned conversion.
+ -1, -2, -3, ... => 1, 3, 5, ... odds
+ 0, +1, +2, +3, ... => 0, 2, 4, 6, ... evens */
+always_inline uword
+zvec_signed_to_unsigned (word s)
+{
+ uword a = s < 0;
+ s = 2 * s + a;
+ return a ? -s : s;
+}
+
+always_inline word
+zvec_unsigned_to_signed (uword u)
+{
+  /* Invert zvec_signed_to_unsigned: odd u decodes to -(u + 1) / 2,
+     even u to u / 2. */
+  uword a = u & 1;
+  u >>= 1;
+  return a ? -(word) (u + 1) : (word) u;
+}
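+
+/* Round-trip example: -1, +1, -2, +2 map to 1, 2, 3, 4 and back. */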
+
+#endif /* included_zvec_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */