path: root/extras/vagrant/run.sh
#!/bin/bash

# Figure out what system we are running on
if [ "$(uname)" <> "Darwin" ]; then
    OS_ID=$(grep '^ID=' /etc/os-release | cut -f2- -d= | sed -e 's/\"//g')
    OS_VERSION_ID=$(grep '^VERSION_ID=' /etc/os-release | cut -f2- -d= | sed -e 's/\"//g')
fi

if [ "$OS_ID" == "ubuntu" ]; then
    OS_CODENAME=$(grep '^UBUNTU_CODENAME=' /etc/os-release | cut -f2- -d= | sed -e 's/\"//g')
fi

if [ "$OS_ID" == "centos" ] || [ "$OS_ID" == "opensuse" ]; then
    # Load the uio_pci_generic kernel module
    modprobe uio_pci_generic
fi
echo "Starting VPP..."
if [ "$OS_ID" == "ubuntu" ] && [ $OS_CODENAME = "trusty" ] ; then
    start vpp
else
    service vpp start
fi
/*
 * ...an arbitrary number of counters. Each counter
 * is identified by its index. This is used to aggregate per-cpu memory.
 *
 * Warning:
 *   This reference counter is lock-free but is not race-condition free.
 *   The counting result is approximate and another mechanism needs to be used
 *   in order to ensure that an object may be freed.
 */

#include <vnet/vnet.h>

/*
 * Reference counting
 * A specific reference counter is used. The design is quite
 * similar to vlib counters but:
 *   - It is possible to decrease the value
 *   - Summing will not zero the per-thread counters
 *   - Only the thread can reallocate its own counters vector
 *     (to avoid concurrency issues)
 */
typedef struct {
  u32 *counters;
  volatile u32 *counter_lock;
  CLIB_CACHE_LINE_ALIGN_MARK(o);
} vlib_refcount_per_cpu_t;

typedef struct {
  vlib_refcount_per_cpu_t *per_cpu;
} vlib_refcount_t;

static_always_inline
void vlib_refcount_lock (volatile u32 *counter_lock)
{
  while (clib_atomic_test_and_set (counter_lock))
    ;
}

static_always_inline
void vlib_refcount_unlock (volatile u32 *counter_lock)
{
  clib_atomic_release (counter_lock);
}

void __vlib_refcount_resize (vlib_refcount_per_cpu_t *per_cpu, u32 size);

static_always_inline
void vlib_refcount_add (vlib_refcount_t *r, u32 thread_index,
                        u32 counter_index, i32 v)
{
  vlib_refcount_per_cpu_t *per_cpu = &r->per_cpu[thread_index];
  if (PREDICT_FALSE (counter_index >= vec_len (per_cpu->counters)))
    __vlib_refcount_resize (per_cpu,
                            clib_max (counter_index + 16,
                                      (vec_len (per_cpu->counters)) * 2));

  per_cpu->counters[counter_index] += v;
}

u64 vlib_refcount_get (vlib_refcount_t *r, u32 index);

static_always_inline
void vlib_refcount_init (vlib_refcount_t *r)
{
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  u32 thread_index;
  r->per_cpu = 0;
  vec_validate (r->per_cpu, tm->n_vlib_mains - 1);

  for (thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++)
    {
      r->per_cpu[thread_index].counter_lock =
        clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
      r->per_cpu[thread_index].counter_lock[0] = 0;
    }
}
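
The comments above describe an approximate, lock-free, per-thread reference counter. As a rough illustration of how the declared API fits together, the sketch below initializes a counter, updates it from a worker thread, and reads it back on demand. It is a minimal sketch only, assuming a VPP context where the header above is available; the example_* names and the choice of object index are hypothetical and not part of refcount.h or run.sh.

/* Minimal usage sketch (assumption: a VPP component where <vnet/vnet.h>
 * and the refcount declarations above are available).
 * The example_* names are illustrative only. */
static vlib_refcount_t example_refcount;

static void
example_refcount_setup (void)
{
  /* Allocates one per-thread counter vector (plus its lock) per vlib main. */
  vlib_refcount_init (&example_refcount);
}

static void
example_take_and_release (u32 thread_index, u32 object_index)
{
  /* Increment this thread's counter for the object... */
  vlib_refcount_add (&example_refcount, thread_index, object_index, 1);
  /* ...and decrement it again once the object is no longer referenced. */
  vlib_refcount_add (&example_refcount, thread_index, object_index, -1);
}

static u64
example_current_count (u32 object_index)
{
  /* Sums the per-thread counters; per the warning above, the result is
   * approximate while other threads are still updating them. */
  return vlib_refcount_get (&example_refcount, object_index);
}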