/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @brief
 *  The load-balance map: a bucket-to-bucket translation layered on top of a
 *  load-balance object, used to steer traffic away from buckets whose path
 *  has failed.
 */

#ifndef __LOAD_BALANCE_MAP_H__
#define __LOAD_BALANCE_MAP_H__

#include <vlib/vlib.h>
#include <vnet/fib/fib_types.h>
#include <vnet/dpo/load_balance.h>

struct load_balance_map_path_t_;

/**
 * A load-balance map; the bucket translation table and the paths it was
 * built from.
 */
typedef struct load_balance_map_t_ {
    /**
     * required for pool_get_aligned.
     *  members used in the switch path come first!
     */
    CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);

    /**
     * The buckets of the map that provide the index to index translation.
     * In the first cacheline.
     */
    u16 *lbm_buckets;

    /**
     * The vector of paths this MAP represents
     */
    struct load_balance_map_path_t_ *lbm_paths;

    /**
     * The sum of the normalised weights; cached for convenience.
     */
    u32 lbm_sum_of_norm_weights;

    /**
     * Number of locks. Maps are shared by a large number of recursive fib_entry_t's.
     */
    u32 lbm_locks;
} load_balance_map_t;
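
/*
 * Illustrative example (not from the original source): with three paths,
 * one per bucket, a freshly built map is the identity translation
 * [0, 1, 2]. If the path behind bucket 1 is reported down via
 * load_balance_map_path_state_change(), the map can be rewritten to,
 * e.g., [0, 0, 2], so traffic hashed to bucket 1 is steered to a bucket
 * whose path is still up, without touching the load-balance's own
 * bucket vector.
 */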

extern index_t load_balance_map_add_or_lock(u32 n_buckets,
                                            u32 sum_of_weights,
                                            const load_balance_path_t *norm_paths);

extern void load_balance_map_lock(index_t lbmi);
extern void load_balance_map_unlock(index_t lbmi);
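
/*
 * Sketch of control-plane usage (illustrative; construction of the
 * normalised path vector is elided and assumed to follow load_balance.h):
 *
 *   index_t lbmi;
 *
 *   lbmi = load_balance_map_add_or_lock (n_buckets, sum_of_weights,
 *                                        norm_paths);
 *   ...
 *   load_balance_map_unlock (lbmi);
 */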

extern void load_balance_map_path_state_change(fib_node_index_t path_index);

extern u8* format_load_balance_map(u8 *s, va_list *ap);
extern void load_balance_map_show_mem(void);

/**
 * The encapsulation is broken here to give the data-plane fast access to the map pool.
 */
extern load_balance_map_t *load_balance_map_pool;

static inline load_balance_map_t*
load_balance_map_get (index_t lbmi)
{
    return (pool_elt_at_index(load_balance_map_pool, lbmi));
}

static inline u16
load_balance_map_translate (index_t lbmi,
                            u16 bucket)
{
    load_balance_map_t *lbm;

    lbm = load_balance_map_get(lbmi);

    return (lbm->lbm_buckets[bucket]);
}

static inline const dpo_id_t *
load_balance_get_fwd_bucket (const load_balance_t *lb,
                             u16 bucket)
{
    ASSERT(bucket < lb->lb_n_buckets);

    if (INDEX_INVALID != lb->lb_map)
    {
        bucket = load_balance_map_translate(lb->lb_map, bucket);
    }

    if (PREDICT_TRUE(LB_HAS_INLINE_BUCKETS(lb)))
    {
        return (&lb->lb_buckets_inline[bucket]);
    }
    else
    {
        return (&lb->lb_buckets[bucket]);
    }
}
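
/*
 * Sketch of data-plane usage (illustrative): a forwarding node that already
 * holds a load-balance index and a flow hash would typically select the
 * forwarding bucket as below. load_balance_get() and lb_n_buckets_minus_1
 * come from load_balance.h and are assumed here.
 *
 *   const load_balance_t *lb = load_balance_get (lbi);
 *   const dpo_id_t *dpo;
 *
 *   dpo = load_balance_get_fwd_bucket (lb,
 *                                      flow_hash & lb->lb_n_buckets_minus_1);
 *   next_index = dpo->dpoi_next_node;
 */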

extern void load_balance_map_module_init(void);

#endif