aboutsummaryrefslogtreecommitdiffstats
path: root/src/vpp-api/vom/prefix.cpp
blob: 24fb57b34d2d670d08951a603ed92bc06da8a3eb (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <boost/algorithm/string.hpp>

#include <cstring>
#include <sstream>

#include "vom/prefix.hpp"

namespace VOM {
/*
 * The supported L3 protocols.
 * NOTE: the numeric values must be kept in sync with VPP's
 * fib_protocol_t enum — do not renumber or reorder.
 */
const l3_proto_t l3_proto_t::IPV4(0, "ipv4");
const l3_proto_t l3_proto_t::IPV6(1, "ipv6");
const l3_proto_t l3_proto_t::MPLS(2, "mpls");

/**
 * Construct an L3 protocol value.
 *
 * @param v the numeric value (must match VPP's fib_protocol_t)
 * @param s the human-readable protocol name
 */
l3_proto_t::l3_proto_t(int v, const std::string& s)
  : enum_base<l3_proto_t>(v, s)
{
}

/**
 * Predicate: true iff this protocol is IPv6.
 */
bool
l3_proto_t::is_ipv6()
{
  const bool res = (*this == IPV6);

  return res;
}

/**
 * Predicate: true iff this protocol is IPv4.
 */
bool
l3_proto_t::is_ipv4()
{
  const bool res = (*this == IPV4);

  return res;
}

/**
 * Derive the L3 protocol from an IP address' family.
 *
 * @param addr the address to inspect
 * @return IPV6 for a v6 address, IPV4 otherwise
 */
const l3_proto_t&
l3_proto_t::from_address(const boost::asio::ip::address& addr)
{
  return (addr.is_v6() ? IPV6 : IPV4);
}

/*
 * The supported next-hop protocols.
 * NOTE: the numeric values must be kept in sync with VPP's
 * dpo_proto_t enum — do not renumber or reorder.
 */
const nh_proto_t nh_proto_t::IPV4(0, "ipv4");
const nh_proto_t nh_proto_t::IPV6(1, "ipv6");
const nh_proto_t nh_proto_t::MPLS(2, "mpls");
const nh_proto_t nh_proto_t::ETHERNET(3, "ethernet");

/**
 * Construct a next-hop protocol value.
 *
 * @param v the numeric value (must match VPP's dpo_proto_t)
 * @param s the human-readable protocol name
 */
nh_proto_t::nh_proto_t(int v, const std::string& s)
  : enum_base<nh_proto_t>(v, s)
{
}

/**
 * Derive the next-hop protocol from an IP address' family.
 *
 * @param addr the address to inspect
 * @return IPV6 for a v6 address, IPV4 otherwise
 */
const nh_proto_t&
nh_proto_t::from_address(const boost::asio::ip::address& addr)
{
  return (addr.is_v6() ? IPV6 : IPV4);
}

/**
 * The all Zeros prefix
 */
const route::prefix_t route::prefix_t::ZERO("0.0.0.0", 0);
/* The all-zeros IPv6 prefix, ::/0 */
const route::prefix_t route::prefix_t::ZEROv6("::", 0);

/**
 * Construct a prefix from an address and mask length.
 *
 * @param addr the network address
 * @param len  the mask width in bits; stored as-is — the caller must
 *             supply a value legal for the address family
 */
route::prefix_t::prefix_t(const boost::asio::ip::address& addr, uint8_t len)
  : m_addr(addr)
  , m_len(len)
{
}

/**
 * Construct a host prefix from an address alone; the mask length
 * defaults to the full width of the address family (32 or 128).
 */
route::prefix_t::prefix_t(const boost::asio::ip::address& addr)
  : m_addr(addr)
  , m_len(VOM::mask_width(addr))
{
}

/**
 * Construct a prefix from a textual address and a mask length.
 *
 * @param s   a textual IPv4/IPv6 address
 * @param len the mask width in bits
 * @throws boost::system::system_error (from address::from_string)
 *         if 's' is not a valid address
 */
route::prefix_t::prefix_t(const std::string& s, uint8_t len)
  : m_addr(boost::asio::ip::address::from_string(s))
  , m_len(len)
{
}

/**
 * Copy construct — member-wise copy of address and length.
 */
route::prefix_t::prefix_t(const prefix_t& o)
  : m_addr(o.m_addr)
  , m_len(o.m_len)
{
}

/**
 * Default construct: a default-constructed address with a /0 mask.
 */
route::prefix_t::prefix_t()
  : m_addr()
  , m_len(0)
{
}

/**
 * Destructor — no resources owned beyond the members.
 */
route::prefix_t::~prefix_t()
{
}

/**
 * Copy assign — member-wise copy of length and address.
 *
 * @return a reference to this prefix
 */
route::prefix_t&
route::prefix_t::operator=(const route::prefix_t& o)
{
  m_len = o.m_len;
  m_addr = o.m_addr;

  return (*this);
}

/**
 * Read accessor for the prefix's network address.
 */
const boost::asio::ip::address&
route::prefix_t::address() const
{
  return (m_addr);
}

/**
 * Read accessor for the prefix's mask width in bits.
 */
uint8_t
route::prefix_t::mask_width() const
{
  return (m_len);
}

/**
 * Strict weak ordering for use as a container key: ordered first by
 * mask length, then by address.
 */
bool
route::prefix_t::operator<(const route::prefix_t& o) const
{
  if (m_len != o.m_len) {
    return (m_len < o.m_len);
  }

  return (m_addr < o.m_addr);
}

/**
 * Equality: prefixes match when both the address and length agree.
 */
bool
route::prefix_t::operator==(const route::prefix_t& o) const
{
  return (m_addr == o.m_addr && m_len == o.m_len);
}

/**
 * Inequality — the negation of operator==.
 */
bool
route::prefix_t::operator!=(const route::prefix_t& o) const
{
  return (!(o == *this));
}

/**
 * Render the prefix in the conventional "address/length" form,
 * e.g. "10.0.0.0/8".
 */
std::string
route::prefix_t::to_string() const
{
  return (m_addr.to_string() + "/" + std::to_string(m_len));
}

/**
 * Build a boost address from raw bytes in VPP's wire format.
 *
 * @param is_ip6 non-zero when 'bytes' holds a 16-byte IPv6 address;
 *               otherwise a 4-byte IPv4 address is read
 * @param bytes  the raw address bytes (network order)
 * @return the constructed address
 */
boost::asio::ip::address
from_bytes(uint8_t is_ip6, uint8_t* bytes)
{
  if (is_ip6) {
    boost::asio::ip::address_v6::bytes_type b;
    std::copy(bytes, bytes + b.size(), b.begin());
    return (boost::asio::ip::address(boost::asio::ip::address_v6(b)));
  }

  boost::asio::ip::address_v4::bytes_type b;
  std::copy(bytes, bytes + b.size(), b.begin());
  return (boost::asio::ip::address(boost::asio::ip::address_v4(b)));
}

/**
 * Construct a prefix from VPP's raw wire representation.
 *
 * @param is_ip6 non-zero for a 16-byte IPv6 address, else 4-byte IPv4
 * @param addr   the raw address bytes (network order)
 * @param len    the mask width in bits
 */
route::prefix_t::prefix_t(uint8_t is_ip6, uint8_t* addr, uint8_t len)
  : m_addr(from_bytes(is_ip6, addr))
  , m_len(len)
{
}
/**
 * Serialise an IPv6 address into a raw buffer.
 *
 * @param addr  the address to serialise
 * @param array [out] destination; must hold at least 16 bytes
 */
void
to_bytes(const boost::asio::ip::address_v6& addr, uint8_t* array)
{
  memcpy(array, addr.to_bytes().data(), 16);
}

/**
 * Serialise an IPv4 address into a raw buffer.
 *
 * @param addr  the address to serialise
 * @param array [out] destination; must hold at least 4 bytes
 */
void
to_bytes(const boost::asio::ip::address_v4& addr, uint8_t* array)
{
  memcpy(array, addr.to_bytes().data(), 4);
}

/**
 * Serialise an address of either family: write the family flag and
 * the raw address bytes.
 *
 * @param addr   the address to serialise
 * @param is_ip6 [out] set to 1 when 'addr' is v6, else 0
 * @param array  [out] destination buffer; must hold at least 16 bytes
 *               for a v6 address or 4 for a v4 address
 */
void
to_bytes(const boost::asio::ip::address& addr, uint8_t* is_ip6, uint8_t* array)
{
  if (addr.is_v6()) {
    *is_ip6 = 1;
    to_bytes(addr.to_v6(), array);
  } else {
    *is_ip6 = 0;
    to_bytes(addr.to_v4(), array);
  }
}

/**
 * The full mask width of an address family: 128 bits for IPv6,
 * 32 for IPv4.
 */
uint32_t
mask_width(const boost::asio::ip::address& addr)
{
  return (addr.is_v6() ? 128 : 32);
}

/**
 * Serialise this prefix into the raw form the VPP binary API expects.
 *
 * @param is_ip6 [out] set to 1 for IPv6, 0 for IPv4
 * @param addr   [out] the address bytes (16 for v6, 4 for v4)
 * @param len    [out] the mask width in bits
 */
void
route::prefix_t::to_vpp(uint8_t* is_ip6, uint8_t* addr, uint8_t* len) const
{
  *len = m_len;
  to_bytes(m_addr, is_ip6, addr);
}

/**
 * The L3 protocol matching this prefix's address family.
 *
 * @return l3_proto_t::IPV6 for a v6 prefix, l3_proto_t::IPV4 otherwise
 */
l3_proto_t
route::prefix_t::l3_proto() const
{
  if (m_addr.is_v6()) {
    return (l3_proto_t::IPV6);
  }

  /* a duplicate unreachable 'return IPV4' after the if/else was removed */
  return (l3_proto_t::IPV4);
}

/**
 * Stream a prefix in its "address/length" textual form.
 */
std::ostream&
operator<<(std::ostream& os, const route::prefix_t& pfx)
{
  os << pfx.to_string();

  return (os);
}

/**
 * Bit-wise OR of two IPv4 addresses.
 */
boost::asio::ip::address_v4
operator|(const boost::asio::ip::address_v4& addr1,
          const boost::asio::ip::address_v4& addr2)
{
  const uint32_t bits = addr1.to_ulong() | addr2.to_ulong();

  return (boost::asio::ip::address_v4(bits));
}

/**
 * Bit-wise AND of two IPv4 addresses.
 */
boost::asio::ip::address_v4 operator&(const boost::asio::ip::address_v4& addr1,
                                      const boost::asio::ip::address_v4& addr2)
{
  const uint32_t bits = addr1.to_ulong() & addr2.to_ulong();

  return (boost::asio::ip::address_v4(bits));
}

/**
 * Bit-wise complement of an IPv4 address.
 */
boost::asio::ip::address_v4 operator~(const boost::asio::ip::address_v4& addr1)
{
  const uint32_t bits = ~addr1.to_ulong();

  return (boost::asio::ip::address_v4(bits));
}

/**
 * The IPv4 network mask implied by this prefix's length,
 * e.g. a /24 prefix yields 255.255.255.0.
 *
 * NOTE: only meaningful for IPv4 prefixes (m_len <= 32).
 */
boost::asio::ip::address_v4
route::prefix_t::mask() const
{
  /*
   * The previous formula, ~((1 << m_len) - 1), set the wrong bits (a
   * /24 produced 255.0.0.0 — the mask for the *complementary* width)
   * and invoked undefined behaviour for a /32 (left shift by the full
   * word width). Build the mask from the high bits instead, guarding
   * the /0 (match-all) case where the shift count would again be 32.
   */
  const uint32_t a =
    (m_len == 0 ? 0 : (~uint32_t(0)) << (32 - m_len));

  boost::asio::ip::address_v4 addr(a);
  return (addr);
}

/**
 * The lowest IPv4 address covered by this prefix: address AND mask.
 */
boost::asio::ip::address_v4
route::prefix_t::low() const
{
  boost::asio::ip::address_v4 low;
  low = address().to_v4() & mask();
  return (low);
}

/**
 * The highest IPv4 address covered by this prefix: address OR the
 * complement of the mask (i.e. all host bits set).
 */
boost::asio::ip::address_v4
route::prefix_t::high() const
{
  boost::asio::ip::address_v4 high;
  high = address().to_v4() | ~mask();
  return (high);
}
}
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "mozilla")
 * End:
 */
ersion of the code makes use of normal routes in short- * circuiting an explicit mask and compare operation when testing whether * a key satisfies a normal route, and also in remembering the unique leaf * that governs a subtree. */ struct radix_node * rn_search( const void *v_arg, struct radix_node *head) { const u8 * const v = v_arg; struct radix_node *x; for (x = head; x->rn_b >= 0;) { if (x->rn_bmask & v[x->rn_off]) x = x->rn_r; else x = x->rn_l; } return x; } struct radix_node * rn_search_m( const void *v_arg, struct radix_node *head, const void *m_arg) { struct radix_node *x; const u8 * const v = v_arg; const u8 * const m = m_arg; for (x = head; x->rn_b >= 0;) { if ((x->rn_bmask & m[x->rn_off]) && (x->rn_bmask & v[x->rn_off])) x = x->rn_r; else x = x->rn_l; } return x; } int rn_refines( const void *m_arg, const void *n_arg) { const char *m = m_arg; const char *n = n_arg; const char *lim = n + *(const u8 *)n; const char *lim2 = lim; int longer = (*(const u8 *)n++) - (int)(*(const u8 *)m++); int masks_are_equal = 1; if (longer > 0) lim -= longer; while (n < lim) { if (*n & ~(*m)) return 0; if (*n++ != *m++) masks_are_equal = 0; } while (n < lim2) if (*n++) return 0; if (masks_are_equal && (longer < 0)) for (lim2 = m - longer; m < lim2; ) if (*m++) return 1; return !masks_are_equal; } struct radix_node * rn_lookup( const void *v_arg, const void *m_arg, struct radix_node_head *head) { struct radix_node *x; const char *netmask = NULL; if (m_arg) { if ((x = rn_addmask(m_arg, 1, head->rnh_treetop->rn_off)) == 0) return NULL; netmask = x->rn_key; } x = rn_match(v_arg, head); if (x != NULL && netmask != NULL) { while (x != NULL && x->rn_mask != netmask) x = x->rn_dupedkey; } return x; } static int rn_satisfies_leaf( const char *trial, struct radix_node *leaf, int skip) { const char *cp = trial; const char *cp2 = leaf->rn_key; const char *cp3 = leaf->rn_mask; const char *cplim; int length = MIN(*(const u8 *)cp, *(const u8 *)cp2); if (cp3 == 0) cp3 = rn_ones; else length 
= MIN(length, *(const u8 *)cp3); cplim = cp + length; cp3 += skip; cp2 += skip; for (cp += skip; cp < cplim; cp++, cp2++, cp3++) if ((*cp ^ *cp2) & *cp3) return 0; return 1; } struct radix_node * rn_match( const void *v_arg, struct radix_node_head *head) { const char * const v = v_arg; struct radix_node *t = head->rnh_treetop; struct radix_node *top = t; struct radix_node *x; struct radix_node *saved_t; const char *cp = v; const char *cp2; const char *cplim; int off = t->rn_off; int vlen = *(const u8 *)cp; int matched_off; int test, b, rn_b; /* * Open code rn_search(v, top) to avoid overhead of extra * subroutine call. */ for (; t->rn_b >= 0; ) { if (t->rn_bmask & cp[t->rn_off]) t = t->rn_r; else t = t->rn_l; } /* * See if we match exactly as a host destination * or at least learn how many bits match, for normal mask finesse. * * It doesn't hurt us to limit how many bytes to check * to the length of the mask, since if it matches we had a genuine * match and the leaf we have is the most specific one anyway; * if it didn't match with a shorter length it would fail * with a long one. This wins big for class B&C netmasks which * are probably the most common case... */ if (t->rn_mask) vlen = *(const u8 *)t->rn_mask; cp += off; cp2 = t->rn_key + off; cplim = v + vlen; for (; cp < cplim; cp++, cp2++) if (*cp != *cp2) goto on1; /* * This extra grot is in case we are explicitly asked * to look up the default. Ugh! */ if ((t->rn_flags & RNF_ROOT) && t->rn_dupedkey) t = t->rn_dupedkey; return t; on1: test = (*cp ^ *cp2) & 0xff; /* find first bit that differs */ for (b = 7; (test >>= 1) > 0;) b--; matched_off = cp - v; b += matched_off << 3; rn_b = -1 - b; /* * If there is a host route in a duped-key chain, it will be first. */ if ((saved_t = t)->rn_mask == 0) t = t->rn_dupedkey; for (; t; t = t->rn_dupedkey) /* * Even if we don't match exactly as a host, * we may match if the leaf we wound up at is * a route to a net. 
*/ if (t->rn_flags & RNF_NORMAL) { if (rn_b <= t->rn_b) return t; } else if (rn_satisfies_leaf(v, t, matched_off)) return t; t = saved_t; /* start searching up the tree */ do { struct radix_mask *m; t = t->rn_p; m = t->rn_mklist; if (m) { /* * If non-contiguous masks ever become important * we can restore the masking and open coding of * the search and satisfaction test and put the * calculation of "off" back before the "do". */ do { if (m->rm_flags & RNF_NORMAL) { if (rn_b <= m->rm_b) return m->rm_leaf; } else { off = MIN(t->rn_off, matched_off); x = rn_search_m(v, t, m->rm_mask); while (x && x->rn_mask != m->rm_mask) x = x->rn_dupedkey; if (x && rn_satisfies_leaf(v, x, off)) return x; } m = m->rm_mklist; } while (m); } } while (t != top); return NULL; } static void rn_nodeprint(struct radix_node *rn, rn_printer_t printer, void *arg, const char *delim) { (*printer)(arg, "%s(%s%p: p<%p> l<%p> r<%p>)", delim, ((void *)rn == arg) ? "*" : "", rn, rn->rn_p, rn->rn_l, rn->rn_r); } #ifdef RN_DEBUG int rn_debug = 1; static void rn_dbg_print(void *arg, const char *fmt, ...) 
{ va_list ap; va_start(ap, fmt); vlog(LOG_DEBUG, fmt, ap); va_end(ap); } static void rn_treeprint(struct radix_node_head *h, rn_printer_t printer, void *arg) { struct radix_node *dup, *rn; const char *delim; if (printer == NULL) return; rn = rn_walkfirst(h->rnh_treetop, printer, arg); for (;;) { /* Process leaves */ delim = ""; for (dup = rn; dup != NULL; dup = dup->rn_dupedkey) { if ((dup->rn_flags & RNF_ROOT) != 0) continue; rn_nodeprint(dup, printer, arg, delim); delim = ", "; } rn = rn_walknext(rn, printer, arg); if (rn->rn_flags & RNF_ROOT) return; } /* NOTREACHED */ } #define traverse(__head, __rn) rn_treeprint((__head), rn_dbg_print, (__rn)) #endif /* RN_DEBUG */ struct radix_node * rn_newpair( const void *v, int b, struct radix_node nodes[2]) { struct radix_node *tt = nodes; struct radix_node *t = tt + 1; t->rn_b = b; t->rn_bmask = 0x80 >> (b & 7); t->rn_l = tt; t->rn_off = b >> 3; tt->rn_b = -1; tt->rn_key = v; tt->rn_p = t; tt->rn_flags = t->rn_flags = RNF_ACTIVE; return t; } struct radix_node * rn_insert( const void *v_arg, struct radix_node_head *head, int *dupentry, struct radix_node nodes[2]) { struct radix_node *top = head->rnh_treetop; struct radix_node *t = rn_search(v_arg, top); struct radix_node *tt; const char *v = v_arg; int head_off = top->rn_off; int vlen = *((const u8 *)v); const char *cp = v + head_off; int b; /* * Find first bit at which v and t->rn_key differ */ { const char *cp2 = t->rn_key + head_off; const char *cplim = v + vlen; int cmp_res; while (cp < cplim) if (*cp2++ != *cp++) goto on1; *dupentry = 1; return t; on1: *dupentry = 0; cmp_res = (cp[-1] ^ cp2[-1]) & 0xff; for (b = (cp - v) << 3; cmp_res; b--) cmp_res >>= 1; } { struct radix_node *p, *x = top; cp = v; do { p = x; if (cp[x->rn_off] & x->rn_bmask) x = x->rn_r; else x = x->rn_l; } while (b > (unsigned) x->rn_b); /* x->rn_b < b && x->rn_b >= 0 */ #ifdef RN_DEBUG if (rn_debug) log(LOG_DEBUG, "%s: Going In:\n", __func__), traverse(head, p); #endif t = rn_newpair(v_arg, b, 
nodes); tt = t->rn_l; if ((cp[p->rn_off] & p->rn_bmask) == 0) p->rn_l = t; else p->rn_r = t; x->rn_p = t; t->rn_p = p; /* frees x, p as temp vars below */ if ((cp[t->rn_off] & t->rn_bmask) == 0) { t->rn_r = x; } else { t->rn_r = tt; t->rn_l = x; } #ifdef RN_DEBUG if (rn_debug) { log(LOG_DEBUG, "%s: Coming Out:\n", __func__), traverse(head, p); } #endif /* RN_DEBUG */ } return tt; } struct radix_node * rn_addmask( const void *n_arg, int search, int skip) { const char *netmask = n_arg; const char *cp; const char *cplim; struct radix_node *x; struct radix_node *saved_x; int b = 0, mlen, j; int maskduplicated, m0, isnormal; static int last_zeroed = 0; if ((mlen = *(const u8 *)netmask) > max_keylen) mlen = max_keylen; if (skip == 0) skip = 1; if (mlen <= skip) return mask_rnhead->rnh_nodes; if (skip > 1) memmove(addmask_key + 1, rn_ones + 1, skip - 1); if ((m0 = mlen) > skip) memmove(addmask_key + skip, netmask + skip, mlen - skip); /* * Trim trailing zeroes. */ for (cp = addmask_key + mlen; (cp > addmask_key) && cp[-1] == 0;) cp--; mlen = cp - addmask_key; if (mlen <= skip) { if (m0 >= last_zeroed) last_zeroed = mlen; return mask_rnhead->rnh_nodes; } if (m0 < last_zeroed) clib_memset(addmask_key + m0, 0, last_zeroed - m0); *addmask_key = last_zeroed = mlen; x = rn_search(addmask_key, rn_masktop); if (memcmp(addmask_key, x->rn_key, mlen) != 0) x = 0; if (x || search) return x; R_Malloc(x, struct radix_node *, max_keylen + 2 * sizeof (*x)); if ((saved_x = x) == NULL) return NULL; clib_memset(x, 0, max_keylen + 2 * sizeof (*x)); cp = netmask = (void *)(x + 2); memmove(x + 2, addmask_key, mlen); x = rn_insert(cp, mask_rnhead, &maskduplicated, x); if (maskduplicated) { log(LOG_ERR, "rn_addmask: mask impossibly already in tree\n"); Free(saved_x); return x; } /* * Calculate index of mask, and check for normalcy. 
*/ cplim = netmask + mlen; isnormal = 1; for (cp = netmask + skip; (cp < cplim) && *(const u8 *)cp == 0xff;) cp++; if (cp != cplim) { for (j = 0x80; (j & *cp) != 0; j >>= 1) b++; if (*cp != normal_chars[b] || cp != (cplim - 1)) isnormal = 0; } b += (cp - netmask) << 3; x->rn_b = -1 - b; if (isnormal) x->rn_flags |= RNF_NORMAL; return x; } static int /* XXX: arbitrary ordering for non-contiguous masks */ rn_lexobetter( const void *m_arg, const void *n_arg) { const u8 *mp = m_arg; const u8 *np = n_arg; const u8 *lim; if (*mp > *np) return 1; /* not really, but need to check longer one first */ if (*mp == *np) for (lim = mp + *mp; mp < lim;) if (*mp++ > *np++) return 1; return 0; } static struct radix_mask * rn_new_radix_mask( struct radix_node *tt, struct radix_mask *next) { struct radix_mask *m; m = rm_alloc(); if (m == NULL) { log(LOG_ERR, "Mask for route not entered\n"); return NULL; } clib_memset(m, 0, sizeof(*m)); m->rm_b = tt->rn_b; m->rm_flags = tt->rn_flags; if (tt->rn_flags & RNF_NORMAL) m->rm_leaf = tt; else m->rm_mask = tt->rn_mask; m->rm_mklist = next; tt->rn_mklist = m; return m; } struct radix_node * rn_addroute( const void *v_arg, const void *n_arg, struct radix_node_head *head, struct radix_node treenodes[2]) { const char *v = v_arg, *netmask = n_arg; struct radix_node *t, *x = NULL, *tt; struct radix_node *saved_tt, *top = head->rnh_treetop; short b = 0, b_leaf = 0; int keyduplicated; const char *mmask; struct radix_mask *m, **mp; /* * In dealing with non-contiguous masks, there may be * many different routes which have the same mask. * We will find it useful to have a unique pointer to * the mask to speed avoiding duplicate references at * nodes and possibly save time in calculating indices. 
*/ if (netmask != NULL) { if ((x = rn_addmask(netmask, 0, top->rn_off)) == NULL) return NULL; b_leaf = x->rn_b; b = -1 - x->rn_b; netmask = x->rn_key; } /* * Deal with duplicated keys: attach node to previous instance */ saved_tt = tt = rn_insert(v, head, &keyduplicated, treenodes); if (keyduplicated) { for (t = tt; tt != NULL; t = tt, tt = tt->rn_dupedkey) { if (tt->rn_mask == netmask) return NULL; if (netmask == NULL || (tt->rn_mask != NULL && (b_leaf < tt->rn_b || /* index(netmask) > node */ rn_refines(netmask, tt->rn_mask) || rn_lexobetter(netmask, tt->rn_mask)))) break; } /* * If the mask is not duplicated, we wouldn't * find it among possible duplicate key entries * anyway, so the above test doesn't hurt. * * We sort the masks for a duplicated key the same way as * in a masklist -- most specific to least specific. * This may require the unfortunate nuisance of relocating * the head of the list. * * We also reverse, or doubly link the list through the * parent pointer. */ if (tt == saved_tt) { struct radix_node *xx = x; /* link in at head of list */ (tt = treenodes)->rn_dupedkey = t; tt->rn_flags = t->rn_flags; tt->rn_p = x = t->rn_p; t->rn_p = tt; if (x->rn_l == t) x->rn_l = tt; else x->rn_r = tt; saved_tt = tt; x = xx; } else { (tt = treenodes)->rn_dupedkey = t->rn_dupedkey; t->rn_dupedkey = tt; tt->rn_p = t; if (tt->rn_dupedkey) tt->rn_dupedkey->rn_p = tt; } tt->rn_key = v; tt->rn_b = -1; tt->rn_flags = RNF_ACTIVE; } /* * Put mask in tree. 
*/ if (netmask != NULL) { tt->rn_mask = netmask; tt->rn_b = x->rn_b; tt->rn_flags |= x->rn_flags & RNF_NORMAL; } t = saved_tt->rn_p; if (keyduplicated) goto on2; b_leaf = -1 - t->rn_b; if (t->rn_r == saved_tt) x = t->rn_l; else x = t->rn_r; /* Promote general routes from below */ if (x->rn_b < 0) { for (mp = &t->rn_mklist; x != NULL; x = x->rn_dupedkey) { if (x->rn_mask != NULL && x->rn_b >= b_leaf && x->rn_mklist == NULL) { *mp = m = rn_new_radix_mask(x, NULL); if (m != NULL) mp = &m->rm_mklist; } } } else if (x->rn_mklist != NULL) { /* * Skip over masks whose index is > that of new node */ for (mp = &x->rn_mklist; (m = *mp) != NULL; mp = &m->rm_mklist) if (m->rm_b >= b_leaf) break; t->rn_mklist = m; *mp = NULL; } on2: /* Add new route to highest possible ancestor's list */ if (netmask == NULL || b > t->rn_b) return tt; /* can't lift at all */ b_leaf = tt->rn_b; do { x = t; t = t->rn_p; } while (b <= t->rn_b && x != top); /* * Search through routes associated with node to * insert new route according to index. * Need same criteria as when sorting dupedkeys to avoid * double loop on deletion. 
*/ for (mp = &x->rn_mklist; (m = *mp) != NULL; mp = &m->rm_mklist) { if (m->rm_b < b_leaf) continue; if (m->rm_b > b_leaf) break; if (m->rm_flags & RNF_NORMAL) { mmask = m->rm_leaf->rn_mask; if (tt->rn_flags & RNF_NORMAL) { log(LOG_ERR, "Non-unique normal route," " mask not entered\n"); return tt; } } else mmask = m->rm_mask; if (mmask == netmask) { m->rm_refs++; tt->rn_mklist = m; return tt; } if (rn_refines(netmask, mmask) || rn_lexobetter(netmask, mmask)) break; } *mp = rn_new_radix_mask(tt, *mp); return tt; } struct radix_node * rn_delete1( const void *v_arg, const void *netmask_arg, struct radix_node_head *head, struct radix_node *rn) { struct radix_node *t, *p, *x, *tt; struct radix_mask *m, *saved_m, **mp; struct radix_node *dupedkey, *saved_tt, *top; const char *v, *netmask; int b, head_off, vlen; v = v_arg; netmask = netmask_arg; x = head->rnh_treetop; tt = rn_search(v, x); head_off = x->rn_off; vlen = *(const u8 *)v; saved_tt = tt; top = x; if (tt == NULL || memcmp(v + head_off, tt->rn_key + head_off, vlen - head_off) != 0) return NULL; /* * Delete our route from mask lists. 
*/ if (netmask != NULL) { if ((x = rn_addmask(netmask, 1, head_off)) == NULL) return NULL; netmask = x->rn_key; while (tt->rn_mask != netmask) if ((tt = tt->rn_dupedkey) == NULL) return NULL; } if (tt->rn_mask == NULL || (saved_m = m = tt->rn_mklist) == NULL) goto on1; if (tt->rn_flags & RNF_NORMAL) { if (m->rm_leaf != tt || m->rm_refs > 0) { log(LOG_ERR, "rn_delete: inconsistent annotation\n"); return NULL; /* dangling ref could cause disaster */ } } else { if (m->rm_mask != tt->rn_mask) { log(LOG_ERR, "rn_delete: inconsistent annotation\n"); goto on1; } if (--m->rm_refs >= 0) goto on1; } b = -1 - tt->rn_b; t = saved_tt->rn_p; if (b > t->rn_b) goto on1; /* Wasn't lifted at all */ do { x = t; t = t->rn_p; } while (b <= t->rn_b && x != top); for (mp = &x->rn_mklist; (m = *mp) != NULL; mp = &m->rm_mklist) { if (m == saved_m) { *mp = m->rm_mklist; rm_free(m); break; } } if (m == NULL) { log(LOG_ERR, "rn_delete: couldn't find our annotation\n"); if (tt->rn_flags & RNF_NORMAL) return NULL; /* Dangling ref to us */ } on1: /* * Eliminate us from tree */ if (tt->rn_flags & RNF_ROOT) return NULL; #ifdef RN_DEBUG if (rn_debug) log(LOG_DEBUG, "%s: Going In:\n", __func__), traverse(head, tt); #endif t = tt->rn_p; dupedkey = saved_tt->rn_dupedkey; if (dupedkey != NULL) { /* * Here, tt is the deletion target, and * saved_tt is the head of the dupedkey chain. 
*/ if (tt == saved_tt) { x = dupedkey; x->rn_p = t; if (t->rn_l == tt) t->rn_l = x; else t->rn_r = x; } else { /* find node in front of tt on the chain */ for (x = p = saved_tt; p != NULL && p->rn_dupedkey != tt;) p = p->rn_dupedkey; if (p != NULL) { p->rn_dupedkey = tt->rn_dupedkey; if (tt->rn_dupedkey != NULL) tt->rn_dupedkey->rn_p = p; } else log(LOG_ERR, "rn_delete: couldn't find us\n"); } t = tt + 1; if (t->rn_flags & RNF_ACTIVE) { *++x = *t; p = t->rn_p; if (p->rn_l == t) p->rn_l = x; else p->rn_r = x; x->rn_l->rn_p = x; x->rn_r->rn_p = x; } goto out; } if (t->rn_l == tt) x = t->rn_r; else x = t->rn_l; p = t->rn_p; if (p->rn_r == t) p->rn_r = x; else p->rn_l = x; x->rn_p = p; /* * Demote routes attached to us. */ if (t->rn_mklist == NULL) ; else if (x->rn_b >= 0) { for (mp = &x->rn_mklist; (m = *mp) != NULL; mp = &m->rm_mklist) ; *mp = t->rn_mklist; } else { /* If there are any key,mask pairs in a sibling duped-key chain, some subset will appear sorted in the same order attached to our mklist */ for (m = t->rn_mklist; m != NULL && x != NULL; x = x->rn_dupedkey) { if (m == x->rn_mklist) { struct radix_mask *mm = m->rm_mklist; x->rn_mklist = NULL; if (--(m->rm_refs) < 0) rm_free(m); m = mm; } } if (m != NULL) { log(LOG_ERR, "rn_delete: Orphaned Mask %p at %p\n", m, x); } } /* * We may be holding an active internal node in the tree. 
*/ x = tt + 1; if (t != x) { *t = *x; t->rn_l->rn_p = t; t->rn_r->rn_p = t; p = x->rn_p; if (p->rn_l == x) p->rn_l = t; else p->rn_r = t; } out: #ifdef RN_DEBUG if (rn_debug) { log(LOG_DEBUG, "%s: Coming Out:\n", __func__), traverse(head, tt); } #endif /* RN_DEBUG */ tt->rn_flags &= ~RNF_ACTIVE; tt[1].rn_flags &= ~RNF_ACTIVE; return tt; } struct radix_node * rn_delete( const void *v_arg, const void *netmask_arg, struct radix_node_head *head) { return rn_delete1(v_arg, netmask_arg, head, NULL); } static struct radix_node * rn_walknext(struct radix_node *rn, rn_printer_t printer, void *arg) { /* If at right child go back up, otherwise, go right */ while (rn->rn_p->rn_r == rn && (rn->rn_flags & RNF_ROOT) == 0) { if (printer != NULL) (*printer)(arg, SUBTREE_CLOSE); rn = rn->rn_p; } if (printer) rn_nodeprint(rn->rn_p, printer, arg, ""); /* Find the next *leaf* since next node might vanish, too */ for (rn = rn->rn_p->rn_r; rn->rn_b >= 0;) { if (printer != NULL) (*printer)(arg, SUBTREE_OPEN); rn = rn->rn_l; } return rn; } static struct radix_node * rn_walkfirst(struct radix_node *rn, rn_printer_t printer, void *arg) { /* First time through node, go left */ while (rn->rn_b >= 0) { if (printer != NULL) (*printer)(arg, SUBTREE_OPEN); rn = rn->rn_l; } return rn; } int rn_walktree( struct radix_node_head *h, int (*f)(struct radix_node *, void *), void *w) { int error; struct radix_node *base, *next, *rn; /* * This gets complicated because we may delete the node * while applying the function f to it, so we need to calculate * the successor node in advance. 
*/ rn = rn_walkfirst(h->rnh_treetop, NULL, NULL); for (;;) { base = rn; next = rn_walknext(rn, NULL, NULL); /* Process leaves */ while ((rn = base) != NULL) { base = rn->rn_dupedkey; if (!(rn->rn_flags & RNF_ROOT) && (error = (*f)(rn, w))) return error; } rn = next; if (rn->rn_flags & RNF_ROOT) return 0; } /* NOTREACHED */ } struct radix_node * rn_search_matched(struct radix_node_head *h, int (*matcher)(struct radix_node *, void *), void *w) { bool matched; struct radix_node *base, *next, *rn; /* * This gets complicated because we may delete the node * while applying the function f to it, so we need to calculate * the successor node in advance. */ rn = rn_walkfirst(h->rnh_treetop, NULL, NULL); for (;;) { base = rn; next = rn_walknext(rn, NULL, NULL); /* Process leaves */ while ((rn = base) != NULL) { base = rn->rn_dupedkey; if (!(rn->rn_flags & RNF_ROOT)) { matched = (*matcher)(rn, w); if (matched) return rn; } } rn = next; if (rn->rn_flags & RNF_ROOT) return NULL; } /* NOTREACHED */ } int rn_inithead(void **head, int off) { struct radix_node_head *rnh; if (*head != NULL) return 1; R_Malloc(rnh, struct radix_node_head *, sizeof (*rnh)); if (rnh == NULL) return 0; *head = rnh; return rn_inithead0(rnh, off); } int rn_inithead0(struct radix_node_head *rnh, int off) { struct radix_node *t; struct radix_node *tt; struct radix_node *ttt; clib_memset(rnh, 0, sizeof(*rnh)); t = rn_newpair(rn_zeros, off, rnh->rnh_nodes); ttt = rnh->rnh_nodes + 2; t->rn_r = ttt; t->rn_p = t; tt = t->rn_l; tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE; tt->rn_b = -1 - off; *ttt = *tt; ttt->rn_key = rn_ones; rnh->rnh_addaddr = rn_addroute; rnh->rnh_deladdr = rn_delete; rnh->rnh_matchaddr = rn_match; rnh->rnh_lookup = rn_lookup; rnh->rnh_treetop = t; return 1; } static clib_error_t * rn_module_init (vlib_main_t * vm) { char *cp, *cplim; R_Malloc(rn_zeros, char *, 3 * max_keylen); if (rn_zeros == NULL) return (clib_error_return (0, "RN Zeros...")); clib_memset(rn_zeros, 0, 3 * max_keylen); 
rn_ones = cp = rn_zeros + max_keylen; addmask_key = cplim = rn_ones + max_keylen; while (cp < cplim) *cp++ = -1; if (rn_inithead((void *)&mask_rnhead, 0) == 0) return (clib_error_return (0, "RN Init 2")); return (NULL); } VLIB_INIT_FUNCTION(rn_module_init);