/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_neon_h
#define included_vector_neon_h
#include <arm_neon.h>

/* Identity "reinterpret" for the u8 case; keeps the uniform macros below
   consistent across lane sizes. */
#define vreinterpretq_u8_u8(a)  a
/* NEON has no vminvq_u64 intrinsic; provide an equivalent so the uniform
   macros below also cover 64-bit lanes. */
#define vminvq_u64(x)   \
({  \
  u64 x0 = vgetq_lane_u64(x, 0);    \
  u64 x1 = vgetq_lane_u64(x, 1);    \
  x0 < x1 ? x0 : x1;    \
})

/* Convert a per-byte compare mask (each byte all ones or all zeros) into a
   bitmap with one bit per byte. */
always_inline u32
u8x16_compare_byte_mask (u8x16 v)
{
  uint8x16_t mask = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
    0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80
  };
  /* v --> [0xFF, 0x00, 0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0x00, ... ] */
  uint8x16_t x = vandq_u8 (v, mask);
  /* after v & mask,
   * x --> [0x01, 0x00, 0x04, 0x08, 0x10, 0x00, 0x40, 0x00, ... ] */
  uint64x2_t x64 = vpaddlq_u32 (vpaddlq_u16 (vpaddlq_u8 (x)));
  /* after merge, x64 --> [0x5D, 0x.. ] */
  return (u32) (vgetq_lane_u64 (x64, 0) + (vgetq_lane_u64 (x64, 1) << 8));
}
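
/* Example (illustrative): if only bytes 0 and 2 of v are 0xFF, v & mask
 * keeps 0x01 and 0x04, the pairwise-add chain sums each 8-byte half into
 * one 64-bit lane, and the function returns 0x5 (bits 0 and 2 set, one bit
 * per byte of the original vector). */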

/* *INDENT-OFF* */
#define foreach_neon_vec128i \
  _(i,8,16,s8) _(i,16,8,s16) _(i,32,4,s32)  _(i,64,2,s64)
#define foreach_neon_vec128u \
  _(u,8,16,u8) _(u,16,8,u16) _(u,32,4,u32)  _(u,64,2,u64)
#define foreach_neon_vec128f \
  _(f,32,4,f32) _(f,64,2,f64)

#define _(t, s, c, i)                                                         \
  static_always_inline t##s##x##c t##s##x##c##_splat (t##s x)                 \
  {                                                                           \
    return (t##s##x##c) vdupq_n_##i (x);                                      \
  }                                                                           \
                                                                              \
  static_always_inline t##s##x##c t##s##x##c##_load_unaligned (void *p)       \
  {                                                                           \
    return (t##s##x##c) vld1q_##i (p);                                        \
  }                                                                           \
                                                                              \
  static_always_inline void t##s##x##c##_store_unaligned (t##s##x##c v,       \
							  void *p)            \
  {                                                                           \
    vst1q_##i (p, v);                                                         \
  }                                                                           \
                                                                              \
  static_always_inline int t##s##x##c##_is_all_zero (t##s##x##c x)            \
  {                                                                           \
    return !!(vminvq_u##s (vceqq_##i (vdupq_n_##i (0), x)));                  \
  }                                                                           \
                                                                              \
  static_always_inline int t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b) \
  {                                                                           \
    return !!(vminvq_u##s (vceqq_##i (a, b)));                                \
  }                                                                           \
  static_always_inline int t##s##x##c##_is_all_equal (t##s##x##c v, t##s x)   \
  {                                                                           \
    return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x));                 \
  };                                                                          \
                                                                              \
  static_always_inline u32 t##s##x##c##_zero_byte_mask (t##s##x##c x)         \
  {                                                                           \
    uint8x16_t v = vreinterpretq_u8_u##s (vceqq_##i (vdupq_n_##i (0), x));    \
    return u8x16_compare_byte_mask (v);                                       \
  }                                                                           \
                                                                              \
  static_always_inline t##s##x##c t##s##x##c##_add_saturate (t##s##x##c a,    \
							     t##s##x##c b)    \
  {                                                                           \
    return (t##s##x##c) vqaddq_##i (a, b);                                    \
  }                                                                           \
                                                                              \
  static_always_inline t##s##x##c t##s##x##c##_sub_saturate (t##s##x##c a,    \
							     t##s##x##c b)    \
  {                                                                           \
    return (t##s##x##c) vqsubq_##i (a, b);                                    \
  }                                                                           \
                                                                              \
  static_always_inline t##s##x##c t##s##x##c##_blend (                        \
    t##s##x##c dst, t##s##x##c src, u##s##x##c mask)                          \
  {                                                                           \
    return (t##s##x##c) vbslq_##i (mask, src, dst);                           \
  }

foreach_neon_vec128i foreach_neon_vec128u

#undef _
/* *INDENT-ON* */
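
/* For illustration, the (u, 32, 4, u32) instantiation of the template above
 * defines wrappers along these lines (a sketch of the generated prototypes,
 * not the literal preprocessor output):
 *
 *   u32x4 u32x4_splat (u32 x);                           // vdupq_n_u32
 *   u32x4 u32x4_load_unaligned (void *p);                // vld1q_u32
 *   void  u32x4_store_unaligned (u32x4 v, void *p);      // vst1q_u32
 *   int   u32x4_is_all_zero (u32x4 x);
 *   int   u32x4_is_equal (u32x4 a, u32x4 b);
 *   int   u32x4_is_all_equal (u32x4 v, u32 x);
 *   u32   u32x4_zero_byte_mask (u32x4 x);
 *   u32x4 u32x4_add_saturate (u32x4 a, u32x4 b);         // vqaddq_u32
 *   u32x4 u32x4_sub_saturate (u32x4 a, u32x4 b);         // vqsubq_u32
 *   u32x4 u32x4_blend (u32x4 dst, u32x4 src, u32x4 mask); // vbslq_u32
 */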

/* Swap the two bytes within each 16-bit lane. */
static_always_inline u16x8
u16x8_byte_swap (u16x8 v)
{
  return (u16x8) vrev16q_u8 ((u8x16) v);
}

/* Reverse the byte order within each 32-bit lane. */
static_always_inline u32x4
u32x4_byte_swap (u32x4 v)
{
  return (u32x4) vrev32q_u8 ((u8x16) v);
}

/* Horizontal (pairwise) add: sums of adjacent lanes of v1 fill the low half
   of the result, sums of adjacent lanes of v2 the high half. */
static_always_inline u32x4
u32x4_hadd (u32x4 v1, u32x4 v2)
{
  return (u32x4) vpaddq_u32 (v1, v2);
}

/* Zero-extend the two low 32-bit lanes to 64 bits. */
static_always_inline u64x2
u64x2_from_u32x4 (u32x4 v)
{
  return vmovl_u32 (vget_low_u32 (v));
}

/* Zero-extend the two high 32-bit lanes to 64 bits. */
static_always_inline u64x2
u64x2_from_u32x4_high (u32x4 v)
{
  return vmovl_high_u32 (v);
}

/* Creates a mask made up of the MSB of each byte of the source vector */
static_always_inline u16
u8x16_msb_mask (u8x16 v)
{
  int8x16_t shift =
    { -7, -6, -5, -4, -3, -2, -1, 0, -7, -6, -5, -4, -3, -2, -1, 0 };
  /* v --> [0x80, 0x7F, 0xF0, 0xAF, 0xF0, 0x00, 0xF2, 0x00, ... ] */
  uint8x16_t x = vshlq_u8 (vandq_u8 (v, vdupq_n_u8 (0x80)), shift);
  /* after (v & 0x80) >> shift,
   * x --> [0x01, 0x00, 0x04, 0x08, 0x10, 0x00, 0x40, 0x00, ... ] */
  uint64x2_t x64 = vpaddlq_u32 (vpaddlq_u16 (vpaddlq_u8 (x)));
  /* after merge, x64 --> [0x5D, 0x.. ] */
  return (u16) (vgetq_lane_u64 (x64, 0) + (vgetq_lane_u64 (x64, 1) << 8));
}
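
/* Example (illustrative): if only bytes 0 and 15 of v have their MSB set,
 * the low half contributes bit 0 and the high half, shifted left by 8,
 * contributes bit 15, so the function returns 0x8001. */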

/* Gather two u64 values from arbitrary addresses into one vector. */
static_always_inline u64x2
u64x2_gather (void *p0, void *p1)
{
  u64x2 r = vdupq_n_u64 (*(u64 *) p0);
  r = vsetq_lane_u64 (*(u64 *) p1, r, 1);
  return r;
}

/* Gather four u32 values from arbitrary addresses into one vector. */
static_always_inline u32x4
u32x4_gather (void *p0, void *p1, void *p2, void *p3)
{
  u32x4 r = vdupq_n_u32 (*(u32 *) p0);
  r = vsetq_lane_u32 (*(u32 *) p1, r, 1);
  r = vsetq_lane_u32 (*(u32 *) p2, r, 2);
  r = vsetq_lane_u32 (*(u32 *) p3, r, 3);
  return r;
}

/* Scatter the two lanes of r to arbitrary addresses. */
static_always_inline void
u64x2_scatter (u64x2 r, void *p0, void *p1)
{
  *(u64 *) p0 = vgetq_lane_u64 (r, 0);
  *(u64 *) p1 = vgetq_lane_u64 (r, 1);
}

/* Scatter the four lanes of r to arbitrary addresses. */
static_always_inline void
u32x4_scatter (u32x4 r, void *p0, void *p1, void *p2, void *p3)
{
  *(u32 *) p0 = vgetq_lane_u32 (r, 0);
  *(u32 *) p1 = vgetq_lane_u32 (r, 1);
  *(u32 *) p2 = vgetq_lane_u32 (r, 2);
  *(u32 *) p3 = vgetq_lane_u32 (r, 3);
}

static_always_inline u32
u32x4_min_scalar (u32x4 v)
{
  return vminvq_u32 (v);
}

/* Shift the whole 128-bit vector left/right by n bytes, filling the vacated
   byte positions with zeros; n must be a compile-time constant (vextq). */
#define u8x16_word_shift_left(x,n)  vextq_u8(u8x16_splat (0), x, 16 - n)
#define u8x16_word_shift_right(x,n) vextq_u8(x, u8x16_splat (0), n)
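
/* Example (illustrative): with x = { 0, 1, 2, ..., 15 },
 * u8x16_word_shift_left (x, 4)  gives { 0, 0, 0, 0, 0, 1, ..., 11 } and
 * u8x16_word_shift_right (x, 4) gives { 4, 5, ..., 15, 0, 0, 0, 0 }. */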

always_inline u32x4
u32x4_interleave_hi (u32x4 a, u32x4 b)
{
  return (u32x4) vzip2q_u32 (a, b);
}

always_inline u32x4
u32x4_interleave_lo (u32x4 a, u32x4 b)
{
  return (u32x4) vzip1q_u32 (a, b);
}

/* Reverse the order of the 16 bytes. */
static_always_inline u8x16
u8x16_reflect (u8x16 v)
{
  u8x16 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
  };
  return (u8x16) vqtbl1q_u8 (v, mask);
}

/* Three-way XOR (a ^ b ^ c); uses the SHA3 EOR3 instruction on targets and
   compilers where the inline-asm path below applies. */
static_always_inline u8x16
u8x16_xor3 (u8x16 a, u8x16 b, u8x16 c)
{
#if __GNUC__ == 8 && __ARM_FEATURE_SHA3 == 1
  u8x16 r;
__asm__ ("eor3 %0.16b,%1.16b,%2.16b,%3.16b": "=w" (r): "0" (a), "w" (b), "w" (c):);
  return r;
#endif
  return a ^ b ^ c;
}

/* Load the first n (0..16) bytes from data; the remaining bytes of the
   result are zero. No bytes past data + n - 1 are read. */
static_always_inline u8x16
u8x16_load_partial (u8 *data, uword n)
{
  u8x16 r = {};
  if (n > 7)
    {
      u64x2 r;
      r[1] = *(u64u *) (data + n - 8);
      r >>= (16 - n) * 8;
      r[0] = *(u64u *) data;
      return (u8x16) r;
    }
  else if (n > 3)
    {
      u32x4 r = {};
      r[1] = *(u32u *) (data + n - 4);
      r >>= (8 - n) * 8;
      r[0] = *(u32u *) data;
      return (u8x16) r;
    }
  else if (n > 1)
    {
      u16x8 r = {};
      r[1] = *(u16u *) (data + n - 2);
      r >>= (4 - n) * 8;
      r[0] = *(u16u *) data;
      return (u8x16) r;
    }
  else if (n > 0)
    r[0] = *data;
  return r;
}

/* Store the first n (0..16) bytes of r to data. No bytes past
   data + n - 1 are written. */
static_always_inline void
u8x16_store_partial (u8x16 r, u8 *data, uword n)
{
  if (n > 7)
    {
      *(u64u *) (data + n - 8) = ((u64x2) r)[1] << ((16 - n) * 8);
      *(u64u *) data = ((u64x2) r)[0];
    }
  else if (n > 3)
    {
      *(u32u *) (data + n - 4) = ((u32x4) r)[1] << ((8 - n) * 8);
      *(u32u *) data = ((u32x4) r)[0];
    }
  else if (n > 1)
    {
      *(u16u *) (data + n - 2) = ((u16x8) r)[1] << ((4 - n) * 8);
      *(u16u *) data = ((u16x8) r)[0];
    }
  else if (n > 0)
    data[0] = r[0];
}
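
/* Usage sketch (hypothetical helper, not part of this header): the two
 * partial accessors above can be combined to copy up to 16 bytes without
 * touching memory past either buffer:
 *
 *   static_always_inline void
 *   copy_up_to_16 (u8 *dst, u8 *src, uword n)
 *   {
 *     u8x16_store_partial (u8x16_load_partial (src, n), dst, n);
 *   }
 */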

#define CLIB_HAVE_VEC128_MSB_MASK

#define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
#define CLIB_VEC128_SPLAT_DEFINED
#endif /* included_vector_neon_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */