path: root/src/vnet/adj/adj_midchain_delegate.c
Diffstat (limited to 'src/vnet/adj/adj_midchain_delegate.c')
-rw-r--r--  src/vnet/adj/adj_midchain_delegate.c | 183
1 file changed, 183 insertions(+), 0 deletions(-)
diff --git a/src/vnet/adj/adj_midchain_delegate.c b/src/vnet/adj/adj_midchain_delegate.c
new file mode 100644
index 00000000000..922283e92cf
--- /dev/null
+++ b/src/vnet/adj/adj_midchain_delegate.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/adj/adj_delegate.h>
+#include <vnet/adj/adj_midchain.h>
+#include <vnet/fib/fib_table.h>
+
+/**
+ * Midchain stacker delegate
+ */
+typedef struct adj_midchain_delegate_t_
+{
+ /**
+ * The FIB entry we are stacked on
+ */
+ fib_node_index_t amd_fei;
+
+ /**
+ * The sibling entry on the FIB entry
+ */
+ u32 amd_sibling;
+} adj_midchain_delegate_t;
+
+/**
+ * Pool of delegates
+ */
+static adj_midchain_delegate_t *amd_pool;
+
+static inline const adj_midchain_delegate_t*
+adj_midchain_from_const_base (const adj_delegate_t *ad)
+{
+ if (NULL != ad)
+ {
+ return (pool_elt_at_index(amd_pool, ad->ad_index));
+ }
+ return (NULL);
+}
+
+static void
+adj_midchain_delegate_restack_i (adj_index_t ai,
+ adj_midchain_delegate_t *amd)
+{
+ if (vnet_sw_interface_is_admin_up (vnet_get_main (),
+ adj_get_sw_if_index(ai)) &&
+ (FIB_NODE_INDEX_INVALID != amd->amd_fei))
+ {
+ const fib_prefix_t *pfx;
+
+ pfx = fib_entry_get_prefix(amd->amd_fei);
+
+ adj_nbr_midchain_stack_on_fib_entry (
+ ai,
+ amd->amd_fei,
+ fib_forw_chain_type_from_fib_proto(pfx->fp_proto));
+ }
+ else
+ {
+ adj_nbr_midchain_unstack (ai);
+ }
+}
+
+void
+adj_midchain_delegate_restack (adj_index_t ai)
+{
+ adj_midchain_delegate_t *amd;
+ ip_adjacency_t *adj;
+ adj_delegate_t *ad;
+
+ /*
+ * if there's a delegate already, use that
+ */
+ adj = adj_get(ai);
+ ad = adj_delegate_get(adj, ADJ_DELEGATE_MIDCHAIN);
+
+ if (NULL != ad)
+ {
+ amd = pool_elt_at_index(amd_pool, ad->ad_index);
+
+ adj_midchain_delegate_restack_i(ai, amd);
+ }
+ /*
+ * else
+ * nothing to stack
+ */
+}
+
+void
+adj_midchain_delegate_stack (adj_index_t ai,
+ u32 fib_index,
+ const fib_prefix_t *pfx)
+{
+ adj_midchain_delegate_t *amd;
+ ip_adjacency_t *adj;
+ adj_delegate_t *ad;
+
+ /*
+ * if there's a delegate already, use that
+ */
+ adj = adj_get(ai);
+ ad = adj_delegate_get(adj, ADJ_DELEGATE_MIDCHAIN);
+
+ if (NULL != ad)
+ {
+ amd = pool_elt_at_index(amd_pool, ad->ad_index);
+ }
+ else
+ {
+ pool_get(amd_pool, amd);
+ amd->amd_fei = FIB_NODE_INDEX_INVALID;
+ adj_delegate_add(adj, ADJ_DELEGATE_MIDCHAIN, amd - amd_pool);
+
+ amd->amd_fei = fib_table_entry_special_add(fib_index,
+ pfx,
+ FIB_SOURCE_RR,
+ FIB_ENTRY_FLAG_NONE);
+ amd->amd_sibling = fib_entry_child_add(amd->amd_fei,
+ FIB_NODE_TYPE_ADJ,
+ ai);
+ }
+ adj_midchain_delegate_restack_i(ai, amd);
+}
+
+void
+adj_midchain_delegate_unstack (adj_index_t ai)
+{
+ adj_nbr_midchain_unstack(ai);
+}
+
+static void
+adj_midchain_delegate_adj_deleted (adj_delegate_t *ad)
+{
+ adj_midchain_delegate_t *amd;
+
+ amd = pool_elt_at_index(amd_pool, ad->ad_index);
+
+ fib_entry_child_remove (amd->amd_fei, amd->amd_sibling);
+ fib_table_entry_delete_index (amd->amd_fei, FIB_SOURCE_RR);
+
+ pool_put(amd_pool, amd);
+}
+
+/**
+ * Print a delegate that represents MIDCHAIN tracking
+ */
+static u8 *
+adj_midchain_delegate_fmt (const adj_delegate_t *aed, u8 *s)
+{
+ const adj_midchain_delegate_t *amd = adj_midchain_from_const_base(aed);
+
+ s = format(s, "MIDCHAIN:[fib-entry:%d]", amd->amd_fei);
+
+ return (s);
+}
+
+static const adj_delegate_vft_t adj_delegate_vft = {
+ .adv_format = adj_midchain_delegate_fmt,
+ .adv_adj_deleted = adj_midchain_delegate_adj_deleted,
+};
+
+static clib_error_t *
+adj_midchain_delegate_module_init (vlib_main_t * vm)
+{
+ clib_error_t * error = NULL;
+
+ adj_delegate_register_type (ADJ_DELEGATE_MIDCHAIN, &adj_delegate_vft);
+
+ return (error);
+}
+
+VLIB_INIT_FUNCTION (adj_midchain_delegate_module_init);
+
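The commit's public surface is the three adj_midchain_delegate_* functions above. Below is a minimal sketch of how the owner of a midchain adjacency (e.g. a tunnel) might drive them; my_tunnel_stack and its parameters are hypothetical names for illustration, while the adj_midchain_delegate_* calls and the fib_prefix_t layout come from the code above.

    #include <vnet/adj/adj_midchain.h>
    #include <vnet/fib/fib_table.h>

    /* hypothetical helper: stack a tunnel's midchain adjacency on the
     * FIB entry that resolves the tunnel destination */
    static void
    my_tunnel_stack (adj_index_t ai, u32 fib_index,
                     const ip46_address_t * dst, fib_protocol_t proto)
    {
        fib_prefix_t pfx = {
            .fp_proto = proto,
            .fp_len = (FIB_PROTOCOL_IP4 == proto ? 32 : 128),
            .fp_addr = *dst,
        };

        /* the first call creates the delegate, sources a FIB_SOURCE_RR
         * entry for the prefix and stacks the adj on it; subsequent
         * calls just restack */
        adj_midchain_delegate_stack (ai, fib_index, &pfx);
    }

On an interface admin up/down event the owner would call adj_midchain_delegate_restack (ai), which either stacks the adjacency on the FIB entry or, if the interface is down, unstacks it. Teardown is automatic: adv_adj_deleted removes the FIB child and the RR-sourced entry when the adjacency itself is deleted.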
path: root/src/vppinfra/vector_neon.h
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_neon_h
#define included_vector_neon_h
#include <arm_neon.h>

/* Arithmetic */
#define u16x8_sub_saturate(a,b) vqsubq_u16(a,b)
#define i16x8_sub_saturate(a,b) vqsubq_s16(a,b)
/* Dummy. Aid making uniform macros */
#define vreinterpretq_u8_u8(a)  a
/* Implement the missing intrinsics to make uniform macros */
#define vminvq_u64(x)   \
({  \
  u64 x0 = vgetq_lane_u64(x, 0);    \
  u64 x1 = vgetq_lane_u64(x, 1);    \
  x0 < x1 ? x0 : x1;    \
})

/* Converts all ones/zeros compare mask to bitmap. */
always_inline u32
u8x16_compare_byte_mask (u8x16 v)
{
  uint8x16_t mask = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
    0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80
  };
  /* v --> [0xFF, 0x00, 0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0x00, ... ] */
  uint8x16_t x = vandq_u8 (v, mask);
  /* after v & mask,
   * x --> [0x01, 0x00, 0x04, 0x08, 0x10, 0x00, 0x40, 0x00, ... ] */
  uint64x2_t x64 = vpaddlq_u32 (vpaddlq_u16 (vpaddlq_u8 (x)));
  /* after merge, x64 --> [0x5D, 0x.. ] */
  return (u32) (vgetq_lane_u64 (x64, 0) + (vgetq_lane_u64 (x64, 1) << 8));
}

/* *INDENT-OFF* */
#define foreach_neon_vec128i \
  _(i,8,16,s8) _(i,16,8,s16) _(i,32,4,s32)  _(i,64,2,s64)
#define foreach_neon_vec128u \
  _(u,8,16,u8) _(u,16,8,u16) _(u,32,4,u32)  _(u,64,2,u64)
#define foreach_neon_vec128f \
  _(f,32,4,f32) _(f,64,2,f64)

#define _(t, s, c, i) \
static_always_inline t##s##x##c						\
t##s##x##c##_splat (t##s x)						\
{ return (t##s##x##c) vdupq_n_##i (x); }				\
\
static_always_inline t##s##x##c						\
t##s##x##c##_load_unaligned (void *p)					\
{ return (t##s##x##c) vld1q_##i (p); }					\
\
static_always_inline void						\
t##s##x##c##_store_unaligned (t##s##x##c v, void *p)			\
{ vst1q_##i (p, v); }							\
\
static_always_inline int						\
t##s##x##c##_is_all_zero (t##s##x##c x)					\
{ return !!(vminvq_u##s (vceqq_##i (vdupq_n_##i(0), x))); }						\
\
static_always_inline int						\
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b)			\
{ return !!(vminvq_u##s (vceqq_##i (a, b))); }				\
\
static_always_inline int						\
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x)			\
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); }		\
\
static_always_inline u32						\
t##s##x##c##_zero_byte_mask (t##s##x##c x)			\
{ uint8x16_t v = vreinterpretq_u8_u##s (vceqq_##i (vdupq_n_##i(0), x));  \
  return u8x16_compare_byte_mask (v); } \
\
static_always_inline u##s##x##c						\
t##s##x##c##_is_greater (t##s##x##c a, t##s##x##c b)			\
{ return (u##s##x##c) vcgtq_##i (a, b); }				\
\
static_always_inline t##s##x##c						\
t##s##x##c##_blend (t##s##x##c dst, t##s##x##c src, u##s##x##c mask)	\
{ return (t##s##x##c) vbslq_##i (mask, src, dst); }

foreach_neon_vec128i foreach_neon_vec128u

#undef _
/* *INDENT-ON* */

static_always_inline u16x8
u16x8_byte_swap (u16x8 v)
{
  return (u16x8) vrev16q_u8 ((u8x16) v);
}

static_always_inline u8x16
u8x16_shuffle (u8x16 v, u8x16 m)
{
  return (u8x16) vqtbl1q_u8 (v, m);
}

static_always_inline u32x4
u32x4_hadd (u32x4 v1, u32x4 v2)
{
  return (u32x4) vpaddq_u32 (v1, v2);
}

static_always_inline u64x2
u32x4_extend_to_u64x2 (u32x4 v)
{
  return vmovl_u32 (vget_low_u32 (v));
}

static_always_inline u64x2
u32x4_extend_to_u64x2_high (u32x4 v)
{
  return vmovl_high_u32 (v);
}

/* Creates a mask made up of the MSB of each byte of the source vector */
static_always_inline u16
u8x16_msb_mask (u8x16 v)
{
  int8x16_t shift =
    { -7, -6, -5, -4, -3, -2, -1, 0, -7, -6, -5, -4, -3, -2, -1, 0 };
  /* v --> [0x80, 0x7F, 0xF0, 0xAF, 0xF0, 0x00, 0xF2, 0x00, ... ] */
  uint8x16_t x = vshlq_u8 (vandq_u8 (v, vdupq_n_u8 (0x80)), shift);
  /* after (v & 0x80) >> shift,
   * x --> [0x01, 0x00, 0x04, 0x08, 0x10, 0x00, 0x40, 0x00, ... ] */
  uint64x2_t x64 = vpaddlq_u32 (vpaddlq_u16 (vpaddlq_u8 (x)));
  /* after merge, x64 --> [0x5D, 0x.. ] */
  return (u16) (vgetq_lane_u64 (x64, 0) + (vgetq_lane_u64 (x64, 1) << 8));
}

#define CLIB_HAVE_VEC128_MSB_MASK

#define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
#define CLIB_VEC128_SPLAT_DEFINED
#endif /* included_vector_neon_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
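A brief usage illustration of the helpers above (a sketch, not part of the header; first_nul_offset is a hypothetical name, and the u8/u32/u8x16 types plus static_always_inline are assumed from the surrounding vppinfra environment): with the macro-generated u8x16_zero_byte_mask, scanning a 16-byte block for a NUL byte reduces to one vector compare plus a trailing-zero count.

    static_always_inline int
    first_nul_offset (const u8 * p)
    {
      u8x16 v = u8x16_load_unaligned ((void *) p);
      /* bit n of mask is set iff byte n of v is zero */
      u32 mask = u8x16_zero_byte_mask (v);
      return mask ? __builtin_ctz (mask) : -1;
    }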