/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_neon_h
#define included_vector_neon_h
#include <arm_neon.h>

/* Arithmetic */
#define u16x8_sub_saturate(a,b) vqsubq_u16(a,b)
#define i16x8_sub_saturate(a,b) vqsubq_s16(a,b)
/* Dummy identity reinterpret, to keep the macros below uniform */
#define vreinterpretq_u8_u8(a)  a
/* Emulate vminvq_u64, which the architecture does not provide */
#define vminvq_u64(x)   \
({  \
  u64 x0 = vgetq_lane_u64(x, 0);    \
  u64 x1 = vgetq_lane_u64(x, 1);    \
  x0 < x1 ? x0 : x1;    \
})

/* Converts a per-byte all-ones/all-zeros compare mask into a 16-bit bitmap. */
always_inline u32
u8x16_compare_byte_mask (u8x16 v)
{
  uint8x16_t mask = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
    0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80
  };
  /* v --> [0xFF, 0x00, 0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0x00, ... ] */
  uint8x16_t x = vandq_u8 (v, mask);
  /* after v & mask,
   * x --> [0x01, 0x00, 0x04, 0x08, 0x10, 0x00, 0x40, 0x00, ... ] */
  uint64x2_t x64 = vpaddlq_u32 (vpaddlq_u16 (vpaddlq_u8 (x)));
  /* after merge, x64 --> [0x5D, 0x.. ] */
  return (u32) (vgetq_lane_u64 (x64, 0) + (vgetq_lane_u64 (x64, 1) << 8));
}
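
/* Example (illustrative sketch, with assumed inputs a and b): extract a
 * match bitmap from a byte-wise compare.
 *
 *   u32 m = u8x16_compare_byte_mask ((u8x16) vceqq_u8 (a, b));
 *
 * Bit n of m is set iff a[n] == b[n].
 */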

/* *INDENT-OFF* */
#define foreach_neon_vec128i \
  _(i,8,16,s8) _(i,16,8,s16) _(i,32,4,s32)  _(i,64,2,s64)
#define foreach_neon_vec128u \
  _(u,8,16,u8) _(u,16,8,u16) _(u,32,4,u32)  _(u,64,2,u64)
#define foreach_neon_vec128f \
  _(f,32,4,f32) _(f,64,2,f64)

#define _(t, s, c, i) \
static_always_inline t##s##x##c						\
t##s##x##c##_splat (t##s x)						\
{ return (t##s##x##c) vdupq_n_##i (x); }				\
\
static_always_inline t##s##x##c						\
t##s##x##c##_load_unaligned (void *p)					\
{ return (t##s##x##c) vld1q_##i (p); }					\
\
static_always_inline void						\
t##s##x##c##_store_unaligned (t##s##x##c v, void *p)			\
{ vst1q_##i (p, v); }							\
\
static_always_inline int						\
t##s##x##c##_is_all_zero (t##s##x##c x)					\
{ return !!(vminvq_u##s (vceqq_##i (vdupq_n_##i (0), x))); }		\
\
static_always_inline int						\
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b)			\
{ return !!(vminvq_u##s (vceqq_##i (a, b))); }				\
\
static_always_inline int						\
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x)			\
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); }		\
\
static_always_inline u32						\
t##s##x##c##_zero_byte_mask (t##s##x##c x)			\
{ uint8x16_t v = vreinterpretq_u8_u##s (vceqq_##i (vdupq_n_##i (0), x));  \
  return u8x16_compare_byte_mask (v); } \
\
static_always_inline u##s##x##c						\
t##s##x##c##_is_greater (t##s##x##c a, t##s##x##c b)			\
{ return (u##s##x##c) vcgtq_##i (a, b); }				\
\
static_always_inline t##s##x##c						\
t##s##x##c##_blend (t##s##x##c dst, t##s##x##c src, u##s##x##c mask)	\
{ return (t##s##x##c) vbslq_##i (mask, src, dst); }

foreach_neon_vec128i foreach_neon_vec128u

#undef _
/* *INDENT-ON* */
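
/* Example (illustrative sketch): the block above generates, among others,
 * u32x4_splat, u32x4_load_unaligned and u32x4_is_all_equal.  'p' is an
 * assumed pointer to at least 16 readable bytes.
 *
 *   u32x4 v = u32x4_load_unaligned (p);
 *   int all_ones = u32x4_is_all_equal (v, ~0);
 */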

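/* Swap the two bytes inside each 16-bit lane, e.g. host <-> network order. */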
static_always_inline u16x8
u16x8_byte_swap (u16x8 v)
{
  return (u16x8) vrev16q_u8 ((u8x16) v);
}

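/* Per-byte table lookup: result[i] = v[m[i]]; lanes where m[i] >= 16 yield 0. */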
static_always_inline u8x16
u8x16_shuffle (u8x16 v, u8x16 m)
{
  return (u8x16) vqtbl1q_u8 (v, m);
}

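/* Pairwise horizontal add:
 * { v1[0]+v1[1], v1[2]+v1[3], v2[0]+v2[1], v2[2]+v2[3] }. */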
static_always_inline u32x4
u32x4_hadd (u32x4 v1, u32x4 v2)
{
  return (u32x4) vpaddq_u32 (v1, v2);
}

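/* Zero-extend the two low u32 lanes to u64. */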
static_always_inline u64x2
u32x4_extend_to_u64x2 (u32x4 v)
{
  return vmovl_u32 (vget_low_u32 (v));
}

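/* Zero-extend the two high u32 lanes to u64. */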
static_always_inline u64x2
u32x4_extend_to_u64x2_high (u32x4 v)
{
  return vmovl_high_u32 (v);
}

/* Creates a mask made up of the MSB of each byte of the source vector */
static_always_inline u16
u8x16_msb_mask (u8x16 v)
{
  int8x16_t shift =
    { -7, -6, -5, -4, -3, -2, -1, 0, -7, -6, -5, -4, -3, -2, -1, 0 };
  /* v --> [0x80, 0x7F, 0xF0, 0xAF, 0xF0, 0x00, 0xF2, 0x00, ... ] */
  uint8x16_t x = vshlq_u8 (vandq_u8 (v, vdupq_n_u8 (0x80)), shift);
  /* after (v & 0x80) >> shift,
   * x --> [0x01, 0x00, 0x04, 0x08, 0x10, 0x00, 0x40, 0x00, ... ] */
  uint64x2_t x64 = vpaddlq_u32 (vpaddlq_u16 (vpaddlq_u8 (x)));
  /* after merge, x64 --> [0x5D, 0x.. ] */
  return (u16) (vgetq_lane_u64 (x64, 0) + (vgetq_lane_u64 (x64, 1) << 8));
}
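
/* Example (illustrative sketch, with assumed inputs a and b): the NEON
 * counterpart of x86 _mm_movemask_epi8 applied to a compare result.
 *
 *   u16 m = u8x16_msb_mask ((u8x16) vceqq_u8 (a, b));
 */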

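/* Gather: load each lane from an independent address. */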
static_always_inline u64x2
u64x2_gather (void *p0, void *p1)
{
  u64x2 r = vdupq_n_u64 (*(u64 *) p0);
  r = vsetq_lane_u64 (*(u64 *) p1, r, 1);
  return r;
}

static_always_inline u32x4
u32x4_gather (void *p0, void *p1, void *p2, void *p3)
{
  u32x4 r = vdupq_n_u32 (*(u32 *) p0);
  r = vsetq_lane_u32 (*(u32 *) p1, r, 1);
  r = vsetq_lane_u32 (*(u32 *) p2, r, 2);
  r = vsetq_lane_u32 (*(u32 *) p3, r, 3);
  return r;
}

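/* Scatter: store each lane to an independent address. */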
static_always_inline void
u64x2_scatter (u64x2 r, void *p0, void *p1)
{
  *(u64 *) p0 = vgetq_lane_u64 (r, 0);
  *(u64 *) p1 = vgetq_lane_u64 (r, 1);
}

static_always_inline void
u32x4_scatter (u32x4 r, void *p0, void *p1, void *p2, void *p3)
{
  *(u32 *) p0 = vgetq_lane_u32 (r, 0);
  *(u32 *) p1 = vgetq_lane_u32 (r, 1);
  *(u32 *) p2 = vgetq_lane_u32 (r, 2);
  *(u32 *) p3 = vgetq_lane_u32 (r, 3);
}

#define CLIB_HAVE_VEC128_MSB_MASK

#define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE
#define CLIB_VEC128_SPLAT_DEFINED
#endif /* included_vector_neon_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */