path: root/src/vnet/ipsec/ipsec.api
author    Jakub Grajciar <jgrajcia@cisco.com>  2019-10-14 11:33:36 +0200
committer Ole Trøan <otroan@employees.org>     2020-02-26 11:54:04 +0000
commit    666ece35cd2625bbd8b6ddadb6e87444a617df4d (patch)
tree      bcb4965e520ca6701ae47f25e19f8ef051f4e7b3 /src/vnet/ipsec/ipsec.api
parent    f712be54be8a24bea005280d6923cf856bc949c2 (diff)
ipsec: API cleanup
Use consistent API types. Change vl_api_ipsec_proto_t to IANA protocol
numbers: ESP 50, AH 51.

Type: fix
Signed-off-by: Jakub Grajciar <jgrajcia@cisco.com>
Change-Id: Ic961130ffa519d1c904d872c34f9a7461b1be77e
Signed-off-by: Jakub Grajciar <jgrajcia@cisco.com>
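For context, aligning vl_api_ipsec_proto_t with the IANA protocol numbers
means ESP and AH now carry their IP protocol values on the wire. Below is a
minimal sketch of what the generated C enum would look like after this
change (the exact form emitted by vppapigen is an assumption; the
authoritative definition lives in src/vnet/ipsec/ipsec_types.api):

/* Sketch only: IANA-aligned IPsec protocol values implied by this change. */
typedef enum
{
  IPSEC_API_PROTO_ESP = 50,	/* IP protocol 50 (ESP, RFC 4303) */
  IPSEC_API_PROTO_AH = 51,	/* IP protocol 51 (AH, RFC 4302) */
} vl_api_ipsec_proto_t;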
Diffstat (limited to 'src/vnet/ipsec/ipsec.api')
-rw-r--r--  src/vnet/ipsec/ipsec.api | 48
1 file changed, 25 insertions(+), 23 deletions(-)
diff --git a/src/vnet/ipsec/ipsec.api b/src/vnet/ipsec/ipsec.api
index b5027f60301..3f8913fe26e 100644
--- a/src/vnet/ipsec/ipsec.api
+++ b/src/vnet/ipsec/ipsec.api
@@ -14,10 +14,12 @@
* limitations under the License.
*/
-option version = "3.0.0";
+option version = "3.0.1";
import "vnet/ipsec/ipsec_types.api";
import "vnet/interface_types.api";
+import "vnet/ip/ip_types.api";
+import "vnet/interface_types.api";
/** \brief IPsec: Add/delete Security Policy Database
@param client_index - opaque cookie to identify the sender
@@ -30,7 +32,7 @@ autoreply define ipsec_spd_add_del
{
u32 client_index;
u32 context;
- u8 is_add;
+ bool is_add;
u32 spd_id;
};
@@ -49,8 +51,8 @@ autoreply define ipsec_interface_add_del_spd
u32 client_index;
u32 context;
- u8 is_add;
- u32 sw_if_index;
+ bool is_add;
+ vl_api_interface_index_t sw_if_index;
u32 spd_id;
};
@@ -90,10 +92,11 @@ typedef ipsec_spd_entry
{
u32 spd_id;
i32 priority;
- u8 is_outbound;
+ bool is_outbound;
u32 sa_id;
vl_api_ipsec_spd_action_t policy;
+ /* Which protocol?? */
u8 protocol;
// Selector
@@ -119,7 +122,7 @@ define ipsec_spd_entry_add_del
{
u32 client_index;
u32 context;
- u8 is_add;
+ bool is_add;
vl_api_ipsec_spd_entry_t entry;
};
@@ -154,7 +157,7 @@ define ipsec_spds_details {
u32 context;
u32 spd_id;
u32 npolicies;
-};
+};
/** \brief Dump ipsec policy database data
@param client_index - opaque cookie to identify the sender
@@ -189,7 +192,7 @@ define ipsec_sad_entry_add_del
{
u32 client_index;
u32 context;
- u8 is_add;
+ bool is_add;
vl_api_ipsec_sad_entry_t entry;
};
define ipsec_sad_entry_add_del_reply
@@ -304,7 +307,7 @@ define ipsec_spd_interface_dump {
define ipsec_spd_interface_details {
u32 context;
u32 spd_index;
- u32 sw_if_index;
+ vl_api_interface_index_t sw_if_index;
};
/** \brief Add or delete IPsec tunnel interface
@@ -341,9 +344,9 @@ define ipsec_spd_interface_details {
define ipsec_tunnel_if_add_del {
u32 client_index;
u32 context;
- u8 is_add;
- u8 esn;
- u8 anti_replay;
+ bool is_add;
+ bool esn;
+ bool anti_replay;
vl_api_address_t local_ip;
vl_api_address_t remote_ip;
u32 local_spi;
@@ -358,9 +361,9 @@ define ipsec_tunnel_if_add_del {
u8 local_integ_key[128];
u8 remote_integ_key_len;
u8 remote_integ_key[128];
- u8 renumber;
+ bool renumber;
u32 show_instance;
- u8 udp_encap;
+ bool udp_encap;
u32 tx_table_id;
u32 salt;
};
@@ -373,7 +376,7 @@ define ipsec_tunnel_if_add_del {
define ipsec_tunnel_if_add_del_reply {
u32 context;
i32 retval;
- u32 sw_if_index;
+ vl_api_interface_index_t sw_if_index;
};
/** \brief Dump IPsec security association
@@ -389,7 +392,7 @@ define ipsec_sa_dump {
/** \brief IPsec security association database response
@param context - sender context which was passed in the request
- @param sa_id - SA ID, policy-based SAs >=0, tunnel interface SAs = 0
+ @param sa_id - SA ID, policy-based SAs >=0, tunnel interface SAs = 0
@param sw_if_index - sw_if_index of tunnel interface, policy-based SAs = ~0
@param spi - security parameter index
@param protocol - IPsec protocol (value from ipsec_protocol_t)
@@ -405,9 +408,9 @@ define ipsec_sa_dump {
@param is_tunnel_ipv6 - If using tunnel mode, endpoints are IPv6
@param tunnel_src_addr - Tunnel source address if using tunnel mode
@param tunnel_dst_addr - Tunnel destination address is using tunnel mode
- @param salt - 4 byte salt
+ @param salt - 4 byte salt
@param seq - current sequence number for outbound
- @param seq_hi - high 32 bits of ESN for outbound
+ @param seq_hi - high 32 bits of ESN for outbound
@param last_seq - highest sequence number received inbound
@param last_seq_hi - high 32 bits of highest ESN received inbound
@param replay_window - bit map of seq nums received relative to last_seq if using anti-replay
@@ -418,7 +421,7 @@ define ipsec_sa_details {
u32 context;
vl_api_ipsec_sad_entry_t entry;
- u32 sw_if_index;
+ vl_api_interface_index_t sw_if_index;
u32 salt;
u64 seq_outbound;
u64 last_seq_inbound;
@@ -440,7 +443,7 @@ define ipsec_sa_details {
autoreply define ipsec_tunnel_if_set_sa {
u32 client_index;
u32 context;
- u32 sw_if_index;
+ vl_api_interface_index_t sw_if_index;
u32 sa_id;
u8 is_outbound;
};
@@ -462,10 +465,10 @@ define ipsec_backend_dump {
*/
define ipsec_backend_details {
u32 context;
- u8 name[128];
+ string name[128];
vl_api_ipsec_proto_t protocol;
u8 index;
- u8 active;
+ bool active;
};
/** \brief Select IPsec backend
@@ -486,4 +489,3 @@ autoreply define ipsec_select_backend {
* eval: (c-set-style "gnu")
* End:
*/
-
path: root/src/vppinfra/vector_avx2.h
/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_vector_avx2_h
#define included_vector_avx2_h

#include <vppinfra/clib.h>
#include <x86intrin.h>

/* *INDENT-OFF* */
#define foreach_avx2_vec256i \
  _(i,8,32,epi8) _(i,16,16,epi16) _(i,32,8,epi32)  _(i,64,4,epi64)
#define foreach_avx2_vec256u \
  _(u,8,32,epi8) _(u,16,16,epi16) _(u,32,8,epi32)  _(u,64,4,epi64)
#define foreach_avx2_vec256f \
  _(f,32,8,ps) _(f,64,4,pd)

#define _mm256_set1_epi64 _mm256_set1_epi64x

/* splat, load_unaligned, store_unaligned, is_all_zero, is_equal,
   is_all_equal */
#define _(t, s, c, i) \
static_always_inline t##s##x##c						\
t##s##x##c##_splat (t##s x)						\
{ return (t##s##x##c) _mm256_set1_##i (x); }				\
\
static_always_inline t##s##x##c						\
t##s##x##c##_load_unaligned (void *p)					\
{ return (t##s##x##c) _mm256_loadu_si256 (p); }				\
\
static_always_inline void						\
t##s##x##c##_store_unaligned (t##s##x##c v, void *p)			\
{ _mm256_storeu_si256 ((__m256i *) p, (__m256i) v); }			\
\
static_always_inline int						\
t##s##x##c##_is_all_zero (t##s##x##c x)					\
{ return _mm256_testz_si256 ((__m256i) x, (__m256i) x); }		\
\
static_always_inline int						\
t##s##x##c##_is_equal (t##s##x##c a, t##s##x##c b)			\
{ return t##s##x##c##_is_all_zero (a ^ b); }				\
\
static_always_inline int						\
t##s##x##c##_is_all_equal (t##s##x##c v, t##s x)			\
{ return t##s##x##c##_is_equal (v, t##s##x##c##_splat (x)); }		\
\
static_always_inline t##s##x##c                                         \
t##s##x##c##_interleave_lo (t##s##x##c a, t##s##x##c b)                 \
{ return (t##s##x##c) _mm256_unpacklo_##i ((__m256i) a, (__m256i) b); } \
\
static_always_inline t##s##x##c                                         \
t##s##x##c##_interleave_hi (t##s##x##c a, t##s##x##c b)                 \
{ return (t##s##x##c) _mm256_unpackhi_##i ((__m256i) a, (__m256i) b); } \


foreach_avx2_vec256i foreach_avx2_vec256u
#undef _
/* *INDENT-ON* */
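
/* Illustrative sketch (not part of the original header): typical use of
   the helpers generated above, here the u32x8 flavor. Names containing
   "example" are assumptions, not existing API. */
static_always_inline int
u32x8_example_all_equal (void *p, u32 x)
{
  /* unaligned 256-bit load, then compare all eight lanes to a splat */
  return u32x8_is_all_equal (u32x8_load_unaligned (p), x);
}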

always_inline u32x8
u32x8_permute (u32x8 v, u32x8 idx)
{
  return (u32x8) _mm256_permutevar8x32_epi32 ((__m256i) v, (__m256i) idx);
}

#define u64x4_permute(v, m0, m1, m2, m3)                                      \
  (u64x4) _mm256_permute4x64_epi64 (                                          \
    (__m256i) v, ((m0) | (m1) << 2 | (m2) << 4 | (m3) << 6))
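
/* Illustrative sketch: u32x8_permute selects lane idx[i] from v, so a
   descending index vector reverses element order (example name is an
   assumption). */
static_always_inline u32x8
u32x8_example_reverse (u32x8 v)
{
  u32x8 idx = { 7, 6, 5, 4, 3, 2, 1, 0 };
  return u32x8_permute (v, idx);
}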

/* _extract_lo, _extract_hi */
/* *INDENT-OFF* */
#define _(t1,t2) \
always_inline t1							\
t2##_extract_lo (t2 v)							\
{ return (t1) _mm256_extracti128_si256 ((__m256i) v, 0); }		\
\
always_inline t1							\
t2##_extract_hi (t2 v)							\
{ return (t1) _mm256_extracti128_si256 ((__m256i) v, 1); }		\
\
always_inline t2							\
t2##_insert_lo (t2 v1, t1 v2)						\
{ return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 0); }\
\
always_inline t2							\
t2##_insert_hi (t2 v1, t1 v2)						\
{ return (t2) _mm256_inserti128_si256 ((__m256i) v1, (__m128i) v2, 1); }\

_(u8x16, u8x32)
_(u16x8, u16x16)
_(u32x4, u32x8)
_(u64x2, u64x4)
#undef _
/* *INDENT-ON* */
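
/* Illustrative sketch: the extract/insert helpers above split a 256-bit
   vector into its 128-bit halves; swapping the halves looks like this
   (example name is an assumption). */
static_always_inline u32x8
u32x8_example_swap_halves (u32x8 v)
{
  u32x4 lo = u32x8_extract_lo (v), hi = u32x8_extract_hi (v);
  return u32x8_insert_hi (u32x8_insert_lo (v, hi), lo);
}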

/* 256 bit packs. */
#define _(f, t, fn)                                                           \
  always_inline t t##_pack (f lo, f hi)                                       \
  {                                                                           \
    return (t) fn ((__m256i) lo, (__m256i) hi);                               \
  }

_ (i16x16, i8x32, _mm256_packs_epi16)
_ (i16x16, u8x32, _mm256_packus_epi16)
_ (i32x8, i16x16, _mm256_packs_epi32)
_ (i32x8, u16x16, _mm256_packus_epi32)

#undef _
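/* Note: AVX2 packs operate per 128-bit lane, so the results above
   interleave: for the 16->8 bit case the output element order is
   lo[0..7], hi[0..7], lo[8..15], hi[8..15]. */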

static_always_inline u32
u8x32_msb_mask (u8x32 v)
{
  return _mm256_movemask_epi8 ((__m256i) v);
}

static_always_inline u32
i8x32_msb_mask (i8x32 v)
{
  return _mm256_movemask_epi8 ((__m256i) v);
}
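
/* Illustrative sketch: combine a lane-wise compare with the msb mask to
   count matching bytes (example name is an assumption). */
static_always_inline int
u8x32_example_count_eq (u8x32 v, u8 x)
{
  u32 mask = u8x32_msb_mask ((u8x32) (v == u8x32_splat (x)));
  return __builtin_popcount (mask);
}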

/* _from_ */
/* *INDENT-OFF* */
#define _(f,t,i) \
static_always_inline t							\
t##_from_##f (f x)							\
{ return (t) _mm256_cvt##i ((__m128i) x); }

_(u16x8, u32x8, epu16_epi32)
_(u16x8, u64x4, epu16_epi64)
_(u32x4, u64x4, epu32_epi64)
_(u8x16, u16x16, epu8_epi16)
_(u8x16, u32x8, epu8_epi32)
_(u8x16, u64x4, epu8_epi64)
_(i16x8, i32x8, epi16_epi32)
_(i16x8, i64x4, epi16_epi64)
_(i32x4, i64x4, epi32_epi64)
_(i8x16, i16x16, epi8_epi16)
_(i8x16, i32x8, epi8_epi32)
_(i8x16, i64x4, epi8_epi64)
#undef _
/* *INDENT-ON* */
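
/* Illustrative sketch: the conversions above zero- or sign-extend a
   128-bit source into wider 256-bit lanes, e.g. widening bytes to 16-bit
   lanes before arithmetic that would overflow u8 (example name is an
   assumption). */
static_always_inline u16x16
u16x16_example_scale (u8x16 v, u16 m)
{
  return u16x16_from_u8x16 (v) * u16x16_splat (m);
}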

static_always_inline u64x4
u64x4_byte_swap (u64x4 v)
{
  u8x32 swap = {
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
    7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8,
  };
  return (u64x4) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}

static_always_inline u32x8
u32x8_byte_swap (u32x8 v)
{
  u8x32 swap = {
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12,
    3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
  };
  return (u32x8) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}

static_always_inline u16x16
u16x16_byte_swap (u16x16 v)
{
  u8x32 swap = {
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
    1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
  };
  return (u16x16) _mm256_shuffle_epi8 ((__m256i) v, (__m256i) swap);
}
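
/* Illustrative sketch: on little-endian x86 the byte swaps above give a
   vectorized ntohl/htonl, e.g. for eight u32 values at once (example
   name is an assumption). */
static_always_inline u32x8
u32x8_example_ntohl (void *p)
{
  return u32x8_byte_swap (u32x8_load_unaligned (p));
}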

#define u8x32_align_right(a, b, imm) \
  (u8x32) _mm256_alignr_epi8 ((__m256i) a, (__m256i) b, imm)

#define u64x4_align_right(a, b, imm)                                          \
  (u64x4) _mm256_alignr_epi64 ((__m256i) a, (__m256i) b, imm)

static_always_inline u32
u32x8_sum_elts (u32x8 sum8)
{
  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 8);
  sum8 += (u32x8) u8x32_align_right (sum8, sum8, 4);
  return sum8[0] + sum8[4];
}
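
/* Note: u8x32_align_right shifts within each 128-bit lane, so the two
   folds above reduce each lane independently; the final scalar add of
   elements 0 and 4 combines the two per-lane partial sums. */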

static_always_inline u32x8
u32x8_hadd (u32x8 v1, u32x8 v2)
{
  return (u32x8) _mm256_hadd_epi32 ((__m256i) v1, (__m256i) v2);
}

static_always_inline u32
u32x8_hxor (u32x8 v)
{
  u32x4 v4;
  v4 = u32x8_extract_lo (v) ^ u32x8_extract_hi (v);
  v4 ^= (u32x4) u8x16_align_right (v4, v4, 8);
  v4 ^= (u32x4) u8x16_align_right (v4, v4, 4);
  return v4[0];
}

static_always_inline u8x32
u8x32_xor3 (u8x32 a, u8x32 b, u8x32 c)
{
#if __AVX512F__
  return (u8x32) _mm256_ternarylogic_epi32 ((__m256i) a, (__m256i) b,
					    (__m256i) c, 0x96);
#endif
  return a ^ b ^ c;
}
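
/* Note: the 0x96 immediate above is the 3-input truth table of
   a ^ b ^ c, so the AVX512 path computes the same result in a single
   vpternlogd instruction. */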

static_always_inline u8x32
u8x32_reflect_u8x16 (u8x32 x)
{
  static const u8x32 mask = {
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
    15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
  };
  return (u8x32) _mm256_shuffle_epi8 ((__m256i) x, (__m256i) mask);
}

static_always_inline u16x16
u16x16_mask_last (u16x16 v, u8 n_last)
{
  const u16x16 masks[17] = {
    {0},
    {-1},
    {-1, -1},
    {-1, -1, -1},
    {-1, -1, -1, -1},
    {-1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
    {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
  };

  ASSERT (n_last < 17);

  return v & masks[16 - n_last];
}
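
/* Illustrative sketch: zero the trailing lanes of a partial batch before
   testing the remaining ones (example name is an assumption). */
static_always_inline int
u16x16_example_any_set (u16x16 v, u8 n_invalid)
{
  return !u16x16_is_all_zero (u16x16_mask_last (v, n_invalid));
}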

static_always_inline f32x8
f32x8_from_u32x8 (u32x8 v)
{
  return (f32x8) _mm256_cvtepi32_ps ((__m256i) v);
}

static_always_inline u32x8
u32x8_from_f32x8 (f32x8 v)
{
  return (u32x8) _mm256_cvttps_epi32 ((__m256) v);
}

#define u32x8_blend(a,b,m) \
  (u32x8) _mm256_blend_epi32 ((__m256i) a, (__m256i) b, m)

#define u16x16_blend(v1, v2, mask) \
  (u16x16) _mm256_blend_epi16 ((__m256i) (v1), (__m256i) (v2), mask)

static_always_inline u64x4
u64x4_gather (void *p0, void *p1, void *p2, void *p3)
{
  u64x4 r = {
    *(u64 *) p0, *(u64 *) p1, *(u64 *) p2, *(u64 *) p3
  };
  return r;
}

static_always_inline u32x8
u32x8_gather (void *p0, void *p1, void *p2, void *p3, void *p4, void *p5,
	      void *p6, void *p7)
{
  u32x8 r = {
    *(u32 *) p0, *(u32 *) p1, *(u32 *) p2, *(u32 *) p3,
    *(u32 *) p4, *(u32 *) p5, *(u32 *) p6, *(u32 *) p7,
  };
  return r;
}


static_always_inline void
u64x4_scatter (u64x4 r, void *p0, void *p1, void *p2, void *p3)
{
  *(u64 *) p0 = r[0];
  *(u64 *) p1 = r[1];
  *(u64 *) p2 = r[2];
  *(u64 *) p3 = r[3];
}

static_always_inline void
u32x8_scatter (u32x8 r, void *p0, void *p1, void *p2, void *p3, void *p4,
	       void *p5, void *p6, void *p7)
{
  *(u32 *) p0 = r[0];
  *(u32 *) p1 = r[1];
  *(u32 *) p2 = r[2];
  *(u32 *) p3 = r[3];
  *(u32 *) p4 = r[4];
  *(u32 *) p5 = r[5];
  *(u32 *) p6 = r[6];
  *(u32 *) p7 = r[7];
}
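
/* Illustrative sketch: gather eight u32 counters scattered in memory,
   bump them, and scatter them back (example name is an assumption). */
static_always_inline void
u32x8_example_incr (void *p[8])
{
  u32x8 v = u32x8_gather (p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]);
  v += u32x8_splat (1);
  u32x8_scatter (v, p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]);
}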

static_always_inline void
u64x4_scatter_one (u64x4 r, int index, void *p)
{
  *(u64 *) p = r[index];
}

static_always_inline void
u32x8_scatter_one (u32x8 r, int index, void *p)
{
  *(u32 *) p = r[index];
}

static_always_inline u8x32
u8x32_blend (u8x32 v1, u8x32 v2, u8x32 mask)
{
  return (u8x32) _mm256_blendv_epi8 ((__m256i) v1, (__m256i) v2,
				     (__m256i) mask);
}

#define u8x32_word_shift_left(a, n)                                           \
  (u8x32) _mm256_bslli_epi128 ((__m256i) a, n)
#define u8x32_word_shift_right(a, n)                                          \
  (u8x32) _mm256_bsrli_epi128 ((__m256i) a, n)

#define u32x8_permute_lanes(a, b, m) \
  (u32x8) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m)
#define u64x4_permute_lanes(a, b, m) \
  (u64x4) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m)

static_always_inline u32x8
u32x8_min (u32x8 a, u32x8 b)
{
  return (u32x8) _mm256_min_epu32 ((__m256i) a, (__m256i) b);
}

static_always_inline u32
u32x8_min_scalar (u32x8 v)
{
  return u32x4_min_scalar (u32x4_min (u32x8_extract_lo (v),
				      u32x8_extract_hi (v)));
}

static_always_inline void
u32x8_transpose (u32x8 a[8])
{
  u64x4 r[8], x, y;

  r[0] = (u64x4) u32x8_interleave_lo (a[0], a[1]);
  r[1] = (u64x4) u32x8_interleave_hi (a[0], a[1]);
  r[2] = (u64x4) u32x8_interleave_lo (a[2], a[3]);
  r[3] = (u64x4) u32x8_interleave_hi (a[2], a[3]);
  r[4] = (u64x4) u32x8_interleave_lo (a[4], a[5]);
  r[5] = (u64x4) u32x8_interleave_hi (a[4], a[5]);
  r[6] = (u64x4) u32x8_interleave_lo (a[6], a[7]);
  r[7] = (u64x4) u32x8_interleave_hi (a[6], a[7]);

  x = u64x4_interleave_lo (r[0], r[2]);
  y = u64x4_interleave_lo (r[4], r[6]);
  a[0] = u32x8_permute_lanes (x, y, 0x20);
  a[4] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_hi (r[0], r[2]);
  y = u64x4_interleave_hi (r[4], r[6]);
  a[1] = u32x8_permute_lanes (x, y, 0x20);
  a[5] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_lo (r[1], r[3]);
  y = u64x4_interleave_lo (r[5], r[7]);
  a[2] = u32x8_permute_lanes (x, y, 0x20);
  a[6] = u32x8_permute_lanes (x, y, 0x31);

  x = u64x4_interleave_hi (r[1], r[3]);
  y = u64x4_interleave_hi (r[5], r[7]);
  a[3] = u32x8_permute_lanes (x, y, 0x20);
  a[7] = u32x8_permute_lanes (x, y, 0x31);
}
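
/* Note: the transpose above works in three steps: 32-bit interleaves
   build 2x2 blocks, 64-bit interleaves build 4x4 blocks, and the final
   lane permutes (0x20 = low 128-bit lanes, 0x31 = high lanes) stitch the
   8x8 result together. */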

static_always_inline void
u64x4_transpose (u64x4 a[8])
{
  u64x4 r[4];

  r[0] = u64x4_interleave_lo (a[0], a[1]);
  r[1] = u64x4_interleave_hi (a[0], a[1]);
  r[2] = u64x4_interleave_lo (a[2], a[3]);
  r[3] = u64x4_interleave_hi (a[2], a[3]);

  a[0] = u64x4_permute_lanes (r[0], r[2], 0x20);
  a[1] = u64x4_permute_lanes (r[1], r[3], 0x20);
  a[2] = u64x4_permute_lanes (r[0], r[2], 0x31);
  a[3] = u64x4_permute_lanes (r[1], r[3], 0x31);
}

static_always_inline u8x32
u8x32_splat_u8x16 (u8x16 a)
{
  return (u8x32) _mm256_broadcastsi128_si256 ((__m128i) a);
}

static_always_inline u32x8
u32x8_splat_u32x4 (u32x4 a)
{
  return (u32x8) _mm256_broadcastsi128_si256 ((__m128i) a);
}

static_always_inline u8x32
u8x32_load_partial (u8 *data, uword n)
{
#if defined(CLIB_HAVE_VEC256_MASK_LOAD_STORE)
  return u8x32_mask_load_zero (data, pow2_mask (n));
#else
  u8x32 r = {};
  if (n > 16)
    {
      r = u8x32_insert_lo (r, *(u8x16u *) data);
      r = u8x32_insert_hi (r, u8x16_load_partial (data + 16, n - 16));
    }
  else
    r = u8x32_insert_lo (r, u8x16_load_partial (data, n));
  return r;
#endif
}

static_always_inline void
u8x32_store_partial (u8x32 r, u8 *data, uword n)
{
#if defined(CLIB_HAVE_VEC256_MASK_LOAD_STORE)
  u8x32_mask_store (r, data, pow2_mask (n));
#else
  if (n > 16)
    {
      *(u8x16u *) data = u8x32_extract_lo (r);
      u8x16_store_partial (u8x32_extract_hi (r), data + 16, n - 16);
    }
  else
    u8x16_store_partial (u8x32_extract_lo (r), data, n);
#endif
}
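
/* Illustrative sketch: copy n <= 32 bytes with one partial load and one
   partial store, without touching memory past the end (example name is
   an assumption). */
static_always_inline void
clib_example_memcpy_le32 (u8 *dst, u8 *src, uword n)
{
  u8x32_store_partial (u8x32_load_partial (src, n), dst, n);
}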

#endif /* included_vector_avx2_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */