path: root/src/vppinfra/vector_avx2.h
author    Damjan Marion <damarion@cisco.com>    2019-04-08 10:14:51 +0200
committer Neale Ranns <nranns@cisco.com>        2019-04-08 11:57:40 +0000
commit    9f7e33d60c1a807175c03028493c18be50d26e06 (patch)
tree      05ea2816b82af8fe0952f6e2cca7c52b60629de8 /src/vppinfra/vector_avx2.h
parent    2bc816915965c936d579be60e83105f9cb05c875 (diff)
vppinfra: u32x8 transpose
Change-Id: I7d39cb184f1f9ad24276183c29969327681a1f82
Signed-off-by: Damjan Marion <damarion@cisco.com>
Diffstat (limited to 'src/vppinfra/vector_avx2.h')
-rw-r--r--  src/vppinfra/vector_avx2.h  56
1 file changed, 56 insertions(+), 0 deletions(-)
diff --git a/src/vppinfra/vector_avx2.h b/src/vppinfra/vector_avx2.h
index b9d6549da99..c857ad49c0d 100644
--- a/src/vppinfra/vector_avx2.h
+++ b/src/vppinfra/vector_avx2.h
@@ -260,6 +260,62 @@ u8x32_blend (u8x32 v1, u8x32 v2, u8x32 mask)
(__m256i) mask);
}
+#define u32x8_permute_lanes(a, b, m) \
+ (u32x8) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m)
+#define u64x4_permute_lanes(a, b, m) \
+ (u64x4) _mm256_permute2x128_si256 ((__m256i) a, (__m256i) b, m)
+
+static_always_inline void
+u32x8_transpose (u32x8 a[8])
+{
+ u64x4 r[8], x, y;
+
+ r[0] = (u64x4) u32x8_interleave_lo (a[0], a[1]);
+ r[1] = (u64x4) u32x8_interleave_hi (a[0], a[1]);
+ r[2] = (u64x4) u32x8_interleave_lo (a[2], a[3]);
+ r[3] = (u64x4) u32x8_interleave_hi (a[2], a[3]);
+ r[4] = (u64x4) u32x8_interleave_lo (a[4], a[5]);
+ r[5] = (u64x4) u32x8_interleave_hi (a[4], a[5]);
+ r[6] = (u64x4) u32x8_interleave_lo (a[6], a[7]);
+ r[7] = (u64x4) u32x8_interleave_hi (a[6], a[7]);
+
+ x = u64x4_interleave_lo (r[0], r[2]);
+ y = u64x4_interleave_lo (r[4], r[6]);
+ a[0] = u32x8_permute_lanes (x, y, 0x20);
+ a[4] = u32x8_permute_lanes (x, y, 0x31);
+
+ x = u64x4_interleave_hi (r[0], r[2]);
+ y = u64x4_interleave_hi (r[4], r[6]);
+ a[1] = u32x8_permute_lanes (x, y, 0x20);
+ a[5] = u32x8_permute_lanes (x, y, 0x31);
+
+ x = u64x4_interleave_lo (r[1], r[3]);
+ y = u64x4_interleave_lo (r[5], r[7]);
+ a[2] = u32x8_permute_lanes (x, y, 0x20);
+ a[6] = u32x8_permute_lanes (x, y, 0x31);
+
+ x = u64x4_interleave_hi (r[1], r[3]);
+ y = u64x4_interleave_hi (r[5], r[7]);
+ a[3] = u32x8_permute_lanes (x, y, 0x20);
+ a[7] = u32x8_permute_lanes (x, y, 0x31);
+}
+
+static_always_inline void
+u64x4_transpose (u64x4 a[8])
+{
+ u64x4 r[4];
+
+ r[0] = u64x4_interleave_lo (a[0], a[1]);
+ r[1] = u64x4_interleave_hi (a[0], a[1]);
+ r[2] = u64x4_interleave_lo (a[2], a[3]);
+ r[3] = u64x4_interleave_hi (a[2], a[3]);
+
+ a[0] = u64x4_permute_lanes (r[0], r[2], 0x20);
+ a[1] = u64x4_permute_lanes (r[1], r[3], 0x20);
+ a[2] = u64x4_permute_lanes (r[0], r[2], 0x31);
+ a[3] = u64x4_permute_lanes (r[1], r[3], 0x31);
+}
+
#endif /* included_vector_avx2_h */
/*
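
A minimal usage sketch (not part of the commit) showing how the new u32x8_transpose could be exercised: it loads an 8x8 matrix of 32-bit values row by row into eight u32x8 vectors, transposes them in place, and checks that element (i,j) ended up at (j,i). It assumes a VPP build tree where <vppinfra/clib.h> and <vppinfra/vector.h> are on the include path and the compiler targets AVX2 (e.g. -mavx2 or -march=haswell).

/* Hedged sketch: standalone check of u32x8_transpose on an 8x8 u32 matrix.
   Assumes a VPP/vppinfra include path and an AVX2-capable target. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <vppinfra/clib.h>
#include <vppinfra/vector.h>

int
main (void)
{
  u32x8 m[8];
  uint32_t in[8][8], out[8][8];

  /* element (i,j) = i * 8 + j, so every entry is distinct */
  for (int i = 0; i < 8; i++)
    for (int j = 0; j < 8; j++)
      in[i][j] = i * 8 + j;

  /* load the 8 rows, transpose in place, store them back */
  for (int i = 0; i < 8; i++)
    memcpy (&m[i], in[i], sizeof (m[i]));

  u32x8_transpose (m);

  for (int i = 0; i < 8; i++)
    memcpy (out[i], &m[i], sizeof (m[i]));

  /* after the transpose, out[i][j] must equal in[j][i] */
  for (int i = 0; i < 8; i++)
    for (int j = 0; j < 8; j++)
      if (out[i][j] != in[j][i])
	{
	  printf ("mismatch at (%d,%d)\n", i, j);
	  return 1;
	}

  printf ("u32x8_transpose OK\n");
  return 0;
}

The same call pattern applies to u64x4_transpose, which transposes the 4x4 matrix of 64-bit elements held in a[0]..a[3]. On the design: the 0x20 and 0x31 selectors passed to _mm256_permute2x128_si256 concatenate, respectively, the low 128-bit lanes and the high 128-bit lanes of the two source vectors, which is what stitches the lane-local interleave results into full 256-bit rows of the transposed matrix.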