path: root/tests/vpp/perf/crypto/10ge2p1x710-ethip4ipsec4tnlsw-1atnl-ip4base-int-aes256gcm-reconf.robot
Age | Commit message | Author | Files | Lines
2021-01-21 | Reconf: Fix bidir/unidir mismatch | Vratko Polak | 1 | -3/+2

    Result of NDR search is in TPS (so unidir) since
    https://gerrit.fd.io/r/c/csit/+/28208. The bug was causing all reconf
    tests to report zero loss, as they used half of the NDR load during
    reconfiguration.
    + Small improvements to descriptions of other traffic keywords.

    Change-Id: Ib9af5861cc09e698eb762feb3f0a019571d17962
    Signed-off-by: Vratko Polak <vrpolak@cisco.com>
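    To illustrate the arithmetic with purely made-up numbers: if NDR search
    reports 2.0 Mtps (a unidirectional transactions-per-second value) and a
    reconf test misreads it as a bidirectional rate, it offers only 1.0 Mtps
    per direction during reconfiguration, half the real NDR load, so the
    device under test keeps up and the test wrongly reports zero loss.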
2020-12-16 | API: deprecated IPSEC APIs | Jan Gelety | 1 | -1/+1

    Jira: CSIT-1597
    + add ipsec_sa tear down action

    Change-Id: I4d1e6f26c14e61e8ddc6f29fbf5528a034c84eb0
    Signed-off-by: Jan Gelety <jgelety@cisco.com>
2020-11-26 | Re-enable 9000B tests for ipsec | Vratko Polak | 1 | -0/+12

    Ticket: CSIT-1771 VPP-1207 VPP-1675

    Change-Id: I8ba2d62054361e72b833943327434ea071e7e568
    Signed-off-by: Vratko Polak <vrpolak@cisco.com>
2020-08-20 | Framework: use 'stl' in trex stateless profile names | Jan Gelety | 1 | -1/+1

    Change-Id: I74641cc89d2f25d50b67d51bf2567082b420aabb
    Signed-off-by: Jan Gelety <jgelety@cisco.com>
2020-07-23 | T-Rex: Add advanced stateful mode | Jan Gelety | 1 | -1/+1

    - provide base routines to run T-Rex in advanced stateful mode

    Change-Id: Ib0dc5f2919c370753335f6446860683dc4b12d93
    Signed-off-by: Jan Gelety <jgelety@cisco.com>
2020-06-09 | Remove leading tc[nn] from test names | Juraj Linkeš | 1 | -9/+9

    The test names are unique without it, and the information doesn't add
    anything extra.

    Change-Id: Idc7d6d1d21c8c05691e1757227a0a3787406d370
    Signed-off-by: Juraj Linkeš <juraj.linkes@pantheon.tech>
2020-04-06 | Improve pf layer | Peter Mikus | 1 | -5/+7

    + Merge single/double link
    + Introduce _pf{n}[0] variables so we can access a physical function
      the same way as a virtual function
    + Clean up code by moving complex logic to Python
    + Prepare code for multiple vf functions

    Signed-off-by: Peter Mikus <pmikus@cisco.com>
    Change-Id: Ic2e74a38bfa146441357de8f0916aeb638941c49
2020-03-10 | Make RXQs/TXQs configurable | Peter Mikus | 1 | -0/+3

    Change-Id: Ib30bc4697fcba93a6723ee492a59a0523425f623
    Signed-off-by: Peter Mikus <pmikus@cisco.com>
2020-02-25 | FIX: check if t-rex is running at test setup of all perf tests | Jan Gelety | 1 | -1/+1

    Change-Id: I9af632035a1415666b2470c62a41d1b6acbf33c8
    Signed-off-by: Jan Gelety <jgelety@cisco.com>
2020-02-04 | Ipsec: Use new plugin name in reconf tests | Vratko Polak | 1 | -1/+1

    This edit is not suitable for rls2001.

    Change-Id: I18ea22346d5996e78034f35d74a87c125010a146
    Signed-off-by: Vratko Polak <vrpolak@cisco.com>
2020-02-04 | Add more reconf tests, for IPsec | Vratko Polak | 1 | -0/+153

    - Not adding nf_density tests.
    - Not adding hardware ipsec tests.
    - Not adding -policy- tests.
    - Using old crypto_ia32_plugin.so plugin name.
    + Suitable for cherry-picking to rls2001.

    Change-Id: Ibf44d6d91e2afa2320637ecd9eb69d5d5dc364aa
    Signed-off-by: Vratko Polak <vrpolak@cisco.com>
 * ... nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef included_clib_memcpy_avx512_h
#define included_clib_memcpy_avx512_h

#include <stdint.h>
#include <x86intrin.h>

static inline void
clib_mov16 (u8 * dst, const u8 * src)
{
  __m128i xmm0;

  xmm0 = _mm_loadu_si128 ((const __m128i *) src);
  _mm_storeu_si128 ((__m128i *) dst, xmm0);
}

static inline void
clib_mov32 (u8 * dst, const u8 * src)
{
  __m256i ymm0;

  ymm0 = _mm256_loadu_si256 ((const __m256i *) src);
  _mm256_storeu_si256 ((__m256i *) dst, ymm0);
}

static inline void
clib_mov64 (u8 * dst, const u8 * src)
{
  __m512i zmm0;

  zmm0 = _mm512_loadu_si512 ((const void *) src);
  _mm512_storeu_si512 ((void *) dst, zmm0);
}

static inline void
clib_mov128 (u8 * dst, const u8 * src)
{
  clib_mov64 (dst + 0 * 64, src + 0 * 64);
  clib_mov64 (dst + 1 * 64, src + 1 * 64);
}

static inline void
clib_mov256 (u8 * dst, const u8 * src)
{
  clib_mov128 (dst + 0 * 128, src + 0 * 128);
  clib_mov128 (dst + 1 * 128, src + 1 * 128);
}

static inline void
clib_mov128blocks (u8 * dst, const u8 * src, size_t n)
{
  __m512i zmm0, zmm1;

  while (n >= 128)
    {
      zmm0 = _mm512_loadu_si512 ((const void *) (src + 0 * 64));
      n -= 128;
      zmm1 = _mm512_loadu_si512 ((const void *) (src + 1 * 64));
      src = src + 128;
      _mm512_storeu_si512 ((void *) (dst + 0 * 64), zmm0);
      _mm512_storeu_si512 ((void *) (dst + 1 * 64), zmm1);
      dst = dst + 128;
    }
}

static inline void
clib_mov512blocks (u8 * dst, const u8 * src, size_t n)
{
  __m512i zmm0, zmm1, zmm2, zmm3, zmm4, zmm5, zmm6, zmm7;

  while (n >= 512)
    {
      zmm0 = _mm512_loadu_si512 ((const void *) (src + 0 * 64));
      n -= 512;
      zmm1 = _mm512_loadu_si512 ((const void *) (src + 1 * 64));
      zmm2 = _mm512_loadu_si512 ((const void *) (src + 2 * 64));
      zmm3 = _mm512_loadu_si512 ((const void *) (src + 3 * 64));
      zmm4 = _mm512_loadu_si512 ((const void *) (src + 4 * 64));
      zmm5 = _mm512_loadu_si512 ((const void *) (src + 5 * 64));
      zmm6 = _mm512_loadu_si512 ((const void *) (src + 6 * 64));
      zmm7 = _mm512_loadu_si512 ((const void *) (src + 7 * 64));
      src = src + 512;
      _mm512_storeu_si512 ((void *) (dst + 0 * 64), zmm0);
      _mm512_storeu_si512 ((void *) (dst + 1 * 64), zmm1);
      _mm512_storeu_si512 ((void *) (dst + 2 * 64), zmm2);
      _mm512_storeu_si512 ((void *) (dst + 3 * 64), zmm3);
      _mm512_storeu_si512 ((void *) (dst + 4 * 64), zmm4);
      _mm512_storeu_si512 ((void *) (dst + 5 * 64), zmm5);
      _mm512_storeu_si512 ((void *) (dst + 6 * 64), zmm6);
      _mm512_storeu_si512 ((void *) (dst + 7 * 64), zmm7);
      dst = dst + 512;
    }
}

static inline void *
clib_memcpy (void *dst, const void *src, size_t n)
{
  uword dstu = (uword) dst;
  uword srcu = (uword) src;
  void *ret = dst;
  size_t dstofss;
  size_t bits;

  /**
   * Copy less than 16 bytes
   */
  if (n < 16)
    {
      if (n & 0x01)
	{
	  *(u8 *) dstu = *(const u8 *) srcu;
	  srcu = (uword) ((const u8 *) srcu + 1);
	  dstu = (uword) ((u8 *) dstu + 1);
	}
      if (n & 0x02)
	{
	  *(u16 *) dstu = *(const u16 *) srcu;
	  srcu = (uword) ((const u16 *) srcu + 1);
	  dstu = (uword) ((u16 *) dstu + 1);
	}
      if (n & 0x04)
	{
	  *(u32 *) dstu = *(const u32 *) srcu;
	  srcu = (uword) ((const u32 *) srcu + 1);
	  dstu = (uword) ((u32 *) dstu + 1);
	}
      if (n & 0x08)
	*(u64 *) dstu = *(const u64 *) srcu;
      return ret;
    }

  /**
   * Fast way when copy size doesn't exceed 512 bytes
   */
  if (n <= 32)
    {
      /* Two possibly overlapping 16-byte moves cover the whole range. */
      clib_mov16 ((u8 *) dst, (const u8 *) src);
      clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
      return ret;
    }
  if (n <= 64)
    {
      clib_mov32 ((u8 *) dst, (const u8 *) src);
      clib_mov32 ((u8 *) dst - 32 + n, (const u8 *) src - 32 + n);
      return ret;
    }
  if (n <= 512)
    {
      if (n >= 256)
	{
	  n -= 256;
	  clib_mov256 ((u8 *) dst, (const u8 *) src);
	  src = (const u8 *) src + 256;
	  dst = (u8 *) dst + 256;
	}
      if (n >= 128)
	{
	  n -= 128;
	  clib_mov128 ((u8 *) dst, (const u8 *) src);
	  src = (const u8 *) src + 128;
	  dst = (u8 *) dst + 128;
	}
    COPY_BLOCK_128_BACK63:
      if (n > 64)
	{
	  clib_mov64 ((u8 *) dst, (const u8 *) src);
	  clib_mov64 ((u8 *) dst - 64 + n, (const u8 *) src - 64 + n);
	  return ret;
	}
      if (n > 0)
	clib_mov64 ((u8 *) dst - 64 + n, (const u8 *) src - 64 + n);
      return ret;
    }

  /**
   * Make store aligned when copy size exceeds 512 bytes
   */
  dstofss = (uword) dst & 0x3F;
  if (dstofss > 0)
    {
      dstofss = 64 - dstofss;
      n -= dstofss;
      clib_mov64 ((u8 *) dst, (const u8 *) src);
      src = (const u8 *) src + dstofss;
      dst = (u8 *) dst + dstofss;
    }

  /**
   * Copy 512-byte blocks.
   * Use copy block function for better instruction order control,
   * which is important when load is unaligned.
   */
  clib_mov512blocks ((u8 *) dst, (const u8 *) src, n);
  bits = n;
  n = n & 511;
  bits -= n;
  src = (const u8 *) src + bits;
  dst = (u8 *) dst + bits;

  /**
   * Copy 128-byte blocks.
   * Use copy block function for better instruction order control,
   * which is important when load is unaligned.
   */
  if (n >= 128)
    {
      clib_mov128blocks ((u8 *) dst, (const u8 *) src, n);
      bits = n;
      n = n & 127;
      bits -= n;
      src = (const u8 *) src + bits;
      dst = (u8 *) dst + bits;
    }

  /**
   * Copy whatever is left
   */
  goto COPY_BLOCK_128_BACK63;
}

#endif /* included_clib_memcpy_avx512_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
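For reference, below is a minimal sketch of how clib_memcpy could be
exercised standalone. It assumes an AVX-512-capable CPU and a build flag
such as -mavx512f; the u8/u16/u32/u64/uword typedefs are stand-ins supplied
here (in the VPP tree they come from vppinfra, which the header relies on),
and the include path "memcpy_avx512.h" is a hypothetical local copy of the
header above, not the real tree layout.

/* Minimal standalone harness for the clib_memcpy above; a sketch, not
 * the VPP build setup.  Build with e.g.: gcc -mavx512f harness.c */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in typedefs; in VPP these come from vppinfra's types header. */
typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
typedef uintptr_t uword;

#include "memcpy_avx512.h"	/* hypothetical local copy of the header */

int
main (void)
{
  u8 src[1000], dst[1000];
  int i;

  for (i = 0; i < 1000; i++)
    src[i] = (u8) i;

  /* Hit three code paths: the sub-16-byte scalar path (1+2+4 bytes),
   * the overlapping-vector path (<= 512 bytes), and the aligned
   * 512-byte block loop (> 512 bytes). */
  clib_memcpy (dst, src, 7);
  clib_memcpy (dst, src, 300);
  clib_memcpy (dst, src, 1000);

  printf ("%s\n", memcmp (dst, src, 1000) == 0 ? "match" : "mismatch");
  return 0;
}

Like memcpy, clib_memcpy returns its destination pointer; the overlapping
head/tail moves in the mid-size paths trade a few redundant bytes of copying
for branch-free handling of irregular lengths.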