diff options
Diffstat (limited to 'external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream')
35 files changed, 3359 insertions, 0 deletions
diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/afternm_aes128ctr.c b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/afternm_aes128ctr.c new file mode 100644 index 00000000..a5a9a7a9 --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/afternm_aes128ctr.c @@ -0,0 +1,159 @@ +/* Author: Peter Schwabe, ported from an assembly implementation by Emilia Käsper + * Date: 2009-03-19 + * Public domain */ + +#include "api.h" +#include "int128.h" +#include "common.h" +#include "consts.h" + +int crypto_stream_afternm(unsigned char *out, unsigned long long len, const unsigned char *nonce, const unsigned char *c) +{ + + int128 xmm0; + int128 xmm1; + int128 xmm2; + int128 xmm3; + int128 xmm4; + int128 xmm5; + int128 xmm6; + int128 xmm7; + + int128 xmm8; + int128 xmm9; + int128 xmm10; + int128 xmm11; + int128 xmm12; + int128 xmm13; + int128 xmm14; + int128 xmm15; + + int128 nonce_stack; + unsigned long long lensav; + unsigned char bl[128]; + unsigned char *blp; + unsigned char *np; + unsigned char b; + + uint32 tmp; + + /* Copy nonce on the stack */ + copy2(&nonce_stack, (const int128 *) (nonce + 0)); + np = (unsigned char *)&nonce_stack; + + enc_block: + + xmm0 = *(int128 *) (np + 0); + copy2(&xmm1, &xmm0); + shufb(&xmm1, SWAP32); + copy2(&xmm2, &xmm1); + copy2(&xmm3, &xmm1); + copy2(&xmm4, &xmm1); + copy2(&xmm5, &xmm1); + copy2(&xmm6, &xmm1); + copy2(&xmm7, &xmm1); + + add_uint32_big(&xmm1, 1); + add_uint32_big(&xmm2, 2); + add_uint32_big(&xmm3, 3); + add_uint32_big(&xmm4, 4); + add_uint32_big(&xmm5, 5); + add_uint32_big(&xmm6, 6); + add_uint32_big(&xmm7, 7); + + shufb(&xmm0, M0); + shufb(&xmm1, M0SWAP); + shufb(&xmm2, M0SWAP); + shufb(&xmm3, M0SWAP); + shufb(&xmm4, M0SWAP); + shufb(&xmm5, M0SWAP); + shufb(&xmm6, M0SWAP); + shufb(&xmm7, M0SWAP); + + bitslice(xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, 
xmm0, xmm8) + + aesround( 1, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c) + aesround( 2, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,c) + aesround( 3, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c) + aesround( 4, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,c) + aesround( 5, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c) + aesround( 6, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,c) + aesround( 7, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c) + aesround( 8, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,c) + aesround( 9, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c) + lastround(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,c) + + bitslice(xmm13, xmm10, xmm15, xmm11, xmm14, xmm12, xmm9, xmm8, xmm0) + + if(len < 128) goto partial; + if(len == 128) goto full; + + tmp = load32_bigendian(np + 12); + tmp += 8; + store32_bigendian(np + 12, tmp); + + *(int128 *) (out + 0) = xmm8; + *(int128 *) (out + 16) = xmm9; + *(int128 *) (out + 32) = xmm12; + *(int128 *) (out + 48) = xmm14; + *(int128 *) (out + 64) = xmm11; + *(int128 *) (out + 80) = xmm15; + *(int128 *) (out + 96) = xmm10; + *(int128 *) (out + 112) = xmm13; + + len -= 128; + out += 128; + + goto enc_block; + + partial: + + lensav = len; + len >>= 4; + + tmp = load32_bigendian(np + 12); + tmp += len; + store32_bigendian(np + 12, tmp); + + blp = bl; + *(int128 *)(blp + 0) = xmm8; + *(int128 *)(blp + 16) = xmm9; + *(int128 *)(blp + 32) = xmm12; + *(int128 *)(blp + 
48) = xmm14; + *(int128 *)(blp + 64) = xmm11; + *(int128 *)(blp + 80) = xmm15; + *(int128 *)(blp + 96) = xmm10; + *(int128 *)(blp + 112) = xmm13; + + bytes: + + if(lensav == 0) goto end; + + b = blp[0]; /* clang false positive */ + *(unsigned char *)(out + 0) = b; + + blp += 1; + out +=1; + lensav -= 1; + + goto bytes; + + full: + + tmp = load32_bigendian(np + 12); + tmp += 8; + store32_bigendian(np + 12, tmp); + + *(int128 *) (out + 0) = xmm8; + *(int128 *) (out + 16) = xmm9; + *(int128 *) (out + 32) = xmm12; + *(int128 *) (out + 48) = xmm14; + *(int128 *) (out + 64) = xmm11; + *(int128 *) (out + 80) = xmm15; + *(int128 *) (out + 96) = xmm10; + *(int128 *) (out + 112) = xmm13; + + end: + return 0; + +} diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/api.h b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/api.h new file mode 100644 index 00000000..3c53fb95 --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/api.h @@ -0,0 +1,13 @@ + +#include "crypto_stream_aes128ctr.h" + +#define crypto_stream crypto_stream_aes128ctr +#define crypto_stream_xor crypto_stream_aes128ctr_xor +#define crypto_stream_beforenm crypto_stream_aes128ctr_beforenm +#define crypto_stream_afternm crypto_stream_aes128ctr_afternm +#define crypto_stream_xor_afternm crypto_stream_aes128ctr_xor_afternm +#define crypto_stream_KEYBYTES crypto_stream_aes128ctr_KEYBYTES +#define crypto_stream_NONCEBYTES crypto_stream_aes128ctr_NONCEBYTES +#define crypto_stream_BEFORENMBYTES crypto_stream_aes128ctr_BEFORENMBYTES +#define crypto_stream_IMPLEMENTATION crypto_stream_aes128ctr_IMPLEMENTATION +#define crypto_stream_VERSION crypto_stream_aes128ctr_VERSION diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/beforenm_aes128ctr.c 
b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/beforenm_aes128ctr.c new file mode 100644 index 00000000..f8623dd2 --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/beforenm_aes128ctr.c @@ -0,0 +1,59 @@ +/* Author: Peter Schwabe, ported from an assembly implementation by Emilia Käsper + * Date: 2009-03-19 + * Public domain */ + +#include "api.h" +#include "consts.h" +#include "int128.h" +#include "common.h" + +int crypto_stream_beforenm(unsigned char *c, const unsigned char *k) +{ + + /* + int64 x0; + int64 x1; + int64 x2; + int64 x3; + int64 e; + int64 q0; + int64 q1; + int64 q2; + int64 q3; + */ + + int128 xmm0; + int128 xmm1; + int128 xmm2; + int128 xmm3; + int128 xmm4; + int128 xmm5; + int128 xmm6; + int128 xmm7; + int128 xmm8; + int128 xmm9; + int128 xmm10; + int128 xmm11; + int128 xmm12; + int128 xmm13; + int128 xmm14; + int128 xmm15; + int128 t; + + bitslicekey0(k, c) + + keyexpbs1(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c) + keyexpbs(xmm0, xmm1, xmm4, xmm6, xmm3, xmm7, xmm2, xmm5, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm1);, 2,c) + keyexpbs(xmm0, xmm1, xmm3, xmm2, xmm6, xmm5, xmm4, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm6);, 3,c) + keyexpbs(xmm0, xmm1, xmm6, xmm4, xmm2, xmm7, xmm3, xmm5, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm3);, 4,c) + + keyexpbs(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm3);, 5,c) + keyexpbs(xmm0, xmm1, xmm4, xmm6, xmm3, xmm7, xmm2, xmm5, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm5);, 6,c) + keyexpbs(xmm0, xmm1, xmm3, xmm2, xmm6, xmm5, xmm4, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm3);, 7,c) + keyexpbs(xmm0, xmm1, xmm6, xmm4, xmm2, xmm7, xmm3, 
xmm5, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm7);, 8,c) + + keyexpbs(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm0); xor_rcon(&xmm1); xor_rcon(&xmm6); xor_rcon(&xmm3);, 9,c) + keyexpbs10(xmm0, xmm1, xmm4, xmm6, xmm3, xmm7, xmm2, xmm5, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c) + + return 0; +} diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/common.h b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/common.h new file mode 100644 index 00000000..3923c02d --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/common.h @@ -0,0 +1,788 @@ +/* Author: Peter Schwabe, ported from an assembly implementation by Emilia Käsper + Date: 2009-03-19 + Public domain */ +#ifndef COMMON_H +#define COMMON_H + +#include "types.h" + +#define load32_bigendian crypto_stream_aes128ctr_portable_load32_bigendian +uint32 load32_bigendian(const unsigned char *x); + +#define store32_bigendian crypto_stream_aes128ctr_portable_store32_bigendian +void store32_bigendian(unsigned char *x,uint32 u); + +#define load32_littleendian crypto_stream_aes128ctr_portable_load32_littleendian +uint32 load32_littleendian(const unsigned char *x); + +#define store32_littleendian crypto_stream_aes128ctr_portable_store32_littleendian +void store32_littleendian(unsigned char *x,uint32 u); + +#define load64_littleendian crypto_stream_aes128ctr_portable_load64_littleendian +uint64 load64_littleendian(const unsigned char *x); + +#define store64_littleendian crypto_stream_aes128ctr_portable_store64_littleendian +void store64_littleendian(unsigned char *x,uint64 u); + +/* Macros required only for key expansion */ + +#define keyexpbs1(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7, bskey) \ + rotbyte(&b0);\ + rotbyte(&b1);\ 
+ rotbyte(&b2);\ + rotbyte(&b3);\ + rotbyte(&b4);\ + rotbyte(&b5);\ + rotbyte(&b6);\ + rotbyte(&b7);\ + ;\ + sbox(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7);\ + ;\ + xor_rcon(&b0);\ + shufb(&b0, EXPB0);\ + shufb(&b1, EXPB0);\ + shufb(&b4, EXPB0);\ + shufb(&b6, EXPB0);\ + shufb(&b3, EXPB0);\ + shufb(&b7, EXPB0);\ + shufb(&b2, EXPB0);\ + shufb(&b5, EXPB0);\ + shufb(&b0, EXPB0);\ + ;\ + t0 = *(int128 *)(bskey + 0);\ + t1 = *(int128 *)(bskey + 16);\ + t2 = *(int128 *)(bskey + 32);\ + t3 = *(int128 *)(bskey + 48);\ + t4 = *(int128 *)(bskey + 64);\ + t5 = *(int128 *)(bskey + 80);\ + t6 = *(int128 *)(bskey + 96);\ + t7 = *(int128 *)(bskey + 112);\ + ;\ + xor2(&b0, &t0);\ + xor2(&b1, &t1);\ + xor2(&b4, &t2);\ + xor2(&b6, &t3);\ + xor2(&b3, &t4);\ + xor2(&b7, &t5);\ + xor2(&b2, &t6);\ + xor2(&b5, &t7);\ + ;\ + rshift32_littleendian(&t0, 8);\ + rshift32_littleendian(&t1, 8);\ + rshift32_littleendian(&t2, 8);\ + rshift32_littleendian(&t3, 8);\ + rshift32_littleendian(&t4, 8);\ + rshift32_littleendian(&t5, 8);\ + rshift32_littleendian(&t6, 8);\ + rshift32_littleendian(&t7, 8);\ + ;\ + xor2(&b0, &t0);\ + xor2(&b1, &t1);\ + xor2(&b4, &t2);\ + xor2(&b6, &t3);\ + xor2(&b3, &t4);\ + xor2(&b7, &t5);\ + xor2(&b2, &t6);\ + xor2(&b5, &t7);\ + ;\ + rshift32_littleendian(&t0, 8);\ + rshift32_littleendian(&t1, 8);\ + rshift32_littleendian(&t2, 8);\ + rshift32_littleendian(&t3, 8);\ + rshift32_littleendian(&t4, 8);\ + rshift32_littleendian(&t5, 8);\ + rshift32_littleendian(&t6, 8);\ + rshift32_littleendian(&t7, 8);\ + ;\ + xor2(&b0, &t0);\ + xor2(&b1, &t1);\ + xor2(&b4, &t2);\ + xor2(&b6, &t3);\ + xor2(&b3, &t4);\ + xor2(&b7, &t5);\ + xor2(&b2, &t6);\ + xor2(&b5, &t7);\ + ;\ + rshift32_littleendian(&t0, 8);\ + rshift32_littleendian(&t1, 8);\ + rshift32_littleendian(&t2, 8);\ + rshift32_littleendian(&t3, 8);\ + rshift32_littleendian(&t4, 8);\ + rshift32_littleendian(&t5, 8);\ + rshift32_littleendian(&t6, 8);\ + rshift32_littleendian(&t7, 8);\ + ;\ + xor2(&b0, &t0);\ + 
xor2(&b1, &t1);\ + xor2(&b4, &t2);\ + xor2(&b6, &t3);\ + xor2(&b3, &t4);\ + xor2(&b7, &t5);\ + xor2(&b2, &t6);\ + xor2(&b5, &t7);\ + ;\ + *(int128 *)(bskey + 128) = b0;\ + *(int128 *)(bskey + 144) = b1;\ + *(int128 *)(bskey + 160) = b4;\ + *(int128 *)(bskey + 176) = b6;\ + *(int128 *)(bskey + 192) = b3;\ + *(int128 *)(bskey + 208) = b7;\ + *(int128 *)(bskey + 224) = b2;\ + *(int128 *)(bskey + 240) = b5;\ + +#define keyexpbs10(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7, bskey) ;\ + toggle(&b0);\ + toggle(&b1);\ + toggle(&b5);\ + toggle(&b6);\ + rotbyte(&b0);\ + rotbyte(&b1);\ + rotbyte(&b2);\ + rotbyte(&b3);\ + rotbyte(&b4);\ + rotbyte(&b5);\ + rotbyte(&b6);\ + rotbyte(&b7);\ + ;\ + sbox(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7);\ + ;\ + xor_rcon(&b1);\ + xor_rcon(&b4);\ + xor_rcon(&b3);\ + xor_rcon(&b7);\ + shufb(&b0, EXPB0);\ + shufb(&b1, EXPB0);\ + shufb(&b4, EXPB0);\ + shufb(&b6, EXPB0);\ + shufb(&b3, EXPB0);\ + shufb(&b7, EXPB0);\ + shufb(&b2, EXPB0);\ + shufb(&b5, EXPB0);\ + ;\ + t0 = *(int128 *)(bskey + 9 * 128 + 0);\ + t1 = *(int128 *)(bskey + 9 * 128 + 16);\ + t2 = *(int128 *)(bskey + 9 * 128 + 32);\ + t3 = *(int128 *)(bskey + 9 * 128 + 48);\ + t4 = *(int128 *)(bskey + 9 * 128 + 64);\ + t5 = *(int128 *)(bskey + 9 * 128 + 80);\ + t6 = *(int128 *)(bskey + 9 * 128 + 96);\ + t7 = *(int128 *)(bskey + 9 * 128 + 112);\ + ;\ + toggle(&t0);\ + toggle(&t1);\ + toggle(&t5);\ + toggle(&t6);\ + ;\ + xor2(&b0, &t0);\ + xor2(&b1, &t1);\ + xor2(&b4, &t2);\ + xor2(&b6, &t3);\ + xor2(&b3, &t4);\ + xor2(&b7, &t5);\ + xor2(&b2, &t6);\ + xor2(&b5, &t7);\ + ;\ + rshift32_littleendian(&t0, 8);\ + rshift32_littleendian(&t1, 8);\ + rshift32_littleendian(&t2, 8);\ + rshift32_littleendian(&t3, 8);\ + rshift32_littleendian(&t4, 8);\ + rshift32_littleendian(&t5, 8);\ + rshift32_littleendian(&t6, 8);\ + rshift32_littleendian(&t7, 8);\ + ;\ + xor2(&b0, &t0);\ + xor2(&b1, &t1);\ + xor2(&b4, &t2);\ + xor2(&b6, &t3);\ + xor2(&b3, &t4);\ + xor2(&b7, 
&t5);\ + xor2(&b2, &t6);\ + xor2(&b5, &t7);\ + ;\ + rshift32_littleendian(&t0, 8);\ + rshift32_littleendian(&t1, 8);\ + rshift32_littleendian(&t2, 8);\ + rshift32_littleendian(&t3, 8);\ + rshift32_littleendian(&t4, 8);\ + rshift32_littleendian(&t5, 8);\ + rshift32_littleendian(&t6, 8);\ + rshift32_littleendian(&t7, 8);\ + ;\ + xor2(&b0, &t0);\ + xor2(&b1, &t1);\ + xor2(&b4, &t2);\ + xor2(&b6, &t3);\ + xor2(&b3, &t4);\ + xor2(&b7, &t5);\ + xor2(&b2, &t6);\ + xor2(&b5, &t7);\ + ;\ + rshift32_littleendian(&t0, 8);\ + rshift32_littleendian(&t1, 8);\ + rshift32_littleendian(&t2, 8);\ + rshift32_littleendian(&t3, 8);\ + rshift32_littleendian(&t4, 8);\ + rshift32_littleendian(&t5, 8);\ + rshift32_littleendian(&t6, 8);\ + rshift32_littleendian(&t7, 8);\ + ;\ + xor2(&b0, &t0);\ + xor2(&b1, &t1);\ + xor2(&b4, &t2);\ + xor2(&b6, &t3);\ + xor2(&b3, &t4);\ + xor2(&b7, &t5);\ + xor2(&b2, &t6);\ + xor2(&b5, &t7);\ + ;\ + shufb(&b0, M0);\ + shufb(&b1, M0);\ + shufb(&b2, M0);\ + shufb(&b3, M0);\ + shufb(&b4, M0);\ + shufb(&b5, M0);\ + shufb(&b6, M0);\ + shufb(&b7, M0);\ + ;\ + *(int128 *)(bskey + 1280) = b0;\ + *(int128 *)(bskey + 1296) = b1;\ + *(int128 *)(bskey + 1312) = b4;\ + *(int128 *)(bskey + 1328) = b6;\ + *(int128 *)(bskey + 1344) = b3;\ + *(int128 *)(bskey + 1360) = b7;\ + *(int128 *)(bskey + 1376) = b2;\ + *(int128 *)(bskey + 1392) = b5;\ + + +#define keyexpbs(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7, rcon, i, bskey) \ + toggle(&b0);\ + toggle(&b1);\ + toggle(&b5);\ + toggle(&b6);\ + rotbyte(&b0);\ + rotbyte(&b1);\ + rotbyte(&b2);\ + rotbyte(&b3);\ + rotbyte(&b4);\ + rotbyte(&b5);\ + rotbyte(&b6);\ + rotbyte(&b7);\ + ;\ + sbox(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7);\ + ;\ + rcon;\ + shufb(&b0, EXPB0);\ + shufb(&b1, EXPB0);\ + shufb(&b4, EXPB0);\ + shufb(&b6, EXPB0);\ + shufb(&b3, EXPB0);\ + shufb(&b7, EXPB0);\ + shufb(&b2, EXPB0);\ + shufb(&b5, EXPB0);\ + ;\ + t0 = *(int128 *)(bskey + (i-1) * 128 + 0);\ + t1 = *(int128 
*)(bskey + (i-1) * 128 + 16);\ + t2 = *(int128 *)(bskey + (i-1) * 128 + 32);\ + t3 = *(int128 *)(bskey + (i-1) * 128 + 48);\ + t4 = *(int128 *)(bskey + (i-1) * 128 + 64);\ + t5 = *(int128 *)(bskey + (i-1) * 128 + 80);\ + t6 = *(int128 *)(bskey + (i-1) * 128 + 96);\ + t7 = *(int128 *)(bskey + (i-1) * 128 + 112);\ + ;\ + toggle(&t0);\ + toggle(&t1);\ + toggle(&t5);\ + toggle(&t6);\ + ;\ + xor2(&b0, &t0);\ + xor2(&b1, &t1);\ + xor2(&b4, &t2);\ + xor2(&b6, &t3);\ + xor2(&b3, &t4);\ + xor2(&b7, &t5);\ + xor2(&b2, &t6);\ + xor2(&b5, &t7);\ + ;\ + rshift32_littleendian(&t0, 8);\ + rshift32_littleendian(&t1, 8);\ + rshift32_littleendian(&t2, 8);\ + rshift32_littleendian(&t3, 8);\ + rshift32_littleendian(&t4, 8);\ + rshift32_littleendian(&t5, 8);\ + rshift32_littleendian(&t6, 8);\ + rshift32_littleendian(&t7, 8);\ + ;\ + xor2(&b0, &t0);\ + xor2(&b1, &t1);\ + xor2(&b4, &t2);\ + xor2(&b6, &t3);\ + xor2(&b3, &t4);\ + xor2(&b7, &t5);\ + xor2(&b2, &t6);\ + xor2(&b5, &t7);\ + ;\ + rshift32_littleendian(&t0, 8);\ + rshift32_littleendian(&t1, 8);\ + rshift32_littleendian(&t2, 8);\ + rshift32_littleendian(&t3, 8);\ + rshift32_littleendian(&t4, 8);\ + rshift32_littleendian(&t5, 8);\ + rshift32_littleendian(&t6, 8);\ + rshift32_littleendian(&t7, 8);\ + ;\ + xor2(&b0, &t0);\ + xor2(&b1, &t1);\ + xor2(&b4, &t2);\ + xor2(&b6, &t3);\ + xor2(&b3, &t4);\ + xor2(&b7, &t5);\ + xor2(&b2, &t6);\ + xor2(&b5, &t7);\ + ;\ + rshift32_littleendian(&t0, 8);\ + rshift32_littleendian(&t1, 8);\ + rshift32_littleendian(&t2, 8);\ + rshift32_littleendian(&t3, 8);\ + rshift32_littleendian(&t4, 8);\ + rshift32_littleendian(&t5, 8);\ + rshift32_littleendian(&t6, 8);\ + rshift32_littleendian(&t7, 8);\ + ;\ + xor2(&b0, &t0);\ + xor2(&b1, &t1);\ + xor2(&b4, &t2);\ + xor2(&b6, &t3);\ + xor2(&b3, &t4);\ + xor2(&b7, &t5);\ + xor2(&b2, &t6);\ + xor2(&b5, &t7);\ + ;\ + *(int128 *)(bskey + i*128 + 0) = b0;\ + *(int128 *)(bskey + i*128 + 16) = b1;\ + *(int128 *)(bskey + i*128 + 32) = b4;\ + *(int128 *)(bskey + i*128 + 
48) = b6;\ + *(int128 *)(bskey + i*128 + 64) = b3;\ + *(int128 *)(bskey + i*128 + 80) = b7;\ + *(int128 *)(bskey + i*128 + 96) = b2;\ + *(int128 *)(bskey + i*128 + 112) = b5;\ + +/* Macros used in multiple contexts */ + +#define bitslicekey0(key, bskey) \ + xmm0 = *(const int128 *) (key + 0);\ + shufb(&xmm0, M0);\ + copy2(&xmm1, &xmm0);\ + copy2(&xmm2, &xmm0);\ + copy2(&xmm3, &xmm0);\ + copy2(&xmm4, &xmm0);\ + copy2(&xmm5, &xmm0);\ + copy2(&xmm6, &xmm0);\ + copy2(&xmm7, &xmm0);\ + ;\ + bitslice(xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0, t);\ + ;\ + *(int128 *) (bskey + 0) = xmm0;\ + *(int128 *) (bskey + 16) = xmm1;\ + *(int128 *) (bskey + 32) = xmm2;\ + *(int128 *) (bskey + 48) = xmm3;\ + *(int128 *) (bskey + 64) = xmm4;\ + *(int128 *) (bskey + 80) = xmm5;\ + *(int128 *) (bskey + 96) = xmm6;\ + *(int128 *) (bskey + 112) = xmm7;\ + + +#define bitslicekey10(key, bskey) \ + xmm0 = *(int128 *) (key + 0);\ + copy2(xmm1, xmm0);\ + copy2(xmm2, xmm0);\ + copy2(xmm3, xmm0);\ + copy2(xmm4, xmm0);\ + copy2(xmm5, xmm0);\ + copy2(xmm6, xmm0);\ + copy2(xmm7, xmm0);\ + ;\ + bitslice(xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0, t);\ + ;\ + toggle(&xmm6);\ + toggle(&xmm5);\ + toggle(&xmm1);\ + toggle(&xmm0);\ + ;\ + *(int128 *) (bskey + 0 + 1280) = xmm0;\ + *(int128 *) (bskey + 16 + 1280) = xmm1;\ + *(int128 *) (bskey + 32 + 1280) = xmm2;\ + *(int128 *) (bskey + 48 + 1280) = xmm3;\ + *(int128 *) (bskey + 64 + 1280) = xmm4;\ + *(int128 *) (bskey + 80 + 1280) = xmm5;\ + *(int128 *) (bskey + 96 + 1280) = xmm6;\ + *(int128 *) (bskey + 112 + 1280) = xmm7;\ + + +#define bitslicekey(i,key,bskey) \ + xmm0 = *(int128 *) (key + 0);\ + shufb(&xmm0, M0);\ + copy2(&xmm1, &xmm0);\ + copy2(&xmm2, &xmm0);\ + copy2(&xmm3, &xmm0);\ + copy2(&xmm4, &xmm0);\ + copy2(&xmm5, &xmm0);\ + copy2(&xmm6, &xmm0);\ + copy2(&xmm7, &xmm0);\ + ;\ + bitslice(xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0, t);\ + ;\ + toggle(&xmm6);\ + toggle(&xmm5);\ + toggle(&xmm1);\ + toggle(&xmm0);\ + ;\ + *(int128 *) 
(bskey + 0 + 128*i) = xmm0;\ + *(int128 *) (bskey + 16 + 128*i) = xmm1;\ + *(int128 *) (bskey + 32 + 128*i) = xmm2;\ + *(int128 *) (bskey + 48 + 128*i) = xmm3;\ + *(int128 *) (bskey + 64 + 128*i) = xmm4;\ + *(int128 *) (bskey + 80 + 128*i) = xmm5;\ + *(int128 *) (bskey + 96 + 128*i) = xmm6;\ + *(int128 *) (bskey + 112 + 128*i) = xmm7;\ + + +#define bitslice(x0, x1, x2, x3, x4, x5, x6, x7, t) \ + swapmove(x0, x1, 1, BS0, t);\ + swapmove(x2, x3, 1, BS0, t);\ + swapmove(x4, x5, 1, BS0, t);\ + swapmove(x6, x7, 1, BS0, t);\ + ;\ + swapmove(x0, x2, 2, BS1, t);\ + swapmove(x1, x3, 2, BS1, t);\ + swapmove(x4, x6, 2, BS1, t);\ + swapmove(x5, x7, 2, BS1, t);\ + ;\ + swapmove(x0, x4, 4, BS2, t);\ + swapmove(x1, x5, 4, BS2, t);\ + swapmove(x2, x6, 4, BS2, t);\ + swapmove(x3, x7, 4, BS2, t);\ + + +#define swapmove(a, b, n, m, t) \ + copy2(&t, &b);\ + rshift64_littleendian(&t, n);\ + xor2(&t, &a);\ + and2(&t, &m);\ + xor2(&a, &t);\ + lshift64_littleendian(&t, n);\ + xor2(&b, &t); + +#define rotbyte(x) \ + shufb(x, ROTB) /* TODO: Make faster */ + + +/* Macros used for encryption (and decryption) */ + +#define shiftrows(x0, x1, x2, x3, x4, x5, x6, x7, i, M, bskey) \ + xor2(&x0, (const int128 *)(bskey + 128*(i-1) + 0));\ + shufb(&x0, M);\ + xor2(&x1, (const int128 *)(bskey + 128*(i-1) + 16));\ + shufb(&x1, M);\ + xor2(&x2, (const int128 *)(bskey + 128*(i-1) + 32));\ + shufb(&x2, M);\ + xor2(&x3, (const int128 *)(bskey + 128*(i-1) + 48));\ + shufb(&x3, M);\ + xor2(&x4, (const int128 *)(bskey + 128*(i-1) + 64));\ + shufb(&x4, M);\ + xor2(&x5, (const int128 *)(bskey + 128*(i-1) + 80));\ + shufb(&x5, M);\ + xor2(&x6, (const int128 *)(bskey + 128*(i-1) + 96));\ + shufb(&x6, M);\ + xor2(&x7, (const int128 *)(bskey + 128*(i-1) + 112));\ + shufb(&x7, M);\ + + +#define mixcolumns(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, t7) \ + shufd(&t0, &x0, 0x93);\ + shufd(&t1, &x1, 0x93);\ + shufd(&t2, &x2, 0x93);\ + shufd(&t3, &x3, 0x93);\ + shufd(&t4, &x4, 0x93);\ + shufd(&t5, &x5, 
0x93);\ + shufd(&t6, &x6, 0x93);\ + shufd(&t7, &x7, 0x93);\ + ;\ + xor2(&x0, &t0);\ + xor2(&x1, &t1);\ + xor2(&x2, &t2);\ + xor2(&x3, &t3);\ + xor2(&x4, &t4);\ + xor2(&x5, &t5);\ + xor2(&x6, &t6);\ + xor2(&x7, &t7);\ + ;\ + xor2(&t0, &x7);\ + xor2(&t1, &x0);\ + xor2(&t2, &x1);\ + xor2(&t1, &x7);\ + xor2(&t3, &x2);\ + xor2(&t4, &x3);\ + xor2(&t5, &x4);\ + xor2(&t3, &x7);\ + xor2(&t6, &x5);\ + xor2(&t7, &x6);\ + xor2(&t4, &x7);\ + ;\ + shufd(&x0, &x0, 0x4e);\ + shufd(&x1, &x1, 0x4e);\ + shufd(&x2, &x2, 0x4e);\ + shufd(&x3, &x3, 0x4e);\ + shufd(&x4, &x4, 0x4e);\ + shufd(&x5, &x5, 0x4e);\ + shufd(&x6, &x6, 0x4e);\ + shufd(&x7, &x7, 0x4e);\ + ;\ + xor2(&t0, &x0);\ + xor2(&t1, &x1);\ + xor2(&t2, &x2);\ + xor2(&t3, &x3);\ + xor2(&t4, &x4);\ + xor2(&t5, &x5);\ + xor2(&t6, &x6);\ + xor2(&t7, &x7);\ + + +#define aesround(i, b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7, bskey) \ + shiftrows(b0, b1, b2, b3, b4, b5, b6, b7, i, SR, bskey);\ + sbox(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7);\ + mixcolumns(b0, b1, b4, b6, b3, b7, b2, b5, t0, t1, t2, t3, t4, t5, t6, t7);\ + + +#define lastround(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7, bskey) \ + shiftrows(b0, b1, b2, b3, b4, b5, b6, b7, 10, SRM0, bskey);\ + sbox(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7);\ + xor2(&b0,(const int128 *)(bskey + 128*10));\ + xor2(&b1,(const int128 *)(bskey + 128*10+16));\ + xor2(&b4,(const int128 *)(bskey + 128*10+32));\ + xor2(&b6,(const int128 *)(bskey + 128*10+48));\ + xor2(&b3,(const int128 *)(bskey + 128*10+64));\ + xor2(&b7,(const int128 *)(bskey + 128*10+80));\ + xor2(&b2,(const int128 *)(bskey + 128*10+96));\ + xor2(&b5,(const int128 *)(bskey + 128*10+112));\ + + +#define sbox(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, s0, s1, s2, s3) \ + InBasisChange(b0, b1, b2, b3, b4, b5, b6, b7); \ + Inv_GF256(b6, b5, b0, b3, b7, b1, b4, b2, t0, t1, t2, t3, s0, s1, s2, s3); \ + OutBasisChange(b7, b1, b4, b2, b6, b5, b0, 
b3); \ + + +#define InBasisChange(b0, b1, b2, b3, b4, b5, b6, b7) \ + xor2(&b5, &b6);\ + xor2(&b2, &b1);\ + xor2(&b5, &b0);\ + xor2(&b6, &b2);\ + xor2(&b3, &b0);\ + ;\ + xor2(&b6, &b3);\ + xor2(&b3, &b7);\ + xor2(&b3, &b4);\ + xor2(&b7, &b5);\ + xor2(&b3, &b1);\ + ;\ + xor2(&b4, &b5);\ + xor2(&b2, &b7);\ + xor2(&b1, &b5);\ + +#define OutBasisChange(b0, b1, b2, b3, b4, b5, b6, b7) \ + xor2(&b0, &b6);\ + xor2(&b1, &b4);\ + xor2(&b2, &b0);\ + xor2(&b4, &b6);\ + xor2(&b6, &b1);\ + ;\ + xor2(&b1, &b5);\ + xor2(&b5, &b3);\ + xor2(&b2, &b5);\ + xor2(&b3, &b7);\ + xor2(&b7, &b5);\ + ;\ + xor2(&b4, &b7);\ + +#define Mul_GF4(x0, x1, y0, y1, t0) \ + copy2(&t0, &y0);\ + xor2(&t0, &y1);\ + and2(&t0, &x0);\ + xor2(&x0, &x1);\ + and2(&x0, &y1);\ + and2(&x1, &y0);\ + xor2(&x0, &x1);\ + xor2(&x1, &t0);\ + +#define Mul_GF4_N(x0, x1, y0, y1, t0) \ + copy2(&t0, &y0);\ + xor2(&t0, &y1);\ + and2(&t0, &x0);\ + xor2(&x0, &x1);\ + and2(&x0, &y1);\ + and2(&x1, &y0);\ + xor2(&x1, &x0);\ + xor2(&x0, &t0);\ + +#define Mul_GF4_2(x0, x1, x2, x3, y0, y1, t0, t1) \ + copy2(&t0, &y0);\ + xor2(&t0, &y1);\ + copy2(&t1, &t0);\ + and2(&t0, &x0);\ + and2(&t1, &x2);\ + xor2(&x0, &x1);\ + xor2(&x2, &x3);\ + and2(&x0, &y1);\ + and2(&x2, &y1);\ + and2(&x1, &y0);\ + and2(&x3, &y0);\ + xor2(&x0, &x1);\ + xor2(&x2, &x3);\ + xor2(&x1, &t0);\ + xor2(&x3, &t1);\ + +#define Mul_GF16(x0, x1, x2, x3, y0, y1, y2, y3, t0, t1, t2, t3) \ + copy2(&t0, &x0);\ + copy2(&t1, &x1);\ + Mul_GF4(x0, x1, y0, y1, t2);\ + xor2(&t0, &x2);\ + xor2(&t1, &x3);\ + xor2(&y0, &y2);\ + xor2(&y1, &y3);\ + Mul_GF4_N(t0, t1, y0, y1, t2);\ + Mul_GF4(x2, x3, y2, y3, t3);\ + ;\ + xor2(&x0, &t0);\ + xor2(&x2, &t0);\ + xor2(&x1, &t1);\ + xor2(&x3, &t1);\ + +#define Mul_GF16_2(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, t0, t1, t2, t3) \ + copy2(&t0, &x0);\ + copy2(&t1, &x1);\ + Mul_GF4(x0, x1, y0, y1, t2);\ + xor2(&t0, &x2);\ + xor2(&t1, &x3);\ + xor2(&y0, &y2);\ + xor2(&y1, &y3);\ + Mul_GF4_N(t0, t1, y0, y1, t3);\ + Mul_GF4(x2, x3, y2, y3, 
t2);\ + ;\ + xor2(&x0, &t0);\ + xor2(&x2, &t0);\ + xor2(&x1, &t1);\ + xor2(&x3, &t1);\ + ;\ + copy2(&t0, &x4);\ + copy2(&t1, &x5);\ + xor2(&t0, &x6);\ + xor2(&t1, &x7);\ + Mul_GF4_N(t0, t1, y0, y1, t3);\ + Mul_GF4(x6, x7, y2, y3, t2);\ + xor2(&y0, &y2);\ + xor2(&y1, &y3);\ + Mul_GF4(x4, x5, y0, y1, t3);\ + ;\ + xor2(&x4, &t0);\ + xor2(&x6, &t0);\ + xor2(&x5, &t1);\ + xor2(&x7, &t1);\ + +#define Inv_GF16(x0, x1, x2, x3, t0, t1, t2, t3) \ + copy2(&t0, &x1);\ + copy2(&t1, &x0);\ + and2(&t0, &x3);\ + or2(&t1, &x2);\ + copy2(&t2, &x1);\ + copy2(&t3, &x0);\ + or2(&t2, &x2);\ + or2(&t3, &x3);\ + xor2(&t2, &t3);\ + ;\ + xor2(&t0, &t2);\ + xor2(&t1, &t2);\ + ;\ + Mul_GF4_2(x0, x1, x2, x3, t1, t0, t2, t3);\ + + +#define Inv_GF256(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, s0, s1, s2, s3) \ + copy2(&t3, &x4);\ + copy2(&t2, &x5);\ + copy2(&t1, &x1);\ + copy2(&s1, &x7);\ + copy2(&s0, &x0);\ + ;\ + xor2(&t3, &x6);\ + xor2(&t2, &x7);\ + xor2(&t1, &x3);\ + xor2(&s1, &x6);\ + xor2(&s0, &x2);\ + ;\ + copy2(&s2, &t3);\ + copy2(&t0, &t2);\ + copy2(&s3, &t3);\ + ;\ + or2(&t2, &t1);\ + or2(&t3, &s0);\ + xor2(&s3, &t0);\ + and2(&s2, &s0);\ + and2(&t0, &t1);\ + xor2(&s0, &t1);\ + and2(&s3, &s0);\ + copy2(&s0, &x3);\ + xor2(&s0, &x2);\ + and2(&s1, &s0);\ + xor2(&t3, &s1);\ + xor2(&t2, &s1);\ + copy2(&s1, &x4);\ + xor2(&s1, &x5);\ + copy2(&s0, &x1);\ + copy2(&t1, &s1);\ + xor2(&s0, &x0);\ + or2(&t1, &s0);\ + and2(&s1, &s0);\ + xor2(&t0, &s1);\ + xor2(&t3, &s3);\ + xor2(&t2, &s2);\ + xor2(&t1, &s3);\ + xor2(&t0, &s2);\ + xor2(&t1, &s2);\ + copy2(&s0, &x7);\ + copy2(&s1, &x6);\ + copy2(&s2, &x5);\ + copy2(&s3, &x4);\ + and2(&s0, &x3);\ + and2(&s1, &x2);\ + and2(&s2, &x1);\ + or2(&s3, &x0);\ + xor2(&t3, &s0);\ + xor2(&t2, &s1);\ + xor2(&t1, &s2);\ + xor2(&t0, &s3);\ + ;\ + copy2(&s0, &t3);\ + xor2(&s0, &t2);\ + and2(&t3, &t1);\ + copy2(&s2, &t0);\ + xor2(&s2, &t3);\ + copy2(&s3, &s0);\ + and2(&s3, &s2);\ + xor2(&s3, &t2);\ + copy2(&s1, &t1);\ + xor2(&s1, &t0);\ + xor2(&t3, &t2);\ + 
and2(&s1, &t3);\ + xor2(&s1, &t0);\ + xor2(&t1, &s1);\ + copy2(&t2, &s2);\ + xor2(&t2, &s1);\ + and2(&t2, &t0);\ + xor2(&t1, &t2);\ + xor2(&s2, &t2);\ + and2(&s2, &s3);\ + xor2(&s2, &s0);\ + ;\ + Mul_GF16_2(x0, x1, x2, x3, x4, x5, x6, x7, s3, s2, s1, t1, s0, t0, t2, t3);\ + +#endif diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/common_aes128ctr.c b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/common_aes128ctr.c new file mode 100644 index 00000000..14a28cc6 --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/common_aes128ctr.c @@ -0,0 +1,64 @@ +#include "common.h" + +uint32 load32_bigendian(const unsigned char *x) +{ + return + (uint32) (x[3]) \ + | (((uint32) (x[2])) << 8) \ + | (((uint32) (x[1])) << 16) \ + | (((uint32) (x[0])) << 24) + ; +} + +void store32_bigendian(unsigned char *x,uint32 u) +{ + x[3] = u; u >>= 8; + x[2] = u; u >>= 8; + x[1] = u; u >>= 8; + x[0] = u; +} + +uint32 load32_littleendian(const unsigned char *x) +{ + return + (uint32) (x[0]) \ + | (((uint32) (x[1])) << 8) \ + | (((uint32) (x[2])) << 16) \ + | (((uint32) (x[3])) << 24) + ; +} + +void store32_littleendian(unsigned char *x,uint32 u) +{ + x[0] = u; u >>= 8; + x[1] = u; u >>= 8; + x[2] = u; u >>= 8; + x[3] = u; +} + + +uint64 load64_littleendian(const unsigned char *x) +{ + return + (uint64) (x[0]) \ + | (((uint64) (x[1])) << 8) \ + | (((uint64) (x[2])) << 16) \ + | (((uint64) (x[3])) << 24) + | (((uint64) (x[4])) << 32) + | (((uint64) (x[5])) << 40) + | (((uint64) (x[6])) << 48) + | (((uint64) (x[7])) << 56) + ; +} + +void store64_littleendian(unsigned char *x,uint64 u) +{ + x[0] = u; u >>= 8; + x[1] = u; u >>= 8; + x[2] = u; u >>= 8; + x[3] = u; u >>= 8; + x[4] = u; u >>= 8; + x[5] = u; u >>= 8; + x[6] = u; u >>= 8; + x[7] = u; +} diff --git 
a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/consts.h b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/consts.h new file mode 100644 index 00000000..4c50360b --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/consts.h @@ -0,0 +1,28 @@ +#ifndef CONSTS_H +#define CONSTS_H + +#include "int128.h" + +#define ROTB crypto_stream_aes128ctr_portable_ROTB +#define M0 crypto_stream_aes128ctr_portable_M0 +#define EXPB0 crypto_stream_aes128ctr_portable_EXPB0 +#define SWAP32 crypto_stream_aes128ctr_portable_SWAP32 +#define M0SWAP crypto_stream_aes128ctr_portable_M0SWAP +#define SR crypto_stream_aes128ctr_portable_SR +#define SRM0 crypto_stream_aes128ctr_portable_SRM0 +#define BS0 crypto_stream_aes128ctr_portable_BS0 +#define BS1 crypto_stream_aes128ctr_portable_BS1 +#define BS2 crypto_stream_aes128ctr_portable_BS2 + +extern const unsigned char ROTB[16]; +extern const unsigned char M0[16]; +extern const unsigned char EXPB0[16]; +extern const unsigned char SWAP32[16]; +extern const unsigned char M0SWAP[16]; +extern const unsigned char SR[16]; +extern const unsigned char SRM0[16]; +extern const int128 BS0; +extern const int128 BS1; +extern const int128 BS2; + +#endif diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/consts_aes128ctr.c b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/consts_aes128ctr.c new file mode 100644 index 00000000..f8029b84 --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/consts_aes128ctr.c @@ -0,0 +1,14 @@ +#include "consts.h" + +const unsigned char ROTB[16] = {0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x08}; +const unsigned char M0[16] = {0x0f, 0x0b, 0x07, 
0x03, 0x0e, 0x0a, 0x06, 0x02, 0x0d, 0x09, 0x05, 0x01, 0x0c, 0x08, 0x04, 0x00}; +const unsigned char EXPB0[16] = {0x03, 0x03, 0x03, 0x03, 0x07, 0x07, 0x07, 0x07, 0x0b, 0x0b, 0x0b, 0x0b, 0x0f, 0x0f, 0x0f, 0x0f}; + +const unsigned char SWAP32[16] = {0x03, 0x02, 0x01, 0x00, 0x07, 0x06, 0x05, 0x04, 0x0b, 0x0a, 0x09, 0x08, 0x0f, 0x0e, 0x0d, 0x0c}; +const unsigned char M0SWAP[16] = {0x0c, 0x08, 0x04, 0x00, 0x0d, 0x09, 0x05, 0x01, 0x0e, 0x0a, 0x06, 0x02, 0x0f, 0x0b, 0x07, 0x03}; +const unsigned char SR[16] = {0x01, 0x02, 0x03, 0x00, 0x06, 0x07, 0x04, 0x05, 0x0b, 0x08, 0x09, 0x0a, 0x0c, 0x0d, 0x0e, 0x0f}; +const unsigned char SRM0[16] = {0x0f, 0x0a, 0x05, 0x00, 0x0e, 0x09, 0x04, 0x03, 0x0d, 0x08, 0x07, 0x02, 0x0c, 0x0b, 0x06, 0x01}; + +const int128 BS0 = {{0x5555555555555555ULL, 0x5555555555555555ULL}}; +const int128 BS1 = {{0x3333333333333333ULL, 0x3333333333333333ULL}}; +const int128 BS2 = {{0x0f0f0f0f0f0f0f0fULL, 0x0f0f0f0f0f0f0f0fULL}}; diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/int128.h b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/int128.h new file mode 100644 index 00000000..3fd2111d --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/int128.h @@ -0,0 +1,56 @@ +#ifndef INT128_H +#define INT128_H + +#include <stdint.h> + +#include "common.h" + +#ifdef __cplusplus +# if __GNUC__ +# pragma GCC diagnostic ignored "-Wlong-long" +# endif +#endif + +typedef union { + uint64_t u64[2]; + uint32_t u32[4]; + uint8_t u8[16]; +} int128; + +#define xor2 crypto_stream_aes128ctr_portable_xor2 +void xor2(int128 *r, const int128 *x); + +#define and2 crypto_stream_aes128ctr_portable_and2 +void and2(int128 *r, const int128 *x); + +#define or2 crypto_stream_aes128ctr_portable_or2 +void or2(int128 *r, const int128 *x); + +#define copy2 crypto_stream_aes128ctr_portable_copy2 +void copy2(int128 *r, 
const int128 *x); + +#define shufb crypto_stream_aes128ctr_portable_shufb +void shufb(int128 *r, const unsigned char *l); + +#define shufd crypto_stream_aes128ctr_portable_shufd +void shufd(int128 *r, const int128 *x, const unsigned int c); + +#define rshift32_littleendian crypto_stream_aes128ctr_portable_rshift32_littleendian +void rshift32_littleendian(int128 *r, const unsigned int n); + +#define rshift64_littleendian crypto_stream_aes128ctr_portable_rshift64_littleendian +void rshift64_littleendian(int128 *r, const unsigned int n); + +#define lshift64_littleendian crypto_stream_aes128ctr_portable_lshift64_littleendian +void lshift64_littleendian(int128 *r, const unsigned int n); + +#define toggle crypto_stream_aes128ctr_portable_toggle +void toggle(int128 *r); + +#define xor_rcon crypto_stream_aes128ctr_portable_xor_rcon +void xor_rcon(int128 *r); + +#define add_uint32_big crypto_stream_aes128ctr_portable_add_uint32_big +void add_uint32_big(int128 *r, uint32 x); + +#endif diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/int128_aes128ctr.c b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/int128_aes128ctr.c new file mode 100644 index 00000000..703de39b --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/int128_aes128ctr.c @@ -0,0 +1,131 @@ + +#include "int128.h" +#include "common.h" + +void xor2(int128 *r, const int128 *x) +{ + r->u64[0] ^= x->u64[0]; + r->u64[1] ^= x->u64[1]; +} + +void and2(int128 *r, const int128 *x) +{ + r->u64[0] &= x->u64[0]; + r->u64[1] &= x->u64[1]; +} + +void or2(int128 *r, const int128 *x) +{ + r->u64[0] |= x->u64[0]; + r->u64[1] |= x->u64[1]; +} + +void copy2(int128 *r, const int128 *x) +{ + r->u64[0] = x->u64[0]; + r->u64[1] = x->u64[1]; +} + +void shufb(int128 *r, const unsigned char *l) +{ + int128 t; + uint8_t *ct; + uint8_t *cr; + + copy2(&t, r); + cr = 
r->u8; + ct = t.u8; + cr[0] = ct[l[0]]; + cr[1] = ct[l[1]]; + cr[2] = ct[l[2]]; + cr[3] = ct[l[3]]; + cr[4] = ct[l[4]]; + cr[5] = ct[l[5]]; + cr[6] = ct[l[6]]; + cr[7] = ct[l[7]]; + cr[8] = ct[l[8]]; + cr[9] = ct[l[9]]; + cr[10] = ct[l[10]]; + cr[11] = ct[l[11]]; + cr[12] = ct[l[12]]; + cr[13] = ct[l[13]]; + cr[14] = ct[l[14]]; + cr[15] = ct[l[15]]; +} + +void shufd(int128 *r, const int128 *x, const unsigned int c) +{ + int128 t; + + t.u32[0] = x->u32[c >> 0 & 3]; + t.u32[1] = x->u32[c >> 2 & 3]; + t.u32[2] = x->u32[c >> 4 & 3]; + t.u32[3] = x->u32[c >> 6 & 3]; + copy2(r, &t); +} + +void rshift32_littleendian(int128 *r, const unsigned int n) +{ + unsigned char *rp = (unsigned char *)r; + uint32 t; + t = load32_littleendian(rp); + t >>= n; + store32_littleendian(rp, t); + t = load32_littleendian(rp+4); + t >>= n; + store32_littleendian(rp+4, t); + t = load32_littleendian(rp+8); + t >>= n; + store32_littleendian(rp+8, t); + t = load32_littleendian(rp+12); + t >>= n; + store32_littleendian(rp+12, t); +} + +void rshift64_littleendian(int128 *r, const unsigned int n) +{ + unsigned char *rp = (unsigned char *)r; + uint64 t; + t = load64_littleendian(rp); + t >>= n; + store64_littleendian(rp, t); + t = load64_littleendian(rp+8); + t >>= n; + store64_littleendian(rp+8, t); +} + +void lshift64_littleendian(int128 *r, const unsigned int n) +{ + unsigned char *rp = (unsigned char *)r; + uint64 t; + t = load64_littleendian(rp); + t <<= n; + store64_littleendian(rp, t); + t = load64_littleendian(rp+8); + t <<= n; + store64_littleendian(rp+8, t); +} + +void toggle(int128 *r) +{ + r->u64[0] ^= 0xffffffffffffffffULL; + r->u64[1] ^= 0xffffffffffffffffULL; +} + +void xor_rcon(int128 *r) +{ + unsigned char *rp = (unsigned char *)r; + uint32 t; + t = load32_littleendian(rp+12); + t ^= 0xffffffff; + store32_littleendian(rp+12, t); +} + +void add_uint32_big(int128 *r, uint32 x) +{ + unsigned char *rp = (unsigned char *)r; + uint32 t; + t = load32_littleendian(rp+12); + t += x; + 
store32_littleendian(rp+12, t); +} diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/stream_aes128ctr.c b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/stream_aes128ctr.c new file mode 100644 index 00000000..8f4ec72a --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/stream_aes128ctr.c @@ -0,0 +1,28 @@ +#include "api.h" + +int crypto_stream( + unsigned char *out, + unsigned long long outlen, + const unsigned char *n, + const unsigned char *k + ) +{ + unsigned char d[crypto_stream_BEFORENMBYTES]; + crypto_stream_beforenm(d, k); + crypto_stream_afternm(out, outlen, n, d); + return 0; +} + +int crypto_stream_xor( + unsigned char *out, + const unsigned char *in, + unsigned long long inlen, + const unsigned char *n, + const unsigned char *k + ) +{ + unsigned char d[crypto_stream_BEFORENMBYTES]; + crypto_stream_beforenm(d, k); + crypto_stream_xor_afternm(out, in, inlen, n, d); + return 0; +} diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/types.h b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/types.h new file mode 100644 index 00000000..6aa502fc --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/types.h @@ -0,0 +1,10 @@ +#ifndef TYPES_H +#define TYPES_H + +#include "crypto_uint32.h" +typedef crypto_uint32 uint32; + +#include "crypto_uint64.h" +typedef crypto_uint64 uint64; + +#endif diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/xor_afternm_aes128ctr.c b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/xor_afternm_aes128ctr.c new file mode 100644 index 00000000..139dbe51 --- /dev/null +++ 
b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/portable/xor_afternm_aes128ctr.c @@ -0,0 +1,181 @@ +/* Author: Peter Schwabe, ported from an assembly implementation by Emilia Käsper + * Date: 2009-03-19 + * Public domain */ + +#include <stdio.h> +#include "api.h" +#include "int128.h" +#include "common.h" +#include "consts.h" + +int crypto_stream_xor_afternm(unsigned char *out, const unsigned char *in, unsigned long long len, const unsigned char *nonce, const unsigned char *c) +{ + + int128 xmm0; + int128 xmm1; + int128 xmm2; + int128 xmm3; + int128 xmm4; + int128 xmm5; + int128 xmm6; + int128 xmm7; + + int128 xmm8; + int128 xmm9; + int128 xmm10; + int128 xmm11; + int128 xmm12; + int128 xmm13; + int128 xmm14; + int128 xmm15; + + int128 nonce_stack; + unsigned long long lensav; + unsigned char bl[128]; + unsigned char *blp; + unsigned char *np; + unsigned char b; + + uint32 tmp; + + /* Copy nonce on the stack */ + copy2(&nonce_stack, (const int128 *) (nonce + 0)); + np = (unsigned char *)&nonce_stack; + + enc_block: + + xmm0 = *(int128 *) (np + 0); + copy2(&xmm1, &xmm0); + shufb(&xmm1, SWAP32); + copy2(&xmm2, &xmm1); + copy2(&xmm3, &xmm1); + copy2(&xmm4, &xmm1); + copy2(&xmm5, &xmm1); + copy2(&xmm6, &xmm1); + copy2(&xmm7, &xmm1); + + add_uint32_big(&xmm1, 1); + add_uint32_big(&xmm2, 2); + add_uint32_big(&xmm3, 3); + add_uint32_big(&xmm4, 4); + add_uint32_big(&xmm5, 5); + add_uint32_big(&xmm6, 6); + add_uint32_big(&xmm7, 7); + + shufb(&xmm0, M0); + shufb(&xmm1, M0SWAP); + shufb(&xmm2, M0SWAP); + shufb(&xmm3, M0SWAP); + shufb(&xmm4, M0SWAP); + shufb(&xmm5, M0SWAP); + shufb(&xmm6, M0SWAP); + shufb(&xmm7, M0SWAP); + + bitslice(xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0, xmm8) + + aesround( 1, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c) + aesround( 2, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,c) + aesround( 
3, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c) + aesround( 4, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,c) + aesround( 5, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c) + aesround( 6, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,c) + aesround( 7, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c) + aesround( 8, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,c) + aesround( 9, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,c) + lastround(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,c) + + bitslice(xmm13, xmm10, xmm15, xmm11, xmm14, xmm12, xmm9, xmm8, xmm0) + + if(len < 128) goto partial; + if(len == 128) goto full; + + tmp = load32_bigendian(np + 12); + tmp += 8; + store32_bigendian(np + 12, tmp); + + xor2(&xmm8, (const int128 *)(in + 0)); + xor2(&xmm9, (const int128 *)(in + 16)); + xor2(&xmm12, (const int128 *)(in + 32)); + xor2(&xmm14, (const int128 *)(in + 48)); + xor2(&xmm11, (const int128 *)(in + 64)); + xor2(&xmm15, (const int128 *)(in + 80)); + xor2(&xmm10, (const int128 *)(in + 96)); + xor2(&xmm13, (const int128 *)(in + 112)); + + *(int128 *) (out + 0) = xmm8; + *(int128 *) (out + 16) = xmm9; + *(int128 *) (out + 32) = xmm12; + *(int128 *) (out + 48) = xmm14; + *(int128 *) (out + 64) = xmm11; + *(int128 *) (out + 80) = xmm15; + *(int128 *) (out + 96) = xmm10; + *(int128 *) (out + 112) = xmm13; + + len -= 128; + in += 128; + out += 128; + + goto enc_block; + + partial: + + lensav = len; + len >>= 4; + + tmp = load32_bigendian(np + 12); + tmp += len; + store32_bigendian(np + 12, tmp); + + blp = bl; + *(int128 *)(blp + 0) 
= xmm8; + *(int128 *)(blp + 16) = xmm9; + *(int128 *)(blp + 32) = xmm12; + *(int128 *)(blp + 48) = xmm14; + *(int128 *)(blp + 64) = xmm11; + *(int128 *)(blp + 80) = xmm15; + *(int128 *)(blp + 96) = xmm10; + *(int128 *)(blp + 112) = xmm13; + + bytes: + + if(lensav == 0) goto end; + + b = blp[0]; /* clang false positive */ + b ^= *(const unsigned char *)(in + 0); + *(unsigned char *)(out + 0) = b; + + blp += 1; + in +=1; + out +=1; + lensav -= 1; + + goto bytes; + + full: + + tmp = load32_bigendian(np + 12); + tmp += 8; + store32_bigendian(np + 12, tmp); + + xor2(&xmm8, (const int128 *)(in + 0)); + xor2(&xmm9, (const int128 *)(in + 16)); + xor2(&xmm12, (const int128 *)(in + 32)); + xor2(&xmm14, (const int128 *)(in + 48)); + xor2(&xmm11, (const int128 *)(in + 64)); + xor2(&xmm15, (const int128 *)(in + 80)); + xor2(&xmm10, (const int128 *)(in + 96)); + xor2(&xmm13, (const int128 *)(in + 112)); + + *(int128 *) (out + 0) = xmm8; + *(int128 *) (out + 16) = xmm9; + *(int128 *) (out + 32) = xmm12; + *(int128 *) (out + 48) = xmm14; + *(int128 *) (out + 64) = xmm11; + *(int128 *) (out + 80) = xmm15; + *(int128 *) (out + 96) = xmm10; + *(int128 *) (out + 112) = xmm13; + + end: + return 0; + +} diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/stream_aes128ctr_api.c b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/stream_aes128ctr_api.c new file mode 100644 index 00000000..184ad3fb --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/aes128ctr/stream_aes128ctr_api.c @@ -0,0 +1,16 @@ +#include "crypto_stream_aes128ctr.h" + +size_t +crypto_stream_aes128ctr_keybytes(void) { + return crypto_stream_aes128ctr_KEYBYTES; +} + +size_t +crypto_stream_aes128ctr_noncebytes(void) { + return crypto_stream_aes128ctr_NONCEBYTES; +} + +size_t +crypto_stream_aes128ctr_beforenmbytes(void) { + return crypto_stream_aes128ctr_BEFORENMBYTES; +} diff --git 
a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/chacha20/ref/api.h b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/chacha20/ref/api.h new file mode 100644 index 00000000..3d858670 --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/chacha20/ref/api.h @@ -0,0 +1,12 @@ + +#include "crypto_stream_chacha20.h" + +int +crypto_stream_chacha20_ref(unsigned char *c, unsigned long long clen, + const unsigned char *n, const unsigned char *k); + +int +crypto_stream_chacha20_ref_xor_ic(unsigned char *c, const unsigned char *m, + unsigned long long mlen, + const unsigned char *n, uint64_t ic, + const unsigned char *k); diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/chacha20/ref/stream_chacha20_ref.c b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/chacha20/ref/stream_chacha20_ref.c new file mode 100644 index 00000000..967015f2 --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/chacha20/ref/stream_chacha20_ref.c @@ -0,0 +1,276 @@ + +/* $OpenBSD: chacha.c,v 1.1 2013/11/21 00:45:44 djm Exp $ */ + +/* + chacha-merged.c version 20080118 + D. J. Bernstein + Public domain. 
+ */ + +#include <stdint.h> +#include <string.h> + +#include "api.h" +#include "crypto_stream_chacha20.h" +#include "utils.h" + +struct chacha_ctx { + uint32_t input[16]; +}; + +typedef uint8_t u8; +typedef uint32_t u32; + +typedef struct chacha_ctx chacha_ctx; + +#define U8C(v) (v##U) +#define U32C(v) (v##U) + +#define U8V(v) ((u8)(v) & U8C(0xFF)) +#define U32V(v) ((u32)(v) & U32C(0xFFFFFFFF)) + +#define ROTL32(v, n) \ + (U32V((v) << (n)) | ((v) >> (32 - (n)))) + +#define U8TO32_LITTLE(p) \ + (((u32)((p)[0]) ) | \ + ((u32)((p)[1]) << 8) | \ + ((u32)((p)[2]) << 16) | \ + ((u32)((p)[3]) << 24)) + +#define U32TO8_LITTLE(p, v) \ + do { \ + (p)[0] = U8V((v) ); \ + (p)[1] = U8V((v) >> 8); \ + (p)[2] = U8V((v) >> 16); \ + (p)[3] = U8V((v) >> 24); \ + } while (0) + +#define ROTATE(v,c) (ROTL32(v,c)) +#define XOR(v,w) ((v) ^ (w)) +#define PLUS(v,w) (U32V((v) + (w))) +#define PLUSONE(v) (PLUS((v),1)) + +#define QUARTERROUND(a,b,c,d) \ + a = PLUS(a,b); d = ROTATE(XOR(d,a),16); \ + c = PLUS(c,d); b = ROTATE(XOR(b,c),12); \ + a = PLUS(a,b); d = ROTATE(XOR(d,a), 8); \ + c = PLUS(c,d); b = ROTATE(XOR(b,c), 7); + +static const unsigned char sigma[16] = { + 'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k' +}; + +static void +chacha_keysetup(chacha_ctx *x, const u8 *k) +{ + const unsigned char *constants; + + x->input[4] = U8TO32_LITTLE(k + 0); + x->input[5] = U8TO32_LITTLE(k + 4); + x->input[6] = U8TO32_LITTLE(k + 8); + x->input[7] = U8TO32_LITTLE(k + 12); + k += 16; + constants = sigma; + x->input[8] = U8TO32_LITTLE(k + 0); + x->input[9] = U8TO32_LITTLE(k + 4); + x->input[10] = U8TO32_LITTLE(k + 8); + x->input[11] = U8TO32_LITTLE(k + 12); + x->input[0] = U8TO32_LITTLE(constants + 0); + x->input[1] = U8TO32_LITTLE(constants + 4); + x->input[2] = U8TO32_LITTLE(constants + 8); + x->input[3] = U8TO32_LITTLE(constants + 12); +} + +static void +chacha_ivsetup(chacha_ctx *x, const u8 *iv, const u8 *counter) +{ + x->input[12] = counter == NULL ? 
0 : U8TO32_LITTLE(counter + 0); + x->input[13] = counter == NULL ? 0 : U8TO32_LITTLE(counter + 4); + x->input[14] = U8TO32_LITTLE(iv + 0); + x->input[15] = U8TO32_LITTLE(iv + 4); +} + +static void +chacha_encrypt_bytes(chacha_ctx *x, const u8 *m, u8 *c, unsigned long long bytes) +{ + u32 x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15; + u32 j0, j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11, j12, j13, j14, j15; + u8 *ctarget = NULL; + u8 tmp[64]; + unsigned int i; + + if (!bytes) { + return; /* LCOV_EXCL_LINE */ + } + j0 = x->input[0]; + j1 = x->input[1]; + j2 = x->input[2]; + j3 = x->input[3]; + j4 = x->input[4]; + j5 = x->input[5]; + j6 = x->input[6]; + j7 = x->input[7]; + j8 = x->input[8]; + j9 = x->input[9]; + j10 = x->input[10]; + j11 = x->input[11]; + j12 = x->input[12]; + j13 = x->input[13]; + j14 = x->input[14]; + j15 = x->input[15]; + + for (;;) { + if (bytes < 64) { + for (i = 0; i < bytes; ++i) { + tmp[i] = m[i]; + } + m = tmp; + ctarget = c; + c = tmp; + } + x0 = j0; + x1 = j1; + x2 = j2; + x3 = j3; + x4 = j4; + x5 = j5; + x6 = j6; + x7 = j7; + x8 = j8; + x9 = j9; + x10 = j10; + x11 = j11; + x12 = j12; + x13 = j13; + x14 = j14; + x15 = j15; + for (i = 20; i > 0; i -= 2) { + QUARTERROUND(x0, x4, x8, x12) + QUARTERROUND(x1, x5, x9, x13) + QUARTERROUND(x2, x6, x10, x14) + QUARTERROUND(x3, x7, x11, x15) + QUARTERROUND(x0, x5, x10, x15) + QUARTERROUND(x1, x6, x11, x12) + QUARTERROUND(x2, x7, x8, x13) + QUARTERROUND(x3, x4, x9, x14) + } + x0 = PLUS(x0, j0); + x1 = PLUS(x1, j1); + x2 = PLUS(x2, j2); + x3 = PLUS(x3, j3); + x4 = PLUS(x4, j4); + x5 = PLUS(x5, j5); + x6 = PLUS(x6, j6); + x7 = PLUS(x7, j7); + x8 = PLUS(x8, j8); + x9 = PLUS(x9, j9); + x10 = PLUS(x10, j10); + x11 = PLUS(x11, j11); + x12 = PLUS(x12, j12); + x13 = PLUS(x13, j13); + x14 = PLUS(x14, j14); + x15 = PLUS(x15, j15); + + x0 = XOR(x0, U8TO32_LITTLE(m + 0)); + x1 = XOR(x1, U8TO32_LITTLE(m + 4)); + x2 = XOR(x2, U8TO32_LITTLE(m + 8)); + x3 = XOR(x3, U8TO32_LITTLE(m + 12)); + 
x4 = XOR(x4, U8TO32_LITTLE(m + 16)); + x5 = XOR(x5, U8TO32_LITTLE(m + 20)); + x6 = XOR(x6, U8TO32_LITTLE(m + 24)); + x7 = XOR(x7, U8TO32_LITTLE(m + 28)); + x8 = XOR(x8, U8TO32_LITTLE(m + 32)); + x9 = XOR(x9, U8TO32_LITTLE(m + 36)); + x10 = XOR(x10, U8TO32_LITTLE(m + 40)); + x11 = XOR(x11, U8TO32_LITTLE(m + 44)); + x12 = XOR(x12, U8TO32_LITTLE(m + 48)); + x13 = XOR(x13, U8TO32_LITTLE(m + 52)); + x14 = XOR(x14, U8TO32_LITTLE(m + 56)); + x15 = XOR(x15, U8TO32_LITTLE(m + 60)); + + j12 = PLUSONE(j12); + /* LCOV_EXCL_START */ + if (!j12) { + j13 = PLUSONE(j13); + } + /* LCOV_EXCL_STOP */ + + U32TO8_LITTLE(c + 0, x0); + U32TO8_LITTLE(c + 4, x1); + U32TO8_LITTLE(c + 8, x2); + U32TO8_LITTLE(c + 12, x3); + U32TO8_LITTLE(c + 16, x4); + U32TO8_LITTLE(c + 20, x5); + U32TO8_LITTLE(c + 24, x6); + U32TO8_LITTLE(c + 28, x7); + U32TO8_LITTLE(c + 32, x8); + U32TO8_LITTLE(c + 36, x9); + U32TO8_LITTLE(c + 40, x10); + U32TO8_LITTLE(c + 44, x11); + U32TO8_LITTLE(c + 48, x12); + U32TO8_LITTLE(c + 52, x13); + U32TO8_LITTLE(c + 56, x14); + U32TO8_LITTLE(c + 60, x15); + + if (bytes <= 64) { + if (bytes < 64) { + for (i = 0; i < (unsigned int) bytes; ++i) { + ctarget[i] = c[i]; + } + } + x->input[12] = j12; + x->input[13] = j13; + return; + } + bytes -= 64; + c += 64; + m += 64; + } +} + +int +crypto_stream_chacha20_ref(unsigned char *c, unsigned long long clen, + const unsigned char *n, const unsigned char *k) +{ + struct chacha_ctx ctx; + + if (!clen) { + return 0; + } + (void) sizeof(int[crypto_stream_chacha20_KEYBYTES == 256 / 8 ? 
1 : -1]); + chacha_keysetup(&ctx, k); + chacha_ivsetup(&ctx, n, NULL); + memset(c, 0, clen); + chacha_encrypt_bytes(&ctx, c, c, clen); + sodium_memzero(&ctx, sizeof ctx); + + return 0; +} + +int +crypto_stream_chacha20_ref_xor_ic(unsigned char *c, const unsigned char *m, + unsigned long long mlen, + const unsigned char *n, uint64_t ic, + const unsigned char *k) +{ + struct chacha_ctx ctx; + uint8_t ic_bytes[8]; + uint32_t ic_high; + uint32_t ic_low; + + if (!mlen) { + return 0; + } + ic_high = U32V(ic >> 32); + ic_low = U32V(ic); + U32TO8_LITTLE(&ic_bytes[0], ic_low); + U32TO8_LITTLE(&ic_bytes[4], ic_high); + chacha_keysetup(&ctx, k); + chacha_ivsetup(&ctx, n, ic_bytes); + chacha_encrypt_bytes(&ctx, m, c, mlen); + sodium_memzero(&ctx, sizeof ctx); + sodium_memzero(ic_bytes, sizeof ic_bytes); + + return 0; +} diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/chacha20/stream_chacha20_api.c b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/chacha20/stream_chacha20_api.c new file mode 100644 index 00000000..412cdfab --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/chacha20/stream_chacha20_api.c @@ -0,0 +1,36 @@ +#include "crypto_stream_chacha20.h" +#include "ref/api.h" + +size_t +crypto_stream_chacha20_keybytes(void) { + return crypto_stream_chacha20_KEYBYTES; +} + +size_t +crypto_stream_chacha20_noncebytes(void) { + return crypto_stream_chacha20_NONCEBYTES; +} + +int +crypto_stream_chacha20(unsigned char *c, unsigned long long clen, + const unsigned char *n, const unsigned char *k) +{ + return crypto_stream_chacha20_ref(c, clen, n, k); +} + +int +crypto_stream_chacha20_xor_ic(unsigned char *c, const unsigned char *m, + unsigned long long mlen, + const unsigned char *n, uint64_t ic, + const unsigned char *k) +{ + return crypto_stream_chacha20_ref_xor_ic(c, m, mlen, n, ic, k); +} + +int +crypto_stream_chacha20_xor(unsigned char *c, const unsigned 
char *m, + unsigned long long mlen, const unsigned char *n, + const unsigned char *k) +{ + return crypto_stream_chacha20_ref_xor_ic(c, m, mlen, n, 0U, k); +} diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/crypto_stream.c b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/crypto_stream.c new file mode 100644 index 00000000..50a9c1c0 --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/crypto_stream.c @@ -0,0 +1,36 @@ + +#include "crypto_stream.h" + +size_t +crypto_stream_keybytes(void) +{ + return crypto_stream_KEYBYTES; +} + +size_t +crypto_stream_noncebytes(void) +{ + return crypto_stream_NONCEBYTES; +} + +const char * +crypto_stream_primitive(void) +{ + return crypto_stream_PRIMITIVE; +} + +int +crypto_stream(unsigned char *c, unsigned long long clen, + const unsigned char *n, const unsigned char *k) +{ + return crypto_stream_xsalsa20(c, clen, n, k); +} + + +int +crypto_stream_xor(unsigned char *c, const unsigned char *m, + unsigned long long mlen, const unsigned char *n, + const unsigned char *k) +{ + return crypto_stream_xsalsa20_xor(c, m, mlen, n, k); +} diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa20/amd64_xmm6/api.h b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa20/amd64_xmm6/api.h new file mode 100644 index 00000000..037fb59d --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa20/amd64_xmm6/api.h @@ -0,0 +1 @@ +#include "crypto_stream_salsa20.h" diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa20/amd64_xmm6/stream_salsa20_amd64_xmm6.S b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa20/amd64_xmm6/stream_salsa20_amd64_xmm6.S new file mode 100644 index 00000000..f2415685 --- /dev/null +++ 
b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa20/amd64_xmm6/stream_salsa20_amd64_xmm6.S @@ -0,0 +1,944 @@ +#if defined(__amd64) || defined(__amd64__) || defined(__x86_64__) + +.text +.p2align 5 + +.globl crypto_stream_salsa20 +.globl _crypto_stream_salsa20 +#ifdef __ELF__ +.type crypto_stream_salsa20, @function +.type _crypto_stream_salsa20, @function +#endif +crypto_stream_salsa20: +_crypto_stream_salsa20: +mov %rsp,%r11 +and $31,%r11 +add $512,%r11 +sub %r11,%rsp +movq %r11,416(%rsp) +movq %r12,424(%rsp) +movq %r13,432(%rsp) +movq %r14,440(%rsp) +movq %r15,448(%rsp) +movq %rbx,456(%rsp) +movq %rbp,464(%rsp) +mov %rsi,%r9 +mov %rdi,%rdi +mov %rdi,%rsi +mov %rdx,%rdx +mov %rcx,%r10 +cmp $0,%r9 +jbe ._done +mov $0,%rax +mov %r9,%rcx +rep stosb +sub %r9,%rdi +movq $0,472(%rsp) +jmp ._start + +.text +.p2align 5 + +.globl crypto_stream_salsa20_xor_ic +.globl _crypto_stream_salsa20_xor_ic +#ifdef __ELF__ +.type crypto_stream_salsa20_xor_ic, @function +.type _crypto_stream_salsa20_xor_ic, @function +#endif +crypto_stream_salsa20_xor_ic: +_crypto_stream_salsa20_xor_ic: + +mov %rsp,%r11 +and $31,%r11 +add $512,%r11 +sub %r11,%rsp +movq %r11,416(%rsp) +movq %r12,424(%rsp) +movq %r13,432(%rsp) +movq %r14,440(%rsp) +movq %r15,448(%rsp) +movq %rbx,456(%rsp) +movq %rbp,464(%rsp) +mov %rdi,%rdi +mov %rsi,%rsi +mov %r9,%r10 +movq %r8,472(%rsp) +mov %rdx,%r9 +mov %rcx,%rdx +cmp $0,%r9 +jbe ._done + +._start: +movl 20(%r10),%ecx +movl 0(%r10),%r8d +movl 0(%rdx),%eax +movl 16(%r10),%r11d +movl %ecx,64(%rsp) +movl %r8d,4+64(%rsp) +movl %eax,8+64(%rsp) +movl %r11d,12+64(%rsp) +movl 24(%r10),%r8d +movl 4(%r10),%eax +movl 4(%rdx),%edx +movq 472(%rsp),%rcx +movl %ecx,80(%rsp) +movl %r8d,4+80(%rsp) +movl %eax,8+80(%rsp) +movl %edx,12+80(%rsp) +movl 12(%r10),%edx +shr $32,%rcx +movl 28(%r10),%r8d +movl 8(%r10),%eax +movl %edx,96(%rsp) +movl %ecx,4+96(%rsp) +movl %r8d,8+96(%rsp) +movl %eax,12+96(%rsp) +mov $1634760805,%rdx +mov $857760878,%rcx +mov 
$2036477234,%r8 +mov $1797285236,%rax +movl %edx,112(%rsp) +movl %ecx,4+112(%rsp) +movl %r8d,8+112(%rsp) +movl %eax,12+112(%rsp) +cmp $256,%r9 +jb ._bytesbetween1and255 +movdqa 112(%rsp),%xmm0 +pshufd $0x55,%xmm0,%xmm1 +pshufd $0xaa,%xmm0,%xmm2 +pshufd $0xff,%xmm0,%xmm3 +pshufd $0x00,%xmm0,%xmm0 +movdqa %xmm1,128(%rsp) +movdqa %xmm2,144(%rsp) +movdqa %xmm3,160(%rsp) +movdqa %xmm0,176(%rsp) +movdqa 64(%rsp),%xmm0 +pshufd $0xaa,%xmm0,%xmm1 +pshufd $0xff,%xmm0,%xmm2 +pshufd $0x00,%xmm0,%xmm3 +pshufd $0x55,%xmm0,%xmm0 +movdqa %xmm1,192(%rsp) +movdqa %xmm2,208(%rsp) +movdqa %xmm3,224(%rsp) +movdqa %xmm0,240(%rsp) +movdqa 80(%rsp),%xmm0 +pshufd $0xff,%xmm0,%xmm1 +pshufd $0x55,%xmm0,%xmm2 +pshufd $0xaa,%xmm0,%xmm0 +movdqa %xmm1,256(%rsp) +movdqa %xmm2,272(%rsp) +movdqa %xmm0,288(%rsp) +movdqa 96(%rsp),%xmm0 +pshufd $0x00,%xmm0,%xmm1 +pshufd $0xaa,%xmm0,%xmm2 +pshufd $0xff,%xmm0,%xmm0 +movdqa %xmm1,304(%rsp) +movdqa %xmm2,320(%rsp) +movdqa %xmm0,336(%rsp) + +._bytesatleast256: +movq 472(%rsp),%rdx +mov %rdx,%rcx +shr $32,%rcx +movl %edx,352(%rsp) +movl %ecx,368(%rsp) +add $1,%rdx +mov %rdx,%rcx +shr $32,%rcx +movl %edx,4+352(%rsp) +movl %ecx,4+368(%rsp) +add $1,%rdx +mov %rdx,%rcx +shr $32,%rcx +movl %edx,8+352(%rsp) +movl %ecx,8+368(%rsp) +add $1,%rdx +mov %rdx,%rcx +shr $32,%rcx +movl %edx,12+352(%rsp) +movl %ecx,12+368(%rsp) +add $1,%rdx +mov %rdx,%rcx +shr $32,%rcx +movl %edx,80(%rsp) +movl %ecx,4+96(%rsp) +movq %rdx,472(%rsp) +movq %r9,480(%rsp) +mov $20,%rdx +movdqa 128(%rsp),%xmm0 +movdqa 144(%rsp),%xmm1 +movdqa 160(%rsp),%xmm2 +movdqa 320(%rsp),%xmm3 +movdqa 336(%rsp),%xmm4 +movdqa 192(%rsp),%xmm5 +movdqa 208(%rsp),%xmm6 +movdqa 240(%rsp),%xmm7 +movdqa 256(%rsp),%xmm8 +movdqa 272(%rsp),%xmm9 +movdqa 288(%rsp),%xmm10 +movdqa 368(%rsp),%xmm11 +movdqa 176(%rsp),%xmm12 +movdqa 224(%rsp),%xmm13 +movdqa 304(%rsp),%xmm14 +movdqa 352(%rsp),%xmm15 + +._mainloop1: +movdqa %xmm1,384(%rsp) +movdqa %xmm2,400(%rsp) +movdqa %xmm13,%xmm1 +paddd %xmm12,%xmm1 +movdqa %xmm1,%xmm2 
+pslld $7,%xmm1 +pxor %xmm1,%xmm14 +psrld $25,%xmm2 +pxor %xmm2,%xmm14 +movdqa %xmm7,%xmm1 +paddd %xmm0,%xmm1 +movdqa %xmm1,%xmm2 +pslld $7,%xmm1 +pxor %xmm1,%xmm11 +psrld $25,%xmm2 +pxor %xmm2,%xmm11 +movdqa %xmm12,%xmm1 +paddd %xmm14,%xmm1 +movdqa %xmm1,%xmm2 +pslld $9,%xmm1 +pxor %xmm1,%xmm15 +psrld $23,%xmm2 +pxor %xmm2,%xmm15 +movdqa %xmm0,%xmm1 +paddd %xmm11,%xmm1 +movdqa %xmm1,%xmm2 +pslld $9,%xmm1 +pxor %xmm1,%xmm9 +psrld $23,%xmm2 +pxor %xmm2,%xmm9 +movdqa %xmm14,%xmm1 +paddd %xmm15,%xmm1 +movdqa %xmm1,%xmm2 +pslld $13,%xmm1 +pxor %xmm1,%xmm13 +psrld $19,%xmm2 +pxor %xmm2,%xmm13 +movdqa %xmm11,%xmm1 +paddd %xmm9,%xmm1 +movdqa %xmm1,%xmm2 +pslld $13,%xmm1 +pxor %xmm1,%xmm7 +psrld $19,%xmm2 +pxor %xmm2,%xmm7 +movdqa %xmm15,%xmm1 +paddd %xmm13,%xmm1 +movdqa %xmm1,%xmm2 +pslld $18,%xmm1 +pxor %xmm1,%xmm12 +psrld $14,%xmm2 +pxor %xmm2,%xmm12 +movdqa 384(%rsp),%xmm1 +movdqa %xmm12,384(%rsp) +movdqa %xmm9,%xmm2 +paddd %xmm7,%xmm2 +movdqa %xmm2,%xmm12 +pslld $18,%xmm2 +pxor %xmm2,%xmm0 +psrld $14,%xmm12 +pxor %xmm12,%xmm0 +movdqa %xmm5,%xmm2 +paddd %xmm1,%xmm2 +movdqa %xmm2,%xmm12 +pslld $7,%xmm2 +pxor %xmm2,%xmm3 +psrld $25,%xmm12 +pxor %xmm12,%xmm3 +movdqa 400(%rsp),%xmm2 +movdqa %xmm0,400(%rsp) +movdqa %xmm6,%xmm0 +paddd %xmm2,%xmm0 +movdqa %xmm0,%xmm12 +pslld $7,%xmm0 +pxor %xmm0,%xmm4 +psrld $25,%xmm12 +pxor %xmm12,%xmm4 +movdqa %xmm1,%xmm0 +paddd %xmm3,%xmm0 +movdqa %xmm0,%xmm12 +pslld $9,%xmm0 +pxor %xmm0,%xmm10 +psrld $23,%xmm12 +pxor %xmm12,%xmm10 +movdqa %xmm2,%xmm0 +paddd %xmm4,%xmm0 +movdqa %xmm0,%xmm12 +pslld $9,%xmm0 +pxor %xmm0,%xmm8 +psrld $23,%xmm12 +pxor %xmm12,%xmm8 +movdqa %xmm3,%xmm0 +paddd %xmm10,%xmm0 +movdqa %xmm0,%xmm12 +pslld $13,%xmm0 +pxor %xmm0,%xmm5 +psrld $19,%xmm12 +pxor %xmm12,%xmm5 +movdqa %xmm4,%xmm0 +paddd %xmm8,%xmm0 +movdqa %xmm0,%xmm12 +pslld $13,%xmm0 +pxor %xmm0,%xmm6 +psrld $19,%xmm12 +pxor %xmm12,%xmm6 +movdqa %xmm10,%xmm0 +paddd %xmm5,%xmm0 +movdqa %xmm0,%xmm12 +pslld $18,%xmm0 +pxor %xmm0,%xmm1 +psrld $14,%xmm12 +pxor 
%xmm12,%xmm1 +movdqa 384(%rsp),%xmm0 +movdqa %xmm1,384(%rsp) +movdqa %xmm4,%xmm1 +paddd %xmm0,%xmm1 +movdqa %xmm1,%xmm12 +pslld $7,%xmm1 +pxor %xmm1,%xmm7 +psrld $25,%xmm12 +pxor %xmm12,%xmm7 +movdqa %xmm8,%xmm1 +paddd %xmm6,%xmm1 +movdqa %xmm1,%xmm12 +pslld $18,%xmm1 +pxor %xmm1,%xmm2 +psrld $14,%xmm12 +pxor %xmm12,%xmm2 +movdqa 400(%rsp),%xmm12 +movdqa %xmm2,400(%rsp) +movdqa %xmm14,%xmm1 +paddd %xmm12,%xmm1 +movdqa %xmm1,%xmm2 +pslld $7,%xmm1 +pxor %xmm1,%xmm5 +psrld $25,%xmm2 +pxor %xmm2,%xmm5 +movdqa %xmm0,%xmm1 +paddd %xmm7,%xmm1 +movdqa %xmm1,%xmm2 +pslld $9,%xmm1 +pxor %xmm1,%xmm10 +psrld $23,%xmm2 +pxor %xmm2,%xmm10 +movdqa %xmm12,%xmm1 +paddd %xmm5,%xmm1 +movdqa %xmm1,%xmm2 +pslld $9,%xmm1 +pxor %xmm1,%xmm8 +psrld $23,%xmm2 +pxor %xmm2,%xmm8 +movdqa %xmm7,%xmm1 +paddd %xmm10,%xmm1 +movdqa %xmm1,%xmm2 +pslld $13,%xmm1 +pxor %xmm1,%xmm4 +psrld $19,%xmm2 +pxor %xmm2,%xmm4 +movdqa %xmm5,%xmm1 +paddd %xmm8,%xmm1 +movdqa %xmm1,%xmm2 +pslld $13,%xmm1 +pxor %xmm1,%xmm14 +psrld $19,%xmm2 +pxor %xmm2,%xmm14 +movdqa %xmm10,%xmm1 +paddd %xmm4,%xmm1 +movdqa %xmm1,%xmm2 +pslld $18,%xmm1 +pxor %xmm1,%xmm0 +psrld $14,%xmm2 +pxor %xmm2,%xmm0 +movdqa 384(%rsp),%xmm1 +movdqa %xmm0,384(%rsp) +movdqa %xmm8,%xmm0 +paddd %xmm14,%xmm0 +movdqa %xmm0,%xmm2 +pslld $18,%xmm0 +pxor %xmm0,%xmm12 +psrld $14,%xmm2 +pxor %xmm2,%xmm12 +movdqa %xmm11,%xmm0 +paddd %xmm1,%xmm0 +movdqa %xmm0,%xmm2 +pslld $7,%xmm0 +pxor %xmm0,%xmm6 +psrld $25,%xmm2 +pxor %xmm2,%xmm6 +movdqa 400(%rsp),%xmm2 +movdqa %xmm12,400(%rsp) +movdqa %xmm3,%xmm0 +paddd %xmm2,%xmm0 +movdqa %xmm0,%xmm12 +pslld $7,%xmm0 +pxor %xmm0,%xmm13 +psrld $25,%xmm12 +pxor %xmm12,%xmm13 +movdqa %xmm1,%xmm0 +paddd %xmm6,%xmm0 +movdqa %xmm0,%xmm12 +pslld $9,%xmm0 +pxor %xmm0,%xmm15 +psrld $23,%xmm12 +pxor %xmm12,%xmm15 +movdqa %xmm2,%xmm0 +paddd %xmm13,%xmm0 +movdqa %xmm0,%xmm12 +pslld $9,%xmm0 +pxor %xmm0,%xmm9 +psrld $23,%xmm12 +pxor %xmm12,%xmm9 +movdqa %xmm6,%xmm0 +paddd %xmm15,%xmm0 +movdqa %xmm0,%xmm12 +pslld $13,%xmm0 +pxor 
%xmm0,%xmm11 +psrld $19,%xmm12 +pxor %xmm12,%xmm11 +movdqa %xmm13,%xmm0 +paddd %xmm9,%xmm0 +movdqa %xmm0,%xmm12 +pslld $13,%xmm0 +pxor %xmm0,%xmm3 +psrld $19,%xmm12 +pxor %xmm12,%xmm3 +movdqa %xmm15,%xmm0 +paddd %xmm11,%xmm0 +movdqa %xmm0,%xmm12 +pslld $18,%xmm0 +pxor %xmm0,%xmm1 +psrld $14,%xmm12 +pxor %xmm12,%xmm1 +movdqa %xmm9,%xmm0 +paddd %xmm3,%xmm0 +movdqa %xmm0,%xmm12 +pslld $18,%xmm0 +pxor %xmm0,%xmm2 +psrld $14,%xmm12 +pxor %xmm12,%xmm2 +movdqa 384(%rsp),%xmm12 +movdqa 400(%rsp),%xmm0 +sub $2,%rdx +ja ._mainloop1 +paddd 176(%rsp),%xmm12 +paddd 240(%rsp),%xmm7 +paddd 288(%rsp),%xmm10 +paddd 336(%rsp),%xmm4 +movd %xmm12,%rdx +movd %xmm7,%rcx +movd %xmm10,%r8 +movd %xmm4,%r9 +pshufd $0x39,%xmm12,%xmm12 +pshufd $0x39,%xmm7,%xmm7 +pshufd $0x39,%xmm10,%xmm10 +pshufd $0x39,%xmm4,%xmm4 +xorl 0(%rsi),%edx +xorl 4(%rsi),%ecx +xorl 8(%rsi),%r8d +xorl 12(%rsi),%r9d +movl %edx,0(%rdi) +movl %ecx,4(%rdi) +movl %r8d,8(%rdi) +movl %r9d,12(%rdi) +movd %xmm12,%rdx +movd %xmm7,%rcx +movd %xmm10,%r8 +movd %xmm4,%r9 +pshufd $0x39,%xmm12,%xmm12 +pshufd $0x39,%xmm7,%xmm7 +pshufd $0x39,%xmm10,%xmm10 +pshufd $0x39,%xmm4,%xmm4 +xorl 64(%rsi),%edx +xorl 68(%rsi),%ecx +xorl 72(%rsi),%r8d +xorl 76(%rsi),%r9d +movl %edx,64(%rdi) +movl %ecx,68(%rdi) +movl %r8d,72(%rdi) +movl %r9d,76(%rdi) +movd %xmm12,%rdx +movd %xmm7,%rcx +movd %xmm10,%r8 +movd %xmm4,%r9 +pshufd $0x39,%xmm12,%xmm12 +pshufd $0x39,%xmm7,%xmm7 +pshufd $0x39,%xmm10,%xmm10 +pshufd $0x39,%xmm4,%xmm4 +xorl 128(%rsi),%edx +xorl 132(%rsi),%ecx +xorl 136(%rsi),%r8d +xorl 140(%rsi),%r9d +movl %edx,128(%rdi) +movl %ecx,132(%rdi) +movl %r8d,136(%rdi) +movl %r9d,140(%rdi) +movd %xmm12,%rdx +movd %xmm7,%rcx +movd %xmm10,%r8 +movd %xmm4,%r9 +xorl 192(%rsi),%edx +xorl 196(%rsi),%ecx +xorl 200(%rsi),%r8d +xorl 204(%rsi),%r9d +movl %edx,192(%rdi) +movl %ecx,196(%rdi) +movl %r8d,200(%rdi) +movl %r9d,204(%rdi) +paddd 304(%rsp),%xmm14 +paddd 128(%rsp),%xmm0 +paddd 192(%rsp),%xmm5 +paddd 256(%rsp),%xmm8 +movd %xmm14,%rdx +movd %xmm0,%rcx 
+movd %xmm5,%r8 +movd %xmm8,%r9 +pshufd $0x39,%xmm14,%xmm14 +pshufd $0x39,%xmm0,%xmm0 +pshufd $0x39,%xmm5,%xmm5 +pshufd $0x39,%xmm8,%xmm8 +xorl 16(%rsi),%edx +xorl 20(%rsi),%ecx +xorl 24(%rsi),%r8d +xorl 28(%rsi),%r9d +movl %edx,16(%rdi) +movl %ecx,20(%rdi) +movl %r8d,24(%rdi) +movl %r9d,28(%rdi) +movd %xmm14,%rdx +movd %xmm0,%rcx +movd %xmm5,%r8 +movd %xmm8,%r9 +pshufd $0x39,%xmm14,%xmm14 +pshufd $0x39,%xmm0,%xmm0 +pshufd $0x39,%xmm5,%xmm5 +pshufd $0x39,%xmm8,%xmm8 +xorl 80(%rsi),%edx +xorl 84(%rsi),%ecx +xorl 88(%rsi),%r8d +xorl 92(%rsi),%r9d +movl %edx,80(%rdi) +movl %ecx,84(%rdi) +movl %r8d,88(%rdi) +movl %r9d,92(%rdi) +movd %xmm14,%rdx +movd %xmm0,%rcx +movd %xmm5,%r8 +movd %xmm8,%r9 +pshufd $0x39,%xmm14,%xmm14 +pshufd $0x39,%xmm0,%xmm0 +pshufd $0x39,%xmm5,%xmm5 +pshufd $0x39,%xmm8,%xmm8 +xorl 144(%rsi),%edx +xorl 148(%rsi),%ecx +xorl 152(%rsi),%r8d +xorl 156(%rsi),%r9d +movl %edx,144(%rdi) +movl %ecx,148(%rdi) +movl %r8d,152(%rdi) +movl %r9d,156(%rdi) +movd %xmm14,%rdx +movd %xmm0,%rcx +movd %xmm5,%r8 +movd %xmm8,%r9 +xorl 208(%rsi),%edx +xorl 212(%rsi),%ecx +xorl 216(%rsi),%r8d +xorl 220(%rsi),%r9d +movl %edx,208(%rdi) +movl %ecx,212(%rdi) +movl %r8d,216(%rdi) +movl %r9d,220(%rdi) +paddd 352(%rsp),%xmm15 +paddd 368(%rsp),%xmm11 +paddd 144(%rsp),%xmm1 +paddd 208(%rsp),%xmm6 +movd %xmm15,%rdx +movd %xmm11,%rcx +movd %xmm1,%r8 +movd %xmm6,%r9 +pshufd $0x39,%xmm15,%xmm15 +pshufd $0x39,%xmm11,%xmm11 +pshufd $0x39,%xmm1,%xmm1 +pshufd $0x39,%xmm6,%xmm6 +xorl 32(%rsi),%edx +xorl 36(%rsi),%ecx +xorl 40(%rsi),%r8d +xorl 44(%rsi),%r9d +movl %edx,32(%rdi) +movl %ecx,36(%rdi) +movl %r8d,40(%rdi) +movl %r9d,44(%rdi) +movd %xmm15,%rdx +movd %xmm11,%rcx +movd %xmm1,%r8 +movd %xmm6,%r9 +pshufd $0x39,%xmm15,%xmm15 +pshufd $0x39,%xmm11,%xmm11 +pshufd $0x39,%xmm1,%xmm1 +pshufd $0x39,%xmm6,%xmm6 +xorl 96(%rsi),%edx +xorl 100(%rsi),%ecx +xorl 104(%rsi),%r8d +xorl 108(%rsi),%r9d +movl %edx,96(%rdi) +movl %ecx,100(%rdi) +movl %r8d,104(%rdi) +movl %r9d,108(%rdi) +movd %xmm15,%rdx 
+movd %xmm11,%rcx +movd %xmm1,%r8 +movd %xmm6,%r9 +pshufd $0x39,%xmm15,%xmm15 +pshufd $0x39,%xmm11,%xmm11 +pshufd $0x39,%xmm1,%xmm1 +pshufd $0x39,%xmm6,%xmm6 +xorl 160(%rsi),%edx +xorl 164(%rsi),%ecx +xorl 168(%rsi),%r8d +xorl 172(%rsi),%r9d +movl %edx,160(%rdi) +movl %ecx,164(%rdi) +movl %r8d,168(%rdi) +movl %r9d,172(%rdi) +movd %xmm15,%rdx +movd %xmm11,%rcx +movd %xmm1,%r8 +movd %xmm6,%r9 +xorl 224(%rsi),%edx +xorl 228(%rsi),%ecx +xorl 232(%rsi),%r8d +xorl 236(%rsi),%r9d +movl %edx,224(%rdi) +movl %ecx,228(%rdi) +movl %r8d,232(%rdi) +movl %r9d,236(%rdi) +paddd 224(%rsp),%xmm13 +paddd 272(%rsp),%xmm9 +paddd 320(%rsp),%xmm3 +paddd 160(%rsp),%xmm2 +movd %xmm13,%rdx +movd %xmm9,%rcx +movd %xmm3,%r8 +movd %xmm2,%r9 +pshufd $0x39,%xmm13,%xmm13 +pshufd $0x39,%xmm9,%xmm9 +pshufd $0x39,%xmm3,%xmm3 +pshufd $0x39,%xmm2,%xmm2 +xorl 48(%rsi),%edx +xorl 52(%rsi),%ecx +xorl 56(%rsi),%r8d +xorl 60(%rsi),%r9d +movl %edx,48(%rdi) +movl %ecx,52(%rdi) +movl %r8d,56(%rdi) +movl %r9d,60(%rdi) +movd %xmm13,%rdx +movd %xmm9,%rcx +movd %xmm3,%r8 +movd %xmm2,%r9 +pshufd $0x39,%xmm13,%xmm13 +pshufd $0x39,%xmm9,%xmm9 +pshufd $0x39,%xmm3,%xmm3 +pshufd $0x39,%xmm2,%xmm2 +xorl 112(%rsi),%edx +xorl 116(%rsi),%ecx +xorl 120(%rsi),%r8d +xorl 124(%rsi),%r9d +movl %edx,112(%rdi) +movl %ecx,116(%rdi) +movl %r8d,120(%rdi) +movl %r9d,124(%rdi) +movd %xmm13,%rdx +movd %xmm9,%rcx +movd %xmm3,%r8 +movd %xmm2,%r9 +pshufd $0x39,%xmm13,%xmm13 +pshufd $0x39,%xmm9,%xmm9 +pshufd $0x39,%xmm3,%xmm3 +pshufd $0x39,%xmm2,%xmm2 +xorl 176(%rsi),%edx +xorl 180(%rsi),%ecx +xorl 184(%rsi),%r8d +xorl 188(%rsi),%r9d +movl %edx,176(%rdi) +movl %ecx,180(%rdi) +movl %r8d,184(%rdi) +movl %r9d,188(%rdi) +movd %xmm13,%rdx +movd %xmm9,%rcx +movd %xmm3,%r8 +movd %xmm2,%r9 +xorl 240(%rsi),%edx +xorl 244(%rsi),%ecx +xorl 248(%rsi),%r8d +xorl 252(%rsi),%r9d +movl %edx,240(%rdi) +movl %ecx,244(%rdi) +movl %r8d,248(%rdi) +movl %r9d,252(%rdi) +movq 480(%rsp),%r9 +sub $256,%r9 +add $256,%rsi +add $256,%rdi +cmp $256,%r9 +jae 
._bytesatleast256 +cmp $0,%r9 +jbe ._done + +._bytesbetween1and255: +cmp $64,%r9 +jae ._nocopy +mov %rdi,%rdx +leaq 0(%rsp),%rdi +mov %r9,%rcx +rep movsb +leaq 0(%rsp),%rdi +leaq 0(%rsp),%rsi + +._nocopy: +movq %r9,480(%rsp) +movdqa 112(%rsp),%xmm0 +movdqa 64(%rsp),%xmm1 +movdqa 80(%rsp),%xmm2 +movdqa 96(%rsp),%xmm3 +movdqa %xmm1,%xmm4 +mov $20,%rcx + +._mainloop2: +paddd %xmm0,%xmm4 +movdqa %xmm0,%xmm5 +movdqa %xmm4,%xmm6 +pslld $7,%xmm4 +psrld $25,%xmm6 +pxor %xmm4,%xmm3 +pxor %xmm6,%xmm3 +paddd %xmm3,%xmm5 +movdqa %xmm3,%xmm4 +movdqa %xmm5,%xmm6 +pslld $9,%xmm5 +psrld $23,%xmm6 +pxor %xmm5,%xmm2 +pshufd $0x93,%xmm3,%xmm3 +pxor %xmm6,%xmm2 +paddd %xmm2,%xmm4 +movdqa %xmm2,%xmm5 +movdqa %xmm4,%xmm6 +pslld $13,%xmm4 +psrld $19,%xmm6 +pxor %xmm4,%xmm1 +pshufd $0x4e,%xmm2,%xmm2 +pxor %xmm6,%xmm1 +paddd %xmm1,%xmm5 +movdqa %xmm3,%xmm4 +movdqa %xmm5,%xmm6 +pslld $18,%xmm5 +psrld $14,%xmm6 +pxor %xmm5,%xmm0 +pshufd $0x39,%xmm1,%xmm1 +pxor %xmm6,%xmm0 +paddd %xmm0,%xmm4 +movdqa %xmm0,%xmm5 +movdqa %xmm4,%xmm6 +pslld $7,%xmm4 +psrld $25,%xmm6 +pxor %xmm4,%xmm1 +pxor %xmm6,%xmm1 +paddd %xmm1,%xmm5 +movdqa %xmm1,%xmm4 +movdqa %xmm5,%xmm6 +pslld $9,%xmm5 +psrld $23,%xmm6 +pxor %xmm5,%xmm2 +pshufd $0x93,%xmm1,%xmm1 +pxor %xmm6,%xmm2 +paddd %xmm2,%xmm4 +movdqa %xmm2,%xmm5 +movdqa %xmm4,%xmm6 +pslld $13,%xmm4 +psrld $19,%xmm6 +pxor %xmm4,%xmm3 +pshufd $0x4e,%xmm2,%xmm2 +pxor %xmm6,%xmm3 +paddd %xmm3,%xmm5 +movdqa %xmm1,%xmm4 +movdqa %xmm5,%xmm6 +pslld $18,%xmm5 +psrld $14,%xmm6 +pxor %xmm5,%xmm0 +pshufd $0x39,%xmm3,%xmm3 +pxor %xmm6,%xmm0 +paddd %xmm0,%xmm4 +movdqa %xmm0,%xmm5 +movdqa %xmm4,%xmm6 +pslld $7,%xmm4 +psrld $25,%xmm6 +pxor %xmm4,%xmm3 +pxor %xmm6,%xmm3 +paddd %xmm3,%xmm5 +movdqa %xmm3,%xmm4 +movdqa %xmm5,%xmm6 +pslld $9,%xmm5 +psrld $23,%xmm6 +pxor %xmm5,%xmm2 +pshufd $0x93,%xmm3,%xmm3 +pxor %xmm6,%xmm2 +paddd %xmm2,%xmm4 +movdqa %xmm2,%xmm5 +movdqa %xmm4,%xmm6 +pslld $13,%xmm4 +psrld $19,%xmm6 +pxor %xmm4,%xmm1 +pshufd $0x4e,%xmm2,%xmm2 +pxor %xmm6,%xmm1 +paddd 
%xmm1,%xmm5 +movdqa %xmm3,%xmm4 +movdqa %xmm5,%xmm6 +pslld $18,%xmm5 +psrld $14,%xmm6 +pxor %xmm5,%xmm0 +pshufd $0x39,%xmm1,%xmm1 +pxor %xmm6,%xmm0 +paddd %xmm0,%xmm4 +movdqa %xmm0,%xmm5 +movdqa %xmm4,%xmm6 +pslld $7,%xmm4 +psrld $25,%xmm6 +pxor %xmm4,%xmm1 +pxor %xmm6,%xmm1 +paddd %xmm1,%xmm5 +movdqa %xmm1,%xmm4 +movdqa %xmm5,%xmm6 +pslld $9,%xmm5 +psrld $23,%xmm6 +pxor %xmm5,%xmm2 +pshufd $0x93,%xmm1,%xmm1 +pxor %xmm6,%xmm2 +paddd %xmm2,%xmm4 +movdqa %xmm2,%xmm5 +movdqa %xmm4,%xmm6 +pslld $13,%xmm4 +psrld $19,%xmm6 +pxor %xmm4,%xmm3 +pshufd $0x4e,%xmm2,%xmm2 +pxor %xmm6,%xmm3 +sub $4,%rcx +paddd %xmm3,%xmm5 +movdqa %xmm1,%xmm4 +movdqa %xmm5,%xmm6 +pslld $18,%xmm5 +pxor %xmm7,%xmm7 +psrld $14,%xmm6 +pxor %xmm5,%xmm0 +pshufd $0x39,%xmm3,%xmm3 +pxor %xmm6,%xmm0 +ja ._mainloop2 +paddd 112(%rsp),%xmm0 +paddd 64(%rsp),%xmm1 +paddd 80(%rsp),%xmm2 +paddd 96(%rsp),%xmm3 +movd %xmm0,%rcx +movd %xmm1,%r8 +movd %xmm2,%r9 +movd %xmm3,%rax +pshufd $0x39,%xmm0,%xmm0 +pshufd $0x39,%xmm1,%xmm1 +pshufd $0x39,%xmm2,%xmm2 +pshufd $0x39,%xmm3,%xmm3 +xorl 0(%rsi),%ecx +xorl 48(%rsi),%r8d +xorl 32(%rsi),%r9d +xorl 16(%rsi),%eax +movl %ecx,0(%rdi) +movl %r8d,48(%rdi) +movl %r9d,32(%rdi) +movl %eax,16(%rdi) +movd %xmm0,%rcx +movd %xmm1,%r8 +movd %xmm2,%r9 +movd %xmm3,%rax +pshufd $0x39,%xmm0,%xmm0 +pshufd $0x39,%xmm1,%xmm1 +pshufd $0x39,%xmm2,%xmm2 +pshufd $0x39,%xmm3,%xmm3 +xorl 20(%rsi),%ecx +xorl 4(%rsi),%r8d +xorl 52(%rsi),%r9d +xorl 36(%rsi),%eax +movl %ecx,20(%rdi) +movl %r8d,4(%rdi) +movl %r9d,52(%rdi) +movl %eax,36(%rdi) +movd %xmm0,%rcx +movd %xmm1,%r8 +movd %xmm2,%r9 +movd %xmm3,%rax +pshufd $0x39,%xmm0,%xmm0 +pshufd $0x39,%xmm1,%xmm1 +pshufd $0x39,%xmm2,%xmm2 +pshufd $0x39,%xmm3,%xmm3 +xorl 40(%rsi),%ecx +xorl 24(%rsi),%r8d +xorl 8(%rsi),%r9d +xorl 56(%rsi),%eax +movl %ecx,40(%rdi) +movl %r8d,24(%rdi) +movl %r9d,8(%rdi) +movl %eax,56(%rdi) +movd %xmm0,%rcx +movd %xmm1,%r8 +movd %xmm2,%r9 +movd %xmm3,%rax +xorl 60(%rsi),%ecx +xorl 44(%rsi),%r8d +xorl 28(%rsi),%r9d +xorl 
12(%rsi),%eax +movl %ecx,60(%rdi) +movl %r8d,44(%rdi) +movl %r9d,28(%rdi) +movl %eax,12(%rdi) +movq 480(%rsp),%r9 +movq 472(%rsp),%rcx +add $1,%rcx +mov %rcx,%r8 +shr $32,%r8 +movl %ecx,80(%rsp) +movl %r8d,4+96(%rsp) +movq %rcx,472(%rsp) +cmp $64,%r9 +ja ._bytesatleast65 +jae ._bytesatleast64 +mov %rdi,%rsi +mov %rdx,%rdi +mov %r9,%rcx +rep movsb + +._bytesatleast64: +._done: +movq 416(%rsp),%r11 +movq 424(%rsp),%r12 +movq 432(%rsp),%r13 +movq 440(%rsp),%r14 +movq 448(%rsp),%r15 +movq 456(%rsp),%rbx +movq 464(%rsp),%rbp +add %r11,%rsp +xor %rax,%rax +mov %rsi,%rdx +ret + +._bytesatleast65: +sub $64,%r9 +add $64,%rdi +add $64,%rsi +jmp ._bytesbetween1and255 + +#endif + +#if defined(__linux__) && defined(__ELF__) +.section .note.GNU-stack,"",%progbits +#endif diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa20/ref/api.h b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa20/ref/api.h new file mode 100644 index 00000000..3616ea71 --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa20/ref/api.h @@ -0,0 +1,5 @@ + +#include "crypto_stream_salsa20.h" + +#define crypto_stream crypto_stream_salsa20 +#define crypto_stream_xor crypto_stream_salsa20_xor diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa20/ref/stream_salsa20_ref.c b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa20/ref/stream_salsa20_ref.c new file mode 100644 index 00000000..b90827c0 --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa20/ref/stream_salsa20_ref.c @@ -0,0 +1,61 @@ +/* +version 20140420 +D. J. Bernstein +Public domain. 
+*/ + +#include "api.h" +#include "crypto_core_salsa20.h" +#include "utils.h" + +#ifndef HAVE_AMD64_ASM + +typedef unsigned int uint32; + +static const unsigned char sigma[16] = { + 'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k' +}; + +int crypto_stream( + unsigned char *c,unsigned long long clen, + const unsigned char *n, + const unsigned char *k +) +{ + unsigned char in[16]; + unsigned char block[64]; + unsigned char kcopy[32]; + unsigned int i; + unsigned int u; + + if (!clen) return 0; + + for (i = 0;i < 32;++i) kcopy[i] = k[i]; + for (i = 0;i < 8;++i) in[i] = n[i]; + for (i = 8;i < 16;++i) in[i] = 0; + + while (clen >= 64) { + crypto_core_salsa20(c,in,kcopy,sigma); + + u = 1; + for (i = 8;i < 16;++i) { + u += (unsigned int) in[i]; + in[i] = u; + u >>= 8; + } + + clen -= 64; + c += 64; + } + + if (clen) { + crypto_core_salsa20(block,in,kcopy,sigma); + for (i = 0;i < (unsigned int) clen;++i) c[i] = block[i]; + } + sodium_memzero(block, sizeof block); + sodium_memzero(kcopy, sizeof kcopy); + + return 0; +} + +#endif diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa20/ref/xor_salsa20_ref.c b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa20/ref/xor_salsa20_ref.c new file mode 100644 index 00000000..27cf1470 --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa20/ref/xor_salsa20_ref.c @@ -0,0 +1,69 @@ +/* +version 20140420 +D. J. Bernstein +Public domain. 
+*/ + +#include <stdint.h> + +#include "api.h" +#include "crypto_core_salsa20.h" +#include "utils.h" + +#ifndef HAVE_AMD64_ASM + +typedef unsigned int uint32; + +static const unsigned char sigma[16] = { + 'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k' +}; + +int crypto_stream_salsa20_xor_ic( + unsigned char *c, + const unsigned char *m,unsigned long long mlen, + const unsigned char *n, uint64_t ic, + const unsigned char *k +) +{ + unsigned char in[16]; + unsigned char block[64]; + unsigned char kcopy[32]; + unsigned int i; + unsigned int u; + + if (!mlen) return 0; + + for (i = 0;i < 32;++i) kcopy[i] = k[i]; + for (i = 0;i < 8;++i) in[i] = n[i]; + for (i = 8;i < 16;++i) { + in[i] = (unsigned char) (ic & 0xff); + ic >>= 8; + } + + while (mlen >= 64) { + crypto_core_salsa20(block,in,kcopy,sigma); + for (i = 0;i < 64;++i) c[i] = m[i] ^ block[i]; + + u = 1; + for (i = 8;i < 16;++i) { + u += (unsigned int) in[i]; + in[i] = u; + u >>= 8; + } + + mlen -= 64; + c += 64; + m += 64; + } + + if (mlen) { + crypto_core_salsa20(block,in,kcopy,sigma); + for (i = 0;i < (unsigned int) mlen;++i) c[i] = m[i] ^ block[i]; + } + sodium_memzero(block, sizeof block); + sodium_memzero(kcopy, sizeof kcopy); + + return 0; +} + +#endif diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa20/stream_salsa20_api.c b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa20/stream_salsa20_api.c new file mode 100644 index 00000000..3bc05801 --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa20/stream_salsa20_api.c @@ -0,0 +1,19 @@ +#include "crypto_stream_salsa20.h" + +size_t +crypto_stream_salsa20_keybytes(void) { + return crypto_stream_salsa20_KEYBYTES; +} + +size_t +crypto_stream_salsa20_noncebytes(void) { + return crypto_stream_salsa20_NONCEBYTES; +} + +int +crypto_stream_salsa20_xor(unsigned char *c, const unsigned char *m, + unsigned 
long long mlen, const unsigned char *n, + const unsigned char *k) +{ + return crypto_stream_salsa20_xor_ic(c, m, mlen, n, 0U, k); +} diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa2012/ref/api.h b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa2012/ref/api.h new file mode 100644 index 00000000..0efe8b8a --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa2012/ref/api.h @@ -0,0 +1,10 @@ + +#include "crypto_stream_salsa2012.h" + +#define crypto_stream crypto_stream_salsa2012 +#define crypto_stream_xor crypto_stream_salsa2012_xor +#define crypto_stream_KEYBYTES crypto_stream_salsa2012_KEYBYTES +#define crypto_stream_NONCEBYTES crypto_stream_salsa2012_NONCEBYTES +#define crypto_stream_IMPLEMENTATION crypto_stream_salsa2012_IMPLEMENTATION +#define crypto_stream_VERSION crypto_stream_salsa2012_VERSION + diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa2012/ref/stream_salsa2012.c b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa2012/ref/stream_salsa2012.c new file mode 100644 index 00000000..ef121235 --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa2012/ref/stream_salsa2012.c @@ -0,0 +1,57 @@ +/* +version 20140420 +D. J. Bernstein +Public domain. 
+*/ + +#include "api.h" +#include "crypto_core_salsa2012.h" +#include "utils.h" + +typedef unsigned int uint32; + +static const unsigned char sigma[16] = { + 'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k' +}; + +int crypto_stream( + unsigned char *c,unsigned long long clen, + const unsigned char *n, + const unsigned char *k +) +{ + unsigned char in[16]; + unsigned char block[64]; + unsigned char kcopy[32]; + unsigned int i; + unsigned int u; + + if (!clen) return 0; + + for (i = 0;i < 32;++i) kcopy[i] = k[i]; + for (i = 0;i < 8;++i) in[i] = n[i]; + for (i = 8;i < 16;++i) in[i] = 0; + + while (clen >= 64) { + crypto_core_salsa2012(c,in,kcopy,sigma); + + u = 1; + for (i = 8;i < 16;++i) { + u += (unsigned int) in[i]; + in[i] = u; + u >>= 8; + } + + clen -= 64; + c += 64; + } + + if (clen) { + crypto_core_salsa2012(block,in,kcopy,sigma); + for (i = 0;i < (unsigned int) clen;++i) c[i] = block[i]; + } + sodium_memzero(block, sizeof block); + sodium_memzero(kcopy, sizeof kcopy); + + return 0; +} diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa2012/ref/xor_salsa2012.c b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa2012/ref/xor_salsa2012.c new file mode 100644 index 00000000..132a720a --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa2012/ref/xor_salsa2012.c @@ -0,0 +1,60 @@ +/* +version 20140420 +D. J. Bernstein +Public domain. 
+*/ + +#include "api.h" +#include "crypto_core_salsa2012.h" +#include "utils.h" + +typedef unsigned int uint32; + +static const unsigned char sigma[16] = { + 'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k' +}; + +int crypto_stream_xor( + unsigned char *c, + const unsigned char *m,unsigned long long mlen, + const unsigned char *n, + const unsigned char *k +) +{ + unsigned char in[16]; + unsigned char block[64]; + unsigned char kcopy[32]; + unsigned int i; + unsigned int u; + + if (!mlen) return 0; + + for (i = 0;i < 32;++i) kcopy[i] = k[i]; + for (i = 0;i < 8;++i) in[i] = n[i]; + for (i = 8;i < 16;++i) in[i] = 0; + + while (mlen >= 64) { + crypto_core_salsa2012(block,in,kcopy,sigma); + for (i = 0;i < 64;++i) c[i] = m[i] ^ block[i]; + + u = 1; + for (i = 8;i < 16;++i) { + u += (unsigned int) in[i]; + in[i] = u; + u >>= 8; + } + + mlen -= 64; + c += 64; + m += 64; + } + + if (mlen) { + crypto_core_salsa2012(block,in,kcopy,sigma); + for (i = 0;i < (unsigned int) mlen;++i) c[i] = m[i] ^ block[i]; + } + sodium_memzero(block, sizeof block); + sodium_memzero(kcopy, sizeof kcopy); + + return 0; +} diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa2012/stream_salsa2012_api.c b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa2012/stream_salsa2012_api.c new file mode 100644 index 00000000..3b5685f3 --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa2012/stream_salsa2012_api.c @@ -0,0 +1,11 @@ +#include "crypto_stream_salsa2012.h" + +size_t +crypto_stream_salsa2012_keybytes(void) { + return crypto_stream_salsa2012_KEYBYTES; +} + +size_t +crypto_stream_salsa2012_noncebytes(void) { + return crypto_stream_salsa2012_NONCEBYTES; +} diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa208/ref/api.h 
b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa208/ref/api.h new file mode 100644 index 00000000..14b4a775 --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa208/ref/api.h @@ -0,0 +1,9 @@ + +#include "crypto_stream_salsa208.h" + +#define crypto_stream crypto_stream_salsa208 +#define crypto_stream_xor crypto_stream_salsa208_xor +#define crypto_stream_KEYBYTES crypto_stream_salsa208_KEYBYTES +#define crypto_stream_NONCEBYTES crypto_stream_salsa208_NONCEBYTES +#define crypto_stream_IMPLEMENTATION crypto_stream_salsa208_IMPLEMENTATION +#define crypto_stream_VERSION crypto_stream_salsa208_VERSION diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa208/ref/stream_salsa208.c b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa208/ref/stream_salsa208.c new file mode 100644 index 00000000..6a15ff15 --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa208/ref/stream_salsa208.c @@ -0,0 +1,57 @@ +/* +version 20140420 +D. J. Bernstein +Public domain. 
+*/ + +#include "api.h" +#include "crypto_core_salsa208.h" +#include "utils.h" + +typedef unsigned int uint32; + +static const unsigned char sigma[16] = { + 'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k' +}; + +int crypto_stream( + unsigned char *c,unsigned long long clen, + const unsigned char *n, + const unsigned char *k +) +{ + unsigned char in[16]; + unsigned char block[64]; + unsigned char kcopy[32]; + unsigned int i; + unsigned int u; + + if (!clen) return 0; + + for (i = 0;i < 32;++i) kcopy[i] = k[i]; + for (i = 0;i < 8;++i) in[i] = n[i]; + for (i = 8;i < 16;++i) in[i] = 0; + + while (clen >= 64) { + crypto_core_salsa208(c,in,kcopy,sigma); + + u = 1; + for (i = 8;i < 16;++i) { + u += (unsigned int) in[i]; + in[i] = u; + u >>= 8; + } + + clen -= 64; + c += 64; + } + + if (clen) { + crypto_core_salsa208(block,in,kcopy,sigma); + for (i = 0;i < (unsigned int) clen;++i) c[i] = block[i]; + } + sodium_memzero(block, sizeof block); + sodium_memzero(kcopy, sizeof kcopy); + + return 0; +} diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa208/ref/xor_salsa208.c b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa208/ref/xor_salsa208.c new file mode 100644 index 00000000..de0eae9f --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa208/ref/xor_salsa208.c @@ -0,0 +1,60 @@ +/* +version 20140420 +D. J. Bernstein +Public domain. 
+*/ + +#include "api.h" +#include "crypto_core_salsa208.h" +#include "utils.h" + +typedef unsigned int uint32; + +static const unsigned char sigma[16] = { + 'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k' +}; + +int crypto_stream_xor( + unsigned char *c, + const unsigned char *m,unsigned long long mlen, + const unsigned char *n, + const unsigned char *k +) +{ + unsigned char in[16]; + unsigned char block[64]; + unsigned char kcopy[32]; + unsigned int i; + unsigned int u; + + if (!mlen) return 0; + + for (i = 0;i < 32;++i) kcopy[i] = k[i]; + for (i = 0;i < 8;++i) in[i] = n[i]; + for (i = 8;i < 16;++i) in[i] = 0; + + while (mlen >= 64) { + crypto_core_salsa208(block,in,kcopy,sigma); + for (i = 0;i < 64;++i) c[i] = m[i] ^ block[i]; + + u = 1; + for (i = 8;i < 16;++i) { + u += (unsigned int) in[i]; + in[i] = u; + u >>= 8; + } + + mlen -= 64; + c += 64; + m += 64; + } + + if (mlen) { + crypto_core_salsa208(block,in,kcopy,sigma); + for (i = 0;i < (unsigned int) mlen;++i) c[i] = m[i] ^ block[i]; + } + sodium_memzero(block, sizeof block); + sodium_memzero(kcopy, sizeof kcopy); + + return 0; +} diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa208/stream_salsa208_api.c b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa208/stream_salsa208_api.c new file mode 100644 index 00000000..640a8b2e --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/salsa208/stream_salsa208_api.c @@ -0,0 +1,11 @@ +#include "crypto_stream_salsa208.h" + +size_t +crypto_stream_salsa208_keybytes(void) { + return crypto_stream_salsa208_KEYBYTES; +} + +size_t +crypto_stream_salsa208_noncebytes(void) { + return crypto_stream_salsa208_NONCEBYTES; +} diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/xsalsa20/ref/api.h 
b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/xsalsa20/ref/api.h new file mode 100644 index 00000000..58915f31 --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/xsalsa20/ref/api.h @@ -0,0 +1,10 @@ + +#include "crypto_stream_xsalsa20.h" + +#define crypto_stream crypto_stream_xsalsa20 +#define crypto_stream_xor crypto_stream_xsalsa20_xor +#define crypto_stream_KEYBYTES crypto_stream_xsalsa20_KEYBYTES +#define crypto_stream_NONCEBYTES crypto_stream_xsalsa20_NONCEBYTES +#define crypto_stream_IMPLEMENTATION crypto_stream_xsalsa20_IMPLEMENTATION +#define crypto_stream_VERSION crypto_stream_xsalsa20_VERSION + diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/xsalsa20/ref/stream_xsalsa20.c b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/xsalsa20/ref/stream_xsalsa20.c new file mode 100644 index 00000000..64e42cb7 --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/xsalsa20/ref/stream_xsalsa20.c @@ -0,0 +1,28 @@ +/* +version 20080914 +D. J. Bernstein +Public domain. 
+*/ + +#include "api.h" +#include "crypto_core_hsalsa20.h" +#include "crypto_stream_salsa20.h" +#include "utils.h" + +static const unsigned char sigma[16] = { + 'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k' +}; + +int crypto_stream( + unsigned char *c,unsigned long long clen, + const unsigned char *n, + const unsigned char *k +) +{ + unsigned char subkey[32]; + int ret; + crypto_core_hsalsa20(subkey,n,k,sigma); + ret = crypto_stream_salsa20(c,clen,n + 16,subkey); + sodium_memzero(subkey, sizeof subkey); + return ret; +} diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/xsalsa20/ref/xor_xsalsa20.c b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/xsalsa20/ref/xor_xsalsa20.c new file mode 100644 index 00000000..b77b9f78 --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/xsalsa20/ref/xor_xsalsa20.c @@ -0,0 +1,29 @@ +/* +version 20080913 +D. J. Bernstein +Public domain. 
+*/ + +#include "api.h" +#include "crypto_core_hsalsa20.h" +#include "crypto_stream_salsa20.h" +#include "utils.h" + +static const unsigned char sigma[16] = { + 'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k' +}; + +int crypto_stream_xor( + unsigned char *c, + const unsigned char *m,unsigned long long mlen, + const unsigned char *n, + const unsigned char *k +) +{ + unsigned char subkey[32]; + int ret; + crypto_core_hsalsa20(subkey,n,k,sigma); + ret = crypto_stream_salsa20_xor(c,m,mlen,n + 16,subkey); + sodium_memzero(subkey, sizeof subkey); + return ret; +} diff --git a/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/xsalsa20/stream_xsalsa20_api.c b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/xsalsa20/stream_xsalsa20_api.c new file mode 100644 index 00000000..256084e5 --- /dev/null +++ b/external_libs/python/pyzmq-14.7.0/bundled/libsodium/src/libsodium/crypto_stream/xsalsa20/stream_xsalsa20_api.c @@ -0,0 +1,11 @@ +#include "crypto_stream_xsalsa20.h" + +size_t +crypto_stream_xsalsa20_keybytes(void) { + return crypto_stream_xsalsa20_KEYBYTES; +} + +size_t +crypto_stream_xsalsa20_noncebytes(void) { + return crypto_stream_xsalsa20_NONCEBYTES; +} |