src/vppinfra/rbtree.h
/*
 * Copyright (c) 2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef SRC_VPPINFRA_RBTREE_H_
#define SRC_VPPINFRA_RBTREE_H_

#include <vppinfra/types.h>
#include <vppinfra/pool.h>

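/* pool element 0 is reserved as the tnil (sentinel leaf) node */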
#define RBTREE_TNIL_INDEX 0

typedef u32 rb_node_index_t;

typedef enum rb_tree_color_
{
  RBTREE_RED,
  RBTREE_BLACK
} rb_node_color_t;

typedef struct rb_node_
{
  u8 color;			/**< node color */
  rb_node_index_t parent;	/**< parent index */
  rb_node_index_t left;		/**< left child index */
  rb_node_index_t right;	/**< right child index */
  u32 key;			/**< node key */
  uword opaque;			/**< value stored by node */
} rb_node_t;

typedef struct rb_tree_
{
  rb_node_t *nodes;		/**< pool of nodes */
  rb_node_index_t root;		/**< root index */
} rb_tree_t;

typedef int (*rb_tree_lt_fn) (u32 a, u32 b);

void rb_tree_init (rb_tree_t * rt);
rb_node_index_t rb_tree_add (rb_tree_t * rt, u32 key);
rb_node_index_t rb_tree_add2 (rb_tree_t * rt, u32 key, uword opaque);
rb_node_index_t rb_tree_add_custom (rb_tree_t * rt, u32 key, uword opaque,
				    rb_tree_lt_fn ltfn);
void rb_tree_del (rb_tree_t * rt, u32 key);
void rb_tree_del_node (rb_tree_t * rt, rb_node_t * z);
void rb_tree_del_custom (rb_tree_t * rt, u32 key, rb_tree_lt_fn ltfn);
void rb_tree_free_nodes (rb_tree_t * rt);
u32 rb_tree_n_nodes (rb_tree_t * rt);
rb_node_t *rb_tree_min_subtree (rb_tree_t * rt, rb_node_t * x);
rb_node_t *rb_tree_max_subtree (rb_tree_t * rt, rb_node_t * x);
rb_node_t *rb_tree_search_subtree (rb_tree_t * rt, rb_node_t * x, u32 key);
rb_node_t *rb_tree_search_subtree_custom (rb_tree_t * rt, rb_node_t * x,
					  u32 key, rb_tree_lt_fn ltfn);
rb_node_t *rb_tree_successor (rb_tree_t * rt, rb_node_t * x);
rb_node_t *rb_tree_predecessor (rb_tree_t * rt, rb_node_t * x);
int rb_tree_is_init (rb_tree_t * rt);
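
/*
 * Illustrative usage sketch (not part of this header; my_value and
 * use () are placeholders, everything else is declared above):
 *
 *   rb_tree_t rt;
 *   rb_tree_init (&rt);
 *   rb_tree_add2 (&rt, 42, (uword) my_value);
 *   rb_node_t *n = rb_tree_search_subtree (&rt, rb_node (&rt, rt.root), 42);
 *   if (!rb_node_is_tnil (&rt, n))
 *     use ((void *) n->opaque);
 *   rb_tree_del (&rt, 42);
 *   rb_tree_free_nodes (&rt);
 */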

static inline rb_node_index_t
rb_node_index (rb_tree_t * rt, rb_node_t * n)
{
  return n - rt->nodes;
}

static inline u8
rb_node_is_tnil (rb_tree_t * rt, rb_node_t * n)
{
  return rb_node_index (rt, n) == RBTREE_TNIL_INDEX;
}

static inline rb_node_t *
rb_node (rb_tree_t * rt, rb_node_index_t ri)
{
  return pool_elt_at_index (rt->nodes, ri);
}

static inline rb_node_t *
rb_node_right (rb_tree_t * rt, rb_node_t * n)
{
  return pool_elt_at_index (rt->nodes, n->right);
}

static inline rb_node_t *
rb_node_left (rb_tree_t * rt, rb_node_t * n)
{
  return pool_elt_at_index (rt->nodes, n->left);
}

static inline rb_node_t *
rb_node_parent (rb_tree_t * rt, rb_node_t * n)
{
  return pool_elt_at_index (rt->nodes, n->parent);
}

#endif /* SRC_VPPINFRA_RBTREE_H_ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
src/plugins/crypto_ipsecmb/ipsecmb.c
/*
 * ipsecmb.c - Intel IPSec Multi-buffer library Crypto Engine
 *
 * Copyright (c) 2019 Cisco Systems
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <fcntl.h>

#include <intel-ipsec-mb.h>

#include <vnet/vnet.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>
#include <vnet/crypto/crypto.h>
#include <vppinfra/cpu.h>

#define HMAC_MAX_BLOCK_SIZE  IMB_SHA_512_BLOCK_SIZE
#define EXPANDED_KEY_N_BYTES (16 * 15)

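/* per worker thread: an IMB_MGR instance plus, with intel-ipsec-mb
 * >= 1.3.0, a preallocated array of burst jobs */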
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  IMB_MGR *mgr;
#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
  IMB_JOB burst_jobs[IMB_MAX_BURST_SIZE];
#endif
} ipsecmb_per_thread_data_t;

typedef struct
{
  u16 data_size;
  u8 block_size;
  aes_gcm_pre_t aes_gcm_pre;
  keyexp_t keyexp;
  hash_one_block_t hash_one_block;
  hash_fn_t hash_fn;
} ipsecmb_alg_data_t;

typedef struct ipsecmb_main_t_
{
  ipsecmb_per_thread_data_t *per_thread_data;
  ipsecmb_alg_data_t alg_data[VNET_CRYPTO_N_ALGS];
  void **key_data;
} ipsecmb_main_t;

typedef struct
{
  u8 enc_key_exp[EXPANDED_KEY_N_BYTES];
  u8 dec_key_exp[EXPANDED_KEY_N_BYTES];
} ipsecmb_aes_key_data_t;

static ipsecmb_main_t ipsecmb_main = { };

/* clang-format off */
/*
 * (Alg, JOB_HASH_ALG, fn, block-size-bytes, hash-size-bytes, digest-size-bytes)
 */
#define foreach_ipsecmb_hmac_op                                \
  _(SHA1,   SHA_1,   sha1,   64,  20, 20)                      \
  _(SHA224, SHA_224, sha224, 64,  32, 28)                      \
  _(SHA256, SHA_256, sha256, 64,  32, 32)                      \
  _(SHA384, SHA_384, sha384, 128, 64, 48)                      \
  _(SHA512, SHA_512, sha512, 128, 64, 64)
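
/*
 * For example, the SHA1 row expands (via the generator macro further
 * below) to ipsecmb_ops_hmac_SHA1 (), a wrapper that calls
 * ipsecmb_ops_hmac_inline (vm, ops, n_ops, 64, 20, 20,
 * IMB_AUTH_HMAC_SHA_1).
 */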

/*
 * (Alg, key-len-bits, JOB_CIPHER_MODE)
 */
#define foreach_ipsecmb_cipher_op                                             \
  _ (AES_128_CBC, 128, CBC)                                                   \
  _ (AES_192_CBC, 192, CBC)                                                   \
  _ (AES_256_CBC, 256, CBC)                                                   \
  _ (AES_128_CTR, 128, CNTR)                                                  \
  _ (AES_192_CTR, 192, CNTR)                                                  \
  _ (AES_256_CTR, 256, CNTR)

/*
 * (Alg, key-len-bits)
 */
#define foreach_ipsecmb_gcm_cipher_op                          \
  _(AES_128_GCM, 128)                                          \
  _(AES_192_GCM, 192)                                          \
  _(AES_256_GCM, 256)
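
/*
 * Each row likewise generates enc/dec handlers and their _chained
 * variants (e.g. ipsecmb_ops_gcm_cipher_enc_AES_128_GCM) through the
 * GCM generator macro further below.
 */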
/* clang-format on */
static_always_inline vnet_crypto_op_status_t
ipsecmb_status_job (IMB_STATUS status)
{
  switch (status)
    {
    case IMB_STATUS_COMPLETED:
      return VNET_CRYPTO_OP_STATUS_COMPLETED;
    case IMB_STATUS_BEING_PROCESSED:
    case IMB_STATUS_COMPLETED_CIPHER:
    case IMB_STATUS_COMPLETED_AUTH:
      return VNET_CRYPTO_OP_STATUS_WORK_IN_PROGRESS;
    case IMB_STATUS_INVALID_ARGS:
    case IMB_STATUS_INTERNAL_ERROR:
    case IMB_STATUS_ERROR:
      return VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
    }
  ASSERT (0);
  return VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
}

always_inline void
ipsecmb_retire_hmac_job (IMB_JOB *job, u32 *n_fail, u32 digest_size)
{
  vnet_crypto_op_t *op = job->user_data;
  u32 len = op->digest_len ? op->digest_len : digest_size;

  if (PREDICT_FALSE (IMB_STATUS_COMPLETED != job->status))
    {
      op->status = ipsecmb_status_job (job->status);
      *n_fail = *n_fail + 1;
      return;
    }

  if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
    {
      if ((memcmp (op->digest, job->auth_tag_output, len)))
	{
	  *n_fail = *n_fail + 1;
	  op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
	  return;
	}
    }
  else
    clib_memcpy_fast (op->digest, job->auth_tag_output, len);

  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}

#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
static_always_inline u32
ipsecmb_ops_hmac_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
			 u32 block_size, u32 hash_size, u32 digest_size,
			 IMB_HASH_ALG alg)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  IMB_JOB *job;
  u32 i, n_fail = 0, ops_index = 0;
  u8 scratch[n_ops][digest_size];
  const u32 burst_sz =
    (n_ops > IMB_MAX_BURST_SIZE) ? IMB_MAX_BURST_SIZE : n_ops;

  while (n_ops)
    {
      const u32 n = (n_ops > burst_sz) ? burst_sz : n_ops;
      /*
       * configure all the jobs first ...
       */
      for (i = 0; i < n; i++, ops_index++)
	{
	  vnet_crypto_op_t *op = ops[ops_index];
	  const u8 *kd = (u8 *) imbm->key_data[op->key_index];

	  job = &ptd->burst_jobs[i];

	  job->src = op->src;
	  job->hash_start_src_offset_in_bytes = 0;
	  job->msg_len_to_hash_in_bytes = op->len;
	  job->auth_tag_output_len_in_bytes = digest_size;
	  job->auth_tag_output = scratch[ops_index];

	  job->u.HMAC._hashed_auth_key_xor_ipad = kd;
	  job->u.HMAC._hashed_auth_key_xor_opad = kd + hash_size;
	  job->user_data = op;
	}

      /*
       * submit all jobs to be processed and retire completed jobs
       */
      IMB_SUBMIT_HASH_BURST_NOCHECK (ptd->mgr, ptd->burst_jobs, n, alg);

      for (i = 0; i < n; i++)
	{
	  job = &ptd->burst_jobs[i];
	  ipsecmb_retire_hmac_job (job, &n_fail, digest_size);
	}

      n_ops -= n;
    }

  return ops_index - n_fail;
}
#else
static_always_inline u32
ipsecmb_ops_hmac_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
			 u32 block_size, u32 hash_size, u32 digest_size,
			 JOB_HASH_ALG alg)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  IMB_JOB *job;
  u32 i, n_fail = 0;
  u8 scratch[n_ops][digest_size];

  /*
   * queue all the jobs first ...
   */
  for (i = 0; i < n_ops; i++)
    {
      vnet_crypto_op_t *op = ops[i];
      u8 *kd = (u8 *) imbm->key_data[op->key_index];

      job = IMB_GET_NEXT_JOB (ptd->mgr);

      job->src = op->src;
      job->hash_start_src_offset_in_bytes = 0;
      job->msg_len_to_hash_in_bytes = op->len;
      job->hash_alg = alg;
      job->auth_tag_output_len_in_bytes = digest_size;
      job->auth_tag_output = scratch[i];

      job->cipher_mode = IMB_CIPHER_NULL;
      job->cipher_direction = IMB_DIR_DECRYPT;
      job->chain_order = IMB_ORDER_HASH_CIPHER;

      job->u.HMAC._hashed_auth_key_xor_ipad = kd;
      job->u.HMAC._hashed_auth_key_xor_opad = kd + hash_size;
      job->user_data = op;

      job = IMB_SUBMIT_JOB (ptd->mgr);

      if (job)
	ipsecmb_retire_hmac_job (job, &n_fail, digest_size);
    }

  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    ipsecmb_retire_hmac_job (job, &n_fail, digest_size);

  return n_ops - n_fail;
}
#endif
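
/*
 * Note: both ipsecmb_ops_hmac_inline variants above share a signature;
 * builds against intel-ipsec-mb >= 1.3.0 use the hash burst API, older
 * builds fall back to the per-job GET_NEXT_JOB / SUBMIT_JOB /
 * FLUSH_JOB flow.
 */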

/* clang-format off */
#define _(a, b, c, d, e, f)                                             \
static_always_inline u32                                                \
ipsecmb_ops_hmac_##a (vlib_main_t * vm,                                 \
                      vnet_crypto_op_t * ops[],                         \
                      u32 n_ops)                                        \
{ return ipsecmb_ops_hmac_inline (vm, ops, n_ops, d, e, f,              \
		IMB_AUTH_HMAC_##b); }                                   \

foreach_ipsecmb_hmac_op;
#undef _
/* clang-format on */

always_inline void
ipsecmb_retire_cipher_job (IMB_JOB *job, u32 *n_fail)
{
  vnet_crypto_op_t *op = job->user_data;

  if (PREDICT_FALSE (IMB_STATUS_COMPLETED != job->status))
    {
      op->status = ipsecmb_status_job (job->status);
      *n_fail = *n_fail + 1;
    }
  else
    op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}

#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
static_always_inline u32
ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[],
			       u32 n_ops, u32 key_len,
			       IMB_CIPHER_DIRECTION direction,
			       IMB_CIPHER_MODE cipher_mode)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  IMB_JOB *job;
  u32 i, n_fail = 0, ops_index = 0;
  const u32 burst_sz =
    (n_ops > IMB_MAX_BURST_SIZE) ? IMB_MAX_BURST_SIZE : n_ops;

  while (n_ops)
    {
      const u32 n = (n_ops > burst_sz) ? burst_sz : n_ops;

      for (i = 0; i < n; i++)
	{
	  ipsecmb_aes_key_data_t *kd;
	  vnet_crypto_op_t *op = ops[ops_index++];
	  kd = (ipsecmb_aes_key_data_t *) imbm->key_data[op->key_index];

	  job = &ptd->burst_jobs[i];

	  job->src = op->src;
	  job->dst = op->dst;
	  job->msg_len_to_cipher_in_bytes = op->len;
	  job->cipher_start_src_offset_in_bytes = 0;

	  job->hash_alg = IMB_AUTH_NULL;

	  job->enc_keys = kd->enc_key_exp;
	  job->dec_keys = kd->dec_key_exp;
	  job->iv = op->iv;
	  job->iv_len_in_bytes = IMB_AES_BLOCK_SIZE;

	  job->user_data = op;
	}

      IMB_SUBMIT_CIPHER_BURST_NOCHECK (ptd->mgr, ptd->burst_jobs, n,
				       cipher_mode, direction, key_len / 8);
      for (i = 0; i < n; i++)
	{
	  job = &ptd->burst_jobs[i];
	  ipsecmb_retire_cipher_job (job, &n_fail);
	}

      n_ops -= n;
    }

  return ops_index - n_fail;
}
#else
static_always_inline u32
ipsecmb_ops_aes_cipher_inline (vlib_main_t *vm, vnet_crypto_op_t *ops[],
			       u32 n_ops, u32 key_len,
			       JOB_CIPHER_DIRECTION direction,
			       JOB_CIPHER_MODE cipher_mode)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  IMB_JOB *job;
  u32 i, n_fail = 0;

  for (i = 0; i < n_ops; i++)
    {
      ipsecmb_aes_key_data_t *kd;
      vnet_crypto_op_t *op = ops[i];
      kd = (ipsecmb_aes_key_data_t *) imbm->key_data[op->key_index];

      job = IMB_GET_NEXT_JOB (ptd->mgr);

      job->src = op->src;
      job->dst = op->dst;
      job->msg_len_to_cipher_in_bytes = op->len;
      job->cipher_start_src_offset_in_bytes = 0;

      job->hash_alg = IMB_AUTH_NULL;
      job->cipher_mode = cipher_mode;
      job->cipher_direction = direction;
      job->chain_order =
	(direction == IMB_DIR_ENCRYPT ? IMB_ORDER_CIPHER_HASH :
					      IMB_ORDER_HASH_CIPHER);

      job->aes_key_len_in_bytes = key_len / 8;
      job->enc_keys = kd->enc_key_exp;
      job->dec_keys = kd->dec_key_exp;
      job->iv = op->iv;
      job->iv_len_in_bytes = IMB_AES_BLOCK_SIZE;

      job->user_data = op;

      job = IMB_SUBMIT_JOB (ptd->mgr);

      if (job)
	ipsecmb_retire_cipher_job (job, &n_fail);
    }

  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    ipsecmb_retire_cipher_job (job, &n_fail);

  return n_ops - n_fail;
}
#endif

/* clang-format off */
#define _(a, b, c)                                                            \
  static_always_inline u32 ipsecmb_ops_cipher_enc_##a (                       \
    vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops)                      \
  {                                                                           \
    return ipsecmb_ops_aes_cipher_inline (                                    \
                    vm, ops, n_ops, b, IMB_DIR_ENCRYPT, IMB_CIPHER_##c);      \
  }                                                                           \
                                                                              \
  static_always_inline u32 ipsecmb_ops_cipher_dec_##a (                       \
    vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops)                      \
  {                                                                           \
    return ipsecmb_ops_aes_cipher_inline (                                    \
                   vm, ops, n_ops, b, IMB_DIR_DECRYPT, IMB_CIPHER_##c);       \
  }

foreach_ipsecmb_cipher_op;
#undef _

#define _(a, b)                                                              \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_enc_##a##_chained (vlib_main_t * vm,                  \
    vnet_crypto_op_t * ops[], vnet_crypto_op_chunk_t *chunks, u32 n_ops)     \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  IMB_MGR *m = ptd->mgr;                                                     \
  vnet_crypto_op_chunk_t *chp;                                               \
  u32 i, j;                                                                  \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);              \
      IMB_AES##b##_GCM_INIT(m, kd, &ctx, op->iv, op->aad, op->aad_len);      \
      chp = chunks + op->chunk_index;                                        \
      for (j = 0; j < op->n_chunks; j++)                                     \
        {                                                                    \
          IMB_AES##b##_GCM_ENC_UPDATE (m, kd, &ctx, chp->dst, chp->src,      \
                                       chp->len);                            \
          chp += 1;                                                          \
        }                                                                    \
      IMB_AES##b##_GCM_ENC_FINALIZE(m, kd, &ctx, op->tag, op->tag_len);      \
                                                                             \
      op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                          \
    }                                                                        \
                                                                             \
  return n_ops;                                                              \
}                                                                            \
                                                                             \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_enc_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[],  \
                                u32 n_ops)                                   \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  IMB_MGR *m = ptd->mgr;                                                     \
  u32 i;                                                                     \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      IMB_AES##b##_GCM_ENC (m, kd, &ctx, op->dst, op->src, op->len, op->iv,  \
                            op->aad, op->aad_len, op->tag, op->tag_len);     \
                                                                             \
      op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                          \
    }                                                                        \
                                                                             \
  return n_ops;                                                              \
}                                                                            \
                                                                             \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_dec_##a##_chained (vlib_main_t * vm,                  \
    vnet_crypto_op_t * ops[], vnet_crypto_op_chunk_t *chunks, u32 n_ops)     \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  IMB_MGR *m = ptd->mgr;                                                     \
  vnet_crypto_op_chunk_t *chp;                                               \
  u32 i, j, n_failed = 0;                                                    \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
      u8 scratch[64];                                                        \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);              \
      IMB_AES##b##_GCM_INIT(m, kd, &ctx, op->iv, op->aad, op->aad_len);      \
      chp = chunks + op->chunk_index;                                        \
      for (j = 0; j < op->n_chunks; j++)                                     \
        {                                                                    \
          IMB_AES##b##_GCM_DEC_UPDATE (m, kd, &ctx, chp->dst, chp->src,      \
                                       chp->len);                            \
          chp += 1;                                                          \
        }                                                                    \
      IMB_AES##b##_GCM_DEC_FINALIZE(m, kd, &ctx, scratch, op->tag_len);      \
                                                                             \
      if ((memcmp (op->tag, scratch, op->tag_len)))                          \
        {                                                                    \
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;                  \
          n_failed++;                                                        \
        }                                                                    \
      else                                                                   \
        op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                        \
    }                                                                        \
                                                                             \
  return n_ops - n_failed;                                                   \
}                                                                            \
                                                                             \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[],  \
                                 u32 n_ops)                                  \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  IMB_MGR *m = ptd->mgr;                                                     \
  u32 i, n_failed = 0;                                                       \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
      u8 scratch[64];                                                        \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      IMB_AES##b##_GCM_DEC (m, kd, &ctx, op->dst, op->src, op->len, op->iv,  \
                            op->aad, op->aad_len, scratch, op->tag_len);     \
                                                                             \
      if ((memcmp (op->tag, scratch, op->tag_len)))                          \
        {                                                                    \
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;                  \
          n_failed++;                                                        \
        }                                                                    \
      else                                                                   \
        op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                        \
    }                                                                        \
                                                                             \
  return n_ops - n_failed;                                                   \
}
/* clang-format on */
foreach_ipsecmb_gcm_cipher_op;
#undef _

#ifdef HAVE_IPSECMB_CHACHA_POLY
always_inline void
ipsecmb_retire_aead_job (IMB_JOB *job, u32 *n_fail)
{
  vnet_crypto_op_t *op = job->user_data;
  u32 len = op->tag_len;

  if (PREDICT_FALSE (IMB_STATUS_COMPLETED != job->status))
    {
      op->status = ipsecmb_status_job (job->status);
      *n_fail = *n_fail + 1;
      return;
    }

  if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
    {
      if (memcmp (op->tag, job->auth_tag_output, len))
	{
	  *n_fail = *n_fail + 1;
	  op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
	  return;
	}
    }

  clib_memcpy_fast (op->tag, job->auth_tag_output, len);

  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}

static_always_inline u32
ipsecmb_ops_chacha_poly (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
			 IMB_CIPHER_DIRECTION dir)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  IMB_JOB *job;
  IMB_MGR *m = ptd->mgr;
  u32 i, n_fail = 0, last_key_index = ~0;
  u8 scratch[VLIB_FRAME_SIZE][16];
  u8 *key = 0;

  for (i = 0; i < n_ops; i++)
    {
      vnet_crypto_op_t *op = ops[i];

      job = IMB_GET_NEXT_JOB (m);
      if (last_key_index != op->key_index)
	{
	  vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);

	  key = kd->data;
	  last_key_index = op->key_index;
	}

      job->cipher_direction = dir;
      job->chain_order = IMB_ORDER_HASH_CIPHER;
      job->cipher_mode = IMB_CIPHER_CHACHA20_POLY1305;
      job->hash_alg = IMB_AUTH_CHACHA20_POLY1305;
      job->enc_keys = job->dec_keys = key;
      job->key_len_in_bytes = 32;

      job->u.CHACHA20_POLY1305.aad = op->aad;
      job->u.CHACHA20_POLY1305.aad_len_in_bytes = op->aad_len;
      job->src = op->src;
      job->dst = op->dst;

      job->iv = op->iv;
      job->iv_len_in_bytes = 12;
      job->msg_len_to_cipher_in_bytes = job->msg_len_to_hash_in_bytes =
	op->len;
      job->cipher_start_src_offset_in_bytes =
	job->hash_start_src_offset_in_bytes = 0;

      job->auth_tag_output = scratch[i];
      job->auth_tag_output_len_in_bytes = 16;

      job->user_data = op;

      job = IMB_SUBMIT_JOB_NOCHECK (ptd->mgr);
      if (job)
	ipsecmb_retire_aead_job (job, &n_fail);
    }

  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    ipsecmb_retire_aead_job (job, &n_fail);

  return n_ops - n_fail;
}

static_always_inline u32
ipsecmb_ops_chacha_poly_enc (vlib_main_t *vm, vnet_crypto_op_t *ops[],
			     u32 n_ops)
{
  return ipsecmb_ops_chacha_poly (vm, ops, n_ops, IMB_DIR_ENCRYPT);
}

static_always_inline u32
ipsecmb_ops_chacha_poly_dec (vlib_main_t *vm, vnet_crypto_op_t *ops[],
			     u32 n_ops)
{
  return ipsecmb_ops_chacha_poly (vm, ops, n_ops, IMB_DIR_DECRYPT);
}

static_always_inline u32
ipsecmb_ops_chacha_poly_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
				 vnet_crypto_op_chunk_t *chunks, u32 n_ops,
				 IMB_CIPHER_DIRECTION dir)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd =
    vec_elt_at_index (imbm->per_thread_data, vm->thread_index);
  IMB_MGR *m = ptd->mgr;
  u32 i, n_fail = 0, last_key_index = ~0;
  u8 *key = 0;

  if (dir == IMB_DIR_ENCRYPT)
    {
      for (i = 0; i < n_ops; i++)
	{
	  vnet_crypto_op_t *op = ops[i];
	  struct chacha20_poly1305_context_data ctx;
	  vnet_crypto_op_chunk_t *chp;
	  u32 j;

	  ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);

	  if (last_key_index != op->key_index)
	    {
	      vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);

	      key = kd->data;
	      last_key_index = op->key_index;
	    }

	  IMB_CHACHA20_POLY1305_INIT (m, key, &ctx, op->iv, op->aad,
				      op->aad_len);

	  chp = chunks + op->chunk_index;
	  for (j = 0; j < op->n_chunks; j++)
	    {
	      IMB_CHACHA20_POLY1305_ENC_UPDATE (m, key, &ctx, chp->dst,
						chp->src, chp->len);
	      chp += 1;
	    }

	  IMB_CHACHA20_POLY1305_ENC_FINALIZE (m, &ctx, op->tag, op->tag_len);

	  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
	}
    }
  else /* dir == IMB_DIR_DECRYPT */
    {
      for (i = 0; i < n_ops; i++)
	{
	  vnet_crypto_op_t *op = ops[i];
	  struct chacha20_poly1305_context_data ctx;
	  vnet_crypto_op_chunk_t *chp;
	  u8 scratch[16];
	  u32 j;

	  ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);

	  if (last_key_index != op->key_index)
	    {
	      vnet_crypto_key_t *kd = vnet_crypto_get_key (op->key_index);

	      key = kd->data;
	      last_key_index = op->key_index;
	    }

	  IMB_CHACHA20_POLY1305_INIT (m, key, &ctx, op->iv, op->aad,
				      op->aad_len);

	  chp = chunks + op->chunk_index;
	  for (j = 0; j < op->n_chunks; j++)
	    {
	      IMB_CHACHA20_POLY1305_DEC_UPDATE (m, key, &ctx, chp->dst,
						chp->src, chp->len);
	      chp += 1;
	    }

	  IMB_CHACHA20_POLY1305_DEC_FINALIZE (m, &ctx, scratch, op->tag_len);

	  if (memcmp (op->tag, scratch, op->tag_len))
	    {
	      n_fail = n_fail + 1;
	      op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
	    }
	  else
	    op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
	}
    }

  return n_ops - n_fail;
}

static_always_inline u32
ipsec_mb_ops_chacha_poly_enc_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
				      vnet_crypto_op_chunk_t *chunks,
				      u32 n_ops)
{
  return ipsecmb_ops_chacha_poly_chained (vm, ops, chunks, n_ops,
					  IMB_DIR_ENCRYPT);
}

static_always_inline u32
ipsec_mb_ops_chacha_poly_dec_chained (vlib_main_t *vm, vnet_crypto_op_t *ops[],
				      vnet_crypto_op_chunk_t *chunks,
				      u32 n_ops)
{
  return ipsecmb_ops_chacha_poly_chained (vm, ops, chunks, n_ops,
					  IMB_DIR_DECRYPT);
}
#endif

static void
crypto_ipsecmb_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
			    vnet_crypto_key_index_t idx)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  ipsecmb_alg_data_t *ad = imbm->alg_data + key->alg;
  u32 i;
  void *kd;

  /* TODO: add linked alg support */
  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    return;

  if (kop == VNET_CRYPTO_KEY_OP_DEL)
    {
      if (idx >= vec_len (imbm->key_data))
	return;

      if (imbm->key_data[idx] == 0)
	return;

      clib_mem_free_s (imbm->key_data[idx]);
      imbm->key_data[idx] = 0;
      return;
    }

  if (ad->data_size == 0)
    return;

  vec_validate_aligned (imbm->key_data, idx, CLIB_CACHE_LINE_BYTES);

  if (kop == VNET_CRYPTO_KEY_OP_MODIFY && imbm->key_data[idx])
    {
      clib_mem_free_s (imbm->key_data[idx]);
    }

  kd = imbm->key_data[idx] = clib_mem_alloc_aligned (ad->data_size,
						     CLIB_CACHE_LINE_BYTES);

  /* AES CBC key expansion */
  if (ad->keyexp)
    {
      ad->keyexp (key->data, ((ipsecmb_aes_key_data_t *) kd)->enc_key_exp,
		  ((ipsecmb_aes_key_data_t *) kd)->dec_key_exp);
      return;
    }

  /* AES GCM */
  if (ad->aes_gcm_pre)
    {
      ad->aes_gcm_pre (key->data, (struct gcm_key_data *) kd);
      return;
    }

  /* HMAC */
  if (ad->hash_one_block)
    {
      const int block_qw = HMAC_MAX_BLOCK_SIZE / sizeof (u64);
      u64 pad[block_qw], key_hash[block_qw];

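      /*
       * Standard HMAC key pre-processing: a key longer than the block
       * size is first hashed, then one hash block is run over
       * (key XOR ipad) and (key XOR opad); the two intermediate
       * digests are stored back to back in kd and later wired to
       * _hashed_auth_key_xor_ipad / _opad when HMAC jobs are built.
       */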
      clib_memset_u8 (key_hash, 0, HMAC_MAX_BLOCK_SIZE);
      if (vec_len (key->data) <= ad->block_size)
	clib_memcpy_fast (key_hash, key->data, vec_len (key->data));
      else
	ad->hash_fn (key->data, vec_len (key->data), key_hash);

      for (i = 0; i < block_qw; i++)
	pad[i] = key_hash[i] ^ 0x3636363636363636;
      ad->hash_one_block (pad, kd);

      for (i = 0; i < block_qw; i++)
	pad[i] = key_hash[i] ^ 0x5c5c5c5c5c5c5c5c;
      ad->hash_one_block (pad, ((u8 *) kd) + (ad->data_size / 2));

      return;
    }
}

static clib_error_t *
crypto_ipsecmb_init (vlib_main_t * vm)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_alg_data_t *ad;
  ipsecmb_per_thread_data_t *ptd;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  IMB_MGR *m = 0;
  u32 eidx;
  u8 *name;

  if (!clib_cpu_supports_aes ())
    return 0;

  /*
   * A priority that is better than OpenSSL but worse than VPP native
   */
  name = format (0, "Intel(R) Multi-Buffer Crypto for IPsec Library %s%c",
		 IMB_VERSION_STR, 0);
  eidx = vnet_crypto_register_engine (vm, "ipsecmb", 80, (char *) name);

  vec_validate_aligned (imbm->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);

  vec_foreach (ptd, imbm->per_thread_data)
    {
	ptd->mgr = alloc_mb_mgr (0);
#if IMB_VERSION_NUM >= IMB_VERSION(1, 3, 0)
	clib_memset_u8 (ptd->burst_jobs, 0,
			sizeof (IMB_JOB) * IMB_MAX_BURST_SIZE);
#endif
	if (clib_cpu_supports_avx512f ())
	  init_mb_mgr_avx512 (ptd->mgr);
	else if (clib_cpu_supports_avx2 () && clib_cpu_supports_bmi2 ())
	  init_mb_mgr_avx2 (ptd->mgr);
	else
	  init_mb_mgr_sse (ptd->mgr);

	if (ptd == imbm->per_thread_data)
	  m = ptd->mgr;
    }

#define _(a, b, c, d, e, f)                                              \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_HMAC, \
                                    ipsecmb_ops_hmac_##a);               \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_HMAC_##a;                        \
  ad->block_size = d;                                                    \
  ad->data_size = e * 2;                                                 \
  ad->hash_one_block = m-> c##_one_block;                                \
  ad->hash_fn = m-> c;                                                   \

  foreach_ipsecmb_hmac_op;
#undef _
#define _(a, b, c)                                                            \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC,       \
				    ipsecmb_ops_cipher_enc_##a);              \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC,       \
				    ipsecmb_ops_cipher_dec_##a);              \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_##a;                                  \
  ad->data_size = sizeof (ipsecmb_aes_key_data_t);                            \
  ad->keyexp = m->keyexp_##b;

  foreach_ipsecmb_cipher_op;
#undef _
#define _(a, b)                                                         \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
                                    ipsecmb_ops_gcm_cipher_enc_##a);    \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
                                    ipsecmb_ops_gcm_cipher_dec_##a);    \
  vnet_crypto_register_chained_ops_handler                              \
      (vm, eidx, VNET_CRYPTO_OP_##a##_ENC,                              \
       ipsecmb_ops_gcm_cipher_enc_##a##_chained);                       \
  vnet_crypto_register_chained_ops_handler                              \
      (vm, eidx, VNET_CRYPTO_OP_##a##_DEC,                              \
       ipsecmb_ops_gcm_cipher_dec_##a##_chained);                       \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_##a;                            \
  ad->data_size = sizeof (struct gcm_key_data);                         \
  ad->aes_gcm_pre = m->gcm##b##_pre;                                    \

  foreach_ipsecmb_gcm_cipher_op;
#undef _

#ifdef HAVE_IPSECMB_CHACHA_POLY
  vnet_crypto_register_ops_handler (vm, eidx,
				    VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC,
				    ipsecmb_ops_chacha_poly_enc);
  vnet_crypto_register_ops_handler (vm, eidx,
				    VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC,
				    ipsecmb_ops_chacha_poly_dec);
  vnet_crypto_register_chained_ops_handler (
    vm, eidx, VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC,
    ipsec_mb_ops_chacha_poly_enc_chained);
  vnet_crypto_register_chained_ops_handler (
    vm, eidx, VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC,
    ipsec_mb_ops_chacha_poly_dec_chained);
  ad = imbm->alg_data + VNET_CRYPTO_ALG_CHACHA20_POLY1305;
  ad->data_size = 0;
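  /* chacha20-poly1305 keys are used raw via vnet_crypto_get_key (),
   * so the key handler precomputes no per-key data for this alg */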
#endif

  vnet_crypto_register_key_handler (vm, eidx, crypto_ipsecmb_key_handler);
  return (NULL);
}

VLIB_INIT_FUNCTION (crypto_ipsecmb_init) =
{
  .runs_after = VLIB_INITS ("vnet_crypto_init"),
};

VLIB_PLUGIN_REGISTER () =
{
  .version = VPP_BUILD_VER,
  .description = "Intel IPSEC Multi-buffer Crypto Engine",
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */