/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __included_ioam_cache_h__
#define __included_ioam_cache_h__

#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ip/ip_packet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/srv6/sr.h>

#include <vppinfra/pool.h>
#include <vppinfra/hash.h>
#include <vppinfra/error.h>
#include <vppinfra/elog.h>
#include <vppinfra/bihash_8_8.h>
#include <ioam/analyse/ip6/ip6_ioam_analyse.h>
#include <vppinfra/tw_timer_16t_2w_512sl.h>
/*
 * ioam_cache.h
 * This header contains routines for caching the ioam header and
 * buffer:
 * 1 - On the application facing node: to cache the ioam header received
 *     in a request and reattach it in the response, providing round
 *     trip path visibility. Since request-response matching
 *     is needed, this works with TCP and relies on (5-tuple, seq no).
 * 2 - On the M-Anycast server node: this node replicates requests
 *     towards multiple anycast service nodes serving an anycast
 *     IP6 address. It evaluates the responses and forwards the best
 *     response towards the client requesting the service.
 *     Again, since request-response matching is needed, this works
 *     with TCP and relies on (5-tuple, seq no) for matching.
 *     To do this it caches SYN-ACK responses for a short time to
 *     evaluate multiple responses received before the selected
 *     SYN-ACK response is forwarded and the others are dropped.
 *
 * M-Anycast server cache:
 *   - There is a pool of cache entries per worker thread.
 *   - A cache entry is created when a SYN is received; the expected
 *     number of responses is set based on the number of
 *     SR tunnels for the anycast destination address.
 *   - The pool/thread id and pool index are attached in the
 *     message as an ioam option for quick lookup.
 *   - When a SYN-ACK is received, the ioam option containing the
 *     thread id + pool index of the cache entry is used to
 *     look up the cache entry.
 *   - Cache synchronization:
 *      - This is achieved by having cache entry add/del/update all handled
 *        by the same worker/main thread.
 *      - Packets from client to threads - SYN packets - can be distributed
 *        based on incoming interface affinity to the cpu core pinned to
 *        the thread, or a simple sequence number based distribution
 *        if one thread per interface does not scale.
 *      - Response packets from server towards clients - SYN-ACKs - are
 *        forced to the same thread that created the cache entry
 *        using SR, with the SR v6 destination address assigned
 *        to the core/thread. This address is sent as an ioam option
 *        in the SYN so it can then be used on the other side to
 *        populate the v6 dst address in the response.
 *      - Timeout: timer wheel per thread is used to track the syn-ack wait
 *        time. The timer wheel tick is updated via an input node per thread.
 *
 * Application facing node/Service side cache:
 *  - Single pool of cache entries.
 *  - A cache entry is created when a SYN is received and caches the ioam
 *    header. A hash table entry mapping (5-tuple, TCP seq no) to the
 *    pool index is created.
 *  - A response SYN-ACK is processed by looking up the pool index in the
 *    hash table; the cache entry in the pool is used to get the ioam header
 *    rewrite string. The entry is freed from the pool and hash table after use.
 *  - Locking/Synchronization: currently this functionality is deployed
 *    with the main/single thread only, hence no locking is used.
 *  - Deployment: one VPP node per application server servicing the anycast
 *    address is expected. Locking/synchronization is needed when the server
 *    /application facing node is started with multiple worker threads.
 *
 */
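
/*
 * Usage sketch (illustrative only) of how the two routines defined further
 * below pair up on the application facing node. src_port, dst_port, seq_no
 * and hbh0 are assumed to have been parsed from the packet by the caller:
 *
 *   // SYN from client: cache the received hbh header keyed on
 *   // (5-tuple, seq no)
 *   if (ioam_cache_add (b0, ip0, src_port, dst_port, hbh0, seq_no) < 0)
 *     ; // no e2e id option present, nothing cached
 *
 *   // SYN-ACK from server: look up with the response's own 5-tuple;
 *   // the lookup reverses src/dst internally before hashing
 *   ioam_cache_entry_t *e =
 *     ioam_cache_lookup (ip0, src_port, dst_port, seq_no);
 *   if (e)
 *     {
 *       // reattach e->ioam_rewrite_string to the response,
 *       // then release the entry
 *       ioam_cache_entry_free (e);
 *     }
 */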

/*
 * Application facing server side caching:
 * Cache entry for ioam header
 * Currently caters to TCP and relies on the
 * TCP 5-tuple + seq no to cache and reinsert the
 * ioam header between a TCP request and its response
 */
typedef struct
{
  /** Required for pool_get_aligned */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  ip6_address_t src_address;
  ip6_address_t dst_address;
  u16 src_port;
  u16 dst_port;
  u8 protocol;
  u32 seq_no;
  ip6_address_t next_hop;
  u16 my_address_offset;
  u8 *ioam_rewrite_string;
} ioam_cache_entry_t;

/*
 * Cache entry for anycast server selection
 * Works for TCP, as the 5-tuple + sequence number
 * is required for request-response matching.
 * max_responses is set based on the number
 *               of SR tunnels for the dst_address.
 * Timeout, or response_received reaching max_responses,
 *               will clear the entry.
 * buffer_index: index of the response msg vlib buffer
 *               that is currently the best response
 */
typedef struct
{
  /** Required for pool_get_aligned */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  u32 pool_id;
  u32 pool_index;
  ip6_address_t src_address;
  ip6_address_t dst_address;
  u16 src_port;
  u16 dst_port;
  u8 protocol;
  u32 seq_no;
  u32 buffer_index;
  ip6_hop_by_hop_header_t *hbh;	//pointer to hbh header in the buffer
  u64 created_at;
  u8 response_received;
  u8 max_responses;
  u32 stop_timer_handle;
  /** Handle returned from tw_start_timer */
  u32 timer_handle;
  /** entry should expire at this clock tick */
  u32 expected_to_expire;
} ioam_cache_ts_entry_t;

/*
 * Per thread tunnel selection cache stats
 */
typedef struct
{
  u64 inuse;
  u64 add_failed;
} ioam_cache_ts_pool_stats_t;

/* Server side: iOAM header caching */
#define MAX_CACHE_ENTRIES 4096
/* M-Anycast: Cache for SR tunnel selection */
#define MAX_CACHE_TS_ENTRIES 1048576

#define IOAM_CACHE_TABLE_DEFAULT_HASH_NUM_BUCKETS (4 * 1024)
#define IOAM_CACHE_TABLE_DEFAULT_HASH_MEMORY_SIZE (2<<20)
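/* Note: each clib_bihash_8_8_t below is created with 4K buckets (rounded up
 * to a power of two at init time via max_log2) and 2<<20 = 2 MiB of hash
 * memory. */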

typedef struct
{
  /* API message ID base */
  u16 msg_id_base;

  /* Pool of ioam_cache_buffer_t */
  ioam_cache_entry_t *ioam_rewrite_pool;

  /* For steering packets, the cached ioam header is followed by an
   * SR header. This is the SR rewrite template */
  u8 *sr_rewrite_template;
  /* The current rewrite string being used */
  u8 *rewrite;
  u8 rewrite_pool_index_offset;
  ip6_address_t sr_localsid_cache;

  u64 lookup_table_nbuckets;
  u64 lookup_table_size;
  clib_bihash_8_8_t ioam_rewrite_cache_table;

  /* M-Anycast: Pool of ioam_cache_ts_entry_t per thread */
  ioam_cache_ts_entry_t **ioam_ts_pool;
  ioam_cache_ts_pool_stats_t *ts_stats;
  /** per thread timer wheel */
  tw_timer_wheel_16t_2w_512sl_t *timer_wheels;

  /*
   * Selection criteria: oneway delay: Server to M-Anycast
   * or RTT
   */
  bool criteria_oneway;
  u8 wait_for_responses;
  ip6_address_t sr_localsid_ts;

  /* convenience */
  vlib_main_t *vlib_main;

  uword cache_hbh_slot;
  uword ts_hbh_slot;
  u32 ip6_hbh_pop_node_index;
  u32 error_node_index;
  u32 cleanup_process_node_index;

  u32 ip6_add_from_cache_hbh_node_index;
  u32 ip6_reset_ts_hbh_node_index;
} ioam_cache_main_t;

extern ioam_cache_main_t ioam_cache_main;

extern vlib_node_registration_t ioam_cache_node;
extern vlib_node_registration_t ioam_cache_ts_node;

/*  Compute flow hash. We'll use it to select which Sponge to use for this
 *  flow. And other things.
 *  ip6_compute_flow_hash in ip6.h doesn't locate tcp/udp when
 *  ext headers are present. While it could be made to, that would be a
 *  performance hit for ECMP flows.
 *  Hence this function, which takes the L4 information directly as input.
 *  Useful when the tcp/udp headers have already been located in the
 *  presence of ext headers.
 */
always_inline u32
ip6_compute_flow_hash_ext (const ip6_header_t * ip,
			   u8 protocol,
			   u16 src_port,
			   u16 dst_port, flow_hash_config_t flow_hash_config)
{
  u64 a, b, c;
  u64 t1, t2;

  t1 = (ip->src_address.as_u64[0] ^ ip->src_address.as_u64[1]);
  t1 = (flow_hash_config & IP_FLOW_HASH_SRC_ADDR) ? t1 : 0;

  t2 = (ip->dst_address.as_u64[0] ^ ip->dst_address.as_u64[1]);
  t2 = (flow_hash_config & IP_FLOW_HASH_DST_ADDR) ? t2 : 0;

  a = (flow_hash_config & IP_FLOW_HASH_REVERSE_SRC_DST) ? t2 : t1;
  b = (flow_hash_config & IP_FLOW_HASH_REVERSE_SRC_DST) ? t1 : t2;
  b ^= (flow_hash_config & IP_FLOW_HASH_PROTO) ? protocol : 0;

  t1 = src_port;
  t2 = dst_port;

  t1 = (flow_hash_config & IP_FLOW_HASH_SRC_PORT) ? t1 : 0;
  t2 = (flow_hash_config & IP_FLOW_HASH_DST_PORT) ? t2 : 0;

  c = (flow_hash_config & IP_FLOW_HASH_REVERSE_SRC_DST) ?
    ((t1 << 16) | t2) : ((t2 << 16) | t1);

  hash_mix64 (a, b, c);
  return (u32) c;
}
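
/*
 * Illustrative note: the cache code below keys its bihash entries on this
 * flow hash combined with the TCP sequence number:
 *
 *   clib_bihash_kv_8_8_t kv;
 *   kv.key = (u64) flow_hash << 32 | seq_no;
 *
 * On the add path the hash is computed over the request as-is
 * (IP_FLOW_HASH_DEFAULT); on the lookup path IP_FLOW_HASH_REVERSE_SRC_DST
 * is added so that the response's swapped addresses and ports hash to the
 * same key.
 */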


/* 2 new ioam E2E options:
 * 1. HBH_OPTION_TYPE_IOAM_EDGE_TO_EDGE_ID: IP6 address
 *                of the ioam node that inserted the ioam header
 * 2. HBH_OPTION_TYPE_IOAM_E2E_CACHE_ID: pool id and index
 *                   to look up the tunnel select cache entry
 */
#define HBH_OPTION_TYPE_IOAM_EDGE_TO_EDGE_ID 30
#define HBH_OPTION_TYPE_IOAM_E2E_CACHE_ID 31
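/* Note: when written into the hop-by-hop header (see
 * ip6_ioam_ts_cache_set_rewrite below) these option types are OR'd with
 * HBH_OPTION_TYPE_SKIP_UNKNOWN so that nodes which do not recognize them
 * skip the option instead of dropping the packet. */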

typedef CLIB_PACKED (struct
		     {
		     ip6_hop_by_hop_option_t hdr; u8 e2e_type; u8 reserved[5];
		     ip6_address_t id;
		     }) ioam_e2e_id_option_t;

typedef CLIB_PACKED (struct
		     {
		     ip6_hop_by_hop_option_t hdr; u8 e2e_type; u8 pool_id;
		     u32 pool_index;
		     }) ioam_e2e_cache_option_t;

#define IOAM_E2E_ID_OPTION_RND ((sizeof(ioam_e2e_id_option_t) + 7) & ~7)
#define IOAM_E2E_ID_HBH_EXT_LEN (IOAM_E2E_ID_OPTION_RND >> 3)
#define IOAM_E2E_CACHE_OPTION_RND ((sizeof(ioam_e2e_cache_option_t) + 7) & ~7)
#define IOAM_E2E_CACHE_HBH_EXT_LEN (IOAM_E2E_CACHE_OPTION_RND >> 3)
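
/*
 * Worked example of the rounding above, assuming the CLIB_PACKED layouts in
 * this file and a 2-byte ip6_hop_by_hop_option_t (type + length):
 *   sizeof (ioam_e2e_id_option_t)    = 2 + 1 + 5 + 16 = 24 -> RND 24, EXT_LEN 3
 *   sizeof (ioam_e2e_cache_option_t) = 2 + 1 + 1 + 4  =  8 -> RND  8, EXT_LEN 1
 * i.e. both options sit on 8-byte boundaries and EXT_LEN is the option length
 * in 8-octet units, used to grow hbh->length in ip6_ioam_ts_cache_set_rewrite.
 */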

static inline void
ioam_e2e_id_rewrite_handler (ioam_e2e_id_option_t * e2e_option,
			     ip6_address_t * address)
{
  e2e_option->id.as_u64[0] = address->as_u64[0];
  e2e_option->id.as_u64[1] = address->as_u64[1];
}

/* The following functions cache the ioam header so that it can be
 * reattached to complete a request-response
 * message exchange */
inline static void
ioam_cache_entry_free (ioam_cache_entry_t * entry)
{
  ioam_cache_main_t *cm = &ioam_cache_main;
  if (entry)
    {
      vec_free (entry->ioam_rewrite_string);
      clib_memset (entry, 0, sizeof (*entry));
      pool_put (cm->ioam_rewrite_pool, entry);
    }
}

inline static ioam_cache_entry_t *
ioam_cache_entry_cleanup (u32 pool_index)
{
  ioam_cache_main_t *cm = &ioam_cache_main;
  ioam_cache_entry_t *entry = 0;

  entry = pool_elt_at_index (cm->ioam_rewrite_pool, pool_index);
  ioam_cache_entry_free (entry);
  return (0);
}

inline static ioam_cache_entry_t *
ioam_cache_lookup (ip6_header_t * ip0, u16 src_port, u16 dst_port, u32 seq_no)
{
  ioam_cache_main_t *cm = &ioam_cache_main;
  u32 flow_hash = ip6_compute_flow_hash_ext (ip0, ip0->protocol,
					     src_port, dst_port,
					     IP_FLOW_HASH_DEFAULT |
					     IP_FLOW_HASH_REVERSE_SRC_DST);
  clib_bihash_kv_8_8_t kv, value;

  kv.key = (u64) flow_hash << 32 | seq_no;
  kv.value = 0;
  value.key = 0;
  value.value = 0;

  if (clib_bihash_search_8_8 (&cm->ioam_rewrite_cache_table, &kv, &value) >=
      0)
    {
      ioam_cache_entry_t *entry = 0;

      entry = pool_elt_at_index (cm->ioam_rewrite_pool, value.value);
      /* match */
      if (ip6_address_compare (&ip0->src_address, &entry->dst_address) == 0 &&
	  ip6_address_compare (&ip0->dst_address, &entry->src_address) == 0 &&
	  entry->src_port == dst_port &&
	  entry->dst_port == src_port && entry->seq_no == seq_no)
	{
	  /* If lookup is successful remove it from the hash */
	  clib_bihash_add_del_8_8 (&cm->ioam_rewrite_cache_table, &kv, 0);
	  return (entry);
	}
      else
	return (0);

    }
  return (0);
}

/*
 * Caches the ioam hbh header.
 * Extends the hbh header with an option containing the IP6 address of the
 * node that caches it.
 */
inline static int
ioam_cache_add (vlib_buffer_t * b0,
		ip6_header_t * ip0,
		u16 src_port,
		u16 dst_port, ip6_hop_by_hop_header_t * hbh0, u32 seq_no)
{
  ioam_cache_main_t *cm = &ioam_cache_main;
  ioam_cache_entry_t *entry = 0;
  u32 rewrite_len = 0, e2e_id_offset = 0;
  u32 pool_index = 0;
  ioam_e2e_id_option_t *e2e = 0;

  pool_get_aligned (cm->ioam_rewrite_pool, entry, CLIB_CACHE_LINE_BYTES);
  clib_memset (entry, 0, sizeof (*entry));
  pool_index = entry - cm->ioam_rewrite_pool;

  clib_memcpy_fast (entry->dst_address.as_u64, ip0->dst_address.as_u64,
		    sizeof (ip6_address_t));
  clib_memcpy_fast (entry->src_address.as_u64, ip0->src_address.as_u64,
		    sizeof (ip6_address_t));
  entry->src_port = src_port;
  entry->dst_port = dst_port;
  entry->seq_no = seq_no;
  rewrite_len = ((hbh0->length + 1) << 3);
  vec_validate (entry->ioam_rewrite_string, rewrite_len - 1);
  e2e = ip6_ioam_find_hbh_option (hbh0, HBH_OPTION_TYPE_IOAM_EDGE_TO_EDGE_ID);
  if (e2e)
    {
      entry->next_hop.as_u64[0] = e2e->id.as_u64[0];
      entry->next_hop.as_u64[1] = e2e->id.as_u64[1];
    }
  else
    {
      return (-1);
    }
  e2e_id_offset = (u8 *) e2e - (u8 *) hbh0;
  /* setup e2e id option to insert v6 address of the node caching it */
  clib_memcpy_fast (entry->ioam_rewrite_string, hbh0, rewrite_len);
  hbh0 = (ip6_hop_by_hop_header_t *) entry->ioam_rewrite_string;

  /* suffix rewrite string with e2e ID option */
  e2e = (ioam_e2e_id_option_t *) (entry->ioam_rewrite_string + e2e_id_offset);
  ioam_e2e_id_rewrite_handler (e2e, &cm->sr_localsid_cache);
  entry->my_address_offset = (u8 *) (&e2e->id) - (u8 *) hbh0;

  /* add it to hash, replacing and freeing any collision for now */
  u32 flow_hash =
    ip6_compute_flow_hash_ext (ip0, hbh0->protocol, src_port, dst_port,
			       IP_FLOW_HASH_DEFAULT);
  clib_bihash_kv_8_8_t kv, value;
  kv.key = (u64) flow_hash << 32 | seq_no;
  kv.value = 0;
  if (clib_bihash_search_8_8 (&cm->ioam_rewrite_cache_table, &kv, &value) >=
      0)
    {
      /* replace */
      ioam_cache_entry_cleanup (value.value);
    }
  kv.value = pool_index;
  clib_bihash_add_del_8_8 (&cm->ioam_rewrite_cache_table, &kv, 1);
  return (0);
}

/* Creates the SR rewrite string.
 * It is appended along with the ioam header on the server facing
 * node.
 * This SR header is necessary to attract packets towards the
 * selected Anycast server.
 */
inline static void
ioam_cache_sr_rewrite_template_create (void)
{
  ioam_cache_main_t *cm = &ioam_cache_main;
  ip6_address_t *segments = 0;
  ip6_address_t *this_seg = 0;

  /* This node's address and the original dest will be
   * filled in when the packet is processed */
  vec_add2 (segments, this_seg, 1);
  clib_memset (this_seg, 0xfe, sizeof (ip6_address_t));
  cm->sr_rewrite_template = ip6_sr_compute_rewrite_string_insert (segments);
  vec_free (segments);
}

inline static int
ioam_cache_table_init (vlib_main_t * vm)
{
  ioam_cache_main_t *cm = &ioam_cache_main;

  pool_alloc_aligned (cm->ioam_rewrite_pool,
		      MAX_CACHE_ENTRIES, CLIB_CACHE_LINE_BYTES);
  cm->lookup_table_nbuckets = IOAM_CACHE_TABLE_DEFAULT_HASH_NUM_BUCKETS;
  cm->lookup_table_nbuckets = 1 << max_log2 (cm->lookup_table_nbuckets);
  cm->lookup_table_size = IOAM_CACHE_TABLE_DEFAULT_HASH_MEMORY_SIZE;

  clib_bihash_init_8_8 (&cm->ioam_rewrite_cache_table,
			"ioam rewrite cache table",
			cm->lookup_table_nbuckets, cm->lookup_table_size);
  /* Create SR rewrite template */
  ioam_cache_sr_rewrite_template_create ();
  return (1);
}

inline static int
ioam_cache_table_destroy (vlib_main_t * vm)
{
  ioam_cache_main_t *cm = &ioam_cache_main;
  ioam_cache_entry_t *entry = 0;
  /* free pool and hash table */
  clib_bihash_free_8_8 (&cm->ioam_rewrite_cache_table);
  pool_foreach (entry, cm->ioam_rewrite_pool)
  {
    ioam_cache_entry_free (entry);
  }
  pool_free (cm->ioam_rewrite_pool);
  cm->ioam_rewrite_pool = 0;
  vec_free (cm->sr_rewrite_template);
  cm->sr_rewrite_template = 0;
  return (0);
}

inline static u8 *
format_ioam_cache_entry (u8 * s, va_list * args)
{
  ioam_cache_entry_t *e = va_arg (*args, ioam_cache_entry_t *);
  ioam_cache_main_t *cm = &ioam_cache_main;
  int rewrite_len = vec_len (e->ioam_rewrite_string);

  s = format (s, "%d: %U:%d to  %U:%d seq_no %lu\n",
	      (e - cm->ioam_rewrite_pool),
	      format_ip6_address, &e->src_address,
	      e->src_port,
	      format_ip6_address, &e->dst_address, e->dst_port, e->seq_no);

  if (rewrite_len)
    {
      s = format (s, "  %U",
		  format_ip6_hop_by_hop_ext_hdr,
		  (ip6_hop_by_hop_header_t *) e->ioam_rewrite_string,
		  rewrite_len - 1);
    }
  return s;
}

void ioam_cache_ts_timer_node_enable (vlib_main_t * vm, u8 enable);

#define IOAM_CACHE_TS_TIMEOUT 1.0	//SYN timeout 1 sec
#define IOAM_CACHE_TS_TICK 100e-3
/* Timer delays as multiples of 100ms */
#define IOAM_CACHE_TS_TIMEOUT_TICKS IOAM_CACHE_TS_TICK*9
#define TIMER_HANDLE_INVALID ((u32) ~0)


void expired_cache_ts_timer_callback (u32 * expired_timers);

/*
 * The following functions manage the M-Anycast server selection
 * cache.
 * There is a per worker thread pool used to create a cache entry
 * for each TCP SYN received. The TCP SYN-ACK contains an ioam header
 * with a HBH_OPTION_TYPE_IOAM_E2E_CACHE_ID option pointing to the
 * entry.
 */
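
/*
 * Usage sketch (illustrative only; option parsing, thread selection and
 * error handling are assumed to be done by the calling graph nodes; bi0 and
 * hbh0 stand for the current response's buffer index and hbh header):
 *
 *   u32 pool_index = 0;
 *   u8 thread_id = 0;          // current worker index on add, filled from
 *                              // the E2E cache option on lookup
 *   ip6_hop_by_hop_header_t *hbh = 0;
 *
 *   // SYN towards the anycast service: create a per-thread entry
 *   if (ioam_cache_ts_add (ip0, src_port, dst_port, seq_no, max_responses,
 *                          now, thread_id, &pool_index) == 0)
 *     ; // pool_id/pool_index are then carried in the E2E cache option
 *
 *   // SYN-ACK from a server: locate the entry via the E2E cache option and
 *   // park this buffer as the current best response (response_seen = 1)
 *   if (ioam_cache_ts_lookup (ip0, protocol, src_port, dst_port, seq_no,
 *                             &hbh, &pool_index, &thread_id, 1) == 0)
 *     ioam_cache_ts_update (thread_id, pool_index, bi0, hbh0);
 */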
inline static int
ioam_cache_ts_table_init (vlib_main_t * vm)
{
  ioam_cache_main_t *cm = &ioam_cache_main;
  int no_of_threads = vec_len (vlib_worker_threads);
  int i;

  vec_validate_aligned (cm->ioam_ts_pool, no_of_threads - 1,
			CLIB_CACHE_LINE_BYTES);
  vec_validate_aligned (cm->ts_stats, no_of_threads - 1,
			CLIB_CACHE_LINE_BYTES);
  vec_validate (cm->timer_wheels, no_of_threads - 1);
  cm->lookup_table_nbuckets = IOAM_CACHE_TABLE_DEFAULT_HASH_NUM_BUCKETS;
  cm->lookup_table_nbuckets = 1 << max_log2 (cm->lookup_table_nbuckets);
  cm->lookup_table_size = IOAM_CACHE_TABLE_DEFAULT_HASH_MEMORY_SIZE;
  for (i = 0; i < no_of_threads; i++)
    {
      pool_alloc_aligned (cm->ioam_ts_pool[i],
			  MAX_CACHE_TS_ENTRIES, CLIB_CACHE_LINE_BYTES);
      clib_memset (&cm->ts_stats[i], 0, sizeof (ioam_cache_ts_pool_stats_t));
      tw_timer_wheel_init_16t_2w_512sl (&cm->timer_wheels[i],
					expired_cache_ts_timer_callback,
					IOAM_CACHE_TS_TICK
					/* timer period 100ms */ ,
					10e4);
      cm->timer_wheels[i].last_run_time = vlib_time_now (vm);
    }
  ioam_cache_ts_timer_node_enable (vm, 1);
  return (1);
}

always_inline void
ioam_cache_ts_timer_set (ioam_cache_main_t * cm,
			 ioam_cache_ts_entry_t * entry, u32 interval)
{
  entry->timer_handle
    = tw_timer_start_16t_2w_512sl (&cm->timer_wheels[entry->pool_id],
				   entry->pool_index, 1, interval);
}

always_inline void
ioam_cache_ts_timer_reset (ioam_cache_main_t * cm,
			   ioam_cache_ts_entry_t * entry)
{
  tw_timer_stop_16t_2w_512sl (&cm->timer_wheels[entry->pool_id],
			      entry->timer_handle);
  entry->timer_handle = TIMER_HANDLE_INVALID;
}

inline static void
ioam_cache_ts_entry_free (u32 thread_id,
			  ioam_cache_ts_entry_t * entry, u32 node_index)
{
  ioam_cache_main_t *cm = &ioam_cache_main;
  vlib_main_t *vm = cm->vlib_main;
  vlib_frame_t *nf = 0;
  u32 *to_next;

  if (entry)
    {
      if (entry->hbh != 0)
	{
	  nf = vlib_get_frame_to_node (vm, node_index);
	  nf->n_vectors = 0;
	  to_next = vlib_frame_vector_args (nf);
	  nf->n_vectors = 1;
	  to_next[0] = entry->buffer_index;
	  vlib_put_frame_to_node (vm, node_index, nf);
	}
      pool_put (cm->ioam_ts_pool[thread_id], entry);
      cm->ts_stats[thread_id].inuse--;
      clib_memset (entry, 0, sizeof (*entry));
    }
}

inline static int
ioam_cache_ts_table_destroy (vlib_main_t * vm)
{
  ioam_cache_main_t *cm = &ioam_cache_main;
  ioam_cache_ts_entry_t *entry = 0;
  int no_of_threads = vec_len (vlib_worker_threads);
  int i;

  /* free pool and hash table */
  for (i = 0; i < no_of_threads; i++)
    {
      pool_foreach (entry, cm->ioam_ts_pool[i])
      {
	ioam_cache_ts_entry_free (i, entry, cm->error_node_index);
      }
      pool_free (cm->ioam_ts_pool[i]);
      cm->ioam_ts_pool[i] = 0;
      tw_timer_wheel_free_16t_2w_512sl (&cm->timer_wheels[i]);
    }
  vec_free (cm->ioam_ts_pool);
  return (0);
}

inline static int
ioam_cache_ts_entry_cleanup (u32 thread_id, u32 pool_index)
{
  ioam_cache_main_t *cm = &ioam_cache_main;
  ioam_cache_ts_entry_t *entry = 0;

  entry = pool_elt_at_index (cm->ioam_ts_pool[thread_id], pool_index);
  ioam_cache_ts_entry_free (thread_id, entry, cm->error_node_index);
  return (0);
}

/*
 * Caches the buffer for ioam SR tunnel selection for the Anycast service
 */
inline static int
ioam_cache_ts_add (ip6_header_t * ip0,
		   u16 src_port,
		   u16 dst_port,
		   u32 seq_no,
		   u8 max_responses, u64 now, u32 thread_id, u32 * pool_index)
{
  ioam_cache_main_t *cm = &ioam_cache_main;
  ioam_cache_ts_entry_t *entry = 0;

  if (cm->ts_stats[thread_id].inuse == MAX_CACHE_TS_ENTRIES)
    {
      cm->ts_stats[thread_id].add_failed++;
      return (-1);
    }

  pool_get_aligned (cm->ioam_ts_pool[thread_id], entry,
		    CLIB_CACHE_LINE_BYTES);
  clib_memset (entry, 0, sizeof (*entry));
  *pool_index = entry - cm->ioam_ts_pool[thread_id];

  clib_memcpy_fast (entry->dst_address.as_u64, ip0->dst_address.as_u64,
		    sizeof (ip6_address_t));
  clib_memcpy_fast (entry->src_address.as_u64, ip0->src_address.as_u64,
		    sizeof (ip6_address_t));
  entry->src_port = src_port;
  entry->dst_port = dst_port;
  entry->seq_no = seq_no;
  entry->response_received = 0;
  entry->max_responses = max_responses;
  entry->created_at = now;
  entry->hbh = 0;
  entry->buffer_index = 0;
  entry->pool_id = thread_id;
  entry->pool_index = *pool_index;
  ioam_cache_ts_timer_set (cm, entry, IOAM_CACHE_TS_TIMEOUT);
  cm->ts_stats[thread_id].inuse++;
  return (0);
}

inline static void
ioam_cache_ts_send (u32 thread_id, i32 pool_index)
{
  ioam_cache_main_t *cm = &ioam_cache_main;
  ioam_cache_ts_entry_t *entry = 0;

  entry = pool_elt_at_index (cm->ioam_ts_pool[thread_id], pool_index);
  if (!pool_is_free (cm->ioam_ts_pool[thread_id], entry) && entry)
    {
      /* send and free pool entry */
      ioam_cache_ts_entry_free (thread_id, entry, cm->ip6_hbh_pop_node_index);
    }
}

inline static void
ioam_cache_ts_check_and_send (u32 thread_id, i32 pool_index)
{
  ioam_cache_main_t *cm = &ioam_cache_main;
  ioam_cache_ts_entry_t *entry = 0;
  entry = pool_elt_at_index (cm->ioam_ts_pool[thread_id], pool_index);
  if (entry && entry->hbh)
    {
      if (entry->response_received == entry->max_responses ||
	  entry->created_at + IOAM_CACHE_TS_TIMEOUT <=
	  vlib_time_now (cm->vlib_main))
	{
	  ioam_cache_ts_timer_reset (cm, entry);
	  ioam_cache_ts_send (thread_id, pool_index);
	}
    }
}

inline static int
ioam_cache_ts_update (u32 thread_id,
		      i32 pool_index,
		      u32 buffer_index, ip6_hop_by_hop_header_t * hbh)
{
  ioam_cache_main_t *cm = &ioam_cache_main;
  ioam_cache_ts_entry_t *entry = 0;
  vlib_main_t *vm = cm->vlib_main;
  vlib_frame_t *nf = 0;
  u32 *to_next;

  entry = pool_elt_at_index (cm->ioam_ts_pool[thread_id], pool_index);
  if (!pool_is_free (cm->ioam_ts_pool[thread_id], entry) && entry)
    {
      /* drop existing buffer */
      if (entry->hbh != 0)
	{
	  nf = vlib_get_frame_to_node (vm, cm->error_node_index);
	  nf->n_vectors = 0;
	  to_next = vlib_frame_vector_args (nf);
	  nf->n_vectors = 1;
	  to_next[0] = entry->buffer_index;
	  vlib_put_frame_to_node (vm, cm->error_node_index, nf);
	}
      /* update */
      entry->buffer_index = buffer_index;
      entry->hbh = hbh;
      /* check and send */
      ioam_cache_ts_check_and_send (thread_id, pool_index);
      return (0);
    }
  return (-1);
}

/*
 * Looks up the entry based on the e2e option pool index.
 * result = 0: found the entry
 * result < 0: failure to find an entry
 */
inline static int
ioam_cache_ts_lookup (ip6_header_t * ip0,
		      u8 protocol,
		      u16 src_port,
		      u16 dst_port,
		      u32 seq_no,
		      ip6_hop_by_hop_header_t ** hbh,
		      u32 * pool_index, u8 * thread_id, u8 response_seen)
{
  ioam_cache_main_t *cm = &ioam_cache_main;
  ip6_hop_by_hop_header_t *hbh0 = 0;
  ioam_e2e_cache_option_t *e2e = 0;

  hbh0 = (ip6_hop_by_hop_header_t *) (ip0 + 1);
  e2e =
    (ioam_e2e_cache_option_t *) ((u8 *) hbh0 + cm->rewrite_pool_index_offset);
  if ((u8 *) e2e < ((u8 *) hbh0 + ((hbh0->length + 1) << 3))
      && e2e->hdr.type == HBH_OPTION_TYPE_IOAM_E2E_CACHE_ID)
    {
      ioam_cache_ts_entry_t *entry = 0;
      *pool_index = e2e->pool_index;
      *thread_id = e2e->pool_id;
      entry = pool_elt_at_index (cm->ioam_ts_pool[*thread_id], *pool_index);
      /* match */
      if (entry &&
	  ip6_address_compare (&ip0->src_address, &entry->dst_address) == 0 &&
	  ip6_address_compare (&ip0->dst_address, &entry->src_address) == 0 &&
	  entry->src_port == dst_port &&
	  entry->dst_port == src_port && entry->seq_no == seq_no)
	{
	  *hbh = entry->hbh;
	  entry->response_received += response_seen;
	  return (0);
	}
      else if (entry)
	{
	  return (-1);
	}
    }
  return (-1);
}

inline static u8 *
format_ioam_cache_ts_entry (u8 * s, va_list * args)
{
  ioam_cache_ts_entry_t *e = va_arg (*args, ioam_cache_ts_entry_t *);
  u32 thread_id = va_arg (*args, u32);
  ioam_cache_main_t *cm = &ioam_cache_main;
  ioam_e2e_id_option_t *e2e = 0;
  vlib_main_t *vm = cm->vlib_main;
  clib_time_t *ct = &vm->clib_time;

  if (!e)
    goto end;

  if (e->hbh)
    {
      e2e =
	ip6_ioam_find_hbh_option (e->hbh,
				  HBH_OPTION_TYPE_IOAM_EDGE_TO_EDGE_ID);

      s =
	format (s,
		"%d: %U:%d to  %U:%d seq_no %u buffer %u %U \n\t\tCreated at %U Received %d\n",
		(e - cm->ioam_ts_pool[thread_id]), format_ip6_address,
		&e->src_address, e->src_port, format_ip6_address,
		&e->dst_address, e->dst_port, e->seq_no, e->buffer_index,
		format_ip6_address, e2e ? &e2e->id : 0, format_time_interval,
		"h:m:s:u",
		(e->created_at -
		 vm->cpu_time_main_loop_start) * ct->seconds_per_clock,
		e->response_received);
    }
  else
    {
      s =
	format (s,
		"%d: %U:%d to  %U:%d seq_no %u Buffer %u \n\t\tCreated at %U Received %d\n",
		(e - cm->ioam_ts_pool[thread_id]), format_ip6_address,
		&e->src_address, e->src_port, format_ip6_address,
		&e->dst_address, e->dst_port, e->seq_no, e->buffer_index,
		format_time_interval, "h:m:s:u",
		(e->created_at -
		 vm->cpu_time_main_loop_start) * ct->seconds_per_clock,
		e->response_received);
    }

end:
  return s;
}

/*
 * Get the extended rewrite string for iOAM data in v6.
 * This makes space for e2e options to carry the cache pool info
 * and the M-Anycast server address.
 * It sets up the rewrite string per the ioam ip6 configs plus the new cache
 * option, and records the offset of that option so that the cache
 * pool id and index can be populated.
 */
static inline int
ip6_ioam_ts_cache_set_rewrite (void)
{
  ip6_hop_by_hop_ioam_main_t *hm = &ip6_hop_by_hop_ioam_main;
  ioam_cache_main_t *cm = &ioam_cache_main;
  ip6_hop_by_hop_header_t *hbh;
  u32 rewrite_len = 0;
  ioam_e2e_cache_option_t *e2e = 0;
  ioam_e2e_id_option_t *e2e_id = 0;

  vec_free (cm->rewrite);
  ip6_ioam_set_rewrite (&(cm->rewrite), hm->has_trace_option,
			hm->has_pot_option, hm->has_seqno_option);
  hbh = (ip6_hop_by_hop_header_t *) cm->rewrite;
  rewrite_len = ((hbh->length + 1) << 3);
  vec_validate (cm->rewrite,
		rewrite_len - 1 + IOAM_E2E_CACHE_OPTION_RND +
		IOAM_E2E_ID_OPTION_RND);
  hbh = (ip6_hop_by_hop_header_t *) cm->rewrite;
  /* setup e2e id option to insert pool id and index of the node caching it */
  hbh->length += IOAM_E2E_CACHE_HBH_EXT_LEN + IOAM_E2E_ID_HBH_EXT_LEN;
  cm->rewrite_pool_index_offset = rewrite_len;
  e2e = (ioam_e2e_cache_option_t *) (cm->rewrite + rewrite_len);
  e2e->hdr.type = HBH_OPTION_TYPE_IOAM_E2E_CACHE_ID
    | HBH_OPTION_TYPE_SKIP_UNKNOWN;
  e2e->hdr.length = sizeof (ioam_e2e_cache_option_t) -
    sizeof (ip6_hop_by_hop_option_t);
  e2e->e2e_type = 2;
  e2e_id =
    (ioam_e2e_id_option_t *) ((u8 *) e2e + sizeof (ioam_e2e_cache_option_t));
  e2e_id->hdr.type =
    HBH_OPTION_TYPE_IOAM_EDGE_TO_EDGE_ID | HBH_OPTION_TYPE_SKIP_UNKNOWN;
  e2e_id->hdr.length =
    sizeof (ioam_e2e_id_option_t) - sizeof (ip6_hop_by_hop_option_t);
  e2e_id->e2e_type = 1;

  return (0);
}
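
/*
 * Resulting rewrite string layout (illustrative):
 *   [hbh header | trace/pot/seqno options per ioam ip6 config |
 *    E2E cache option | E2E id option]
 * cm->rewrite_pool_index_offset records the offset of the E2E cache option so
 * that the cache pool id and index can be populated per flow.
 */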

static inline int
ip6_ioam_ts_cache_cleanup_rewrite (void)
{
  ioam_cache_main_t *cm = &ioam_cache_main;

  vec_free (cm->rewrite);
  cm->rewrite = 0;
  cm->rewrite_pool_index_offset = 0;
  return (0);
}
#endif /* __included_ioam_cache_h__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */