summaryrefslogtreecommitdiffstats
path: root/src/plugins/ioam/lib-e2e/ioam_seqno_lib.h
blob: 6bd38ff2968f413dab5724d199cf90d484409add (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef PLUGINS_IOAM_PLUGIN_IOAM_LIB_E2E_IOAM_SEQNO_LIB_H_
#define PLUGINS_IOAM_PLUGIN_IOAM_LIB_E2E_IOAM_SEQNO_LIB_H_

#include <vppinfra/types.h>

#define SEQ_CHECK_VALUE 0x80000000	/* for seq number wraparound detection */

#define SEQNO_WINDOW_SIZE 2048
#define SEQNO_WINDOW_ARRAY_SIZE 64

/*
 * Sliding-window bitmap recording which sequence numbers have been seen.
 * Only the low 32 bits of each u64 word are used by the BIT_* helpers
 * below (32 bits * SEQNO_WINDOW_ARRAY_SIZE words = SEQNO_WINDOW_SIZE bits).
 */
typedef struct seqno_bitmap_
{
  u32 window_size;		/* number of sequence numbers tracked (in bits) */
  u32 array_size;		/* number of u64 words in 'array' */
  u32 mask;			/* bit-index wrap mask; presumably window_size - 1 -- confirm at init */
  u32 pad;			/* explicit padding; keeps 'highest' 8-byte aligned */
  u64 highest;			/* highest sequence number seen so far */
  u64 array[SEQNO_WINDOW_ARRAY_SIZE];	/* Will be alloc to array_size */
} seqno_bitmap;

/*
 * Per-flow receive-side accounting, updated by ioam_analyze_seqno().
 */
typedef struct seqno_rx_info_
{
  u64 rx_packets;		/* total packets analyzed */
  u64 lost_packets;		/* gaps detected (decremented if a packet later arrives reordered) */
  u64 reordered_packets;	/* packets arriving after a higher seq number was seen */
  u64 dup_packets;		/* packets whose seq number was already marked in the bitmap */
  seqno_bitmap bitmap;		/* window of recently seen sequence numbers */
} seqno_rx_info;

/* This structure is 64-byte aligned */
typedef struct ioam_seqno_data_
{
  /* Encap side only needs the next sequence number to send; decap side
   * needs the full rx accounting state.  The two are mutually exclusive,
   * hence the anonymous union. */
  union
  {
    u32 seq_num;		/* Useful only for encap node */
    seqno_rx_info seqno_rx;	/* decap-side receive statistics */
  };
} ioam_seqno_data;

/*
 * Set bit 'n' in bitmap 'p'.  The bitmap stores 32 used bits per u64
 * word ('n >> 5' word indexing), matching BIT_TEST and BIT_CLEAR.
 */
static inline void
BIT_SET (u64 * p, u32 n)
{
  /* Use a 64-bit unsigned constant for the mask: the previous
   * "1 << (n & 31)" was a signed 32-bit shift, which is undefined
   * behavior for bit 31 and sign-extends when widened to u64,
   * smearing bits 31..63 of the word. */
  p[n >> 5] |= ((u64) 1 << (n & 31));
}

/*
 * Test bit 'n' in bitmap 'p'.
 *
 * Returns non-zero (1) if the bit is set, 0 otherwise.  The mask is a
 * 64-bit unsigned constant to avoid the undefined signed shift at bit
 * 31, and the result is normalized to 0/1 rather than truncating a u64
 * mask result into the int return value.
 */
static inline int
BIT_TEST (u64 * p, u32 n)
{
  return (p[n >> 5] & ((u64) 1 << (n & 31))) != 0;
}

/*
 * Clear 'num_bits' consecutive bits of circular bitmap 'p', starting at
 * bit position 'start' (wrapped into the window).  'mask' is the
 * bit-index wrap mask (window size in bits minus one); 'mask >> 5'
 * wraps the word index.  Word geometry matches BIT_SET/BIT_TEST:
 * 32 used bits per u64 word.
 *
 * All shift masks are built with 1ULL: the original 32-bit signed
 * shifts were undefined behavior when the shift count reached 31.
 * Declared static inline (not plain static) to match the other
 * helpers and avoid unused-function warnings in including TUs.
 */
static inline void
BIT_CLEAR (u64 * p, u64 start, int num_bits, u32 mask)
{
  u64 n;
  int t;
  int start_index = (start >> 5);
  int mask_index = (mask >> 5);

  start_index &= mask_index;	/* wrap into the circular window */
  if (start & 0x1f)
    {
      /* Partial leading word: preserve bits below start_bit */
      int start_bit = (start & 0x1f);

      n = (1ULL << start_bit) - 1;
      t = start_bit + num_bits;
      if (t < 32)
	{
	  /* Range ends inside this word: also preserve bits >= t */
	  n |= ~((1ULL << t) - 1);
	  p[start_index] &= n;
	  return;
	}
      p[start_index] &= n;
      start_index = (start_index + 1) & mask_index;
      num_bits -= (32 - start_bit);
    }
  /* Whole words */
  while (num_bits >= 32)
    {
      p[start_index] = 0;
      start_index = (start_index + 1) & mask_index;
      num_bits -= 32;
    }
  /* Partial trailing word: clear the low num_bits bits */
  n = ~((1ULL << num_bits) - 1);
  p[start_index] &= n;
}

/*
 * Heuristic wraparound detection: returns 1 when 'a' (the previously
 * highest sequence number) exceeds 'b' by more than half the 32-bit
 * space, suggesting 'b' wrapped around zero rather than going
 * backwards; returns 0 otherwise.
 */
static inline u8
seqno_check_wraparound (u32 a, u32 b)
{
  /* a > b already implies a != b, so one comparison suffices */
  return (a > b && (a - b) > SEQ_CHECK_VALUE) ? 1 : 0;
}

/*
 * Function to analyze the PPC value received.
 *     - Updates the bitmap with received sequence number
 *     - counts the received/lost/duplicate/reordered packets
 */
inline static void
ioam_analyze_seqno (seqno_rx_info * seqno_rx, u64 seqno)
{
  int diff;			/* NOTE(review): int while the operands are u64 -- assumes gaps fit in an int; confirm */
  static int peer_dead_count;	/* NOTE(review): function-static, so shared across ALL flows (and threads) -- verify intended */
  seqno_bitmap *bitmap = &seqno_rx->bitmap;

  seqno_rx->rx_packets++;

  if (seqno > bitmap->highest)
    {				/* new larger sequence number */
      peer_dead_count = 0;
      diff = seqno - bitmap->highest;
      if (diff < bitmap->window_size)
	{
	  if (diff > 1)
	    {			/* diff==1 is *such* a common case it's a win to optimize it */
	      /* Gap inside the window: clear the skipped slots and
	       * provisionally count them as lost (reclaimed below if
	       * they later arrive reordered). */
	      BIT_CLEAR (bitmap->array, bitmap->highest + 1, diff - 1,
			 bitmap->mask);
	      seqno_rx->lost_packets += diff - 1;
	    }
	}
      else
	{
	  /* Jump larger than the window: everything in between is lost
	   * and the whole bitmap restarts from this sequence number. */
	  seqno_rx->lost_packets += diff - 1;
	  memset (bitmap->array, 0, bitmap->array_size * sizeof (u64));
	}
      BIT_SET (bitmap->array, seqno & bitmap->mask);
      bitmap->highest = seqno;
      return;
    }

  /* we've seen a bigger seq number before */
  diff = bitmap->highest - seqno;
  if (diff >= bitmap->window_size)
    {
      if (seqno_check_wraparound (bitmap->highest, seqno))
	{
	  /* Sequence number wrapped around: restart tracking here */
	  memset (bitmap->array, 0, bitmap->array_size * sizeof (u64));
	  BIT_SET (bitmap->array, seqno & bitmap->mask);
	  bitmap->highest = seqno;
	  return;
	}
      else
	{
	  /* A low sequence number far below the window: the peer may
	   * have restarted.  After 25 such packets in a row, resync
	   * the bitmap to the current sequence number. */
	  peer_dead_count++;
	  if (peer_dead_count > 25)
	    {
	      peer_dead_count = 0;
	      memset (bitmap->array, 0, bitmap->array_size * sizeof (u64));
	      BIT_SET (bitmap->array, seqno & bitmap->mask);
	      bitmap->highest = seqno;
	    }
	  //ppc_rx->reordered_packets++;
	}
      return;
    }

  if (BIT_TEST (bitmap->array, seqno & bitmap->mask))
    {
      seqno_rx->dup_packets++;
      return;			/* Already seen */
    }
  /* Inside the window and not seen before: it was counted as lost when
   * the gap was first detected, so reclassify it as reordered. */
  seqno_rx->reordered_packets++;
  seqno_rx->lost_packets--;
  BIT_SET (bitmap->array, seqno & bitmap->mask);
  return;
}

u8 *show_ioam_seqno_analyse_data_fn (u8 * s, seqno_rx_info * rx);

u8 *show_ioam_seqno_cmd_fn (u8 * s, ioam_seqno_data * seqno_data, u8 enc);

void ioam_seqno_init_data (ioam_seqno_data * data);

void ioam_seqno_init_rx_info (seqno_rx_info * data);

#endif /* PLUGINS_IOAM_PLUGIN_IOAM_LIB_E2E_IOAM_SEQNO_LIB_H_ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
n class="p">(wrk->mq_evt_conns, mqc); memset (mqc, 0, sizeof (*mqc)); return mqc; } u32 vcl_mq_evt_conn_index (vcl_worker_t * wrk, vcl_mq_evt_conn_t * mqc) { return (mqc - wrk->mq_evt_conns); } vcl_mq_evt_conn_t * vcl_mq_evt_conn_get (vcl_worker_t * wrk, u32 mq_conn_idx) { return pool_elt_at_index (wrk->mq_evt_conns, mq_conn_idx); } int vcl_mq_epoll_add_evfd (vcl_worker_t * wrk, svm_msg_q_t * mq) { struct epoll_event e = { 0 }; vcl_mq_evt_conn_t *mqc; u32 mqc_index; int mq_fd; mq_fd = svm_msg_q_get_consumer_eventfd (mq); if (wrk->mqs_epfd < 0 || mq_fd == -1) return -1; mqc = vcl_mq_evt_conn_alloc (wrk); mqc_index = vcl_mq_evt_conn_index (wrk, mqc); mqc->mq_fd = mq_fd; mqc->mq = mq; e.events = EPOLLIN; e.data.u32 = mqc_index; if (epoll_ctl (wrk->mqs_epfd, EPOLL_CTL_ADD, mq_fd, &e) < 0) { clib_warning ("failed to add mq eventfd to mq epoll fd"); return -1; } return mqc_index; } int vcl_mq_epoll_del_evfd (vcl_worker_t * wrk, u32 mqc_index) { vcl_mq_evt_conn_t *mqc; if (wrk->mqs_epfd || mqc_index == ~0) return -1; mqc = vcl_mq_evt_conn_get (wrk, mqc_index); if (epoll_ctl (wrk->mqs_epfd, EPOLL_CTL_DEL, mqc->mq_fd, 0) < 0) { clib_warning ("failed to del mq eventfd to mq epoll fd"); return -1; } return 0; } static vcl_worker_t * vcl_worker_alloc (void) { vcl_worker_t *wrk; pool_get (vcm->workers, wrk); memset (wrk, 0, sizeof (*wrk)); wrk->wrk_index = wrk - vcm->workers; wrk->forked_child = ~0; return wrk; } static void vcl_worker_free (vcl_worker_t * wrk) { pool_put (vcm->workers, wrk); } void vcl_worker_cleanup (vcl_worker_t * wrk, u8 notify_vpp) { clib_spinlock_lock (&vcm->workers_lock); if (notify_vpp) { if (wrk->wrk_index == vcl_get_worker_index ()) vcl_send_app_worker_add_del (0 /* is_add */ ); else vcl_send_child_worker_del (wrk); } if (wrk->mqs_epfd > 0) close (wrk->mqs_epfd); hash_free (wrk->session_index_by_vpp_handles); hash_free (wrk->ct_registration_by_mq); clib_spinlock_free (&wrk->ct_registration_lock); vec_free (wrk->mq_events); vec_free 
(wrk->mq_msg_vector); vcl_worker_free (wrk); clib_spinlock_unlock (&vcm->workers_lock); } static void vcl_worker_cleanup_cb (void *arg) { vcl_worker_t *wrk = vcl_worker_get_current (); u32 wrk_index = wrk->wrk_index; vcl_worker_cleanup (wrk, 1 /* notify vpp */ ); vcl_set_worker_index (~0); VDBG (0, "cleaned up worker %u", wrk_index); } vcl_worker_t * vcl_worker_alloc_and_init () { vcl_worker_t *wrk; /* This was initialized already */ if (vcl_get_worker_index () != ~0) return 0; if (pool_elts (vcm->workers) == vcm->cfg.max_workers) { VDBG (0, "max-workers %u limit reached", vcm->cfg.max_workers); return 0; } clib_spinlock_lock (&vcm->workers_lock); wrk = vcl_worker_alloc (); vcl_set_worker_index (wrk->wrk_index); wrk->thread_id = pthread_self (); wrk->current_pid = getpid (); wrk->mqs_epfd = -1; if (vcm->cfg.use_mq_eventfd) { wrk->mqs_epfd = epoll_create (1); if (wrk->mqs_epfd < 0) { clib_unix_warning ("epoll_create() returned"); goto done; } } wrk->session_index_by_vpp_handles = hash_create (0, sizeof (uword)); wrk->ct_registration_by_mq = hash_create (0, sizeof (uword)); clib_spinlock_init (&wrk->ct_registration_lock); clib_time_init (&wrk->clib_time); vec_validate (wrk->mq_events, 64); vec_validate (wrk->mq_msg_vector, 128); vec_reset_length (wrk->mq_msg_vector); vec_validate (wrk->unhandled_evts_vector, 128); vec_reset_length (wrk->unhandled_evts_vector); clib_spinlock_unlock (&vcm->workers_lock); done: return wrk; } int vcl_worker_register_with_vpp (void) { vcl_worker_t *wrk = vcl_worker_get_current (); clib_spinlock_lock (&vcm->workers_lock); vcm->app_state = STATE_APP_ADDING_WORKER; vcl_send_app_worker_add_del (1 /* is_add */ ); if (vcl_wait_for_app_state_change (STATE_APP_READY)) { clib_warning ("failed to add worker to vpp"); return -1; } if (pthread_key_create (&vcl_worker_stop_key, vcl_worker_cleanup_cb)) clib_warning ("failed to add pthread cleanup function"); if (pthread_setspecific (vcl_worker_stop_key, &wrk->thread_id)) clib_warning ("failed to setup 
key value"); clib_spinlock_unlock (&vcm->workers_lock); VDBG (0, "added worker %u", wrk->wrk_index); return 0; } int vcl_worker_set_bapi (void) { vcl_worker_t *wrk = vcl_worker_get_current (); int i; /* Find the first worker with the same pid */ for (i = 0; i < vec_len (vcm->workers); i++) { if (i == wrk->wrk_index) continue; if (vcm->workers[i].current_pid == wrk->current_pid) { wrk->vl_input_queue = vcm->workers[i].vl_input_queue; wrk->my_client_index = vcm->workers[i].my_client_index; return 0; } } return -1; } vcl_shared_session_t * vcl_shared_session_alloc (void) { vcl_shared_session_t *ss; pool_get (vcm->shared_sessions, ss); memset (ss, 0, sizeof (*ss)); ss->ss_index = ss - vcm->shared_sessions; return ss; } vcl_shared_session_t * vcl_shared_session_get (u32 ss_index) { if (pool_is_free_index (vcm->shared_sessions, ss_index)) return 0; return pool_elt_at_index (vcm->shared_sessions, ss_index); } void vcl_shared_session_free (vcl_shared_session_t * ss) { pool_put (vcm->shared_sessions, ss); } void vcl_worker_share_session (vcl_worker_t * parent, vcl_worker_t * wrk, vcl_session_t * new_s) { vcl_shared_session_t *ss; vcl_session_t *s; s = vcl_session_get (parent, new_s->session_index); if (s->shared_index == ~0) { ss = vcl_shared_session_alloc (); vec_add1 (ss->workers, parent->wrk_index); s->shared_index = ss->ss_index; } else { ss = vcl_shared_session_get (s->shared_index); } new_s->shared_index = ss->ss_index; vec_add1 (ss->workers, wrk->wrk_index); } int vcl_worker_unshare_session (vcl_worker_t * wrk, vcl_session_t * s) { vcl_shared_session_t *ss; int i; ss = vcl_shared_session_get (s->shared_index); for (i = 0; i < vec_len (ss->workers); i++) { if (ss->workers[i] == wrk->wrk_index) { vec_del1 (ss->workers, i); break; } } if (vec_len (ss->workers) == 0) { vcl_shared_session_free (ss); return 1; } return 0; } void vcl_worker_share_sessions (vcl_worker_t * parent_wrk) { vcl_session_t *new_s; vcl_worker_t *wrk; if (!parent_wrk->sessions) return; wrk = 
vcl_worker_get_current (); wrk->sessions = pool_dup (parent_wrk->sessions); wrk->session_index_by_vpp_handles = hash_dup (parent_wrk->session_index_by_vpp_handles); /* *INDENT-OFF* */ pool_foreach (new_s, wrk->sessions, ({ vcl_worker_share_session (parent_wrk, wrk, new_s); })); /* *INDENT-ON* */ } int vcl_session_get_refcnt (vcl_session_t * s) { vcl_shared_session_t *ss; ss = vcl_shared_session_get (s->shared_index); if (ss) return vec_len (ss->workers); return 0; } void vcl_segment_table_add (u64 segment_handle, u32 svm_segment_index) { clib_rwlock_writer_lock (&vcm->segment_table_lock); hash_set (vcm->segment_table, segment_handle, svm_segment_index); clib_rwlock_writer_unlock (&vcm->segment_table_lock); } u32 vcl_segment_table_lookup (u64 segment_handle) { uword *seg_indexp; clib_rwlock_reader_lock (&vcm->segment_table_lock); seg_indexp = hash_get (vcm->segment_table, segment_handle); clib_rwlock_reader_unlock (&vcm->segment_table_lock); if (!seg_indexp) return VCL_INVALID_SEGMENT_INDEX; return ((u32) * seg_indexp); } void vcl_segment_table_del (u64 segment_handle) { clib_rwlock_writer_lock (&vcm->segment_table_lock); hash_unset (vcm->segment_table, segment_handle); clib_rwlock_writer_unlock (&vcm->segment_table_lock); } /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */