summaryrefslogtreecommitdiffstats
path: root/src/plugins/linux-cp/lcp_api.c
blob: a217aa708f3b407015f8e0f6b7e3492b8594c6ad (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
/*
 * Copyright 2020 Rubicon Communications, LLC.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <sys/socket.h>
#include <linux/if.h>

#include <vnet/vnet.h>
#include <vnet/plugin/plugin.h>

#include <vlibapi/api.h>
#include <vlibmemory/api.h>
#include <vpp/app/version.h>
#include <vnet/format_fns.h>

#include <linux-cp/lcp_interface.h>
#include <linux-cp/lcp.api_enum.h>
#include <linux-cp/lcp.api_types.h>

static u16 lcp_msg_id_base;
#define REPLY_MSG_ID_BASE lcp_msg_id_base
#include <vlibapi/api_helper_macros.h>

static lip_host_type_t
api_decode_host_type (vl_api_lcp_itf_host_type_t type)
{
  /* Translate the wire-format host interface type into the internal
   * enum. Anything that is not TUN is treated as TAP. */
  return (type == LCP_API_ITF_HOST_TUN) ? LCP_ITF_HOST_TUN :
					  LCP_ITF_HOST_TAP;
}

static vl_api_lcp_itf_host_type_t
api_encode_host_type (lip_host_type_t type)
{
  /* Translate the internal host interface type back to its wire-format
   * counterpart; the inverse of api_decode_host_type. */
  return (type == LCP_ITF_HOST_TUN) ? LCP_API_ITF_HOST_TUN :
				      LCP_API_ITF_HOST_TAP;
}

static int
vl_api_lcp_itf_pair_add (u32 phy_sw_if_index, lip_host_type_t lip_host_type,
			 u8 *mp_host_if_name, size_t sizeof_host_if_name,
			 u8 *mp_namespace, size_t sizeof_mp_namespace,
			 u32 *host_sw_if_index_p)
{
  /*
   * Common add path for the v1 and v2 add_del handlers.
   *
   * The API message carries the host interface name and netns as
   * fixed-size char arrays that are not guaranteed to be
   * NUL-terminated. lcp_itf_pair_create expects u8 vectors, so build
   * bounded, NUL-terminated copies, hand them off, then free them.
   * Returns the rv from lcp_itf_pair_create.
   */
  u8 *if_name_vec = 0;
  u8 *netns_vec = 0;
  int if_name_len, netns_len, rv;

  /* Bound the scan one byte short of the field so a full field still
     yields a terminated copy. */
  if_name_len =
    clib_strnlen ((char *) mp_host_if_name, sizeof_host_if_name - 1);
  vec_add (if_name_vec, mp_host_if_name, if_name_len);
  vec_add1 (if_name_vec, 0);

  netns_len = clib_strnlen ((char *) mp_namespace, sizeof_mp_namespace - 1);
  vec_add (netns_vec, mp_namespace, netns_len);
  vec_add1 (netns_vec, 0);

  rv = lcp_itf_pair_create (phy_sw_if_index, if_name_vec, lip_host_type,
			    netns_vec, host_sw_if_index_p);

  vec_free (if_name_vec);
  vec_free (netns_vec);

  return rv;
}

static void
vl_api_lcp_itf_pair_add_del_t_handler (vl_api_lcp_itf_pair_add_del_t *mp)
{
  /* Handler for the v1 lcp_itf_pair_add_del message: create or delete
   * the linux-cp pair for mp->sw_if_index. The v1 reply does not carry
   * the created host sw_if_index (see the v2 handler for that). */
  u32 phy_sw_if_index;
  vl_api_lcp_itf_pair_add_del_reply_t *rmp;
  lip_host_type_t lip_host_type;
  int rv;

  /* Jumps to BAD_SW_IF_INDEX_LABEL below (setting rv) if
   * mp->sw_if_index does not name a known interface. */
  VALIDATE_SW_IF_INDEX_END (mp);

  phy_sw_if_index = mp->sw_if_index;
  lip_host_type = api_decode_host_type (mp->host_if_type);
  if (mp->is_add)
    {
      /* NULL: v1 callers do not receive the host sw_if_index back. */
      rv = vl_api_lcp_itf_pair_add (
	phy_sw_if_index, lip_host_type, mp->host_if_name,
	sizeof (mp->host_if_name), mp->netns, sizeof (mp->netns), NULL);
    }
  else
    {
      rv = lcp_itf_pair_delete (phy_sw_if_index);
    }

  BAD_SW_IF_INDEX_LABEL;
  REPLY_MACRO_END (VL_API_LCP_ITF_PAIR_ADD_DEL_REPLY);
}

static void
vl_api_lcp_itf_pair_add_del_v2_t_handler (vl_api_lcp_itf_pair_add_del_v2_t *mp)
{
  /* Handler for the v2 lcp_itf_pair_add_del message. Identical to v1
   * except the reply reports the sw_if_index of the created host-side
   * interface (~0 on delete or failure). */
  u32 phy_sw_if_index, host_sw_if_index = ~0;
  vl_api_lcp_itf_pair_add_del_v2_reply_t *rmp;
  lip_host_type_t lip_host_type;
  int rv;

  /* Jumps to BAD_SW_IF_INDEX_LABEL below (setting rv) if
   * mp->sw_if_index does not name a known interface. */
  VALIDATE_SW_IF_INDEX_END (mp);

  phy_sw_if_index = mp->sw_if_index;
  lip_host_type = api_decode_host_type (mp->host_if_type);
  if (mp->is_add)
    {
      rv = vl_api_lcp_itf_pair_add (phy_sw_if_index, lip_host_type,
				    mp->host_if_name,
				    sizeof (mp->host_if_name), mp->netns,
				    sizeof (mp->netns), &host_sw_if_index);
    }
  else
    {
      rv = lcp_itf_pair_delete (phy_sw_if_index);
    }

  BAD_SW_IF_INDEX_LABEL;
  REPLY_MACRO2_END (VL_API_LCP_ITF_PAIR_ADD_DEL_V2_REPLY,
		    { rmp->host_sw_if_index = host_sw_if_index; });
}

static void
send_lcp_itf_pair_details (index_t lipi, vl_api_registration_t *rp,
			   u32 context)
{
  /* Encode the itf-pair at pool index 'lipi' as a single details
   * message and send it to registration 'rp'. Called once per pair by
   * the pair_get handler's dump loop. */
  vl_api_lcp_itf_pair_details_t *rmp;
  lcp_itf_pair_t *lcp_pair = lcp_itf_pair_get (lipi);

  REPLY_MACRO_DETAILS4_END (
    VL_API_LCP_ITF_PAIR_DETAILS, rp, context, ({
      rmp->phy_sw_if_index = lcp_pair->lip_phy_sw_if_index;
      rmp->host_sw_if_index = lcp_pair->lip_host_sw_if_index;
      rmp->vif_index = lcp_pair->lip_vif_index;
      rmp->host_if_type = api_encode_host_type (lcp_pair->lip_host_type);

      /* Copy the name vec into the fixed-size field and NUL-terminate.
       * NOTE(review): the terminator is written at index vec_len(),
       * which assumes vec_len < sizeof(field) — presumably enforced
       * where the pair is created; confirm against lcp_itf_pair_create. */
      memcpy_s (rmp->host_if_name, sizeof (rmp->host_if_name),
		lcp_pair->lip_host_name, vec_len (lcp_pair->lip_host_name));
      rmp->host_if_name[vec_len (lcp_pair->lip_host_name)] = 0;

      memcpy_s (rmp->netns, sizeof (rmp->netns), lcp_pair->lip_namespace,
		vec_len (lcp_pair->lip_namespace));
      rmp->netns[vec_len (lcp_pair->lip_namespace)] = 0;
    }));
}

static void
vl_api_lcp_itf_pair_get_t_handler (vl_api_lcp_itf_pair_get_t *mp)
{
  /* Paged dump of all itf-pairs: the macro walks lcp_itf_pair_pool
   * from the client-supplied cursor, emitting one details message per
   * entry ('cursor' and 'rp' are defined inside the macro), then sends
   * the get_reply carrying the resume cursor. */
  vl_api_lcp_itf_pair_get_reply_t *rmp;
  i32 rv = 0;

  REPLY_AND_DETAILS_MACRO_END (
    VL_API_LCP_ITF_PAIR_GET_REPLY, lcp_itf_pair_pool,
    ({ send_lcp_itf_pair_details (cursor, rp, mp->context); }));
}

static void
vl_api_lcp_default_ns_set_t_handler (vl_api_lcp_default_ns_set_t *mp)
{
  /* Set the default Linux network namespace used for new pairs. */
  vl_api_lcp_default_ns_set_reply_t *rmp;
  int rv;

  /* The netns field in the message is not guaranteed to be
   * NUL-terminated; force termination in place before use. */
  mp->netns[LCP_NS_LEN - 1] = 0;
  rv = lcp_set_default_ns (mp->netns);

  REPLY_MACRO (VL_API_LCP_DEFAULT_NS_SET_REPLY);
}

static void
vl_api_lcp_default_ns_get_t_handler (vl_api_lcp_default_ns_get_t *mp)
{
  /* Report the currently-configured default netns; the reply's netns
   * field is left zeroed (empty string) when none is set. */
  vl_api_lcp_default_ns_get_reply_t *rmp;

  REPLY_MACRO_DETAILS2 (VL_API_LCP_DEFAULT_NS_GET_REPLY, ({
			  char *ns = (char *) lcp_get_default_ns ();
			  if (ns)
			    clib_strncpy ((char *) rmp->netns, ns,
					  LCP_NS_LEN - 1);
			}));
}

static void
vl_api_lcp_itf_pair_replace_begin_t_handler (
  vl_api_lcp_itf_pair_replace_begin_t *mp)
{
  /* Start an atomic replace of the pair set: existing pairs are marked
   * stale until replace_end purges the ones not re-added. */
  vl_api_lcp_itf_pair_replace_begin_reply_t *rmp;
  int rv;

  rv = lcp_itf_pair_replace_begin ();

  REPLY_MACRO (VL_API_LCP_ITF_PAIR_REPLACE_BEGIN_REPLY);
}

static void
vl_api_lcp_itf_pair_replace_end_t_handler (
  vl_api_lcp_itf_pair_replace_end_t *mp)
{
  /* Finish the replace started by replace_begin, removing any pairs
   * that were not re-added in between. */
  vl_api_lcp_itf_pair_replace_end_reply_t *rmp;
  /* No initializer: rv is unconditionally assigned below (the redundant
   * '= 0' was dropped for consistency with the begin handler). */
  int rv;

  rv = lcp_itf_pair_replace_end ();

  REPLY_MACRO (VL_API_LCP_ITF_PAIR_REPLACE_END_REPLY);
}

/*
 * Set up the API message handling tables
 */
#include <linux-cp/lcp.api.c>

static clib_error_t *
lcp_api_init (vlib_main_t *vm)
{
  /* Register this plugin's API messages and record the message-id base
   * consumed by the REPLY_* macros above. */
  lcp_msg_id_base = setup_message_id_table ();
  return NULL;
}

VLIB_INIT_FUNCTION (lcp_api_init);

#include <vpp/app/version.h>
VLIB_PLUGIN_REGISTER () = {
  .version = VPP_BUILD_VER,
  .description = "Linux Control Plane - Interface Mirror",
  .default_disabled = 1,
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
pan class="kt">void vlib_smp_unsafe_warning (void) { if (CLIB_DEBUG > 0) { if (vlib_get_thread_index ()) fformat (stderr, "%s: SMP unsafe warning...\n", __FUNCTION__); } } always_inline int __foreach_vlib_main_helper (vlib_main_t *ii, vlib_main_t **p) { vlib_main_t *vm; u32 index = ii - (vlib_main_t *) 0; if (index >= vec_len (vlib_global_main.vlib_mains)) return 0; *p = vm = vlib_global_main.vlib_mains[index]; ASSERT (index == 0 || vm->parked_at_barrier == 1); return 1; } #define foreach_vlib_main() \ for (vlib_main_t *ii = 0, *this_vlib_main; \ __foreach_vlib_main_helper (ii, &this_vlib_main); ii++) \ if (this_vlib_main) #define foreach_sched_policy \ _(SCHED_OTHER, OTHER, "other") \ _(SCHED_BATCH, BATCH, "batch") \ _(SCHED_IDLE, IDLE, "idle") \ _(SCHED_FIFO, FIFO, "fifo") \ _(SCHED_RR, RR, "rr") typedef enum { #define _(v,f,s) SCHED_POLICY_##f = v, foreach_sched_policy #undef _ SCHED_POLICY_N, } sched_policy_t; typedef struct { /* Link list of registrations, built by constructors */ vlib_thread_registration_t *next; /* Vector of registrations, w/ non-data-structure clones at the top */ vlib_thread_registration_t **registrations; uword *thread_registrations_by_name; vlib_worker_thread_t *worker_threads; int use_pthreads; /* Number of vlib_main / vnet_main clones */ u32 n_vlib_mains; /* Number of thread stacks to create */ u32 n_thread_stacks; /* Number of pthreads */ u32 n_pthreads; /* Number of threads */ u32 n_threads; /* Number of cores to skip, must match the core mask */ u32 skip_cores; /* Thread prefix name */ u8 *thread_prefix; /* main thread lcore */ u32 main_lcore; /* Bitmap of available CPU cores */ uword *cpu_core_bitmap; /* Bitmap of available CPU sockets (NUMA nodes) */ uword *cpu_socket_bitmap; /* Worker handoff queues */ vlib_frame_queue_main_t *frame_queue_mains; /* worker thread initialization barrier */ volatile u32 worker_thread_release; /* scheduling policy */ u32 sched_policy; /* scheduling policy priority */ u32 sched_priority; /* NUMA-bound 
heap size */ uword numa_heap_size; } vlib_thread_main_t; extern vlib_thread_main_t vlib_thread_main; #include <vlib/global_funcs.h> #define VLIB_REGISTER_THREAD(x,...) \ __VA_ARGS__ vlib_thread_registration_t x; \ static void __vlib_add_thread_registration_##x (void) \ __attribute__((__constructor__)) ; \ static void __vlib_add_thread_registration_##x (void) \ { \ vlib_thread_main_t * tm = &vlib_thread_main; \ x.next = tm->next; \ tm->next = &x; \ } \ static void __vlib_rm_thread_registration_##x (void) \ __attribute__((__destructor__)) ; \ static void __vlib_rm_thread_registration_##x (void) \ { \ vlib_thread_main_t * tm = &vlib_thread_main; \ VLIB_REMOVE_FROM_LINKED_LIST (tm->next, &x, next); \ } \ __VA_ARGS__ vlib_thread_registration_t x always_inline u32 vlib_num_workers () { return vlib_thread_main.n_vlib_mains - 1; } always_inline u32 vlib_get_worker_thread_index (u32 worker_index) { return worker_index + 1; } always_inline u32 vlib_get_worker_index (u32 thread_index) { return thread_index - 1; } always_inline u32 vlib_get_current_worker_index () { return vlib_get_thread_index () - 1; } static inline void vlib_worker_thread_barrier_check (void) { if (PREDICT_FALSE (*vlib_worker_threads->wait_at_barrier)) { vlib_global_main_t *vgm = vlib_get_global_main (); vlib_main_t *vm = vlib_get_main (); u32 thread_index = vm->thread_index; f64 t = vlib_time_now (vm); if (PREDICT_FALSE (vec_len (vm->barrier_perf_callbacks) != 0)) clib_call_callbacks (vm->barrier_perf_callbacks, vm, vm->clib_time.last_cpu_time, 0 /* enter */ ); if (PREDICT_FALSE (vlib_worker_threads->barrier_elog_enabled)) { vlib_worker_thread_t *w = vlib_worker_threads + thread_index; /* *INDENT-OFF* */ ELOG_TYPE_DECLARE (e) = { .format = "barrier-wait-thread-%d", .format_args = "i4", }; /* *INDENT-ON* */ struct { u32 thread_index; } __clib_packed *ed; ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); ed->thread_index = thread_index; } if (CLIB_DEBUG > 0) { vm = vlib_get_main (); 
vm->parked_at_barrier = 1; } clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, 1); while (*vlib_worker_threads->wait_at_barrier) ; /* * Recompute the offset from thread-0 time. * Note that vlib_time_now adds vm->time_offset, so * clear it first. Save the resulting idea of "now", to * see how well we're doing. See show_clock_command_fn(...) */ { f64 now; vm->time_offset = 0.0; now = vlib_time_now (vm); vm->time_offset = vgm->vlib_mains[0]->time_last_barrier_release - now; vm->time_last_barrier_release = vlib_time_now (vm); } if (CLIB_DEBUG > 0) vm->parked_at_barrier = 0; clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, -1); if (PREDICT_FALSE (*vlib_worker_threads->node_reforks_required)) { if (PREDICT_FALSE (vlib_worker_threads->barrier_elog_enabled)) { t = vlib_time_now (vm) - t; vlib_worker_thread_t *w = vlib_worker_threads + thread_index; /* *INDENT-OFF* */ ELOG_TYPE_DECLARE (e) = { .format = "barrier-refork-thread-%d", .format_args = "i4", }; /* *INDENT-ON* */ struct { u32 thread_index; } __clib_packed *ed; ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); ed->thread_index = thread_index; } vlib_worker_thread_node_refork (); clib_atomic_fetch_add (vlib_worker_threads->node_reforks_required, -1); while (*vlib_worker_threads->node_reforks_required) ; } if (PREDICT_FALSE (vlib_worker_threads->barrier_elog_enabled)) { t = vlib_time_now (vm) - t; vlib_worker_thread_t *w = vlib_worker_threads + thread_index; /* *INDENT-OFF* */ ELOG_TYPE_DECLARE (e) = { .format = "barrier-released-thread-%d: %dus", .format_args = "i4i4", }; /* *INDENT-ON* */ struct { u32 thread_index; u32 duration; } __clib_packed *ed; ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); ed->thread_index = thread_index; ed->duration = (int) (1000000.0 * t); } if (PREDICT_FALSE (vec_len (vm->barrier_perf_callbacks) != 0)) clib_call_callbacks (vm->barrier_perf_callbacks, vm, vm->clib_time.last_cpu_time, 1 /* leave */ ); } } always_inline 
vlib_main_t * vlib_get_worker_vlib_main (u32 worker_index) { vlib_main_t *vm; vlib_thread_main_t *tm = &vlib_thread_main; ASSERT (worker_index < tm->n_vlib_mains - 1); vm = vlib_get_main_by_index (worker_index + 1); ASSERT (vm); return vm; } static inline u8 vlib_thread_is_main_w_barrier (void) { return (!vlib_num_workers () || ((vlib_get_thread_index () == 0 && vlib_worker_threads->wait_at_barrier[0]))); } u8 *vlib_thread_stack_init (uword thread_index); extern void *rpc_call_main_thread_cb_fn; void vlib_process_signal_event_mt_helper (vlib_process_signal_event_mt_args_t * args); void vlib_rpc_call_main_thread (void *function, u8 * args, u32 size); void vlib_get_thread_core_numa (vlib_worker_thread_t * w, unsigned cpu_id); vlib_thread_main_t *vlib_get_thread_main_not_inline (void); /** * Force workers sync from within worker * * Must be paired with @ref vlib_workers_continue */ void vlib_workers_sync (void); /** * Release barrier after workers sync */ void vlib_workers_continue (void); #endif /* included_vlib_threads_h */ /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */