path: root/src/vnet/feature/feature.c
/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/feature/feature.h>
#include <vnet/adj/adj.h>

vnet_feature_main_t feature_main;

static clib_error_t *
vnet_feature_init (vlib_main_t * vm)
{
  vnet_feature_main_t *fm = &feature_main;
  vnet_feature_registration_t *freg;
  vnet_feature_arc_registration_t *areg;
  vnet_feature_constraint_registration_t *creg;
  u32 arc_index = 0;

  fm->arc_index_by_name = hash_create_string (0, sizeof (uword));
  areg = fm->next_arc;

  /* process feature arc registrations */
  while (areg)
    {
      char *s;
      int i = 0;
      areg->feature_arc_index = arc_index;
      if (areg->arc_index_ptr)
	*areg->arc_index_ptr = arc_index;
      hash_set_mem (fm->arc_index_by_name, areg->arc_name,
		    pointer_to_uword (areg));

      /* process start nodes */
      while ((s = areg->start_nodes[i]))
	{
	  i++;
	}
      areg->n_start_nodes = i;

      /* next */
      areg = areg->next;
      arc_index++;
    }

  vec_validate (fm->next_feature_by_arc, arc_index - 1);
  vec_validate (fm->feature_nodes, arc_index - 1);
  vec_validate (fm->feature_config_mains, arc_index - 1);
  vec_validate (fm->next_feature_by_name, arc_index - 1);
  vec_validate (fm->sw_if_index_has_features, arc_index - 1);
  vec_validate (fm->feature_count_by_sw_if_index, arc_index - 1);
  vec_validate (fm->next_constraint_by_arc, arc_index - 1);

  freg = fm->next_feature;
  while (freg)
    {
      vnet_feature_registration_t *next;
      uword *p = hash_get_mem (fm->arc_index_by_name, freg->arc_name);
      if (p == 0)
	{
	  /* Don't start vpp with broken feature arcs */
	  clib_warning ("Unknown feature arc '%s'", freg->arc_name);
	  os_exit (1);
	}

      areg = uword_to_pointer (p[0], vnet_feature_arc_registration_t *);
      arc_index = areg->feature_arc_index;

      next = freg->next;
      freg->next_in_arc = fm->next_feature_by_arc[arc_index];
      fm->next_feature_by_arc[arc_index] = freg;

      /* next */
      freg = next;
    }

  /* Move bulk constraints to the constraint by arc lists */
  creg = fm->next_constraint;
  while (creg)
    {
      vnet_feature_constraint_registration_t *next;
      uword *p = hash_get_mem (fm->arc_index_by_name, creg->arc_name);
      if (p == 0)
	{
	  /* Don't start vpp with broken feature arcs */
	  clib_warning ("Unknown feature arc '%s'", creg->arc_name);
	  os_exit (1);
	}

      areg = uword_to_pointer (p[0], vnet_feature_arc_registration_t *);
      arc_index = areg->feature_arc_index;

      next = creg->next;
      creg->next_in_arc = fm->next_constraint_by_arc[arc_index];
      fm->next_constraint_by_arc[arc_index] = creg;

      /* next */
      creg = next;
    }


  areg = fm->next_arc;
  while (areg)
    {
      clib_error_t *error;
      vnet_feature_config_main_t *cm;
      vnet_config_main_t *vcm;
      char **features_in_order, *last_feature;

      arc_index = areg->feature_arc_index;
      cm = &fm->feature_config_mains[arc_index];
      vcm = &cm->config_main;
      if ((error = vnet_feature_arc_init
	   (vm, vcm, areg->start_nodes, areg->n_start_nodes,
	    fm->next_feature_by_arc[arc_index],
	    fm->next_constraint_by_arc[arc_index],
	    &fm->feature_nodes[arc_index])))
	{
	  clib_error_report (error);
	  os_exit (1);
	}

      features_in_order = fm->feature_nodes[arc_index];

      /* If specified, verify that the last node in the arc is actually last */
      if (areg->last_in_arc && vec_len (features_in_order) > 0)
	{
	  last_feature = features_in_order[vec_len (features_in_order) - 1];
	  if (strncmp (areg->last_in_arc, last_feature,
		       strlen (areg->last_in_arc)))
	    clib_warning
	      ("WARNING: %s arc: last node is %s, but expected %s!",
	       areg->arc_name, last_feature, areg->last_in_arc);
	}

      fm->next_feature_by_name[arc_index] =
	hash_create_string (0, sizeof (uword));
      freg = fm->next_feature_by_arc[arc_index];

      while (freg)
	{
	  hash_set_mem (fm->next_feature_by_name[arc_index],
			freg->node_name, pointer_to_uword (freg));
	  freg = freg->next_in_arc;
	}

      /* next */
      areg = areg->next;
      arc_index++;
    }

  return 0;
}

VLIB_INIT_FUNCTION (vnet_feature_init);
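
/*
 * Illustrative sketch (not part of this file): the registrations consumed by
 * vnet_feature_init() are normally declared, via VNET_FEATURE_ARC_INIT and
 * VNET_FEATURE_INIT from <vnet/feature/feature.h>, in the code that owns the
 * arc or the feature.  The names used below ("my-arc", "my-arc-input",
 * "my-node", "my-node-2", my_main) are hypothetical placeholders.
 *
 *   VNET_FEATURE_ARC_INIT (my_arc, static) = {
 *     .arc_name = "my-arc",
 *     .start_nodes = VNET_FEATURES ("my-arc-input"),
 *     .last_in_arc = "my-node-2",
 *     .arc_index_ptr = &my_main.my_arc_feature_arc_index,
 *   };
 *
 *   VNET_FEATURE_INIT (my_node, static) = {
 *     .arc_name = "my-arc",
 *     .node_name = "my-node",
 *     .runs_before = VNET_FEATURES ("my-node-2"),
 *   };
 */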

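/** Look up a feature arc index by name; returns ~0 if the arc is unknown. */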
u8
vnet_get_feature_arc_index (const char *s)
{
  vnet_feature_main_t *fm = &feature_main;
  vnet_feature_arc_registration_t *reg;
  uword *p;

  p = hash_get_mem (fm->arc_index_by_name, s);
  if (p == 0)
    return ~0;

  reg = uword_to_pointer (p[0], vnet_feature_arc_registration_t *);
  return reg->feature_arc_index;
}

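/** Look up a feature registration by arc name and feature node name.
    Returns 0 if the arc or the feature is unknown. */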
vnet_feature_registration_t *
vnet_get_feature_reg (const char *arc_name, const char *node_name)
{
  u8 arc_index;

  arc_index = vnet_get_feature_arc_index (arc_name);
  if (arc_index == (u8) ~ 0)
    return 0;

  vnet_feature_main_t *fm = &feature_main;
  vnet_feature_registration_t *reg;
  uword *p;

  p = hash_get_mem (fm->next_feature_by_name[arc_index], node_name);
  if (p == 0)
    return 0;

  reg = uword_to_pointer (p[0], vnet_feature_registration_t *);
  return reg;
}

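/** Look up a feature's index on the given arc by node name; ~0 if unknown. */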
u32
vnet_get_feature_index (u8 arc, const char *s)
{
  vnet_feature_main_t *fm = &feature_main;
  vnet_feature_registration_t *reg;
  uword *p;

  if (s == 0)
    return ~0;

  p = hash_get_mem (fm->next_feature_by_name[arc], s);
  if (p == 0)
    return ~0;

  reg = uword_to_pointer (p[0], vnet_feature_registration_t *);
  return reg->feature_index;
}

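/** Enable or disable a feature on an interface, identified by arc index and
    feature index.  Updates the per-interface config string, the per-arc
    feature count and the adjacency feature state. */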
int
vnet_feature_enable_disable_with_index (u8 arc_index, u32 feature_index,
					u32 sw_if_index, int enable_disable,
					void *feature_config,
					u32 n_feature_config_bytes)
{
  vnet_feature_main_t *fm = &feature_main;
  vnet_feature_config_main_t *cm;
  i16 feature_count;
  u32 ci;

  if (arc_index == (u8) ~ 0)
    return VNET_API_ERROR_INVALID_VALUE;

  if (feature_index == ~0)
    return VNET_API_ERROR_INVALID_VALUE_2;

  cm = &fm->feature_config_mains[arc_index];
  vec_validate_init_empty (cm->config_index_by_sw_if_index, sw_if_index, ~0);
  ci = cm->config_index_by_sw_if_index[sw_if_index];

  vec_validate (fm->feature_count_by_sw_if_index[arc_index], sw_if_index);
  feature_count = fm->feature_count_by_sw_if_index[arc_index][sw_if_index];

  if (!enable_disable && feature_count < 1)
    return 0;

  ci = (enable_disable
	? vnet_config_add_feature
	: vnet_config_del_feature)
    (vlib_get_main (), &cm->config_main, ci, feature_index, feature_config,
     n_feature_config_bytes);
  if (ci == ~0)
    {
      return 0;
    }
  cm->config_index_by_sw_if_index[sw_if_index] = ci;

  /* update feature count */
  enable_disable = (enable_disable > 0);
  feature_count += enable_disable ? 1 : -1;
  ASSERT (feature_count >= 0);

  fm->sw_if_index_has_features[arc_index] =
    clib_bitmap_set (fm->sw_if_index_has_features[arc_index], sw_if_index,
		     (feature_count > 0));
  adj_feature_update (sw_if_index, arc_index, (feature_count > 0));

  fm->feature_count_by_sw_if_index[arc_index][sw_if_index] = feature_count;
  return 0;
}

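/** Enable or disable a feature on an interface, identified by arc name and
    feature node name. */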
int
vnet_feature_enable_disable (const char *arc_name, const char *node_name,
			     u32 sw_if_index, int enable_disable,
			     void *feature_config, u32 n_feature_config_bytes)
{
  u32 feature_index;
  u8 arc_index;

  arc_index = vnet_get_feature_arc_index (arc_name);

  if (arc_index == (u8) ~ 0)
    return VNET_API_ERROR_INVALID_VALUE;

  feature_index = vnet_get_feature_index (arc_index, node_name);

  return vnet_feature_enable_disable_with_index (arc_index, feature_index,
						 sw_if_index, enable_disable,
						 feature_config,
						 n_feature_config_bytes);
}
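
/*
 * Illustrative sketch (not part of this file): enabling a feature on an
 * interface from C and from the debug CLI.  "ip4-unicast" is an arc
 * registered elsewhere in vnet; "my-node" is a hypothetical feature node
 * name.
 *
 *   // 1 => enable, 0 => disable; no per-feature config block is passed
 *   int rv = vnet_feature_enable_disable ("ip4-unicast", "my-node",
 *                                         sw_if_index, 1, 0, 0);
 *   // rv is 0 on success, or a VNET_API_ERROR_* value such as
 *   // VNET_API_ERROR_INVALID_VALUE when the arc name is unknown
 *
 * The equivalent debug CLI (see "set interface feature" below) is:
 *
 *   set interface feature <interface> my-node arc ip4-unicast
 */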

static int
feature_cmp (void *a1, void *a2)
{
  vnet_feature_registration_t *reg1 = a1;
  vnet_feature_registration_t *reg2 = a2;

  return (int) reg1->feature_index - reg2->feature_index;
}

/** Display the set of available driver features.
    Useful for verifying that expected features are present.
*/

static clib_error_t *
show_features_command_fn (vlib_main_t * vm,
			  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vnet_feature_main_t *fm = &feature_main;
  vnet_feature_arc_registration_t *areg;
  vnet_feature_registration_t *freg;
  vnet_feature_registration_t *feature_regs = 0;
  int verbose = 0;

  if (unformat (input, "verbose"))
    verbose = 1;

  vlib_cli_output (vm, "Available feature paths");

  areg = fm->next_arc;
  while (areg)
    {
      if (verbose)
	vlib_cli_output (vm, "[%2d] %s:", areg->feature_arc_index,
			 areg->arc_name);
      else
	vlib_cli_output (vm, "%s:", areg->arc_name);

      freg = fm->next_feature_by_arc[areg->feature_arc_index];
      while (freg)
	{
	  vec_add1 (feature_regs, freg[0]);
	  freg = freg->next_in_arc;
	}

      vec_sort_with_function (feature_regs, feature_cmp);

      vec_foreach (freg, feature_regs)
      {
	if (verbose)
	  vlib_cli_output (vm, "  [%2d]: %s\n", freg->feature_index,
			   freg->node_name);
	else
	  vlib_cli_output (vm, "  %s\n", freg->node_name);
      }
      vec_reset_length (feature_regs);
      /* next */
      areg = areg->next;
    }
  vec_free (feature_regs);

  return 0;
}

/*?
 * Display the set of available driver features
 *
 * @cliexpar
 * Example:
 * @cliexcmd{show features [verbose]}
 * @cliexend
?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (show_features_command, static) = {
  .path = "show features",
  .short_help = "show features [verbose]",
  .function = show_features_command_fn,
};
/* *INDENT-ON* */

/** Display the set of driver features configured on a specific interface.
    Called by the "show interface" handler.
*/

void
vnet_interface_features_show (vlib_main_t * vm, u32 sw_if_index, int verbose)
{
  vnet_feature_main_t *fm = &feature_main;
  u32 node_index, current_config_index;
  u16 feature_arc;
  vnet_feature_config_main_t *cm = fm->feature_config_mains;
  vnet_feature_arc_registration_t *areg;
  vnet_config_main_t *vcm;
  vnet_config_t *cfg;
  u32 cfg_index;
  vnet_config_feature_t *feat;
  vlib_node_t *n;
  int i;

  vlib_cli_output (vm, "Feature paths configured on %U...",
		   format_vnet_sw_if_index_name,
		   vnet_get_main (), sw_if_index);

  areg = fm->next_arc;
  while (areg)
    {
      feature_arc = areg->feature_arc_index;
      vcm = &(cm[feature_arc].config_main);

      vlib_cli_output (vm, "\n%s:", areg->arc_name);
      areg = areg->next;

      if (NULL == cm[feature_arc].config_index_by_sw_if_index ||
	  vec_len (cm[feature_arc].config_index_by_sw_if_index) <=
	  sw_if_index)
	{
	  vlib_cli_output (vm, "  none configured");
	  continue;
	}

      current_config_index =
	vec_elt (cm[feature_arc].config_index_by_sw_if_index, sw_if_index);

      if (current_config_index == ~0)
	{
	  vlib_cli_output (vm, "  none configured");
	  continue;
	}

      ASSERT (current_config_index
	      < vec_len (vcm->config_pool_index_by_user_index));

      cfg_index = vcm->config_pool_index_by_user_index[current_config_index];
      cfg = pool_elt_at_index (vcm->config_pool, cfg_index);

      for (i = 0; i < vec_len (cfg->features); i++)
	{
	  feat = cfg->features + i;
	  node_index = feat->node_index;
	  n = vlib_get_node (vm, node_index);
	  if (verbose)
	    vlib_cli_output (vm, "  [%2d] %v", feat->feature_index, n->name);
	  else
	    vlib_cli_output (vm, "  %v", n->name);
	}
    }
}

static clib_error_t *
set_interface_features_command_fn (vlib_main_t * vm,
				   unformat_input_t * input,
				   vlib_cli_command_t * cmd)
{
  vnet_main_t *vnm = vnet_get_main ();
  unformat_input_t _line_input, *line_input = &_line_input;
  clib_error_t *error = 0;

  u8 *arc_name = 0;
  u8 *feature_name = 0;
  u32 sw_if_index = ~0;
  u8 enable = 1;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat
	  (line_input, "%U %v", unformat_vnet_sw_interface, vnm, &sw_if_index,
	   &feature_name))
	;
      else if (unformat (line_input, "arc %v", &arc_name))
	;
      else if (unformat (line_input, "disable"))
	enable = 0;
      else
	{
	  if (feature_name && arc_name)
	    break;
	  error = unformat_parse_error (line_input);
	  goto done;
	}
    }

  if (sw_if_index == ~0)
    {
      error = clib_error_return (0, "Interface not specified...");
      goto done;
    }

  vec_add1 (arc_name, 0);
  vec_add1 (feature_name, 0);

  vnet_feature_registration_t *reg;
  reg =
    vnet_get_feature_reg ((const char *) arc_name,
			  (const char *) feature_name);
  if (reg == 0)
    {
      error = clib_error_return (0, "Unknown feature...");
      goto done;
    }
  if (reg->enable_disable_cb)
    error = reg->enable_disable_cb (sw_if_index, enable);
  if (!error)
    vnet_feature_enable_disable ((const char *) arc_name,
				 (const char *) feature_name, sw_if_index,
				 enable, 0, 0);

done:
  vec_free (feature_name);
  vec_free (arc_name);
  unformat_free (line_input);
  return error;
}

/*?
 * Enable or disable a feature on a given interface
 *
 * @cliexpar
 * Example:
 * @cliexcmd{set interface feature GigabitEthernet2/0/0 ip4-flow-classify arc ip4-unicast}
 * @cliexend
?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (set_interface_feature_command, static) = {
  .path = "set interface feature",
  .short_help = "set interface feature <intfc> <feature_name> arc <arc_name> "
      "[disable]",
  .function = set_interface_features_command_fn,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */