-rw-r--r--  extras/hs-test/framework_test.go  |  2
-rw-r--r--  extras/hs-test/infra/cpu.go       | 55
-rw-r--r--  extras/hs-test/infra/hst_suite.go |  9
-rw-r--r--  src/plugins/acl/hash_lookup.c     | 22
-rw-r--r--  src/plugins/hs_apps/echo_client.c | 52
-rw-r--r--  src/plugins/hs_apps/echo_client.h |  2
-rw-r--r--  src/vat2/main.c                   | 35
-rw-r--r--  test/vpp_iperf.py                 | 41
8 files changed, 100 insertions, 118 deletions
diff --git a/extras/hs-test/framework_test.go b/extras/hs-test/framework_test.go
index f3bf1be56a8..be62b61a057 100644
--- a/extras/hs-test/framework_test.go
+++ b/extras/hs-test/framework_test.go
@@ -33,6 +33,8 @@ func TestHst(t *testing.T) {
         TestTimeout = time.Minute * 5
     }
 
+    RunningInCi = os.Getenv("BUILD_NUMBER") != ""
+
     output, err := os.ReadFile("/sys/devices/system/node/online")
     if err == nil && strings.Contains(string(output), "-") {
         NumaAwareCpuAlloc = true
diff --git a/extras/hs-test/infra/cpu.go b/extras/hs-test/infra/cpu.go
index 743a4eddc67..4afc96bcee4 100644
--- a/extras/hs-test/infra/cpu.go
+++ b/extras/hs-test/infra/cpu.go
@@ -6,7 +6,6 @@ import (
     "fmt"
     "os"
     "os/exec"
-    "strconv"
     "strings"
 
     . "github.com/onsi/ginkgo/v2"
@@ -21,8 +20,6 @@ type CpuContext struct {
 
 type CpuAllocatorT struct {
     cpus              []int
-    runningInCi       bool
-    buildNumber       int
     maxContainerCount int
 }
 
@@ -40,13 +37,8 @@ func (c *CpuAllocatorT) Allocate(containerCount int, nCpus int, offset int) (*Cp
     // indexes, not actual cores
     var minCpu, maxCpu int
 
-    if c.runningInCi {
-        minCpu = ((c.buildNumber) * c.maxContainerCount * nCpus) + offset
-        maxCpu = ((c.buildNumber + 1) * c.maxContainerCount * nCpus) - 1 + offset
-    } else {
-        minCpu = ((GinkgoParallelProcess() - 1) * c.maxContainerCount * nCpus) + offset
-        maxCpu = (GinkgoParallelProcess() * c.maxContainerCount * nCpus) - 1 + offset
-    }
+    minCpu = ((GinkgoParallelProcess() - 1) * c.maxContainerCount * nCpus) + offset
+    maxCpu = (GinkgoParallelProcess() * c.maxContainerCount * nCpus) - 1 + offset
 
     if len(c.cpus)-1 < maxCpu {
         err := fmt.Errorf("could not allocate %d CPUs; available count: %d; attempted to allocate cores with index %d-%d; max index: %d;\n"+
@@ -66,33 +58,9 @@ func (c *CpuAllocatorT) Allocate(containerCount int, nCpus int, offset int) (*Cp
 }
 
 func (c *CpuAllocatorT) readCpus() error {
-    var first, second, third, fourth int
-    var file *os.File
-    var err error
-
-    if c.runningInCi {
-        // non-debug build runs on node0, debug on node1
-        if *IsDebugBuild {
-            file, err = os.Open("/sys/devices/system/node/node1/cpulist")
-        } else {
-            file, err = os.Open("/sys/devices/system/node/node0/cpulist")
-        }
-        if err != nil {
-            return err
-        }
-        defer file.Close()
-
-        sc := bufio.NewScanner(file)
-        sc.Scan()
-        line := sc.Text()
-        _, err = fmt.Sscanf(line, "%d-%d,%d-%d", &first, &second, &third, &fourth)
-        if err != nil {
-            return err
-        }
+    var first, second int
 
-        c.cpus = iterateAndAppend(first, second, c.cpus)
-        c.cpus = iterateAndAppend(third, fourth, c.cpus)
-    } else if NumaAwareCpuAlloc {
+    if NumaAwareCpuAlloc {
         var range1, range2 int
         var tmpCpus []int
 
@@ -124,7 +92,7 @@ func (c *CpuAllocatorT) readCpus() error {
             line := sc.Text()
 
             for _, coreRange := range strings.Split(line, ",") {
-                if strings.IndexRune(coreRange, '-') != -1 {
+                if strings.ContainsRune(coreRange, '-') {
                     _, err = fmt.Sscanf(coreRange, "%d-%d", &range1, &range2)
                     if err != nil {
                         return err
@@ -148,7 +116,8 @@ func (c *CpuAllocatorT) readCpus() error {
         // and we can use offsets
         countToRemove := len(tmpCpus) % (c.maxContainerCount * *NConfiguredCpus)
         if countToRemove >= len(tmpCpus) {
-            return fmt.Errorf("requested too much CPUs per container (%d) should be no more than %d", *NConfiguredCpus, len(tmpCpus)/c.maxContainerCount)
+            return fmt.Errorf("requested too many CPUs per container (%d), should be no more "+
+                "than %d", *NConfiguredCpus, len(tmpCpus)/c.maxContainerCount)
         }
         c.cpus = append(c.cpus, tmpCpus[:len(tmpCpus)-countToRemove]...)
         tmpCpus = tmpCpus[:0]
@@ -200,16 +169,6 @@ func CpuAllocator() (*CpuAllocatorT, error) {
         var err error
         cpuAllocator = new(CpuAllocatorT)
         cpuAllocator.maxContainerCount = 4
-        buildNumberStr := os.Getenv("BUILD_NUMBER")
-
-        if buildNumberStr != "" {
-            cpuAllocator.runningInCi = true
-            // get last digit of build number
-            cpuAllocator.buildNumber, err = strconv.Atoi(buildNumberStr[len(buildNumberStr)-1:])
-            if err != nil {
-                return nil, err
-            }
-        }
         err = cpuAllocator.readCpus()
         if err != nil {
             return nil, err
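Note: the cpu.go change above drops the CI-specific slotting keyed to the Jenkins BUILD_NUMBER and always carves per-process CPU windows out of GinkgoParallelProcess(); CI detection survives only as the RunningInCi flag set in framework_test.go. A minimal sketch of that window arithmetic, with the Ginkgo call replaced by a plain parameter so it runs standalone (the function and variable names here are illustrative, not from the patch):

```go
package main

import "fmt"

// cpuWindow returns the inclusive [min, max] CPU indexes reserved for one
// parallel test process. process is 1-based, like GinkgoParallelProcess().
func cpuWindow(process, maxContainerCount, nCpus, offset int) (minCpu, maxCpu int) {
	minCpu = (process-1)*maxContainerCount*nCpus + offset
	maxCpu = process*maxContainerCount*nCpus - 1 + offset
	return
}

func main() {
	// Two processes, up to 4 containers each, 2 CPUs per container:
	// process 1 gets indexes 0-7, process 2 gets 8-15 — disjoint windows,
	// so parallel suites never contend for the same cores.
	for p := 1; p <= 2; p++ {
		lo, hi := cpuWindow(p, 4, 2, 0)
		fmt.Printf("process %d -> cpus %d-%d\n", p, lo, hi)
	}
}
```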
diff --git a/extras/hs-test/infra/hst_suite.go b/extras/hs-test/infra/hst_suite.go
index 5ef4883ebdb..c2dfc592ebb 100644
--- a/extras/hs-test/infra/hst_suite.go
+++ b/extras/hs-test/infra/hst_suite.go
@@ -46,6 +46,7 @@ var ParallelTotal = flag.Lookup("ginkgo.parallel.total")
 var DryRun = flag.Bool("dryrun", false, "set up containers but don't run tests")
 var NumaAwareCpuAlloc bool
 var TestTimeout time.Duration
+var RunningInCi bool
 
 type HstSuite struct {
     AllContainers map[string]*Container
@@ -443,11 +444,7 @@ func (s *HstSuite) SkipIfNotEnoughAvailableCpus() {
         availableCpus++
     }
 
-    if s.CpuAllocator.runningInCi {
-        maxRequestedCpu = ((s.CpuAllocator.buildNumber + 1) * s.CpuAllocator.maxContainerCount * s.CpuCount)
-    } else {
-        maxRequestedCpu = (GinkgoParallelProcess() * s.CpuAllocator.maxContainerCount * s.CpuCount)
-    }
+    maxRequestedCpu = (GinkgoParallelProcess() * s.CpuAllocator.maxContainerCount * s.CpuCount)
 
     if availableCpus < maxRequestedCpu {
         s.Skip(fmt.Sprintf("Test case cannot allocate requested cpus "+
@@ -516,7 +513,7 @@ func (s *HstSuite) WaitForCoreDump() bool {
         output, _ := exechelper.Output(cmd)
         AddReportEntry("VPP Backtrace", StringerStruct{Label: string(output)})
         os.WriteFile(s.getLogDirPath()+"backtrace.log", output, os.FileMode(0644))
-        if s.CpuAllocator.runningInCi {
+        if RunningInCi {
             err = os.Remove(corePath)
             if err == nil {
                 s.Log("removed " + corePath)
diff --git a/src/plugins/acl/hash_lookup.c b/src/plugins/acl/hash_lookup.c
index 9c3c662a8f1..b4f86208a71 100644
--- a/src/plugins/acl/hash_lookup.c
+++ b/src/plugins/acl/hash_lookup.c
@@ -946,31 +946,15 @@ hash_acl_reapply(acl_main_t *am, u32 lc_index, int acl_index)
 static void
 make_ip6_address_mask(ip6_address_t *addr, u8 prefix_len)
 {
+  ASSERT (prefix_len <= 128);
   ip6_address_mask_from_width(addr, prefix_len);
 }
 
-
-/* Maybe should be moved into the core somewhere */
-always_inline void
-ip4_address_mask_from_width (ip4_address_t * a, u32 width)
-{
-  int i, byte, bit, bitnum;
-  ASSERT (width <= 32);
-  clib_memset (a, 0, sizeof (a[0]));
-  for (i = 0; i < width; i++)
-    {
-      bitnum = (7 - (i & 7));
-      byte = i / 8;
-      bit = 1 << bitnum;
-      a->as_u8[byte] |= bit;
-    }
-}
-
-
 static void
 make_ip4_address_mask(ip4_address_t *addr, u8 prefix_len)
 {
-  ip4_address_mask_from_width(addr, prefix_len);
+  ASSERT (prefix_len <= 32);
+  ip4_preflen_to_mask (prefix_len, addr);
 }
 
 static void
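Note: hash_lookup.c now delegates IPv4 mask construction to the existing ip4_preflen_to_mask() helper and asserts the prefix-length bound instead of keeping a private bit loop. The sketch below transcribes the deleted loop to Go and checks it against the standard library's net.CIDRMask, which performs the same prefix-length-to-mask conversion (the Go names are illustrative only):

```go
package main

import (
	"fmt"
	"net"
)

// maskFromWidth is the hand-rolled loop the patch deletes, transcribed to Go:
// set the top `width` bits of a 32-bit mask, most significant bit first.
func maskFromWidth(width int) [4]byte {
	var a [4]byte
	for i := 0; i < width; i++ {
		a[i/8] |= 1 << (7 - i%8)
	}
	return a
}

func main() {
	for _, plen := range []int{0, 8, 19, 32} {
		loop := maskFromWidth(plen)
		// Library analogue of VPP's ip4_preflen_to_mask().
		lib := []byte(net.CIDRMask(plen, 32))
		fmt.Printf("/%d  loop=%v  lib=%v\n", plen, loop[:], lib)
	}
}
```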
diff --git a/src/plugins/hs_apps/echo_client.c b/src/plugins/hs_apps/echo_client.c
index b08edaaa5f5..f57fd748dba 100644
--- a/src/plugins/hs_apps/echo_client.c
+++ b/src/plugins/hs_apps/echo_client.c
@@ -79,21 +79,27 @@ ec_session_get (ec_worker_t *wrk, u32 ec_index)
 static void
 send_data_chunk (ec_main_t *ecm, ec_session_t *es)
 {
+  const u64 max_burst = 128000;
   u8 *test_data = ecm->connect_test_data;
   int test_buf_len, test_buf_offset, rv;
+  u64 bytes_to_send;
   u32 bytes_this_chunk;
+  svm_fifo_t *f = es->tx_fifo;
 
   test_buf_len = vec_len (test_data);
   ASSERT (test_buf_len > 0);
+  if (ecm->run_time)
+    bytes_to_send = clib_min (svm_fifo_max_enqueue_prod (f), max_burst);
+  else
+    bytes_to_send = clib_min (es->bytes_to_send, max_burst);
   test_buf_offset = es->bytes_sent % test_buf_len;
-  bytes_this_chunk =
-    clib_min (test_buf_len - test_buf_offset, es->bytes_to_send);
+
+  bytes_this_chunk = clib_min (test_buf_len - test_buf_offset, bytes_to_send);
 
   if (!es->is_dgram)
     {
       if (ecm->no_copy)
         {
-          svm_fifo_t *f = es->tx_fifo;
           rv = clib_min (svm_fifo_max_enqueue_prod (f), bytes_this_chunk);
           svm_fifo_enqueue_nocopy (f, rv);
           session_program_tx_io_evt (es->tx_fifo->vpp_sh, SESSION_IO_EVT_TX);
@@ -105,7 +111,6 @@ send_data_chunk (ec_main_t *ecm, ec_session_t *es)
     }
   else
     {
-      svm_fifo_t *f = es->tx_fifo;
       u32 max_enqueue = svm_fifo_max_enqueue_prod (f);
 
       if (max_enqueue < sizeof (session_dgram_hdr_t))
@@ -147,8 +152,11 @@ send_data_chunk (ec_main_t *ecm, ec_session_t *es)
   if (rv > 0)
     {
       /* Account for it... */
-      es->bytes_to_send -= rv;
       es->bytes_sent += rv;
+      if (ecm->run_time)
+        es->bytes_to_receive += rv;
+      else
+        es->bytes_to_send -= rv;
 
       if (ecm->cfg.verbose)
         {
@@ -266,7 +274,7 @@ ec_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
         {
           ecm->repeats++;
           ecm->prev_conns = vec_len (conns_this_batch);
-          if (ecm->repeats == 500000)
+          if (ecm->repeats == 500000 && !ecm->run_time)
             {
               ec_err ("stuck clients");
             }
@@ -297,7 +305,7 @@ ec_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
               delete_session = 0;
             }
 
-          if (PREDICT_FALSE (delete_session == 1))
+          if (PREDICT_FALSE (delete_session == 1) || ecm->timer_expired)
             {
               clib_atomic_fetch_add (&ecm->tx_total, es->bytes_sent);
               clib_atomic_fetch_add (&ecm->rx_total, es->bytes_received);
@@ -356,6 +364,7 @@ ec_reset_runtime_config (ec_main_t *ecm)
   ecm->tls_engine = CRYPTO_ENGINE_OPENSSL;
   ecm->no_copy = 0;
   ecm->run_test = EC_STARTING;
+  ecm->timer_expired = false;
   ecm->ready_connections = 0;
   ecm->connect_conn_index = 0;
   ecm->rx_total = 0;
@@ -368,6 +377,7 @@ ec_reset_runtime_config (ec_main_t *ecm)
   ecm->attach_flags = 0;
   ecm->syn_timeout = 20.0;
   ecm->test_timeout = 20.0;
+  ecm->run_time = 0;
   vec_free (ecm->connect_uri);
 }
 
@@ -1072,7 +1082,7 @@ ec_command_fn (vlib_main_t *vm, unformat_input_t *input,
   ec_main_t *ecm = &ec_main;
   uword *event_data = 0, event_type;
   clib_error_t *error = 0;
-  int rv, had_config = 1;
+  int rv, timed_run_conflict = 0, had_config = 1;
   u64 total_bytes;
   f64 delta;
 
@@ -1101,11 +1111,13 @@ ec_command_fn (vlib_main_t *vm, unformat_input_t *input,
         ;
       else if (unformat (line_input, "bytes %U", unformat_memory_size,
                          &ecm->bytes_to_send))
-        ;
+        timed_run_conflict++;
      else if (unformat (line_input, "test-timeout %f", &ecm->test_timeout))
         ;
      else if (unformat (line_input, "syn-timeout %f", &ecm->syn_timeout))
         ;
+      else if (unformat (line_input, "run-time %f", &ecm->run_time))
+        ;
      else if (unformat (line_input, "echo-bytes"))
         ecm->echo_bytes = 1;
      else if (unformat (line_input, "fifo-size %U", unformat_memory_size,
@@ -1149,6 +1161,9 @@ ec_command_fn (vlib_main_t *vm, unformat_input_t *input,
         }
     }
 
+  if (timed_run_conflict && ecm->run_time)
+    return clib_error_return (0, "failed: invalid arguments for a timed run!");
+
 parse_config:
 
   ecm->cfg.num_test_sessions = ecm->expected_connections =
@@ -1234,11 +1249,22 @@ parse_config:
       goto stop_test;
     }
 
+  /* Testing officially starts now */
+  ecm->test_start_time = vlib_time_now (ecm->vlib_main);
+  ec_cli ("Test started at %.6f", ecm->test_start_time);
+
+  /*
+   * If a timed run, wait and expire timer
+   */
+  if (ecm->run_time)
+    {
+      vlib_process_suspend (vm, ecm->run_time);
+      ec_main.timer_expired = true;
+    }
+
   /*
    * Wait for the sessions to finish or test_timeout seconds pass
    */
-  ecm->test_start_time = vlib_time_now (ecm->vlib_main);
-  ec_cli ("Test started at %.6f", ecm->test_start_time);
   vlib_process_wait_for_event_or_clock (vm, ecm->test_timeout);
   event_type = vlib_process_get_events (vm, &event_data);
   switch (event_type)
@@ -1332,8 +1358,8 @@ cleanup:
 VLIB_CLI_COMMAND (ec_command, static) = {
   .path = "test echo clients",
   .short_help =
-    "test echo clients [nclients %d][bytes <bytes>[m|g]]"
-    "[test-timeout <time>][syn-timeout <time>][echo-bytes][fifo-size <size>]"
+    "test echo clients [nclients %d][bytes <bytes>[m|g]][test-timeout <time>]"
+    "[run-time <time>][syn-timeout <time>][echo-bytes][fifo-size <size>]"
     "[private-segment-count <count>][private-segment-size <bytes>[m|g]]"
     "[preallocate-fifos][preallocate-sessions][client-batch <batch-size>]"
     "[uri <tcp://ip/port>][test-bytes][verbose]",
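Note: the send_data_chunk() change above introduces two modes. A timed run (run-time set) keeps pushing up to max_burst bytes per pass, limited only by the fifo's free space, and credits bytes_to_receive for the echoed data; a sized run still draws down a fixed bytes_to_send budget. A standalone Go sketch of just that accounting (all names hypothetical; only the branching mirrors the patch):

```go
package main

import "fmt"

const maxBurst = 128000

type session struct {
	bytesToSend    uint64 // sized-run budget
	bytesSent      uint64
	bytesToReceive uint64 // echo bytes expected back on a timed run
}

func minU64(a, b uint64) uint64 {
	if a < b {
		return a
	}
	return b
}

// sendChunk models one pass; fifoSpace stands in for
// svm_fifo_max_enqueue_prod(f), and we assume the fifo accepts everything.
func sendChunk(es *session, timed bool, fifoSpace uint64) {
	var toSend uint64
	if timed {
		toSend = minU64(fifoSpace, maxBurst)
	} else {
		toSend = minU64(es.bytesToSend, maxBurst)
	}
	rv := toSend
	es.bytesSent += rv
	if timed {
		es.bytesToReceive += rv // timed run: track what the echo owes us
	} else {
		es.bytesToSend -= rv // sized run: spend the budget
	}
}

func main() {
	es := &session{bytesToSend: 300000}
	sendChunk(es, false, 1<<20)
	fmt.Println(es.bytesSent, es.bytesToSend) // 128000 172000
	sendChunk(es, true, 64000)
	fmt.Println(es.bytesSent, es.bytesToReceive) // 192000 64000
}
```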
diff --git a/src/plugins/hs_apps/echo_client.h b/src/plugins/hs_apps/echo_client.h
index 5868c3652ce..d06f237c757 100644
--- a/src/plugins/hs_apps/echo_client.h
+++ b/src/plugins/hs_apps/echo_client.h
@@ -57,6 +57,7 @@ typedef struct
   volatile u64 rx_total;
   volatile u64 tx_total;
   volatile int run_test; /**< Signal start of test */
+  volatile bool timer_expired; /**< Signal end of timed test */
 
   f64 syn_start_time;
   f64 test_start_time;
@@ -97,6 +98,7 @@ typedef struct
   u64 appns_secret;  /**< App namespace secret */
   f64 syn_timeout;   /**< Test syn timeout (s) */
   f64 test_timeout;  /**< Test timeout (s) */
+  f64 run_time;      /**< Length of a test (s) */
 
   /*
    * Flags
diff --git a/src/vat2/main.c b/src/vat2/main.c
index bf415854db1..2949c4899aa 100644
--- a/src/vat2/main.c
+++ b/src/vat2/main.c
@@ -253,16 +253,15 @@ print_help (void)
     "Send API message to VPP and print reply\n"
     "\n"
     "-d, --debug                    Print additional information\n"
-    "-p, --prefix <prefix>          Specify shared memory prefix to connect "
-    "to a given VPP instance\n"
+    "--dump-apis                    List all APIs available in VAT2 (might "
+    "not reflect running VPP)\n"
     "-f, --file <filename>          File containing a JSON object with the "
     "arguments for the message to send\n"
+    "-p, --plugin-path              Plugin path\n"
+    "-s, --prefix <prefix>          Specify shared memory prefix to connect "
+    "to a given VPP instance\n"
     "-t, --template <message-name>  Print a template JSON object for given API"
-    " message\n"
-    "--dump-apis                    List all APIs available in VAT2 (might "
-    "not reflect running VPP)\n"
-    "--plugin-path                  Pluing path"
-    "\n";
+    " message\n";
   printf ("%s", help_string);
 }
 
@@ -281,38 +280,38 @@ main (int argc, char **argv)
   char *msgname = 0;
   static struct option long_options[] = {
     { "debug", no_argument, 0, 'd' },
-    { "prefix", required_argument, 0, 's' },
-    { "file", required_argument, 0, 'f' },
     { "dump-apis", no_argument, 0, 0 },
-    { "template", required_argument, 0, 't' },
+    { "file", required_argument, 0, 'f' },
     { "plugin-path", required_argument, 0, 'p' },
+    { "prefix", required_argument, 0, 's' },
+    { "template", required_argument, 0, 't' },
     { 0, 0, 0, 0 }
   };
 
-  while ((c = getopt_long (argc, argv, "hdp:f:t:", long_options,
+  while ((c = getopt_long (argc, argv, "df:p:s:t:", long_options,
                            &option_index)) != -1)
     {
       switch (c)
         {
        case 0:
-          if (option_index == 3)
+          if (option_index == 1)
            dump_api = true;
          break;
        case 'd':
          vat2_debug = true;
          break;
-        case 't':
-          template = optarg;
-          break;
-        case 's':
-          prefix = optarg;
-          break;
        case 'f':
          filename = optarg;
          break;
        case 'p':
          pluginpath = optarg;
          break;
+        case 's':
+          prefix = optarg;
+          break;
+        case 't':
+          template = optarg;
+          break;
        case '?':
          print_help ();
          return 1;
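Note: in vat2/main.c the long_options table is sorted alphabetically, so the hard-coded option_index check for --dump-apis must change from 3 to 1 — for a long option whose flag field is 0, getopt_long() returns 0 and reports the matched entry's position in the table. A Go sketch of one way to avoid that positional coupling by deriving the index from the option name (illustrative only; this is not how vat2 is written):

```go
package main

import "fmt"

type longOption struct{ name string }

// Same alphabetical order as the patched long_options table.
var longOptions = []longOption{
	{"debug"}, {"dump-apis"}, {"file"}, {"plugin-path"}, {"prefix"}, {"template"},
}

// indexOf derives the table index from the option name, so reordering the
// table can never silently desynchronize the dispatch logic.
func indexOf(name string) int {
	for i, o := range longOptions {
		if o.name == name {
			return i
		}
	}
	return -1
}

func main() {
	fmt.Println(indexOf("dump-apis")) // 1 after the reorder in this patch
}
```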
diff --git a/test/vpp_iperf.py b/test/vpp_iperf.py
index b325399f8e1..03ced8bf12f 100644
--- a/test/vpp_iperf.py
+++ b/test/vpp_iperf.py
@@ -5,6 +5,8 @@
 import subprocess
 import os
 import sys
+import time
+import signal
 
 
 class VppIperf:
@@ -196,20 +198,31 @@ def start_iperf(
 
 def stop_iperf(iperf_cmd):
     """Stop the iperf process matching the iperf_cmd string."""
-    args = ["pgrep", "-x", "-f", iperf_cmd]
-    p = subprocess.Popen(
-        args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8"
-    )
-    stdout, _ = p.communicate()
-    for pid in stdout.split():
-        try:
-            subprocess.run(
-                f"kill -9 {pid}",
-                encoding="utf-8",
-                shell=True,
-            )
-        except Exception:
-            pass
+    try:
+        result = subprocess.run(
+            ["pgrep", "-x", "-f", iperf_cmd],
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            encoding="utf-8",
+        )
+        pids = result.stdout.strip().split()
+        if not pids:
+            # No matching iperf3 processes found
+            return
+
+        for pid in pids:
+            try:
+                # First send SIGTERM to cleanup and notify the parent process
+                os.kill(int(pid), signal.SIGTERM)
+                time.sleep(2)
+                os.kill(int(pid), 0)  # Check if still alive
+                os.kill(int(pid), signal.SIGKILL)
+            except ProcessLookupError:
+                pass  # Process already exited
+            except Exception as e:
+                print(f"Error terminating iperf3 process {pid}: {e}")
+    except Exception as e:
+        print(f"Failed to run pgrep for '{iperf_cmd}': {e}")
 
 
 if __name__ == "__main__":
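Note: the stop_iperf() rewrite replaces an unconditional kill -9 with SIGTERM first, a two-second grace period, a liveness probe with signal 0, and only then SIGKILL, so iperf gets a chance to clean up and notify its parent. The same escalation in Go, for a Unix system and an assumed pid (sketch only):

```go
package main

import (
	"fmt"
	"syscall"
	"time"
)

// terminate asks pid to exit, waits grace, then force-kills if still alive.
func terminate(pid int, grace time.Duration) error {
	if err := syscall.Kill(pid, syscall.SIGTERM); err != nil {
		return err // e.g. ESRCH: process already gone
	}
	time.Sleep(grace)
	// Signal 0 only probes for existence, exactly like os.kill(pid, 0).
	if err := syscall.Kill(pid, 0); err == syscall.ESRCH {
		return nil // exited during the grace period
	}
	return syscall.Kill(pid, syscall.SIGKILL)
}

func main() {
	// 12345 is a placeholder pid for illustration.
	if err := terminate(12345, 2*time.Second); err != nil {
		fmt.Println("terminate:", err)
	}
}
```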