-rw-r--r--  Makefile                            |  1
-rw-r--r--  build/external/deb/debian/control   |  2
-rw-r--r--  build/optional/deb/debian/control   |  2
-rw-r--r--  extras/hs-test/framework_test.go    |  2
-rw-r--r--  extras/hs-test/infra/cpu.go         | 55
-rw-r--r--  extras/hs-test/infra/hst_suite.go   |  9
-rw-r--r--  src/plugins/acl/hash_lookup.c       | 22
-rw-r--r--  src/plugins/dev_octeon/crypto.c     | 12
-rw-r--r--  src/plugins/dev_octeon/crypto.h     |  7
-rw-r--r--  src/plugins/dev_octeon/init.c       | 43
-rw-r--r--  src/plugins/dev_octeon/octeon.h     |  6
-rw-r--r--  src/vat2/main.c                     | 35
-rw-r--r--  test/Makefile                       |  2
-rw-r--r--  test/vpp_iperf.py                   | 41
14 files changed, 122 insertions(+), 117 deletions(-)
diff --git a/Makefile b/Makefile
@@ -106,6 +106,7 @@ DEB_DEPENDS += iperf ethtool # for 'make test TEST=vm_vpp_interfaces'
 DEB_DEPENDS += libpcap-dev
 DEB_DEPENDS += tshark
 DEB_DEPENDS += jq # for extracting test summary from .json report (hs-test)
+DEB_DEPENDS += nasm libnuma-dev # for make-ext-deps
 
 LIBFFI=libffi6 # works on all but 20.04 and debian-testing
 ifeq ($(OS_VERSION_ID),24.04)
diff --git a/build/external/deb/debian/control b/build/external/deb/debian/control
index a4e59f4c577..701d7bf0961 100644
--- a/build/external/deb/debian/control
+++ b/build/external/deb/debian/control
@@ -2,7 +2,7 @@ Source: vpp-ext-deps
 Section: net
 Priority: extra
 Maintainer: vpp-dev@lists.fd.io
-Build-Depends: debhelper (>= 9), dkms
+Build-Depends: debhelper (>= 9)
 Standards-Version: 3.9.4
 
 Package: vpp-ext-deps
diff --git a/build/optional/deb/debian/control b/build/optional/deb/debian/control
index d892395ddab..c18809f7f5e 100644
--- a/build/optional/deb/debian/control
+++ b/build/optional/deb/debian/control
@@ -2,7 +2,7 @@ Source: vpp-opt-deps
 Section: net
 Priority: extra
 Maintainer: vpp-dev@lists.fd.io
-Build-Depends: debhelper (>= 9), dkms
+Build-Depends: debhelper (>= 9)
 Standards-Version: 3.9.4
 
 Package: vpp-opt-deps
diff --git a/extras/hs-test/framework_test.go b/extras/hs-test/framework_test.go
index f3bf1be56a8..be62b61a057 100644
--- a/extras/hs-test/framework_test.go
+++ b/extras/hs-test/framework_test.go
@@ -33,6 +33,8 @@ func TestHst(t *testing.T) {
         TestTimeout = time.Minute * 5
     }
 
+    RunningInCi = os.Getenv("BUILD_NUMBER") != ""
+
     output, err := os.ReadFile("/sys/devices/system/node/online")
     if err == nil && strings.Contains(string(output), "-") {
         NumaAwareCpuAlloc = true
diff --git a/extras/hs-test/infra/cpu.go b/extras/hs-test/infra/cpu.go
index 743a4eddc67..4afc96bcee4 100644
--- a/extras/hs-test/infra/cpu.go
+++ b/extras/hs-test/infra/cpu.go
@@ -6,7 +6,6 @@ import (
     "fmt"
     "os"
     "os/exec"
-    "strconv"
     "strings"
 
     . "github.com/onsi/ginkgo/v2"
@@ -21,8 +20,6 @@ type CpuContext struct {
 
 type CpuAllocatorT struct {
     cpus              []int
-    runningInCi       bool
-    buildNumber       int
     maxContainerCount int
 }
 
@@ -40,13 +37,8 @@ func (c *CpuAllocatorT) Allocate(containerCount int, nCpus int, offset int) (*Cp
     // indexes, not actual cores
     var minCpu, maxCpu int
 
-    if c.runningInCi {
-        minCpu = ((c.buildNumber) * c.maxContainerCount * nCpus) + offset
-        maxCpu = ((c.buildNumber + 1) * c.maxContainerCount * nCpus) - 1 + offset
-    } else {
-        minCpu = ((GinkgoParallelProcess() - 1) * c.maxContainerCount * nCpus) + offset
-        maxCpu = (GinkgoParallelProcess() * c.maxContainerCount * nCpus) - 1 + offset
-    }
+    minCpu = ((GinkgoParallelProcess() - 1) * c.maxContainerCount * nCpus) + offset
+    maxCpu = (GinkgoParallelProcess() * c.maxContainerCount * nCpus) - 1 + offset
 
     if len(c.cpus)-1 < maxCpu {
         err := fmt.Errorf("could not allocate %d CPUs; available count: %d; attempted to allocate cores with index %d-%d; max index: %d;\n"+
@@ -66,33 +58,9 @@ func (c *CpuAllocatorT) Allocate(containerCount int, nCpus int, offset int) (*Cp
 }
 
 func (c *CpuAllocatorT) readCpus() error {
-    var first, second, third, fourth int
-    var file *os.File
-    var err error
-
-    if c.runningInCi {
-        // non-debug build runs on node0, debug on node1
-        if *IsDebugBuild {
-            file, err = os.Open("/sys/devices/system/node/node1/cpulist")
-        } else {
-            file, err = os.Open("/sys/devices/system/node/node0/cpulist")
-        }
-        if err != nil {
-            return err
-        }
-        defer file.Close()
-
-        sc := bufio.NewScanner(file)
-        sc.Scan()
-        line := sc.Text()
-        _, err = fmt.Sscanf(line, "%d-%d,%d-%d", &first, &second, &third, &fourth)
-        if err != nil {
-            return err
-        }
+    var first, second int
 
-        c.cpus = iterateAndAppend(first, second, c.cpus)
-        c.cpus = iterateAndAppend(third, fourth, c.cpus)
-    } else if NumaAwareCpuAlloc {
+    if NumaAwareCpuAlloc {
         var range1, range2 int
         var tmpCpus []int
 
@@ -124,7 +92,7 @@ func (c *CpuAllocatorT) readCpus() error {
         line := sc.Text()
 
         for _, coreRange := range strings.Split(line, ",") {
-            if strings.IndexRune(coreRange, '-') != -1 {
+            if strings.ContainsRune(coreRange, '-') {
                 _, err = fmt.Sscanf(coreRange, "%d-%d", &range1, &range2)
                 if err != nil {
                     return err
@@ -148,7 +116,8 @@ func (c *CpuAllocatorT) readCpus() error {
         // and we can use offsets
         countToRemove := len(tmpCpus) % (c.maxContainerCount * *NConfiguredCpus)
         if countToRemove >= len(tmpCpus) {
-            return fmt.Errorf("requested too much CPUs per container (%d) should be no more than %d", *NConfiguredCpus, len(tmpCpus)/c.maxContainerCount)
+            return fmt.Errorf("requested too many CPUs per container (%d), should be no more "+
+                "than %d", *NConfiguredCpus, len(tmpCpus)/c.maxContainerCount)
         }
         c.cpus = append(c.cpus, tmpCpus[:len(tmpCpus)-countToRemove]...)
         tmpCpus = tmpCpus[:0]
@@ -200,16 +169,6 @@ func CpuAllocator() (*CpuAllocatorT, error) {
         var err error
         cpuAllocator = new(CpuAllocatorT)
         cpuAllocator.maxContainerCount = 4
-        buildNumberStr := os.Getenv("BUILD_NUMBER")
-
-        if buildNumberStr != "" {
-            cpuAllocator.runningInCi = true
-            // get last digit of build number
-            cpuAllocator.buildNumber, err = strconv.Atoi(buildNumberStr[len(buildNumberStr)-1:])
-            if err != nil {
-                return nil, err
-            }
-        }
         err = cpuAllocator.readCpus()
         if err != nil {
             return nil, err
diff --git a/extras/hs-test/infra/hst_suite.go b/extras/hs-test/infra/hst_suite.go
index 5ef4883ebdb..c2dfc592ebb 100644
--- a/extras/hs-test/infra/hst_suite.go
+++ b/extras/hs-test/infra/hst_suite.go
@@ -46,6 +46,7 @@ var ParallelTotal = flag.Lookup("ginkgo.parallel.total")
 var DryRun = flag.Bool("dryrun", false, "set up containers but don't run tests")
 var NumaAwareCpuAlloc bool
 var TestTimeout time.Duration
+var RunningInCi bool
 
 type HstSuite struct {
     AllContainers map[string]*Container
@@ -443,11 +444,7 @@ func (s *HstSuite) SkipIfNotEnoughAvailableCpus() {
         availableCpus++
     }
 
-    if s.CpuAllocator.runningInCi {
-        maxRequestedCpu = ((s.CpuAllocator.buildNumber + 1) * s.CpuAllocator.maxContainerCount * s.CpuCount)
-    } else {
-        maxRequestedCpu = (GinkgoParallelProcess() * s.CpuAllocator.maxContainerCount * s.CpuCount)
-    }
+    maxRequestedCpu = (GinkgoParallelProcess() * s.CpuAllocator.maxContainerCount * s.CpuCount)
 
     if availableCpus < maxRequestedCpu {
         s.Skip(fmt.Sprintf("Test case cannot allocate requested cpus "+
@@ -516,7 +513,7 @@ func (s *HstSuite) WaitForCoreDump() bool {
     output, _ := exechelper.Output(cmd)
     AddReportEntry("VPP Backtrace", StringerStruct{Label: string(output)})
     os.WriteFile(s.getLogDirPath()+"backtrace.log", output, os.FileMode(0644))
-    if s.CpuAllocator.runningInCi {
+    if RunningInCi {
         err = os.Remove(corePath)
         if err == nil {
             s.Log("removed " + corePath)
diff --git a/src/plugins/acl/hash_lookup.c b/src/plugins/acl/hash_lookup.c
index 9c3c662a8f1..b4f86208a71 100644
--- a/src/plugins/acl/hash_lookup.c
+++ b/src/plugins/acl/hash_lookup.c
@@ -946,31 +946,15 @@ hash_acl_reapply(acl_main_t *am, u32 lc_index, int acl_index)
 static void
 make_ip6_address_mask(ip6_address_t *addr, u8 prefix_len)
 {
+  ASSERT (prefix_len <= 128);
   ip6_address_mask_from_width(addr, prefix_len);
 }
 
-
-/* Maybe should be moved into the core somewhere */
-always_inline void
-ip4_address_mask_from_width (ip4_address_t * a, u32 width)
-{
-  int i, byte, bit, bitnum;
-  ASSERT (width <= 32);
-  clib_memset (a, 0, sizeof (a[0]));
-  for (i = 0; i < width; i++)
-    {
-      bitnum = (7 - (i & 7));
-      byte = i / 8;
-      bit = 1 << bitnum;
-      a->as_u8[byte] |= bit;
-    }
-}
-
-
 static void
 make_ip4_address_mask(ip4_address_t *addr, u8 prefix_len)
 {
-  ip4_address_mask_from_width(addr, prefix_len);
+  ASSERT (prefix_len <= 32);
+  ip4_preflen_to_mask (prefix_len, addr);
 }
 
 static void
diff --git a/src/plugins/dev_octeon/crypto.c b/src/plugins/dev_octeon/crypto.c
index 800f24a008a..52c796089d5 100644
--- a/src/plugins/dev_octeon/crypto.c
+++ b/src/plugins/dev_octeon/crypto.c
@@ -1354,7 +1354,7 @@ oct_crypto_aead_session_update (vlib_main_t *vm, oct_crypto_sess_t *sess,
   vnet_crypto_key_t *key = vnet_crypto_get_key (key_index);
   roc_se_cipher_type enc_type = 0;
   roc_se_auth_type auth_type = 0;
-  u32 digest_len = ~0;
+  u32 digest_len = 16;
   i32 rv = 0;
 
   switch (key->alg)
@@ -1366,9 +1366,6 @@ oct_crypto_aead_session_update (vlib_main_t *vm, oct_crypto_sess_t *sess,
       sess->aes_gcm = 1;
       sess->iv_offset = 0;
       sess->iv_length = 16;
-      sess->cpt_ctx.mac_len = 16;
-      sess->cpt_op = type;
-      digest_len = 16;
       break;
     case VNET_CRYPTO_ALG_CHACHA20_POLY1305:
       enc_type = ROC_SE_CHACHA20;
@@ -1381,6 +1378,9 @@ oct_crypto_aead_session_update (vlib_main_t *vm, oct_crypto_sess_t *sess,
       return -1;
     }
 
+  sess->cpt_ctx.mac_len = digest_len;
+  sess->cpt_op = type;
+
   rv = roc_se_ciph_key_set (&sess->cpt_ctx, enc_type, key->data, key->length);
   if (rv)
     {
@@ -1940,7 +1940,7 @@ oct_init_crypto_engine_handlers (vlib_main_t *vm, vnet_dev_t *dev)
 }
 
 int
-oct_conf_sw_queue (vlib_main_t *vm, vnet_dev_t *dev)
+oct_conf_sw_queue (vlib_main_t *vm, vnet_dev_t *dev, oct_crypto_dev_t *ocd)
 {
   oct_crypto_main_t *ocm = &oct_crypto_main;
   vlib_thread_main_t *tm = vlib_get_thread_main ();
@@ -1961,7 +1961,7 @@ oct_conf_sw_queue (vlib_main_t *vm, vnet_dev_t *dev)
    * Each pending queue will get number of cpt desc / number of cores.
    * And that desc count is shared across inflight entries.
    */
-  n_inflight_req = (OCT_CPT_LF_MAX_NB_DESC / tm->n_vlib_mains);
+  n_inflight_req = (ocd->n_desc / tm->n_vlib_mains);
 
   for (i = 0; i < tm->n_vlib_mains; ++i)
     {
diff --git a/src/plugins/dev_octeon/crypto.h b/src/plugins/dev_octeon/crypto.h
index 5bd26f6b9be..4d8c56a314c 100644
--- a/src/plugins/dev_octeon/crypto.h
+++ b/src/plugins/dev_octeon/crypto.h
@@ -11,6 +11,9 @@
 
 #define OCT_MAX_N_CPT_DEV 2
 
+#define OCT_CPT_LF_DEF_NB_DESC 16384
+
+#define OCT_CPT_LF_MIN_NB_DESC 1024
 #define OCT_CPT_LF_MAX_NB_DESC 128000
 
 /* CRYPTO_ID, KEY_LENGTH_IN_BYTES, TAG_LEN, AAD_LEN */
@@ -81,6 +84,7 @@ typedef struct
   struct roc_cpt_lmtline lmtline;
   struct roc_cpt_lf lf;
   vnet_dev_t *dev;
+  u32 n_desc;
 } oct_crypto_dev_t;
 
 typedef struct
@@ -211,5 +215,6 @@ vnet_crypto_async_frame_t *oct_crypto_frame_dequeue (vlib_main_t *vm,
 						      u32 *nb_elts_processed,
 						      u32 *enqueue_thread_idx);
 int oct_init_crypto_engine_handlers (vlib_main_t *vm, vnet_dev_t *dev);
-int oct_conf_sw_queue (vlib_main_t *vm, vnet_dev_t *dev);
+int oct_conf_sw_queue (vlib_main_t *vm, vnet_dev_t *dev,
+		       oct_crypto_dev_t *ocd);
 #endif /* _CRYPTO_H_ */
diff --git a/src/plugins/dev_octeon/init.c b/src/plugins/dev_octeon/init.c
index 561cbe94fed..69fb097e91f 100644
--- a/src/plugins/dev_octeon/init.c
+++ b/src/plugins/dev_octeon/init.c
@@ -61,6 +61,22 @@ static struct
 #undef _
 };
 
+static vnet_dev_arg_t oct_dev_args[] = {
+  {
+    .id = OCT_DEV_ARG_CRYPTO_N_DESC,
+    .name = "n_desc",
+    .desc = "number of cpt descriptors, applicable to cpt devices only",
+    .type = VNET_DEV_ARG_TYPE_UINT32,
+    .default_val.uint32 = OCT_CPT_LF_DEF_NB_DESC,
+  },
+  {
+    .id = OCT_DEV_ARG_END,
+    .name = "end",
+    .desc = "Argument end",
+    .type = VNET_DEV_ARG_END,
+  },
+};
+
 static u8 *
 oct_probe (vlib_main_t *vm, vnet_dev_bus_index_t bus_index, void *dev_info)
 {
@@ -241,7 +257,7 @@ oct_conf_cpt_queue (vlib_main_t *vm, vnet_dev_t *dev, oct_crypto_dev_t *ocd)
   cpt_lf = &ocd->lf;
   cpt_lmtline = &ocd->lmtline;
 
-  cpt_lf->nb_desc = OCT_CPT_LF_MAX_NB_DESC;
+  cpt_lf->nb_desc = ocd->n_desc;
   cpt_lf->lf_id = 0;
   if ((rrv = roc_cpt_lf_init (roc_cpt, cpt_lf)) < 0)
     return cnx_return_roc_err (dev, rrv, "roc_cpt_lf_init");
@@ -261,6 +277,7 @@ oct_init_cpt (vlib_main_t *vm, vnet_dev_t *dev)
   extern oct_plt_init_param_t oct_plt_init_param;
   oct_device_t *cd = vnet_dev_get_data (dev);
   oct_crypto_dev_t *ocd = NULL;
+  u32 n_desc;
   int rrv;
 
   if (ocm->n_cpt == OCT_MAX_N_CPT_DEV || ocm->started)
@@ -274,6 +291,27 @@ oct_init_cpt (vlib_main_t *vm, vnet_dev_t *dev)
       ocd->roc_cpt->pci_dev = &cd->plt_pci_dev;
 
       ocd->dev = dev;
+      ocd->n_desc = OCT_CPT_LF_DEF_NB_DESC;
+
+      foreach_vnet_dev_args (arg, dev)
+	{
+	  if (arg->id == OCT_DEV_ARG_CRYPTO_N_DESC &&
+	      vnet_dev_arg_get_uint32 (arg))
+	    {
+	      n_desc = vnet_dev_arg_get_uint32 (arg);
+	      if (n_desc < OCT_CPT_LF_MIN_NB_DESC ||
+		  n_desc > OCT_CPT_LF_MAX_NB_DESC)
+		{
+		  log_err (dev,
+			   "number of cpt descriptors should be within range "
+			   "of %u and %u",
+			   OCT_CPT_LF_MIN_NB_DESC, OCT_CPT_LF_MAX_NB_DESC);
+		  return VNET_DEV_ERR_NOT_SUPPORTED;
+		}
+
+	      ocd->n_desc = vnet_dev_arg_get_uint32 (arg);
+	    }
+	}
 
       if ((rrv = roc_cpt_dev_init (ocd->roc_cpt)))
 	return cnx_return_roc_err (dev, rrv, "roc_cpt_dev_init");
@@ -290,7 +328,7 @@ oct_init_cpt (vlib_main_t *vm, vnet_dev_t *dev)
        * Initialize s/w queues, which are common across multiple
        * crypto devices
        */
-      oct_conf_sw_queue (vm, dev);
+      oct_conf_sw_queue (vm, dev, ocd);
 
       ocm->crypto_dev[0] = ocd;
     }
@@ -396,6 +434,7 @@ VNET_DEV_REGISTER_DRIVER (octeon) = {
     .free = oct_free,
     .probe = oct_probe,
   },
+  .args = oct_dev_args,
 };
 
 static clib_error_t *
diff --git a/src/plugins/dev_octeon/octeon.h b/src/plugins/dev_octeon/octeon.h
index ccf8f62880d..0cf937528f0 100644
--- a/src/plugins/dev_octeon/octeon.h
+++ b/src/plugins/dev_octeon/octeon.h
@@ -25,6 +25,12 @@
 
 typedef enum
 {
+  OCT_DEV_ARG_CRYPTO_N_DESC = 1,
+  OCT_DEV_ARG_END,
+} oct_dev_args_t;
+
+typedef enum
+{
   OCT_DEVICE_TYPE_UNKNOWN = 0,
   OCT_DEVICE_TYPE_RVU_PF,
   OCT_DEVICE_TYPE_RVU_VF,
diff --git a/src/vat2/main.c b/src/vat2/main.c
index bf415854db1..2949c4899aa 100644
--- a/src/vat2/main.c
+++ b/src/vat2/main.c
@@ -253,16 +253,15 @@ print_help (void)
     "Send API message to VPP and print reply\n"
     "\n"
     "-d, --debug Print additional information\n"
-    "-p, --prefix <prefix> Specify shared memory prefix to connect "
-    "to a given VPP instance\n"
+    "--dump-apis List all APIs available in VAT2 (might "
+    "not reflect running VPP)\n"
     "-f, --file <filename> File containing a JSON object with the "
    "arguments for the message to send\n"
+    "-p, --plugin-path Plugin path\n"
+    "-s, --prefix <prefix> Specify shared memory prefix to connect "
+    "to a given VPP instance\n"
     "-t, --template <message-name> Print a template JSON object for given API"
-    " message\n"
-    "--dump-apis List all APIs available in VAT2 (might "
-    "not reflect running VPP)\n"
-    "--plugin-path Pluing path"
-    "\n";
+    " message\n";
 
   printf ("%s", help_string);
 }
@@ -281,38 +280,38 @@ main (int argc, char **argv)
   char *msgname = 0;
   static struct option long_options[] = {
     { "debug", no_argument, 0, 'd' },
-    { "prefix", required_argument, 0, 's' },
-    { "file", required_argument, 0, 'f' },
     { "dump-apis", no_argument, 0, 0 },
-    { "template", required_argument, 0, 't' },
+    { "file", required_argument, 0, 'f' },
     { "plugin-path", required_argument, 0, 'p' },
+    { "prefix", required_argument, 0, 's' },
+    { "template", required_argument, 0, 't' },
     { 0, 0, 0, 0 }
   };
 
-  while ((c = getopt_long (argc, argv, "hdp:f:t:", long_options,
+  while ((c = getopt_long (argc, argv, "df:p:s:t:", long_options,
			   &option_index)) != -1)
     {
       switch (c)
	{
	case 0:
-	  if (option_index == 3)
+	  if (option_index == 1)
	    dump_api = true;
	  break;
	case 'd':
	  vat2_debug = true;
	  break;
-	case 't':
-	  template = optarg;
-	  break;
-	case 's':
-	  prefix = optarg;
-	  break;
	case 'f':
	  filename = optarg;
	  break;
	case 'p':
	  pluginpath = optarg;
	  break;
+	case 's':
+	  prefix = optarg;
+	  break;
+	case 't':
+	  template = optarg;
+	  break;
	case '?':
	  print_help ();
	  return 1;
diff --git a/test/Makefile b/test/Makefile
index 37f8e2db18b..6196be80a18 100644
--- a/test/Makefile
+++ b/test/Makefile
@@ -402,7 +402,7 @@ endif
 LCOV_VERSION=$(shell lcov --version | sed -E 's/^lcov: LCOV version ([0-9]+)[.].*/\1/')
 LCOV_IGNORE_ERRORS=
 ifeq ($(LCOV_VERSION),2)
-LCOV_IGNORE_ERRORS=--ignore-errors unused,empty,mismatch,gcov
+LCOV_IGNORE_ERRORS=--ignore-errors unused,empty,mismatch,gcov,negative
 endif
 
 .PHONY: cov-post
diff --git a/test/vpp_iperf.py b/test/vpp_iperf.py
index b325399f8e1..03ced8bf12f 100644
--- a/test/vpp_iperf.py
+++ b/test/vpp_iperf.py
@@ -5,6 +5,8 @@
 import subprocess
 import os
 import sys
+import time
+import signal
 
 
 class VppIperf:
@@ -196,20 +198,31 @@ def start_iperf(
 
 def stop_iperf(iperf_cmd):
     """Stop the iperf process matching the iperf_cmd string."""
-    args = ["pgrep", "-x", "-f", iperf_cmd]
-    p = subprocess.Popen(
-        args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8"
-    )
-    stdout, _ = p.communicate()
-    for pid in stdout.split():
-        try:
-            subprocess.run(
-                f"kill -9 {pid}",
-                encoding="utf-8",
-                shell=True,
-            )
-        except Exception:
-            pass
+    try:
+        result = subprocess.run(
+            ["pgrep", "-x", "-f", iperf_cmd],
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            encoding="utf-8",
+        )
+        pids = result.stdout.strip().split()
+        if not pids:
+            # No matching iperf3 processes found
+            return
+
+        for pid in pids:
+            try:
+                # First send SIGTERM to cleanup and notify the parent process
+                os.kill(int(pid), signal.SIGTERM)
+                time.sleep(2)
+                os.kill(int(pid), 0)  # Check if still alive
+                os.kill(int(pid), signal.SIGKILL)
+            except ProcessLookupError:
+                pass  # Process already exited
+            except Exception as e:
+                print(f"Error terminating iperf3 process {pid}: {e}")
+    except Exception as e:
+        print(f"Failed to run pgrep for '{iperf_cmd}': {e}")
 
 
 if __name__ == "__main__":
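
Note on the new dev_octeon "n_desc" argument added above: it sets the CPT crypto descriptor
count per device (default OCT_CPT_LF_DEF_NB_DESC = 16384, accepted range 1024-128000).
The sketch below shows how such a device argument might be supplied via startup.conf; the
PCI address and the exact "devices { dev ... { args ... } }" syntax of the vnet dev
framework are assumptions for illustration, not part of this patch.

    devices {
      # hypothetical CPT device address - replace with the real one
      dev pci/0002:20:00.1 {
        # number of CPT descriptors; must stay within 1024..128000
        args "n_desc=32768"
      }
    }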