| author | 2025-04-02 19:01:17 +0200 |
|---|---|
| committer | 2025-04-03 07:01:16 +0000 |
| commit | 6d1e7b133f4dd8bc5833aae0e68847ccbda17817 (patch) |
| tree | ecdf97950f396882103fa1a217d9cffa659e76a9 /extras |
| parent | 8a8bcae52833fbf542c01a766a5984bd75d6a939 (diff) |
- no longer distinguish between release and debug builds in the CI
- core allocation now always starts from core 1 (or core 0 if CPU0=true); see the sketch below the diffstat
Type: improvement
Change-Id: I4568bda01bd90fba14ca81f6669bdab3b7521415
Signed-off-by: Adrian Villin <avillin@cisco.com>
Diffstat (limited to 'extras')

| file | lines changed |
|---|---|
| extras/hs-test/framework_test.go | 2 |
| extras/hs-test/infra/cpu.go | 55 |
| extras/hs-test/infra/hst_suite.go | 9 |

3 files changed, 12 insertions, 54 deletions
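For context, the simplified window arithmetic in `Allocate` can be read on its own. The sketch below is illustrative and not part of the commit: it assumes `maxContainerCount = 4` (as set in `CpuAllocator()`) and mirrors the post-change min/max index computation, where every Ginkgo worker gets a disjoint window of CPU indexes whether or not it runs in CI.

```go
// Illustrative sketch of the allocation window after this change.
// Names mirror the diff; this standalone file is for explanation only.
package main

import "fmt"

// cpuRange mimics the min/max CPU-index computation in CpuAllocatorT.Allocate:
// each Ginkgo parallel process (numbered from 1) gets a disjoint window of
// maxContainerCount*nCpus indexes, shifted by offset.
func cpuRange(parallelProcess, maxContainerCount, nCpus, offset int) (minCpu, maxCpu int) {
	minCpu = ((parallelProcess - 1) * maxContainerCount * nCpus) + offset
	maxCpu = (parallelProcess * maxContainerCount * nCpus) - 1 + offset
	return
}

func main() {
	// With 4 containers x 2 CPUs and no offset: worker 1 gets indexes 0-7,
	// worker 2 gets 8-15, and so on -- no CI-specific branch needed.
	for p := 1; p <= 2; p++ {
		lo, hi := cpuRange(p, 4, 2, 0)
		fmt.Printf("worker %d: cpu indexes %d-%d\n", p, lo, hi)
	}
}
```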
```diff
diff --git a/extras/hs-test/framework_test.go b/extras/hs-test/framework_test.go
index f3bf1be56a8..be62b61a057 100644
--- a/extras/hs-test/framework_test.go
+++ b/extras/hs-test/framework_test.go
@@ -33,6 +33,8 @@ func TestHst(t *testing.T) {
 		TestTimeout = time.Minute * 5
 	}
 
+	RunningInCi = os.Getenv("BUILD_NUMBER") != ""
+
 	output, err := os.ReadFile("/sys/devices/system/node/online")
 	if err == nil && strings.Contains(string(output), "-") {
 		NumaAwareCpuAlloc = true
diff --git a/extras/hs-test/infra/cpu.go b/extras/hs-test/infra/cpu.go
index 743a4eddc67..4afc96bcee4 100644
--- a/extras/hs-test/infra/cpu.go
+++ b/extras/hs-test/infra/cpu.go
@@ -6,7 +6,6 @@ import (
 	"fmt"
 	"os"
 	"os/exec"
-	"strconv"
 	"strings"
 
 	. "github.com/onsi/ginkgo/v2"
@@ -21,8 +20,6 @@ type CpuContext struct {
 
 type CpuAllocatorT struct {
 	cpus              []int
-	runningInCi       bool
-	buildNumber       int
 	maxContainerCount int
 }
@@ -40,13 +37,8 @@ func (c *CpuAllocatorT) Allocate(containerCount int, nCpus int, offset int) (*Cp
 	// indexes, not actual cores
 	var minCpu, maxCpu int
 
-	if c.runningInCi {
-		minCpu = ((c.buildNumber) * c.maxContainerCount * nCpus) + offset
-		maxCpu = ((c.buildNumber + 1) * c.maxContainerCount * nCpus) - 1 + offset
-	} else {
-		minCpu = ((GinkgoParallelProcess() - 1) * c.maxContainerCount * nCpus) + offset
-		maxCpu = (GinkgoParallelProcess() * c.maxContainerCount * nCpus) - 1 + offset
-	}
+	minCpu = ((GinkgoParallelProcess() - 1) * c.maxContainerCount * nCpus) + offset
+	maxCpu = (GinkgoParallelProcess() * c.maxContainerCount * nCpus) - 1 + offset
 
 	if len(c.cpus)-1 < maxCpu {
 		err := fmt.Errorf("could not allocate %d CPUs; available count: %d; attempted to allocate cores with index %d-%d; max index: %d;\n"+
@@ -66,33 +58,9 @@ func (c *CpuAllocatorT) Allocate(containerCount int, nCpus int, offset int) (*Cp
 }
 
 func (c *CpuAllocatorT) readCpus() error {
-	var first, second, third, fourth int
-	var file *os.File
-	var err error
-
-	if c.runningInCi {
-		// non-debug build runs on node0, debug on node1
-		if *IsDebugBuild {
-			file, err = os.Open("/sys/devices/system/node/node1/cpulist")
-		} else {
-			file, err = os.Open("/sys/devices/system/node/node0/cpulist")
-		}
-		if err != nil {
-			return err
-		}
-		defer file.Close()
-
-		sc := bufio.NewScanner(file)
-		sc.Scan()
-		line := sc.Text()
-		_, err = fmt.Sscanf(line, "%d-%d,%d-%d", &first, &second, &third, &fourth)
-		if err != nil {
-			return err
-		}
+	var first, second int
 
-		c.cpus = iterateAndAppend(first, second, c.cpus)
-		c.cpus = iterateAndAppend(third, fourth, c.cpus)
-	} else if NumaAwareCpuAlloc {
+	if NumaAwareCpuAlloc {
 		var range1, range2 int
 		var tmpCpus []int
@@ -124,7 +92,7 @@ func (c *CpuAllocatorT) readCpus() error {
 		line := sc.Text()
 
 		for _, coreRange := range strings.Split(line, ",") {
-			if strings.IndexRune(coreRange, '-') != -1 {
+			if strings.ContainsRune(coreRange, '-') {
 				_, err = fmt.Sscanf(coreRange, "%d-%d", &range1, &range2)
 				if err != nil {
 					return err
@@ -148,7 +116,8 @@ func (c *CpuAllocatorT) readCpus() error {
 		// and we can use offsets
 		countToRemove := len(tmpCpus) % (c.maxContainerCount * *NConfiguredCpus)
 		if countToRemove >= len(tmpCpus) {
-			return fmt.Errorf("requested too much CPUs per container (%d) should be no more than %d", *NConfiguredCpus, len(tmpCpus)/c.maxContainerCount)
+			return fmt.Errorf("requested too many CPUs per container (%d), should be no more "+
+				"than %d", *NConfiguredCpus, len(tmpCpus)/c.maxContainerCount)
 		}
 		c.cpus = append(c.cpus, tmpCpus[:len(tmpCpus)-countToRemove]...)
 		tmpCpus = tmpCpus[:0]
@@ -200,16 +169,6 @@ func CpuAllocator() (*CpuAllocatorT, error) {
 		var err error
 		cpuAllocator = new(CpuAllocatorT)
 		cpuAllocator.maxContainerCount = 4
-		buildNumberStr := os.Getenv("BUILD_NUMBER")
-
-		if buildNumberStr != "" {
-			cpuAllocator.runningInCi = true
-			// get last digit of build number
-			cpuAllocator.buildNumber, err = strconv.Atoi(buildNumberStr[len(buildNumberStr)-1:])
-			if err != nil {
-				return nil, err
-			}
-		}
 		err = cpuAllocator.readCpus()
 		if err != nil {
 			return nil, err
diff --git a/extras/hs-test/infra/hst_suite.go b/extras/hs-test/infra/hst_suite.go
index 5ef4883ebdb..c2dfc592ebb 100644
--- a/extras/hs-test/infra/hst_suite.go
+++ b/extras/hs-test/infra/hst_suite.go
@@ -46,6 +46,7 @@ var ParallelTotal = flag.Lookup("ginkgo.parallel.total")
 var DryRun = flag.Bool("dryrun", false, "set up containers but don't run tests")
 var NumaAwareCpuAlloc bool
 var TestTimeout time.Duration
+var RunningInCi bool
 
 type HstSuite struct {
 	AllContainers map[string]*Container
@@ -443,11 +444,7 @@ func (s *HstSuite) SkipIfNotEnoughAvailableCpus() {
 		availableCpus++
 	}
 
-	if s.CpuAllocator.runningInCi {
-		maxRequestedCpu = ((s.CpuAllocator.buildNumber + 1) * s.CpuAllocator.maxContainerCount * s.CpuCount)
-	} else {
-		maxRequestedCpu = (GinkgoParallelProcess() * s.CpuAllocator.maxContainerCount * s.CpuCount)
-	}
+	maxRequestedCpu = (GinkgoParallelProcess() * s.CpuAllocator.maxContainerCount * s.CpuCount)
 
 	if availableCpus < maxRequestedCpu {
 		s.Skip(fmt.Sprintf("Test case cannot allocate requested cpus "+
@@ -516,7 +513,7 @@ func (s *HstSuite) WaitForCoreDump() bool {
 	output, _ := exechelper.Output(cmd)
 	AddReportEntry("VPP Backtrace", StringerStruct{Label: string(output)})
 	os.WriteFile(s.getLogDirPath()+"backtrace.log", output, os.FileMode(0644))
-	if s.CpuAllocator.runningInCi {
+	if RunningInCi {
 		err = os.Remove(corePath)
 		if err == nil {
 			s.Log("removed " + corePath)
```
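The NUMA-aware path of `readCpus` still parses comma-separated core ranges such as `0-3,8-11` from sysfs `cpulist` files, now using `strings.ContainsRune` instead of `strings.IndexRune(...) != -1`. Below is a minimal standalone sketch of that parsing; `parseCpuList` is a hypothetical helper (the real code scans the file with `bufio` and appends into `CpuAllocatorT.cpus`), and the single-core branch is an assumption, since the diff only shows the ranged case.

```go
// Sketch of cpulist range parsing, assuming input like "0-3,8-11,14".
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseCpuList is a hypothetical helper that expands a sysfs cpulist
// string into individual CPU indexes.
func parseCpuList(line string) ([]int, error) {
	var cpus []int
	for _, coreRange := range strings.Split(strings.TrimSpace(line), ",") {
		if strings.ContainsRune(coreRange, '-') {
			// Ranged entry, e.g. "8-11": expand to every core in the range.
			var lo, hi int
			if _, err := fmt.Sscanf(coreRange, "%d-%d", &lo, &hi); err != nil {
				return nil, err
			}
			for c := lo; c <= hi; c++ {
				cpus = append(cpus, c)
			}
		} else {
			// Single-core entry, e.g. "14" (assumed behavior, not shown in the diff).
			c, err := strconv.Atoi(coreRange)
			if err != nil {
				return nil, err
			}
			cpus = append(cpus, c)
		}
	}
	return cpus, nil
}

func main() {
	cpus, err := parseCpuList("0-3,8-11,14")
	if err != nil {
		panic(err)
	}
	fmt.Println(cpus) // [0 1 2 3 8 9 10 11 14]
}
```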