author     Adrian Villin <avillin@cisco.com>       2024-07-17 14:38:48 +0200
committer  Dave Wallace <dwallacelf@gmail.com>     2024-07-17 16:10:29 +0000
commit     b69ee00c34ede374dad78f33106b39cf75087d5a (patch)
tree       d7f741ee85c4e9cddf973a03c3bbdf33c302fb5f
parent     aedfd7ca3d2f5b17cfd20d4dc3919fe765adb27a (diff)
hs-test: minor cpu pinning suite improvements
- added max cpu check for CI
- added a check for Ip4AddrAllocator: fixes a case where teardown panics
  if a test crashes before the allocator gets initialized

Type: test
Change-Id: Ica12366cd79d77801964dfbdc8ee7c9969b4a9ce
Signed-off-by: Adrian Villin <avillin@cisco.com>
-rw-r--r--   extras/hs-test/infra/cpu.go                  7
-rw-r--r--   extras/hs-test/infra/hst_suite.go           23
-rw-r--r--   extras/hs-test/infra/suite_cpu_pinning.go   15
3 files changed, 29 insertions, 16 deletions
diff --git a/extras/hs-test/infra/cpu.go b/extras/hs-test/infra/cpu.go
index 6ba60e5dfe4..a1682819a2f 100644
--- a/extras/hs-test/infra/cpu.go
+++ b/extras/hs-test/infra/cpu.go
@@ -39,13 +39,6 @@ func (c *CpuAllocatorT) Allocate(containerCount int, nCpus int) (*CpuContext, er
// indexes, not actual cores
var minCpu, maxCpu int
- // temporary fix for CpuPinningSuite
- if strings.Contains(CurrentSpecReport().ContainerHierarchyTexts[0], "CpuPinning") {
- cpuAllocator.maxContainerCount = 1
- } else {
- cpuAllocator.maxContainerCount = 4
- }
-
if c.runningInCi {
minCpu = ((c.buildNumber) * c.maxContainerCount * nCpus)
maxCpu = ((c.buildNumber + 1) * c.maxContainerCount * nCpus) - 1
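Note: the removed block flipped maxContainerCount per suite from inside Allocate(), which also shifted the CI CPU window computed just below it; that per-suite limit now lives in CpuPinningSuite.SetupTest (last file in this change). A minimal sketch of the window arithmetic, where the standalone helper ciCpuWindow is hypothetical and only the formula comes from the hunk above:

package main

import "fmt"

// ciCpuWindow mirrors the arithmetic in CpuAllocatorT.Allocate for CI runs:
// each build gets a contiguous window of CPU indexes sized
// maxContainerCount * nCpus, offset by its build number.
// Hypothetical helper; only the formula is taken from the diff.
func ciCpuWindow(buildNumber, maxContainerCount, nCpus int) (minCpu, maxCpu int) {
	minCpu = buildNumber * maxContainerCount * nCpus
	maxCpu = (buildNumber+1)*maxContainerCount*nCpus - 1
	return
}

func main() {
	// e.g. build 2, up to 4 containers, 3 CPUs each -> indexes 24..35
	minCpu, maxCpu := ciCpuWindow(2, 4, 3)
	fmt.Printf("CPU indexes %d..%d\n", minCpu, maxCpu)
}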
diff --git a/extras/hs-test/infra/hst_suite.go b/extras/hs-test/infra/hst_suite.go
index 41c8d29f84a..ac255b9fd44 100644
--- a/extras/hs-test/infra/hst_suite.go
+++ b/extras/hs-test/infra/hst_suite.go
@@ -47,7 +47,7 @@ type HstSuite struct {
TestIds map[string]string
CpuAllocator *CpuAllocatorT
CpuContexts []*CpuContext
- CpuPerVpp int
+ CpuCount int
Ppid string
ProcessIndex string
Logger *log.Logger
@@ -75,11 +75,11 @@ func (s *HstSuite) SetupSuite() {
if err != nil {
Fail("failed to init cpu allocator: " + fmt.Sprint(err))
}
- s.CpuPerVpp = *NConfiguredCpus
+ s.CpuCount = *NConfiguredCpus
}
func (s *HstSuite) AllocateCpus() []int {
- cpuCtx, err := s.CpuAllocator.Allocate(len(s.StartedContainers), s.CpuPerVpp)
+ cpuCtx, err := s.CpuAllocator.Allocate(len(s.StartedContainers), s.CpuCount)
// using Fail instead of AssertNil to make error message more readable
if err != nil {
Fail(fmt.Sprint(err))
@@ -105,7 +105,10 @@ func (s *HstSuite) TearDownTest() {
}
s.ResetContainers()
s.RemoveVolumes()
- s.Ip4AddrAllocator.DeleteIpAddresses()
+
+ if s.Ip4AddrAllocator != nil {
+ s.Ip4AddrAllocator.DeleteIpAddresses()
+ }
}
func (s *HstSuite) SkipIfUnconfiguring() {
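The guard added to TearDownTest matters because Ip4AddrAllocator is a pointer that is only assigned during setup; if a spec crashes before that point, calling DeleteIpAddresses on the nil field panics inside teardown. A minimal sketch of the pattern, with stand-in types (the real Ip4AddrAllocator lives elsewhere in the hs-test infra and is only assumed here):

package main

import "fmt"

// Ip4Allocator stands in for the suite's Ip4AddrAllocator.
type Ip4Allocator struct {
	addresses []string
}

func (a *Ip4Allocator) DeleteIpAddresses() {
	a.addresses = nil // writing through a nil receiver would panic
}

// suite is a stand-in for HstSuite.
type suite struct {
	Ip4AddrAllocator *Ip4Allocator // stays nil if setup never ran
}

func (s *suite) TearDownTest() {
	// same nil-guard pattern as the hunk above: teardown is safe even when
	// a test crashed before the allocator was initialized
	if s.Ip4AddrAllocator != nil {
		s.Ip4AddrAllocator.DeleteIpAddresses()
	}
}

func main() {
	s := &suite{} // setup never ran; allocator is nil
	s.TearDownTest()
	fmt.Println("teardown completed without panic")
}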
@@ -247,11 +250,17 @@ func (s *HstSuite) SkipIfMultiWorker(args ...any) {
}
}
-func (s *HstSuite) SkipIfNotEnoughAvailableCpus(containerCount int, nCpus int) bool {
- MaxRequestedCpu := (GinkgoParallelProcess() * containerCount * nCpus)
+func (s *HstSuite) SkipIfNotEnoughAvailableCpus() bool {
+ var MaxRequestedCpu int
+
+ if s.CpuAllocator.runningInCi {
+ MaxRequestedCpu = ((s.CpuAllocator.buildNumber + 1) * s.CpuAllocator.maxContainerCount * s.CpuCount)
+ } else {
+ MaxRequestedCpu = (GinkgoParallelProcess() * s.CpuAllocator.maxContainerCount * s.CpuCount)
+ }
if len(s.CpuAllocator.cpus)-1 < MaxRequestedCpu {
- s.Skip(fmt.Sprintf("test case cannot allocate requested cpus (%d cpus * %d containers)", nCpus, containerCount))
+ s.Skip(fmt.Sprintf("test case cannot allocate requested cpus (%d cpus * %d containers)", s.CpuCount, s.CpuAllocator.maxContainerCount))
}
return true
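The reworked skip check no longer takes containerCount/nCpus arguments; it derives the highest CPU index a run may need from the allocator's own state, using the same build-number arithmetic as Allocate() in CI and GinkgoParallelProcess() locally. A standalone sketch of that computation, with function and parameter names assumed from the hunk above:

package main

import "fmt"

// maxRequestedCpu reproduces the arithmetic of the reworked
// SkipIfNotEnoughAvailableCpus: the highest CPU index a run may need,
// depending on whether it executes in CI or as a local parallel process.
func maxRequestedCpu(runningInCi bool, buildNumber, parallelProcess, maxContainerCount, cpuCount int) int {
	if runningInCi {
		return (buildNumber + 1) * maxContainerCount * cpuCount
	}
	return parallelProcess * maxContainerCount * cpuCount
}

// shouldSkip mirrors the check "len(s.CpuAllocator.cpus)-1 < MaxRequestedCpu".
func shouldSkip(availableCpus, requested int) bool {
	return availableCpus-1 < requested
}

func main() {
	// e.g. local run, Ginkgo parallel process 3, 1 container, 3 CPUs each
	requested := maxRequestedCpu(false, 0, 3, 1, 3)
	fmt.Println("max requested index:", requested, "skip on 8-CPU host:", shouldSkip(8, requested))
}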
diff --git a/extras/hs-test/infra/suite_cpu_pinning.go b/extras/hs-test/infra/suite_cpu_pinning.go
index 629d2dac3ed..e829efa950b 100644
--- a/extras/hs-test/infra/suite_cpu_pinning.go
+++ b/extras/hs-test/infra/suite_cpu_pinning.go
@@ -13,6 +13,7 @@ var cpuPinningSoloTests = map[string][]func(s *CpuPinningSuite){}
type CpuPinningSuite struct {
HstSuite
+ previousMaxContainerCount int
}
func RegisterCpuPinningTests(tests ...func(s *CpuPinningSuite)) {
@@ -31,12 +32,22 @@ func (s *CpuPinningSuite) SetupSuite() {
func (s *CpuPinningSuite) SetupTest() {
// Skip if we cannot allocate 3 CPUs for test container
- s.SkipIfNotEnoughAvailableCpus(1, 3)
- s.CpuPerVpp = 3
+ s.previousMaxContainerCount = s.CpuAllocator.maxContainerCount
+ s.CpuCount = 3
+ s.CpuAllocator.maxContainerCount = 1
+ s.SkipIfNotEnoughAvailableCpus()
+
s.HstSuite.SetupTest()
container := s.GetContainerByName(SingleTopoContainerVpp)
vpp, err := container.newVppInstance(container.AllocatedCpus)
s.AssertNotNil(vpp, fmt.Sprint(err))
+}
+
+func (s *CpuPinningSuite) TearDownTest() {
+ // reset vars
+ s.CpuCount = *NConfiguredCpus
+ s.CpuAllocator.maxContainerCount = s.previousMaxContainerCount
+ s.HstSuite.TearDownTest()
}
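The new TearDownTest restores the allocator state that SetupTest overrides, so the stricter one-container limit does not leak into other suites sharing the same CpuAllocatorT. A minimal sketch of this save/override/restore pattern, with stand-in types (only the flow is taken from the hunks above):

package main

import "fmt"

// allocator and pinningSuite stand in for CpuAllocatorT and CpuPinningSuite.
type allocator struct {
	maxContainerCount int
}

type pinningSuite struct {
	cpuAllocator              *allocator
	cpuCount                  int
	previousMaxContainerCount int
}

func (s *pinningSuite) SetupTest() {
	// remember the shared allocator's setting, then tighten it for this suite
	s.previousMaxContainerCount = s.cpuAllocator.maxContainerCount
	s.cpuCount = 3
	s.cpuAllocator.maxContainerCount = 1
}

func (s *pinningSuite) TearDownTest() {
	// restore the shared state so other suites see the value they expect
	s.cpuAllocator.maxContainerCount = s.previousMaxContainerCount
}

func main() {
	a := &allocator{maxContainerCount: 4}
	s := &pinningSuite{cpuAllocator: a}
	s.SetupTest()
	fmt.Println("during test:", a.maxContainerCount) // 1
	s.TearDownTest()
	fmt.Println("after test:", a.maxContainerCount) // 4
}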