Diffstat (limited to 'extras/hs-test/infra')
-rw-r--r--  extras/hs-test/infra/container.go  268
-rw-r--r--  extras/hs-test/infra/cpu.go  198
-rw-r--r--  extras/hs-test/infra/hst_suite.go  324
-rw-r--r--  extras/hs-test/infra/suite_cpu_pinning.go  113
-rw-r--r--  extras/hs-test/infra/suite_envoy_proxy.go  213
-rw-r--r--  extras/hs-test/infra/suite_iperf_linux.go (renamed from extras/hs-test/infra/suite_tap.go)  38
-rw-r--r--  extras/hs-test/infra/suite_ldp.go  203
-rw-r--r--  extras/hs-test/infra/suite_nginx.go  144
-rw-r--r--  extras/hs-test/infra/suite_nginx_proxy.go  191
-rw-r--r--  extras/hs-test/infra/suite_no_topo.go  12
-rw-r--r--  extras/hs-test/infra/suite_vpp_proxy.go  212
-rw-r--r--  extras/hs-test/infra/utils.go  197
-rw-r--r--  extras/hs-test/infra/vppinstance.go  184
13 files changed, 1907 insertions, 390 deletions
diff --git a/extras/hs-test/infra/container.go b/extras/hs-test/infra/container.go
index 1dd82809f8a..8ec9b8cd02c 100644
--- a/extras/hs-test/infra/container.go
+++ b/extras/hs-test/infra/container.go
@@ -1,13 +1,25 @@
package hst
import (
+ "bytes"
+ "context"
"fmt"
"os"
"os/exec"
+ "regexp"
+ "slices"
+ "strconv"
"strings"
"text/template"
"time"
+ "github.com/docker/go-units"
+
+ "github.com/cilium/cilium/pkg/sysctl"
+ containerTypes "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/image"
+ "github.com/docker/docker/pkg/stdcopy"
"github.com/edwarnicke/exechelper"
. "github.com/onsi/ginkgo/v2"
)
@@ -32,12 +44,14 @@ type Container struct {
IsOptional bool
RunDetached bool
Name string
+ ID string
Image string
ExtraRunningArgs string
Volumes map[string]Volume
EnvVars map[string]string
VppInstance *VppInstance
AllocatedCpus []int
+ ctx context.Context
}
func newContainer(suite *HstSuite, yamlInput ContainerConfig) (*Container, error) {
@@ -52,6 +66,7 @@ func newContainer(suite *HstSuite, yamlInput ContainerConfig) (*Container, error
container.EnvVars = make(map[string]string)
container.Name = containerName
container.Suite = suite
+ container.ctx = context.Background()
if Image, ok := yamlInput["image"]; ok {
container.Image = Image.(string)
@@ -133,8 +148,6 @@ func (c *Container) GetContainerWorkDir() (res string) {
func (c *Container) getContainerArguments() string {
args := "--ulimit nofile=90000:90000 --cap-add=all --privileged --network host"
- c.allocateCpus()
- args += fmt.Sprintf(" --cpuset-cpus=\"%d-%d\"", c.AllocatedCpus[0], c.AllocatedCpus[len(c.AllocatedCpus)-1])
args += c.getVolumesAsCliOption()
args += c.getEnvVarsAsCliOption()
if *VppSourceFileDir != "" {
@@ -145,22 +158,58 @@ func (c *Container) getContainerArguments() string {
return args
}
-func (c *Container) runWithRetry(cmd string) error {
- nTries := 5
- for i := 0; i < nTries; i++ {
- err := exechelper.Run(cmd)
- if err == nil {
- return nil
- }
- time.Sleep(1 * time.Second)
- }
- return fmt.Errorf("failed to run container command")
+func (c *Container) PullDockerImage(name string, ctx context.Context) {
+ // "func (*Client) ImagePull" doesn't work, returns "No such image"
+ c.Suite.Log("Pulling image: " + name)
+ _, err := exechelper.CombinedOutput("docker pull " + name)
+ c.Suite.AssertNil(err)
}
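
For reference, the SDK's ImagePull call is asynchronous: it returns a progress stream that must be fully drained before the image is actually available, which is a likely cause of the "No such image" result noted above. A minimal sketch of the SDK route (assuming the same client version and an extra "io" import; not what the patch does):

	reader, err := c.Suite.Docker.ImagePull(ctx, name, image.PullOptions{})
	if err == nil {
		defer reader.Close()
		// the pull only completes once the progress stream is fully consumed
		_, _ = io.Copy(io.Discard, reader)
	}
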
+// Creates a container
func (c *Container) Create() error {
- cmd := "docker create " + c.getContainerArguments()
- c.Suite.Log(cmd)
- return exechelper.Run(cmd)
+ var sliceOfImageNames []string
+ images, err := c.Suite.Docker.ImageList(c.ctx, image.ListOptions{})
+ c.Suite.AssertNil(err)
+
+ for _, image := range images {
+ sliceOfImageNames = append(sliceOfImageNames, strings.Split(image.RepoTags[0], ":")[0])
+ }
+ if !slices.Contains(sliceOfImageNames, c.Image) {
+ c.PullDockerImage(c.Image, c.ctx)
+ }
+
+ c.allocateCpus()
+ cpuSet := fmt.Sprintf("%d-%d", c.AllocatedCpus[0], c.AllocatedCpus[len(c.AllocatedCpus)-1])
+ resp, err := c.Suite.Docker.ContainerCreate(
+ c.ctx,
+ &containerTypes.Config{
+ Hostname: c.Name,
+ Image: c.Image,
+ Env: c.getEnvVars(),
+ Cmd: strings.Split(c.ExtraRunningArgs, " "),
+ },
+ &containerTypes.HostConfig{
+ Resources: containerTypes.Resources{
+ Ulimits: []*units.Ulimit{
+ {
+ Name: "nofile",
+ Soft: 90000,
+ Hard: 90000,
+ },
+ },
+ CpusetCpus: cpuSet,
+ },
+ CapAdd: []string{"ALL"},
+ Privileged: true,
+ NetworkMode: "host",
+ Binds: c.getVolumesAsSlice(),
+ },
+ nil,
+ nil,
+ c.Name,
+ )
+ c.ID = resp.ID
+ return err
}
func (c *Container) allocateCpus() {
@@ -169,10 +218,66 @@ func (c *Container) allocateCpus() {
c.Suite.Log("Allocated CPUs " + fmt.Sprint(c.AllocatedCpus) + " to container " + c.Name)
}
+// Starts a container
func (c *Container) Start() error {
- cmd := "docker start " + c.Name
- c.Suite.Log(cmd)
- return c.runWithRetry(cmd)
+ var err error
+ var nTries int
+
+ for nTries = 0; nTries < 5; nTries++ {
+ err = c.Suite.Docker.ContainerStart(c.ctx, c.ID, containerTypes.StartOptions{})
+ if err == nil {
+ break
+ }
+ c.Suite.Log("Error while starting " + c.Name + ". Retrying...")
+ time.Sleep(1 * time.Second)
+ }
+ if nTries >= 5 {
+ return err
+ }
+
+ // wait for container to start
+ time.Sleep(1 * time.Second)
+
+ // check if container exited right after startup
+ containers, err := c.Suite.Docker.ContainerList(c.ctx, containerTypes.ListOptions{
+ All: true,
+ Filters: filters.NewArgs(filters.Arg("name", c.Name)),
+ })
+ if err != nil {
+ return err
+ }
+ if containers[0].State == "exited" {
+ c.Suite.Log("Container details: " + fmt.Sprint(containers[0]))
+ return fmt.Errorf("Container %s exited: '%s'", c.Name, containers[0].Status)
+ }
+
+ return err
+}
+
+func (c *Container) GetOutput() (string, string) {
+ // Wait for the container to finish executing
+ statusCh, errCh := c.Suite.Docker.ContainerWait(c.ctx, c.ID, containerTypes.WaitConditionNotRunning)
+ select {
+ case err := <-errCh:
+ c.Suite.AssertNil(err)
+ case <-statusCh:
+ }
+
+ // Get the logs from the container
+ logOptions := containerTypes.LogsOptions{ShowStdout: true, ShowStderr: true}
+ logReader, err := c.Suite.Docker.ContainerLogs(c.ctx, c.ID, logOptions)
+ c.Suite.AssertNil(err)
+ defer logReader.Close()
+
+ var stdoutBuf, stderrBuf bytes.Buffer
+
+ // Use stdcopy.StdCopy to demultiplex the multiplexed stream
+ _, err = stdcopy.StdCopy(&stdoutBuf, &stderrBuf, logReader)
+ c.Suite.AssertNil(err)
+
+ stdout := stdoutBuf.String()
+ stderr := stderrBuf.String()
+ return stdout, stderr
}
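
Note that stdcopy demultiplexing works here only because the container is created without a TTY (see Create above): Docker then interleaves stdout and stderr on a single stream as frames with an 8-byte header, which StdCopy splits back apart. Had the container been created with Tty: true, the stream would be raw, StdCopy would fail, and the reader would instead be copied directly, e.g. io.Copy(&stdoutBuf, logReader).
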
func (c *Container) prepareCommand() (string, error) {
@@ -180,7 +285,7 @@ func (c *Container) prepareCommand() (string, error) {
return "", fmt.Errorf("run container failed: name is blank")
}
- cmd := "docker run "
+ cmd := "docker exec "
if c.RunDetached {
cmd += " -d"
}
@@ -201,22 +306,44 @@ func (c *Container) CombinedOutput() (string, error) {
return string(byteOutput), err
}
-func (c *Container) Run() error {
- cmd, err := c.prepareCommand()
- if err != nil {
- return err
- }
- return c.runWithRetry(cmd)
+// Creates and starts a container
+func (c *Container) Run() {
+ c.Suite.AssertNil(c.Create())
+ c.Suite.AssertNil(c.Start())
}
func (c *Container) addVolume(hostDir string, containerDir string, isDefaultWorkDir bool) {
var volume Volume
- volume.HostDir = hostDir
+ volume.HostDir = strings.Replace(hostDir, "volumes", c.Suite.GetTestId()+"/"+"volumes", 1)
volume.ContainerDir = containerDir
volume.IsDefaultWorkDir = isDefaultWorkDir
c.Volumes[hostDir] = volume
}
+func (c *Container) getVolumesAsSlice() []string {
+ var volumeSlice []string
+
+ if *VppSourceFileDir != "" {
+ volumeSlice = append(volumeSlice, fmt.Sprintf("%s:%s", *VppSourceFileDir, *VppSourceFileDir))
+ }
+
+ core_pattern, err := sysctl.Read("kernel.core_pattern")
+ if err == nil {
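+ // e.g. a core_pattern of "/var/crash/core-%e" (hypothetical value) yields the
+ // directory "/var/crash"; binding the log dir over it makes container core
+ // dumps land in the test's log directory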
+ index := strings.LastIndex(core_pattern, "/")
+ core_pattern = core_pattern[:index]
+ volumeSlice = append(volumeSlice, c.Suite.getLogDirPath()+":"+core_pattern)
+ } else {
+ c.Suite.Log(err)
+ }
+
+ if len(c.Volumes) > 0 {
+ for _, volume := range c.Volumes {
+ volumeSlice = append(volumeSlice, fmt.Sprintf("%s:%s", volume.HostDir, volume.ContainerDir))
+ }
+ }
+ return volumeSlice
+}
+
func (c *Container) getVolumesAsCliOption() string {
cliOption := ""
@@ -245,10 +372,23 @@ func (c *Container) getEnvVarsAsCliOption() string {
return cliOption
}
+func (c *Container) getEnvVars() []string {
+ var envVars []string
+ if len(c.EnvVars) == 0 {
+ return envVars
+ }
+
+ for name, value := range c.EnvVars {
+ envVars = append(envVars, fmt.Sprintf("%s=%s", name, value))
+ }
+ return envVars
+}
+
func (c *Container) newVppInstance(cpus []int, additionalConfigs ...Stanza) (*VppInstance, error) {
vpp := new(VppInstance)
vpp.Container = c
vpp.Cpus = cpus
+ vpp.setDefaultCpuConfig()
vpp.AdditionalConfig = append(vpp.AdditionalConfig, additionalConfigs...)
c.VppInstance = vpp
return vpp, nil
@@ -276,6 +416,11 @@ func (c *Container) CreateFile(destFileName string, content string) error {
return nil
}
+func (c *Container) GetFile(sourceFileName, targetFileName string) error {
+ cmd := exec.Command("docker", "cp", c.Name+":"+sourceFileName, targetFileName)
+ return cmd.Run()
+}
+
/*
* Executes in detached mode so that the started application can continue to run
* without blocking execution of test
@@ -300,49 +445,57 @@ func (c *Container) Exec(command string, arguments ...any) string {
return string(byteOutput)
}
-func (c *Container) getLogDirPath() string {
- testId := c.Suite.GetTestId()
- testName := c.Suite.GetCurrentTestName()
- logDirPath := logDir + testName + "/" + testId + "/"
-
- cmd := exec.Command("mkdir", "-p", logDirPath)
- if err := cmd.Run(); err != nil {
- Fail("mkdir error: " + fmt.Sprint(err))
- }
-
- return logDirPath
-}
-
func (c *Container) saveLogs() {
- testLogFilePath := c.getLogDirPath() + "container-" + c.Name + ".log"
+ testLogFilePath := c.Suite.getLogDirPath() + "container-" + c.Name + ".log"
- cmd := exec.Command("docker", "logs", "--details", "-t", c.Name)
- c.Suite.Log(cmd)
- output, err := cmd.CombinedOutput()
+ logs, err := c.log(0)
if err != nil {
c.Suite.Log(err)
+ return
}
f, err := os.Create(testLogFilePath)
if err != nil {
- Fail("file create error: " + fmt.Sprint(err))
+ c.Suite.Log(err)
+ return
}
- fmt.Fprint(f, string(output))
- f.Close()
+ defer f.Close()
+ fmt.Fprint(f, logs)
}
-// Outputs logs from docker containers. Set 'maxLines' to 0 to output the full log.
+// Returns logs from docker containers. Set 'maxLines' to 0 to output the full log.
func (c *Container) log(maxLines int) (string, error) {
- var cmd string
+ var logOptions containerTypes.LogsOptions
if maxLines == 0 {
- cmd = "docker logs " + c.Name
+ logOptions = containerTypes.LogsOptions{ShowStdout: true, ShowStderr: true, Details: true, Timestamps: true}
} else {
- cmd = fmt.Sprintf("docker logs --tail %d %s", maxLines, c.Name)
+ logOptions = containerTypes.LogsOptions{ShowStdout: true, ShowStderr: true, Details: true, Tail: strconv.Itoa(maxLines)}
}
- c.Suite.Log(cmd)
- o, err := exechelper.CombinedOutput(cmd)
- return string(o), err
+ out, err := c.Suite.Docker.ContainerLogs(c.ctx, c.ID, logOptions)
+ if err != nil {
+ c.Suite.Log(err)
+ return "", err
+ }
+ defer out.Close()
+
+ var stdoutBuf, stderrBuf bytes.Buffer
+
+ _, err = stdcopy.StdCopy(&stdoutBuf, &stderrBuf, out)
+ if err != nil {
+ c.Suite.Log(err)
+ }
+
+ stdout := stdoutBuf.String()
+ stderr := stderrBuf.String()
+
+ re := regexp.MustCompile("(?m)^.*==> /dev/null <==.*$[\r\n]+")
+ stdout = re.ReplaceAllString(stdout, "")
+
+ re = regexp.MustCompile("(?m)^.*tail: cannot open '' for reading: No such file or directory.*$[\r\n]+")
+ stderr = re.ReplaceAllString(stderr, "")
+
+ return stdout + stderr, err
}
func (c *Container) stop() error {
@@ -352,8 +505,13 @@ func (c *Container) stop() error {
}
c.VppInstance = nil
c.saveLogs()
- c.Suite.Log("docker stop " + c.Name + " -t 0")
- return exechelper.Run("docker stop " + c.Name + " -t 0")
+
+ c.Suite.Log("Stopping container " + c.Name)
+ timeout := 0
+ if err := c.Suite.Docker.ContainerStop(c.ctx, c.ID, containerTypes.StopOptions{Timeout: &timeout}); err != nil {
+ return err
+ }
+ return nil
}
func (c *Container) CreateConfig(targetConfigName string, templateName string, values any) {
diff --git a/extras/hs-test/infra/cpu.go b/extras/hs-test/infra/cpu.go
index b5555d85b98..615f8a3f87d 100644
--- a/extras/hs-test/infra/cpu.go
+++ b/extras/hs-test/infra/cpu.go
@@ -4,10 +4,12 @@ import (
"bufio"
"errors"
"fmt"
- . "github.com/onsi/ginkgo/v2"
"os"
"os/exec"
+ "strconv"
"strings"
+
+ . "github.com/onsi/ginkgo/v2"
)
var CgroupPath = "/sys/fs/cgroup/"
@@ -18,80 +20,188 @@ type CpuContext struct {
}
type CpuAllocatorT struct {
- cpus []int
+ cpus []int
+ runningInCi bool
+ buildNumber int
+ maxContainerCount int
+}
+
+func iterateAndAppend(start int, end int, slice []int) []int {
+ for i := start; i <= end; i++ {
+ slice = append(slice, i)
+ }
+ return slice
}
var cpuAllocator *CpuAllocatorT = nil
func (c *CpuAllocatorT) Allocate(containerCount int, nCpus int) (*CpuContext, error) {
var cpuCtx CpuContext
+ // indexes, not actual cores
+ var minCpu, maxCpu int
- // splitting cpus into equal parts; this will over-allocate cores but it's good enough for now
- maxContainerCount := 4
- // skip CPU 0
- minCpu := ((GinkgoParallelProcess() - 1) * maxContainerCount * nCpus) + 1
- maxCpu := (GinkgoParallelProcess() * maxContainerCount * nCpus)
+ if c.runningInCi {
+ minCpu = ((c.buildNumber) * c.maxContainerCount * nCpus)
+ maxCpu = ((c.buildNumber + 1) * c.maxContainerCount * nCpus) - 1
+ } else {
+ minCpu = ((GinkgoParallelProcess() - 1) * c.maxContainerCount * nCpus)
+ maxCpu = (GinkgoParallelProcess() * c.maxContainerCount * nCpus) - 1
+ }
if len(c.cpus)-1 < maxCpu {
- err := fmt.Errorf("could not allocate %d CPUs; available: %d; attempted to allocate cores %d-%d",
- nCpus*containerCount, len(c.cpus)-1, minCpu, maxCpu)
+ err := fmt.Errorf("could not allocate %d CPUs; available count: %d; attempted to allocate cores with index %d-%d; max index: %d;\n"+
+ "available cores: %v", nCpus*containerCount, len(c.cpus), minCpu, maxCpu, len(c.cpus)-1, c.cpus)
return nil, err
}
+
if containerCount == 1 {
cpuCtx.cpus = c.cpus[minCpu : minCpu+nCpus]
- } else if containerCount > 1 && containerCount <= maxContainerCount {
+ } else if containerCount > 1 && containerCount <= c.maxContainerCount {
cpuCtx.cpus = c.cpus[minCpu+(nCpus*(containerCount-1)) : minCpu+(nCpus*containerCount)]
} else {
- return nil, fmt.Errorf("too many containers; CPU allocation for >%d containers is not implemented", maxContainerCount)
+ return nil, fmt.Errorf("too many containers; CPU allocation for >%d containers is not implemented", c.maxContainerCount)
}
-
cpuCtx.cpuAllocator = c
return &cpuCtx, nil
}
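
To illustrate the index arithmetic with hypothetical numbers: in CI with buildNumber=2, maxContainerCount=4 and nCpus=1, minCpu = 2*4*1 = 8 and maxCpu = 3*4*1 - 1 = 11, so the run owns CPU indexes 8-11; locally, Ginkgo parallel process 1 gets indexes 0-3 under the same settings. The containerCount branch then hands each container its own nCpus-wide slice inside that window.
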
func (c *CpuAllocatorT) readCpus() error {
- var first, last int
-
- // Path depends on cgroup version. We need to check which version is in use.
- // For that following command can be used: 'stat -fc %T /sys/fs/cgroup/'
- // In case the output states 'cgroup2fs' then cgroups v2 is used, 'tmpfs' in case cgroups v1.
- cmd := exec.Command("stat", "-fc", "%T", "/sys/fs/cgroup/")
- byteOutput, err := cmd.CombinedOutput()
- if err != nil {
- return err
- }
- CpuPath := CgroupPath
- if strings.Contains(string(byteOutput), "tmpfs") {
- CpuPath += "cpuset/cpuset.effective_cpus"
- } else if strings.Contains(string(byteOutput), "cgroup2fs") {
- CpuPath += "cpuset.cpus.effective"
+ var first, second, third, fourth int
+ var file *os.File
+ var err error
+
+ if c.runningInCi {
+ // non-debug build runs on node0, debug on node1
+ if *IsDebugBuild {
+ file, err = os.Open("/sys/devices/system/node/node1/cpulist")
+ } else {
+ file, err = os.Open("/sys/devices/system/node/node0/cpulist")
+ }
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ sc := bufio.NewScanner(file)
+ sc.Scan()
+ line := sc.Text()
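+ // a node's cpulist is assumed to hold exactly two ranges of SMT siblings,
+ // e.g. "0-27,56-83"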
+ _, err = fmt.Sscanf(line, "%d-%d,%d-%d", &first, &second, &third, &fourth)
+ if err != nil {
+ return err
+ }
+
+ c.cpus = iterateAndAppend(first, second, c.cpus)
+ c.cpus = iterateAndAppend(third, fourth, c.cpus)
+ } else if NumaAwareCpuAlloc {
+ var fifth, sixth int
+ var tmpCpus []int
+
+ file, err := os.Open("/sys/devices/system/node/online")
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ sc := bufio.NewScanner(file)
+ sc.Scan()
+ line := sc.Text()
+ // get numa node range
+ _, err = fmt.Sscanf(line, "%d-%d", &first, &second)
+ if err != nil {
+ return err
+ }
+
+ for i := first; i <= second; i++ {
+ file, err := os.Open("/sys/devices/system/node/node" + fmt.Sprint(i) + "/cpulist")
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ // get numa node cores
+ sc := bufio.NewScanner(file)
+ sc.Scan()
+ line := sc.Text()
+ _, err = fmt.Sscanf(line, "%d-%d,%d-%d", &third, &fourth, &fifth, &sixth)
+ if err != nil {
+ return err
+ }
+
+ // get numa node cores from first range
+ tmpCpus = iterateAndAppend(third, fourth, tmpCpus)
+
+ // discard cpu 0
+ if tmpCpus[0] == 0 && !*UseCpu0 {
+ tmpCpus = tmpCpus[1:]
+ }
+
+ // get numa node cores from second range
+ tmpCpus = iterateAndAppend(fifth, sixth, tmpCpus)
+
+ // make c.cpus divisible by maxContainerCount * nCpus so plain offsets work
+ // without having to check which NUMA node a given range came from
+ count_to_remove := len(tmpCpus) % (c.maxContainerCount * *NConfiguredCpus)
+ c.cpus = append(c.cpus, tmpCpus[:len(tmpCpus)-count_to_remove]...)
+ tmpCpus = tmpCpus[:0]
+ }
} else {
- return errors.New("cgroup unknown fs: " + string(byteOutput))
- }
+ // Path depends on cgroup version. We need to check which version is in use.
+ // For that following command can be used: 'stat -fc %T /sys/fs/cgroup/'
+ // In case the output states 'cgroup2fs' then cgroups v2 is used, 'tmpfs' in case cgroups v1.
+ cmd := exec.Command("stat", "-fc", "%T", "/sys/fs/cgroup/")
+ byteOutput, err := cmd.CombinedOutput()
+ if err != nil {
+ return err
+ }
- file, err := os.Open(CpuPath)
- if err != nil {
- return err
- }
- defer file.Close()
-
- sc := bufio.NewScanner(file)
- sc.Scan()
- line := sc.Text()
- _, err = fmt.Sscanf(line, "%d-%d", &first, &last)
- if err != nil {
- return err
+ CpuPath := CgroupPath
+ if strings.Contains(string(byteOutput), "tmpfs") {
+ CpuPath += "cpuset/cpuset.effective_cpus"
+ } else if strings.Contains(string(byteOutput), "cgroup2fs") {
+ CpuPath += "cpuset.cpus.effective"
+ } else {
+ return errors.New("cgroup unknown fs: " + string(byteOutput))
+ }
+
+ file, err := os.Open(CpuPath)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ sc := bufio.NewScanner(file)
+ sc.Scan()
+ line := sc.Text()
+ _, err = fmt.Sscanf(line, "%d-%d", &first, &second)
+ if err != nil {
+ return err
+ }
+ c.cpus = iterateAndAppend(first, second, c.cpus)
}
- for i := first; i <= last; i++ {
- c.cpus = append(c.cpus, i)
+
+ // discard cpu 0
+ if c.cpus[0] == 0 && !*UseCpu0 {
+ c.cpus = c.cpus[1:]
}
return nil
}
func CpuAllocator() (*CpuAllocatorT, error) {
if cpuAllocator == nil {
+ var err error
cpuAllocator = new(CpuAllocatorT)
- err := cpuAllocator.readCpus()
+ cpuAllocator.maxContainerCount = 4
+ buildNumberStr := os.Getenv("BUILD_NUMBER")
+
+ if buildNumberStr != "" {
+ cpuAllocator.runningInCi = true
+ // get last digit of build number
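+ // e.g. a hypothetical BUILD_NUMBER of "1234" yields buildNumber 4, giving
+ // each CI executor its own CPU window in Allocate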
+ cpuAllocator.buildNumber, err = strconv.Atoi(buildNumberStr[len(buildNumberStr)-1:])
+ if err != nil {
+ return nil, err
+ }
+ }
+ err = cpuAllocator.readCpus()
if err != nil {
return nil, err
}
diff --git a/extras/hs-test/infra/hst_suite.go b/extras/hs-test/infra/hst_suite.go
index a6ba14676d0..234a8409ea0 100644
--- a/extras/hs-test/infra/hst_suite.go
+++ b/extras/hs-test/infra/hst_suite.go
@@ -2,22 +2,27 @@ package hst
import (
"bufio"
- "errors"
"flag"
"fmt"
"io"
"log"
+ "net/http"
+ "net/http/httputil"
"os"
"os/exec"
"path/filepath"
"runtime"
+ "strconv"
"strings"
"time"
+ "github.com/edwarnicke/exechelper"
+
+ containerTypes "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/client"
"github.com/onsi/gomega/gmeasure"
"gopkg.in/yaml.v3"
- "github.com/edwarnicke/exechelper"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
@@ -33,6 +38,10 @@ var IsVppDebug = flag.Bool("debug", false, "attach gdb to vpp")
var NConfiguredCpus = flag.Int("cpus", 1, "number of CPUs assigned to vpp")
var VppSourceFileDir = flag.String("vppsrc", "", "vpp source file directory")
var IsDebugBuild = flag.Bool("debug_build", false, "some paths are different with debug build")
+var UseCpu0 = flag.Bool("cpu0", false, "use cpu0")
+var IsLeakCheck = flag.Bool("leak_check", false, "run leak-check tests")
+var ParallelTotal = flag.Lookup("ginkgo.parallel.total")
+var NumaAwareCpuAlloc bool
var SuiteTimeout time.Duration
type HstSuite struct {
@@ -45,11 +54,27 @@ type HstSuite struct {
TestIds map[string]string
CpuAllocator *CpuAllocatorT
CpuContexts []*CpuContext
- CpuPerVpp int
+ CpuCount int
Ppid string
ProcessIndex string
Logger *log.Logger
LogFile *os.File
+ Docker *client.Client
+}
+
+// used for colorful ReportEntry
+type StringerStruct struct {
+ Label string
+}
+
+// ColorableString for ReportEntry to use
+func (s StringerStruct) ColorableString() string {
+ return fmt.Sprintf("{{red}}%s{{/}}", s.Label)
+}
+
+// non-colorable String() is used by go's string formatting support but ignored by ReportEntry
+func (s StringerStruct) String() string {
+ return s.Label
}
func getTestFilename() string {
@@ -57,8 +82,29 @@ func getTestFilename() string {
return filepath.Base(filename)
}
+func (s *HstSuite) getLogDirPath() string {
+ testId := s.GetTestId()
+ testName := s.GetCurrentTestName()
+ logDirPath := logDir + testName + "/" + testId + "/"
+
+ cmd := exec.Command("mkdir", "-p", logDirPath)
+ if err := cmd.Run(); err != nil {
+ Fail("mkdir error: " + fmt.Sprint(err))
+ }
+
+ return logDirPath
+}
+
+func (s *HstSuite) newDockerClient() {
+ var err error
+ s.Docker, err = client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+ s.AssertNil(err)
+ s.Log("docker client created")
+}
+
func (s *HstSuite) SetupSuite() {
s.CreateLogger()
+ s.newDockerClient()
s.Log("Suite Setup")
RegisterFailHandler(func(message string, callerSkip ...int) {
s.HstFail()
@@ -73,12 +119,15 @@ func (s *HstSuite) SetupSuite() {
if err != nil {
Fail("failed to init cpu allocator: " + fmt.Sprint(err))
}
- s.CpuPerVpp = *NConfiguredCpus
+ s.CpuCount = *NConfiguredCpus
}
func (s *HstSuite) AllocateCpus() []int {
- cpuCtx, err := s.CpuAllocator.Allocate(len(s.StartedContainers), s.CpuPerVpp)
- s.AssertNil(err)
+ cpuCtx, err := s.CpuAllocator.Allocate(len(s.StartedContainers), s.CpuCount)
+ // using Fail instead of AssertNil to make error message more readable
+ if err != nil {
+ Fail(fmt.Sprint(err))
+ }
s.AddCpuContext(cpuCtx)
return cpuCtx.cpus
}
@@ -89,6 +138,7 @@ func (s *HstSuite) AddCpuContext(cpuCtx *CpuContext) {
func (s *HstSuite) TearDownSuite() {
defer s.LogFile.Close()
+ defer s.Docker.Close()
s.Log("Suite Teardown")
s.UnconfigureNetworkTopology()
}
@@ -98,9 +148,12 @@ func (s *HstSuite) TearDownTest() {
if *IsPersistent {
return
}
+ s.WaitForCoreDump()
s.ResetContainers()
- s.RemoveVolumes()
- s.Ip4AddrAllocator.DeleteIpAddresses()
+
+ if s.Ip4AddrAllocator != nil {
+ s.Ip4AddrAllocator.DeleteIpAddresses()
+ }
}
func (s *HstSuite) SkipIfUnconfiguring() {
@@ -113,18 +166,9 @@ func (s *HstSuite) SetupTest() {
s.Log("Test Setup")
s.StartedContainers = s.StartedContainers[:0]
s.SkipIfUnconfiguring()
- s.SetupVolumes()
s.SetupContainers()
}
-func (s *HstSuite) SetupVolumes() {
- for _, volume := range s.Volumes {
- cmd := "docker volume create --name=" + volume
- s.Log(cmd)
- exechelper.Run(cmd)
- }
-}
-
func (s *HstSuite) SetupContainers() {
for _, container := range s.Containers {
if !container.IsOptional {
@@ -171,7 +215,7 @@ func (s *HstSuite) HstFail() {
out, err := container.log(20)
if err != nil {
s.Log("An error occured while obtaining '" + container.Name + "' container logs: " + fmt.Sprint(err))
- s.Log("The container might not be running - check logs in " + container.getLogDirPath())
+ s.Log("The container might not be running - check logs in " + s.getLogDirPath())
continue
}
s.Log("\nvvvvvvvvvvvvvvv " +
@@ -183,31 +227,67 @@ func (s *HstSuite) HstFail() {
}
func (s *HstSuite) AssertNil(object interface{}, msgAndArgs ...interface{}) {
- Expect(object).To(BeNil(), msgAndArgs...)
+ ExpectWithOffset(2, object).To(BeNil(), msgAndArgs...)
}
func (s *HstSuite) AssertNotNil(object interface{}, msgAndArgs ...interface{}) {
- Expect(object).ToNot(BeNil(), msgAndArgs...)
+ ExpectWithOffset(2, object).ToNot(BeNil(), msgAndArgs...)
}
func (s *HstSuite) AssertEqual(expected, actual interface{}, msgAndArgs ...interface{}) {
- Expect(actual).To(Equal(expected), msgAndArgs...)
+ ExpectWithOffset(2, actual).To(Equal(expected), msgAndArgs...)
}
func (s *HstSuite) AssertNotEqual(expected, actual interface{}, msgAndArgs ...interface{}) {
- Expect(actual).ToNot(Equal(expected), msgAndArgs...)
+ ExpectWithOffset(2, actual).ToNot(Equal(expected), msgAndArgs...)
}
func (s *HstSuite) AssertContains(testString, contains interface{}, msgAndArgs ...interface{}) {
- Expect(testString).To(ContainSubstring(fmt.Sprint(contains)), msgAndArgs...)
+ ExpectWithOffset(2, testString).To(ContainSubstring(fmt.Sprint(contains)), msgAndArgs...)
}
func (s *HstSuite) AssertNotContains(testString, contains interface{}, msgAndArgs ...interface{}) {
- Expect(testString).ToNot(ContainSubstring(fmt.Sprint(contains)), msgAndArgs...)
+ ExpectWithOffset(2, testString).ToNot(ContainSubstring(fmt.Sprint(contains)), msgAndArgs...)
+}
+
+func (s *HstSuite) AssertEmpty(object interface{}, msgAndArgs ...interface{}) {
+ ExpectWithOffset(2, object).To(BeEmpty(), msgAndArgs...)
}
func (s *HstSuite) AssertNotEmpty(object interface{}, msgAndArgs ...interface{}) {
- Expect(object).ToNot(BeEmpty(), msgAndArgs...)
+ ExpectWithOffset(2, object).ToNot(BeEmpty(), msgAndArgs...)
+}
+
+func (s *HstSuite) AssertMatchError(actual, expected error, msgAndArgs ...interface{}) {
+ ExpectWithOffset(2, actual).To(MatchError(expected), msgAndArgs...)
+}
+
+func (s *HstSuite) AssertGreaterThan(actual, expected interface{}, msgAndArgs ...interface{}) {
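+ // note: asserts ">=", so equal values also pass despite the name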
+ ExpectWithOffset(2, actual).Should(BeNumerically(">=", expected), msgAndArgs...)
+}
+
+func (s *HstSuite) AssertTimeEqualWithinThreshold(actual, expected time.Time, threshold time.Duration, msgAndArgs ...interface{}) {
+ ExpectWithOffset(2, actual).Should(BeTemporally("~", expected, threshold), msgAndArgs...)
+}
+
+func (s *HstSuite) AssertHttpStatus(resp *http.Response, expectedStatus int, msgAndArgs ...interface{}) {
+ ExpectWithOffset(2, resp).To(HaveHTTPStatus(expectedStatus), msgAndArgs...)
+}
+
+func (s *HstSuite) AssertHttpHeaderWithValue(resp *http.Response, key string, value interface{}, msgAndArgs ...interface{}) {
+ ExpectWithOffset(2, resp).To(HaveHTTPHeaderWithValue(key, value), msgAndArgs...)
+}
+
+func (s *HstSuite) AssertHttpHeaderNotPresent(resp *http.Response, key string, msgAndArgs ...interface{}) {
+ ExpectWithOffset(2, resp.Header.Get(key)).To(BeEmpty(), msgAndArgs...)
+}
+
+func (s *HstSuite) AssertHttpContentLength(resp *http.Response, expectedContentLen int64, msgAndArgs ...interface{}) {
+ ExpectWithOffset(2, resp).To(HaveHTTPHeaderWithValue("Content-Length", strconv.FormatInt(expectedContentLen, 10)), msgAndArgs...)
+}
+
+func (s *HstSuite) AssertHttpBody(resp *http.Response, expectedBody string, msgAndArgs ...interface{}) {
+ ExpectWithOffset(2, resp).To(HaveHTTPBody(expectedBody), msgAndArgs...)
}
func (s *HstSuite) CreateLogger() {
@@ -242,6 +322,22 @@ func (s *HstSuite) SkipIfMultiWorker(args ...any) {
}
}
+func (s *HstSuite) SkipIfNotEnoughAvailableCpus() bool {
+ var MaxRequestedCpu int
+
+ if s.CpuAllocator.runningInCi {
+ MaxRequestedCpu = ((s.CpuAllocator.buildNumber + 1) * s.CpuAllocator.maxContainerCount * s.CpuCount)
+ } else {
+ MaxRequestedCpu = (GinkgoParallelProcess() * s.CpuAllocator.maxContainerCount * s.CpuCount)
+ }
+
+ if len(s.CpuAllocator.cpus)-1 < MaxRequestedCpu {
+ s.Skip(fmt.Sprintf("test case cannot allocate requested cpus (%d cpus * %d containers)", s.CpuCount, s.CpuAllocator.maxContainerCount))
+ }
+
+ return true
+}
+
func (s *HstSuite) SkipUnlessExtendedTestsBuilt() {
imageName := "hs-test/nginx-http3"
@@ -256,18 +352,81 @@ func (s *HstSuite) SkipUnlessExtendedTestsBuilt() {
}
}
-func (s *HstSuite) ResetContainers() {
- for _, container := range s.StartedContainers {
- container.stop()
- exechelper.Run("docker rm " + container.Name)
+func (s *HstSuite) SkipUnlessLeakCheck() {
+ if !*IsLeakCheck {
+ s.Skip("leak-check tests excluded")
}
}
-func (s *HstSuite) RemoveVolumes() {
- for _, volumeName := range s.Volumes {
- cmd := "docker volume rm " + volumeName
- exechelper.Run(cmd)
- os.RemoveAll(volumeName)
+func (s *HstSuite) WaitForCoreDump() {
+ var filename string
+ dir, err := os.Open(s.getLogDirPath())
+ if err != nil {
+ s.Log(err)
+ return
+ }
+ defer dir.Close()
+
+ files, err := dir.Readdirnames(0)
+ if err != nil {
+ s.Log(err)
+ return
+ }
+ for _, file := range files {
+ if strings.Contains(file, "core") {
+ filename = file
+ }
+ }
+ timeout := 60
+ waitTime := 5
+
+ if filename != "" {
+ corePath := s.getLogDirPath() + filename
+ s.Log(fmt.Sprintf("WAITING FOR CORE DUMP (%s)", corePath))
+ for i := waitTime; i <= timeout; i += waitTime {
+ fileInfo, err := os.Stat(corePath)
+ if err != nil {
+ s.Log("Error while reading file info: " + fmt.Sprint(err))
+ return
+ }
+ currSize := fileInfo.Size()
+ s.Log(fmt.Sprintf("Waiting %ds/%ds...", i, timeout))
+ time.Sleep(time.Duration(waitTime) * time.Second)
+ fileInfo, _ = os.Stat(corePath)
+
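+ // size unchanged across the 5s window: the kernel has finished writing
+ // the core, so a backtrace can be collected with gdb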
+ if currSize == fileInfo.Size() {
+ debug := ""
+ if *IsDebugBuild {
+ debug = "_debug"
+ }
+ vppBinPath := fmt.Sprintf("../../build-root/build-vpp%s-native/vpp/bin/vpp", debug)
+ pluginsLibPath := fmt.Sprintf("build-root/build-vpp%s-native/vpp/lib/x86_64-linux-gnu/vpp_plugins", debug)
+ cmd := fmt.Sprintf("sudo gdb %s -c %s -ex 'set solib-search-path %s/%s' -ex 'bt full' -batch", vppBinPath, corePath, *VppSourceFileDir, pluginsLibPath)
+ s.Log(cmd)
+ output, _ := exechelper.Output(cmd)
+ AddReportEntry("VPP Backtrace", StringerStruct{Label: string(output)})
+ os.WriteFile(s.getLogDirPath()+"backtrace.log", output, os.FileMode(0644))
+ if s.CpuAllocator.runningInCi {
+ err = os.Remove(corePath)
+ if err == nil {
+ s.Log("removed " + corePath)
+ } else {
+ s.Log(err)
+ }
+ }
+ return
+ }
+ }
+ }
+}
+
+func (s *HstSuite) ResetContainers() {
+ for _, container := range s.StartedContainers {
+ container.stop()
+ s.Log("Removing container " + container.Name)
+ if err := s.Docker.ContainerRemove(container.ctx, container.ID, containerTypes.RemoveOptions{RemoveVolumes: true}); err != nil {
+ s.Log(err)
+ }
}
}
@@ -448,89 +607,12 @@ func (s *HstSuite) GetPortFromPpid() string {
return port[len(port)-3:] + s.ProcessIndex
}
-func (s *HstSuite) StartServerApp(running chan error, done chan struct{}, env []string) {
- cmd := exec.Command("iperf3", "-4", "-s", "-p", s.GetPortFromPpid())
- if env != nil {
- cmd.Env = env
- }
- s.Log(cmd)
- err := cmd.Start()
- if err != nil {
- msg := fmt.Errorf("failed to start iperf server: %v", err)
- running <- msg
- return
- }
- running <- nil
- <-done
- cmd.Process.Kill()
-}
-
-func (s *HstSuite) StartClientApp(ipAddress string, env []string, clnCh chan error, clnRes chan string) {
- defer func() {
- clnCh <- nil
- }()
-
- nTries := 0
-
- for {
- cmd := exec.Command("iperf3", "-c", ipAddress, "-u", "-l", "1460", "-b", "10g", "-p", s.GetPortFromPpid())
- if env != nil {
- cmd.Env = env
- }
- s.Log(cmd)
- o, err := cmd.CombinedOutput()
- if err != nil {
- if nTries > 5 {
- clnCh <- fmt.Errorf("failed to start client app '%s'.\n%s", err, o)
- return
- }
- time.Sleep(1 * time.Second)
- nTries++
- continue
- } else {
- clnRes <- fmt.Sprintf("Client output: %s", o)
- }
- break
- }
-}
-
-func (s *HstSuite) StartHttpServer(running chan struct{}, done chan struct{}, addressPort, netNs string) {
- cmd := newCommand([]string{"./http_server", addressPort, s.Ppid, s.ProcessIndex}, netNs)
- err := cmd.Start()
- s.Log(cmd)
- if err != nil {
- s.Log("Failed to start http server: " + fmt.Sprint(err))
- return
- }
- running <- struct{}{}
- <-done
- cmd.Process.Kill()
-}
-
-func (s *HstSuite) StartWget(finished chan error, server_ip, port, query, netNs string) {
- defer func() {
- finished <- errors.New("wget error")
- }()
-
- cmd := newCommand([]string{"wget", "--timeout=10", "--no-proxy", "--tries=5", "-O", "/dev/null", server_ip + ":" + port + "/" + query},
- netNs)
- s.Log(cmd)
- o, err := cmd.CombinedOutput()
- if err != nil {
- finished <- fmt.Errorf("wget error: '%v\n\n%s'", err, o)
- return
- } else if !strings.Contains(string(o), "200 OK") {
- finished <- fmt.Errorf("wget error: response not 200 OK")
- return
- }
- finished <- nil
-}
-
/*
-runBenchmark creates Gomega's experiment with the passed-in name and samples the passed-in callback repeatedly (samplesNum times),
+RunBenchmark creates Gomega's experiment with the passed-in name and samples the passed-in callback repeatedly (samplesNum times),
passing in suite context, experiment and your data.
You can also instruct runBenchmark to run with multiple concurrent workers.
+Note that when running in parallel, Gomega returns from Sample as soon as it has spun up all samples; it does not wait until all have finished.
You can record multiple named measurements (float64 or duration) within passed-in callback.
runBenchmark then produces report to show statistical distribution of measurements.
*/
@@ -543,3 +625,19 @@ func (s *HstSuite) RunBenchmark(name string, samplesNum, parallelNum int, callba
}, gmeasure.SamplingConfig{N: samplesNum, NumParallel: parallelNum})
AddReportEntry(experiment.Name, experiment)
}
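
A hypothetical usage sketch (the callback signature is inferred from the body above; the experiment and measurement names are invented):

	s.RunBenchmark("http-download", 10, 1, func(s *HstSuite, e *gmeasure.Experiment, data interface{}) {
		e.MeasureDuration("download", func() {
			// one sampled operation, e.g. a single request against the suite's server
		})
	}, nil)
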
+
+/*
+LogHttpReq is Gomega's ghttp server handler which logs received HTTP request.
+
+You should put it at the first place, so request is logged always.
+*/
+func (s *HstSuite) LogHttpReq(body bool) http.HandlerFunc {
+ return func(w http.ResponseWriter, req *http.Request) {
+ dump, err := httputil.DumpRequest(req, body)
+ if err == nil {
+ s.Log("\n> Received request (" + req.RemoteAddr + "):\n" +
+ string(dump) +
+ "\n------------------------------\n")
+ }
+ }
+}
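
For instance, wired into Gomega's ghttp server (a hypothetical setup; CombineHandlers and RespondWith are ghttp's own helpers):

	server := ghttp.NewServer()
	server.AppendHandlers(ghttp.CombineHandlers(
		s.LogHttpReq(true), // first in the chain, so the request is always logged
		ghttp.RespondWith(http.StatusOK, "hello"),
	))
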
diff --git a/extras/hs-test/infra/suite_cpu_pinning.go b/extras/hs-test/infra/suite_cpu_pinning.go
new file mode 100644
index 00000000000..355fdf96604
--- /dev/null
+++ b/extras/hs-test/infra/suite_cpu_pinning.go
@@ -0,0 +1,113 @@
+package hst
+
+import (
+ "fmt"
+ "reflect"
+ "runtime"
+ "strings"
+
+ . "github.com/onsi/ginkgo/v2"
+)
+
+var cpuPinningTests = map[string][]func(s *CpuPinningSuite){}
+var cpuPinningSoloTests = map[string][]func(s *CpuPinningSuite){}
+
+type CpuPinningSuite struct {
+ HstSuite
+ previousMaxContainerCount int
+}
+
+func RegisterCpuPinningTests(tests ...func(s *CpuPinningSuite)) {
+ cpuPinningTests[getTestFilename()] = tests
+}
+
+func RegisterCpuPinningSoloTests(tests ...func(s *CpuPinningSuite)) {
+ cpuPinningSoloTests[getTestFilename()] = tests
+}
+
+func (s *CpuPinningSuite) SetupSuite() {
+ s.HstSuite.SetupSuite()
+ s.LoadNetworkTopology("tap")
+ s.LoadContainerTopology("singleCpuPinning")
+}
+
+func (s *CpuPinningSuite) SetupTest() {
+ // Skip if we cannot allocate 3 CPUs for test container
+ s.previousMaxContainerCount = s.CpuAllocator.maxContainerCount
+ s.CpuCount = 3
+ s.CpuAllocator.maxContainerCount = 1
+ s.SkipIfNotEnoughAvailableCpus()
+
+ s.HstSuite.SetupTest()
+ container := s.GetContainerByName(SingleTopoContainerVpp)
+ vpp, err := container.newVppInstance(container.AllocatedCpus)
+ s.AssertNotNil(vpp, fmt.Sprint(err))
+}
+
+func (s *CpuPinningSuite) TearDownTest() {
+ // reset vars
+ s.CpuCount = *NConfiguredCpus
+ s.CpuAllocator.maxContainerCount = s.previousMaxContainerCount
+ s.HstSuite.TearDownTest()
+
+}
+
+var _ = Describe("CpuPinningSuite", Ordered, ContinueOnFailure, func() {
+ var s CpuPinningSuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SetupTest()
+ })
+ AfterAll(func() {
+ s.TearDownSuite()
+
+ })
+ AfterEach(func() {
+ s.TearDownTest()
+ })
+
+ // https://onsi.github.io/ginkgo/#dynamically-generating-specs
+ for filename, tests := range cpuPinningTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(SuiteTimeout))
+ }
+ }
+})
+
+var _ = Describe("CpuPinningSuiteSolo", Ordered, ContinueOnFailure, Serial, func() {
+ var s CpuPinningSuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SetupTest()
+ })
+ AfterAll(func() {
+ s.TearDownSuite()
+ })
+ AfterEach(func() {
+ s.TearDownTest()
+ })
+
+ for filename, tests := range cpuPinningSoloTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, Label("SOLO"), func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(SuiteTimeout))
+ }
+ }
+})
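
For reference, tests opt into this suite via the Register helpers above; a hypothetical test file would look like:

	func CpuPinningBasicTest(s *CpuPinningSuite) { // invented test name
		vpp := s.GetContainerByName(SingleTopoContainerVpp).VppInstance
		s.AssertNotNil(vpp)
	}

	func init() {
		RegisterCpuPinningTests(CpuPinningBasicTest)
	}
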
diff --git a/extras/hs-test/infra/suite_envoy_proxy.go b/extras/hs-test/infra/suite_envoy_proxy.go
new file mode 100644
index 00000000000..e34a7d74225
--- /dev/null
+++ b/extras/hs-test/infra/suite_envoy_proxy.go
@@ -0,0 +1,213 @@
+// Suite for Envoy proxy testing
+//
+// The topology consists of 4 containers: curl (client), VPP (session layer), Envoy (proxy), nginx (target HTTP server).
+// VPP has 2 tap interfaces configured: one for the client network and a second for the server/target network.
+
+package hst
+
+import (
+ "fmt"
+ "reflect"
+ "runtime"
+ "strings"
+
+ . "github.com/onsi/ginkgo/v2"
+)
+
+const (
+ VppContainerName = "vpp"
+ EnvoyProxyContainerName = "envoy-vcl"
+)
+
+type EnvoyProxySuite struct {
+ HstSuite
+ nginxPort uint16
+ proxyPort uint16
+ maxTimeout int
+}
+
+var envoyProxyTests = map[string][]func(s *EnvoyProxySuite){}
+var envoyProxySoloTests = map[string][]func(s *EnvoyProxySuite){}
+
+func RegisterEnvoyProxyTests(tests ...func(s *EnvoyProxySuite)) {
+ envoyProxyTests[getTestFilename()] = tests
+}
+
+func RegisterEnvoyProxySoloTests(tests ...func(s *EnvoyProxySuite)) {
+ envoyProxySoloTests[getTestFilename()] = tests
+}
+
+func (s *EnvoyProxySuite) SetupSuite() {
+ s.HstSuite.SetupSuite()
+ s.LoadNetworkTopology("2taps")
+ s.LoadContainerTopology("envoyProxy")
+
+ if *IsVppDebug {
+ s.maxTimeout = 600
+ } else {
+ s.maxTimeout = 60
+ }
+}
+
+func (s *EnvoyProxySuite) SetupTest() {
+ s.HstSuite.SetupTest()
+
+ // VPP
+ var sessionConfig Stanza
+ sessionConfig.
+ NewStanza("session").
+ Append("enable").
+ Append("use-app-socket-api").
+ Append("evt_qs_memfd_seg").
+ Append("event-queue-length 100000")
+
+ vppContainer := s.GetContainerByName(VppContainerName)
+ vpp, err := vppContainer.newVppInstance(vppContainer.AllocatedCpus, sessionConfig)
+ s.AssertNotNil(vpp, fmt.Sprint(err))
+ s.AssertNil(vpp.Start())
+ clientInterface := s.GetInterfaceByName(ClientTapInterfaceName)
+ s.AssertNil(vpp.createTap(clientInterface, 1))
+ serverInterface := s.GetInterfaceByName(ServerTapInterfaceName)
+ s.AssertNil(vpp.createTap(serverInterface, 2))
+ vppContainer.Exec("chmod 777 -R %s", vppContainer.GetContainerWorkDir())
+
+ // nginx HTTP server
+ nginxContainer := s.GetTransientContainerByName(NginxServerContainerName)
+ s.AssertNil(nginxContainer.Create())
+ s.nginxPort = 80
+ nginxSettings := struct {
+ LogPrefix string
+ Address string
+ Port uint16
+ Timeout int
+ }{
+ LogPrefix: nginxContainer.Name,
+ Address: serverInterface.Ip4AddressString(),
+ Port: s.nginxPort,
+ Timeout: s.maxTimeout,
+ }
+ nginxContainer.CreateConfig(
+ "/nginx.conf",
+ "./resources/nginx/nginx_server.conf",
+ nginxSettings,
+ )
+ s.AssertNil(nginxContainer.Start())
+
+ // Envoy
+ envoyContainer := s.GetContainerByName(EnvoyProxyContainerName)
+ s.AssertNil(envoyContainer.Create())
+ s.proxyPort = 8080
+ envoySettings := struct {
+ LogPrefix string
+ ServerAddress string
+ ServerPort uint16
+ ProxyPort uint16
+ }{
+ LogPrefix: envoyContainer.Name,
+ ServerAddress: serverInterface.Ip4AddressString(),
+ ServerPort: s.nginxPort,
+ ProxyPort: s.proxyPort,
+ }
+ envoyContainer.CreateConfig(
+ "/etc/envoy/envoy.yaml",
+ "resources/envoy/proxy.yaml",
+ envoySettings,
+ )
+ s.AssertNil(envoyContainer.Start())
+
+ // Add an IPv4 ARP entry for the nginx HTTP server, otherwise the first request fails (HTTP error 503)
+ arp := fmt.Sprintf("set ip neighbor %s %s %s",
+ serverInterface.Peer.Name(),
+ serverInterface.Ip4AddressString(),
+ serverInterface.HwAddress)
+ vppContainer.VppInstance.Vppctl(arp)
+}
+
+func (s *EnvoyProxySuite) TearDownTest() {
+ if CurrentSpecReport().Failed() {
+ s.CollectNginxLogs(NginxServerContainerName)
+ s.CollectEnvoyLogs(EnvoyProxyContainerName)
+ }
+ s.HstSuite.TearDownTest()
+}
+
+func (s *EnvoyProxySuite) ProxyPort() uint16 {
+ return s.proxyPort
+}
+
+func (s *EnvoyProxySuite) ProxyAddr() string {
+ return s.GetInterfaceByName(ClientTapInterfaceName).Peer.Ip4AddressString()
+}
+
+func (s *EnvoyProxySuite) CurlDownloadResource(uri string) {
+ args := fmt.Sprintf("-w @/tmp/write_out_download --max-time %d --insecure --noproxy '*' --remote-name --output-dir /tmp %s", s.maxTimeout, uri)
+ writeOut, log := s.RunCurlContainer(args)
+ s.AssertContains(writeOut, "GET response code: 200")
+ s.AssertNotContains(log, "bytes remaining to read")
+ s.AssertNotContains(log, "Operation timed out")
+}
+
+func (s *EnvoyProxySuite) CurlUploadResource(uri, file string) {
+ args := fmt.Sprintf("-w @/tmp/write_out_upload --max-time %d --insecure --noproxy '*' -T %s %s", s.maxTimeout, file, uri)
+ writeOut, log := s.RunCurlContainer(args)
+ s.AssertContains(writeOut, "PUT response code: 201")
+ s.AssertNotContains(log, "Operation timed out")
+}
+
+var _ = Describe("EnvoyProxySuite", Ordered, ContinueOnFailure, func() {
+ var s EnvoyProxySuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SetupTest()
+ })
+ AfterAll(func() {
+ s.TearDownSuite()
+ })
+ AfterEach(func() {
+ s.TearDownTest()
+ })
+
+ for filename, tests := range envoyProxyTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(SuiteTimeout))
+ }
+ }
+})
+
+var _ = Describe("EnvoyProxySuiteSolo", Ordered, ContinueOnFailure, func() {
+ var s EnvoyProxySuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SetupTest()
+ })
+ AfterAll(func() {
+ s.TearDownSuite()
+ })
+ AfterEach(func() {
+ s.TearDownTest()
+ })
+
+ for filename, tests := range envoyProxySoloTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, Label("SOLO"), func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(SuiteTimeout))
+ }
+ }
+})
diff --git a/extras/hs-test/infra/suite_tap.go b/extras/hs-test/infra/suite_iperf_linux.go
index c02ab8e8535..728429b505f 100644
--- a/extras/hs-test/infra/suite_tap.go
+++ b/extras/hs-test/infra/suite_iperf_linux.go
@@ -9,28 +9,36 @@ import (
. "github.com/onsi/ginkgo/v2"
)
-type TapSuite struct {
+type IperfSuite struct {
HstSuite
}
-var tapTests = map[string][]func(s *TapSuite){}
-var tapSoloTests = map[string][]func(s *TapSuite){}
+const (
+ ServerIperfContainerName string = "server"
+ ServerIperfInterfaceName string = "hstsrv"
+ ClientIperfContainerName string = "client"
+ ClientIperfInterfaceName string = "hstcln"
+)
+
+var iperfTests = map[string][]func(s *IperfSuite){}
+var iperfSoloTests = map[string][]func(s *IperfSuite){}
-func RegisterTapTests(tests ...func(s *TapSuite)) {
- tapTests[getTestFilename()] = tests
+func RegisterIperfTests(tests ...func(s *IperfSuite)) {
+ iperfTests[getTestFilename()] = tests
}
-func RegisterTapSoloTests(tests ...func(s *TapSuite)) {
- tapSoloTests[getTestFilename()] = tests
+func RegisterIperfSoloTests(tests ...func(s *IperfSuite)) {
+ iperfSoloTests[getTestFilename()] = tests
}
-func (s *TapSuite) SetupSuite() {
+func (s *IperfSuite) SetupSuite() {
time.Sleep(1 * time.Second)
s.HstSuite.SetupSuite()
- s.ConfigureNetworkTopology("tap")
+ s.ConfigureNetworkTopology("2taps")
+ s.LoadContainerTopology("2containers")
}
-var _ = Describe("TapSuite", Ordered, ContinueOnFailure, func() {
- var s TapSuite
+var _ = Describe("IperfSuite", Ordered, ContinueOnFailure, func() {
+ var s IperfSuite
BeforeAll(func() {
s.SetupSuite()
})
@@ -44,7 +52,7 @@ var _ = Describe("TapSuite", Ordered, ContinueOnFailure, func() {
s.TearDownTest()
})
- for filename, tests := range tapTests {
+ for filename, tests := range iperfTests {
for _, test := range tests {
test := test
pc := reflect.ValueOf(test).Pointer()
@@ -58,8 +66,8 @@ var _ = Describe("TapSuite", Ordered, ContinueOnFailure, func() {
}
})
-var _ = Describe("TapSuiteSolo", Ordered, ContinueOnFailure, Serial, func() {
- var s TapSuite
+var _ = Describe("IperfSuiteSolo", Ordered, ContinueOnFailure, Serial, func() {
+ var s IperfSuite
BeforeAll(func() {
s.SetupSuite()
})
@@ -73,7 +81,7 @@ var _ = Describe("TapSuiteSolo", Ordered, ContinueOnFailure, Serial, func() {
s.TearDownTest()
})
- for filename, tests := range tapSoloTests {
+ for filename, tests := range iperfSoloTests {
for _, test := range tests {
test := test
pc := reflect.ValueOf(test).Pointer()
diff --git a/extras/hs-test/infra/suite_ldp.go b/extras/hs-test/infra/suite_ldp.go
new file mode 100644
index 00000000000..15b45f710ef
--- /dev/null
+++ b/extras/hs-test/infra/suite_ldp.go
@@ -0,0 +1,203 @@
+package hst
+
+import (
+ "fmt"
+ "reflect"
+ "runtime"
+ "strings"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+)
+
+// These correspond to names used in yaml config
+const (
+ ServerLdpInterfaceName = "srv"
+ ClientLdpInterfaceName = "cln"
+)
+
+var ldpTests = map[string][]func(s *LdpSuite){}
+var ldpSoloTests = map[string][]func(s *LdpSuite){}
+
+type LdpSuite struct {
+ HstSuite
+}
+
+func RegisterLdpTests(tests ...func(s *LdpSuite)) {
+ ldpTests[getTestFilename()] = tests
+}
+func RegisterSoloLdpTests(tests ...func(s *LdpSuite)) {
+ ldpSoloTests[getTestFilename()] = tests
+}
+
+func (s *LdpSuite) SetupSuite() {
+ time.Sleep(1 * time.Second)
+ s.HstSuite.SetupSuite()
+ s.ConfigureNetworkTopology("2peerVeth")
+ s.LoadContainerTopology("2peerVethLdp")
+}
+
+func (s *LdpSuite) SetupTest() {
+ s.HstSuite.SetupTest()
+
+ // Setup test conditions
+ var sessionConfig Stanza
+ sessionConfig.
+ NewStanza("session").
+ Append("enable").
+ Append("use-app-socket-api")
+
+ if strings.Contains(CurrentSpecReport().LeafNodeText, "InterruptMode") {
+ sessionConfig.Append("use-private-rx-mqs").Close()
+ s.Log("**********************INTERRUPT MODE**********************")
+ } else {
+ sessionConfig.Close()
+ }
+
+ // ... For server
+ serverContainer := s.GetContainerByName("server-vpp")
+
+ serverVpp, err := serverContainer.newVppInstance(serverContainer.AllocatedCpus, sessionConfig)
+ s.AssertNotNil(serverVpp, fmt.Sprint(err))
+
+ s.SetupServerVpp()
+
+ // ... For client
+ clientContainer := s.GetContainerByName("client-vpp")
+
+ clientVpp, err := clientContainer.newVppInstance(clientContainer.AllocatedCpus, sessionConfig)
+ s.AssertNotNil(clientVpp, fmt.Sprint(err))
+
+ s.setupClientVpp()
+
+ serverContainer.AddEnvVar("VCL_CONFIG", serverContainer.GetContainerWorkDir()+"/vcl_srv.conf")
+ clientContainer.AddEnvVar("VCL_CONFIG", clientContainer.GetContainerWorkDir()+"/vcl_cln.conf")
+
+ for _, container := range s.StartedContainers {
+ container.AddEnvVar("LD_PRELOAD", "/usr/lib/libvcl_ldpreload.so")
+ container.AddEnvVar("LDP_DEBUG", "0")
+ container.AddEnvVar("VCL_DEBUG", "0")
+ }
+}
+
+func (s *LdpSuite) TearDownTest() {
+ for _, container := range s.StartedContainers {
+ delete(container.EnvVars, "LD_PRELOAD")
+ delete(container.EnvVars, "VCL_CONFIG")
+ }
+ s.HstSuite.TearDownTest()
+
+}
+
+func (s *LdpSuite) SetupServerVpp() {
+ var srvVclConf Stanza
+ serverContainer := s.GetContainerByName("server-vpp")
+ serverVclFileName := serverContainer.GetHostWorkDir() + "/vcl_srv.conf"
+ serverVpp := serverContainer.VppInstance
+ s.AssertNil(serverVpp.Start())
+
+ serverVeth := s.GetInterfaceByName(ServerInterfaceName)
+ idx, err := serverVpp.createAfPacket(serverVeth)
+ s.AssertNil(err, fmt.Sprint(err))
+ s.AssertNotEqual(0, idx)
+
+ serverAppSocketApi := fmt.Sprintf("app-socket-api %s/var/run/app_ns_sockets/default",
+ serverContainer.GetContainerWorkDir())
+ err = srvVclConf.
+ NewStanza("vcl").
+ Append("rx-fifo-size 4000000").
+ Append("tx-fifo-size 4000000").
+ Append("app-scope-local").
+ Append("app-scope-global").
+ Append("use-mq-eventfd").
+ Append(serverAppSocketApi).Close().
+ SaveToFile(serverVclFileName)
+ s.AssertNil(err, fmt.Sprint(err))
+}
+
+func (s *LdpSuite) setupClientVpp() {
+ var clnVclConf Stanza
+ clientContainer := s.GetContainerByName("client-vpp")
+ clientVclFileName := clientContainer.GetHostWorkDir() + "/vcl_cln.conf"
+ clientVpp := clientContainer.VppInstance
+ s.AssertNil(clientVpp.Start())
+
+ clientVeth := s.GetInterfaceByName(ClientInterfaceName)
+ idx, err := clientVpp.createAfPacket(clientVeth)
+ s.AssertNil(err, fmt.Sprint(err))
+ s.AssertNotEqual(0, idx)
+
+ clientAppSocketApi := fmt.Sprintf("app-socket-api %s/var/run/app_ns_sockets/default",
+ clientContainer.GetContainerWorkDir())
+ err = clnVclConf.
+ NewStanza("vcl").
+ Append("rx-fifo-size 4000000").
+ Append("tx-fifo-size 4000000").
+ Append("app-scope-local").
+ Append("app-scope-global").
+ Append("use-mq-eventfd").
+ Append(clientAppSocketApi).Close().
+ SaveToFile(clientVclFileName)
+ s.AssertNil(err, fmt.Sprint(err))
+}
+
+var _ = Describe("LdpSuite", Ordered, ContinueOnFailure, func() {
+ var s LdpSuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SetupTest()
+ })
+ AfterAll(func() {
+ s.TearDownSuite()
+
+ })
+ AfterEach(func() {
+ s.TearDownTest()
+ })
+
+ // https://onsi.github.io/ginkgo/#dynamically-generating-specs
+ for filename, tests := range ldpTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(SuiteTimeout))
+ }
+ }
+})
+
+var _ = Describe("LdpSuiteSolo", Ordered, ContinueOnFailure, Serial, func() {
+ var s LdpSuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SetupTest()
+ })
+ AfterAll(func() {
+ s.TearDownSuite()
+ })
+ AfterEach(func() {
+ s.TearDownTest()
+ })
+
+ // https://onsi.github.io/ginkgo/#dynamically-generating-specs
+ for filename, tests := range ldpSoloTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, Label("SOLO"), func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(SuiteTimeout))
+ }
+ }
+})
diff --git a/extras/hs-test/infra/suite_nginx.go b/extras/hs-test/infra/suite_nginx.go
deleted file mode 100644
index bb1bdb0f42b..00000000000
--- a/extras/hs-test/infra/suite_nginx.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package hst
-
-import (
- "reflect"
- "runtime"
- "strings"
-
- . "github.com/onsi/ginkgo/v2"
-)
-
-// These correspond to names used in yaml config
-const (
- VppProxyContainerName = "vpp-proxy"
- NginxProxyContainerName = "nginx-proxy"
- NginxServerContainerName = "nginx-server"
- MirroringClientInterfaceName = "hstcln"
- MirroringServerInterfaceName = "hstsrv"
-)
-
-var nginxTests = map[string][]func(s *NginxSuite){}
-var nginxSoloTests = map[string][]func(s *NginxSuite){}
-
-type NginxSuite struct {
- HstSuite
-}
-
-func RegisterNginxTests(tests ...func(s *NginxSuite)) {
- nginxTests[getTestFilename()] = tests
-}
-func RegisterNginxSoloTests(tests ...func(s *NginxSuite)) {
- nginxSoloTests[getTestFilename()] = tests
-}
-
-func (s *NginxSuite) SetupSuite() {
- s.HstSuite.SetupSuite()
- s.LoadNetworkTopology("2taps")
- s.LoadContainerTopology("nginxProxyAndServer")
-}
-
-func (s *NginxSuite) SetupTest() {
- s.HstSuite.SetupTest()
-
- // Setup test conditions
- var sessionConfig Stanza
- sessionConfig.
- NewStanza("session").
- Append("enable").
- Append("use-app-socket-api")
-
- if strings.Contains(CurrentSpecReport().LeafNodeText, "InterruptMode") {
- sessionConfig.Append("use-private-rx-mqs").Close()
- s.Log("**********************INTERRUPT MODE**********************")
- } else {
- sessionConfig.Close()
- }
-
- // ... for proxy
- vppProxyContainer := s.GetContainerByName(VppProxyContainerName)
- proxyVpp, _ := vppProxyContainer.newVppInstance(vppProxyContainer.AllocatedCpus, sessionConfig)
- s.AssertNil(proxyVpp.Start())
-
- clientInterface := s.GetInterfaceByName(MirroringClientInterfaceName)
- s.AssertNil(proxyVpp.createTap(clientInterface, 1))
-
- serverInterface := s.GetInterfaceByName(MirroringServerInterfaceName)
- s.AssertNil(proxyVpp.createTap(serverInterface, 2))
-
- nginxContainer := s.GetTransientContainerByName(NginxProxyContainerName)
- nginxContainer.Create()
-
- values := struct {
- Proxy string
- Server string
- }{
- Proxy: clientInterface.Peer.Ip4AddressString(),
- Server: serverInterface.Ip4AddressString(),
- }
- nginxContainer.CreateConfig(
- "/nginx.conf",
- "./resources/nginx/nginx_proxy_mirroring.conf",
- values,
- )
- s.AssertNil(nginxContainer.Start())
-
- proxyVpp.WaitForApp("nginx-", 5)
-}
-
-var _ = Describe("NginxSuite", Ordered, ContinueOnFailure, func() {
- var s NginxSuite
- BeforeAll(func() {
- s.SetupSuite()
- })
- BeforeEach(func() {
- s.SetupTest()
- })
- AfterAll(func() {
- s.TearDownSuite()
- })
- AfterEach(func() {
- s.TearDownTest()
- })
-
- for filename, tests := range nginxTests {
- for _, test := range tests {
- test := test
- pc := reflect.ValueOf(test).Pointer()
- funcValue := runtime.FuncForPC(pc)
- testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
- It(testName, func(ctx SpecContext) {
- s.Log(testName + ": BEGIN")
- test(&s)
- }, SpecTimeout(SuiteTimeout))
- }
- }
-})
-
-var _ = Describe("NginxSuiteSolo", Ordered, ContinueOnFailure, Serial, func() {
- var s NginxSuite
- BeforeAll(func() {
- s.SetupSuite()
- })
- BeforeEach(func() {
- s.SetupTest()
- })
- AfterAll(func() {
- s.TearDownSuite()
- })
- AfterEach(func() {
- s.TearDownTest()
- })
-
- for filename, tests := range nginxSoloTests {
- for _, test := range tests {
- test := test
- pc := reflect.ValueOf(test).Pointer()
- funcValue := runtime.FuncForPC(pc)
- testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
- It(testName, Label("SOLO"), func(ctx SpecContext) {
- s.Log(testName + ": BEGIN")
- test(&s)
- }, SpecTimeout(SuiteTimeout))
- }
- }
-})
diff --git a/extras/hs-test/infra/suite_nginx_proxy.go b/extras/hs-test/infra/suite_nginx_proxy.go
new file mode 100644
index 00000000000..75215cfc78d
--- /dev/null
+++ b/extras/hs-test/infra/suite_nginx_proxy.go
@@ -0,0 +1,191 @@
+package hst
+
+import (
+ "fmt"
+ "reflect"
+ "runtime"
+ "strings"
+
+ . "github.com/onsi/ginkgo/v2"
+)
+
+// These correspond to names used in yaml config
+const (
+ NginxProxyContainerName = "nginx-proxy"
+ NginxServerContainerName = "nginx-server"
+ MirroringClientInterfaceName = "hstcln"
+ MirroringServerInterfaceName = "hstsrv"
+)
+
+var nginxProxyTests = map[string][]func(s *NginxProxySuite){}
+var nginxProxySoloTests = map[string][]func(s *NginxProxySuite){}
+
+type NginxProxySuite struct {
+ HstSuite
+ proxyPort uint16
+ maxTimeout int
+}
+
+func RegisterNginxProxyTests(tests ...func(s *NginxProxySuite)) {
+ nginxProxyTests[getTestFilename()] = tests
+}
+func RegisterNginxProxySoloTests(tests ...func(s *NginxProxySuite)) {
+ nginxProxySoloTests[getTestFilename()] = tests
+}
+
+func (s *NginxProxySuite) SetupSuite() {
+ s.HstSuite.SetupSuite()
+ s.LoadNetworkTopology("2taps")
+ s.LoadContainerTopology("nginxProxy")
+
+ if *IsVppDebug {
+ s.maxTimeout = 600
+ } else {
+ s.maxTimeout = 60
+ }
+}
+
+func (s *NginxProxySuite) SetupTest() {
+ s.HstSuite.SetupTest()
+
+ // VPP
+ var sessionConfig Stanza
+ sessionConfig.
+ NewStanza("session").
+ Append("enable").
+ Append("use-app-socket-api")
+
+ vppContainer := s.GetContainerByName(VppContainerName)
+ vpp, err := vppContainer.newVppInstance(vppContainer.AllocatedCpus, sessionConfig)
+ s.AssertNotNil(vpp, fmt.Sprint(err))
+ s.AssertNil(vpp.Start())
+ clientInterface := s.GetInterfaceByName(MirroringClientInterfaceName)
+ s.AssertNil(vpp.createTap(clientInterface, 1))
+ serverInterface := s.GetInterfaceByName(MirroringServerInterfaceName)
+ s.AssertNil(vpp.createTap(serverInterface, 2))
+
+ // nginx proxy
+ nginxProxyContainer := s.GetTransientContainerByName(NginxProxyContainerName)
+ s.AssertNil(nginxProxyContainer.Create())
+ s.proxyPort = 80
+ values := struct {
+ LogPrefix string
+ Proxy string
+ Server string
+ Port uint16
+ }{
+ LogPrefix: nginxProxyContainer.Name,
+ Proxy: clientInterface.Peer.Ip4AddressString(),
+ Server: serverInterface.Ip4AddressString(),
+ Port: s.proxyPort,
+ }
+ nginxProxyContainer.CreateConfig(
+ "/nginx.conf",
+ "./resources/nginx/nginx_proxy_mirroring.conf",
+ values,
+ )
+ s.AssertNil(nginxProxyContainer.Start())
+
+ // nginx HTTP server
+ nginxServerContainer := s.GetTransientContainerByName(NginxServerContainerName)
+ s.AssertNil(nginxServerContainer.Create())
+ nginxSettings := struct {
+ LogPrefix string
+ Address string
+ Timeout int
+ }{
+ LogPrefix: nginxServerContainer.Name,
+ Address: serverInterface.Ip4AddressString(),
+ Timeout: s.maxTimeout,
+ }
+ nginxServerContainer.CreateConfig(
+ "/nginx.conf",
+ "./resources/nginx/nginx_server_mirroring.conf",
+ nginxSettings,
+ )
+ s.AssertNil(nginxServerContainer.Start())
+
+ vpp.WaitForApp("nginx-", 5)
+}
+
+func (s *NginxProxySuite) TearDownTest() {
+ if CurrentSpecReport().Failed() {
+ s.CollectNginxLogs(NginxServerContainerName)
+ s.CollectNginxLogs(NginxProxyContainerName)
+ }
+ s.HstSuite.TearDownTest()
+}
+
+func (s *NginxProxySuite) ProxyPort() uint16 {
+ return s.proxyPort
+}
+
+func (s *NginxProxySuite) ProxyAddr() string {
+ return s.GetInterfaceByName(MirroringClientInterfaceName).Peer.Ip4AddressString()
+}
+
+func (s *NginxProxySuite) CurlDownloadResource(uri string) {
+ args := fmt.Sprintf("-w @/tmp/write_out_download --max-time %d --insecure --noproxy '*' --remote-name --output-dir /tmp %s", s.maxTimeout, uri)
+ writeOut, log := s.RunCurlContainer(args)
+ s.AssertContains(writeOut, "GET response code: 200")
+ s.AssertNotContains(log, "bytes remaining to read")
+ s.AssertNotContains(log, "Operation timed out")
+}
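+
+// Usage sketch from a registered test (test name and resource are hypothetical):
+//
+//  func SomeMirroringTest(s *NginxProxySuite) {
+//      uri := fmt.Sprintf("http://%s:%d/httpTestFile", s.ProxyAddr(), s.ProxyPort())
+//      s.CurlDownloadResource(uri)
+//  }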
+
+var _ = Describe("NginxProxySuite", Ordered, ContinueOnFailure, func() {
+ var s NginxProxySuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SetupTest()
+ })
+ AfterAll(func() {
+ s.TearDownSuite()
+ })
+ AfterEach(func() {
+ s.TearDownTest()
+ })
+
+ for filename, tests := range nginxProxyTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(SuiteTimeout))
+ }
+ }
+})
+
+var _ = Describe("NginxProxySuiteSolo", Ordered, ContinueOnFailure, Serial, func() {
+ var s NginxProxySuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SetupTest()
+ })
+ AfterAll(func() {
+ s.TearDownSuite()
+ })
+ AfterEach(func() {
+ s.TearDownTest()
+ })
+
+ for filename, tests := range nginxProxySoloTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, Label("SOLO"), func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(SuiteTimeout))
+ }
+ }
+})
diff --git a/extras/hs-test/infra/suite_no_topo.go b/extras/hs-test/infra/suite_no_topo.go
index 5f53f55f1bb..9b4998a77a1 100644
--- a/extras/hs-test/infra/suite_no_topo.go
+++ b/extras/hs-test/infra/suite_no_topo.go
@@ -60,6 +60,18 @@ func (s *NoTopoSuite) SetupTest() {
s.AssertNil(vpp.createTap(tapInterface), "failed to create tap interface")
}
+func (s *NoTopoSuite) VppAddr() string {
+ return s.GetInterfaceByName(TapInterfaceName).Peer.Ip4AddressString()
+}
+
+func (s *NoTopoSuite) VppIfName() string {
+ return s.GetInterfaceByName(TapInterfaceName).Peer.Name()
+}
+
+func (s *NoTopoSuite) HostAddr() string {
+ return s.GetInterfaceByName(TapInterfaceName).Ip4AddressString()
+}
+
var _ = Describe("NoTopoSuite", Ordered, ContinueOnFailure, func() {
var s NoTopoSuite
BeforeAll(func() {
diff --git a/extras/hs-test/infra/suite_vpp_proxy.go b/extras/hs-test/infra/suite_vpp_proxy.go
new file mode 100644
index 00000000000..868684bcede
--- /dev/null
+++ b/extras/hs-test/infra/suite_vpp_proxy.go
@@ -0,0 +1,212 @@
+// Suite for VPP proxy testing
+//
+// The topology consists of 3 containers: curl (client), VPP (proxy), nginx (target HTTP server).
+// VPP has 2 tap interfaces configured, one for the client network and the second for the server/target network.
+
+package hst
+
+import (
+ "fmt"
+ "reflect"
+ "runtime"
+ "strings"
+
+ . "github.com/onsi/ginkgo/v2"
+)
+
+// These correspond to names used in yaml config
+const (
+ VppProxyContainerName = "vpp-proxy"
+ ClientTapInterfaceName = "hstcln"
+ ServerTapInterfaceName = "hstsrv"
+ CurlContainerTestFile = "/tmp/testFile"
+)
+
+type VppProxySuite struct {
+ HstSuite
+ nginxPort uint16
+ maxTimeout int
+}
+
+var vppProxyTests = map[string][]func(s *VppProxySuite){}
+var vppProxySoloTests = map[string][]func(s *VppProxySuite){}
+
+func RegisterVppProxyTests(tests ...func(s *VppProxySuite)) {
+ vppProxyTests[getTestFilename()] = tests
+}
+
+func RegisterVppProxySoloTests(tests ...func(s *VppProxySuite)) {
+ vppProxySoloTests[getTestFilename()] = tests
+}
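+
+// Tests register themselves from their own files; a minimal sketch of the
+// assumed pattern (test body is hypothetical):
+//
+//  func init() {
+//      RegisterVppProxyTests(MyProxyTest)
+//  }
+//
+//  func MyProxyTest(s *VppProxySuite) {
+//      uri := fmt.Sprintf("http://%s:%d/index.html", s.NginxAddr(), s.NginxPort())
+//      s.CurlDownloadResource(uri)
+//  }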
+
+func (s *VppProxySuite) SetupSuite() {
+ s.HstSuite.SetupSuite()
+ s.LoadNetworkTopology("2taps")
+ s.LoadContainerTopology("vppProxy")
+
+ if *IsVppDebug {
+ s.maxTimeout = 600
+ } else {
+ s.maxTimeout = 60
+ }
+}
+
+func (s *VppProxySuite) SetupTest() {
+ s.HstSuite.SetupTest()
+
+ // VPP HTTP connect-proxy
+ vppContainer := s.GetContainerByName(VppProxyContainerName)
+ vpp, err := vppContainer.newVppInstance(vppContainer.AllocatedCpus)
+ s.AssertNotNil(vpp, fmt.Sprint(err))
+ s.AssertNil(vpp.Start())
+ clientInterface := s.GetInterfaceByName(ClientTapInterfaceName)
+ s.AssertNil(vpp.createTap(clientInterface, 1))
+ serverInterface := s.GetInterfaceByName(ServerTapInterfaceName)
+ s.AssertNil(vpp.createTap(serverInterface, 2))
+
+ // nginx HTTP server
+ nginxContainer := s.GetTransientContainerByName(NginxServerContainerName)
+ s.AssertNil(nginxContainer.Create())
+ s.nginxPort = 80
+ nginxSettings := struct {
+ LogPrefix string
+ Address string
+ Port uint16
+ Timeout int
+ }{
+ LogPrefix: nginxContainer.Name,
+ Address: serverInterface.Ip4AddressString(),
+ Port: s.nginxPort,
+ Timeout: s.maxTimeout,
+ }
+ nginxContainer.CreateConfig(
+ "/nginx.conf",
+ "./resources/nginx/nginx_server.conf",
+ nginxSettings,
+ )
+ s.AssertNil(nginxContainer.Start())
+}
+
+func (s *VppProxySuite) TearDownTest() {
+ vpp := s.GetContainerByName(VppProxyContainerName).VppInstance
+ if CurrentSpecReport().Failed() {
+ s.Log(vpp.Vppctl("show session verbose 2"))
+ s.Log(vpp.Vppctl("show error"))
+ s.CollectNginxLogs(NginxServerContainerName)
+ }
+ s.HstSuite.TearDownTest()
+}
+
+func (s *VppProxySuite) NginxPort() uint16 {
+ return s.nginxPort
+}
+
+func (s *VppProxySuite) NginxAddr() string {
+ return s.GetInterfaceByName(ServerTapInterfaceName).Ip4AddressString()
+}
+
+func (s *VppProxySuite) VppProxyAddr() string {
+ return s.GetInterfaceByName(ClientTapInterfaceName).Peer.Ip4AddressString()
+}
+
+func (s *VppProxySuite) CurlRequest(targetUri string) (string, string) {
+ args := fmt.Sprintf("--insecure --noproxy '*' %s", targetUri)
+ body, log := s.RunCurlContainer(args)
+ return body, log
+}
+
+func (s *VppProxySuite) CurlRequestViaTunnel(targetUri string, proxyUri string) (string, string) {
+ args := fmt.Sprintf("--max-time %d --insecure -p -x %s %s", s.maxTimeout, proxyUri, targetUri)
+ body, log := s.RunCurlContainer(args)
+ return body, log
+}
+
+func (s *VppProxySuite) CurlDownloadResource(uri string) {
+ args := fmt.Sprintf("-w @/tmp/write_out_download --max-time %d --insecure --noproxy '*' --remote-name --output-dir /tmp %s", s.maxTimeout, uri)
+ writeOut, log := s.RunCurlContainer(args)
+ s.AssertContains(writeOut, "GET response code: 200")
+ s.AssertNotContains(log, "bytes remaining to read")
+ s.AssertNotContains(log, "Operation timed out")
+}
+
+func (s *VppProxySuite) CurlUploadResource(uri, file string) {
+ args := fmt.Sprintf("-w @/tmp/write_out_upload --max-time %d --insecure --noproxy '*' -T %s %s", s.maxTimeout, file, uri)
+ writeOut, log := s.RunCurlContainer(args)
+ s.AssertContains(writeOut, "PUT response code: 201")
+ s.AssertNotContains(log, "Operation timed out")
+}
+
+func (s *VppProxySuite) CurlDownloadResourceViaTunnel(uri string, proxyUri string) {
+ args := fmt.Sprintf("-w @/tmp/write_out_download_connect --max-time %d --insecure -p -x %s --remote-name --output-dir /tmp %s", s.maxTimeout, proxyUri, uri)
+ writeOut, log := s.RunCurlContainer(args)
+ s.AssertContains(writeOut, "CONNECT response code: 200")
+ s.AssertContains(writeOut, "GET response code: 200")
+ s.AssertNotContains(log, "bytes remaining to read")
+ s.AssertNotContains(log, "Operation timed out")
+}
+
+func (s *VppProxySuite) CurlUploadResourceViaTunnel(uri, proxyUri, file string) {
+ args := fmt.Sprintf("-w @/tmp/write_out_upload_connect --max-time %d --insecure -p -x %s -T %s %s", s.maxTimeout, proxyUri, file, uri)
+ writeOut, log := s.RunCurlContainer(args)
+ s.AssertContains(writeOut, "CONNECT response code: 200")
+ s.AssertContains(writeOut, "PUT response code: 201")
+ s.AssertNotContains(log, "Operation timed out")
+}
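+
+// The /tmp/write_out_* files referenced above are curl --write-out templates
+// provided inside the curl container image; judging by the assertions, they
+// presumably emit lines such as "GET response code: %{http_code}" (assumption).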
+
+var _ = Describe("VppProxySuite", Ordered, ContinueOnFailure, func() {
+ var s VppProxySuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SetupTest()
+ })
+ AfterAll(func() {
+ s.TearDownSuite()
+ })
+ AfterEach(func() {
+ s.TearDownTest()
+ })
+
+ for filename, tests := range vppProxyTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(SuiteTimeout))
+ }
+ }
+})
+
+var _ = Describe("VppProxySuiteSolo", Ordered, ContinueOnFailure, func() {
+ var s VppProxySuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SetupTest()
+ })
+ AfterAll(func() {
+ s.TearDownSuite()
+ })
+ AfterEach(func() {
+ s.TearDownTest()
+ })
+
+ for filename, tests := range vppProxySoloTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, Label("SOLO"), func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(SuiteTimeout))
+ }
+ }
+})
diff --git a/extras/hs-test/infra/utils.go b/extras/hs-test/infra/utils.go
index 9619efbbf63..25d8519cb8a 100644
--- a/extras/hs-test/infra/utils.go
+++ b/extras/hs-test/infra/utils.go
@@ -1,11 +1,14 @@
package hst
import (
+ "errors"
"fmt"
"io"
"net"
"net/http"
+ "net/http/httputil"
"os"
+ "os/exec"
"strings"
"time"
)
@@ -96,6 +99,14 @@ func NewHttpClient() *http.Client {
return client
}
+func DumpHttpResp(resp *http.Response, body bool) string {
+ dump, err := httputil.DumpResponse(resp, body)
+ if err != nil {
+ return ""
+ }
+ return string(dump)
+}
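+
+// Usage sketch (address is hypothetical):
+//
+//  resp, err := NewHttpClient().Get("http://10.0.0.2:80/")
+//  if err == nil {
+//      s.Log(DumpHttpResp(resp, true))
+//  }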
+
func TcpSendReceive(address, data string) (string, error) {
conn, err := net.DialTimeout("tcp", address, time.Second*30)
if err != nil {
@@ -117,3 +128,189 @@ func TcpSendReceive(address, data string) (string, error) {
}
return string(reply), nil
}
+
+/*
+RunCurlContainer executes a curl command with the given args.
+A container named "curl" must be available.
+Curl runs in verbose mode with the progress meter switched off by default.
+*/
+func (s *HstSuite) RunCurlContainer(args string) (string, string) {
+ curlCont := s.GetContainerByName("curl")
+ cmd := fmt.Sprintf("curl -v -s %s", args)
+ s.Log(cmd)
+ curlCont.ExtraRunningArgs = cmd
+ curlCont.Run()
+ stdout, stderr := curlCont.GetOutput()
+ s.Log(stderr)
+ s.Log(stdout)
+ return stdout, stderr
+}
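+
+// Usage sketch (URI is hypothetical):
+//
+//  stdout, stderr := s.RunCurlContainer("--max-time 60 http://10.0.0.2:80/index.html")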
+
+/*
+CollectNginxLogs saves access and error logs to the test execution directory.
+Nginx logging needs to be configured the following way:
+
+ - error_log <default-work-dir>/{{.LogPrefix}}-error.log;
+ - access_log <default-work-dir>/{{.LogPrefix}}-access.log;
+
+where LogPrefix is set to nginxContainer.Name.
+*/
+func (s *HstSuite) CollectNginxLogs(containerName string) {
+ nginxContainer := s.GetContainerByName(containerName)
+ targetDir := nginxContainer.Suite.getLogDirPath()
+ source := nginxContainer.GetHostWorkDir() + "/" + nginxContainer.Name + "-"
+ cmd := exec.Command("cp", "-t", targetDir, source+"error.log", source+"access.log")
+ s.Log(cmd.String())
+ err := cmd.Run()
+ if err != nil {
+ s.Log(fmt.Sprint(err))
+ }
+}
+
+/*
+CollectEnvoyLogs saves access logs to the test execution directory.
+The Envoy access log path needs to be set the following way:
+<default-work-dir>/{{.LogPrefix}}-access.log
+where LogPrefix is set to envoyContainer.Name.
+*/
+func (s *HstSuite) CollectEnvoyLogs(containerName string) {
+ envoyContainer := s.GetContainerByName(containerName)
+ targetDir := envoyContainer.Suite.getLogDirPath()
+ source := envoyContainer.GetHostWorkDir() + "/" + envoyContainer.Name + "-"
+ cmd := exec.Command("cp", "-t", targetDir, source+"access.log")
+ s.Log(cmd.String())
+ err := cmd.Run()
+ if err != nil {
+ s.Log(fmt.Sprint(err))
+ }
+}
+
+func (s *HstSuite) StartIperfServerApp(running chan error, done chan struct{}, env []string) {
+ cmd := exec.Command("iperf3", "-4", "-s", "-p", s.GetPortFromPpid())
+ if env != nil {
+ cmd.Env = env
+ }
+ s.Log(cmd)
+ err := cmd.Start()
+ if err != nil {
+ msg := fmt.Errorf("failed to start iperf server: %v", err)
+ running <- msg
+ return
+ }
+ running <- nil
+ <-done
+ cmd.Process.Kill()
+}
+
+func (s *HstSuite) StartIperfClientApp(ipAddress string, env []string, clnCh chan error, clnRes chan string) {
+ defer func() {
+ clnCh <- nil
+ }()
+
+ nTries := 0
+
+ for {
+ cmd := exec.Command("iperf3", "-c", ipAddress, "-u", "-l", "1460", "-b", "10g", "-p", s.GetPortFromPpid())
+ if env != nil {
+ cmd.Env = env
+ }
+ s.Log(cmd)
+ o, err := cmd.CombinedOutput()
+ if err != nil {
+ if nTries > 5 {
+ clnRes <- ""
+ clnCh <- fmt.Errorf("failed to start client app '%s'.\n%s", err, o)
+ return
+ }
+ time.Sleep(1 * time.Second)
+ nTries++
+ continue
+ } else {
+ clnRes <- fmt.Sprintf("Client output: %s", o)
+ }
+ break
+ }
+}
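+
+// Usage sketch for the iperf helpers above (server address is hypothetical);
+// the server runs in a goroutine and signals readiness over 'running':
+//
+//  running := make(chan error)
+//  done := make(chan struct{})
+//  defer close(done)
+//  go s.StartIperfServerApp(running, done, nil)
+//  s.AssertNil(<-running)
+//
+//  clnCh := make(chan error)
+//  clnRes := make(chan string)
+//  go s.StartIperfClientApp("10.0.0.1", nil, clnCh, clnRes)
+//  s.Log(<-clnRes)
+//  s.AssertNil(<-clnCh)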
+
+func (s *HstSuite) StartHttpServer(running chan struct{}, done chan struct{}, addressPort, netNs string) {
+ cmd := newCommand([]string{"./http_server", addressPort, s.Ppid, s.ProcessIndex}, netNs)
+ err := cmd.Start()
+ s.Log(cmd)
+ if err != nil {
+ s.Log("Failed to start http server: " + fmt.Sprint(err))
+ return
+ }
+ running <- struct{}{}
+ <-done
+ cmd.Process.Kill()
+}
+
+func (s *HstSuite) StartWget(finished chan error, server_ip, port, query, netNs string) {
+ defer func() {
+ finished <- errors.New("wget error")
+ }()
+
+ cmd := newCommand([]string{"wget", "--timeout=10", "--no-proxy", "--tries=5", "-O", "/dev/null", server_ip + ":" + port + "/" + query},
+ netNs)
+ s.Log(cmd)
+ o, err := cmd.CombinedOutput()
+ if err != nil {
+ finished <- fmt.Errorf("wget error: '%v\n\n%s'", err, o)
+ return
+ } else if !strings.Contains(string(o), "200 OK") {
+ finished <- fmt.Errorf("wget error: response not 200 OK")
+ return
+ }
+ finished <- nil
+}
+
+// Start a server app. 'processName' is used to check whether the app started correctly.
+func (s *HstSuite) StartServerApp(c *Container, processName string, cmd string,
+ running chan error, done chan struct{}) {
+
+ s.Log("starting server")
+ c.ExecServer(cmd)
+ cmd2 := exec.Command("docker", "exec", c.Name, "pidof", processName)
+ err := cmd2.Run()
+ if err != nil {
+ msg := fmt.Errorf("failed to start server app: %v", err)
+ running <- msg
+ <-done
+ return
+ }
+ running <- nil
+ <-done
+}
+
+func (s *HstSuite) StartClientApp(c *Container, cmd string,
+ clnCh chan error, clnRes chan string) {
+ defer func() {
+ close(clnCh)
+ close(clnRes)
+ }()
+
+ s.Log("starting client app, please wait")
+
+ nTries := 0
+ for {
+ // exec.Cmd can only be used once, which is why it's in the loop
+ cmd2 := exec.Command("/bin/sh", "-c", "docker exec "+c.getEnvVarsAsCliOption()+" "+
+ c.Name+" "+cmd)
+ s.Log(cmd2)
+ o, err := cmd2.CombinedOutput()
+ if err != nil {
+ s.Log(err)
+ if nTries > 5 {
+ clnRes <- ""
+ clnCh <- fmt.Errorf("failed to start client app '%s'", err)
+ s.AssertNil(err, fmt.Sprint(err))
+ break
+ }
+ time.Sleep(1 * time.Second)
+ nTries++
+ } else {
+ clnRes <- fmt.Sprintf("Client output: %s", o)
+ break
+ }
+ }
+}
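+
+// The container-based server/client helpers above follow the same channel
+// protocol; a sketch (containers and commands are hypothetical):
+//
+//  go s.StartServerApp(serverContainer, "iperf3", "iperf3 -s -D", running, done)
+//  s.AssertNil(<-running)
+//  go s.StartClientApp(clientContainer, "iperf3 -c 10.0.0.1", clnCh, clnRes)
+//  s.Log(<-clnRes)
+//  s.AssertNil(<-clnCh)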
diff --git a/extras/hs-test/infra/vppinstance.go b/extras/hs-test/infra/vppinstance.go
index 48d2b783917..a1f2ce46ed3 100644
--- a/extras/hs-test/infra/vppinstance.go
+++ b/extras/hs-test/infra/vppinstance.go
@@ -2,8 +2,8 @@ package hst
import (
"context"
+ "encoding/json"
"fmt"
- "go.fd.io/govpp/binapi/ethernet_types"
"io"
"net"
"os"
@@ -14,6 +14,8 @@ import (
"syscall"
"time"
+ "go.fd.io/govpp/binapi/ethernet_types"
+
"github.com/edwarnicke/exechelper"
. "github.com/onsi/ginkgo/v2"
"github.com/sirupsen/logrus"
@@ -32,19 +34,15 @@ const vppConfigTemplate = `unix {
nodaemon
log %[1]s%[4]s
full-coredump
+ coredump-size unlimited
cli-listen %[1]s%[2]s
runtime-dir %[1]s/var/run
- gid vpp
}
api-trace {
on
}
-api-segment {
- gid vpp
-}
-
socksvr {
socket-name %[1]s%[3]s
}
@@ -88,6 +86,20 @@ type VppInstance struct {
Connection *core.Connection
ApiStream api.Stream
Cpus []int
+ CpuConfig VppCpuConfig
+}
+
+type VppCpuConfig struct {
+ PinMainCpu bool
+ PinWorkersCorelist bool
+ SkipCores int
+}
+
+type VppMemTrace struct {
+ Count int `json:"count"`
+ Size int `json:"bytes"`
+ Sample string `json:"sample"`
+ Traceback []string `json:"traceback"`
}
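+
+// An entry in the saved trace.json is expected to decode into VppMemTrace
+// roughly as follows (illustrative values):
+//
+//  {"count": 3, "bytes": 192, "sample": "0x7f1c...", "traceback": ["alloc_fn", "caller_fn"]}
+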
func (vpp *VppInstance) getSuite() *HstSuite {
@@ -131,7 +143,7 @@ func (vpp *VppInstance) Start() error {
defaultApiSocketFilePath,
defaultLogFilePath,
)
- configContent += vpp.generateCpuConfig()
+ configContent += vpp.generateVPPCpuConfig()
for _, c := range vpp.AdditionalConfig {
configContent += c.ToString()
}
@@ -464,7 +476,7 @@ func (vpp *VppInstance) createTap(
}
func (vpp *VppInstance) saveLogs() {
- logTarget := vpp.Container.getLogDirPath() + "vppinstance-" + vpp.Container.Name + ".log"
+ logTarget := vpp.getSuite().getLogDirPath() + "vppinstance-" + vpp.Container.Name + ".log"
logSource := vpp.Container.GetHostWorkDir() + defaultLogFilePath
cmd := exec.Command("cp", logSource, logTarget)
vpp.getSuite().Log(cmd.String())
@@ -476,26 +488,160 @@ func (vpp *VppInstance) Disconnect() {
vpp.ApiStream.Close()
}
-func (vpp *VppInstance) generateCpuConfig() string {
+func (vpp *VppInstance) setDefaultCpuConfig() {
+ vpp.CpuConfig.PinMainCpu = true
+ vpp.CpuConfig.PinWorkersCorelist = true
+ vpp.CpuConfig.SkipCores = 0
+}
+
+func (vpp *VppInstance) generateVPPCpuConfig() string {
var c Stanza
var s string
+ startCpu := 0
if len(vpp.Cpus) < 1 {
return ""
}
- c.NewStanza("cpu").
- Append(fmt.Sprintf("main-core %d", vpp.Cpus[0]))
- vpp.getSuite().Log(fmt.Sprintf("main-core %d", vpp.Cpus[0]))
- workers := vpp.Cpus[1:]
+
+ c.NewStanza("cpu")
+
+ // If skip-cores is set, use it as the starting index when assigning the main/worker CPUs
+ if vpp.CpuConfig.SkipCores != 0 {
+ c.Append(fmt.Sprintf("skip-cores %d", vpp.CpuConfig.SkipCores))
+ vpp.getSuite().Log(fmt.Sprintf("skip-cores %d", vpp.CpuConfig.SkipCores))
+ }
+
+ if len(vpp.Cpus) > vpp.CpuConfig.SkipCores {
+ startCpu = vpp.CpuConfig.SkipCores
+ }
+
+ if vpp.CpuConfig.PinMainCpu {
+ c.Append(fmt.Sprintf("main-core %d", vpp.Cpus[startCpu]))
+ vpp.getSuite().Log(fmt.Sprintf("main-core %d", vpp.Cpus[startCpu]))
+ }
+
+ workers := vpp.Cpus[startCpu+1:]
if len(workers) > 0 {
- for i := 0; i < len(workers); i++ {
- if i != 0 {
- s = s + ", "
+ if vpp.CpuConfig.PinWorkersCorelist {
+ for i := 0; i < len(workers); i++ {
+ if i != 0 {
+ s = s + ", "
+ }
+ s = s + fmt.Sprintf("%d", workers[i])
}
- s = s + fmt.Sprintf("%d", workers[i])
+ c.Append(fmt.Sprintf("corelist-workers %s", s))
+ vpp.getSuite().Log("corelist-workers " + s)
+ } else {
+ s = fmt.Sprintf("%d", len(workers))
+ c.Append(fmt.Sprintf("workers %s", s))
+ vpp.getSuite().Log("workers " + s)
}
- c.Append(fmt.Sprintf("corelist-workers %s", s))
- vpp.getSuite().Log("corelist-workers " + s)
}
+
return c.Close().ToString()
}
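+
+// For example, with Cpus=[2,3,4,5], SkipCores=1, and both pinning options
+// enabled, the generated stanza would be (illustrative):
+//
+//  cpu {
+//    skip-cores 1
+//    main-core 3
+//    corelist-workers 4, 5
+//  }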
+
+// EnableMemoryTrace enables memory tracing on the VPP main-heap
+func (vpp *VppInstance) EnableMemoryTrace() {
+ vpp.getSuite().Log(vpp.Vppctl("memory-trace on main-heap"))
+}
+
+// GetMemoryTrace saves the VPP memory trace to a JSON file and parses it for analysis
+func (vpp *VppInstance) GetMemoryTrace() ([]VppMemTrace, error) {
+ var trace []VppMemTrace
+ vpp.getSuite().Log(vpp.Vppctl("save memory-trace trace.json"))
+ err := vpp.Container.GetFile("/tmp/trace.json", "/tmp/trace.json")
+ if err != nil {
+ return nil, err
+ }
+ fileBytes, err := os.ReadFile("/tmp/trace.json")
+ if err != nil {
+ return nil, err
+ }
+ err = json.Unmarshal(fileBytes, &trace)
+ if err != nil {
+ return nil, err
+ }
+ return trace, nil
+}
+
+// memTracesSuppressCli filters out CLI-related samples
+func memTracesSuppressCli(traces []VppMemTrace) []VppMemTrace {
+ var filtered []VppMemTrace
+ for i := 0; i < len(traces); i++ {
+ isCli := false
+ for j := 0; j < len(traces[i].Traceback); j++ {
+ if strings.Contains(traces[i].Traceback[j], "unix_cli") {
+ isCli = true
+ break
+ }
+ }
+ if !isCli {
+ filtered = append(filtered, traces[i])
+ }
+ }
+ return filtered
+}
+
+// MemLeakCheck compares memory traces taken at different points in time, analyzes whether memory leaked, and produces a report
+func (vpp *VppInstance) MemLeakCheck(first, second []VppMemTrace) {
+ totalBytes := 0
+ totalCounts := 0
+ trace1 := memTracesSuppressCli(first)
+ trace2 := memTracesSuppressCli(second)
+ report := ""
+ for i := 0; i < len(trace2); i++ {
+ match := false
+ for j := 0; j < len(trace1); j++ {
+ if trace1[j].Sample == trace2[i].Sample {
+ if trace2[i].Size > trace1[j].Size {
+ deltaBytes := trace2[i].Size - trace1[j].Size
+ deltaCounts := trace2[i].Count - trace1[j].Count
+ report += fmt.Sprintf("grow %d byte(s) in %d allocation(s) from:\n", deltaBytes, deltaCounts)
+ for j := 0; j < len(trace2[i].Traceback); j++ {
+ report += fmt.Sprintf("\t#%d %s\n", j, trace2[i].Traceback[j])
+ }
+ totalBytes += deltaBytes
+ totalCounts += deltaCounts
+ }
+ match = true
+ break
+ }
+ }
+ if !match {
+ report += fmt.Sprintf("\nleak of %d byte(s) in %d allocation(s) from:\n", trace2[i].Size, trace2[i].Count)
+ for j := 0; j < len(trace2[i].Traceback); j++ {
+ report += fmt.Sprintf("\t#%d %s\n", j, trace2[i].Traceback[j])
+ }
+ totalBytes += trace2[i].Size
+ totalCounts += trace2[i].Count
+ }
+ }
+ summary := fmt.Sprintf("\nSUMMARY: %d byte(s) leaked in %d allocation(s)\n", totalBytes, totalCounts)
+ AddReportEntry(summary, report)
+}
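+
+// Typical leak-check flow (sketch): take a baseline trace, run the workload,
+// take a second trace, then diff the two:
+//
+//  vpp.EnableMemoryTrace()
+//  first, err := vpp.GetMemoryTrace()
+//  s.AssertNil(err)
+//  // ... exercise the code under test ...
+//  second, err := vpp.GetMemoryTrace()
+//  s.AssertNil(err)
+//  vpp.MemLeakCheck(first, second)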
+
+// CollectEventLogs saves event logs to the test execution directory
+func (vpp *VppInstance) CollectEventLogs() {
+ vpp.getSuite().Log(vpp.Vppctl("event-logger save event_log"))
+ targetDir := vpp.Container.Suite.getLogDirPath()
+ err := vpp.Container.GetFile("/tmp/event_log", targetDir+"/"+vpp.Container.Name+"-event_log")
+ if err != nil {
+ vpp.getSuite().Log(fmt.Sprint(err))
+ }
+}
+
+// EnablePcapTrace enables packet capture on all interfaces, capped at 10000 packets
+func (vpp *VppInstance) EnablePcapTrace() {
+ vpp.getSuite().Log(vpp.Vppctl("pcap trace rx tx max 10000 intfc any file vppTest.pcap"))
+}
+
+// CollectPcapTrace saves pcap trace to the test execution directory
+func (vpp *VppInstance) CollectPcapTrace() {
+ vpp.getSuite().Log(vpp.Vppctl("pcap trace off"))
+ targetDir := vpp.Container.Suite.getLogDirPath()
+ err := vpp.Container.GetFile("/tmp/vppTest.pcap", targetDir+"/"+vpp.Container.Name+".pcap")
+ if err != nil {
+ vpp.getSuite().Log(fmt.Sprint(err))
+ }
+}