author    Adrian Villin <avillin@cisco.com>       2024-06-14 09:32:39 +0200
committer Dave Wallace <dwallacelf@gmail.com>     2024-06-14 18:10:26 +0000
commit    4677d920c0b0ff1f1aae81fb2f0052d939a2e89c (patch)
tree      9c1adf90f6c1a977b622c1d0211e0e86d1bef985 /extras/hs-test/infra
parent    2aa0f0da5dedcf6301c74a39b5e3749359e07e6d (diff)
hs-test: separate infra from tests

- most functions and vars now start with a capital letter: needed to
  access them outside the package that declares them
- updated README.md
- very minor changes in MAKEFILE

Type: test
Change-Id: I4b5a194f08f09d59e372e57da6451fbb5a1de4da
Signed-off-by: Adrian Villin <avillin@cisco.com>
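The capitalization rule comes from Go's visibility model: only identifiers that
begin with an upper-case letter are exported from the package that declares them.
A minimal illustration (hypothetical names, not part of this patch):

    package hst

    func SetupSuite() {}  // exported: callable as hst.SetupSuite() from test packages
    func setupSuite() {}  // unexported: visible only inside package hst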
Diffstat (limited to 'extras/hs-test/infra')
-rw-r--r--   extras/hs-test/infra/address_allocator.go    98
-rw-r--r--   extras/hs-test/infra/container.go            380
-rw-r--r--   extras/hs-test/infra/cpu.go                  100
-rw-r--r--   extras/hs-test/infra/hst_suite.go            544
-rw-r--r--   extras/hs-test/infra/netconfig.go            383
-rw-r--r--   extras/hs-test/infra/suite_nginx.go          137
-rw-r--r--   extras/hs-test/infra/suite_no_topo.go        112
-rw-r--r--   extras/hs-test/infra/suite_ns.go             121
-rw-r--r--   extras/hs-test/infra/suite_tap.go             88
-rw-r--r--   extras/hs-test/infra/suite_veth.go           146
-rw-r--r--   extras/hs-test/infra/topo.go                  25
-rw-r--r--   extras/hs-test/infra/utils.go                119
-rw-r--r--   extras/hs-test/infra/vppinstance.go          500
13 files changed, 2753 insertions(+), 0 deletions(-)
diff --git a/extras/hs-test/infra/address_allocator.go b/extras/hs-test/infra/address_allocator.go
new file mode 100644
index 00000000000..cb647024412
--- /dev/null
+++ b/extras/hs-test/infra/address_allocator.go
@@ -0,0 +1,98 @@
+package hst
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/edwarnicke/exechelper"
+)
+
+type AddressCounter = int
+
+type Ip4AddressAllocator struct {
+ networks map[int]AddressCounter
+ chosenOctet int
+ assignedIps []string
+}
+
+func (a *Ip4AddressAllocator) AddNetwork(networkNumber int) {
+ a.networks[networkNumber] = 1
+}
+
+func (a *Ip4AddressAllocator) NewIp4InterfaceAddress(inputNetworkNumber ...int) (string, error) {
+ var networkNumber int = 0
+ if len(inputNetworkNumber) > 0 {
+ networkNumber = inputNetworkNumber[0]
+ }
+
+ if _, ok := a.networks[networkNumber]; !ok {
+ a.AddNetwork(networkNumber)
+ }
+
+ numberOfAddresses := a.networks[networkNumber]
+
+ if numberOfAddresses == 254 {
+ return "", fmt.Errorf("no available IPv4 addresses")
+ }
+
+ address, err := a.createIpAddress(networkNumber, numberOfAddresses)
+
+ a.networks[networkNumber] = numberOfAddresses + 1
+
+ return address + "/24", err
+}
+
+// Creates a file each time an IP is assigned so that addresses in use can be tracked across tests.
+// Once a free address is found, 'counter' is copied to 'chosenOctet' and that octet is reused for the remaining tests.
+// Host addresses reported by 'ip a' are also checked to avoid collisions.
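+// For example, with chosenOctet=10, networkNumber=0 and numberOfAddresses=1 the generated address is 10.10.0.1.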
+func (a *Ip4AddressAllocator) createIpAddress(networkNumber int, numberOfAddresses int) (string, error) {
+ hostIps, _ := exechelper.CombinedOutput("ip a")
+ counter := 10
+ var address string
+
+ for {
+ if a.chosenOctet != 0 {
+ address = fmt.Sprintf("10.%v.%v.%v", a.chosenOctet, networkNumber, numberOfAddresses)
+ file, err := os.Create(address)
+ if err != nil {
+ return "", errors.New("unable to create file: " + fmt.Sprint(err))
+ }
+ file.Close()
+ break
+ } else {
+ address = fmt.Sprintf("10.%v.%v.%v", counter, networkNumber, numberOfAddresses)
+ _, err := os.Stat(address)
+ if err == nil || strings.Contains(string(hostIps), address) {
+ counter++
+ } else if os.IsNotExist(err) {
+ file, err := os.Create(address)
+ if err != nil {
+ return "", errors.New("unable to create file: " + fmt.Sprint(err))
+ }
+ file.Close()
+ a.chosenOctet = counter
+ break
+ } else {
+ return "", errors.New("an error occurred while checking if a file exists: " + fmt.Sprint(err))
+ }
+ }
+ }
+
+ a.assignedIps = append(a.assignedIps, address)
+ return address, nil
+}
+
+func (a *Ip4AddressAllocator) DeleteIpAddresses() {
+ for ip := range a.assignedIps {
+ os.Remove(a.assignedIps[ip])
+ }
+}
+
+func NewIp4AddressAllocator() *Ip4AddressAllocator {
+ var ip4AddrAllocator = new(Ip4AddressAllocator)
+ ip4AddrAllocator.networks = make(map[int]AddressCounter)
+ ip4AddrAllocator.AddNetwork(0)
+ return ip4AddrAllocator
+}
diff --git a/extras/hs-test/infra/container.go b/extras/hs-test/infra/container.go
new file mode 100644
index 00000000000..1dd82809f8a
--- /dev/null
+++ b/extras/hs-test/infra/container.go
@@ -0,0 +1,380 @@
+package hst
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "strings"
+ "text/template"
+ "time"
+
+ "github.com/edwarnicke/exechelper"
+ . "github.com/onsi/ginkgo/v2"
+)
+
+const (
+ logDir string = "/tmp/hs-test/"
+ volumeDir string = "/volumes"
+)
+
+var (
+ workDir, _ = os.Getwd()
+)
+
+type Volume struct {
+ HostDir string
+ ContainerDir string
+ IsDefaultWorkDir bool
+}
+
+type Container struct {
+ Suite *HstSuite
+ IsOptional bool
+ RunDetached bool
+ Name string
+ Image string
+ ExtraRunningArgs string
+ Volumes map[string]Volume
+ EnvVars map[string]string
+ VppInstance *VppInstance
+ AllocatedCpus []int
+}
+
+func newContainer(suite *HstSuite, yamlInput ContainerConfig) (*Container, error) {
+ containerName := yamlInput["name"].(string)
+ if len(containerName) == 0 {
+ err := fmt.Errorf("container name must not be blank")
+ return nil, err
+ }
+
+ var container = new(Container)
+ container.Volumes = make(map[string]Volume)
+ container.EnvVars = make(map[string]string)
+ container.Name = containerName
+ container.Suite = suite
+
+ if Image, ok := yamlInput["image"]; ok {
+ container.Image = Image.(string)
+ } else {
+ container.Image = "hs-test/vpp"
+ }
+
+ if args, ok := yamlInput["extra-args"]; ok {
+ container.ExtraRunningArgs = args.(string)
+ } else {
+ container.ExtraRunningArgs = ""
+ }
+
+ if isOptional, ok := yamlInput["is-optional"]; ok {
+ container.IsOptional = isOptional.(bool)
+ } else {
+ container.IsOptional = false
+ }
+
+ if runDetached, ok := yamlInput["run-detached"]; ok {
+ container.RunDetached = runDetached.(bool)
+ } else {
+ container.RunDetached = true
+ }
+
+ if _, ok := yamlInput["volumes"]; ok {
+ workingVolumeDir := logDir + suite.GetCurrentTestName() + volumeDir
+ workDirReplacer := strings.NewReplacer("$HST_DIR", workDir)
+ volDirReplacer := strings.NewReplacer("$HST_VOLUME_DIR", workingVolumeDir)
+ for _, volu := range yamlInput["volumes"].([]interface{}) {
+ volumeMap := volu.(ContainerConfig)
+ hostDir := workDirReplacer.Replace(volumeMap["host-dir"].(string))
+ hostDir = volDirReplacer.Replace(hostDir)
+ containerDir := volumeMap["container-dir"].(string)
+ isDefaultWorkDir := false
+
+ if isDefault, ok := volumeMap["is-default-work-dir"]; ok {
+ isDefaultWorkDir = isDefault.(bool)
+ }
+ container.addVolume(hostDir, containerDir, isDefaultWorkDir)
+ }
+ }
+
+ if _, ok := yamlInput["vars"]; ok {
+ for _, envVar := range yamlInput["vars"].([]interface{}) {
+ envVarMap := envVar.(ContainerConfig)
+ name := envVarMap["name"].(string)
+ value := envVarMap["value"].(string)
+ container.AddEnvVar(name, value)
+ }
+ }
+ return container, nil
+}
+
+func (c *Container) getWorkDirVolume() (res Volume, exists bool) {
+ for _, v := range c.Volumes {
+ if v.IsDefaultWorkDir {
+ res = v
+ exists = true
+ return
+ }
+ }
+ return
+}
+
+func (c *Container) GetHostWorkDir() (res string) {
+ if v, ok := c.getWorkDirVolume(); ok {
+ res = v.HostDir
+ }
+ return
+}
+
+func (c *Container) GetContainerWorkDir() (res string) {
+ if v, ok := c.getWorkDirVolume(); ok {
+ res = v.ContainerDir
+ }
+ return
+}
+
+func (c *Container) getContainerArguments() string {
+ args := "--ulimit nofile=90000:90000 --cap-add=all --privileged --network host"
+ c.allocateCpus()
+ args += fmt.Sprintf(" --cpuset-cpus=\"%d-%d\"", c.AllocatedCpus[0], c.AllocatedCpus[len(c.AllocatedCpus)-1])
+ args += c.getVolumesAsCliOption()
+ args += c.getEnvVarsAsCliOption()
+ if *VppSourceFileDir != "" {
+ args += fmt.Sprintf(" -v %s:%s", *VppSourceFileDir, *VppSourceFileDir)
+ }
+ args += " --name " + c.Name + " " + c.Image
+ args += " " + c.ExtraRunningArgs
+ return args
+}
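+// As an illustration (all values hypothetical), the argument string built above looks roughly like:
+//   --ulimit nofile=90000:90000 --cap-add=all --privileged --network host --cpuset-cpus="1-2"
+//   -v /host/dir:/container/dir -e NAME=value --name 1vpp12345 hs-test/vpp extra-args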
+
+func (c *Container) runWithRetry(cmd string) error {
+ nTries := 5
+ for i := 0; i < nTries; i++ {
+ err := exechelper.Run(cmd)
+ if err == nil {
+ return nil
+ }
+ time.Sleep(1 * time.Second)
+ }
+ return fmt.Errorf("failed to run container command")
+}
+
+func (c *Container) Create() error {
+ cmd := "docker create " + c.getContainerArguments()
+ c.Suite.Log(cmd)
+ return exechelper.Run(cmd)
+}
+
+func (c *Container) allocateCpus() {
+ c.Suite.StartedContainers = append(c.Suite.StartedContainers, c)
+ c.AllocatedCpus = c.Suite.AllocateCpus()
+ c.Suite.Log("Allocated CPUs " + fmt.Sprint(c.AllocatedCpus) + " to container " + c.Name)
+}
+
+func (c *Container) Start() error {
+ cmd := "docker start " + c.Name
+ c.Suite.Log(cmd)
+ return c.runWithRetry(cmd)
+}
+
+func (c *Container) prepareCommand() (string, error) {
+ if c.Name == "" {
+ return "", fmt.Errorf("run container failed: name is blank")
+ }
+
+ cmd := "docker run "
+ if c.RunDetached {
+ cmd += " -d"
+ }
+
+ cmd += " " + c.getContainerArguments()
+
+ c.Suite.Log(cmd)
+ return cmd, nil
+}
+
+func (c *Container) CombinedOutput() (string, error) {
+ cmd, err := c.prepareCommand()
+ if err != nil {
+ return "", err
+ }
+
+ byteOutput, err := exechelper.CombinedOutput(cmd)
+ return string(byteOutput), err
+}
+
+func (c *Container) Run() error {
+ cmd, err := c.prepareCommand()
+ if err != nil {
+ return err
+ }
+ return c.runWithRetry(cmd)
+}
+
+func (c *Container) addVolume(hostDir string, containerDir string, isDefaultWorkDir bool) {
+ var volume Volume
+ volume.HostDir = hostDir
+ volume.ContainerDir = containerDir
+ volume.IsDefaultWorkDir = isDefaultWorkDir
+ c.Volumes[hostDir] = volume
+}
+
+func (c *Container) getVolumesAsCliOption() string {
+ cliOption := ""
+
+ if len(c.Volumes) > 0 {
+ for _, volume := range c.Volumes {
+ cliOption += fmt.Sprintf(" -v %s:%s", volume.HostDir, volume.ContainerDir)
+ }
+ }
+
+ return cliOption
+}
+
+func (c *Container) AddEnvVar(name string, value string) {
+ c.EnvVars[name] = value
+}
+
+func (c *Container) getEnvVarsAsCliOption() string {
+ cliOption := ""
+ if len(c.EnvVars) == 0 {
+ return cliOption
+ }
+
+ for name, value := range c.EnvVars {
+ cliOption += fmt.Sprintf(" -e %s=%s", name, value)
+ }
+ return cliOption
+}
+
+func (c *Container) newVppInstance(cpus []int, additionalConfigs ...Stanza) (*VppInstance, error) {
+ vpp := new(VppInstance)
+ vpp.Container = c
+ vpp.Cpus = cpus
+ vpp.AdditionalConfig = append(vpp.AdditionalConfig, additionalConfigs...)
+ c.VppInstance = vpp
+ return vpp, nil
+}
+
+func (c *Container) copy(sourceFileName string, targetFileName string) error {
+ cmd := exec.Command("docker", "cp", sourceFileName, c.Name+":"+targetFileName)
+ return cmd.Run()
+}
+
+func (c *Container) CreateFile(destFileName string, content string) error {
+ f, err := os.CreateTemp("/tmp", "hst-config"+c.Suite.Ppid)
+ if err != nil {
+ return err
+ }
+ defer os.Remove(f.Name())
+
+ if _, err := f.Write([]byte(content)); err != nil {
+ return err
+ }
+ if err := f.Close(); err != nil {
+ return err
+ }
+ return c.copy(f.Name(), destFileName)
+}
+
+/*
+ * Executes in detached mode so that the started application can continue to run
+ * without blocking execution of the test
+ */
+func (c *Container) ExecServer(command string, arguments ...any) {
+ serverCommand := fmt.Sprintf(command, arguments...)
+ containerExecCommand := "docker exec -d" + c.getEnvVarsAsCliOption() +
+ " " + c.Name + " " + serverCommand
+ GinkgoHelper()
+ c.Suite.Log(containerExecCommand)
+ c.Suite.AssertNil(exechelper.Run(containerExecCommand))
+}
+
+func (c *Container) Exec(command string, arguments ...any) string {
+ cliCommand := fmt.Sprintf(command, arguments...)
+ containerExecCommand := "docker exec" + c.getEnvVarsAsCliOption() +
+ " " + c.Name + " " + cliCommand
+ GinkgoHelper()
+ c.Suite.Log(containerExecCommand)
+ byteOutput, err := exechelper.CombinedOutput(containerExecCommand)
+ c.Suite.AssertNil(err, fmt.Sprint(err))
+ return string(byteOutput)
+}
+
+func (c *Container) getLogDirPath() string {
+ testId := c.Suite.GetTestId()
+ testName := c.Suite.GetCurrentTestName()
+ logDirPath := logDir + testName + "/" + testId + "/"
+
+ cmd := exec.Command("mkdir", "-p", logDirPath)
+ if err := cmd.Run(); err != nil {
+ Fail("mkdir error: " + fmt.Sprint(err))
+ }
+
+ return logDirPath
+}
+
+func (c *Container) saveLogs() {
+ testLogFilePath := c.getLogDirPath() + "container-" + c.Name + ".log"
+
+ cmd := exec.Command("docker", "logs", "--details", "-t", c.Name)
+ c.Suite.Log(cmd)
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ c.Suite.Log(err)
+ }
+
+ f, err := os.Create(testLogFilePath)
+ if err != nil {
+ Fail("file create error: " + fmt.Sprint(err))
+ }
+ fmt.Fprint(f, string(output))
+ f.Close()
+}
+
+// Outputs logs from docker containers. Set 'maxLines' to 0 to output the full log.
+func (c *Container) log(maxLines int) (string, error) {
+ var cmd string
+ if maxLines == 0 {
+ cmd = "docker logs " + c.Name
+ } else {
+ cmd = fmt.Sprintf("docker logs --tail %d %s", maxLines, c.Name)
+ }
+
+ c.Suite.Log(cmd)
+ o, err := exechelper.CombinedOutput(cmd)
+ return string(o), err
+}
+
+func (c *Container) stop() error {
+ if c.VppInstance != nil && c.VppInstance.ApiStream != nil {
+ c.VppInstance.saveLogs()
+ c.VppInstance.Disconnect()
+ }
+ c.VppInstance = nil
+ c.saveLogs()
+ c.Suite.Log("docker stop " + c.Name + " -t 0")
+ return exechelper.Run("docker stop " + c.Name + " -t 0")
+}
+
+func (c *Container) CreateConfig(targetConfigName string, templateName string, values any) {
+ template := template.Must(template.ParseFiles(templateName))
+
+ f, err := os.CreateTemp(logDir, "hst-config")
+ c.Suite.AssertNil(err, err)
+ defer os.Remove(f.Name())
+
+ err = template.Execute(f, values)
+ c.Suite.AssertNil(err, err)
+
+ err = f.Close()
+ c.Suite.AssertNil(err, err)
+
+ c.copy(f.Name(), targetConfigName)
+}
+
+func init() {
+ cmd := exec.Command("mkdir", "-p", logDir)
+ if err := cmd.Run(); err != nil {
+ panic(err)
+ }
+}
diff --git a/extras/hs-test/infra/cpu.go b/extras/hs-test/infra/cpu.go
new file mode 100644
index 00000000000..b5555d85b98
--- /dev/null
+++ b/extras/hs-test/infra/cpu.go
@@ -0,0 +1,100 @@
+package hst
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ . "github.com/onsi/ginkgo/v2"
+ "os"
+ "os/exec"
+ "strings"
+)
+
+var CgroupPath = "/sys/fs/cgroup/"
+
+type CpuContext struct {
+ cpuAllocator *CpuAllocatorT
+ cpus []int
+}
+
+type CpuAllocatorT struct {
+ cpus []int
+}
+
+var cpuAllocator *CpuAllocatorT = nil
+
+func (c *CpuAllocatorT) Allocate(containerCount int, nCpus int) (*CpuContext, error) {
+ var cpuCtx CpuContext
+
+ // splitting cpus into equal parts; this will over-allocate cores but it's good enough for now
+ maxContainerCount := 4
+ // skip CPU 0
+ minCpu := ((GinkgoParallelProcess() - 1) * maxContainerCount * nCpus) + 1
+ maxCpu := (GinkgoParallelProcess() * maxContainerCount * nCpus)
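+ // e.g. for Ginkgo process 2 with nCpus=2: minCpu = ((2-1)*4*2)+1 = 9, maxCpu = 2*4*2 = 16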
+
+ if len(c.cpus)-1 < maxCpu {
+ err := fmt.Errorf("could not allocate %d CPUs; available: %d; attempted to allocate cores %d-%d",
+ nCpus*containerCount, len(c.cpus)-1, minCpu, maxCpu)
+ return nil, err
+ }
+ if containerCount == 1 {
+ cpuCtx.cpus = c.cpus[minCpu : minCpu+nCpus]
+ } else if containerCount > 1 && containerCount <= maxContainerCount {
+ cpuCtx.cpus = c.cpus[minCpu+(nCpus*(containerCount-1)) : minCpu+(nCpus*containerCount)]
+ } else {
+ return nil, fmt.Errorf("too many containers; CPU allocation for >%d containers is not implemented", maxContainerCount)
+ }
+
+ cpuCtx.cpuAllocator = c
+ return &cpuCtx, nil
+}
+
+func (c *CpuAllocatorT) readCpus() error {
+ var first, last int
+
+ // The path depends on the cgroup version, so check which one is in use with 'stat -fc %T /sys/fs/cgroup/'.
+ // An output of 'cgroup2fs' means cgroups v2 is in use; 'tmpfs' means cgroups v1.
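+ // The effective cpuset file opened below is expected to hold a single range such as '0-15',
+ // which is then expanded into the list of usable CPUs.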
+ cmd := exec.Command("stat", "-fc", "%T", "/sys/fs/cgroup/")
+ byteOutput, err := cmd.CombinedOutput()
+ if err != nil {
+ return err
+ }
+ CpuPath := CgroupPath
+ if strings.Contains(string(byteOutput), "tmpfs") {
+ CpuPath += "cpuset/cpuset.effective_cpus"
+ } else if strings.Contains(string(byteOutput), "cgroup2fs") {
+ CpuPath += "cpuset.cpus.effective"
+ } else {
+ return errors.New("cgroup unknown fs: " + string(byteOutput))
+ }
+
+ file, err := os.Open(CpuPath)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ sc := bufio.NewScanner(file)
+ sc.Scan()
+ line := sc.Text()
+ _, err = fmt.Sscanf(line, "%d-%d", &first, &last)
+ if err != nil {
+ return err
+ }
+ for i := first; i <= last; i++ {
+ c.cpus = append(c.cpus, i)
+ }
+ return nil
+}
+
+func CpuAllocator() (*CpuAllocatorT, error) {
+ if cpuAllocator == nil {
+ cpuAllocator = new(CpuAllocatorT)
+ err := cpuAllocator.readCpus()
+ if err != nil {
+ return nil, err
+ }
+ }
+ return cpuAllocator, nil
+}
diff --git a/extras/hs-test/infra/hst_suite.go b/extras/hs-test/infra/hst_suite.go
new file mode 100644
index 00000000000..46aede7b7ef
--- /dev/null
+++ b/extras/hs-test/infra/hst_suite.go
@@ -0,0 +1,544 @@
+package hst
+
+import (
+ "bufio"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/onsi/gomega/gmeasure"
+ "gopkg.in/yaml.v3"
+
+ "github.com/edwarnicke/exechelper"
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+const (
+ DEFAULT_NETWORK_NUM int = 1
+)
+
+var IsPersistent = flag.Bool("persist", false, "persists topology config")
+var IsVerbose = flag.Bool("verbose", false, "verbose test output")
+var IsUnconfiguring = flag.Bool("unconfigure", false, "remove topology")
+var IsVppDebug = flag.Bool("debug", false, "attach gdb to vpp")
+var NConfiguredCpus = flag.Int("cpus", 1, "number of CPUs assigned to vpp")
+var VppSourceFileDir = flag.String("vppsrc", "", "vpp source file directory")
+var SuiteTimeout time.Duration
+
+type HstSuite struct {
+ Containers map[string]*Container
+ StartedContainers []*Container
+ Volumes []string
+ NetConfigs []NetConfig
+ NetInterfaces map[string]*NetInterface
+ Ip4AddrAllocator *Ip4AddressAllocator
+ TestIds map[string]string
+ CpuAllocator *CpuAllocatorT
+ CpuContexts []*CpuContext
+ CpuPerVpp int
+ Ppid string
+ ProcessIndex string
+ Logger *log.Logger
+ LogFile *os.File
+}
+
+func getTestFilename() string {
+ _, filename, _, _ := runtime.Caller(2)
+ return filepath.Base(filename)
+}
+
+func (s *HstSuite) SetupSuite() {
+ s.CreateLogger()
+ s.Log("Suite Setup")
+ RegisterFailHandler(func(message string, callerSkip ...int) {
+ s.HstFail()
+ Fail(message, callerSkip...)
+ })
+ var err error
+ s.Ppid = fmt.Sprint(os.Getppid())
+ // remove the last digit so we have space to prepend a process index (interface names have a character limit)
+ s.Ppid = s.Ppid[:len(s.Ppid)-1]
+ s.ProcessIndex = fmt.Sprint(GinkgoParallelProcess())
+ s.CpuAllocator, err = CpuAllocator()
+ if err != nil {
+ Fail("failed to init cpu allocator: " + fmt.Sprint(err))
+ }
+ s.CpuPerVpp = *NConfiguredCpus
+}
+
+func (s *HstSuite) AllocateCpus() []int {
+ cpuCtx, err := s.CpuAllocator.Allocate(len(s.StartedContainers), s.CpuPerVpp)
+ s.AssertNil(err)
+ s.AddCpuContext(cpuCtx)
+ return cpuCtx.cpus
+}
+
+func (s *HstSuite) AddCpuContext(cpuCtx *CpuContext) {
+ s.CpuContexts = append(s.CpuContexts, cpuCtx)
+}
+
+func (s *HstSuite) TearDownSuite() {
+ defer s.LogFile.Close()
+ s.Log("Suite Teardown")
+ s.UnconfigureNetworkTopology()
+}
+
+func (s *HstSuite) TearDownTest() {
+ s.Log("Test Teardown")
+ if *IsPersistent {
+ return
+ }
+ s.ResetContainers()
+ s.RemoveVolumes()
+ s.Ip4AddrAllocator.DeleteIpAddresses()
+}
+
+func (s *HstSuite) SkipIfUnconfiguring() {
+ if *IsUnconfiguring {
+ s.Skip("skipping to unconfigure")
+ }
+}
+
+func (s *HstSuite) SetupTest() {
+ s.Log("Test Setup")
+ s.StartedContainers = s.StartedContainers[:0]
+ s.SkipIfUnconfiguring()
+ s.SetupVolumes()
+ s.SetupContainers()
+}
+
+func (s *HstSuite) SetupVolumes() {
+ for _, volume := range s.Volumes {
+ cmd := "docker volume create --name=" + volume
+ s.Log(cmd)
+ exechelper.Run(cmd)
+ }
+}
+
+func (s *HstSuite) SetupContainers() {
+ for _, container := range s.Containers {
+ if !container.IsOptional {
+ container.Run()
+ }
+ }
+}
+
+func (s *HstSuite) LogVppInstance(container *Container, maxLines int) {
+ if container.VppInstance == nil {
+ return
+ }
+
+ logSource := container.GetHostWorkDir() + defaultLogFilePath
+ file, err := os.Open(logSource)
+
+ if err != nil {
+ return
+ }
+ defer file.Close()
+
+ scanner := bufio.NewScanner(file)
+ var lines []string
+ var counter int
+
+ for scanner.Scan() {
+ lines = append(lines, scanner.Text())
+ counter++
+ if counter > maxLines {
+ lines = lines[1:]
+ counter--
+ }
+ }
+
+ s.Log("vvvvvvvvvvvvvvv " + container.Name + " [VPP instance]:")
+ for _, line := range lines {
+ s.Log(line)
+ }
+ s.Log("^^^^^^^^^^^^^^^\n\n")
+}
+
+func (s *HstSuite) HstFail() {
+ for _, container := range s.StartedContainers {
+ out, err := container.log(20)
+ if err != nil {
+ s.Log("An error occured while obtaining '" + container.Name + "' container logs: " + fmt.Sprint(err))
+ s.Log("The container might not be running - check logs in " + container.getLogDirPath())
+ continue
+ }
+ s.Log("\nvvvvvvvvvvvvvvv " +
+ container.Name + ":\n" +
+ out +
+ "^^^^^^^^^^^^^^^\n\n")
+ s.LogVppInstance(container, 20)
+ }
+}
+
+func (s *HstSuite) AssertNil(object interface{}, msgAndArgs ...interface{}) {
+ Expect(object).To(BeNil(), msgAndArgs...)
+}
+
+func (s *HstSuite) AssertNotNil(object interface{}, msgAndArgs ...interface{}) {
+ Expect(object).ToNot(BeNil(), msgAndArgs...)
+}
+
+func (s *HstSuite) AssertEqual(expected, actual interface{}, msgAndArgs ...interface{}) {
+ Expect(actual).To(Equal(expected), msgAndArgs...)
+}
+
+func (s *HstSuite) AssertNotEqual(expected, actual interface{}, msgAndArgs ...interface{}) {
+ Expect(actual).ToNot(Equal(expected), msgAndArgs...)
+}
+
+func (s *HstSuite) AssertContains(testString, contains interface{}, msgAndArgs ...interface{}) {
+ Expect(testString).To(ContainSubstring(fmt.Sprint(contains)), msgAndArgs...)
+}
+
+func (s *HstSuite) AssertNotContains(testString, contains interface{}, msgAndArgs ...interface{}) {
+ Expect(testString).ToNot(ContainSubstring(fmt.Sprint(contains)), msgAndArgs...)
+}
+
+func (s *HstSuite) AssertNotEmpty(object interface{}, msgAndArgs ...interface{}) {
+ Expect(object).ToNot(BeEmpty(), msgAndArgs...)
+}
+
+func (s *HstSuite) CreateLogger() {
+ suiteName := s.GetCurrentSuiteName()
+ var err error
+ s.LogFile, err = os.Create("summary/" + suiteName + ".log")
+ if err != nil {
+ Fail("Unable to create log file.")
+ }
+ s.Logger = log.New(io.Writer(s.LogFile), "", log.LstdFlags)
+}
+
+// Logs to a file by default; when VERBOSE=true it also logs to stdout through GinkgoWriter
+// to keep the console output tidy
+func (s *HstSuite) Log(arg any) {
+ logs := strings.Split(fmt.Sprint(arg), "\n")
+ for _, line := range logs {
+ s.Logger.Println(line)
+ }
+ if *IsVerbose {
+ GinkgoWriter.Println(arg)
+ }
+}
+
+func (s *HstSuite) Skip(args string) {
+ Skip(args)
+}
+
+func (s *HstSuite) SkipIfMultiWorker(args ...any) {
+ if *NConfiguredCpus > 1 {
+ s.Skip("test case not supported with multiple vpp workers")
+ }
+}
+
+func (s *HstSuite) SkipUnlessExtendedTestsBuilt() {
+ imageName := "hs-test/nginx-http3"
+
+ cmd := exec.Command("docker", "images", imageName)
+ byteOutput, err := cmd.CombinedOutput()
+ if err != nil {
+ s.Log("error while searching for docker image")
+ return
+ }
+ if !strings.Contains(string(byteOutput), imageName) {
+ s.Skip("extended tests not built")
+ }
+}
+
+func (s *HstSuite) ResetContainers() {
+ for _, container := range s.StartedContainers {
+ container.stop()
+ exechelper.Run("docker rm " + container.Name)
+ }
+}
+
+func (s *HstSuite) RemoveVolumes() {
+ for _, volumeName := range s.Volumes {
+ cmd := "docker volume rm " + volumeName
+ exechelper.Run(cmd)
+ os.RemoveAll(volumeName)
+ }
+}
+
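+// Suite-scoped resources are keyed as <process index> + <base name> + <truncated PPID>;
+// e.g. for Ginkgo process 1 and a (hypothetical) Ppid of "12345", namespace "srv" resolves to "1srv12345".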
+func (s *HstSuite) GetNetNamespaceByName(name string) string {
+ return s.ProcessIndex + name + s.Ppid
+}
+
+func (s *HstSuite) GetInterfaceByName(name string) *NetInterface {
+ return s.NetInterfaces[s.ProcessIndex+name+s.Ppid]
+}
+
+func (s *HstSuite) GetContainerByName(name string) *Container {
+ return s.Containers[s.ProcessIndex+name+s.Ppid]
+}
+
+/*
+ * Create a copy and return its address, so that individual tests which call this
+ * cannot modify the original container and affect other tests
+ */
+func (s *HstSuite) GetTransientContainerByName(name string) *Container {
+ containerCopy := *s.Containers[s.ProcessIndex+name+s.Ppid]
+ return &containerCopy
+}
+
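+// LoadContainerTopology reads a container topology yaml from topo-containers/. A minimal
+// sketch of the shape this parser and newContainer() expect (names and paths are illustrative only):
+//
+//	volumes:
+//	  - volume:
+//	      host-dir: "$HST_VOLUME_DIR/shared"
+//	containers:
+//	  - name: "vpp"
+//	    image: "hs-test/vpp"
+//	    is-optional: false
+//	    volumes:
+//	      - host-dir: "$HST_VOLUME_DIR/shared"
+//	        container-dir: "/tmp/shared"
+//	        is-default-work-dir: true
+//	    vars:
+//	      - name: "SOME_VAR"
+//	        value: "some-value"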
+func (s *HstSuite) LoadContainerTopology(topologyName string) {
+ data, err := os.ReadFile(containerTopologyDir + topologyName + ".yaml")
+ if err != nil {
+ Fail("read error: " + fmt.Sprint(err))
+ }
+ var yamlTopo YamlTopology
+ err = yaml.Unmarshal(data, &yamlTopo)
+ if err != nil {
+ Fail("unmarshal error: " + fmt.Sprint(err))
+ }
+
+ for _, elem := range yamlTopo.Volumes {
+ volumeMap := elem["volume"].(VolumeConfig)
+ hostDir := volumeMap["host-dir"].(string)
+ workingVolumeDir := logDir + s.GetCurrentTestName() + volumeDir
+ volDirReplacer := strings.NewReplacer("$HST_VOLUME_DIR", workingVolumeDir)
+ hostDir = volDirReplacer.Replace(hostDir)
+ s.Volumes = append(s.Volumes, hostDir)
+ }
+
+ s.Containers = make(map[string]*Container)
+ for _, elem := range yamlTopo.Containers {
+ newContainer, err := newContainer(s, elem)
+ if err != nil {
+ Fail("container config error: " + fmt.Sprint(err))
+ }
+ newContainer.Suite = s
+ newContainer.Name = newContainer.Suite.ProcessIndex + newContainer.Name + newContainer.Suite.Ppid
+ s.Containers[newContainer.Name] = newContainer
+ }
+}
+
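+// LoadNetworkTopology reads a network topology yaml from topo-network/. A minimal sketch of
+// the device entries handled by the switch below (names are illustrative only):
+//
+//	devices:
+//	  - name: "ns0"
+//	    type: "netns"
+//	  - name: "cln"
+//	    type: "veth"
+//	    ip4:
+//	      network: 1
+//	    peer:
+//	      name: "cln-peer"
+//	      netns: "ns0"
+//	  - name: "br0"
+//	    type: "bridge"
+//	    netns: "ns0"
+//	    interfaces:
+//	      - "cln-peer"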
+func (s *HstSuite) LoadNetworkTopology(topologyName string) {
+ data, err := os.ReadFile(networkTopologyDir + topologyName + ".yaml")
+ if err != nil {
+ Fail("read error: " + fmt.Sprint(err))
+ }
+ var yamlTopo YamlTopology
+ err = yaml.Unmarshal(data, &yamlTopo)
+ if err != nil {
+ Fail("unmarshal error: " + fmt.Sprint(err))
+ }
+
+ s.Ip4AddrAllocator = NewIp4AddressAllocator()
+ s.NetInterfaces = make(map[string]*NetInterface)
+
+ for _, elem := range yamlTopo.Devices {
+ if _, ok := elem["name"]; ok {
+ elem["name"] = s.ProcessIndex + elem["name"].(string) + s.Ppid
+ }
+
+ if peer, ok := elem["peer"].(NetDevConfig); ok {
+ if peer["name"].(string) != "" {
+ peer["name"] = s.ProcessIndex + peer["name"].(string) + s.Ppid
+ }
+ if _, ok := peer["netns"]; ok {
+ peer["netns"] = s.ProcessIndex + peer["netns"].(string) + s.Ppid
+ }
+ }
+
+ if _, ok := elem["netns"]; ok {
+ elem["netns"] = s.ProcessIndex + elem["netns"].(string) + s.Ppid
+ }
+
+ if _, ok := elem["interfaces"]; ok {
+ interfaceCount := len(elem["interfaces"].([]interface{}))
+ for i := 0; i < interfaceCount; i++ {
+ elem["interfaces"].([]interface{})[i] = s.ProcessIndex + elem["interfaces"].([]interface{})[i].(string) + s.Ppid
+ }
+ }
+
+ switch elem["type"].(string) {
+ case NetNs:
+ {
+ if namespace, err := newNetNamespace(elem); err == nil {
+ s.NetConfigs = append(s.NetConfigs, &namespace)
+ } else {
+ Fail("network config error: " + fmt.Sprint(err))
+ }
+ }
+ case Veth, Tap:
+ {
+ if netIf, err := newNetworkInterface(elem, s.Ip4AddrAllocator); err == nil {
+ s.NetConfigs = append(s.NetConfigs, netIf)
+ s.NetInterfaces[netIf.Name()] = netIf
+ } else {
+ Fail("network config error: " + fmt.Sprint(err))
+ }
+ }
+ case Bridge:
+ {
+ if bridge, err := newBridge(elem); err == nil {
+ s.NetConfigs = append(s.NetConfigs, &bridge)
+ } else {
+ Fail("network config error: " + fmt.Sprint(err))
+ }
+ }
+ }
+ }
+}
+
+func (s *HstSuite) ConfigureNetworkTopology(topologyName string) {
+ s.LoadNetworkTopology(topologyName)
+
+ if *IsUnconfiguring {
+ return
+ }
+
+ for _, nc := range s.NetConfigs {
+ s.Log(nc.Name())
+ if err := nc.configure(); err != nil {
+ Fail("Network config error: " + fmt.Sprint(err))
+ }
+ }
+}
+
+func (s *HstSuite) UnconfigureNetworkTopology() {
+ if *IsPersistent {
+ return
+ }
+ for _, nc := range s.NetConfigs {
+ nc.unconfigure()
+ }
+}
+
+func (s *HstSuite) GetTestId() string {
+ testName := s.GetCurrentTestName()
+
+ if s.TestIds == nil {
+ s.TestIds = map[string]string{}
+ }
+
+ if _, ok := s.TestIds[testName]; !ok {
+ s.TestIds[testName] = time.Now().Format("2006-01-02_15-04-05")
+ }
+
+ return s.TestIds[testName]
+}
+
+func (s *HstSuite) GetCurrentTestName() string {
+ return strings.Split(CurrentSpecReport().LeafNodeText, "/")[1]
+}
+
+func (s *HstSuite) GetCurrentSuiteName() string {
+ return CurrentSpecReport().ContainerHierarchyTexts[0]
+}
+
+// Returns last 3 digits of PID + Ginkgo process index as the 4th digit
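+// e.g. a truncated Ppid of "12345" combined with process index "2" yields port "3452".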
+func (s *HstSuite) GetPortFromPpid() string {
+ port := s.Ppid
+ for len(port) < 3 {
+ port += "0"
+ }
+ return port[len(port)-3:] + s.ProcessIndex
+}
+
+func (s *HstSuite) StartServerApp(running chan error, done chan struct{}, env []string) {
+ cmd := exec.Command("iperf3", "-4", "-s", "-p", s.GetPortFromPpid())
+ if env != nil {
+ cmd.Env = env
+ }
+ s.Log(cmd)
+ err := cmd.Start()
+ if err != nil {
+ msg := fmt.Errorf("failed to start iperf server: %v", err)
+ running <- msg
+ return
+ }
+ running <- nil
+ <-done
+ cmd.Process.Kill()
+}
+
+func (s *HstSuite) StartClientApp(ipAddress string, env []string, clnCh chan error, clnRes chan string) {
+ defer func() {
+ clnCh <- nil
+ }()
+
+ nTries := 0
+
+ for {
+ cmd := exec.Command("iperf3", "-c", ipAddress, "-u", "-l", "1460", "-b", "10g", "-p", s.GetPortFromPpid())
+ if env != nil {
+ cmd.Env = env
+ }
+ s.Log(cmd)
+ o, err := cmd.CombinedOutput()
+ if err != nil {
+ if nTries > 5 {
+ clnCh <- fmt.Errorf("failed to start client app '%s'.\n%s", err, o)
+ return
+ }
+ time.Sleep(1 * time.Second)
+ nTries++
+ continue
+ } else {
+ clnRes <- fmt.Sprintf("Client output: %s", o)
+ }
+ break
+ }
+}
+
+func (s *HstSuite) StartHttpServer(running chan struct{}, done chan struct{}, addressPort, netNs string) {
+ cmd := newCommand([]string{"./http_server", addressPort, s.Ppid, s.ProcessIndex}, netNs)
+ err := cmd.Start()
+ s.Log(cmd)
+ if err != nil {
+ s.Log("Failed to start http server: " + fmt.Sprint(err))
+ return
+ }
+ running <- struct{}{}
+ <-done
+ cmd.Process.Kill()
+}
+
+func (s *HstSuite) StartWget(finished chan error, server_ip, port, query, netNs string) {
+ defer func() {
+ finished <- errors.New("wget error")
+ }()
+
+ cmd := newCommand([]string{"wget", "--timeout=10", "--no-proxy", "--tries=5", "-O", "/dev/null", server_ip + ":" + port + "/" + query},
+ netNs)
+ s.Log(cmd)
+ o, err := cmd.CombinedOutput()
+ if err != nil {
+ finished <- fmt.Errorf("wget error: '%v\n\n%s'", err, o)
+ return
+ } else if !strings.Contains(string(o), "200 OK") {
+ finished <- fmt.Errorf("wget error: response not 200 OK")
+ return
+ }
+ finished <- nil
+}
+
+/*
+RunBenchmark creates a Gomega experiment with the passed-in name and samples the passed-in callback
+repeatedly (samplesNum times), passing in the suite context, the experiment and your data.
+
+You can also instruct RunBenchmark to run with multiple concurrent workers.
+You can record multiple named measurements (float64 or duration) within the passed-in callback.
+RunBenchmark then produces a report showing the statistical distribution of the measurements.
+*/
+func (s *HstSuite) RunBenchmark(name string, samplesNum, parallelNum int, callback func(s *HstSuite, e *gmeasure.Experiment, data interface{}), data interface{}) {
+ experiment := gmeasure.NewExperiment(name)
+
+ experiment.Sample(func(idx int) {
+ defer GinkgoRecover()
+ callback(s, experiment, data)
+ }, gmeasure.SamplingConfig{N: samplesNum, NumParallel: parallelNum})
+ AddReportEntry(experiment.Name, experiment)
+}
diff --git a/extras/hs-test/infra/netconfig.go b/extras/hs-test/infra/netconfig.go
new file mode 100644
index 00000000000..3f3d3e3e84c
--- /dev/null
+++ b/extras/hs-test/infra/netconfig.go
@@ -0,0 +1,383 @@
+package hst
+
+import (
+ "errors"
+ "fmt"
+ "os/exec"
+ "strings"
+
+ "go.fd.io/govpp/binapi/ethernet_types"
+ "go.fd.io/govpp/binapi/interface_types"
+ "go.fd.io/govpp/binapi/ip_types"
+)
+
+type (
+ Cmd = exec.Cmd
+ MacAddress = ethernet_types.MacAddress
+ AddressWithPrefix = ip_types.AddressWithPrefix
+ IP4AddressWithPrefix = ip_types.IP4AddressWithPrefix
+ InterfaceIndex = interface_types.InterfaceIndex
+
+ NetConfig interface {
+ configure() error
+ unconfigure()
+ Name() string
+ Type() string
+ }
+
+ NetConfigBase struct {
+ name string
+ category string // 'type' is a Go keyword, so this field is named 'category' instead
+ }
+
+ NetInterface struct {
+ NetConfigBase
+ Ip4AddrAllocator *Ip4AddressAllocator
+ Ip4Address string
+ Index InterfaceIndex
+ HwAddress MacAddress
+ NetworkNamespace string
+ NetworkNumber int
+ Peer *NetInterface
+ }
+
+ NetworkNamespace struct {
+ NetConfigBase
+ }
+
+ NetworkBridge struct {
+ NetConfigBase
+ NetworkNamespace string
+ Interfaces []string
+ }
+)
+
+const (
+ NetNs string = "netns"
+ Veth string = "veth"
+ Tap string = "tap"
+ Bridge string = "bridge"
+)
+
+type InterfaceAdder func(n *NetInterface) *Cmd
+
+var (
+ ipCommandMap = map[string]InterfaceAdder{
+ Veth: func(n *NetInterface) *Cmd {
+ return exec.Command("ip", "link", "add", n.name, "type", "veth", "peer", "name", n.Peer.name)
+ },
+ Tap: func(n *NetInterface) *Cmd {
+ return exec.Command("ip", "tuntap", "add", n.name, "mode", "tap")
+ },
+ }
+)
+
+func newNetworkInterface(cfg NetDevConfig, a *Ip4AddressAllocator) (*NetInterface, error) {
+ var newInterface *NetInterface = &NetInterface{}
+ var err error
+ newInterface.Ip4AddrAllocator = a
+ newInterface.name = cfg["name"].(string)
+ newInterface.NetworkNumber = DEFAULT_NETWORK_NUM
+
+ if interfaceType, ok := cfg["type"]; ok {
+ newInterface.category = interfaceType.(string)
+ }
+
+ if presetHwAddress, ok := cfg["preset-hw-address"]; ok {
+ newInterface.HwAddress, err = ethernet_types.ParseMacAddress(presetHwAddress.(string))
+ if err != nil {
+ return &NetInterface{}, err
+ }
+ }
+
+ if netns, ok := cfg["netns"]; ok {
+ newInterface.NetworkNamespace = netns.(string)
+ }
+
+ if ip, ok := cfg["ip4"]; ok {
+ if n, ok := ip.(NetDevConfig)["network"]; ok {
+ newInterface.NetworkNumber = n.(int)
+ }
+ newInterface.Ip4Address, err = newInterface.Ip4AddrAllocator.NewIp4InterfaceAddress(
+ newInterface.NetworkNumber,
+ )
+ if err != nil {
+ return &NetInterface{}, err
+ }
+ }
+
+ if _, ok := cfg["peer"]; !ok {
+ return newInterface, nil
+ }
+
+ peer := cfg["peer"].(NetDevConfig)
+
+ if newInterface.Peer, err = newNetworkInterface(peer, a); err != nil {
+ return &NetInterface{}, err
+ }
+
+ return newInterface, nil
+}
+
+func (n *NetInterface) configureUpState() error {
+ err := setDevUp(n.Name(), "")
+ if err != nil {
+ return fmt.Errorf("set link up failed: %v", err)
+ }
+ return nil
+}
+
+func (n *NetInterface) configureNetworkNamespace() error {
+ if n.NetworkNamespace != "" {
+ err := linkSetNetns(n.name, n.NetworkNamespace)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (n *NetInterface) configureAddress() error {
+ if n.Ip4Address != "" {
+ if err := addAddress(
+ n.Name(),
+ n.Ip4Address,
+ n.NetworkNamespace,
+ ); err != nil {
+ return err
+ }
+
+ }
+ return nil
+}
+
+func (n *NetInterface) configure() error {
+ cmd := ipCommandMap[n.Type()](n)
+ _, err := cmd.CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("creating interface '%v' failed: %v", n.Name(), err)
+ }
+
+ if err := n.configureUpState(); err != nil {
+ return err
+ }
+
+ if err := n.configureNetworkNamespace(); err != nil {
+ return err
+ }
+
+ if err := n.configureAddress(); err != nil {
+ return err
+ }
+
+ if n.Peer != nil && n.Peer.name != "" {
+ if err := n.Peer.configureUpState(); err != nil {
+ return err
+ }
+
+ if err := n.Peer.configureNetworkNamespace(); err != nil {
+ return err
+ }
+
+ if err := n.Peer.configureAddress(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (n *NetInterface) unconfigure() {
+ delLink(n.name)
+}
+
+func (n *NetInterface) Name() string {
+ return n.name
+}
+
+func (n *NetInterface) Type() string {
+ return n.category
+}
+
+func (n *NetInterface) AddressWithPrefix() AddressWithPrefix {
+ address, _ := ip_types.ParseAddressWithPrefix(n.Ip4Address)
+ return address
+}
+
+func (n *NetInterface) Ip4AddressWithPrefix() IP4AddressWithPrefix {
+ ip4Prefix, _ := ip_types.ParseIP4Prefix(n.Ip4Address)
+ Ip4AddressWithPrefix := ip_types.IP4AddressWithPrefix(ip4Prefix)
+ return Ip4AddressWithPrefix
+}
+
+func (n *NetInterface) Ip4AddressString() string {
+ return strings.Split(n.Ip4Address, "/")[0]
+}
+
+func (b *NetConfigBase) Name() string {
+ return b.name
+}
+
+func (b *NetConfigBase) Type() string {
+ return b.category
+}
+
+func newNetNamespace(cfg NetDevConfig) (NetworkNamespace, error) {
+ var networkNamespace NetworkNamespace
+ networkNamespace.name = cfg["name"].(string)
+ networkNamespace.category = NetNs
+ return networkNamespace, nil
+}
+
+func (ns *NetworkNamespace) configure() error {
+ return addDelNetns(ns.name, true)
+}
+
+func (ns *NetworkNamespace) unconfigure() {
+ addDelNetns(ns.name, false)
+}
+
+func newBridge(cfg NetDevConfig) (NetworkBridge, error) {
+ var bridge NetworkBridge
+ bridge.name = cfg["name"].(string)
+ bridge.category = Bridge
+ for _, v := range cfg["interfaces"].([]interface{}) {
+ bridge.Interfaces = append(bridge.Interfaces, v.(string))
+ }
+
+ bridge.NetworkNamespace = ""
+ if netns, ok := cfg["netns"]; ok {
+ bridge.NetworkNamespace = netns.(string)
+ }
+ return bridge, nil
+}
+
+func (b *NetworkBridge) configure() error {
+ return addBridge(b.name, b.Interfaces, b.NetworkNamespace)
+}
+
+func (b *NetworkBridge) unconfigure() {
+ delBridge(b.name, b.NetworkNamespace)
+}
+
+func delBridge(brName, ns string) error {
+ err := setDevDown(brName, ns)
+ if err != nil {
+ return err
+ }
+
+ err = addDelBridge(brName, ns, false)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func setDevUp(dev, ns string) error {
+ return setDevUpDown(dev, ns, true)
+}
+
+func setDevDown(dev, ns string) error {
+ return setDevUpDown(dev, ns, false)
+}
+
+func delLink(ifName string) {
+ cmd := exec.Command("ip", "link", "del", ifName)
+ cmd.Run()
+}
+
+func setDevUpDown(dev, ns string, isUp bool) error {
+ var op string
+ if isUp {
+ op = "up"
+ } else {
+ op = "down"
+ }
+ c := []string{"ip", "link", "set", "dev", dev, op}
+ cmd := appendNetns(c, ns)
+ err := cmd.Run()
+ if err != nil {
+ return fmt.Errorf("error bringing %s device %s! (cmd: '%s')", dev, op, cmd)
+ }
+ return nil
+}
+
+func addDelNetns(name string, isAdd bool) error {
+ var op string
+ if isAdd {
+ op = "add"
+ } else {
+ op = "del"
+ }
+ cmd := exec.Command("ip", "netns", op, name)
+ _, err := cmd.CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("add/del netns failed (cmd: '%s')", cmd)
+ }
+ return nil
+}
+
+func linkSetNetns(ifName, ns string) error {
+ cmd := exec.Command("ip", "link", "set", "dev", ifName, "up", "netns", ns)
+ err := cmd.Run()
+ if err != nil {
+ return fmt.Errorf("error setting device '%s' to netns '%s: %v", ifName, ns, err)
+ }
+ return nil
+}
+
+func newCommand(s []string, ns string) *exec.Cmd {
+ return appendNetns(s, ns)
+}
+
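+// appendNetns wraps the command in 'ip netns exec <ns>' when a namespace is given; e.g.
+// appendNetns([]string{"brctl", "addbr", "br0"}, "ns1") builds "ip netns exec ns1 brctl addbr br0".
+// With an empty namespace the command is run as-is.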
+func appendNetns(s []string, ns string) *exec.Cmd {
+ var cmd *exec.Cmd
+ if ns == "" {
+ // use default namespace
+ cmd = exec.Command(s[0], s[1:]...)
+ } else {
+ var args = []string{"netns", "exec", ns}
+ args = append(args, s[:]...)
+ cmd = exec.Command("ip", args...)
+ }
+ return cmd
+}
+
+func addDelBridge(brName, ns string, isAdd bool) error {
+ var op string
+ if isAdd {
+ op = "addbr"
+ } else {
+ op = "delbr"
+ }
+ var c = []string{"brctl", op, brName}
+ cmd := appendNetns(c, ns)
+ err := cmd.Run()
+ if err != nil {
+ s := fmt.Sprintf("%s %s failed! err: '%s'", op, brName, err)
+ return errors.New(s)
+ }
+ return nil
+}
+
+func addBridge(brName string, ifs []string, ns string) error {
+ err := addDelBridge(brName, ns, true)
+ if err != nil {
+ return err
+ }
+
+ for _, v := range ifs {
+ c := []string{"brctl", "addif", brName, v}
+ cmd := appendNetns(c, ns)
+ err = cmd.Run()
+ if err != nil {
+ return fmt.Errorf("error adding %s to bridge %s: %s", v, brName, err)
+ }
+ }
+ err = setDevUp(brName, ns)
+ if err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/extras/hs-test/infra/suite_nginx.go b/extras/hs-test/infra/suite_nginx.go
new file mode 100644
index 00000000000..f835262d591
--- /dev/null
+++ b/extras/hs-test/infra/suite_nginx.go
@@ -0,0 +1,137 @@
+package hst
+
+import (
+ "reflect"
+ "runtime"
+ "strings"
+
+ . "github.com/onsi/ginkgo/v2"
+)
+
+// These correspond to names used in yaml config
+const (
+ VppProxyContainerName = "vpp-proxy"
+ NginxProxyContainerName = "nginx-proxy"
+ NginxServerContainerName = "nginx-server"
+ MirroringClientInterfaceName = "hstcln"
+ MirroringServerInterfaceName = "hstsrv"
+)
+
+var nginxTests = map[string][]func(s *NginxSuite){}
+var nginxSoloTests = map[string][]func(s *NginxSuite){}
+
+type NginxSuite struct {
+ HstSuite
+}
+
+func RegisterNginxTests(tests ...func(s *NginxSuite)) {
+ nginxTests[getTestFilename()] = tests
+}
+func RegisterNginxSoloTests(tests ...func(s *NginxSuite)) {
+ nginxSoloTests[getTestFilename()] = tests
+}
+
+func (s *NginxSuite) SetupSuite() {
+ s.HstSuite.SetupSuite()
+ s.LoadNetworkTopology("2taps")
+ s.LoadContainerTopology("nginxProxyAndServer")
+}
+
+func (s *NginxSuite) SetupTest() {
+ s.HstSuite.SetupTest()
+
+ // Setup test conditions
+ var sessionConfig Stanza
+ sessionConfig.
+ NewStanza("session").
+ Append("enable").
+ Append("use-app-socket-api").Close()
+
+ // ... for proxy
+ vppProxyContainer := s.GetContainerByName(VppProxyContainerName)
+ proxyVpp, _ := vppProxyContainer.newVppInstance(vppProxyContainer.AllocatedCpus, sessionConfig)
+ s.AssertNil(proxyVpp.Start())
+
+ clientInterface := s.GetInterfaceByName(MirroringClientInterfaceName)
+ s.AssertNil(proxyVpp.createTap(clientInterface, 1))
+
+ serverInterface := s.GetInterfaceByName(MirroringServerInterfaceName)
+ s.AssertNil(proxyVpp.createTap(serverInterface, 2))
+
+ nginxContainer := s.GetTransientContainerByName(NginxProxyContainerName)
+ nginxContainer.Create()
+
+ values := struct {
+ Proxy string
+ Server string
+ }{
+ Proxy: clientInterface.Peer.Ip4AddressString(),
+ Server: serverInterface.Ip4AddressString(),
+ }
+ nginxContainer.CreateConfig(
+ "/nginx.conf",
+ "./resources/nginx/nginx_proxy_mirroring.conf",
+ values,
+ )
+ s.AssertNil(nginxContainer.Start())
+
+ proxyVpp.WaitForApp("nginx-", 5)
+}
+
+var _ = Describe("NginxSuite", Ordered, ContinueOnFailure, func() {
+ var s NginxSuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SetupTest()
+ })
+ AfterAll(func() {
+ s.TearDownSuite()
+ })
+ AfterEach(func() {
+ s.TearDownTest()
+ })
+
+ for filename, tests := range nginxTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(SuiteTimeout))
+ }
+ }
+})
+
+var _ = Describe("NginxSuiteSolo", Ordered, ContinueOnFailure, Serial, func() {
+ var s NginxSuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SetupTest()
+ })
+ AfterAll(func() {
+ s.TearDownSuite()
+ })
+ AfterEach(func() {
+ s.TearDownTest()
+ })
+
+ for filename, tests := range nginxSoloTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, Label("SOLO"), func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(SuiteTimeout))
+ }
+ }
+})
diff --git a/extras/hs-test/infra/suite_no_topo.go b/extras/hs-test/infra/suite_no_topo.go
new file mode 100644
index 00000000000..c48e6fb1845
--- /dev/null
+++ b/extras/hs-test/infra/suite_no_topo.go
@@ -0,0 +1,112 @@
+package hst
+
+import (
+ "reflect"
+ "runtime"
+ "strings"
+
+ . "github.com/onsi/ginkgo/v2"
+)
+
+const (
+ SingleTopoContainerVpp = "vpp"
+ SingleTopoContainerNginx = "nginx"
+ TapInterfaceName = "htaphost"
+)
+
+var noTopoTests = map[string][]func(s *NoTopoSuite){}
+var noTopoSoloTests = map[string][]func(s *NoTopoSuite){}
+
+type NoTopoSuite struct {
+ HstSuite
+}
+
+func RegisterNoTopoTests(tests ...func(s *NoTopoSuite)) {
+ noTopoTests[getTestFilename()] = tests
+}
+func RegisterNoTopoSoloTests(tests ...func(s *NoTopoSuite)) {
+ noTopoSoloTests[getTestFilename()] = tests
+}
+
+func (s *NoTopoSuite) SetupSuite() {
+ s.HstSuite.SetupSuite()
+ s.LoadNetworkTopology("tap")
+ s.LoadContainerTopology("single")
+}
+
+func (s *NoTopoSuite) SetupTest() {
+ s.HstSuite.SetupTest()
+
+ // Setup test conditions
+ var sessionConfig Stanza
+ sessionConfig.
+ NewStanza("session").
+ Append("enable").
+ Append("use-app-socket-api").Close()
+
+ container := s.GetContainerByName(SingleTopoContainerVpp)
+ vpp, _ := container.newVppInstance(container.AllocatedCpus, sessionConfig)
+ s.AssertNil(vpp.Start())
+
+ tapInterface := s.GetInterfaceByName(TapInterfaceName)
+
+ s.AssertNil(vpp.createTap(tapInterface), "failed to create tap interface")
+}
+
+var _ = Describe("NoTopoSuite", Ordered, ContinueOnFailure, func() {
+ var s NoTopoSuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SetupTest()
+ })
+ AfterAll(func() {
+ s.TearDownSuite()
+ })
+ AfterEach(func() {
+ s.TearDownTest()
+ })
+
+ for filename, tests := range noTopoTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(SuiteTimeout))
+ }
+ }
+})
+
+var _ = Describe("NoTopoSuiteSolo", Ordered, ContinueOnFailure, Serial, func() {
+ var s NoTopoSuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SetupTest()
+ })
+ AfterAll(func() {
+ s.TearDownSuite()
+ })
+ AfterEach(func() {
+ s.TearDownTest()
+ })
+
+ for filename, tests := range noTopoSoloTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, Label("SOLO"), func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(SuiteTimeout))
+ }
+ }
+})
diff --git a/extras/hs-test/infra/suite_ns.go b/extras/hs-test/infra/suite_ns.go
new file mode 100644
index 00000000000..d88730b1c0b
--- /dev/null
+++ b/extras/hs-test/infra/suite_ns.go
@@ -0,0 +1,121 @@
+package hst
+
+import (
+ "fmt"
+ "reflect"
+ "runtime"
+ "strings"
+
+ . "github.com/onsi/ginkgo/v2"
+)
+
+// These correspond to names used in yaml config
+const (
+ ClientInterface = "hclnvpp"
+ ServerInterface = "hsrvvpp"
+)
+
+var nsTests = map[string][]func(s *NsSuite){}
+var nsSoloTests = map[string][]func(s *NsSuite){}
+
+type NsSuite struct {
+ HstSuite
+}
+
+func RegisterNsTests(tests ...func(s *NsSuite)) {
+ nsTests[getTestFilename()] = tests
+}
+func RegisterNsSoloTests(tests ...func(s *NsSuite)) {
+ nsSoloTests[getTestFilename()] = tests
+}
+
+func (s *NsSuite) SetupSuite() {
+ s.HstSuite.SetupSuite()
+ s.ConfigureNetworkTopology("ns")
+ s.LoadContainerTopology("ns")
+}
+
+func (s *NsSuite) SetupTest() {
+ s.HstSuite.SetupTest()
+
+ // Setup test conditions
+ var sessionConfig Stanza
+ sessionConfig.
+ NewStanza("session").
+ Append("enable").
+ Append("use-app-socket-api").
+ Append("evt_qs_memfd_seg").
+ Append("event-queue-length 100000").Close()
+
+ container := s.GetContainerByName("vpp")
+ vpp, _ := container.newVppInstance(container.AllocatedCpus, sessionConfig)
+ s.AssertNil(vpp.Start())
+
+ idx, err := vpp.createAfPacket(s.GetInterfaceByName(ServerInterface))
+ s.AssertNil(err, fmt.Sprint(err))
+ s.AssertNotEqual(0, idx)
+
+ idx, err = vpp.createAfPacket(s.GetInterfaceByName(ClientInterface))
+ s.AssertNil(err, fmt.Sprint(err))
+ s.AssertNotEqual(0, idx)
+
+ container.Exec("chmod 777 -R %s", container.GetContainerWorkDir())
+}
+
+var _ = Describe("NsSuite", Ordered, ContinueOnFailure, func() {
+ var s NsSuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SetupTest()
+ })
+ AfterAll(func() {
+ s.TearDownSuite()
+ })
+ AfterEach(func() {
+ s.TearDownTest()
+ })
+
+ for filename, tests := range nsTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(SuiteTimeout))
+ }
+ }
+})
+
+var _ = Describe("NsSuiteSolo", Ordered, ContinueOnFailure, Serial, func() {
+ var s NsSuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SetupTest()
+ })
+ AfterAll(func() {
+ s.TearDownSuite()
+ })
+ AfterEach(func() {
+ s.TearDownTest()
+ })
+
+ for filename, tests := range nsSoloTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, Label("SOLO"), func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(SuiteTimeout))
+ }
+ }
+})
diff --git a/extras/hs-test/infra/suite_tap.go b/extras/hs-test/infra/suite_tap.go
new file mode 100644
index 00000000000..c02ab8e8535
--- /dev/null
+++ b/extras/hs-test/infra/suite_tap.go
@@ -0,0 +1,88 @@
+package hst
+
+import (
+ "reflect"
+ "runtime"
+ "strings"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+)
+
+type TapSuite struct {
+ HstSuite
+}
+
+var tapTests = map[string][]func(s *TapSuite){}
+var tapSoloTests = map[string][]func(s *TapSuite){}
+
+func RegisterTapTests(tests ...func(s *TapSuite)) {
+ tapTests[getTestFilename()] = tests
+}
+func RegisterTapSoloTests(tests ...func(s *TapSuite)) {
+ tapSoloTests[getTestFilename()] = tests
+}
+
+func (s *TapSuite) SetupSuite() {
+ time.Sleep(1 * time.Second)
+ s.HstSuite.SetupSuite()
+ s.ConfigureNetworkTopology("tap")
+}
+
+var _ = Describe("TapSuite", Ordered, ContinueOnFailure, func() {
+ var s TapSuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SetupTest()
+ })
+ AfterAll(func() {
+ s.TearDownSuite()
+ })
+ AfterEach(func() {
+ s.TearDownTest()
+ })
+
+ for filename, tests := range tapTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(SuiteTimeout))
+ }
+ }
+})
+
+var _ = Describe("TapSuiteSolo", Ordered, ContinueOnFailure, Serial, func() {
+ var s TapSuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SetupTest()
+ })
+ AfterAll(func() {
+ s.TearDownSuite()
+ })
+ AfterEach(func() {
+ s.TearDownTest()
+ })
+
+ for filename, tests := range tapSoloTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, Label("SOLO"), func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(SuiteTimeout))
+ }
+ }
+})
diff --git a/extras/hs-test/infra/suite_veth.go b/extras/hs-test/infra/suite_veth.go
new file mode 100644
index 00000000000..d7bfa55acd0
--- /dev/null
+++ b/extras/hs-test/infra/suite_veth.go
@@ -0,0 +1,146 @@
+package hst
+
+import (
+ "fmt"
+ "reflect"
+ "runtime"
+ "strings"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+)
+
+// These correspond to names used in yaml config
+const (
+ ServerInterfaceName = "srv"
+ ClientInterfaceName = "cln"
+)
+
+var vethTests = map[string][]func(s *VethsSuite){}
+var vethSoloTests = map[string][]func(s *VethsSuite){}
+
+type VethsSuite struct {
+ HstSuite
+}
+
+func RegisterVethTests(tests ...func(s *VethsSuite)) {
+ vethTests[getTestFilename()] = tests
+}
+func RegisterSoloVethTests(tests ...func(s *VethsSuite)) {
+ vethSoloTests[getTestFilename()] = tests
+}
+
+func (s *VethsSuite) SetupSuite() {
+ time.Sleep(1 * time.Second)
+ s.HstSuite.SetupSuite()
+ s.ConfigureNetworkTopology("2peerVeth")
+ s.LoadContainerTopology("2peerVeth")
+}
+
+func (s *VethsSuite) SetupTest() {
+ s.HstSuite.SetupTest()
+
+ // Setup test conditions
+ var sessionConfig Stanza
+ sessionConfig.
+ NewStanza("session").
+ Append("enable").
+ Append("use-app-socket-api").Close()
+
+ // ... For server
+ serverContainer := s.GetContainerByName("server-vpp")
+
+ serverVpp, err := serverContainer.newVppInstance(serverContainer.AllocatedCpus, sessionConfig)
+ s.AssertNotNil(serverVpp, fmt.Sprint(err))
+
+ s.SetupServerVpp()
+
+ // ... For client
+ clientContainer := s.GetContainerByName("client-vpp")
+
+ clientVpp, err := clientContainer.newVppInstance(clientContainer.AllocatedCpus, sessionConfig)
+ s.AssertNotNil(clientVpp, fmt.Sprint(err))
+
+ s.setupClientVpp()
+}
+
+func (s *VethsSuite) SetupServerVpp() {
+ serverVpp := s.GetContainerByName("server-vpp").VppInstance
+ s.AssertNil(serverVpp.Start())
+
+ serverVeth := s.GetInterfaceByName(ServerInterfaceName)
+ idx, err := serverVpp.createAfPacket(serverVeth)
+ s.AssertNil(err, fmt.Sprint(err))
+ s.AssertNotEqual(0, idx)
+}
+
+func (s *VethsSuite) setupClientVpp() {
+ clientVpp := s.GetContainerByName("client-vpp").VppInstance
+ s.AssertNil(clientVpp.Start())
+
+ clientVeth := s.GetInterfaceByName(ClientInterfaceName)
+ idx, err := clientVpp.createAfPacket(clientVeth)
+ s.AssertNil(err, fmt.Sprint(err))
+ s.AssertNotEqual(0, idx)
+}
+
+var _ = Describe("VethsSuite", Ordered, ContinueOnFailure, func() {
+ var s VethsSuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SetupTest()
+ })
+ AfterAll(func() {
+ s.TearDownSuite()
+
+ })
+ AfterEach(func() {
+ s.TearDownTest()
+ })
+
+ // https://onsi.github.io/ginkgo/#dynamically-generating-specs
+ for filename, tests := range vethTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(SuiteTimeout))
+ }
+ }
+})
+
+var _ = Describe("VethsSuiteSolo", Ordered, ContinueOnFailure, Serial, func() {
+ var s VethsSuite
+ BeforeAll(func() {
+ s.SetupSuite()
+ })
+ BeforeEach(func() {
+ s.SetupTest()
+ })
+ AfterAll(func() {
+ s.TearDownSuite()
+ })
+ AfterEach(func() {
+ s.TearDownTest()
+ })
+
+ // https://onsi.github.io/ginkgo/#dynamically-generating-specs
+ for filename, tests := range vethSoloTests {
+ for _, test := range tests {
+ test := test
+ pc := reflect.ValueOf(test).Pointer()
+ funcValue := runtime.FuncForPC(pc)
+ testName := filename + "/" + strings.Split(funcValue.Name(), ".")[2]
+ It(testName, Label("SOLO"), func(ctx SpecContext) {
+ s.Log(testName + ": BEGIN")
+ test(&s)
+ }, SpecTimeout(SuiteTimeout))
+ }
+ }
+})
diff --git a/extras/hs-test/infra/topo.go b/extras/hs-test/infra/topo.go
new file mode 100644
index 00000000000..f9c6528ba93
--- /dev/null
+++ b/extras/hs-test/infra/topo.go
@@ -0,0 +1,25 @@
+package hst
+
+import (
+ "fmt"
+)
+
+type NetDevConfig map[string]interface{}
+type ContainerConfig map[string]interface{}
+type VolumeConfig map[string]interface{}
+
+type YamlTopology struct {
+ Devices []NetDevConfig `yaml:"devices"`
+ Containers []ContainerConfig `yaml:"containers"`
+ Volumes []VolumeConfig `yaml:"volumes"`
+}
+
+func addAddress(device, address, ns string) error {
+ c := []string{"ip", "addr", "add", address, "dev", device}
+ cmd := appendNetns(c, ns)
+ err := cmd.Run()
+ if err != nil {
+ return fmt.Errorf("failed to set ip address for %s: %v", device, err)
+ }
+ return nil
+}
diff --git a/extras/hs-test/infra/utils.go b/extras/hs-test/infra/utils.go
new file mode 100644
index 00000000000..9619efbbf63
--- /dev/null
+++ b/extras/hs-test/infra/utils.go
@@ -0,0 +1,119 @@
+package hst
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "os"
+ "strings"
+ "time"
+)
+
+const networkTopologyDir string = "topo-network/"
+const containerTopologyDir string = "topo-containers/"
+
+type Stanza struct {
+ content string
+ pad int
+}
+
+type ActionResult struct {
+ Err error
+ Desc string
+ ErrOutput string
+ StdOutput string
+}
+
+type JsonResult struct {
+ Code int
+ Desc string
+ ErrOutput string
+ StdOutput string
+}
+
+func AssertFileSize(f1, f2 string) error {
+ fi1, err := os.Stat(f1)
+ if err != nil {
+ return err
+ }
+
+ fi2, err1 := os.Stat(f2)
+ if err1 != nil {
+ return err1
+ }
+
+ if fi1.Size() != fi2.Size() {
+ return fmt.Errorf("file sizes differ (%d vs %d)", fi1.Size(), fi2.Size())
+ }
+ return nil
+}
+
+func (c *Stanza) NewStanza(name string) *Stanza {
+ c.Append("\n" + name + " {")
+ c.pad += 2
+ return c
+}
+
+func (c *Stanza) Append(name string) *Stanza {
+ c.content += strings.Repeat(" ", c.pad)
+ c.content += name + "\n"
+ return c
+}
+
+func (c *Stanza) Close() *Stanza {
+ c.content += "}\n"
+ c.pad -= 2
+ return c
+}
+
+func (s *Stanza) ToString() string {
+ return s.content
+}
+
+func (s *Stanza) SaveToFile(fileName string) error {
+ fo, err := os.Create(fileName)
+ if err != nil {
+ return err
+ }
+ defer fo.Close()
+
+ _, err = io.Copy(fo, strings.NewReader(s.content))
+ return err
+}
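+
+// A minimal usage sketch of the Stanza builder (names and values are
+// illustrative):
+//
+//	var s Stanza
+//	s.NewStanza("session").
+//		Append("enable").
+//		Append("use-app-socket-api").Close()
+//	fmt.Print(s.ToString())
+//
+// which renders (after a leading blank line) as:
+//
+//	session {
+//	  enable
+//	  use-app-socket-api
+//	}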
+
+// NewHttpClient creates an [http.Client] with the proxy and keep-alives disabled,
+// redirects not followed, and a 30-second timeout.
+func NewHttpClient() *http.Client {
+ transport := http.DefaultTransport
+ transport.(*http.Transport).Proxy = nil
+ transport.(*http.Transport).DisableKeepAlives = true
+ client := &http.Client{
+ Transport: transport,
+ Timeout: time.Second * 30,
+ CheckRedirect: func(req *http.Request, via []*http.Request) error {
+ return http.ErrUseLastResponse
+ }}
+ return client
+}
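+
+// A usage sketch for NewHttpClient (the URL is illustrative):
+//
+//	client := NewHttpClient()
+//	resp, err := client.Get("http://10.10.10.1:80/index.html")
+//	if err == nil {
+//		defer resp.Body.Close()
+//	}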
+
+func TcpSendReceive(address, data string) (string, error) {
+ conn, err := net.DialTimeout("tcp", address, time.Second*30)
+ if err != nil {
+ return "", err
+ }
+ defer conn.Close()
+ err = conn.SetDeadline(time.Now().Add(time.Second * 30))
+ if err != nil {
+ return "", err
+ }
+ _, err = conn.Write([]byte(data))
+ if err != nil {
+ return "", err
+ }
+ reply := make([]byte, 1024)
+ _, err = conn.Read(reply)
+ if err != nil {
+ return "", err
+ }
+ return string(reply), nil
+}
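+
+// A usage sketch for TcpSendReceive (address and payload are illustrative);
+// note that the reply is truncated to the 1024-byte read buffer above:
+//
+//	reply, err := TcpSendReceive("10.10.10.1:8080", "hello")
+//	if err == nil {
+//		fmt.Println(reply)
+//	}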
diff --git a/extras/hs-test/infra/vppinstance.go b/extras/hs-test/infra/vppinstance.go
new file mode 100644
index 00000000000..5164a54aa9a
--- /dev/null
+++ b/extras/hs-test/infra/vppinstance.go
@@ -0,0 +1,500 @@
+package hst
+
+import (
+ "context"
+ "fmt"
+ "go.fd.io/govpp/binapi/ethernet_types"
+ "io"
+ "net"
+ "os"
+ "os/exec"
+ "os/signal"
+ "strconv"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/edwarnicke/exechelper"
+ . "github.com/onsi/ginkgo/v2"
+ "github.com/sirupsen/logrus"
+
+ "go.fd.io/govpp"
+ "go.fd.io/govpp/api"
+ "go.fd.io/govpp/binapi/af_packet"
+ interfaces "go.fd.io/govpp/binapi/interface"
+ "go.fd.io/govpp/binapi/interface_types"
+ "go.fd.io/govpp/binapi/session"
+ "go.fd.io/govpp/binapi/tapv2"
+ "go.fd.io/govpp/core"
+)
+
+const vppConfigTemplate = `unix {
+ nodaemon
+ log %[1]s%[4]s
+ full-coredump
+ cli-listen %[1]s%[2]s
+ runtime-dir %[1]s/var/run
+ gid vpp
+}
+
+api-trace {
+ on
+}
+
+api-segment {
+ gid vpp
+}
+
+socksvr {
+ socket-name %[1]s%[3]s
+}
+
+statseg {
+ socket-name %[1]s/var/run/vpp/stats.sock
+}
+
+plugins {
+ plugin default { disable }
+
+ plugin unittest_plugin.so { enable }
+ plugin quic_plugin.so { enable }
+ plugin af_packet_plugin.so { enable }
+ plugin hs_apps_plugin.so { enable }
+ plugin http_plugin.so { enable }
+ plugin http_static_plugin.so { enable }
+ plugin prom_plugin.so { enable }
+ plugin tlsopenssl_plugin.so { enable }
+ plugin ping_plugin.so { enable }
+ plugin nsim_plugin.so { enable }
+ plugin mactime_plugin.so { enable }
+}
+
+logging {
+ default-log-level debug
+ default-syslog-log-level debug
+}
+
+`
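+
+// The template's indexed verbs are filled in by Start() below:
+// %[1]s - container work dir, %[2]s - CLI socket path,
+// %[3]s - API socket path, %[4]s - log file path.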
+
+const (
+ defaultCliSocketFilePath = "/var/run/vpp/cli.sock"
+ defaultApiSocketFilePath = "/var/run/vpp/api.sock"
+ defaultLogFilePath = "/var/log/vpp/vpp.log"
+)
+
+type VppInstance struct {
+ Container *Container
+ AdditionalConfig []Stanza
+ Connection *core.Connection
+ ApiStream api.Stream
+ Cpus []int
+}
+
+func (vpp *VppInstance) getSuite() *HstSuite {
+ return vpp.Container.Suite
+}
+
+func (vpp *VppInstance) getCliSocket() string {
+ return fmt.Sprintf("%s%s", vpp.Container.GetContainerWorkDir(), defaultCliSocketFilePath)
+}
+
+func (vpp *VppInstance) getRunDir() string {
+ return vpp.Container.GetContainerWorkDir() + "/var/run/vpp"
+}
+
+func (vpp *VppInstance) getLogDir() string {
+ return vpp.Container.GetContainerWorkDir() + "/var/log/vpp"
+}
+
+func (vpp *VppInstance) getEtcDir() string {
+ return vpp.Container.GetContainerWorkDir() + "/etc/vpp"
+}
+
+func (vpp *VppInstance) Start() error {
+ maxReconnectAttempts := 3
+ // Replace default logger in govpp with our own
+ govppLogger := logrus.New()
+ govppLogger.SetOutput(io.MultiWriter(vpp.getSuite().Logger.Writer(), GinkgoWriter))
+ core.SetLogger(govppLogger)
+ // Create folders
+ containerWorkDir := vpp.Container.GetContainerWorkDir()
+
+ vpp.Container.Exec("mkdir --mode=0700 -p " + vpp.getRunDir())
+ vpp.Container.Exec("mkdir --mode=0700 -p " + vpp.getLogDir())
+ vpp.Container.Exec("mkdir --mode=0700 -p " + vpp.getEtcDir())
+
+ // Create startup.conf inside the container
+ configContent := fmt.Sprintf(
+ vppConfigTemplate,
+ containerWorkDir,
+ defaultCliSocketFilePath,
+ defaultApiSocketFilePath,
+ defaultLogFilePath,
+ )
+ configContent += vpp.generateCpuConfig()
+ for _, c := range vpp.AdditionalConfig {
+ configContent += c.ToString()
+ }
+ startupFileName := vpp.getEtcDir() + "/startup.conf"
+ vpp.Container.CreateFile(startupFileName, configContent)
+
+ // create wrapper script for vppctl with proper CLI socket path
+ cliContent := "#!/usr/bin/bash\nvppctl -s " + vpp.getRunDir() + "/cli.sock"
+ vppcliFileName := "/usr/bin/vppcli"
+ vpp.Container.CreateFile(vppcliFileName, cliContent)
+ vpp.Container.Exec("chmod 0755 " + vppcliFileName)
+
+ vpp.getSuite().Log("starting vpp")
+ if *IsVppDebug {
+ // default = 3; VPP will time out while debugging if there are not enough reconnect attempts
+ maxReconnectAttempts = 5000
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, syscall.SIGQUIT)
+ cont := make(chan bool, 1)
+ go func() {
+ <-sig
+ cont <- true
+ }()
+
+ vpp.Container.ExecServer("su -c \"vpp -c " + startupFileName + " &> /proc/1/fd/1\"")
+ fmt.Println("run the following command in a different terminal:")
+ fmt.Println("docker exec -it " + vpp.Container.Name + " gdb -ex \"attach $(docker exec " + vpp.Container.Name + " pidof vpp)\"")
+ fmt.Println("Afterwards press CTRL+\\ to continue")
+ <-cont
+ fmt.Println("continuing...")
+ } else {
+ // Start VPP
+ vpp.Container.ExecServer("su -c \"vpp -c " + startupFileName + " &> /proc/1/fd/1\"")
+ }
+
+ vpp.getSuite().Log("connecting to vpp")
+ // Connect to VPP and store the connection
+ sockAddress := vpp.Container.GetHostWorkDir() + defaultApiSocketFilePath
+ conn, connEv, err := govpp.AsyncConnect(
+ sockAddress,
+ maxReconnectAttempts,
+ core.DefaultReconnectInterval)
+ if err != nil {
+ vpp.getSuite().Log("async connect error: " + fmt.Sprint(err))
+ return err
+ }
+ vpp.Connection = conn
+
+ // ... wait for Connected event
+ e := <-connEv
+ if e.State != core.Connected {
+ vpp.getSuite().Log("connecting to VPP failed: " + fmt.Sprint(e.Error))
+ }
+
+ ch, err := conn.NewStream(
+ context.Background(),
+ core.WithRequestSize(50),
+ core.WithReplySize(50),
+ core.WithReplyTimeout(time.Second*5))
+ if err != nil {
+ vpp.getSuite().Log("creating stream failed: " + fmt.Sprint(err))
+ return err
+ }
+ vpp.ApiStream = ch
+
+ return nil
+}
+
+func (vpp *VppInstance) Vppctl(command string, arguments ...any) string {
+ vppCliCommand := fmt.Sprintf(command, arguments...)
+ containerExecCommand := fmt.Sprintf("docker exec --detach=false %[1]s vppctl -s %[2]s %[3]s",
+ vpp.Container.Name, vpp.getCliSocket(), vppCliCommand)
+ vpp.getSuite().Log(containerExecCommand)
+ output, err := exechelper.CombinedOutput(containerExecCommand)
+ vpp.getSuite().AssertNil(err)
+
+ return string(output)
+}
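+
+// For example, given a started VppInstance vpp, a debug CLI command can be
+// issued as (the command shown is illustrative):
+//
+//	output := vpp.Vppctl("show version")
+//	vpp.getSuite().Log(output)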
+
+func (vpp *VppInstance) GetSessionStat(stat string) int {
+ o := vpp.Vppctl("show session stats")
+ vpp.getSuite().Log(o)
+ for _, line := range strings.Split(o, "\n") {
+ if strings.Contains(line, stat) {
+ tokens := strings.Split(strings.TrimSpace(line), " ")
+ val, err := strconv.Atoi(tokens[0])
+ if err != nil {
+ Fail("failed to parse stat value: " + fmt.Sprint(err))
+ return 0
+ }
+ return val
+ }
+ }
+ return 0
+}
+
+func (vpp *VppInstance) WaitForApp(appName string, timeout int) {
+ vpp.getSuite().Log("waiting for app " + appName)
+ for i := 0; i < timeout; i++ {
+ o := vpp.Vppctl("show app")
+ if strings.Contains(o, appName) {
+ return
+ }
+ time.Sleep(1 * time.Second)
+ }
+ vpp.getSuite().AssertNil(1, "Timeout while waiting for app '%s'", appName)
+}
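+
+// Hypothetical usage, waiting up to 10 seconds for an application to show up
+// in "show app" (the app name is illustrative):
+//
+//	vpp.WaitForApp("vpp_echo", 10)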
+
+func (vpp *VppInstance) createAfPacket(
+ veth *NetInterface,
+) (interface_types.InterfaceIndex, error) {
+ createReq := &af_packet.AfPacketCreateV3{
+ Mode: 1,
+ UseRandomHwAddr: true,
+ HostIfName: veth.Name(),
+ Flags: af_packet.AfPacketFlags(11),
+ }
+ if veth.HwAddress != (MacAddress{}) {
+ createReq.UseRandomHwAddr = false
+ createReq.HwAddr = veth.HwAddress
+ }
+
+ vpp.getSuite().Log("create af-packet interface " + veth.Name())
+ if err := vpp.ApiStream.SendMsg(createReq); err != nil {
+ vpp.getSuite().HstFail()
+ return 0, err
+ }
+ replymsg, err := vpp.ApiStream.RecvMsg()
+ if err != nil {
+ return 0, err
+ }
+ reply := replymsg.(*af_packet.AfPacketCreateV3Reply)
+ err = api.RetvalToVPPApiError(reply.Retval)
+ if err != nil {
+ return 0, err
+ }
+
+ veth.Index = reply.SwIfIndex
+
+ // Set to up
+ upReq := &interfaces.SwInterfaceSetFlags{
+ SwIfIndex: veth.Index,
+ Flags: interface_types.IF_STATUS_API_FLAG_ADMIN_UP,
+ }
+
+ vpp.getSuite().Log("set af-packet interface " + veth.Name() + " up")
+ if err := vpp.ApiStream.SendMsg(upReq); err != nil {
+ return 0, err
+ }
+ replymsg, err = vpp.ApiStream.RecvMsg()
+ if err != nil {
+ return 0, err
+ }
+ reply2 := replymsg.(*interfaces.SwInterfaceSetFlagsReply)
+ if err = api.RetvalToVPPApiError(reply2.Retval); err != nil {
+ return 0, err
+ }
+
+ // Add address
+ if veth.AddressWithPrefix() == (AddressWithPrefix{}) {
+ var err error
+ var ip4Address string
+ if ip4Address, err = veth.Ip4AddrAllocator.NewIp4InterfaceAddress(veth.Peer.NetworkNumber); err == nil {
+ veth.Ip4Address = ip4Address
+ } else {
+ return 0, err
+ }
+ }
+ addressReq := &interfaces.SwInterfaceAddDelAddress{
+ IsAdd: true,
+ SwIfIndex: veth.Index,
+ Prefix: veth.AddressWithPrefix(),
+ }
+
+ vpp.getSuite().Log("af-packet interface " + veth.Name() + " add address " + veth.Ip4Address)
+ if err := vpp.ApiStream.SendMsg(addressReq); err != nil {
+ return 0, err
+ }
+ replymsg, err = vpp.ApiStream.RecvMsg()
+ if err != nil {
+ return 0, err
+ }
+ reply3 := replymsg.(*interfaces.SwInterfaceAddDelAddressReply)
+ err = api.RetvalToVPPApiError(reply3.Retval)
+ if err != nil {
+ return 0, err
+ }
+
+ return veth.Index, nil
+}
+
+func (vpp *VppInstance) addAppNamespace(
+ secret uint64,
+ ifx interface_types.InterfaceIndex,
+ namespaceId string,
+) error {
+ req := &session.AppNamespaceAddDelV4{
+ IsAdd: true,
+ Secret: secret,
+ SwIfIndex: ifx,
+ NamespaceID: namespaceId,
+ SockName: defaultApiSocketFilePath,
+ }
+
+ vpp.getSuite().Log("add app namespace " + namespaceId)
+ if err := vpp.ApiStream.SendMsg(req); err != nil {
+ return err
+ }
+ replymsg, err := vpp.ApiStream.RecvMsg()
+ if err != nil {
+ return err
+ }
+ reply := replymsg.(*session.AppNamespaceAddDelV4Reply)
+ if err = api.RetvalToVPPApiError(reply.Retval); err != nil {
+ return err
+ }
+
+ sessionReq := &session.SessionEnableDisable{
+ IsEnable: true,
+ }
+
+ vpp.getSuite().Log("enable app namespace " + namespaceId)
+ if err := vpp.ApiStream.SendMsg(sessionReq); err != nil {
+ return err
+ }
+ replymsg, err = vpp.ApiStream.RecvMsg()
+ if err != nil {
+ return err
+ }
+ reply2 := replymsg.(*session.SessionEnableDisableReply)
+ if err = api.RetvalToVPPApiError(reply2.Retval); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (vpp *VppInstance) createTap(
+ tap *NetInterface,
+ tapId ...uint32,
+) error {
+ var id uint32 = 1
+ if len(tapId) > 0 {
+ id = tapId[0]
+ }
+ createTapReq := &tapv2.TapCreateV3{
+ ID: id,
+ HostIfNameSet: true,
+ HostIfName: tap.Name(),
+ HostIP4PrefixSet: true,
+ HostIP4Prefix: tap.Ip4AddressWithPrefix(),
+ }
+
+ vpp.getSuite().Log("create tap interface " + tap.Name())
+ // Create tap interface
+ if err := vpp.ApiStream.SendMsg(createTapReq); err != nil {
+ return err
+ }
+ replymsg, err := vpp.ApiStream.RecvMsg()
+ if err != nil {
+ return err
+ }
+ reply := replymsg.(*tapv2.TapCreateV3Reply)
+ if err = api.RetvalToVPPApiError(reply.Retval); err != nil {
+ return err
+ }
+ tap.Peer.Index = reply.SwIfIndex
+
+ // Get name and mac
+ if err := vpp.ApiStream.SendMsg(&interfaces.SwInterfaceDump{
+ SwIfIndex: reply.SwIfIndex,
+ }); err != nil {
+ return err
+ }
+ replymsg, err = vpp.ApiStream.RecvMsg()
+ if err != nil {
+ return err
+ }
+ ifDetails := replymsg.(*interfaces.SwInterfaceDetails)
+ tap.Peer.name = ifDetails.InterfaceName
+ tap.Peer.HwAddress = ifDetails.L2Address
+
+ // Add address
+ addAddressReq := &interfaces.SwInterfaceAddDelAddress{
+ IsAdd: true,
+ SwIfIndex: reply.SwIfIndex,
+ Prefix: tap.Peer.AddressWithPrefix(),
+ }
+
+ vpp.getSuite().Log("tap interface " + tap.Name() + " add address " + tap.Peer.Ip4Address)
+ if err := vpp.ApiStream.SendMsg(addAddressReq); err != nil {
+ return err
+ }
+ replymsg, err = vpp.ApiStream.RecvMsg()
+ if err != nil {
+ return err
+ }
+ reply2 := replymsg.(*interfaces.SwInterfaceAddDelAddressReply)
+ if err = api.RetvalToVPPApiError(reply2.Retval); err != nil {
+ return err
+ }
+
+ // Set interface to up
+ upReq := &interfaces.SwInterfaceSetFlags{
+ SwIfIndex: reply.SwIfIndex,
+ Flags: interface_types.IF_STATUS_API_FLAG_ADMIN_UP,
+ }
+
+ vpp.getSuite().Log("set tap interface " + tap.Name() + " up")
+ if err := vpp.ApiStream.SendMsg(upReq); err != nil {
+ return err
+ }
+ replymsg, err = vpp.ApiStream.RecvMsg()
+ if err != nil {
+ return err
+ }
+ reply3 := replymsg.(*interfaces.SwInterfaceSetFlagsReply)
+ if err = api.RetvalToVPPApiError(reply3.Retval); err != nil {
+ return err
+ }
+
+ // Get host mac
+ netIntf, err := net.InterfaceByName(tap.Name())
+ if err == nil {
+ tap.HwAddress, _ = ethernet_types.ParseMacAddress(netIntf.HardwareAddr.String())
+ }
+
+ return nil
+}
+
+func (vpp *VppInstance) saveLogs() {
+ logTarget := vpp.Container.getLogDirPath() + "vppinstance-" + vpp.Container.Name + ".log"
+ logSource := vpp.Container.GetHostWorkDir() + defaultLogFilePath
+ cmd := exec.Command("cp", logSource, logTarget)
+ vpp.getSuite().Log(cmd.String())
+ cmd.Run()
+}
+
+func (vpp *VppInstance) Disconnect() {
+ vpp.Connection.Disconnect()
+ vpp.ApiStream.Close()
+}
+
+func (vpp *VppInstance) generateCpuConfig() string {
+ var c Stanza
+ var s string
+ if len(vpp.Cpus) < 1 {
+ return ""
+ }
+ c.NewStanza("cpu").
+ Append(fmt.Sprintf("main-core %d", vpp.Cpus[0]))
+ vpp.getSuite().Log(fmt.Sprintf("main-core %d", vpp.Cpus[0]))
+ workers := vpp.Cpus[1:]
+
+ if len(workers) > 0 {
+ for i := 0; i < len(workers); i++ {
+ if i != 0 {
+ s = s + ", "
+ }
+ s = s + fmt.Sprintf("%d", workers[i])
+ }
+ c.Append(fmt.Sprintf("corelist-workers %s", s))
+ vpp.getSuite().Log("corelist-workers " + s)
+ }
+ return c.Close().ToString()
+}
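+
+// For example, with vpp.Cpus = []int{1, 2, 3} (values are illustrative) the
+// generated fragment is:
+//
+//	cpu {
+//	  main-core 1
+//	  corelist-workers 2, 3
+//	}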