 extras/hs-test/README.rst               | 20
 extras/hs-test/infra/container.go       |  2
 extras/hs-test/infra/cpu.go             | 10
 extras/hs-test/infra/hst_suite.go       | 40
 extras/hs-test/infra/suite_vpp_proxy.go |  2
 extras/hs-test/proxy_test.go            |  9
 src/vnet/pg/pg.h                        | 51
 test/test_pg_stream.py                  | 98
 8 files changed, 205 insertions(+), 27 deletions(-)
diff --git a/extras/hs-test/README.rst b/extras/hs-test/README.rst
index c62be5a84aa..25f512ca117 100644
--- a/extras/hs-test/README.rst
+++ b/extras/hs-test/README.rst
@@ -68,6 +68,10 @@ For adding a new suite, please see `Modifying the framework`_ below.
Assumed are two docker containers, each with its own VPP instance running. One VPP then pings the other.
This can be put in file ``extras/hs-test/my_test.go`` and run with command ``make test TEST=MyTest``.
+To add a multi-worker test, name it ``[name]MTTest``. The framework will then allocate 3 CPUs to the VPP container, regardless of what ``CPUS`` is set to.
+Only a single multi-worker VPP container is supported for now. Please register multi-worker tests as Solo tests to avoid reusing the same cores
+when tests run in parallel.
+
::
package main
@@ -77,7 +81,12 @@ This can be put in file ``extras/hs-test/my_test.go`` and run with command ``mak
)
func init(){
- RegisterMySuiteTest(MyTest)
+ RegisterMySuiteTests(MyTest)
+ RegisterSoloMySuiteTests(MyMTTest)
+ }
+
+ func MyMTTest(s *MySuite){
+ MyTest(s)
}
func MyTest(s *MySuite) {
@@ -86,8 +95,8 @@ This can be put in file ``extras/hs-test/my_test.go`` and run with command ``mak
serverVethAddress := s.NetInterfaces["server-iface"].Ip4AddressString()
result := clientVpp.Vppctl("ping " + serverVethAddress)
- s.Log(result)
s.AssertNotNil(result)
+ s.Log(result)
}
@@ -100,6 +109,7 @@ The framework allows us to filter test cases in a few different ways, using ``ma
* File name
* Test name
* All of the above as long as they are ordered properly, e.g. ``make test TEST=VethsSuite.http_test.go.HeaderServerTest``
+ * Multiple tests/suites: ``make test TEST=HttpClient,LdpSuite``
**Names are case sensitive!**
@@ -308,6 +318,12 @@ or a new version incompatibility issue occurs.
Debugging a test
----------------
+DRYRUN
+^^^^^^
+
+``make test TEST=[name] DRYRUN=true`` will set up and start most of the containers, but won't run any tests or start VPP. VPP and its interfaces
+are configured automatically once you start VPP with the generated startup.conf file.
+
GDB
^^^
diff --git a/extras/hs-test/infra/container.go b/extras/hs-test/infra/container.go
index cc79a5cbb18..f6ccd88e118 100644
--- a/extras/hs-test/infra/container.go
+++ b/extras/hs-test/infra/container.go
@@ -214,7 +214,7 @@ func (c *Container) Create() error {
func (c *Container) allocateCpus() {
c.Suite.StartedContainers = append(c.Suite.StartedContainers, c)
- c.AllocatedCpus = c.Suite.AllocateCpus()
+ c.AllocatedCpus = c.Suite.AllocateCpus(c.Name)
c.Suite.Log("Allocated CPUs " + fmt.Sprint(c.AllocatedCpus) + " to container " + c.Name)
}
diff --git a/extras/hs-test/infra/cpu.go b/extras/hs-test/infra/cpu.go
index 615f8a3f87d..7a29eb4a9c3 100644
--- a/extras/hs-test/infra/cpu.go
+++ b/extras/hs-test/infra/cpu.go
@@ -35,17 +35,17 @@ func iterateAndAppend(start int, end int, slice []int) []int {
var cpuAllocator *CpuAllocatorT = nil
-func (c *CpuAllocatorT) Allocate(containerCount int, nCpus int) (*CpuContext, error) {
+func (c *CpuAllocatorT) Allocate(containerCount int, nCpus int, offset int) (*CpuContext, error) {
var cpuCtx CpuContext
// indexes, not actual cores
var minCpu, maxCpu int
if c.runningInCi {
- minCpu = ((c.buildNumber) * c.maxContainerCount * nCpus)
- maxCpu = ((c.buildNumber + 1) * c.maxContainerCount * nCpus) - 1
+ minCpu = ((c.buildNumber) * c.maxContainerCount * nCpus) + offset
+ maxCpu = ((c.buildNumber + 1) * c.maxContainerCount * nCpus) - 1 + offset
} else {
- minCpu = ((GinkgoParallelProcess() - 1) * c.maxContainerCount * nCpus)
- maxCpu = (GinkgoParallelProcess() * c.maxContainerCount * nCpus) - 1
+ minCpu = ((GinkgoParallelProcess() - 1) * c.maxContainerCount * nCpus) + offset
+ maxCpu = (GinkgoParallelProcess() * c.maxContainerCount * nCpus) - 1 + offset
}
if len(c.cpus)-1 < maxCpu {
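For intuition, here is a minimal Go sketch of the arithmetic above, with a hypothetical ``cpuRange`` helper in place of ``CpuAllocatorT.Allocate`` and a plain ``parallelProcess`` integer standing in for ``GinkgoParallelProcess()``; the new ``offset`` parameter simply shifts each process's CPU-index window to the right::

    // cpuRange mirrors the non-CI branch of Allocate: each Ginkgo process
    // owns a window of maxContainerCount*nCpus CPU indexes, and offset
    // shifts that whole window to the right.
    func cpuRange(parallelProcess, maxContainerCount, nCpus, offset int) (minCpu, maxCpu int) {
        minCpu = (parallelProcess-1)*maxContainerCount*nCpus + offset
        maxCpu = parallelProcess*maxContainerCount*nCpus - 1 + offset
        return
    }

    // cpuRange(1, 4, 2, 0) == (0, 7)  -- the pre-patch behaviour
    // cpuRange(1, 4, 2, 2) == (2, 9)  -- the same window shifted by two cores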
diff --git a/extras/hs-test/infra/hst_suite.go b/extras/hs-test/infra/hst_suite.go
index bf46dfdef7e..d6b4006a4e4 100644
--- a/extras/hs-test/infra/hst_suite.go
+++ b/extras/hs-test/infra/hst_suite.go
@@ -135,12 +135,38 @@ func (s *HstSuite) SetupSuite() {
s.CpuCount = *NConfiguredCpus
}
-func (s *HstSuite) AllocateCpus() []int {
- cpuCtx, err := s.CpuAllocator.Allocate(len(s.StartedContainers), s.CpuCount)
- // using Fail instead of AssertNil to make error message more readable
- if err != nil {
- Fail(fmt.Sprint(err))
+func (s *HstSuite) AllocateCpus(containerName string) []int {
+ var cpuCtx *CpuContext
+ var err error
+ currentTestName := CurrentSpecReport().LeafNodeText
+
+ if strings.Contains(currentTestName, "MTTest") {
+ prevContainerCount := s.CpuAllocator.maxContainerCount
+ if strings.Contains(containerName, "vpp") {
+ // CPU range is assigned based on the Ginkgo process index (or build number if
+ // running in the CI), *NConfiguredCpus and maxContainerCount.
+ // maxContainerCount is set to 4 when CpuAllocator is initialized.
+ // 4 is not a random number - all of our suites use a maximum of 4 containers simultaneously,
+ // and it's also the maximum number of containers we can run with *NConfiguredCpus=2 (with CPU0=true)
+ // on processors with 8 threads. Currently, the CpuAllocator puts all cores into a slice,
+ // makes the length of the slice divisible by 4x*NConfiguredCpus, and then the minCpu and
+ // maxCpu (range) for each container is calculated. Then we just offset based on minCpu,
+ // the number of started containers and *NConfiguredCpus. This way, every container
+ // uses the correct CPUs, even if multiple NUMA nodes are available.
+ // However, because of this, if we want to assign a different number of cores to different containers,
+ // we have to change maxContainerCount to manipulate the CPU range. Hopefully a temporary workaround.
+ s.CpuAllocator.maxContainerCount = 1
+ cpuCtx, err = s.CpuAllocator.Allocate(1, 3, 0)
+ } else {
+ s.CpuAllocator.maxContainerCount = 3
+ cpuCtx, err = s.CpuAllocator.Allocate(len(s.StartedContainers), s.CpuCount, 2)
+ }
+ s.CpuAllocator.maxContainerCount = prevContainerCount
+ } else {
+ cpuCtx, err = s.CpuAllocator.Allocate(len(s.StartedContainers), s.CpuCount, 0)
}
+
+ s.AssertNil(err)
s.AddCpuContext(cpuCtx)
return cpuCtx.cpus
}
@@ -369,8 +395,8 @@ func (s *HstSuite) SkipIfNotEnoughAvailableCpus() {
if availableCpus < maxRequestedCpu {
s.Skip(fmt.Sprintf("Test case cannot allocate requested cpus "+
- "(%d cpus * %d containers, %d available). Try using 'CPU0=true'",
- s.CpuCount, s.CpuAllocator.maxContainerCount, availableCpus))
+ "(%d containers * %d cpus, %d available). Try using 'CPU0=true'",
+ s.CpuAllocator.maxContainerCount, s.CpuCount, availableCpus))
}
}
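A worked example of why ``maxContainerCount`` is temporarily overridden, assuming a single Ginkgo process and ``CPUS=1``, and reusing the hypothetical ``cpuRange`` sketch from above::

    // VPP container of an MTTest: maxContainerCount=1, Allocate(1, 3, 0)
    //   cpuRange(1, 1, 3, 0) == (0, 2)  -> three dedicated worker cores
    // Other containers of an MTTest: maxContainerCount=3, offset 2
    //   cpuRange(1, 3, 1, 2) == (2, 4)  -> inside this window each container
    //   is further offset by the started-container count (per the comment in
    //   AllocateCpus), which keeps it clear of the VPP cores
    // Any non-MTTest case: maxContainerCount=4, offset 0, as before
    //   cpuRange(1, 4, 1, 0) == (0, 3)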
diff --git a/extras/hs-test/infra/suite_vpp_proxy.go b/extras/hs-test/infra/suite_vpp_proxy.go
index 16c6115bc23..d696109b31b 100644
--- a/extras/hs-test/infra/suite_vpp_proxy.go
+++ b/extras/hs-test/infra/suite_vpp_proxy.go
@@ -189,7 +189,7 @@ var _ = Describe("VppProxySuite", Ordered, ContinueOnFailure, func() {
}
})
-var _ = Describe("VppProxySuiteSolo", Ordered, ContinueOnFailure, func() {
+var _ = Describe("VppProxySuiteSolo", Ordered, ContinueOnFailure, Serial, func() {
var s VppProxySuite
BeforeAll(func() {
s.SetupSuite()
diff --git a/extras/hs-test/proxy_test.go b/extras/hs-test/proxy_test.go
index 1f5d13e269b..367818925d2 100644
--- a/extras/hs-test/proxy_test.go
+++ b/extras/hs-test/proxy_test.go
@@ -9,6 +9,7 @@ import (
func init() {
RegisterVppProxyTests(VppProxyHttpGetTcpTest, VppProxyHttpGetTlsTest, VppProxyHttpPutTcpTest, VppProxyHttpPutTlsTest,
VppConnectProxyGetTest, VppConnectProxyPutTest)
+ RegisterVppProxySoloTests(VppProxyHttpGetTcpMTTest, VppProxyHttpPutTcpMTTest)
RegisterVppUdpProxyTests(VppProxyUdpTest)
RegisterEnvoyProxyTests(EnvoyProxyHttpGetTcpTest, EnvoyProxyHttpPutTcpTest)
RegisterNginxProxyTests(NginxMirroringTest)
@@ -25,6 +26,10 @@ func configureVppProxy(s *VppProxySuite, proto string, proxyPort uint16) {
s.Log("proxy configured: " + output)
}
+func VppProxyHttpGetTcpMTTest(s *VppProxySuite) {
+ VppProxyHttpGetTcpTest(s)
+}
+
func VppProxyHttpGetTcpTest(s *VppProxySuite) {
var proxyPort uint16 = 8080
configureVppProxy(s, "tcp", proxyPort)
@@ -39,6 +44,10 @@ func VppProxyHttpGetTlsTest(s *VppProxySuite) {
s.CurlDownloadResource(uri)
}
+func VppProxyHttpPutTcpMTTest(s *VppProxySuite) {
+ VppProxyHttpPutTcpTest(s)
+}
+
func VppProxyHttpPutTcpTest(s *VppProxySuite) {
var proxyPort uint16 = 8080
configureVppProxy(s, "tcp", proxyPort)
diff --git a/src/vnet/pg/pg.h b/src/vnet/pg/pg.h
index 5e99d9af9f6..5e63b58caf6 100644
--- a/src/vnet/pg/pg.h
+++ b/src/vnet/pg/pg.h
@@ -182,14 +182,38 @@ typedef struct pg_stream_t
} pg_stream_t;
always_inline void
-pg_buffer_index_free (pg_buffer_index_t * bi)
+pg_free_buffers (pg_buffer_index_t *bi)
{
vlib_main_t *vm = vlib_get_main ();
- word n_alloc;
- vec_free (bi->edits);
- n_alloc = clib_fifo_elts (bi->buffer_fifo);
- vlib_buffer_free (vm, bi->buffer_fifo, n_alloc);
- clib_fifo_free (bi->buffer_fifo);
+ uword n_elts, head, len;
+
+ if (!bi || !bi->buffer_fifo)
+ return;
+
+ n_elts = clib_fifo_elts (bi->buffer_fifo);
+ if (n_elts)
+ {
+ len = clib_fifo_len (bi->buffer_fifo);
+ head = clib_fifo_head_index (bi->buffer_fifo);
+
+ if (head + n_elts <= len)
+ vlib_buffer_free (vm, &bi->buffer_fifo[head], n_elts);
+ else
+ {
+ vlib_buffer_free (vm, &bi->buffer_fifo[head], len - head);
+ vlib_buffer_free (vm, bi->buffer_fifo, n_elts - (len - head));
+ }
+ }
+}
+
+always_inline void
+pg_buffer_index_free (pg_buffer_index_t *bi)
+{
+ if (bi)
+ {
+ vec_free (bi->edits);
+ clib_fifo_free (bi->buffer_fifo);
+ }
}
always_inline void
@@ -220,11 +244,16 @@ pg_stream_free (pg_stream_t * s)
vec_free (s->replay_packet_templates);
vec_free (s->replay_packet_timestamps);
- {
- pg_buffer_index_t *bi;
- vec_foreach (bi, s->buffer_indices) pg_buffer_index_free (bi);
- vec_free (s->buffer_indices);
- }
+ if (s->buffer_indices)
+ {
+ pg_buffer_index_t *bi;
+ // We only need to free the buffers from the first array, as the buffers
+ // are chained when packet-generator enable is issued.
+ pg_free_buffers (s->buffer_indices);
+ vec_foreach (bi, s->buffer_indices)
+ pg_buffer_index_free (bi);
+ vec_free (s->buffer_indices);
+ }
}
always_inline int
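The wrap-around handling in ``pg_free_buffers`` above is the usual way to drain a ring buffer: the live elements occupy at most two contiguous chunks. A self-contained Go sketch of the same index arithmetic, with a hypothetical ``freeChunk`` standing in for ``vlib_buffer_free``::

    // freeChunk is a hypothetical stand-in for vlib_buffer_free.
    func freeChunk(chunk []uint32) {}

    // freeRing frees nElts live elements of a ring whose oldest element sits
    // at index head: either one contiguous chunk, or a tail chunk [head, len)
    // plus a wrapped chunk that restarts at index 0.
    func freeRing(ring []uint32, head, nElts int) {
        length := len(ring)
        if nElts == 0 {
            return
        }
        if head+nElts <= length {
            freeChunk(ring[head : head+nElts]) // no wrap: one chunk
        } else {
            freeChunk(ring[head:length])          // tail, up to the end
            freeChunk(ring[:nElts-(length-head)]) // wrapped remainder
        }
    }

    // Example: length=8, head=6, nElts=4 frees indexes 6,7 and then 0,1 --
    // exactly the two vlib_buffer_free calls above.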
diff --git a/test/test_pg_stream.py b/test/test_pg_stream.py
new file mode 100644
index 00000000000..6c01d3b7b16
--- /dev/null
+++ b/test/test_pg_stream.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python3
+
+import unittest
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, UDP
+from scapy.layers.inet6 import IPv6
+
+from framework import VppTestCase
+from asfframework import VppTestRunner
+
+
+class TestPgStream(VppTestCase):
+    """PG Stream Test Case"""
+
+    def __init__(self, *args):
+        VppTestCase.__init__(self, *args)
+
+    def setUp(self):
+        super(TestPgStream, self).setUp()
+
+        # Create 3 pg interfaces - one each for ethernet, IPv4, and IPv6.
+        self.create_pg_interfaces(range(0, 1))
+        self.pg_interfaces += self.create_pg_ip4_interfaces(range(1, 2))
+        self.pg_interfaces += self.create_pg_ip6_interfaces(range(2, 3))
+
+        for i in self.pg_interfaces:
+            i.admin_up()
+
+        for i in [self.pg0, self.pg1]:
+            i.config_ip4()
+
+        for i in [self.pg0, self.pg2]:
+            i.config_ip6()
+
+        self.pg0.resolve_arp()
+        self.pg0.resolve_ndp()
+
+    def tearDown(self):
+        super(TestPgStream, self).tearDown()
+        # Unconfigure only what setUp configured.
+        for i in [self.pg0, self.pg1]:
+            i.unconfig_ip4()
+        for i in [self.pg0, self.pg2]:
+            i.unconfig_ip6()
+        for i in self.pg_interfaces:
+            i.admin_down()
+            i.remove_vpp_config()
+
+    def pg_stream(self, count=100, rate=1e6, packet_size=700):
+        rate = str(rate)
+        packet_size = str(packet_size)
+        count = str(count)
+
+        cmds = [
+            "packet-generator new {{\n"
+            "  name pg0-stream\n"
+            "  limit {count}\n"
+            "  node ethernet-input\n"
+            "  source pg0\n"
+            "  rate {rate}\n"
+            "  size {packet_size}+{packet_size}\n"
+            "  data {{\n"
+            "    IP4: {src_mac} -> 00:02:03:04:05:06\n"
+            "    UDP: 192.168.20.20 -> 192.168.10.100\n"
+            "    UDP: 1234 -> 4321\n"
+            "    incrementing 100\n"
+            "  }}\n"
+            "}}\n".format(
+                count=count,
+                rate=rate,
+                packet_size=packet_size,
+                src_mac=self.pg0.local_mac,
+            ),
+            "packet-generator enable",
+            "packet-generator disable",
+            "packet-generator delete pg0-stream",
+        ]
+
+        for cmd in cmds:
+            r = self.vapi.cli_return_response(cmd)
+            if r.retval != 0:
+                if hasattr(r, "reply"):
+                    self.logger.info(cmd + " FAIL reply " + r.reply)
+                else:
+                    self.logger.info(cmd + " FAIL retval " + str(r.retval))
+
+    def test_pg_stream(self):
+        """PG Stream testing"""
+        self.pg_stream(rate=100, packet_size=64)
+        self.pg_stream(count=1000, rate=1000)
+        self.pg_stream(count=100000, rate=10000, packet_size=1500)
+        self.pg_stream(packet_size=4000)
+
+
+if __name__ == "__main__":
+    unittest.main(testRunner=VppTestRunner)