8 files changed, 219 insertions, 311 deletions
diff --git a/fdio.infra.terraform/terraform-nomad-vpp-device/conf/nomad/device-shim.hcl.tftpl b/fdio.infra.terraform/terraform-nomad-vpp-device/conf/nomad/device-shim.hcl.tftpl
new file mode 100644
index 0000000000..28e38a2d0b
--- /dev/null
+++ b/fdio.infra.terraform/terraform-nomad-vpp-device/conf/nomad/device-shim.hcl.tftpl
@@ -0,0 +1,78 @@
+job "${job_name}" {
+  datacenters = ["${datacenters}"]
+  type        = "system"
+  group "${job_name}-amd" {
+    count = ${group_count}
+    constraint {
+      attribute = "$${node.class}"
+      value     = "csit"
+    }
+    restart {
+      interval = "1m"
+      attempts = 3
+      delay    = "15s"
+      mode     = "delay"
+    }
+    network {
+      port "ssh" {
+        static = 6022
+      }
+      port "ssh2" {
+        static = 6023
+      }
+    }
+    task "${job_name}-amd" {
+      driver = "docker"
+      config {
+        image        = "${image_x86_64}"
+        network_mode = "host"
+        pid_mode     = "host"
+        volumes      = [
+          "/var/run/docker.sock:/var/run/docker.sock"
+        ]
+        privileged = true
+      }
+      resources {
+        cpu    = ${cpu}
+        memory = ${memory}
+      }
+    }
+  }
+  group "${job_name}-arm" {
+    count = ${group_count}
+    constraint {
+      attribute = "$${node.class}"
+      value     = "csitarm"
+    }
+    restart {
+      interval = "1m"
+      attempts = 3
+      delay    = "15s"
+      mode     = "delay"
+    }
+    network {
+      port "ssh" {
+        static = 6022
+      }
+      port "ssh2" {
+        static = 6023
+      }
+    }
+    task "${job_name}-arm" {
+      driver = "docker"
+      config {
+        image        = "${image_aarch64}"
+        network_mode = "host"
+        pid_mode     = "host"
+        volumes      = [
+          "/var/run/docker.sock:/var/run/docker.sock"
+        ]
+        privileged = true
+      }
+      resources {
+        cpu    = ${cpu}
+        memory = ${memory}
+      }
+    }
+  }
+}
diff --git a/fdio.infra.terraform/terraform-nomad-vpp-device/conf/nomad/vpp-device.hcl.tftpl b/fdio.infra.terraform/terraform-nomad-vpp-device/conf/nomad/vpp-device.hcl.tftpl
deleted file mode 100644
index aac1a46165..0000000000
--- a/fdio.infra.terraform/terraform-nomad-vpp-device/conf/nomad/vpp-device.hcl.tftpl
+++ /dev/null
@@ -1,188 +0,0 @@
-job "${job_name}" {
-  # The "region" parameter specifies the region in which to execute the job.
-  # If omitted, this inherits the default region name of "global".
-  # region = "global"
-  #
-  # The "datacenters" parameter specifies the list of datacenters which should
-  # be considered when placing this task. This must be provided.
-  datacenters = "${datacenters}"
-
-  # The "type" parameter controls the type of job, which impacts the scheduler's
-  # decision on placement. This configuration is optional and defaults to
-  # "service". For a full list of job types and their differences, please see
-  # the online documentation.
-  #
-  type = "system"
-
-  # The "group" stanza defines a series of tasks that should be co-located on
-  # the same Nomad client. Any task within a group will be placed on the same
-  # client.
-  #
-  # https://www.nomadproject.io/docs/job-specification/group
-  #
-  group "csit-shim-amd-group-1" {
-    # The "count" parameter specifies the number of the task groups that should
-    # be running under this group. This value must be non-negative.
-    count = ${group_count}
-
-    # The constraint allows restricting the set of eligible nodes. Constraints
-    # may filter on attributes or client metadata.
-    #
-    # https://www.nomadproject.io/docs/job-specification/constraint
-    #
-    constraint {
-      attribute = "$${node.class}"
-      value     = "csit"
-    }
-
-    # The restart stanza configures a tasks's behavior on task failure. Restarts
-    # happen on the client that is running the task.
-    #
-    # https://www.nomadproject.io/docs/job-specification/restart
-    #
-    restart {
-      interval = "1m"
-      attempts = 3
-      delay    = "15s"
-      mode     = "delay"
-    }
-
-    # The network stanza specifies the networking requirements for the task
-    # group, including the network mode and port allocations. When scheduling
-    # jobs in Nomad they are provisioned across your fleet of machines along
-    # with other jobs and services. Because you don't know in advance what host
-    # your job will be provisioned on, Nomad will provide your tasks with
-    # network configuration when they start up.
-    #
-    # https://www.nomadproject.io/docs/job-specification/network
-    #
-    network {
-      port "ssh" {
-        static = 6022
-      }
-      port "ssh2" {
-        static = 6023
-      }
-    }
-
-    # The "task" stanza creates an individual unit of work, such as a Docker
-    # container, web application, or batch processing.
-    #
-    # https://www.nomadproject.io/docs/job-specification/task
-    #
-    task "csit-shim-amd-task-1" {
-      # The "driver" parameter specifies the task driver that should be used to
-      # run the task.
-      driver = "docker"
-
-      # The "config" stanza specifies the driver configuration, which is passed
-      # directly to the driver to start the task. The details of configurations
-      # are specific to each driver, so please see specific driver
-      # documentation for more information.
-      config {
-        image        = "${image_x86_64}"
-        network_mode = "host"
-        pid_mode     = "host"
-        volumes      = [
-          "/var/run/docker.sock:/var/run/docker.sock"
-        ]
-        privileged = true
-      }
-
-      # The "resources" stanza describes the requirements a task needs to
-      # execute. Resource requirements include memory, network, cpu, and more.
-      # This ensures the task will execute on a machine that contains enough
-      # resource capacity.
-      #
-      # https://www.nomadproject.io/docs/job-specification/resources
-      #
-      resources {
-        cpu    = ${cpu}
-        memory = ${memory}
-      }
-    }
-  }
-
-  group "csit-shim-arm-group-1" {
-    # The "count" parameter specifies the number of the task groups that should
-    # be running under this group. This value must be non-negative.
-    count = ${group_count}
-
-    # The constraint allows restricting the set of eligible nodes. Constraints
-    # may filter on attributes or client metadata.
-    #
-    # https://www.nomadproject.io/docs/job-specification/constraint
-    #
-    constraint {
-      attribute = "$${node.class}"
-      value     = "csitarm"
-    }
-
-    # The restart stanza configures a tasks's behavior on task failure. Restarts
-    # happen on the client that is running the task.
-    #
-    # https://www.nomadproject.io/docs/job-specification/restart
-    #
-    restart {
-      interval = "1m"
-      attempts = 3
-      delay    = "15s"
-      mode     = "delay"
-    }
-
-    # The network stanza specifies the networking requirements for the task
-    # group, including the network mode and port allocations. When scheduling
-    # jobs in Nomad they are provisioned across your fleet of machines along
-    # with other jobs and services. Because you don't know in advance what host
-    # your job will be provisioned on, Nomad will provide your tasks with
-    # network configuration when they start up.
-    #
-    # https://www.nomadproject.io/docs/job-specification/network
-    #
-    network {
-      port "ssh" {
-        static = 6022
-      }
-      port "ssh2" {
-        static = 6023
-      }
-    }
-
-    # The "task" stanza creates an individual unit of work, such as a Docker
-    # container, web application, or batch processing.
-    #
-    # https://www.nomadproject.io/docs/job-specification/task
-    #
-    task "csit-shim-arm-task-1" {
-      # The "driver" parameter specifies the task driver that should be used to
-      # run the task.
-      driver = "docker"
-
-      # The "config" stanza specifies the driver configuration, which is passed
-      # directly to the driver to start the task. The details of configurations
-      # are specific to each driver, so please see specific driver
-      # documentation for more information.
-      config {
-        image        = "${image_aarch64}"
-        network_mode = "host"
-        pid_mode     = "host"
-        volumes      = [
-          "/var/run/docker.sock:/var/run/docker.sock"
-        ]
-        privileged = true
-      }
-
-      # The "resources" stanza describes the requirements a task needs to
-      # execute. Resource requirements include memory, network, cpu, and more.
-      # This ensures the task will execute on a machine that contains enough
-      # resource capacity.
-      #
-      # https://www.nomadproject.io/docs/job-specification/resources
-      #
-      resources {
-        cpu    = ${cpu}
-        memory = ${memory}
-      }
-    }
-  }
-}
\ No newline at end of file
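Note on the template syntax above: the doubled dollar sign in "$${node.class}" is Terraform's escape for a literal interpolation, so templatefile() fills in ${job_name}, ${cpu} and friends while "${node.class}" is passed through for Nomad to resolve against client attributes at scheduling time. A minimal sketch of rendering the new template, using the values that appear in fdio/main.tf below; the locals/output names are only for illustration:

locals {
  rendered_jobspec = templatefile(
    "${path.module}/conf/nomad/device-shim.hcl.tftpl",
    {
      datacenters   = "yul1"
      job_name      = "device-shim"
      group_count   = 1
      cpu           = 1500
      memory        = 4096
      image_aarch64 = "fdiotools/csit_shim-ubuntu2004:2021_03_02_143938_UTC-aarch64"
      image_x86_64  = "fdiotools/csit_shim-ubuntu2004:2021_03_04_142103_UTC-x86_64"
    }
  )
}

# Illustration only: expose the rendered Nomad jobspec for inspection.
output "rendered_jobspec" {
  value = local.rendered_jobspec
}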
diff --git a/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/main.tf b/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/main.tf
index 96666e6e89..a6217d781f 100644
--- a/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/main.tf
+++ b/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/main.tf
@@ -6,10 +6,11 @@ module "vpp-device" {
 
   # nomad
   datacenters   = ["yul1"]
-  job_name      = "prod-device-csit-shim"
-  group_count   = "1"
-  cpu           = "1500"
-  memory        = "4096"
+  job_name      = "device-shim"
+  group_count   = 1
+  cpu           = 1500
+  memory        = 4096
   image_aarch64 = "fdiotools/csit_shim-ubuntu2004:2021_03_02_143938_UTC-aarch64"
   image_x86_64  = "fdiotools/csit_shim-ubuntu2004:2021_03_04_142103_UTC-x86_64"
-}
\ No newline at end of file
+}
+
diff --git a/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/versions.tf b/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/versions.tf
index 82b792fd72..fc5a3ab12d 100644
--- a/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/versions.tf
+++ b/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/versions.tf
@@ -1,9 +1,15 @@
 terraform {
+  backend "consul" {
+    address = "10.30.51.23:8500"
+    scheme  = "http"
+    path    = "terraform/device-csit-shim"
+  }
   required_providers {
     nomad = {
       source  = "hashicorp/nomad"
-      version = ">= 1.4.19"
+      version = ">= 1.4.20"
     }
   }
-  required_version = ">= 1.3.7"
-}
\ No newline at end of file
+  required_version = ">= 1.5.4"
+}
+
diff --git a/fdio.infra.terraform/terraform-nomad-vpp-device/main.tf b/fdio.infra.terraform/terraform-nomad-vpp-device/main.tf
index 2e67085714..b176172d00 100644
--- a/fdio.infra.terraform/terraform-nomad-vpp-device/main.tf
+++ b/fdio.infra.terraform/terraform-nomad-vpp-device/main.tf
@@ -2,18 +2,19 @@ locals {
   datacenters = join(",", var.datacenters)
 }
 
-resource "nomad_job" "nomad_job_csit_shim" {
+resource "nomad_job" "nomad_job" {
   jobspec = templatefile(
-    "${path.module}/conf/nomad/vpp-device.hcl.tftpl",
+    "${path.module}/conf/nomad/${var.job_name}.hcl.tftpl",
     {
-      datacenters   = local.datacenters
-      job_name      = var.job_name
-      group_count   = var.group_count
-      cpu           = var.cpu
-      mem           = var.memory
-      image_aarch64 = var.image_aarch64
-      image_x86_64  = var.image_x86_64
+      datacenters   = local.datacenters,
+      job_name      = var.job_name,
+      group_count   = var.group_count,
+      cpu           = var.cpu,
+      memory        = var.memory,
+      image_aarch64 = var.image_aarch64,
+      image_x86_64  = var.image_x86_64
     }
   )
-  detach = false
-}
\ No newline at end of file
+  detach = false
+}
+
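With this change the job name also selects the template file (conf/nomad/${var.job_name}.hcl.tftpl), so renaming the job means shipping a matching *.hcl.tftpl. For orientation only, a hypothetical sketch of the variables.tf this module would declare to satisfy the references above; the names come from the jobspec call, the types and defaults are assumptions:

variable "datacenters" {
  type    = list(string)
  default = ["dc1"]
}

variable "job_name" {
  type    = string
  default = "device-shim"
}

variable "group_count" {
  type    = number
  default = 1
}

variable "cpu" {
  type    = number
  default = 1500
}

variable "memory" {
  type    = number
  default = 4096
}

variable "image_aarch64" {
  type = string
}

variable "image_x86_64" {
  type = string
}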
diff --git a/fdio.infra.terraform/terraform-nomad-vpp-device/versions.tf b/fdio.infra.terraform/terraform-nomad-vpp-device/versions.tf
index a319c35908..f40435fe77 100644
--- a/fdio.infra.terraform/terraform-nomad-vpp-device/versions.tf
+++ b/fdio.infra.terraform/terraform-nomad-vpp-device/versions.tf
@@ -2,8 +2,8 @@ terraform {
   required_providers {
     nomad = {
       source  = "hashicorp/nomad"
-      version = ">= 1.4.19"
+      version = ">= 1.4.20"
     }
   }
-  required_version = ">= 1.3.7"
+  required_version = ">= 1.5.4"
 }
diff --git a/resources/libraries/python/VppConfigGenerator.py b/resources/libraries/python/VppConfigGenerator.py
index 65a19b2211..e23a8520da 100644
--- a/resources/libraries/python/VppConfigGenerator.py
+++ b/resources/libraries/python/VppConfigGenerator.py
@@ -49,17 +49,17 @@ class VppConfigGenerator:
     def __init__(self):
         """Initialize library."""
         # VPP Node to apply configuration on
-        self._node = u""
+        self._node = ""
         # Topology node key
-        self._node_key = u""
+        self._node_key = ""
         # VPP Configuration
         self._nodeconfig = dict()
         # Serialized VPP Configuration
-        self._vpp_config = u""
+        self._vpp_config = ""
         # VPP Service name
-        self._vpp_service_name = u"vpp"
+        self._vpp_service_name = "vpp"
         # VPP Startup config location
-        self._vpp_startup_conf = u"/etc/vpp/startup.conf"
+        self._vpp_startup_conf = "/etc/vpp/startup.conf"

     def set_node(self, node, node_key=None):
         """Set DUT node.
@@ -70,9 +70,9 @@ class VppConfigGenerator:
         :type node_key: str
         :raises RuntimeError: If Node type is not DUT.
         """
-        if node[u"type"] != NodeType.DUT:
+        if node["type"] != NodeType.DUT:
             raise RuntimeError(
-                u"Startup config can only be applied to DUTnode."
+                "Startup config can only be applied to DUTnode."
             )
         self._node = node
         self._node_key = node_key
@@ -102,8 +102,8 @@ class VppConfigGenerator:
             if path[0] not in config:
                 config[path[0]] = dict()
             elif isinstance(config[path[0]], str):
-                config[path[0]] = dict() if config[path[0]] == u"" \
-                    else {config[path[0]]: u""}
+                config[path[0]] = dict() if config[path[0]] == "" \
+                    else {config[path[0]]: ""}
             self.add_config_item(config[path[0]], value, path[1:])

     def dump_config(self, obj, level=-1):
@@ -115,7 +115,7 @@ class VppConfigGenerator:
         :type level: int
         :returns: nothing
         """
-        indent = u"  "
+        indent = "  "
         if level >= 0:
             self._vpp_config += f"{level * indent}{{\n"
         if isinstance(obj, dict):
@@ -137,50 +137,50 @@ class VppConfigGenerator:
         :param value: Log file.
         :type value: str
         """
-        path = [u"unix", u"log"]
+        path = ["unix", "log"]
         self.add_config_item(self._nodeconfig, value, path)

-    def add_unix_cli_listen(self, value=u"/run/vpp/cli.sock"):
+    def add_unix_cli_listen(self, value="/run/vpp/cli.sock"):
         """Add UNIX cli-listen configuration.

         :param value: CLI listen address and port or path to CLI socket.
         :type value: str
         """
-        path = [u"unix", u"cli-listen"]
+        path = ["unix", "cli-listen"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_unix_cli_no_pager(self):
         """Add UNIX cli-no-pager configuration."""
-        path = [u"unix", u"cli-no-pager"]
-        self.add_config_item(self._nodeconfig, u"", path)
+        path = ["unix", "cli-no-pager"]
+        self.add_config_item(self._nodeconfig, "", path)

-    def add_unix_gid(self, value=u"vpp"):
+    def add_unix_gid(self, value="vpp"):
         """Add UNIX gid configuration.

         :param value: Gid.
         :type value: str
         """
-        path = [u"unix", u"gid"]
+        path = ["unix", "gid"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_unix_nodaemon(self):
         """Add UNIX nodaemon configuration."""
-        path = [u"unix", u"nodaemon"]
-        self.add_config_item(self._nodeconfig, u"", path)
+        path = ["unix", "nodaemon"]
+        self.add_config_item(self._nodeconfig, "", path)

     def add_unix_coredump(self):
         """Add UNIX full-coredump configuration."""
-        path = [u"unix", u"full-coredump"]
-        self.add_config_item(self._nodeconfig, u"", path)
+        path = ["unix", "full-coredump"]
+        self.add_config_item(self._nodeconfig, "", path)

     def add_unix_exec(self, value):
         """Add UNIX exec configuration."""
-        path = [u"unix", u"exec"]
+        path = ["unix", "exec"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_socksvr(self, socket=Constants.SOCKSVR_PATH):
         """Add socksvr configuration."""
-        path = [u"socksvr", u"socket-name"]
+        path = ["socksvr", "socket-name"]
         self.add_config_item(self._nodeconfig, socket, path)

     def add_graph_node_variant(self, variant=Constants.GRAPH_NODE_VARIANT):
@@ -189,39 +189,48 @@ class VppConfigGenerator:
         :param value: Graph node variant default value.
         :type value: str
         """
-        if variant == u"":
+        if variant == "":
             return
-        variant_list = [u"hsw", u"skx", u"icl"]
+        variant_list = ["hsw", "skx", "icl"]
         if variant not in variant_list:
             raise ValueError("Invalid graph node variant value")
-        path = [u"node", u"default", u"variant"]
+        path = ["node", "default", "variant"]
         self.add_config_item(self._nodeconfig, variant, path)

-    def add_api_segment_gid(self, value=u"vpp"):
-        """Add API-SEGMENT gid configuration.
+    def add_api_segment_gid(self, value="vpp"):
+        """Add api-segment gid configuration.

         :param value: Gid.
         :type value: str
         """
-        path = [u"api-segment", u"gid"]
+        path = ["api-segment", "gid"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_api_segment_global_size(self, value):
-        """Add API-SEGMENT global-size configuration.
+        """Add api-segment global-size configuration.

         :param value: Global size.
         :type value: str
         """
-        path = [u"api-segment", u"global-size"]
+        path = ["api-segment", "global-size"]
+        self.add_config_item(self._nodeconfig, value, path)
+
+    def add_api_segment_prefix(self, value="vpp"):
+        """Add api-segment prefix configuration.
+
+        :param value: Gid.
+        :type value: str
+        """
+        path = ["api-segment", "prefix"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_api_segment_api_size(self, value):
-        """Add API-SEGMENT api-size configuration.
+        """Add api-segment api-size configuration.

         :param value: API size.
         :type value: str
         """
-        path = [u"api-segment", u"api-size"]
+        path = ["api-segment", "api-size"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_buffers_per_numa(self, value):
@@ -230,7 +239,7 @@ class VppConfigGenerator:
         :param value: Number of buffers allocated.
         :type value: int
         """
-        path = [u"buffers", u"buffers-per-numa"]
+        path = ["buffers", "buffers-per-numa"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_buffers_default_data_size(self, value):
@@ -239,7 +248,7 @@ class VppConfigGenerator:
         :param value: Buffers data-size allocated.
         :type value: int
         """
-        path = [u"buffers", u"default data-size"]
+        path = ["buffers", "default data-size"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_dpdk_dev(self, *devices):
@@ -250,8 +259,8 @@ class VppConfigGenerator:
         """
         for device in devices:
             if pci_dev_check(device):
-                path = [u"dpdk", f"dev {device}"]
-                self.add_config_item(self._nodeconfig, u"", path)
+                path = ["dpdk", f"dev {device}"]
+                self.add_config_item(self._nodeconfig, "", path)

     def add_dpdk_cryptodev(self, count, num_rx_queues=1):
         """Add DPDK Crypto PCI device configuration.
@@ -284,8 +293,8 @@ class VppConfigGenerator:
         for _ in range(count):
             cryptodev_config = f"vdev cryptodev_{sw_pmd_type}_pmd," \
                 f"socket_id={str(socket_id)}"
-            path = [u"dpdk", cryptodev_config]
-            self.add_config_item(self._nodeconfig, u"", path)
+            path = ["dpdk", cryptodev_config]
+            self.add_config_item(self._nodeconfig, "", path)

     def add_dpdk_dev_default_rxq(self, value):
         """Add DPDK dev default rxq configuration.
@@ -293,7 +302,7 @@ class VppConfigGenerator:
         :param value: Default number of rxqs.
         :type value: str
         """
-        path = [u"dpdk", u"dev default", u"num-rx-queues"]
+        path = ["dpdk", "dev default", "num-rx-queues"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_dpdk_dev_default_txq(self, value):
@@ -302,7 +311,7 @@ class VppConfigGenerator:
         :param value: Default number of txqs.
         :type value: str
         """
-        path = [u"dpdk", u"dev default", u"num-tx-queues"]
+        path = ["dpdk", "dev default", "num-tx-queues"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_dpdk_dev_default_rxd(self, value):
@@ -311,7 +320,7 @@ class VppConfigGenerator:
         :param value: Default number of rxds.
         :type value: str
         """
-        path = [u"dpdk", u"dev default", u"num-rx-desc"]
+        path = ["dpdk", "dev default", "num-rx-desc"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_dpdk_dev_default_txd(self, value):
@@ -320,7 +329,7 @@ class VppConfigGenerator:
         :param value: Default number of txds.
         :type value: str
         """
-        path = [u"dpdk", u"dev default", u"num-tx-desc"]
+        path = ["dpdk", "dev default", "num-tx-desc"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_dpdk_log_level(self, value):
@@ -329,13 +338,13 @@ class VppConfigGenerator:
         :param value: Log level.
         :type value: str
         """
-        path = [u"dpdk", u"log-level"]
+        path = ["dpdk", "log-level"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_dpdk_no_pci(self):
         """Add DPDK no-pci."""
-        path = [u"dpdk", u"no-pci"]
-        self.add_config_item(self._nodeconfig, u"", path)
+        path = ["dpdk", "no-pci"]
+        self.add_config_item(self._nodeconfig, "", path)

     def add_dpdk_uio_driver(self, value=None):
         """Add DPDK uio-driver configuration.
@@ -347,7 +356,7 @@ class VppConfigGenerator:
         """
         if value is None:
             value = Topology.get_uio_driver(self._node)
-        path = [u"dpdk", u"uio-driver"]
+        path = ["dpdk", "uio-driver"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_dpdk_max_simd_bitwidth(self, variant=Constants.GRAPH_NODE_VARIANT):
@@ -356,14 +365,14 @@ class VppConfigGenerator:
         :param value: Graph node variant default value.
         :type value: str
         """
-        if variant == u"icl":
+        if variant == "icl":
             value = 512
-        elif variant in [u"skx", u"hsw"]:
+        elif variant in ["skx", "hsw"]:
             value = 256
         else:
             return

-        path = [u"dpdk", u"max-simd-bitwidth"]
+        path = ["dpdk", "max-simd-bitwidth"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_cpu_main_core(self, value):
@@ -372,7 +381,7 @@ class VppConfigGenerator:
         :param value: Main core option.
         :type value: str
         """
-        path = [u"cpu", u"main-core"]
+        path = ["cpu", "main-core"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_cpu_corelist_workers(self, value):
@@ -381,7 +390,7 @@ class VppConfigGenerator:
         :param value: Corelist-workers option.
         :type value: str
         """
-        path = [u"cpu", u"corelist-workers"]
+        path = ["cpu", "corelist-workers"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_main_heap_size(self, value):
@@ -390,7 +399,7 @@ class VppConfigGenerator:
         :param value: Amount of heap.
         :type value: str
         """
-        path = [u"memory", u"main-heap-size"]
+        path = ["memory", "main-heap-size"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_main_heap_page_size(self, value):
@@ -399,7 +408,7 @@ class VppConfigGenerator:
         :param value: Heap page size.
         :type value: str
         """
-        path = [u"memory", u"main-heap-page-size"]
+        path = ["memory", "main-heap-page-size"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_default_hugepage_size(self, value=Constants.DEFAULT_HUGEPAGE_SIZE):
@@ -408,13 +417,13 @@ class VppConfigGenerator:
         :param value: Hugepage size.
         :type value: str
         """
-        path = [u"memory", u"default-hugepage-size"]
+        path = ["memory", "default-hugepage-size"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_api_trace(self):
         """Add API trace configuration."""
-        path = [u"api-trace", u"on"]
-        self.add_config_item(self._nodeconfig, u"", path)
+        path = ["api-trace", "on"]
+        self.add_config_item(self._nodeconfig, "", path)

     def add_ip6_hash_buckets(self, value):
         """Add IP6 hash buckets configuration.
@@ -422,7 +431,7 @@ class VppConfigGenerator:
         :param value: Number of IP6 hash buckets.
         :type value: str
         """
-        path = [u"ip6", u"hash-buckets"]
+        path = ["ip6", "hash-buckets"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_ip6_heap_size(self, value):
@@ -431,7 +440,7 @@ class VppConfigGenerator:
         :param value: IP6 Heapsize amount.
         :type value: str
         """
-        path = [u"ip6", u"heap-size"]
+        path = ["ip6", "heap-size"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_ipsec_spd_flow_cache_ipv4_inbound(self, value):
@@ -440,7 +449,7 @@ class VppConfigGenerator:
         :param value: "on" to enable spd flow cache.
         :type value: str
         """
-        path = [u"ipsec", u"ipv4-inbound-spd-flow-cache"]
+        path = ["ipsec", "ipv4-inbound-spd-flow-cache"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_ipsec_spd_flow_cache_ipv4_outbound(self, value):
@@ -449,7 +458,7 @@ class VppConfigGenerator:
         :param value: "on" to enable spd flow cache.
         :type value: str
         """
-        path = [u"ipsec", u"ipv4-outbound-spd-flow-cache"]
+        path = ["ipsec", "ipv4-outbound-spd-flow-cache"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_ipsec_spd_fast_path_ipv4_inbound(self, value):
@@ -467,7 +476,7 @@ class VppConfigGenerator:
         :param value: "on" to enable spd fast path.
         :type value: str
         """
-        path = [u"ipsec", u"ipv4-outbound-spd-fast-path"]
+        path = ["ipsec", "ipv4-outbound-spd-fast-path"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_ipsec_spd_fast_path_num_buckets(self, value):
@@ -476,7 +485,7 @@ class VppConfigGenerator:
         :param value: Number of buckets.
         :type value: int
         """
-        path = [u"ipsec", u"spd-fast-path-num-buckets"]
+        path = ["ipsec", "spd-fast-path-num-buckets"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_statseg_size(self, value):
@@ -485,7 +494,7 @@ class VppConfigGenerator:
         :param value: Stats heapsize amount.
         :type value: str
         """
-        path = [u"statseg", u"size"]
+        path = ["statseg", "size"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_statseg_page_size(self, value):
@@ -494,7 +503,7 @@ class VppConfigGenerator:
         :param value: Stats heapsize amount.
         :type value: str
         """
-        path = [u"statseg", u"page-size"]
+        path = ["statseg", "page-size"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_statseg_per_node_counters(self, value):
@@ -503,7 +512,7 @@ class VppConfigGenerator:
         :param value: "on" to switch the counters on.
         :type value: str
         """
-        path = [u"statseg", u"per-node-counters"]
+        path = ["statseg", "per-node-counters"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_plugin(self, state, *plugins):
@@ -515,27 +524,27 @@ class VppConfigGenerator:
         :type plugins: list
         """
         for plugin in plugins:
-            path = [u"plugins", f"plugin {plugin}", state]
-            self.add_config_item(self._nodeconfig, u" ", path)
+            path = ["plugins", f"plugin {plugin}", state]
+            self.add_config_item(self._nodeconfig, " ", path)

     def add_dpdk_no_multi_seg(self):
         """Add DPDK no-multi-seg configuration."""
-        path = [u"dpdk", u"no-multi-seg"]
-        self.add_config_item(self._nodeconfig, u"", path)
+        path = ["dpdk", "no-multi-seg"]
+        self.add_config_item(self._nodeconfig, "", path)

     def add_dpdk_no_tx_checksum_offload(self):
         """Add DPDK no-tx-checksum-offload configuration."""
-        path = [u"dpdk", u"no-tx-checksum-offload"]
-        self.add_config_item(self._nodeconfig, u"", path)
+        path = ["dpdk", "no-tx-checksum-offload"]
+        self.add_config_item(self._nodeconfig, "", path)

-    def add_nat(self, value=u"deterministic"):
+    def add_nat(self, value="deterministic"):
         """Add NAT mode configuration.

         :param value: NAT mode.
         :type value: str
         """
-        path = [u"nat", value]
-        self.add_config_item(self._nodeconfig, u"", path)
+        path = ["nat", value]
+        self.add_config_item(self._nodeconfig, "", path)

     def add_nat_max_translations_per_thread(self, value):
         """Add NAT max. translations per thread number configuration.
@@ -543,21 +552,21 @@ class VppConfigGenerator:
         :param value: NAT mode.
         :type value: str
         """
-        path = [u"nat", u"max translations per thread"]
+        path = ["nat", "max translations per thread"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_nsim_poll_main_thread(self):
         """Add NSIM poll-main-thread configuration."""
-        path = [u"nsim", u"poll-main-thread"]
-        self.add_config_item(self._nodeconfig, u"", path)
+        path = ["nsim", "poll-main-thread"]
+        self.add_config_item(self._nodeconfig, "", path)

-    def add_tcp_congestion_control_algorithm(self, value=u"cubic"):
+    def add_tcp_congestion_control_algorithm(self, value="cubic"):
         """Add TCP congestion control algorithm.

         :param value: The congestion control algorithm to use. Example: cubic
         :type value: str
         """
-        path = [u"tcp", u"cc-algo"]
+        path = ["tcp", "cc-algo"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_tcp_preallocated_connections(self, value):
@@ -566,7 +575,7 @@ class VppConfigGenerator:
         :param value: The number of pre-allocated connections.
         :type value: int
         """
-        path = [u"tcp", u"preallocated-connections"]
+        path = ["tcp", "preallocated-connections"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_tcp_preallocated_half_open_connections(self, value):
@@ -575,23 +584,23 @@ class VppConfigGenerator:
         :param value: The number of pre-allocated half open connections.
         :type value: int
         """
-        path = [u"tcp", u"preallocated-half-open-connections"]
+        path = ["tcp", "preallocated-half-open-connections"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_session_enable(self):
         """Add session enable."""
-        path = [u"session", u"enable"]
-        self.add_config_item(self._nodeconfig, u"", path)
+        path = ["session", "enable"]
+        self.add_config_item(self._nodeconfig, "", path)

     def add_session_app_socket_api(self):
         """Use session app socket api."""
-        path = [u"session", u"use-app-socket-api"]
-        self.add_config_item(self._nodeconfig, u"", path)
+        path = ["session", "use-app-socket-api"]
+        self.add_config_item(self._nodeconfig, "", path)

     def add_session_event_queues_memfd_segment(self):
         """Add session event queue memfd segment."""
-        path = [u"session", u"evt_qs_memfd_seg"]
-        self.add_config_item(self._nodeconfig, u"", path)
+        path = ["session", "evt_qs_memfd_seg"]
+        self.add_config_item(self._nodeconfig, "", path)

     def add_session_event_queue_length(self, value):
         """Add session event queue length.
@@ -599,7 +608,7 @@ class VppConfigGenerator:
         :param value: Session event queue length.
         :type value: int
         """
-        path = [u"session", u"event-queue-length"]
+        path = ["session", "event-queue-length"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_session_event_queues_segment_size(self, value):
@@ -608,7 +617,7 @@ class VppConfigGenerator:
         :param value: Session event queue segment size.
         :type value: str
         """
-        path = [u"session", u"evt_qs_seg_size"]
+        path = ["session", "evt_qs_seg_size"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_session_preallocated_sessions(self, value):
@@ -617,7 +626,7 @@ class VppConfigGenerator:
         :param value: Number of pre-allocated sessions.
         :type value: int
         """
-        path = [u"session", u"preallocated-sessions"]
+        path = ["session", "preallocated-sessions"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_session_v4_session_table_buckets(self, value):
@@ -626,7 +635,7 @@ class VppConfigGenerator:
         :param value: Number of v4 session table buckets.
         :type value: int
         """
-        path = [u"session", u"v4-session-table-buckets"]
+        path = ["session", "v4-session-table-buckets"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_session_v4_session_table_memory(self, value):
@@ -635,7 +644,7 @@ class VppConfigGenerator:
         :param value: Size of v4 session table memory.
         :type value: str
         """
-        path = [u"session", u"v4-session-table-memory"]
+        path = ["session", "v4-session-table-memory"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_session_v4_halfopen_table_buckets(self, value):
@@ -644,7 +653,7 @@ class VppConfigGenerator:
         :param value: Number of v4 halfopen table buckets.
         :type value: int
         """
-        path = [u"session", u"v4-halfopen-table-buckets"]
+        path = ["session", "v4-halfopen-table-buckets"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_session_v4_halfopen_table_memory(self, value):
@@ -653,7 +662,7 @@ class VppConfigGenerator:
         :param value: Size of v4 halfopen table memory.
         :type value: str
         """
-        path = [u"session", u"v4-halfopen-table-memory"]
+        path = ["session", "v4-halfopen-table-memory"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_session_local_endpoints_table_buckets(self, value):
@@ -662,7 +671,7 @@ class VppConfigGenerator:
         :param value: Number of local endpoints table buckets.
         :type value: int
         """
-        path = [u"session", u"local-endpoints-table-buckets"]
+        path = ["session", "local-endpoints-table-buckets"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_session_local_endpoints_table_memory(self, value):
@@ -671,7 +680,7 @@ class VppConfigGenerator:
         :param value: Size of local endpoints table memory.
         :type value: str
         """
-        path = [u"session", u"local-endpoints-table-memory"]
+        path = ["session", "local-endpoints-table-memory"]
         self.add_config_item(self._nodeconfig, value, path)

     def add_dma_dev(self, devices):
@@ -681,8 +690,8 @@ class VppConfigGenerator:
         :type devices: list
         """
         for device in devices:
-            path = [u"dsa", f"dev {device}"]
-            self.add_config_item(self._nodeconfig, u"", path)
+            path = ["dsa", f"dev {device}"]
+            self.add_config_item(self._nodeconfig, "", path)

     def write_config(self, filename=None):
         """Generate and write VPP startup configuration to file.
@@ -700,7 +709,7 @@ class VppConfigGenerator:
         cmd = f"echo \"{self._vpp_config}\" | sudo tee {filename}"

         exec_cmd_no_error(
-            self._node, cmd, message=u"Writing config file failed!"
+            self._node, cmd, message="Writing config file failed!"
         )

     def apply_config(self, filename=None, verify_vpp=True):
@@ -732,7 +741,7 @@ class VppInitConfig:
        """
        huge_size = Constants.DEFAULT_HUGEPAGE_SIZE
        for node in nodes.values():
-           if node[u"type"] == NodeType.DUT:
+           if node["type"] == NodeType.DUT:
                vpp_config = VppConfigGenerator()
                vpp_config.set_node(node)
                vpp_config.add_unix_log()
@@ -751,7 +760,7 @@ class VppInitConfig:
                vpp_config.add_plugin("enable", "dpdk_plugin.so")
                vpp_config.add_dpdk_dev(
                    *[node["interfaces"][interface].get("pci_address") \
-                       for interface in node[u"interfaces"]]
+                       for interface in node["interfaces"]]
                )
                vpp_config.add_ip6_hash_buckets(2000000)
                vpp_config.add_ip6_heap_size("4G")
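For context, a rough usage sketch (not part of this change) of how the new add_api_segment_prefix method fits the existing generator API. The node dict is a placeholder; a real one comes from the topology file and carries the SSH details that write_config() uses, and the rendered fragment is approximate:

from resources.libraries.python.topology import NodeType
from resources.libraries.python.VppConfigGenerator import VppConfigGenerator

# Placeholder DUT node dict for illustration only.
dut_node = {"type": NodeType.DUT, "host": "10.0.0.1"}

vpp_config = VppConfigGenerator()
vpp_config.set_node(dut_node)
vpp_config.add_unix_log()
vpp_config.add_api_segment_prefix("vpp")
vpp_config.write_config()  # renders and writes /etc/vpp/startup.conf on the DUT
# The api-segment part of the rendered config looks roughly like:
#
#   api-segment {
#     prefix vpp
#   }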
diff --git a/resources/libraries/robot/shared/default.robot b/resources/libraries/robot/shared/default.robot
index c2a013ddf1..2dc78941b3 100644
--- a/resources/libraries/robot/shared/default.robot
+++ b/resources/libraries/robot/shared/default.robot
@@ -150,6 +150,7 @@
 | | | Run Keyword | ${dut}.Add Unix CLI Listen
 | | | Run Keyword | ${dut}.Add Unix CLI No Pager
 | | | Run Keyword | ${dut}.Add Unix GID
+| | | Run Keyword | ${dut}.Add API Segment Prefix | ${dut}
 | | | Run Keyword | ${dut}.Add Unix Coredump
 | | | Run Keyword | ${dut}.Add Socksvr | ${SOCKSVR_PATH}
 | | | Run Keyword | ${dut}.Add Main Heap Size | ${${heap_size_mult}*${3}}G
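Because the keyword receives ${dut} as its argument (the loop variable, e.g. DUT1), each DUT presumably gets its own API segment prefix rather than the default "vpp". Assuming that, the stanza generated for the first DUT would look roughly like:

api-segment {
  prefix DUT1
}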