| field | value | date |
|---|---|---|
| author | Peter Mikus <pmikus@cisco.com> | 2019-01-04 07:38:16 +0000 |
| committer | Peter Mikus <pmikus@cisco.com> | 2019-01-08 12:11:59 +0000 |
| commit | d57482c4cd391c5c0b6a440629f81df09169bc82 (patch) | |
| tree | 8b9d74527ce5667755614bab9cccf12300623576 | |
| parent | d1c1b27d17033799cd586a079820d708cc8a541c (diff) | |
CSIT-1387 Implement mapping function
Implement a universal mapping function for placing NF
threads onto CPU cores/threads.
- Must be SMT aware.
- Must work with N-core NF.
- Must be DUT (vswitch) aware.
- Must be deterministic.
- Must use DTCR and MTCR parameters (dataplane/main thread to core ratios).
Change-Id: I98b61861f85c5af7ecd856719b4adf2ba95c262f
Signed-off-by: Peter Mikus <pmikus@cisco.com>
3 files changed, 134 insertions, 114 deletions
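The heart of the patch is the new `CpuUtils.cpu_slice_of_list_for_nf` below: cores for main threads and dataplane threads are reserved via ceiling division over the MTCR/DTCR ratios, and each NF gets a deterministic offset derived from its chain/node position (the SMT branch then maps those offsets onto sibling-thread halves). A minimal, self-contained Python sketch of that arithmetic, for illustration only; the helper name `nf_placement`, its return shape, and the example numbers are not part of the patch:

```python
# Illustrative sketch of the MTCR/DTCR placement arithmetic (not CSIT code).


def nf_placement(chains, nodeness, chain_id, node_id, mtcr, dtcr, dtc, skip_cnt):
    """Return (main_thread_skip, dataplane_skip) offsets into a NUMA CPU list."""
    total_nfs = chains * nodeness
    # Ceiling division: cores reserved for main threads and dataplane threads.
    mt_req = (total_nfs + mtcr - 1) // mtcr
    dt_req = (total_nfs + dtcr - 1) // dtcr
    # Deterministic offset of this NF within the chain/node grid.
    offset = (node_id - 1) + (chain_id - 1) * nodeness
    mt_skip = skip_cnt + (offset % mt_req)
    dt_skip = skip_cnt + mt_req + (offset % dt_req) * dtc
    return mt_skip, dt_skip


# Example: 2 chains x 2 NFs, mtcr=2, dtcr=1, 1 dataplane core per NF,
# skipping 4 cores already taken by the host OS and the vswitch.
print(nf_placement(2, 2, chain_id=1, node_id=2, mtcr=2, dtcr=1, dtc=1, skip_cnt=4))
# -> (5, 7)
```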
diff --git a/resources/libraries/python/CpuUtils.py b/resources/libraries/python/CpuUtils.py
index f4d52f8662..aa1bd79419 100644
--- a/resources/libraries/python/CpuUtils.py
+++ b/resources/libraries/python/CpuUtils.py
@@ -49,7 +49,6 @@ class CpuUtils(object):
         :returns: True if SMT is enabled, False if SMT is disabled.
         :rtype: bool
         """
-
         cpu_mems = [item[-4:] for item in cpu_info]
         cpu_mems_len = len(cpu_mems) / CpuUtils.NR_OF_THREADS
         count = 0
@@ -117,7 +116,6 @@ class CpuUtils(object):
         :raises RuntimeError: If node cpuinfo is not available or if SMT is
             not enabled.
         """
-
         cpu_node = int(cpu_node)
         cpu_info = node.get("cpuinfo")
         if cpu_info is None:
@@ -160,7 +158,6 @@ class CpuUtils(object):
         :rtype: list
         :raises RuntimeError: If we require more cpus than available.
         """
-
         cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)
         cpu_list_len = len(cpu_list)
 
@@ -202,7 +199,6 @@ class CpuUtils(object):
         :returns: Cpu numbers related to numa from argument.
         :rtype: str
         """
-
         cpu_list = CpuUtils.cpu_slice_of_list_per_node(node, cpu_node,
                                                        skip_cnt=skip_cnt,
                                                        cpu_cnt=cpu_cnt,
@@ -229,7 +225,6 @@ class CpuUtils(object):
         :returns: String of node related range of CPU numbers.
         :rtype: str
         """
-
         cpu_list = CpuUtils.cpu_slice_of_list_per_node(node, cpu_node,
                                                        skip_cnt=skip_cnt,
                                                        cpu_cnt=cpu_cnt,
@@ -246,3 +241,57 @@ class CpuUtils(object):
         cpu_range = "{}{}{}".format(cpu_list[0], sep, cpu_list[-1])
 
         return cpu_range
+
+    @staticmethod
+    def cpu_slice_of_list_for_nf(**kwargs):
+        """Return list of node related list of CPU numbers.
+
+        :param kwargs: Key-value pairs used to compute placement.
+        :type kwargs: dict
+        :returns: Cpu numbers related to numa from argument.
+        :rtype: list
+        :raises RuntimeError: If we require more cpus than available or if
+            placement is not possible due to wrong parameters.
+ """ + if kwargs['chain_id'] - 1 >= kwargs['chains']: + raise RuntimeError("ChainID is higher than total number of chains!") + if kwargs['node_id'] - 1 >= kwargs['nodeness']: + raise RuntimeError("NodeID is higher than chain nodeness!") + + smt_used = CpuUtils.is_smt_enabled(kwargs['node']['cpuinfo']) + cpu_list = CpuUtils.cpu_list_per_node(kwargs['node'], + kwargs['cpu_node'], smt_used) + cpu_list_len = len(cpu_list) + + mt_req = ((kwargs['chains'] * kwargs['nodeness']) + kwargs['mtcr'] - 1)\ + / kwargs['mtcr'] + dt_req = ((kwargs['chains'] * kwargs['nodeness']) + kwargs['dtcr'] - 1)\ + / kwargs['dtcr'] + + if kwargs['skip_cnt'] + mt_req + dt_req > cpu_list_len: + raise RuntimeError("Not enough CPU cores available for placement!") + + offset = (kwargs['node_id'] - 1) + (kwargs['chain_id'] - 1)\ + * kwargs['nodeness'] + dtc = kwargs['dtc'] + try: + mt_odd = (offset / mt_req) & 1 + mt_skip = kwargs['skip_cnt'] + (offset % mt_req) + dt_skip = kwargs['skip_cnt'] + mt_req + (offset % dt_req) * dtc + except ZeroDivisionError: + raise RuntimeError("Invalid placement combination!") + + if smt_used: + cpu_list_0 = cpu_list[:cpu_list_len / CpuUtils.NR_OF_THREADS] + cpu_list_1 = cpu_list[cpu_list_len / CpuUtils.NR_OF_THREADS:] + + mt_cpu_list = [cpu for cpu in cpu_list_1[mt_skip:mt_skip + 1]] \ + if mt_odd else [cpu for cpu in cpu_list_0[mt_skip:mt_skip + 1]] + + dt_cpu_list = [cpu for cpu in cpu_list_0[dt_skip:dt_skip + dtc]] + dt_cpu_list += [cpu for cpu in cpu_list_1[dt_skip:dt_skip + dtc]] + else: + mt_cpu_list = [cpu for cpu in cpu_list[mt_skip:mt_skip + 1]] + dt_cpu_list = [cpu for cpu in cpu_list[dt_skip:dt_skip + dtc]] + + return mt_cpu_list + dt_cpu_list diff --git a/resources/libraries/robot/performance/performance_configuration.robot b/resources/libraries/robot/performance/performance_configuration.robot index 419231ee55..17f2d5f13f 100644 --- a/resources/libraries/robot/performance/performance_configuration.robot +++ b/resources/libraries/robot/performance/performance_configuration.robot @@ -2263,6 +2263,8 @@ | | ... | - sock2 - Socket path for second Vhost-User interface. | | ... | Type: string | | ... | - vm_name - QemuUtil instance name. Type: string +| | ... | - chains: Total number of chains. Type: integer +| | ... | - nodeness: Total number of nodes per chain. Type: integer | | ... | - qemu_id - Qemu Id when starting more then one guest VM on DUT | | ... | node. Type: integer | | ... | - jumbo - Set True if jumbo frames are used in the test. @@ -2281,10 +2283,14 @@ | | ... | \| ${nodes['DUT1']} \| /tmp/sock-2-1 \| /tmp/sock-2-2 \| DUT1_VM2 \ | | ... | \| qemu_id=${2} \| | | ... -| | [Arguments] | ${dut} | ${sock1} | ${sock2} | ${vm_name} | ${qemu_id}=${1} -| | ... | ${jumbo}=${False} | ${perf_qemu_qsz}=${256} -| | ... | ${use_tuned_cfs}=${False} +| | [Arguments] | ${dut} | ${sock1} | ${sock2} | ${vm_name} | ${chains}=${1} +| | ... | ${nodeness}=${1} | ${qemu_id}=${1} | ${jumbo}=${False} +| | ... | ${perf_qemu_qsz}=${256} | ${use_tuned_cfs}=${False} | | ... +| | ${nf_cpus}= | Create network function CPU list | ${dut} +| | ... | chains=${chains} | nodeness=${nodeness} | chain_id=${1} +| | ... | node_id=${qemu_id} | auto_scale=${True} +| | ${nf_cpus_count}= | Get Length | ${nf_cpus} | | Import Library | resources.libraries.python.QemuUtils | qemu_id=${qemu_id} | | ... 
 | | ... | WITH NAME | ${vm_name}
 | | Run keyword | ${vm_name}.Qemu Set Node | ${nodes['${dut}']}
@@ -2292,28 +2298,6 @@
 | | Run keyword | ${vm_name}.Qemu Set Serial Port | ${serial_port}
 | | ${ssh_fwd_port}= | Evaluate | ${qemu_id} + ${10021}
 | | Run keyword | ${vm_name}.Qemu Set Ssh Fwd Port | ${ssh_fwd_port}
-| | ${if1_status} | ${value}= | Run Keyword And Ignore Error
-| | ... | Variable Should Exist | ${${dut}_if1}
-| | @{if_list}= | Run Keyword If | '${if1_status}' == 'PASS'
-| | ... | Create List | ${${dut}_if1}
-| | ... | ELSE | Create List | ${${dut}_if1_1} | ${${dut}_if1_2}
-| | ${if2_status} | ${value}= | Run Keyword And Ignore Error
-| | ... | Variable Should Exist | ${${dut}_if2}
-| | Run Keyword If | '${if2_status}' == 'PASS'
-| | ... | Append To List | ${if_list} | ${${dut}_if2}
-| | ... | ELSE | Append To List | ${if_list} | ${${dut}_if2_1} | ${${dut}_if2_2}
-| | ${dut_numa}= | Get interfaces numa node | ${nodes['${dut}']} | @{if_list}
-# Compute CPU placement for VM based on expected DUT placement.
-| | ${os_cpus}= | Set Variable | ${1}
-| | ${dut_main_cpus}= | Set Variable | ${1}
-| | ${dut_wk_cpus}= | Set Variable | ${cpu_count_int}
-| | ${vm_cpus}= | Evaluate | ${dut_wk_cpus} + ${dut_main_cpus}
-| | ${skip_dut}= | Evaluate | ${dut_wk_cpus} + ${dut_main_cpus} + ${os_cpus}
-| | ${skip_cpu}= | Evaluate | ${skip_dut} + (${qemu_id} - ${1}) * ${vm_cpus}
-| | ${qemu_cpus}= | Cpu slice of list per node | ${nodes['${dut}']}
-| | ... | ${dut_numa} | skip_cnt=${skip_cpu} | cpu_cnt=${vm_cpus}
-| | ... | smt_used=${smt_used}
-| | ${vm_thrs}= | Get Length | ${qemu_cpus}
 | | Run keyword | ${vm_name}.Qemu Set Queue Count | ${rxq_count_int}
 | | Run keyword | ${vm_name}.Qemu Set Queue Size | ${perf_qemu_qsz}
 | | Run keyword | ${vm_name}.Qemu Add Vhost User If | ${sock1}
@@ -2325,13 +2309,14 @@
 | | ... | ${perf_qemu_path}-patch/bin/
 | | ... | ${perf_qemu_path}-base/bin/
 | | Run Keyword If | ${qemu_build} | ${vm_name}.Build QEMU | ${nodes['${dut}']}
-| | ... | apply_patch=${False}
+| | ... | apply_patch=${apply_patch}
 | | Run keyword | ${vm_name}.Qemu Set Path | ${perf_qemu_path}
-| | Run keyword | ${vm_name}.Qemu Set Smp | ${vm_thrs} | ${vm_thrs} | 1 | 1
+| | Run keyword | ${vm_name}.Qemu Set Smp | ${nf_cpus_count} | ${nf_cpus_count}
+| | ... | 1 | 1
 | | Run keyword | ${vm_name}.Qemu Set Mem Size | 2048
 | | Run keyword | ${vm_name}.Qemu Set Disk Image | ${perf_vm_image}
 | | ${vm}= | Run keyword | ${vm_name}.Qemu Start
-| | Run keyword | ${vm_name}.Qemu Set Affinity | @{qemu_cpus}
+| | Run keyword | ${vm_name}.Qemu Set Affinity | @{nf_cpus}
 | | Run keyword If | ${use_tuned_cfs} | ${vm_name}.Qemu Set Scheduler Policy
 | | ${max_pkt_len}= | Set Variable If | ${jumbo} | 9200 | ${EMPTY}
 | | ${testpmd_cpus}= | Evaluate | ${thr_count_int} + ${1}
@@ -2373,8 +2358,8 @@
 | | | ${vm}=
 | | | ... | Configure guest VM with dpdk-testpmd connected via vhost-user
 | | | ... | ${dut} | ${sock1} | ${sock2} | ${dut}_VM${number}
-| | | ... | qemu_id=${number} | jumbo=${jumbo} | perf_qemu_qsz=${perf_qemu_qsz}
-| | | ... | use_tuned_cfs=${use_tuned_cfs}
+| | | ... | nodeness=${vm_count} | qemu_id=${number} | jumbo=${jumbo}
+| | | ... | perf_qemu_qsz=${perf_qemu_qsz} | use_tuned_cfs=${use_tuned_cfs}
 | | | Set To Dictionary | ${${dut}_vm_refs} | ${dut}_VM${number} | ${vm}
 
 | Configure guest VMs with dpdk-testpmd connected via vhost-user
@@ -2420,6 +2405,8 @@
 | | ... | - vm_name - QemuUtil instance name. Type: string
 | | ... | - eth0_mac - MAC address of first Vhost interface. Type: string
 | | ... | - eth1_mac - MAC address of second Vhost interface. Type: string
+| | ... | - chains: Total number of chains. Type: integer
+| | ... | - nodeness: Total number of nodes per chain. Type: integer
 | | ... | - qemu_id - Qemu Id when starting more then one guest VM on DUT
 | | ... | node. Type: integer
 | | ... | - jumbo - Set True if jumbo frames are used in the test.
@@ -2439,9 +2426,14 @@
 | | ... | \| 00:00:00:00:00:01 \| 00:00:00:00:00:02 \|
 | | ...
 | | [Arguments] | ${dut} | ${sock1} | ${sock2} | ${vm_name}
-| | ... | ${eth0_mac} | ${eth1_mac} | ${qemu_id}=${1} | ${jumbo}=${False}
-| | ... | ${perf_qemu_qsz}=${256} | ${use_tuned_cfs}=${False}
+| | ... | ${eth0_mac} | ${eth1_mac} | ${chains}=${1} | ${nodeness}=${1}
+| | ... | ${qemu_id}=${1} | ${jumbo}=${False} | ${perf_qemu_qsz}=${256}
+| | ... | ${use_tuned_cfs}=${False}
 | | ...
+| | ${nf_cpus}= | Create network function CPU list | ${dut}
+| | ... | chains=${chains} | nodeness=${nodeness} | chain_id=${1}
+| | ... | node_id=${qemu_id} | auto_scale=${True}
+| | ${nf_cpus_count}= | Get Length | ${nf_cpus}
 | | Import Library | resources.libraries.python.QemuUtils | qemu_id=${qemu_id}
 | | ... | WITH NAME | ${vm_name}
 | | Run keyword | ${vm_name}.Qemu Set Node | ${nodes['${dut}']}
@@ -2449,19 +2441,6 @@
 | | Run keyword | ${vm_name}.Qemu Set Serial Port | ${serial_port}
 | | ${ssh_fwd_port}= | Evaluate | ${qemu_id} + ${10021}
 | | Run keyword | ${vm_name}.Qemu Set Ssh Fwd Port | ${ssh_fwd_port}
-| | ${dut_numa}= | Get interfaces numa node | ${nodes['${dut}']}
-| | ... | ${${dut}_if1} | ${${dut}_if2}
-# Compute CPU placement for VM based on expected DUT placement.
-| | ${os_cpus}= | Set Variable | ${1}
-| | ${dut_main_cpus}= | Set Variable | ${1}
-| | ${dut_wk_cpus}= | Set Variable | ${cpu_count_int}
-| | ${vm_cpus}= | Evaluate | ${dut_wk_cpus} + ${dut_main_cpus}
-| | ${skip_dut}= | Evaluate | ${dut_wk_cpus} + ${dut_main_cpus} + ${os_cpus}
-| | ${skip_cpu}= | Evaluate | ${skip_dut} + (${qemu_id} - ${1}) * ${vm_cpus}
-| | ${qemu_cpus}= | Cpu slice of list per node | ${nodes['${dut}']}
-| | ... | ${dut_numa} | skip_cnt=${skip_cpu} | cpu_cnt=${vm_cpus}
-| | ... | smt_used=${smt_used}
-| | ${vm_thrs}= | Get Length | ${qemu_cpus}
 | | Run keyword | ${vm_name}.Qemu Set Queue Count | ${rxq_count_int}
 | | Run keyword | ${vm_name}.Qemu Set Queue Size | ${perf_qemu_qsz}
 | | Run keyword | ${vm_name}.Qemu Add Vhost User If | ${sock1}
@@ -2475,11 +2454,12 @@
 | | Run Keyword If | ${qemu_build} | ${vm_name}.Build QEMU | ${nodes['${dut}']}
 | | ... | apply_patch=${False}
 | | Run keyword | ${vm_name}.Qemu Set Path | ${perf_qemu_path}
-| | Run keyword | ${vm_name}.Qemu Set Smp | ${vm_thrs} | ${vm_thrs} | 1 | 1
+| | Run keyword | ${vm_name}.Qemu Set Smp | ${nf_cpus_count} | ${nf_cpus_count}
+| | ... | 1 | 1
 | | Run keyword | ${vm_name}.Qemu Set Mem Size | 2048
 | | Run keyword | ${vm_name}.Qemu Set Disk Image | ${perf_vm_image}
 | | ${vm}= | Run keyword | ${vm_name}.Qemu Start
-| | Run keyword | ${vm_name}.Qemu Set Affinity | @{qemu_cpus}
+| | Run keyword | ${vm_name}.Qemu Set Affinity | @{nf_cpus}
 | | Run keyword If | ${use_tuned_cfs} | ${vm_name}.Qemu Set Scheduler Policy
 | | ${max_pkt_len}= | Set Variable If | ${jumbo} | 9200 | ${EMPTY}
 | | ${testpmd_cpus}= | Evaluate | ${thr_count_int} + ${1}
@@ -2526,8 +2506,8 @@
 | | | ... | Configure guest VM with dpdk-testpmd-mac connected via vhost-user
 | | | ... | ${dut} | ${sock1} | ${sock2} | ${dut}_VM${number}
 | | | ... | ${${dut}-vhost-${number}-if1_mac}
-| | | ... | ${${dut}-vhost-${number}-if2_mac} | qemu_id=${number}
-| | | ... | jumbo=${jumbo} | perf_qemu_qsz=${perf_qemu_qsz}
+| | | ... | ${${dut}-vhost-${number}-if2_mac} | nodeness=${vm_count}
+| | | ... | qemu_id=${number} | jumbo=${jumbo} | perf_qemu_qsz=${perf_qemu_qsz}
 | | | ... | use_tuned_cfs=${use_tuned_cfs}
 | | | Set To Dictionary | ${${dut}_vm_refs} | ${dut}_VM${number} | ${vm}
@@ -2560,66 +2540,6 @@
 | | | ... | perf_qemu_qsz=${perf_qemu_qsz} | use_tuned_cfs=${False}
 | | All VPP Interfaces Ready Wait | ${nodes}
 
-| Configure guest VM with linux bridge connected via vhost-user
-| | [Documentation]
-| | ... | Start QEMU guest with two vhost-user interfaces and interconnecting\
-| | ... | linux bridge.
-| | ...
-| | ... | *Arguments:*
-| | ... | - dut - DUT node to start guest VM on. Type: dictionary
-| | ... | - sock1 - Socket path for first Vhost-User interface.
-| | ... | Type: string
-| | ... | - sock2 - Socket path for second Vhost-User interface.
-| | ... | Type: string
-| | ... | - vm_name - QemuUtil instance name. Type: string
-| | ... | - skip_cnt - number of cpus which will be skipped. Type: int
-| | ...
-| | ... | _NOTE:_ This KW expects following test case variables to be set:
-| | ... | - cpu_count_int - Number of Physical CPUs allocated for DUT.
-| | ...
-| | ... | *Example:*
-| | ...
-| | ... | \| Configure guest VM with linux bridge connected via vhost-user \
-| | ... | \| ${nodes['DUT1']} \| /tmp/sock1 \| /tmp/sock2 \| DUT1_VM \| ${6} \|
-| | ...
-| | [Arguments] | ${dut} | ${sock1} | ${sock2} | ${vm_name} | ${skip_cnt}=${6}
-| | ... | ${count}=${5}
-| | ...
-| | Import Library | resources.libraries.python.QemuUtils
-| | ... | WITH NAME | ${vm_name}
-| | Run keyword | ${vm_name}.Qemu Set Node | ${dut}
-| | ${dut_numa}= | Get interfaces numa node | ${dut}
-| | ... | ${dut1_if1} | ${dut1_if2}
-| | ${vm_phy_cpus}= | Evaluate | ${cpu_count_int} + ${1}
-| | ${skip_cnt}= | Evaluate | ${skip} + (${qemu_id} - ${1}) * ${vm_phy_cpus}
-| | ${qemu_cpus}= | Cpu slice of list per node | ${dut} | ${dut_numa}
-| | ... | skip_cnt=${skip_cnt} | cpu_cnt=${vm_phy_cpus} | smt_used=${smt_used}
-| | ${vm_thr_cpus}= | Get Length | ${qemu_cpus}
-| | Run keyword | ${vm_name}.Qemu Set Queue Size | ${perf_qemu_qsz}
-| | Run keyword | ${vm_name}.Qemu Add Vhost User If | ${sock1}
-| | Run keyword | ${vm_name}.Qemu Add Vhost User If | ${sock2}
-| | ${apply_patch}= | Set Variable | ${False}
-| | ${perf_qemu_path}= | Set Variable If | ${apply_patch}
-| | ... | ${perf_qemu_path}-patch/bin/
-| | ... | ${perf_qemu_path}-base/bin/
-| | Run Keyword If | ${qemu_build} | ${vm_name}.Build QEMU | ${dut}
-| | ... | apply_patch=${apply_patch}
-| | Run keyword | ${vm_name}.Qemu Set Path | ${perf_qemu_path}
-| | Run keyword | ${vm_name}.Qemu Set Smp | ${vm_thr_cpus} | ${vm_thr_cpus}
-| | ... | 1 | 1
-| | Run keyword | ${vm_name}.Qemu Set Mem Size | 2048
-| | Run keyword | ${vm_name}.Qemu Set Disk Image | ${perf_vm_image}
-| | ${vm}= | Run keyword | ${vm_name}.Qemu Start
-| | Run keyword | ${vm_name}.Qemu Set Affinity | @{qemu_cpus}
-| | ${br}= | Set Variable | br0
-| | ${vhost1}= | Get Vhost User If Name By Sock | ${vm} | ${sock1}
-| | ${vhost2}= | Get Vhost User If Name By Sock | ${vm} | ${sock2}
-| | Linux Add Bridge | ${vm} | ${br} | ${vhost1} | ${vhost2}
-| | Set Interface State | ${vm} | ${vhost1} | up | if_type=name
-| | Set Interface State | ${vm} | ${vhost2} | up | if_type=name
-| | Set Interface State | ${vm} | ${br} | up | if_type=name
-| | Return From Keyword | ${vm}
-
 | Initialize LISP IPv4 forwarding in 3-node circular topology
 | | [Documentation] | Custom setup of IPv4 addresses on all DUT nodes and TG \
 | | ... | Don`t set route.
diff --git a/resources/libraries/robot/performance/performance_utils.robot b/resources/libraries/robot/performance/performance_utils.robot
index 1be3119713..4033442833 100644
--- a/resources/libraries/robot/performance/performance_utils.robot
+++ b/resources/libraries/robot/performance/performance_utils.robot
@@ -482,3 +482,54 @@
 | | Run Keyword If | ${dut_stats}==${True}
 | | ... | Show runtime counters on all DUTs | ${nodes}
 | | Stop traffic on tg
+
+| Create network function CPU list
+| | [Documentation]
+| | ... | Create list of CPUs allocated for network function base on SUT/DUT
+| | ... | placement and other network functions placement.
+| | ...
+| | ... | *Arguments:*
+| | ... | - dut - DUT node. Type: dictionary
+| | ... | - chains: Total number of chains. Type: integer
+| | ... | - nodeness: Total number of nodes per chain. Type: integer
+| | ... | - chain_id - Network function chain ID. Type: integer
+| | ... | - node_id - Network function node ID within chain. Type: integer
+| | ... | - mtcr - Main thread to core ratio. Type: integer
+| | ... | - dtcr - Dataplane thread to core ratio. Type: integer
+| | ... | - auto_scale - If True, use same amount of Dataplane threads for
+| | ... | network function as DUT, otherwise use single physical core for
+| | ... | every network function. Type: boolean
+| | ...
+| | ... | *Note:*
+| | ... | KW uses test variables \${cpu_count_int} set by
+| | ... | "Add worker threads and rxqueues to all DUTs"
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Create network function CPU list \| ${nodes['DUT1']} \
+| | ... | \| 1 \| 1 \| 1 \| 1 \|
+| | ...
+| | [Arguments] | ${dut} | ${chains}=${1} | ${nodeness}=${1} | ${chain_id}=${1}
+| | ... | ${node_id}=${1} | ${mtcr}=${2} | ${dtcr}=${1} | ${auto_scale}=${False}
+| | ...
+| | ${sut_sc}= | Set Variable | ${1}
+| | ${dut_mc}= | Set Variable | ${1}
+| | ${dut_dc}= | Set Variable | ${cpu_count_int}
+| | ${skip}= | Evaluate | ${sut_sc} + ${dut_mc} + ${dut_dc}
+| | ${dtc}= | Set Variable If | ${auto_scale} | ${cpu_count_int} | ${1}
+| | ${if1_status} | ${value}= | Run Keyword And Ignore Error
+| | ... | Variable Should Exist | ${${dut}_if1}
+| | @{if_list}= | Run Keyword If | '${if1_status}' == 'PASS'
+| | ... | Create List | ${${dut}_if1}
+| | ... | ELSE | Create List | ${${dut}_if1_1} | ${${dut}_if1_2}
+| | ${if2_status} | ${value}= | Run Keyword And Ignore Error
+| | ... | Variable Should Exist | ${${dut}_if2}
+| | Run Keyword If | '${if2_status}' == 'PASS'
+| | ... | Append To List | ${if_list} | ${${dut}_if2}
+| | ... | ELSE | Append To List | ${if_list} | ${${dut}_if2_1} | ${${dut}_if2_2}
+| | ${dut_numa}= | Get interfaces numa node | ${nodes['${dut}']} | @{if_list}
+| | ${nf_cpus}= | Cpu slice of list for NF | node=${nodes['${dut}']}
+| | ... | cpu_node=${dut_numa} | chains=${chains} | nodeness=${nodeness}
+| | ... | chain_id=${chain_id} | node_id=${node_id} | mtcr=${mtcr}
+| | ... | dtcr=${dtcr} | dtc=${dtc} | skip_cnt=${skip}
+| | Return From Keyword | ${nf_cpus}
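For orientation, the skip/dtc bookkeeping that the new `Create network function CPU list` keyword performs before calling `Cpu slice of list for NF` reduces to a few integers. A hedged Python sketch, with variable names mirroring the keyword and an illustrative `cpu_count_int` value:

```python
# Sketch of the skip/dtc computation in "Create network function CPU list"
# (illustrative values; not CSIT code).
cpu_count_int = 2            # VPP dataplane cores, set by the rxq/worker keyword
auto_scale = True

sut_sc = 1                   # roughly: core left for the host OS
dut_mc = 1                   # core used by the VPP main thread
dut_dc = cpu_count_int       # cores used by VPP dataplane workers
skip_cnt = sut_sc + dut_mc + dut_dc
dtc = cpu_count_int if auto_scale else 1

# skip_cnt entries of the NUMA CPU list are reserved for host + vswitch; each
# NF then receives dtc dataplane cores on top of its main-thread core.
print(skip_cnt, dtc)         # -> 4 2
```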