path: root/main.py
blob: d575567854c5ba854d6008c56d7317772b436a31
#!/usr/bin/env python

# Copyright (c) 2016 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This is a helper script to make test execution easy."""

from __future__ import print_function
import sys
import os
import time
from string import ascii_lowercase
from random import sample
import argparse
from pykwalify.core import Core
from pykwalify.errors import PyKwalifyException
from yaml import safe_load
import robot
from robot.errors import DATA_ERROR, DataError, FRAMEWORK_ERROR, FrameworkError
from robot.run import RobotFramework
from robot.conf.settings import RobotSettings
from robot.running.builder import TestSuiteBuilder
from robot.running.model import TestSuite

TOPOLOGIES_DIR = './topologies/enabled/'
TESTS_DIR = './tests'
OUTPUTS_DIR = './outputs'


def get_suite_list(*datasources, **options):
    """Returns filtered test suites based on include exclude tags

    :param datasources: paths to tests
    :param options: Robot Framework options (robot.conf.settings.py)
    :return: list of Robot Framework TestSuites which contain tests
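
    Example (illustrative call; the tag names are hypothetical):
        suites = get_suite_list('./tests', include=['smoke'], exclude=['wip'])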
    """
    class _MyRobotFramework(RobotFramework):
        """Custom implementation of RobotFramework main()."""
        def main(self, datasources, **options):
            # copied from robot.run.RobotFramework.main
            settings = RobotSettings(options)
            builder = TestSuiteBuilder(settings['SuiteNames'],
                                       settings['WarnOnSkipped'])
            # pylint: disable=star-args
            suite = builder.build(*datasources)
            suite.configure(**settings.suite_config)

            return suite

    # Build the full list of test cases without running them; execute() calls
    # the overridden main() above.
    suite = _MyRobotFramework().execute(*datasources, output=None, dryrun=True,
                                        **options)
    if isinstance(suite, TestSuite):
        # Flatten the suite tree into the list of leaf suites, i.e. the suites
        # that directly contain test cases.
        suites = [suite]
        append_new = True
        while append_new:
            append_new = False
            tmp = []
            for suite in suites:
                # pylint: disable=protected-access
                if suite.suites._items:
                    tmp.extend(suite.suites._items)
                    append_new = True
                else:
                    tmp.append(suite)
            suites = tmp
        # TODO: check test case tags? All tests should have the same tag set.
        return suites
    else:
        if suite == DATA_ERROR:
            raise DataError
        if suite == FRAMEWORK_ERROR:
            raise FrameworkError
        return []


def run_suites(tests_dir, suites, output_dir, output_prefix='suite',
               **options):
    """Execute RF's run with parameters."""

    with open('{}/{}.out'.format(output_dir, output_prefix), 'w') as out:
        robot.run(tests_dir,
                  suite=[s.longname for s in suites],
                  output='{}/{}.xml'.format(output_dir, output_prefix),
                  debugfile='{}/{}.log'.format(output_dir, output_prefix),
                  log=None,
                  report=None,
                  stdout=out,
                  **options)


def parse_outputs(output_dir):
    """Parse output xmls from all executed tests."""

    outs = [os.path.join(output_dir, file_name)
            for file_name in os.listdir(output_dir)
            if file_name.endswith('.xml')]
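    # rebot with merge=True combines the per-suite output XMLs into a single
    # result set and, by default, writes log.html and report.html to the
    # current working directory.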
    # pylint: disable=star-args
    robot.rebot(*outs, merge=True)


def topology_lookup(topology_paths, topo_dir, validate):
    """Make topology list and validate topologies against schema

    :param parsed_args: topology list, is empty then scans topologies in
                        topo_dir
    :param topo_dir: scan directory for topologies
    :param validate: if True then validate topology
    :return: list of topologies
    """

    ret_topologies = []
    if topology_paths:
        for topo in topology_paths:
            if os.path.exists(topo):
                ret_topologies.append(topo)
            else:
                print("Topology file {} doesn't exist".format(topo),
                      file=sys.stderr)
    else:
        ret_topologies = [os.path.join(topo_dir, file_name)
                          for file_name in os.listdir(topo_dir)
                          if file_name.lower().endswith('.yaml')]

    if not ret_topologies:
        print('No valid topology found', file=sys.stderr)
        sys.exit(1)

    # validate topologies against schema
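    # Each topology YAML is expected to reference its pykwalify schema file(s)
    # under the "metadata" -> "schema" key, for example (illustrative layout
    # only; the schema path is hypothetical):
    #
    #   metadata:
    #     schema:
    #       - topologies/schemas/topology.sch.yaml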
    exit_on_error = False
    for topology_name in ret_topologies:
        try:
            with open(topology_name) as stream:
                yaml_obj = safe_load(stream)
            core = Core(source_file=topology_name,
                        schema_files=yaml_obj["metadata"]["schema"])
            core.validate()
        except PyKwalifyException as ex:
            print('Unable to verify topology {}, schema error: {}'
                  .format(topology_name, ex), file=sys.stderr)
            exit_on_error = True
        except KeyError as ex:
            print('Unable to verify topology {}, key error: {}'
                  .format(topology_name, ex), file=sys.stderr)
            exit_on_error = True
        except Exception as ex:  # pylint: disable=broad-except
            print('Unable to verify topology {}, {}'.format(topology_name, ex),
                  file=sys.stderr)
            exit_on_error = True

    if exit_on_error and validate:
        sys.exit(1)

    return ret_topologies


def main():
    """Main function."""
    parser = argparse.ArgumentParser(description='A test runner')
    parser.add_argument('-i', '--include', action='append',
                        help='include tests with tag')
    parser.add_argument('-e', '--exclude', action='append',
                        help='exclude tests with tag')
    parser.add_argument('-s', '--suite', action='append',
                        help='full name of suite to run')
    parser.add_argument('-t', '--topology', action='append',
                        help='topology where tests should be run')
    parser.add_argument('-d', '--test_dir', nargs='?', default=TESTS_DIR,
                        help='where tests are stored')
    parser.add_argument('-o', '--output_dir', nargs='?', default=OUTPUTS_DIR,
                        help='where results are stored')
    parser.add_argument('-L', '--loglevel', nargs='?', default='INFO', type=str,
                        choices=['TRACE', 'DEBUG', 'INFO', 'WARN', 'NONE'],
                        help='Robot Framework log level')
    parser.add_argument('-n', '--no_validate', action="store_false",
                        help='do not exit if topology validation fails')

    args = parser.parse_args()

    incl = args.include or []
    excl = args.exclude or []
    suite_filter = args.suite or []
    test_dir = args.test_dir

    # prepare output subdir
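    # (a fresh, timestamped directory per run, e.g. ./outputs/170301123045;
    # the timestamp shown is illustrative)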
    suite_output_dir = os.path.join(args.output_dir,
                                    time.strftime('%y%m%d%H%M%S'))
    os.makedirs(suite_output_dir)

    topologies = topology_lookup(args.topology, TOPOLOGIES_DIR,
                                 args.no_validate)
    suite_list = get_suite_list(test_dir, include=incl, exclude=excl,
                                suite=suite_filter)

    # TODO: do the topology suite mapping magic
    #       for now all tests on single topology
    if len(topologies) > 1:
        print('Multiple topologies are not supported yet', file=sys.stderr)
        sys.exit(1)
    topology_suite_mapping = {topologies[0]: suite_list}

    # on all topologies, run test
    # TODO: run parallel
    for topology_path, topology_suite_list in topology_suite_mapping.items():
        topology_path_variable = 'TOPOLOGY_PATH:{}'.format(topology_path)
        variables = [topology_path_variable]
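        # Passed to robot.run() as a suite variable, i.e. ${TOPOLOGY_PATH}
        # inside the test suites.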
        print('Running tests on topology {}'.format(topology_path))
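        # A random five-letter prefix keeps the output files of each
        # run_suites() call distinct within suite_output_dir.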
        run_suites(test_dir, topology_suite_list, variable=variables,
                   output_dir=suite_output_dir,
                   output_prefix=''.join(sample(ascii_lowercase, 5)),
                   include=incl, exclude=excl, loglevel=args.loglevel)

    print('Parsing test results')
    parse_outputs(suite_output_dir)


if __name__ == "__main__":
    main()