summaryrefslogtreecommitdiffstats
path: root/docs
diff options
context:
space:
mode:
authorDave Barach <dave@barachs.net>2019-09-23 10:27:27 -0400
committerFlorin Coras <florin.coras@gmail.com>2019-09-23 16:20:06 +0000
commit33909777c63712ca397165cd92e7cc62208eb5c8 (patch)
treeb89669157d6f2a181d67eef5c7beb172fbf37d8c /docs
parent44ca60ecdba866160bebbc6c1eb983674819d429 (diff)
misc: unify pcap rx / tx / drop trace
Use a single vnet_pcap_t in vlib_global_main, specifically to support unified tracing Update sphinx docs, doxygen tags Type: refactor Ticket: VPP-1776 Signed-off-by: Dave Barach <dave@barachs.net> Change-Id: Id15d41a596712968c0714cef1bd2cd5bc9cbdd55
Diffstat (limited to 'docs')
-rw-r--r--docs/gettingstarted/developers/vnet.md24
1 files changed, 16 insertions, 8 deletions
diff --git a/docs/gettingstarted/developers/vnet.md b/docs/gettingstarted/developers/vnet.md
index b8f878c2cd7..826af0a90a9 100644
--- a/docs/gettingstarted/developers/vnet.md
+++ b/docs/gettingstarted/developers/vnet.md
@@ -470,25 +470,33 @@ This should be of significant value when developing new vpp graph
nodes. If new code mispositions b->current_data, it will be completely
obvious from looking at the dispatch trace in wireshark.
-## pcap rx and tx tracing
+## pcap rx, tx, and drop tracing
-vpp also supports rx and tx packet capture in pcap format, through the
-"pcap rx trace" and "pcap tx trace" debug CLI commands
+vpp also supports rx, tx, and drop packet capture in pcap format,
+through the "pcap trace" debug CLI command.
-This command is used to start or stop a packet capture, or show
-the status of packet capture. Note that both "pcap rx trace" and
-"pcap tx trace" are implemented. The command syntax is identical,
-simply substitute rx for tx as needed.
+This command is used to start or stop a packet capture, or show the
+status of packet capture. Each of "pcap trace rx", "pcap trace tx",
+and "pcap trace drop" is implemented. Supply one or more of "rx",
+"tx", and "drop" to enable multiple simultaneous capture types.
These commands have the following optional parameters:
-on|off- Used to start or stop a packet capture.
+- <b>rx</b> - trace received packets.
+
+- <b>tx</b> - trace transmitted packets.
+
+- <b>drop</b> - trace dropped packets.
- <b>max _nnnn_</b> - file size, number of packet captures. Once
<nnnn> packets have been received, the trace buffer is flushed
to the indicated file. Defaults to 1000. Can only be updated if packet
capture is off.
+- <b>max-bytes-per-pkt _nnnn_</b> - maximum number of bytes to trace
+ on a per-packet basis. Must be >32 and less than 9000. Default value:
+ 512.
+
- <b>intfc _interface_ | _any_</b> - Used to specify a given interface,
or use '<em>any</em>' to run packet capture on all interfaces.
'<em>any</em>' is the default if not provided. Settings from a previous
ef='#n90'>90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233
#!/usr/bin/env python

# Copyright (c) 2016 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This is a helper script to make test execution easy."""

from __future__ import print_function
import sys
import os
import time
from string import ascii_lowercase
from random import sample
import argparse
from pykwalify.core import Core
from pykwalify.errors import PyKwalifyException
from yaml import load
import robot
from robot.errors import DATA_ERROR, DataError, FRAMEWORK_ERROR, FrameworkError
from robot.run import RobotFramework
from robot.conf.settings import RobotSettings
from robot.running.builder import TestSuiteBuilder
from robot.running.model import TestSuite

# Directory scanned for enabled topology definition files (YAML).
TOPOLOGIES_DIR = './topologies/enabled/'
# Default directory containing the Robot Framework test suites.
TESTS_DIR = './tests'
# Default root directory under which per-run output subdirs are created.
OUTPUTS_DIR = './outputs'


def get_suite_list(*datasources, **options):
    """Returns filtered test suites based on include exclude tags

    :param datasources: paths to tests
    :param options: Robot Framework options (robot.conf.settings.py)
    :return: list of Robot Framework TestSuites which contain tests
    """
    class _MyRobotFramework(RobotFramework):
        """RobotFramework variant whose main() builds but never runs suites."""
        def main(self, datasources, **options):
            # mirrors robot.run.RobotFramework.main, minus the execution step
            settings = RobotSettings(options)
            builder = TestSuiteBuilder(settings['SuiteNames'],
                                       settings['WarnOnSkipped'])
            # pylint: disable=star-args
            root = builder.build(*datasources)
            root.configure(**settings.suite_config)

            return root

    # execute() dispatches to the overloaded main() above; dryrun ensures no
    # test is actually run while the suite tree is assembled
    result = _MyRobotFramework().execute(*datasources, output=None, dryrun=True,
                                         **options)
    if not isinstance(result, TestSuite):
        # execute() returned a return code instead of a suite tree
        if result == DATA_ERROR:
            raise DataError
        if result == FRAMEWORK_ERROR:
            raise FrameworkError
        return []

    # depth-first walk of the suite tree, keeping only leaf suites (the ones
    # that directly contain test cases); order matches document order
    leaves = []
    stack = [result]
    while stack:
        node = stack.pop()
        # pylint: disable=protected-access
        children = node.suites._items
        if children:
            # push in reverse so children are visited left-to-right
            stack.extend(reversed(children))
        else:
            leaves.append(node)
    return leaves
    # TODO: check testcases Tags ? all tests should have same set of tags


def run_suites(tests_dir, suites, output_dir, output_prefix='suite',
               **options):
    """Execute RF's run with parameters."""

    def _out_path(extension):
        # all artifacts share "<output_dir>/<output_prefix>.<extension>"
        return '{}/{}.{}'.format(output_dir, output_prefix, extension)

    with open(_out_path('out'), 'w') as stdout_file:
        robot.run(tests_dir,
                  suite=[s.longname for s in suites],
                  output=_out_path('xml'),
                  debugfile=_out_path('log'),
                  log=None,
                  report=None,
                  stdout=stdout_file,
                  **options)


def parse_outputs(output_dir):
    """Parse output xmls from all executed tests."""

    # gather every Robot Framework XML result file from the output directory
    xml_outputs = []
    for entry in os.listdir(output_dir):
        if entry.endswith('.xml'):
            xml_outputs.append(os.path.join(output_dir, entry))
    # pylint: disable=star-args
    robot.rebot(*xml_outputs, merge=True)


def topology_lookup(topology_paths, topo_dir, validate):
    """Make topology list and validate topologies against schema

    :param topology_paths: explicit topology file paths; when empty,
                           topo_dir is scanned for '*.yaml' files instead
    :param topo_dir: scan directory for topologies
    :param validate: if True, exit the process when schema validation fails
    :return: list of topology file paths
    :raises SystemExit: when no topology is found, or (with validate=True)
                        when any topology fails schema validation
    """

    ret_topologies = []
    if topology_paths:
        for topo in topology_paths:
            if os.path.exists(topo):
                ret_topologies.append(topo)
            else:
                print("Topology file {} doesn't exist".format(topo),
                      file=sys.stderr)
    else:
        ret_topologies = [os.path.join(topo_dir, file_name)
                          for file_name in os.listdir(topo_dir)
                          if file_name.lower().endswith('.yaml')]

    if not ret_topologies:
        print('No valid topology found', file=sys.stderr)
        sys.exit(1)

    # validate topologies against schema; report every error before exiting
    exit_on_error = False
    for topology_name in ret_topologies:
        try:
            with open(topology_name) as file_name:
                # NOTE(review): yaml.load without an explicit Loader can
                # execute arbitrary code on crafted input -- consider
                # yaml.safe_load if topology files may be untrusted.
                yaml_obj = load(file_name)
            core = Core(source_file=topology_name,
                        schema_files=yaml_obj["metadata"]["schema"])
            core.validate()
        except PyKwalifyException as ex:
            print('Unable to verify topology {}, schema error: {}'.\
                  format(topology_name, ex),
                  file=sys.stderr)
            exit_on_error = True
        except KeyError as ex:
            # topology lacks the metadata/schema keys needed for validation
            print('Unable to verify topology {}, key error: {}'.\
                  format(topology_name, ex),
                  file=sys.stderr)
            exit_on_error = True
        except Exception as ex:  # pylint: disable=broad-except
            # best-effort: report any other failure (I/O, parse) and continue
            print('Unable to verify topology {}, {}'.format(topology_name, ex),
                  file=sys.stderr)
            exit_on_error = True

    if exit_on_error and validate:
        sys.exit(1)

    return ret_topologies


def main():
    """Main function.

    Parses command line arguments, validates topologies, builds the filtered
    suite list, runs the suites against the (single) topology and finally
    merges the per-suite results into one report.
    """
    parser = argparse.ArgumentParser(description='A test runner')
    parser.add_argument('-i', '--include', action='append',
                        help='include tests with tag')
    parser.add_argument('-e', '--exclude', action='append',
                        help='exclude tests with tag')
    parser.add_argument('-s', '--suite', action='append',
                        help='full name of suite to run')
    parser.add_argument('-t', '--topology', action='append',
                        help='topology where tests should be run')
    parser.add_argument('-d', '--test_dir', nargs='?', default=TESTS_DIR,
                        help='where tests are stored')
    parser.add_argument('-o', '--output_dir', nargs='?', default=OUTPUTS_DIR,
                        help='where results are stored')
    parser.add_argument('-L', '--loglevel', nargs='?', default='INFO', type=str,
                        choices=['TRACE', 'DEBUG', 'INFO', 'WARN', 'NONE'],
                        help='robot frameworks level for logging')
    parser.add_argument('-n', '--no_validate', action="store_false",
                        help='Do not exit if topology validation failed')

    args = parser.parse_args()

    include_tags = args.include or []
    exclude_tags = args.exclude or []
    suite_filter = args.suite or []
    test_dir = args.test_dir

    # prepare a unique, timestamped output subdir for this run
    suite_output_dir = os.path.join(args.output_dir,
                                    time.strftime('%y%m%d%H%M%S'))
    os.makedirs(suite_output_dir)

    topologies = topology_lookup(args.topology, TOPOLOGIES_DIR,
                                 args.no_validate)
    suite_list = get_suite_list(test_dir, include=include_tags,
                                exclude=exclude_tags, suite=suite_filter)

    # TODO: do the topology suite mapping magic
    #       for now all tests on single topology
    if len(topologies) > 1:
        print('Multiple topologies unsupported yet', file=sys.stderr)
        sys.exit(1)
    topology_suite_mapping = {topologies[0]: suite_list}

    # on all topologies, run test
    # TODO: run parallel
    for topology_path, topology_suite_list in topology_suite_mapping.items():
        topology_path_variable = 'TOPOLOGY_PATH:{}'.format(topology_path)
        variables = [topology_path_variable]
        # fixed typo in user-facing message ("Runing" -> "Running")
        print('Running tests on topology {}'.format(topology_path))
        run_suites(test_dir, topology_suite_list, variable=variables,
                   output_dir=suite_output_dir,
                   # random 5-letter prefix keeps per-suite artifacts unique
                   output_prefix=''.join(sample(ascii_lowercase, 5)),
                   include=include_tags, exclude=exclude_tags,
                   loglevel=args.loglevel)

    print('Parsing test results')
    parse_outputs(suite_output_dir)


if __name__ == "__main__":
    main()