path: root/src/scripts/fts.py
blob: b579190e433eb597e737489510e384f2a08f250e
#!/usr/bin/env python3
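"""Collect, validate, and render VPP FEATURE.yaml files.

The file list comes from stdin (or a file argument), from 'git status', or
from 'git ls-files'. Each file is validated against the JSON schema below,
and the result can be emitted as a markdown feature list with a table of
contents.
"""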

import sys
import os
import yaml
from jsonschema import validate, exceptions
import argparse
from subprocess import run, PIPE
from io import StringIO

# VPP feature JSON schema
schema = {
    "$schema": "http://json-schema.org/schema#",
    "type": "object",
    "properties": {
        "name": {"type": "string"},
        "description": {"type": "string"},
        "maintainer": {"$ref": "#/definitions/maintainers"},
        "state": {"type": "string",
                  "enum": ["production", "experimental", "development"]},
        "features": {"$ref": "#/definitions/features"},
        "missing": {"$ref": "#/definitions/features"},
        "properties": {"type": "array",
                       "items": {"type": "string",
                                 "enum": ["API", "CLI", "STATS",
                                          "MULTITHREAD"]},
                       },
    },
    "additionalProperties": False,
    "definitions": {
        "maintainers": {
            "anyof": [{
                "type": "array",
                "items": {"type": "string"},
                "minItems": 1,
            },
                {"type": "string"}],
        },
        "featureobject": {
            "type": "object",
            "patternProperties": {
                "^.*$": {"$ref": "#/definitions/features"},
            },
        },
        "features": {
            "type": "array",
            "items": {"anyOf": [{"$ref": "#/definitions/featureobject"},
                                {"type": "string"},
                                ]},
            "minItems": 1,
        },
    },
}
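
# A minimal document that validates against the schema above, for reference.
# The feature and maintainer names below are illustrative only, not taken
# from an actual FEATURE.yaml in the tree:
#
#   name: Example Feature
#   maintainer: Jane Doe <jane.doe@example.com>
#   description: One-line summary of what the feature provides.
#   state: production
#   features:
#     - Basic operation
#     - Advanced operation:
#         - Sub-feature one
#         - Sub-feature two
#   missing:
#     - Not yet implemented item
#   properties: [API, CLI]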


def filelist_from_git_status():
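    """Return paths of FEATURE*.yaml files reported by 'git status --porcelain'."""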
    filelist = []
    git_status = 'git status --porcelain */FEATURE*.yaml'
    rv = run(git_status.split(), stdout=PIPE, stderr=PIPE)
    if rv.returncode != 0:
        sys.exit(rv.returncode)

    for line in rv.stdout.decode('ascii').split('\n'):
        if len(line):
            filelist.append(line.split()[1])
    return filelist


def filelist_from_git_ls():
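    """Return paths of all FEATURE*.yaml files tracked in the repository."""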
    filelist = []
    git_ls = 'git ls-files :(top)*/FEATURE*.yaml'
    rv = run(git_ls.split(), stdout=PIPE, stderr=PIPE)
    if rv.returncode != 0:
        sys.exit(rv.returncode)

    for line in rv.stdout.decode('ascii').split('\n'):
        if len(line):
            filelist.append(line)
    return filelist

def version_from_git():
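    """Return the current VPP version string from 'git describe'."""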
    git_describe = 'git describe'
    rv = run(git_describe.split(), stdout=PIPE, stderr=PIPE)
    if rv.returncode != 0:
        sys.exit(rv.returncode)
    return rv.stdout.decode('ascii').split('\n')[0]

class MarkDown():
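    """Render a parsed FEATURE.yaml dictionary as markdown.

    Each print_* method handles one top-level key and is registered in
    _dispatch; print() selects the handler by key name. Feature names are
    collected in self.toc so a table of contents can be written separately.
    """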
    _dispatch = {}

    def __init__(self, stream):
        self.stream = stream
        self.toc = []

    def print_maintainer(self, o):
        write = self.stream.write
        if type(o) is list:
            write('Maintainers: ' +
                  ', '.join('{m}'.format(m=m) for m in
                            o) + '  \n')
        else:
            write('Maintainer: {o}  \n'.format(o=o))

    _dispatch['maintainer'] = print_maintainer

    def print_features(self, o, indent=0):
        write = self.stream.write
        for f in o:
            indentstr = ' ' * indent
            if type(f) is dict:
                for k, v in f.items():
                    write('{indentstr}- {k}\n'.format(indentstr=indentstr, k=k))
                    self.print_features(v, indent + 2)
            else:
                write('{indentstr}- {f}\n'.format(indentstr=indentstr, f=f))
        write('\n')
    _dispatch['features'] = print_features

    def print_markdown_header(self, o):
        write = self.stream.write
        write('## {o}\n'.format(o=o))
        version = version_from_git()
        write('VPP version: {version}\n\n'.format(version=version))
    _dispatch['markdown_header'] = print_markdown_header

    def print_name(self, o):
        write = self.stream.write
        write('### {o}\n'.format(o=o))
        self.toc.append(o)
    _dispatch['name'] = print_name

    def print_description(self, o):
        write = self.stream.write
        write('\n{o}\n\n'.format(o=o))
    _dispatch['description'] = print_description

    def print_state(self, o):
        write = self.stream.write
        write('Feature maturity level: {o}  \n'.format(o=o))
    _dispatch['state'] = print_state

    def print_properties(self, o):
        write = self.stream.write
        write('Supports: {s}  \n'.format(s=" ".join(o)))
    _dispatch['properties'] = print_properties

    def print_missing(self, o):
        write = self.stream.write
        write('\nNot yet implemented:  \n')
        self.print_features(o)
    _dispatch['missing'] = print_missing

    def print_code(self, o):
        write = self.stream.write
        write('Source Code: [{o}]({o}) \n'.format(o=o))
    _dispatch['code'] = print_code

    def print(self, t, o):
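        """Dispatch to the print_* handler registered for key t, passing value o."""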
        write = self.stream.write
        if t in self._dispatch:
            self._dispatch[t](self, o,)
        else:
            write('NOT IMPLEMENTED: {t}\n'.format(t=t))

def output_toc(toc, stream):
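    """Write a markdown table of contents with anchor links for each entry in toc."""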
    write = stream.write
    write('## VPP Feature list:\n')

    for t in toc:
        ref = t.lower().replace(' ', '-')
        write('[{t}](#{ref})  \n'.format(t=t, ref=ref))

def featuresort(k):
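    """Sort key: order (path, featuredef) items by feature name."""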
    return k[1]['name']

def featurelistsort(k):
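    """Sort key: order the fields of a feature definition for markdown output."""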
    orderedfields = {
        'name': 0,
        'maintainer': 1,
        'description': 2,
        'features': 3,
        'state': 4,
        'properties': 5,
        'missing': 6,
        'code': 7,
    }
    return orderedfields[k[0]]

def output_markdown(features, fields, notfields):
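    """Render the feature definitions as markdown, honoring the include and
    exclude field lists; return a (toc stream, body stream) pair."""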
    stream = StringIO()
    m = MarkDown(stream)
    m.print('markdown_header', 'Feature Details:')
    for path, featuredef in sorted(features.items(), key=featuresort):
        codeurl = 'https://git.fd.io/vpp/tree/src/' + \
                  '/'.join(os.path.normpath(path).split('/')[1:-1])
        featuredef['code'] = codeurl
        for k, v in sorted(featuredef.items(), key=featurelistsort):
            if notfields:
                if k not in notfields:
                    m.print(k, v)
            elif fields:
                if k in fields:
                    m.print(k, v)
            else:
                m.print(k, v)

    tocstream = StringIO()
    output_toc(m.toc, tocstream)
    return tocstream, stream

def main():
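    """Parse arguments, load and schema-validate FEATURE.yaml files, and
    optionally print the markdown feature list."""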
    parser = argparse.ArgumentParser(description='VPP Feature List.')
    parser.add_argument('--validate', dest='validate', action='store_true',
                        help='validate the FEATURE.yaml file')
    parser.add_argument('--git-status', dest='git_status', action='store_true',
                        help='Get filelist from git status')
    parser.add_argument('--all', dest='all', action='store_true',
                        help='Validate all files in repository')
    parser.add_argument('--markdown', dest='markdown', action='store_true',
                        help='Output feature table in markdown')
    parser.add_argument('infile', nargs='?', type=argparse.FileType('r'),
                        default=sys.stdin)
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--include', help='List of fields to include')
    group.add_argument('--exclude', help='List of fields to exclude')
    args = parser.parse_args()
    features = {}

    if args.git_status:
        filelist = filelist_from_git_status()
    elif args.all:
        filelist = filelist_from_git_ls()
    else:
        filelist = args.infile

    if args.include:
        fields = args.include.split(',')
    else:
        fields = []
    if args.exclude:
        notfields = args.exclude.split(',')
    else:
        notfields = []

    for featurefile in filelist:
        featurefile = featurefile.rstrip()

        # Load configuration file
        with open(featurefile, encoding='utf-8') as f:
            cfg = yaml.load(f, Loader=yaml.SafeLoader)
        try:
            validate(instance=cfg, schema=schema)
        except exceptions.ValidationError:
            print('File does not validate: {featurefile}' \
                  .format(featurefile=featurefile), file=sys.stderr)
            raise
        features[featurefile] = cfg

    if args.markdown:
        stream = StringIO()
        tocstream, stream = output_markdown(features, fields, notfields)
        print(tocstream.getvalue())
        print(stream.getvalue())
        stream.close()


if __name__ == '__main__':
    main()
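
# Example invocations (illustrative; file names are placeholders). Schema
# validation runs on every FEATURE.yaml that is loaded, regardless of flags:
#
#   ./src/scripts/fts.py --all --markdown > FEATURE_LIST.md
#   ./src/scripts/fts.py --validate --git-status
#   ./src/scripts/fts.py --markdown --include name,maintainer < filelist.txt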