aboutsummaryrefslogtreecommitdiffstats
path: root/src/vnet/util/radix.h
blob: d9ba66592acc7e6b5a3e8baf3bd4d880f53bd9a8 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
/*	$NetBSD: radix.h,v 1.23 2016/11/15 01:50:06 ozaki-r Exp $	*/

/*
 * Copyright (c) 1988, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)radix.h	8.2 (Berkeley) 10/31/94
 */

#ifndef _NET_RADIX_H_
#define	_NET_RADIX_H_

#include <vlib/vlib.h>

/*
 * Radix search tree node layout.
 */

/*
 * One node of the trie.  A node is either an internal (test) node or a
 * leaf; the sign of rn_b distinguishes them (rn_b >= 0: internal node
 * testing bit rn_b of the key; rn_b < 0: leaf holding a key/mask pair).
 * The rn_u union carries whichever payload applies.
 */
struct radix_node {
	struct	radix_mask *rn_mklist;	/* list of masks contained in subtree */
	struct	radix_node *rn_p;	/* parent */
	i16	rn_b;			/* bit offset; -1-index(netmask) */
	u8	rn_bmask;		/* node: mask for bit test*/
	u8	rn_flags;		/* enumerated next */
#define RNF_NORMAL	1		/* leaf contains normal route */
#define RNF_ROOT	2		/* leaf is root leaf for tree */
#define RNF_ACTIVE	4		/* This node is alive (for rtfree) */
	union {
		struct {			/* leaf only data: */
			const char *rn_Key;	/* object of search */
			const char *rn_Mask;	/* netmask, if present */
			struct	radix_node *rn_Dupedkey;	/* chain of leaves sharing this key */
		} rn_leaf;
		struct {			/* node only data: */
			int	rn_Off;		/* where to start compare */
			struct	radix_node *rn_L;/* progeny */
			struct	radix_node *rn_R;/* progeny */
		} rn_node;
	} rn_u;
#ifdef RN_DEBUG
	i32 rn_info;
	struct radix_node *rn_twin;
	struct radix_node *rn_ybro;
#endif
};

/* Shorthand accessors for the live arm of rn_u above. */
#define rn_dupedkey rn_u.rn_leaf.rn_Dupedkey
#define rn_key rn_u.rn_leaf.rn_Key
#define rn_mask rn_u.rn_leaf.rn_Mask
#define rn_off rn_u.rn_node.rn_Off
#define rn_l rn_u.rn_node.rn_L
#define rn_r rn_u.rn_node.rn_R

/*
 * Annotations to tree concerning potential routes applying to subtrees.
 */

/* Reference-counted netmask annotation hung off subtree nodes. */
struct radix_mask {
	i16	rm_b;			/* bit offset; -1-index(netmask) */
	i8	rm_unused;		/* cf. rn_bmask */
	u8	rm_flags;		/* cf. rn_flags */
	struct	radix_mask *rm_mklist;	/* more masks to try */
	union	{
		/* NOTE(review): which arm is live presumably follows
		 * rm_flags & RNF_NORMAL -- confirm against radix.c. */
		const char *rmu_mask;		/* the mask */
		struct	radix_node *rmu_leaf;	/* for normal routes */
	}	rm_rmu;
	i32	rm_refs;		/* # of references to this struct */
};

#define rm_mask rm_rmu.rmu_mask
#define rm_leaf rm_rmu.rmu_leaf		/* extra field would make 32 bytes */

/*
 * Per-tree head: the root pointer, optional fixed key/packet sizes, and
 * a dispatch table of operations so callers can work through function
 * pointers without knowing the concrete implementation.
 */
struct radix_node_head {
	struct	radix_node *rnh_treetop;	/* topmost internal node */
	i32	rnh_addrsize;		/* permit, but not require fixed keys */
	i32	rnh_pktsize;		/* permit, but not require fixed keys */
	struct	radix_node *(*rnh_addaddr)	/* add based on sockaddr */
		(const void *v, const void *mask,
		     struct radix_node_head *head, struct radix_node nodes[]);
	struct	radix_node *(*rnh_addpkt)	/* add based on packet hdr */
		(const void *v, const void *mask,
		     struct radix_node_head *head, struct radix_node nodes[]);
	struct	radix_node *(*rnh_deladdr)	/* remove based on sockaddr */
		(const void *v, const void *mask, struct radix_node_head *head);
	struct	radix_node *(*rnh_delpkt)	/* remove based on packet hdr */
		(const void *v, const void *mask, struct radix_node_head *head);
	struct	radix_node *(*rnh_matchaddr)	/* locate based on sockaddr */
		(const void *v, struct radix_node_head *head);
	struct	radix_node *(*rnh_lookup)	/* locate based on sockaddr */
		(const void *v, const void *mask, struct radix_node_head *head);
	struct	radix_node *(*rnh_matchpkt)	/* locate based on packet hdr */
		(const void *v, struct radix_node_head *head);
	/* NOTE(review): presumably the two boundary leaves plus the root
	 * internal node of the empty tree -- confirm in rn_inithead0(). */
	struct	radix_node rnh_nodes[3];	/* empty tree for common case */
};

/* Global / per-tree initialization. */
void	rn_init(void);
int	rn_inithead(void **, int);
void	rn_delayedinit(void **, int);
int	rn_inithead0(struct radix_node_head *, int);
/* NOTE(review): presumably non-zero when the first mask is more
 * specific than the second -- confirm in radix.c. */
int	rn_refines(const void *, const void *);
/* Visit leaves with a user callback + opaque context pointer. */
int	rn_walktree(struct radix_node_head *,
	            int (*)(struct radix_node *, void *),
		    void *);
struct radix_node *
	rn_search_matched(struct radix_node_head *,
	                  int (*)(struct radix_node *, void *),
		          void *);
/* Core primitives; several take caller-provided node storage
 * (struct radix_node [2]) so insertion does no allocation here. */
struct radix_node
	 *rn_addmask(const void *, int, int),
	 *rn_addroute(const void *, const void *, struct radix_node_head *,
			struct radix_node [2]),
	 *rn_delete1(const void *, const void *, struct radix_node_head *,
			struct radix_node *),
	 *rn_delete(const void *, const void *, struct radix_node_head *),
	 *rn_insert(const void *, struct radix_node_head *, int *,
			struct radix_node [2]),
	 *rn_lookup(const void *, const void *, struct radix_node_head *),
	 *rn_match(const void *, struct radix_node_head *),
	 *rn_newpair(const void *, int, struct radix_node[2]),
	 *rn_search(const void *, struct radix_node *),
	 *rn_search_m(const void *, struct radix_node *, const void *);

#endif /* !_NET_RADIX_H_ */
/** @brief Shared IPv4/IPv6 UDP punt transition node body.
    (NOTE(review): the opening of this comment was truncated by the
    extractor; reconstructed here.  The original said "is_ipv4" but the
    parameter is named is_ip4 -- fixed.)
    @param vm vlib_main_t corresponding to the current thread
    @param node vlib_node_runtime_t
    @param from_frame vlib_frame_t whose contents should be dispatched
    @param is_ip4 indicates if called for IPv4 or IPv6 node
*/
always_inline uword
udp46_punt_inline (vlib_main_t * vm,
		   vlib_node_runtime_t * node,
		   vlib_frame_t * from_frame, int is_ip4)
{
  u32 n_left_from, *from, *to_next;
  word advance;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  /* udp[46]_lookup hands us the data payload, not the IP header */
  if (is_ip4)
    advance = -(sizeof (ip4_header_t) + sizeof (udp_header_t));
  else
    advance = -(sizeof (ip6_header_t) + sizeof (udp_header_t));

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, punt_next_punt (is_ip4), to_next,
			   n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0;
	  vlib_buffer_t *b0;

	  /* Copy buffer index through to the punt next frame. */
	  bi0 = from[0];
	  to_next[0] = bi0;
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;

	  /* Rewind current_data back to the IP header and tag the
	     buffer with the punt error for the error-punt node. */
	  b0 = vlib_get_buffer (vm, bi0);
	  vlib_buffer_advance (b0, advance);
	  b0->error = node->errors[PUNT_ERROR_UDP_PORT];
	}

      vlib_put_next_frame (vm, node, punt_next_punt (is_ip4), n_left_to_next);
    }

  return from_frame->n_vectors;
}

/* Error-string table generated from punt_error.def via X-macro. */
static char *punt_error_strings[] = {
#define punt_error(n,s) s,
#include "punt_error.def"
#undef punt_error
};

/** @brief IPv4 UDP punt node.
    @node ip4-udp-punt

    This is the IPv4 UDP punt transition node.  It is registered as a
    next node for the "ip4-udp-lookup" handling UDP port(s) requested
    for punt.  The buffer's current data pointer is adjusted to the
    original packet IPv4 header.  All buffers are dispatched to
    "error-punt".

    @param vm vlib_main_t corresponding to the current thread
    @param node vlib_node_runtime_t
    @param from_frame vlib_frame_t whose contents should be dispatched

    @par Graph mechanics: next index usage

    @em Sets:
    - <code>vnet_buffer(b)->current_data</code>
    - <code>vnet_buffer(b)->current_len</code>

    <em>Next Index:</em>
    - Dispatches the packet to the "error-punt" node
*/
VLIB_NODE_FN (udp4_punt_node) (vlib_main_t * vm,
			       vlib_node_runtime_t * node,
			       vlib_frame_t * from_frame)
{
  return udp46_punt_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

/** @brief IPv6 UDP punt node.
    @node ip6-udp-punt

    This is the IPv6 UDP punt transition node.  It is registered as a
    next node for the "ip6-udp-lookup" handling UDP port(s) requested
    for punt.  The buffer's current data pointer is adjusted to the
    original packet IPv6 header.  All buffers are dispatched to
    "error-punt".

    @param vm vlib_main_t corresponding to the current thread
    @param node vlib_node_runtime_t
    @param from_frame vlib_frame_t whose contents should be dispatched

    @par Graph mechanics: next index usage

    @em Sets:
    - <code>vnet_buffer(b)->current_data</code>
    - <code>vnet_buffer(b)->current_len</code>

    <em>Next Index:</em>
    - Dispatches the packet to the "error-punt" node
*/
VLIB_NODE_FN (udp6_punt_node) (vlib_main_t * vm,
			       vlib_node_runtime_t * node,
			       vlib_frame_t * from_frame)
{
  return udp46_punt_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (udp4_punt_node) = {
  .name = "ip4-udp-punt",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = PUNT_N_ERROR,
  .error_strings = punt_error_strings,

  .n_next_nodes = PUNT_N_NEXT,
  .next_nodes = {
#define _(s,n) [PUNT_NEXT_##s] = n,
     foreach_punt_next
#undef _
  },
};

VLIB_REGISTER_NODE (udp6_punt_node) = {
  .name = "ip6-udp-punt",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = PUNT_N_ERROR,
  .error_strings = punt_error_strings,

  .n_next_nodes = PUNT_N_NEXT,
  .next_nodes = {
#define _(s,n) [PUNT_NEXT_##s] = n,
     foreach_punt_next
#undef _
  },
};
/* *INDENT-ON* */

/* Trace record for the punt-socket TX path. */
typedef struct
{
  punt_client_t client;		/* registered client the packet went to */
  u8 is_midchain;		/* set on non-head buffers of a chain */
  u8 packet_data[64];		/* leading bytes of the punted packet */
} udp_punt_trace_t;

static u8 *
format_udp_punt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  udp_punt_trace_t *t = va_arg (*args, udp_punt_trace_t *);
  u32 indent = format_get_indent (s);
  s = format (s, "to: %s", t->client.caddr.sun_path);
  if (t->is_midchain)
    {
      s = format (s, "\n%U(buffer is part of chain)", format_white_space,
		  indent);
    }
  s = format (s, "\n%U%U", format_white_space, indent,
	      format_hex_bytes, t->packet_data, sizeof (t->packet_data));

  return s;
}

/* Shared body of all punt-socket TX nodes: for each buffer, find the
 * registered client for the punt type (L4 port / IP protocol /
 * exception reason), build an iovec chain of packet descriptor +
 * buffer data, and sendmsg() it to the client's Unix socket.  All
 * buffers are freed on exit (these are terminal drop nodes). */
always_inline uword
punt_socket_inline (vlib_main_t * vm,
		    vlib_node_runtime_t * node,
		    vlib_frame_t * frame,
		    punt_type_t pt, ip_address_family_t af)
{
  u32 *buffers = vlib_frame_vector_args (frame);
  u32 thread_index = vm->thread_index;
  uword n_packets = frame->n_vectors;
  punt_main_t *pm = &punt_main;
  int i;
  punt_thread_data_t *ptd = &pm->thread_data[thread_index];
  u32 node_index = (AF_IP4 == af ?
		    udp4_punt_socket_node.index :
		    udp6_punt_socket_node.index);

  for (i = 0; i < n_packets; i++)
    {
      struct iovec *iov;
      vlib_buffer_t *b;
      uword l;
      punt_packetdesc_t packetdesc;
      punt_client_t *c;

      b = vlib_get_buffer (vm, buffers[i]);

      if (PUNT_TYPE_L4 == pt)
	{
	  /* Reverse UDP Punt advance */
	  udp_header_t *udp;
	  if (AF_IP4 == af)
	    {
	      vlib_buffer_advance (b, -(sizeof (ip4_header_t) +
					sizeof (udp_header_t)));
	      ip4_header_t *ip = vlib_buffer_get_current (b);
	      udp = (udp_header_t *) (ip + 1);
	    }
	  else
	    {
	      vlib_buffer_advance (b, -(sizeof (ip6_header_t) +
					sizeof (udp_header_t)));
	      ip6_header_t *ip = vlib_buffer_get_current (b);
	      udp = (udp_header_t *) (ip + 1);
	    }

	  /*
	   * Find registerered client
	   * If no registered client, drop packet and count
	   */
	  c = punt_client_l4_get (af, clib_net_to_host_u16 (udp->dst_port));
	}
      else if (PUNT_TYPE_IP_PROTO == pt)
	{
	  /* Reverse UDP Punt advance */
	  ip_protocol_t proto;

	  if (AF_IP4 == af)
	    {
	      ip4_header_t *ip = vlib_buffer_get_current (b);
	      proto = ip->protocol;
	    }
	  else
	    {
	      ip6_header_t *ip = vlib_buffer_get_current (b);
	      proto = ip->protocol;
	    }
	  c = punt_client_ip_proto_get (af, proto);
	}
      else if (PUNT_TYPE_EXCEPTION == pt)
	{
	  c = punt_client_exception_get (b->punt_reason);
	}
      else
	c = NULL;

      if (PREDICT_FALSE (NULL == c))
	{
	  /* NOTE(review): jumping to error here abandons the REMAINING
	     packets in the frame as well (they are freed unsent below)
	     -- confirm this early-exit is intended vs. a `continue`. */
	  vlib_node_increment_counter (vm, node_index,
				       PUNT_ERROR_SOCKET_TX_ERROR, 1);
	  goto error;
	}

      struct sockaddr_un *caddr = &c->caddr;

      /* Re-set iovecs */
      vec_reset_length (ptd->iovecs);

      /* Add packet descriptor */
      packetdesc.sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
      packetdesc.action = 0;
      vec_add2 (ptd->iovecs, iov, 1);
      iov->iov_base = &packetdesc;
      iov->iov_len = sizeof (packetdesc);

      /** VLIB buffer chain -> Unix iovec(s). */
      vlib_buffer_advance (b, -(sizeof (ethernet_header_t)));
      vec_add2 (ptd->iovecs, iov, 1);
      iov->iov_base = b->data + b->current_data;
      iov->iov_len = l = b->current_length;

      if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
	{
	  udp_punt_trace_t *t;
	  t = vlib_add_trace (vm, node, b, sizeof (t[0]));
	  clib_memcpy_fast (&t->client, c, sizeof (t->client));
	  clib_memcpy_fast (t->packet_data,
			    vlib_buffer_get_current (b),
			    sizeof (t->packet_data));
	}

      if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT))
	{
	  /* Append one iovec per chained buffer; l accumulates the
	     total payload length for the short-write check below. */
	  do
	    {
	      b = vlib_get_buffer (vm, b->next_buffer);
	      if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
		{
		  udp_punt_trace_t *t;
		  t = vlib_add_trace (vm, node, b, sizeof (t[0]));
		  clib_memcpy_fast (&t->client, c, sizeof (t->client));
		  t->is_midchain = 1;
		}

	      vec_add2 (ptd->iovecs, iov, 1);
	      iov->iov_base = b->data + b->current_data;
	      iov->iov_len = b->current_length;
	      l += b->current_length;
	    }
	  while (b->flags & VLIB_BUFFER_NEXT_PRESENT);
	}

      struct msghdr msg = {
	.msg_name = caddr,
	.msg_namelen = sizeof (*caddr),
	.msg_iov = ptd->iovecs,
	.msg_iovlen = vec_len (ptd->iovecs),
      };

      /* A short write (fewer than l payload bytes) counts as an error. */
      if (sendmsg (pm->socket_fd, &msg, 0) < (ssize_t) l)
	vlib_node_increment_counter (vm, node_index,
				     PUNT_ERROR_SOCKET_TX_ERROR, 1);
      else
	vlib_node_increment_counter (vm, node_index, PUNT_ERROR_SOCKET_TX, 1);
    }

error:
  /* Both success and failure fall through here: every buffer in the
     frame is freed -- these nodes never forward buffers onward. */
  vlib_buffer_free (vm, buffers, n_packets);

  return n_packets;
}

static uword
udp4_punt_socket (vlib_main_t * vm,
		  vlib_node_runtime_t * node, vlib_frame_t * from_frame)
{
  return punt_socket_inline (vm, node, from_frame, PUNT_TYPE_L4, AF_IP4);
}

static uword
udp6_punt_socket (vlib_main_t * vm,
		  vlib_node_runtime_t * node, vlib_frame_t * from_frame)
{
  return punt_socket_inline (vm, node, from_frame, PUNT_TYPE_L4, AF_IP6);
}

static uword
ip4_proto_punt_socket (vlib_main_t * vm,
		       vlib_node_runtime_t * node, vlib_frame_t * from_frame)
{
  return punt_socket_inline (vm, node, from_frame, PUNT_TYPE_IP_PROTO,
			     AF_IP4);
}

static uword
ip6_proto_punt_socket (vlib_main_t * vm,
		       vlib_node_runtime_t * node, vlib_frame_t * from_frame)
{
  return punt_socket_inline (vm, node, from_frame, PUNT_TYPE_IP_PROTO,
			     AF_IP6);
}

static uword
exception_punt_socket (vlib_main_t * vm,
		       vlib_node_runtime_t * node, vlib_frame_t * from_frame)
{
  /* NOTE(review): exception punts are dispatched with AF_IP4
     unconditionally (affects only the counter node index) -- confirm. */
  return punt_socket_inline (vm, node, from_frame, PUNT_TYPE_EXCEPTION,
			     AF_IP4);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (udp4_punt_socket_node) = {
  .function = udp4_punt_socket,
  .name = "ip4-udp-punt-socket",
  .format_trace = format_udp_punt_trace,
  .flags = VLIB_NODE_FLAG_IS_DROP,
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = PUNT_N_ERROR,
  .error_strings = punt_error_strings,
};

VLIB_REGISTER_NODE (udp6_punt_socket_node) = {
  .function = udp6_punt_socket,
  .name = "ip6-udp-punt-socket",
  .format_trace = format_udp_punt_trace,
  .flags = VLIB_NODE_FLAG_IS_DROP,
  .vector_size = sizeof (u32),
  .n_errors = PUNT_N_ERROR,
  .error_strings = punt_error_strings,
};

VLIB_REGISTER_NODE (ip4_proto_punt_socket_node) = {
  .function = ip4_proto_punt_socket,
  .name = "ip4-proto-punt-socket",
  .format_trace = format_udp_punt_trace,
  .flags = VLIB_NODE_FLAG_IS_DROP,
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = PUNT_N_ERROR,
  .error_strings = punt_error_strings,
};

VLIB_REGISTER_NODE (ip6_proto_punt_socket_node) = {
  .function = ip6_proto_punt_socket,
  .name = "ip6-proto-punt-socket",
  .format_trace = format_udp_punt_trace,
  .flags = VLIB_NODE_FLAG_IS_DROP,
  .vector_size = sizeof (u32),
  .n_errors = PUNT_N_ERROR,
  .error_strings = punt_error_strings,
};

VLIB_REGISTER_NODE (exception_punt_socket_node) = {
  .function = exception_punt_socket,
  .name = "exception-punt-socket",
  .format_trace = format_udp_punt_trace,
  .flags = VLIB_NODE_FLAG_IS_DROP,
  .vector_size = sizeof (u32),
  .n_errors = PUNT_N_ERROR,
  .error_strings = punt_error_strings,
};
/* *INDENT-ON* */

/* Trace record for the punt-socket RX path. */
typedef struct
{
  enum punt_action_e action;	/* action requested by the packet descriptor */
  u32 sw_if_index;		/* interface named in the descriptor */
} punt_trace_t;

static u8 *
format_punt_trace (u8 * s, va_list * va)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
  vnet_main_t *vnm = vnet_get_main ();
  punt_trace_t *t = va_arg (*va, punt_trace_t *);
  s = format (s, "%U Action: %d", format_vnet_sw_if_index_name,
	      vnm, t->sw_if_index, t->action);
  return s;
}

/* Read one injected packet (descriptor + payload) from a client's Unix
 * socket fd and enqueue it into the graph according to the requested
 * action.  Returns the number of packets enqueued (0 or 1). */
static uword
punt_socket_rx_fd (vlib_main_t * vm, vlib_node_runtime_t * node, u32 fd)
{
  const uword buffer_size = vlib_buffer_get_default_data_size (vm);
  u32 n_trace = vlib_get_trace_count (vm, node);
  u32 next = node->cached_next_index;
  u32 n_left_to_next, next_index;
  u32 *to_next;
  u32 error = PUNT_ERROR_NONE;

  vlib_get_next_frame (vm, node, next, to_next, n_left_to_next);

  /* $$$$ Only dealing with one buffer at the time for now */
  u32 bi;
  vlib_buffer_t *b;
  punt_packetdesc_t packetdesc;
  ssize_t size;
  struct iovec io[2];

  if (vlib_buffer_alloc (vm, &bi, 1) != 1)
    {
      error = PUNT_ERROR_NOBUFFER;
      goto error;
    }

  b = vlib_get_buffer (vm, bi);
  io[0].iov_base = &packetdesc;
  io[0].iov_len = sizeof (packetdesc);
  io[1].iov_base = b->data;
  io[1].iov_len = buffer_size;

  size = readv (fd, io, 2);
  /* We need at least the packet descriptor plus a header.
     NOTE(review): ssize_t compared against an (int) cast -- fine while
     the threshold is small, but the cast narrows; verify intentional. */
  if (size <= (int) (sizeof (packetdesc) + sizeof (ip4_header_t)))
    {
      vlib_buffer_free (vm, &bi, 1);
      error = PUNT_ERROR_READV;
      goto error;
    }

  b->flags = VNET_BUFFER_F_LOCALLY_ORIGINATED;
  b->current_length = size - sizeof (packetdesc);

  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b);

  switch (packetdesc.action)
    {
    case PUNT_L2:
      vnet_buffer (b)->sw_if_index[VLIB_TX] = packetdesc.sw_if_index;
      next_index = PUNT_SOCKET_RX_NEXT_INTERFACE_OUTPUT;
      break;

    case PUNT_IP4_ROUTED:
      vnet_buffer (b)->sw_if_index[VLIB_RX] = packetdesc.sw_if_index;
      vnet_buffer (b)->sw_if_index[VLIB_TX] = ~0;
      next_index = PUNT_SOCKET_RX_NEXT_IP4_LOOKUP;
      break;

    case PUNT_IP6_ROUTED:
      vnet_buffer (b)->sw_if_index[VLIB_RX] = packetdesc.sw_if_index;
      vnet_buffer (b)->sw_if_index[VLIB_TX] = ~0;
      next_index = PUNT_SOCKET_RX_NEXT_IP6_LOOKUP;
      break;

    default:
      error = PUNT_ERROR_ACTION;
      vlib_buffer_free (vm, &bi, 1);
      goto error;
    }

  if (PREDICT_FALSE (n_trace > 0))
    {
      punt_trace_t *t;
      vlib_trace_buffer (vm, node, next_index, b, 1 /* follow_chain */ );
      vlib_set_trace_count (vm, node, --n_trace);
      t = vlib_add_trace (vm, node, b, sizeof (*t));
      t->sw_if_index = packetdesc.sw_if_index;
      t->action = packetdesc.action;
    }

  to_next[0] = bi;
  to_next++;
  n_left_to_next--;

  vlib_validate_buffer_enqueue_x1 (vm, node, next, to_next, n_left_to_next,
				   bi, next_index);
  vlib_put_next_frame (vm, node, next, n_left_to_next);

  return 1;

error:
  vlib_node_increment_counter (vm, punt_socket_rx_node.index, error, 1);
  return 0;
}

static uword
punt_socket_rx (vlib_main_t * vm, vlib_node_runtime_t * node,
		vlib_frame_t * frame)
{
  punt_main_t *pm = &punt_main;
  u32 total_count = 0;
  int i;

  for (i = 0; i < vec_len (pm->ready_fds); i++)
    {
      total_count += punt_socket_rx_fd (vm, node, pm->ready_fds[i]);
      /* NOTE(review): vec_del1 swaps the last element into slot i, then
         i++ skips it this pass -- confirm the skipped fd is picked up
         on a later interrupt, or iterate backwards instead. */
      vec_del1 (pm->ready_fds, i);
    }
  return total_count;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (punt_socket_rx_node) = {
  .function = punt_socket_rx,
  .name = "punt-socket-rx",
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_INTERRUPT,
  .vector_size = 1,
  .n_errors = PUNT_N_ERROR,
  .error_strings = punt_error_strings,
  .n_next_nodes = PUNT_SOCKET_RX_N_NEXT,
  .next_nodes = {
    [PUNT_SOCKET_RX_NEXT_INTERFACE_OUTPUT] = "interface-output",
    [PUNT_SOCKET_RX_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [PUNT_SOCKET_RX_NEXT_IP6_LOOKUP] = "ip6-lookup",
  },
  .format_trace = format_punt_trace,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */