path: root/resources/libraries/python/HTTPRequest.py
blob: 567ac791b72b5d359e84f68cf5098700523e304b (plain)
# Copyright (c) 2016 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Implementation of HTTP requests GET, PUT, POST and DELETE used in
communication with Honeycomb.

The HTTP requests are implemented in the class HTTPRequest which uses
requests.request.
"""

from enum import IntEnum, unique

from robot.api.deco import keyword
from robot.api import logger

from requests import request, RequestException, Timeout, TooManyRedirects, \
    HTTPError, ConnectionError
from requests.auth import HTTPBasicAuth


@unique
class HTTPCodes(IntEnum):
    """HTTP status codes"""
    OK = 200
    UNAUTHORIZED = 401
    FORBIDDEN = 403
    NOT_FOUND = 404
    INTERNAL_SERVER_ERROR = 500
    SERVICE_UNAVAILABLE = 503


class HTTPRequestError(Exception):
    """Exception raised by HTTPRequest objects.

    When raising this exception, include the following information in the
    message, in this order:
    - a short description of the encountered problem,
    - any relevant messages collected, e.g. from a caught exception,
    - any relevant data collected.
    Logging is performed on two levels: 1. error - a short description of the
    problem; 2. debug - detailed information.
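
    Illustrative example of raising this exception (the address and the
    caught exception in this snippet are hypothetical)::

        raise HTTPRequestError(
            "Not possible to connect to 10.0.0.1:8183.",
            details=repr(err),
            enable_logging=False)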
    """

    def __init__(self, msg, details='', enable_logging=True):
        """Sets the exception message and enables / disables logging.

        Logging of errors is not desirable when these keywords are used
        together with keywords like "Wait Until Keyword Succeeds", so logging
        can be disabled by setting enable_logging to False.

        :param msg: Message to be displayed and logged.
        :param details: Detailed information, e.g. from a caught exception,
        logged on the debug level.
        :param enable_logging: When True, logging is enabled, otherwise
        logging is disabled.
        :type msg: str
        :type details: str
        :type enable_logging: bool
        """
        super(HTTPRequestError, self).__init__()
        self._msg = "{0}: {1}".format(self.__class__.__name__, msg)
        self._details = details
        if enable_logging:
            logger.error(self._msg)
            logger.debug(self._details)

    def __repr__(self):
        return repr(self._msg)

    def __str__(self):
        return str(self._msg)


class HTTPRequest(object):
    """A class implementing HTTP requests GET, PUT, POST and DELETE used in
    communication with Honeycomb.

    The communication with Honeycomb and the processing of all exceptions
    are done in the method _http_request, which uses requests.request to
    send requests and receive responses. The received status code and the
    response content are logged on the debug level.
    All possible exceptions raised by requests.request are also processed
    there.

    The other methods (get, put, post and delete) use _http_request to send
    the corresponding requests.

    These methods must not be used as keywords in tests. Use keywords
    implemented in the module HoneycombAPIKeywords instead.
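
    Illustrative example of direct Python usage, e.g. from a keyword library
    (the node values and the path are hypothetical; the node layout follows
    what _http_request expects)::

        node = {'host': '10.0.0.1',
                'honeycomb': {'port': 8183,
                              'user': 'admin',
                              'passwd': 'admin'}}
        status_code, resp = HTTPRequest.get(node, "/index.html")
        if status_code != HTTPCodes.OK:
            raise HTTPRequestError("Unexpected status code {0}.".
                                   format(status_code))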
    """

    def __init__(self):
        pass

    @staticmethod
    def create_full_url(ip_addr, port, path):
        """Creates full url including host, port, and path to data.

        :param ip_addr: Server IP.
        :param port: Communication port.
        :param path: Path to data.
        :type ip_addr: str
        :type port: str or int
        :type path: str
        :return: Full URL.
        :rtype: str
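
        Illustrative example (the values are hypothetical)::

            HTTPRequest.create_full_url('10.0.0.1', 8183, '/index.html')
            # returns 'http://10.0.0.1:8183/index.html'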
        """
        return "http://{ip}:{port}{path}".format(ip=ip_addr, port=port,
                                                 path=path)

    @staticmethod
    def _http_request(method, node, path, enable_logging=True, **kwargs):
        """Sends specified HTTP request and returns status code and response
        content.

        :param method: The method to be performed on the resource identified by
        the given request URI.
        :param node: Honeycomb node.
        :param path: URL path, e.g. /index.html.
        :param enable_logging: Used to suppress errors when checking Honeycomb
        state during suite setup and teardown.
        :param kwargs: Named parameters accepted by request.request:
            params -- (optional) Dictionary or bytes to be sent in the query
            string for the Request.
            data -- (optional) Dictionary, bytes, or file-like object to
            send in the body of the Request.
            json -- (optional) json data to send in the body of the Request.
            headers -- (optional) Dictionary of HTTP Headers to send with
            the Request.
            cookies -- (optional) Dict or CookieJar object to send with the
            Request.
            files -- (optional) Dictionary of 'name': file-like-objects
            (or {'name': ('filename', fileobj)}) for multipart encoding upload.
            timeout (float or tuple) -- (optional) How long to wait for the
            server to send data before giving up, as a float, or a (connect
            timeout, read timeout) tuple.
            allow_redirects (bool) -- (optional) Boolean. Set to True if POST/
            PUT/DELETE redirect following is allowed.
            proxies -- (optional) Dictionary mapping protocol to the URL of
            the proxy.
            verify -- (optional) whether the SSL cert will be verified.
            A CA_BUNDLE path can also be provided. Defaults to True.
            stream -- (optional) if False, the response content will be
            immediately downloaded.
            cert -- (optional) if String, path to ssl client cert file (.pem).
            If Tuple, ('cert', 'key') pair.
        :type method: str
        :type node: dict
        :type path: str
        :type enable_logging: bool
        :type kwargs: dict
        :return: Status code and content of response.
        :rtype: tuple
        :raises HTTPRequestError: If
        1. it is not possible to connect,
        2. an invalid HTTP response comes from the server,
        3. the request exceeded the configured maximum number of redirections,
        4. the request timed out,
        5. there is any other unexpected HTTP request exception.
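
        Illustrative example of how the public methods invoke this method
        (the path is hypothetical; this mirrors the call made by get)::

            HTTPRequest._http_request('GET', node, '/index.html',
                                      enable_logging=True, headers=None,
                                      timeout=10)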
        """
        # The timeout value is only needed here for the error message used
        # when the request times out; kwargs still carries it to request().
        timeout = kwargs.get("timeout")
        url = HTTPRequest.create_full_url(node['host'],
                                          node['honeycomb']['port'],
                                          path)
        try:
            auth = HTTPBasicAuth(node['honeycomb']['user'],
                                 node['honeycomb']['passwd'])
            rsp = request(method, url, auth=auth, **kwargs)

            logger.debug("Status code: {0}".format(rsp.status_code))
            logger.debug("Response: {0}".format(rsp.content))

            return rsp.status_code, rsp.content

        except ConnectionError as err:
            # Switching the logging on / off is needed only for
            # "requests.ConnectionError"
            raise HTTPRequestError("Not possible to connect to {0}:{1}.".
                                   format(node['host'],
                                          node['honeycomb']['port']),
                                   repr(err), enable_logging=enable_logging)
        except HTTPError as err:
            raise HTTPRequestError("Invalid HTTP response from {0}.".
                                   format(node['host']), repr(err))
        except TooManyRedirects as err:
            raise HTTPRequestError("Request exceeded the configured number "
                                   "of maximum re-directions.", repr(err))
        except Timeout as err:
            raise HTTPRequestError("Request timed out. Timeout is set to {0}.".
                                   format(timeout), repr(err))
        except RequestException as err:
            raise HTTPRequestError("Unexpected HTTP request exception.",
                                   repr(err))

    @staticmethod
    @keyword(name="HTTP Get")
    def get(node, path, headers=None, timeout=10, enable_logging=True):
        """Sends a GET request and returns the response and status code.

        :param node: Honeycomb node.
        :param path: URL path, e.g. /index.html.
        :param headers: Dictionary of HTTP Headers to send with the Request.
        :param timeout: How long to wait for the server to send data before
        giving up, as a float, or a (connect timeout, read timeout) tuple.
        :param enable_logging: Used to suppress errors when checking Honeycomb
        state during suite setup and teardown. When True, logging is enabled,
        otherwise logging is disabled.
        :type node: dict
        :type path: str
        :type headers: dict
        :type timeout: float or tuple
        :type enable_logging: bool
        :return: Status code and content of response.
        :rtype: tuple
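
        Illustrative example, suppressing error logging while polling for
        Honeycomb state (the path is hypothetical)::

            status_code, resp = HTTPRequest.get(node, "/index.html",
                                                enable_logging=False)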
        """

        return HTTPRequest._http_request('GET', node, path,
                                         enable_logging=enable_logging,
                                         headers=headers, timeout=timeout)

    @staticmethod
    @keyword(name="HTTP Put")
    def put(node, path, headers=None, payload=None, json=None, timeout=10):
        """Sends a PUT request and returns the response and status code.

        :param node: Honeycomb node.
        :param path: URL path, e.g. /index.html.
        :param headers: Dictionary of HTTP Headers to send with the Request.
        :param payload: Dictionary, bytes, or file-like object to send in
        the body of the Request.
        :param json: JSON formatted string to send in the body of the Request.
        :param timeout: How long to wait for the server to send data before
        giving up, as a float, or a (connect timeout, read timeout) tuple.
        :type node: dict
        :type path: str
        :type headers: dict
        :type payload: dict, bytes, or file-like object
        :type json: str
        :type timeout: float or tuple
        :return: Status code and content of response.
        :rtype: tuple
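
        Illustrative example (the path, headers and payload are
        hypothetical)::

            headers = {"Content-Type": "application/json"}
            status_code, resp = HTTPRequest.put(node, "/index.html",
                                                headers=headers,
                                                payload='{"key": "value"}')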
        """
        return HTTPRequest._http_request('PUT', node, path, headers=headers,
                                         data=payload, json=json,
                                         timeout=timeout)

    @staticmethod
    @keyword(name="HTTP Post")
    def post(node, path, headers=None, payload=None, json=None, timeout=10):
        """Sends a POST request and returns the response and status code.

        :param node: Honeycomb node.
        :param path: URL path, e.g. /index.html.
        :param headers: Dictionary of HTTP Headers to send with the Request.
        :param payload: Dictionary, bytes, or file-like object to send in
        the body of the Request.
        :param json: JSON formatted string to send in the body of the Request.
        :param timeout: How long to wait for the server to send data before
        giving up, as a float, or a (connect timeout, read timeout) tuple.
        :type node: dict
        :type path: str
        :type headers: dict
        :type payload: dict, bytes, or file-like object
        :type json: str
        :type timeout: float or tuple
        :return: Status code and content of response.
        :rtype: tuple
        """
        return HTTPRequest._http_request('POST', node, path, headers=headers,
                                         data=payload, json=json,
                                         timeout=timeout)

    @staticmethod
    @keyword(name="HTTP Delete")
    def delete(node, path, timeout=10):
        """Sends a DELETE request and returns the response and status code.

        :param node: Honeycomb node.
        :param path: URL path, e.g. /index.html.
        :param timeout: How long to wait for the server to send data before
        giving up, as a float, or a (connect timeout, read timeout) tuple.
        :type node: dict
        :type path: str
        :type timeout: float or tuple
        :return: Status code and content of response.
        :rtype: tuple
        """
        return HTTPRequest._http_request('DELETE', node, path, timeout=timeout)