/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#define _GNU_SOURCE
#include <stdint.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/uio.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>

#include <memif/memif.h>
#include <memif/private.h>

#define foreach_memif_tx_func_error                                          \
  _ (NO_FREE_SLOTS, no_free_slots, ERROR, "no free tx slots")                \
  _ (ROLLBACK, rollback, ERROR, "not enough space in tx buffers")

typedef enum
{
#define _(f, n, s, d) MEMIF_TX_ERROR_##f,
  foreach_memif_tx_func_error
#undef _
  MEMIF_TX_N_ERROR,
} memif_tx_func_error_t;

static vlib_error_desc_t memif_tx_func_error_counters[] = {
#define _(f, n, s, d) { #n, d, VL_COUNTER_SEVERITY_##s },
  foreach_memif_tx_func_error
#undef _
};

#ifndef CLIB_MARCH_VARIANT
u8 *
format_memif_device_name (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  memif_main_t *mm = &memif_main;
  memif_if_t *mif = pool_elt_at_index (mm->interfaces, dev_instance);
  memif_socket_file_t *msf;

  msf = pool_elt_at_index (mm->socket_files, mif->socket_file_index);
  s = format (s, "memif%lu/%lu", msf->socket_id, mif->id);
  return s;
}
#endif

static u8 *
format_memif_device (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  int verbose = va_arg (*args, int);
  u32 indent = format_get_indent (s);

  s = format (s, "MEMIF interface");
  if (verbose)
    {
      s = format (s, "\n%U instance %u", format_white_space, indent + 2,
                  dev_instance);
    }
  return s;
}

static u8 *
format_memif_tx_trace (u8 * s, va_list * args)
{
  s = format (s, "Unimplemented...");
  return s;
}

/* Record a pending copy between a vlib buffer and the shared-memory
   region; the queued copy operations are executed in a batch once the
   descriptors for the frame have been filled in. */
static_always_inline void
memif_add_copy_op (memif_per_thread_data_t * ptd, void *data, u32 len,
                   u16 buffer_offset, u16 buffer_vec_index)
{
  memif_copy_op_t *co;
  vec_add2_aligned (ptd->copy_ops, co, 1, CLIB_CACHE_LINE_BYTES);
  co->data = data;
  co->data_len = len;
  co->buffer_offset = buffer_offset;
  co->buffer_vec_index = buffer_vec_index;
}

static_always_inline uword
memif_interface_tx_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                           u32 *buffers, memif_if_t *mif,
                           memif_ring_type_t type, memif_queue_t *mq,
                           memif_per_thread_data_t *ptd, u32 n_left)
{
  memif_ring_t *ring;
  u32 n_copy_op;
  u16 ring_size, mask, slot, free_slots;
  int n_retries = 5;
  vlib_buffer_t *b0, *b1, *b2, *b3;
  memif_copy_op_t *co;
  memif_region_index_t last_region = ~0;
  void *last_region_shm = 0;
  u16 head, tail;

  ring = mq->ring;
  ring_size = 1 << mq->log2_ring_size;
  mask = ring_size - 1;

retry:

  /* On an S2M ring this side produces descriptors and advances head;
     on an M2S ring it fills buffers the peer has already posted and
     advances tail. */
  if (type == MEMIF_RING_S2M)
    {
      slot = head = ring->head;
      tail = __atomic_load_n (&ring->tail, __ATOMIC_ACQUIRE);
      mq->last_tail += tail - mq->last_tail;
      free_slots = ring_size - head + mq->last_tail;
    }
  else
    {
      slot = tail = ring->tail;
      head = __atomic_load_n (&ring->head, __ATOMIC_ACQUIRE);
      mq->last_tail += tail - mq->last_tail;
      free_slots = head - tail;
    }

  while (n_left && free_slots)
    {
      memif_desc_t *d0;
      void *mb0;
      i32 src_off;
      u32 bi0, dst_off, src_left, dst_left, bytes_to_copy;
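      /* Snapshot the copy-op/buffer vector lengths and the current ring
         slot so the rollback path below can restore them if this packet
         chain does not fit into the remaining descriptors. */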
      u32 saved_ptd_copy_ops_len = _vec_len (ptd->copy_ops);
      u32 saved_ptd_buffers_len = _vec_len (ptd->buffers);
      u16 saved_slot = slot;

      /* prefetch the descriptor 8 slots ahead */
      clib_prefetch_load (&ring->desc[(slot + 8) & mask]);

      d0 = &ring->desc[slot & mask];
      if (PREDICT_FALSE (last_region != d0->region))
        {
          last_region_shm = mif->regions[d0->region].shm;
          last_region = d0->region;
        }
      mb0 = last_region_shm + d0->offset;

      dst_off = 0;

      /* slave is the producer, so it should be able to reset buffer length */
      dst_left = (type == MEMIF_RING_S2M) ? mif->run.buffer_size : d0->length;

      if (PREDICT_TRUE (n_left >= 4))
        vlib_prefetch_buffer_header (vlib_get_buffer (vm, buffers[3]), LOAD);
      bi0 = buffers[0];

    next_in_chain:

      b0 = vlib_get_buffer (vm, bi0);
      src_off = b0->current_data;
      src_left = b0->current_length;

      while (src_left)
        {
          if (PREDICT_FALSE (dst_left == 0))
            {
              if (free_slots)
                {
                  /* current descriptor is full, chain into the next slot */
                  slot++;
                  free_slots--;
                  d0->length = dst_off;
                  d0->flags = MEMIF_DESC_FLAG_NEXT;
                  d0 = &ring->desc[slot & mask];
                  dst_off = 0;
                  dst_left = (type == MEMIF_RING_S2M) ?
                    mif->run.buffer_size : d0->length;

                  if (PREDICT_FALSE (last_region != d0->region))
                    {
                      last_region_shm = mif->regions[d0->region].shm;
                      last_region = d0->region;
                    }
                  mb0 = last_region_shm + d0->offset;
                }
              else
                {
                  /* we need to rollback vectors before bailing out */
                  _vec_len (ptd->buffers) = saved_ptd_buffers_len;
                  _vec_len (ptd->copy_ops) = saved_ptd_copy_ops_len;
                  vlib_error_count (vm, node->node_index,
                                    MEMIF_TX_ERROR_ROLLBACK