/* *------------------------------------------------------------------ * Copyright (c) 2017 Cisco and/or its affiliates. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *------------------------------------------------------------------ */ #define _GNU_SOURCE #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include memif_main_t memif_main; static u32 memif_eth_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hi, u32 flags) { /* nothing for now */ return 0; } static clib_error_t * memif_eth_set_max_frame_size (vnet_main_t *vnm, vnet_hw_interface_t *hi, u32 flags) { /* nothing for now */ return 0; } static void memif_queue_intfd_close (memif_queue_t * mq) { if (mq->int_clib_file_index != ~0) { memif_file_del_by_index (mq->int_clib_file_index); mq->int_clib_file_index = ~0; mq->int_fd = -1; } else if (mq->int_fd > -1) { close (mq->int_fd); mq->int_fd = -1; } } static void memif_disconnect_free_zc_queue_buffer (memif_queue_t * mq, u8 is_rx) { vlib_main_t *vm = vlib_get_main (); u16 ring_size, n_slots, mask, start; ring_size = 1 << mq->log2_ring_size; mask = ring_size - 1; n_slots = mq->ring->head - mq->last_tail; start = mq->last_tail & mask; if (is_rx) vlib_buffer_free_from_ring (vm, mq->buffers, start, ring_size, n_slots); else vlib_buffer_free_from_ring_no_next (vm, mq->buffers, start, ring_size, n_slots); vec_free (mq->buffers); } void 
/*
 * Bring an interface out of the connected state: notify the peer, close
 * the control socket, unregister queues and free their rings/buffers.
 * Called with or without the worker barrier held; takes it if needed.
 */
memif_disconnect (memif_if_t * mif, clib_error_t * err)
{
  memif_main_t *mm = &memif_main;
  vnet_main_t *vnm = vnet_get_main ();
  memif_region_t *mr;
  memif_queue_t *mq;
  int i;
  vlib_main_t *vm = vlib_get_main ();
  int with_barrier = 0;

  if (mif == 0)
    return;

  memif_log_debug (mif, "disconnect %u (%v)", mif->dev_instance,
		   err ? err->what : 0);

  if (err)
    {
      clib_error_t *e = 0;
      /* remember the reason locally and tell the peer, if still reachable */
      mif->local_disc_string = vec_dup (err->what);
      if (mif->sock && clib_socket_is_connected (mif->sock))
	e = memif_msg_send_disconnect (mif, err);
      clib_error_free (e);
    }

  /* set interface down */
  mif->flags &= ~(MEMIF_IF_FLAG_CONNECTED | MEMIF_IF_FLAG_CONNECTING);
  if (mif->hw_if_index != ~0)
    vnet_hw_interface_set_flags (vnm, mif->hw_if_index, 0);

  /* close connection socket */
  if (mif->sock && mif->sock->fd)
    {
      /* fully registered connection: drop the fd->instance mapping too */
      memif_socket_file_t *msf =
	vec_elt_at_index (mm->socket_files, mif->socket_file_index);
      hash_unset (msf->dev_instance_by_fd, mif->sock->fd);
      memif_socket_close (&mif->sock);
    }
  else if (mif->sock)
    {
      /* socket allocated but never connected; close and free it directly */
      clib_error_t *err;
      err = clib_socket_close (mif->sock);
      if (err)
	{
	  memif_log_err (mif, "%U", format_clib_error, err);
	  clib_error_free (err);
	}
      clib_mem_free (mif->sock);
    }

  /* queue/ring teardown must not race workers */
  if (vlib_worker_thread_barrier_held () == 0)
    {
      with_barrier = 1;
      vlib_worker_thread_barrier_sync (vm);
    }

  vec_foreach_index (i, mif->rx_queues)
    {
      mq = vec_elt_at_index (mif->rx_queues, i);
      if (mq->ring)
	{
	  if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
	    {
	      /* return buffers still sitting on the shared rx ring */
	      memif_disconnect_free_zc_queue_buffer (mq, 1);
	    }
	  mq->ring = 0;
	}
    }
  vnet_hw_if_unregister_all_rx_queues (vnm, mif->hw_if_index);

  vec_foreach_index (i, mif->tx_queues)
    {
      mq = vec_elt_at_index (mif->tx_queues, i);
      if (mq->ring)
	{
	  if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
	    {
	      memif_disconnect_free_zc_queue_buffer (mq, 0);
	    }
	  clib_spinlock_free (&mq->lockp);
	}
      mq->ring = 0;
    }
  vnet_hw_if_unregister_all_tx_queues (vnm, mif->hw_if_index);
  vnet_hw_if_update_runtime_data (vnm, mif->hw_if_index);

  /* free tx and rx queues */
  vec_foreach (mq, mif->rx_queues)
    memif_queue_intfd_close (mq);
  vec_free (mif->rx_queues);
  vec_foreach (mq, mif->tx_queues)
    memif_queue_intfd_close (mq);
  vec_free (mif->tx_queues);

  /* free memory regions */
  vec_foreach (mr, mif->regions)
    {
      int rv;
      /* external regions (zero-copy physmem) are not ours to unmap */
      if (mr->is_external)
	continue;
      if ((rv = munmap (mr->shm, mr->region_size)))
	memif_log_err (mif, "munmap failed, rv = %d", rv);
      if (mr->fd > -1)
	close (mr->fd);
    }
  vec_free (mif->regions);
  vec_free (mif->remote_name);
  vec_free (mif->remote_if_name);
  clib_fifo_free (mif->msg_queue);

  /* release the barrier only if we took it ourselves */
  if (with_barrier)
    vlib_worker_thread_barrier_release (vm);
}

/*
 * Write-readiness callback for a queue interrupt fd.  An interrupt fd is
 * only ever read, so EPOLLOUT is unexpected; log and ignore.
 * private_data packs (dev_instance << 16) | queue id.
 */
static clib_error_t *
memif_int_fd_write_ready (clib_file_t * uf)
{
  memif_main_t *mm = &memif_main;
  u16 qid = uf->private_data & 0xFFFF;
  memif_if_t *mif = vec_elt_at_index (mm->interfaces, uf->private_data >> 16);

  memif_log_warn (mif, "unexpected EPOLLOUT on RX for queue %u", qid);
  return 0;
}

/*
 * Read-readiness callback for a queue interrupt fd: drain the eventfd
 * counter and mark the rx queue as having a pending interrupt so the
 * input node will poll it.
 */
static clib_error_t *
memif_int_fd_read_ready (clib_file_t * uf)
{
  memif_main_t *mm = &memif_main;
  vnet_main_t *vnm = vnet_get_main ();
  u16 qid = uf->private_data & 0xFFFF;
  memif_if_t *mif = vec_elt_at_index (mm->interfaces, uf->private_data >> 16);
  memif_queue_t *mq = vec_elt_at_index (mif->rx_queues, qid);
  u64 b;
  ssize_t size;

  /* eventfd read clears the counter; the value itself is not used */
  size = read (uf->file_descriptor, &b, sizeof (b));
  if (size < 0)
    {
      memif_log_debug (mif, "Failed to read from socket");
      return 0;
    }

  vnet_hw_if_rx_queue_set_int_pending (vnm, mq->queue_index);
  mq->int_count++;
  return 0;
}

/*
 * Finalize the handshake: map all shared-memory regions, attach the tx/rx
 * rings, register queues with vnet and bring the link up.  On any failure
 * jumps to error: and returns a clib error (the caller disconnects).
 */
clib_error_t *
memif_connect (memif_if_t * mif)
{
  memif_main_t *mm = &memif_main;
  vlib_main_t *vm = vlib_get_main ();
  vnet_main_t *vnm = vnet_get_main ();
  clib_file_t template = { 0 };
  memif_region_t *mr;
  int i, j;
  u32 n_txqs = 0, n_threads = vlib_get_n_threads ();
  clib_error_t *err = NULL;
  u8 max_log2_ring_sz = 0;
  int with_barrier = 0;

  memif_log_debug (mif, "connect %u", mif->dev_instance);

  /* stale disconnect reasons from a previous session */
  vec_free (mif->local_disc_string);
  vec_free (mif->remote_disc_string);

  vec_foreach (mr, mif->regions)
    {
      /* already mapped (e.g. master-created region 0) */
      if (mr->shm)
	continue;

      if (mr->fd < 0)
	{
	  err = clib_error_return (0, "no memory region fd");
	  goto error;
	}

      if ((mr->shm = mmap (NULL, mr->region_size,
			   PROT_READ | PROT_WRITE, MAP_SHARED, mr->fd,
			   0)) == MAP_FAILED)
	{
	  err = clib_error_return_unix (0, "mmap");
	  goto error;
	}
    }

  /* template reused below for every queue interrupt fd registration */
  template.read_function = memif_int_fd_read_ready;
  template.write_function = memif_int_fd_write_ready;

  /* take the worker barrier unless the caller already holds it */
  with_barrier = 1;
  if (vlib_worker_thread_barrier_held ())
    with_barrier = 0;

  if (with_barrier)
    vlib_worker_thread_barrier_sync (vm);

  vec_foreach_index (i, mif->tx_queues)
    {
      memif_queue_t *mq = vec_elt_at_index (mif->tx_queues, i);
      /* track the largest ring so per-thread scratch vectors can be sized */
      max_log2_ring_sz = clib_max (max_log2_ring_sz, mq->log2_ring_size);

      mq->ring = mif->regions[mq->region].shm + mq->offset;
      /* cookie validates that the peer laid out the region as negotiated */
      if (mq->ring->cookie != MEMIF_COOKIE)
	{
	  err = clib_error_return (0, "wrong cookie on tx ring %u", i);
	  goto error;
	}
      mq->queue_index =
	vnet_hw_if_register_tx_queue (vnm, mif->hw_if_index, i);
      clib_spinlock_init (&mq->lockp);

      if (mif->flags & MEMIF_IF_FLAG_USE_DMA)
	{
	  /* pre-size per-queue DMA bookkeeping for the largest ring seen */
	  memif_dma_info_t *dma_info;
	  mq->dma_head = 0;
	  mq->dma_tail = 0;
	  mq->dma_info_head = 0;
	  mq->dma_info_tail = 0;
	  mq->dma_info_size = MEMIF_DMA_INFO_SIZE;
	  vec_validate_aligned (mq->dma_info, MEMIF_DMA_INFO_SIZE,
				CLIB_CACHE_LINE_BYTES);
	  vec_foreach (dma_info, mq->dma_info)
	    {
	      vec_validate_aligned (dma_info->data.desc_data,
				    pow2_mask (max_log2_ring_sz),
				    CLIB_CACHE_LINE_BYTES);
	      vec_validate_aligned (dma_info->data.desc_len,
				    pow2_mask (max_log2_ring_sz),
				    CLIB_CACHE_LINE_BYTES);
	      vec_validate_aligned (dma_info->data.desc_status,
				    pow2_mask (max_log2_ring_sz),
				    CLIB_CACHE_LINE_BYTES);
	      vec_validate_aligned (dma_info->data.copy_ops, 0,
				    CLIB_CACHE_LINE_BYTES);
	      vec_reset_length (dma_info->data.copy_ops);
	      vec_validate_aligned (dma_info->data.buffers, 0,
				    CLIB_CACHE_LINE_BYTES);
	      vec_reset_length (dma_info->data.buffers);
	    }
	}
    }

  if (vec_len (mif->tx_queues) > 0)
    {
      /* round-robin tx queues over all vlib threads */
      n_txqs = vec_len (mif->tx_queues);
      for (j = 0; j < n_threads; j++)
	{
	  u32 qi = mif->tx_queues[j % n_txqs].queue_index;
	  vnet_hw_if_tx_queue_assign_thread (vnm, qi, j);
	}
    }

  vec_foreach_index (i, mif->rx_queues)
    {
      memif_queue_t *mq = vec_elt_at_index (mif->rx_queues, i);
      u32 ti;
      u32 qi;
      int rv;
max_log2_ring_sz = clib_max (max_log2_ring_sz, mq->log2_ring_size); mq->ring = mif->regions[mq->region].shm + mq->offset; if (mq->ring->cookie != MEMIF_COOKIE) { err = clib_error_return (0, "wrong cookie on tx ring %u", i); goto error; } qi = vnet_hw_if_register_rx_queue (vnm, mif->hw_if_index, i, VNET_HW_IF_RXQ_THREAD_ANY); mq->queue_index = qi; if (mif->flags & MEMIF_IF_FLAG_USE_DMA) { memif_dma_info_t *dma_info; mq->dma_head = 0; mq->dma_tail = 0; mq->dma_info_head = 0; mq->dma_info_tail = 0; mq->dma_info_size = MEMIF_DMA_INFO_SIZE; vec_validate_aligned (mq->dma_info, MEMIF_DMA_INFO_SIZE, CLIB_CACHE_LINE_BYTES); vec_foreach (dma_info, mq->dma_info) { vec_validate_aligned (dma_info->data.desc_data, pow2_mask (max_log2_ring_sz), CLIB_CACHE_LINE_BYTES); vec_validate_aligned (dma_info->data.desc_len, pow2_mask (max_log2_ring_sz), CLIB_CACHE_LINE_BYTES); vec_validate_aligned (dma_info->data.desc_status, pow2_mask (max_log2_ring_sz), CLIB_CACHE_LINE_BYTES); vec_validate_aligned (dma_info->data.copy_ops, 0, CLIB_CACHE_LINE_BYTES); vec_reset_length (dma_info->data.copy_ops); vec_validate_aligned (dma_info->data.buffers, 0, CLIB_CACHE_LINE_BYTES); vec_reset_length (dma_info->data.buffers); } } if (mq->int_fd > -1) { template.file_descriptor = mq->int_fd; template.private_data = (mif->dev_instance << 16) | (i & 0xFFFF); template.description = format (0, "%U rx %u int", format_memif_device_name, mif->dev_instance, i); memif_file_add (&mq->int_clib_file_index, &template); vnet_hw_if_set_rx_queue_file_index (vnm, qi, mq->int_clib_file_index); } ti = vnet_hw_if_get_rx_queue_thread_index (vnm, qi); mq->buffer_pool_index = vlib_buffer_pool_get_default_for_numa ( vm, vlib_get_main_by_index (ti)->numa_node); rv = vnet_hw_if_set_rx_queue_mode (vnm, qi, VNET_HW_IF_RX_MODE_DEFAULT); vnet_hw_if_update_runtime_data (vnm, mif->hw_if_index); if (rv) memif_log_err (mif, "Warning: unable to set rx mode for interface %d queue %d: " "rc=%d", mif->hw_if_index, i, rv); else { 
	  vnet_hw_if_rx_mode rxmode = vnet_hw_if_get_rx_queue_mode (vnm, qi);

	  if (rxmode == VNET_HW_IF_RX_MODE_POLLING)
	    /* polling: tell the peer not to raise interrupts */
	    mq->ring->flags |= MEMIF_RING_FLAG_MASK_INT;
	  else
	    /* interrupt mode: kick once so anything already queued is seen */
	    vnet_hw_if_rx_queue_set_int_pending (vnm, qi);
	}
    }

  /* grow the shared per-thread descriptor scratch vectors if this
   * interface has a larger ring than anything seen before */
  if (1 << max_log2_ring_sz > vec_len (mm->per_thread_data[0].desc_data))
    {
      memif_per_thread_data_t *ptd;

      vec_foreach (ptd, mm->per_thread_data)
	{
	  vec_validate_aligned (ptd->desc_data, pow2_mask (max_log2_ring_sz),
				CLIB_CACHE_LINE_BYTES);
	  vec_validate_aligned (ptd->desc_len, pow2_mask (max_log2_ring_sz),
				CLIB_CACHE_LINE_BYTES);
	  vec_validate_aligned (ptd->desc_status,
				pow2_mask (max_log2_ring_sz),
				CLIB_CACHE_LINE_BYTES);
	}
    }
  if (with_barrier)
    vlib_worker_thread_barrier_release (vm);

  mif->flags &= ~MEMIF_IF_FLAG_CONNECTING;
  mif->flags |= MEMIF_IF_FLAG_CONNECTED;

  vnet_hw_interface_set_flags (vnm, mif->hw_if_index,
			       VNET_HW_INTERFACE_FLAG_LINK_UP);
  return 0;

error:
  if (with_barrier)
    vlib_worker_thread_barrier_release (vm);
  memif_log_err (mif, "%U", format_clib_error, err);
  return err;
}

/*
 * Locate ring `ring_num` of the given type inside region 0.  Rings are
 * laid out back to back: all S2M rings first, then all M2S rings, each
 * sized as header + descriptor array for the negotiated ring size.
 * Returns NULL if no region is mapped yet.
 */
static_always_inline memif_ring_t *
memif_get_ring (memif_if_t * mif, memif_ring_type_t type, u16 ring_num)
{
  if (vec_len (mif->regions) == 0)
    return NULL;
  void *p = mif->regions[0].shm;
  int ring_size =
    sizeof (memif_ring_t) +
    sizeof (memif_desc_t) * (1 << mif->run.log2_ring_size);
  p += (ring_num + type * mif->run.num_s2m_rings) * ring_size;

  return (memif_ring_t *) p;
}

/*
 * Master side: create region 0 (rings + optionally packet buffers),
 * initialize ring cookies and descriptors, and allocate the tx/rx
 * queue state including per-queue interrupt eventfds.
 */
clib_error_t *
memif_init_regions_and_queues (memif_if_t * mif)
{
  vlib_main_t *vm = vlib_get_main ();
  memif_socket_file_t *msf;
  memif_ring_t *ring = NULL;
  int fd, i, j;
  u64 buffer_offset;
  memif_region_t *r;
  clib_error_t *err;

  ASSERT (vec_len (mif->regions) == 0);
  vec_add2_aligned (mif->regions, r, 1, CLIB_CACHE_LINE_BYTES);

  /* packet buffers start right after all ring structures */
  buffer_offset = (mif->run.num_s2m_rings + mif->run.num_m2s_rings) *
    (sizeof (memif_ring_t) +
     sizeof (memif_desc_t) * (1 << mif->run.log2_ring_size));

  r->region_size = buffer_offset;

  /* zero-copy exports vlib buffer memory instead of region-0 buffers */
  if ((mif->flags & MEMIF_IF_FLAG_ZERO_COPY) == 0)
    r->region_size += mif->run.buffer_size * (1 <<
					      mif->run.log2_ring_size) *
      (mif->run.num_s2m_rings + mif->run.num_m2s_rings);

  if ((fd = clib_mem_vm_create_fd (CLIB_MEM_PAGE_SZ_DEFAULT, "%U region 0",
				   format_memif_device_name,
				   mif->dev_instance)) == -1)
    {
      err = clib_mem_get_last_error ();
      goto error;
    }

  if ((ftruncate (fd, r->region_size)) == -1)
    {
      err = clib_error_return_unix (0, "ftruncate");
      goto error;
    }

  msf = pool_elt_at_index (memif_main.socket_files, mif->socket_file_index);
  r->shm = clib_mem_vm_map_shared (0, r->region_size, fd, 0, "memif%lu/%lu:0",
				   msf->socket_id, mif->id);
  if (r->shm == CLIB_MEM_VM_MAP_FAILED)
    {
      err = clib_error_return_unix (0, "memif shared region map failed");
      goto error;
    }

  r->fd = fd;

  if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
    {
      /* expose every vlib buffer pool's physmem as an extra (external)
       * region so the peer can address our buffers directly */
      vlib_buffer_pool_t *bp;
      vec_foreach (bp, vm->buffer_main->buffer_pools)
	{
	  vlib_physmem_map_t *pm;
	  pm = vlib_physmem_get_map (vm, bp->physmem_map_index);
	  vec_add2_aligned (mif->regions, r, 1, CLIB_CACHE_LINE_BYTES);
	  r->fd = pm->fd;
	  r->region_size = pm->n_pages << pm->log2_page_size;
	  r->shm = pm->base;
	  r->is_external = 1;
	}
    }

  for (i = 0; i < mif->run.num_s2m_rings; i++)
    {
      ring = memif_get_ring (mif, MEMIF_RING_S2M, i);
      ring->head = ring->tail = 0;
      ring->cookie = MEMIF_COOKIE;

      /* zero-copy descriptors are filled at runtime, not pre-wired */
      if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
	continue;

      /* pre-point each descriptor at its fixed buffer slot in region 0 */
      for (j = 0; j < (1 << mif->run.log2_ring_size); j++)
	{
	  u16 slot = i * (1 << mif->run.log2_ring_size) + j;
	  ring->desc[j].region = 0;
	  ring->desc[j].offset =
	    buffer_offset + (u32) (slot * mif->run.buffer_size);
	  ring->desc[j].length = mif->run.buffer_size;
	}
    }
  for (i = 0; i < mif->run.num_m2s_rings; i++)
    {
      ring = memif_get_ring (mif, MEMIF_RING_M2S, i);
      ring->head = ring->tail = 0;
      ring->cookie = MEMIF_COOKIE;

      if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
	continue;

      /* m2s buffer slots follow all s2m slots */
      for (j = 0; j < (1 << mif->run.log2_ring_size); j++)
	{
	  u16 slot = (i + mif->run.num_s2m_rings) *
	    (1 << mif->run.log2_ring_size) + j;
	  ring->desc[j].region = 0;
	  ring->desc[j].offset =
	    buffer_offset + (u32) (slot * mif->run.buffer_size);
	  ring->desc[j].length = mif->run.buffer_size;
	}
    }
  ASSERT (mif->tx_queues == 0);
  vec_validate_aligned (mif->tx_queues, mif->run.num_s2m_rings - 1,
			CLIB_CACHE_LINE_BYTES);

  vec_foreach_index (i, mif->tx_queues)
    {
      memif_queue_t *mq = vec_elt_at_index (mif->tx_queues, i);
      /* per-queue interrupt eventfd, handed to the peer during handshake */
      if ((mq->int_fd = eventfd (0, EFD_NONBLOCK)) < 0)
	{
	  err = clib_error_return_unix (0, "eventfd[tx queue %u]", i);
	  goto error;
	}

      mq->int_clib_file_index = ~0;
      mq->ring = memif_get_ring (mif, MEMIF_RING_S2M, i);
      mq->log2_ring_size = mif->cfg.log2_ring_size;
      mq->region = 0;
      /* offset of the ring within region 0, as sent to the peer */
      mq->offset = (void *) mq->ring - (void *) mif->regions[mq->region].shm;
      mq->last_head = 0;
      mq->type = MEMIF_RING_S2M;
      if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
	vec_validate_aligned (mq->buffers, 1 << mq->log2_ring_size,
			      CLIB_CACHE_LINE_BYTES);
    }

  ASSERT (mif->rx_queues == 0);
  vec_validate_aligned (mif->rx_queues, mif->run.num_m2s_rings - 1,
			CLIB_CACHE_LINE_BYTES);

  vec_foreach_index (i, mif->rx_queues)
    {
      memif_queue_t *mq = vec_elt_at_index (mif->rx_queues, i);
      if ((mq->int_fd = eventfd (0, EFD_NONBLOCK)) < 0)
	{
	  err = clib_error_return_unix (0, "eventfd[rx queue %u]", i);
	  goto error;
	}
      mq->int_clib_file_index = ~0;
      mq->ring = memif_get_ring (mif, MEMIF_RING_M2S, i);
      mq->log2_ring_size = mif->cfg.log2_ring_size;
      mq->region = 0;
      mq->offset = (void *) mq->ring - (void *) mif->regions[mq->region].shm;
      mq->last_head = 0;
      mq->type = MEMIF_RING_M2S;
      if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
	vec_validate_aligned (mq->buffers, 1 << mq->log2_ring_size,
			      CLIB_CACHE_LINE_BYTES);
    }

  return 0;

error:
  memif_log_err (mif, "%U", format_clib_error, err);
  return err;
}

/*
 * Background process node: while enabled, periodically (every ~3s) walks
 * all admin-up, unconnected slave interfaces and attempts to establish
 * the control-channel connection to the master's listener socket.
 */
static uword
memif_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
{
  memif_main_t *mm = &memif_main;
  memif_if_t *mif;
  clib_socket_t *sock;
  uword *event_data = 0, event_type;
  u8 enabled = 0;
  f64 start_time, last_run_duration = 0, now;
  clib_error_t *err;

  /* scratch socket, reused across connection attempts; ownership moves
   * to the interface on a successful connect */
  sock = clib_mem_alloc (sizeof (clib_socket_t));
  clib_memset (sock, 0, sizeof (clib_socket_t));

  while (1)
    {
      if (enabled)
	vlib_process_wait_for_event_or_clock (vm, (f64) 3 -
					      last_run_duration);
      else
	vlib_process_wait_for_event (vm);

      event_type = vlib_process_get_events (vm, &event_data);
      vec_reset_length (event_data);

      switch (event_type)
	{
	case ~0:
	  /* timeout — fall through to the reconnect scan below */
	  break;
	case MEMIF_PROCESS_EVENT_START:
	  enabled = 1;
	  break;
	case MEMIF_PROCESS_EVENT_STOP:
	  enabled = 0;
	  continue;
	case MEMIF_PROCESS_EVENT_ADMIN_UP_DOWN:
	  break;
	default:
	  ASSERT (0);
	}

      last_run_duration = start_time = vlib_time_now (vm);
      pool_foreach (mif, mm->interfaces)
	{
	  memif_socket_file_t *msf =
	    vec_elt_at_index (mm->socket_files, mif->socket_file_index);
	  /* Allow no more than 10us without a pause */
	  now = vlib_time_now (vm);
	  if (now > start_time + 10e-6)
	    {
	      vlib_process_suspend (vm, 100e-6);	/* suspend for 100 us */
	      start_time = vlib_time_now (vm);
	    }
	  /* only admin-up slaves that are not already (being) connected */
	  if ((mif->flags & MEMIF_IF_FLAG_ADMIN_UP) == 0)
	    continue;
	  if (mif->flags & MEMIF_IF_FLAG_CONNECTING)
	    continue;
	  if (mif->flags & MEMIF_IF_FLAG_CONNECTED)
	    continue;
	  if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
	    {
	      clib_memset (sock, 0, sizeof (clib_socket_t));
	      sock->config = (char *) msf->filename;
	      sock->is_seqpacket = 1;
	      sock->is_blocking = 1;

	      if ((err = clib_socket_init (sock)))
		{
		  /* master not listening yet — retry on next cycle */
		  clib_error_free (err);
		}
	      else
		{
		  clib_file_t t = { 0 };

		  t.read_function = memif_slave_conn_fd_read_ready;
		  t.write_function = memif_slave_conn_fd_write_ready;
		  t.error_function = memif_slave_conn_fd_error;
		  t.file_descriptor = sock->fd;
		  t.private_data = mif->dev_instance;
		  memif_file_add (&sock->private_data, &t);
		  /* NOTE(review): description is assigned after
		   * memif_file_add already copied `t`; looks like it never
		   * reaches the registered file — confirm against
		   * clib_file_add semantics */
		  t.description = format (0, "%U ctl",
					  format_memif_device_name,
					  mif->dev_instance);
		  hash_set (msf->dev_instance_by_fd, sock->fd,
			    mif->dev_instance);

		  mif->flags |= MEMIF_IF_FLAG_CONNECTING;
		  mif->sock = sock;
		  /* socket now owned by the interface; allocate a fresh
		   * scratch one for the next attempt */
		  sock = clib_mem_alloc (sizeof (clib_socket_t));
		}
	    }
	}
      last_run_duration = vlib_time_now (vm) - last_run_duration;
    }
  return 0;
}

VLIB_REGISTER_NODE (memif_process_node,static) = {
  .function = memif_process,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "memif-process",
};

/*
 * Returns an unused socket id, and ~0 if it can't find one.
 */
u32
memif_get_unused_socket_id ()
{
  memif_main_t *mm = &memif_main;
  uword *p;
  int i, j;

  /* persistent PRNG state so successive calls keep advancing */
  static u32 seed = 0;
  /* limit to 1M tries (1024 random bases x 1024 probes each) */
  for (j = 0; j < 1 << 10; j++)
    {
      seed = random_u32 (&seed);
      for (i = 0; i < 1 << 10; i++)
	{
	  /* look around randomly generated id */
	  seed += (2 * (i % 2) - 1) * i;
	  /* ~0 is the "invalid id" sentinel — never hand it out */
	  if (seed == (u32) ~0)
	    continue;
	  p = hash_get (mm->socket_file_index_by_sock_id, seed);
	  if (!p)
	    return seed;
	}
    }
  return ~0;
}

/*
 * Add (is_add != 0) or delete a socket-id -> filename mapping.
 * Relative filenames are placed under the VPP runtime directory (creating
 * intermediate dirs); absolute paths and recognized socket prefixes are
 * used as-is.  Returns 0 on success or a vnet error.
 */
clib_error_t *
memif_socket_filename_add_del (u8 is_add, u32 sock_id, char *sock_filename)
{
  memif_main_t *mm = &memif_main;
  uword *p;
  memif_socket_file_t *msf;
  clib_error_t *err = 0;
  char *dir = 0, *tmp;
  u32 idx = 0;
  u8 *name = 0;

  /* socket id 0 is the built-in default mapping: it may be re-added
   * but never deleted */
  if (sock_id == 0 && is_add == 0)
    return vnet_error (VNET_ERR_INVALID_ARGUMENT,
		       "cannot delete socket id 0");

  if (sock_id == ~0)
    return vnet_error (VNET_ERR_INVALID_ARGUMENT,
		       "socked id is not specified");

  if (is_add == 0)
    {
      p = hash_get (mm->socket_file_index_by_sock_id, sock_id);
      if (!p)
	/* Don't delete non-existent entries.
	 */
	return vnet_error (VNET_ERR_INVALID_ARGUMENT,
			   "socket file with id %u does not exist", sock_id);

      msf = pool_elt_at_index (mm->socket_files, *p);
      /* refuse to remove a mapping that interfaces still reference */
      if (msf->ref_cnt > 0)
	return vnet_error (VNET_ERR_UNEXPECTED_INTF_STATE,
			   "socket file '%s' is in use", msf->filename);

      vec_free (msf->filename);
      pool_put (mm->socket_files, msf);

      hash_unset (mm->socket_file_index_by_sock_id, sock_id);
      return 0;
    }

  /* --- add path --- */
  if (sock_filename == 0 || sock_filename[0] == 0)
    return vnet_error (VNET_ERR_INVALID_ARGUMENT,
		       "socket filename not specified");

  if (clib_socket_prefix_is_valid (sock_filename))
    {
      /* e.g. an abstract/explicit socket spec — take it verbatim */
      name = format (0, "%s%c", sock_filename, 0);
    }
  else if (sock_filename[0] == '/')
    {
      /* absolute path — take it verbatim */
      name = format (0, "%s%c", sock_filename, 0);
    }
  else
    {
      /* relative path: anchor under the runtime directory */
      /* copy runtime dir path */
      vec_add (dir, vlib_unix_get_runtime_dir (),
	       strlen (vlib_unix_get_runtime_dir ()));
      vec_add1 (dir, '/');

      /* if sock_filename contains dirs, add them to path */
      tmp = strrchr (sock_filename, '/');
      if (tmp)
	{
	  idx = tmp - sock_filename;
	  vec_add (dir, sock_filename, idx);
	}
      vec_add1 (dir, '\0');

      /* create socket dir */
      if ((err = vlib_unix_recursive_mkdir (dir)))
	{
	  clib_error_free (err);
	  err = vnet_error (VNET_ERR_SYSCALL_ERROR_1,
			    "unable to create socket dir");
	  goto done;
	}

      name =
	format (0, "%s/%s%c", vlib_unix_get_runtime_dir (),
		sock_filename, 0);
    }

  p = hash_get (mm->socket_file_index_by_sock_id, sock_id);
  if (p)
    {
      msf = pool_elt_at_index (mm->socket_files, *p);
      if (strcmp ((char *) msf->filename, (char *) name) == 0)
	{
	  /* Silently accept identical "add". */
	  goto done;
	}

      /* But don't allow a direct add of a different filename.
       */
      err = vnet_error (VNET_ERR_ENTRY_ALREADY_EXISTS,
			"entry already exists");
      goto done;
    }

  pool_get (mm->socket_files, msf);
  clib_memset (msf, 0, sizeof (memif_socket_file_t));

  /* the pool entry takes ownership of the filename vector */
  msf->filename = name;
  msf->socket_id = sock_id;
  name = 0;

  hash_set (mm->socket_file_index_by_sock_id, sock_id,
	    msf - mm->socket_files);

done:
  vec_free (name);
  vec_free (dir);
  return err;
}

/*
 * Destroy a memif interface: disconnect, remove the hw interface,
 * drop the socket-file reference (closing the listener when this was
 * the last user) and return the pool entry.
 */
clib_error_t *
memif_delete_if (vlib_main_t *vm, memif_if_t *mif)
{
  vnet_main_t *vnm = vnet_get_main ();
  memif_main_t *mm = &memif_main;
  memif_socket_file_t *msf =
    vec_elt_at_index (mm->socket_files, mif->socket_file_index);
  clib_error_t *err;

  mif->flags |= MEMIF_IF_FLAG_DELETING;
  vec_free (mif->local_disc_string);
  vec_free (mif->remote_disc_string);

  /* bring down the interface */
  vnet_hw_interface_set_flags (vnm, mif->hw_if_index, 0);
  vnet_sw_interface_set_flags (vnm, mif->sw_if_index, 0);

  /* tell the peer why we are going away */
  err = clib_error_return (0, "interface deleted");
  memif_disconnect (mif, err);
  clib_error_free (err);

  if (mif->hw_if_index != ~0)
    {
      /* remove the interface */
      if (mif->mode == MEMIF_INTERFACE_MODE_IP)
	vnet_delete_hw_interface (vnm, mif->hw_if_index);
      else
	ethernet_delete_interface (vnm, mif->hw_if_index);
      mif->hw_if_index = ~0;
    }

  /* free interface data structures */
  mhash_unset (&msf->dev_instance_by_id, &mif->id, 0);

  /* remove socket file */
  if (--(msf->ref_cnt) == 0)
    {
      if (msf->is_listener)
	{
	  int i;
	  /* drop clients that connected but never finished the handshake */
	  vec_foreach_index (i, msf->pending_clients)
	    memif_socket_close (msf->pending_clients + i);
	  memif_socket_close (&msf->sock);
	  vec_free (msf->pending_clients);
	}
      mhash_free (&msf->dev_instance_by_id);
      hash_free (msf->dev_instance_by_fd);
      if (msf->sock)
	{
	  err = clib_socket_close (msf->sock);
	  if (err)
	    {
	      memif_log_err (mif, "%U", format_clib_error, err);
	      clib_error_free (err);
	    }
	  clib_mem_free (msf->sock);
	}
    }

  vec_free (mif->local_disc_string);
  clib_memset (mif, 0, sizeof (*mif));
  pool_put (mm->interfaces, mif);

  /* last interface gone — stop the reconnect process */
  if (pool_elts (mm->interfaces) == 0)
    vlib_process_signal_event (vm, memif_process_node.index,
			       MEMIF_PROCESS_EVENT_STOP,
			       0);

  return 0;
}

VNET_HW_INTERFACE_CLASS (memif_ip_hw_if_class, static) = {
  .name = "memif-ip",
  .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,
  .tx_hash_fn_type = VNET_HASH_FN_TYPE_IP,
};

/* Fill a DMA config with the defaults common to the rx and tx paths;
 * callers override max_transfers and callback_fn. */
static void
memif_prepare_dma_args (vlib_dma_config_t *args)
{
  args->max_batches = 256;
  args->max_transfer_size = VLIB_BUFFER_DEFAULT_DATA_SIZE;
  args->barrier_before_last = 1;
  args->sw_fallback = 1;
  args->callback_fn = NULL;
}

/*
 * Create a memif interface from user-supplied args: validate the socket
 * mapping, allocate the interface, register it with vnet (ethernet or
 * ip mode) and, for a master on a fresh socket file, start listening.
 * On success args->sw_if_index is filled in.
 */
clib_error_t *
memif_create_if (vlib_main_t *vm, memif_create_if_args_t *args)
{
  memif_main_t *mm = &memif_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  vnet_main_t *vnm = vnet_get_main ();
  vnet_eth_interface_registration_t eir = {};
  memif_if_t *mif = 0;
  vnet_sw_interface_t *sw;
  uword *p;
  memif_socket_file_t *msf = 0;
  clib_error_t *err = 0;

  p = hash_get (mm->socket_file_index_by_sock_id, args->socket_id);
  if (p == 0)
    {
      err = vnet_error (VNET_ERR_INVALID_ARGUMENT, "unknown socket id");
      goto done;
    }

  msf = vec_elt_at_index (mm->socket_files, p[0]);

  /* existing socket file can be either master or slave but cannot be both */
  if (msf->ref_cnt > 0)
    {
      if ((!msf->is_listener != !args->is_master))
	{
	  err = vnet_error (
	    VNET_ERR_SUBIF_ALREADY_EXISTS,
	    "socket file cannot be used by both master and slave");
	  goto done;
	}

      /* interface ids must be unique per socket file */
      p = mhash_get (&msf->dev_instance_by_id, &args->id);
      if (p)
	{
	  err = vnet_error (VNET_ERR_SUBIF_ALREADY_EXISTS,
			    "interface already exists");
	  goto done;
	}
    }

  /* Create new socket file */
  if (msf->ref_cnt == 0)
    {
      mhash_init (&msf->dev_instance_by_id, sizeof (uword),
		  sizeof (memif_interface_id_t));
      msf->dev_instance_by_fd = hash_create (0, sizeof (uword));
      msf->is_listener = (args->is_master != 0);

      memif_log_debug (0, "initializing socket file %s", msf->filename);
    }

  /* lazily allocate the shared per-thread scratch state on first use */
  if (mm->per_thread_data == 0)
    {
      int i;

      vec_validate_aligned (mm->per_thread_data, tm->n_vlib_mains - 1,
			    CLIB_CACHE_LINE_BYTES);

      for (i = 0; i < tm->n_vlib_mains; i++)
	{
	  memif_per_thread_data_t *ptd =
	    vec_elt_at_index (mm->per_thread_data, i);
	  vlib_buffer_t *bt = &ptd->buffer_template;
	  clib_memset
	    (bt, 0, sizeof (vlib_buffer_t));
	  /* template buffer copied onto every received packet */
	  bt->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
	  bt->total_length_not_including_first_buffer = 0;
	  vnet_buffer (bt)->sw_if_index[VLIB_TX] = (u32) ~ 0;

	  vec_validate_aligned (ptd->copy_ops, 0, CLIB_CACHE_LINE_BYTES);
	  vec_reset_length (ptd->copy_ops);
	  vec_validate_aligned (ptd->buffers, 0, CLIB_CACHE_LINE_BYTES);
	  vec_reset_length (ptd->buffers);
	}
    }

  pool_get (mm->interfaces, mif);
  clib_memset (mif, 0, sizeof (*mif));
  mif->dev_instance = mif - mm->interfaces;
  mif->socket_file_index = msf - mm->socket_files;
  mif->id = args->id;
  mif->sw_if_index = mif->hw_if_index = mif->per_interface_next_index = ~0;
  mif->mode = args->mode;
  if (args->secret)
    mif->secret = vec_dup (args->secret);

  /* register dma config if enabled */
  if (args->use_dma)
    {
      vlib_dma_config_t dma_args;
      bzero (&dma_args, sizeof (dma_args));
      memif_prepare_dma_args (&dma_args);

      /* separate configs for rx and tx, differing only in callback */
      dma_args.max_transfers = 1 << args->log2_ring_size;
      dma_args.callback_fn = memif_dma_completion_cb;
      mif->dma_input_config = vlib_dma_config_add (vm, &dma_args);
      dma_args.callback_fn = memif_tx_dma_completion_cb;
      mif->dma_tx_config = vlib_dma_config_add (vm, &dma_args);
    }

  if (mif->mode == MEMIF_INTERFACE_MODE_ETHERNET)
    {
      if (!args->hw_addr_set)
	{
	  /* synthesize a locally-administered MAC: 02:fe:<4 random bytes> */
	  f64 now = vlib_time_now (vm);
	  u32 rnd;
	  rnd = (u32) (now * 1e6);
	  rnd = random_u32 (&rnd);

	  memcpy (args->hw_addr + 2, &rnd, sizeof (rnd));
	  args->hw_addr[0] = 2;
	  args->hw_addr[1] = 0xfe;
	}

      eir.dev_class_index = memif_device_class.index;
      eir.dev_instance = mif->dev_instance;
      eir.address = args->hw_addr;
      eir.cb.flag_change = memif_eth_flag_change;
      eir.cb.set_max_frame_size = memif_eth_set_max_frame_size;
      mif->hw_if_index = vnet_eth_register_interface (vnm, &eir);
    }
  else if (mif->mode == MEMIF_INTERFACE_MODE_IP)
    {
      mif->hw_if_index =
	vnet_register_interface (vnm, memif_device_class.index,
				 mif->dev_instance,
				 memif_ip_hw_if_class.index,
				 mif->dev_instance);
    }
  else
    {
      err =
	vnet_error (VNET_ERR_SYSCALL_ERROR_2, "unsupported interface mode");
      goto error;
    }

  sw =
    vnet_get_hw_sw_interface (vnm, mif->hw_if_index);
  mif->sw_if_index = sw->sw_if_index;

  mif->cfg.log2_ring_size = args->log2_ring_size;
  mif->cfg.buffer_size = args->buffer_size;
  /* ring direction naming is from the slave's point of view, so the
   * meaning of rx/tx queue counts flips depending on our role */
  mif->cfg.num_s2m_rings =
    args->is_master ? args->rx_queues : args->tx_queues;
  mif->cfg.num_m2s_rings =
    args->is_master ? args->tx_queues : args->rx_queues;

  args->sw_if_index = mif->sw_if_index;

  /* If this is new one, start listening */
  if (msf->is_listener && msf->ref_cnt == 0)
    {
      clib_socket_t *s = clib_mem_alloc (sizeof (clib_socket_t));

      ASSERT (msf->sock == 0);
      msf->sock = s;

      clib_memset (s, 0, sizeof (clib_socket_t));
      s->config = (char *) msf->filename;
      s->local_only = 1;
      s->is_server = 1;
      s->allow_group_write = 1;
      s->is_seqpacket = 1;
      /* peer credentials are checked during the handshake */
      s->passcred = 1;

      if ((err = clib_socket_init (s)))
	{
	  err->code = VNET_ERR_SYSCALL_ERROR_4;
	  goto error;
	}

      /* accept incoming control connections on the listener fd */
      clib_file_t template = { 0 };
      template.read_function = memif_conn_fd_accept_ready;
      template.file_descriptor = msf->sock->fd;
      template.private_data = mif->socket_file_index;
      template.description = format (0, "memif listener %s", msf->filename);
      memif_file_add (&msf->sock->private_data, &template);
    }

  msf->ref_cnt++;

  if (args->is_master == 0)
    {
      mif->flags |= MEMIF_IF_FLAG_IS_SLAVE;
      /* zero-copy is only meaningful on the slave side */
      if (args->is_zero_copy)
	mif->flags |= MEMIF_IF_FLAG_ZERO_COPY;
    }

  if (args->use_dma)
    mif->flags |= MEMIF_IF_FLAG_USE_DMA;

  vnet_hw_if_set_caps (vnm, mif->hw_if_index, VNET_HW_IF_CAP_INT_MODE);
  vnet_hw_if_set_input_node (vnm, mif->hw_if_index, memif_input_node.index);
  mhash_set (&msf->dev_instance_by_id, &mif->id, mif->dev_instance, 0);

  /* first interface — wake the reconnect process */
  if (pool_elts (mm->interfaces) == 1)
    {
      vlib_process_signal_event (vm, memif_process_node.index,
				 MEMIF_PROCESS_EVENT_START, 0);
    }
  goto done;

error:
  memif_delete_if (vm, mif);
  if (err)
    memif_log_err (mif, "%U", format_clib_error, err);
  return err;

done:
  return err;
}

/*
 * Admin up/down handler: reflect admin state into the flags, propagate
 * link-up immediately if already connected, and poke the process node
 * so a newly admin-up slave starts connecting without waiting.
 */
clib_error_t *
memif_interface_admin_up_down (vnet_main_t *vnm, u32 hw_if_index, u32 flags)
{
  memif_main_t *mm = &memif_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm,
						   hw_if_index);
  memif_if_t *mif = pool_elt_at_index (mm->interfaces, hw->dev_instance);
  /* NOTE(review): `static` makes this a single shared slot across all
   * interfaces/calls; it is only ever returned as 0 here, so it behaves
   * like a plain constant — confirm the static is intentional */
  static clib_error_t *error = 0;

  if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
    {
      /* already connected: link comes up immediately */
      if (mif->flags & MEMIF_IF_FLAG_CONNECTED)
	{
	  vnet_hw_interface_set_flags (vnm, mif->hw_if_index,
				       VNET_HW_INTERFACE_FLAG_LINK_UP);
	}
      mif->flags |= MEMIF_IF_FLAG_ADMIN_UP;
    }
  else
    mif->flags &= ~MEMIF_IF_FLAG_ADMIN_UP;

  /* let the background process react to the state change right away */
  vlib_process_signal_event (vnm->vlib_main, memif_process_node.index,
			     MEMIF_PROCESS_EVENT_ADMIN_UP_DOWN, 0);
  return error;
}

/* Plugin init: zero the main struct, register logging and the binary
 * API, and install the default socket-id-0 mapping. */
static clib_error_t *
memif_init (vlib_main_t * vm)
{
  memif_main_t *mm = &memif_main;

  clib_memset (mm, 0, sizeof (memif_main_t));

  mm->log_class = vlib_log_register_class ("memif_plugin", 0);
  memif_log_debug (0, "initialized");

  /* initialize binary API */
  memif_plugin_api_hookup (vm);

  /*
   * Pre-stuff socket filename pool with a non-modifiable mapping
   * for socket-id 0 to MEMIF_DEFAULT_SOCKET_FILENAME in the
   * default run-time directory.
   */
  return memif_socket_filename_add_del (1, 0,
					MEMIF_DEFAULT_SOCKET_FILENAME);
}

VLIB_INIT_FUNCTION (memif_init);

VLIB_PLUGIN_REGISTER () = {
  .version = VPP_BUILD_VER,
  .description = "Packet Memory Interface (memif) -- Experimental",
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */