Diffstat (limited to 'src/plugins/rdma/input.c')
-rw-r--r-- | src/plugins/rdma/input.c | 181
1 file changed, 89 insertions(+), 92 deletions(-)
diff --git a/src/plugins/rdma/input.c b/src/plugins/rdma/input.c
index b2f3c280a06..3c9481f2e07 100644
--- a/src/plugins/rdma/input.c
+++ b/src/plugins/rdma/input.c
@@ -45,17 +45,30 @@ rdma_device_input_refill (vlib_main_t * vm, rdma_device_t * rd,
                          rdma_rxq_t * rxq)
 {
   u32 n_alloc, n;
-  u32 buffers[VLIB_FRAME_SIZE], *bi = buffers;
   vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
   struct ibv_recv_wr wr[VLIB_FRAME_SIZE], *w = wr;
   struct ibv_sge sge[VLIB_FRAME_SIZE], *s = sge;
+  u32 slot = rxq->tail & (rxq->size - 1);
 
-  if (PREDICT_FALSE (rxq->n_enq >= rxq->size))
+  /* do not enqueue more packet than ring space */
+  n_alloc = clib_min (VLIB_FRAME_SIZE, rxq->size - (rxq->tail - rxq->head));
+
+  /* do not bother to allocate if too small */
+  if (n_alloc < 16)
+    return;
+
+  /* avoid wrap-around logic in core loop */
+  n_alloc = clib_min (n_alloc, rxq->size - slot);
+
+  n = n_alloc =
+    vlib_buffer_alloc_to_ring_from_pool (vm, rxq->bufs, slot, rxq->size,
+                                         n_alloc, rd->pool);
+
+  /* if ring is full or allocation error, do nothing */
+  if (PREDICT_FALSE (0 == n_alloc))
     return;
 
-  n_alloc = clib_min (VLIB_FRAME_SIZE, rxq->size - rxq->n_enq);
-  n_alloc = n = vlib_buffer_alloc (vm, buffers, n_alloc);
-  vlib_get_buffers (vm, buffers, bufs, n_alloc);
+  vlib_get_buffers (vm, &rxq->bufs[slot], bufs, n_alloc);
 
   while (n >= 4)
     {
@@ -67,42 +80,37 @@ rdma_device_input_refill (vlib_main_t * vm, rdma_device_t * rd,
 
       s[0].addr = vlib_buffer_get_va (b[0]);
       s[0].length = vlib_buffer_get_default_data_size (vm);
-      s[0].lkey = rd->mr->lkey;
+      s[0].lkey = rd->lkey;
 
       s[1].addr = vlib_buffer_get_va (b[1]);
       s[1].length = vlib_buffer_get_default_data_size (vm);
-      s[1].lkey = rd->mr->lkey;
+      s[1].lkey = rd->lkey;
 
       s[2].addr = vlib_buffer_get_va (b[2]);
       s[2].length = vlib_buffer_get_default_data_size (vm);
-      s[2].lkey = rd->mr->lkey;
+      s[2].lkey = rd->lkey;
 
       s[3].addr = vlib_buffer_get_va (b[3]);
       s[3].length = vlib_buffer_get_default_data_size (vm);
-      s[3].lkey = rd->mr->lkey;
+      s[3].lkey = rd->lkey;
 
-      w[0].wr_id = bi[0];
       w[0].next = &w[0] + 1;
       w[0].sg_list = &s[0];
       w[0].num_sge = 1;
 
-      w[1].wr_id = bi[1];
       w[1].next = &w[1] + 1;
       w[1].sg_list = &s[1];
       w[1].num_sge = 1;
 
-      w[2].wr_id = bi[2];
       w[2].next = &w[2] + 1;
       w[2].sg_list = &s[2];
      w[2].num_sge = 1;
 
-      w[3].wr_id = bi[3];
       w[3].next = &w[3] + 1;
       w[3].sg_list = &s[3];
       w[3].num_sge = 1;
 
       s += 4;
-      bi += 4;
       w += 4;
       b += 4;
       n -= 4;
@@ -112,15 +120,13 @@ rdma_device_input_refill (vlib_main_t * vm, rdma_device_t * rd,
     {
       s[0].addr = vlib_buffer_get_va (b[0]);
       s[0].length = vlib_buffer_get_default_data_size (vm);
-      s[0].lkey = rd->mr->lkey;
+      s[0].lkey = rd->lkey;
 
-      w[0].wr_id = bi[0];
       w[0].next = &w[0] + 1;
       w[0].sg_list = &s[0];
       w[0].num_sge = 1;
 
       s += 1;
-      bi += 1;
       w += 1;
       b += 1;
       n -= 1;
@@ -132,10 +138,11 @@ rdma_device_input_refill (vlib_main_t * vm, rdma_device_t * rd,
   if (ibv_post_wq_recv (rxq->wq, wr, &w) != 0)
     {
       n = w - wr;
-      vlib_buffer_free (vm, buffers + n, n_alloc - n);
+      vlib_buffer_free_from_ring (vm, rxq->bufs, slot + n, rxq->size,
+                                  n_alloc - n);
     }
 
-  rxq->n_enq += n;
+  rxq->tail += n;
 }
 
 static_always_inline void
@@ -193,11 +200,16 @@ rdma_device_input_ethernet (vlib_main_t * vm, vlib_node_runtime_t * node,
 }
 
 static_always_inline u32
-rdma_device_input_load_wc (u32 n_left_from, struct ibv_wc * wc, u32 * to_next,
-                           u32 * bufsz)
+rdma_device_input_bufs (vlib_main_t * vm, const rdma_device_t * rd,
+                        u32 * next, u32 * bi, struct ibv_wc * wc,
+                        u32 n_left_from, vlib_buffer_t * bt)
 {
+  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
   u32 n_rx_bytes[4] = { 0 };
 
+  vlib_get_buffers (vm, bi, bufs, n_left_from);
+  ASSERT (bt->buffer_pool_index == bufs[0]->buffer_pool_index);
+
   while (n_left_from >= 4)
     {
       if (PREDICT_TRUE (n_left_from >= 8))
@@ -206,92 +218,53 @@ rdma_device_input_load_wc (u32 n_left_from, struct ibv_wc * wc, u32 * to_next,
          CLIB_PREFETCH (&wc[4 + 1], CLIB_CACHE_LINE_BYTES, LOAD);
          CLIB_PREFETCH (&wc[4 + 2], CLIB_CACHE_LINE_BYTES, LOAD);
          CLIB_PREFETCH (&wc[4 + 3], CLIB_CACHE_LINE_BYTES, LOAD);
-         CLIB_PREFETCH (&bufsz[4 + 0], 4 * sizeof (bufsz[0]), STORE);
-         CLIB_PREFETCH (&to_next[4 + 0], 4 * sizeof (to_next[0]), STORE);
+         vlib_prefetch_buffer_header (b[4 + 0], STORE);
+         vlib_prefetch_buffer_header (b[4 + 1], STORE);
+         vlib_prefetch_buffer_header (b[4 + 2], STORE);
+         vlib_prefetch_buffer_header (b[4 + 3], STORE);
        }
 
-      to_next[0] = wc[0].wr_id;
-      to_next[1] = wc[1].wr_id;
-      to_next[2] = wc[2].wr_id;
-      to_next[3] = wc[3].wr_id;
+      vlib_buffer_copy_indices (next, bi, 4);
 
-      bufsz[0] = wc[0].byte_len;
-      bufsz[1] = wc[1].byte_len;
-      bufsz[2] = wc[2].byte_len;
-      bufsz[3] = wc[3].byte_len;
+      vlib_buffer_copy_template (b[0], bt);
+      vlib_buffer_copy_template (b[1], bt);
+      vlib_buffer_copy_template (b[2], bt);
+      vlib_buffer_copy_template (b[3], bt);
+
+      b[0]->current_length = wc[0].byte_len;
+      b[1]->current_length = wc[1].byte_len;
+      b[2]->current_length = wc[2].byte_len;
+      b[3]->current_length = wc[3].byte_len;
 
       n_rx_bytes[0] += wc[0].byte_len;
       n_rx_bytes[1] += wc[1].byte_len;
       n_rx_bytes[2] += wc[2].byte_len;
       n_rx_bytes[3] += wc[3].byte_len;
 
+      next += 4;
+      bi += 4;
+      b += 4;
       wc += 4;
-      to_next += 4;
-      bufsz += 4;
       n_left_from -= 4;
     }
 
   while (n_left_from >= 1)
     {
-      to_next[0] = wc[0].wr_id;
-      bufsz[0] = wc[0].byte_len;
+      vlib_buffer_copy_indices (next, bi, 1);
+      vlib_buffer_copy_template (b[0], bt);
+      b[0]->current_length = wc[0].byte_len;
       n_rx_bytes[0] += wc[0].byte_len;
 
+      next += 1;
+      bi += 1;
+      b += 1;
       wc += 1;
-      to_next += 1;
-      bufsz += 1;
       n_left_from -= 1;
     }
 
   return n_rx_bytes[0] + n_rx_bytes[1] + n_rx_bytes[2] + n_rx_bytes[3];
 }
 
-static_always_inline void
-rdma_device_input_bufs_init (u32 n_left_from, vlib_buffer_t ** bufs,
-                             u32 * bufsz, u32 sw_if_index, vlib_buffer_t * bt)
-{
-  vnet_buffer (bt)->sw_if_index[VLIB_RX] = sw_if_index;
-  vnet_buffer (bt)->sw_if_index[VLIB_TX] = ~0;
-  bt->buffer_pool_index = bufs[0]->buffer_pool_index;
-  bt->ref_count = 1;
-
-  while (n_left_from >= 4)
-    {
-      if (PREDICT_TRUE (n_left_from >= 8))
-       {
-         vlib_prefetch_buffer_header (bufs[4 + 0], STORE);
-         vlib_prefetch_buffer_header (bufs[4 + 1], STORE);
-         vlib_prefetch_buffer_header (bufs[4 + 2], STORE);
-         vlib_prefetch_buffer_header (bufs[4 + 3], STORE);
-         CLIB_PREFETCH (&bufsz[4 + 0], 4 * sizeof (bufsz[0]), LOAD);
-       }
-
-      vlib_buffer_copy_template (bufs[0], bt);
-      vlib_buffer_copy_template (bufs[1], bt);
-      vlib_buffer_copy_template (bufs[2], bt);
-      vlib_buffer_copy_template (bufs[3], bt);
-
-      bufs[0]->current_length = bufsz[0];
-      bufs[1]->current_length = bufsz[1];
-      bufs[2]->current_length = bufsz[2];
-      bufs[3]->current_length = bufsz[3];
-
-      bufs += 4;
-      bufsz += 4;
-      n_left_from -= 4;
-    }
-
-  while (n_left_from >= 1)
-    {
-      vlib_buffer_copy_template (bufs[0], bt);
-      bufs[0]->current_length = bufsz[0];
-
-      bufs += 1;
-      bufsz += 1;
-      n_left_from -= 1;
-    }
-}
-
 static_always_inline uword
 rdma_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                           vlib_frame_t * frame, rdma_device_t * rd, u16 qid)
@@ -299,12 +272,16 @@ rdma_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
   vnet_main_t *vnm = vnet_get_main ();
   rdma_rxq_t *rxq = vec_elt_at_index (rd->rxqs, qid);
   struct ibv_wc wc[VLIB_FRAME_SIZE];
-  u32 bufsz[VLIB_FRAME_SIZE];
-  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], bt;
+  vlib_buffer_t bt;
   u32 next_index, *to_next, n_left_to_next;
   u32 n_rx_packets, n_rx_bytes;
+  u32 slot, n_tail;
+
+  ASSERT (rxq->size >= VLIB_FRAME_SIZE && is_pow2 (rxq->size));
+  ASSERT (rxq->tail - rxq->head <= rxq->size);
 
   n_rx_packets = ibv_poll_cq (rxq->cq, VLIB_FRAME_SIZE, wc);
+  ASSERT (n_rx_packets <= rxq->tail - rxq->head);
 
   if (PREDICT_FALSE (n_rx_packets <= 0))
     {
@@ -312,30 +289,50 @@ rdma_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
       return 0;
     }
 
+  /* init buffer template */
   clib_memset_u64 (&bt, 0,
                    STRUCT_OFFSET_OF (vlib_buffer_t,
                                      template_end) / sizeof (u64));
+  vnet_buffer (&bt)->sw_if_index[VLIB_RX] = rd->sw_if_index;
+  vnet_buffer (&bt)->sw_if_index[VLIB_TX] = ~0;
+  bt.buffer_pool_index = rd->pool;
+  bt.ref_count = 1;
+
+  /* update buffer template for input feature arcs if any */
   next_index = rd->per_interface_next_index;
   if (PREDICT_FALSE (vnet_device_input_have_features (rd->sw_if_index)))
     vnet_feature_start_device_input_x1 (rd->sw_if_index, &next_index, &bt);
 
   vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);
-  n_rx_bytes = rdma_device_input_load_wc (n_rx_packets, wc, to_next, bufsz);
-  vlib_get_buffers (vm, to_next, bufs, n_rx_packets);
-  rdma_device_input_bufs_init (n_rx_packets, bufs, bufsz, rd->sw_if_index,
-                               &bt);
-  rdma_device_input_trace (vm, node, rd, n_rx_packets, to_next, next_index);
+  ASSERT (n_rx_packets <= n_left_to_next);
+
+  /*
+   * avoid wrap-around logic in core loop
+   * we requested VLIB_FRAME_SIZE packets and rxq->size >= VLIB_FRAME_SIZE
+   * => we can process all packets in 2 iterations max
+   */
+  slot = rxq->head & (rxq->size - 1);
+  n_tail = clib_min (n_rx_packets, rxq->size - slot);
+  n_rx_bytes =
+    rdma_device_input_bufs (vm, rd, &to_next[0], &rxq->bufs[slot], wc, n_tail,
+                            &bt);
+  if (n_tail < n_rx_packets)
+    n_rx_bytes +=
+      rdma_device_input_bufs (vm, rd, &to_next[n_tail], &rxq->bufs[0], wc,
+                              n_rx_packets - n_tail, &bt);
   rdma_device_input_ethernet (vm, node, rd, next_index);
 
   vlib_put_next_frame (vm, node, next_index, n_left_to_next - n_rx_packets);
 
+  rxq->head += n_rx_packets;
+
+  rdma_device_input_trace (vm, node, rd, n_rx_packets, to_next, next_index);
+
   vlib_increment_combined_counter
     (vnm->interface_main.combined_sw_if_counters +
     VNET_INTERFACE_COUNTER_RX, vm->thread_index,
     rd->hw_if_index, n_rx_packets, n_rx_bytes);
 
-  rxq->n_enq -= n_rx_packets;
-
   rdma_device_input_refill (vm, rd, rxq);
 
   return n_rx_packets;
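
The heart of this change is the move from per-WR wr_id bookkeeping and an n_enq counter to a ring of buffer indices (rxq->bufs) driven by free-running head/tail counters. Below is a minimal standalone sketch of that ring arithmetic, assuming exactly what the new ASSERTs enforce: the ring size is a power of two and tail - head <= size. The names ring_t, ring_refill_chunk and RING_SIZE are hypothetical illustrations, not VPP API.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 1024u		/* must be a power of two */

typedef struct
{
  uint32_t head;		/* consumer: advanced by the rx path */
  uint32_t tail;		/* producer: advanced by the refill path */
  uint32_t bufs[RING_SIZE];	/* buffer indices, as in rdma_rxq_t */
} ring_t;

/* Mirror of the refill computation: how many entries fit, clamped to the
   contiguous run up to the end of the array so the core loop never needs
   wrap-around logic. The counters are free-running u32s; unsigned
   arithmetic keeps tail - head correct across counter wrap. */
static uint32_t
ring_refill_chunk (const ring_t * r, uint32_t max, uint32_t * slot)
{
  uint32_t space = RING_SIZE - (r->tail - r->head);	/* free entries */
  uint32_t n = max < space ? max : space;
  *slot = r->tail & (RING_SIZE - 1);	/* first free array index */
  if (n > RING_SIZE - *slot)		/* clamp to end of array */
    n = RING_SIZE - *slot;
  return n;
}

int
main (void)
{
  ring_t r = { .head = 4090, .tail = 5100 };	/* counters past one wrap */
  uint32_t slot;
  uint32_t n = ring_refill_chunk (&r, 256, &slot);

  assert (r.tail - r.head <= RING_SIZE);	/* the patch's invariant */
  /* prints: used 1010, refill 14 entries starting at slot 1004 */
  printf ("used %u, refill %u entries starting at slot %u\n",
	  r.tail - r.head, n, slot);
  return 0;
}

Clamping to RING_SIZE - slot is the same trick the patch uses twice: the refill path defers the wrapped remainder to its next invocation, while the receive path makes at most one extra rdma_device_input_bufs call restarting at &rxq->bufs[0], so neither inner loop ever tests for wrap-around.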