author     Damjan Marion <damarion@cisco.com>       2018-11-20 21:07:03 +0100
committer  Dave Barach <openvpp@barachs.net>        2018-11-20 21:56:21 +0000
commit     7e0b17df0990ebca9cbae71b31cae7fd2c439acf (patch)
tree       84adee48667895e8191af12b9dab723254b118ff /src
parent     3553abaec54c2784bc6fdccc890411d586c3997e (diff)
vhost-user: cleanup
Change-Id: Ibf68423e9514b8e85cdf0a3e57ababd55dd4fcc4
Signed-off-by: Damjan Marion <damarion@cisco.com>
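
The cleanup is one mechanical pattern applied throughout both files: fetch the per-thread vhost_cpu_t element once into a local pointer (cpu = &vum->cpus[vm->thread_index]) and use cpu->... in the hot loops, instead of re-indexing vum->cpus[thread_index] at every access. A minimal sketch of the idea; the types and functions below are simplified stand-ins, not the actual VPP definitions:

    #include <stdint.h>

    typedef struct
    {
      uint32_t rx_buffers_len;  /* per-thread state, as in vhost_cpu_t */
    } per_thread_t;

    typedef struct
    {
      per_thread_t *cpus;       /* one element per worker thread */
    } main_t;

    /* Before: every access re-walks vector index + member. */
    static void
    drain_verbose (main_t * m, uint32_t thread_index, int n)
    {
      for (int i = 0; i < n; i++)
        m->cpus[thread_index].rx_buffers_len--;
    }

    /* After: hoist the loop-invariant lookup into one pointer. */
    static void
    drain_hoisted (main_t * m, uint32_t thread_index, int n)
    {
      per_thread_t *cpu = &m->cpus[thread_index];
      for (int i = 0; i < n; i++)
        cpu->rx_buffers_len--;
    }

Besides shortening every line that touches per-thread state, the hoisted pointer makes the loop invariance explicit to both the reader and the compiler.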
Diffstat (limited to 'src')
-rw-r--r--  src/vnet/devices/virtio/vhost_user_input.c   | 71
-rw-r--r--  src/vnet/devices/virtio/vhost_user_output.c  | 38
2 files changed, 44 insertions(+), 65 deletions(-)
diff --git a/src/vnet/devices/virtio/vhost_user_input.c b/src/vnet/devices/virtio/vhost_user_input.c
index 32b924f1362..664fc38a5d3 100644
--- a/src/vnet/devices/virtio/vhost_user_input.c
+++ b/src/vnet/devices/virtio/vhost_user_input.c
@@ -256,7 +256,7 @@ vhost_user_if_input (vlib_main_t * vm,
   u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
   u32 n_trace = vlib_get_trace_count (vm, node);
   u32 map_hint = 0;
-  u16 thread_index = vm->thread_index;
+  vhost_cpu_t *cpu = &vum->cpus[vm->thread_index];
   u16 copy_len = 0;
 
   /* The descriptor table is not ready yet */
@@ -338,34 +338,31 @@ vhost_user_if_input (vlib_main_t * vm,
    * processing cost really comes from the memory copy.
    * The assumption is that big packets will fit in 40 buffers.
    */
-  if (PREDICT_FALSE (vum->cpus[thread_index].rx_buffers_len < n_left + 1 ||
-                     vum->cpus[thread_index].rx_buffers_len < 40))
+  if (PREDICT_FALSE (cpu->rx_buffers_len < n_left + 1 ||
+                     cpu->rx_buffers_len < 40))
     {
-      u32 curr_len = vum->cpus[thread_index].rx_buffers_len;
-      vum->cpus[thread_index].rx_buffers_len +=
-        vlib_buffer_alloc_from_free_list (vm,
-                                          vum->cpus[thread_index].rx_buffers +
-                                          curr_len,
+      u32 curr_len = cpu->rx_buffers_len;
+      cpu->rx_buffers_len +=
+        vlib_buffer_alloc_from_free_list (vm, cpu->rx_buffers + curr_len,
                                           VHOST_USER_RX_BUFFERS_N - curr_len,
                                           VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
 
       if (PREDICT_FALSE
-          (vum->cpus[thread_index].rx_buffers_len <
-           VHOST_USER_RX_BUFFER_STARVATION))
+          (cpu->rx_buffers_len < VHOST_USER_RX_BUFFER_STARVATION))
        {
          /* In case of buffer starvation, discard some packets from the queue
           * and log the event.
           * We keep doing best effort for the remaining packets. */
-         u32 flush = (n_left + 1 > vum->cpus[thread_index].rx_buffers_len) ?
-           n_left + 1 - vum->cpus[thread_index].rx_buffers_len : 1;
+         u32 flush = (n_left + 1 > cpu->rx_buffers_len) ?
+           n_left + 1 - cpu->rx_buffers_len : 1;
          flush = vhost_user_rx_discard_packet (vm, vui, txvq, flush);
          n_left -= flush;
          vlib_increment_simple_counter (vnet_main.
                                         interface_main.sw_if_counters +
                                         VNET_INTERFACE_COUNTER_DROP,
-                                        vlib_get_thread_index (),
-                                        vui->sw_if_index, flush);
+                                        vm->thread_index, vui->sw_if_index,
+                                        flush);
          vlib_error_count (vm, vhost_user_input_node.index,
                            VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, flush);
        }
     }
@@ -384,7 +381,7 @@ vhost_user_if_input (vlib_main_t * vm,
       u32 desc_data_offset;
       vring_desc_t *desc_table = txvq->desc;
 
-      if (PREDICT_FALSE (vum->cpus[thread_index].rx_buffers_len <= 1))
+      if (PREDICT_FALSE (cpu->rx_buffers_len <= 1))
        {
          /* Not enough rx_buffers
           * Note: We yeld on 1 so we don't need to do an additional
@@ -396,19 +393,15 @@ vhost_user_if_input (vlib_main_t * vm,
 
       desc_current =
        txvq->avail->ring[txvq->last_avail_idx & txvq->qsz_mask];
-      vum->cpus[thread_index].rx_buffers_len--;
-      bi_current = (vum->cpus[thread_index].rx_buffers)
-       [vum->cpus[thread_index].rx_buffers_len];
+      cpu->rx_buffers_len--;
+      bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
       b_head = b_current = vlib_get_buffer (vm, bi_current);
       to_next[0] = bi_current;  //We do that now so we can forget about bi_current
       to_next++;
       n_left_to_next--;
 
-      vlib_prefetch_buffer_with_index (vm,
-                                       (vum->
-                                        cpus[thread_index].rx_buffers)
-                                       [vum->cpus[thread_index].
-                                        rx_buffers_len - 1], LOAD);
+      vlib_prefetch_buffer_with_index
+       (vm, cpu->rx_buffers[cpu->rx_buffers_len - 1], LOAD);
 
       /* Just preset the used descriptor id and length for later */
       txvq->used->ring[txvq->last_used_idx & txvq->qsz_mask].id =
@@ -483,8 +476,7 @@ vhost_user_if_input (vlib_main_t * vm,
          if (PREDICT_FALSE (b_current->current_length ==
                             VLIB_BUFFER_DATA_SIZE))
            {
-             if (PREDICT_FALSE
-                 (vum->cpus[thread_index].rx_buffers_len == 0))
+             if (PREDICT_FALSE (cpu->rx_buffers_len == 0))
                {
                  /* Cancel speculation */
                  to_next--;
@@ -497,19 +489,14 @@ vhost_user_if_input (vlib_main_t * vm,
                   * not an issue as they would still be valid. Useless,
                   * but valid.
                   */
-                 vhost_user_input_rewind_buffers (vm,
-                                                  &vum->cpus
-                                                  [thread_index],
-                                                  b_head);
+                 vhost_user_input_rewind_buffers (vm, cpu, b_head);
                  n_left = 0;
                  goto stop;
                }
 
              /* Get next output */
-             vum->cpus[thread_index].rx_buffers_len--;
-             u32 bi_next =
-               (vum->cpus[thread_index].rx_buffers)[vum->cpus
-                                                    [thread_index].rx_buffers_len];
+             cpu->rx_buffers_len--;
+             u32 bi_next = cpu->rx_buffers[cpu->rx_buffers_len];
              b_current->next_buffer = bi_next;
              b_current->flags |= VLIB_BUFFER_NEXT_PRESENT;
              bi_current = bi_next;
@@ -517,7 +504,7 @@ vhost_user_if_input (vlib_main_t * vm,
            }
 
          /* Prepare a copy order executed later for the data */
-         vhost_copy_t *cpy = &vum->cpus[thread_index].copy[copy_len];
+         vhost_copy_t *cpy = &cpu->copy[copy_len];
          copy_len++;
          u32 desc_data_l =
            desc_table[desc_current].len - desc_data_offset;
@@ -574,9 +561,8 @@ vhost_user_if_input (vlib_main_t * vm,
           */
          if (PREDICT_FALSE (copy_len >= VHOST_USER_RX_COPY_THRESHOLD))
            {
-             if (PREDICT_FALSE
-                 (vhost_user_input_copy (vui, vum->cpus[thread_index].copy,
-                                         copy_len, &map_hint)))
+             if (PREDICT_FALSE (vhost_user_input_copy (vui, cpu->copy,
+                                                       copy_len, &map_hint)))
                {
                  vlib_error_count (vm, node->node_index,
                                    VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
@@ -594,9 +580,8 @@ vhost_user_if_input (vlib_main_t * vm,
     }
 
   /* Do the memory copies */
-  if (PREDICT_FALSE
-      (vhost_user_input_copy (vui, vum->cpus[thread_index].copy,
-                              copy_len, &map_hint)))
+  if (PREDICT_FALSE (vhost_user_input_copy (vui, cpu->copy, copy_len,
+                                            &map_hint)))
     {
       vlib_error_count (vm, node->node_index,
                         VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
@@ -620,10 +605,10 @@ vhost_user_if_input (vlib_main_t * vm,
   /* increase rx counters */
   vlib_increment_combined_counter
     (vnet_main.interface_main.combined_sw_if_counters
-     + VNET_INTERFACE_COUNTER_RX,
-     vlib_get_thread_index (), vui->sw_if_index, n_rx_packets, n_rx_bytes);
+     + VNET_INTERFACE_COUNTER_RX, vm->thread_index, vui->sw_if_index,
+     n_rx_packets, n_rx_bytes);
 
-  vnet_device_increment_rx_packets (thread_index, n_rx_packets);
+  vnet_device_increment_rx_packets (vm->thread_index, n_rx_packets);
 
   return n_rx_packets;
 }
diff --git a/src/vnet/devices/virtio/vhost_user_output.c b/src/vnet/devices/virtio/vhost_user_output.c
index 58db1a9647b..4fbd63ea708 100644
--- a/src/vnet/devices/virtio/vhost_user_output.c
+++ b/src/vnet/devices/virtio/vhost_user_output.c
@@ -240,6 +240,7 @@ VNET_DEVICE_CLASS_TX_FN (vhost_user_device_class) (vlib_main_t * vm,
   vhost_user_vring_t *rxvq;
   u8 error;
   u32 thread_index = vm->thread_index;
+  vhost_cpu_t *cpu = &vum->cpus[thread_index];
   u32 map_hint = 0;
   u8 retry = 8;
   u16 copy_len;
@@ -257,9 +258,8 @@ VNET_DEVICE_CLASS_TX_FN (vhost_user_device_class) (vlib_main_t * vm,
       goto done3;
     }
 
-  qid =
-    VHOST_VRING_IDX_RX (*vec_elt_at_index
-                        (vui->per_cpu_tx_qid, thread_index));
+  qid = VHOST_VRING_IDX_RX (*vec_elt_at_index (vui->per_cpu_tx_qid,
+                                               thread_index));
   rxvq = &vui->vrings[qid];
   if (PREDICT_FALSE (rxvq->avail == 0))
     {
@@ -290,11 +290,9 @@ retry:
 
       if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
        {
-         vum->cpus[thread_index].current_trace =
-           vlib_add_trace (vm, node, b0,
-                           sizeof (*vum->cpus[thread_index].current_trace));
-         vhost_user_tx_trace (vum->cpus[thread_index].current_trace,
-                              vui, qid / 2, b0, rxvq);
+         cpu->current_trace = vlib_add_trace (vm, node, b0,
+                                              sizeof (*cpu->current_trace));
+         vhost_user_tx_trace (cpu->current_trace, vui, qid / 2, b0, rxvq);
        }
 
       if (PREDICT_FALSE (rxvq->last_avail_idx == rxvq->avail->idx))
@@ -334,15 +332,14 @@ retry:
 
        {
          // Get a header from the header array
-         virtio_net_hdr_mrg_rxbuf_t *hdr =
-           &vum->cpus[thread_index].tx_headers[tx_headers_len];
+         virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len];
          tx_headers_len++;
          hdr->hdr.flags = 0;
          hdr->hdr.gso_type = 0;
          hdr->num_buffers = 1; //This is local, no need to check
 
          // Prepare a copy order executed later for the header
-         vhost_copy_t *cpy = &vum->cpus[thread_index].copy[copy_len];
+         vhost_copy_t *cpy = &cpu->copy[copy_len];
          copy_len++;
          cpy->len = vui->virtio_net_hdr_sz;
          cpy->dst = buffer_map_addr;
@@ -367,7 +364,7 @@ retry:
          else if (vui->virtio_net_hdr_sz == 12)        //MRG is available
            {
              virtio_net_hdr_mrg_rxbuf_t *hdr =
-               &vum->cpus[thread_index].tx_headers[tx_headers_len - 1];
+               &cpu->tx_headers[tx_headers_len - 1];
 
              //Move from available to used buffer
              rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id =
@@ -429,7 +426,7 @@ retry:
            }
 
          {
-           vhost_copy_t *cpy = &vum->cpus[thread_index].copy[copy_len];
+           vhost_copy_t *cpy = &cpu->copy[copy_len];
            copy_len++;
            cpy->len = bytes_left;
            cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
@@ -472,21 +469,19 @@ retry:
 
       if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
        {
-         vum->cpus[thread_index].current_trace->hdr =
-           vum->cpus[thread_index].tx_headers[tx_headers_len - 1];
+         cpu->current_trace->hdr = cpu->tx_headers[tx_headers_len - 1];
        }
 
       n_left--; //At the end for error counting when 'goto done' is invoked
 
       /*
       * Do the copy periodically to prevent
-      * vum->cpus[thread_index].copy array overflow and corrupt memory
+      * cpu->copy array overflow and corrupt memory
       */
       if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD))
        {
-         if (PREDICT_FALSE
-             (vhost_user_tx_copy (vui, vum->cpus[thread_index].copy,
-                                  copy_len, &map_hint)))
+         if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
+                                                &map_hint)))
            {
              vlib_error_count (vm, node->node_index,
                                VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
@@ -503,9 +498,8 @@ retry:
 
 done:
   //Do the memory copies
-  if (PREDICT_FALSE
-      (vhost_user_tx_copy (vui, vum->cpus[thread_index].copy,
-                           copy_len, &map_hint)))
+  if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
+                                         &map_hint)))
    {
      vlib_error_count (vm, node->node_index,
                        VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
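
A second, smaller change rides along in the counter-update hunks: the remaining vlib_get_thread_index () calls in these paths are replaced by vm->thread_index, which the dispatcher has already stored on the vlib_main_t handed to the node. A rough sketch of why that is cheaper; the definitions below are simplified stand-ins, not the real vppinfra/vlib ones:

    #include <stdint.h>

    static __thread uint32_t __os_thread_index;  /* TLS slot (stand-in) */

    typedef struct
    {
      uint32_t thread_index;    /* filled in once per worker (stand-in) */
    } vlib_main_t;

    /* Before: each call site pays for a thread-local-storage read. */
    static inline uint32_t
    get_thread_index_tls (void)
    {
      return __os_thread_index;
    }

    /* After: hot-path code just loads the field it was handed. */
    static inline uint32_t
    get_thread_index_vm (const vlib_main_t * vm)
    {
      return vm->thread_index;
    }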