path: root/src/vppinfra/test_spinlock.c
/*
 * Copyright (c) 2019 Arm Limited.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

#include <vppinfra/mem.h>
#include <vppinfra/cache.h>
#include <vppinfra/lock.h>
#include <pthread.h>
#include <vppinfra/format.h>
#include <vppinfra/error.h>
#include <vppinfra/time.h>
#include <sched.h>
#include <stdlib.h>
#include <vppinfra/atomics.h>

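/* Start flag: worker threads busy-wait on this until the main thread
 * releases them all at once, so every thread begins timing together. */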
static u32 all_threads_online = 0;

typedef struct
{
  uword threads_per_core;
  uword cpu_mask;
  uword num_cores;
  uword increment_per_thread;
  clib_spinlock_t slock;
  uword shared_count;
  uword iterations;
} spinlock_test_main_t;

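/*
 * Worker thread body: spin on the start flag (acquire load, paired with the
 * release store in test_spinlock), then perform increment_per_thread
 * lock-protected increments of the shared counter. Returns a heap-allocated
 * cycle count so the joining thread can accumulate and free it.
 */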
void *
inc_shared_counter (void *arg)
{
  f64 *time = vec_new (f64, 1);
  *time = 0;
  spinlock_test_main_t *stm = arg;

  /* Wait for all threads to be created */
  while (!clib_atomic_load_acq_n (&all_threads_online));

  f64 start = clib_cpu_time_now ();
  for (uword i = 0; i < stm->increment_per_thread; i++)
    {
      clib_spinlock_lock (&stm->slock);
      stm->shared_count++;
      clib_spinlock_unlock (&stm->slock);
    }
  *time = clib_cpu_time_now () - start;
  return time;
}

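/*
 * Create threads_per_core threads pinned to every CPU set in cpu_mask,
 * release them simultaneously, join them, and accumulate the per-thread
 * elapsed cycles into *elapse_time. Returns the final shared counter value
 * so the caller can verify that no increments were lost.
 */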
unsigned
test_spinlock (spinlock_test_main_t * stm, f64 * elapse_time)
{
  int error;
  uword num_threads = stm->num_cores * stm->threads_per_core;
  pthread_t pthread[num_threads];

  cpu_set_t cpuset;
  unsigned cores_set = 0, cpu_id = 0;
  for (unsigned cpu_mask = stm->cpu_mask; cpu_mask; cpu_mask >>= 1)
    {
      if (!(cpu_mask & 1))
	{
	  cpu_id++;
	  continue;
	}

      CPU_ZERO (&cpuset);
      CPU_SET (cpu_id, &cpuset);
      for (uword t_num = 0; t_num < stm->threads_per_core; t_num++)
	{
	  uword t_index = cores_set * stm->threads_per_core + t_num;
	  if ((error = pthread_create (&pthread[t_index], NULL,
				       &inc_shared_counter, stm)))
	    clib_unix_warning ("pthread_create failed with %d", error);

	  if ((error = pthread_setaffinity_np (pthread[t_index],
					       sizeof (cpu_set_t), &cpuset)))
	    clib_unix_warning ("pthread_setaffinity_np failed with %d",
			       error);
	}
      cores_set++;
      cpu_id++;
    }

  /* Launch all threads */
  clib_atomic_store_rel_n (&all_threads_online, 1);

  for (uword thread_num = 0; thread_num < num_threads; thread_num++)
    {
      f64 *time;
      if ((error = pthread_join (pthread[thread_num], (void *) &time)))
	clib_unix_warning ("pthread_join failed with %d", error);
      *elapse_time += *time;
      vec_free (time);
    }

  fformat (stdout, "Time elapsed: %.4e cycles\n", *elapse_time);
  return stm->shared_count;
}

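/* Population count of the CPU mask: the number of cores that will run
 * worker threads. */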
uword
num_cores_in_cpu_mask (uword mask)
{
  uword num_cores = 0;
  for (uword cpu_mask = mask; cpu_mask; cpu_mask >>= 1)
    num_cores += (cpu_mask & 1);
  return num_cores;
}

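/*
 * Parse the command-line parameters, then run the requested number of
 * trials, checking after each one that the shared counter matches the
 * expected total and reporting a running average of the elapsed cycles.
 */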
int
test_spinlock_main (unformat_input_t * i)
{
  spinlock_test_main_t _stm, *stm = &_stm;
  clib_memset (stm, 0, sizeof (spinlock_test_main_t));

  while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
    {
      if (0 == unformat (i, "threads/core %d", &stm->threads_per_core)
	  && 0 == unformat (i, "cpu_mask %x", &stm->cpu_mask)
	  && 0 == unformat (i, "increment %d", &stm->increment_per_thread)
	  && 0 == unformat (i, "iterations %d", &stm->iterations))
	{
	  clib_unix_warning ("unknown input '%U'", format_unformat_error, i);
	  return 1;
	}
    }

  stm->num_cores = num_cores_in_cpu_mask (stm->cpu_mask);

  uword total_increment = stm->threads_per_core * stm->num_cores *
    stm->increment_per_thread;

  clib_spinlock_init (&stm->slock);

  f64 average_time = 0;
  for (uword trial = 0; trial < stm->iterations; trial++)
    {
      stm->shared_count = 0;
      f64 elapse_time = 0;
      if (test_spinlock (stm, &elapse_time) != total_increment)
	{
	  clib_spinlock_free (&stm->slock);
	  fformat (stdout, "FAILED: expected count: %d, actual count: %d\n",
		   total_increment, stm->shared_count);
	  return 1;
	}

      fformat (stdout, "Trial %d SUCCESS: %d = %d\n",
	       trial, stm->shared_count, total_increment);
      average_time = (average_time * trial + elapse_time) / (trial + 1);
      fformat (stdout, "Average lock/unlock cycles %.4e\n", average_time);
    }
  clib_spinlock_free (&stm->slock);
  return 0;
}

#ifdef CLIB_UNIX
/** Launches a number of threads to simultaneously increment a global
    counter, and records timestamps for spinlock performance benchmarking

    @param "threads/core [# threads/core]" - number of threads per core
    @param "cpu_mask [cpu_mask]" - cpu hex string e.g. input ff sets cpus 0 - 7
    @param "increment [# increments]" - number of increments per threads
    @param "iterations [# iterations]" - number of iterations
    @returns exit code
*/
int
main (int argc, char *argv[])
{
  unformat_input_t i;
  i32 ret;
  clib_time_t time;

  clib_mem_init (0, 3ULL << 30);
  clib_time_init (&time);

  unformat_init_command_line (&i, argv);
  ret = test_spinlock_main (&i);
  unformat_free (&i);

  return ret;
}
#endif /* CLIB_UNIX */
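
/*
 * Example invocation (illustrative only; the binary name and the argument
 * values below are assumptions, not part of this file):
 *
 *   ./test_spinlock threads/core 2 cpu_mask ff increment 100000 iterations 5
 *
 * This would pin two threads to each of CPUs 0-7, have every thread take and
 * release the lock 100000 times, and repeat the measurement for 5 trials.
 */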

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
">hi->dev_instance, ~0); vum->show_dev_instance_by_real_dev_instance[hi->dev_instance] = new_dev_instance; vu_log_debug (vui, "renumbered vhost-user interface dev_instance %d to %d", hi->dev_instance, new_dev_instance); return 0; } /** * @brief Try once to lock the vring * @return 0 on success, non-zero on failure. */ static_always_inline int vhost_user_vring_try_lock (vhost_user_intf_t * vui, u32 qid) { return clib_atomic_test_and_set (vui->vring_locks[qid]); } /** * @brief Spin until the vring is successfully locked */ static_always_inline void vhost_user_vring_lock (vhost_user_intf_t * vui, u32 qid) { while (vhost_user_vring_try_lock (vui, qid)) ; } /** * @brief Unlock the vring lock */ static_always_inline void vhost_user_vring_unlock (vhost_user_intf_t * vui, u32 qid) { clib_atomic_release (vui->vring_locks[qid]); } static_always_inline void vhost_user_tx_trace (vhost_trace_t * t, vhost_user_intf_t * vui, u16 qid, vlib_buffer_t * b, vhost_user_vring_t * rxvq) { vhost_user_main_t *vum = &vhost_user_main; u32 last_avail_idx = rxvq->last_avail_idx; u32 desc_current = rxvq->avail->ring[last_avail_idx & rxvq->qsz_mask]; vring_desc_t *hdr_desc = 0; u32 hint = 0; clib_memset (t, 0, sizeof (*t)); t->device_index = vui - vum->vhost_user_interfaces; t->qid = qid; hdr_desc = &rxvq->desc[desc_current]; if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT) { t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT; /* Header is the first here */ hdr_desc = map_guest_mem (vui, rxvq->desc[desc_current].addr, &hint); } if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) { t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED; } if (!(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) && !(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)) { t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC; } t->first_desc_len = hdr_desc ? 
hdr_desc->len : 0; } static_always_inline u32 vhost_user_tx_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy, u16 copy_len, u32 * map_hint) { void *dst0, *dst1, *dst2, *dst3; if (PREDICT_TRUE (copy_len >= 4)) { if (PREDICT_FALSE (!(dst2 = map_guest_mem (vui, cpy[0].dst, map_hint)))) return 1; if (PREDICT_FALSE (!(dst3 = map_guest_mem (vui, cpy[1].dst, map_hint)))) return 1; while (PREDICT_TRUE (copy_len >= 4)) { dst0 = dst2; dst1 = dst3; if (PREDICT_FALSE (!(dst2 = map_guest_mem (vui, cpy[2].dst, map_hint)))) return 1; if (PREDICT_FALSE (!(dst3 = map_guest_mem (vui, cpy[3].dst, map_hint)))) return 1; CLIB_PREFETCH ((void *) cpy[2].src, 64, LOAD); CLIB_PREFETCH ((void *) cpy[3].src, 64, LOAD); clib_memcpy_fast (dst0, (void *) cpy[0].src, cpy[0].len); clib_memcpy_fast (dst1, (void *) cpy[1].src, cpy[1].len); vhost_user_log_dirty_pages_2 (vui, cpy[0].dst, cpy[0].len, 1); vhost_user_log_dirty_pages_2 (vui, cpy[1].dst, cpy[1].len, 1); copy_len -= 2; cpy += 2; } } while (copy_len) { if (PREDICT_FALSE (!(dst0 = map_guest_mem (vui, cpy->dst, map_hint)))) return 1; clib_memcpy_fast (dst0, (void *) cpy->src, cpy->len); vhost_user_log_dirty_pages_2 (vui, cpy->dst, cpy->len, 1); copy_len -= 1; cpy += 1; } return 0; } VNET_DEVICE_CLASS_TX_FN (vhost_user_device_class) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { u32 *buffers = vlib_frame_vector_args (frame); u32 n_left = frame->n_vectors; vhost_user_main_t *vum = &vhost_user_main; vnet_interface_output_runtime_t *rd = (void *) node->runtime_data; vhost_user_intf_t *vui = pool_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance); u32 qid = ~0; vhost_user_vring_t *rxvq; u8 error; u32 thread_index = vm->thread_index; vhost_cpu_t *cpu = &vum->cpus[thread_index]; u32 map_hint = 0; u8 retry = 8; u16 copy_len; u16 tx_headers_len; if (PREDICT_FALSE (!vui->admin_up)) { error = VHOST_USER_TX_FUNC_ERROR_DOWN; goto done3; } if (PREDICT_FALSE (!vui->is_ready)) { error = VHOST_USER_TX_FUNC_ERROR_NOT_READY; goto done3; } qid = VHOST_VRING_IDX_RX (*vec_elt_at_index (vui->per_cpu_tx_qid, thread_index)); rxvq = &vui->vrings[qid]; if (PREDICT_FALSE (rxvq->avail == 0)) { error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL; goto done3; } if (PREDICT_FALSE (vui->use_tx_spinlock)) vhost_user_vring_lock (vui, qid); retry: error = VHOST_USER_TX_FUNC_ERROR_NONE; tx_headers_len = 0; copy_len = 0; while (n_left > 0) { vlib_buffer_t *b0, *current_b0; u16 desc_head, desc_index, desc_len; vring_desc_t *desc_table; uword buffer_map_addr; u32 buffer_len; u16 bytes_left; if (PREDICT_TRUE (n_left > 1)) vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD); b0 = vlib_get_buffer (vm, buffers[0]); if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) { cpu->current_trace = vlib_add_trace (vm, node, b0, sizeof (*cpu->current_trace)); vhost_user_tx_trace (cpu->current_trace, vui, qid / 2, b0, rxvq); } if (PREDICT_FALSE (rxvq->last_avail_idx == rxvq->avail->idx)) { error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF; goto done; } desc_table = rxvq->desc; desc_head = desc_index = rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask]; /* Go deeper in case of indirect descriptor * I don't know of any driver providing indirect for RX. 
*/ if (PREDICT_FALSE (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT)) { if (PREDICT_FALSE (rxvq->desc[desc_head].len < sizeof (vring_desc_t))) { error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW; goto done; } if (PREDICT_FALSE (!(desc_table = map_guest_mem (vui, rxvq->desc[desc_index].addr, &map_hint)))) { error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL; goto done; } desc_index = 0; } desc_len = vui->virtio_net_hdr_sz; buffer_map_addr = desc_table[desc_index].addr; buffer_len = desc_table[desc_index].len; { // Get a header from the header array virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len]; tx_headers_len++; hdr->hdr.flags = 0; hdr->hdr.gso_type = 0; hdr->num_buffers = 1; //This is local, no need to check // Prepare a copy order executed later for the header vhost_copy_t *cpy = &cpu->copy[copy_len]; copy_len++; cpy->len = vui->virtio_net_hdr_sz; cpy->dst = buffer_map_addr; cpy->src = (uword) hdr; } buffer_map_addr += vui->virtio_net_hdr_sz; buffer_len -= vui->virtio_net_hdr_sz; bytes_left = b0->current_length; current_b0 = b0; while (1) { if (buffer_len == 0) { //Get new output if (desc_table[desc_index].flags & VIRTQ_DESC_F_NEXT) { //Next one is chained desc_index = desc_table[desc_index].next; buffer_map_addr = desc_table[desc_index].addr; buffer_len = desc_table[desc_index].len; } else if (vui->virtio_net_hdr_sz == 12) //MRG is available { virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len - 1]; //Move from available to used buffer rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id = desc_head; rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len = desc_len; vhost_user_log_dirty_ring (vui, rxvq, ring[rxvq->last_used_idx & rxvq->qsz_mask]); rxvq->last_avail_idx++; rxvq->last_used_idx++; hdr->num_buffers++; desc_len = 0; if (PREDICT_FALSE (rxvq->last_avail_idx == rxvq->avail->idx)) { //Dequeue queued descriptors for this packet rxvq->last_used_idx -= hdr->num_buffers - 1; rxvq->last_avail_idx -= hdr->num_buffers - 1; error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF; goto done; } desc_table = rxvq->desc; desc_head = desc_index = rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask]; if (PREDICT_FALSE (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT)) { //It is seriously unlikely that a driver will put indirect descriptor //after non-indirect descriptor. if (PREDICT_FALSE (rxvq->desc[desc_head].len < sizeof (vring_desc_t))) { error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW; goto done; } if (PREDICT_FALSE (!(desc_table = map_guest_mem (vui, rxvq->desc[desc_index].addr, &map_hint)))) { error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL; goto done; } desc_index = 0; } buffer_map_addr = desc_table[desc_index].addr; buffer_len = desc_table[desc_index].len; } else { error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG; goto done; } } { vhost_copy_t *cpy = &cpu->copy[copy_len]; copy_len++; cpy->len = bytes_left; cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len; cpy->dst = buffer_map_addr; cpy->src = (uword) vlib_buffer_get_current (current_b0) + current_b0->current_length - bytes_left; bytes_left -= cpy->len; buffer_len -= cpy->len; buffer_map_addr += cpy->len; desc_len += cpy->len; CLIB_PREFETCH (&rxvq->desc, CLIB_CACHE_LINE_BYTES, LOAD); } // Check if vlib buffer has more data. If not, get more or break. 
if (PREDICT_TRUE (!bytes_left)) { if (PREDICT_FALSE (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT)) { current_b0 = vlib_get_buffer (vm, current_b0->next_buffer); bytes_left = current_b0->current_length; } else { //End of packet break; } } } //Move from available to used ring rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id = desc_head; rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len = desc_len; vhost_user_log_dirty_ring (vui, rxvq, ring[rxvq->last_used_idx & rxvq->qsz_mask]); rxvq->last_avail_idx++; rxvq->last_used_idx++; if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) { cpu->current_trace->hdr = cpu->tx_headers[tx_headers_len - 1]; } n_left--; //At the end for error counting when 'goto done' is invoked /* * Do the copy periodically to prevent * cpu->copy array overflow and corrupt memory */ if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD)) { if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len, &map_hint))) { vlib_error_count (vm, node->node_index, VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1); } copy_len = 0; /* give buffers back to driver */ CLIB_MEMORY_BARRIER (); rxvq->used->idx = rxvq->last_used_idx; vhost_user_log_dirty_ring (vui, rxvq, idx); } buffers++; } done: //Do the memory copies if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len, &map_hint))) { vlib_error_count (vm, node->node_index, VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1); } CLIB_MEMORY_BARRIER (); rxvq->used->idx = rxvq->last_used_idx; vhost_user_log_dirty_ring (vui, rxvq, idx); /* * When n_left is set, error is always set to something too. * In case error is due to lack of remaining buffers, we go back up and * retry. * The idea is that it is better to waste some time on packets * that have been processed already than dropping them and get * more fresh packets with a good likelyhood that they will be dropped too. * This technique also gives more time to VM driver to pick-up packets. * In case the traffic flows from physical to virtual interfaces, this * technique will end-up leveraging the physical NIC buffer in order to * absorb the VM's CPU jitter. 
*/ if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry) { retry--; goto retry; } /* interrupt (call) handling */ if ((rxvq->callfd_idx != ~0) && !(rxvq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) { rxvq->n_since_last_int += frame->n_vectors - n_left; if (rxvq->n_since_last_int > vum->coalesce_frames) vhost_user_send_call (vm, rxvq); } vhost_user_vring_unlock (vui, qid); done3: if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE)) { vlib_error_count (vm, node->node_index, error, n_left); vlib_increment_simple_counter (vnet_main.interface_main.sw_if_counters + VNET_INTERFACE_COUNTER_DROP, thread_index, vui->sw_if_index, n_left); } vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors); return frame->n_vectors; } static __clib_unused clib_error_t * vhost_user_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid, vnet_hw_interface_rx_mode mode) { vlib_main_t *vm = vnm->vlib_main; vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index); vhost_user_main_t *vum = &vhost_user_main; vhost_user_intf_t *vui = pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance); vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)]; if ((mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) || (mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE)) { if (txvq->kickfd_idx == ~0) { // We cannot support interrupt mode if the driver opts out return clib_error_return (0, "Driver does not support interrupt"); } if (txvq->mode == VNET_HW_INTERFACE_RX_MODE_POLLING) { vum->ifq_count++; // Start the timer if this is the first encounter on interrupt // interface/queue if ((vum->ifq_count == 1) && (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0)) vlib_process_signal_event (vm, vhost_user_send_interrupt_node.index, VHOST_USER_EVENT_START_TIMER, 0); } } else if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING) { if (((txvq->mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) || (txvq->mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE)) && vum->ifq_count) { vum->ifq_count--; // Stop the timer if there is no more interrupt interface/queue if ((vum->ifq_count == 0) && (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0)) vlib_process_signal_event (vm, vhost_user_send_interrupt_node.index, VHOST_USER_EVENT_STOP_TIMER, 0); } } txvq->mode = mode; if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING) txvq->used->flags = VRING_USED_F_NO_NOTIFY; else if ((mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE) || (mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT)) txvq->used->flags = 0; else { vu_log_err (vui, "unhandled mode %d changed for if %d queue %d", mode, hw_if_index, qid); return clib_error_return (0, "unsupported"); } return 0; } static __clib_unused clib_error_t * vhost_user_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags) { vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index); vhost_user_main_t *vum = &vhost_user_main; vhost_user_intf_t *vui = pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance); u8 link_old, link_new; link_old = vui_is_link_up (vui); vui->admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0; link_new = vui_is_link_up (vui); if (link_old != link_new) vnet_hw_interface_set_flags (vnm, vui->hw_if_index, link_new ? 
VNET_HW_INTERFACE_FLAG_LINK_UP : 0); return /* no error */ 0; } /* *INDENT-OFF* */ VNET_DEVICE_CLASS (vhost_user_device_class) = { .name = "vhost-user", .tx_function_n_errors = VHOST_USER_TX_FUNC_N_ERROR, .tx_function_error_strings = vhost_user_tx_func_error_strings, .format_device_name = format_vhost_user_interface_name, .name_renumber = vhost_user_name_renumber, .admin_up_down_function = vhost_user_interface_admin_up_down, .rx_mode_change_function = vhost_user_interface_rx_mode_change, .format_tx_trace = format_vhost_trace, }; /* *INDENT-ON* */ /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */