path: root/src/plugins/gtpu/gtpu.api
/*
 * Copyright (c) 2017 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

option version = "1.0.0";

/** \brief Add or delete a GTPU tunnel
    @param client_index - opaque cookie to identify the sender
    @param context - sender context, to match reply w/ request
    @param is_add - add tunnel if non-zero, else delete
    @param is_ipv6 - src_address and dst_address are IPv6 if non-zero, else IPv4
    @param src_address - GTPU tunnel's source address
    @param dst_address - GTPU tunnel's destination address
    @param mcast_sw_if_index - interface for multicast reception, used when
                               dst_address is a multicast address
    @param encap_vrf_id - FIB identifier used for outgoing encapsulated packets
    @param decap_next_index - index of the next node for decapsulated packets
    @param teid - local Tunnel Endpoint Identifier
*/
define gtpu_add_del_tunnel
{
  u32 client_index;
  u32 context;
  u8 is_add;
  u8 is_ipv6;
  u8 src_address[16];
  u8 dst_address[16];
  u32 mcast_sw_if_index;
  u32 encap_vrf_id;
  u32 decap_next_index;
  u32 teid;
};
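
/* Usage sketch (hedged): the plugin's debug CLI exposes the same operation;
 * exact syntax may differ by release, but it is expected to look like:
 *
 *   create gtpu tunnel src 10.10.1.1 dst 10.10.1.2 teid 100 encap-vrf-id 0
 *   create gtpu tunnel src 10.10.1.1 dst 10.10.1.2 teid 100 del
 */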

/** \brief Reply for add or delete a GTPU tunnel
    @param context - sender context, to match reply w/ request
    @param retval - return code
    @param sw_if_index - software index of the tunnel interface
*/
define gtpu_add_del_tunnel_reply
{
  u32 context;
  i32 retval;
  u32 sw_if_index;
};

/** \brief Dump GTPU tunnels
    @param client_index - opaque cookie to identify the sender
    @param context - sender context, to match reply w/ request
    @param sw_if_index - software index of the tunnel interface (~0 to dump all)
*/
define gtpu_tunnel_dump
{
  u32 client_index;
  u32 context;
  u32 sw_if_index;
};

/** \brief Details of a GTPU tunnel, sent in response to gtpu_tunnel_dump
    @param context - sender context, to match reply w/ request
    @param sw_if_index - software index of the tunnel interface
    @param is_ipv6 - src_address and dst_address are IPv6 if non-zero, else IPv4
    @param src_address - GTPU tunnel's source address
    @param dst_address - GTPU tunnel's destination address
    @param mcast_sw_if_index - interface for multicast reception, used when
                               dst_address is a multicast address
    @param encap_vrf_id - FIB identifier used for outgoing encapsulated packets
    @param decap_next_index - index of the next node for decapsulated packets
    @param teid - local Tunnel Endpoint Identifier
*/
define gtpu_tunnel_details
{
  u32 context;
  u32 sw_if_index;
  u8 is_ipv6;
  u8 src_address[16];
  u8 dst_address[16];
  u32 mcast_sw_if_index;
  u32 encap_vrf_id;
  u32 decap_next_index;
  u32 teid;
};
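
/* Note: this dump/details pair follows the usual VPP request/stream pattern;
 * the debug CLI "show gtpu tunnel" is expected to render the same per-tunnel
 * state carried in gtpu_tunnel_details.
 */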

/** \brief Interface set gtpu-bypass request
    @param client_index - opaque cookie to identify the sender
    @param context - sender context, to match reply w/ request
    @param sw_if_index - interface on which the bypass feature is enabled/disabled
    @param is_ipv6 - if non-zero, enable ipv6-gtpu-bypass, else ipv4-gtpu-bypass
    @param enable - if non-zero enable, else disable
*/
define sw_interface_set_gtpu_bypass
{
  u32 client_index;
  u32 context;
  u32 sw_if_index;
  u8 is_ipv6;
  u8 enable;
};
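
/* Usage sketch (hedged): the matching debug CLI is expected to be
 *
 *   set interface ip gtpu-bypass <interface> [del]
 *
 * with an ip6 variant corresponding to is_ipv6 = 1.
 */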

/** \brief Interface set gtpu-bypass response
    @param context - sender context, to match reply w/ request
    @param retval - return code for the request
*/
define sw_interface_set_gtpu_bypass_reply
{
  u32 context;
  i32 retval;
};

/*
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */

path: root/src/vnet/devices/devices.c
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>

vnet_device_main_t vnet_device_main;

static uword
device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
		 vlib_frame_t * frame)
{
  return 0;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (device_input_node) = {
  .function = device_input_fn,
  .name = "device-input",
  .runtime_data_bytes = sizeof (vnet_device_input_runtime_t),
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_DISABLED,
  .n_next_nodes = VNET_DEVICE_INPUT_N_NEXT_NODES,
  .next_nodes = VNET_DEVICE_INPUT_NEXT_NODES,
};

/* This table defines how far the current data pointer in the buffer must be
   advanced when a driver shortcuts directly to the L3 input nodes, i.e. how
   many bytes of L2 header to skip. */

const u32 __attribute__((aligned (CLIB_CACHE_LINE_BYTES)))
device_input_next_node_advance[((VNET_DEVICE_INPUT_N_NEXT_NODES /
				CLIB_CACHE_LINE_BYTES) +1) * CLIB_CACHE_LINE_BYTES] =
{
      [VNET_DEVICE_INPUT_NEXT_IP4_INPUT] = sizeof (ethernet_header_t),
      [VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT] = sizeof (ethernet_header_t),
      [VNET_DEVICE_INPUT_NEXT_IP6_INPUT] = sizeof (ethernet_header_t),
      [VNET_DEVICE_INPUT_NEXT_MPLS_INPUT] = sizeof (ethernet_header_t),
};

const u32 __attribute__((aligned (CLIB_CACHE_LINE_BYTES)))
device_input_next_node_flags[((VNET_DEVICE_INPUT_N_NEXT_NODES /
				CLIB_CACHE_LINE_BYTES) +1) * CLIB_CACHE_LINE_BYTES] =
{
      [VNET_DEVICE_INPUT_NEXT_IP4_INPUT] = VNET_BUFFER_F_L3_HDR_OFFSET_VALID,
      [VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT] = VNET_BUFFER_F_L3_HDR_OFFSET_VALID,
      [VNET_DEVICE_INPUT_NEXT_IP6_INPUT] = VNET_BUFFER_F_L3_HDR_OFFSET_VALID,
      [VNET_DEVICE_INPUT_NEXT_MPLS_INPUT] = VNET_BUFFER_F_L3_HDR_OFFSET_VALID,
};
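
/* Illustrative sketch (assumed driver-side usage, not part of this file):
   a driver that has already classified a packet as IPv4 can skip
   ethernet-input by advancing past the L2 header and marking the L3
   header offset valid before enqueueing:

     u32 ni = VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
     vlib_buffer_advance (b0, device_input_next_node_advance[ni]);
     b0->flags |= device_input_next_node_flags[ni];
*/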

VNET_FEATURE_ARC_INIT (device_input, static) =
{
  .arc_name  = "device-input",
  .start_nodes = VNET_FEATURES ("device-input"),
  .last_in_arc = "ethernet-input",
  .arc_index_ptr = &feature_main.device_input_feature_arc_index,
};

VNET_FEATURE_INIT (l2_patch, static) = {
  .arc_name = "device-input",
  .node_name = "l2-patch",
  .runs_before = VNET_FEATURES ("ethernet-input"),
};

VNET_FEATURE_INIT (worker_handoff, static) = {
  .arc_name = "device-input",
  .node_name = "worker-handoff",
  .runs_before = VNET_FEATURES ("ethernet-input"),
};

VNET_FEATURE_INIT (span_input, static) = {
  .arc_name = "device-input",
  .node_name = "span-input",
  .runs_before = VNET_FEATURES ("ethernet-input"),
};

VNET_FEATURE_INIT (p2p_ethernet_node, static) = {
  .arc_name = "device-input",
  .node_name = "p2p-ethernet-input",
  .runs_before = VNET_FEATURES ("ethernet-input"),
};

VNET_FEATURE_INIT (ethernet_input, static) = {
  .arc_name = "device-input",
  .node_name = "ethernet-input",
  .runs_before = 0, /* not before any other features */
};
/* *INDENT-ON* */

static int
vnet_device_queue_sort (void *a1, void *a2)
{
  vnet_device_and_queue_t *dq1 = a1;
  vnet_device_and_queue_t *dq2 = a2;

  if (dq1->dev_instance > dq2->dev_instance)
    return 1;
  else if (dq1->dev_instance < dq2->dev_instance)
    return -1;
  else if (dq1->queue_id > dq2->queue_id)
    return 1;
  else if (dq1->queue_id < dq2->queue_id)
    return -1;
  else
    return 0;
}

static void
vnet_device_queue_update (vnet_main_t * vnm, vnet_device_input_runtime_t * rt)
{
  vnet_device_and_queue_t *dq;
  vnet_hw_interface_t *hw;

  vec_sort_with_function (rt->devices_and_queues, vnet_device_queue_sort);

  vec_foreach (dq, rt->devices_and_queues)
  {
    hw = vnet_get_hw_interface (vnm, dq->hw_if_index);
    vec_validate (hw->dq_runtime_index_by_queue, dq->queue_id);
    hw->dq_runtime_index_by_queue[dq->queue_id] = dq - rt->devices_and_queues;
  }
}

void
vnet_hw_interface_assign_rx_thread (vnet_main_t * vnm, u32 hw_if_index,
				    u16 queue_id, uword thread_index)
{
  vnet_device_main_t *vdm = &vnet_device_main;
  vlib_main_t *vm, *vm0;
  vnet_device_input_runtime_t *rt;
  vnet_device_and_queue_t *dq;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);

  ASSERT (hw->input_node_index > 0);

  if (vdm->first_worker_thread_index == 0)
    thread_index = 0;

  if (thread_index != 0 &&
      (thread_index < vdm->first_worker_thread_index ||
       thread_index > vdm->last_worker_thread_index))
    {
      thread_index = vdm->next_worker_thread_index++;
      if (vdm->next_worker_thread_index > vdm->last_worker_thread_index)
	vdm->next_worker_thread_index = vdm->first_worker_thread_index;
    }

  vm = vlib_mains[thread_index];
  vm0 = vlib_get_main ();

  vlib_worker_thread_barrier_sync (vm0);

  rt = vlib_node_get_runtime_data (vm, hw->input_node_index);

  vec_add2 (rt->devices_and_queues, dq, 1);
  dq->hw_if_index = hw_if_index;
  dq->dev_instance = hw->dev_instance;
  dq->queue_id = queue_id;
  dq->mode = VNET_HW_INTERFACE_RX_MODE_POLLING;
  rt->enabled_node_state = VLIB_NODE_STATE_POLLING;

  vnet_device_queue_update (vnm, rt);
  vec_validate (hw->input_node_thread_index_by_queue, queue_id);
  vec_validate (hw->rx_mode_by_queue, queue_id);
  hw->input_node_thread_index_by_queue[queue_id] = thread_index;
  hw->rx_mode_by_queue[queue_id] = VNET_HW_INTERFACE_RX_MODE_POLLING;

  vlib_worker_thread_barrier_release (vm0);

  vlib_node_set_state (vm, hw->input_node_index, rt->enabled_node_state);
}
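
/* Usage sketch (assumption: called from a driver's interface-create path,
 * never from the data plane).  Any thread_index outside the worker range,
 * e.g. ~0, makes the round-robin logic above pick the next worker:
 *
 *   vnet_hw_interface_assign_rx_thread (vnm, hw_if_index, queue_id, ~0);
 */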

int
vnet_hw_interface_unassign_rx_thread (vnet_main_t * vnm, u32 hw_if_index,
				      u16 queue_id)
{
  vlib_main_t *vm, *vm0;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  vnet_device_input_runtime_t *rt;
  vnet_device_and_queue_t *dq;
  uword old_thread_index;
  vnet_hw_interface_rx_mode mode;

  if (hw->input_node_thread_index_by_queue == 0)
    return VNET_API_ERROR_INVALID_INTERFACE;

  if (vec_len (hw->input_node_thread_index_by_queue) < queue_id + 1)
    return VNET_API_ERROR_INVALID_INTERFACE;

  old_thread_index = hw->input_node_thread_index_by_queue[queue_id];

  vm = vlib_mains[old_thread_index];

  rt = vlib_node_get_runtime_data (vm, hw->input_node_index);

  vec_foreach (dq, rt->devices_and_queues)
    if (dq->hw_if_index == hw_if_index && dq->queue_id == queue_id)
    {
      mode = dq->mode;
      goto delete;
    }

  return VNET_API_ERROR_INVALID_INTERFACE;

delete:

  vm0 = vlib_get_main ();
  vlib_worker_thread_barrier_sync (vm0);
  vec_del1 (rt->devices_and_queues, dq - rt->devices_and_queues);
  vnet_device_queue_update (vnm, rt);
  hw->rx_mode_by_queue[queue_id] = VNET_HW_INTERFACE_RX_MODE_UNKNOWN;
  vlib_worker_thread_barrier_release (vm0);

  if (vec_len (rt->devices_and_queues) == 0)
    vlib_node_set_state (vm, hw->input_node_index, VLIB_NODE_STATE_DISABLED);
  else if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    {
      /*
       * If the deleted queue was in polling mode, the node may need to drop
       * back to interrupt state: when polling and interrupt queues share a
       * thread, the node runs in polling state, so only once the last
       * polling queue is gone can the remaining interrupt queues revert
       * the node to interrupt state.
       */
      vec_foreach (dq, rt->devices_and_queues)
      {
	if (dq->mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
	  return 0;
      }
      rt->enabled_node_state = VLIB_NODE_STATE_INTERRUPT;
      vlib_node_set_state (vm, hw->input_node_index, rt->enabled_node_state);
    }

  return 0;
}


int
vnet_hw_interface_set_rx_mode (vnet_main_t * vnm, u32 hw_if_index,
			       u16 queue_id, vnet_hw_interface_rx_mode mode)
{
  vlib_main_t *vm;
  uword thread_index;
  vnet_device_and_queue_t *dq;
  vlib_node_state_t enabled_node_state;
  ASSERT (mode < VNET_HW_INTERFACE_NUM_RX_MODES);
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  vnet_device_input_runtime_t *rt;
  int is_polling = 0;

  if (mode == VNET_HW_INTERFACE_RX_MODE_DEFAULT)
    mode = hw->default_rx_mode;

  if (hw->input_node_thread_index_by_queue == 0 || hw->rx_mode_by_queue == 0)
    return VNET_API_ERROR_INVALID_INTERFACE;

  /* validate queue_id before indexing the per-queue vectors below */
  if ((vec_len (hw->input_node_thread_index_by_queue) < queue_id + 1) ||
      (vec_len (hw->rx_mode_by_queue) < queue_id + 1))
    return VNET_API_ERROR_INVALID_QUEUE;

  if (hw->rx_mode_by_queue[queue_id] == mode)
    return 0;

  if (mode != VNET_HW_INTERFACE_RX_MODE_POLLING &&
      (hw->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE) == 0)
    return VNET_API_ERROR_UNSUPPORTED;

  hw->rx_mode_by_queue[queue_id] = mode;
  thread_index = hw->input_node_thread_index_by_queue[queue_id];
  vm = vlib_mains[thread_index];

  rt = vlib_node_get_runtime_data (vm, hw->input_node_index);

  vec_foreach (dq, rt->devices_and_queues)
  {
    if (dq->hw_if_index == hw_if_index && dq->queue_id == queue_id)
      dq->mode = mode;
    if (dq->mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
      is_polling = 1;
  }

  if (is_polling)
    enabled_node_state = VLIB_NODE_STATE_POLLING;
  else
    enabled_node_state = VLIB_NODE_STATE_INTERRUPT;

  if (rt->enabled_node_state != enabled_node_state)
    {
      rt->enabled_node_state = enabled_node_state;
      if (vlib_node_get_state (vm, hw->input_node_index) !=
	  VLIB_NODE_STATE_DISABLED)
	vlib_node_set_state (vm, hw->input_node_index, enabled_node_state);
    }

  return 0;
}
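
/* Usage sketch: a driver that advertises
 * VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE could move queue 0 of an
 * interface to interrupt mode with:
 *
 *   int rv = vnet_hw_interface_set_rx_mode
 *     (vnm, hw_if_index, 0, VNET_HW_INTERFACE_RX_MODE_INTERRUPT);
 *
 * Note the input node itself drops to interrupt state only once no
 * polling queue remains on that queue's thread.
 */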

int
vnet_hw_interface_get_rx_mode (vnet_main_t * vnm, u32 hw_if_index,
			       u16 queue_id, vnet_hw_interface_rx_mode * mode)
{
  vlib_main_t *vm;
  uword thread_index;
  vnet_device_and_queue_t *dq;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  vnet_device_input_runtime_t *rt;

  if (hw->input_node_thread_index_by_queue == 0)
    return VNET_API_ERROR_INVALID_INTERFACE;

  if ((vec_len (hw->input_node_thread_index_by_queue) < queue_id + 1) ||
      (vec_len (hw->rx_mode_by_queue) < queue_id + 1))
    return VNET_API_ERROR_INVALID_QUEUE;

  thread_index = hw->input_node_thread_index_by_queue[queue_id];
  vm = vlib_mains[thread_index];

  rt = vlib_node_get_runtime_data (vm, hw->input_node_index);

  vec_foreach (dq, rt->devices_and_queues)
    if (dq->hw_if_index == hw_if_index && dq->queue_id == queue_id)
    {
      *mode = dq->mode;
      return 0;
    }

  return VNET_API_ERROR_INVALID_INTERFACE;
}



static clib_error_t *
vnet_device_init (vlib_main_t * vm)
{
  vnet_device_main_t *vdm = &vnet_device_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  vlib_thread_registration_t *tr;
  uword *p;

  vec_validate_aligned (vdm->workers, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);

  p = hash_get_mem (tm->thread_registrations_by_name, "workers");
  tr = p ? (vlib_thread_registration_t *) p[0] : 0;
  if (tr && tr->count > 0)
    {
      vdm->first_worker_thread_index = tr->first_index;
      vdm->next_worker_thread_index = tr->first_index;
      vdm->last_worker_thread_index = tr->first_index + tr->count - 1;
    }
  return 0;
}

VLIB_INIT_FUNCTION (vnet_device_init);

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */