Diffstat (limited to 'src/plugins/wireguard')
-rw-r--r-- [-rwxr-xr-x]  src/plugins/wireguard/CMakeLists.txt | 11
-rw-r--r--               src/plugins/wireguard/FEATURE.yaml | 3
-rwxr-xr-x               src/plugins/wireguard/README.md | 55
-rw-r--r--               src/plugins/wireguard/README.rst | 79
-rw-r--r-- [-rwxr-xr-x]  src/plugins/wireguard/blake/blake2-impl.h | 0
-rw-r--r-- [-rwxr-xr-x]  src/plugins/wireguard/blake/blake2s.c | 0
-rw-r--r-- [-rwxr-xr-x]  src/plugins/wireguard/blake/blake2s.h | 0
-rw-r--r-- [-rwxr-xr-x]  src/plugins/wireguard/wireguard.api | 79
-rw-r--r-- [-rwxr-xr-x]  src/plugins/wireguard/wireguard.c | 65
-rw-r--r-- [-rwxr-xr-x]  src/plugins/wireguard/wireguard.h | 79
-rw-r--r-- [-rwxr-xr-x]  src/plugins/wireguard/wireguard_api.c | 210
-rw-r--r--               src/plugins/wireguard/wireguard_chachapoly.c | 133
-rw-r--r--               src/plugins/wireguard/wireguard_chachapoly.h | 48
-rw-r--r-- [-rwxr-xr-x]  src/plugins/wireguard/wireguard_cli.c | 103
-rw-r--r-- [-rwxr-xr-x]  src/plugins/wireguard/wireguard_cookie.c | 218
-rw-r--r-- [-rwxr-xr-x]  src/plugins/wireguard/wireguard_cookie.h | 43
-rw-r--r--               src/plugins/wireguard/wireguard_handoff.c | 104
-rw-r--r--               src/plugins/wireguard/wireguard_hchacha20.h | 90
-rw-r--r--               src/plugins/wireguard/wireguard_if.c | 180
-rw-r--r--               src/plugins/wireguard/wireguard_if.h | 59
-rw-r--r-- [-rwxr-xr-x]  src/plugins/wireguard/wireguard_index_table.c | 14
-rw-r--r-- [-rwxr-xr-x]  src/plugins/wireguard/wireguard_index_table.h | 7
-rw-r--r--               src/plugins/wireguard/wireguard_input.c | 1111
-rw-r--r-- [-rwxr-xr-x]  src/plugins/wireguard/wireguard_key.c | 0
-rw-r--r-- [-rwxr-xr-x]  src/plugins/wireguard/wireguard_key.h | 0
-rw-r--r-- [-rwxr-xr-x]  src/plugins/wireguard/wireguard_messages.h | 0
-rw-r--r-- [-rwxr-xr-x]  src/plugins/wireguard/wireguard_noise.c | 320
-rw-r--r-- [-rwxr-xr-x]  src/plugins/wireguard/wireguard_noise.h | 97
-rw-r--r-- [-rwxr-xr-x]  src/plugins/wireguard/wireguard_output_tun.c | 869
-rw-r--r--               src/plugins/wireguard/wireguard_peer.c | 481
-rw-r--r--               src/plugins/wireguard/wireguard_peer.h | 90
-rw-r--r-- [-rwxr-xr-x]  src/plugins/wireguard/wireguard_send.c | 194
-rw-r--r-- [-rwxr-xr-x]  src/plugins/wireguard/wireguard_send.h | 7
-rw-r--r--               src/plugins/wireguard/wireguard_timer.c | 50
-rw-r--r-- [-rwxr-xr-x]  src/plugins/wireguard/wireguard_timer.h | 14
35 files changed, 3783 insertions, 1030 deletions
diff --git a/src/plugins/wireguard/CMakeLists.txt b/src/plugins/wireguard/CMakeLists.txt
index 6dddc67298d..710b6a3b04a 100755..100644
--- a/src/plugins/wireguard/CMakeLists.txt
+++ b/src/plugins/wireguard/CMakeLists.txt
@@ -12,7 +12,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+if(NOT OPENSSL_FOUND)
+ message(WARNING "OpenSSL not found - wireguard plugin disabled")
+ return()
+endif()
+
if (OPENSSL_VERSION VERSION_LESS 1.1.0)
+ message(WARNING "OpenSSL too old - wireguard plugin disabled")
return()
endif()
@@ -33,8 +39,11 @@ add_vpp_plugin(wireguard
wireguard_input.c
wireguard_output_tun.c
wireguard_handoff.c
+ wireguard_hchacha20.h
wireguard_key.c
wireguard_key.h
+ wireguard_chachapoly.c
+ wireguard_chachapoly.h
wireguard_cli.c
wireguard_messages.h
wireguard_noise.c
@@ -51,7 +60,7 @@ add_vpp_plugin(wireguard
wireguard_index_table.h
wireguard_api.c
- LINK_LIBRARIES ${OPENSSL_LIBRARIES}
+ LINK_LIBRARIES ${OPENSSL_CRYPTO_LIBRARIES}
API_FILES
wireguard.api
diff --git a/src/plugins/wireguard/FEATURE.yaml b/src/plugins/wireguard/FEATURE.yaml
index cf8b6d7f3c4..5c0a588a484 100644
--- a/src/plugins/wireguard/FEATURE.yaml
+++ b/src/plugins/wireguard/FEATURE.yaml
@@ -7,6 +7,3 @@ features:
description: "Wireguard protocol implementation"
state: development
properties: [API, CLI]
-missing:
- - IPv6 support
- - DoS protection as in the original protocol
diff --git a/src/plugins/wireguard/README.md b/src/plugins/wireguard/README.md
deleted file mode 100755
index df69d93789f..00000000000
--- a/src/plugins/wireguard/README.md
+++ /dev/null
@@ -1,55 +0,0 @@
-# Wireguard vpp-plugin {#wireguard_plugin_doc}
-
-## Overview
-This plugin is an implementation of [wireguard protocol](https://www.wireguard.com/) for VPP. It allows one to create secure VPN tunnels.
-This implementation is based on [wireguard-openbsd](https://git.zx2c4.com/wireguard-openbsd/).
-
-## Crypto
-
-The crypto protocols:
-
-- blake2s [[Source]](https://github.com/BLAKE2/BLAKE2)
-
-OpenSSL:
-
-- curve25519
-- chachapoly1305
-
-## Plugin usage example
-
-### Create wireguard interface
-
-```
-> vpp# wireguard create listen-port <port> private-key <priv_key> src <src_ip4> [generate-key]
-> *wg_interface*
-> vpp# set int state <wg_interface> up
-> vpp# set int ip address <wg_interface> <wg_ip4>
-```
-
-### Add a peer configuration:
-```
-> vpp# wireguard peer add <wg_interface> public-key <pub_key_other> endpoint <ip4_dst> allowed-ip <prefix> dst-port <port_dst> persistent-keepalive [keepalive_interval]
-> vpp# *peer_idx*
-```
-
-### Show config
-```
-> vpp# show wireguard interface
-> vpp# show wireguard peer
-```
-
-### Remove peer
-```
-> vpp# wireguard peer remove <peer_idx>
-```
-
-
-### Delete interface
-```
-> vpp# wireguard delete <wg_interface>
-```
-
-## Main next steps for improving this implementation
-1. Use all benefits of VPP-engine.
-2. Add IPv6 support (currently only supports IPv4)
-3. Add DoS protection as in original protocol (using cookie)
diff --git a/src/plugins/wireguard/README.rst b/src/plugins/wireguard/README.rst
new file mode 100644
index 00000000000..35dd2c41382
--- /dev/null
+++ b/src/plugins/wireguard/README.rst
@@ -0,0 +1,79 @@
+.. _wireguard_plugin_doc:
+
+Wireguard vpp-plugin
+====================
+
+Overview
+--------
+
+This plugin is an implementation of `wireguard
+protocol <https://www.wireguard.com/>`__ for VPP. It allows one to
+create secure VPN tunnels. This implementation is based on
+`wireguard-openbsd <https://git.zx2c4.com/wireguard-openbsd/>`__.
+
+Crypto
+------
+
+The crypto protocols:
+
+- blake2s `[Source] <https://github.com/BLAKE2/BLAKE2>`__
+
+OpenSSL:
+
+- curve25519
+- chachapoly1305
+
+Plugin usage example
+--------------------
+
+Create wireguard interface
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ > vpp# wireguard create listen-port <port> private-key <priv_key> src <src_ip4> [generate-key]
+ > *wg_interface*
+ > vpp# set int state <wg_interface> up
+ > vpp# set int ip address <wg_interface> <wg_ip4>
+
+Add a peer configuration:
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ > vpp# wireguard peer add <wg_interface> public-key <pub_key_other> endpoint <ip4_dst> allowed-ip <prefix> port <port_dst> persistent-keepalive [keepalive_interval]
+ > vpp# *peer_idx*
+
+Add routes for allowed-ip:
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+ > ip route add <prefix> via <wg_ip4> <wg_interface>
+
+Show config
+~~~~~~~~~~~
+
+::
+
+ > vpp# show wireguard interface
+ > vpp# show wireguard peer
+
+Remove peer
+~~~~~~~~~~~
+
+::
+
+ > vpp# wireguard peer remove <peer_idx>
+
+Delete interface
+~~~~~~~~~~~~~~~~
+
+::
+
+ > vpp# wireguard delete <wg_interface>
+
+Main next steps for improving this implementation
+-------------------------------------------------
+
+1. Use all benefits of VPP-engine.
diff --git a/src/plugins/wireguard/blake/blake2-impl.h b/src/plugins/wireguard/blake/blake2-impl.h
index ad60b4a5775..ad60b4a5775 100755..100644
--- a/src/plugins/wireguard/blake/blake2-impl.h
+++ b/src/plugins/wireguard/blake/blake2-impl.h
diff --git a/src/plugins/wireguard/blake/blake2s.c b/src/plugins/wireguard/blake/blake2s.c
index 3ff312a1322..3ff312a1322 100755..100644
--- a/src/plugins/wireguard/blake/blake2s.c
+++ b/src/plugins/wireguard/blake/blake2s.c
diff --git a/src/plugins/wireguard/blake/blake2s.h b/src/plugins/wireguard/blake/blake2s.h
index 37da0acf28a..37da0acf28a 100755..100644
--- a/src/plugins/wireguard/blake/blake2s.h
+++ b/src/plugins/wireguard/blake/blake2s.h
diff --git a/src/plugins/wireguard/wireguard.api b/src/plugins/wireguard/wireguard.api
index e290fc41ffc..55a36c6f6e5 100755..100644
--- a/src/plugins/wireguard/wireguard.api
+++ b/src/plugins/wireguard/wireguard.api
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-option version = "0.1.0";
+option version = "1.3.0";
import "vnet/interface_types.api";
import "vnet/ip/ip_types.api";
@@ -83,19 +83,24 @@ define wireguard_interface_details
enum wireguard_peer_flags : u8
{
WIREGUARD_PEER_STATUS_DEAD = 0x1,
+ WIREGUARD_PEER_ESTABLISHED = 0x2,
};
-/** \brief Create new peer
+/** \brief Peer structure
+ @param peer_index - peer pool index
@param public_key - public key (in binary format) of destination peer
@param port - destination port
+ @param persistent_keepalive - keepalive packet timeout
@param table_id - The IP table in which 'endpoint' is reachable
@param endpoint - destination ip
- @param allowed_ip - allowed incoming ip tunnel
- @param tun_sw_if_index - tunnel interface
- @param persistent_keepalive - keepalive packet timeout
+ @param sw_if_index - tunnel SW interface
+ @param flags - peer status flags
+ @param n_allowed_ips - number of prefixes in allowed_ips
+ @param allowed_ips - allowed incoming tunnel prefixes
*/
typedef wireguard_peer
{
+ u32 peer_index;
u8 public_key[32];
u16 port;
u16 persistent_keepalive;
@@ -107,6 +112,41 @@ typedef wireguard_peer
vl_api_prefix_t allowed_ips[n_allowed_ips];
};
+service {
+ rpc want_wireguard_peer_events returns want_wireguard_peer_events_reply
+ events wireguard_peer_event;
+};
+/** \brief Register for wireguard peer events
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - index of the interface to dump peer info on, ~0 if on all
+ @param peer_index - index of the peer to dump info on, ~0 if on all
+ @param enable_disable - 1 => register for events, 0 => cancel registration
+ @param pid - sender's pid
+*/
+autoreply define want_wireguard_peer_events
+{
+ u32 client_index;
+ u32 context;
+ vl_api_interface_index_t sw_if_index [default=0xFFFFFFFF];
+ u32 peer_index [default=0xFFFFFFFF];
+ u32 enable_disable;
+ u32 pid;
+};
+/** \brief Interface Event generated by want_wireguard_peer_events
+ @param client_index - opaque cookie to identify the sender
+ @param pid - client pid registered to receive notification
+ @param peer_index - index of the peer for this event
+ @param deleted - interface was deleted
+*/
+define wireguard_peer_event
+{
+ u32 client_index;
+ u32 pid;
+ u32 peer_index;
+ vl_api_wireguard_peer_flags_t flags;
+};
+
/** \brief Create new peer
@param client_index - opaque cookie to identify the sender
@param context - sender context, to match reply w/ request
@@ -118,6 +158,12 @@ define wireguard_peer_add
u32 context;
vl_api_wireguard_peer_t peer;
};
+
+/** \brief Create new peer
+ @param context - sender context, to match reply w/ request
+ @param retval - return status
+ @param peer_index - Created or existing peer pool index
+*/
define wireguard_peer_add_reply
{
u32 context;
@@ -125,10 +171,10 @@ define wireguard_peer_add_reply
u32 peer_index;
};
-/** \brief Remove peer by public_key
+/** \brief Remove peer
@param client_index - opaque cookie to identify the sender
@param context - sender context, to match reply w/ request
- @param public_key
+ @param peer_index - peer to be removed
*/
autoreply define wireguard_peer_remove
{
@@ -140,23 +186,34 @@ autoreply define wireguard_peer_remove
/** \brief Dump all peers
@param client_index - opaque cookie to identify the sender
@param context - sender context, to match reply w/ request
+ @param peer_index - peer index to be dumped. If 0xFFFFFFFF dumps all peers
*/
define wireguard_peers_dump {
u32 client_index;
u32 context;
+ u32 peer_index [default=0xFFFFFFFF];
};
-/** \brief Dump peers response
+/** \brief Dump peer details
@param context - sender context, to match reply w/ request
- @param is_dead - is peer valid yet
- @param public_key - peer public_key
- @param ip4_address - ip4 endpoint address
+ @param peer - peer details
*/
define wireguard_peers_details {
u32 context;
vl_api_wireguard_peer_t peer;
};
+/** \brief Wireguard Set Async mode
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param async_enable - wireguard async mode on or off, default off
+*/
+autoreply define wg_set_async_mode {
+ u32 client_index;
+ u32 context;
+ bool async_enable [default=false];
+};
+
/*
* Local Variables:
* eval: (c-set-style "gnu")
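
The new want_wireguard_peer_events / wireguard_peer_event messages above report peer state through the wireguard_peer_flags bitmask (WIREGUARD_PEER_STATUS_DEAD = 0x1, WIREGUARD_PEER_ESTABLISHED = 0x2), with peer_index carried in network byte order. Below is a minimal sketch of how a client might decode an event's flags field; the handle_peer_event helper is a hypothetical name, only the flag values and the byte order come from the definitions above.

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    /* Flag values mirror enum wireguard_peer_flags in wireguard.api. */
    #define WIREGUARD_PEER_STATUS_DEAD 0x1
    #define WIREGUARD_PEER_ESTABLISHED 0x2

    /* Hypothetical client-side handler for a wireguard_peer_event. */
    static void
    handle_peer_event (uint32_t peer_index_net, uint8_t flags)
    {
      /* peer_index is sent in network byte order (htonl on the VPP side). */
      unsigned peer_index = ntohl (peer_index_net);

      if (flags & WIREGUARD_PEER_STATUS_DEAD)
        printf ("peer %u: dead\n", peer_index);
      else if (flags & WIREGUARD_PEER_ESTABLISHED)
        printf ("peer %u: session established\n", peer_index);
      else
        printf ("peer %u: no active session\n", peer_index);
    }

    int
    main (void)
    {
      handle_peer_event (htonl (3), WIREGUARD_PEER_ESTABLISHED);
      return 0;
    }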
diff --git a/src/plugins/wireguard/wireguard.c b/src/plugins/wireguard/wireguard.c
index 58422299069..b1c8bc79870 100755..100644
--- a/src/plugins/wireguard/wireguard.c
+++ b/src/plugins/wireguard/wireguard.c
@@ -15,8 +15,8 @@
#include <vnet/vnet.h>
#include <vnet/plugin/plugin.h>
-#include <vnet/ipip/ipip.h>
#include <vpp/app/version.h>
+#include <vnet/crypto/crypto.h>
#include <wireguard/wireguard_send.h>
#include <wireguard/wireguard_key.h>
@@ -24,6 +24,45 @@
#include <wireguard/wireguard.h>
wg_main_t wg_main;
+wg_async_post_next_t wg_encrypt_async_next;
+wg_async_post_next_t wg_decrypt_async_next;
+
+void
+wg_set_async_mode (u32 is_enabled)
+{
+ if (is_enabled)
+ wg_op_mode_set_ASYNC ();
+ else
+ wg_op_mode_unset_ASYNC ();
+}
+
+static void
+wireguard_register_post_node (vlib_main_t *vm)
+
+{
+ wg_async_post_next_t *eit;
+ wg_async_post_next_t *dit;
+
+ eit = &wg_encrypt_async_next;
+ dit = &wg_decrypt_async_next;
+
+ eit->wg4_post_next =
+ vnet_crypto_register_post_node (vm, "wg4-output-tun-post-node");
+ eit->wg6_post_next =
+ vnet_crypto_register_post_node (vm, "wg6-output-tun-post-node");
+
+ dit->wg4_post_next =
+ vnet_crypto_register_post_node (vm, "wg4-input-post-node");
+ dit->wg6_post_next =
+ vnet_crypto_register_post_node (vm, "wg6-input-post-node");
+}
+
+void
+wg_secure_zero_memory (void *v, size_t n)
+{
+ static void *(*const volatile memset_v) (void *, int, size_t) = &memset;
+ memset_v (v, 0, n);
+}
static clib_error_t *
wg_init (vlib_main_t * vm)
@@ -32,9 +71,12 @@ wg_init (vlib_main_t * vm)
wmp->vlib_main = vm;
- wmp->in_fq_index = vlib_frame_queue_main_init (wg_input_node.index, 0);
- wmp->out_fq_index =
- vlib_frame_queue_main_init (wg_output_tun_node.index, 0);
+ wmp->in4_fq_index = vlib_frame_queue_main_init (wg4_input_node.index, 0);
+ wmp->in6_fq_index = vlib_frame_queue_main_init (wg6_input_node.index, 0);
+ wmp->out4_fq_index =
+ vlib_frame_queue_main_init (wg4_output_tun_node.index, 0);
+ wmp->out6_fq_index =
+ vlib_frame_queue_main_init (wg6_output_tun_node.index, 0);
vlib_thread_main_t *tm = vlib_get_thread_main ();
@@ -42,27 +84,32 @@ wg_init (vlib_main_t * vm)
CLIB_CACHE_LINE_BYTES);
wg_timer_wheel_init ();
+ wireguard_register_post_node (vm);
+ wmp->op_mode_flags = 0;
return (NULL);
}
VLIB_INIT_FUNCTION (wg_init);
-/* *INDENT-OFF* */
-VNET_FEATURE_INIT (wg_output_tun, static) =
-{
+VNET_FEATURE_INIT (wg4_output_tun, static) = {
.arc_name = "ip4-output",
- .node_name = "wg-output-tun",
+ .node_name = "wg4-output-tun",
.runs_after = VNET_FEATURES ("gso-ip4"),
};
+VNET_FEATURE_INIT (wg6_output_tun, static) = {
+ .arc_name = "ip6-output",
+ .node_name = "wg6-output-tun",
+ .runs_after = VNET_FEATURES ("gso-ip6"),
+};
+
VLIB_PLUGIN_REGISTER () =
{
.version = VPP_BUILD_VER,
.description = "Wireguard Protocol",
};
-/* *INDENT-ON* */
/*
* fd.io coding-style-patch-verification: ON
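
wg_secure_zero_memory() added above wipes key material through a volatile function pointer so the compiler cannot treat the memset as a dead store and drop it. A standalone sketch of the same idiom, independent of VPP (the secure_zero name and the sample buffer are illustrative):

    #include <stdio.h>
    #include <string.h>

    /* Same idiom as wg_secure_zero_memory: calling memset through a
     * volatile function pointer keeps the wipe from being optimized
     * away as a dead store. */
    static void *(*const volatile memset_v) (void *, int, size_t) = &memset;

    static void
    secure_zero (void *v, size_t n)
    {
      memset_v (v, 0, n);
    }

    int
    main (void)
    {
      char secret[32] = "ephemeral key material";

      /* A direct memset here could legally be elided because 'secret'
       * is never read again; the indirect call cannot. */
      secure_zero (secret, sizeof (secret));
      printf ("wiped %zu bytes\n", sizeof (secret));
      return 0;
    }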
diff --git a/src/plugins/wireguard/wireguard.h b/src/plugins/wireguard/wireguard.h
index ef308c4c397..05cefc4f073 100755..100644
--- a/src/plugins/wireguard/wireguard.h
+++ b/src/plugins/wireguard/wireguard.h
@@ -18,16 +18,25 @@
#include <wireguard/wireguard_index_table.h>
#include <wireguard/wireguard_messages.h>
#include <wireguard/wireguard_timer.h>
+#include <vnet/buffer.h>
#define WG_DEFAULT_DATA_SIZE 2048
-extern vlib_node_registration_t wg_input_node;
-extern vlib_node_registration_t wg_output_tun_node;
+extern vlib_node_registration_t wg4_input_node;
+extern vlib_node_registration_t wg6_input_node;
+extern vlib_node_registration_t wg4_output_tun_node;
+extern vlib_node_registration_t wg6_output_tun_node;
typedef struct wg_per_thread_data_t_
{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ vnet_crypto_op_t *crypto_ops;
+ vnet_crypto_op_t *chained_crypto_ops;
+ vnet_crypto_op_chunk_t *chunks;
+ vnet_crypto_async_frame_t **async_frames;
u8 data[WG_DEFAULT_DATA_SIZE];
} wg_per_thread_data_t;
+
typedef struct
{
/* convenience */
@@ -37,19 +46,81 @@ typedef struct
wg_index_table_t index_table;
- u32 in_fq_index;
- u32 out_fq_index;
+ u32 in4_fq_index;
+ u32 in6_fq_index;
+ u32 out4_fq_index;
+ u32 out6_fq_index;
wg_per_thread_data_t *per_thread_data;
u8 feature_init;
tw_timer_wheel_16t_2w_512sl_t timer_wheel;
+
+ /* operation mode flags (e.g. async) */
+ u8 op_mode_flags;
} wg_main_t;
+typedef struct
+{
+ /* wg post node index for async crypto */
+ u32 wg4_post_next;
+ u32 wg6_post_next;
+} wg_async_post_next_t;
+
+extern wg_async_post_next_t wg_encrypt_async_next;
+extern wg_async_post_next_t wg_decrypt_async_next;
extern wg_main_t wg_main;
+/**
+ * Wireguard operation mode
+ **/
+#define foreach_wg_op_mode_flags _ (0, ASYNC, "async")
+
+/**
+ * Helper function to set/unset and check op modes
+ **/
+typedef enum wg_op_mode_flags_t_
+{
+#define _(v, f, s) WG_OP_MODE_FLAG_##f = 1 << v,
+ foreach_wg_op_mode_flags
+#undef _
+} __clib_packed wg_op_mode_flags_t;
+
+#define _(a, v, s) \
+ always_inline int wg_op_mode_set_##v (void) \
+ { \
+ return (wg_main.op_mode_flags |= WG_OP_MODE_FLAG_##v); \
+ } \
+ always_inline int wg_op_mode_unset_##v (void) \
+ { \
+ return (wg_main.op_mode_flags &= ~WG_OP_MODE_FLAG_##v); \
+ } \
+ always_inline int wg_op_mode_is_set_##v (void) \
+ { \
+ return (wg_main.op_mode_flags & WG_OP_MODE_FLAG_##v); \
+ }
+foreach_wg_op_mode_flags
+#undef _
+
+ typedef struct
+{
+ u8 __pad[22];
+ u16 next_index;
+} wg_post_data_t;
+
+STATIC_ASSERT (sizeof (wg_post_data_t) <=
+ STRUCT_SIZE_OF (vnet_buffer_opaque_t, unused),
+ "Custom meta-data too large for vnet_buffer_opaque_t");
+
+#define wg_post_data(b) \
+ ((wg_post_data_t *) ((u8 *) ((b)->opaque) + \
+ STRUCT_OFFSET_OF (vnet_buffer_opaque_t, unused)))
+
#define WG_START_EVENT 1
void wg_feature_init (wg_main_t * wmp);
+void wg_set_async_mode (u32 is_enabled);
+
+void wg_secure_zero_memory (void *v, size_t n);
#endif /* __included_wg_h__ */
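
The foreach_wg_op_mode_flags X-macro above generates wg_op_mode_set_*, wg_op_mode_unset_* and wg_op_mode_is_set_* helpers per flag, all operating on wg_main.op_mode_flags. A reduced, self-contained sketch of the same expansion for a single ASYNC flag; the names and the op_mode_flags stand-in are assumptions made only so the example compiles on its own:

    #include <stdio.h>

    /* One flag at bit 0 with description "async", mirroring
     * foreach_wg_op_mode_flags in wireguard.h. */
    #define foreach_op_mode_flags _ (0, ASYNC, "async")

    typedef enum
    {
    #define _(v, f, s) OP_MODE_FLAG_##f = 1 << v,
      foreach_op_mode_flags
    #undef _
    } op_mode_flags_t;

    /* Stand-in for wg_main.op_mode_flags. */
    static unsigned char op_mode_flags;

    /* Generate set/is_set helpers per flag (the unset helper in the
     * plugin follows the same pattern with &= ~). */
    #define _(v, f, s)                                               \
      static int op_mode_set_##f (void)                              \
      {                                                              \
        return (op_mode_flags |= OP_MODE_FLAG_##f);                  \
      }                                                              \
      static int op_mode_is_set_##f (void)                           \
      {                                                              \
        return (op_mode_flags & OP_MODE_FLAG_##f);                   \
      }
    foreach_op_mode_flags
    #undef _

    int
    main (void)
    {
      op_mode_set_ASYNC ();
      printf ("async: %s\n", op_mode_is_set_ASYNC () ? "enabled" : "disabled");
      return 0;
    }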
diff --git a/src/plugins/wireguard/wireguard_api.c b/src/plugins/wireguard/wireguard_api.c
index 36cc2509463..e736efcd6c0 100755..100644
--- a/src/plugins/wireguard/wireguard_api.c
+++ b/src/plugins/wireguard/wireguard_api.c
@@ -27,9 +27,9 @@
#include <wireguard/wireguard_key.h>
#include <wireguard/wireguard.h>
#include <wireguard/wireguard_if.h>
-#include <wireguard/wireguard_peer.h>
#define REPLY_MSG_ID_BASE wmp->msg_id_base
+#include <wireguard/wireguard_peer.h>
#include <vlibapi/api_helper_macros.h>
static void
@@ -47,26 +47,18 @@ static void
ip_address_decode2 (&mp->interface.src_ip, &src);
- if (AF_IP6 == ip_addr_version (&src))
- rv = VNET_API_ERROR_INVALID_PROTOCOL;
+ if (mp->generate_key)
+ curve25519_gen_secret (private_key);
else
- {
- if (mp->generate_key)
- curve25519_gen_secret (private_key);
- else
- clib_memcpy (private_key, mp->interface.private_key,
- NOISE_PUBLIC_KEY_LEN);
-
- rv = wg_if_create (ntohl (mp->interface.user_instance), private_key,
- ntohs (mp->interface.port), &src, &sw_if_index);
- }
+ clib_memcpy (private_key, mp->interface.private_key, NOISE_PUBLIC_KEY_LEN);
+
+ rv = wg_if_create (ntohl (mp->interface.user_instance), private_key,
+ ntohs (mp->interface.port), &src, &sw_if_index);
- /* *INDENT-OFF* */
REPLY_MACRO2(VL_API_WIREGUARD_INTERFACE_CREATE_REPLY,
{
rmp->sw_if_index = htonl(sw_if_index);
});
- /* *INDENT-ON* */
}
static void
@@ -85,9 +77,7 @@ static void
BAD_SW_IF_INDEX_LABEL;
- /* *INDENT-OFF* */
REPLY_MACRO(VL_API_WIREGUARD_INTERFACE_DELETE_REPLY);
- /* *INDENT-ON* */
}
typedef struct wg_deatils_walk_t_
@@ -119,6 +109,7 @@ wireguard_if_send_details (index_t wgii, void *data)
local->l_public, NOISE_PUBLIC_KEY_LEN);
rmp->interface.sw_if_index = htonl (wgi->sw_if_index);
rmp->interface.port = htons (wgi->port);
+ rmp->interface.user_instance = htonl (wgi->user_instance);
ip_address_encode2 (&wgi->src_ip, &rmp->interface.src_ip);
rmp->context = ctx->context;
@@ -147,7 +138,15 @@ vl_api_wireguard_interface_dump_t_handler (vl_api_wireguard_interface_dump_t *
.show_private_key = mp->show_private_key,
};
- wg_if_walk (wireguard_if_send_details, &ctx);
+ u32 sw_if_index = ntohl (mp->sw_if_index);
+ if (sw_if_index == ~0)
+ wg_if_walk (wireguard_if_send_details, &ctx);
+ else
+ {
+ index_t wgii = wg_if_find_by_sw_if_index (sw_if_index);
+ if (wgii != INDEX_INVALID)
+ wireguard_if_send_details (wgii, &ctx);
+ }
}
static void
@@ -177,29 +176,19 @@ vl_api_wireguard_peer_add_t_handler (vl_api_wireguard_peer_add_t * mp)
for (ii = 0; ii < mp->peer.n_allowed_ips; ii++)
ip_prefix_decode (&mp->peer.allowed_ips[ii], &allowed_ips[ii]);
- if (AF_IP6 == ip_addr_version (&endpoint) ||
- FIB_PROTOCOL_IP6 == allowed_ips[0].fp_proto)
- /* ip6 currently not supported, but the API needs to support it
- * else we'll need to change it later, and that's a PITA */
- rv = VNET_API_ERROR_INVALID_PROTOCOL;
- else
- rv = wg_peer_add (ntohl (mp->peer.sw_if_index),
- mp->peer.public_key,
- ntohl (mp->peer.table_id),
- &ip_addr_46 (&endpoint),
- allowed_ips,
- ntohs (mp->peer.port),
- ntohs (mp->peer.persistent_keepalive), &peeri);
+ rv = wg_peer_add (ntohl (mp->peer.sw_if_index), mp->peer.public_key,
+ ntohl (mp->peer.table_id), &ip_addr_46 (&endpoint),
+ allowed_ips, ntohs (mp->peer.port),
+ ntohs (mp->peer.persistent_keepalive), &peeri);
vec_free (allowed_ips);
done:
BAD_SW_IF_INDEX_LABEL;
- /* *INDENT-OFF* */
+
REPLY_MACRO2(VL_API_WIREGUARD_PEER_ADD_REPLY,
{
rmp->peer_index = ntohl (peeri);
});
- /* *INDENT-ON* */
}
static void
@@ -213,13 +202,11 @@ vl_api_wireguard_peer_remove_t_handler (vl_api_wireguard_peer_remove_t * mp)
rv = wg_peer_remove (ntohl (mp->peer_index));
- /* *INDENT-OFF* */
REPLY_MACRO(VL_API_WIREGUARD_PEER_REMOVE_REPLY);
- /* *INDENT-ON* */
}
static walk_rc_t
-send_wg_peers_details (index_t peeri, void *data)
+wg_api_send_peers_details (index_t peeri, void *data)
{
vl_api_wireguard_peers_details_t *rmp;
wg_deatils_walk_t *ctx = data;
@@ -227,7 +214,11 @@ send_wg_peers_details (index_t peeri, void *data)
u8 n_allowed_ips;
size_t ss;
+ if (pool_is_free_index (wg_peer_pool, peeri))
+ return (WALK_CONTINUE);
+
peer = wg_peer_get (peeri);
+
n_allowed_ips = vec_len (peer->allowed_ips);
ss = (sizeof (*rmp) + (n_allowed_ips * sizeof (rmp->peer.allowed_ips[0])));
@@ -237,8 +228,8 @@ send_wg_peers_details (index_t peeri, void *data)
rmp->_vl_msg_id = htons (VL_API_WIREGUARD_PEERS_DETAILS +
wg_main.msg_id_base);
- if (peer->is_dead)
- rmp->peer.flags = WIREGUARD_PEER_STATUS_DEAD;
+ rmp->peer.peer_index = htonl (peeri);
+ rmp->peer.flags = peer->flags;
clib_memcpy (rmp->peer.public_key,
peer->remote.r_public, NOISE_PUBLIC_KEY_LEN);
@@ -246,11 +237,12 @@ send_wg_peers_details (index_t peeri, void *data)
rmp->peer.port = htons (peer->dst.port);
rmp->peer.n_allowed_ips = n_allowed_ips;
rmp->peer.sw_if_index = htonl (peer->wg_sw_if_index);
+ rmp->peer.persistent_keepalive = htons (peer->persistent_keepalive_interval);
+ rmp->peer.table_id = htonl (peer->table_id);
int ii;
for (ii = 0; ii < n_allowed_ips; ii++)
- ip_prefix_encode (&peer->allowed_ips[ii].prefix,
- &rmp->peer.allowed_ips[ii]);
+ ip_prefix_encode (&peer->allowed_ips[ii], &rmp->peer.allowed_ips[ii]);
rmp->context = ctx->context;
@@ -276,7 +268,143 @@ vl_api_wireguard_peers_dump_t_handler (vl_api_wireguard_peers_dump_t * mp)
.context = mp->context,
};
- wg_peer_walk (send_wg_peers_details, &ctx);
+ if (mp->peer_index == ~0)
+ wg_peer_walk (wg_api_send_peers_details, &ctx);
+ else
+ wg_api_send_peers_details (ntohl (mp->peer_index), &ctx);
+}
+
+static vpe_client_registration_t *
+wg_api_client_lookup (wg_peer_t *peer, u32 client_index)
+{
+ uword *p;
+ vpe_client_registration_t *api_client = NULL;
+
+ p = hash_get (peer->api_client_by_client_index, client_index);
+ if (p)
+ api_client = vec_elt_at_index (peer->api_clients, p[0]);
+
+ return api_client;
+}
+
+static walk_rc_t
+wg_api_update_peer_api_client (index_t peeri, void *data)
+{
+ if (pool_is_free_index (wg_peer_pool, peeri))
+ return (WALK_CONTINUE);
+
+ vl_api_want_wireguard_peer_events_t *mp = data;
+ wg_peer_t *peer = wg_peer_get (peeri);
+
+ if (ntohl (mp->sw_if_index) != ~0 &&
+ ntohl (mp->sw_if_index) != peer->wg_sw_if_index)
+ {
+ return (WALK_CONTINUE);
+ }
+
+ vpe_client_registration_t *api_client;
+
+ api_client = wg_api_client_lookup (peer, mp->client_index);
+
+ if (api_client)
+ {
+ if (mp->enable_disable)
+ {
+ return (WALK_CONTINUE);
+ }
+ hash_unset (peer->api_client_by_client_index, api_client->client_index);
+ pool_put (peer->api_clients, api_client);
+ }
+ if (mp->enable_disable)
+ {
+ pool_get (peer->api_clients, api_client);
+ clib_memset (api_client, 0, sizeof (vpe_client_registration_t));
+ api_client->client_index = mp->client_index;
+ api_client->client_pid = mp->pid;
+ hash_set (peer->api_client_by_client_index, mp->client_index,
+ api_client - peer->api_clients);
+ }
+
+ return (WALK_CONTINUE);
+}
+
+static void
+vl_api_want_wireguard_peer_events_t_handler (
+ vl_api_want_wireguard_peer_events_t *mp)
+{
+ wg_main_t *wmp = &wg_main;
+ vl_api_want_wireguard_peer_events_reply_t *rmp;
+ int rv = 0;
+
+ wg_feature_init (wmp);
+
+ if (mp->peer_index == ~0)
+ wg_peer_walk (wg_api_update_peer_api_client, mp);
+ else
+ wg_api_update_peer_api_client (ntohl (mp->peer_index), mp);
+
+ REPLY_MACRO (VL_API_WANT_WIREGUARD_PEER_EVENTS_REPLY);
+}
+
+static void
+wg_api_send_peer_event (vl_api_registration_t *rp, index_t peer_index,
+ wg_peer_flags flags)
+{
+ vl_api_wireguard_peer_event_t *mp = vl_msg_api_alloc (sizeof (*mp));
+ clib_memset (mp, 0, sizeof (*mp));
+
+ mp->_vl_msg_id = htons (VL_API_WIREGUARD_PEER_EVENT + wg_main.msg_id_base);
+ mp->peer_index = htonl (peer_index);
+ mp->flags = flags;
+
+ vl_api_send_msg (rp, (u8 *) mp);
+}
+
+typedef struct
+{
+ index_t peeri;
+ wg_peer_flags flags;
+} wg_api_peer_event_args_t;
+
+static void
+wg_api_peer_event_cb (wg_api_peer_event_args_t *args)
+{
+ wg_peer_t *peer = wg_peer_get (args->peeri);
+ vpe_client_registration_t *api_client;
+ vl_api_registration_t *rp;
+
+ pool_foreach (api_client, peer->api_clients)
+ {
+ rp = vl_api_client_index_to_registration (api_client->client_index);
+ if (rp)
+ {
+ wg_api_send_peer_event (rp, args->peeri, args->flags);
+ }
+ };
+}
+
+void
+wg_api_peer_event (index_t peeri, wg_peer_flags flags)
+{
+ wg_api_peer_event_args_t args = {
+ .peeri = peeri,
+ .flags = flags,
+ };
+
+ vl_api_rpc_call_main_thread (wg_api_peer_event_cb, (u8 *) &args,
+ sizeof (args));
+}
+
+static void
+vl_api_wg_set_async_mode_t_handler (vl_api_wg_set_async_mode_t *mp)
+{
+ wg_main_t *wmp = &wg_main;
+ vl_api_wg_set_async_mode_reply_t *rmp;
+ int rv = 0;
+
+ wg_set_async_mode (mp->async_enable);
+
+ REPLY_MACRO (VL_API_WG_SET_ASYNC_MODE_REPLY);
}
/* set tup the API message handling tables */
diff --git a/src/plugins/wireguard/wireguard_chachapoly.c b/src/plugins/wireguard/wireguard_chachapoly.c
new file mode 100644
index 00000000000..ad644ff6cb8
--- /dev/null
+++ b/src/plugins/wireguard/wireguard_chachapoly.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2022 Rubicon Communications, LLC.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <wireguard/wireguard.h>
+#include <wireguard/wireguard_chachapoly.h>
+#include <wireguard/wireguard_hchacha20.h>
+
+bool
+wg_chacha20poly1305_calc (vlib_main_t *vm, u8 *src, u32 src_len, u8 *dst,
+ u8 *aad, u32 aad_len, u64 nonce,
+ vnet_crypto_op_id_t op_id,
+ vnet_crypto_key_index_t key_index)
+{
+ vnet_crypto_op_t _op, *op = &_op;
+ u8 iv[12];
+ u8 tag_[NOISE_AUTHTAG_LEN] = {};
+ u8 src_[] = {};
+
+ clib_memset (iv, 0, 12);
+ clib_memcpy (iv + 4, &nonce, sizeof (nonce));
+
+ vnet_crypto_op_init (op, op_id);
+
+ op->tag_len = NOISE_AUTHTAG_LEN;
+ if (op_id == VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC)
+ {
+ op->tag = src + src_len - NOISE_AUTHTAG_LEN;
+ src_len -= NOISE_AUTHTAG_LEN;
+ op->flags |= VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
+ }
+ else
+ op->tag = tag_;
+
+ op->src = !src ? src_ : src;
+ op->len = src_len;
+
+ op->dst = dst;
+ op->key_index = key_index;
+ op->aad = aad;
+ op->aad_len = aad_len;
+ op->iv = iv;
+
+ vnet_crypto_process_ops (vm, op, 1);
+ if (op_id == VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC)
+ {
+ clib_memcpy (dst + src_len, op->tag, NOISE_AUTHTAG_LEN);
+ }
+
+ return (op->status == VNET_CRYPTO_OP_STATUS_COMPLETED);
+}
+
+void
+wg_xchacha20poly1305_encrypt (vlib_main_t *vm, u8 *src, u32 src_len, u8 *dst,
+ u8 *aad, u32 aad_len,
+ u8 nonce[XCHACHA20POLY1305_NONCE_SIZE],
+ u8 key[CHACHA20POLY1305_KEY_SIZE])
+{
+ int i;
+ u32 derived_key[CHACHA20POLY1305_KEY_SIZE / sizeof (u32)];
+ u64 h_nonce;
+
+ clib_memcpy (&h_nonce, nonce + 16, sizeof (h_nonce));
+ h_nonce = clib_little_to_host_u64 (h_nonce);
+ hchacha20 (derived_key, nonce, key);
+
+ for (i = 0; i < (sizeof (derived_key) / sizeof (derived_key[0])); i++)
+ (derived_key[i]) = clib_host_to_little_u32 ((derived_key[i]));
+
+ uint32_t key_idx;
+
+ key_idx =
+ vnet_crypto_key_add (vm, VNET_CRYPTO_ALG_CHACHA20_POLY1305,
+ (uint8_t *) derived_key, CHACHA20POLY1305_KEY_SIZE);
+
+ wg_chacha20poly1305_calc (vm, src, src_len, dst, aad, aad_len, h_nonce,
+ VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC, key_idx);
+
+ vnet_crypto_key_del (vm, key_idx);
+ wg_secure_zero_memory (derived_key, CHACHA20POLY1305_KEY_SIZE);
+}
+
+bool
+wg_xchacha20poly1305_decrypt (vlib_main_t *vm, u8 *src, u32 src_len, u8 *dst,
+ u8 *aad, u32 aad_len,
+ u8 nonce[XCHACHA20POLY1305_NONCE_SIZE],
+ u8 key[CHACHA20POLY1305_KEY_SIZE])
+{
+ int ret, i;
+ u32 derived_key[CHACHA20POLY1305_KEY_SIZE / sizeof (u32)];
+ u64 h_nonce;
+
+ clib_memcpy (&h_nonce, nonce + 16, sizeof (h_nonce));
+ h_nonce = clib_little_to_host_u64 (h_nonce);
+ hchacha20 (derived_key, nonce, key);
+
+ for (i = 0; i < (sizeof (derived_key) / sizeof (derived_key[0])); i++)
+ (derived_key[i]) = clib_host_to_little_u32 ((derived_key[i]));
+
+ uint32_t key_idx;
+
+ key_idx =
+ vnet_crypto_key_add (vm, VNET_CRYPTO_ALG_CHACHA20_POLY1305,
+ (uint8_t *) derived_key, CHACHA20POLY1305_KEY_SIZE);
+
+ ret =
+ wg_chacha20poly1305_calc (vm, src, src_len, dst, aad, aad_len, h_nonce,
+ VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC, key_idx);
+
+ vnet_crypto_key_del (vm, key_idx);
+ wg_secure_zero_memory (derived_key, CHACHA20POLY1305_KEY_SIZE);
+
+ return ret;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
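
wg_xchacha20poly1305_encrypt()/_decrypt() above implement XChaCha20-Poly1305 in two steps: hchacha20() over the key and the first 16 bytes of the 24-byte nonce derives a sub-key, and the remaining 8 nonce bytes become the 64-bit nonce that wg_chacha20poly1305_calc() writes into bytes 4..11 of the 12-byte ChaCha20-Poly1305 IV, leaving bytes 0..3 zero. A minimal sketch of just that key/nonce split; derive_subkey_stub is a placeholder standing in for the real hchacha20() and performs no actual cryptography:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define XNONCE_SIZE 24 /* XChaCha20-Poly1305 nonce */
    #define IV_SIZE     12 /* ChaCha20-Poly1305 IV */

    /* Placeholder for hchacha20(): derives a 32-byte sub-key from the
     * key and the first 16 nonce bytes.  No real cryptography here. */
    static void
    derive_subkey_stub (uint8_t subkey[32], const uint8_t nonce16[16],
                        const uint8_t key[32])
    {
      for (int i = 0; i < 32; i++)
        subkey[i] = key[i] ^ nonce16[i % 16];
    }

    int
    main (void)
    {
      uint8_t key[32] = { 1 }, xnonce[XNONCE_SIZE] = { 2 };
      uint8_t subkey[32], iv[IV_SIZE];
      uint64_t nonce64;

      /* Step 1: sub-key from key + nonce[0..15]. */
      derive_subkey_stub (subkey, xnonce, key);

      /* Step 2: IV = 4 zero bytes followed by nonce[16..23], the same
       * layout wg_chacha20poly1305_calc builds from its u64 nonce. */
      memcpy (&nonce64, xnonce + 16, sizeof (nonce64));
      memset (iv, 0, IV_SIZE);
      memcpy (iv + 4, &nonce64, sizeof (nonce64));

      printf ("subkey[0]=%02x iv[0]=%02x iv[4]=%02x\n", subkey[0], iv[0], iv[4]);
      return 0;
    }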
diff --git a/src/plugins/wireguard/wireguard_chachapoly.h b/src/plugins/wireguard/wireguard_chachapoly.h
new file mode 100644
index 00000000000..f09b2c8dd9d
--- /dev/null
+++ b/src/plugins/wireguard/wireguard_chachapoly.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2022 Rubicon Communications, LLC.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __included_wg_chachapoly_h__
+#define __included_wg_chachapoly_h__
+
+#include <vlib/vlib.h>
+#include <vnet/crypto/crypto.h>
+
+#define XCHACHA20POLY1305_NONCE_SIZE 24
+#define CHACHA20POLY1305_KEY_SIZE 32
+
+bool wg_chacha20poly1305_calc (vlib_main_t *vm, u8 *src, u32 src_len, u8 *dst,
+ u8 *aad, u32 aad_len, u64 nonce,
+ vnet_crypto_op_id_t op_id,
+ vnet_crypto_key_index_t key_index);
+
+void wg_xchacha20poly1305_encrypt (vlib_main_t *vm, u8 *src, u32 src_len,
+ u8 *dst, u8 *aad, u32 aad_len,
+ u8 nonce[XCHACHA20POLY1305_NONCE_SIZE],
+ u8 key[CHACHA20POLY1305_KEY_SIZE]);
+
+bool wg_xchacha20poly1305_decrypt (vlib_main_t *vm, u8 *src, u32 src_len,
+ u8 *dst, u8 *aad, u32 aad_len,
+ u8 nonce[XCHACHA20POLY1305_NONCE_SIZE],
+ u8 key[CHACHA20POLY1305_KEY_SIZE]);
+
+#endif /* __included_wg_chachapoly_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/wireguard/wireguard_cli.c b/src/plugins/wireguard/wireguard_cli.c
index 3b4bf56a3dc..e412fa36c44 100755..100644
--- a/src/plugins/wireguard/wireguard_cli.c
+++ b/src/plugins/wireguard/wireguard_cli.c
@@ -25,7 +25,7 @@ wg_if_create_cli (vlib_main_t * vm,
{
wg_main_t *wmp = &wg_main;
unformat_input_t _line_input, *line_input = &_line_input;
- u8 private_key[NOISE_PUBLIC_KEY_LEN];
+ u8 private_key[NOISE_PUBLIC_KEY_LEN + 1];
u32 instance, sw_if_index;
ip_address_t src_ip;
clib_error_t *error;
@@ -94,14 +94,12 @@ wg_if_create_cli (vlib_main_t * vm,
/*?
* Create a Wireguard interface.
?*/
-/* *INDENT-OFF* */
VLIB_CLI_COMMAND (wg_if_create_command, static) = {
.path = "wireguard create",
.short_help = "wireguard create listen-port <port> "
"private-key <key> src <IP> [generate-key]",
.function = wg_if_create_cli,
};
-/* *INDENT-ON* */
static clib_error_t *
wg_if_delete_cli (vlib_main_t * vm,
@@ -143,13 +141,11 @@ wg_if_delete_cli (vlib_main_t * vm,
/*?
* Delete a Wireguard interface.
?*/
-/* *INDENT-OFF* */
VLIB_CLI_COMMAND (wg_if_delete_command, static) = {
.path = "wireguard delete",
.short_help = "wireguard delete <interface>",
.function = wg_if_delete_cli,
};
-/* *INDENT-ON* */
static clib_error_t *
@@ -162,10 +158,10 @@ wg_peer_add_command_fn (vlib_main_t * vm,
unformat_input_t _line_input, *line_input = &_line_input;
u8 *public_key_64 = 0;
- u8 public_key[NOISE_PUBLIC_KEY_LEN];
+ u8 public_key[NOISE_PUBLIC_KEY_LEN + 1];
fib_prefix_t allowed_ip, *allowed_ips = NULL;
ip_prefix_t pfx;
- ip_address_t ip;
+ ip_address_t ip = ip_address_initializer;
u32 portDst = 0, table_id = 0;
u32 persistent_keepalive = 0;
u32 tun_sw_if_index = ~0;
@@ -192,7 +188,7 @@ wg_peer_add_command_fn (vlib_main_t * vm,
;
else if (unformat (line_input, "table-id %d", &table_id))
;
- else if (unformat (line_input, "port %d", &portDst))
+ else if (unformat (line_input, "dst-port %d", &portDst))
;
else if (unformat (line_input, "persistent-keepalive %d",
&persistent_keepalive))
@@ -213,16 +209,14 @@ wg_peer_add_command_fn (vlib_main_t * vm,
}
}
- if (AF_IP6 == ip_addr_version (&ip) ||
- FIB_PROTOCOL_IP6 == allowed_ip.fp_proto)
- rv = VNET_API_ERROR_INVALID_PROTOCOL;
- else
- rv = wg_peer_add (tun_sw_if_index,
- public_key,
- table_id,
- &ip_addr_46 (&ip),
- allowed_ips,
- portDst, persistent_keepalive, &peer_index);
+ if (0 == vec_len (allowed_ips))
+ {
+ error = clib_error_return (0, "Allowed IPs are not specified");
+ goto done;
+ }
+
+ rv = wg_peer_add (tun_sw_if_index, public_key, table_id, &ip_addr_46 (&ip),
+ allowed_ips, portDst, persistent_keepalive, &peer_index);
switch (rv)
{
@@ -253,16 +247,14 @@ done:
return error;
}
-/* *INDENT-OFF* */
-VLIB_CLI_COMMAND (wg_peer_add_command, static) =
-{
+VLIB_CLI_COMMAND (wg_peer_add_command, static) = {
.path = "wireguard peer add",
- .short_help = "wireguard peer add <wg_int> public-key <pub_key_other>"
- "endpoint <ip4_dst> allowed-ip <prefix>"
- "dst-port [port_dst] persistent-keepalive [keepalive_interval]",
+ .short_help =
+ "wireguard peer add <wg_int> public-key <pub_key_other> "
+ "endpoint <ip4_dst> allowed-ip <prefix> "
+ "dst-port [port_dst] persistent-keepalive [keepalive_interval]",
.function = wg_peer_add_command_fn,
};
-/* *INDENT-ON* */
static clib_error_t *
wg_peer_remove_command_fn (vlib_main_t * vm,
@@ -301,14 +293,12 @@ done:
return error;
}
-/* *INDENT-OFF* */
VLIB_CLI_COMMAND (wg_peer_remove_command, static) =
{
.path = "wireguard peer remove",
.short_help = "wireguard peer remove <index>",
.function = wg_peer_remove_command_fn,
};
-/* *INDENT-ON* */
static walk_rc_t
wg_peer_show_one (index_t peeri, void *arg)
@@ -327,14 +317,12 @@ wg_show_peer_command_fn (vlib_main_t * vm,
return NULL;
}
-/* *INDENT-OFF* */
VLIB_CLI_COMMAND (wg_show_peers_command, static) =
{
.path = "show wireguard peer",
.short_help = "show wireguard peer",
.function = wg_show_peer_command_fn,
};
-/* *INDENT-ON* */
static walk_rc_t
wg_if_show_one (index_t itfi, void *arg)
@@ -357,14 +345,67 @@ wg_show_if_command_fn (vlib_main_t * vm,
return NULL;
}
-/* *INDENT-OFF* */
VLIB_CLI_COMMAND (wg_show_itfs_command, static) =
{
.path = "show wireguard interface",
.short_help = "show wireguard",
.function = wg_show_if_command_fn,
};
-/* *INDENT-ON* */
+
+static clib_error_t *
+wg_set_async_mode_command_fn (vlib_main_t *vm, unformat_input_t *input,
+ vlib_cli_command_t *cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ int async_enable = 0;
+
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "on"))
+ async_enable = 1;
+ else if (unformat (line_input, "off"))
+ async_enable = 0;
+ else
+ return (clib_error_return (0, "unknown input '%U'",
+ format_unformat_error, line_input));
+ }
+
+ wg_set_async_mode (async_enable);
+
+ unformat_free (line_input);
+ return (NULL);
+}
+
+VLIB_CLI_COMMAND (wg_set_async_mode_command, static) = {
+ .path = "set wireguard async mode",
+ .short_help = "set wireguard async mode on|off",
+ .function = wg_set_async_mode_command_fn,
+};
+
+static clib_error_t *
+wg_show_mode_command_fn (vlib_main_t *vm, unformat_input_t *input,
+ vlib_cli_command_t *cmd)
+{
+ vlib_cli_output (vm, "Wireguard mode");
+
+#define _(v, f, s) \
+ vlib_cli_output (vm, "\t%s: %s", s, \
+ (wg_op_mode_is_set_##f () ? "enabled" : "disabled"));
+ foreach_wg_op_mode_flags
+#undef _
+
+ return (NULL);
+}
+
+VLIB_CLI_COMMAND (wg_show_modemode_command, static) = {
+ .path = "show wireguard mode",
+ .short_help = "show wireguard mode",
+ .function = wg_show_mode_command_fn,
+};
+
/*
* fd.io coding-style-patch-verification: ON
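
The two commands registered above toggle and inspect the async crypto mode. A short usage sketch; the "Wireguard mode" output follows the format strings in wg_show_mode_command_fn:

    vpp# set wireguard async mode on
    vpp# show wireguard mode
    Wireguard mode
            async: enabled
    vpp# set wireguard async mode off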
diff --git a/src/plugins/wireguard/wireguard_cookie.c b/src/plugins/wireguard/wireguard_cookie.c
index f54ce715906..4ebbfa0fa63 100755..100644
--- a/src/plugins/wireguard/wireguard_cookie.c
+++ b/src/plugins/wireguard/wireguard_cookie.c
@@ -20,6 +20,7 @@
#include <vlib/vlib.h>
#include <wireguard/wireguard_cookie.h>
+#include <wireguard/wireguard_chachapoly.h>
#include <wireguard/wireguard.h>
static void cookie_precompute_key (uint8_t *,
@@ -29,9 +30,14 @@ static void cookie_macs_mac1 (message_macs_t *, const void *, size_t,
const uint8_t[COOKIE_KEY_SIZE]);
static void cookie_macs_mac2 (message_macs_t *, const void *, size_t,
const uint8_t[COOKIE_COOKIE_SIZE]);
-static void cookie_checker_make_cookie (vlib_main_t * vm, cookie_checker_t *,
+static void cookie_checker_make_cookie (vlib_main_t *vm, cookie_checker_t *,
uint8_t[COOKIE_COOKIE_SIZE],
- ip4_address_t ip4, u16 udp_port);
+ ip46_address_t *ip, u16 udp_port);
+
+static void ratelimit_init (ratelimit_t *, ratelimit_entry_t *);
+static void ratelimit_deinit (ratelimit_t *);
+static void ratelimit_gc (ratelimit_t *, bool);
+static bool ratelimit_allow (ratelimit_t *, ip46_address_t *);
/* Public Functions */
void
@@ -43,6 +49,14 @@ cookie_maker_init (cookie_maker_t * cp, const uint8_t key[COOKIE_INPUT_SIZE])
}
void
+cookie_checker_init (cookie_checker_t *cc, ratelimit_entry_t *pool)
+{
+ clib_memset (cc, 0, sizeof (*cc));
+ ratelimit_init (&cc->cc_ratelimit_v4, pool);
+ ratelimit_init (&cc->cc_ratelimit_v6, pool);
+}
+
+void
cookie_checker_update (cookie_checker_t * cc, uint8_t key[COOKIE_INPUT_SIZE])
{
if (key)
@@ -58,6 +72,58 @@ cookie_checker_update (cookie_checker_t * cc, uint8_t key[COOKIE_INPUT_SIZE])
}
void
+cookie_checker_deinit (cookie_checker_t *cc)
+{
+ ratelimit_deinit (&cc->cc_ratelimit_v4);
+ ratelimit_deinit (&cc->cc_ratelimit_v6);
+}
+
+void
+cookie_checker_create_payload (vlib_main_t *vm, cookie_checker_t *cc,
+ message_macs_t *cm,
+ uint8_t nonce[COOKIE_NONCE_SIZE],
+ uint8_t ecookie[COOKIE_ENCRYPTED_SIZE],
+ ip46_address_t *ip, u16 udp_port)
+{
+ uint8_t cookie[COOKIE_COOKIE_SIZE];
+
+ cookie_checker_make_cookie (vm, cc, cookie, ip, udp_port);
+ RAND_bytes (nonce, COOKIE_NONCE_SIZE);
+
+ wg_xchacha20poly1305_encrypt (vm, cookie, COOKIE_COOKIE_SIZE, ecookie,
+ cm->mac1, COOKIE_MAC_SIZE, nonce,
+ cc->cc_cookie_key);
+
+ wg_secure_zero_memory (cookie, sizeof (cookie));
+}
+
+bool
+cookie_maker_consume_payload (vlib_main_t *vm, cookie_maker_t *cp,
+ uint8_t nonce[COOKIE_NONCE_SIZE],
+ uint8_t ecookie[COOKIE_ENCRYPTED_SIZE])
+{
+ uint8_t cookie[COOKIE_COOKIE_SIZE];
+
+ if (cp->cp_mac1_valid == 0)
+ {
+ return false;
+ }
+
+ if (!wg_xchacha20poly1305_decrypt (vm, ecookie, COOKIE_ENCRYPTED_SIZE,
+ cookie, cp->cp_mac1_last, COOKIE_MAC_SIZE,
+ nonce, cp->cp_cookie_key))
+ {
+ return false;
+ }
+
+ clib_memcpy (cp->cp_cookie, cookie, COOKIE_COOKIE_SIZE);
+ cp->cp_birthdate = vlib_time_now (vm);
+ cp->cp_mac1_valid = 0;
+
+ return true;
+}
+
+void
cookie_maker_mac (cookie_maker_t * cp, message_macs_t * cm, void *buf,
size_t len)
{
@@ -76,9 +142,9 @@ cookie_maker_mac (cookie_maker_t * cp, message_macs_t * cm, void *buf,
}
enum cookie_mac_state
-cookie_checker_validate_macs (vlib_main_t * vm, cookie_checker_t * cc,
- message_macs_t * cm, void *buf, size_t len,
- bool busy, ip4_address_t ip4, u16 udp_port)
+cookie_checker_validate_macs (vlib_main_t *vm, cookie_checker_t *cc,
+ message_macs_t *cm, void *buf, size_t len,
+ bool busy, ip46_address_t *ip, u16 udp_port)
{
message_macs_t our_cm;
uint8_t cookie[COOKIE_COOKIE_SIZE];
@@ -93,13 +159,20 @@ cookie_checker_validate_macs (vlib_main_t * vm, cookie_checker_t * cc,
if (!busy)
return VALID_MAC_BUT_NO_COOKIE;
- cookie_checker_make_cookie (vm, cc, cookie, ip4, udp_port);
+ cookie_checker_make_cookie (vm, cc, cookie, ip, udp_port);
cookie_macs_mac2 (&our_cm, buf, len, cookie);
/* If the mac2 is invalid, we want to send a cookie response */
if (clib_memcmp (our_cm.mac2, cm->mac2, COOKIE_MAC_SIZE) != 0)
return VALID_MAC_BUT_NO_COOKIE;
+ /* If the mac2 is valid, we may want to rate limit the peer */
+ ratelimit_t *rl;
+ rl = ip46_address_is_ip4 (ip) ? &cc->cc_ratelimit_v4 : &cc->cc_ratelimit_v6;
+
+ if (!ratelimit_allow (rl, ip))
+ return VALID_MAC_WITH_COOKIE_BUT_RATELIMITED;
+
return VALID_MAC_WITH_COOKIE;
}
@@ -139,9 +212,9 @@ cookie_macs_mac2 (message_macs_t * cm, const void *buf, size_t len,
}
static void
-cookie_checker_make_cookie (vlib_main_t * vm, cookie_checker_t * cc,
+cookie_checker_make_cookie (vlib_main_t *vm, cookie_checker_t *cc,
uint8_t cookie[COOKIE_COOKIE_SIZE],
- ip4_address_t ip4, u16 udp_port)
+ ip46_address_t *ip, u16 udp_port)
{
blake2s_state_t state;
@@ -155,11 +228,138 @@ cookie_checker_make_cookie (vlib_main_t * vm, cookie_checker_t * cc,
blake2s_init_key (&state, COOKIE_COOKIE_SIZE, cc->cc_secret,
COOKIE_SECRET_SIZE);
- blake2s_update (&state, ip4.as_u8, sizeof (ip4_address_t)); //TODO: IP6
+ if (ip46_address_is_ip4 (ip))
+ {
+ blake2s_update (&state, ip->ip4.as_u8, sizeof (ip4_address_t));
+ }
+ else
+ {
+ blake2s_update (&state, ip->ip6.as_u8, sizeof (ip6_address_t));
+ }
blake2s_update (&state, (u8 *) & udp_port, sizeof (u16));
blake2s_final (&state, cookie, COOKIE_COOKIE_SIZE);
}
+static void
+ratelimit_init (ratelimit_t *rl, ratelimit_entry_t *pool)
+{
+ rl->rl_pool = pool;
+}
+
+static void
+ratelimit_deinit (ratelimit_t *rl)
+{
+ ratelimit_gc (rl, /* force */ true);
+ hash_free (rl->rl_table);
+}
+
+static void
+ratelimit_gc (ratelimit_t *rl, bool force)
+{
+ u32 r_key;
+ u32 r_idx;
+ ratelimit_entry_t *r;
+
+ if (force)
+ {
+ /* clang-format off */
+ hash_foreach (r_key, r_idx, rl->rl_table, {
+ r = pool_elt_at_index (rl->rl_pool, r_idx);
+ pool_put (rl->rl_pool, r);
+ });
+ /* clang-format on */
+ return;
+ }
+
+ f64 now = vlib_time_now (vlib_get_main ());
+
+ if ((rl->rl_last_gc + ELEMENT_TIMEOUT) < now)
+ {
+ u32 *r_key_to_del = NULL;
+ u32 *pr_key;
+
+ rl->rl_last_gc = now;
+
+ /* clang-format off */
+ hash_foreach (r_key, r_idx, rl->rl_table, {
+ r = pool_elt_at_index (rl->rl_pool, r_idx);
+ if ((r->r_last_time + ELEMENT_TIMEOUT) < now)
+ {
+ vec_add1 (r_key_to_del, r_key);
+ pool_put (rl->rl_pool, r);
+ }
+ });
+ /* clang-format on */
+
+ vec_foreach (pr_key, r_key_to_del)
+ {
+ hash_unset (rl->rl_table, *pr_key);
+ }
+
+ vec_free (r_key_to_del);
+ }
+}
+
+static bool
+ratelimit_allow (ratelimit_t *rl, ip46_address_t *ip)
+{
+ u32 r_key;
+ uword *p;
+ u32 r_idx;
+ ratelimit_entry_t *r;
+ f64 now = vlib_time_now (vlib_get_main ());
+
+ if (ip46_address_is_ip4 (ip))
+ /* Use all 4 bytes of IPv4 address */
+ r_key = ip->ip4.as_u32;
+ else
+ /* Use top 8 bytes (/64) of IPv6 address */
+ r_key = ip->ip6.as_u32[0] ^ ip->ip6.as_u32[1];
+
+ /* Check if there is already an entry for the IP address */
+ p = hash_get (rl->rl_table, r_key);
+ if (p)
+ {
+ u64 tokens;
+ f64 diff;
+
+ r_idx = p[0];
+ r = pool_elt_at_index (rl->rl_pool, r_idx);
+
+ diff = now - r->r_last_time;
+ r->r_last_time = now;
+
+ tokens = r->r_tokens + diff * NSEC_PER_SEC;
+
+ if (tokens > TOKEN_MAX)
+ tokens = TOKEN_MAX;
+
+ if (tokens >= INITIATION_COST)
+ {
+ r->r_tokens = tokens - INITIATION_COST;
+ return true;
+ }
+
+ r->r_tokens = tokens;
+ return false;
+ }
+
+ /* No entry for the IP address */
+ ratelimit_gc (rl, /* force */ false);
+
+ if (hash_elts (rl->rl_table) >= RATELIMIT_SIZE_MAX)
+ return false;
+
+ pool_get (rl->rl_pool, r);
+ r_idx = r - rl->rl_pool;
+ hash_set (rl->rl_table, r_key, r_idx);
+
+ r->r_last_time = now;
+ r->r_tokens = TOKEN_MAX - INITIATION_COST;
+
+ return true;
+}
+
/*
* fd.io coding-style-patch-verification: ON
*
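
ratelimit_allow() above is a per-source token bucket: an entry accrues tokens proportional to the time since it was last hit, is capped at TOKEN_MAX, and an initiation is admitted only when INITIATION_COST tokens can be spent. A self-contained sketch of the same accounting with the clock passed in as a parameter; the bucket_t type and the per-second/burst constants are illustrative stand-ins, the real values live in wireguard_cookie.h:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative values; the real constants are defined in
     * wireguard_cookie.h. */
    #define NSEC_PER_SEC           1000000000ULL
    #define INITIATIONS_PER_SECOND 20
    #define INITIATIONS_BURSTABLE  5
    #define INITIATION_COST        (NSEC_PER_SEC / INITIATIONS_PER_SECOND)
    #define TOKEN_MAX              (INITIATION_COST * INITIATIONS_BURSTABLE)

    /* Illustrative stand-in for ratelimit_entry_t. */
    typedef struct
    {
      double last_time; /* seconds */
      uint64_t tokens;
    } bucket_t;

    /* Same accounting as ratelimit_allow() for one entry. */
    static bool
    bucket_allow (bucket_t *b, double now)
    {
      uint64_t tokens =
        b->tokens + (uint64_t) ((now - b->last_time) * NSEC_PER_SEC);

      b->last_time = now;
      if (tokens > TOKEN_MAX)
        tokens = TOKEN_MAX;

      if (tokens >= INITIATION_COST)
        {
          b->tokens = tokens - INITIATION_COST;
          return true;
        }
      b->tokens = tokens;
      return false;
    }

    int
    main (void)
    {
      bucket_t b = { .last_time = 0.0, .tokens = TOKEN_MAX };
      int admitted = 0;

      /* A burst at t=0 is limited to INITIATIONS_BURSTABLE handshakes. */
      for (int i = 0; i < 10; i++)
        admitted += bucket_allow (&b, 0.0);
      printf ("admitted %d of 10\n", admitted);
      return 0;
    }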
diff --git a/src/plugins/wireguard/wireguard_cookie.h b/src/plugins/wireguard/wireguard_cookie.h
index 489cce81325..7467cf2ed4a 100755..100644
--- a/src/plugins/wireguard/wireguard_cookie.h
+++ b/src/plugins/wireguard/wireguard_cookie.h
@@ -18,14 +18,15 @@
#ifndef __included_wg_cookie_h__
#define __included_wg_cookie_h__
-#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip46_address.h>
#include <wireguard/wireguard_noise.h>
enum cookie_mac_state
{
INVALID_MAC,
VALID_MAC_BUT_NO_COOKIE,
- VALID_MAC_WITH_COOKIE
+ VALID_MAC_WITH_COOKIE,
+ VALID_MAC_WITH_COOKIE_BUT_RATELIMITED,
};
#define COOKIE_MAC_SIZE 16
@@ -50,8 +51,6 @@ enum cookie_mac_state
#define INITIATION_COST (NSEC_PER_SEC / INITIATIONS_PER_SECOND)
#define TOKEN_MAX (INITIATION_COST * INITIATIONS_BURSTABLE)
#define ELEMENT_TIMEOUT 1
-#define IPV4_MASK_SIZE 4 /* Use all 4 bytes of IPv4 address */
-#define IPV6_MASK_SIZE 8 /* Use top 8 bytes (/64) of IPv6 address */
typedef struct cookie_macs
{
@@ -59,6 +58,19 @@ typedef struct cookie_macs
uint8_t mac2[COOKIE_MAC_SIZE];
} message_macs_t;
+typedef struct ratelimit_entry
+{
+ f64 r_last_time;
+ u64 r_tokens;
+} ratelimit_entry_t;
+
+typedef struct ratelimit
+{
+ ratelimit_entry_t *rl_pool;
+ uword *rl_table;
+ f64 rl_last_gc;
+} ratelimit_t;
+
typedef struct cookie_maker
{
uint8_t cp_mac1_key[COOKIE_KEY_SIZE];
@@ -72,6 +84,9 @@ typedef struct cookie_maker
typedef struct cookie_checker
{
+ ratelimit_t cc_ratelimit_v4;
+ ratelimit_t cc_ratelimit_v6;
+
uint8_t cc_mac1_key[COOKIE_KEY_SIZE];
uint8_t cc_cookie_key[COOKIE_KEY_SIZE];
@@ -81,14 +96,22 @@ typedef struct cookie_checker
void cookie_maker_init (cookie_maker_t *, const uint8_t[COOKIE_INPUT_SIZE]);
+void cookie_checker_init (cookie_checker_t *, ratelimit_entry_t *);
void cookie_checker_update (cookie_checker_t *, uint8_t[COOKIE_INPUT_SIZE]);
+void cookie_checker_deinit (cookie_checker_t *);
+void cookie_checker_create_payload (vlib_main_t *vm, cookie_checker_t *cc,
+ message_macs_t *cm,
+ uint8_t nonce[COOKIE_NONCE_SIZE],
+ uint8_t ecookie[COOKIE_ENCRYPTED_SIZE],
+ ip46_address_t *ip, u16 udp_port);
+bool cookie_maker_consume_payload (vlib_main_t *vm, cookie_maker_t *cp,
+ uint8_t nonce[COOKIE_NONCE_SIZE],
+ uint8_t ecookie[COOKIE_ENCRYPTED_SIZE]);
void cookie_maker_mac (cookie_maker_t *, message_macs_t *, void *, size_t);
-enum cookie_mac_state cookie_checker_validate_macs (vlib_main_t * vm,
- cookie_checker_t *,
- message_macs_t *, void *,
- size_t, bool,
- ip4_address_t ip4,
- u16 udp_port);
+enum cookie_mac_state
+cookie_checker_validate_macs (vlib_main_t *vm, cookie_checker_t *,
+ message_macs_t *, void *, size_t, bool,
+ ip46_address_t *ip, u16 udp_port);
#endif /* __included_wg_cookie_h__ */
diff --git a/src/plugins/wireguard/wireguard_handoff.c b/src/plugins/wireguard/wireguard_handoff.c
index d3e37b30c88..195baf209a0 100644
--- a/src/plugins/wireguard/wireguard_handoff.c
+++ b/src/plugins/wireguard/wireguard_handoff.c
@@ -129,40 +129,77 @@ wg_handoff (vlib_main_t * vm,
return n_enq;
}
-VLIB_NODE_FN (wg_handshake_handoff) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
+VLIB_NODE_FN (wg4_handshake_handoff)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
{
wg_main_t *wmp = &wg_main;
- return wg_handoff (vm, node, from_frame, wmp->in_fq_index,
+ return wg_handoff (vm, node, from_frame, wmp->in4_fq_index,
WG_HANDOFF_HANDSHAKE);
}
-VLIB_NODE_FN (wg_input_data_handoff) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
+VLIB_NODE_FN (wg6_handshake_handoff)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
{
wg_main_t *wmp = &wg_main;
- return wg_handoff (vm, node, from_frame, wmp->in_fq_index,
+ return wg_handoff (vm, node, from_frame, wmp->in6_fq_index,
+ WG_HANDOFF_HANDSHAKE);
+}
+
+VLIB_NODE_FN (wg4_input_data_handoff)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
+{
+ wg_main_t *wmp = &wg_main;
+
+ return wg_handoff (vm, node, from_frame, wmp->in4_fq_index,
+ WG_HANDOFF_INP_DATA);
+}
+
+VLIB_NODE_FN (wg6_input_data_handoff)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
+{
+ wg_main_t *wmp = &wg_main;
+
+ return wg_handoff (vm, node, from_frame, wmp->in6_fq_index,
WG_HANDOFF_INP_DATA);
}
-VLIB_NODE_FN (wg_output_tun_handoff) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
+VLIB_NODE_FN (wg4_output_tun_handoff)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
{
wg_main_t *wmp = &wg_main;
- return wg_handoff (vm, node, from_frame, wmp->out_fq_index,
+ return wg_handoff (vm, node, from_frame, wmp->out4_fq_index,
WG_HANDOFF_OUT_TUN);
}
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (wg_handshake_handoff) =
+VLIB_NODE_FN (wg6_output_tun_handoff)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
+{
+ wg_main_t *wmp = &wg_main;
+
+ return wg_handoff (vm, node, from_frame, wmp->out6_fq_index,
+ WG_HANDOFF_OUT_TUN);
+}
+
+VLIB_REGISTER_NODE (wg4_handshake_handoff) =
+{
+ .name = "wg4-handshake-handoff",
+ .vector_size = sizeof (u32),
+ .format_trace = format_wg_handoff_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN (wg_handoff_error_strings),
+ .error_strings = wg_handoff_error_strings,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "error-drop",
+ },
+};
+
+VLIB_REGISTER_NODE (wg6_handshake_handoff) =
{
- .name = "wg-handshake-handoff",
+ .name = "wg6-handshake-handoff",
.vector_size = sizeof (u32),
.format_trace = format_wg_handoff_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
@@ -174,9 +211,9 @@ VLIB_REGISTER_NODE (wg_handshake_handoff) =
},
};
-VLIB_REGISTER_NODE (wg_input_data_handoff) =
+VLIB_REGISTER_NODE (wg4_input_data_handoff) =
{
- .name = "wg-input-data-handoff",
+ .name = "wg4-input-data-handoff",
.vector_size = sizeof (u32),
.format_trace = format_wg_handoff_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
@@ -188,9 +225,37 @@ VLIB_REGISTER_NODE (wg_input_data_handoff) =
},
};
-VLIB_REGISTER_NODE (wg_output_tun_handoff) =
+VLIB_REGISTER_NODE (wg6_input_data_handoff) =
+{
+ .name = "wg6-input-data-handoff",
+ .vector_size = sizeof (u32),
+ .format_trace = format_wg_handoff_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN (wg_handoff_error_strings),
+ .error_strings = wg_handoff_error_strings,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "error-drop",
+ },
+};
+
+VLIB_REGISTER_NODE (wg4_output_tun_handoff) =
+{
+ .name = "wg4-output-tun-handoff",
+ .vector_size = sizeof (u32),
+ .format_trace = format_wg_handoff_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN (wg_handoff_error_strings),
+ .error_strings = wg_handoff_error_strings,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "error-drop",
+ },
+};
+
+VLIB_REGISTER_NODE (wg6_output_tun_handoff) =
{
- .name = "wg-output-tun-handoff",
+ .name = "wg6-output-tun-handoff",
.vector_size = sizeof (u32),
.format_trace = format_wg_handoff_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
@@ -201,7 +266,6 @@ VLIB_REGISTER_NODE (wg_output_tun_handoff) =
[0] = "error-drop",
},
};
-/* *INDENT-ON* */
/*
* fd.io coding-style-patch-verification: ON
diff --git a/src/plugins/wireguard/wireguard_hchacha20.h b/src/plugins/wireguard/wireguard_hchacha20.h
new file mode 100644
index 00000000000..a2d139621c9
--- /dev/null
+++ b/src/plugins/wireguard/wireguard_hchacha20.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2022 Rubicon Communications, LLC.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * chacha-merged.c version 20080118
+ * D. J. Bernstein
+ * Public domain.
+ */
+
+#ifndef __included_wg_hchacha20_h__
+#define __included_wg_hchacha20_h__
+
+#include <vlib/vlib.h>
+
+/* clang-format off */
+#define U32C(v) (v##U)
+#define U32V(v) ((u32)(v) & U32C(0xFFFFFFFF))
+
+#define ROTL32(v, n) \
+ (U32V((v) << (n)) | ((v) >> (32 - (n))))
+
+#define U8TO32_LITTLE(p) \
+ (((u32)((p)[0]) ) | \
+ ((u32)((p)[1]) << 8) | \
+ ((u32)((p)[2]) << 16) | \
+ ((u32)((p)[3]) << 24))
+
+#define ROTATE(v,c) (ROTL32(v,c))
+#define XOR(v,w) ((v) ^ (w))
+#define PLUS(v,w) (U32V((v) + (w)))
+
+#define QUARTERROUND(a,b,c,d) \
+ a = PLUS(a,b); d = ROTATE(XOR(d,a),16); \
+ c = PLUS(c,d); b = ROTATE(XOR(b,c),12); \
+ a = PLUS(a,b); d = ROTATE(XOR(d,a), 8); \
+ c = PLUS(c,d); b = ROTATE(XOR(b,c), 7);
+/* clang-format on */
+
+static const char sigma[16] = "expand 32-byte k";
+
+static inline void
+hchacha20 (u32 derived_key[8], const u8 nonce[16], const u8 key[32])
+{
+ int i;
+ u32 x[] = { U8TO32_LITTLE (sigma + 0), U8TO32_LITTLE (sigma + 4),
+ U8TO32_LITTLE (sigma + 8), U8TO32_LITTLE (sigma + 12),
+ U8TO32_LITTLE (key + 0), U8TO32_LITTLE (key + 4),
+ U8TO32_LITTLE (key + 8), U8TO32_LITTLE (key + 12),
+ U8TO32_LITTLE (key + 16), U8TO32_LITTLE (key + 20),
+ U8TO32_LITTLE (key + 24), U8TO32_LITTLE (key + 28),
+ U8TO32_LITTLE (nonce + 0), U8TO32_LITTLE (nonce + 4),
+ U8TO32_LITTLE (nonce + 8), U8TO32_LITTLE (nonce + 12) };
+
+ for (i = 20; i > 0; i -= 2)
+ {
+ QUARTERROUND (x[0], x[4], x[8], x[12])
+ QUARTERROUND (x[1], x[5], x[9], x[13])
+ QUARTERROUND (x[2], x[6], x[10], x[14])
+ QUARTERROUND (x[3], x[7], x[11], x[15])
+ QUARTERROUND (x[0], x[5], x[10], x[15])
+ QUARTERROUND (x[1], x[6], x[11], x[12])
+ QUARTERROUND (x[2], x[7], x[8], x[13])
+ QUARTERROUND (x[3], x[4], x[9], x[14])
+ }
+
+ clib_memcpy (derived_key + 0, x + 0, sizeof (u32) * 4);
+ clib_memcpy (derived_key + 4, x + 12, sizeof (u32) * 4);
+}
+
+#endif /* __included_wg_hchacha20_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/wireguard/wireguard_if.c b/src/plugins/wireguard/wireguard_if.c
index f7eb5a1d9e0..afeeda1dd2b 100644
--- a/src/plugins/wireguard/wireguard_if.c
+++ b/src/plugins/wireguard/wireguard_if.c
@@ -32,13 +32,17 @@ static uword *wg_if_instances;
static index_t *wg_if_index_by_sw_if_index;
/* vector of interfaces key'd on their UDP port (in network order) */
-index_t *wg_if_index_by_port;
+index_t **wg_if_indexes_by_port;
+
+/* pool of ratelimit entries */
+static ratelimit_entry_t *wg_ratelimit_pool;
static u8 *
format_wg_if_name (u8 * s, va_list * args)
{
u32 dev_instance = va_arg (*args, u32);
- return format (s, "wg%d", dev_instance);
+ wg_if_t *wgi = wg_if_get (dev_instance);
+ return format (s, "wg%d", wgi->user_instance);
}
u8 *
@@ -49,7 +53,6 @@ format_wg_if (u8 * s, va_list * args)
noise_local_t *local = noise_local_get (wgi->local_idx);
u8 key[NOISE_KEY_LEN_BASE64];
-
s = format (s, "[%d] %U src:%U port:%d",
wgii,
format_vnet_sw_if_index_name, vnet_get_main (),
@@ -113,20 +116,20 @@ wg_remote_get (const uint8_t public[NOISE_PUBLIC_KEY_LEN])
}
static uint32_t
-wg_index_set (noise_remote_t * remote)
+wg_index_set (vlib_main_t *vm, noise_remote_t *remote)
{
wg_main_t *wmp = &wg_main;
u32 rnd_seed = (u32) (vlib_time_now (wmp->vlib_main) * 1e6);
u32 ret =
- wg_index_table_add (&wmp->index_table, remote->r_peer_idx, rnd_seed);
+ wg_index_table_add (vm, &wmp->index_table, remote->r_peer_idx, rnd_seed);
return ret;
}
static void
-wg_index_drop (uint32_t key)
+wg_index_drop (vlib_main_t *vm, uint32_t key)
{
wg_main_t *wmp = &wg_main;
- wg_index_table_del (&wmp->index_table, key);
+ wg_index_table_del (vm, &wmp->index_table, key);
}
static clib_error_t *
@@ -151,11 +154,21 @@ wg_if_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
void
wg_if_update_adj (vnet_main_t * vnm, u32 sw_if_index, adj_index_t ai)
{
- /* The peers manage the adjacencies */
+ index_t wgii;
+
+ /* Convert any neighbour adjacency that has a next-hop reachable through
+ * the wg interface into a midchain. This is to avoid sending ARP/ND to
+ * resolve the next-hop address via the wg interface. Then, if one of the
+ * peers has a matching prefix among its allowed prefixes, the midchain is
+ * updated to the corresponding peer.
+ */
+ adj_nbr_midchain_update_rewrite (ai, NULL, NULL, ADJ_FLAG_NONE, NULL);
+
+ wgii = wg_if_find_by_sw_if_index (sw_if_index);
+ wg_if_peer_walk (wg_if_get (wgii), wg_peer_if_adj_change, &ai);
}
-/* *INDENT-OFF* */
VNET_DEVICE_CLASS (wg_if_device_class) = {
.name = "Wireguard Tunnel",
.format_device_name = format_wg_if_name,
@@ -167,7 +180,6 @@ VNET_HW_INTERFACE_CLASS(wg_hw_interface_class) = {
.update_adjacency = wg_if_update_adj,
.flags = VNET_HW_INTERFACE_CLASS_FLAG_NBMA,
};
-/* *INDENT-ON* */
/*
* Maintain a bitmap of allocated wg_if instance numbers.
@@ -251,13 +263,6 @@ wg_if_create (u32 user_instance,
*sw_if_indexp = (u32) ~ 0;
/*
- * Check if the required port is already in use
- */
- udp_dst_port_info_t *pi = udp_get_dst_port_info (&udp_main, port, UDP_IP4);
- if (pi)
- return VNET_API_ERROR_UDP_PORT_TAKEN;
-
- /*
* Allocate a wg_if instance. Either select on dynamically
* or try to use the desired user_instance number.
*/
@@ -265,13 +270,11 @@ wg_if_create (u32 user_instance,
if (instance == ~0)
return VNET_API_ERROR_INVALID_REGISTRATION;
- /* *INDENT-OFF* */
struct noise_upcall upcall = {
.u_remote_get = wg_remote_get,
.u_index_set = wg_index_set,
.u_index_drop = wg_index_drop,
};
- /* *INDENT-ON* */
pool_get (noise_local_pool, local);
@@ -283,7 +286,7 @@ wg_if_create (u32 user_instance,
return VNET_API_ERROR_INVALID_REGISTRATION;
}
- pool_get (wg_if_pool, wg_if);
+ pool_get_zero (wg_if_pool, wg_if);
/* tunnel index (or instance) */
u32 t_idx = wg_if - wg_if_pool;
@@ -292,13 +295,20 @@ wg_if_create (u32 user_instance,
if (~0 == wg_if->user_instance)
wg_if->user_instance = t_idx;
- udp_register_dst_port (vlib_get_main (), port, wg_input_node.index, 1);
+ vec_validate_init_empty (wg_if_indexes_by_port, port, NULL);
+ if (vec_len (wg_if_indexes_by_port[port]) == 0)
+ {
+ udp_register_dst_port (vlib_get_main (), port, wg4_input_node.index,
+ UDP_IP4);
+ udp_register_dst_port (vlib_get_main (), port, wg6_input_node.index,
+ UDP_IP6);
+ }
- vec_validate_init_empty (wg_if_index_by_port, port, INDEX_INVALID);
- wg_if_index_by_port[port] = wg_if - wg_if_pool;
+ vec_add1 (wg_if_indexes_by_port[port], t_idx);
wg_if->port = port;
wg_if->local_idx = local - noise_local_pool;
+ cookie_checker_init (&wg_if->cookie_checker, wg_ratelimit_pool);
cookie_checker_update (&wg_if->cookie_checker, local->l_public);
hw_if_index = vnet_register_interface (vnm,
@@ -314,6 +324,8 @@ wg_if_create (u32 user_instance,
ip_address_copy (&wg_if->src_ip, src_ip);
wg_if->sw_if_index = *sw_if_indexp = hi->sw_if_index;
+ vnet_set_interface_l3_output_node (vnm->vlib_main, hi->sw_if_index,
+ (u8 *) "tunnel-output");
return 0;
}
@@ -331,15 +343,38 @@ wg_if_delete (u32 sw_if_index)
return VNET_API_ERROR_INVALID_VALUE;
wg_if_t *wg_if;
- wg_if = wg_if_get (wg_if_find_by_sw_if_index (sw_if_index));
+ index_t wgii = wg_if_find_by_sw_if_index (sw_if_index);
+ wg_if = wg_if_get (wgii);
if (NULL == wg_if)
return VNET_API_ERROR_INVALID_SW_IF_INDEX_2;
if (wg_if_instance_free (wg_if->user_instance) < 0)
return VNET_API_ERROR_INVALID_VALUE_2;
- udp_unregister_dst_port (vlib_get_main (), wg_if->port, 1);
- wg_if_index_by_port[wg_if->port] = INDEX_INVALID;
+ // Remove peers before interface deletion
+ wg_if_peer_walk (wg_if, wg_peer_if_delete, NULL);
+
+ hash_free (wg_if->peers);
+
+ index_t *ii;
+ index_t *ifs = wg_if_indexes_get_by_port (wg_if->port);
+ vec_foreach (ii, ifs)
+ {
+ if (*ii == wgii)
+ {
+ vec_del1 (ifs, ii - ifs);
+ break;
+ }
+ }
+ if (vec_len (ifs) == 0)
+ {
+ udp_unregister_dst_port (vlib_get_main (), wg_if->port, 1);
+ udp_unregister_dst_port (vlib_get_main (), wg_if->port, 0);
+ }
+
+ cookie_checker_deinit (&wg_if->cookie_checker);
+
+ vnet_reset_interface_l3_output_node (vnm->vlib_main, sw_if_index);
vnet_delete_hw_interface (vnm, hw->hw_if_index);
pool_put_index (noise_local_pool, wg_if->local_idx);
pool_put (wg_if_pool, wg_if);
@@ -353,8 +388,12 @@ wg_if_peer_add (wg_if_t * wgi, index_t peeri)
hash_set (wgi->peers, peeri, peeri);
if (1 == hash_elts (wgi->peers))
- vnet_feature_enable_disable ("ip4-output", "wg-output-tun",
- wgi->sw_if_index, 1, 0, 0);
+ {
+ vnet_feature_enable_disable ("ip4-output", "wg4-output-tun",
+ wgi->sw_if_index, 1, 0, 0);
+ vnet_feature_enable_disable ("ip6-output", "wg6-output-tun",
+ wgi->sw_if_index, 1, 0, 0);
+ }
}
void
@@ -363,8 +402,12 @@ wg_if_peer_remove (wg_if_t * wgi, index_t peeri)
hash_unset (wgi->peers, peeri);
if (0 == hash_elts (wgi->peers))
- vnet_feature_enable_disable ("ip4-output", "wg-output-tun",
- wgi->sw_if_index, 0, 0, 0);
+ {
+ vnet_feature_enable_disable ("ip4-output", "wg4-output-tun",
+ wgi->sw_if_index, 0, 0, 0);
+ vnet_feature_enable_disable ("ip6-output", "wg6-output-tun",
+ wgi->sw_if_index, 0, 0, 0);
+ }
}
void
@@ -372,13 +415,11 @@ wg_if_walk (wg_if_walk_cb_t fn, void *data)
{
index_t wgii;
- /* *INDENT-OFF* */
pool_foreach_index (wgii, wg_if_pool)
{
if (WALK_STOP == fn(wgii, data))
break;
}
- /* *INDENT-ON* */
}
index_t
@@ -386,85 +427,14 @@ wg_if_peer_walk (wg_if_t * wgi, wg_if_peer_walk_cb_t fn, void *data)
{
index_t peeri, val;
- /* *INDENT-OFF* */
- hash_foreach (peeri, val, wgi->peers,
- {
- if (WALK_STOP == fn(wgi, peeri, data))
+ hash_foreach (peeri, val, wgi->peers, {
+ if (WALK_STOP == fn (peeri, data))
return peeri;
});
- /* *INDENT-ON* */
return INDEX_INVALID;
}
-
-static void
-wg_if_table_bind_v4 (ip4_main_t * im,
- uword opaque,
- u32 sw_if_index, u32 new_fib_index, u32 old_fib_index)
-{
- wg_if_t *wg_if;
-
- wg_if = wg_if_get (wg_if_find_by_sw_if_index (sw_if_index));
- if (NULL == wg_if)
- return;
-
- wg_peer_table_bind_ctx_t ctx = {
- .af = AF_IP4,
- .old_fib_index = old_fib_index,
- .new_fib_index = new_fib_index,
- };
-
- wg_if_peer_walk (wg_if, wg_peer_if_table_change, &ctx);
-}
-
-static void
-wg_if_table_bind_v6 (ip6_main_t * im,
- uword opaque,
- u32 sw_if_index, u32 new_fib_index, u32 old_fib_index)
-{
- wg_if_t *wg_if;
-
- wg_if = wg_if_get (wg_if_find_by_sw_if_index (sw_if_index));
- if (NULL == wg_if)
- return;
-
- wg_peer_table_bind_ctx_t ctx = {
- .af = AF_IP6,
- .old_fib_index = old_fib_index,
- .new_fib_index = new_fib_index,
- };
-
- wg_if_peer_walk (wg_if, wg_peer_if_table_change, &ctx);
-}
-
-static clib_error_t *
-wg_if_module_init (vlib_main_t * vm)
-{
- {
- ip4_table_bind_callback_t cb = {
- .function = wg_if_table_bind_v4,
- };
- vec_add1 (ip4_main.table_bind_callbacks, cb);
- }
- {
- ip6_table_bind_callback_t cb = {
- .function = wg_if_table_bind_v6,
- };
- vec_add1 (ip6_main.table_bind_callbacks, cb);
- }
-
- return (NULL);
-}
-
-/* *INDENT-OFF* */
-VLIB_INIT_FUNCTION (wg_if_module_init) =
-{
- .runs_after = VLIB_INITS("ip_main_init"),
-};
-/* *INDENT-ON* */
-
-
/*
* fd.io coding-style-patch-verification: ON
*
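
Since several WireGuard interfaces may now share a single UDP listen port, the per-port lookup yields a vector of interface indices instead of a single index. A hedged sketch of the consumer pattern used by wg_if_create()/wg_if_delete() above (wg_if_example_walk_port() is a hypothetical helper, not part of the plugin):

static void
wg_if_example_walk_port (u16 port)
{
  index_t *ii;
  index_t *ifs = wg_if_indexes_get_by_port (port);

  if (ifs == NULL)
    return; /* no interface bound to this port */

  vec_foreach (ii, ifs)
    {
      wg_if_t *wgi = wg_if_get (*ii);
      if (wgi != NULL)
        clib_warning ("wg instance %u listens on port %u",
                      wgi->user_instance, port);
    }
}
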
diff --git a/src/plugins/wireguard/wireguard_if.h b/src/plugins/wireguard/wireguard_if.h
index 7c11ad9b281..2a6ab8e4be5 100644
--- a/src/plugins/wireguard/wireguard_if.h
+++ b/src/plugins/wireguard/wireguard_if.h
@@ -31,13 +31,15 @@ typedef struct wg_if_t_
cookie_checker_t cookie_checker;
u16 port;
- wg_index_table_t index_table;
-
/* Source IP address for originated packets */
ip_address_t src_ip;
/* hash table of peers on this link */
uword *peers;
+
+ /* Under-load detection parameters */
+ f64 handshake_counting_end;
+ u32 handshake_num;
} wg_if_t;
@@ -52,8 +54,7 @@ u8 *format_wg_if (u8 * s, va_list * va);
typedef walk_rc_t (*wg_if_walk_cb_t) (index_t wgi, void *data);
void wg_if_walk (wg_if_walk_cb_t fn, void *data);
-typedef walk_rc_t (*wg_if_peer_walk_cb_t) (wg_if_t * wgi, index_t peeri,
- void *data);
+typedef walk_rc_t (*wg_if_peer_walk_cb_t) (index_t peeri, void *data);
index_t wg_if_peer_walk (wg_if_t * wgi, wg_if_peer_walk_cb_t fn, void *data);
void wg_if_peer_add (wg_if_t * wgi, index_t peeri);
@@ -72,18 +73,56 @@ wg_if_get (index_t wgii)
return (pool_elt_at_index (wg_if_pool, wgii));
}
-extern index_t *wg_if_index_by_port;
+extern index_t **wg_if_indexes_by_port;
-static_always_inline wg_if_t *
-wg_if_get_by_port (u16 port)
+static_always_inline index_t *
+wg_if_indexes_get_by_port (u16 port)
{
- if (vec_len (wg_if_index_by_port) < port)
+ if (vec_len (wg_if_indexes_by_port) == 0)
return (NULL);
- if (INDEX_INVALID == wg_if_index_by_port[port])
+ if (vec_len (wg_if_indexes_by_port[port]) == 0)
return (NULL);
- return (wg_if_get (wg_if_index_by_port[port]));
+ return (wg_if_indexes_by_port[port]);
}
+#define HANDSHAKE_COUNTING_INTERVAL 0.5
+#define UNDER_LOAD_INTERVAL 1.0
+#define HANDSHAKE_NUM_PER_PEER_UNTIL_UNDER_LOAD 40
+
+static_always_inline bool
+wg_if_is_under_load (vlib_main_t *vm, wg_if_t *wgi)
+{
+ static f64 wg_under_load_end;
+ f64 now = vlib_time_now (vm);
+ u32 num_until_under_load =
+ hash_elts (wgi->peers) * HANDSHAKE_NUM_PER_PEER_UNTIL_UNDER_LOAD;
+
+ if (wgi->handshake_counting_end < now)
+ {
+ wgi->handshake_counting_end = now + HANDSHAKE_COUNTING_INTERVAL;
+ wgi->handshake_num = 0;
+ }
+ wgi->handshake_num++;
+
+ if (wgi->handshake_num >= num_until_under_load)
+ {
+ wg_under_load_end = now + UNDER_LOAD_INTERVAL;
+ return true;
+ }
+
+ if (wg_under_load_end > now)
+ {
+ return true;
+ }
+
+ return false;
+}
+
+static_always_inline void
+wg_if_dec_handshake_num (wg_if_t *wgi)
+{
+ wgi->handshake_num--;
+}
#endif
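
To make the under-load heuristic concrete: with HANDSHAKE_NUM_PER_PEER_UNTIL_UNDER_LOAD set to 40, an interface with three peers tolerates up to 120 handshake messages within one 0.5 s counting window; crossing that threshold marks the interface as under load for the next UNDER_LOAD_INTERVAL (1 s), during which initiators are asked to present a valid cookie. A tiny illustrative helper (not part of the plugin):

/* Illustration only: handshake threshold for an interface with n_peers. */
static inline u32
wg_example_under_load_threshold (u32 n_peers)
{
  /* 40 handshakes per peer per 0.5 s counting interval */
  return n_peers * HANDSHAKE_NUM_PER_PEER_UNTIL_UNDER_LOAD;
}

/* e.g. wg_example_under_load_threshold (3) == 120 */
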
diff --git a/src/plugins/wireguard/wireguard_index_table.c b/src/plugins/wireguard/wireguard_index_table.c
index 5f81204b4c0..da53bfd75f1 100755..100644
--- a/src/plugins/wireguard/wireguard_index_table.c
+++ b/src/plugins/wireguard/wireguard_index_table.c
@@ -13,13 +13,15 @@
* limitations under the License.
*/
+#include <vlib/vlib.h>
#include <vppinfra/hash.h>
#include <vppinfra/pool.h>
#include <vppinfra/random.h>
#include <wireguard/wireguard_index_table.h>
u32
-wg_index_table_add (wg_index_table_t * table, u32 peer_pool_idx, u32 rnd_seed)
+wg_index_table_add (vlib_main_t *vm, wg_index_table_t *table,
+ u32 peer_pool_idx, u32 rnd_seed)
{
u32 key;
@@ -29,19 +31,25 @@ wg_index_table_add (wg_index_table_t * table, u32 peer_pool_idx, u32 rnd_seed)
if (hash_get (table->hash, key))
continue;
+ vlib_worker_thread_barrier_sync (vm);
hash_set (table->hash, key, peer_pool_idx);
+ vlib_worker_thread_barrier_release (vm);
break;
}
return key;
}
void
-wg_index_table_del (wg_index_table_t * table, u32 key)
+wg_index_table_del (vlib_main_t *vm, wg_index_table_t *table, u32 key)
{
uword *p;
p = hash_get (table->hash, key);
if (p)
- hash_unset (table->hash, key);
+ {
+ vlib_worker_thread_barrier_sync (vm);
+ hash_unset (table->hash, key);
+ vlib_worker_thread_barrier_release (vm);
+ }
}
u32 *
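
With the table now mutated under the worker-thread barrier, callers only need to thread the vlib_main_t through. A minimal sketch of the expected call pattern, mirroring how wg_index_set()/wg_index_drop() in wireguard_if.c use it (peer_idx is a placeholder value):

/* Sketch: allocate and later release a receiver index for a peer. */
static void
wg_index_table_example (vlib_main_t *vm, wg_index_table_t *table, u32 peer_idx)
{
  u32 seed = (u32) (vlib_time_now (vm) * 1e6);
  u32 key = wg_index_table_add (vm, table, peer_idx, seed);

  /* ... key is advertised to the remote side as our sender index ... */

  wg_index_table_del (vm, table, key);
}
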
diff --git a/src/plugins/wireguard/wireguard_index_table.h b/src/plugins/wireguard/wireguard_index_table.h
index 67cae1f49d5..e9aa374c0ca 100755..100644
--- a/src/plugins/wireguard/wireguard_index_table.h
+++ b/src/plugins/wireguard/wireguard_index_table.h
@@ -16,6 +16,7 @@
#ifndef __included_wg_index_table_h__
#define __included_wg_index_table_h__
+#include <vlib/vlib.h>
#include <vppinfra/types.h>
typedef struct
@@ -23,9 +24,9 @@ typedef struct
uword *hash;
} wg_index_table_t;
-u32 wg_index_table_add (wg_index_table_t * table, u32 peer_pool_idx,
- u32 rnd_seed);
-void wg_index_table_del (wg_index_table_t * table, u32 key);
+u32 wg_index_table_add (vlib_main_t *vm, wg_index_table_t *table,
+ u32 peer_pool_idx, u32 rnd_seed);
+void wg_index_table_del (vlib_main_t *vm, wg_index_table_t *table, u32 key);
u32 *wg_index_table_lookup (const wg_index_table_t * table, u32 key);
#endif //__included_wg_index_table_h__
diff --git a/src/plugins/wireguard/wireguard_input.c b/src/plugins/wireguard/wireguard_input.c
index 5db814292f8..1eb7fbfed0b 100644
--- a/src/plugins/wireguard/wireguard_input.c
+++ b/src/plugins/wireguard/wireguard_input.c
@@ -25,14 +25,18 @@
#define foreach_wg_input_error \
_ (NONE, "No error") \
_ (HANDSHAKE_MAC, "Invalid MAC handshake") \
+ _ (HANDSHAKE_RATELIMITED, "Handshake ratelimited") \
_ (PEER, "Peer error") \
_ (INTERFACE, "Interface error") \
_ (DECRYPTION, "Failed during decryption") \
_ (KEEPALIVE_SEND, "Failed while sending Keepalive") \
_ (HANDSHAKE_SEND, "Failed while sending Handshake") \
_ (HANDSHAKE_RECEIVE, "Failed while receiving Handshake") \
- _ (TOO_BIG, "Packet too big") \
- _ (UNDEFINED, "Undefined error")
+ _ (COOKIE_DECRYPTION, "Failed during Cookie decryption") \
+ _ (COOKIE_SEND, "Failed during sending Cookie") \
+ _ (NO_BUFFERS, "No buffers") \
+ _ (UNDEFINED, "Undefined error") \
+ _ (CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)")
typedef enum
{
@@ -56,6 +60,12 @@ typedef struct
index_t peer;
} wg_input_trace_t;
+typedef struct
+{
+ index_t peer;
+ u16 next;
+} wg_input_post_trace_t;
+
u8 *
format_wg_message_type (u8 * s, va_list * args)
{
@@ -79,11 +89,27 @@ format_wg_input_trace (u8 * s, va_list * args)
wg_input_trace_t *t = va_arg (*args, wg_input_trace_t *);
- s = format (s, "WG input: \n");
- s = format (s, " Type: %U\n", format_wg_message_type, t->type);
- s = format (s, " peer: %d\n", t->peer);
- s = format (s, " Length: %d\n", t->current_length);
- s = format (s, " Keepalive: %s", t->is_keepalive ? "true" : "false");
+ s = format (s, "Wireguard input: \n");
+ s = format (s, " Type: %U\n", format_wg_message_type, t->type);
+ s = format (s, " Peer: %d\n", t->peer);
+ s = format (s, " Length: %d\n", t->current_length);
+ s = format (s, " Keepalive: %s", t->is_keepalive ? "true" : "false");
+
+ return s;
+}
+
+/* post-node packet trace format function */
+static u8 *
+format_wg_input_post_trace (u8 *s, va_list *args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+
+ wg_input_post_trace_t *t = va_arg (*args, wg_input_post_trace_t *);
+
+ s = format (s, "WG input post: \n");
+ s = format (s, " peer: %u\n", t->peer);
+ s = format (s, " next: %u\n", t->next);
return s;
}
@@ -93,48 +119,52 @@ typedef enum
WG_INPUT_NEXT_HANDOFF_HANDSHAKE,
WG_INPUT_NEXT_HANDOFF_DATA,
WG_INPUT_NEXT_IP4_INPUT,
+ WG_INPUT_NEXT_IP6_INPUT,
WG_INPUT_NEXT_PUNT,
WG_INPUT_NEXT_ERROR,
WG_INPUT_N_NEXT,
} wg_input_next_t;
-/* static void */
-/* set_peer_address (wg_peer_t * peer, ip4_address_t ip4, u16 udp_port) */
-/* { */
-/* if (peer) */
-/* { */
-/* ip46_address_set_ip4 (&peer->dst.addr, &ip4); */
-/* peer->dst.port = udp_port; */
-/* } */
-/* } */
+static u8
+is_ip4_header (u8 *data)
+{
+ return (data[0] >> 4) == 0x4;
+}
static wg_input_error_t
-wg_handshake_process (vlib_main_t * vm, wg_main_t * wmp, vlib_buffer_t * b)
+wg_handshake_process (vlib_main_t *vm, wg_main_t *wmp, vlib_buffer_t *b,
+ u32 node_idx, u8 is_ip4)
{
ASSERT (vm->thread_index == 0);
enum cookie_mac_state mac_state;
bool packet_needs_cookie;
bool under_load;
+ index_t *wg_ifs;
wg_if_t *wg_if;
wg_peer_t *peer = NULL;
void *current_b_data = vlib_buffer_get_current (b);
+ ip46_address_t src_ip;
+ if (is_ip4)
+ {
+ ip4_header_t *iph4 =
+ current_b_data - sizeof (udp_header_t) - sizeof (ip4_header_t);
+ ip46_address_set_ip4 (&src_ip, &iph4->src_address);
+ }
+ else
+ {
+ ip6_header_t *iph6 =
+ current_b_data - sizeof (udp_header_t) - sizeof (ip6_header_t);
+ ip46_address_set_ip6 (&src_ip, &iph6->src_address);
+ }
+
udp_header_t *uhd = current_b_data - sizeof (udp_header_t);
- ip4_header_t *iph =
- current_b_data - sizeof (udp_header_t) - sizeof (ip4_header_t);
- ip4_address_t ip4_src = iph->src_address;
- u16 udp_src_port = clib_host_to_net_u16 (uhd->src_port);;
- u16 udp_dst_port = clib_host_to_net_u16 (uhd->dst_port);;
+ u16 udp_src_port = clib_host_to_net_u16 (uhd->src_port);
+ u16 udp_dst_port = clib_host_to_net_u16 (uhd->dst_port);
message_header_t *header = current_b_data;
- under_load = false;
-
- wg_if = wg_if_get_by_port (udp_dst_port);
-
- if (NULL == wg_if)
- return WG_INPUT_ERROR_INTERFACE;
if (PREDICT_FALSE (header->type == MESSAGE_HANDSHAKE_COOKIE))
{
@@ -147,7 +177,9 @@ wg_handshake_process (vlib_main_t * vm, wg_main_t * wmp, vlib_buffer_t * b)
else
return WG_INPUT_ERROR_PEER;
- // TODO: Implement cookie_maker_consume_payload
+ if (!cookie_maker_consume_payload (
+ vm, &peer->cookie_maker, packet->nonce, packet->encrypted_cookie))
+ return WG_INPUT_ERROR_COOKIE_DECRYPTION;
return WG_INPUT_ERROR_NONE;
}
@@ -159,16 +191,40 @@ wg_handshake_process (vlib_main_t * vm, wg_main_t * wmp, vlib_buffer_t * b)
message_macs_t *macs = (message_macs_t *)
((u8 *) current_b_data + len - sizeof (*macs));
- mac_state =
- cookie_checker_validate_macs (vm, &wg_if->cookie_checker, macs,
- current_b_data, len, under_load, ip4_src,
- udp_src_port);
+ index_t *ii;
+ wg_ifs = wg_if_indexes_get_by_port (udp_dst_port);
+ if (NULL == wg_ifs)
+ return WG_INPUT_ERROR_INTERFACE;
+
+ vec_foreach (ii, wg_ifs)
+ {
+ wg_if = wg_if_get (*ii);
+ if (NULL == wg_if)
+ continue;
+
+ under_load = wg_if_is_under_load (vm, wg_if);
+ mac_state = cookie_checker_validate_macs (
+ vm, &wg_if->cookie_checker, macs, current_b_data, len, under_load,
+ &src_ip, udp_src_port);
+ if (mac_state == INVALID_MAC)
+ {
+ wg_if_dec_handshake_num (wg_if);
+ wg_if = NULL;
+ continue;
+ }
+ break;
+ }
+
+ if (NULL == wg_if)
+ return WG_INPUT_ERROR_HANDSHAKE_MAC;
if ((under_load && mac_state == VALID_MAC_WITH_COOKIE)
|| (!under_load && mac_state == VALID_MAC_BUT_NO_COOKIE))
packet_needs_cookie = false;
else if (under_load && mac_state == VALID_MAC_BUT_NO_COOKIE)
packet_needs_cookie = true;
+ else if (mac_state == VALID_MAC_WITH_COOKIE_BUT_RATELIMITED)
+ return WG_INPUT_ERROR_HANDSHAKE_RATELIMITED;
else
return WG_INPUT_ERROR_HANDSHAKE_MAC;
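
The branches above amount to a small decision table over (under_load, mac_state). A hedged restatement as a hypothetical helper, purely to spell the cases out (not part of the plugin):

static inline wg_input_error_t
wg_example_cookie_decision (bool under_load, enum cookie_mac_state mac_state,
                            bool *needs_cookie)
{
  *needs_cookie = false;

  if ((under_load && mac_state == VALID_MAC_WITH_COOKIE) ||
      (!under_load && mac_state == VALID_MAC_BUT_NO_COOKIE))
    return WG_INPUT_ERROR_NONE;          /* accept the handshake as-is */
  if (under_load && mac_state == VALID_MAC_BUT_NO_COOKIE)
    {
      *needs_cookie = true;              /* reply with a cookie instead */
      return WG_INPUT_ERROR_NONE;
    }
  if (mac_state == VALID_MAC_WITH_COOKIE_BUT_RATELIMITED)
    return WG_INPUT_ERROR_HANDSHAKE_RATELIMITED;
  return WG_INPUT_ERROR_HANDSHAKE_MAC;   /* invalid mac1/mac2 */
}
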
@@ -180,8 +236,16 @@ wg_handshake_process (vlib_main_t * vm, wg_main_t * wmp, vlib_buffer_t * b)
if (packet_needs_cookie)
{
- // TODO: Add processing
+
+ if (!wg_send_handshake_cookie (vm, message->sender_index,
+ &wg_if->cookie_checker, macs,
+ &ip_addr_46 (&wg_if->src_ip),
+ wg_if->port, &src_ip, udp_src_port))
+ return WG_INPUT_ERROR_COOKIE_SEND;
+
+ return WG_INPUT_ERROR_NONE;
}
+
noise_remote_t *rp;
if (noise_consume_initiation
(vm, noise_local_get (wg_if->local_idx), &rp,
@@ -195,10 +259,11 @@ wg_handshake_process (vlib_main_t * vm, wg_main_t * wmp, vlib_buffer_t * b)
return WG_INPUT_ERROR_PEER;
}
- // set_peer_address (peer, ip4_src, udp_src_port);
+ wg_peer_update_endpoint (rp->r_peer_idx, &src_ip, udp_src_port);
+
if (PREDICT_FALSE (!wg_send_handshake_response (vm, peer)))
{
- vlib_node_increment_counter (vm, wg_input_node.index,
+ vlib_node_increment_counter (vm, node_idx,
WG_INPUT_ERROR_HANDSHAKE_SEND, 1);
}
break;
@@ -206,13 +271,27 @@ wg_handshake_process (vlib_main_t * vm, wg_main_t * wmp, vlib_buffer_t * b)
case MESSAGE_HANDSHAKE_RESPONSE:
{
message_handshake_response_t *resp = current_b_data;
+
+ if (packet_needs_cookie)
+ {
+ if (!wg_send_handshake_cookie (vm, resp->sender_index,
+ &wg_if->cookie_checker, macs,
+ &ip_addr_46 (&wg_if->src_ip),
+ wg_if->port, &src_ip, udp_src_port))
+ return WG_INPUT_ERROR_COOKIE_SEND;
+
+ return WG_INPUT_ERROR_NONE;
+ }
+
+ index_t peeri = INDEX_INVALID;
u32 *entry =
wg_index_table_lookup (&wmp->index_table, resp->receiver_index);
if (PREDICT_TRUE (entry != NULL))
{
- peer = wg_peer_get (*entry);
- if (peer->is_dead)
+ peeri = *entry;
+ peer = wg_peer_get (peeri);
+ if (wg_peer_is_dead (peer))
return WG_INPUT_ERROR_PEER;
}
else
@@ -225,12 +304,9 @@ wg_handshake_process (vlib_main_t * vm, wg_main_t * wmp, vlib_buffer_t * b)
{
return WG_INPUT_ERROR_PEER;
}
- if (packet_needs_cookie)
- {
- // TODO: Add processing
- }
- // set_peer_address (peer, ip4_src, udp_src_port);
+ wg_peer_update_endpoint (peeri, &src_ip, udp_src_port);
+
if (noise_remote_begin_session (vm, &peer->remote))
{
@@ -238,9 +314,12 @@ wg_handshake_process (vlib_main_t * vm, wg_main_t * wmp, vlib_buffer_t * b)
wg_timers_handshake_complete (peer);
if (PREDICT_FALSE (!wg_send_keepalive (vm, peer)))
{
- vlib_node_increment_counter (vm, wg_input_node.index,
- WG_INPUT_ERROR_KEEPALIVE_SEND,
- 1);
+ vlib_node_increment_counter (vm, node_idx,
+ WG_INPUT_ERROR_KEEPALIVE_SEND, 1);
+ }
+ else
+ {
+ wg_peer_update_flags (peeri, WG_PEER_ESTABLISHED, true);
}
}
break;
@@ -254,68 +333,450 @@ wg_handshake_process (vlib_main_t * vm, wg_main_t * wmp, vlib_buffer_t * b)
return WG_INPUT_ERROR_NONE;
}
-static_always_inline bool
-fib_prefix_is_cover_addr_4 (const fib_prefix_t * p1,
- const ip4_address_t * ip4)
+static_always_inline int
+wg_input_post_process (vlib_main_t *vm, vlib_buffer_t *b, u16 *next,
+ wg_peer_t *peer, message_data_t *data,
+ bool *is_keepalive)
{
- switch (p1->fp_proto)
+ next[0] = WG_INPUT_NEXT_PUNT;
+ noise_keypair_t *kp;
+ vlib_buffer_t *lb;
+
+ if ((kp = wg_get_active_keypair (&peer->remote, data->receiver_index)) ==
+ NULL)
+ return -1;
+
+ if (!noise_counter_recv (&kp->kp_ctr, data->counter))
{
- case FIB_PROTOCOL_IP4:
- return (ip4_destination_matches_route (&ip4_main,
- &p1->fp_addr.ip4,
- ip4, p1->fp_len) != 0);
- case FIB_PROTOCOL_IP6:
- return (false);
- case FIB_PROTOCOL_MPLS:
- break;
+ return -1;
+ }
+
+ lb = b;
+ /* Find last buffer in the chain */
+ while (lb->flags & VLIB_BUFFER_NEXT_PRESENT)
+ lb = vlib_get_buffer (vm, lb->next_buffer);
+
+ u16 encr_len = vlib_buffer_length_in_chain (vm, b) - sizeof (message_data_t);
+ u16 decr_len = encr_len - NOISE_AUTHTAG_LEN;
+
+ vlib_buffer_advance (b, sizeof (message_data_t));
+ vlib_buffer_chain_increase_length (b, lb, -NOISE_AUTHTAG_LEN);
+ vnet_buffer_offload_flags_clear (b, VNET_BUFFER_OFFLOAD_F_UDP_CKSUM);
+
+ /* Keepalive packet has zero length */
+ if (decr_len == 0)
+ {
+ *is_keepalive = true;
+ return 0;
+ }
+
+ wg_timers_data_received (peer);
+
+ ip46_address_t src_ip;
+ u8 is_ip4_inner = is_ip4_header (vlib_buffer_get_current (b));
+ if (is_ip4_inner)
+ {
+ ip46_address_set_ip4 (
+ &src_ip, &((ip4_header_t *) vlib_buffer_get_current (b))->src_address);
+ }
+ else
+ {
+ ip46_address_set_ip6 (
+ &src_ip, &((ip6_header_t *) vlib_buffer_get_current (b))->src_address);
}
- return (false);
+
+ const fib_prefix_t *allowed_ip;
+ bool allowed = false;
+
+ /*
+ * we could make this into an ACL, but the expectation
+ * is that there aren't many allowed IPs and thus a linear
+ * walk is faster than an ACL
+ */
+ vec_foreach (allowed_ip, peer->allowed_ips)
+ {
+ if (fib_prefix_is_cover_addr_46 (allowed_ip, &src_ip))
+ {
+ allowed = true;
+ break;
+ }
+ }
+ if (allowed)
+ {
+ vnet_buffer (b)->sw_if_index[VLIB_RX] = peer->wg_sw_if_index;
+ next[0] =
+ is_ip4_inner ? WG_INPUT_NEXT_IP4_INPUT : WG_INPUT_NEXT_IP6_INPUT;
+ }
+
+ return 0;
}
-VLIB_NODE_FN (wg_input_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+static_always_inline void
+wg_input_process_ops (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vnet_crypto_op_t *ops, vlib_buffer_t *b[], u16 *nexts,
+ u16 drop_next)
{
- message_type_t header_type;
- u32 n_left_from;
- u32 *from;
- vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
- u16 nexts[VLIB_FRAME_SIZE], *next;
- u32 thread_index = vm->thread_index;
+ u32 n_fail, n_ops = vec_len (ops);
+ vnet_crypto_op_t *op = ops;
- from = vlib_frame_vector_args (frame);
- n_left_from = frame->n_vectors;
- b = bufs;
- next = nexts;
+ if (n_ops == 0)
+ return;
- vlib_get_buffers (vm, from, bufs, n_left_from);
+ n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);
+
+ while (n_fail)
+ {
+ ASSERT (op - ops < n_ops);
+
+ if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
+ {
+ u32 bi = op->user_data;
+ b[bi]->error = node->errors[WG_INPUT_ERROR_DECRYPTION];
+ nexts[bi] = drop_next;
+ n_fail--;
+ }
+ op++;
+ }
+}
+
+static_always_inline void
+wg_input_process_chained_ops (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vnet_crypto_op_t *ops, vlib_buffer_t *b[],
+ u16 *nexts, vnet_crypto_op_chunk_t *chunks,
+ u16 drop_next)
+{
+ u32 n_fail, n_ops = vec_len (ops);
+ vnet_crypto_op_t *op = ops;
+ if (n_ops == 0)
+ return;
+
+ n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);
+
+ while (n_fail)
+ {
+ ASSERT (op - ops < n_ops);
+
+ if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
+ {
+ u32 bi = op->user_data;
+ b[bi]->error = node->errors[WG_INPUT_ERROR_DECRYPTION];
+ nexts[bi] = drop_next;
+ n_fail--;
+ }
+ op++;
+ }
+}
+
+static_always_inline void
+wg_input_chain_crypto (vlib_main_t *vm, wg_per_thread_data_t *ptd,
+ vlib_buffer_t *b, vlib_buffer_t *lb, u8 *start,
+ u32 start_len, u16 *n_ch)
+{
+ vnet_crypto_op_chunk_t *ch;
+ vlib_buffer_t *cb = b;
+ u32 n_chunks = 1;
+
+ vec_add2 (ptd->chunks, ch, 1);
+ ch->len = start_len;
+ ch->src = ch->dst = start;
+ cb = vlib_get_buffer (vm, cb->next_buffer);
+
+ while (1)
+ {
+ vec_add2 (ptd->chunks, ch, 1);
+ n_chunks += 1;
+ if (lb == cb)
+ ch->len = cb->current_length - NOISE_AUTHTAG_LEN;
+ else
+ ch->len = cb->current_length;
+
+ ch->src = ch->dst = vlib_buffer_get_current (cb);
+
+ if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
+ break;
+
+ cb = vlib_get_buffer (vm, cb->next_buffer);
+ }
+
+ if (n_ch)
+ *n_ch = n_chunks;
+}
+
+always_inline void
+wg_prepare_sync_dec_op (vlib_main_t *vm, wg_per_thread_data_t *ptd,
+ vlib_buffer_t *b, vlib_buffer_t *lb,
+ vnet_crypto_op_t **crypto_ops, u8 *src, u32 src_len,
+ u8 *dst, u8 *aad, u32 aad_len,
+ vnet_crypto_key_index_t key_index, u32 bi, u8 *iv)
+{
+ vnet_crypto_op_t _op, *op = &_op;
+ u8 src_[] = {};
+
+ vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
+ vnet_crypto_op_init (op, VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC);
+
+ op->tag_len = NOISE_AUTHTAG_LEN;
+ op->tag = vlib_buffer_get_tail (lb) - NOISE_AUTHTAG_LEN;
+ op->key_index = key_index;
+ op->aad = aad;
+ op->aad_len = aad_len;
+ op->iv = iv;
+ op->user_data = bi;
+ op->flags |= VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
+
+ if (b != lb)
+ {
+ /* Chained buffers */
+ op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
+ op->chunk_index = vec_len (ptd->chunks);
+ wg_input_chain_crypto (vm, ptd, b, lb, src, src_len + NOISE_AUTHTAG_LEN,
+ &op->n_chunks);
+ }
+ else
+ {
+ op->src = !src ? src_ : src;
+ op->len = src_len;
+ op->dst = dst;
+ }
+}
+
+static_always_inline void
+wg_input_add_to_frame (vlib_main_t *vm, vnet_crypto_async_frame_t *f,
+ u32 key_index, u32 crypto_len, i16 crypto_start_offset,
+ u32 buffer_index, u16 next_node, u8 *iv, u8 *tag,
+ u8 flags)
+{
+ vnet_crypto_async_frame_elt_t *fe;
+ u16 index;
+
+ ASSERT (f->n_elts < VNET_CRYPTO_FRAME_SIZE);
+
+ index = f->n_elts;
+ fe = &f->elts[index];
+ f->n_elts++;
+ fe->key_index = key_index;
+ fe->crypto_total_length = crypto_len;
+ fe->crypto_start_offset = crypto_start_offset;
+ fe->iv = iv;
+ fe->tag = tag;
+ fe->flags = flags;
+ f->buffer_indices[index] = buffer_index;
+ f->next_node_index[index] = next_node;
+}
+
+static_always_inline enum noise_state_crypt
+wg_input_process (vlib_main_t *vm, wg_per_thread_data_t *ptd,
+ vnet_crypto_op_t **crypto_ops,
+ vnet_crypto_async_frame_t **async_frame, vlib_buffer_t *b,
+ vlib_buffer_t *lb, u32 buf_idx, noise_remote_t *r,
+ uint32_t r_idx, uint64_t nonce, uint8_t *src, size_t srclen,
+ size_t srclen_total, uint8_t *dst, u32 from_idx, u8 *iv,
+ f64 time, u8 is_async, u16 async_next_node)
+{
+ noise_keypair_t *kp;
+ enum noise_state_crypt ret = SC_FAILED;
+
+ if ((kp = wg_get_active_keypair (r, r_idx)) == NULL)
+ {
+ goto error;
+ }
+
+ /* We confirm that our values are within our tolerances. These values
+ * are the same as the encrypt routine.
+ *
+ * kp_ctr isn't locked here, we're happy to accept a racy read. */
+ if (wg_birthdate_has_expired_opt (kp->kp_birthdate, REJECT_AFTER_TIME,
+ time) ||
+ kp->kp_ctr.c_recv >= REJECT_AFTER_MESSAGES)
+ goto error;
+
+ /* Decrypt, then validate the counter. We don't want to validate the
+ * counter before decrypting as we do not know the message is authentic
+ * prior to decryption. */
+
+ clib_memset (iv, 0, 4);
+ clib_memcpy (iv + 4, &nonce, sizeof (nonce));
+
+ if (is_async)
+ {
+ u8 flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
+ u8 *tag = vlib_buffer_get_tail (lb) - NOISE_AUTHTAG_LEN;
+
+ if (b != lb)
+ flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
+
+ if (NULL == *async_frame ||
+ vnet_crypto_async_frame_is_full (*async_frame))
+ {
+ *async_frame = vnet_crypto_async_get_frame (
+ vm, VNET_CRYPTO_OP_CHACHA20_POLY1305_TAG16_AAD0_DEC);
+ if (PREDICT_FALSE (NULL == *async_frame))
+ goto error;
+ /* Save the frame to the list we'll submit at the end */
+ vec_add1 (ptd->async_frames, *async_frame);
+ }
+
+ wg_input_add_to_frame (vm, *async_frame, kp->kp_recv_index, srclen_total,
+ src - b->data, buf_idx, async_next_node, iv, tag,
+ flags);
+ }
+ else
+ {
+ wg_prepare_sync_dec_op (vm, ptd, b, lb, crypto_ops, src, srclen, dst,
+ NULL, 0, kp->kp_recv_index, from_idx, iv);
+ }
+
+ /* If we've received the handshake confirming data packet then move the
+ * next keypair into current. If we do slide the next keypair in, then
+ * we skip the REKEY_AFTER_TIME_RECV check. This is safe to do as a
+ * data packet can't confirm a session that we are an INITIATOR of. */
+ if (kp == r->r_next)
+ {
+ clib_rwlock_writer_lock (&r->r_keypair_lock);
+ if (kp == r->r_next && kp->kp_local_index == r_idx)
+ {
+ noise_remote_keypair_free (vm, r, &r->r_previous);
+ r->r_previous = r->r_current;
+ r->r_current = r->r_next;
+ r->r_next = NULL;
+
+ ret = SC_CONN_RESET;
+ clib_rwlock_writer_unlock (&r->r_keypair_lock);
+ goto error;
+ }
+ clib_rwlock_writer_unlock (&r->r_keypair_lock);
+ }
+
+ /* Similar to when we encrypt, we want to notify the caller when we
+ * are approaching our tolerances. We notify if:
+ * - we're the initiator and the current keypair is older than
+ * REKEY_AFTER_TIME_RECV seconds. */
+ ret = SC_KEEP_KEY_FRESH;
+ kp = r->r_current;
+ if (kp != NULL && kp->kp_valid && kp->kp_is_initiator &&
+ wg_birthdate_has_expired_opt (kp->kp_birthdate, REKEY_AFTER_TIME_RECV,
+ time))
+ goto error;
+
+ ret = SC_OK;
+error:
+ return ret;
+}
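
The IV handling in wg_input_process() pins down the AEAD nonce format for data packets: ChaCha20-Poly1305 takes a 96-bit nonce whose first 32 bits are zero and whose remaining 64 bits carry the per-message counter from the data header. A small illustrative helper (not plugin code), matching the clib_memset()/clib_memcpy() pair above:

static inline void
wg_example_build_iv (u8 iv[12], u64 counter)
{
  clib_memset (iv, 0, 4);                           /* 4 zero bytes */
  clib_memcpy (iv + 4, &counter, sizeof (counter)); /* 8-byte counter */
}
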
+
+static_always_inline void
+wg_find_outer_addr_port (vlib_buffer_t *b, ip46_address_t *addr, u16 *port,
+ u8 is_ip4)
+{
+ if (is_ip4)
+ {
+ ip4_udp_header_t *ip4_udp_hdr =
+ vlib_buffer_get_current (b) - sizeof (ip4_udp_header_t);
+ ip46_address_set_ip4 (addr, &ip4_udp_hdr->ip4.src_address);
+ *port = clib_net_to_host_u16 (ip4_udp_hdr->udp.src_port);
+ }
+ else
+ {
+ ip6_udp_header_t *ip6_udp_hdr =
+ vlib_buffer_get_current (b) - sizeof (ip6_udp_header_t);
+ ip46_address_set_ip6 (addr, &ip6_udp_hdr->ip6.src_address);
+ *port = clib_net_to_host_u16 (ip6_udp_hdr->udp.src_port);
+ }
+}
+
+always_inline uword
+wg_input_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame, u8 is_ip4, u16 async_next_node)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
wg_main_t *wmp = &wg_main;
+ wg_per_thread_data_t *ptd =
+ vec_elt_at_index (wmp->per_thread_data, vm->thread_index);
+ u32 *from = vlib_frame_vector_args (frame);
+ u32 n_left_from = frame->n_vectors;
+
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+ vlib_buffer_t *lb;
+ u32 thread_index = vm->thread_index;
+ vnet_crypto_op_t **crypto_ops;
+ const u16 drop_next = WG_INPUT_NEXT_PUNT;
+ message_type_t header_type;
+ vlib_buffer_t *data_bufs[VLIB_FRAME_SIZE];
+ u32 data_bi[VLIB_FRAME_SIZE]; /* buffer index for data */
+ u32 other_bi[VLIB_FRAME_SIZE]; /* buffer index for drop or handoff */
+ u16 other_nexts[VLIB_FRAME_SIZE], *other_next = other_nexts, n_other = 0;
+ u16 data_nexts[VLIB_FRAME_SIZE], *data_next = data_nexts, n_data = 0;
+ u16 n_async = 0;
+ const u8 is_async = wg_op_mode_is_set_ASYNC ();
+ vnet_crypto_async_frame_t *async_frame = NULL;
+
+ vlib_get_buffers (vm, from, bufs, n_left_from);
+ vec_reset_length (ptd->crypto_ops);
+ vec_reset_length (ptd->chained_crypto_ops);
+ vec_reset_length (ptd->chunks);
+ vec_reset_length (ptd->async_frames);
+
+ f64 time = clib_time_now (&vm->clib_time) + vm->time_offset;
+
wg_peer_t *peer = NULL;
+ u32 *last_peer_time_idx = NULL;
+ u32 last_rec_idx = ~0;
+
+ bool is_keepalive = false;
+ u32 *peer_idx = NULL;
+ index_t peeri = INDEX_INVALID;
while (n_left_from > 0)
{
- bool is_keepalive = false;
- next[0] = WG_INPUT_NEXT_PUNT;
+ if (n_left_from > 2)
+ {
+ u8 *p;
+ vlib_prefetch_buffer_header (b[2], LOAD);
+ p = vlib_buffer_get_current (b[1]);
+ CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (vlib_buffer_get_tail (b[1]), CLIB_CACHE_LINE_BYTES,
+ LOAD);
+ }
+
+ other_next[n_other] = WG_INPUT_NEXT_PUNT;
+ data_nexts[n_data] = WG_INPUT_N_NEXT;
+
header_type =
((message_header_t *) vlib_buffer_get_current (b[0]))->type;
- u32 *peer_idx;
if (PREDICT_TRUE (header_type == MESSAGE_DATA))
{
message_data_t *data = vlib_buffer_get_current (b[0]);
-
+ u8 *iv_data = b[0]->pre_data;
+ u32 buf_idx = from[b - bufs];
+ u32 n_bufs;
peer_idx = wg_index_table_lookup (&wmp->index_table,
data->receiver_index);
- if (peer_idx)
+ if (data->receiver_index != last_rec_idx)
{
- peer = wg_peer_get (*peer_idx);
+ peer_idx = wg_index_table_lookup (&wmp->index_table,
+ data->receiver_index);
+ if (PREDICT_TRUE (peer_idx != NULL))
+ {
+ peeri = *peer_idx;
+ peer = wg_peer_get (peeri);
+ last_rec_idx = data->receiver_index;
+ }
+ else
+ {
+ peer = NULL;
+ last_rec_idx = ~0;
+ }
}
- else
+
+ if (PREDICT_FALSE (!peer_idx))
{
- next[0] = WG_INPUT_NEXT_ERROR;
+ other_next[n_other] = WG_INPUT_NEXT_ERROR;
b[0]->error = node->errors[WG_INPUT_ERROR_PEER];
+ other_bi[n_other] = buf_idx;
+ n_other += 1;
goto out;
}
@@ -330,128 +791,445 @@ VLIB_NODE_FN (wg_input_node) (vlib_main_t * vm,
if (PREDICT_TRUE (thread_index != peer->input_thread_index))
{
- next[0] = WG_INPUT_NEXT_HANDOFF_DATA;
+ other_next[n_other] = WG_INPUT_NEXT_HANDOFF_DATA;
+ other_bi[n_other] = buf_idx;
+ n_other += 1;
goto next;
}
- u16 encr_len = b[0]->current_length - sizeof (message_data_t);
- u16 decr_len = encr_len - NOISE_AUTHTAG_LEN;
- if (PREDICT_FALSE (decr_len >= WG_DEFAULT_DATA_SIZE))
+ lb = b[0];
+ n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
+ if (n_bufs == 0)
{
- b[0]->error = node->errors[WG_INPUT_ERROR_TOO_BIG];
+ other_next[n_other] = WG_INPUT_NEXT_ERROR;
+ b[0]->error = node->errors[WG_INPUT_ERROR_NO_BUFFERS];
+ other_bi[n_other] = buf_idx;
+ n_other += 1;
goto out;
}
- u8 *decr_data = wmp->per_thread_data[thread_index].data;
+ if (n_bufs > 1)
+ {
+ vlib_buffer_t *before_last = b[0];
+
+ /* Find the last and the second-to-last buffer in the chain */
+ while (lb->flags & VLIB_BUFFER_NEXT_PRESENT)
+ {
+ before_last = lb;
+ lb = vlib_get_buffer (vm, lb->next_buffer);
+ }
+
+ /* Ensure the auth tag is contiguous and not split across the last
+ * two buffers */
+ if (PREDICT_FALSE (lb->current_length < NOISE_AUTHTAG_LEN))
+ {
+ u32 len_diff = NOISE_AUTHTAG_LEN - lb->current_length;
+
+ before_last->current_length -= len_diff;
+ if (before_last == b[0])
+ before_last->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
+
+ vlib_buffer_advance (lb, (signed) -len_diff);
+
+ clib_memcpy_fast (vlib_buffer_get_current (lb),
+ vlib_buffer_get_tail (before_last),
+ len_diff);
+ }
+ }
+
+ u16 encr_len = b[0]->current_length - sizeof (message_data_t);
+ u16 decr_len = encr_len - NOISE_AUTHTAG_LEN;
+ u16 encr_len_total =
+ vlib_buffer_length_in_chain (vm, b[0]) - sizeof (message_data_t);
+ u16 decr_len_total = encr_len_total - NOISE_AUTHTAG_LEN;
+
+ if (lb != b[0])
+ crypto_ops = &ptd->chained_crypto_ops;
+ else
+ crypto_ops = &ptd->crypto_ops;
- enum noise_state_crypt state_cr = noise_remote_decrypt (vm,
- &peer->remote,
- data->receiver_index,
- data->counter,
- data->encrypted_data,
- encr_len,
- decr_data);
+ enum noise_state_crypt state_cr =
+ wg_input_process (vm, ptd, crypto_ops, &async_frame, b[0], lb,
+ buf_idx, &peer->remote, data->receiver_index,
+ data->counter, data->encrypted_data, decr_len,
+ decr_len_total, data->encrypted_data, n_data,
+ iv_data, time, is_async, async_next_node);
- if (PREDICT_FALSE (state_cr == SC_CONN_RESET))
+ if (PREDICT_FALSE (state_cr == SC_FAILED))
{
- wg_timers_handshake_complete (peer);
+ wg_peer_update_flags (*peer_idx, WG_PEER_ESTABLISHED, false);
+ other_next[n_other] = WG_INPUT_NEXT_ERROR;
+ b[0]->error = node->errors[WG_INPUT_ERROR_DECRYPTION];
+ other_bi[n_other] = buf_idx;
+ n_other += 1;
+ goto out;
}
- else if (PREDICT_FALSE (state_cr == SC_KEEP_KEY_FRESH))
+ if (!is_async)
{
- wg_send_handshake_from_mt (*peer_idx, false);
+ data_bufs[n_data] = b[0];
+ data_bi[n_data] = buf_idx;
+ n_data += 1;
}
- else if (PREDICT_FALSE (state_cr == SC_FAILED))
+ else
{
- next[0] = WG_INPUT_NEXT_ERROR;
- b[0]->error = node->errors[WG_INPUT_ERROR_DECRYPTION];
- goto out;
+ n_async += 1;
}
- clib_memcpy (vlib_buffer_get_current (b[0]), decr_data, decr_len);
- b[0]->current_length = decr_len;
- vnet_buffer_offload_flags_clear (b[0],
- VNET_BUFFER_OFFLOAD_F_UDP_CKSUM);
-
- wg_timers_any_authenticated_packet_received (peer);
- wg_timers_any_authenticated_packet_traversal (peer);
-
- /* Keepalive packet has zero length */
- if (decr_len == 0)
+ if (PREDICT_FALSE (state_cr == SC_CONN_RESET))
{
- is_keepalive = true;
- goto out;
+ wg_timers_handshake_complete (peer);
+ goto next;
}
-
- wg_timers_data_received (peer);
-
- ip4_header_t *iph = vlib_buffer_get_current (b[0]);
-
- const wg_peer_allowed_ip_t *allowed_ip;
- bool allowed = false;
-
- /*
- * we could make this into an ACL, but the expectation
- * is that there aren't many allowed IPs and thus a linear
- * walk is fater than an ACL
- */
- vec_foreach (allowed_ip, peer->allowed_ips)
- {
- if (fib_prefix_is_cover_addr_4 (&allowed_ip->prefix,
- &iph->src_address))
- {
- allowed = true;
- break;
- }
- }
- if (allowed)
+ else if (PREDICT_FALSE (state_cr == SC_KEEP_KEY_FRESH))
{
- vnet_buffer (b[0])->sw_if_index[VLIB_RX] = peer->wg_sw_if_index;
- next[0] = WG_INPUT_NEXT_IP4_INPUT;
+ wg_send_handshake_from_mt (peeri, false);
+ goto next;
}
+ else if (PREDICT_TRUE (state_cr == SC_OK))
+ goto next;
}
else
{
- peer_idx = NULL;
-
/* Handshake packets should be processed in main thread */
if (thread_index != 0)
{
- next[0] = WG_INPUT_NEXT_HANDOFF_HANDSHAKE;
+ other_next[n_other] = WG_INPUT_NEXT_HANDOFF_HANDSHAKE;
+ other_bi[n_other] = from[b - bufs];
+ n_other += 1;
goto next;
}
- wg_input_error_t ret = wg_handshake_process (vm, wmp, b[0]);
+ wg_input_error_t ret =
+ wg_handshake_process (vm, wmp, b[0], node->node_index, is_ip4);
if (ret != WG_INPUT_ERROR_NONE)
{
- next[0] = WG_INPUT_NEXT_ERROR;
+ other_next[n_other] = WG_INPUT_NEXT_ERROR;
b[0]->error = node->errors[ret];
+ other_bi[n_other] = from[b - bufs];
+ n_other += 1;
+ }
+ else
+ {
+ other_bi[n_other] = from[b - bufs];
+ n_other += 1;
}
}
out:
- if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
- && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
{
wg_input_trace_t *t = vlib_add_trace (vm, node, b[0], sizeof (*t));
t->type = header_type;
t->current_length = b[0]->current_length;
t->is_keepalive = is_keepalive;
- t->peer = peer_idx ? *peer_idx : INDEX_INVALID;
+ t->peer = peer_idx ? peeri : INDEX_INVALID;
}
+
next:
n_left_from -= 1;
- next += 1;
b += 1;
}
- vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
+
+ /* decrypt packets */
+ wg_input_process_ops (vm, node, ptd->crypto_ops, data_bufs, data_nexts,
+ drop_next);
+ wg_input_process_chained_ops (vm, node, ptd->chained_crypto_ops, data_bufs,
+ data_nexts, ptd->chunks, drop_next);
+
+ /* process after decryption */
+ b = data_bufs;
+ n_left_from = n_data;
+ last_rec_idx = ~0;
+ last_peer_time_idx = NULL;
+
+ while (n_left_from > 0)
+ {
+ bool is_keepalive = false;
+ u32 *peer_idx = NULL;
+
+ if (PREDICT_FALSE (data_next[0] == WG_INPUT_NEXT_PUNT))
+ {
+ goto trace;
+ }
+ if (n_left_from > 2)
+ {
+ u8 *p;
+ vlib_prefetch_buffer_header (b[2], LOAD);
+ p = vlib_buffer_get_current (b[1]);
+ CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (vlib_buffer_get_tail (b[1]), CLIB_CACHE_LINE_BYTES,
+ LOAD);
+ }
+
+ message_data_t *data = vlib_buffer_get_current (b[0]);
+ ip46_address_t out_src_ip;
+ u16 out_udp_src_port;
+
+ wg_find_outer_addr_port (b[0], &out_src_ip, &out_udp_src_port, is_ip4);
+
+ if (data->receiver_index != last_rec_idx)
+ {
+ peer_idx =
+ wg_index_table_lookup (&wmp->index_table, data->receiver_index);
+ if (PREDICT_TRUE (peer_idx != NULL))
+ {
+ peeri = *peer_idx;
+ peer = wg_peer_get (peeri);
+ last_rec_idx = data->receiver_index;
+ }
+ else
+ {
+ peer = NULL;
+ last_rec_idx = ~0;
+ }
+ }
+
+ if (PREDICT_TRUE (peer != NULL))
+ {
+ if (PREDICT_FALSE (wg_input_post_process (vm, b[0], data_next, peer,
+ data, &is_keepalive) < 0))
+ goto trace;
+ }
+ else
+ {
+ data_next[0] = WG_INPUT_NEXT_PUNT;
+ goto trace;
+ }
+
+ if (PREDICT_FALSE (peer_idx && (last_peer_time_idx != peer_idx)))
+ {
+ if (PREDICT_FALSE (
+ !ip46_address_is_equal (&peer->dst.addr, &out_src_ip) ||
+ peer->dst.port != out_udp_src_port))
+ wg_peer_update_endpoint_from_mt (peeri, &out_src_ip,
+ out_udp_src_port);
+ wg_timers_any_authenticated_packet_received_opt (peer, time);
+ wg_timers_any_authenticated_packet_traversal (peer);
+ wg_peer_update_flags (*peer_idx, WG_PEER_ESTABLISHED, true);
+ last_peer_time_idx = peer_idx;
+ }
+
+ vlib_increment_combined_counter (im->combined_sw_if_counters +
+ VNET_INTERFACE_COUNTER_RX,
+ vm->thread_index, peer->wg_sw_if_index,
+ 1 /* packets */, b[0]->current_length);
+
+ trace:
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ wg_input_trace_t *t = vlib_add_trace (vm, node, b[0], sizeof (*t));
+ t->type = header_type;
+ t->current_length = b[0]->current_length;
+ t->is_keepalive = is_keepalive;
+ t->peer = peer_idx ? peeri : INDEX_INVALID;
+ }
+
+ b += 1;
+ n_left_from -= 1;
+ data_next += 1;
+ }
+
+ if (n_async)
+ {
+ /* submit all of the open frames */
+ vnet_crypto_async_frame_t **async_frame;
+ vec_foreach (async_frame, ptd->async_frames)
+ {
+ if (PREDICT_FALSE (
+ vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0))
+ {
+ u32 n_drop = (*async_frame)->n_elts;
+ u32 *bi = (*async_frame)->buffer_indices;
+ u16 index = n_other;
+ while (n_drop--)
+ {
+ other_bi[index] = bi[0];
+ vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
+ other_nexts[index] = drop_next;
+ b->error = node->errors[WG_INPUT_ERROR_CRYPTO_ENGINE_ERROR];
+ bi++;
+ index++;
+ }
+ n_other += (*async_frame)->n_elts;
+
+ vnet_crypto_async_reset_frame (*async_frame);
+ vnet_crypto_async_free_frame (vm, *async_frame);
+ }
+ }
+ }
+
+ /* enqueue other bufs */
+ if (n_other)
+ vlib_buffer_enqueue_to_next (vm, node, other_bi, other_next, n_other);
+
+ /* enqueue data bufs */
+ if (n_data)
+ vlib_buffer_enqueue_to_next (vm, node, data_bi, data_nexts, n_data);
return frame->n_vectors;
}
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (wg_input_node) =
+always_inline uword
+wg_input_post (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame,
+ u8 is_ip4)
{
- .name = "wg-input",
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+ wg_main_t *wmp = &wg_main;
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+ u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
+ u32 *from = vlib_frame_vector_args (frame);
+ u32 n_left = frame->n_vectors;
+ wg_peer_t *peer = NULL;
+ u32 *peer_idx = NULL;
+ u32 *last_peer_time_idx = NULL;
+ index_t peeri = INDEX_INVALID;
+ u32 last_rec_idx = ~0;
+ f64 time = clib_time_now (&vm->clib_time) + vm->time_offset;
+
+ vlib_get_buffers (vm, from, b, n_left);
+
+ if (n_left >= 2)
+ {
+ vlib_prefetch_buffer_header (b[0], LOAD);
+ vlib_prefetch_buffer_header (b[1], LOAD);
+ }
+
+ while (n_left > 0)
+ {
+ if (n_left > 2)
+ {
+ u8 *p;
+ vlib_prefetch_buffer_header (b[2], LOAD);
+ p = vlib_buffer_get_current (b[1]);
+ CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ bool is_keepalive = false;
+ message_data_t *data = vlib_buffer_get_current (b[0]);
+ ip46_address_t out_src_ip;
+ u16 out_udp_src_port;
+
+ wg_find_outer_addr_port (b[0], &out_src_ip, &out_udp_src_port, is_ip4);
+
+ if (data->receiver_index != last_rec_idx)
+ {
+ peer_idx =
+ wg_index_table_lookup (&wmp->index_table, data->receiver_index);
+
+ if (PREDICT_TRUE (peer_idx != NULL))
+ {
+ peeri = *peer_idx;
+ peer = wg_peer_get (peeri);
+ last_rec_idx = data->receiver_index;
+ }
+ else
+ {
+ peer = NULL;
+ last_rec_idx = ~0;
+ }
+ }
+
+ if (PREDICT_TRUE (peer != NULL))
+ {
+ if (PREDICT_FALSE (wg_input_post_process (vm, b[0], next, peer, data,
+ &is_keepalive) < 0))
+ goto trace;
+ }
+ else
+ {
+ next[0] = WG_INPUT_NEXT_PUNT;
+ goto trace;
+ }
+
+ if (PREDICT_FALSE (peer_idx && (last_peer_time_idx != peer_idx)))
+ {
+ if (PREDICT_FALSE (
+ !ip46_address_is_equal (&peer->dst.addr, &out_src_ip) ||
+ peer->dst.port != out_udp_src_port))
+ wg_peer_update_endpoint_from_mt (peeri, &out_src_ip,
+ out_udp_src_port);
+ wg_timers_any_authenticated_packet_received_opt (peer, time);
+ wg_timers_any_authenticated_packet_traversal (peer);
+ wg_peer_update_flags (*peer_idx, WG_PEER_ESTABLISHED, true);
+ last_peer_time_idx = peer_idx;
+ }
+
+ vlib_increment_combined_counter (im->combined_sw_if_counters +
+ VNET_INTERFACE_COUNTER_RX,
+ vm->thread_index, peer->wg_sw_if_index,
+ 1 /* packets */, b[0]->current_length);
+
+ trace:
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ wg_input_post_trace_t *t =
+ vlib_add_trace (vm, node, b[0], sizeof (*t));
+ t->next = next[0];
+ t->peer = peer_idx ? peeri : INDEX_INVALID;
+ }
+
+ b += 1;
+ next += 1;
+ n_left -= 1;
+ }
+
+ vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
+ return frame->n_vectors;
+}
+
+VLIB_NODE_FN (wg4_input_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+{
+ return wg_input_inline (vm, node, frame, /* is_ip4 */ 1,
+ wg_decrypt_async_next.wg4_post_next);
+}
+
+VLIB_NODE_FN (wg6_input_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+{
+ return wg_input_inline (vm, node, frame, /* is_ip4 */ 0,
+ wg_decrypt_async_next.wg6_post_next);
+}
+
+VLIB_NODE_FN (wg4_input_post_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
+{
+ return wg_input_post (vm, node, from_frame, /* is_ip4 */ 1);
+}
+
+VLIB_NODE_FN (wg6_input_post_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
+{
+ return wg_input_post (vm, node, from_frame, /* is_ip4 */ 0);
+}
+
+VLIB_REGISTER_NODE (wg4_input_node) =
+{
+ .name = "wg4-input",
+ .vector_size = sizeof (u32),
+ .format_trace = format_wg_input_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN (wg_input_error_strings),
+ .error_strings = wg_input_error_strings,
+ .n_next_nodes = WG_INPUT_N_NEXT,
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [WG_INPUT_NEXT_HANDOFF_HANDSHAKE] = "wg4-handshake-handoff",
+ [WG_INPUT_NEXT_HANDOFF_DATA] = "wg4-input-data-handoff",
+ [WG_INPUT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
+ [WG_INPUT_NEXT_IP6_INPUT] = "ip6-input",
+ [WG_INPUT_NEXT_PUNT] = "error-punt",
+ [WG_INPUT_NEXT_ERROR] = "error-drop",
+ },
+};
+
+VLIB_REGISTER_NODE (wg6_input_node) =
+{
+ .name = "wg6-input",
.vector_size = sizeof (u32),
.format_trace = format_wg_input_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
@@ -460,14 +1238,37 @@ VLIB_REGISTER_NODE (wg_input_node) =
.n_next_nodes = WG_INPUT_N_NEXT,
/* edit / add dispositions here */
.next_nodes = {
- [WG_INPUT_NEXT_HANDOFF_HANDSHAKE] = "wg-handshake-handoff",
- [WG_INPUT_NEXT_HANDOFF_DATA] = "wg-input-data-handoff",
+ [WG_INPUT_NEXT_HANDOFF_HANDSHAKE] = "wg6-handshake-handoff",
+ [WG_INPUT_NEXT_HANDOFF_DATA] = "wg6-input-data-handoff",
[WG_INPUT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
+ [WG_INPUT_NEXT_IP6_INPUT] = "ip6-input",
[WG_INPUT_NEXT_PUNT] = "error-punt",
[WG_INPUT_NEXT_ERROR] = "error-drop",
},
};
-/* *INDENT-ON* */
+
+VLIB_REGISTER_NODE (wg4_input_post_node) = {
+ .name = "wg4-input-post-node",
+ .vector_size = sizeof (u32),
+ .format_trace = format_wg_input_post_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .sibling_of = "wg4-input",
+
+ .n_errors = ARRAY_LEN (wg_input_error_strings),
+ .error_strings = wg_input_error_strings,
+};
+
+VLIB_REGISTER_NODE (wg6_input_post_node) = {
+ .name = "wg6-input-post-node",
+ .vector_size = sizeof (u32),
+ .format_trace = format_wg_input_post_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .sibling_of = "wg6-input",
+
+ .n_errors = ARRAY_LEN (wg_input_error_strings),
+ .error_strings = wg_input_error_strings,
+};
+
/*
* fd.io coding-style-patch-verification: ON
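
As registered above, both input nodes hand decrypted traffic to either ip4-input-no-checksum or ip6-input; the choice is made from the version nibble of the inner packet, exactly as is_ip4_header() does earlier in this file. A one-line illustrative helper (not part of the plugin):

/* Illustration: the inner IP version nibble selects the next node. */
static inline u16
wg_example_inner_next (const u8 *inner_hdr)
{
  return ((inner_hdr[0] >> 4) == 4) ? WG_INPUT_NEXT_IP4_INPUT :
                                      WG_INPUT_NEXT_IP6_INPUT;
}
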
diff --git a/src/plugins/wireguard/wireguard_key.c b/src/plugins/wireguard/wireguard_key.c
index 1ef1d8bf743..1ef1d8bf743 100755..100644
--- a/src/plugins/wireguard/wireguard_key.c
+++ b/src/plugins/wireguard/wireguard_key.c
diff --git a/src/plugins/wireguard/wireguard_key.h b/src/plugins/wireguard/wireguard_key.h
index ed96fb1da91..ed96fb1da91 100755..100644
--- a/src/plugins/wireguard/wireguard_key.h
+++ b/src/plugins/wireguard/wireguard_key.h
diff --git a/src/plugins/wireguard/wireguard_messages.h b/src/plugins/wireguard/wireguard_messages.h
index 3587c5c8a45..3587c5c8a45 100755..100644
--- a/src/plugins/wireguard/wireguard_messages.h
+++ b/src/plugins/wireguard/wireguard_messages.h
diff --git a/src/plugins/wireguard/wireguard_noise.c b/src/plugins/wireguard/wireguard_noise.c
index 7b4c01942bc..c3f28f442f5 100755..100644
--- a/src/plugins/wireguard/wireguard_noise.c
+++ b/src/plugins/wireguard/wireguard_noise.c
@@ -17,6 +17,7 @@
#include <openssl/hmac.h>
#include <wireguard/wireguard.h>
+#include <wireguard/wireguard_chachapoly.h>
/* This implements Noise_IKpsk2:
*
@@ -32,11 +33,13 @@ noise_local_t *noise_local_pool;
static noise_keypair_t *noise_remote_keypair_allocate (noise_remote_t *);
static void noise_remote_keypair_free (vlib_main_t * vm, noise_remote_t *,
noise_keypair_t **);
-static uint32_t noise_remote_handshake_index_get (noise_remote_t *);
-static void noise_remote_handshake_index_drop (noise_remote_t *);
+static uint32_t noise_remote_handshake_index_get (vlib_main_t *vm,
+ noise_remote_t *);
+static void noise_remote_handshake_index_drop (vlib_main_t *vm,
+ noise_remote_t *);
static uint64_t noise_counter_send (noise_counter_t *);
-static bool noise_counter_recv (noise_counter_t *, uint64_t);
+bool noise_counter_recv (noise_counter_t *, uint64_t);
static void noise_kdf (uint8_t *, uint8_t *, uint8_t *, const uint8_t *,
size_t, size_t, size_t, size_t,
@@ -67,8 +70,6 @@ static void noise_msg_ephemeral (uint8_t[NOISE_HASH_LEN],
static void noise_tai64n_now (uint8_t[NOISE_TIMESTAMP_LEN]);
-static void secure_zero_memory (void *v, size_t n);
-
/* Set/Get noise parameters */
void
noise_local_init (noise_local_t * l, struct noise_upcall *upcall)
@@ -87,7 +88,7 @@ noise_local_set_private (noise_local_t * l,
}
void
-noise_remote_init (noise_remote_t * r, uint32_t peer_pool_idx,
+noise_remote_init (vlib_main_t *vm, noise_remote_t *r, uint32_t peer_pool_idx,
const uint8_t public[NOISE_PUBLIC_KEY_LEN],
u32 noise_local_idx)
{
@@ -98,19 +99,19 @@ noise_remote_init (noise_remote_t * r, uint32_t peer_pool_idx,
r->r_local_idx = noise_local_idx;
r->r_handshake.hs_state = HS_ZEROED;
- noise_remote_precompute (r);
+ noise_remote_precompute (vm, r);
}
void
-noise_remote_precompute (noise_remote_t * r)
+noise_remote_precompute (vlib_main_t *vm, noise_remote_t *r)
{
noise_local_t *l = noise_local_get (r->r_local_idx);
if (!curve25519_gen_shared (r->r_ss, l->l_private, r->r_public))
clib_memset (r->r_ss, 0, NOISE_PUBLIC_KEY_LEN);
- noise_remote_handshake_index_drop (r);
- secure_zero_memory (&r->r_handshake, sizeof (r->r_handshake));
+ noise_remote_handshake_index_drop (vm, r);
+ wg_secure_zero_memory (&r->r_handshake, sizeof (r->r_handshake));
}
/* Handshake functions */
@@ -122,7 +123,7 @@ noise_create_initiation (vlib_main_t * vm, noise_remote_t * r,
{
noise_handshake_t *hs = &r->r_handshake;
noise_local_t *l = noise_local_get (r->r_local_idx);
- uint8_t _key[NOISE_SYMMETRIC_KEY_LEN];
+ uint8_t _key[NOISE_SYMMETRIC_KEY_LEN] = { 0 };
uint32_t key_idx;
uint8_t *key;
int ret = false;
@@ -143,6 +144,7 @@ noise_create_initiation (vlib_main_t * vm, noise_remote_t * r,
/* es */
if (!noise_mix_dh (hs->hs_ck, key, hs->hs_e, r->r_public))
goto error;
+ vnet_crypto_key_update (vm, key_idx);
/* s */
noise_msg_encrypt (vm, es, l->l_public, NOISE_PUBLIC_KEY_LEN, key_idx,
@@ -151,17 +153,18 @@ noise_create_initiation (vlib_main_t * vm, noise_remote_t * r,
/* ss */
if (!noise_mix_ss (hs->hs_ck, key, r->r_ss))
goto error;
+ vnet_crypto_key_update (vm, key_idx);
/* {t} */
noise_tai64n_now (ets);
noise_msg_encrypt (vm, ets, ets, NOISE_TIMESTAMP_LEN, key_idx, hs->hs_hash);
- noise_remote_handshake_index_drop (r);
+ noise_remote_handshake_index_drop (vm, r);
hs->hs_state = CREATED_INITIATION;
- hs->hs_local_index = noise_remote_handshake_index_get (r);
+ hs->hs_local_index = noise_remote_handshake_index_get (vm, r);
*s_idx = hs->hs_local_index;
ret = true;
error:
- secure_zero_memory (key, NOISE_SYMMETRIC_KEY_LEN);
+ wg_secure_zero_memory (key, NOISE_SYMMETRIC_KEY_LEN);
vnet_crypto_key_del (vm, key_idx);
return ret;
}
@@ -177,9 +180,9 @@ noise_consume_initiation (vlib_main_t * vm, noise_local_t * l,
{
noise_remote_t *r;
noise_handshake_t hs;
- uint8_t _key[NOISE_SYMMETRIC_KEY_LEN];
- uint8_t r_public[NOISE_PUBLIC_KEY_LEN];
- uint8_t timestamp[NOISE_TIMESTAMP_LEN];
+ uint8_t _key[NOISE_SYMMETRIC_KEY_LEN] = { 0 };
+ uint8_t r_public[NOISE_PUBLIC_KEY_LEN] = { 0 };
+ uint8_t timestamp[NOISE_TIMESTAMP_LEN] = { 0 };
u32 key_idx;
uint8_t *key;
int ret = false;
@@ -197,6 +200,7 @@ noise_consume_initiation (vlib_main_t * vm, noise_local_t * l,
/* es */
if (!noise_mix_dh (hs.hs_ck, key, l->l_private, ue))
goto error;
+ vnet_crypto_key_update (vm, key_idx);
/* s */
@@ -212,6 +216,7 @@ noise_consume_initiation (vlib_main_t * vm, noise_local_t * l,
/* ss */
if (!noise_mix_ss (hs.hs_ck, key, r->r_ss))
goto error;
+ vnet_crypto_key_update (vm, key_idx);
/* {t} */
if (!noise_msg_decrypt (vm, timestamp, ets,
@@ -238,15 +243,15 @@ noise_consume_initiation (vlib_main_t * vm, noise_local_t * l,
goto error;
/* Ok, we're happy to accept this initiation now */
- noise_remote_handshake_index_drop (r);
+ noise_remote_handshake_index_drop (vm, r);
r->r_handshake = hs;
*rp = r;
ret = true;
error:
- secure_zero_memory (key, NOISE_SYMMETRIC_KEY_LEN);
+ wg_secure_zero_memory (key, NOISE_SYMMETRIC_KEY_LEN);
vnet_crypto_key_del (vm, key_idx);
- secure_zero_memory (&hs, sizeof (hs));
+ wg_secure_zero_memory (&hs, sizeof (hs));
return ret;
}
@@ -256,8 +261,8 @@ noise_create_response (vlib_main_t * vm, noise_remote_t * r, uint32_t * s_idx,
uint8_t en[0 + NOISE_AUTHTAG_LEN])
{
noise_handshake_t *hs = &r->r_handshake;
- uint8_t _key[NOISE_SYMMETRIC_KEY_LEN];
- uint8_t e[NOISE_PUBLIC_KEY_LEN];
+ uint8_t _key[NOISE_SYMMETRIC_KEY_LEN] = { 0 };
+ uint8_t e[NOISE_PUBLIC_KEY_LEN] = { 0 };
uint32_t key_idx;
uint8_t *key;
int ret = false;
@@ -286,20 +291,21 @@ noise_create_response (vlib_main_t * vm, noise_remote_t * r, uint32_t * s_idx,
/* psk */
noise_mix_psk (hs->hs_ck, hs->hs_hash, key, r->r_psk);
+ vnet_crypto_key_update (vm, key_idx);
/* {} */
noise_msg_encrypt (vm, en, NULL, 0, key_idx, hs->hs_hash);
hs->hs_state = CREATED_RESPONSE;
- hs->hs_local_index = noise_remote_handshake_index_get (r);
+ hs->hs_local_index = noise_remote_handshake_index_get (vm, r);
*r_idx = hs->hs_remote_index;
*s_idx = hs->hs_local_index;
ret = true;
error:
- secure_zero_memory (key, NOISE_SYMMETRIC_KEY_LEN);
+ wg_secure_zero_memory (key, NOISE_SYMMETRIC_KEY_LEN);
vnet_crypto_key_del (vm, key_idx);
- secure_zero_memory (e, NOISE_PUBLIC_KEY_LEN);
+ wg_secure_zero_memory (e, NOISE_PUBLIC_KEY_LEN);
return ret;
}
@@ -310,8 +316,8 @@ noise_consume_response (vlib_main_t * vm, noise_remote_t * r, uint32_t s_idx,
{
noise_local_t *l = noise_local_get (r->r_local_idx);
noise_handshake_t hs;
- uint8_t _key[NOISE_SYMMETRIC_KEY_LEN];
- uint8_t preshared_key[NOISE_PUBLIC_KEY_LEN];
+ uint8_t _key[NOISE_SYMMETRIC_KEY_LEN] = { 0 };
+ uint8_t preshared_key[NOISE_PUBLIC_KEY_LEN] = { 0 };
uint32_t key_idx;
uint8_t *key;
int ret = false;
@@ -340,6 +346,7 @@ noise_consume_response (vlib_main_t * vm, noise_remote_t * r, uint32_t s_idx,
/* psk */
noise_mix_psk (hs.hs_ck, hs.hs_hash, key, preshared_key);
+ vnet_crypto_key_update (vm, key_idx);
/* {} */
@@ -358,8 +365,8 @@ noise_consume_response (vlib_main_t * vm, noise_remote_t * r, uint32_t s_idx,
ret = true;
}
error:
- secure_zero_memory (&hs, sizeof (hs));
- secure_zero_memory (key, NOISE_SYMMETRIC_KEY_LEN);
+ wg_secure_zero_memory (&hs, sizeof (hs));
+ wg_secure_zero_memory (key, NOISE_SYMMETRIC_KEY_LEN);
vnet_crypto_key_del (vm, key_idx);
return ret;
}
@@ -407,6 +414,8 @@ noise_remote_begin_session (vlib_main_t * vm, noise_remote_t * r)
/* Now we need to add_new_keypair */
clib_rwlock_writer_lock (&r->r_keypair_lock);
+ /* Activate barrier to synchronize keys between threads */
+ vlib_worker_thread_barrier_sync (vm);
next = r->r_next;
current = r->r_current;
previous = r->r_previous;
@@ -438,19 +447,20 @@ noise_remote_begin_session (vlib_main_t * vm, noise_remote_t * r)
r->r_next = noise_remote_keypair_allocate (r);
*r->r_next = kp;
}
+ vlib_worker_thread_barrier_release (vm);
clib_rwlock_writer_unlock (&r->r_keypair_lock);
- secure_zero_memory (&r->r_handshake, sizeof (r->r_handshake));
+ wg_secure_zero_memory (&r->r_handshake, sizeof (r->r_handshake));
- secure_zero_memory (&kp, sizeof (kp));
+ wg_secure_zero_memory (&kp, sizeof (kp));
return true;
}
void
noise_remote_clear (vlib_main_t * vm, noise_remote_t * r)
{
- noise_remote_handshake_index_drop (r);
- secure_zero_memory (&r->r_handshake, sizeof (r->r_handshake));
+ noise_remote_handshake_index_drop (vm, r);
+ wg_secure_zero_memory (&r->r_handshake, sizeof (r->r_handshake));
clib_rwlock_writer_lock (&r->r_keypair_lock);
noise_remote_keypair_free (vm, r, &r->r_next);
@@ -492,55 +502,6 @@ noise_remote_ready (noise_remote_t * r)
return ret;
}
-static bool
-chacha20poly1305_calc (vlib_main_t * vm,
- u8 * src,
- u32 src_len,
- u8 * dst,
- u8 * aad,
- u32 aad_len,
- u64 nonce,
- vnet_crypto_op_id_t op_id,
- vnet_crypto_key_index_t key_index)
-{
- vnet_crypto_op_t _op, *op = &_op;
- u8 iv[12];
- u8 tag_[NOISE_AUTHTAG_LEN] = { };
- u8 src_[] = { };
-
- clib_memset (iv, 0, 12);
- clib_memcpy (iv + 4, &nonce, sizeof (nonce));
-
- vnet_crypto_op_init (op, op_id);
-
- op->tag_len = NOISE_AUTHTAG_LEN;
- if (op_id == VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC)
- {
- op->tag = src + src_len - NOISE_AUTHTAG_LEN;
- src_len -= NOISE_AUTHTAG_LEN;
- op->flags |= VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
- }
- else
- op->tag = tag_;
-
- op->src = !src ? src_ : src;
- op->len = src_len;
-
- op->dst = dst;
- op->key_index = key_index;
- op->aad = aad;
- op->aad_len = aad_len;
- op->iv = iv;
-
- vnet_crypto_process_ops (vm, op, 1);
- if (op_id == VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC)
- {
- clib_memcpy (dst + src_len, op->tag, NOISE_AUTHTAG_LEN);
- }
-
- return (op->status == VNET_CRYPTO_OP_STATUS_COMPLETED);
-}
-
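The chacha20poly1305_calc() helper deleted above is re-introduced elsewhere in this patch as wg_chacha20poly1305_calc(), which the remaining call sites in this file now use. The sketch below is an illustration inferred from those call sites and from the removed body, not the relocated implementation itself:

/* Signature implied by the call sites that follow (the authoritative
 * declaration is added by this patch outside this file). */
bool wg_chacha20poly1305_calc (vlib_main_t *vm, u8 *src, u32 src_len, u8 *dst,
                               u8 *aad, u32 aad_len, u64 nonce,
                               vnet_crypto_op_id_t op_id,
                               vnet_crypto_key_index_t key_index);

/* IV derivation kept from the removed body: a 96-bit AEAD IV built from
 * 4 zero bytes followed by the 64-bit counter in host byte order; on
 * encrypt, the 16-byte tag is appended after src_len bytes of dst. */
u8 iv[12];
clib_memset (iv, 0, sizeof (iv));
clib_memcpy (iv + 4, &nonce, sizeof (nonce));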
enum noise_state_crypt
noise_remote_encrypt (vlib_main_t * vm, noise_remote_t * r, uint32_t * r_idx,
uint64_t * nonce, uint8_t * src, size_t srclen,
@@ -549,7 +510,6 @@ noise_remote_encrypt (vlib_main_t * vm, noise_remote_t * r, uint32_t * r_idx,
noise_keypair_t *kp;
enum noise_state_crypt ret = SC_FAILED;
- clib_rwlock_reader_lock (&r->r_keypair_lock);
if ((kp = r->r_current) == NULL)
goto error;
@@ -570,9 +530,9 @@ noise_remote_encrypt (vlib_main_t * vm, noise_remote_t * r, uint32_t * r_idx,
* are passed back out to the caller through the provided data pointer. */
*r_idx = kp->kp_remote_index;
- chacha20poly1305_calc (vm, src, srclen, dst, NULL, 0, *nonce,
- VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC,
- kp->kp_send_index);
+ wg_chacha20poly1305_calc (vm, src, srclen, dst, NULL, 0, *nonce,
+ VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC,
+ kp->kp_send_index);
/* If our values are still within tolerances, but we are approaching
* the tolerances, we notify the caller with ESTALE that they should
@@ -589,94 +549,6 @@ noise_remote_encrypt (vlib_main_t * vm, noise_remote_t * r, uint32_t * r_idx,
ret = SC_OK;
error:
- clib_rwlock_reader_unlock (&r->r_keypair_lock);
- return ret;
-}
-
-enum noise_state_crypt
-noise_remote_decrypt (vlib_main_t * vm, noise_remote_t * r, uint32_t r_idx,
- uint64_t nonce, uint8_t * src, size_t srclen,
- uint8_t * dst)
-{
- noise_keypair_t *kp;
- enum noise_state_crypt ret = SC_FAILED;
- clib_rwlock_reader_lock (&r->r_keypair_lock);
-
- if (r->r_current != NULL && r->r_current->kp_local_index == r_idx)
- {
- kp = r->r_current;
- }
- else if (r->r_previous != NULL && r->r_previous->kp_local_index == r_idx)
- {
- kp = r->r_previous;
- }
- else if (r->r_next != NULL && r->r_next->kp_local_index == r_idx)
- {
- kp = r->r_next;
- }
- else
- {
- goto error;
- }
-
- /* We confirm that our values are within our tolerances. These values
- * are the same as the encrypt routine.
- *
- * kp_ctr isn't locked here, we're happy to accept a racy read. */
- if (wg_birthdate_has_expired (kp->kp_birthdate, REJECT_AFTER_TIME) ||
- kp->kp_ctr.c_recv >= REJECT_AFTER_MESSAGES)
- goto error;
-
- /* Decrypt, then validate the counter. We don't want to validate the
- * counter before decrypting as we do not know the message is authentic
- * prior to decryption. */
- if (!chacha20poly1305_calc (vm, src, srclen, dst, NULL, 0, nonce,
- VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC,
- kp->kp_recv_index))
- goto error;
-
- if (!noise_counter_recv (&kp->kp_ctr, nonce))
- goto error;
-
- /* If we've received the handshake confirming data packet then move the
- * next keypair into current. If we do slide the next keypair in, then
- * we skip the REKEY_AFTER_TIME_RECV check. This is safe to do as a
- * data packet can't confirm a session that we are an INITIATOR of. */
- if (kp == r->r_next)
- {
- clib_rwlock_reader_unlock (&r->r_keypair_lock);
- clib_rwlock_writer_lock (&r->r_keypair_lock);
- if (kp == r->r_next && kp->kp_local_index == r_idx)
- {
- noise_remote_keypair_free (vm, r, &r->r_previous);
- r->r_previous = r->r_current;
- r->r_current = r->r_next;
- r->r_next = NULL;
-
- ret = SC_CONN_RESET;
- clib_rwlock_writer_unlock (&r->r_keypair_lock);
- clib_rwlock_reader_lock (&r->r_keypair_lock);
- goto error;
- }
- clib_rwlock_writer_unlock (&r->r_keypair_lock);
- clib_rwlock_reader_lock (&r->r_keypair_lock);
- }
-
- /* Similar to when we encrypt, we want to notify the caller when we
- * are approaching our tolerances. We notify if:
- * - we're the initiator and the current keypair is older than
- * REKEY_AFTER_TIME_RECV seconds. */
- ret = SC_KEEP_KEY_FRESH;
- kp = r->r_current;
- if (kp != NULL &&
- kp->kp_valid &&
- kp->kp_is_initiator &&
- wg_birthdate_has_expired (kp->kp_birthdate, REKEY_AFTER_TIME_RECV))
- goto error;
-
- ret = SC_OK;
-error:
- clib_rwlock_reader_unlock (&r->r_keypair_lock);
return ret;
}
@@ -690,86 +562,22 @@ noise_remote_keypair_allocate (noise_remote_t * r)
return kp;
}
-static void
-noise_remote_keypair_free (vlib_main_t * vm, noise_remote_t * r,
- noise_keypair_t ** kp)
-{
- noise_local_t *local = noise_local_get (r->r_local_idx);
- struct noise_upcall *u = &local->l_upcall;
- if (*kp)
- {
- u->u_index_drop ((*kp)->kp_local_index);
- vnet_crypto_key_del (vm, (*kp)->kp_send_index);
- vnet_crypto_key_del (vm, (*kp)->kp_recv_index);
- clib_mem_free (*kp);
- }
-}
-
static uint32_t
-noise_remote_handshake_index_get (noise_remote_t * r)
+noise_remote_handshake_index_get (vlib_main_t *vm, noise_remote_t *r)
{
noise_local_t *local = noise_local_get (r->r_local_idx);
struct noise_upcall *u = &local->l_upcall;
- return u->u_index_set (r);
+ return u->u_index_set (vm, r);
}
static void
-noise_remote_handshake_index_drop (noise_remote_t * r)
+noise_remote_handshake_index_drop (vlib_main_t *vm, noise_remote_t *r)
{
noise_handshake_t *hs = &r->r_handshake;
noise_local_t *local = noise_local_get (r->r_local_idx);
struct noise_upcall *u = &local->l_upcall;
if (hs->hs_state != HS_ZEROED)
- u->u_index_drop (hs->hs_local_index);
-}
-
-static uint64_t
-noise_counter_send (noise_counter_t * ctr)
-{
- uint64_t ret;
- ret = ctr->c_send++;
- return ret;
-}
-
-static bool
-noise_counter_recv (noise_counter_t * ctr, uint64_t recv)
-{
- uint64_t i, top, index_recv, index_ctr;
- unsigned long bit;
- bool ret = false;
-
- /* Check that the recv counter is valid */
- if (ctr->c_recv >= REJECT_AFTER_MESSAGES || recv >= REJECT_AFTER_MESSAGES)
- goto error;
-
- /* If the packet is out of the window, invalid */
- if (recv + COUNTER_WINDOW_SIZE < ctr->c_recv)
- goto error;
-
- /* If the new counter is ahead of the current counter, we'll need to
- * zero out the bitmap that has previously been used */
- index_recv = recv / COUNTER_BITS;
- index_ctr = ctr->c_recv / COUNTER_BITS;
-
- if (recv > ctr->c_recv)
- {
- top = clib_min (index_recv - index_ctr, COUNTER_NUM);
- for (i = 1; i <= top; i++)
- ctr->c_backtrack[(i + index_ctr) & (COUNTER_NUM - 1)] = 0;
- ctr->c_recv = recv;
- }
-
- index_recv %= COUNTER_NUM;
- bit = 1ul << (recv % COUNTER_BITS);
-
- if (ctr->c_backtrack[index_recv] & bit)
- goto error;
-
- ctr->c_backtrack[index_recv] |= bit;
-
- ret = true;
-error:
- return ret;
+ u->u_index_drop (vm, hs->hs_local_index);
}
static void
@@ -816,8 +624,8 @@ noise_kdf (uint8_t * a, uint8_t * b, uint8_t * c, const uint8_t * x,
out:
/* Clear sensitive data from stack */
- secure_zero_memory (sec, BLAKE2S_HASH_SIZE);
- secure_zero_memory (out, BLAKE2S_HASH_SIZE + 1);
+ wg_secure_zero_memory (sec, BLAKE2S_HASH_SIZE);
+ wg_secure_zero_memory (out, BLAKE2S_HASH_SIZE + 1);
}
static bool
@@ -832,7 +640,7 @@ noise_mix_dh (uint8_t ck[NOISE_HASH_LEN],
noise_kdf (ck, key, NULL, dh,
NOISE_HASH_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN,
ck);
- secure_zero_memory (dh, NOISE_PUBLIC_KEY_LEN);
+ wg_secure_zero_memory (dh, NOISE_PUBLIC_KEY_LEN);
return true;
}
@@ -873,7 +681,7 @@ noise_mix_psk (uint8_t ck[NOISE_HASH_LEN], uint8_t hash[NOISE_HASH_LEN],
NOISE_HASH_LEN, NOISE_HASH_LEN, NOISE_SYMMETRIC_KEY_LEN,
NOISE_SYMMETRIC_KEY_LEN, ck);
noise_mix_hash (hash, tmp, NOISE_HASH_LEN);
- secure_zero_memory (tmp, NOISE_HASH_LEN);
+ wg_secure_zero_memory (tmp, NOISE_HASH_LEN);
}
static void
@@ -900,8 +708,8 @@ noise_msg_encrypt (vlib_main_t * vm, uint8_t * dst, uint8_t * src,
uint8_t hash[NOISE_HASH_LEN])
{
/* Nonce always zero for Noise_IK */
- chacha20poly1305_calc (vm, src, src_len, dst, hash, NOISE_HASH_LEN, 0,
- VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC, key_idx);
+ wg_chacha20poly1305_calc (vm, src, src_len, dst, hash, NOISE_HASH_LEN, 0,
+ VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC, key_idx);
noise_mix_hash (hash, dst, src_len + NOISE_AUTHTAG_LEN);
}
@@ -911,8 +719,9 @@ noise_msg_decrypt (vlib_main_t * vm, uint8_t * dst, uint8_t * src,
uint8_t hash[NOISE_HASH_LEN])
{
/* Nonce always zero for Noise_IK */
- if (!chacha20poly1305_calc (vm, src, src_len, dst, hash, NOISE_HASH_LEN, 0,
- VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC, key_idx))
+ if (!wg_chacha20poly1305_calc (vm, src, src_len, dst, hash, NOISE_HASH_LEN,
+ 0, VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC,
+ key_idx))
return false;
noise_mix_hash (hash, src, src_len);
return true;
@@ -942,21 +751,14 @@ noise_tai64n_now (uint8_t output[NOISE_TIMESTAMP_LEN])
unix_nanosec &= REJECT_INTERVAL_MASK;
/* https://cr.yp.to/libtai/tai64.html */
- sec = htobe64 (0x400000000000000aULL + unix_sec);
- nsec = htobe32 (unix_nanosec);
+ sec = clib_host_to_big_u64 (0x400000000000000aULL + unix_sec);
+ nsec = clib_host_to_big_u32 (unix_nanosec);
/* memcpy to output buffer, assuming output could be unaligned. */
clib_memcpy (output, &sec, sizeof (sec));
clib_memcpy (output + sizeof (sec), &nsec, sizeof (nsec));
}
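noise_tai64n_now() now uses the clib byte-order helpers instead of htobe64()/htobe32(); the wire format is unchanged: a big-endian 64-bit seconds field offset by 0x400000000000000a (the TAI64 base 2^62 plus a constant 10), then a big-endian 32-bit nanosecond field that was already masked with REJECT_INTERVAL_MASK. A hypothetical decode helper, not part of the plugin, that inverts the encoding when inspecting handshake traces:

/* Hypothetical helper (illustration only): recover the Unix seconds and
 * the masked nanoseconds from a timestamp written by noise_tai64n_now(). */
static void
wg_tai64n_decode (const uint8_t ts[NOISE_TIMESTAMP_LEN], uint64_t *unix_sec,
                  uint32_t *masked_nsec)
{
  uint64_t sec;
  uint32_t nsec;

  clib_memcpy (&sec, ts, sizeof (sec));
  clib_memcpy (&nsec, ts + sizeof (sec), sizeof (nsec));

  *unix_sec = clib_big_to_host_u64 (sec) - 0x400000000000000aULL;
  *masked_nsec = clib_big_to_host_u32 (nsec);
}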
-static void
-secure_zero_memory (void *v, size_t n)
-{
- static void *(*const volatile memset_v) (void *, int, size_t) = &memset;
- memset_v (v, 0, n);
-}
-
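The local secure_zero_memory() is replaced throughout this file by wg_secure_zero_memory(), shared plugin-wide (its definition is outside this section). The volatile function pointer in the deleted version is the important detail: the compiler cannot assume the pointer still refers to memset, so it cannot discard the zeroing of a stack buffer that is about to go out of scope. A sketch of the pattern, on the assumption that the shared helper keeps it:

/* Assumed shape of the shared helper: the volatile indirection prevents
 * dead-store elimination of the final wipe of key material. */
void
wg_secure_zero_memory (void *v, size_t n)
{
  static void *(*const volatile memset_v) (void *, int, size_t) = &memset;
  memset_v (v, 0, n);
}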
/*
* fd.io coding-style-patch-verification: ON
*
diff --git a/src/plugins/wireguard/wireguard_noise.h b/src/plugins/wireguard/wireguard_noise.h
index 5b5a88fa250..fd2c09ebfa5 100755..100644
--- a/src/plugins/wireguard/wireguard_noise.h
+++ b/src/plugins/wireguard/wireguard_noise.h
@@ -121,8 +121,8 @@ typedef struct noise_local
{
void *u_arg;
noise_remote_t *(*u_remote_get) (const uint8_t[NOISE_PUBLIC_KEY_LEN]);
- uint32_t (*u_index_set) (noise_remote_t *);
- void (*u_index_drop) (uint32_t);
+ uint32_t (*u_index_set) (vlib_main_t *, noise_remote_t *);
+ void (*u_index_drop) (vlib_main_t *, uint32_t);
} l_upcall;
} noise_local_t;
@@ -136,15 +136,23 @@ noise_local_get (uint32_t locali)
return (pool_elt_at_index (noise_local_pool, locali));
}
+static_always_inline uint64_t
+noise_counter_send (noise_counter_t *ctr)
+{
+ uint64_t ret;
+ ret = ctr->c_send++;
+ return ret;
+}
+
void noise_local_init (noise_local_t *, struct noise_upcall *);
bool noise_local_set_private (noise_local_t *,
const uint8_t[NOISE_PUBLIC_KEY_LEN]);
-void noise_remote_init (noise_remote_t *, uint32_t,
+void noise_remote_init (vlib_main_t *, noise_remote_t *, uint32_t,
const uint8_t[NOISE_PUBLIC_KEY_LEN], uint32_t);
/* Should be called anytime noise_local_set_private is called */
-void noise_remote_precompute (noise_remote_t *);
+void noise_remote_precompute (vlib_main_t *, noise_remote_t *);
/* Cryptographic functions */
bool noise_create_initiation (vlib_main_t * vm, noise_remote_t *,
@@ -187,12 +195,83 @@ noise_remote_encrypt (vlib_main_t * vm, noise_remote_t *,
uint32_t * r_idx,
uint64_t * nonce,
uint8_t * src, size_t srclen, uint8_t * dst);
-enum noise_state_crypt
-noise_remote_decrypt (vlib_main_t * vm, noise_remote_t *,
- uint32_t r_idx,
- uint64_t nonce,
- uint8_t * src, size_t srclen, uint8_t * dst);
+static_always_inline noise_keypair_t *
+wg_get_active_keypair (noise_remote_t *r, uint32_t r_idx)
+{
+ if (r->r_current != NULL && r->r_current->kp_local_index == r_idx)
+ {
+ return r->r_current;
+ }
+ else if (r->r_previous != NULL && r->r_previous->kp_local_index == r_idx)
+ {
+ return r->r_previous;
+ }
+ else if (r->r_next != NULL && r->r_next->kp_local_index == r_idx)
+ {
+ return r->r_next;
+ }
+ else
+ {
+ return NULL;
+ }
+}
+
+inline bool
+noise_counter_recv (noise_counter_t *ctr, uint64_t recv)
+{
+ uint64_t i, top, index_recv, index_ctr;
+ unsigned long bit;
+ bool ret = false;
+
+ /* Check that the recv counter is valid */
+ if (ctr->c_recv >= REJECT_AFTER_MESSAGES || recv >= REJECT_AFTER_MESSAGES)
+ goto error;
+
+ /* If the packet is out of the window, invalid */
+ if (recv + COUNTER_WINDOW_SIZE < ctr->c_recv)
+ goto error;
+
+ /* If the new counter is ahead of the current counter, we'll need to
+ * zero out the bitmap that has previously been used */
+ index_recv = recv / COUNTER_BITS;
+ index_ctr = ctr->c_recv / COUNTER_BITS;
+
+ if (recv > ctr->c_recv)
+ {
+ top = clib_min (index_recv - index_ctr, COUNTER_NUM);
+ for (i = 1; i <= top; i++)
+ ctr->c_backtrack[(i + index_ctr) & (COUNTER_NUM - 1)] = 0;
+ ctr->c_recv = recv;
+ }
+
+ index_recv %= COUNTER_NUM;
+ bit = 1ul << (recv % COUNTER_BITS);
+
+ if (ctr->c_backtrack[index_recv] & bit)
+ goto error;
+
+ ctr->c_backtrack[index_recv] |= bit;
+
+ ret = true;
+error:
+ return ret;
+}
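noise_counter_recv() is the standard WireGuard sliding-window anti-replay check, moved into the header so the data-plane nodes can call it inline. An illustrative usage sequence, assuming a zero-initialized noise_counter_t (counter values are arbitrary):

/* Illustration only: strictly newer counters slide the window forward,
 * in-window unseen counters are accepted, duplicates and counters that
 * fall behind the window are rejected. */
noise_counter_t ctr = { 0 };

ASSERT (noise_counter_recv (&ctr, 1));  /* first packet           */
ASSERT (noise_counter_recv (&ctr, 5));  /* newer: window advances */
ASSERT (!noise_counter_recv (&ctr, 5)); /* duplicate rejected     */
ASSERT (noise_counter_recv (&ctr, 3));  /* late but in window     */
ASSERT (!noise_counter_recv (&ctr, 3)); /* replay rejected        */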
+
+static_always_inline void
+noise_remote_keypair_free (vlib_main_t *vm, noise_remote_t *r,
+ noise_keypair_t **kp)
+{
+ noise_local_t *local = noise_local_get (r->r_local_idx);
+ struct noise_upcall *u = &local->l_upcall;
+ if (*kp)
+ {
+ u->u_index_drop (vm, (*kp)->kp_local_index);
+ vnet_crypto_key_del (vm, (*kp)->kp_send_index);
+ vnet_crypto_key_del (vm, (*kp)->kp_recv_index);
+ clib_mem_free (*kp);
+ }
+}
#endif /* __included_wg_noise_h__ */
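With noise_remote_decrypt() removed from the .c file, its building blocks (wg_get_active_keypair(), noise_counter_recv(), noise_remote_keypair_free()) are exported here so the receive path can open-code them. A hedged reconstruction of how a caller is expected to combine them; this is an illustration, not the actual input-node code, which lies outside this section:

static_always_inline bool
wg_decrypt_sketch (vlib_main_t *vm, noise_remote_t *r, uint32_t r_idx,
                   uint64_t nonce, uint8_t *src, size_t srclen, uint8_t *dst)
{
  noise_keypair_t *kp = wg_get_active_keypair (r, r_idx);

  /* Same tolerances as the removed noise_remote_decrypt(). */
  if (!kp || wg_birthdate_has_expired (kp->kp_birthdate, REJECT_AFTER_TIME) ||
      kp->kp_ctr.c_recv >= REJECT_AFTER_MESSAGES)
    return false;

  /* Decrypt before validating the counter: the message is not known to be
   * authentic until the tag verifies. */
  if (!wg_chacha20poly1305_calc (vm, src, srclen, dst, NULL, 0, nonce,
                                 VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC,
                                 kp->kp_recv_index))
    return false;

  /* The real caller additionally rotates r_next into r_current on the
   * first confirmed packet, as the removed function did. */
  return noise_counter_recv (&kp->kp_ctr, nonce);
}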
diff --git a/src/plugins/wireguard/wireguard_output_tun.c b/src/plugins/wireguard/wireguard_output_tun.c
index 53a8797c973..c9411f6ff20 100755..100644
--- a/src/plugins/wireguard/wireguard_output_tun.c
+++ b/src/plugins/wireguard/wireguard_output_tun.c
@@ -21,11 +21,12 @@
#include <wireguard/wireguard.h>
#include <wireguard/wireguard_send.h>
-#define foreach_wg_output_error \
- _(NONE, "No error") \
- _(PEER, "Peer error") \
- _(KEYPAIR, "Keypair error") \
- _(TOO_BIG, "packet too big") \
+#define foreach_wg_output_error \
+ _ (NONE, "No error") \
+ _ (PEER, "Peer error") \
+ _ (KEYPAIR, "Keypair error") \
+ _ (NO_BUFFERS, "No buffers") \
+ _ (CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)")
typedef enum
{
@@ -51,18 +52,34 @@ typedef enum
typedef struct
{
- ip4_udp_header_t hdr;
index_t peer;
+ u8 header[sizeof (ip6_udp_header_t)];
+ u8 is_ip4;
} wg_output_tun_trace_t;
+typedef struct
+{
+ index_t peer;
+ u32 next_index;
+} wg_output_tun_post_trace_t;
+
u8 *
format_ip4_udp_header (u8 * s, va_list * args)
{
- ip4_udp_header_t *hdr = va_arg (*args, ip4_udp_header_t *);
+ ip4_udp_header_t *hdr4 = va_arg (*args, ip4_udp_header_t *);
+
+ s = format (s, "%U:%U", format_ip4_header, &hdr4->ip4, format_udp_header,
+ &hdr4->udp);
+ return (s);
+}
- s = format (s, "%U:$U",
- format_ip4_header, &hdr->ip4, format_udp_header, &hdr->udp);
+u8 *
+format_ip6_udp_header (u8 *s, va_list *args)
+{
+ ip6_udp_header_t *hdr6 = va_arg (*args, ip6_udp_header_t *);
+ s = format (s, "%U:%U", format_ip6_header, &hdr6->ip6, format_udp_header,
+ &hdr6->udp);
return (s);
}
@@ -76,50 +93,415 @@ format_wg_output_tun_trace (u8 * s, va_list * args)
wg_output_tun_trace_t *t = va_arg (*args, wg_output_tun_trace_t *);
s = format (s, "peer: %d\n", t->peer);
- s = format (s, " Encrypted packet: %U", format_ip4_udp_header, &t->hdr);
+ s = format (s, " Encrypted packet: ");
+
+ s = t->is_ip4 ? format (s, "%U", format_ip4_udp_header, t->header) :
+ format (s, "%U", format_ip6_udp_header, t->header);
return s;
}
-VLIB_NODE_FN (wg_output_tun_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+/* post node - packet trace format function */
+static u8 *
+format_wg_output_tun_post_trace (u8 *s, va_list *args)
{
- u32 n_left_from;
- u32 *from;
- vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
- u16 nexts[VLIB_FRAME_SIZE], *next;
- u32 thread_index = vm->thread_index;
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- from = vlib_frame_vector_args (frame);
- n_left_from = frame->n_vectors;
- b = bufs;
- next = nexts;
+ wg_output_tun_post_trace_t *t = va_arg (*args, wg_output_tun_post_trace_t *);
- vlib_get_buffers (vm, from, bufs, n_left_from);
+ s = format (s, "peer: %d\n", t->peer);
+ s = format (s, " wg-post: next node index %u", t->next_index);
+ return s;
+}
+
+static_always_inline void
+wg_output_chain_crypto (vlib_main_t *vm, wg_per_thread_data_t *ptd,
+ vlib_buffer_t *b, vlib_buffer_t *lb, u8 *start,
+ u32 start_len, u16 *n_ch)
+{
+ vnet_crypto_op_chunk_t *ch;
+ vlib_buffer_t *cb = b;
+ u32 n_chunks = 1;
+
+ vec_add2 (ptd->chunks, ch, 1);
+ ch->len = start_len;
+ ch->src = ch->dst = start;
+ cb = vlib_get_buffer (vm, cb->next_buffer);
+
+ while (1)
+ {
+ vec_add2 (ptd->chunks, ch, 1);
+ n_chunks += 1;
+ if (lb == cb)
+ ch->len = cb->current_length - NOISE_AUTHTAG_LEN;
+ else
+ ch->len = cb->current_length;
+
+ ch->src = ch->dst = vlib_buffer_get_current (cb);
+
+ if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
+ break;
+
+ cb = vlib_get_buffer (vm, cb->next_buffer);
+ }
+
+ if (n_ch)
+ *n_ch = n_chunks;
+}
+
+static_always_inline void
+wg_prepare_sync_enc_op (vlib_main_t *vm, wg_per_thread_data_t *ptd,
+ vlib_buffer_t *b, vlib_buffer_t *lb,
+ vnet_crypto_op_t **crypto_ops, u8 *src, u32 src_len,
+ u8 *dst, u8 *aad, u32 aad_len, u64 nonce,
+ vnet_crypto_key_index_t key_index, u32 bi, u8 *iv)
+{
+ vnet_crypto_op_t _op, *op = &_op;
+ u8 src_[] = {};
+
+ clib_memset (iv, 0, 4);
+ clib_memcpy (iv + 4, &nonce, sizeof (nonce));
+
+ vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
+ vnet_crypto_op_init (op, VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC);
+
+ op->tag_len = NOISE_AUTHTAG_LEN;
+ op->tag = vlib_buffer_get_tail (lb) - NOISE_AUTHTAG_LEN;
+ op->key_index = key_index;
+ op->aad = aad;
+ op->aad_len = aad_len;
+ op->iv = iv;
+ op->user_data = bi;
+
+ if (b != lb)
+ {
+ /* Chained buffers */
+ op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
+ op->chunk_index = vec_len (ptd->chunks);
+ wg_output_chain_crypto (vm, ptd, b, lb, src, src_len, &op->n_chunks);
+ }
+ else
+ {
+ op->src = !src ? src_ : src;
+ op->len = src_len;
+ op->dst = dst;
+ }
+}
+
+static_always_inline void
+wg_output_process_chained_ops (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vnet_crypto_op_t *ops, vlib_buffer_t *b[],
+ u16 *nexts, vnet_crypto_op_chunk_t *chunks,
+ u16 drop_next)
+{
+ u32 n_fail, n_ops = vec_len (ops);
+ vnet_crypto_op_t *op = ops;
+
+ if (n_ops == 0)
+ return;
+
+ n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);
+
+ while (n_fail)
+ {
+ ASSERT (op - ops < n_ops);
+
+ if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
+ {
+ u32 bi = op->user_data;
+ b[bi]->error = node->errors[WG_OUTPUT_ERROR_CRYPTO_ENGINE_ERROR];
+ nexts[bi] = drop_next;
+ n_fail--;
+ }
+ op++;
+ }
+}
+
+static_always_inline void
+wg_output_process_ops (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vnet_crypto_op_t *ops, vlib_buffer_t *b[], u16 *nexts,
+ u16 drop_next)
+{
+ u32 n_fail, n_ops = vec_len (ops);
+ vnet_crypto_op_t *op = ops;
+
+ if (n_ops == 0)
+ return;
+ n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);
+
+ while (n_fail)
+ {
+ ASSERT (op - ops < n_ops);
+
+ if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
+ {
+ u32 bi = op->user_data;
+ b[bi]->error = node->errors[WG_OUTPUT_ERROR_CRYPTO_ENGINE_ERROR];
+ nexts[bi] = drop_next;
+ n_fail--;
+ }
+ op++;
+ }
+}
+
+static_always_inline void
+wg_output_tun_add_to_frame (vlib_main_t *vm, vnet_crypto_async_frame_t *f,
+ u32 key_index, u32 crypto_len,
+ i16 crypto_start_offset, u32 buffer_index,
+ u16 next_node, u8 *iv, u8 *tag, u8 flags)
+{
+ vnet_crypto_async_frame_elt_t *fe;
+ u16 index;
+
+ ASSERT (f->n_elts < VNET_CRYPTO_FRAME_SIZE);
+
+ index = f->n_elts;
+ fe = &f->elts[index];
+ f->n_elts++;
+ fe->key_index = key_index;
+ fe->crypto_total_length = crypto_len;
+ fe->crypto_start_offset = crypto_start_offset;
+ fe->iv = iv;
+ fe->tag = tag;
+ fe->flags = flags;
+ f->buffer_indices[index] = buffer_index;
+ f->next_node_index[index] = next_node;
+}
+
+static_always_inline enum noise_state_crypt
+wg_output_tun_process (vlib_main_t *vm, wg_per_thread_data_t *ptd,
+ vlib_buffer_t *b, vlib_buffer_t *lb,
+ vnet_crypto_op_t **crypto_ops, noise_remote_t *r,
+ uint32_t *r_idx, uint64_t *nonce, uint8_t *src,
+ size_t srclen, uint8_t *dst, u32 bi, u8 *iv, f64 time)
+{
+ noise_keypair_t *kp;
+ enum noise_state_crypt ret = SC_FAILED;
+
+ if ((kp = r->r_current) == NULL)
+ goto error;
+
+ /* We confirm that our values are within our tolerances. We want:
+ * - a valid keypair
+ * - our keypair to be less than REJECT_AFTER_TIME seconds old
+ * - our receive counter to be less than REJECT_AFTER_MESSAGES
+ * - our send counter to be less than REJECT_AFTER_MESSAGES
+ */
+ if (!kp->kp_valid ||
+ wg_birthdate_has_expired_opt (kp->kp_birthdate, REJECT_AFTER_TIME,
+ time) ||
+ kp->kp_ctr.c_recv >= REJECT_AFTER_MESSAGES ||
+ ((*nonce = noise_counter_send (&kp->kp_ctr)) > REJECT_AFTER_MESSAGES))
+ goto error;
+
+ /* We encrypt into the same buffer, so the caller must ensure that buf
+ * has NOISE_AUTHTAG_LEN bytes to store the MAC. The nonce and index
+ * are passed back out to the caller through the provided data pointer. */
+ *r_idx = kp->kp_remote_index;
+
+ wg_prepare_sync_enc_op (vm, ptd, b, lb, crypto_ops, src, srclen, dst, NULL,
+ 0, *nonce, kp->kp_send_index, bi, iv);
+
+ /* If our values are still within tolerances, but we are approaching
+ * the tolerances, we notify the caller with ESTALE that they should
+ * establish a new keypair. The current keypair can continue to be used
+ * until the tolerances are hit. We notify if:
+ * - our send counter is valid and not less than REKEY_AFTER_MESSAGES
+ * - we're the initiator and our keypair is older than
+ * REKEY_AFTER_TIME seconds */
+ ret = SC_KEEP_KEY_FRESH;
+ if ((kp->kp_valid && *nonce >= REKEY_AFTER_MESSAGES) ||
+ (kp->kp_is_initiator && wg_birthdate_has_expired_opt (
+ kp->kp_birthdate, REKEY_AFTER_TIME, time)))
+ goto error;
+
+ ret = SC_OK;
+error:
+ return ret;
+}
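wg_output_tun_process() (and its async twin wg_add_to_async_frame() below) applies the WireGuard key-lifetime rules before queuing the encrypt operation. A compact summary of the outcomes, with the protocol constants referenced above (their numeric values are defined elsewhere in the plugin):

/*
 * Send-path key lifetime rules applied above:
 *   no current keypair, keypair invalid or older than REJECT_AFTER_TIME,
 *   or either counter at/above REJECT_AFTER_MESSAGES
 *       -> SC_FAILED: the caller drops the packet and kicks off a handshake
 *   send counter >= REKEY_AFTER_MESSAGES, or initiator with a keypair
 *   older than REKEY_AFTER_TIME
 *       -> SC_KEEP_KEY_FRESH: the packet is still sent, but a refresh
 *          handshake should be scheduled
 *   otherwise
 *       -> SC_OK
 */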
+
+static_always_inline enum noise_state_crypt
+wg_add_to_async_frame (vlib_main_t *vm, wg_per_thread_data_t *ptd,
+ vnet_crypto_async_frame_t **async_frame,
+ vlib_buffer_t *b, vlib_buffer_t *lb, u8 *payload,
+ u32 payload_len, u32 bi, u16 next, u16 async_next,
+ noise_remote_t *r, uint32_t *r_idx, uint64_t *nonce,
+ u8 *iv, f64 time)
+{
+ wg_post_data_t *post = wg_post_data (b);
+ u8 flag = 0;
+ u8 *tag;
+ noise_keypair_t *kp;
+
+ post->next_index = next;
+
+ /* crypto */
+ enum noise_state_crypt ret = SC_FAILED;
+
+ if ((kp = r->r_current) == NULL)
+ goto error;
+
+ /* We confirm that our values are within our tolerances. We want:
+ * - a valid keypair
+ * - our keypair to be less than REJECT_AFTER_TIME seconds old
+ * - our receive counter to be less than REJECT_AFTER_MESSAGES
+ * - our send counter to be less than REJECT_AFTER_MESSAGES
+ */
+ if (!kp->kp_valid ||
+ wg_birthdate_has_expired_opt (kp->kp_birthdate, REJECT_AFTER_TIME,
+ time) ||
+ kp->kp_ctr.c_recv >= REJECT_AFTER_MESSAGES ||
+ ((*nonce = noise_counter_send (&kp->kp_ctr)) > REJECT_AFTER_MESSAGES))
+ goto error;
+
+ /* We encrypt into the same buffer, so the caller must ensure that buf
+ * has NOISE_AUTHTAG_LEN bytes to store the MAC. The nonce and index
+ * are passed back out to the caller through the provided data pointer. */
+ *r_idx = kp->kp_remote_index;
+
+ clib_memset (iv, 0, 4);
+ clib_memcpy (iv + 4, nonce, sizeof (*nonce));
+
+ /* get a frame for this op if we don't yet have one or it's full */
+ if (NULL == *async_frame || vnet_crypto_async_frame_is_full (*async_frame))
+ {
+ *async_frame = vnet_crypto_async_get_frame (
+ vm, VNET_CRYPTO_OP_CHACHA20_POLY1305_TAG16_AAD0_ENC);
+ if (PREDICT_FALSE (NULL == *async_frame))
+ goto error;
+ /* Save the frame to the list we'll submit at the end */
+ vec_add1 (ptd->async_frames, *async_frame);
+ }
+
+ if (b != lb)
+ flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
+
+ tag = vlib_buffer_get_tail (lb) - NOISE_AUTHTAG_LEN;
+
+ /* this always succeeds because we know the frame is not full */
+ wg_output_tun_add_to_frame (vm, *async_frame, kp->kp_send_index, payload_len,
+ payload - b->data, bi, async_next, iv, tag,
+ flag);
+
+ /* If our values are still within tolerances, but we are approaching
+ * the tolerances, we notify the caller with ESTALE that they should
+ * establish a new keypair. The current keypair can continue to be used
+ * until the tolerances are hit. We notify if:
+ * - our send counter is valid and not less than REKEY_AFTER_MESSAGES
+ * - we're the initiator and our keypair is older than
+ * REKEY_AFTER_TIME seconds */
+ ret = SC_KEEP_KEY_FRESH;
+ if ((kp->kp_valid && *nonce >= REKEY_AFTER_MESSAGES) ||
+ (kp->kp_is_initiator && wg_birthdate_has_expired_opt (
+ kp->kp_birthdate, REKEY_AFTER_TIME, time)))
+ goto error;
+
+ ret = SC_OK;
+error:
+ return ret;
+}
+
+static_always_inline void
+wg_calc_checksum (vlib_main_t *vm, vlib_buffer_t *b)
+{
+ int bogus = 0;
+ u8 ip_ver_out = (*((u8 *) vlib_buffer_get_current (b)) >> 4);
+
+ /* IPv6 UDP checksum is mandatory */
+ if (ip_ver_out == 6)
+ {
+ ip6_header_t *ip6 =
+ (ip6_header_t *) ((u8 *) vlib_buffer_get_current (b));
+ udp_header_t *udp = ip6_next_header (ip6);
+ udp->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ip6, &bogus);
+ }
+}
+
+/* is_ip4 - inner header flag */
+always_inline uword
+wg_output_tun_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame, u8 is_ip4, u16 async_next_node)
+{
wg_main_t *wmp = &wg_main;
+ wg_per_thread_data_t *ptd =
+ vec_elt_at_index (wmp->per_thread_data, vm->thread_index);
+ u32 *from = vlib_frame_vector_args (frame);
+ u32 n_left_from = frame->n_vectors;
+ ip4_udp_wg_header_t *hdr4_out = NULL;
+ ip6_udp_wg_header_t *hdr6_out = NULL;
+ message_data_t *message_data_wg = NULL;
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+ vlib_buffer_t *lb;
+ vnet_crypto_op_t **crypto_ops;
+ u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
+ vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
+ u32 thread_index = vm->thread_index;
+ u16 n_sync = 0;
+ const u16 drop_next = WG_OUTPUT_NEXT_ERROR;
+ const u8 is_async = wg_op_mode_is_set_ASYNC ();
+ vnet_crypto_async_frame_t *async_frame = NULL;
+ u16 n_async = 0;
+ u16 noop_nexts[VLIB_FRAME_SIZE], *noop_next = noop_nexts, n_noop = 0;
+ u16 err = !0;
+ u32 sync_bi[VLIB_FRAME_SIZE];
+ u32 noop_bi[VLIB_FRAME_SIZE];
+
+ vlib_get_buffers (vm, from, bufs, n_left_from);
+ vec_reset_length (ptd->crypto_ops);
+ vec_reset_length (ptd->chained_crypto_ops);
+ vec_reset_length (ptd->chunks);
+ vec_reset_length (ptd->async_frames);
+
wg_peer_t *peer = NULL;
+ u32 adj_index = 0;
+ u32 last_adj_index = ~0;
+ index_t peeri = INDEX_INVALID;
+
+ f64 time = clib_time_now (&vm->clib_time) + vm->time_offset;
while (n_left_from > 0)
{
- ip4_udp_header_t *hdr = vlib_buffer_get_current (b[0]);
- u8 *plain_data = (vlib_buffer_get_current (b[0]) +
- sizeof (ip4_udp_header_t));
- u16 plain_data_len =
- clib_net_to_host_u16 (((ip4_header_t *) plain_data)->length);
- index_t peeri;
+ u8 iph_offset = 0;
+ u8 is_ip4_out = 1;
+ u8 *plain_data;
+ u16 plain_data_len;
+ u16 plain_data_len_total;
+ u16 n_bufs;
+ u16 b_space_left_at_beginning;
+ u32 bi = from[b - bufs];
+
+ if (n_left_from > 2)
+ {
+ u8 *p;
+ vlib_prefetch_buffer_header (b[2], LOAD);
+ p = vlib_buffer_get_current (b[1]);
+ CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (vlib_buffer_get_tail (b[1]), CLIB_CACHE_LINE_BYTES,
+ LOAD);
+ }
- next[0] = WG_OUTPUT_NEXT_ERROR;
- peeri =
- wg_peer_get_by_adj_index (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
- peer = wg_peer_get (peeri);
+ noop_next[0] = WG_OUTPUT_NEXT_ERROR;
+ err = WG_OUTPUT_NEXT_ERROR;
- if (!peer || peer->is_dead)
+ adj_index = vnet_buffer (b[0])->ip.adj_index[VLIB_TX];
+
+ if (PREDICT_FALSE (last_adj_index != adj_index))
+ {
+ peeri = wg_peer_get_by_adj_index (adj_index);
+ if (peeri == INDEX_INVALID)
+ {
+ b[0]->error = node->errors[WG_OUTPUT_ERROR_PEER];
+ goto out;
+ }
+ peer = wg_peer_get (peeri);
+ }
+
+ if (!peer || wg_peer_is_dead (peer))
{
b[0]->error = node->errors[WG_OUTPUT_ERROR_PEER];
goto out;
}
-
if (PREDICT_FALSE (~0 == peer->output_thread_index))
{
/* this is the first packet to use this peer, claim the peer
@@ -129,9 +511,10 @@ VLIB_NODE_FN (wg_output_tun_node) (vlib_main_t * vm,
wg_peer_assign_thread (thread_index));
}
- if (PREDICT_TRUE (thread_index != peer->output_thread_index))
+ if (PREDICT_FALSE (thread_index != peer->output_thread_index))
{
- next[0] = WG_OUTPUT_NEXT_HANDOFF;
+ noop_next[0] = WG_OUTPUT_NEXT_HANDOFF;
+ err = WG_OUTPUT_NEXT_HANDOFF;
goto next;
}
@@ -141,31 +524,119 @@ VLIB_NODE_FN (wg_output_tun_node) (vlib_main_t * vm,
b[0]->error = node->errors[WG_OUTPUT_ERROR_KEYPAIR];
goto out;
}
- size_t encrypted_packet_len = message_data_len (plain_data_len);
- /*
- * Ensure there is enough space to write the encrypted data
- * into the packet
- */
- if (PREDICT_FALSE (encrypted_packet_len >= WG_DEFAULT_DATA_SIZE) ||
- PREDICT_FALSE ((b[0]->current_data + encrypted_packet_len) >=
- vlib_buffer_get_default_data_size (vm)))
+ lb = b[0];
+ n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
+ if (n_bufs == 0)
{
- b[0]->error = node->errors[WG_OUTPUT_ERROR_TOO_BIG];
+ b[0]->error = node->errors[WG_OUTPUT_ERROR_NO_BUFFERS];
goto out;
}
- message_data_t *encrypted_packet =
- (message_data_t *) wmp->per_thread_data[thread_index].data;
+ if (n_bufs > 1)
+ {
+ /* Find last buffer in the chain */
+ while (lb->flags & VLIB_BUFFER_NEXT_PRESENT)
+ lb = vlib_get_buffer (vm, lb->next_buffer);
+ }
+
+ /* Ensure there is enough free space at the beginning of the first buffer
+ * to write the ethernet header (e.g. IPv6 VXLAN over IPv6 WireGuard will
+ * trigger this)
+ */
+ ASSERT ((signed) b[0]->current_data >=
+ (signed) -VLIB_BUFFER_PRE_DATA_SIZE);
+ b_space_left_at_beginning =
+ b[0]->current_data + VLIB_BUFFER_PRE_DATA_SIZE;
+ if (PREDICT_FALSE (b_space_left_at_beginning <
+ sizeof (ethernet_header_t)))
+ {
+ u32 size_diff =
+ sizeof (ethernet_header_t) - b_space_left_at_beginning;
+
+ /* Can only move the buffer when it is a single (unchained) buffer with enough free space */
+ if (lb == b[0] &&
+ vlib_buffer_space_left_at_end (vm, b[0]) >= size_diff)
+ {
+ vlib_buffer_move (vm, b[0],
+ b[0]->current_data + (signed) size_diff);
+ }
+ else
+ {
+ b[0]->error = node->errors[WG_OUTPUT_ERROR_NO_BUFFERS];
+ goto out;
+ }
+ }
+
+ /*
+ * Ensure there is enough free space at the end of the last buffer to
+ * write the auth tag */
+ if (PREDICT_FALSE (vlib_buffer_space_left_at_end (vm, lb) <
+ NOISE_AUTHTAG_LEN))
+ {
+ u32 tmp_bi = 0;
+ if (vlib_buffer_alloc (vm, &tmp_bi, 1) != 1)
+ {
+ b[0]->error = node->errors[WG_OUTPUT_ERROR_NO_BUFFERS];
+ goto out;
+ }
+ lb = vlib_buffer_chain_buffer (vm, lb, tmp_bi);
+ }
+
+ iph_offset = vnet_buffer (b[0])->ip.save_rewrite_length;
+ plain_data = vlib_buffer_get_current (b[0]) + iph_offset;
+ plain_data_len = b[0]->current_length - iph_offset;
+ plain_data_len_total =
+ vlib_buffer_length_in_chain (vm, b[0]) - iph_offset;
+ size_t encrypted_packet_len = message_data_len (plain_data_len_total);
+ vlib_buffer_chain_increase_length (b[0], lb, NOISE_AUTHTAG_LEN);
+ u8 *iv_data = b[0]->pre_data;
+
+ is_ip4_out = ip46_address_is_ip4 (&peer->src.addr);
+ if (is_ip4_out)
+ {
+ hdr4_out = vlib_buffer_get_current (b[0]);
+ message_data_wg = &hdr4_out->wg;
+ }
+ else
+ {
+ hdr6_out = vlib_buffer_get_current (b[0]);
+ message_data_wg = &hdr6_out->wg;
+ }
+
+ if (PREDICT_FALSE (last_adj_index != adj_index))
+ {
+ wg_timers_any_authenticated_packet_sent_opt (peer, time);
+ wg_timers_data_sent_opt (peer, time);
+ wg_timers_any_authenticated_packet_traversal (peer);
+ last_adj_index = adj_index;
+ }
+
+ /* Here we are sure that we can send the packet to the next node */
+ next[0] = WG_OUTPUT_NEXT_INTERFACE_OUTPUT;
+
+ if (lb != b[0])
+ crypto_ops = &ptd->chained_crypto_ops;
+ else
+ crypto_ops = &ptd->crypto_ops;
enum noise_state_crypt state;
- state =
- noise_remote_encrypt (vm,
- &peer->remote,
- &encrypted_packet->receiver_index,
- &encrypted_packet->counter, plain_data,
- plain_data_len,
- encrypted_packet->encrypted_data);
+
+ if (is_async)
+ {
+ state = wg_add_to_async_frame (
+ vm, ptd, &async_frame, b[0], lb, plain_data, plain_data_len_total,
+ bi, next[0], async_next_node, &peer->remote,
+ &message_data_wg->receiver_index, &message_data_wg->counter,
+ iv_data, time);
+ }
+ else
+ {
+ state = wg_output_tun_process (
+ vm, ptd, b[0], lb, crypto_ops, &peer->remote,
+ &message_data_wg->receiver_index, &message_data_wg->counter,
+ plain_data, plain_data_len, plain_data, n_sync, iv_data, time);
+ }
if (PREDICT_FALSE (state == SC_KEEP_KEY_FRESH))
{
@@ -173,27 +644,31 @@ VLIB_NODE_FN (wg_output_tun_node) (vlib_main_t * vm,
}
else if (PREDICT_FALSE (state == SC_FAILED))
{
- //TODO: Maybe wrong
+ // TODO: Maybe wrong
wg_send_handshake_from_mt (peeri, false);
+ wg_peer_update_flags (peeri, WG_PEER_ESTABLISHED, false);
+ noop_next[0] = WG_OUTPUT_NEXT_ERROR;
goto out;
}
- /* Here we are sure that can send packet to next node */
- next[0] = WG_OUTPUT_NEXT_INTERFACE_OUTPUT;
- encrypted_packet->header.type = MESSAGE_DATA;
-
- clib_memcpy (plain_data, (u8 *) encrypted_packet, encrypted_packet_len);
+ err = WG_OUTPUT_NEXT_INTERFACE_OUTPUT;
- hdr->udp.length = clib_host_to_net_u16 (encrypted_packet_len +
- sizeof (udp_header_t));
- b[0]->current_length = (encrypted_packet_len +
- sizeof (ip4_header_t) + sizeof (udp_header_t));
- ip4_header_set_len_w_chksum
- (&hdr->ip4, clib_host_to_net_u16 (b[0]->current_length));
-
- wg_timers_any_authenticated_packet_sent (peer);
- wg_timers_data_sent (peer);
- wg_timers_any_authenticated_packet_traversal (peer);
+ if (is_ip4_out)
+ {
+ hdr4_out->wg.header.type = MESSAGE_DATA;
+ hdr4_out->udp.length = clib_host_to_net_u16 (encrypted_packet_len +
+ sizeof (udp_header_t));
+ ip4_header_set_len_w_chksum (
+ &hdr4_out->ip4, clib_host_to_net_u16 (encrypted_packet_len +
+ sizeof (ip4_udp_header_t)));
+ }
+ else
+ {
+ hdr6_out->wg.header.type = MESSAGE_DATA;
+ hdr6_out->ip6.payload_length = hdr6_out->udp.length =
+ clib_host_to_net_u16 (encrypted_packet_len +
+ sizeof (udp_header_t));
+ }
out:
if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
@@ -201,23 +676,262 @@ VLIB_NODE_FN (wg_output_tun_node) (vlib_main_t * vm,
{
wg_output_tun_trace_t *t =
vlib_add_trace (vm, node, b[0], sizeof (*t));
- t->hdr = *hdr;
+
t->peer = peeri;
+ t->is_ip4 = is_ip4_out;
+ if (hdr4_out)
+ clib_memcpy (t->header, hdr4_out, sizeof (ip4_udp_header_t));
+ else if (hdr6_out)
+ clib_memcpy (t->header, hdr6_out, sizeof (ip6_udp_header_t));
}
+
next:
+ if (PREDICT_FALSE (err != WG_OUTPUT_NEXT_INTERFACE_OUTPUT))
+ {
+ noop_bi[n_noop] = bi;
+ n_noop++;
+ noop_next++;
+ goto next_left;
+ }
+ if (!is_async)
+ {
+ sync_bi[n_sync] = bi;
+ sync_bufs[n_sync] = b[0];
+ n_sync += 1;
+ next += 1;
+ }
+ else
+ {
+ n_async++;
+ }
+ next_left:
n_left_from -= 1;
- next += 1;
b += 1;
}
+ if (n_sync)
+ {
+ /* wg-output-process-ops */
+ wg_output_process_ops (vm, node, ptd->crypto_ops, sync_bufs, nexts,
+ drop_next);
+ wg_output_process_chained_ops (vm, node, ptd->chained_crypto_ops,
+ sync_bufs, nexts, ptd->chunks, drop_next);
+
+ int n_left_from_sync_bufs = n_sync;
+ while (n_left_from_sync_bufs > 0)
+ {
+ n_left_from_sync_bufs--;
+ wg_calc_checksum (vm, sync_bufs[n_left_from_sync_bufs]);
+ }
+
+ vlib_buffer_enqueue_to_next (vm, node, sync_bi, nexts, n_sync);
+ }
+ if (n_async)
+ {
+ /* submit all of the open frames */
+ vnet_crypto_async_frame_t **async_frame;
+
+ vec_foreach (async_frame, ptd->async_frames)
+ {
+ if (PREDICT_FALSE (
+ vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0))
+ {
+ u32 n_drop = (*async_frame)->n_elts;
+ u32 *bi = (*async_frame)->buffer_indices;
+ u16 index = n_noop;
+ while (n_drop--)
+ {
+ noop_bi[index] = bi[0];
+ vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
+ noop_nexts[index] = drop_next;
+ b->error = node->errors[WG_OUTPUT_ERROR_CRYPTO_ENGINE_ERROR];
+ bi++;
+ index++;
+ }
+ n_noop += (*async_frame)->n_elts;
+
+ vnet_crypto_async_reset_frame (*async_frame);
+ vnet_crypto_async_free_frame (vm, *async_frame);
+ }
+ }
+ }
+ if (n_noop)
+ {
+ vlib_buffer_enqueue_to_next (vm, node, noop_bi, noop_nexts, n_noop);
+ }
+
+ return frame->n_vectors;
+}
+
+always_inline uword
+wg_output_tun_post (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
+{
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+ u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
+ u32 *from = vlib_frame_vector_args (frame);
+ u32 n_left = frame->n_vectors;
+
+ index_t peeri = ~0;
+
+ vlib_get_buffers (vm, from, b, n_left);
+
+ if (n_left >= 4)
+ {
+ vlib_prefetch_buffer_header (b[0], LOAD);
+ vlib_prefetch_buffer_header (b[1], LOAD);
+ vlib_prefetch_buffer_header (b[2], LOAD);
+ vlib_prefetch_buffer_header (b[3], LOAD);
+ }
+
+ while (n_left > 8)
+ {
+ vlib_prefetch_buffer_header (b[4], LOAD);
+ vlib_prefetch_buffer_header (b[5], LOAD);
+ vlib_prefetch_buffer_header (b[6], LOAD);
+ vlib_prefetch_buffer_header (b[7], LOAD);
+
+ next[0] = (wg_post_data (b[0]))->next_index;
+ next[1] = (wg_post_data (b[1]))->next_index;
+ next[2] = (wg_post_data (b[2]))->next_index;
+ next[3] = (wg_post_data (b[3]))->next_index;
+
+ wg_calc_checksum (vm, b[0]);
+ wg_calc_checksum (vm, b[1]);
+ wg_calc_checksum (vm, b[2]);
+ wg_calc_checksum (vm, b[3]);
+
+ if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
+ {
+ if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ wg_output_tun_post_trace_t *tr =
+ vlib_add_trace (vm, node, b[0], sizeof (*tr));
+ peeri = wg_peer_get_by_adj_index (
+ vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
+ tr->peer = peeri;
+ tr->next_index = next[0];
+ }
+ if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ wg_output_tun_post_trace_t *tr =
+ vlib_add_trace (vm, node, b[1], sizeof (*tr));
+ peeri = wg_peer_get_by_adj_index (
+ vnet_buffer (b[1])->ip.adj_index[VLIB_TX]);
+ tr->next_index = next[1];
+ }
+ if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ wg_output_tun_post_trace_t *tr =
+ vlib_add_trace (vm, node, b[2], sizeof (*tr));
+ peeri = wg_peer_get_by_adj_index (
+ vnet_buffer (b[2])->ip.adj_index[VLIB_TX]);
+ tr->next_index = next[2];
+ }
+ if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ wg_output_tun_post_trace_t *tr =
+ vlib_add_trace (vm, node, b[3], sizeof (*tr));
+ peeri = wg_peer_get_by_adj_index (
+ vnet_buffer (b[3])->ip.adj_index[VLIB_TX]);
+ tr->next_index = next[3];
+ }
+ }
+
+ b += 4;
+ next += 4;
+ n_left -= 4;
+ }
+
+ while (n_left > 0)
+ {
+ wg_calc_checksum (vm, b[0]);
+
+ next[0] = (wg_post_data (b[0]))->next_index;
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ wg_output_tun_post_trace_t *tr =
+ vlib_add_trace (vm, node, b[0], sizeof (*tr));
+ peeri = wg_peer_get_by_adj_index (
+ vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
+ tr->next_index = next[0];
+ }
+
+ b += 1;
+ next += 1;
+ n_left -= 1;
+ }
+
vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
return frame->n_vectors;
}
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (wg_output_tun_node) =
+VLIB_REGISTER_NODE (wg4_output_tun_post_node) = {
+ .name = "wg4-output-tun-post-node",
+ .vector_size = sizeof (u32),
+ .format_trace = format_wg_output_tun_post_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .sibling_of = "wg4-output-tun",
+ .n_errors = ARRAY_LEN (wg_output_error_strings),
+ .error_strings = wg_output_error_strings,
+};
+
+VLIB_REGISTER_NODE (wg6_output_tun_post_node) = {
+ .name = "wg6-output-tun-post-node",
+ .vector_size = sizeof (u32),
+ .format_trace = format_wg_output_tun_post_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .sibling_of = "wg6-output-tun",
+ .n_errors = ARRAY_LEN (wg_output_error_strings),
+ .error_strings = wg_output_error_strings,
+};
+
+VLIB_NODE_FN (wg4_output_tun_post_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
+{
+ return wg_output_tun_post (vm, node, from_frame);
+}
+
+VLIB_NODE_FN (wg6_output_tun_post_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
+{
+ return wg_output_tun_post (vm, node, from_frame);
+}
+
+VLIB_NODE_FN (wg4_output_tun_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+{
+ return wg_output_tun_inline (vm, node, frame, /* is_ip4 */ 1,
+ wg_encrypt_async_next.wg4_post_next);
+}
+
+VLIB_NODE_FN (wg6_output_tun_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+{
+ return wg_output_tun_inline (vm, node, frame, /* is_ip4 */ 0,
+ wg_encrypt_async_next.wg6_post_next);
+}
+
+VLIB_REGISTER_NODE (wg4_output_tun_node) =
+{
+ .name = "wg4-output-tun",
+ .vector_size = sizeof (u32),
+ .format_trace = format_wg_output_tun_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN (wg_output_error_strings),
+ .error_strings = wg_output_error_strings,
+ .n_next_nodes = WG_OUTPUT_N_NEXT,
+ .next_nodes = {
+ [WG_OUTPUT_NEXT_HANDOFF] = "wg4-output-tun-handoff",
+ [WG_OUTPUT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
+ [WG_OUTPUT_NEXT_ERROR] = "error-drop",
+ },
+};
+
+VLIB_REGISTER_NODE (wg6_output_tun_node) =
{
- .name = "wg-output-tun",
+ .name = "wg6-output-tun",
.vector_size = sizeof (u32),
.format_trace = format_wg_output_tun_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
@@ -225,12 +939,11 @@ VLIB_REGISTER_NODE (wg_output_tun_node) =
.error_strings = wg_output_error_strings,
.n_next_nodes = WG_OUTPUT_N_NEXT,
.next_nodes = {
- [WG_OUTPUT_NEXT_HANDOFF] = "wg-output-tun-handoff",
+ [WG_OUTPUT_NEXT_HANDOFF] = "wg6-output-tun-handoff",
[WG_OUTPUT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
[WG_OUTPUT_NEXT_ERROR] = "error-drop",
},
};
-/* *INDENT-ON* */
/*
* fd.io coding-style-patch-verification: ON
diff --git a/src/plugins/wireguard/wireguard_peer.c b/src/plugins/wireguard/wireguard_peer.c
index f47019b110a..e71db86de0b 100644
--- a/src/plugins/wireguard/wireguard_peer.c
+++ b/src/plugins/wireguard/wireguard_peer.c
@@ -22,8 +22,8 @@
#include <wireguard/wireguard_key.h>
#include <wireguard/wireguard_send.h>
#include <wireguard/wireguard.h>
+#include <vnet/tunnel/tunnel_dp.h>
-static fib_source_t wg_fib_source;
wg_peer_t *wg_peer_pool;
index_t *wg_peer_by_adj_index;
@@ -36,48 +36,20 @@ wg_peer_endpoint_reset (wg_peer_endpoint_t * ep)
}
static void
-wg_peer_endpoint_init (wg_peer_endpoint_t * ep,
- const ip46_address_t * addr, u16 port)
+wg_peer_endpoint_init (wg_peer_endpoint_t *ep, const ip46_address_t *addr,
+ u16 port)
{
ip46_address_copy (&ep->addr, addr);
ep->port = port;
}
static void
-wg_peer_fib_flush (wg_peer_t * peer)
-{
- wg_peer_allowed_ip_t *allowed_ip;
-
- vec_foreach (allowed_ip, peer->allowed_ips)
- {
- fib_table_entry_delete_index (allowed_ip->fib_entry_index, wg_fib_source);
- allowed_ip->fib_entry_index = FIB_NODE_INDEX_INVALID;
- }
-}
-
-static void
-wg_peer_fib_populate (wg_peer_t * peer, u32 fib_index)
-{
- wg_peer_allowed_ip_t *allowed_ip;
-
- vec_foreach (allowed_ip, peer->allowed_ips)
- {
- allowed_ip->fib_entry_index =
- fib_table_entry_path_add (fib_index,
- &allowed_ip->prefix,
- wg_fib_source,
- FIB_ENTRY_FLAG_NONE,
- fib_proto_to_dpo (allowed_ip->
- prefix.fp_proto),
- &peer->dst.addr, peer->wg_sw_if_index, ~0, 1,
- NULL, FIB_ROUTE_PATH_FLAG_NONE);
- }
-}
-
-static void
wg_peer_clear (vlib_main_t * vm, wg_peer_t * peer)
{
+ index_t perri = peer - wg_peer_pool;
wg_timers_stop (peer);
+ wg_peer_update_flags (perri, WG_PEER_ESTABLISHED, false);
+ wg_peer_update_flags (perri, WG_PEER_STATUS_DEAD, true);
for (int i = 0; i < WG_N_TIMERS; i++)
{
peer->timers[i] = ~0;
@@ -91,16 +63,16 @@ wg_peer_clear (vlib_main_t * vm, wg_peer_t * peer)
wg_peer_endpoint_reset (&peer->src);
wg_peer_endpoint_reset (&peer->dst);
- if (INDEX_INVALID != peer->adj_index)
+ adj_index_t *adj_index;
+ vec_foreach (adj_index, peer->adj_indices)
{
- adj_unlock (peer->adj_index);
- wg_peer_by_adj_index[peer->adj_index] = INDEX_INVALID;
- }
- wg_peer_fib_flush (peer);
+ wg_peer_by_adj_index[*adj_index] = INDEX_INVALID;
+ if (adj_is_valid (*adj_index))
+ adj_midchain_delegate_unstack (*adj_index);
+ }
peer->input_thread_index = ~0;
peer->output_thread_index = ~0;
- peer->adj_index = INDEX_INVALID;
peer->timer_wheel = 0;
peer->persistent_keepalive_interval = 0;
peer->timer_handshake_attempts = 0;
@@ -111,107 +83,251 @@ wg_peer_clear (vlib_main_t * vm, wg_peer_t * peer)
peer->new_handshake_interval_tick = 0;
peer->rehandshake_interval_tick = 0;
peer->timer_need_another_keepalive = false;
- peer->is_dead = true;
+ peer->handshake_is_sent = false;
+ vec_free (peer->rewrite);
vec_free (peer->allowed_ips);
+ vec_free (peer->adj_indices);
}
static void
wg_peer_init (vlib_main_t * vm, wg_peer_t * peer)
{
- peer->adj_index = INDEX_INVALID;
+ peer->api_client_by_client_index = hash_create (0, sizeof (u32));
+ peer->api_clients = NULL;
wg_peer_clear (vm, peer);
}
-static u8 *
-wg_peer_build_rewrite (const wg_peer_t * peer)
-{
- // v4 only for now
- ip4_udp_header_t *hdr;
- u8 *rewrite = NULL;
-
- vec_validate (rewrite, sizeof (*hdr) - 1);
- hdr = (ip4_udp_header_t *) rewrite;
-
- hdr->ip4.ip_version_and_header_length = 0x45;
- hdr->ip4.ttl = 64;
- hdr->ip4.src_address = peer->src.addr.ip4;
- hdr->ip4.dst_address = peer->dst.addr.ip4;
- hdr->ip4.protocol = IP_PROTOCOL_UDP;
- hdr->ip4.checksum = ip4_header_checksum (&hdr->ip4);
-
- hdr->udp.src_port = clib_host_to_net_u16 (peer->src.port);
- hdr->udp.dst_port = clib_host_to_net_u16 (peer->dst.port);
- hdr->udp.checksum = 0;
-
- return (rewrite);
-}
-
static void
-wg_peer_adj_stack (wg_peer_t * peer)
+wg_peer_adj_stack (wg_peer_t *peer, adj_index_t ai)
{
ip_adjacency_t *adj;
u32 sw_if_index;
wg_if_t *wgi;
+ fib_protocol_t fib_proto;
- adj = adj_get (peer->adj_index);
+ if (!adj_is_valid (ai))
+ return;
+
+ adj = adj_get (ai);
sw_if_index = adj->rewrite_header.sw_if_index;
+ u8 is_ip4 = ip46_address_is_ip4 (&peer->src.addr);
+ fib_proto = is_ip4 ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6;
wgi = wg_if_get (wg_if_find_by_sw_if_index (sw_if_index));
if (!wgi)
return;
- if (!vnet_sw_interface_is_admin_up (vnet_get_main (), wgi->sw_if_index))
+ if (!vnet_sw_interface_is_admin_up (vnet_get_main (), wgi->sw_if_index) ||
+ !wg_peer_can_send (peer))
{
- adj_midchain_delegate_unstack (peer->adj_index);
+ adj_midchain_delegate_unstack (ai);
}
else
{
- /* *INDENT-OFF* */
fib_prefix_t dst = {
- .fp_len = 32,
- .fp_proto = FIB_PROTOCOL_IP4,
- .fp_addr = peer->dst.addr,
+ .fp_len = is_ip4 ? 32 : 128,
+ .fp_proto = fib_proto,
+ .fp_addr = peer->dst.addr,
};
- /* *INDENT-ON* */
u32 fib_index;
- fib_index = fib_table_find (FIB_PROTOCOL_IP4, peer->table_id);
+ fib_index = fib_table_find (fib_proto, peer->table_id);
+
+ adj_midchain_delegate_stack (ai, fib_index, &dst);
+ }
+}
+
+static void
+wg_peer_adj_reset_stacking (adj_index_t ai)
+{
+ adj_midchain_delegate_remove (ai);
+}
+
+static void
+wg_peer_66_fixup (vlib_main_t *vm, const ip_adjacency_t *adj, vlib_buffer_t *b,
+ const void *data)
+{
+ u8 iph_offset = 0;
+ ip6_header_t *ip6_out;
+ ip6_header_t *ip6_in;
+
+ /* Must set locally originated otherwise we're not allowed to
+ fragment the packet later */
+ b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
+
+ ip6_out = vlib_buffer_get_current (b);
+ iph_offset = vnet_buffer (b)->ip.save_rewrite_length;
+ ip6_in = vlib_buffer_get_current (b) + iph_offset;
+
+ ip6_out->ip_version_traffic_class_and_flow_label =
+ ip6_in->ip_version_traffic_class_and_flow_label;
+}
+
+static void
+wg_peer_46_fixup (vlib_main_t *vm, const ip_adjacency_t *adj, vlib_buffer_t *b,
+ const void *data)
+{
+ u8 iph_offset = 0;
+ ip6_header_t *ip6_out;
+ ip4_header_t *ip4_in;
+
+ /* Must set locally originated otherwise we're not allowed to
+ fragment the packet later */
+ b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
+
+ ip6_out = vlib_buffer_get_current (b);
+ iph_offset = vnet_buffer (b)->ip.save_rewrite_length;
+ ip4_in = vlib_buffer_get_current (b) + iph_offset;
+
+ u32 vtcfl = 0x6 << 28;
+ vtcfl |= ip4_in->tos << 20;
+ vtcfl |= vnet_buffer (b)->ip.flow_hash & 0x000fffff;
- adj_midchain_delegate_stack (peer->adj_index, fib_index, &dst);
+ ip6_out->ip_version_traffic_class_and_flow_label =
+ clib_host_to_net_u32 (vtcfl);
+}
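wg_peer_46_fixup() handles IPv4 payloads entering an IPv6 tunnel: it rebuilds the first 32-bit word of the outer IPv6 header from the inner IPv4 ToS and the buffer's flow hash. The layout being assembled (host byte order, before the final clib_host_to_net_u32), for reference:

/*
 *  31         28 27          20 19                            0
 * +-------------+--------------+-------------------------------+
 * | version = 6 |  inner ToS   |   flow hash & 0x000fffff      |
 * +-------------+--------------+-------------------------------+
 *                traffic class           flow label
 */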
+
+static adj_midchain_fixup_t
+wg_peer_get_fixup (wg_peer_t *peer, vnet_link_t lt)
+{
+ if (!ip46_address_is_ip4 (&peer->dst.addr))
+ {
+ if (lt == VNET_LINK_IP4)
+ return (wg_peer_46_fixup);
+ if (lt == VNET_LINK_IP6)
+ return (wg_peer_66_fixup);
}
+ return (NULL);
+}
+
+static void
+wg_peer_disable (vlib_main_t *vm, wg_peer_t *peer)
+{
+ index_t peeri = peer - wg_peer_pool;
+
+ wg_timers_stop (peer);
+ wg_peer_update_flags (peeri, WG_PEER_ESTABLISHED, false);
+
+ for (int i = 0; i < WG_N_TIMERS; i++)
+ {
+ peer->timers[i] = ~0;
+ peer->timers_dispatched[i] = 0;
+ }
+ peer->timer_handshake_attempts = 0;
+
+ peer->last_sent_handshake = vlib_time_now (vm) - (REKEY_TIMEOUT + 1);
+ peer->last_sent_packet = 0;
+ peer->last_received_packet = 0;
+ peer->session_derived = 0;
+ peer->rehandshake_started = 0;
+
+ peer->new_handshake_interval_tick = 0;
+ peer->rehandshake_interval_tick = 0;
+
+ peer->timer_need_another_keepalive = false;
+
+ noise_remote_clear (vm, &peer->remote);
+}
+
+static void
+wg_peer_enable (vlib_main_t *vm, wg_peer_t *peer)
+{
+ index_t peeri = peer - wg_peer_pool;
+ wg_if_t *wg_if;
+ u8 public_key[NOISE_PUBLIC_KEY_LEN];
+
+ wg_if = wg_if_get (wg_if_find_by_sw_if_index (peer->wg_sw_if_index));
+ clib_memcpy (public_key, peer->remote.r_public, NOISE_PUBLIC_KEY_LEN);
+
+ noise_remote_init (vm, &peer->remote, peeri, public_key, wg_if->local_idx);
+
+ wg_timers_send_first_handshake (peer);
}
walk_rc_t
-wg_peer_if_admin_state_change (wg_if_t * wgi, index_t peeri, void *data)
+wg_peer_if_admin_state_change (index_t peeri, void *data)
{
- wg_peer_adj_stack (wg_peer_get (peeri));
+ wg_peer_t *peer;
+ adj_index_t *adj_index;
+ vlib_main_t *vm = vlib_get_main ();
+
+ peer = wg_peer_get (peeri);
+ vec_foreach (adj_index, peer->adj_indices)
+ {
+ wg_peer_adj_stack (peer, *adj_index);
+ }
+
+ if (vnet_sw_interface_is_admin_up (vnet_get_main (), peer->wg_sw_if_index))
+ {
+ wg_peer_enable (vm, peer);
+ }
+ else
+ {
+ wg_peer_disable (vm, peer);
+ }
return (WALK_CONTINUE);
}
walk_rc_t
-wg_peer_if_table_change (wg_if_t * wgi, index_t peeri, void *data)
+wg_peer_if_adj_change (index_t peeri, void *data)
{
- wg_peer_table_bind_ctx_t *ctx = data;
+ adj_index_t *adj_index = data;
+ adj_midchain_fixup_t fixup;
+ ip_adjacency_t *adj;
wg_peer_t *peer;
+ fib_prefix_t *allowed_ip;
+
+ adj = adj_get (*adj_index);
peer = wg_peer_get (peeri);
+ vec_foreach (allowed_ip, peer->allowed_ips)
+ {
+ if (fib_prefix_is_cover_addr_46 (allowed_ip,
+ &adj->sub_type.nbr.next_hop))
+ {
+ vec_add1 (peer->adj_indices, *adj_index);
+
+ vec_validate_init_empty (wg_peer_by_adj_index, *adj_index,
+ INDEX_INVALID);
+ wg_peer_by_adj_index[*adj_index] = peeri;
+
+ fixup = wg_peer_get_fixup (peer, adj_get_link_type (*adj_index));
+ adj_nbr_midchain_update_rewrite (*adj_index, fixup, NULL,
+ ADJ_FLAG_MIDCHAIN_IP_STACK,
+ vec_dup (peer->rewrite));
+
+ wg_peer_adj_stack (peer, *adj_index);
+ return (WALK_STOP);
+ }
+ }
- wg_peer_fib_flush (peer);
- wg_peer_fib_populate (peer, ctx->new_fib_index);
+ return (WALK_CONTINUE);
+}
+adj_walk_rc_t
+wg_peer_adj_walk (adj_index_t ai, void *data)
+{
+ return wg_peer_if_adj_change ((*(index_t *) (data)), &ai) == WALK_CONTINUE ?
+ ADJ_WALK_RC_CONTINUE :
+ ADJ_WALK_RC_STOP;
+}
+
+walk_rc_t
+wg_peer_if_delete (index_t peeri, void *data)
+{
+ wg_peer_remove (peeri);
return (WALK_CONTINUE);
}
static int
-wg_peer_fill (vlib_main_t * vm, wg_peer_t * peer,
- u32 table_id,
- const ip46_address_t * dst,
- u16 port,
+wg_peer_fill (vlib_main_t *vm, wg_peer_t *peer, u32 table_id,
+ const ip46_address_t *dst, u16 port,
u16 persistent_keepalive_interval,
- const fib_prefix_t * allowed_ips, u32 wg_sw_if_index)
+ const fib_prefix_t *allowed_ips, u32 wg_sw_if_index)
{
+ index_t perri = peer - wg_peer_pool;
wg_peer_endpoint_init (&peer->dst, dst, port);
peer->table_id = table_id;
@@ -219,7 +335,7 @@ wg_peer_fill (vlib_main_t * vm, wg_peer_t * peer,
peer->timer_wheel = &wg_main.timer_wheel;
peer->persistent_keepalive_interval = persistent_keepalive_interval;
peer->last_sent_handshake = vlib_time_now (vm) - (REKEY_TIMEOUT + 1);
- peer->is_dead = false;
+ wg_peer_update_flags (perri, WG_PEER_STATUS_DEAD, false);
const wg_if_t *wgi = wg_if_get (wg_if_find_by_sw_if_index (wg_sw_if_index));
@@ -229,53 +345,102 @@ wg_peer_fill (vlib_main_t * vm, wg_peer_t * peer,
ip_address_to_46 (&wgi->src_ip, &peer->src.addr);
peer->src.port = wgi->port;
- /*
- * and an adjacency for the endpoint address in the overlay
- * on the wg interface
- */
- peer->rewrite = wg_peer_build_rewrite (peer);
-
- peer->adj_index = adj_nbr_add_or_lock (FIB_PROTOCOL_IP4,
- VNET_LINK_IP4,
- &peer->dst.addr, wgi->sw_if_index);
-
- vec_validate_init_empty (wg_peer_by_adj_index,
- peer->adj_index, INDEX_INVALID);
- wg_peer_by_adj_index[peer->adj_index] = peer - wg_peer_pool;
-
- adj_nbr_midchain_update_rewrite (peer->adj_index,
- NULL,
- NULL,
- ADJ_FLAG_MIDCHAIN_IP_STACK,
- vec_dup (peer->rewrite));
- wg_peer_adj_stack (peer);
-
- /*
- * add a route in the overlay to each of the allowed-ips
- */
- u32 ii;
+ u8 is_ip4 = ip46_address_is_ip4 (&peer->dst.addr);
+ peer->rewrite = wg_build_rewrite (&peer->src.addr, peer->src.port,
+ &peer->dst.addr, peer->dst.port, is_ip4);
+ u32 ii;
vec_validate (peer->allowed_ips, vec_len (allowed_ips) - 1);
-
vec_foreach_index (ii, allowed_ips)
{
- peer->allowed_ips[ii].prefix = allowed_ips[ii];
+ peer->allowed_ips[ii] = allowed_ips[ii];
}
- wg_peer_fib_populate (peer,
- fib_table_get_index_for_sw_if_index
- (FIB_PROTOCOL_IP4, peer->wg_sw_if_index));
-
+ fib_protocol_t proto;
+ FOR_EACH_FIB_IP_PROTOCOL (proto)
+ {
+ adj_nbr_walk (wg_sw_if_index, proto, wg_peer_adj_walk, &perri);
+ }
return (0);
}
+void
+wg_peer_update_flags (index_t peeri, wg_peer_flags flag, bool add_del)
+{
+ wg_peer_t *peer = wg_peer_get (peeri);
+ if ((add_del && (peer->flags & flag)) || (!add_del && !(peer->flags & flag)))
+ {
+ return;
+ }
+
+ peer->flags ^= flag;
+ wg_api_peer_event (peeri, peer->flags);
+}
+
+void
+wg_peer_update_endpoint (index_t peeri, const ip46_address_t *addr, u16 port)
+{
+ wg_peer_t *peer = wg_peer_get (peeri);
+
+ if (ip46_address_is_equal (&peer->dst.addr, addr) && peer->dst.port == port)
+ return;
+
+ wg_peer_endpoint_init (&peer->dst, addr, port);
+
+ u8 is_ip4 = ip46_address_is_ip4 (&peer->dst.addr);
+ vec_free (peer->rewrite);
+ peer->rewrite = wg_build_rewrite (&peer->src.addr, peer->src.port,
+ &peer->dst.addr, peer->dst.port, is_ip4);
+
+ adj_index_t *adj_index;
+ vec_foreach (adj_index, peer->adj_indices)
+ {
+ if (adj_is_valid (*adj_index))
+ {
+ adj_midchain_fixup_t fixup =
+ wg_peer_get_fixup (peer, adj_get_link_type (*adj_index));
+ adj_nbr_midchain_update_rewrite (*adj_index, fixup, NULL,
+ ADJ_FLAG_MIDCHAIN_IP_STACK,
+ vec_dup (peer->rewrite));
+
+ wg_peer_adj_reset_stacking (*adj_index);
+ wg_peer_adj_stack (peer, *adj_index);
+ }
+ }
+}
+
+typedef struct wg_peer_upd_ep_args_t_
+{
+ index_t peeri;
+ ip46_address_t addr;
+ u16 port;
+} wg_peer_upd_ep_args_t;
+
+static void
+wg_peer_update_endpoint_thread_fn (wg_peer_upd_ep_args_t *args)
+{
+ wg_peer_update_endpoint (args->peeri, &args->addr, args->port);
+}
+
+void
+wg_peer_update_endpoint_from_mt (index_t peeri, const ip46_address_t *addr,
+ u16 port)
+{
+ wg_peer_upd_ep_args_t args = {
+ .peeri = peeri,
+ .port = port,
+ };
+
+ ip46_address_copy (&args.addr, addr);
+ vlib_rpc_call_main_thread (wg_peer_update_endpoint_thread_fn, (u8 *) &args,
+ sizeof (args));
+}
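
The roaming update has to run on the main thread, so the worker packs the new endpoint into a small argument struct and hands it over via vlib_rpc_call_main_thread, which copies the argument bytes. Below is a toy standalone sketch of that hand-off pattern; the dispatcher only imitates the copy-and-invoke behaviour and is not VPP's RPC machinery.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct
    {
      uint32_t peer_index;
      uint8_t addr[16]; /* room for an IPv4-mapped or IPv6 address */
      uint16_t port;
    } upd_ep_args_t;

    /* toy stand-in for the RPC call: the arguments are copied by value,
     * so the caller's stack frame may safely disappear afterwards */
    static void
    rpc_call_main_thread (void (*fn) (void *), const void *data, size_t len)
    {
      void *copy = malloc (len);
      assert (copy != NULL);
      memcpy (copy, data, len);
      fn (copy); /* in VPP this would run later, on the main thread */
      free (copy);
    }

    static void
    update_endpoint_handler (void *arg)
    {
      upd_ep_args_t *a = arg;
      printf ("main thread: peer %u roamed to port %u\n", a->peer_index,
              a->port);
    }

    static void
    update_endpoint_from_worker (uint32_t peer_index, const uint8_t addr[16],
                                 uint16_t port)
    {
      upd_ep_args_t args = { .peer_index = peer_index, .port = port };
      memcpy (args.addr, addr, sizeof (args.addr));
      rpc_call_main_thread (update_endpoint_handler, &args, sizeof (args));
    }

    int
    main (void)
    {
      uint8_t addr[16] = { 0 };
      update_endpoint_from_worker (3, addr, 51820);
      return 0;
    }
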
+
int
-wg_peer_add (u32 tun_sw_if_index,
- const u8 public_key[NOISE_PUBLIC_KEY_LEN],
- u32 table_id,
- const ip46_address_t * endpoint,
- const fib_prefix_t * allowed_ips,
- u16 port, u16 persistent_keepalive, u32 * peer_index)
+wg_peer_add (u32 tun_sw_if_index, const u8 public_key[NOISE_PUBLIC_KEY_LEN],
+ u32 table_id, const ip46_address_t *endpoint,
+ const fib_prefix_t *allowed_ips, u16 port,
+ u16 persistent_keepalive, u32 *peer_index)
{
wg_if_t *wg_if;
wg_peer_t *peer;
@@ -290,7 +455,6 @@ wg_peer_add (u32 tun_sw_if_index,
if (!wg_if)
return (VNET_API_ERROR_INVALID_SW_IF_INDEX);
- /* *INDENT-OFF* */
pool_foreach (peer, wg_peer_pool)
{
if (!memcmp (peer->remote.r_public, public_key, NOISE_PUBLIC_KEY_LEN))
@@ -298,12 +462,11 @@ wg_peer_add (u32 tun_sw_if_index,
return (VNET_API_ERROR_ENTRY_ALREADY_EXISTS);
}
}
- /* *INDENT-ON* */
if (pool_elts (wg_peer_pool) > MAX_PEERS)
return (VNET_API_ERROR_LIMIT_EXCEEDED);
- pool_get (wg_peer_pool, peer);
+ pool_get_zero (wg_peer_pool, peer);
wg_peer_init (vm, peer);
@@ -317,13 +480,13 @@ wg_peer_add (u32 tun_sw_if_index,
return (rv);
}
- noise_remote_init (&peer->remote, peer - wg_peer_pool, public_key,
+ noise_remote_init (vm, &peer->remote, peer - wg_peer_pool, public_key,
wg_if->local_idx);
cookie_maker_init (&peer->cookie_maker, public_key);
- if (peer->persistent_keepalive_interval != 0)
+ if (vnet_sw_interface_is_admin_up (vnet_get_main (), tun_sw_if_index))
{
- wg_send_keepalive (vm, peer);
+ wg_timers_send_first_handshake (peer);
}
*peer_index = peer - wg_peer_pool;
@@ -347,9 +510,6 @@ wg_peer_remove (index_t peeri)
wgi = wg_if_get (wg_if_find_by_sw_if_index (peer->wg_sw_if_index));
wg_if_peer_remove (wgi, peeri);
- vnet_feature_enable_disable ("ip4-output", "wg-output-tun",
- peer->wg_sw_if_index, 0, 0, 0);
-
noise_remote_clear (wmp->vlib_main, &peer->remote);
wg_peer_clear (wmp->vlib_main, peer);
pool_put (wg_peer_pool, peer);
@@ -362,13 +522,11 @@ wg_peer_walk (wg_peer_walk_cb_t fn, void *data)
{
index_t peeri;
- /* *INDENT-OFF* */
pool_foreach_index (peeri, wg_peer_pool)
{
if (WALK_STOP == fn(peeri, data))
return peeri;
}
- /* *INDENT-ON* */
return INDEX_INVALID;
}
@@ -377,8 +535,8 @@ format_wg_peer_endpoint (u8 * s, va_list * args)
{
wg_peer_endpoint_t *ep = va_arg (*args, wg_peer_endpoint_t *);
- s = format (s, "%U:%d",
- format_ip46_address, &ep->addr, IP46_TYPE_ANY, ep->port);
+ s = format (s, "%U:%d", format_ip46_address, &ep->addr, IP46_TYPE_ANY,
+ ep->port);
return (s);
}
@@ -387,48 +545,37 @@ u8 *
format_wg_peer (u8 * s, va_list * va)
{
index_t peeri = va_arg (*va, index_t);
- wg_peer_allowed_ip_t *allowed_ip;
+ fib_prefix_t *allowed_ip;
+ adj_index_t *adj_index;
u8 key[NOISE_KEY_LEN_BASE64];
wg_peer_t *peer;
peer = wg_peer_get (peeri);
key_to_base64 (peer->remote.r_public, NOISE_PUBLIC_KEY_LEN, key);
- s = format (s, "[%d] endpoint:[%U->%U] %U keep-alive:%d adj:%d",
- peeri,
- format_wg_peer_endpoint, &peer->src,
- format_wg_peer_endpoint, &peer->dst,
- format_vnet_sw_if_index_name, vnet_get_main (),
- peer->wg_sw_if_index,
- peer->persistent_keepalive_interval, peer->adj_index);
- s = format (s, "\n key:%=s %U",
- key, format_hex_bytes, peer->remote.r_public,
- NOISE_PUBLIC_KEY_LEN);
+ s = format (
+ s,
+ "[%d] endpoint:[%U->%U] %U keep-alive:%d flags: %d, api-clients count: %d",
+ peeri, format_wg_peer_endpoint, &peer->src, format_wg_peer_endpoint,
+ &peer->dst, format_vnet_sw_if_index_name, vnet_get_main (),
+ peer->wg_sw_if_index, peer->persistent_keepalive_interval, peer->flags,
+ pool_elts (peer->api_clients));
+ s = format (s, "\n adj:");
+ vec_foreach (adj_index, peer->adj_indices)
+ {
+ s = format (s, " %d", *adj_index);
+ }
+ s = format (s, "\n key:%=s %U", key, format_hex_bytes,
+ peer->remote.r_public, NOISE_PUBLIC_KEY_LEN);
s = format (s, "\n allowed-ips:");
vec_foreach (allowed_ip, peer->allowed_ips)
{
- s = format (s, " %U", format_fib_prefix, &allowed_ip->prefix);
+ s = format (s, " %U", format_fib_prefix, allowed_ip);
}
return s;
}
-static clib_error_t *
-wg_peer_module_init (vlib_main_t * vm)
-{
- /*
- * use a priority better than interface source, so that
- * if the same subnet is added to the wg interface and is
- * used as an allowed IP, then the wireguard soueced prefix
- * wins and traffic is routed to the endpoint rather than dropped
- */
- wg_fib_source = fib_source_allocate ("wireguard", 0x2, FIB_SOURCE_BH_API);
-
- return (NULL);
-}
-
-VLIB_INIT_FUNCTION (wg_peer_module_init);
-
/*
* fd.io coding-style-patch-verification: ON
*
diff --git a/src/plugins/wireguard/wireguard_peer.h b/src/plugins/wireguard/wireguard_peer.h
index b60c669ac0f..613c2640ad1 100644
--- a/src/plugins/wireguard/wireguard_peer.h
+++ b/src/plugins/wireguard/wireguard_peer.h
@@ -17,6 +17,8 @@
#ifndef __included_wg_peer_h__
#define __included_wg_peer_h__
+#include <vlibapi/api_helper_macros.h>
+
#include <vnet/ip/ip.h>
#include <wireguard/wireguard_cookie.h>
@@ -31,13 +33,28 @@ typedef struct ip4_udp_header_t_
udp_header_t udp;
} __clib_packed ip4_udp_header_t;
-u8 *format_ip4_udp_header (u8 * s, va_list * va);
+typedef struct ip4_udp_wg_header_t_
+{
+ ip4_header_t ip4;
+ udp_header_t udp;
+ message_data_t wg;
+} __clib_packed ip4_udp_wg_header_t;
+
+typedef struct ip6_udp_header_t_
+{
+ ip6_header_t ip6;
+ udp_header_t udp;
+} __clib_packed ip6_udp_header_t;
-typedef struct wg_peer_allowed_ip_t_
+typedef struct ip6_udp_wg_header_t_
{
- fib_prefix_t prefix;
- fib_node_index_t fib_entry_index;
-} wg_peer_allowed_ip_t;
+ ip6_header_t ip6;
+ udp_header_t udp;
+ message_data_t wg;
+} __clib_packed ip6_udp_wg_header_t;
+
+u8 *format_ip4_udp_header (u8 * s, va_list * va);
+u8 *format_ip6_udp_header (u8 *s, va_list *va);
typedef struct wg_peer_endpoint_t_
{
@@ -45,6 +62,12 @@ typedef struct wg_peer_endpoint_t_
u16 port;
} wg_peer_endpoint_t;
+typedef enum
+{
+ WG_PEER_STATUS_DEAD = 0x1,
+ WG_PEER_ESTABLISHED = 0x2,
+} wg_peer_flags;
+
typedef struct wg_peer
{
noise_remote_t remote;
@@ -57,17 +80,22 @@ typedef struct wg_peer
wg_peer_endpoint_t dst;
wg_peer_endpoint_t src;
u32 table_id;
- adj_index_t adj_index;
+ adj_index_t *adj_indices;
/* rewrite built from address information */
u8 *rewrite;
/* Vector of allowed-ips */
- wg_peer_allowed_ip_t *allowed_ips;
+ fib_prefix_t *allowed_ips;
/* The WG interface this peer is attached to */
u32 wg_sw_if_index;
+ /* API client registered for events */
+ vpe_client_registration_t *api_clients;
+ uword *api_client_by_client_index;
+ wg_peer_flags flags;
+
/* Timers */
tw_timer_wheel_16t_2w_512sl_t *timer_wheel;
u32 timers[WG_N_TIMERS];
@@ -88,7 +116,8 @@ typedef struct wg_peer
bool timer_need_another_keepalive;
- bool is_dead;
+  /* Is a handshake request to the main thread currently in flight? */
+ bool handshake_is_sent;
} wg_peer_t;
typedef struct wg_peer_table_bind_ctx_t_
@@ -111,9 +140,23 @@ index_t wg_peer_walk (wg_peer_walk_cb_t fn, void *data);
u8 *format_wg_peer (u8 * s, va_list * va);
-walk_rc_t wg_peer_if_admin_state_change (wg_if_t * wgi, index_t peeri,
- void *data);
-walk_rc_t wg_peer_if_table_change (wg_if_t * wgi, index_t peeri, void *data);
+walk_rc_t wg_peer_if_admin_state_change (index_t peeri, void *data);
+walk_rc_t wg_peer_if_delete (index_t peeri, void *data);
+walk_rc_t wg_peer_if_adj_change (index_t peeri, void *data);
+adj_walk_rc_t wg_peer_adj_walk (adj_index_t ai, void *data);
+
+void wg_api_peer_event (index_t peeri, wg_peer_flags flags);
+void wg_peer_update_flags (index_t peeri, wg_peer_flags flag, bool add_del);
+void wg_peer_update_endpoint (index_t peeri, const ip46_address_t *addr,
+ u16 port);
+void wg_peer_update_endpoint_from_mt (index_t peeri,
+ const ip46_address_t *addr, u16 port);
+
+static inline bool
+wg_peer_is_dead (wg_peer_t *peer)
+{
+ return peer && peer->flags & WG_PEER_STATUS_DEAD;
+}
/*
* Exposed for the data-plane
@@ -130,6 +173,8 @@ wg_peer_get (index_t peeri)
static inline index_t
wg_peer_get_by_adj_index (index_t ai)
{
+ if (ai >= vec_len (wg_peer_by_adj_index))
+ return INDEX_INVALID;
return (wg_peer_by_adj_index[ai]);
}
@@ -145,6 +190,29 @@ wg_peer_assign_thread (u32 thread_id)
1) : thread_id));
}
+static_always_inline bool
+fib_prefix_is_cover_addr_46 (const fib_prefix_t *p1, const ip46_address_t *ip)
+{
+ switch (p1->fp_proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ return (ip4_destination_matches_route (&ip4_main, &p1->fp_addr.ip4,
+ &ip->ip4, p1->fp_len) != 0);
+ case FIB_PROTOCOL_IP6:
+ return (ip6_destination_matches_route (&ip6_main, &p1->fp_addr.ip6,
+ &ip->ip6, p1->fp_len) != 0);
+ case FIB_PROTOCOL_MPLS:
+ break;
+ }
+ return (false);
+}
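
fib_prefix_is_cover_addr_46 answers one question: does this allowed-ip prefix cover the given address? It defers to the v4/v6 FIB match helpers. A self-contained IPv4-only equivalent of the same test is sketched below; the names and helpers here are illustrative, not VPP's.

    #include <arpa/inet.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* true if 'addr' falls inside 'prefix'/'len'; values in host byte order */
    static bool
    ip4_prefix_covers (uint32_t prefix, uint8_t len, uint32_t addr)
    {
      uint32_t mask = (len == 0) ? 0 : ~0u << (32 - len);
      return (prefix & mask) == (addr & mask);
    }

    int
    main (void)
    {
      uint32_t prefix = ntohl (inet_addr ("10.1.0.0"));
      uint32_t inside = ntohl (inet_addr ("10.1.2.3"));
      uint32_t outside = ntohl (inet_addr ("10.2.0.1"));

      printf ("%d %d\n", ip4_prefix_covers (prefix, 16, inside),  /* 1 */
              ip4_prefix_covers (prefix, 16, outside));           /* 0 */
      return 0;
    }
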
+
+static inline bool
+wg_peer_can_send (wg_peer_t *peer)
+{
+ return peer && peer->rewrite;
+}
+
#endif // __included_wg_peer_h__
/*
diff --git a/src/plugins/wireguard/wireguard_send.c b/src/plugins/wireguard/wireguard_send.c
index f492e05c175..41b2e7706a1 100755..100644
--- a/src/plugins/wireguard/wireguard_send.c
+++ b/src/plugins/wireguard/wireguard_send.c
@@ -22,11 +22,11 @@
#include <wireguard/wireguard_send.h>
static int
-ip46_enqueue_packet (vlib_main_t * vm, u32 bi0, int is_ip6)
+ip46_enqueue_packet (vlib_main_t *vm, u32 bi0, int is_ip4)
{
vlib_frame_t *f = 0;
u32 lookup_node_index =
- is_ip6 ? ip6_lookup_node.index : ip4_lookup_node.index;
+ is_ip4 ? ip4_lookup_node.index : ip6_lookup_node.index;
f = vlib_get_frame_to_node (vm, lookup_node_index);
/* f can not be NULL here - frame allocation failure causes panic */
@@ -41,25 +41,51 @@ ip46_enqueue_packet (vlib_main_t * vm, u32 bi0, int is_ip6)
}
static void
-wg_buffer_prepend_rewrite (vlib_buffer_t * b0, const wg_peer_t * peer)
+wg_buffer_prepend_rewrite (vlib_main_t *vm, vlib_buffer_t *b0,
+ const u8 *rewrite, u8 is_ip4)
{
- ip4_udp_header_t *hdr;
+ if (is_ip4)
+ {
+ ip4_udp_header_t *hdr4;
+
+ vlib_buffer_advance (b0, -sizeof (*hdr4));
+
+ hdr4 = vlib_buffer_get_current (b0);
+
+ /* copy only ip4 and udp header; wireguard header not needed */
+ clib_memcpy (hdr4, rewrite, sizeof (ip4_udp_header_t));
+
+ hdr4->udp.length =
+ clib_host_to_net_u16 (b0->current_length - sizeof (ip4_header_t));
+ ip4_header_set_len_w_chksum (&hdr4->ip4,
+ clib_host_to_net_u16 (b0->current_length));
+ }
+ else
+ {
+ ip6_udp_header_t *hdr6;
+
+ vlib_buffer_advance (b0, -sizeof (*hdr6));
- vlib_buffer_advance (b0, -sizeof (*hdr));
+ hdr6 = vlib_buffer_get_current (b0);
- hdr = vlib_buffer_get_current (b0);
- clib_memcpy (hdr, peer->rewrite, vec_len (peer->rewrite));
+ /* copy only ip6 and udp header; wireguard header not needed */
+ clib_memcpy (hdr6, rewrite, sizeof (ip6_udp_header_t));
- hdr->udp.length =
- clib_host_to_net_u16 (b0->current_length - sizeof (ip4_header_t));
- ip4_header_set_len_w_chksum (&hdr->ip4,
- clib_host_to_net_u16 (b0->current_length));
+ hdr6->ip6.payload_length = hdr6->udp.length =
+ clib_host_to_net_u16 (b0->current_length - sizeof (ip6_header_t));
+
+ /* IPv6 UDP checksum is mandatory */
+ int bogus = 0;
+ ip6_header_t *ip6_0 = &(hdr6->ip6);
+ hdr6->udp.checksum =
+ ip6_tcp_udp_icmp_compute_checksum (vm, b0, ip6_0, &bogus);
+ ASSERT (bogus == 0);
+ }
}
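
Once the precomputed rewrite is copied in front of the payload, only the length fields still depend on the packet, plus the UDP checksum for IPv6, where it is mandatory (IPv4 may leave it zero). The sketch below shows the IPv4 fix-up with a plain RFC 1071 checksum instead of VPP's incremental helper; the simplified struct layouts are assumptions of the sketch.

    #include <arpa/inet.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct ip4_hdr
    {
      uint8_t ver_ihl, tos;
      uint16_t tot_len, id, frag;
      uint8_t ttl, proto;
      uint16_t csum;
      uint32_t src, dst;
    } __attribute__ ((packed));

    struct udp_hdr
    {
      uint16_t sport, dport, len, csum;
    } __attribute__ ((packed));

    /* textbook ones-complement sum over the 20-byte IPv4 header */
    static uint16_t
    ip4_checksum (const void *hdr, size_t len)
    {
      const uint16_t *p = hdr;
      uint32_t sum = 0;
      for (; len > 1; len -= 2)
        sum += *p++;
      while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
      return (uint16_t) ~sum;
    }

    /* patch the per-packet fields after the template has been prepended
     * to 'payload_len' bytes of WireGuard data */
    static void
    fixup_ip4_udp (struct ip4_hdr *ip4, struct udp_hdr *udp,
                   uint16_t payload_len)
    {
      uint16_t udp_len = sizeof (*udp) + payload_len;

      udp->len = htons (udp_len);
      ip4->tot_len = htons (sizeof (*ip4) + udp_len);
      ip4->csum = 0;
      ip4->csum = ip4_checksum (ip4, sizeof (*ip4));
    }

    int
    main (void)
    {
      struct ip4_hdr ip4 = { .ver_ihl = 0x45, .ttl = 64, .proto = 17 };
      struct udp_hdr udp = { .sport = htons (51820), .dport = htons (51820) };

      fixup_ip4_udp (&ip4, &udp, 148); /* e.g. a handshake initiation */
      printf ("tot_len=%u udp_len=%u\n", ntohs (ip4.tot_len), ntohs (udp.len));
      return 0;
    }
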
static bool
-wg_create_buffer (vlib_main_t * vm,
- const wg_peer_t * peer,
- const u8 * packet, u32 packet_len, u32 * bi)
+wg_create_buffer (vlib_main_t *vm, const u8 *rewrite, const u8 *packet,
+ u32 packet_len, u32 *bi, u8 is_ip4)
{
u32 n_buf0 = 0;
vlib_buffer_t *b0;
@@ -75,23 +101,75 @@ wg_create_buffer (vlib_main_t * vm,
b0->current_length = packet_len;
- wg_buffer_prepend_rewrite (b0, peer);
+ wg_buffer_prepend_rewrite (vm, b0, rewrite, is_ip4);
return true;
}
+u8 *
+wg_build_rewrite (ip46_address_t *src_addr, u16 src_port,
+ ip46_address_t *dst_addr, u16 dst_port, u8 is_ip4)
+{
+ if (ip46_address_is_zero (dst_addr) || 0 == dst_port)
+ return NULL;
+
+ u8 *rewrite = NULL;
+ if (is_ip4)
+ {
+ ip4_udp_header_t *hdr;
+
+ /* reserve space for ip4, udp and wireguard headers */
+ vec_validate (rewrite, sizeof (ip4_udp_wg_header_t) - 1);
+ hdr = (ip4_udp_header_t *) rewrite;
+
+ hdr->ip4.ip_version_and_header_length = 0x45;
+ hdr->ip4.ttl = 64;
+ hdr->ip4.src_address = src_addr->ip4;
+ hdr->ip4.dst_address = dst_addr->ip4;
+ hdr->ip4.protocol = IP_PROTOCOL_UDP;
+ hdr->ip4.checksum = ip4_header_checksum (&hdr->ip4);
+
+ hdr->udp.src_port = clib_host_to_net_u16 (src_port);
+ hdr->udp.dst_port = clib_host_to_net_u16 (dst_port);
+ hdr->udp.checksum = 0;
+ }
+ else
+ {
+ ip6_udp_header_t *hdr;
+
+ /* reserve space for ip6, udp and wireguard headers */
+ vec_validate (rewrite, sizeof (ip6_udp_wg_header_t) - 1);
+ hdr = (ip6_udp_header_t *) rewrite;
+
+ hdr->ip6.ip_version_traffic_class_and_flow_label = 0x60;
+ ip6_address_copy (&hdr->ip6.src_address, &src_addr->ip6);
+ ip6_address_copy (&hdr->ip6.dst_address, &dst_addr->ip6);
+ hdr->ip6.protocol = IP_PROTOCOL_UDP;
+ hdr->ip6.hop_limit = 64;
+
+ hdr->udp.src_port = clib_host_to_net_u16 (src_port);
+ hdr->udp.dst_port = clib_host_to_net_u16 (dst_port);
+ hdr->udp.checksum = 0;
+ }
+
+ return (rewrite);
+}
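
wg_build_rewrite precomputes everything about the outer IP/UDP header that does not change per packet, and refuses to build a rewrite while the endpoint is still unknown (zero address or port), which is what wg_peer_can_send later detects by testing the rewrite pointer. A standalone IPv4-only sketch of that idea follows; the field names and the simplified header struct are assumptions of the sketch, not the plugin's types.

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct ip4_udp_template
    {
      /* IPv4 */
      uint8_t ver_ihl, tos;
      uint16_t tot_len, id, frag;
      uint8_t ttl, proto;
      uint16_t ip_csum;
      uint32_t src, dst;
      /* UDP */
      uint16_t sport, dport, udp_len, udp_csum;
    } __attribute__ ((packed));

    /* build the invariant part of the per-peer header once; lengths and the
     * IPv4 checksum are patched per packet when the template is prepended */
    static struct ip4_udp_template *
    build_rewrite_ip4 (uint32_t src, uint16_t sport, uint32_t dst,
                       uint16_t dport)
    {
      /* unknown endpoint: no rewrite, the peer cannot send yet */
      if (dst == 0 || dport == 0)
        return NULL;

      struct ip4_udp_template *t = calloc (1, sizeof (*t));
      if (t == NULL)
        return NULL;

      t->ver_ihl = 0x45; /* IPv4, 5 x 32-bit header words */
      t->ttl = 64;
      t->proto = 17; /* UDP */
      t->src = htonl (src);
      t->dst = htonl (dst);
      t->sport = htons (sport);
      t->dport = htons (dport);
      t->udp_csum = 0; /* optional for IPv4, left zero */
      return t;
    }

    int
    main (void)
    {
      struct ip4_udp_template *t =
        build_rewrite_ip4 (0x0a000001, 51820, 0x0a000002, 51820);
      printf ("%s\n", t ? "rewrite template built" : "endpoint unknown");
      free (t);
      return 0;
    }
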
+
bool
wg_send_handshake (vlib_main_t * vm, wg_peer_t * peer, bool is_retry)
{
ASSERT (vm->thread_index == 0);
+ if (!wg_peer_can_send (peer))
+ return false;
+
message_handshake_initiation_t packet;
if (!is_retry)
peer->timer_handshake_attempts = 0;
- if (!wg_birthdate_has_expired (peer->last_sent_handshake,
- REKEY_TIMEOUT) || peer->is_dead)
+ if (!wg_birthdate_has_expired (peer->last_sent_handshake, REKEY_TIMEOUT) ||
+ wg_peer_is_dead (peer))
return true;
if (noise_create_initiation (vm,
@@ -113,11 +191,13 @@ wg_send_handshake (vlib_main_t * vm, wg_peer_t * peer, bool is_retry)
else
return false;
+ u8 is_ip4 = ip46_address_is_ip4 (&peer->dst.addr);
u32 bi0 = 0;
- if (!wg_create_buffer (vm, peer, (u8 *) & packet, sizeof (packet), &bi0))
+ if (!wg_create_buffer (vm, peer->rewrite, (u8 *) &packet, sizeof (packet),
+ &bi0, is_ip4))
return false;
- ip46_enqueue_packet (vm, bi0, false);
+ ip46_enqueue_packet (vm, bi0, is_ip4);
return true;
}
@@ -134,8 +214,11 @@ wg_send_handshake_thread_fn (void *arg)
wg_main_t *wmp = &wg_main;
wg_peer_t *peer = wg_peer_get (a->peer_idx);
+ bool handshake;
wg_send_handshake (wmp->vlib_main, peer, a->is_retry);
+ handshake = false;
+ __atomic_store_n (&peer->handshake_is_sent, handshake, __ATOMIC_RELEASE);
return 0;
}
@@ -147,8 +230,18 @@ wg_send_handshake_from_mt (u32 peer_idx, bool is_retry)
.is_retry = is_retry,
};
- vl_api_rpc_call_main_thread (wg_send_handshake_thread_fn,
- (u8 *) & a, sizeof (a));
+ wg_peer_t *peer = wg_peer_get (peer_idx);
+
+ bool handshake =
+ __atomic_load_n (&peer->handshake_is_sent, __ATOMIC_ACQUIRE);
+
+ if (handshake == false)
+ {
+ handshake = true;
+ __atomic_store_n (&peer->handshake_is_sent, handshake, __ATOMIC_RELEASE);
+ vl_api_rpc_call_main_thread (wg_send_handshake_thread_fn, (u8 *) &a,
+ sizeof (a));
+ }
}
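
The guard above uses an acquire load and a release store on handshake_is_sent so that a worker queues at most one handshake RPC per peer; the thread function clears the flag once the handshake has been sent from the main thread. Below is a standalone sketch of the same idea using a C11 compare-and-swap; the plugin uses separate __atomic load/store calls on a plain bool, so this is a tightened variant, not its exact logic.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct
    {
      atomic_bool handshake_in_flight;
    } peer_t;

    /* worker side: queue at most one handshake request at a time */
    static bool
    request_handshake (peer_t *peer)
    {
      bool expected = false;

      /* only the caller that flips false -> true gets to queue the RPC */
      if (!atomic_compare_exchange_strong (&peer->handshake_in_flight,
                                           &expected, true))
        return false; /* a request is already pending */

      printf ("queue handshake RPC to main thread\n");
      return true;
    }

    /* main-thread side: after sending, allow the next request */
    static void
    handshake_sent (peer_t *peer)
    {
      atomic_store_explicit (&peer->handshake_in_flight, false,
                             memory_order_release);
    }

    int
    main (void)
    {
      peer_t p = { .handshake_in_flight = false };

      request_handshake (&p); /* queued */
      request_handshake (&p); /* suppressed */
      handshake_sent (&p);
      request_handshake (&p); /* queued again */
      return 0;
    }
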
bool
@@ -156,6 +249,9 @@ wg_send_keepalive (vlib_main_t * vm, wg_peer_t * peer)
{
ASSERT (vm->thread_index == 0);
+ if (!wg_peer_can_send (peer))
+ return false;
+
u32 size_of_packet = message_data_len (0);
message_data_t *packet =
(message_data_t *) wg_main.per_thread_data[vm->thread_index].data;
@@ -181,19 +277,22 @@ wg_send_keepalive (vlib_main_t * vm, wg_peer_t * peer)
}
else if (PREDICT_FALSE (state == SC_FAILED))
{
+ wg_peer_update_flags (peer - wg_peer_pool, WG_PEER_ESTABLISHED, false);
ret = false;
goto out;
}
+ u8 is_ip4 = ip46_address_is_ip4 (&peer->dst.addr);
packet->header.type = MESSAGE_DATA;
- if (!wg_create_buffer (vm, peer, (u8 *) packet, size_of_packet, &bi0))
+ if (!wg_create_buffer (vm, peer->rewrite, (u8 *) packet, size_of_packet,
+ &bi0, is_ip4))
{
ret = false;
goto out;
}
- ip46_enqueue_packet (vm, bi0, false);
+ ip46_enqueue_packet (vm, bi0, is_ip4);
wg_timers_any_authenticated_packet_sent (peer);
wg_timers_any_authenticated_packet_traversal (peer);
@@ -207,6 +306,9 @@ wg_send_handshake_response (vlib_main_t * vm, wg_peer_t * peer)
{
message_handshake_response_t packet;
+ if (!wg_peer_can_send (peer))
+ return false;
+
if (noise_create_response (vm,
&peer->remote,
&packet.sender_index,
@@ -223,20 +325,52 @@ wg_send_handshake_response (vlib_main_t * vm, wg_peer_t * peer)
wg_timers_session_derived (peer);
wg_timers_any_authenticated_packet_sent (peer);
wg_timers_any_authenticated_packet_traversal (peer);
- peer->last_sent_handshake = vlib_time_now (vm);
u32 bi0 = 0;
- if (!wg_create_buffer (vm, peer, (u8 *) & packet,
- sizeof (packet), &bi0))
+ u8 is_ip4 = ip46_address_is_ip4 (&peer->dst.addr);
+ if (!wg_create_buffer (vm, peer->rewrite, (u8 *) &packet,
+ sizeof (packet), &bi0, is_ip4))
return false;
- ip46_enqueue_packet (vm, bi0, false);
+ ip46_enqueue_packet (vm, bi0, is_ip4);
+ return true;
}
- else
- return false;
+ return false;
}
- else
+ return false;
+}
+
+bool
+wg_send_handshake_cookie (vlib_main_t *vm, u32 sender_index,
+ cookie_checker_t *cookie_checker,
+ message_macs_t *macs, ip46_address_t *wg_if_addr,
+ u16 wg_if_port, ip46_address_t *remote_addr,
+ u16 remote_port)
+{
+ message_handshake_cookie_t packet;
+ u8 *rewrite;
+
+ packet.header.type = MESSAGE_HANDSHAKE_COOKIE;
+ packet.receiver_index = sender_index;
+
+ cookie_checker_create_payload (vm, cookie_checker, macs, packet.nonce,
+ packet.encrypted_cookie, remote_addr,
+ remote_port);
+
+ u32 bi0 = 0;
+ u8 is_ip4 = ip46_address_is_ip4 (remote_addr);
+ bool ret;
+ rewrite = wg_build_rewrite (wg_if_addr, wg_if_port, remote_addr, remote_port,
+ is_ip4);
+
+ ret = wg_create_buffer (vm, rewrite, (u8 *) &packet, sizeof (packet), &bi0,
+ is_ip4);
+ vec_free (rewrite);
+ if (!ret)
return false;
+
+ ip46_enqueue_packet (vm, bi0, is_ip4);
+
return true;
}
diff --git a/src/plugins/wireguard/wireguard_send.h b/src/plugins/wireguard/wireguard_send.h
index 9575b84b659..419783a5db2 100755..100644
--- a/src/plugins/wireguard/wireguard_send.h
+++ b/src/plugins/wireguard/wireguard_send.h
@@ -19,10 +19,17 @@
#include <wireguard/wireguard_peer.h>
+u8 *wg_build_rewrite (ip46_address_t *src_addr, u16 src_port,
+ ip46_address_t *dst_addr, u16 dst_port, u8 is_ip4);
bool wg_send_keepalive (vlib_main_t * vm, wg_peer_t * peer);
bool wg_send_handshake (vlib_main_t * vm, wg_peer_t * peer, bool is_retry);
void wg_send_handshake_from_mt (u32 peer_index, bool is_retry);
bool wg_send_handshake_response (vlib_main_t * vm, wg_peer_t * peer);
+bool wg_send_handshake_cookie (vlib_main_t *vm, u32 sender_index,
+ cookie_checker_t *cookie_checker,
+ message_macs_t *macs,
+ ip46_address_t *wg_if_addr, u16 wg_if_port,
+ ip46_address_t *remote_addr, u16 remote_port);
always_inline void
ip4_header_set_len_w_chksum (ip4_header_t * ip4, u16 len)
diff --git a/src/plugins/wireguard/wireguard_timer.c b/src/plugins/wireguard/wireguard_timer.c
index b245b853fb5..237e67c1f06 100644
--- a/src/plugins/wireguard/wireguard_timer.c
+++ b/src/plugins/wireguard/wireguard_timer.c
@@ -26,6 +26,13 @@ get_random_u32_max (u32 max)
return random_u32 (&seed) % max;
}
+static u32
+get_random_u32_max_opt (u32 max, f64 time)
+{
+ u32 seed = (u32) (time * 1e6);
+ return random_u32 (&seed) % max;
+}
+
static void
stop_timer (wg_peer_t * peer, u32 timer_id)
{
@@ -66,7 +73,7 @@ start_timer_thread_fn (void *arg)
return 0;
}
-static void
+static_always_inline void
start_timer_from_mt (u32 peer_idx, u32 timer_id, u32 interval_ticks)
{
wg_timers_args a = {
@@ -191,14 +198,14 @@ wg_expired_zero_key_material (vlib_main_t * vm, wg_peer_t * peer)
return;
}
- if (!peer->is_dead)
+ if (!wg_peer_is_dead (peer))
{
noise_remote_clear (vm, &peer->remote);
}
}
-void
-wg_timers_any_authenticated_packet_traversal (wg_peer_t * peer)
+inline void
+wg_timers_any_authenticated_packet_traversal (wg_peer_t *peer)
{
if (peer->persistent_keepalive_interval)
{
@@ -214,6 +221,12 @@ wg_timers_any_authenticated_packet_sent (wg_peer_t * peer)
peer->last_sent_packet = vlib_time_now (vlib_get_main ());
}
+inline void
+wg_timers_any_authenticated_packet_sent_opt (wg_peer_t *peer, f64 time)
+{
+ peer->last_sent_packet = time;
+}
+
void
wg_timers_handshake_initiated (wg_peer_t * peer)
{
@@ -226,6 +239,16 @@ wg_timers_handshake_initiated (wg_peer_t * peer)
}
void
+wg_timers_send_first_handshake (wg_peer_t *peer)
+{
+ // zero value is not allowed
+ peer->new_handshake_interval_tick =
+ get_random_u32_max (REKEY_TIMEOUT_JITTER) + 1;
+ start_timer_from_mt (peer - wg_peer_pool, WG_TIMER_NEW_HANDSHAKE,
+ peer->new_handshake_interval_tick);
+}
+
+void
wg_timers_session_derived (wg_peer_t * peer)
{
peer->session_derived = vlib_time_now (vlib_get_main ());
@@ -246,6 +269,17 @@ wg_timers_data_sent (wg_peer_t * peer)
peer->new_handshake_interval_tick);
}
+inline void
+wg_timers_data_sent_opt (wg_peer_t *peer, f64 time)
+{
+ peer->new_handshake_interval_tick =
+ (KEEPALIVE_TIMEOUT + REKEY_TIMEOUT) * WHZ +
+ get_random_u32_max_opt (REKEY_TIMEOUT_JITTER, time);
+
+ start_timer_from_mt (peer - wg_peer_pool, WG_TIMER_NEW_HANDSHAKE,
+ peer->new_handshake_interval_tick);
+}
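
Both wg_timers_data_sent variants schedule WG_TIMER_NEW_HANDSHAKE at a base of (KEEPALIVE_TIMEOUT + REKEY_TIMEOUT) wheel ticks plus a small random jitter, so peers that lose a session do not retry in lockstep; the _opt variant only differs in taking the current time as a parameter, letting data-plane code reuse one timestamp per frame. A standalone sketch of that computation follows, where the constant values and the WHZ tick rate are assumptions of the sketch.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define KEEPALIVE_TIMEOUT    10  /* seconds, assumed */
    #define REKEY_TIMEOUT        5   /* seconds, assumed */
    #define WHZ                  100 /* timer-wheel ticks per second, assumed */
    #define REKEY_TIMEOUT_JITTER 34  /* jitter bound in ticks, assumed */

    /* jitter derived from a time-based seed, mirroring the _opt helper */
    static uint32_t
    random_max (uint32_t max, double now)
    {
      srand ((unsigned) (now * 1e6));
      return (uint32_t) rand () % max;
    }

    static uint32_t
    new_handshake_interval_ticks (double now)
    {
      return (KEEPALIVE_TIMEOUT + REKEY_TIMEOUT) * WHZ +
             random_max (REKEY_TIMEOUT_JITTER, now);
    }

    int
    main (void)
    {
      printf ("schedule NEW_HANDSHAKE in %u ticks\n",
              new_handshake_interval_ticks (12.345678));
      return 0;
    }
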
+
/* Should be called after an authenticated data packet is received. */
void
wg_timers_data_received (wg_peer_t * peer)
@@ -275,6 +309,12 @@ wg_timers_any_authenticated_packet_received (wg_peer_t * peer)
peer->last_received_packet = vlib_time_now (vlib_get_main ());
}
+inline void
+wg_timers_any_authenticated_packet_received_opt (wg_peer_t *peer, f64 time)
+{
+ peer->last_received_packet = time;
+}
+
static vlib_node_registration_t wg_timer_mngr_node;
static void
@@ -394,14 +434,12 @@ wg_timers_stop (wg_peer_t * peer)
}
}
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (wg_timer_mngr_node, static) = {
.function = wg_timer_mngr_fn,
.type = VLIB_NODE_TYPE_PROCESS,
.name =
"wg-timer-manager",
};
-/* *INDENT-ON* */
void
wg_feature_init (wg_main_t * wmp)
diff --git a/src/plugins/wireguard/wireguard_timer.h b/src/plugins/wireguard/wireguard_timer.h
index 6b59a39f815..47638bfd74d 100755..100644
--- a/src/plugins/wireguard/wireguard_timer.h
+++ b/src/plugins/wireguard/wireguard_timer.h
@@ -41,11 +41,16 @@ typedef struct wg_peer wg_peer_t;
void wg_timer_wheel_init ();
void wg_timers_stop (wg_peer_t * peer);
void wg_timers_data_sent (wg_peer_t * peer);
+void wg_timers_data_sent_opt (wg_peer_t *peer, f64 time);
void wg_timers_data_received (wg_peer_t * peer);
void wg_timers_any_authenticated_packet_sent (wg_peer_t * peer);
+void wg_timers_any_authenticated_packet_sent_opt (wg_peer_t *peer, f64 time);
void wg_timers_any_authenticated_packet_received (wg_peer_t * peer);
+void wg_timers_any_authenticated_packet_received_opt (wg_peer_t *peer,
+ f64 time);
void wg_timers_handshake_initiated (wg_peer_t * peer);
void wg_timers_handshake_complete (wg_peer_t * peer);
+void wg_timers_send_first_handshake (wg_peer_t *peer);
void wg_timers_session_derived (wg_peer_t * peer);
void wg_timers_any_authenticated_packet_traversal (wg_peer_t * peer);
@@ -53,10 +58,19 @@ void wg_timers_any_authenticated_packet_traversal (wg_peer_t * peer);
static inline bool
wg_birthdate_has_expired (f64 birthday_seconds, f64 expiration_seconds)
{
+ if (birthday_seconds == 0.0)
+ return true;
f64 now_seconds = vlib_time_now (vlib_get_main ());
return (birthday_seconds + expiration_seconds) < now_seconds;
}
+static_always_inline bool
+wg_birthdate_has_expired_opt (f64 birthday_seconds, f64 expiration_seconds,
+ f64 time)
+{
+ return (birthday_seconds + expiration_seconds) < time;
+}
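
wg_birthdate_has_expired now treats a zero birthdate (an event that never happened) as already expired, and the _opt variant takes the current time as a parameter so callers can pass in a timestamp they have already sampled. A standalone sketch combining both behaviours for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    /* a zero birthdate means "never happened", which always counts as expired */
    static bool
    birthdate_has_expired (double birthday, double lifetime, double now)
    {
      if (birthday == 0.0)
        return true;
      return (birthday + lifetime) < now;
    }

    int
    main (void)
    {
      double now = 100.0;

      printf ("%d\n", birthdate_has_expired (0.0, 5.0, now));  /* 1: never set */
      printf ("%d\n", birthdate_has_expired (97.0, 5.0, now)); /* 0: still valid */
      printf ("%d\n", birthdate_has_expired (90.0, 5.0, now)); /* 1: expired */
      return 0;
    }
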
+
#endif /* __included_wg_timer_h__ */
/*