author    Marco Varlese <marco.varlese@suse.com>    2018-01-31 11:00:01 +0100
committer Florin Coras <florin.coras@gmail.com>     2018-02-01 23:45:03 +0000
commit    91389ac2c28ae10f2b7f766e4dfe7a7fd96dc5e0 (patch)
tree      8a0286cca7960df4f1365f7e20a9a34ced835c4c /src
parent    75e7d1301475d49311d14e202936c62df0c07d10 (diff)
Out-of-order data chunks handling and more
This patch addresses the need to handle out-of-order data chunks received
from a peer. To do that effectively, handling of the DATA chunk flags
(U/B/E bits) was added, so the receiver can tell whether a stream is
fragmenting user-message data and, if so, whether a fragment is the
FIRST/MIDDLE/LAST piece of a transmission. The same patch also addresses
the security requirement to calculate an HMAC and carry it in the state
cookie exchanged via the INIT_ACK and COOKIE_ECHO chunks; the algorithm
used is HMAC-SHA1.

Change-Id: Ib6a9a80492e2aafe5c8480d6e02da895efe9f90b
Signed-off-by: Marco Varlese <marco.varlese@suse.com>
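As a quick reference for the new flag handling: in a DATA chunk, the B (beginning) and E (ending) bits together identify a fragment's position within a user message. The sketch below mirrors the dispatch added to sctp_handle_data() in sctp_input.c; the enum and helper names are illustrative only, not part of the patch.

/* Illustrative mapping of the B/E bits to fragment position (names are
 * hypothetical; the real dispatch lives in sctp_handle_data ()). */
typedef enum
{
  FRAGMENT_UNFRAGMENTED,	/* B = 1, E = 1: complete user message */
  FRAGMENT_FIRST,		/* B = 1, E = 0: first piece of a fragmented message */
  FRAGMENT_MIDDLE,		/* B = 0, E = 0: middle piece */
  FRAGMENT_LAST			/* B = 0, E = 1: last piece */
} fragment_position_t;

static inline fragment_position_t
fragment_position (unsigned bbit, unsigned ebit)
{
  if (bbit && ebit)
    return FRAGMENT_UNFRAGMENTED;
  if (bbit)
    return FRAGMENT_FIRST;
  if (ebit)
    return FRAGMENT_LAST;
  return FRAGMENT_MIDDLE;
}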
Diffstat (limited to 'src')
-rw-r--r--  src/vnet/sctp/sctp.h          26
-rw-r--r--  src/vnet/sctp/sctp_input.c   136
-rw-r--r--  src/vnet/sctp/sctp_output.c   38
-rw-r--r--  src/vnet/sctp/sctp_packet.h  104
4 files changed, 235 insertions, 69 deletions
diff --git a/src/vnet/sctp/sctp.h b/src/vnet/sctp/sctp.h
index 3e3750ea92a..8f80d840c33 100644
--- a/src/vnet/sctp/sctp.h
+++ b/src/vnet/sctp/sctp.h
@@ -110,9 +110,25 @@ typedef struct
} sctp_options_t;
-#define SetBit(A,k) ( A[(k/32)] |= (1 << (k%32)) )
-#define ClearBit(A,k) ( A[(k/32)] &= ~(1 << (k%32)) )
-#define TestBit(A,k) ( A[(k/32)] & (1 << (k%32)) )
+/* Useful macros to deal with the out_of_order_map (array of bits) */
+#define SET_BIT(A,k) ( A[(k/32)] |= (1 << (k%32)) )
+#define CLEAR_BIT(A,k) ( A[(k/32)] &= ~(1 << (k%32)) )
+#define TEST_BIT(A,k) ( A[(k/32)] & (1 << (k%32)) )
+
+always_inline void
+_bytes_swap (void *pv, size_t n)
+{
+ char *p = pv;
+ size_t lo, hi;
+ for (lo = 0, hi = n - 1; hi > lo; lo++, hi--)
+ {
+ char tmp = p[lo];
+ p[lo] = p[hi];
+ p[hi] = tmp;
+ }
+}
+
+#define ENDIANESS_SWAP(x) _bytes_swap(&x, sizeof(x))
#define MAX_INFLIGHT_PACKETS 128
#define MAX_ENQUEABLE_SACKS 2
@@ -182,6 +198,10 @@ typedef struct _sctp_connection
u32 rtt_ts;
u32 rtt_seq;
+ u8 overall_sending_status; /**< 0 indicates first fragment of a user message
+ 1 indicates normal stream
+ 2 indicates last fragment of a user message */
+
sctp_options_t rcv_opts;
sctp_options_t snd_opts;
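Aside on the renamed bit-array macros above: a standalone usage sketch follows (not part of the patch). It assumes the out_of_order_map referenced in the comment is a plain u32 array, with bit k representing the k-th TSN past the cumulative ack point.

/* Usage sketch for SET_BIT/CLEAR_BIT/TEST_BIT (illustrative, not from the patch). */
u32 out_of_order_map[4] = { 0 };	/* 4 x 32 = 128 trackable TSN offsets */
u32 offset = 42;			/* e.g. tsn - next_tsn_expected */

SET_BIT (out_of_order_map, offset);	/* mark offset 42 as received out of order */
if (TEST_BIT (out_of_order_map, offset))
  CLEAR_BIT (out_of_order_map, offset);	/* clear once the gap is filled */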
diff --git a/src/vnet/sctp/sctp_input.c b/src/vnet/sctp/sctp_input.c
index a1bcb2b0ae3..44624500016 100644
--- a/src/vnet/sctp/sctp_input.c
+++ b/src/vnet/sctp/sctp_input.c
@@ -540,6 +540,61 @@ sctp_handle_init_ack (sctp_header_t * sctp_hdr,
return SCTP_ERROR_NONE;
}
+/** Enqueue data out-of-order for delivery to application */
+always_inline int
+sctp_session_enqueue_data_ooo (sctp_connection_t * sctp_conn,
+ vlib_buffer_t * b, u16 data_len, u8 conn_idx)
+{
+ int written, error = SCTP_ERROR_ENQUEUED;
+
+ written =
+ session_enqueue_stream_connection (&sctp_conn->
+ sub_conn[conn_idx].connection, b, 0,
+ 1 /* queue event */ ,
+ 0);
+
+ /* Update next_tsn_expected */
+ if (PREDICT_TRUE (written == data_len))
+ {
+ sctp_conn->next_tsn_expected += written;
+
+ SCTP_ADV_DBG ("CONN = %u, WRITTEN [%u] == DATA_LEN [%d]",
+ sctp_conn->sub_conn[conn_idx].connection.c_index,
+ written, data_len);
+ }
+ /* If more data written than expected, account for out-of-order bytes. */
+ else if (written > data_len)
+ {
+ sctp_conn->next_tsn_expected += written;
+
+ SCTP_ADV_DBG ("CONN = %u, WRITTEN [%u] > DATA_LEN [%d]",
+ sctp_conn->sub_conn[conn_idx].connection.c_index,
+ written, data_len);
+ }
+ else if (written > 0)
+ {
+ /* We've written something but FIFO is probably full now */
+ sctp_conn->next_tsn_expected += written;
+
+ error = SCTP_ERROR_PARTIALLY_ENQUEUED;
+
+ SCTP_ADV_DBG
+ ("CONN = %u, WRITTEN [%u] > 0 (SCTP_ERROR_PARTIALLY_ENQUEUED)",
+ sctp_conn->sub_conn[conn_idx].connection.c_index, written);
+ }
+ else
+ {
+ SCTP_ADV_DBG ("CONN = %u, WRITTEN == 0 (SCTP_ERROR_FIFO_FULL)",
+ sctp_conn->sub_conn[conn_idx].connection.c_index);
+
+ return SCTP_ERROR_FIFO_FULL;
+ }
+
+ /* TODO: Update out_of_order_map & SACK list */
+
+ return error;
+}
+
/** Enqueue data for delivery to application */
always_inline int
sctp_session_enqueue_data (sctp_connection_t * sctp_conn, vlib_buffer_t * b,
@@ -617,6 +672,22 @@ sctp_is_sack_delayable (sctp_connection_t * sctp_conn, u8 gapping)
return 0;
}
+always_inline void
+sctp_is_connection_gapping (sctp_connection_t * sctp_conn, u8 idx, u32 tsn,
+ u8 * gapping)
+{
+ if (sctp_conn->next_tsn_expected != tsn) // It means data transmission is GAPPING
+ {
+ SCTP_CONN_TRACKING_DBG
+ ("GAPPING: CONN_INDEX = %u, sctp_conn->next_tsn_expected = %u, tsn = %u, diff = %u",
+ sctp_conn->sub_conn[idx].connection.c_index,
+ sctp_conn->next_tsn_expected, tsn,
+ sctp_conn->next_tsn_expected - tsn);
+
+ *gapping = 1;
+ }
+}
+
always_inline u16
sctp_handle_data (sctp_payload_data_chunk_t * sctp_data_chunk,
sctp_connection_t * sctp_conn, vlib_buffer_t * b,
@@ -624,7 +695,7 @@ sctp_handle_data (sctp_payload_data_chunk_t * sctp_data_chunk,
{
u32 error = 0, n_data_bytes;
u8 idx = sctp_pick_conn_idx_on_state (sctp_conn->state);
- u8 gapping = 0;
+ u8 is_gapping = 0;
/* Check that the LOCALLY generated tag is being used by the REMOTE peer as the verification tag */
if (sctp_conn->local_tag != sctp_data_chunk->sctp_hdr.verification_tag)
@@ -641,28 +712,48 @@ sctp_handle_data (sctp_payload_data_chunk_t * sctp_data_chunk,
n_data_bytes = vnet_buffer (b)->sctp.data_len;
ASSERT (n_data_bytes);
- if (sctp_conn->next_tsn_expected != tsn) // It means data transmission is GAPPING
- {
- SCTP_CONN_TRACKING_DBG
- ("GAPPING: CONN_INDEX = %u, sctp_conn->next_tsn_expected = %u, tsn = %u, diff = %u",
- sctp_conn->sub_conn[idx].connection.c_index,
- sctp_conn->next_tsn_expected, tsn,
- sctp_conn->next_tsn_expected - tsn);
-
- gapping = 1;
- }
+ sctp_is_connection_gapping (sctp_conn, idx, tsn, &is_gapping);
sctp_conn->last_rcvd_tsn = tsn;
SCTP_ADV_DBG ("POINTER_WITH_DATA = %p", b->data);
- /* In order data, enqueue. Fifo figures out by itself if any out-of-order
- * segments can be enqueued after fifo tail offset changes. */
- error = sctp_session_enqueue_data (sctp_conn, b, n_data_bytes, idx);
+ u8 bbit = vnet_sctp_get_bbit (&sctp_data_chunk->chunk_hdr);
+ u8 ebit = vnet_sctp_get_ebit (&sctp_data_chunk->chunk_hdr);
+
+ if (bbit == 1 && ebit == 1) /* Unfragmented message */
+ {
+ /* In order data, enqueue. Fifo figures out by itself if any out-of-order
+ * segments can be enqueued after fifo tail offset changes. */
+ error = sctp_session_enqueue_data (sctp_conn, b, n_data_bytes, idx);
+ }
+ else if (bbit == 1 && ebit == 0) /* First piece of a fragmented user message */
+ {
+ error = sctp_session_enqueue_data (sctp_conn, b, n_data_bytes, idx);
+ }
+ else if (bbit == 0 && ebit == 1) /* Last piece of a fragmented user message */
+ {
+ if (PREDICT_FALSE (is_gapping == 1))
+ error =
+ sctp_session_enqueue_data_ooo (sctp_conn, b, n_data_bytes, idx);
+ else
+ error = sctp_session_enqueue_data (sctp_conn, b, n_data_bytes, idx);
+ }
+ else /* Middle piece of a fragmented user message */
+ {
+ if (PREDICT_FALSE (is_gapping == 1))
+ error =
+ sctp_session_enqueue_data_ooo (sctp_conn, b, n_data_bytes, idx);
+ else
+ error = sctp_session_enqueue_data (sctp_conn, b, n_data_bytes, idx);
+ }
*next0 = sctp_next_output (sctp_conn->sub_conn[idx].c_is_ip4);
- if (sctp_is_sack_delayable (sctp_conn, gapping) != 0)
+ if (sctp_is_sack_delayable (sctp_conn, is_gapping) != 0)
sctp_prepare_sack_chunk (sctp_conn, b);
return error;
@@ -677,12 +768,27 @@ sctp_handle_cookie_echo (sctp_header_t * sctp_hdr,
/* Build TCB */
u8 idx = sctp_pick_conn_idx_on_chunk (COOKIE_ECHO);
+ sctp_cookie_echo_chunk_t *cookie_echo =
+ (sctp_cookie_echo_chunk_t *) sctp_hdr;
+
/* Check that the LOCALLY generated tag is being used by the REMOTE peer as the verification tag */
if (sctp_conn->local_tag != sctp_hdr->verification_tag)
{
return SCTP_ERROR_INVALID_TAG;
}
+ u32 now = sctp_time_now ();
+ u32 creation_time =
+ clib_net_to_host_u32 (cookie_echo->cookie.creation_time);
+ u32 cookie_lifespan =
+ clib_net_to_host_u32 (cookie_echo->cookie.cookie_lifespan);
+ if (now > creation_time + cookie_lifespan)
+ {
+ SCTP_DBG ("now (%u) > creation_time (%u) + cookie_lifespan (%u)",
+ now, creation_time, cookie_lifespan);
+ return SCTP_ERROR_COOKIE_ECHO_VIOLATION;
+ }
+
sctp_prepare_cookie_ack_chunk (sctp_conn, b0);
/* Change state */
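One caveat worth noting on the cookie-staleness check added to sctp_handle_cookie_echo() above: comparing now > creation_time + cookie_lifespan can misjudge staleness if the 32-bit sum wraps. A wrap-safe variant using unsigned subtraction is sketched below; it is illustrative only (the helper name is hypothetical) and assumes now and creation_time come from the same clock and units.

/* Wrap-safe staleness test (sketch, not part of the patch). With unsigned
 * arithmetic, (now - creation_time) yields the elapsed time even if the
 * 32-bit counter wrapped once between the two samples. */
static inline int
sctp_cookie_is_stale (u32 now, u32 creation_time, u32 cookie_lifespan)
{
  return (now - creation_time) > cookie_lifespan;
}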
diff --git a/src/vnet/sctp/sctp_output.c b/src/vnet/sctp/sctp_output.c
index 7b22cc59ac4..3d870ff5bd4 100644
--- a/src/vnet/sctp/sctp_output.c
+++ b/src/vnet/sctp/sctp_output.c
@@ -15,6 +15,7 @@
#include <vnet/sctp/sctp.h>
#include <vnet/sctp/sctp_debug.h>
#include <vppinfra/random.h>
+#include <openssl/hmac.h>
vlib_node_registration_t sctp4_output_node;
vlib_node_registration_t sctp6_output_node;
@@ -494,10 +495,35 @@ sctp_prepare_init_chunk (sctp_connection_t * sctp_conn, vlib_buffer_t * b)
init_chunk->sctp_hdr.dst_port);
}
-u64
-sctp_compute_mac ()
+void
+sctp_compute_mac (sctp_connection_t * sctp_conn,
+ sctp_state_cookie_param_t * state_cookie)
{
- return 0x0;
+  const EVP_MD *md = EVP_sha1 ();
+  unsigned int len = 0;
+
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+  HMAC_CTX *ctx = HMAC_CTX_new ();
+  HMAC_Init_ex (ctx, &state_cookie->creation_time,
+		sizeof (state_cookie->creation_time), md, NULL);
+  HMAC_Update (ctx, (const unsigned char *) &sctp_conn, sizeof (sctp_conn));
+  HMAC_Final (ctx, state_cookie->mac, &len);
+  HMAC_CTX_free (ctx);
+#else
+  HMAC_CTX ctx;
+  HMAC_CTX_init (&ctx);
+  HMAC_Init_ex (&ctx, &state_cookie->creation_time,
+		sizeof (state_cookie->creation_time), md, NULL);
+  HMAC_Update (&ctx, (const unsigned char *) &sctp_conn, sizeof (sctp_conn));
+  HMAC_Final (&ctx, state_cookie->mac, &len);
+  HMAC_CTX_cleanup (&ctx);
+#endif
+
+  ENDIANESS_SWAP (state_cookie->mac);
}
void
@@ -626,7 +652,8 @@ sctp_prepare_initack_chunk (sctp_connection_t * sctp_conn, vlib_buffer_t * b,
state_cookie_param->creation_time = clib_host_to_net_u32 (sctp_time_now ());
state_cookie_param->cookie_lifespan =
clib_host_to_net_u32 (SCTP_VALID_COOKIE_LIFE);
- state_cookie_param->mac = clib_host_to_net_u64 (sctp_compute_mac ());
+
+ sctp_compute_mac (sctp_conn, state_cookie_param);
pointer_offset += sizeof (sctp_state_cookie_param_t);
@@ -1068,6 +1095,9 @@ sctp_push_hdr_i (sctp_connection_t * sctp_conn, vlib_buffer_t * b,
vnet_sctp_set_chunk_type (&data_chunk->chunk_hdr, DATA);
vnet_sctp_set_chunk_length (&data_chunk->chunk_hdr, chunk_length);
+ vnet_sctp_set_bbit (&data_chunk->chunk_hdr);
+ vnet_sctp_set_ebit (&data_chunk->chunk_hdr);
+
SCTP_ADV_DBG_OUTPUT ("POINTER_WITH_DATA = %p, DATA_OFFSET = %u",
b->data, b->current_data);
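Design note on sctp_compute_mac() above: the #if OPENSSL_VERSION_NUMBER branches can be avoided entirely with OpenSSL's one-shot HMAC() helper, which is available in both the 1.0.x and 1.1.x APIs. The sketch below is a hypothetical alternative, not the patch's implementation; it assumes the digest should be keyed with the cookie creation time, as the code above does.

/* One-shot HMAC-SHA1 sketch (hypothetical alternative, not part of the patch). */
#include <openssl/evp.h>
#include <openssl/hmac.h>

#define SHA1_OUTPUT_LENGTH 20	/* matches the definition added in sctp_packet.h */

static void
compute_cookie_mac (const void *key, int key_len,
		    const unsigned char *data, size_t data_len,
		    unsigned char mac[SHA1_OUTPUT_LENGTH])
{
  unsigned int len = 0;
  /* HMAC () manages the HMAC_CTX lifecycle internally in both OpenSSL
   * generations, so no version-conditional code is required. */
  HMAC (EVP_sha1 (), key, key_len, data, data_len, mac, &len);
}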
diff --git a/src/vnet/sctp/sctp_packet.h b/src/vnet/sctp/sctp_packet.h
index d1fe7ab71ea..b831d249f76 100644
--- a/src/vnet/sctp/sctp_packet.h
+++ b/src/vnet/sctp/sctp_packet.h
@@ -267,6 +267,15 @@ typedef struct
#define CHUNK_FLAGS_MASK 0x00FF0000
#define CHUNK_FLAGS_SHIFT 16
+#define CHUNK_UBIT_MASK 0x00040000
+#define CHUNK_UBIT_SHIFT 18
+
+#define CHUNK_BBIT_MASK 0x00020000
+#define CHUNK_BBIT_SHIFT 17
+
+#define CHUNK_EBIT_MASK 0x00010000
+#define CHUNK_EBIT_SHIFT 16
+
#define CHUNK_LENGTH_MASK 0x0000FFFF
#define CHUNK_LENGTH_SHIFT 0
@@ -283,6 +292,45 @@ vnet_sctp_common_hdr_params_net_to_host (sctp_chunks_common_hdr_t * h)
}
always_inline void
+vnet_sctp_set_ubit (sctp_chunks_common_hdr_t * h)
+{
+ h->params &= ~(CHUNK_UBIT_MASK);
+ h->params |= (1 << CHUNK_UBIT_SHIFT) & CHUNK_UBIT_MASK;
+}
+
+always_inline u8
+vnet_sctp_get_ubit (sctp_chunks_common_hdr_t * h)
+{
+ return ((h->params & CHUNK_UBIT_MASK) >> CHUNK_UBIT_SHIFT);
+}
+
+always_inline void
+vnet_sctp_set_bbit (sctp_chunks_common_hdr_t * h)
+{
+ h->params &= ~(CHUNK_BBIT_MASK);
+ h->params |= (1 << CHUNK_BBIT_SHIFT) & CHUNK_BBIT_MASK;
+}
+
+always_inline u8
+vnet_sctp_get_bbit (sctp_chunks_common_hdr_t * h)
+{
+ return ((h->params & CHUNK_BBIT_MASK) >> CHUNK_BBIT_SHIFT);
+}
+
+always_inline void
+vnet_sctp_set_ebit (sctp_chunks_common_hdr_t * h)
+{
+ h->params &= ~(CHUNK_EBIT_MASK);
+ h->params |= (1 << CHUNK_EBIT_SHIFT) & CHUNK_EBIT_MASK;
+}
+
+always_inline u8
+vnet_sctp_get_ebit (sctp_chunks_common_hdr_t * h)
+{
+ return ((h->params & CHUNK_EBIT_MASK) >> CHUNK_EBIT_SHIFT);
+}
+
+always_inline void
vnet_sctp_set_chunk_type (sctp_chunks_common_hdr_t * h, sctp_chunk_type t)
{
h->params &= ~(CHUNK_TYPE_MASK);
@@ -408,45 +456,6 @@ typedef struct
} sctp_payload_data_chunk_t;
always_inline void
-vnet_sctp_set_ebit (sctp_payload_data_chunk_t * p, u8 enable)
-{
- //p->chunk_hdr.flags = clib_host_to_net_u16 (enable);
-}
-
-always_inline u8
-vnet_sctp_get_ebit (sctp_payload_data_chunk_t * p)
-{
- //return (clib_net_to_host_u16 (p->chunk_hdr.flags));
- return 0;
-}
-
-always_inline void
-vnet_sctp_set_bbit (sctp_payload_data_chunk_t * p, u8 enable)
-{
- //p->chunk_hdr.flags = clib_host_to_net_u16 (enable << 1);
-}
-
-always_inline u8
-vnet_sctp_get_bbit (sctp_payload_data_chunk_t * p)
-{
- //return (clib_net_to_host_u16 (p->chunk_hdr.flags >> 1));
- return 0;
-}
-
-always_inline void
-vnet_sctp_set_ubit (sctp_payload_data_chunk_t * p, u8 enable)
-{
- //p->chunk_hdr.flags = clib_host_to_net_u16 (enable << 2);
-}
-
-always_inline u8
-vnet_sctp_get_ubit (sctp_payload_data_chunk_t * p)
-{
- //return (clib_net_to_host_u16 (p->chunk_hdr.flags >> 2));
- return 0;
-}
-
-always_inline void
vnet_sctp_set_tsn (sctp_payload_data_chunk_t * p, u32 tsn)
{
p->tsn = clib_host_to_net_u32 (tsn);
@@ -680,6 +689,14 @@ typedef struct
*/
typedef sctp_init_chunk_t sctp_init_ack_chunk_t;
+typedef struct
+{
+ u16 type;
+ u16 length;
+
+} sctp_opt_params_hdr_t;
+
+#define SHA1_OUTPUT_LENGTH 20
/*
* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
@@ -693,16 +710,9 @@ typedef sctp_init_chunk_t sctp_init_ack_chunk_t;
*/
typedef struct
{
- u16 type;
- u16 length;
-
-} sctp_opt_params_hdr_t;
-
-typedef struct
-{
sctp_opt_params_hdr_t param_hdr;
- u64 mac; /* RFC 2104 */
+ unsigned char mac[SHA1_OUTPUT_LENGTH]; /* RFC 2104 */
u32 creation_time;
u32 cookie_lifespan;