/* * Copyright (c) 2017 SUSE LLC. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include sctp_main_t sctp_main; static u32 sctp_connection_bind (u32 session_index, transport_endpoint_t * tep) { sctp_main_t *tm = &sctp_main; sctp_connection_t *listener; void *iface_ip; pool_get (tm->listener_pool, listener); memset (listener, 0, sizeof (*listener)); listener->sub_conn[SCTP_PRIMARY_PATH_IDX].subconn_idx = SCTP_PRIMARY_PATH_IDX; listener->sub_conn[SCTP_PRIMARY_PATH_IDX].c_c_index = listener - tm->listener_pool; listener->sub_conn[SCTP_PRIMARY_PATH_IDX].connection.lcl_port = tep->port; /* If we are provided a sw_if_index, bind using one of its IPs */ if (ip_is_zero (&tep->ip, 1) && tep->sw_if_index != ENDPOINT_INVALID_INDEX) { if ((iface_ip = ip_interface_get_first_ip (tep->sw_if_index, tep->is_ip4))) ip_set (&tep->ip, iface_ip, tep->is_ip4); } ip_copy (&listener->sub_conn[SCTP_PRIMARY_PATH_IDX].connection.lcl_ip, &tep->ip, tep->is_ip4); u32 mtu = tep->is_ip4 ? 
vnet_sw_interface_get_mtu (vnet_get_main (), tep->sw_if_index, VNET_MTU_IP4) : vnet_sw_interface_get_mtu (vnet_get_main (), tep->sw_if_index, VNET_MTU_IP6); listener->sub_conn[SCTP_PRIMARY_PATH_IDX].PMTU = mtu; listener->sub_conn[SCTP_PRIMARY_PATH_IDX].connection.is_ip4 = tep->is_ip4; listener->sub_conn[SCTP_PRIMARY_PATH_IDX].connection.proto = TRANSPORT_PROTO_SCTP; listener->sub_conn[SCTP_PRIMARY_PATH_IDX].c_s_index = session_index; listener->sub_conn[SCTP_PRIMARY_PATH_IDX].connection.fib_index = tep->fib_index; listener->state = SCTP_STATE_CLOSED; sctp_connection_timers_init (listener); return listener->sub_conn[SCTP_PRIMARY_PATH_IDX].c_c_index; } u32 sctp_session_bind (u32 session_index, transport_endpoint_t * tep) { return sctp_connection_bind (session_index, tep); } static void sctp_connection_unbind (u32 listener_index) { sctp_main_t *tm = vnet_get_sctp_main (); sctp_connection_t *sctp_conn; sctp_conn = pool_elt_at_index (tm->listener_pool, listener_index); /* Poison the entry */ if (CLIB_DEBUG > 0) memset (sctp_conn, 0xFA, sizeof (*sctp_conn)); pool_put_index (tm->listener_pool, listener_index); } u32 sctp_session_unbind (u32 listener_index) { sctp_connection_unbind (listener_index); return 0; } void sctp_punt_unknown (vlib_main_t * vm, u8 is_ip4, u8 is_add) { sctp_main_t *tm = &sctp_main; if (is_ip4) tm->punt_unknown4 = is_add; else tm->punt_unknown6 = is_add; } static int sctp_alloc_custom_local_endpoint (sctp_main_t * tm, ip46_address_t * lcl_addr, u16 * lcl_port, u8 is_ip4) { int index, port; if (is_ip4) { index = tm->last_v4_address_rotor++; if (tm->last_v4_address_rotor >= vec_len (tm->ip4_src_addresses)) tm->last_v4_address_rotor = 0; lcl_addr->ip4.as_u32 = tm->ip4_src_addresses[index].as_u32; } else { index = tm->last_v6_address_rotor++; if (tm->last_v6_address_rotor >= vec_len (tm->ip6_src_addresses)) tm->last_v6_address_rotor = 0; clib_memcpy (&lcl_addr->ip6, &tm->ip6_src_addresses[index], sizeof (ip6_address_t)); } port = 
transport_alloc_local_port (TRANSPORT_PROTO_SCTP, lcl_addr); if (port < 1) { clib_warning ("Failed to allocate src port"); return -1; } *lcl_port = port; return 0; } /** * Initialize all connection timers as invalid */ void sctp_connection_timers_init (sctp_connection_t * sctp_conn) { int i, j; /* Set all to invalid */ for (i = 0; i < MAX_SCTP_CONNECTIONS; i++) { sctp_conn->sub_conn[i].RTO = SCTP_RTO_INIT; for (j = 0; j < SCTP_N_TIMERS; j++) { sctp_conn->sub_conn[i].timers[j] = SCTP_TIMER_HANDLE_INVALID; } } } /** * Stop all connection timers */ void sctp_connection_timers_reset (sctp_connection_t * sctp_conn) { int i, j; for (i = 0; i < MAX_SCTP_CONNECTIONS; i++) { for (j = 0; j < SCTP_N_TIMERS; j++) sctp
Package: dpdk-modules-#KVERS#
Section: kernel
Architecture: amd64 arm64 i386
Depends: linux-image-#KVERS# #KDREV#
Provides: dpdk-modules
Conflicts: dpdk-igb-uio-dkms, dpdk-rte-kni-dkms
Built-Using: ${built:using:kernel}
Description: Data Plane Development Kit (rte kni, igb uio modules)
 DPDK is a set of libraries for fast packet processing. Applications run
 in user-space and communicate directly with dedicated network interfaces.
 This package contains the prebuilt igb_uio and rte_kni kernel modules.
 .
 This package provides the kernel drivers for userspace networking.
 Includes Intel DPDK kernel modules.
d_index; /* Make sure all timers are cleared */ sctp_connection_timers_reset (sctp_conn); /* Poison the entry */ if (CLIB_DEBUG > 0) memset (sctp_conn, 0xFA, sizeof (*sctp_conn)); pool_put (tm->connections[thread_index], sctp_conn); } int sctp_session_open (transport_endpoint_t * tep) { return sctp_connection_open (tep); } u16 sctp_check_outstanding_data_chunks (sctp_connection_t * sctp_conn) { u8 i; for (i = 0; i < MAX_SCTP_CONNECTIONS; i++) { if (sctp_conn->sub_conn[i].state == SCTP_SUBCONN_STATE_DOWN) continue; if (sctp_conn->sub_conn[i].is_retransmitting == 1 || sctp_conn->sub_conn[i].enqueue_state != SCTP_ERROR_ENQUEUED) { SCTP_DBG_OUTPUT ("Connection %u has still DATA to be enqueued inboud / outboud", sctp_conn->sub_conn[i].connection.c_index); return 1; } } return 0; /* Indicates no more data to be read/sent */ } void sctp_connection_close (sctp_connection_t * sctp_conn) { SCTP_DBG ("Closing connection %u...", sctp_conn->sub_conn[SCTP_PRIMARY_PATH_IDX].connection.c_index); sctp_conn->state = SCTP_STATE_SHUTDOWN_PENDING; sctp_send_shutdown (sctp_conn); } void sctp_session_close (u32 conn_index, u32 thread_index) { ASSERT (thread_index == 0); sctp_connection_t *sctp_conn = sctp_connection_get (conn_index, thread_index); if (sctp_conn != NULL) sctp_connection_close (sctp_conn); } void sctp_session_cleanup (u32 conn_index, u32 thread_index) { sctp_connection_t *sctp_conn = sctp_connection_get (conn_index, thread_index); if (sctp_conn != NULL) { sctp_connection_timers_reset (sctp_conn); /* Wait for the session tx events to clear */ sctp_conn->state = SCTP_STATE_CLOSED; } } /** * Compute maximum segment size for session layer. 
*/ u16 sctp_session_send_mss (transport_connection_t * trans_conn) { sctp_connection_t *sctp_conn = sctp_get_connection_from_transport (trans_conn); if (sctp_conn == NULL) { SCTP_DBG ("sctp_conn == NULL"); return 0; } update_cwnd (sctp_conn); update_smallest_pmtu_idx (sctp_conn); u8 idx = sctp_data_subconn_select (sctp_conn); return sctp_conn->sub_conn[idx].cwnd; } u16 sctp_snd_space (sctp_connection_t * sctp_conn) { /* RFC 4096 Section 6.1; point (A) */ if (sctp_conn->peer_rwnd == 0) return 0; u8 idx = sctp_data_subconn_select (sctp_conn); u32 available_wnd = clib_min (sctp_conn->peer_rwnd, sctp_conn->sub_conn[idx].cwnd); int flight_size = (int) (sctp_conn->next_tsn - sctp_conn->last_unacked_tsn); if (available_wnd <= flight_size) return 0; /* Finally, let's subtract the DATA chunk headers overhead */ return available_wnd - flight_size - sizeof (sctp_payload_data_chunk_t) - sizeof (sctp_full_hdr_t); } /** * Compute TX window session is allowed to fill. */ u32 sctp_session_send_space (transport_connection_t * trans_conn) { sctp_connection_t *sctp_conn = sctp_get_connection_from_transport (trans_conn); return sctp_snd_space (sctp_conn); } transport_connection_t * sctp_session_get_transport (u32 conn_index, u32 thread_index) { sctp_connection_t *sctp_conn = sctp_connection_get (conn_index, thread_index); if (PREDICT_TRUE (sctp_conn != NULL)) return &sctp_conn->sub_conn[SCTP_PRIMARY_PATH_IDX].connection; return NULL; } transport_connection_t * sctp_session_get_listener (u32 listener_index) { sctp_main_t *tm = vnet_get_sctp_main (); sctp_connection_t *sctp_conn; sctp_conn = pool_elt_at_index (tm->listener_pool, listener_index); return &sctp_conn->sub_conn[SCTP_PRIMARY_PATH_IDX].connection; } u8 * format_sctp_session (u8 * s, va_list * args) { u32 tci = va_arg (*args, u32); u32 thread_index = va_arg (*args, u32); u32 verbose = va_arg (*args, u32); sctp_connection_t *tc; tc = sctp_connection_get (tci, thread_index); if (tc) s = format (s, "%U", format_sctp_connection, 
tc, verbose); else s = format (s, "empty\n"); return s; } u8 * format_sctp_listener_session (u8 * s, va_list * args) { u32 tci = va_arg (*args, u32); sctp_connection_t *tc = sctp_listener_get (tci); return format (s, "%U", format_sctp_connection_id, tc); } void sctp_expired_timers_cb (u32 conn_index, u32 timer_id) { sctp_connection_t *sctp_conn; sctp_conn = sctp_connection_get (conn_index, vlib_get_thread_index ()); /* note: the connection may have already disappeared */ if (PREDICT_FALSE (sctp_conn == 0)) return; SCTP_DBG ("%s expired", sctp_timer_to_string (timer_id)); if (sctp_conn->sub_conn[conn_index].unacknowledged_hb > SCTP_PATH_MAX_RETRANS) { // The remote-peer is considered to be unreachable hence shutting down u8 i, total_subs_down = 1; for (i = 0; i < MAX_SCTP_CONNECTIONS; i++) { if (sctp_conn->sub_conn[i].state == SCTP_SUBCONN_STATE_DOWN) continue; u32 now = sctp_time_now (); if (now > (sctp_conn->sub_conn[i].last_seen + SCTP_HB_INTERVAL)) { total_subs_down += 1; sctp_conn->sub_conn[i].state = SCTP_SUBCONN_STATE_DOWN; } } if (total_subs_down == MAX_SCTP_CONNECTIONS) { /* Start cleanup. App wasn't notified yet so use delete notify as * opposed to delete to cleanup session layer state. 
*/ stream_session_delete_notify (&sctp_conn->sub_conn [SCTP_PRIMARY_PATH_IDX].connection); sctp_connection_timers_reset (sctp_conn); sctp_connection_cleanup (sctp_conn); } return; } switch (timer_id) { case SCTP_TIMER_T1_INIT: sctp_send_init (sctp_conn); break; case SCTP_TIMER_T1_COOKIE: sctp_send_cookie_echo (sctp_conn); break; case SCTP_TIMER_T2_SHUTDOWN: sctp_send_shutdown (sctp_conn); break; case SCTP_TIMER_T3_RXTX: sctp_timer_reset (sctp_conn, conn_index, timer_id); sctp_conn->flags |= SCTP_CONN_RECOVERY; sctp_data_retransmit (sctp_conn); break; case SCTP_TIMER_T4_HEARTBEAT: sctp_timer_reset (sctp_conn, conn_index, timer_id); goto heartbeat; } return; heartbeat: sctp_send_heartbeat (sctp_conn); } static void sctp_expired_timers_dispatch (u32 * expired_timers) { int i; u32 connection_index, timer_id; for (i = 0; i < vec_len (expired_timers); i++) { /* Get session index and timer id */ connection_index = expired_timers[i] & 0x0FFFFFFF; timer_id = expired_timers[i] >> 28; SCTP_DBG ("Expired timer ID: %u", timer_id); /* Handle expiration */ sctp_expired_timers_cb (connection_index, timer_id); } } void sctp_initialize_timer_wheels (sctp_main_t * tm) { tw_timer_wheel_16t_2w_512sl_t *tw; /* *INDENT-OFF* */ foreach_vlib_main (({ tw = &tm->timer_wheels[ii]; tw_timer_wheel_init_16t_2w_512sl (tw, sctp_expired_timers_dispatch, 100e-3 /* timer period 100ms */ , ~0); tw->last_run_time = vlib_time_now (this_vlib_main); })); /* *INDENT-ON* */ } clib_error_t * sctp_main_enable (vlib_main_t * vm) { sctp_main_t *tm = vnet_get_sctp_main (); vlib_thread_main_t *vtm = vlib_get_thread_main (); clib_error_t *error = 0; u32 num_threads; int thread; sctp_connection_t *sctp_conn __attribute__ ((unused)); u32 preallocated_connections_per_thread; if ((error = vlib_call_init_function (vm, ip_main_init))) return error; if ((error = vlib_call_init_function (vm, ip4_lookup_init))) return error; if ((error = vlib_call_init_function (vm, ip6_lookup_init))) return error; /* * Registrations */ 
ip4_register_protocol (IP_PROTOCOL_SCTP, sctp4_input_node.index); ip6_register_protocol (IP_PROTOCOL_SCTP, sctp6_input_node.index); /* * Initialize data structures */ num_threads = 1 /* main thread */ + vtm->n_threads; vec_validate (tm->connections, num_threads - 1); /* * Preallocate connections. Assume that thread 0 won't * use preallocated threads when running multi-core */ if (num_threads == 1) { thread = 0; preallocated_connections_per_thread = tm->preallocated_connections; } else { thread = 1; preallocated_connections_per_thread = tm->preallocated_connections / (num_threads - 1); } for (; thread < num_threads; thread++) { if (preallocated_connections_per_thread) pool_init_fixed (tm->connections[thread], preallocated_connections_per_thread); } /* Initialize per worker thread tx buffers (used for control messages) */ vec_validate (tm->tx_buffers, num_threads - 1); /* Initialize timer wheels */ vec_validate (tm->timer_wheels, num_threads - 1); sctp_initialize_timer_wheels (tm); /* Initialize clocks per tick for SCTP timestamp. Used to compute * monotonically increasing timestamps. 
*/ tm->tstamp_ticks_per_clock = vm->clib_time.seconds_per_clock / SCTP_TSTAMP_RESOLUTION; if (num_threads > 1) { clib_spinlock_init (&tm->half_open_lock); } vec_validate (tm->tx_frames[0], num_threads - 1); vec_validate (tm->tx_frames[1], num_threads - 1); vec_validate (tm->ip_lookup_tx_frames[0], num_threads - 1); vec_validate (tm->ip_lookup_tx_frames[1], num_threads - 1); tm->bytes_per_buffer = vlib_buffer_free_list_buffer_size (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX); vec_validate (tm->time_now, num_threads - 1); return error; } clib_error_t * sctp_enable_disable (vlib_main_t * vm, u8 is_en) { if (is_en) { if (sctp_main.is_enabled) return 0; return sctp_main_enable (vm); } else { sctp_main.is_enabled = 0; } return 0; } transport_connection_t * sctp_half_open_session_get_transport (u32 conn_index) { sctp_connection_t *sctp_conn = sctp_half_open_connection_get (conn_index); return &sctp_conn->sub_conn[SCTP_PRIMARY_PATH_IDX].connection; } u8 * format_sctp_half_open (u8 * s, va_list * args) { u32 tci = va_arg (*args, u32); sctp_connection_t *sctp_conn = sctp_half_open_connection_get (tci); return format (s, "%U", format_sctp_connection_id, sctp_conn); } void sctp_update_time (f64 now, u8 thread_index) { sctp_set_time_now (thread_index); tw_timer_expire_timers_16t_2w_512sl (&sctp_main.timer_wheels[thread_index], now); sctp_flush_frames_to_output (thread_index); } /* *INDENT OFF* */ const static transport_proto_vft_t sctp_proto = { .enable = sctp_enable_disable, .bind = sctp_session_bind, .unbind = sctp_session_unbind, .open = sctp_session_open, .close = sctp_session_close, .cleanup = sctp_session_cleanup, .push_header = sctp_push_header, .send_mss = sctp_session_send_mss, .send_space = sctp_session_send_space, .update_time = sctp_update_time, .get_connection = sctp_session_get_transport, .get_listener = sctp_session_get_listener, .get_half_open = sctp_half_open_session_get_transport, .format_connection = format_sctp_session, .format_listener = 
format_sctp_listener_session, .format_half_open = format_sctp_half_open, .tx_type = TRANSPORT_TX_DEQUEUE, .service_type = TRANSPORT_SERVICE_VC, }; /* *INDENT ON* */ clib_error_t * sctp_init (vlib_main_t * vm) { sctp_main_t *tm = vnet_get_sctp_main (); ip_main_t *im = &ip_main; ip_protocol_info_t *pi; /* Session layer, and by implication SCTP, are disabled by default */ tm->is_enabled = 0; /* Register with IP for header parsing */ pi = ip_get_protocol_info (im, IP_PROTOCOL_SCTP); if (pi == 0) return clib_error_return (0, "SCTP protocol info AWOL"); pi->format_header = format_sctp_header; pi->unformat_pg_edit = unformat_pg_sctp_header; /* Register as transport with session layer */ transport_register_protocol (TRANSPORT_PROTO_SCTP, &sctp_proto, FIB_PROTOCOL_IP4, sctp4_output_node.index); transport_register_protocol (TRANSPORT_PROTO_SCTP, &sctp_proto, FIB_PROTOCOL_IP6, sctp6_output_node.index); sctp_api_reference (); return 0; } VLIB_INIT_FUNCTION (sctp_init); /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */