aboutsummaryrefslogtreecommitdiffstats
path: root/src/plugins
diff options
context:
space:
mode:
Diffstat (limited to 'src/plugins')
-rw-r--r--src/plugins/abf/abf_itf_attach.c14
-rw-r--r--src/plugins/acl/elog_acl_trace.h234
-rw-r--r--src/plugins/acl/fa_node.h238
-rw-r--r--src/plugins/acl/hash_lookup.c22
-rw-r--r--src/plugins/acl/public_inlines.h10
-rw-r--r--src/plugins/acl/sess_mgmt_node.c16
-rw-r--r--src/plugins/acl/session_inlines.h15
-rw-r--r--src/plugins/adl/ip4_allowlist.c2
-rw-r--r--src/plugins/adl/ip6_allowlist.c2
-rw-r--r--src/plugins/af_packet/af_packet.c2
-rw-r--r--src/plugins/af_packet/node.c4
-rw-r--r--src/plugins/af_xdp/device.c2
-rw-r--r--src/plugins/cnat/cnat_snat_policy.c5
-rw-r--r--src/plugins/cnat/cnat_types.h2
-rw-r--r--src/plugins/crypto_sw_scheduler/main.c2
-rw-r--r--src/plugins/ct6/ct6.h6
-rw-r--r--src/plugins/dev_armada/pp2/rx.c2
-rw-r--r--src/plugins/dev_ena/ena.c3
-rw-r--r--src/plugins/dev_iavf/virtchnl.h1
-rw-r--r--src/plugins/dev_octeon/common.h5
-rw-r--r--src/plugins/dev_octeon/crypto.c14
-rw-r--r--src/plugins/dev_octeon/crypto.h13
-rw-r--r--src/plugins/dev_octeon/init.c55
-rw-r--r--src/plugins/dev_octeon/octeon.h9
-rw-r--r--src/plugins/dev_octeon/port.c34
-rw-r--r--src/plugins/dev_octeon/queue.c84
-rw-r--r--src/plugins/dev_octeon/roc_helper.c294
-rw-r--r--src/plugins/dev_octeon/rx_node.c4
-rw-r--r--src/plugins/dma_intel/dsa.c2
-rw-r--r--src/plugins/dpdk/cryptodev/cryptodev.c10
-rw-r--r--src/plugins/dpdk/cryptodev/cryptodev_op_data_path.c7
-rw-r--r--src/plugins/dpdk/cryptodev/cryptodev_raw_data_path.c5
-rw-r--r--src/plugins/dpdk/device/common.c5
-rw-r--r--src/plugins/dpdk/device/device.c74
-rw-r--r--src/plugins/dpdk/device/dpdk.h4
-rw-r--r--src/plugins/dpdk/device/dpdk_priv.h4
-rw-r--r--src/plugins/dpdk/device/driver.c1
-rw-r--r--src/plugins/dpdk/device/format.c6
-rw-r--r--src/plugins/dpdk/device/init.c4
-rw-r--r--src/plugins/dpdk/device/node.c7
-rw-r--r--src/plugins/geneve/decap.c2
-rw-r--r--src/plugins/geneve/encap.c2
-rw-r--r--src/plugins/gtpu/gtpu_decap.c4
-rw-r--r--src/plugins/gtpu/gtpu_encap.c2
-rw-r--r--src/plugins/hs_apps/CMakeLists.txt6
-rw-r--r--src/plugins/hs_apps/echo_client.c141
-rw-r--r--src/plugins/hs_apps/echo_client.h12
-rw-r--r--src/plugins/hs_apps/echo_server.c8
-rw-r--r--src/plugins/hs_apps/http_cli.c8
-rw-r--r--src/plugins/hs_apps/http_client.c504
-rw-r--r--src/plugins/hs_apps/http_client_cli.c31
-rw-r--r--src/plugins/hs_apps/http_tps.c13
-rw-r--r--src/plugins/hs_apps/proxy.c3
-rw-r--r--src/plugins/hs_apps/proxy.h2
-rw-r--r--src/plugins/hs_apps/test_builtins.c29
-rw-r--r--src/plugins/hs_apps/vcl/vcl_test_cl_udp.c156
-rw-r--r--src/plugins/http/CMakeLists.txt9
-rw-r--r--src/plugins/http/extras/mk_huffman_table.py416
-rw-r--r--src/plugins/http/http.c2485
-rw-r--r--src/plugins/http/http.h589
-rw-r--r--src/plugins/http/http1.c1936
-rw-r--r--src/plugins/http/http2/frame.c339
-rw-r--r--src/plugins/http/http2/frame.h246
-rw-r--r--src/plugins/http/http2/hpack.c1173
-rw-r--r--src/plugins/http/http2/hpack.h183
-rw-r--r--src/plugins/http/http2/http2.c1771
-rw-r--r--src/plugins/http/http2/http2.h97
-rw-r--r--src/plugins/http/http2/huffman_table.h319
-rw-r--r--src/plugins/http/http_buffer.c52
-rw-r--r--src/plugins/http/http_buffer.h18
-rw-r--r--src/plugins/http/http_header_names.h3
-rw-r--r--src/plugins/http/http_plugin.rst11
-rw-r--r--src/plugins/http/http_private.h901
-rw-r--r--src/plugins/http/http_timer.h10
-rw-r--r--src/plugins/http/test/http_test.c775
-rw-r--r--src/plugins/http_static/http_cache.c8
-rw-r--r--src/plugins/http_static/http_cache.h1
-rw-r--r--src/plugins/http_static/http_static.api47
-rw-r--r--src/plugins/http_static/http_static.c68
-rw-r--r--src/plugins/http_static/http_static.h93
-rw-r--r--src/plugins/http_static/http_static_test.c112
-rw-r--r--src/plugins/http_static/static_server.c719
-rw-r--r--src/plugins/ikev2/ikev2.c4
-rw-r--r--src/plugins/ikev2/ikev2_priv.h2
-rw-r--r--src/plugins/ioam/export-vxlan-gpe/vxlan_gpe_ioam_export.c2
-rw-r--r--src/plugins/ioam/export-vxlan-gpe/vxlan_gpe_node.c4
-rw-r--r--src/plugins/ioam/ip6/ioam_cache_tunnel_select_node.c4
-rw-r--r--src/plugins/ioam/lib-vxlan-gpe/ioam_decap.c5
-rw-r--r--src/plugins/ioam/lib-vxlan-gpe/ioam_encap.c5
-rw-r--r--src/plugins/ioam/lib-vxlan-gpe/ioam_pop.c5
-rw-r--r--src/plugins/ioam/lib-vxlan-gpe/ioam_transit.c2
-rw-r--r--src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_api.c18
-rw-r--r--src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.c6
-rw-r--r--src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h8
-rw-r--r--src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_packet.h4
-rw-r--r--src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_trace.c4
-rw-r--r--src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_util.h4
-rw-r--r--src/plugins/ip_session_redirect/api.c64
-rw-r--r--src/plugins/ip_session_redirect/ip_session_redirect.api41
-rw-r--r--src/plugins/ip_session_redirect/ip_session_redirect.h24
-rw-r--r--src/plugins/ip_session_redirect/redirect.c24
-rw-r--r--src/plugins/ip_session_redirect/test_api.c74
-rw-r--r--src/plugins/l2tp/l2tp.c2
-rw-r--r--src/plugins/lb/lb.c4
-rw-r--r--src/plugins/lb/node.c4
-rw-r--r--src/plugins/linux-cp/lcp.api36
-rw-r--r--src/plugins/linux-cp/lcp_api.c34
-rw-r--r--src/plugins/linux-cp/lcp_cli.c56
-rw-r--r--src/plugins/linux-cp/lcp_interface.c47
-rw-r--r--src/plugins/linux-cp/lcp_interface.h13
-rw-r--r--src/plugins/linux-cp/lcp_nl.c2
-rw-r--r--src/plugins/linux-cp/lcp_node.c116
-rw-r--r--src/plugins/linux-cp/lcp_router.c13
-rw-r--r--src/plugins/lisp/lisp-gpe/decap.c6
-rw-r--r--src/plugins/lisp/lisp-gpe/interface.c2
-rw-r--r--src/plugins/mactime/node.c2
-rw-r--r--src/plugins/map/ip4_map.c2
-rw-r--r--src/plugins/map/ip4_map_t.c4
-rw-r--r--src/plugins/map/ip6_map.c6
-rw-r--r--src/plugins/map/ip6_map_t.c4
-rw-r--r--src/plugins/memif/memif.c8
-rw-r--r--src/plugins/memif/node.c6
-rw-r--r--src/plugins/memif/private.h2
-rw-r--r--src/plugins/memif/socket.c2
-rw-r--r--src/plugins/nat/det44/det44.h1
-rw-r--r--src/plugins/nat/det44/det44_in2out.c2
-rw-r--r--src/plugins/nat/det44/det44_out2in.c2
-rw-r--r--src/plugins/nat/dslite/dslite.h1
-rw-r--r--src/plugins/nat/dslite/dslite_in2out.c1
-rw-r--r--src/plugins/nat/dslite/dslite_out2in.c1
-rw-r--r--src/plugins/nat/lib/inlines.h44
-rw-r--r--src/plugins/nat/lib/ipfix_logging.c1
-rw-r--r--src/plugins/nat/lib/nat_syslog.c1
-rw-r--r--src/plugins/nat/nat44-ed/nat44_ed.h1
-rw-r--r--src/plugins/nat/nat44-ed/nat44_ed_inlines.h1
-rw-r--r--src/plugins/nat/nat44-ei/nat44_ei.c1
-rw-r--r--src/plugins/nat/nat44-ei/nat44_ei.h1
-rw-r--r--src/plugins/nat/nat44-ei/nat44_ei_in2out.c1
-rw-r--r--src/plugins/nat/nat44-ei/nat44_ei_out2in.c1
-rw-r--r--src/plugins/nat/nat64/nat64.c1
-rw-r--r--src/plugins/nat/nat64/nat64.h1
-rw-r--r--src/plugins/nat/nat64/nat64_db.c1
-rw-r--r--src/plugins/nat/pnat/pnat.api17
-rw-r--r--src/plugins/nat/pnat/pnat_api.c14
-rw-r--r--src/plugins/netmap/netmap.c4
-rw-r--r--src/plugins/netmap/node.c4
-rw-r--r--src/plugins/nsh/nsh.c35
-rw-r--r--src/plugins/nsh/nsh.h5
-rw-r--r--src/plugins/nsh/nsh_pop.c2
-rw-r--r--src/plugins/ping/ping_api.c18
-rw-r--r--src/plugins/pppoe/pppoe_cp_node.c2
-rw-r--r--src/plugins/pppoe/pppoe_decap.c2
-rw-r--r--src/plugins/prom/prom.c7
-rw-r--r--src/plugins/prom/prom.h2
-rw-r--r--src/plugins/prom/prom_cli.c2
-rw-r--r--src/plugins/pvti/input.c2
-rw-r--r--src/plugins/pvti/output.c2
-rw-r--r--src/plugins/pvti/pvti.h2
-rw-r--r--src/plugins/quic/quic.c43
-rw-r--r--src/plugins/quic/quic.h4
-rw-r--r--src/plugins/quic/quic_crypto.c9
-rw-r--r--src/plugins/rdma/device.c2
-rw-r--r--src/plugins/sflow/sflow.c13
-rw-r--r--src/plugins/snort/enqueue.c2
-rw-r--r--src/plugins/snort/main.c14
-rw-r--r--src/plugins/srtp/srtp.c16
-rw-r--r--src/plugins/srv6-ad-flow/node.c2
-rw-r--r--src/plugins/srv6-am/node.c2
-rw-r--r--src/plugins/srv6-mobile/node.c10
-rw-r--r--src/plugins/tlsmbedtls/tls_mbedtls.c2
-rw-r--r--src/plugins/tlsopenssl/tls_async.c252
-rw-r--r--src/plugins/tlsopenssl/tls_openssl.c32
-rw-r--r--src/plugins/tlsopenssl/tls_openssl.h24
-rw-r--r--src/plugins/tlspicotls/pico_vpp_crypto.c6
-rw-r--r--src/plugins/unittest/ipsec_test.c14
-rw-r--r--src/plugins/unittest/session_test.c278
-rw-r--r--src/plugins/unittest/svm_fifo_test.c2
-rw-r--r--src/plugins/unittest/tcp_test.c10
-rw-r--r--src/plugins/urpf/urpf_dp.h18
-rw-r--r--src/plugins/vhost/vhost_user.c27
-rw-r--r--src/plugins/vhost/vhost_user.h14
-rw-r--r--src/plugins/vhost/vhost_user_input.c2
-rw-r--r--src/plugins/vhost/vhost_user_output.c8
-rw-r--r--src/plugins/vmxnet3/input.c2
-rw-r--r--src/plugins/vmxnet3/vmxnet3.h2
-rw-r--r--src/plugins/vrrp/vrrp_periodic.c14
-rw-r--r--src/plugins/vxlan-gpe/CMakeLists.txt32
-rw-r--r--src/plugins/vxlan-gpe/FEATURE.yaml10
-rw-r--r--src/plugins/vxlan-gpe/decap.c1167
-rw-r--r--src/plugins/vxlan-gpe/dir.dox32
-rw-r--r--src/plugins/vxlan-gpe/encap.c433
-rw-r--r--src/plugins/vxlan-gpe/plugin.c26
-rw-r--r--src/plugins/vxlan-gpe/vxlan-gpe-rfc.txt868
-rw-r--r--src/plugins/vxlan-gpe/vxlan_gpe.api140
-rw-r--r--src/plugins/vxlan-gpe/vxlan_gpe.c1259
-rw-r--r--src/plugins/vxlan-gpe/vxlan_gpe.h306
-rw-r--r--src/plugins/vxlan-gpe/vxlan_gpe_api.c360
-rw-r--r--src/plugins/vxlan-gpe/vxlan_gpe_error.def16
-rw-r--r--src/plugins/vxlan-gpe/vxlan_gpe_packet.h120
-rw-r--r--src/plugins/vxlan/decap.c4
-rw-r--r--src/plugins/vxlan/encap.c2
-rw-r--r--src/plugins/wireguard/wireguard_input.c2
-rw-r--r--src/plugins/wireguard/wireguard_output_tun.c2
203 files changed, 17231 insertions, 3757 deletions
diff --git a/src/plugins/abf/abf_itf_attach.c b/src/plugins/abf/abf_itf_attach.c
index 04e5c4c40c2..3e55df52562 100644
--- a/src/plugins/abf/abf_itf_attach.c
+++ b/src/plugins/abf/abf_itf_attach.c
@@ -681,18 +681,20 @@ VLIB_REGISTER_NODE (abf_ip6_node) =
}
};
-VNET_FEATURE_INIT (abf_ip4_feat, static) =
-{
+VNET_FEATURE_INIT (abf_ip4_feat, static) = {
.arc_name = "ip4-unicast",
.node_name = "abf-input-ip4",
- .runs_after = VNET_FEATURES ("acl-plugin-in-ip4-fa"),
+ .runs_after = VNET_FEATURES ("acl-plugin-in-ip4-fa",
+ "ip4-full-reassembly-feature",
+ "ip4-sv-reassembly-feature"),
};
-VNET_FEATURE_INIT (abf_ip6_feat, static) =
-{
+VNET_FEATURE_INIT (abf_ip6_feat, static) = {
.arc_name = "ip6-unicast",
.node_name = "abf-input-ip6",
- .runs_after = VNET_FEATURES ("acl-plugin-in-ip6-fa"),
+ .runs_after = VNET_FEATURES ("acl-plugin-in-ip6-fa",
+ "ip6-full-reassembly-feature",
+ "ip6-sv-reassembly-feature"),
};
static fib_node_t *
diff --git a/src/plugins/acl/elog_acl_trace.h b/src/plugins/acl/elog_acl_trace.h
index 0c4f68f7b0f..ae2ef8588ea 100644
--- a/src/plugins/acl/elog_acl_trace.h
+++ b/src/plugins/acl/elog_acl_trace.h
@@ -19,119 +19,143 @@
/* use like: elog_acl_cond_trace_X1(am, (x < 0), "foobar: %d", "i4", int32_value); */
-#define elog_acl_cond_trace_X1(am, trace_cond, acl_elog_trace_format_label, acl_elog_trace_format_args, acl_elog_val1) \
-do { \
- if (trace_cond) { \
- CLIB_UNUSED(struct { u8 available_space[18 - sizeof(acl_elog_val1)]; } *static_check); \
- u16 thread_index = os_get_thread_index (); \
- vlib_worker_thread_t * w = vlib_worker_threads + thread_index; \
- ELOG_TYPE_DECLARE (e) = \
- { \
- .format = "(%02d) " acl_elog_trace_format_label, \
- .format_args = "i2" acl_elog_trace_format_args, \
- }; \
- CLIB_PACKED(struct \
- { \
- u16 thread; \
- typeof(acl_elog_val1) val1; \
- }) *ed; \
- ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
- ed->thread = thread_index; \
- ed->val1 = acl_elog_val1; \
- } \
-} while (0)
-
+#define elog_acl_cond_trace_X1(am, trace_cond, acl_elog_trace_format_label, \
+ acl_elog_trace_format_args, acl_elog_val1) \
+ do \
+ { \
+ if (trace_cond) \
+ { \
+ CLIB_UNUSED (struct { \
+ u8 available_space[18 - sizeof (acl_elog_val1)]; \
+ } * static_check); \
+ clib_thread_index_t thread_index = os_get_thread_index (); \
+ vlib_worker_thread_t *w = vlib_worker_threads + thread_index; \
+ ELOG_TYPE_DECLARE (e) = { \
+ .format = "(%02d) " acl_elog_trace_format_label, \
+ .format_args = "i2" acl_elog_trace_format_args, \
+ }; \
+ CLIB_PACKED (struct { \
+ u16 thread; \
+ typeof (acl_elog_val1) val1; \
+ }) * \
+ ed; \
+ ed = \
+ ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
+ ed->thread = thread_index; \
+ ed->val1 = acl_elog_val1; \
+ } \
+ } \
+ while (0)
/* use like: elog_acl_cond_trace_X2(am, (x<0), "foobar: %d some u64: %lu", "i4i8", int32_value, int64_value); */
-#define elog_acl_cond_trace_X2(am, trace_cond, acl_elog_trace_format_label, acl_elog_trace_format_args, \
- acl_elog_val1, acl_elog_val2) \
-do { \
- if (trace_cond) { \
- CLIB_UNUSED(struct { u8 available_space[18 - sizeof(acl_elog_val1) - sizeof(acl_elog_val2)]; } *static_check); \
- u16 thread_index = os_get_thread_index (); \
- vlib_worker_thread_t * w = vlib_worker_threads + thread_index; \
- ELOG_TYPE_DECLARE (e) = \
- { \
- .format = "(%02d) " acl_elog_trace_format_label, \
- .format_args = "i2" acl_elog_trace_format_args, \
- }; \
- CLIB_PACKED(struct \
- { \
- u16 thread; \
- typeof(acl_elog_val1) val1; \
- typeof(acl_elog_val2) val2; \
- }) *ed; \
- ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
- ed->thread = thread_index; \
- ed->val1 = acl_elog_val1; \
- ed->val2 = acl_elog_val2; \
- } \
-} while (0)
-
+#define elog_acl_cond_trace_X2(am, trace_cond, acl_elog_trace_format_label, \
+ acl_elog_trace_format_args, acl_elog_val1, \
+ acl_elog_val2) \
+ do \
+ { \
+ if (trace_cond) \
+ { \
+ CLIB_UNUSED (struct { \
+ u8 available_space[18 - sizeof (acl_elog_val1) - \
+ sizeof (acl_elog_val2)]; \
+ } * static_check); \
+ clib_thread_index_t thread_index = os_get_thread_index (); \
+ vlib_worker_thread_t *w = vlib_worker_threads + thread_index; \
+ ELOG_TYPE_DECLARE (e) = { \
+ .format = "(%02d) " acl_elog_trace_format_label, \
+ .format_args = "i2" acl_elog_trace_format_args, \
+ }; \
+ CLIB_PACKED (struct { \
+ u16 thread; \
+ typeof (acl_elog_val1) val1; \
+ typeof (acl_elog_val2) val2; \
+ }) * \
+ ed; \
+ ed = \
+ ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
+ ed->thread = thread_index; \
+ ed->val1 = acl_elog_val1; \
+ ed->val2 = acl_elog_val2; \
+ } \
+ } \
+ while (0)
/* use like: elog_acl_cond_trace_X3(am, (x<0), "foobar: %d some u64 %lu baz: %d", "i4i8i4", int32_value, u64_value, int_value); */
-#define elog_acl_cond_trace_X3(am, trace_cond, acl_elog_trace_format_label, acl_elog_trace_format_args, acl_elog_val1, \
- acl_elog_val2, acl_elog_val3) \
-do { \
- if (trace_cond) { \
- CLIB_UNUSED(struct { u8 available_space[18 - sizeof(acl_elog_val1) - sizeof(acl_elog_val2) \
- - sizeof(acl_elog_val3)]; } *static_check); \
- u16 thread_index = os_get_thread_index (); \
- vlib_worker_thread_t * w = vlib_worker_threads + thread_index; \
- ELOG_TYPE_DECLARE (e) = \
- { \
- .format = "(%02d) " acl_elog_trace_format_label, \
- .format_args = "i2" acl_elog_trace_format_args, \
- }; \
- CLIB_PACKED(struct \
- { \
- u16 thread; \
- typeof(acl_elog_val1) val1; \
- typeof(acl_elog_val2) val2; \
- typeof(acl_elog_val3) val3; \
- }) *ed; \
- ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
- ed->thread = thread_index; \
- ed->val1 = acl_elog_val1; \
- ed->val2 = acl_elog_val2; \
- ed->val3 = acl_elog_val3; \
- } \
-} while (0)
-
+#define elog_acl_cond_trace_X3(am, trace_cond, acl_elog_trace_format_label, \
+ acl_elog_trace_format_args, acl_elog_val1, \
+ acl_elog_val2, acl_elog_val3) \
+ do \
+ { \
+ if (trace_cond) \
+ { \
+ CLIB_UNUSED (struct { \
+ u8 available_space[18 - sizeof (acl_elog_val1) - \
+ sizeof (acl_elog_val2) - \
+ sizeof (acl_elog_val3)]; \
+ } * static_check); \
+ clib_thread_index_t thread_index = os_get_thread_index (); \
+ vlib_worker_thread_t *w = vlib_worker_threads + thread_index; \
+ ELOG_TYPE_DECLARE (e) = { \
+ .format = "(%02d) " acl_elog_trace_format_label, \
+ .format_args = "i2" acl_elog_trace_format_args, \
+ }; \
+ CLIB_PACKED (struct { \
+ u16 thread; \
+ typeof (acl_elog_val1) val1; \
+ typeof (acl_elog_val2) val2; \
+ typeof (acl_elog_val3) val3; \
+ }) * \
+ ed; \
+ ed = \
+ ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
+ ed->thread = thread_index; \
+ ed->val1 = acl_elog_val1; \
+ ed->val2 = acl_elog_val2; \
+ ed->val3 = acl_elog_val3; \
+ } \
+ } \
+ while (0)
/* use like: elog_acl_cond_trace_X4(am, (x<0), "foobar: %d some int %d baz: %d bar: %d", "i4i4i4i4", int32_value, int32_value2, int_value, int_value); */
-#define elog_acl_cond_trace_X4(am, trace_cond, acl_elog_trace_format_label, acl_elog_trace_format_args, acl_elog_val1, \
- acl_elog_val2, acl_elog_val3, acl_elog_val4) \
-do { \
- if (trace_cond) { \
- CLIB_UNUSED(struct { u8 available_space[18 - sizeof(acl_elog_val1) - sizeof(acl_elog_val2) \
- - sizeof(acl_elog_val3) -sizeof(acl_elog_val4)]; } *static_check); \
- u16 thread_index = os_get_thread_index (); \
- vlib_worker_thread_t * w = vlib_worker_threads + thread_index; \
- ELOG_TYPE_DECLARE (e) = \
- { \
- .format = "(%02d) " acl_elog_trace_format_label, \
- .format_args = "i2" acl_elog_trace_format_args, \
- }; \
- CLIB_PACKED(struct \
- { \
- u16 thread; \
- typeof(acl_elog_val1) val1; \
- typeof(acl_elog_val2) val2; \
- typeof(acl_elog_val3) val3; \
- typeof(acl_elog_val4) val4; \
- }) *ed; \
- ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
- ed->thread = thread_index; \
- ed->val1 = acl_elog_val1; \
- ed->val2 = acl_elog_val2; \
- ed->val3 = acl_elog_val3; \
- ed->val4 = acl_elog_val4; \
- } \
-} while (0)
-
+#define elog_acl_cond_trace_X4(am, trace_cond, acl_elog_trace_format_label, \
+ acl_elog_trace_format_args, acl_elog_val1, \
+ acl_elog_val2, acl_elog_val3, acl_elog_val4) \
+ do \
+ { \
+ if (trace_cond) \
+ { \
+ CLIB_UNUSED (struct { \
+ u8 available_space[18 - sizeof (acl_elog_val1) - \
+ sizeof (acl_elog_val2) - \
+ sizeof (acl_elog_val3) - \
+ sizeof (acl_elog_val4)]; \
+ } * static_check); \
+ clib_thread_index_t thread_index = os_get_thread_index (); \
+ vlib_worker_thread_t *w = vlib_worker_threads + thread_index; \
+ ELOG_TYPE_DECLARE (e) = { \
+ .format = "(%02d) " acl_elog_trace_format_label, \
+ .format_args = "i2" acl_elog_trace_format_args, \
+ }; \
+ CLIB_PACKED (struct { \
+ u16 thread; \
+ typeof (acl_elog_val1) val1; \
+ typeof (acl_elog_val2) val2; \
+ typeof (acl_elog_val3) val3; \
+ typeof (acl_elog_val4) val4; \
+ }) * \
+ ed; \
+ ed = \
+ ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
+ ed->thread = thread_index; \
+ ed->val1 = acl_elog_val1; \
+ ed->val2 = acl_elog_val2; \
+ ed->val3 = acl_elog_val3; \
+ ed->val4 = acl_elog_val4; \
+ } \
+ } \
+ while (0)
#endif
diff --git a/src/plugins/acl/fa_node.h b/src/plugins/acl/fa_node.h
index c4a971aada3..f1ea8dfaf0a 100644
--- a/src/plugins/acl/fa_node.h
+++ b/src/plugins/acl/fa_node.h
@@ -110,7 +110,7 @@ typedef struct {
u8 as_u8[2];
u16 as_u16;
} tcp_flags_seen; ; /* +2 bytes = 62 */
- u16 thread_index; /* +2 bytes = 64 */
+ clib_thread_index_t thread_index; /* +2 bytes = 64 */
u64 link_enqueue_time; /* 8 byte = 8 */
u32 link_prev_idx; /* +4 bytes = 12 */
u32 link_next_idx; /* +4 bytes = 16 */
@@ -133,7 +133,7 @@ typedef struct {
u64 as_u64;
struct {
u32 session_index;
- u16 thread_index;
+ clib_thread_index_t thread_index;
u16 intf_policy_epoch;
};
};
@@ -255,119 +255,143 @@ u8 *format_acl_plugin_5tuple (u8 * s, va_list * args);
/* use like: elog_acl_maybe_trace_X1(am, "foobar: %d", "i4", int32_value); */
-#define elog_acl_maybe_trace_X1(am, acl_elog_trace_format_label, acl_elog_trace_format_args, acl_elog_val1) \
-do { \
- if (am->trace_sessions) { \
- CLIB_UNUSED(struct { u8 available_space[18 - sizeof(acl_elog_val1)]; } *static_check); \
- u16 thread_index = os_get_thread_index (); \
- vlib_worker_thread_t * w = vlib_worker_threads + thread_index; \
- ELOG_TYPE_DECLARE (e) = \
- { \
- .format = "(%02d) " acl_elog_trace_format_label, \
- .format_args = "i2" acl_elog_trace_format_args, \
- }; \
- CLIB_PACKED(struct \
- { \
- u16 thread; \
- typeof(acl_elog_val1) val1; \
- }) *ed; \
- ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
- ed->thread = thread_index; \
- ed->val1 = acl_elog_val1; \
- } \
-} while (0)
-
+#define elog_acl_maybe_trace_X1(am, acl_elog_trace_format_label, \
+ acl_elog_trace_format_args, acl_elog_val1) \
+ do \
+ { \
+ if (am->trace_sessions) \
+ { \
+ CLIB_UNUSED (struct { \
+ u8 available_space[18 - sizeof (acl_elog_val1)]; \
+ } * static_check); \
+ clib_thread_index_t thread_index = os_get_thread_index (); \
+ vlib_worker_thread_t *w = vlib_worker_threads + thread_index; \
+ ELOG_TYPE_DECLARE (e) = { \
+ .format = "(%02d) " acl_elog_trace_format_label, \
+ .format_args = "i2" acl_elog_trace_format_args, \
+ }; \
+ CLIB_PACKED (struct { \
+ u16 thread; \
+ typeof (acl_elog_val1) val1; \
+ }) * \
+ ed; \
+ ed = \
+ ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
+ ed->thread = thread_index; \
+ ed->val1 = acl_elog_val1; \
+ } \
+ } \
+ while (0)
/* use like: elog_acl_maybe_trace_X2(am, "foobar: %d some u64: %lu", "i4i8", int32_value, int64_value); */
-#define elog_acl_maybe_trace_X2(am, acl_elog_trace_format_label, acl_elog_trace_format_args, \
- acl_elog_val1, acl_elog_val2) \
-do { \
- if (am->trace_sessions) { \
- CLIB_UNUSED(struct { u8 available_space[18 - sizeof(acl_elog_val1) - sizeof(acl_elog_val2)]; } *static_check); \
- u16 thread_index = os_get_thread_index (); \
- vlib_worker_thread_t * w = vlib_worker_threads + thread_index; \
- ELOG_TYPE_DECLARE (e) = \
- { \
- .format = "(%02d) " acl_elog_trace_format_label, \
- .format_args = "i2" acl_elog_trace_format_args, \
- }; \
- CLIB_PACKED(struct \
- { \
- u16 thread; \
- typeof(acl_elog_val1) val1; \
- typeof(acl_elog_val2) val2; \
- }) *ed; \
- ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
- ed->thread = thread_index; \
- ed->val1 = acl_elog_val1; \
- ed->val2 = acl_elog_val2; \
- } \
-} while (0)
-
+#define elog_acl_maybe_trace_X2(am, acl_elog_trace_format_label, \
+ acl_elog_trace_format_args, acl_elog_val1, \
+ acl_elog_val2) \
+ do \
+ { \
+ if (am->trace_sessions) \
+ { \
+ CLIB_UNUSED (struct { \
+ u8 available_space[18 - sizeof (acl_elog_val1) - \
+ sizeof (acl_elog_val2)]; \
+ } * static_check); \
+ clib_thread_index_t thread_index = os_get_thread_index (); \
+ vlib_worker_thread_t *w = vlib_worker_threads + thread_index; \
+ ELOG_TYPE_DECLARE (e) = { \
+ .format = "(%02d) " acl_elog_trace_format_label, \
+ .format_args = "i2" acl_elog_trace_format_args, \
+ }; \
+ CLIB_PACKED (struct { \
+ u16 thread; \
+ typeof (acl_elog_val1) val1; \
+ typeof (acl_elog_val2) val2; \
+ }) * \
+ ed; \
+ ed = \
+ ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
+ ed->thread = thread_index; \
+ ed->val1 = acl_elog_val1; \
+ ed->val2 = acl_elog_val2; \
+ } \
+ } \
+ while (0)
/* use like: elog_acl_maybe_trace_X3(am, "foobar: %d some u64 %lu baz: %d", "i4i8i4", int32_value, u64_value, int_value); */
-#define elog_acl_maybe_trace_X3(am, acl_elog_trace_format_label, acl_elog_trace_format_args, acl_elog_val1, \
- acl_elog_val2, acl_elog_val3) \
-do { \
- if (am->trace_sessions) { \
- CLIB_UNUSED(struct { u8 available_space[18 - sizeof(acl_elog_val1) - sizeof(acl_elog_val2) \
- - sizeof(acl_elog_val3)]; } *static_check); \
- u16 thread_index = os_get_thread_index (); \
- vlib_worker_thread_t * w = vlib_worker_threads + thread_index; \
- ELOG_TYPE_DECLARE (e) = \
- { \
- .format = "(%02d) " acl_elog_trace_format_label, \
- .format_args = "i2" acl_elog_trace_format_args, \
- }; \
- CLIB_PACKED(struct \
- { \
- u16 thread; \
- typeof(acl_elog_val1) val1; \
- typeof(acl_elog_val2) val2; \
- typeof(acl_elog_val3) val3; \
- }) *ed; \
- ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
- ed->thread = thread_index; \
- ed->val1 = acl_elog_val1; \
- ed->val2 = acl_elog_val2; \
- ed->val3 = acl_elog_val3; \
- } \
-} while (0)
-
+#define elog_acl_maybe_trace_X3(am, acl_elog_trace_format_label, \
+ acl_elog_trace_format_args, acl_elog_val1, \
+ acl_elog_val2, acl_elog_val3) \
+ do \
+ { \
+ if (am->trace_sessions) \
+ { \
+ CLIB_UNUSED (struct { \
+ u8 available_space[18 - sizeof (acl_elog_val1) - \
+ sizeof (acl_elog_val2) - \
+ sizeof (acl_elog_val3)]; \
+ } * static_check); \
+ clib_thread_index_t thread_index = os_get_thread_index (); \
+ vlib_worker_thread_t *w = vlib_worker_threads + thread_index; \
+ ELOG_TYPE_DECLARE (e) = { \
+ .format = "(%02d) " acl_elog_trace_format_label, \
+ .format_args = "i2" acl_elog_trace_format_args, \
+ }; \
+ CLIB_PACKED (struct { \
+ u16 thread; \
+ typeof (acl_elog_val1) val1; \
+ typeof (acl_elog_val2) val2; \
+ typeof (acl_elog_val3) val3; \
+ }) * \
+ ed; \
+ ed = \
+ ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
+ ed->thread = thread_index; \
+ ed->val1 = acl_elog_val1; \
+ ed->val2 = acl_elog_val2; \
+ ed->val3 = acl_elog_val3; \
+ } \
+ } \
+ while (0)
/* use like: elog_acl_maybe_trace_X4(am, "foobar: %d some int %d baz: %d bar: %d", "i4i4i4i4", int32_value, int32_value2, int_value, int_value); */
-#define elog_acl_maybe_trace_X4(am, acl_elog_trace_format_label, acl_elog_trace_format_args, acl_elog_val1, \
- acl_elog_val2, acl_elog_val3, acl_elog_val4) \
-do { \
- if (am->trace_sessions) { \
- CLIB_UNUSED(struct { u8 available_space[18 - sizeof(acl_elog_val1) - sizeof(acl_elog_val2) \
- - sizeof(acl_elog_val3) -sizeof(acl_elog_val4)]; } *static_check); \
- u16 thread_index = os_get_thread_index (); \
- vlib_worker_thread_t * w = vlib_worker_threads + thread_index; \
- ELOG_TYPE_DECLARE (e) = \
- { \
- .format = "(%02d) " acl_elog_trace_format_label, \
- .format_args = "i2" acl_elog_trace_format_args, \
- }; \
- CLIB_PACKED(struct \
- { \
- u16 thread; \
- typeof(acl_elog_val1) val1; \
- typeof(acl_elog_val2) val2; \
- typeof(acl_elog_val3) val3; \
- typeof(acl_elog_val4) val4; \
- }) *ed; \
- ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
- ed->thread = thread_index; \
- ed->val1 = acl_elog_val1; \
- ed->val2 = acl_elog_val2; \
- ed->val3 = acl_elog_val3; \
- ed->val4 = acl_elog_val4; \
- } \
-} while (0)
-
+#define elog_acl_maybe_trace_X4(am, acl_elog_trace_format_label, \
+ acl_elog_trace_format_args, acl_elog_val1, \
+ acl_elog_val2, acl_elog_val3, acl_elog_val4) \
+ do \
+ { \
+ if (am->trace_sessions) \
+ { \
+ CLIB_UNUSED (struct { \
+ u8 available_space[18 - sizeof (acl_elog_val1) - \
+ sizeof (acl_elog_val2) - \
+ sizeof (acl_elog_val3) - \
+ sizeof (acl_elog_val4)]; \
+ } * static_check); \
+ clib_thread_index_t thread_index = os_get_thread_index (); \
+ vlib_worker_thread_t *w = vlib_worker_threads + thread_index; \
+ ELOG_TYPE_DECLARE (e) = { \
+ .format = "(%02d) " acl_elog_trace_format_label, \
+ .format_args = "i2" acl_elog_trace_format_args, \
+ }; \
+ CLIB_PACKED (struct { \
+ u16 thread; \
+ typeof (acl_elog_val1) val1; \
+ typeof (acl_elog_val2) val2; \
+ typeof (acl_elog_val3) val3; \
+ typeof (acl_elog_val4) val4; \
+ }) * \
+ ed; \
+ ed = \
+ ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
+ ed->thread = thread_index; \
+ ed->val1 = acl_elog_val1; \
+ ed->val2 = acl_elog_val2; \
+ ed->val3 = acl_elog_val3; \
+ ed->val4 = acl_elog_val4; \
+ } \
+ } \
+ while (0)
#endif
diff --git a/src/plugins/acl/hash_lookup.c b/src/plugins/acl/hash_lookup.c
index 9c3c662a8f1..b4f86208a71 100644
--- a/src/plugins/acl/hash_lookup.c
+++ b/src/plugins/acl/hash_lookup.c
@@ -946,31 +946,15 @@ hash_acl_reapply(acl_main_t *am, u32 lc_index, int acl_index)
static void
make_ip6_address_mask(ip6_address_t *addr, u8 prefix_len)
{
+ ASSERT (prefix_len <= 128);
ip6_address_mask_from_width(addr, prefix_len);
}
-
-/* Maybe should be moved into the core somewhere */
-always_inline void
-ip4_address_mask_from_width (ip4_address_t * a, u32 width)
-{
- int i, byte, bit, bitnum;
- ASSERT (width <= 32);
- clib_memset (a, 0, sizeof (a[0]));
- for (i = 0; i < width; i++)
- {
- bitnum = (7 - (i & 7));
- byte = i / 8;
- bit = 1 << bitnum;
- a->as_u8[byte] |= bit;
- }
-}
-
-
static void
make_ip4_address_mask(ip4_address_t *addr, u8 prefix_len)
{
- ip4_address_mask_from_width(addr, prefix_len);
+ ASSERT (prefix_len <= 32);
+ ip4_preflen_to_mask (prefix_len, addr);
}
static void
diff --git a/src/plugins/acl/public_inlines.h b/src/plugins/acl/public_inlines.h
index eb9f0de920f..f39285344b0 100644
--- a/src/plugins/acl/public_inlines.h
+++ b/src/plugins/acl/public_inlines.h
@@ -268,8 +268,8 @@ fa_acl_match_ip6_addr (ip6_address_t * addr1, ip6_address_t * addr2,
}
if (prefixlen % 8)
{
- u8 b1 = *((u8 *) addr1 + 1 + prefixlen / 8);
- u8 b2 = *((u8 *) addr2 + 1 + prefixlen / 8);
+ u8 b1 = *((u8 *) addr1 + prefixlen / 8);
+ u8 b2 = *((u8 *) addr2 + prefixlen / 8);
u8 mask0 = (0xff - ((1 << (8 - (prefixlen % 8))) - 1));
return (b1 & mask0) == b2;
}
@@ -715,8 +715,10 @@ acl_plugin_match_5tuple_inline_and_count (void *p_acl_main, u32 lc_index,
r_acl_pos_p, r_acl_match_p, r_rule_match_p, trace_bitmap);
}
if (PREDICT_TRUE(ret)) {
- u16 thread_index = os_get_thread_index ();
- vlib_increment_combined_counter(am->combined_acl_counters + *r_acl_match_p, thread_index, *r_rule_match_p, 1, packet_size);
+ clib_thread_index_t thread_index = os_get_thread_index ();
+ vlib_increment_combined_counter (
+ am->combined_acl_counters + *r_acl_match_p, thread_index,
+ *r_rule_match_p, 1, packet_size);
}
return ret;
}
diff --git a/src/plugins/acl/sess_mgmt_node.c b/src/plugins/acl/sess_mgmt_node.c
index 418baef9b6b..10f0e92c808 100644
--- a/src/plugins/acl/sess_mgmt_node.c
+++ b/src/plugins/acl/sess_mgmt_node.c
@@ -136,16 +136,17 @@ fa_session_get_list_timeout (acl_main_t * am, fa_session_t * sess)
}
static u64
-acl_fa_get_list_head_expiry_time (acl_main_t * am,
- acl_fa_per_worker_data_t * pw, u64 now,
- u16 thread_index, int timeout_type)
+acl_fa_get_list_head_expiry_time (acl_main_t *am, acl_fa_per_worker_data_t *pw,
+ u64 now, clib_thread_index_t thread_index,
+ int timeout_type)
{
return pw->fa_conn_list_head_expiry_time[timeout_type];
}
static int
-acl_fa_conn_time_to_check (acl_main_t * am, acl_fa_per_worker_data_t * pw,
- u64 now, u16 thread_index, u32 session_index)
+acl_fa_conn_time_to_check (acl_main_t *am, acl_fa_per_worker_data_t *pw,
+ u64 now, clib_thread_index_t thread_index,
+ u32 session_index)
{
if (session_index == FA_SESSION_BOGUS_INDEX)
return 0;
@@ -162,7 +163,8 @@ acl_fa_conn_time_to_check (acl_main_t * am, acl_fa_per_worker_data_t * pw,
* return the total number of sessions reclaimed.
*/
static int
-acl_fa_check_idle_sessions (acl_main_t * am, u16 thread_index, u64 now)
+acl_fa_check_idle_sessions (acl_main_t *am, clib_thread_index_t thread_index,
+ u64 now)
{
acl_fa_per_worker_data_t *pw = &am->per_worker_data[thread_index];
fa_full_session_id_t fsid;
@@ -429,7 +431,7 @@ acl_fa_worker_conn_cleaner_process (vlib_main_t * vm,
{
acl_main_t *am = &acl_main;
u64 now = clib_cpu_time_now ();
- u16 thread_index = os_get_thread_index ();
+ clib_thread_index_t thread_index = os_get_thread_index ();
acl_fa_per_worker_data_t *pw = &am->per_worker_data[thread_index];
int num_expired;
elog_acl_maybe_trace_X1 (am,
diff --git a/src/plugins/acl/session_inlines.h b/src/plugins/acl/session_inlines.h
index edc8a7057ee..c98194005a4 100644
--- a/src/plugins/acl/session_inlines.h
+++ b/src/plugins/acl/session_inlines.h
@@ -115,16 +115,16 @@ fa_session_get_timeout (acl_main_t * am, fa_session_t * sess)
}
always_inline fa_session_t *
-get_session_ptr_no_check (acl_main_t * am, u16 thread_index,
+get_session_ptr_no_check (acl_main_t *am, clib_thread_index_t thread_index,
u32 session_index)
{
acl_fa_per_worker_data_t *pw = &am->per_worker_data[thread_index];
return pool_elt_at_index (pw->fa_sessions_pool, session_index);
}
-
always_inline fa_session_t *
-get_session_ptr (acl_main_t * am, u16 thread_index, u32 session_index)
+get_session_ptr (acl_main_t *am, clib_thread_index_t thread_index,
+ u32 session_index)
{
acl_fa_per_worker_data_t *pw = &am->per_worker_data[thread_index];
@@ -135,7 +135,8 @@ get_session_ptr (acl_main_t * am, u16 thread_index, u32 session_index)
}
always_inline int
-is_valid_session_ptr (acl_main_t * am, u16 thread_index, fa_session_t * sess)
+is_valid_session_ptr (acl_main_t *am, clib_thread_index_t thread_index,
+ fa_session_t *sess)
{
acl_fa_per_worker_data_t *pw = &am->per_worker_data[thread_index];
return ((sess != 0)
@@ -470,10 +471,10 @@ acl_fa_can_add_session (acl_main_t * am, int is_input, u32 sw_if_index)
am->fa_conn_table_max_entries);
}
-
always_inline void
-acl_fa_try_recycle_session (acl_main_t * am, int is_input, u16 thread_index,
- u32 sw_if_index, u64 now)
+acl_fa_try_recycle_session (acl_main_t *am, int is_input,
+ clib_thread_index_t thread_index, u32 sw_if_index,
+ u64 now)
{
/* try to recycle a TCP transient session */
acl_fa_per_worker_data_t *pw = &am->per_worker_data[thread_index];
diff --git a/src/plugins/adl/ip4_allowlist.c b/src/plugins/adl/ip4_allowlist.c
index 4c755725ea7..a44cb51762f 100644
--- a/src/plugins/adl/ip4_allowlist.c
+++ b/src/plugins/adl/ip4_allowlist.c
@@ -58,7 +58,7 @@ VLIB_NODE_FN (ip4_adl_allowlist_node) (vlib_main_t * vm,
adl_feature_type_t next_index;
adl_main_t *cm = &adl_main;
vlib_combined_counter_main_t * vcm = &load_balance_main.lbm_via_counters;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 allowed_packets;
from = vlib_frame_vector_args (frame);
diff --git a/src/plugins/adl/ip6_allowlist.c b/src/plugins/adl/ip6_allowlist.c
index 5f38484666b..f9d964645c4 100644
--- a/src/plugins/adl/ip6_allowlist.c
+++ b/src/plugins/adl/ip6_allowlist.c
@@ -58,7 +58,7 @@ VLIB_NODE_FN (ip6_adl_allowlist_node) (vlib_main_t * vm,
adl_feature_type_t next_index;
adl_main_t *cm = &adl_main;
vlib_combined_counter_main_t * vcm = &load_balance_main.lbm_via_counters;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 allowed_packets;
from = vlib_frame_vector_args (frame);
diff --git a/src/plugins/af_packet/af_packet.c b/src/plugins/af_packet/af_packet.c
index 8cb2af27d7f..f3a1f495fe7 100644
--- a/src/plugins/af_packet/af_packet.c
+++ b/src/plugins/af_packet/af_packet.c
@@ -30,7 +30,7 @@
#include <vppinfra/linux/sysfs.h>
#include <vlib/vlib.h>
-#include <vlib/unix/unix.h>
+#include <vlib/file.h>
#include <vnet/ip/ip.h>
#include <vnet/devices/netlink.h>
#include <vnet/ethernet/ethernet.h>
diff --git a/src/plugins/af_packet/node.c b/src/plugins/af_packet/node.c
index 279f11c0183..e60a037b093 100644
--- a/src/plugins/af_packet/node.c
+++ b/src/plugins/af_packet/node.c
@@ -269,7 +269,7 @@ af_packet_v3_device_input_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
u32 block_nr = rx_queue->rx_req->req3.tp_block_nr;
u8 *block_start = 0;
uword n_trace = vlib_get_trace_count (vm, node);
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 n_buffer_bytes = vlib_buffer_get_default_data_size (vm);
u32 min_bufs = rx_queue->rx_req->req3.tp_frame_size / n_buffer_bytes;
u32 num_pkts = 0;
@@ -571,7 +571,7 @@ af_packet_v2_device_input_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
u32 frame_num = rx_queue->rx_req->req.tp_frame_nr;
u8 *block_start = rx_queue->rx_ring[block];
uword n_trace = vlib_get_trace_count (vm, node);
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 n_buffer_bytes = vlib_buffer_get_default_data_size (vm);
u32 min_bufs = rx_queue->rx_req->req.tp_frame_size / n_buffer_bytes;
u32 sw_if_index = apif->sw_if_index;
diff --git a/src/plugins/af_xdp/device.c b/src/plugins/af_xdp/device.c
index 63a276ce51e..8d9496206d2 100644
--- a/src/plugins/af_xdp/device.c
+++ b/src/plugins/af_xdp/device.c
@@ -24,7 +24,7 @@
#include <linux/limits.h>
#include <bpf/bpf.h>
#include <vlib/vlib.h>
-#include <vlib/unix/unix.h>
+#include <vlib/file.h>
#include <vlib/pci/pci.h>
#include <vppinfra/linux/netns.h>
#include <vppinfra/linux/sysfs.h>
diff --git a/src/plugins/cnat/cnat_snat_policy.c b/src/plugins/cnat/cnat_snat_policy.c
index cd9bfef492a..5f15b7d26c9 100644
--- a/src/plugins/cnat/cnat_snat_policy.c
+++ b/src/plugins/cnat/cnat_snat_policy.c
@@ -22,7 +22,8 @@ cnat_snat_policy_main_t cnat_snat_policy_main;
uword
unformat_cnat_snat_interface_map_type (unformat_input_t *input, va_list *args)
{
- u8 *a = va_arg (*args, u8 *);
+ cnat_snat_interface_map_type_t *a =
+ va_arg (*args, cnat_snat_interface_map_type_t *);
if (unformat (input, "include-v4"))
*a = CNAT_SNAT_IF_MAP_INCLUDE_V4;
else if (unformat (input, "include-v6"))
@@ -113,7 +114,7 @@ cnat_snat_policy_add_del_if_command_fn (vlib_main_t *vm,
vnet_main_t *vnm = vnet_get_main ();
int is_add = 1;
u32 sw_if_index = ~0;
- u32 table = 0;
+ cnat_snat_interface_map_type_t table = CNAT_SNAT_IF_MAP_INCLUDE_V4;
int rv;
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
diff --git a/src/plugins/cnat/cnat_types.h b/src/plugins/cnat/cnat_types.h
index d229d21adae..37eb62ec981 100644
--- a/src/plugins/cnat/cnat_types.h
+++ b/src/plugins/cnat/cnat_types.h
@@ -192,7 +192,7 @@ typedef struct cnat_timestamp_mpool_t_
typedef struct cnat_node_ctx_
{
f64 now;
- u32 thread_index;
+ clib_thread_index_t thread_index;
ip_address_family_t af;
u8 do_trace;
} cnat_node_ctx_t;
diff --git a/src/plugins/crypto_sw_scheduler/main.c b/src/plugins/crypto_sw_scheduler/main.c
index dc97ce937d9..bb1505a38cf 100644
--- a/src/plugins/crypto_sw_scheduler/main.c
+++ b/src/plugins/crypto_sw_scheduler/main.c
@@ -446,7 +446,7 @@ convert_async_crypto_id (vnet_crypto_op_id_t async_op_id, u32 *crypto_op,
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
- u32 *enqueue_thread_idx)
+ clib_thread_index_t *enqueue_thread_idx)
{
crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
crypto_sw_scheduler_per_thread_data_t *ptd =
diff --git a/src/plugins/ct6/ct6.h b/src/plugins/ct6/ct6.h
index 0b7deb07839..a6919174d86 100644
--- a/src/plugins/ct6/ct6.h
+++ b/src/plugins/ct6/ct6.h
@@ -46,7 +46,7 @@ typedef CLIB_PACKED (struct
typedef struct
{
ct6_session_key_t key;
- u32 thread_index;
+ clib_thread_index_t thread_index;
u32 next_index;
u32 prev_index;
u32 hits;
@@ -95,7 +95,7 @@ static inline void
ct6_lru_remove (ct6_main_t * cmp, ct6_session_t * s0)
{
ct6_session_t *next_sess, *prev_sess;
- u32 thread_index;
+ clib_thread_index_t thread_index;
u32 s0_index;
thread_index = s0->thread_index;
@@ -128,7 +128,7 @@ static inline void
ct6_lru_add (ct6_main_t * cmp, ct6_session_t * s0, f64 now)
{
ct6_session_t *next_sess;
- u32 thread_index;
+ clib_thread_index_t thread_index;
u32 s0_index;
s0->hits++;
diff --git a/src/plugins/dev_armada/pp2/rx.c b/src/plugins/dev_armada/pp2/rx.c
index 4e73882e3d8..8eff72d6157 100644
--- a/src/plugins/dev_armada/pp2/rx.c
+++ b/src/plugins/dev_armada/pp2/rx.c
@@ -208,7 +208,7 @@ mrvl_pp2_rx_refill (vlib_main_t *vm, vlib_node_runtime_t *node,
vnet_dev_port_t *port = rxq->port;
vnet_dev_t *dev = port->dev;
mvpp2_device_t *md = vnet_dev_get_data (dev);
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
struct pp2_hif *hif = md->hif[thread_index];
struct pp2_bpool *bpool = md->thread[thread_index].bpool;
struct buff_release_entry *bre = md->thread[thread_index].bre;
diff --git a/src/plugins/dev_ena/ena.c b/src/plugins/dev_ena/ena.c
index ed5c47ed505..a81a33d5f22 100644
--- a/src/plugins/dev_ena/ena.c
+++ b/src/plugins/dev_ena/ena.c
@@ -13,7 +13,6 @@
static ena_aq_host_info_t host_info = {
.os_type = 3, /* DPDK */
- .kernel_ver_str = VPP_BUILD_VER,
.os_dist_str = VPP_BUILD_VER,
.driver_version = {
.major = 16,
@@ -171,6 +170,8 @@ ena_init (vlib_main_t *vm, vnet_dev_t *dev)
*ed->host_info = host_info;
ed->host_info->num_cpus = vlib_get_n_threads ();
+ strncpy ((char *) ed->host_info->kernel_ver_str, VPP_BUILD_VER,
+ sizeof (ed->host_info->kernel_ver_str) - 1);
ena_set_mem_addr (vm, dev, &host_attr.os_info_ba, ed->host_info);
if ((rv = ena_aq_set_feature (vm, dev, ENA_ADMIN_FEAT_ID_HOST_ATTR_CONFIG,
diff --git a/src/plugins/dev_iavf/virtchnl.h b/src/plugins/dev_iavf/virtchnl.h
index 2099104c8ad..72158684e9e 100644
--- a/src/plugins/dev_iavf/virtchnl.h
+++ b/src/plugins/dev_iavf/virtchnl.h
@@ -560,6 +560,7 @@ typedef struct
{
u16 unicast_promisc : 1;
u16 multicast_promisc : 1;
+ u16 unused : 14;
};
u16 flags;
};
diff --git a/src/plugins/dev_octeon/common.h b/src/plugins/dev_octeon/common.h
index a7a051526d2..9c6dde694f1 100644
--- a/src/plugins/dev_octeon/common.h
+++ b/src/plugins/dev_octeon/common.h
@@ -12,7 +12,8 @@
#include <base/roc_api.h>
static_always_inline u32
-oct_aura_free_all_buffers (vlib_main_t *vm, u64 aura_handle, u16 hdr_off)
+oct_aura_free_all_buffers (vlib_main_t *vm, u64 aura_handle, u16 hdr_off,
+ u32 num_buffers)
{
u32 n = 0;
u64 iova;
@@ -22,6 +23,8 @@ oct_aura_free_all_buffers (vlib_main_t *vm, u64 aura_handle, u16 hdr_off)
vlib_buffer_t *b = (void *) iova + hdr_off;
vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, b));
n++;
+ if (num_buffers && n == num_buffers)
+ break;
}
return n;
}
diff --git a/src/plugins/dev_octeon/crypto.c b/src/plugins/dev_octeon/crypto.c
index 800f24a008a..49b6f61375c 100644
--- a/src/plugins/dev_octeon/crypto.c
+++ b/src/plugins/dev_octeon/crypto.c
@@ -1354,7 +1354,7 @@ oct_crypto_aead_session_update (vlib_main_t *vm, oct_crypto_sess_t *sess,
vnet_crypto_key_t *key = vnet_crypto_get_key (key_index);
roc_se_cipher_type enc_type = 0;
roc_se_auth_type auth_type = 0;
- u32 digest_len = ~0;
+ u32 digest_len = 16;
i32 rv = 0;
switch (key->alg)
@@ -1366,9 +1366,6 @@ oct_crypto_aead_session_update (vlib_main_t *vm, oct_crypto_sess_t *sess,
sess->aes_gcm = 1;
sess->iv_offset = 0;
sess->iv_length = 16;
- sess->cpt_ctx.mac_len = 16;
- sess->cpt_op = type;
- digest_len = 16;
break;
case VNET_CRYPTO_ALG_CHACHA20_POLY1305:
enc_type = ROC_SE_CHACHA20;
@@ -1381,6 +1378,9 @@ oct_crypto_aead_session_update (vlib_main_t *vm, oct_crypto_sess_t *sess,
return -1;
}
+ sess->cpt_ctx.mac_len = digest_len;
+ sess->cpt_op = type;
+
rv = roc_se_ciph_key_set (&sess->cpt_ctx, enc_type, key->data, key->length);
if (rv)
{
@@ -1827,7 +1827,7 @@ oct_crypto_enqueue_aead_aad_0_dec (vlib_main_t *vm,
vnet_crypto_async_frame_t *
oct_crypto_frame_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
- u32 *enqueue_thread_idx)
+ clib_thread_index_t *enqueue_thread_idx)
{
oct_crypto_main_t *ocm = &oct_crypto_main;
u32 deq_head, status = VNET_CRYPTO_OP_STATUS_COMPLETED;
@@ -1940,7 +1940,7 @@ oct_init_crypto_engine_handlers (vlib_main_t *vm, vnet_dev_t *dev)
}
int
-oct_conf_sw_queue (vlib_main_t *vm, vnet_dev_t *dev)
+oct_conf_sw_queue (vlib_main_t *vm, vnet_dev_t *dev, oct_crypto_dev_t *ocd)
{
oct_crypto_main_t *ocm = &oct_crypto_main;
vlib_thread_main_t *tm = vlib_get_thread_main ();
@@ -1961,7 +1961,7 @@ oct_conf_sw_queue (vlib_main_t *vm, vnet_dev_t *dev)
* Each pending queue will get number of cpt desc / number of cores.
* And that desc count is shared across inflight entries.
*/
- n_inflight_req = (OCT_CPT_LF_MAX_NB_DESC / tm->n_vlib_mains);
+ n_inflight_req = (ocd->n_desc / tm->n_vlib_mains);
for (i = 0; i < tm->n_vlib_mains; ++i)
{
diff --git a/src/plugins/dev_octeon/crypto.h b/src/plugins/dev_octeon/crypto.h
index 5bd26f6b9be..a99ee12ddb2 100644
--- a/src/plugins/dev_octeon/crypto.h
+++ b/src/plugins/dev_octeon/crypto.h
@@ -11,6 +11,9 @@
#define OCT_MAX_N_CPT_DEV 2
+#define OCT_CPT_LF_DEF_NB_DESC 16384
+
+#define OCT_CPT_LF_MIN_NB_DESC 1024
#define OCT_CPT_LF_MAX_NB_DESC 128000
/* CRYPTO_ID, KEY_LENGTH_IN_BYTES, TAG_LEN, AAD_LEN */
@@ -81,6 +84,7 @@ typedef struct
struct roc_cpt_lmtline lmtline;
struct roc_cpt_lf lf;
vnet_dev_t *dev;
+ u32 n_desc;
} oct_crypto_dev_t;
typedef struct
@@ -207,9 +211,10 @@ int oct_crypto_enqueue_aead_aad_12_dec (vlib_main_t *vm,
vnet_crypto_async_frame_t *frame);
int oct_crypto_enqueue_aead_aad_0_dec (vlib_main_t *vm,
vnet_crypto_async_frame_t *frame);
-vnet_crypto_async_frame_t *oct_crypto_frame_dequeue (vlib_main_t *vm,
- u32 *nb_elts_processed,
- u32 *enqueue_thread_idx);
+vnet_crypto_async_frame_t *
+oct_crypto_frame_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
+ clib_thread_index_t *enqueue_thread_idx);
int oct_init_crypto_engine_handlers (vlib_main_t *vm, vnet_dev_t *dev);
-int oct_conf_sw_queue (vlib_main_t *vm, vnet_dev_t *dev);
+int oct_conf_sw_queue (vlib_main_t *vm, vnet_dev_t *dev,
+ oct_crypto_dev_t *ocd);
#endif /* _CRYPTO_H_ */
diff --git a/src/plugins/dev_octeon/init.c b/src/plugins/dev_octeon/init.c
index 561cbe94fed..e1a6f9064ba 100644
--- a/src/plugins/dev_octeon/init.c
+++ b/src/plugins/dev_octeon/init.c
@@ -61,6 +61,22 @@ static struct
#undef _
};
+static vnet_dev_arg_t oct_dev_args[] = {
+ {
+ .id = OCT_DEV_ARG_CRYPTO_N_DESC,
+ .name = "n_desc",
+ .desc = "number of cpt descriptors, applicable to cpt devices only",
+ .type = VNET_DEV_ARG_TYPE_UINT32,
+ .default_val.uint32 = OCT_CPT_LF_DEF_NB_DESC,
+ },
+ {
+ .id = OCT_DEV_ARG_END,
+ .name = "end",
+ .desc = "Argument end",
+ .type = VNET_DEV_ARG_END,
+ },
+};
+
static u8 *
oct_probe (vlib_main_t *vm, vnet_dev_bus_index_t bus_index, void *dev_info)
{
@@ -241,7 +257,7 @@ oct_conf_cpt_queue (vlib_main_t *vm, vnet_dev_t *dev, oct_crypto_dev_t *ocd)
cpt_lf = &ocd->lf;
cpt_lmtline = &ocd->lmtline;
- cpt_lf->nb_desc = OCT_CPT_LF_MAX_NB_DESC;
+ cpt_lf->nb_desc = ocd->n_desc;
cpt_lf->lf_id = 0;
if ((rrv = roc_cpt_lf_init (roc_cpt, cpt_lf)) < 0)
return cnx_return_roc_err (dev, rrv, "roc_cpt_lf_init");
@@ -261,6 +277,7 @@ oct_init_cpt (vlib_main_t *vm, vnet_dev_t *dev)
extern oct_plt_init_param_t oct_plt_init_param;
oct_device_t *cd = vnet_dev_get_data (dev);
oct_crypto_dev_t *ocd = NULL;
+ u32 n_desc;
int rrv;
if (ocm->n_cpt == OCT_MAX_N_CPT_DEV || ocm->started)
@@ -274,6 +291,27 @@ oct_init_cpt (vlib_main_t *vm, vnet_dev_t *dev)
ocd->roc_cpt->pci_dev = &cd->plt_pci_dev;
ocd->dev = dev;
+ ocd->n_desc = OCT_CPT_LF_DEF_NB_DESC;
+
+ foreach_vnet_dev_args (arg, dev)
+ {
+ if (arg->id == OCT_DEV_ARG_CRYPTO_N_DESC &&
+ vnet_dev_arg_get_uint32 (arg))
+ {
+ n_desc = vnet_dev_arg_get_uint32 (arg);
+ if (n_desc < OCT_CPT_LF_MIN_NB_DESC ||
+ n_desc > OCT_CPT_LF_MAX_NB_DESC)
+ {
+ log_err (dev,
+ "number of cpt descriptors should be within range "
+ "of %u and %u",
+ OCT_CPT_LF_MIN_NB_DESC, OCT_CPT_LF_MAX_NB_DESC);
+ return VNET_DEV_ERR_NOT_SUPPORTED;
+ }
+
+ ocd->n_desc = vnet_dev_arg_get_uint32 (arg);
+ }
+ }
if ((rrv = roc_cpt_dev_init (ocd->roc_cpt)))
return cnx_return_roc_err (dev, rrv, "roc_cpt_dev_init");
@@ -290,7 +328,7 @@ oct_init_cpt (vlib_main_t *vm, vnet_dev_t *dev)
* Initialize s/w queues, which are common across multiple
* crypto devices
*/
- oct_conf_sw_queue (vm, dev);
+ oct_conf_sw_queue (vm, dev, ocd);
ocm->crypto_dev[0] = ocd;
}
@@ -335,6 +373,7 @@ oct_init (vlib_main_t *vm, vnet_dev_t *dev)
.id.class_id = pci_hdr.class << 16 | pci_hdr.subclass,
.pci_handle = vnet_dev_get_pci_handle (dev),
};
+ cd->msix_handler = NULL;
foreach_int (i, 2, 4)
{
@@ -345,9 +384,20 @@ oct_init (vlib_main_t *vm, vnet_dev_t *dev)
}
STATIC_ASSERT (sizeof (cd->plt_pci_dev.name) == sizeof (dev->device_id), "");
+
+ if ((rv = vnet_dev_pci_bus_master_enable (vm, dev)))
+ return rv;
+
strncpy ((char *) cd->plt_pci_dev.name, dev->device_id,
sizeof (dev->device_id));
+ cd->plt_pci_dev.intr_handle = malloc (sizeof (struct oct_pci_intr_handle));
+ if (!cd->plt_pci_dev.intr_handle)
+ return VNET_DEV_ERR_DMA_MEM_ALLOC_FAIL;
+ memset (cd->plt_pci_dev.intr_handle, 0x0,
+ sizeof (struct oct_pci_intr_handle));
+ cd->plt_pci_dev.intr_handle->pci_handle = cd->plt_pci_dev.pci_handle;
+
switch (cd->type)
{
case OCT_DEVICE_TYPE_RVU_PF:
@@ -396,6 +446,7 @@ VNET_DEV_REGISTER_DRIVER (octeon) = {
.free = oct_free,
.probe = oct_probe,
},
+ .args = oct_dev_args,
};
static clib_error_t *
diff --git a/src/plugins/dev_octeon/octeon.h b/src/plugins/dev_octeon/octeon.h
index ccf8f62880d..24d51f9091e 100644
--- a/src/plugins/dev_octeon/octeon.h
+++ b/src/plugins/dev_octeon/octeon.h
@@ -25,6 +25,12 @@
typedef enum
{
+ OCT_DEV_ARG_CRYPTO_N_DESC = 1,
+ OCT_DEV_ARG_END,
+} oct_dev_args_t;
+
+typedef enum
+{
OCT_DEVICE_TYPE_UNKNOWN = 0,
OCT_DEVICE_TYPE_RVU_PF,
OCT_DEVICE_TYPE_RVU_VF,
@@ -43,6 +49,7 @@ typedef struct
u32 speed;
struct plt_pci_device plt_pci_dev;
struct roc_nix *nix;
+ oct_msix_handler_info_t *msix_handler;
} oct_device_t;
typedef struct
@@ -60,6 +67,7 @@ typedef struct
u8 lf_allocated : 1;
u8 tm_initialized : 1;
u8 npc_initialized : 1;
+ u8 q_intr_enabled : 1;
struct roc_npc npc;
oct_flow_entry_t *flow_entries;
} oct_port_t;
@@ -133,6 +141,7 @@ void oct_tx_queue_free (vlib_main_t *, vnet_dev_tx_queue_t *);
vnet_dev_rv_t oct_rxq_init (vlib_main_t *, vnet_dev_rx_queue_t *);
vnet_dev_rv_t oct_txq_init (vlib_main_t *, vnet_dev_tx_queue_t *);
void oct_rxq_deinit (vlib_main_t *, vnet_dev_rx_queue_t *);
+int oct_drain_queue (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq);
void oct_txq_deinit (vlib_main_t *, vnet_dev_tx_queue_t *);
format_function_t format_oct_rxq_info;
format_function_t format_oct_txq_info;
diff --git a/src/plugins/dev_octeon/port.c b/src/plugins/dev_octeon/port.c
index f8a7d6ba7db..0fc3fcb7871 100644
--- a/src/plugins/dev_octeon/port.c
+++ b/src/plugins/dev_octeon/port.c
@@ -227,6 +227,15 @@ oct_port_init (vlib_main_t *vm, vnet_dev_port_t *port)
return rv;
}
+ if (roc_nix_register_queue_irqs (nix))
+ {
+ rv = oct_roc_err (dev, rrv, "roc_nix_register_queue_irqs() failed");
+ oct_port_deinit (vm, port);
+ return rv;
+ }
+ cp->q_intr_enabled = 1;
+ oct_port_add_counters (vm, port);
+
return VNET_DEV_OK;
}
@@ -257,6 +266,13 @@ oct_port_deinit (vlib_main_t *vm, vnet_dev_port_t *port)
cp->tm_initialized = 0;
}
+ /* Unregister queue irqs */
+ if (cp->q_intr_enabled)
+ {
+ roc_nix_unregister_queue_irqs (nix);
+ cp->q_intr_enabled = 0;
+ }
+
if (cp->lf_allocated)
{
if ((rrv = roc_nix_lf_free (nix)))
@@ -377,7 +393,9 @@ oct_rxq_stop (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq)
if ((rrv = roc_nix_rq_ena_dis (&crq->rq, 0)))
oct_roc_err (dev, rrv, "roc_nix_rq_ena_dis() failed");
- n = oct_aura_free_all_buffers (vm, crq->aura_handle, crq->hdr_off);
+ n = oct_drain_queue (vm, rxq);
+ n += oct_aura_free_all_buffers (vm, crq->aura_handle, crq->hdr_off,
+ crq->n_enq - n);
if (crq->n_enq - n > 0)
log_err (dev, "%u buffers leaked on rx queue %u stop", crq->n_enq - n,
@@ -396,10 +414,7 @@ oct_txq_stop (vlib_main_t *vm, vnet_dev_tx_queue_t *txq)
oct_npa_batch_alloc_cl128_t *cl;
u32 n, off = ctq->hdr_off;
- n = oct_aura_free_all_buffers (vm, ctq->aura_handle, off);
- ctq->n_enq -= n;
-
- if (ctq->n_enq > 0 && ctq->ba_num_cl > 0)
+ if (ctq->ba_num_cl > 0)
for (n = ctq->ba_num_cl, cl = ctq->ba_buffer + ctq->ba_first_cl; n;
cl++, n--)
{
@@ -409,12 +424,20 @@ oct_txq_stop (vlib_main_t *vm, vnet_dev_tx_queue_t *txq)
if (st.status.ccode != ALLOC_CCODE_INVAL)
for (u32 i = 0; i < st.status.count; i++)
{
+#if (CLIB_DEBUG > 0)
+ if (!i || (i == 8))
+ cl->iova[i] &= OCT_BATCH_ALLOC_IOVA0_MASK;
+#endif
vlib_buffer_t *b = (vlib_buffer_t *) (cl->iova[i] + off);
vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, b));
ctq->n_enq--;
}
}
+ n = oct_aura_free_all_buffers (vm, ctq->aura_handle, off,
+ 0 /* To free all availiable buffers */);
+ ctq->n_enq -= n;
+
if (ctq->n_enq > 0)
log_err (dev, "%u buffers leaked on tx queue %u stop", ctq->n_enq,
txq->queue_id);
@@ -422,6 +445,7 @@ oct_txq_stop (vlib_main_t *vm, vnet_dev_tx_queue_t *txq)
log_debug (dev, "%u buffers freed from tx queue %u", n, txq->queue_id);
ctq->n_enq = 0;
+ ctq->ba_num_cl = ctq->ba_first_cl = 0;
}
vnet_dev_rv_t
diff --git a/src/plugins/dev_octeon/queue.c b/src/plugins/dev_octeon/queue.c
index 58d391b8508..6534a5c93b1 100644
--- a/src/plugins/dev_octeon/queue.c
+++ b/src/plugins/dev_octeon/queue.c
@@ -156,6 +156,89 @@ oct_rxq_init (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq)
return VNET_DEV_OK;
}
+static_always_inline vlib_buffer_t *
+oct_seg_to_bp (void *p)
+{
+ return (vlib_buffer_t *) p - 1;
+}
+
+static void
+oct_multi_seg_free (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq,
+ oct_nix_rx_cqe_desc_t *d)
+{
+ vlib_buffer_t *t;
+ u8 s0 = d->sg0.segs, s1;
+
+ t = oct_seg_to_bp (d->segs0[1]);
+ vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, t));
+
+ if (s0 == 2)
+ return;
+ t = oct_seg_to_bp (d->segs0[2]);
+ vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, t));
+
+ if (d->sg1.subdc != NIX_SUBDC_SG)
+ return;
+
+ s1 = d->sg1.segs;
+ if (s1 == 0)
+ return;
+
+ t = oct_seg_to_bp (d->segs1[0]);
+ vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, t));
+
+ if (s1 == 1)
+ return;
+ t = oct_seg_to_bp (d->segs1[1]);
+ vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, t));
+
+ if (s1 == 2)
+ return;
+ t = oct_seg_to_bp (d->segs1[2]);
+ vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, t));
+}
+
+int
+oct_drain_queue (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq)
+{
+ oct_rxq_t *crq = vnet_dev_get_rx_queue_data (rxq);
+ oct_nix_rx_cqe_desc_t *descs = crq->cq.desc_base;
+ oct_nix_lf_cq_op_status_t status;
+ u32 cq_size = crq->cq.nb_desc;
+ u32 cq_mask = crq->cq.qmask;
+ vlib_buffer_t *b;
+ u32 i, head, n_desc, n, f_cnt = 0;
+
+ /* Free all CQ entries */
+ while (1)
+ {
+ /* get head and tail from NIX_LF_CQ_OP_STATUS */
+ status.as_u64 = roc_atomic64_add_sync (crq->cq.wdata, crq->cq.status);
+ if (status.cq_err || status.op_err)
+ return f_cnt;
+
+ head = status.head;
+ n_desc = (status.tail - head) & cq_mask;
+
+ if (n_desc == 0)
+ return f_cnt;
+
+ n = clib_min (cq_size - head, n_desc);
+ for (i = head; i < n; i++)
+ {
+ b = oct_seg_to_bp (descs[i].segs0[0]);
+ vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, b));
+ if (descs[i].sg0.segs > 1)
+ oct_multi_seg_free (vm, rxq, &descs[i]);
+ }
+ f_cnt += n;
+ plt_write64 ((crq->cq.wdata | n), crq->cq.door);
+ plt_wmb ();
+ }
+
+ return f_cnt;
+}
+
void
oct_rxq_deinit (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq)
{
@@ -173,6 +256,7 @@ oct_rxq_deinit (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq)
if (crq->cq_initialized)
{
+ oct_drain_queue (vm, rxq);
rrv = roc_nix_cq_fini (&crq->cq);
if (rrv)
oct_roc_err (dev, rrv, "roc_nix_cq_fini() failed");
diff --git a/src/plugins/dev_octeon/roc_helper.c b/src/plugins/dev_octeon/roc_helper.c
index c1166b654cf..0f872047fb4 100644
--- a/src/plugins/dev_octeon/roc_helper.c
+++ b/src/plugins/dev_octeon/roc_helper.c
@@ -9,6 +9,7 @@
#include <vlib/linux/vfio.h>
#include <base/roc_api.h>
#include <common.h>
+#include "octeon.h"
static oct_plt_memzone_list_t memzone_list;
@@ -76,7 +77,7 @@ oct_drv_physmem_alloc (vlib_main_t *vm, u32 size, u32 align)
if (align)
{
/* Force ROC align alloc in case alignment is less than ROC align */
- align = align < ROC_ALIGN ? ROC_ALIGN : align;
+ align = ((align + ROC_ALIGN - 1) & ~(ROC_ALIGN - 1));
mem = vlib_physmem_alloc_aligned_on_numa (vm, size, align, 0);
}
else
@@ -114,38 +115,39 @@ oct_plt_zmalloc (u32 size, u32 align)
return oct_drv_physmem_alloc (vm, size, align);
}
-static oct_plt_memzone_t *
-memzone_get (u32 index)
+static void *
+oct_plt_realloc (void *addr, u32 size, u32 align)
{
- if (index == ((u32) ~0))
- return 0;
+ align = CLIB_CACHE_LINE_ROUND (align);
+ size = CLIB_CACHE_LINE_ROUND (size);
- return pool_elt_at_index (memzone_list.mem_pool, index);
+ if (align)
+ return clib_mem_realloc_aligned (addr, size, align);
+ else
+ return clib_mem_realloc (addr, size);
}
-static int
-oct_plt_memzone_free (const oct_plt_memzone_t *name)
+static oct_plt_memzone_t *
+oct_plt_memzone_lookup (const char *name)
{
- uword *p;
- p = hash_get_mem (memzone_list.memzone_by_name, name);
-
- if (p[0] == ((u32) ~0))
- return -EINVAL;
-
- hash_unset_mem (memzone_list.memzone_by_name, name);
+ oct_plt_memzone_t *mem_pool;
- pool_put_index (memzone_list.mem_pool, p[0]);
+ pool_foreach (mem_pool, memzone_list.mem_pool)
+ {
+ if (!clib_strcmp (mem_pool->name, name))
+ return mem_pool;
+ }
return 0;
}
-static oct_plt_memzone_t *
-oct_plt_memzone_lookup (const char *name)
+static int
+oct_plt_memzone_free (const oct_plt_memzone_t *mz)
{
- uword *p;
- p = hash_get_mem (memzone_list.memzone_by_name, name);
- if (p)
- return memzone_get (p[0]);
+ if (!mz || !oct_plt_memzone_lookup (mz->name))
+ return -EINVAL;
+
+ pool_put (memzone_list.mem_pool, mz);
return 0;
}
@@ -165,16 +167,258 @@ oct_plt_memzone_reserve_aligned (const char *name, u64 len, u8 socket,
mem_pool->addr = p;
mem_pool->index = mem_pool - memzone_list.mem_pool;
- hash_set_mem (memzone_list.memzone_by_name, name, mem_pool->index);
+ strcpy (mem_pool->name, name);
return mem_pool;
}
+static void
+plt_msix_handler (vlib_main_t *vm, vlib_pci_dev_handle_t handle, uint16_t line)
+{
+ vnet_dev_t *dev = (vnet_dev_t *) vlib_pci_get_private_data (vm, handle);
+ oct_device_t *cd = vnet_dev_get_data (dev);
+
+ if (cd->msix_handler && cd->msix_handler[line].fn)
+ cd->msix_handler[line].fn (cd->msix_handler[line].data);
+}
+
+static int
+oct_plt_get_num_vectors (oct_pci_dev_handle_t handle)
+{
+ vlib_main_t *vm = vlib_get_main ();
+
+ return vlib_pci_get_num_msix_interrupts (vm, handle);
+}
+
+static int
+oct_plt_intr_enable (oct_pci_dev_handle_t handle, uint16_t start,
+ uint16_t count, uint8_t enable,
+ enum oct_msix_rsrc_op_t op)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vnet_dev_t *dev = (vnet_dev_t *) vlib_pci_get_private_data (vm, handle);
+ oct_device_t *cd = vnet_dev_get_data (dev);
+ clib_error_t *error = NULL;
+
+ if (op == OCT_MSIX_RSRC_ALLOC)
+ {
+ if (cd->msix_handler)
+ {
+ clib_warning ("MSIX handlers already allocated\n");
+ return -EINVAL;
+ }
+ cd->msix_handler = malloc (sizeof (*cd->msix_handler) * (start + count));
+ if (!cd->msix_handler)
+ {
+ clib_warning ("MSIX handlers alilocation failed\n");
+ return -ENOMEM;
+ }
+ }
+ if (enable)
+ error = vlib_pci_enable_msix_irq (vm, handle, start, count);
+ else
+ error = vlib_pci_disable_msix_irq (vm, handle, start, count);
+ if (error)
+ {
+ clib_error_report (error);
+ return -EINVAL;
+ }
+ if (op == OCT_MSIX_RSRC_FREE)
+ {
+ if (cd->msix_handler)
+ free (cd->msix_handler);
+ }
+
+ return 0;
+}
+
+static int
+oct_plt_intr_config (oct_pci_dev_handle_t handle, uint32_t vec,
+ plt_msix_handler_function_t handler, void *data,
+ int enable)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vnet_dev_t *dev = (vnet_dev_t *) vlib_pci_get_private_data (vm, handle);
+ oct_device_t *cd = vnet_dev_get_data (dev);
+ clib_error_t *error = NULL;
+
+ /* Skip AF_PF_MBOX interrupt FIXME */
+ if (vec == RVU_PF_INT_VEC_AFPF_MBOX)
+ return 0;
+
+ if (enable)
+ {
+ error =
+ vlib_pci_register_msix_handler (vm, handle, vec, 1, plt_msix_handler);
+ if (error)
+ {
+ clib_error_report (error);
+ return -EINVAL;
+ }
+ if (cd->msix_handler)
+ {
+ cd->msix_handler[vec].fn = handler;
+ cd->msix_handler[vec].vec = vec;
+ cd->msix_handler[vec].data = data;
+ }
+ error = vlib_pci_enable_msix_irq (vm, handle, vec, 1);
+ if (error)
+ {
+ clib_error_report (error);
+ return -EINVAL;
+ }
+ }
+ else
+ {
+ error = vlib_pci_disable_msix_irq (vm, handle, vec, 1);
+ if (error)
+ {
+ clib_error_report (error);
+ return -EINVAL;
+ }
+ error = vlib_pci_unregister_msix_handler (vm, handle, vec, 1);
+ if (error)
+ {
+ clib_error_report (error);
+ return -EINVAL;
+ }
+ if (cd->msix_handler)
+ {
+ cd->msix_handler[vec].fn = NULL;
+ cd->msix_handler[vec].data = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static inline __attribute__ ((__always_inline__)) int
+plt_intr_max_intr_get (const struct plt_intr_handle *intr_handle)
+{
+ if (!intr_handle)
+ return -EINVAL;
+
+ return intr_handle->max_intr;
+}
+
+static inline __attribute__ ((__always_inline__)) int
+plt_intr_max_intr_set (struct plt_intr_handle *intr_handle, int max_intr)
+{
+ if (!intr_handle)
+ return -EINVAL;
+
+ intr_handle->max_intr = max_intr;
+
+ return 0;
+}
+
+static int
+irq_get_info (struct plt_intr_handle *intr_handle)
+{
+ int num_vec;
+
+ num_vec = oct_plt_get_num_vectors (intr_handle->pci_handle);
+ if (num_vec == 0)
+ {
+ plt_err ("HW max=%d > PLT_MAX_RXTX_INTR_VEC_ID: %d", num_vec,
+ PLT_MAX_RXTX_INTR_VEC_ID);
+ plt_intr_max_intr_set (intr_handle, PLT_MAX_RXTX_INTR_VEC_ID);
+ }
+ else
+ {
+ if (plt_intr_max_intr_set (intr_handle, num_vec))
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+irq_init (struct plt_intr_handle *intr_handle)
+{
+ int rc = oct_plt_intr_enable (intr_handle->pci_handle, 0,
+ plt_intr_max_intr_get (intr_handle), 0,
+ OCT_MSIX_RSRC_ALLOC);
+
+ if (rc)
+ plt_err ("Failed to set irqs vector rc=%d", rc);
+
+ return rc;
+}
+
+static int
+oct_plt_irq_register (struct oct_pci_intr_handle *intr_handle,
+ oct_plt_pci_intr_callback_fn cb, void *data,
+ unsigned int vec)
+{
+ /* If no max_intr read from VFIO */
+ if (plt_intr_max_intr_get (intr_handle) == 0)
+ {
+ irq_get_info (intr_handle);
+ irq_init (intr_handle);
+ }
+
+ if (vec > (uint32_t) plt_intr_max_intr_get (intr_handle))
+ {
+ plt_err ("Error registering MSI-X interrupts vec:%d > %d", vec,
+ plt_intr_max_intr_get (intr_handle));
+ return -EINVAL;
+ }
+
+ oct_plt_intr_config (intr_handle->pci_handle, vec, cb, data, 1);
+
+ return 0;
+}
+
+static void
+oct_plt_irq_unregister (struct oct_pci_intr_handle *intr_handle,
+ oct_plt_pci_intr_callback_fn cb, void *data,
+ unsigned int vec)
+{
+ if (vec > (uint32_t) plt_intr_max_intr_get (intr_handle))
+ {
+ plt_err ("Error unregistering MSI-X interrupts vec:%d > %d", vec,
+ plt_intr_max_intr_get (intr_handle));
+ return;
+ }
+
+ oct_plt_intr_config (intr_handle->pci_handle, vec, cb, data, 0);
+}
+
+static int
+oct_plt_irq_disable (struct oct_pci_intr_handle *intr_handle)
+{
+ int rc = -EINVAL;
+
+ if (!intr_handle)
+ return rc;
+
+ /* Clear max_intr to indicate re-init next time */
+ rc = oct_plt_intr_enable (intr_handle->pci_handle, 0,
+ plt_intr_max_intr_get (intr_handle), 0,
+ OCT_MSIX_RSRC_FREE);
+ plt_intr_max_intr_set (intr_handle, 0);
+ return rc;
+}
+
+static int
+oct_plt_irq_reconfigure (struct oct_pci_intr_handle *intr_handle,
+ uint16_t max_intr)
+{
+ /* Disable interrupts if enabled. */
+ if (plt_intr_max_intr_get (intr_handle))
+ oct_plt_irq_disable (intr_handle);
+
+ plt_intr_max_intr_set (intr_handle, max_intr);
+ return irq_init (intr_handle);
+}
+
oct_plt_init_param_t oct_plt_init_param = {
.oct_plt_log_reg_class = vlib_log_register_class,
.oct_plt_log = oct_plt_log,
.oct_plt_free = oct_plt_free,
.oct_plt_zmalloc = oct_plt_zmalloc,
+ .oct_plt_realloc = oct_plt_realloc,
.oct_plt_memzone_free = oct_plt_memzone_free,
.oct_plt_memzone_lookup = oct_plt_memzone_lookup,
.oct_plt_memzone_reserve_aligned = oct_plt_memzone_reserve_aligned,
@@ -184,4 +428,8 @@ oct_plt_init_param_t oct_plt_init_param = {
.oct_plt_spinlock_trylock = oct_plt_spinlock_trylock,
.oct_plt_get_thread_index = oct_plt_get_thread_index,
.oct_plt_get_cache_line_size = oct_plt_get_cache_line_size,
+ .oct_plt_irq_reconfigure = oct_plt_irq_reconfigure,
+ .oct_plt_irq_register = oct_plt_irq_register,
+ .oct_plt_irq_unregister = oct_plt_irq_unregister,
+ .oct_plt_irq_disable = oct_plt_irq_disable
};
diff --git a/src/plugins/dev_octeon/rx_node.c b/src/plugins/dev_octeon/rx_node.c
index 833227eeea8..f70c07642a8 100644
--- a/src/plugins/dev_octeon/rx_node.c
+++ b/src/plugins/dev_octeon/rx_node.c
@@ -432,6 +432,10 @@ VNET_DEV_NODE_FN (oct_rx_node)
foreach_vnet_dev_rx_queue_runtime (rxq, node)
{
vnet_dev_port_t *port = rxq->port;
+
+ if (!rxq->started)
+ continue;
+
n_rx += oct_rx_node_inline (vm, node, frame, port, rxq, 0);
}
diff --git a/src/plugins/dma_intel/dsa.c b/src/plugins/dma_intel/dsa.c
index 473f2efa93e..20a90e34b0e 100644
--- a/src/plugins/dma_intel/dsa.c
+++ b/src/plugins/dma_intel/dsa.c
@@ -103,7 +103,6 @@ intel_dsa_batch_fallback (vlib_main_t *vm, intel_dsa_batch_t *b,
clib_memcpy_fast (desc->dst, desc->src, desc->size);
}
b->status = INTEL_DSA_STATUS_CPU_SUCCESS;
- ch->submitted++;
return;
}
@@ -407,6 +406,7 @@ intel_dsa_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
/* fallback to software if exception happened */
intel_dsa_batch_fallback (vm, b, ch);
glitch = 1 & b->barrier_before_last;
+ t->pending_batches[n++] = b;
}
else
{
diff --git a/src/plugins/dpdk/cryptodev/cryptodev.c b/src/plugins/dpdk/cryptodev/cryptodev.c
index c60f9c886ff..af695580363 100644
--- a/src/plugins/dpdk/cryptodev/cryptodev.c
+++ b/src/plugins/dpdk/cryptodev/cryptodev.c
@@ -128,14 +128,14 @@ prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
xform_cipher->cipher.algo = cipher_algo;
xform_cipher->cipher.key.data = key_cipher->data;
- xform_cipher->cipher.key.length = vec_len (key_cipher->data);
+ xform_cipher->cipher.key.length = key_cipher->length;
xform_cipher->cipher.iv.length = 16;
xform_cipher->cipher.iv.offset = CRYPTODEV_IV_OFFSET;
xform_auth->auth.algo = auth_algo;
xform_auth->auth.digest_length = digest_len;
xform_auth->auth.key.data = key_auth->data;
- xform_auth->auth.key.length = vec_len (key_auth->data);
+ xform_auth->auth.key.length = key_auth->length;
return 0;
}
@@ -608,7 +608,7 @@ format_cryptodev_inst (u8 * s, va_list * args)
cryptodev_main_t *cmt = &cryptodev_main;
u32 inst = va_arg (*args, u32);
cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
- u32 thread_index = 0;
+ clib_thread_index_t thread_index = 0;
struct rte_cryptodev_info info;
rte_cryptodev_info_get (cit->dev_id, &info);
@@ -670,7 +670,7 @@ cryptodev_show_cache_rings_fn (vlib_main_t *vm, unformat_input_t *input,
vlib_cli_command_t *cmd)
{
cryptodev_main_t *cmt = &cryptodev_main;
- u32 thread_index = 0;
+ clib_thread_index_t thread_index = 0;
u16 i;
vec_foreach_index (thread_index, cmt->per_thread_data)
{
@@ -756,7 +756,7 @@ cryptodev_set_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
cryptodev_main_t *cmt = &cryptodev_main;
cryptodev_engine_thread_t *cet;
unformat_input_t _line_input, *line_input = &_line_input;
- u32 thread_index, inst_index;
+ clib_thread_index_t thread_index, inst_index;
u32 thread_present = 0, inst_present = 0;
clib_error_t *error = 0;
int ret;
diff --git a/src/plugins/dpdk/cryptodev/cryptodev_op_data_path.c b/src/plugins/dpdk/cryptodev/cryptodev_op_data_path.c
index 8d55e4fbf0f..2282ffac10c 100644
--- a/src/plugins/dpdk/cryptodev/cryptodev_op_data_path.c
+++ b/src/plugins/dpdk/cryptodev/cryptodev_op_data_path.c
@@ -461,7 +461,8 @@ error_exit:
}
static_always_inline u8
-cryptodev_frame_dequeue_internal (vlib_main_t *vm, u32 *enqueue_thread_idx)
+cryptodev_frame_dequeue_internal (vlib_main_t *vm,
+ clib_thread_index_t *enqueue_thread_idx)
{
cryptodev_main_t *cmt = &cryptodev_main;
cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
@@ -563,7 +564,7 @@ cryptodev_enqueue_frame (vlib_main_t *vm, cryptodev_cache_ring_elt_t *ring_elt)
static_always_inline vnet_crypto_async_frame_t *
cryptodev_frame_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
- u32 *enqueue_thread_idx)
+ clib_thread_index_t *enqueue_thread_idx)
{
cryptodev_main_t *cmt = &cryptodev_main;
vnet_crypto_main_t *cm = &crypto_main;
@@ -670,7 +671,7 @@ cryptodev_register_cop_hdl (vlib_main_t *vm, u32 eidx)
vec_foreach (cet, cmt->per_thread_data)
{
- u32 thread_index = cet - cmt->per_thread_data;
+ clib_thread_index_t thread_index = cet - cmt->per_thread_data;
u32 numa = vlib_get_main_by_index (thread_index)->numa_node;
name = format (0, "vpp_cop_pool_%u_%u", numa, thread_index);
cet->cop_pool = rte_mempool_create (
diff --git a/src/plugins/dpdk/cryptodev/cryptodev_raw_data_path.c b/src/plugins/dpdk/cryptodev/cryptodev_raw_data_path.c
index 67ab9c89e67..40d0a4299da 100644
--- a/src/plugins/dpdk/cryptodev/cryptodev_raw_data_path.c
+++ b/src/plugins/dpdk/cryptodev/cryptodev_raw_data_path.c
@@ -463,7 +463,8 @@ cryptodev_post_dequeue (void *frame, u32 index, u8 is_op_success)
}
static_always_inline u8
-cryptodev_raw_dequeue_internal (vlib_main_t *vm, u32 *enqueue_thread_idx)
+cryptodev_raw_dequeue_internal (vlib_main_t *vm,
+ clib_thread_index_t *enqueue_thread_idx)
{
cryptodev_main_t *cmt = &cryptodev_main;
cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
@@ -537,7 +538,7 @@ cryptodev_enqueue_frame_to_qat (vlib_main_t *vm,
static_always_inline vnet_crypto_async_frame_t *
cryptodev_raw_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
- u32 *enqueue_thread_idx)
+ clib_thread_index_t *enqueue_thread_idx)
{
cryptodev_main_t *cmt = &cryptodev_main;
vnet_crypto_main_t *cm = &crypto_main;
diff --git a/src/plugins/dpdk/device/common.c b/src/plugins/dpdk/device/common.c
index d6eed5441b4..7671fc2639c 100644
--- a/src/plugins/dpdk/device/common.c
+++ b/src/plugins/dpdk/device/common.c
@@ -17,7 +17,7 @@
#include <vppinfra/vec.h>
#include <vppinfra/format.h>
#include <vppinfra/file.h>
-#include <vlib/unix/unix.h>
+#include <vlib/file.h>
#include <assert.h>
#include <vnet/ip/ip.h>
@@ -369,8 +369,7 @@ dpdk_setup_interrupts (dpdk_device_t *xd)
if (xd->flags & DPDK_DEVICE_FLAG_INT_UNMASKABLE)
{
clib_file_main_t *fm = &file_main;
- clib_file_t *f =
- pool_elt_at_index (fm->file_pool, rxq->clib_file_index);
+ clib_file_t *f = clib_file_get (fm, rxq->clib_file_index);
fm->file_update (f, UNIX_FILE_UPDATE_DELETE);
}
}
diff --git a/src/plugins/dpdk/device/device.c b/src/plugins/dpdk/device/device.c
index c5abbd5f727..5fd936d1743 100644
--- a/src/plugins/dpdk/device/device.c
+++ b/src/plugins/dpdk/device/device.c
@@ -22,7 +22,7 @@
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
#include <vppinfra/error.h>
-#include <vlib/unix/unix.h>
+#include <vlib/file.h>
#define foreach_dpdk_tx_func_error \
_(PKT_DROP, "Tx packet drops (dpdk tx failure)")
@@ -159,7 +159,7 @@ tx_burst_vector_internal (vlib_main_t *vm, dpdk_device_t *xd,
{
dpdk_tx_queue_t *txq;
u32 n_retry;
- int n_sent = 0;
+ u32 n_sent = 0;
n_retry = 16;
txq = vec_elt_at_index (xd->tx_queues, queue_id);
@@ -279,9 +279,11 @@ VNET_DEVICE_CLASS_TX_FN (dpdk_device_class) (vlib_main_t * vm,
vnet_hw_if_tx_frame_t *tf = vlib_frame_scalar_args (f);
u32 n_packets = f->n_vectors;
u32 n_left;
- u32 thread_index = vm->thread_index;
+ u32 n_prep;
+ clib_thread_index_t thread_index = vm->thread_index;
int queue_id = tf->queue_id;
u8 is_shared = tf->shared_queue;
+ u8 offload_enabled = 0;
u32 tx_pkts = 0;
dpdk_per_thread_data_t *ptd = vec_elt_at_index (dm->per_thread_data,
thread_index);
@@ -333,6 +335,7 @@ VNET_DEVICE_CLASS_TX_FN (dpdk_device_class) (vlib_main_t * vm,
if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD) &&
(or_flags & VNET_BUFFER_F_OFFLOAD)))
{
+ offload_enabled = 1;
dpdk_buffer_tx_offload (xd, b[0], mb[0]);
dpdk_buffer_tx_offload (xd, b[1], mb[1]);
dpdk_buffer_tx_offload (xd, b[2], mb[2]);
@@ -386,6 +389,7 @@ VNET_DEVICE_CLASS_TX_FN (dpdk_device_class) (vlib_main_t * vm,
if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD) &&
(or_flags & VNET_BUFFER_F_OFFLOAD)))
{
+ offload_enabled = 1;
dpdk_buffer_tx_offload (xd, b[0], mb[0]);
dpdk_buffer_tx_offload (xd, b[1], mb[1]);
}
@@ -408,7 +412,13 @@ VNET_DEVICE_CLASS_TX_FN (dpdk_device_class) (vlib_main_t * vm,
b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
dpdk_validate_rte_mbuf (vm, b[0], 1);
- dpdk_buffer_tx_offload (xd, b[0], mb[0]);
+
+ if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD) &&
+ (b[0]->flags & VNET_BUFFER_F_OFFLOAD)))
+ {
+ offload_enabled = 1;
+ dpdk_buffer_tx_offload (xd, b[0], mb[0]);
+ }
if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
@@ -418,32 +428,44 @@ VNET_DEVICE_CLASS_TX_FN (dpdk_device_class) (vlib_main_t * vm,
n_left--;
}
- /* transmit as many packets as possible */
+ /* prepare and transmit as many packets as possible */
tx_pkts = n_packets = mb - ptd->mbufs;
- n_left = tx_burst_vector_internal (vm, xd, ptd->mbufs, n_packets, queue_id,
- is_shared);
+ n_prep = n_packets;
- {
- /* If there is no callback then drop any non-transmitted packets */
- if (PREDICT_FALSE (n_left))
- {
- tx_pkts -= n_left;
- vlib_simple_counter_main_t *cm;
- vnet_main_t *vnm = vnet_get_main ();
+ if (PREDICT_FALSE (offload_enabled &&
+ (xd->flags & DPDK_DEVICE_FLAG_TX_PREPARE)))
+ {
+ n_prep =
+ rte_eth_tx_prepare (xd->port_id, queue_id, ptd->mbufs, n_packets);
- cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
- VNET_INTERFACE_COUNTER_TX_ERROR);
+ /* If mbufs are malformed then drop any non-prepared packets */
+ if (PREDICT_FALSE (n_prep != n_packets))
+ {
+ n_left = n_packets - n_prep;
+ }
+ }
- vlib_increment_simple_counter (cm, thread_index, xd->sw_if_index,
- n_left);
+ n_left +=
+ tx_burst_vector_internal (vm, xd, ptd->mbufs, n_prep, queue_id, is_shared);
- vlib_error_count (vm, node->node_index, DPDK_TX_FUNC_ERROR_PKT_DROP,
- n_left);
+ /* If there is no callback then drop any non-transmitted packets */
+ if (PREDICT_FALSE (n_left))
+ {
+ tx_pkts -= n_left;
+ vlib_simple_counter_main_t *cm;
+ vnet_main_t *vnm = vnet_get_main ();
- while (n_left--)
- rte_pktmbuf_free (ptd->mbufs[n_packets - n_left - 1]);
- }
- }
+ cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
+ VNET_INTERFACE_COUNTER_TX_ERROR);
+
+ vlib_increment_simple_counter (cm, thread_index, xd->sw_if_index,
+ n_left);
+
+ vlib_error_count (vm, node->node_index, DPDK_TX_FUNC_ERROR_PKT_DROP,
+ n_left);
+
+ rte_pktmbuf_free_bulk (&ptd->mbufs[tx_pkts], n_left);
+ }
return tx_pkts;
}
@@ -707,7 +729,7 @@ dpdk_interface_rx_mode_change (vnet_main_t *vnm, u32 hw_if_index, u32 qid,
else if (mode == VNET_HW_IF_RX_MODE_POLLING)
{
rxq = vec_elt_at_index (xd->rx_queues, qid);
- f = pool_elt_at_index (fm->file_pool, rxq->clib_file_index);
+ f = clib_file_get (fm, rxq->clib_file_index);
fm->file_update (f, UNIX_FILE_UPDATE_DELETE);
}
else if (!(xd->flags & DPDK_DEVICE_FLAG_INT_UNMASKABLE))
@@ -715,7 +737,7 @@ dpdk_interface_rx_mode_change (vnet_main_t *vnm, u32 hw_if_index, u32 qid,
else
{
rxq = vec_elt_at_index (xd->rx_queues, qid);
- f = pool_elt_at_index (fm->file_pool, rxq->clib_file_index);
+ f = clib_file_get (fm, rxq->clib_file_index);
fm->file_update (f, UNIX_FILE_UPDATE_ADD);
}
if (rv)
diff --git a/src/plugins/dpdk/device/dpdk.h b/src/plugins/dpdk/device/dpdk.h
index 2440439989f..70d9cc715dc 100644
--- a/src/plugins/dpdk/device/dpdk.h
+++ b/src/plugins/dpdk/device/dpdk.h
@@ -71,7 +71,8 @@ typedef uint16_t dpdk_portid_t;
_ (11, RX_FLOW_OFFLOAD, "rx-flow-offload") \
_ (12, RX_IP4_CKSUM, "rx-ip4-cksum") \
_ (13, INT_SUPPORTED, "int-supported") \
- _ (14, INT_UNMASKABLE, "int-unmaskable")
+ _ (14, INT_UNMASKABLE, "int-unmaskable") \
+ _ (15, TX_PREPARE, "tx-prepare")
typedef enum
{
@@ -131,6 +132,7 @@ typedef struct
u32 interface_number_from_port_id : 1;
u32 use_intel_phdr_cksum : 1;
u32 int_unmaskable : 1;
+ u32 need_tx_prepare : 1;
} dpdk_driver_t;
dpdk_driver_t *dpdk_driver_find (const char *name, const char **desc);
diff --git a/src/plugins/dpdk/device/dpdk_priv.h b/src/plugins/dpdk/device/dpdk_priv.h
index 794953da55e..2067b118532 100644
--- a/src/plugins/dpdk/device/dpdk_priv.h
+++ b/src/plugins/dpdk/device/dpdk_priv.h
@@ -50,7 +50,7 @@ dpdk_device_flag_set (dpdk_device_t *xd, __typeof__ (xd->flags) flag, int val)
void dpdk_counters_xstats_init (dpdk_device_t *xd);
static inline void
-dpdk_get_xstats (dpdk_device_t *xd, u32 thread_index)
+dpdk_get_xstats (dpdk_device_t *xd, clib_thread_index_t thread_index)
{
int ret;
int i;
@@ -101,7 +101,7 @@ static inline void
dpdk_update_counters (dpdk_device_t * xd, f64 now)
{
vnet_main_t *vnm = vnet_get_main ();
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
xd->time_last_stats_update = now ? now : xd->time_last_stats_update;
clib_memcpy_fast (&xd->last_stats, &xd->stats, sizeof (xd->last_stats));
diff --git a/src/plugins/dpdk/device/driver.c b/src/plugins/dpdk/device/driver.c
index 2fde041684c..469a4b5de2b 100644
--- a/src/plugins/dpdk/device/driver.c
+++ b/src/plugins/dpdk/device/driver.c
@@ -113,6 +113,7 @@ static dpdk_driver_t dpdk_drivers[] = {
.drivers = DPDK_DRIVERS ({ "net_ena", "AWS ENA VF" }),
.interface_name_prefix = "VirtualFunctionEthernet",
.enable_rxq_int = 1,
+ .need_tx_prepare = 1,
},
{
.drivers = DPDK_DRIVERS ({ "net_vmxnet3", "VMware VMXNET3" }),
diff --git a/src/plugins/dpdk/device/format.c b/src/plugins/dpdk/device/format.c
index fd301da8ea5..f0199c929cc 100644
--- a/src/plugins/dpdk/device/format.c
+++ b/src/plugins/dpdk/device/format.c
@@ -117,8 +117,8 @@
_ (TX_MACSEC, "TX MACSEC") \
_ (TX_OUTER_IPV4, "TX outer IPV4") \
_ (TX_OUTER_IPV6, "TX outer IPV6") \
- _ (TX_OUTER_IP_CKSUM, "Outer IP cksum of Tx pkt. computed by NIC") \
- _ (TX_OUTER_UDP_CKSUM, "TX outer UDP cksum") \
+ _ (TX_OUTER_IP_CKSUM, "Outer IP cksum of TX pkt. computed by NIC") \
+ _ (TX_OUTER_UDP_CKSUM, "Outer UDP cksum of TX pkt. computed by NIC") \
_ (TX_QINQ, "TX QINQ") \
_ (TX_SCTP_CKSUM, "SCTP cksum of TX pkt. computed by NIC") \
_ (TX_SEC_OFFLOAD, "TX SEC OFFLOAD") \
@@ -133,7 +133,7 @@
_ (TX_TUNNEL_UDP, "TX tunnel UDP") \
_ (TX_TUNNEL_VXLAN, "TX packet is a VXLAN packet") \
_ (TX_TUNNEL_VXLAN_GPE, "TX tunnel VXLAN GPE") \
- _ (TX_UDP_CKSUM, "TX UDP cksum") \
+ _ (TX_UDP_CKSUM, "UDP cksum of TX pkt. computed by NIC") \
_ (TX_UDP_SEG, "TX UDP SEG") \
_ (TX_VLAN, "TX packet is a 802.1q VLAN packet")
diff --git a/src/plugins/dpdk/device/init.c b/src/plugins/dpdk/device/init.c
index aaa2c1f4a68..83c2614e97e 100644
--- a/src/plugins/dpdk/device/init.c
+++ b/src/plugins/dpdk/device/init.c
@@ -18,7 +18,7 @@
#include <vppinfra/format.h>
#include <vppinfra/bitmap.h>
#include <vppinfra/linux/sysfs.h>
-#include <vlib/unix/unix.h>
+#include <vlib/file.h>
#include <vlib/log.h>
#include <vnet/vnet.h>
@@ -390,6 +390,8 @@ dpdk_lib_init (dpdk_main_t * dm)
dpdk_device_flag_set (xd, DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM, 1);
if (dr->int_unmaskable)
dpdk_device_flag_set (xd, DPDK_DEVICE_FLAG_INT_UNMASKABLE, 1);
+ if (dr->need_tx_prepare)
+ dpdk_device_flag_set (xd, DPDK_DEVICE_FLAG_TX_PREPARE, 1);
}
else
dpdk_log_warn ("[%u] unknown driver '%s'", port_id, di.driver_name);
diff --git a/src/plugins/dpdk/device/node.c b/src/plugins/dpdk/device/node.c
index ca1690b708f..2f4c10ebf46 100644
--- a/src/plugins/dpdk/device/node.c
+++ b/src/plugins/dpdk/device/node.c
@@ -340,8 +340,9 @@ dpdk_process_lro_offload (dpdk_device_t *xd, dpdk_per_thread_data_t *ptd,
}
static_always_inline u32
-dpdk_device_input (vlib_main_t * vm, dpdk_main_t * dm, dpdk_device_t * xd,
- vlib_node_runtime_t * node, u32 thread_index, u16 queue_id)
+dpdk_device_input (vlib_main_t *vm, dpdk_main_t *dm, dpdk_device_t *xd,
+ vlib_node_runtime_t *node, clib_thread_index_t thread_index,
+ u16 queue_id)
{
uword n_rx_packets = 0, n_rx_bytes;
dpdk_rx_queue_t *rxq = vec_elt_at_index (xd->rx_queues, queue_id);
@@ -543,7 +544,7 @@ VLIB_NODE_FN (dpdk_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
dpdk_device_t *xd;
uword n_rx_packets = 0;
vnet_hw_if_rxq_poll_vector_t *pv;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
/*
* Poll all devices on this cpu for input/interrupts.
diff --git a/src/plugins/geneve/decap.c b/src/plugins/geneve/decap.c
index c64121e2829..3a1de2af217 100644
--- a/src/plugins/geneve/decap.c
+++ b/src/plugins/geneve/decap.c
@@ -79,7 +79,7 @@ geneve_input (vlib_main_t * vm,
geneve4_tunnel_key_t last_key4;
geneve6_tunnel_key_t last_key6;
u32 pkts_decapsulated = 0;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
diff --git a/src/plugins/geneve/encap.c b/src/plugins/geneve/encap.c
index 609da2218cf..581c47983df 100644
--- a/src/plugins/geneve/encap.c
+++ b/src/plugins/geneve/encap.c
@@ -60,7 +60,7 @@ geneve_encap_inline (vlib_main_t * vm,
vnet_interface_main_t *im = &vnm->interface_main;
u32 pkts_encapsulated = 0;
u16 old_l0 = 0, old_l1 = 0;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
u32 sw_if_index0 = ~0, sw_if_index1 = ~0;
u32 next0 = 0, next1 = 0;
diff --git a/src/plugins/gtpu/gtpu_decap.c b/src/plugins/gtpu/gtpu_decap.c
index 093d85ef13c..4e0f8bf8e16 100644
--- a/src/plugins/gtpu/gtpu_decap.c
+++ b/src/plugins/gtpu/gtpu_decap.c
@@ -85,7 +85,7 @@ gtpu_input (vlib_main_t * vm,
gtpu4_tunnel_key_t last_key4;
gtpu6_tunnel_key_t last_key6;
u32 pkts_decapsulated = 0;
- u32 thread_index = vlib_get_thread_index();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
if (is_ip4)
@@ -1838,7 +1838,7 @@ gtpu_flow_input (vlib_main_t * vm,
vnet_main_t * vnm = gtm->vnet_main;
vnet_interface_main_t * im = &vnm->interface_main;
u32 pkts_decapsulated = 0;
- u32 thread_index = vlib_get_thread_index();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
u8 ip_err0, ip_err1, udp_err0, udp_err1, csum_err0, csum_err1;
diff --git a/src/plugins/gtpu/gtpu_encap.c b/src/plugins/gtpu/gtpu_encap.c
index 2c3c46a4be2..1caca1da915 100644
--- a/src/plugins/gtpu/gtpu_encap.c
+++ b/src/plugins/gtpu/gtpu_encap.c
@@ -67,7 +67,7 @@ gtpu_encap_inline (vlib_main_t * vm,
vnet_interface_main_t * im = &vnm->interface_main;
u32 pkts_encapsulated = 0;
u16 old_l0 = 0, old_l1 = 0, old_l2 = 0, old_l3 = 0;
- u32 thread_index = vlib_get_thread_index();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
u32 sw_if_index0 = 0, sw_if_index1 = 0, sw_if_index2 = 0, sw_if_index3 = 0;
u32 next0 = 0, next1 = 0, next2 = 0, next3 = 0;
diff --git a/src/plugins/hs_apps/CMakeLists.txt b/src/plugins/hs_apps/CMakeLists.txt
index eae100949d4..3e80a84aae4 100644
--- a/src/plugins/hs_apps/CMakeLists.txt
+++ b/src/plugins/hs_apps/CMakeLists.txt
@@ -71,7 +71,11 @@ if(VPP_BUILD_VCL_TESTS)
"vcl/${test}.c"
vcl/vcl_test_protos.c
LINK_LIBRARIES vppcom pthread ${EPOLL_LIB}
- NO_INSTALL
)
endforeach()
+
+ add_vpp_executable(vcl_test_cl_udp SOURCES "vcl/vcl_test_cl_udp.c"
+ LINK_LIBRARIES vppcom pthread ${EPOLL_LIB}
+ NO_INSTALL
+ )
endif(VPP_BUILD_VCL_TESTS)
diff --git a/src/plugins/hs_apps/echo_client.c b/src/plugins/hs_apps/echo_client.c
index ff5a3bd6b3c..54e806a9ba4 100644
--- a/src/plugins/hs_apps/echo_client.c
+++ b/src/plugins/hs_apps/echo_client.c
@@ -53,7 +53,7 @@ signal_evt_to_cli (int code)
}
static inline ec_worker_t *
-ec_worker_get (u32 thread_index)
+ec_worker_get (clib_thread_index_t thread_index)
{
return vec_elt_at_index (ec_main.wrk, thread_index);
}
@@ -79,21 +79,29 @@ ec_session_get (ec_worker_t *wrk, u32 ec_index)
static void
send_data_chunk (ec_main_t *ecm, ec_session_t *es)
{
+ const u64 max_burst = 128000;
u8 *test_data = ecm->connect_test_data;
int test_buf_len, test_buf_offset, rv;
+ u64 bytes_to_send;
u32 bytes_this_chunk;
+ svm_fifo_t *f = es->tx_fifo;
test_buf_len = vec_len (test_data);
ASSERT (test_buf_len > 0);
+ if (ecm->run_time)
+ bytes_to_send = clib_min (svm_fifo_max_enqueue_prod (f), max_burst);
+ else
+ bytes_to_send = clib_min (es->bytes_to_send, max_burst);
+ if (ecm->throughput)
+ bytes_to_send = clib_min (es->bytes_paced_current, bytes_to_send);
test_buf_offset = es->bytes_sent % test_buf_len;
- bytes_this_chunk =
- clib_min (test_buf_len - test_buf_offset, es->bytes_to_send);
+
+ bytes_this_chunk = clib_min (test_buf_len - test_buf_offset, bytes_to_send);
if (!es->is_dgram)
{
if (ecm->no_copy)
{
- svm_fifo_t *f = es->tx_fifo;
rv = clib_min (svm_fifo_max_enqueue_prod (f), bytes_this_chunk);
svm_fifo_enqueue_nocopy (f, rv);
session_program_tx_io_evt (es->tx_fifo->vpp_sh, SESSION_IO_EVT_TX);
@@ -105,7 +113,6 @@ send_data_chunk (ec_main_t *ecm, ec_session_t *es)
}
else
{
- svm_fifo_t *f = es->tx_fifo;
u32 max_enqueue = svm_fifo_max_enqueue_prod (f);
if (max_enqueue < sizeof (session_dgram_hdr_t))
@@ -136,7 +143,8 @@ send_data_chunk (ec_main_t *ecm, ec_session_t *es)
else
{
bytes_this_chunk = clib_min (bytes_this_chunk, max_enqueue);
- bytes_this_chunk = clib_min (bytes_this_chunk, 1460);
+ if (!ecm->throughput)
+ bytes_this_chunk = clib_min (bytes_this_chunk, 1460);
rv =
app_send_dgram ((app_session_t *) es, test_data + test_buf_offset,
bytes_this_chunk, 0);
@@ -147,8 +155,16 @@ send_data_chunk (ec_main_t *ecm, ec_session_t *es)
if (rv > 0)
{
/* Account for it... */
- es->bytes_to_send -= rv;
es->bytes_sent += rv;
+ if (ecm->run_time)
+ es->bytes_to_receive += rv;
+ else
+ es->bytes_to_send -= rv;
+ if (ecm->throughput)
+ {
+ es->bytes_paced_current -= rv;
+ es->bytes_paced_current += es->bytes_paced_target;
+ }
if (ecm->cfg.verbose)
{
@@ -228,6 +244,7 @@ ec_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
u32 *conn_indices, *conns_this_batch, nconns_this_batch;
int thread_index = vm->thread_index, i, delete_session;
+ f64 time_now;
ec_main_t *ecm = &ec_main;
ec_worker_t *wrk;
ec_session_t *es;
@@ -266,7 +283,7 @@ ec_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
ecm->repeats++;
ecm->prev_conns = vec_len (conns_this_batch);
- if (ecm->repeats == 500000)
+ if (ecm->repeats == 500000 && !ecm->run_time)
{
ec_err ("stuck clients");
}
@@ -277,18 +294,23 @@ ec_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
ecm->repeats = 0;
}
+ time_now = vlib_time_now (ecm->vlib_main);
/*
* Handle connections in this batch
*/
for (i = 0; i < vec_len (conns_this_batch); i++)
{
es = ec_session_get (wrk, conns_this_batch[i]);
+ if (ecm->throughput && time_now < es->time_to_send)
+ continue;
delete_session = 1;
if (es->bytes_to_send > 0)
{
send_data_chunk (ecm, es);
+ if (ecm->throughput)
+ es->time_to_send += ecm->pacing_window_len;
delete_session = 0;
}
@@ -297,7 +319,7 @@ ec_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
delete_session = 0;
}
- if (PREDICT_FALSE (delete_session == 1))
+ if (PREDICT_FALSE (delete_session == 1) || ecm->timer_expired)
{
clib_atomic_fetch_add (&ecm->tx_total, es->bytes_sent);
clib_atomic_fetch_add (&ecm->rx_total, es->bytes_received);
@@ -326,6 +348,8 @@ ec_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
signal_evt_to_cli (EC_CLI_TEST_DONE);
}
}
+ if (ecm->throughput)
+ time_now = vlib_time_now (vm);
}
wrk->conn_indices = conn_indices;
@@ -356,6 +380,7 @@ ec_reset_runtime_config (ec_main_t *ecm)
ecm->tls_engine = CRYPTO_ENGINE_OPENSSL;
ecm->no_copy = 0;
ecm->run_test = EC_STARTING;
+ ecm->timer_expired = false;
ecm->ready_connections = 0;
ecm->connect_conn_index = 0;
ecm->rx_total = 0;
@@ -368,6 +393,9 @@ ec_reset_runtime_config (ec_main_t *ecm)
ecm->attach_flags = 0;
ecm->syn_timeout = 20.0;
ecm->test_timeout = 20.0;
+ ecm->run_time = 0;
+ ecm->throughput = 0;
+ ecm->pacing_window_len = 1;
vec_free (ecm->connect_uri);
}
@@ -474,7 +502,8 @@ ec_cleanup (ec_main_t *ecm)
vec_free (ecm->connect_uri);
vec_free (ecm->appns_id);
-
+ if (ecm->throughput)
+ ecm->pacing_window_len = 1;
if (ecm->barrier_acq_needed)
vlib_worker_thread_barrier_sync (ecm->vlib_main);
}
@@ -565,7 +594,7 @@ quic_ec_session_connected_callback (u32 app_index, u32 api_context,
ec_main_t *ecm = &ec_main;
ec_session_t *es;
ec_worker_t *wrk;
- u32 thread_index;
+ clib_thread_index_t thread_index;
if (PREDICT_FALSE (api_context == HS_CTRL_HANDLE))
return ec_ctrl_session_connected_callback (s);
@@ -616,13 +645,59 @@ quic_ec_session_connected_callback (u32 app_index, u32 api_context,
return 0;
}
+static void
+ec_calc_tput (ec_main_t *ecm)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ ec_worker_t *wrk;
+ ec_session_t *sess;
+ f64 pacing_base;
+ u64 bytes_paced_target;
+ u64 target_size_threshold;
+
+ /* Choose an appropriate data size chunk threshold based on fifo size.
+ ~30k is fine for most scenarios, unless the fifo starts getting
+ smaller than 48k, where a slight curve is needed. */
+ if (PREDICT_TRUE (ecm->fifo_size > 49152))
+ target_size_threshold = 30720;
+ else if (ecm->fifo_size > 20480)
+ target_size_threshold = 12288;
+ else if (ecm->fifo_size > 10240)
+ target_size_threshold = 6144;
+ else
+ target_size_threshold = ecm->fifo_size;
+
+ /* find a suitable pacing window length & data chunk size */
+ bytes_paced_target =
+ ecm->throughput * ecm->pacing_window_len / ecm->n_clients;
+ while (bytes_paced_target > target_size_threshold)
+ {
+ ecm->pacing_window_len /= 2;
+ bytes_paced_target /= 2;
+ }
+
+ /* order sessions to shoot out data sequentially */
+ pacing_base = vlib_time_now (vm) - ecm->pacing_window_len;
+ vec_foreach (wrk, ecm->wrk)
+ {
+ vec_foreach (sess, wrk->sessions)
+ {
+ sess->time_to_send =
+ pacing_base + ecm->pacing_window_len / ecm->n_clients;
+ pacing_base = sess->time_to_send;
+ sess->bytes_paced_target = bytes_paced_target;
+ sess->bytes_paced_current = bytes_paced_target;
+ }
+ }
+}
+
static int
ec_session_connected_callback (u32 app_index, u32 api_context, session_t *s,
session_error_t err)
{
ec_main_t *ecm = &ec_main;
ec_session_t *es;
- u32 thread_index;
+ clib_thread_index_t thread_index;
ec_worker_t *wrk;
if (PREDICT_FALSE (ecm->run_test != EC_STARTING))
@@ -656,12 +731,16 @@ ec_session_connected_callback (u32 app_index, u32 api_context, session_t *s,
es->bytes_to_receive = ecm->echo_bytes ? ecm->bytes_to_send : 0ULL;
es->vpp_session_handle = session_handle (s);
es->vpp_session_index = s->session_index;
+ es->bytes_paced_target = ~0;
+ es->bytes_paced_current = ~0;
s->opaque = es->session_index;
vec_add1 (wrk->conn_indices, es->session_index);
clib_atomic_fetch_add (&ecm->ready_connections, 1);
if (ecm->ready_connections == ecm->expected_connections)
{
+ if (ecm->throughput)
+ ec_calc_tput (ecm);
ecm->run_test = EC_RUNNING;
/* Signal the CLI process that the action is starting... */
signal_evt_to_cli (EC_CLI_CONNECTS_DONE);
@@ -1072,8 +1151,8 @@ ec_command_fn (vlib_main_t *vm, unformat_input_t *input,
ec_main_t *ecm = &ec_main;
uword *event_data = 0, event_type;
clib_error_t *error = 0;
- int rv, had_config = 1;
- u64 tmp, total_bytes;
+ int rv, timed_run_conflict = 0, had_config = 1;
+ u64 total_bytes;
f64 delta;
if (ecm->test_client_attached)
@@ -1099,17 +1178,15 @@ ec_command_fn (vlib_main_t *vm, unformat_input_t *input,
;
else if (unformat (line_input, "quic-streams %d", &ecm->quic_streams))
;
- else if (unformat (line_input, "mbytes %lld", &tmp))
- ecm->bytes_to_send = tmp << 20;
- else if (unformat (line_input, "gbytes %lld", &tmp))
- ecm->bytes_to_send = tmp << 30;
else if (unformat (line_input, "bytes %U", unformat_memory_size,
&ecm->bytes_to_send))
- ;
+ timed_run_conflict++;
else if (unformat (line_input, "test-timeout %f", &ecm->test_timeout))
;
else if (unformat (line_input, "syn-timeout %f", &ecm->syn_timeout))
;
+ else if (unformat (line_input, "run-time %f", &ecm->run_time))
+ ;
else if (unformat (line_input, "echo-bytes"))
ecm->echo_bytes = 1;
else if (unformat (line_input, "fifo-size %U", unformat_memory_size,
@@ -1121,6 +1198,9 @@ ec_command_fn (vlib_main_t *vm, unformat_input_t *input,
else if (unformat (line_input, "private-segment-size %U",
unformat_memory_size, &ecm->private_segment_size))
;
+ else if (unformat (line_input, "throughput %U", unformat_memory_size,
+ &ecm->throughput))
+ ;
else if (unformat (line_input, "preallocate-fifos"))
ecm->prealloc_fifos = 1;
else if (unformat (line_input, "preallocate-sessions"))
@@ -1153,6 +1233,9 @@ ec_command_fn (vlib_main_t *vm, unformat_input_t *input,
}
}
+ if (timed_run_conflict && ecm->run_time)
+ return clib_error_return (0, "failed: invalid arguments for a timed run!");
+
parse_config:
ecm->cfg.num_test_sessions = ecm->expected_connections =
@@ -1237,12 +1320,22 @@ parse_config:
clib_error_return (0, "failed: unexpected event(2): %d", event_type);
goto stop_test;
}
+ /* Testing officially starts now */
+ ecm->test_start_time = vlib_time_now (ecm->vlib_main);
+ ec_cli ("Test started at %.6f", ecm->test_start_time);
+
+ /*
+ * If a timed run, wait and expire timer
+ */
+ if (ecm->run_time)
+ {
+ vlib_process_suspend (vm, ecm->run_time);
+ ec_main.timer_expired = true;
+ }
/*
* Wait for the sessions to finish or test_timeout seconds pass
*/
- ecm->test_start_time = vlib_time_now (ecm->vlib_main);
- ec_cli ("Test started at %.6f", ecm->test_start_time);
vlib_process_wait_for_event_or_clock (vm, ecm->test_timeout);
event_type = vlib_process_get_events (vm, &event_data);
switch (event_type)
@@ -1336,11 +1429,11 @@ cleanup:
VLIB_CLI_COMMAND (ec_command, static) = {
.path = "test echo clients",
.short_help =
- "test echo clients [nclients %d][[m|g]bytes <bytes>]"
- "[test-timeout <time>][syn-timeout <time>][echo-bytes][fifo-size <size>]"
+ "test echo clients [nclients %d][bytes <bytes>[m|g]][test-timeout <time>]"
+ "[run-time <time>][syn-timeout <time>][echo-bytes][fifo-size <size>]"
"[private-segment-count <count>][private-segment-size <bytes>[m|g]]"
"[preallocate-fifos][preallocate-sessions][client-batch <batch-size>]"
- "[uri <tcp://ip/port>][test-bytes][verbose]",
+ "[throughput <bytes>[m|g]][uri <tcp://ip/port>][test-bytes][verbose]",
.function = ec_command_fn,
.is_mp_safe = 1,
};
diff --git a/src/plugins/hs_apps/echo_client.h b/src/plugins/hs_apps/echo_client.h
index 5868c3652ce..d928a4e936f 100644
--- a/src/plugins/hs_apps/echo_client.h
+++ b/src/plugins/hs_apps/echo_client.h
@@ -29,12 +29,15 @@ typedef struct ec_session_
foreach_app_session_field
#undef _
u32 vpp_session_index;
- u32 thread_index;
+ clib_thread_index_t thread_index;
u64 bytes_to_send;
u64 bytes_sent;
u64 bytes_to_receive;
u64 bytes_received;
u64 vpp_session_handle;
+ f64 time_to_send;
+ u64 bytes_paced_target;
+ u64 bytes_paced_current;
} ec_session_t;
typedef struct ec_worker_
@@ -45,7 +48,7 @@ typedef struct ec_worker_
u32 *conn_indices; /**< sessions handled by worker */
u32 *conns_this_batch; /**< sessions handled in batch */
svm_msg_q_t *vpp_event_queue; /**< session layer worker mq */
- u32 thread_index; /**< thread index for worker */
+ clib_thread_index_t thread_index; /**< thread index for worker */
} ec_worker_t;
typedef struct
@@ -57,6 +60,7 @@ typedef struct
volatile u64 rx_total;
volatile u64 tx_total;
volatile int run_test; /**< Signal start of test */
+ volatile bool timer_expired; /**< Signal end of timed test */
f64 syn_start_time;
f64 test_start_time;
@@ -64,6 +68,8 @@ typedef struct
u32 prev_conns;
u32 repeats;
+ f64
+ pacing_window_len; /**< Time between data chunk sends when limiting tput */
u32 connect_conn_index; /**< Connects attempted progress */
/*
@@ -88,6 +94,7 @@ typedef struct
u32 connections_per_batch; /**< Connections to rx/tx at once */
u32 private_segment_count; /**< Number of private fifo segs */
u64 private_segment_size; /**< size of private fifo segs */
+ u64 throughput; /**< Target bytes per second */
u32 tls_engine; /**< TLS engine mbedtls/openssl */
u32 no_copy; /**< Don't memcpy data to tx fifo */
u32 quic_streams; /**< QUIC streams per connection */
@@ -97,6 +104,7 @@ typedef struct
u64 appns_secret; /**< App namespace secret */
f64 syn_timeout; /**< Test syn timeout (s) */
f64 test_timeout; /**< Test timeout (s) */
+ f64 run_time; /**< Length of a test (s) */
/*
* Flags
diff --git a/src/plugins/hs_apps/echo_server.c b/src/plugins/hs_apps/echo_server.c
index dc303e2f83a..61b86769768 100644
--- a/src/plugins/hs_apps/echo_server.c
+++ b/src/plugins/hs_apps/echo_server.c
@@ -40,7 +40,7 @@ typedef struct
es_session_t *sessions;
u8 *rx_buf; /**< Per-thread RX buffer */
svm_msg_q_t *vpp_event_queue;
- u32 thread_index;
+ clib_thread_index_t thread_index;
} es_worker_t;
typedef struct
@@ -87,7 +87,7 @@ echo_server_main_t echo_server_main;
#define es_cli(_fmt, _args...) vlib_cli_output (vm, _fmt, ##_args)
static inline es_worker_t *
-es_worker_get (u32 thread_index)
+es_worker_get (clib_thread_index_t thread_index)
{
return vec_elt_at_index (echo_server_main.wrk, thread_index);
}
@@ -277,7 +277,7 @@ es_wrk_cleanup_sessions (void *args)
{
echo_server_main_t *esm = &echo_server_main;
vnet_disconnect_args_t _a = {}, *a = &_a;
- u32 thread_index = pointer_to_uword (args);
+ clib_thread_index_t thread_index = pointer_to_uword (args);
es_session_t *es;
es_worker_t *wrk;
@@ -373,7 +373,7 @@ echo_server_rx_callback (session_t * s)
int actual_transfer;
svm_fifo_t *tx_fifo, *rx_fifo;
echo_server_main_t *esm = &echo_server_main;
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
es_worker_t *wrk;
es_session_t *es;
diff --git a/src/plugins/hs_apps/http_cli.c b/src/plugins/hs_apps/http_cli.c
index 531e2750c1e..40acf6a1635 100644
--- a/src/plugins/hs_apps/http_cli.c
+++ b/src/plugins/hs_apps/http_cli.c
@@ -37,7 +37,7 @@ typedef struct
typedef struct
{
u32 hs_index;
- u32 thread_index;
+ clib_thread_index_t thread_index;
u64 node_index;
u8 plain_text;
u8 *buf;
@@ -47,7 +47,7 @@ typedef struct
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
u32 session_index;
- u32 thread_index;
+ clib_thread_index_t thread_index;
u8 *tx_buf;
u32 tx_offset;
u32 vpp_session_index;
@@ -85,7 +85,7 @@ typedef struct
static hcs_main_t hcs_main;
static hcs_session_t *
-hcs_session_alloc (u32 thread_index)
+hcs_session_alloc (clib_thread_index_t thread_index)
{
hcs_main_t *hcm = &hcs_main;
hcs_session_t *hs;
@@ -98,7 +98,7 @@ hcs_session_alloc (u32 thread_index)
}
static hcs_session_t *
-hcs_session_get (u32 thread_index, u32 hs_index)
+hcs_session_get (clib_thread_index_t thread_index, u32 hs_index)
{
hcs_main_t *hcm = &hcs_main;
if (pool_is_free_index (hcm->sessions[thread_index], hs_index))
diff --git a/src/plugins/hs_apps/http_client.c b/src/plugins/hs_apps/http_client.c
index 20271fc4aea..34c2a545650 100644
--- a/src/plugins/hs_apps/http_client.c
+++ b/src/plugins/hs_apps/http_client.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: Apache-2.0
- * Copyright(c) 2024 Cisco Systems, Inc.
+ * Copyright(c) 2025 Cisco Systems, Inc.
*/
#include <vnet/session/application.h>
@@ -12,29 +12,36 @@
typedef struct
{
+ u64 req_per_wrk;
+ u64 request_count;
+ f64 start, end;
+ f64 elapsed_time;
+} hc_stats_t;
+
+typedef struct
+{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
u32 session_index;
- u32 thread_index;
- u32 vpp_session_index;
+ clib_thread_index_t thread_index;
u64 to_recv;
u8 is_closed;
+ hc_stats_t stats;
+ u64 data_offset;
+ u8 *resp_headers;
+ u8 *http_response;
+ u8 *response_status;
} hc_session_t;
typedef struct
{
- u64 request_count;
- f64 start, end;
- f64 elapsed_time;
-} hc_stats_t;
-
-typedef struct
-{
hc_session_t *sessions;
- u32 thread_index;
+ clib_thread_index_t thread_index;
vlib_main_t *vlib_main;
u8 *headers_buf;
http_headers_ctx_t req_headers;
http_msg_t msg;
+ u32 session_index;
+ bool has_common_headers;
} hc_worker_t;
typedef struct
@@ -52,11 +59,7 @@ typedef struct
session_endpoint_cfg_t connect_sep;
u8 *target;
u8 *data;
- u64 data_offset;
hc_worker_t *wrk;
- u8 *resp_headers;
- u8 *http_response;
- u8 *response_status;
hc_http_header_t *custom_header;
u8 is_file;
u8 use_ptr;
@@ -67,6 +70,19 @@ typedef struct
u64 repeat_count;
f64 duration;
bool repeat;
+ bool multi_session;
+ u32 done_count;
+ u32 connected_counter;
+ u32 worker_index;
+ u32 max_sessions;
+ u32 private_segment_size;
+ u32 prealloc_fifos;
+ u32 fifo_size;
+ u8 *appns_id;
+ u64 appns_secret;
+ clib_spinlock_t lock;
+ bool was_transport_closed;
+ u32 ckpair_index;
} hc_main_t;
typedef enum
@@ -82,26 +98,19 @@ static hc_main_t hc_main;
static hc_stats_t hc_stats;
static inline hc_worker_t *
-hc_worker_get (u32 thread_index)
+hc_worker_get (clib_thread_index_t thread_index)
{
return &hc_main.wrk[thread_index];
}
static inline hc_session_t *
-hc_session_get (u32 session_index, u32 thread_index)
+hc_session_get (u32 session_index, clib_thread_index_t thread_index)
{
hc_worker_t *wrk = hc_worker_get (thread_index);
wrk->vlib_main = vlib_get_main_by_index (thread_index);
return pool_elt_at_index (wrk->sessions, session_index);
}
-static void
-hc_ho_session_free (u32 hs_index)
-{
- hc_worker_t *wrk = hc_worker_get (0);
- pool_put_index (wrk->sessions, hs_index);
-}
-
static hc_session_t *
hc_session_alloc (hc_worker_t *wrk)
{
@@ -115,14 +124,14 @@ hc_session_alloc (hc_worker_t *wrk)
}
static int
-hc_request (session_t *s, session_error_t err)
+hc_request (session_t *s, hc_worker_t *wrk, hc_session_t *hc_session,
+ session_error_t err)
{
hc_main_t *hcm = &hc_main;
u64 to_send;
u32 n_enq;
u8 n_segs;
int rv;
- hc_worker_t *wrk = hc_worker_get (s->thread_index);
if (hcm->use_ptr)
{
@@ -166,7 +175,7 @@ hc_request (session_t *s, session_error_t err)
rv = svm_fifo_enqueue (s->tx_fifo, n_enq, hcm->data);
if (rv < to_send)
{
- hcm->data_offset = (rv > 0) ? rv : 0;
+ hc_session->data_offset = (rv > 0) ? rv : 0;
svm_fifo_add_want_deq_ntf (s->tx_fifo, SVM_FIFO_WANT_DEQ_NOTIF);
}
}
@@ -185,9 +194,8 @@ hc_session_connected_callback (u32 app_index, u32 hc_session_index,
{
hc_main_t *hcm = &hc_main;
hc_worker_t *wrk;
- u32 new_hc_index;
+ hc_session_t *hc_session;
hc_http_header_t *header;
- HTTP_DBG (1, "ho hc_index: %d", hc_session_index);
if (err)
{
@@ -199,68 +207,88 @@ hc_session_connected_callback (u32 app_index, u32 hc_session_index,
}
wrk = hc_worker_get (s->thread_index);
- hc_session_t *hc_session, *new_hc_session = hc_session_alloc (wrk);
- hc_session = hc_session_get (hc_session_index, 0);
- new_hc_index = new_hc_session->session_index;
- clib_memcpy_fast (new_hc_session, hc_session, sizeof (*hc_session));
- new_hc_session->session_index = new_hc_index;
- new_hc_session->thread_index = s->thread_index;
- new_hc_session->vpp_session_index = s->session_index;
- HTTP_DBG (1, "new hc_index: %d", new_hc_session->session_index);
- s->opaque = new_hc_index;
+ hc_session = hc_session_alloc (wrk);
+ clib_spinlock_lock_if_init (&hcm->lock);
+ hcm->connected_counter++;
+ clib_spinlock_unlock_if_init (&hcm->lock);
- if (hcm->req_method == HTTP_REQ_POST)
+ hc_session->thread_index = s->thread_index;
+ s->opaque = hc_session->session_index;
+ wrk->session_index = hc_session->session_index;
+
+ if (hcm->multi_session)
{
- if (hcm->is_file)
- http_add_header (
- &wrk->req_headers, HTTP_HEADER_CONTENT_TYPE,
- http_content_type_token (HTTP_CONTENT_APP_OCTET_STREAM));
- else
- http_add_header (
- &wrk->req_headers, HTTP_HEADER_CONTENT_TYPE,
- http_content_type_token (HTTP_CONTENT_APP_X_WWW_FORM_URLENCODED));
+ hc_session->stats.req_per_wrk = hcm->repeat_count / hcm->max_sessions;
+ clib_spinlock_lock_if_init (&hcm->lock);
+ /* add remaining requests to the first connected session */
+ if (hcm->connected_counter == 1)
+ {
+ hc_session->stats.req_per_wrk +=
+ hcm->repeat_count % hcm->max_sessions;
+ }
+ clib_spinlock_unlock_if_init (&hcm->lock);
}
- http_add_header (&wrk->req_headers, HTTP_HEADER_ACCEPT, "*", 1);
-
- vec_foreach (header, hcm->custom_header)
- http_add_custom_header (
- &wrk->req_headers, (const char *) header->name, vec_len (header->name),
- (const char *) header->value, vec_len (header->value));
-
- clib_warning ("%U", format_http_bytes, wrk->headers_buf,
- wrk->req_headers.tail_offset);
- wrk->msg.method_type = hcm->req_method;
- if (hcm->req_method == HTTP_REQ_POST)
- wrk->msg.data.body_len = vec_len (hcm->data);
else
- wrk->msg.data.body_len = 0;
-
- wrk->msg.type = HTTP_MSG_REQUEST;
- /* request target */
- wrk->msg.data.target_path_len = vec_len (hcm->target);
- /* custom headers */
- wrk->msg.data.headers_len = wrk->req_headers.tail_offset;
- /* total length */
- wrk->msg.data.len = wrk->msg.data.target_path_len +
- wrk->msg.data.headers_len + wrk->msg.data.body_len;
-
- if (hcm->use_ptr)
{
- wrk->msg.data.type = HTTP_MSG_DATA_PTR;
+ hc_session->stats.req_per_wrk = hcm->repeat_count;
+ hcm->worker_index = s->thread_index;
}
- else
+
+ if (!wrk->has_common_headers)
{
- wrk->msg.data.type = HTTP_MSG_DATA_INLINE;
- wrk->msg.data.target_path_offset = 0;
- wrk->msg.data.headers_offset = wrk->msg.data.target_path_len;
- wrk->msg.data.body_offset =
- wrk->msg.data.headers_offset + wrk->msg.data.headers_len;
+ wrk->has_common_headers = true;
+ if (hcm->req_method == HTTP_REQ_POST)
+ {
+ if (hcm->is_file)
+ http_add_header (
+ &wrk->req_headers, HTTP_HEADER_CONTENT_TYPE,
+ http_content_type_token (HTTP_CONTENT_APP_OCTET_STREAM));
+ else
+ http_add_header (&wrk->req_headers, HTTP_HEADER_CONTENT_TYPE,
+ http_content_type_token (
+ HTTP_CONTENT_APP_X_WWW_FORM_URLENCODED));
+ }
+ http_add_header (&wrk->req_headers, HTTP_HEADER_ACCEPT, "*", 1);
+
+ vec_foreach (header, hcm->custom_header)
+ http_add_custom_header (&wrk->req_headers, (const char *) header->name,
+ vec_len (header->name),
+ (const char *) header->value,
+ vec_len (header->value));
+
+ wrk->msg.method_type = hcm->req_method;
+ if (hcm->req_method == HTTP_REQ_POST)
+ wrk->msg.data.body_len = vec_len (hcm->data);
+ else
+ wrk->msg.data.body_len = 0;
+
+ wrk->msg.type = HTTP_MSG_REQUEST;
+ /* request target */
+ wrk->msg.data.target_path_len = vec_len (hcm->target);
+ /* custom headers */
+ wrk->msg.data.headers_len = wrk->req_headers.tail_offset;
+ /* total length */
+ wrk->msg.data.len = wrk->msg.data.target_path_len +
+ wrk->msg.data.headers_len + wrk->msg.data.body_len;
+
+ if (hcm->use_ptr)
+ {
+ wrk->msg.data.type = HTTP_MSG_DATA_PTR;
+ }
+ else
+ {
+ wrk->msg.data.type = HTTP_MSG_DATA_INLINE;
+ wrk->msg.data.target_path_offset = 0;
+ wrk->msg.data.headers_offset = wrk->msg.data.target_path_len;
+ wrk->msg.data.body_offset =
+ wrk->msg.data.headers_offset + wrk->msg.data.headers_len;
+ }
}
- if (hcm->repeat)
- hc_stats.start = vlib_time_now (vlib_get_main_by_index (s->thread_index));
+ hc_session->stats.start =
+ vlib_time_now (vlib_get_main_by_index (s->thread_index));
- return hc_request (s, err);
+ return hc_request (s, wrk, hc_session, err);
}
static void
@@ -275,21 +303,38 @@ hc_session_disconnect_callback (session_t *s)
if ((rv = vnet_disconnect_session (a)))
clib_warning ("warning: disconnect returned: %U", format_session_error,
rv);
+ clib_spinlock_lock_if_init (&hcm->lock);
+ hcm->done_count++;
+ clib_spinlock_unlock_if_init (&hcm->lock);
}
static void
hc_session_transport_closed_callback (session_t *s)
{
hc_main_t *hcm = &hc_main;
- vlib_process_signal_event_mt (hcm->wrk->vlib_main, hcm->cli_node_index,
- HC_TRANSPORT_CLOSED, 0);
-}
+ hc_worker_t *wrk = hc_worker_get (s->thread_index);
-static void
-hc_ho_cleanup_callback (session_t *s)
-{
- HTTP_DBG (1, "ho hc_index: %d:", s->opaque);
- hc_ho_session_free (s->opaque);
+ clib_spinlock_lock_if_init (&hcm->lock);
+ if (s->session_state == SESSION_STATE_TRANSPORT_CLOSED)
+ {
+ hcm->was_transport_closed = true;
+ }
+
+ /* send an event when all sessions are closed */
+ if (hcm->done_count >= hcm->max_sessions)
+ {
+ if (hcm->was_transport_closed)
+ {
+ vlib_process_signal_event_mt (wrk->vlib_main, hcm->cli_node_index,
+ HC_TRANSPORT_CLOSED, 0);
+ }
+ else
+ {
+ vlib_process_signal_event_mt (wrk->vlib_main, hcm->cli_node_index,
+ HC_REPEAT_DONE, 0);
+ }
+ }
+ clib_spinlock_unlock_if_init (&hcm->lock);
}
static void
@@ -315,20 +360,23 @@ hc_rx_callback (session_t *s)
{
hc_main_t *hcm = &hc_main;
hc_worker_t *wrk = hc_worker_get (s->thread_index);
- hc_session_t *hc_session;
+ hc_session_t *hc_session = hc_session_get (s->opaque, s->thread_index);
http_msg_t msg;
int rv;
+ u32 max_deq;
session_error_t session_err = 0;
int send_err = 0;
- hc_session = hc_session_get (s->opaque, s->thread_index);
-
if (hc_session->is_closed)
{
clib_warning ("hc_session_index[%d] is closed", s->opaque);
return -1;
}
+ max_deq = svm_fifo_max_dequeue_cons (s->rx_fifo);
+ if (PREDICT_FALSE (max_deq == 0))
+ goto done;
+
if (hc_session->to_recv == 0)
{
rv = svm_fifo_dequeue (s->rx_fifo, sizeof (msg), (u8 *) &msg);
@@ -344,17 +392,20 @@ hc_rx_callback (session_t *s)
if (msg.data.headers_len)
{
- hcm->response_status =
- format (0, "%U", format_http_status_code, msg.code);
+
+ if (!hcm->repeat)
+ hc_session->response_status =
+ format (0, "%U", format_http_status_code, msg.code);
+
svm_fifo_dequeue_drop (s->rx_fifo, msg.data.headers_offset);
- vec_validate (hcm->resp_headers, msg.data.headers_len - 1);
- vec_set_len (hcm->resp_headers, msg.data.headers_len);
+ vec_validate (hc_session->resp_headers, msg.data.headers_len - 1);
+ vec_set_len (hc_session->resp_headers, msg.data.headers_len);
rv = svm_fifo_dequeue (s->rx_fifo, msg.data.headers_len,
- hcm->resp_headers);
+ hc_session->resp_headers);
ASSERT (rv == msg.data.headers_len);
- HTTP_DBG (1, (char *) format (0, "%v", hcm->resp_headers));
+ HTTP_DBG (1, (char *) format (0, "%v", hc_session->resp_headers));
msg.data.body_offset -=
msg.data.headers_len + msg.data.headers_offset;
}
@@ -372,18 +423,18 @@ hc_rx_callback (session_t *s)
{
goto done;
}
- vec_validate (hcm->http_response, msg.data.body_len - 1);
- vec_reset_length (hcm->http_response);
+ vec_validate (hc_session->http_response, msg.data.body_len - 1);
+ vec_reset_length (hc_session->http_response);
}
- u32 max_deq = svm_fifo_max_dequeue (s->rx_fifo);
+ max_deq = svm_fifo_max_dequeue (s->rx_fifo);
if (!max_deq)
{
goto done;
}
u32 n_deq = clib_min (hc_session->to_recv, max_deq);
- u32 curr = vec_len (hcm->http_response);
- rv = svm_fifo_dequeue (s->rx_fifo, n_deq, hcm->http_response + curr);
+ u32 curr = vec_len (hc_session->http_response);
+ rv = svm_fifo_dequeue (s->rx_fifo, n_deq, hc_session->http_response + curr);
if (rv < 0)
{
clib_warning ("app dequeue(n=%d) failed; rv = %d", n_deq, rv);
@@ -393,29 +444,28 @@ hc_rx_callback (session_t *s)
}
ASSERT (rv == n_deq);
- vec_set_len (hcm->http_response, curr + n_deq);
+ vec_set_len (hc_session->http_response, curr + n_deq);
ASSERT (hc_session->to_recv >= rv);
hc_session->to_recv -= rv;
done:
if (hc_session->to_recv == 0)
{
+ hc_session->stats.end = vlib_time_now (wrk->vlib_main);
+ hc_session->stats.elapsed_time =
+ hc_session->stats.end - hc_session->stats.start;
if (hcm->repeat)
{
- hc_stats.request_count++;
- hc_stats.end = vlib_time_now (wrk->vlib_main);
- hc_stats.elapsed_time = hc_stats.end - hc_stats.start;
+ hc_session->stats.request_count++;
- if (hc_stats.elapsed_time >= hcm->duration &&
- hc_stats.request_count >= hcm->repeat_count)
+ if (hc_session->stats.elapsed_time >= hcm->duration &&
+ hc_session->stats.request_count >= hc_session->stats.req_per_wrk)
{
- vlib_process_signal_event_mt (
- wrk->vlib_main, hcm->cli_node_index, HC_REPEAT_DONE, 0);
hc_session_disconnect_callback (s);
}
else
{
- send_err = hc_request (s, session_err);
+ send_err = hc_request (s, wrk, hc_session, session_err);
if (send_err)
clib_warning ("failed to send request, error %d", send_err);
}
@@ -434,11 +484,13 @@ static int
hc_tx_callback (session_t *s)
{
hc_main_t *hcm = &hc_main;
+ hc_session_t *hc_session = hc_session_get (s->opaque, s->thread_index);
u64 to_send;
int rv;
- to_send = vec_len (hcm->data) - hcm->data_offset;
- rv = svm_fifo_enqueue (s->tx_fifo, to_send, hcm->data + hcm->data_offset);
+ to_send = vec_len (hcm->data) - hc_session->data_offset;
+ rv = svm_fifo_enqueue (s->tx_fifo, to_send,
+ hcm->data + hc_session->data_offset);
if (rv <= 0)
{
@@ -448,7 +500,7 @@ hc_tx_callback (session_t *s)
if (rv < to_send)
{
- hcm->data_offset += rv;
+ hc_session->data_offset += rv;
svm_fifo_add_want_deq_ntf (s->tx_fifo, SVM_FIFO_WANT_DEQ_NOTIF);
}
@@ -465,7 +517,6 @@ static session_cb_vft_t hc_session_cb_vft = {
.session_reset_callback = hc_session_reset_callback,
.builtin_app_rx_callback = hc_rx_callback,
.builtin_app_tx_callback = hc_tx_callback,
- .half_open_cleanup_callback = hc_ho_cleanup_callback,
};
static clib_error_t *
@@ -474,8 +525,13 @@ hc_attach ()
hc_main_t *hcm = &hc_main;
vnet_app_attach_args_t _a, *a = &_a;
u64 options[18];
+ u32 segment_size = 128 << 20;
+ vnet_app_add_cert_key_pair_args_t _ck_pair, *ck_pair = &_ck_pair;
int rv;
+ if (hcm->private_segment_size)
+ segment_size = hcm->private_segment_size;
+
clib_memset (a, 0, sizeof (*a));
clib_memset (options, 0, sizeof (options));
@@ -483,7 +539,20 @@ hc_attach ()
a->name = format (0, "http_client");
a->session_cb_vft = &hc_session_cb_vft;
a->options = options;
+ a->options[APP_OPTIONS_SEGMENT_SIZE] = segment_size;
+ a->options[APP_OPTIONS_ADD_SEGMENT_SIZE] = segment_size;
+ a->options[APP_OPTIONS_RX_FIFO_SIZE] =
+ hcm->fifo_size ? hcm->fifo_size : 8 << 10;
+ a->options[APP_OPTIONS_TX_FIFO_SIZE] =
+ hcm->fifo_size ? hcm->fifo_size : 32 << 10;
a->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_IS_BUILTIN;
+ a->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = hcm->prealloc_fifos;
+ a->options[APP_OPTIONS_TLS_ENGINE] = CRYPTO_ENGINE_OPENSSL;
+ if (hcm->appns_id)
+ {
+ a->namespace_id = hcm->appns_id;
+ a->options[APP_OPTIONS_NAMESPACE_SECRET] = hcm->appns_secret;
+ }
if ((rv = vnet_application_attach (a)))
return clib_error_return (0, "attach returned: %U", format_session_error,
@@ -493,6 +562,14 @@ hc_attach ()
vec_free (a->name);
hcm->attached = 1;
+ clib_memset (ck_pair, 0, sizeof (*ck_pair));
+ ck_pair->cert = (u8 *) test_srv_crt_rsa;
+ ck_pair->key = (u8 *) test_srv_key_rsa;
+ ck_pair->cert_len = test_srv_crt_rsa_len;
+ ck_pair->key_len = test_srv_key_rsa_len;
+ vnet_app_add_cert_key_pair (ck_pair);
+ hcm->ckpair_index = ck_pair->index;
+
return 0;
}
@@ -500,14 +577,19 @@ static int
hc_connect_rpc (void *rpc_args)
{
vnet_connect_args_t *a = rpc_args;
- int rv;
+ int rv = ~0;
+ hc_main_t *hcm = &hc_main;
- rv = vnet_connect (a);
- if (rv > 0)
- clib_warning (0, "connect returned: %U", format_session_error, rv);
+ for (u32 i = 0; i < hcm->max_sessions; i++)
+ {
+ rv = vnet_connect (a);
+ if (rv > 0)
+ clib_warning (0, "connect returned: %U", format_session_error, rv);
+ }
session_endpoint_free_ext_cfgs (&a->sep_ext);
vec_free (a);
+
return rv;
}
@@ -516,14 +598,10 @@ hc_connect ()
{
hc_main_t *hcm = &hc_main;
vnet_connect_args_t *a = 0;
- hc_worker_t *wrk;
- hc_session_t *hc_session;
transport_endpt_ext_cfg_t *ext_cfg;
transport_endpt_cfg_http_t http_cfg = { (u32) hcm->timeout, 0 };
-
vec_validate (a, 0);
clib_memset (a, 0, sizeof (a[0]));
-
clib_memcpy (&a->sep_ext, &hcm->connect_sep, sizeof (hcm->connect_sep));
a->app_index = hcm->app_index;
@@ -531,15 +609,60 @@ hc_connect ()
&a->sep_ext, TRANSPORT_ENDPT_EXT_CFG_HTTP, sizeof (http_cfg));
clib_memcpy (ext_cfg->data, &http_cfg, sizeof (http_cfg));
- /* allocate http session on main thread */
- wrk = hc_worker_get (0);
- hc_session = hc_session_alloc (wrk);
- a->api_context = hc_session->session_index;
+ if (hcm->connect_sep.flags & SESSION_ENDPT_CFG_F_SECURE)
+ {
+ ext_cfg = session_endpoint_add_ext_cfg (
+ &a->sep_ext, TRANSPORT_ENDPT_EXT_CFG_CRYPTO,
+ sizeof (transport_endpt_crypto_cfg_t));
+ ext_cfg->crypto.ckpair_index = hcm->ckpair_index;
+ }
session_send_rpc_evt_to_thread_force (transport_cl_thread (), hc_connect_rpc,
a);
}
+static void
+hc_get_repeat_stats (vlib_main_t *vm)
+{
+ hc_main_t *hcm = &hc_main;
+
+ if (hcm->repeat || hcm->verbose)
+ {
+ hc_worker_t *wrk;
+ hc_session_t *hc_session;
+ vec_foreach (wrk, hcm->wrk)
+ {
+ vec_foreach (hc_session, wrk->sessions)
+ {
+ hc_stats.request_count += hc_session->stats.request_count;
+ hc_session->stats.request_count = 0;
+ if (hc_stats.elapsed_time < hc_session->stats.elapsed_time)
+ {
+ hc_stats.elapsed_time = hc_session->stats.elapsed_time;
+ hc_session->stats.elapsed_time = 0;
+ }
+ }
+ }
+
+ if (hcm->repeat)
+ {
+ vlib_cli_output (vm,
+ "* %d request(s) in %.6fs\n"
+ "* avg latency %.4fms\n"
+ "* %.2f req/sec",
+ hc_stats.request_count, hc_stats.elapsed_time,
+ (hc_stats.elapsed_time / hc_stats.request_count) *
+ 1000,
+ hc_stats.request_count / hc_stats.elapsed_time);
+ }
+ else
+ {
+ vlib_cli_output (vm, "* latency: %.4fms",
+ hc_stats.elapsed_time * 1000);
+ }
+ }
+}
+
static clib_error_t *
hc_get_event (vlib_main_t *vm)
{
@@ -548,12 +671,15 @@ hc_get_event (vlib_main_t *vm)
clib_error_t *err = NULL;
FILE *file_ptr;
u64 event_timeout;
+ hc_worker_t *wrk;
+ hc_session_t *hc_session;
event_timeout = hcm->timeout ? hcm->timeout : 10;
if (event_timeout == hcm->duration)
event_timeout += 5;
vlib_process_wait_for_event_or_clock (vm, event_timeout);
event_type = vlib_process_get_events (vm, &event_data);
+ hc_get_repeat_stats (vm);
switch (event_type)
{
@@ -572,32 +698,33 @@ hc_get_event (vlib_main_t *vm)
case HC_REPLY_RECEIVED:
if (hcm->filename)
{
+ wrk = hc_worker_get (hcm->worker_index);
+ hc_session = hc_session_get (wrk->session_index, wrk->thread_index);
file_ptr =
fopen ((char *) format (0, "/tmp/%v", hcm->filename), "a");
if (file_ptr == NULL)
{
- vlib_cli_output (vm, "couldn't open file %v", hcm->filename);
+ vlib_cli_output (vm, "* couldn't open file %v", hcm->filename);
}
else
{
- fprintf (file_ptr, "< %s\n< %s\n< %s", hcm->response_status,
- hcm->resp_headers, hcm->http_response);
+ fprintf (file_ptr, "< %s\n< %s\n< %s",
+ hc_session->response_status, hc_session->resp_headers,
+ hc_session->http_response);
fclose (file_ptr);
- vlib_cli_output (vm, "file saved (/tmp/%v)", hcm->filename);
+ vlib_cli_output (vm, "* file saved (/tmp/%v)", hcm->filename);
}
}
if (hcm->verbose)
- vlib_cli_output (vm, "< %v< %v", hcm->response_status,
- hcm->resp_headers);
- vlib_cli_output (vm, "\n%v\n", hcm->http_response);
+ {
+ wrk = hc_worker_get (hcm->worker_index);
+ hc_session = hc_session_get (wrk->session_index, wrk->thread_index);
+ vlib_cli_output (vm, "< %v\n< %v\n%v", hc_session->response_status,
+ hc_session->resp_headers,
+ hc_session->http_response);
+ }
break;
case HC_REPEAT_DONE:
- vlib_cli_output (vm,
- "< %d request(s) in %.6fs\n< avg latency "
- "%.4fms\n< %.2f req/sec",
- hc_stats.request_count, hc_stats.elapsed_time,
- (hc_stats.elapsed_time / hc_stats.request_count) * 1000,
- hc_stats.request_count / hc_stats.elapsed_time);
break;
default:
err = clib_error_return (0, "error: unexpected event %d", event_type);
@@ -612,15 +739,17 @@ static clib_error_t *
hc_run (vlib_main_t *vm)
{
hc_main_t *hcm = &hc_main;
- vlib_thread_main_t *vtm = vlib_get_thread_main ();
u32 num_threads;
hc_worker_t *wrk;
clib_error_t *err;
- num_threads = 1 /* main thread */ + vtm->n_threads;
+ num_threads = 1 /* main thread */ + vlib_num_workers ();
+ if (vlib_num_workers ())
+ clib_spinlock_init (&hcm->lock);
vec_validate (hcm->wrk, num_threads - 1);
vec_foreach (wrk, hcm->wrk)
{
+ wrk->has_common_headers = false;
wrk->thread_index = wrk - hcm->wrk;
/* 4k for headers should be enough */
vec_validate (wrk->headers_buf, 4095);
@@ -657,10 +786,18 @@ hc_detach ()
}
static void
-hcc_worker_cleanup (hc_worker_t *wrk)
+hc_worker_cleanup (hc_worker_t *wrk)
{
- HTTP_DBG (1, "worker cleanup");
+ hc_session_t *hc_session;
+ HTTP_DBG (1, "worker and worker sessions cleanup");
+
vec_free (wrk->headers_buf);
+ vec_foreach (hc_session, wrk->sessions)
+ {
+ vec_free (hc_session->resp_headers);
+ vec_free (hc_session->http_response);
+ vec_free (hc_session->response_status);
+ }
pool_free (wrk->sessions);
}
@@ -673,16 +810,14 @@ hc_cleanup ()
hc_http_header_t *header;
vec_foreach (wrk, hcm->wrk)
- hcc_worker_cleanup (wrk);
+ hc_worker_cleanup (wrk);
vec_free (hcm->uri);
vec_free (hcm->target);
vec_free (hcm->data);
- vec_free (hcm->resp_headers);
- vec_free (hcm->http_response);
- vec_free (hcm->response_status);
vec_free (hcm->wrk);
vec_free (hcm->filename);
+ vec_free (hcm->appns_id);
vec_foreach (header, hcm->custom_header)
{
vec_free (header->name);
@@ -698,6 +833,8 @@ hc_command_fn (vlib_main_t *vm, unformat_input_t *input,
hc_main_t *hcm = &hc_main;
clib_error_t *err = 0;
unformat_input_t _line_input, *line_input = &_line_input;
+ u64 mem_size;
+ u8 *appns_id = 0;
u8 *path = 0;
u8 *file_data;
hc_http_header_t new_header;
@@ -708,7 +845,16 @@ hc_command_fn (vlib_main_t *vm, unformat_input_t *input,
hcm->repeat_count = 0;
hcm->duration = 0;
hcm->repeat = false;
+ hcm->multi_session = false;
+ hcm->done_count = 0;
+ hcm->connected_counter = 0;
+ hcm->max_sessions = 1;
+ hcm->prealloc_fifos = 0;
+ hcm->private_segment_size = 0;
+ hcm->fifo_size = 0;
+ hcm->was_transport_closed = false;
hc_stats.request_count = 0;
+ hc_stats.elapsed_time = 0;
if (hcm->attached)
return clib_error_return (0, "failed: already running!");
@@ -729,8 +875,6 @@ hc_command_fn (vlib_main_t *vm, unformat_input_t *input,
;
else if (unformat (line_input, "data %v", &hcm->data))
hcm->is_file = 0;
- else if (unformat (line_input, "target %s", &hcm->target))
- ;
else if (unformat (line_input, "file %s", &path))
hcm->is_file = 1;
else if (unformat (line_input, "use-ptr"))
@@ -761,6 +905,29 @@ hc_command_fn (vlib_main_t *vm, unformat_input_t *input,
}
else if (unformat (line_input, "duration %f", &hcm->duration))
hcm->repeat = true;
+ else if (unformat (line_input, "sessions %d", &hcm->max_sessions))
+ {
+ hcm->multi_session = true;
+ if (hcm->max_sessions <= 1)
+ {
+ err = clib_error_return (0, "sessions must be > 1");
+ goto done;
+ }
+ }
+ else if (unformat (line_input, "prealloc-fifos %d",
+ &hcm->prealloc_fifos))
+ ;
+ else if (unformat (line_input, "private-segment-size %U",
+ unformat_memory_size, &mem_size))
+ hcm->private_segment_size = mem_size;
+ else if (unformat (line_input, "fifo-size %U", unformat_memory_size,
+ &mem_size))
+ hcm->fifo_size = mem_size;
+ else if (unformat (line_input, "appns %_%v%_", &appns_id))
+ ;
+ else if (unformat (line_input, "secret %lu", &hcm->appns_secret))
+ ;
+
else
{
err = clib_error_return (0, "unknown input `%U'",
@@ -774,11 +941,7 @@ hc_command_fn (vlib_main_t *vm, unformat_input_t *input,
err = clib_error_return (0, "URI not defined");
goto done;
}
- if (!hcm->target)
- {
- err = clib_error_return (0, "target not defined");
- goto done;
- }
+
if (!hcm->data && hcm->req_method == HTTP_REQ_POST)
{
if (path)
@@ -794,6 +957,7 @@ hc_command_fn (vlib_main_t *vm, unformat_input_t *input,
goto done;
}
}
+
if (hcm->duration && hcm->repeat_count)
{
err = clib_error_return (
@@ -801,6 +965,20 @@ hc_command_fn (vlib_main_t *vm, unformat_input_t *input,
goto done;
}
+ if (hcm->multi_session && !hcm->repeat)
+ {
+ err = clib_error_return (
+ 0, "multiple sessions are only supported with request repeating");
+ goto done;
+ }
+
+ if ((rv = parse_target ((char **) &hcm->uri, (char **) &hcm->target)))
+ {
+ err = clib_error_return (0, "target parse error: %U",
+ format_session_error, rv);
+ goto done;
+ }
+
if ((rv = parse_uri ((char *) hcm->uri, &hcm->connect_sep)))
{
err =
@@ -808,8 +986,14 @@ hc_command_fn (vlib_main_t *vm, unformat_input_t *input,
goto done;
}
+ if (hcm->duration >= hcm->timeout)
+ {
+ hcm->timeout = hcm->duration + 10;
+ }
+ hcm->appns_id = appns_id;
+
if (hcm->repeat)
- vlib_cli_output (vm, "Running, please wait...");
+ vlib_cli_output (vm, "* Running, please wait...");
session_enable_disable_args_t args = { .is_en = 1,
.rt_engine_type =
@@ -842,10 +1026,12 @@ done:
VLIB_CLI_COMMAND (hc_command, static) = {
.path = "http client",
.short_help =
- "[post] uri http://<ip-addr> target <origin-form> "
+ "[post] uri http://<ip-addr>/<origin-form> "
"[data <form-urlencoded> | file <file-path>] [use-ptr] "
"[save-to <filename>] [header <Key:Value>] [verbose] "
- "[timeout <seconds> (default = 10)] [repeat <count> | duration <seconds>]",
+ "[timeout <seconds> (default = 10)] [repeat <count> | duration <seconds>] "
+ "[sessions <# of sessions>] [appns <app-ns> secret <appns-secret>] "
+ "[fifo-size <nM|G>] [private-segment-size <nM|G>] [prealloc-fifos <n>]",
.function = hc_command_fn,
.is_mp_safe = 1,
};
diff --git a/src/plugins/hs_apps/http_client_cli.c b/src/plugins/hs_apps/http_client_cli.c
index 4ee3b49444c..b72d4dfae54 100644
--- a/src/plugins/hs_apps/http_client_cli.c
+++ b/src/plugins/hs_apps/http_client_cli.c
@@ -31,7 +31,7 @@ typedef struct
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
u32 session_index;
- u32 thread_index;
+ clib_thread_index_t thread_index;
u32 rx_offset;
u32 vpp_session_index;
u64 to_recv;
@@ -41,7 +41,7 @@ typedef struct
typedef struct
{
hcc_session_t *sessions;
- u32 thread_index;
+ clib_thread_index_t thread_index;
} hcc_worker_t;
typedef struct
@@ -62,6 +62,8 @@ typedef struct
u8 *http_response;
u8 *appns_id;
u64 appns_secret;
+ u32 ckpair_index;
+ u8 need_crypto;
} hcc_main_t;
typedef enum
@@ -74,7 +76,7 @@ typedef enum
static hcc_main_t hcc_main;
static hcc_worker_t *
-hcc_worker_get (u32 thread_index)
+hcc_worker_get (clib_thread_index_t thread_index)
{
return vec_elt_at_index (hcc_main.wrk, thread_index);
}
@@ -90,7 +92,7 @@ hcc_session_alloc (hcc_worker_t *wrk)
}
static hcc_session_t *
-hcc_session_get (u32 hs_index, u32 thread_index)
+hcc_session_get (u32 hs_index, clib_thread_index_t thread_index)
{
hcc_worker_t *wrk = hcc_worker_get (thread_index);
return pool_elt_at_index (wrk->sessions, hs_index);
@@ -333,6 +335,7 @@ hcc_attach ()
vnet_app_attach_args_t _a, *a = &_a;
u64 options[18];
u32 segment_size = 128 << 20;
+ vnet_app_add_cert_key_pair_args_t _ck_pair, *ck_pair = &_ck_pair;
int rv;
if (hcm->private_segment_size)
@@ -353,6 +356,7 @@ hcc_attach ()
hcm->fifo_size ? hcm->fifo_size : 32 << 10;
a->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_IS_BUILTIN;
a->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = hcm->prealloc_fifos;
+ a->options[APP_OPTIONS_TLS_ENGINE] = CRYPTO_ENGINE_OPENSSL;
if (hcm->appns_id)
{
a->namespace_id = hcm->appns_id;
@@ -365,6 +369,15 @@ hcc_attach ()
hcm->app_index = a->app_index;
vec_free (a->name);
hcm->test_client_attached = 1;
+
+ clib_memset (ck_pair, 0, sizeof (*ck_pair));
+ ck_pair->cert = (u8 *) test_srv_crt_rsa;
+ ck_pair->key = (u8 *) test_srv_key_rsa;
+ ck_pair->cert_len = test_srv_crt_rsa_len;
+ ck_pair->key_len = test_srv_key_rsa_len;
+ vnet_app_add_cert_key_pair (ck_pair);
+ hcm->ckpair_index = ck_pair->index;
+
return 0;
}
@@ -411,6 +424,14 @@ hcc_connect ()
&a->sep_ext, TRANSPORT_ENDPT_EXT_CFG_HTTP, sizeof (http_cfg));
clib_memcpy (ext_cfg->data, &http_cfg, sizeof (http_cfg));
+ if (hcm->need_crypto)
+ {
+ ext_cfg = session_endpoint_add_ext_cfg (
+ &a->sep_ext, TRANSPORT_ENDPT_EXT_CFG_CRYPTO,
+ sizeof (transport_endpt_crypto_cfg_t));
+ ext_cfg->crypto.ckpair_index = hcm->ckpair_index;
+ }
+
/* allocate http session on main thread */
wrk = hcc_worker_get (0);
hs = hcc_session_alloc (wrk);
@@ -581,6 +602,8 @@ hcc_command_fn (vlib_main_t *vm, unformat_input_t *input,
err = clib_error_return (0, "Uri parse error: %d", rv);
goto done;
}
+ hcm->need_crypto = hcm->connect_sep.transport_proto == TRANSPORT_PROTO_TLS;
+ hcm->connect_sep.transport_proto = TRANSPORT_PROTO_HTTP;
session_enable_disable_args_t args = { .is_en = 1,
.rt_engine_type =
diff --git a/src/plugins/hs_apps/http_tps.c b/src/plugins/hs_apps/http_tps.c
index 59a0309e363..486d4a525e3 100644
--- a/src/plugins/hs_apps/http_tps.c
+++ b/src/plugins/hs_apps/http_tps.c
@@ -25,7 +25,7 @@ typedef struct
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
u32 session_index;
- u32 thread_index;
+ clib_thread_index_t thread_index;
u64 data_len;
u64 data_offset;
u32 vpp_session_index;
@@ -78,7 +78,7 @@ typedef struct hs_main_
static hts_main_t hts_main;
static hts_session_t *
-hts_session_alloc (u32 thread_index)
+hts_session_alloc (clib_thread_index_t thread_index)
{
hts_main_t *htm = &hts_main;
hts_session_t *hs;
@@ -92,7 +92,7 @@ hts_session_alloc (u32 thread_index)
}
static hts_session_t *
-hts_session_get (u32 thread_index, u32 hts_index)
+hts_session_get (clib_thread_index_t thread_index, u32 hts_index)
{
hts_main_t *htm = &hts_main;
@@ -345,6 +345,11 @@ hts_session_rx_body (hts_session_t *hs, session_t *ts)
ASSERT (rv == n_deq);
}
hs->left_recv -= n_deq;
+ if (svm_fifo_needs_deq_ntf (ts->rx_fifo, n_deq))
+ {
+ svm_fifo_clear_deq_ntf (ts->rx_fifo);
+ session_program_transport_io_evt (ts->handle, SESSION_IO_EVT_RX);
+ }
if (hs->close_threshold > 0)
{
@@ -620,7 +625,7 @@ hts_start_listen (hts_main_t *htm, session_endpoint_cfg_t *sep, u8 *uri,
u8 need_crypto;
hts_session_t *hls;
session_t *ls;
- u32 thread_index = 0;
+ clib_thread_index_t thread_index = 0;
int rv;
clib_memset (a, 0, sizeof (*a));
diff --git a/src/plugins/hs_apps/proxy.c b/src/plugins/hs_apps/proxy.c
index ca088f4bc8a..140183d5f59 100644
--- a/src/plugins/hs_apps/proxy.c
+++ b/src/plugins/hs_apps/proxy.c
@@ -112,7 +112,8 @@ proxy_do_connect (vnet_connect_args_t *a)
static void
proxy_handle_connects_rpc (void *args)
{
- u32 thread_index = pointer_to_uword (args), n_connects = 0, n_pending;
+ clib_thread_index_t thread_index = pointer_to_uword (args), n_connects = 0,
+ n_pending;
proxy_worker_t *wrk;
u32 max_connects;
diff --git a/src/plugins/hs_apps/proxy.h b/src/plugins/hs_apps/proxy.h
index f26f4bf0ea2..88b7cdf41ee 100644
--- a/src/plugins/hs_apps/proxy.h
+++ b/src/plugins/hs_apps/proxy.h
@@ -117,7 +117,7 @@ typedef struct
extern proxy_main_t proxy_main;
static inline proxy_worker_t *
-proxy_worker_get (u32 thread_index)
+proxy_worker_get (clib_thread_index_t thread_index)
{
proxy_main_t *pm = &proxy_main;
return vec_elt_at_index (pm->workers, thread_index);
diff --git a/src/plugins/hs_apps/test_builtins.c b/src/plugins/hs_apps/test_builtins.c
index c314e71b5df..5403be739ca 100644
--- a/src/plugins/hs_apps/test_builtins.c
+++ b/src/plugins/hs_apps/test_builtins.c
@@ -52,14 +52,15 @@ VLIB_REGISTER_NODE (test_builtins_timer_process_node) = {
};
static void
-send_data_to_hss (hss_session_handle_t sh, u8 *data, u8 free_vec_data)
+send_data_to_hss (hss_session_handle_t sh, u8 *data, uword data_len,
+ u8 free_vec_data)
{
tb_main_t *tbm = &tb_main;
hss_url_handler_args_t args = {};
args.sh = sh;
args.data = data;
- args.data_len = vec_len (data);
+ args.data_len = data_len;
args.ct = HTTP_CONTENT_TEXT_PLAIN;
args.sc = HTTP_STATUS_OK;
args.free_vec_data = free_vec_data;
@@ -74,7 +75,7 @@ handle_get_test1 (hss_url_handler_args_t *args)
clib_warning ("get request on test1");
data = format (0, "hello");
- send_data_to_hss (args->sh, data, 1);
+ send_data_to_hss (args->sh, data, vec_len (data), 1);
return HSS_URL_HANDLER_ASYNC;
}
@@ -86,7 +87,7 @@ handle_get_test2 (hss_url_handler_args_t *args)
clib_warning ("get request on test2");
data = format (0, "some data");
- send_data_to_hss (args->sh, data, 1);
+ send_data_to_hss (args->sh, data, vec_len (data), 1);
return HSS_URL_HANDLER_ASYNC;
}
@@ -106,7 +107,7 @@ delayed_resp_cb (u32 *expired_timers)
e = pool_elt_at_index (tbm->delayed_resps, pool_index);
clib_warning ("sending delayed data");
data = format (0, "delayed data");
- send_data_to_hss (e->sh, data, 1);
+ send_data_to_hss (e->sh, data, vec_len (data), 1);
pool_put (tbm->delayed_resps, e);
}
}
@@ -129,7 +130,7 @@ handle_get_test_delayed (hss_url_handler_args_t *args)
static hss_url_handler_rc_t
handle_post_test3 (hss_url_handler_args_t *args)
{
- send_data_to_hss (args->sh, 0, 0);
+ send_data_to_hss (args->sh, 0, 0, 0);
return HSS_URL_HANDLER_ASYNC;
}
@@ -137,7 +138,15 @@ static hss_url_handler_rc_t
handle_get_64bytes (hss_url_handler_args_t *args)
{
tb_main_t *tbm = &tb_main;
- send_data_to_hss (args->sh, tbm->test_data, 0);
+ send_data_to_hss (args->sh, tbm->test_data, 64, 0);
+ return HSS_URL_HANDLER_ASYNC;
+}
+
+static hss_url_handler_rc_t
+handle_get_4kbytes (hss_url_handler_args_t *args)
+{
+ tb_main_t *tbm = &tb_main;
+ send_data_to_hss (args->sh, tbm->test_data, 4 << 10, 0);
return HSS_URL_HANDLER_ASYNC;
}
@@ -157,14 +166,16 @@ test_builtins_init (vlib_main_t *vm)
return;
}
- tbm->test_data = format (
- 0, "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx");
+ /* init test data, big buffer */
+ vec_validate_init_empty (tbm->test_data, (4 << 10) - 1, 'x');
(*fp) (handle_get_test1, "test1", HTTP_REQ_GET);
+ (*fp) (handle_get_test1, "test1", HTTP_REQ_POST);
(*fp) (handle_get_test2, "test2", HTTP_REQ_GET);
(*fp) (handle_get_test_delayed, "test_delayed", HTTP_REQ_GET);
(*fp) (handle_post_test3, "test3", HTTP_REQ_POST);
(*fp) (handle_get_64bytes, "64B", HTTP_REQ_GET);
+ (*fp) (handle_get_4kbytes, "4kB", HTTP_REQ_GET);
tbm->send_data =
vlib_get_plugin_symbol ("http_static_plugin.so", "hss_session_send_data");
diff --git a/src/plugins/hs_apps/vcl/vcl_test_cl_udp.c b/src/plugins/hs_apps/vcl/vcl_test_cl_udp.c
new file mode 100644
index 00000000000..066635e3d9b
--- /dev/null
+++ b/src/plugins/hs_apps/vcl/vcl_test_cl_udp.c
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2025 Cisco Systems, Inc.
+ */
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <string.h>
+#include <vcl/vppcom.h>
+#include <hs_apps/vcl/vcl_test.h>
+
+typedef enum vt_clu_type_
+{
+ VT_CLU_TYPE_NONE = 0,
+ VT_CLU_TYPE_SERVER,
+ VT_CLU_TYPE_CLIENT,
+} vt_clu_type_t;
+
+typedef struct vtclu_main_
+{
+ vt_clu_type_t app_type;
+ vppcom_endpt_t endpt;
+ union
+ {
+ struct sockaddr_storage srvr_addr;
+ struct sockaddr_storage clnt_addr;
+ };
+ uint16_t port;
+} vt_clu_main_t;
+
+static vt_clu_main_t vt_clu_main;
+
+static void
+vt_clu_parse_args (vt_clu_main_t *vclum, int argc, char **argv)
+{
+ int c;
+
+ memset (vclum, 0, sizeof (*vclum));
+ vclum->port = VCL_TEST_SERVER_PORT;
+
+ opterr = 0;
+ while ((c = getopt (argc, argv, "s:c:")) != -1)
+ switch (c)
+ {
+ case 's':
+ vclum->app_type = VT_CLU_TYPE_SERVER;
+ if (inet_pton (
+ AF_INET, optarg,
+ &((struct sockaddr_in *) &vclum->srvr_addr)->sin_addr) != 1)
+ vtwrn ("couldn't parse ipv4 addr %s", optarg);
+ break;
+ case 'c':
+ vclum->app_type = VT_CLU_TYPE_CLIENT;
+ if (inet_pton (
+ AF_INET, optarg,
+ &((struct sockaddr_in *) &vclum->clnt_addr)->sin_addr) != 1)
+ break;
+ }
+
+ if (vclum->app_type == VT_CLU_TYPE_NONE)
+ {
+ vtwrn ("client or server must be configured");
+ exit (1);
+ }
+
+ vclum->endpt.is_ip4 = 1;
+ vclum->endpt.ip =
+ (uint8_t *) &((struct sockaddr_in *) &vclum->srvr_addr)->sin_addr;
+ vclum->endpt.port = htons (vclum->endpt.port);
+}
+
+int
+main (int argc, char **argv)
+{
+ vt_clu_main_t *vclum = &vt_clu_main;
+ int rv, vcl_sh;
+ const int buflen = 64;
+ char buf[buflen];
+
+ struct sockaddr_in _addr;
+ vppcom_endpt_t rmt_ep = { .ip = (void *) &_addr };
+
+ vt_clu_parse_args (vclum, argc, argv);
+
+ rv = vppcom_app_create ("vcl_test_cl_udp");
+ if (rv)
+ vtfail ("vppcom_app_create()", rv);
+
+ vcl_sh = vppcom_session_create (VPPCOM_PROTO_UDP, 0 /* is_nonblocking */);
+ if (vcl_sh < 0)
+ {
+ vterr ("vppcom_session_create()", vcl_sh);
+ return vcl_sh;
+ }
+
+ if (vclum->app_type == VT_CLU_TYPE_SERVER)
+ {
+ /* Listen is implicit */
+ rv = vppcom_session_bind (vcl_sh, &vclum->endpt);
+ if (rv < 0)
+ {
+ vterr ("vppcom_session_bind()", rv);
+ return rv;
+ }
+
+ rv = vppcom_session_recvfrom (vcl_sh, buf, buflen, 0, &rmt_ep);
+ if (rv < 0)
+ {
+ vterr ("vppcom_session_recvfrom()", rv);
+ return rv;
+ }
+ buf[rv] = 0;
+ vtinf ("Received message from client: %s", buf);
+
+ char *msg = "hello cl udp client";
+ int msg_len = strnlen (msg, buflen);
+ memcpy (buf, msg, msg_len);
+ /* send 2 times to be sure */
+ for (int i = 0; i < 2; i++)
+ {
+ rv = vppcom_session_sendto (vcl_sh, buf, msg_len, 0, &rmt_ep);
+ if (rv < 0)
+ {
+ vterr ("vppcom_session_sendto()", rv);
+ return rv;
+ }
+ usleep (500);
+ }
+ }
+ else if (vclum->app_type == VT_CLU_TYPE_CLIENT)
+ {
+ char *msg = "hello cl udp server";
+ int msg_len = strnlen (msg, buflen);
+ memcpy (buf, msg, msg_len);
+
+ /* send 3 times to be sure */
+ for (int i = 0; i < 3; i++)
+ {
+ rv = vppcom_session_sendto (vcl_sh, buf, msg_len, 0, &vclum->endpt);
+ if (rv < 0)
+ {
+ vterr ("vppcom_session_sendto()", rv);
+ return rv;
+ }
+ usleep (500);
+ }
+
+ rv = vppcom_session_recvfrom (vcl_sh, buf, buflen, 0, &rmt_ep);
+ if (rv < 0)
+ {
+ vterr ("vppcom_session_recvfrom()", rv);
+ return rv;
+ }
+ buf[rv] = 0;
+ vtinf ("Received message from server: %s", buf);
+ }
+} \ No newline at end of file
diff --git a/src/plugins/http/CMakeLists.txt b/src/plugins/http/CMakeLists.txt
index 075b8d6817b..ca2c0a9dc05 100644
--- a/src/plugins/http/CMakeLists.txt
+++ b/src/plugins/http/CMakeLists.txt
@@ -11,11 +11,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+option(VPP_ENABLE_HTTP_2 "Build http plugin with HTTP/2 enabled" OFF)
+if(VPP_ENABLE_HTTP_2)
+ add_compile_definitions(HTTP_2_ENABLE=1)
+endif()
+
add_vpp_plugin(http
SOURCES
+ http2/hpack.c
+ http2/http2.c
+ http2/frame.c
http.c
http_buffer.c
http_timer.c
+ http1.c
)
add_vpp_plugin(http_unittest
diff --git a/src/plugins/http/extras/mk_huffman_table.py b/src/plugins/http/extras/mk_huffman_table.py
new file mode 100644
index 00000000000..378544b0dce
--- /dev/null
+++ b/src/plugins/http/extras/mk_huffman_table.py
@@ -0,0 +1,416 @@
+#!/usr/bin/env python3
+from io import StringIO
+
+
+# SPDX-License-Identifier: Apache-2.0
+# Copyright(c) 2025 Cisco Systems, Inc.
+
+
+# e.g. 5 bit code symbol has 8 slots (2^(8-5)), last 3 bits are irrelevant
+def generate_slots(fh, s, cl):
+ for i in range(1 << 8 - cl):
+ fh.write(" { 0x%02X, %d },\n" % (s, cl))
+
+
+# list of code and code length tuples
+huff_code_table = []
+
+# Huffman code from RFC7541 Appendix B, EOS removed
+rfc7541_huffman_code = """\
+ ( 0) |11111111|11000 1ff8 [13]
+ ( 1) |11111111|11111111|1011000 7fffd8 [23]
+ ( 2) |11111111|11111111|11111110|0010 fffffe2 [28]
+ ( 3) |11111111|11111111|11111110|0011 fffffe3 [28]
+ ( 4) |11111111|11111111|11111110|0100 fffffe4 [28]
+ ( 5) |11111111|11111111|11111110|0101 fffffe5 [28]
+ ( 6) |11111111|11111111|11111110|0110 fffffe6 [28]
+ ( 7) |11111111|11111111|11111110|0111 fffffe7 [28]
+ ( 8) |11111111|11111111|11111110|1000 fffffe8 [28]
+ ( 9) |11111111|11111111|11101010 ffffea [24]
+ ( 10) |11111111|11111111|11111111|111100 3ffffffc [30]
+ ( 11) |11111111|11111111|11111110|1001 fffffe9 [28]
+ ( 12) |11111111|11111111|11111110|1010 fffffea [28]
+ ( 13) |11111111|11111111|11111111|111101 3ffffffd [30]
+ ( 14) |11111111|11111111|11111110|1011 fffffeb [28]
+ ( 15) |11111111|11111111|11111110|1100 fffffec [28]
+ ( 16) |11111111|11111111|11111110|1101 fffffed [28]
+ ( 17) |11111111|11111111|11111110|1110 fffffee [28]
+ ( 18) |11111111|11111111|11111110|1111 fffffef [28]
+ ( 19) |11111111|11111111|11111111|0000 ffffff0 [28]
+ ( 20) |11111111|11111111|11111111|0001 ffffff1 [28]
+ ( 21) |11111111|11111111|11111111|0010 ffffff2 [28]
+ ( 22) |11111111|11111111|11111111|111110 3ffffffe [30]
+ ( 23) |11111111|11111111|11111111|0011 ffffff3 [28]
+ ( 24) |11111111|11111111|11111111|0100 ffffff4 [28]
+ ( 25) |11111111|11111111|11111111|0101 ffffff5 [28]
+ ( 26) |11111111|11111111|11111111|0110 ffffff6 [28]
+ ( 27) |11111111|11111111|11111111|0111 ffffff7 [28]
+ ( 28) |11111111|11111111|11111111|1000 ffffff8 [28]
+ ( 29) |11111111|11111111|11111111|1001 ffffff9 [28]
+ ( 30) |11111111|11111111|11111111|1010 ffffffa [28]
+ ( 31) |11111111|11111111|11111111|1011 ffffffb [28]
+' ' ( 32) |010100 14 [ 6]
+'!' ( 33) |11111110|00 3f8 [10]
+'"' ( 34) |11111110|01 3f9 [10]
+'#' ( 35) |11111111|1010 ffa [12]
+'$' ( 36) |11111111|11001 1ff9 [13]
+'%' ( 37) |010101 15 [ 6]
+'&' ( 38) |11111000 f8 [ 8]
+''' ( 39) |11111111|010 7fa [11]
+'(' ( 40) |11111110|10 3fa [10]
+')' ( 41) |11111110|11 3fb [10]
+'*' ( 42) |11111001 f9 [ 8]
+'+' ( 43) |11111111|011 7fb [11]
+',' ( 44) |11111010 fa [ 8]
+'-' ( 45) |010110 16 [ 6]
+'.' ( 46) |010111 17 [ 6]
+'/' ( 47) |011000 18 [ 6]
+'0' ( 48) |00000 0 [ 5]
+'1' ( 49) |00001 1 [ 5]
+'2' ( 50) |00010 2 [ 5]
+'3' ( 51) |011001 19 [ 6]
+'4' ( 52) |011010 1a [ 6]
+'5' ( 53) |011011 1b [ 6]
+'6' ( 54) |011100 1c [ 6]
+'7' ( 55) |011101 1d [ 6]
+'8' ( 56) |011110 1e [ 6]
+'9' ( 57) |011111 1f [ 6]
+':' ( 58) |1011100 5c [ 7]
+';' ( 59) |11111011 fb [ 8]
+'<' ( 60) |11111111|1111100 7ffc [15]
+'=' ( 61) |100000 20 [ 6]
+'>' ( 62) |11111111|1011 ffb [12]
+'?' ( 63) |11111111|00 3fc [10]
+'@' ( 64) |11111111|11010 1ffa [13]
+'A' ( 65) |100001 21 [ 6]
+'B' ( 66) |1011101 5d [ 7]
+'C' ( 67) |1011110 5e [ 7]
+'D' ( 68) |1011111 5f [ 7]
+'E' ( 69) |1100000 60 [ 7]
+'F' ( 70) |1100001 61 [ 7]
+'G' ( 71) |1100010 62 [ 7]
+'H' ( 72) |1100011 63 [ 7]
+'I' ( 73) |1100100 64 [ 7]
+'J' ( 74) |1100101 65 [ 7]
+'K' ( 75) |1100110 66 [ 7]
+'L' ( 76) |1100111 67 [ 7]
+'M' ( 77) |1101000 68 [ 7]
+'N' ( 78) |1101001 69 [ 7]
+'O' ( 79) |1101010 6a [ 7]
+'P' ( 80) |1101011 6b [ 7]
+'Q' ( 81) |1101100 6c [ 7]
+'R' ( 82) |1101101 6d [ 7]
+'S' ( 83) |1101110 6e [ 7]
+'T' ( 84) |1101111 6f [ 7]
+'U' ( 85) |1110000 70 [ 7]
+'V' ( 86) |1110001 71 [ 7]
+'W' ( 87) |1110010 72 [ 7]
+'X' ( 88) |11111100 fc [ 8]
+'Y' ( 89) |1110011 73 [ 7]
+'Z' ( 90) |11111101 fd [ 8]
+'[' ( 91) |11111111|11011 1ffb [13]
+'\' ( 92) |11111111|11111110|000 7fff0 [19]
+']' ( 93) |11111111|11100 1ffc [13]
+'^' ( 94) |11111111|111100 3ffc [14]
+'_' ( 95) |100010 22 [ 6]
+'`' ( 96) |11111111|1111101 7ffd [15]
+'a' ( 97) |00011 3 [ 5]
+'b' ( 98) |100011 23 [ 6]
+'c' ( 99) |00100 4 [ 5]
+'d' (100) |100100 24 [ 6]
+'e' (101) |00101 5 [ 5]
+'f' (102) |100101 25 [ 6]
+'g' (103) |100110 26 [ 6]
+'h' (104) |100111 27 [ 6]
+'i' (105) |00110 6 [ 5]
+'j' (106) |1110100 74 [ 7]
+'k' (107) |1110101 75 [ 7]
+'l' (108) |101000 28 [ 6]
+'m' (109) |101001 29 [ 6]
+'n' (110) |101010 2a [ 6]
+'o' (111) |00111 7 [ 5]
+'p' (112) |101011 2b [ 6]
+'q' (113) |1110110 76 [ 7]
+'r' (114) |101100 2c [ 6]
+'s' (115) |01000 8 [ 5]
+'t' (116) |01001 9 [ 5]
+'u' (117) |101101 2d [ 6]
+'v' (118) |1110111 77 [ 7]
+'w' (119) |1111000 78 [ 7]
+'x' (120) |1111001 79 [ 7]
+'y' (121) |1111010 7a [ 7]
+'z' (122) |1111011 7b [ 7]
+'{' (123) |11111111|1111110 7ffe [15]
+'|' (124) |11111111|100 7fc [11]
+'}' (125) |11111111|111101 3ffd [14]
+'~' (126) |11111111|11101 1ffd [13]
+ (127) |11111111|11111111|11111111|1100 ffffffc [28]
+ (128) |11111111|11111110|0110 fffe6 [20]
+ (129) |11111111|11111111|010010 3fffd2 [22]
+ (130) |11111111|11111110|0111 fffe7 [20]
+ (131) |11111111|11111110|1000 fffe8 [20]
+ (132) |11111111|11111111|010011 3fffd3 [22]
+ (133) |11111111|11111111|010100 3fffd4 [22]
+ (134) |11111111|11111111|010101 3fffd5 [22]
+ (135) |11111111|11111111|1011001 7fffd9 [23]
+ (136) |11111111|11111111|010110 3fffd6 [22]
+ (137) |11111111|11111111|1011010 7fffda [23]
+ (138) |11111111|11111111|1011011 7fffdb [23]
+ (139) |11111111|11111111|1011100 7fffdc [23]
+ (140) |11111111|11111111|1011101 7fffdd [23]
+ (141) |11111111|11111111|1011110 7fffde [23]
+ (142) |11111111|11111111|11101011 ffffeb [24]
+ (143) |11111111|11111111|1011111 7fffdf [23]
+ (144) |11111111|11111111|11101100 ffffec [24]
+ (145) |11111111|11111111|11101101 ffffed [24]
+ (146) |11111111|11111111|010111 3fffd7 [22]
+ (147) |11111111|11111111|1100000 7fffe0 [23]
+ (148) |11111111|11111111|11101110 ffffee [24]
+ (149) |11111111|11111111|1100001 7fffe1 [23]
+ (150) |11111111|11111111|1100010 7fffe2 [23]
+ (151) |11111111|11111111|1100011 7fffe3 [23]
+ (152) |11111111|11111111|1100100 7fffe4 [23]
+ (153) |11111111|11111110|11100 1fffdc [21]
+ (154) |11111111|11111111|011000 3fffd8 [22]
+ (155) |11111111|11111111|1100101 7fffe5 [23]
+ (156) |11111111|11111111|011001 3fffd9 [22]
+ (157) |11111111|11111111|1100110 7fffe6 [23]
+ (158) |11111111|11111111|1100111 7fffe7 [23]
+ (159) |11111111|11111111|11101111 ffffef [24]
+ (160) |11111111|11111111|011010 3fffda [22]
+ (161) |11111111|11111110|11101 1fffdd [21]
+ (162) |11111111|11111110|1001 fffe9 [20]
+ (163) |11111111|11111111|011011 3fffdb [22]
+ (164) |11111111|11111111|011100 3fffdc [22]
+ (165) |11111111|11111111|1101000 7fffe8 [23]
+ (166) |11111111|11111111|1101001 7fffe9 [23]
+ (167) |11111111|11111110|11110 1fffde [21]
+ (168) |11111111|11111111|1101010 7fffea [23]
+ (169) |11111111|11111111|011101 3fffdd [22]
+ (170) |11111111|11111111|011110 3fffde [22]
+ (171) |11111111|11111111|11110000 fffff0 [24]
+ (172) |11111111|11111110|11111 1fffdf [21]
+ (173) |11111111|11111111|011111 3fffdf [22]
+ (174) |11111111|11111111|1101011 7fffeb [23]
+ (175) |11111111|11111111|1101100 7fffec [23]
+ (176) |11111111|11111111|00000 1fffe0 [21]
+ (177) |11111111|11111111|00001 1fffe1 [21]
+ (178) |11111111|11111111|100000 3fffe0 [22]
+ (179) |11111111|11111111|00010 1fffe2 [21]
+ (180) |11111111|11111111|1101101 7fffed [23]
+ (181) |11111111|11111111|100001 3fffe1 [22]
+ (182) |11111111|11111111|1101110 7fffee [23]
+ (183) |11111111|11111111|1101111 7fffef [23]
+ (184) |11111111|11111110|1010 fffea [20]
+ (185) |11111111|11111111|100010 3fffe2 [22]
+ (186) |11111111|11111111|100011 3fffe3 [22]
+ (187) |11111111|11111111|100100 3fffe4 [22]
+ (188) |11111111|11111111|1110000 7ffff0 [23]
+ (189) |11111111|11111111|100101 3fffe5 [22]
+ (190) |11111111|11111111|100110 3fffe6 [22]
+ (191) |11111111|11111111|1110001 7ffff1 [23]
+ (192) |11111111|11111111|11111000|00 3ffffe0 [26]
+ (193) |11111111|11111111|11111000|01 3ffffe1 [26]
+ (194) |11111111|11111110|1011 fffeb [20]
+ (195) |11111111|11111110|001 7fff1 [19]
+ (196) |11111111|11111111|100111 3fffe7 [22]
+ (197) |11111111|11111111|1110010 7ffff2 [23]
+ (198) |11111111|11111111|101000 3fffe8 [22]
+ (199) |11111111|11111111|11110110|0 1ffffec [25]
+ (200) |11111111|11111111|11111000|10 3ffffe2 [26]
+ (201) |11111111|11111111|11111000|11 3ffffe3 [26]
+ (202) |11111111|11111111|11111001|00 3ffffe4 [26]
+ (203) |11111111|11111111|11111011|110 7ffffde [27]
+ (204) |11111111|11111111|11111011|111 7ffffdf [27]
+ (205) |11111111|11111111|11111001|01 3ffffe5 [26]
+ (206) |11111111|11111111|11110001 fffff1 [24]
+ (207) |11111111|11111111|11110110|1 1ffffed [25]
+ (208) |11111111|11111110|010 7fff2 [19]
+ (209) |11111111|11111111|00011 1fffe3 [21]
+ (210) |11111111|11111111|11111001|10 3ffffe6 [26]
+ (211) |11111111|11111111|11111100|000 7ffffe0 [27]
+ (212) |11111111|11111111|11111100|001 7ffffe1 [27]
+ (213) |11111111|11111111|11111001|11 3ffffe7 [26]
+ (214) |11111111|11111111|11111100|010 7ffffe2 [27]
+ (215) |11111111|11111111|11110010 fffff2 [24]
+ (216) |11111111|11111111|00100 1fffe4 [21]
+ (217) |11111111|11111111|00101 1fffe5 [21]
+ (218) |11111111|11111111|11111010|00 3ffffe8 [26]
+ (219) |11111111|11111111|11111010|01 3ffffe9 [26]
+ (220) |11111111|11111111|11111111|1101 ffffffd [28]
+ (221) |11111111|11111111|11111100|011 7ffffe3 [27]
+ (222) |11111111|11111111|11111100|100 7ffffe4 [27]
+ (223) |11111111|11111111|11111100|101 7ffffe5 [27]
+ (224) |11111111|11111110|1100 fffec [20]
+ (225) |11111111|11111111|11110011 fffff3 [24]
+ (226) |11111111|11111110|1101 fffed [20]
+ (227) |11111111|11111111|00110 1fffe6 [21]
+ (228) |11111111|11111111|101001 3fffe9 [22]
+ (229) |11111111|11111111|00111 1fffe7 [21]
+ (230) |11111111|11111111|01000 1fffe8 [21]
+ (231) |11111111|11111111|1110011 7ffff3 [23]
+ (232) |11111111|11111111|101010 3fffea [22]
+ (233) |11111111|11111111|101011 3fffeb [22]
+ (234) |11111111|11111111|11110111|0 1ffffee [25]
+ (235) |11111111|11111111|11110111|1 1ffffef [25]
+ (236) |11111111|11111111|11110100 fffff4 [24]
+ (237) |11111111|11111111|11110101 fffff5 [24]
+ (238) |11111111|11111111|11111010|10 3ffffea [26]
+ (239) |11111111|11111111|1110100 7ffff4 [23]
+ (240) |11111111|11111111|11111010|11 3ffffeb [26]
+ (241) |11111111|11111111|11111100|110 7ffffe6 [27]
+ (242) |11111111|11111111|11111011|00 3ffffec [26]
+ (243) |11111111|11111111|11111011|01 3ffffed [26]
+ (244) |11111111|11111111|11111100|111 7ffffe7 [27]
+ (245) |11111111|11111111|11111101|000 7ffffe8 [27]
+ (246) |11111111|11111111|11111101|001 7ffffe9 [27]
+ (247) |11111111|11111111|11111101|010 7ffffea [27]
+ (248) |11111111|11111111|11111101|011 7ffffeb [27]
+ (249) |11111111|11111111|11111111|1110 ffffffe [28]
+ (250) |11111111|11111111|11111101|100 7ffffec [27]
+ (251) |11111111|11111111|11111101|101 7ffffed [27]
+ (252) |11111111|11111111|11111101|110 7ffffee [27]
+ (253) |11111111|11111111|11111101|111 7ffffef [27]
+ (254) |11111111|11111111|11111110|000 7fffff0 [27]
+ (255) |11111111|11111111|11111011|10 3ffffee [26]"""
+
+# parse Huffman code
+for line in StringIO(rfc7541_huffman_code):
+ # we need just last two columns
+ l = line.rstrip().split(" ")
+ # len in bits
+ code_len = l[-1][1:-1].strip()
+ # code as hex aligned to LSB
+ code = l[-2].strip()
+ huff_code_table.append((code_len, code))
+
+f = open("../http2/huffman_table.h", "w")
+f.write(
+ """/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2025 Cisco Systems, Inc.
+ */
+
+/* generated by mk_huffman_table.py */
+
+#ifndef SRC_PLUGINS_HTTP_HUFFMAN_TABLE_H_
+#define SRC_PLUGINS_HTTP_HUFFMAN_TABLE_H_
+
+#include <vppinfra/types.h>
+
+typedef struct
+{
+ u8 code_len;
+ u32 code;
+} hpack_huffman_symbol_t;
+
+static hpack_huffman_symbol_t huff_sym_table[] = {
+"""
+)
+
+# encoding table
+[f.write(" {" + code[0] + ", 0x" + code[1] + "},\n") for code in huff_code_table]
+
+f.write(
+ """};
+
+typedef struct
+{
+ u8 symbol;
+ u8 code_len;
+} hpack_huffman_code_t;
+
+static hpack_huffman_code_t huff_code_table_fast[] = {
+"""
+)
+
+# fast decoding table, symbols with code length from 5 to 8 bits (most of printable ASCII characters)
+[generate_slots(f, i, 5) for i, code in enumerate(huff_code_table) if code[0] == "5"]
+[generate_slots(f, i, 6) for i, code in enumerate(huff_code_table) if code[0] == "6"]
+[generate_slots(f, i, 7) for i, code in enumerate(huff_code_table) if code[0] == "7"]
+[generate_slots(f, i, 8) for i, code in enumerate(huff_code_table) if code[0] == "8"]
+
+# last 2 entries are prefixes of longer codes, code_len set to 0
+f.write(" { 0x00, 0 },\n")
+f.write(" { 0x00, 0 },\n")
+
+f.write(
+ """};
+
+typedef struct
+{
+ u32 first_code;
+ u8 code_len;
+ u8 symbols[29];
+} hpack_huffman_group_t;
+
+/* clang-format off */
+
+static hpack_huffman_group_t huff_code_table_slow[] = {
+"""
+)
+for i in range(10, 31):
+ symbols = [
+ (symbol, code[1])
+ for symbol, code in enumerate(huff_code_table)
+ if code[0] == str(i)
+ ]
+ if symbols:
+ _, first_code = symbols[0]
+ f.write(" {\n 0x" + first_code + ", /* first_code */\n")
+ f.write(" " + str(i) + ", /* code_len */\n")
+ f.write(" {\n ")
+ [f.write(" 0x%02X," % s) for s, c in symbols[:10]]
+ if len(symbols) > 10:
+ f.write("\n ")
+ [f.write(" 0x%02X," % s) for s, c in symbols[10:20]]
+ if len(symbols) > 20:
+ f.write("\n ")
+ [f.write(" 0x%02X," % s) for s, c in symbols[20:30]]
+ f.write("\n } /* symbols */\n },\n")
+
+f.write(
+ """};
+
+/* clang-format on */
+
+always_inline hpack_huffman_group_t *
+hpack_huffman_get_group (u32 value)
+{
+"""
+)
+
+index = 0
+
+symbols = [
+ (symbol, code[1]) for symbol, code in enumerate(huff_code_table) if code[0] == "10"
+]
+_, last_code = symbols[-1]
+boundary = (int(last_code, 16) + 1) << 22
+f.write(" if (value < 0x%X)\n" % boundary)
+f.write(" return &huff_code_table_slow[%d];\n" % index)
+index += 1
+
+for i in range(11, 30):
+ symbols = [
+ (symbol, code[1])
+ for symbol, code in enumerate(huff_code_table)
+ if code[0] == str(i)
+ ]
+ if symbols:
+ _, last_code = symbols[-1]
+ boundary = (int(last_code, 16) + 1) << (32 - i)
+ f.write(" else if (value < 0x%X)\n" % boundary)
+ f.write(" return &huff_code_table_slow[%d];\n" % index)
+ index += 1
+
+f.write(" else\n")
+f.write(" return &huff_code_table_slow[%d];\n" % index)
+
+f.write(
+ """}
+
+#endif /* SRC_PLUGINS_HTTP_HUFFMAN_TABLE_H_ */
+"""
+)
+
+f.close()
diff --git a/src/plugins/http/http.c b/src/plugins/http/http.c
index 69b661d0611..94914aaccc3 100644
--- a/src/plugins/http/http.c
+++ b/src/plugins/http/http.c
@@ -13,43 +13,59 @@
* limitations under the License.
*/
+#include <vpp/app/version.h>
+#include <vnet/session/application_interface.h>
+#include <vnet/session/application.h>
+
#include <http/http.h>
-#include <vnet/session/session.h>
+#include <http/http_private.h>
#include <http/http_timer.h>
-#include <http/http_status_codes.h>
-#include <http/http_header_names.h>
static http_main_t http_main;
-
-#define HTTP_FIFO_THRESH (16 << 10)
-
-/* HTTP state machine result */
-typedef enum http_sm_result_t_
-{
- HTTP_SM_STOP = 0,
- HTTP_SM_CONTINUE = 1,
- HTTP_SM_ERROR = -1,
-} http_sm_result_t;
+static http_engine_vft_t *http_vfts;
const http_buffer_type_t msg_to_buf_type[] = {
[HTTP_MSG_DATA_INLINE] = HTTP_BUFFER_FIFO,
[HTTP_MSG_DATA_PTR] = HTTP_BUFFER_PTR,
};
-const char *http_upgrade_proto_str[] = { "",
-#define _(sym, str) str,
- foreach_http_upgrade_proto
-#undef _
-};
+void
+http_register_engine (const http_engine_vft_t *vft, http_version_t version)
+{
+ vec_validate (http_vfts, version);
+ http_vfts[version] = *vft;
+}
+
+int
+http_v_find_index (u8 *vec, u32 offset, u32 num, char *str)
+{
+ int start_index = offset;
+ u32 slen = (u32) strnlen_s_inline (str, 16);
+ u32 vlen = vec_len (vec);
+
+ ASSERT (slen > 0);
-#define expect_char(c) \
- if (*p++ != c) \
- { \
- clib_warning ("unexpected character"); \
- return -1; \
+ if (vlen <= slen)
+ return -1;
+
+ int end_index = vlen - slen;
+ if (num)
+ {
+ if (num < slen)
+ return -1;
+ end_index = clib_min (end_index, offset + num - slen);
}
-static u8 *
+ for (; start_index <= end_index; start_index++)
+ {
+ if (!memcmp (vec + start_index, str, slen))
+ return start_index;
+ }
+
+ return -1;
+}
+
+u8 *
format_http_req_state (u8 *s, va_list *va)
{
http_req_state_t state = va_arg (*va, http_req_state_t);
@@ -68,18 +84,7 @@ format_http_req_state (u8 *s, va_list *va)
return format (s, "%s", t);
}
-#define http_req_state_change(_hc, _state) \
- do \
- { \
- HTTP_DBG (1, "changing http req state: %U -> %U", \
- format_http_req_state, (_hc)->req.state, \
- format_http_req_state, _state); \
- ASSERT ((_hc)->req.state != HTTP_REQ_STATE_TUNNEL); \
- (_hc)->req.state = _state; \
- } \
- while (0)
-
-static u8 *
+u8 *
format_http_conn_state (u8 *s, va_list *args)
{
http_conn_t *hc = va_arg (*args, http_conn_t *);
@@ -98,36 +103,41 @@ format_http_conn_state (u8 *s, va_list *args)
return format (s, "%s", t);
}
+u8 *
+format_http_time_now (u8 *s, va_list *args)
+{
+ http_conn_t __clib_unused *hc = va_arg (*args, http_conn_t *);
+ http_main_t *hm = &http_main;
+ f64 now = clib_timebase_now (&hm->timebase);
+ return format (s, "%U", format_clib_timebase_time, now);
+}
+
static inline http_worker_t *
-http_worker_get (u32 thread_index)
+http_worker_get (clib_thread_index_t thread_index)
{
return &http_main.wrk[thread_index];
}
static inline u32
-http_conn_alloc_w_thread (u32 thread_index)
+http_conn_alloc_w_thread (clib_thread_index_t thread_index)
{
http_worker_t *wrk = http_worker_get (thread_index);
http_conn_t *hc;
pool_get_aligned_safe (wrk->conn_pool, hc, CLIB_CACHE_LINE_BYTES);
- clib_memset (hc, 0, sizeof (*hc));
- hc->c_thread_index = thread_index;
- hc->h_hc_index = hc - wrk->conn_pool;
- hc->h_pa_session_handle = SESSION_INVALID_HANDLE;
- hc->h_tc_session_handle = SESSION_INVALID_HANDLE;
- return hc->h_hc_index;
+ return (hc - wrk->conn_pool);
}
static inline http_conn_t *
-http_conn_get_w_thread (u32 hc_index, u32 thread_index)
+http_conn_get_w_thread (u32 hc_index, clib_thread_index_t thread_index)
{
http_worker_t *wrk = http_worker_get (thread_index);
return pool_elt_at_index (wrk->conn_pool, hc_index);
}
static inline http_conn_t *
-http_conn_get_w_thread_if_valid (u32 hc_index, u32 thread_index)
+http_conn_get_w_thread_if_valid (u32 hc_index,
+ clib_thread_index_t thread_index)
{
http_worker_t *wrk = http_worker_get (thread_index);
if (pool_is_free_index (wrk->conn_pool, hc_index))
@@ -135,13 +145,22 @@ http_conn_get_w_thread_if_valid (u32 hc_index, u32 thread_index)
return pool_elt_at_index (wrk->conn_pool, hc_index);
}
-void
+static void
http_conn_free (http_conn_t *hc)
{
http_worker_t *wrk = http_worker_get (hc->c_thread_index);
+ if (CLIB_DEBUG)
+ memset (hc, 0xba, sizeof (*hc));
pool_put (wrk->conn_pool, hc);
}
+static void
+http_add_postponed_ho_cleanups (u32 ho_hc_index)
+{
+ http_main_t *hm = &http_main;
+ vec_add1 (hm->postponed_ho_free, ho_hc_index);
+}
+
static inline http_conn_t *
http_ho_conn_get (u32 ho_hc_index)
{
@@ -149,26 +168,66 @@ http_ho_conn_get (u32 ho_hc_index)
return pool_elt_at_index (hm->ho_conn_pool, ho_hc_index);
}
-void
+static void
http_ho_conn_free (http_conn_t *ho_hc)
{
http_main_t *hm = &http_main;
+ if (CLIB_DEBUG)
+ memset (ho_hc, 0xba, sizeof (*ho_hc));
pool_put (hm->ho_conn_pool, ho_hc);
}
+static void
+http_ho_try_free (u32 ho_hc_index)
+{
+ http_conn_t *ho_hc;
+ HTTP_DBG (1, "half open: %x", ho_hc_index);
+ ho_hc = http_ho_conn_get (ho_hc_index);
+ if (!(ho_hc->flags & HTTP_CONN_F_HO_DONE))
+ {
+ HTTP_DBG (1, "postponed cleanup");
+ ho_hc->hc_tc_session_handle = SESSION_INVALID_HANDLE;
+ http_add_postponed_ho_cleanups (ho_hc_index);
+ return;
+ }
+ if (!(ho_hc->flags & HTTP_CONN_F_NO_APP_SESSION))
+ session_half_open_delete_notify (&ho_hc->connection);
+ http_ho_conn_free (ho_hc);
+}
+
+static void
+http_flush_postponed_ho_cleanups ()
+{
+ http_main_t *hm = &http_main;
+ u32 *ho_indexp, *tmp;
+
+ tmp = hm->postponed_ho_free;
+ hm->postponed_ho_free = hm->ho_free_list;
+ hm->ho_free_list = tmp;
+
+ vec_foreach (ho_indexp, hm->ho_free_list)
+ http_ho_try_free (*ho_indexp);
+
+ vec_reset_length (hm->ho_free_list);
+}
+
static inline u32
http_ho_conn_alloc (void)
{
http_main_t *hm = &http_main;
http_conn_t *hc;
+ if (vec_len (hm->postponed_ho_free))
+ http_flush_postponed_ho_cleanups ();
+
pool_get_aligned_safe (hm->ho_conn_pool, hc, CLIB_CACHE_LINE_BYTES);
clib_memset (hc, 0, sizeof (*hc));
- hc->h_hc_index = hc - hm->ho_conn_pool;
- hc->h_pa_session_handle = SESSION_INVALID_HANDLE;
- hc->h_tc_session_handle = SESSION_INVALID_HANDLE;
+ hc->hc_hc_index = hc - hm->ho_conn_pool;
+ hc->hc_pa_session_handle = SESSION_INVALID_HANDLE;
+ hc->hc_tc_session_handle = SESSION_INVALID_HANDLE;
hc->timeout = HTTP_CONN_TIMEOUT;
- return hc->h_hc_index;
+ hc->version = HTTP_VERSION_NA;
+ return hc->hc_hc_index;
}
static u32
@@ -178,18 +237,19 @@ http_listener_alloc (void)
http_conn_t *lhc;
pool_get_zero (hm->listener_pool, lhc);
- lhc->c_c_index = lhc - hm->listener_pool;
+ lhc->hc_hc_index = lhc - hm->listener_pool;
lhc->timeout = HTTP_CONN_TIMEOUT;
- return lhc->c_c_index;
+ lhc->version = HTTP_VERSION_NA;
+ return lhc->hc_hc_index;
}
-http_conn_t *
+static http_conn_t *
http_listener_get (u32 lhc_index)
{
return pool_elt_at_index (http_main.listener_pool, lhc_index);
}
-void
+static void
http_listener_free (http_conn_t *lhc)
{
http_main_t *hm = &http_main;
@@ -204,7 +264,7 @@ void
http_disconnect_transport (http_conn_t *hc)
{
vnet_disconnect_args_t a = {
- .handle = hc->h_tc_session_handle,
+ .handle = hc->hc_tc_session_handle,
.app_index = http_main.app_index,
};
@@ -214,6 +274,110 @@ http_disconnect_transport (http_conn_t *hc)
clib_warning ("disconnect returned");
}
+void
+http_shutdown_transport (http_conn_t *hc)
+{
+ vnet_shutdown_args_t a = {
+ .handle = hc->hc_tc_session_handle,
+ .app_index = http_main.app_index,
+ };
+
+ hc->state = HTTP_CONN_STATE_CLOSED;
+
+ if (vnet_shutdown_session (&a))
+ clib_warning ("shutdown returned");
+}
+
+http_status_code_t
+http_sc_by_u16 (u16 status_code)
+{
+ http_main_t *hm = &http_main;
+ return hm->sc_by_u16[status_code];
+}
+
+u8 *
+http_get_app_header_list (http_req_t *req, http_msg_t *msg)
+{
+ http_main_t *hm = &http_main;
+ session_t *as;
+ u8 *app_headers;
+ int rv;
+
+ as = session_get_from_handle (req->hr_pa_session_handle);
+
+ if (msg->data.type == HTTP_MSG_DATA_PTR)
+ {
+ uword app_headers_ptr;
+ rv = svm_fifo_dequeue (as->tx_fifo, sizeof (app_headers_ptr),
+ (u8 *) &app_headers_ptr);
+ ASSERT (rv == sizeof (app_headers_ptr));
+ app_headers = uword_to_pointer (app_headers_ptr, u8 *);
+ }
+ else
+ {
+ app_headers = hm->app_header_lists[as->thread_index];
+ rv = svm_fifo_dequeue (as->tx_fifo, msg->data.headers_len, app_headers);
+ ASSERT (rv == msg->data.headers_len);
+ }
+
+ return app_headers;
+}
+
+u8 *
+http_get_app_target (http_req_t *req, http_msg_t *msg)
+{
+ session_t *as;
+ u8 *target;
+ int rv;
+
+ as = session_get_from_handle (req->hr_pa_session_handle);
+
+ if (msg->data.type == HTTP_MSG_DATA_PTR)
+ {
+ uword target_ptr;
+ rv = svm_fifo_dequeue (as->tx_fifo, sizeof (target_ptr),
+ (u8 *) &target_ptr);
+ ASSERT (rv == sizeof (target_ptr));
+ target = uword_to_pointer (target_ptr, u8 *);
+ }
+ else
+ {
+ vec_reset_length (req->target);
+ vec_validate (req->target, msg->data.target_path_len - 1);
+ rv =
+ svm_fifo_dequeue (as->tx_fifo, msg->data.target_path_len, req->target);
+ ASSERT (rv == msg->data.target_path_len);
+ target = req->target;
+ }
+ return target;
+}
+
+u8 *
+http_get_tx_buf (http_conn_t *hc)
+{
+ http_main_t *hm = &http_main;
+ u8 *buf = hm->tx_bufs[hc->c_thread_index];
+ vec_reset_length (buf);
+ return buf;
+}
+
+u8 *
+http_get_rx_buf (http_conn_t *hc)
+{
+ http_main_t *hm = &http_main;
+ u8 *buf = hm->rx_bufs[hc->c_thread_index];
+ vec_reset_length (buf);
+ return buf;
+}
+
+void
+http_req_tx_buffer_init (http_req_t *req, http_msg_t *msg)
+{
+ session_t *as = session_get_from_handle (req->hr_pa_session_handle);
+ http_buffer_init (&req->tx_buf, msg_to_buf_type[msg->data.type], as->tx_fifo,
+ msg->data.body_len);
+}
+
static void
http_conn_invalidate_timer_cb (u32 hs_handle)
{
@@ -230,7 +394,7 @@ http_conn_invalidate_timer_cb (u32 hs_handle)
}
hc->timer_handle = HTTP_TIMER_HANDLE_INVALID;
- hc->pending_timer = 1;
+ hc->flags |= HTTP_CONN_F_PENDING_TIMER;
}
static void
@@ -250,24 +414,30 @@ http_conn_timeout_cb (void *hc_handlep)
return;
}
- if (!hc->pending_timer)
+ if (!(hc->flags & HTTP_CONN_F_PENDING_TIMER))
{
HTTP_DBG (1, "timer not pending");
return;
}
- session_transport_closing_notify (&hc->connection);
+ /* in case nothing received on cleartext connection before timeout */
+ if (PREDICT_FALSE (hc->version != HTTP_VERSION_NA))
+ http_vfts[hc->version].transport_close_callback (hc);
http_disconnect_transport (hc);
}
+/*************************/
+/* session VFT callbacks */
+/*************************/
+
int
http_ts_accept_callback (session_t *ts)
{
- session_t *ts_listener, *as, *asl;
- app_worker_t *app_wrk;
+ session_t *ts_listener;
http_conn_t *lhc, *hc;
u32 hc_index, thresh;
- int rv;
+ http_conn_handle_t hc_handle;
+ transport_proto_t tp;
ts_listener = listen_session_get_from_handle (ts->listener_handle);
lhc = http_listener_get (ts_listener->opaque);
@@ -277,61 +447,35 @@ http_ts_accept_callback (session_t *ts)
clib_memcpy_fast (hc, lhc, sizeof (*lhc));
hc->timer_handle = HTTP_TIMER_HANDLE_INVALID;
hc->c_thread_index = ts->thread_index;
- hc->h_hc_index = hc_index;
-
- hc->h_tc_session_handle = session_handle (ts);
+ hc->hc_hc_index = hc_index;
+ hc->flags |= HTTP_CONN_F_NO_APP_SESSION;
+ hc->hc_tc_session_handle = session_handle (ts);
hc->c_flags |= TRANSPORT_CONNECTION_F_NO_LOOKUP;
-
hc->state = HTTP_CONN_STATE_ESTABLISHED;
- http_req_state_change (hc, HTTP_REQ_STATE_WAIT_TRANSPORT_METHOD);
ts->session_state = SESSION_STATE_READY;
- ts->opaque = hc_index;
-
- /*
- * Alloc session and initialize
- */
- as = session_alloc (hc->c_thread_index);
- hc->c_s_index = as->session_index;
-
- as->app_wrk_index = hc->h_pa_wrk_index;
- as->connection_index = hc->c_c_index;
- as->session_state = SESSION_STATE_ACCEPTING;
-
- asl = listen_session_get_from_handle (lhc->h_pa_session_handle);
- as->session_type = asl->session_type;
- as->listener_handle = lhc->h_pa_session_handle;
-
- /*
- * Init session fifos and notify app
- */
- if ((rv = app_worker_init_accepted (as)))
+ tp = session_get_transport_proto (ts);
+ if (tp == TRANSPORT_PROTO_TLS)
{
- HTTP_DBG (1, "failed to allocate fifos");
- hc->h_pa_session_handle = SESSION_INVALID_HANDLE;
- session_free (as);
- return rv;
+ /* TODO: set by ALPN result */
+ hc->version = HTTP_VERSION_1;
}
-
- hc->h_pa_session_handle = session_handle (as);
- hc->h_pa_wrk_index = as->app_wrk_index;
- app_wrk = app_worker_get (as->app_wrk_index);
+ else
+ {
+ /* going to decide in http_ts_rx_callback */
+ hc->version = HTTP_VERSION_NA;
+ }
+ hc_handle.version = hc->version;
+ hc_handle.conn_index = hc_index;
+ ts->opaque = hc_handle.as_u32;
HTTP_DBG (1, "Accepted on listener %u new connection [%u]%x",
ts_listener->opaque, vlib_get_thread_index (), hc_index);
- if ((rv = app_worker_accept_notify (app_wrk, as)))
- {
- HTTP_DBG (0, "app accept returned");
- session_free (as);
- return rv;
- }
-
/* Avoid enqueuing small chunks of data on transport tx notifications. If
* the fifo is small (under 16K) we set the threshold to it's size, meaning
* a notification will be given when the fifo empties.
*/
- ts = session_get_from_handle (hc->h_tc_session_handle);
thresh = clib_min (svm_fifo_size (ts->tx_fifo), HTTP_FIFO_THRESH);
svm_fifo_set_deq_thresh (ts->tx_fifo, thresh);
@@ -345,9 +489,9 @@ http_ts_connected_callback (u32 http_app_index, u32 ho_hc_index, session_t *ts,
session_error_t err)
{
u32 new_hc_index;
- session_t *as;
http_conn_t *hc, *ho_hc;
app_worker_t *app_wrk;
+ http_conn_handle_t hc_handle;
int rv;
ho_hc = http_ho_conn_get (ho_hc_index);
@@ -357,9 +501,10 @@ http_ts_connected_callback (u32 http_app_index, u32 ho_hc_index, session_t *ts,
{
clib_warning ("half-open hc index %d, error: %U", ho_hc_index,
format_session_error, err);
- app_wrk = app_worker_get_if_valid (ho_hc->h_pa_wrk_index);
+ ho_hc->flags |= HTTP_CONN_F_HO_DONE;
+ app_wrk = app_worker_get_if_valid (ho_hc->hc_pa_wrk_index);
if (app_wrk)
- app_worker_connect_notify (app_wrk, 0, err, ho_hc->h_pa_app_api_ctx);
+ app_worker_connect_notify (app_wrk, 0, err, ho_hc->hc_pa_app_api_ctx);
return 0;
}
@@ -368,46 +513,31 @@ http_ts_connected_callback (u32 http_app_index, u32 ho_hc_index, session_t *ts,
clib_memcpy_fast (hc, ho_hc, sizeof (*hc));
+ /* in chain with TLS there is race on half-open cleanup */
+ __atomic_fetch_or (&ho_hc->flags, HTTP_CONN_F_HO_DONE, __ATOMIC_RELEASE);
+
hc->timer_handle = HTTP_TIMER_HANDLE_INVALID;
hc->c_thread_index = ts->thread_index;
- hc->h_tc_session_handle = session_handle (ts);
- hc->c_c_index = new_hc_index;
+ hc->hc_tc_session_handle = session_handle (ts);
+ hc->hc_hc_index = new_hc_index;
hc->c_flags |= TRANSPORT_CONNECTION_F_NO_LOOKUP;
hc->state = HTTP_CONN_STATE_ESTABLISHED;
- http_req_state_change (hc, HTTP_REQ_STATE_WAIT_APP_METHOD);
-
ts->session_state = SESSION_STATE_READY;
- ts->opaque = new_hc_index;
-
- /* allocate app session and initialize */
-
- as = session_alloc (hc->c_thread_index);
- hc->c_s_index = as->session_index;
- as->connection_index = hc->c_c_index;
- as->app_wrk_index = hc->h_pa_wrk_index;
- as->session_state = SESSION_STATE_READY;
- as->opaque = hc->h_pa_app_api_ctx;
- as->session_type = session_type_from_proto_and_ip (
- TRANSPORT_PROTO_HTTP, session_type_is_ip4 (ts->session_type));
+ hc->flags |= HTTP_CONN_F_NO_APP_SESSION;
+ /* TODO: TLS set by ALPN result, TCP: prior knowledge (set in ho) */
+ hc_handle.version = hc->version;
+ hc_handle.conn_index = new_hc_index;
+ ts->opaque = hc_handle.as_u32;
HTTP_DBG (1, "half-open hc index %x, hc [%u]%x", ho_hc_index,
ts->thread_index, new_hc_index);
- app_wrk = app_worker_get (hc->h_pa_wrk_index);
- if (!app_wrk)
+ if ((rv = http_vfts[hc->version].transport_connected_callback (hc)))
{
- clib_warning ("no app worker");
- return -1;
- }
-
- if ((rv = app_worker_init_connected (app_wrk, as)))
- {
- HTTP_DBG (1, "failed to allocate fifos");
- session_free (as);
+ clib_warning ("transport_connected_callback failed, rv=%d", rv);
return rv;
}
- app_worker_connect_notify (app_wrk, as, err, hc->h_pa_app_api_ctx);
- hc->h_pa_session_handle = session_handle (as);
+
http_conn_timer_start (hc);
return 0;
@@ -417,1832 +547,154 @@ static void
http_ts_disconnect_callback (session_t *ts)
{
http_conn_t *hc;
+ http_conn_handle_t hc_handle;
+
+ hc_handle.as_u32 = ts->opaque;
- hc = http_conn_get_w_thread (ts->opaque, ts->thread_index);
+ HTTP_DBG (1, "hc [%u]%x", ts->thread_index, hc_handle.conn_index);
+
+ hc = http_conn_get_w_thread (hc_handle.conn_index, ts->thread_index);
if (hc->state < HTTP_CONN_STATE_TRANSPORT_CLOSED)
hc->state = HTTP_CONN_STATE_TRANSPORT_CLOSED;
- /* Nothing more to rx, propagate to app */
- if (!svm_fifo_max_dequeue_cons (ts->rx_fifo))
- session_transport_closing_notify (&hc->connection);
+ /* in case peer close cleartext connection before send something */
+ if (PREDICT_FALSE (hc->version == HTTP_VERSION_NA))
+ return;
+
+ http_vfts[hc->version].transport_close_callback (hc);
}
static void
http_ts_reset_callback (session_t *ts)
{
http_conn_t *hc;
+ http_conn_handle_t hc_handle;
- hc = http_conn_get_w_thread (ts->opaque, ts->thread_index);
-
- hc->state = HTTP_CONN_STATE_CLOSED;
- http_buffer_free (&hc->req.tx_buf);
- http_req_state_change (hc, HTTP_REQ_STATE_WAIT_TRANSPORT_METHOD);
- session_transport_reset_notify (&hc->connection);
-
- http_disconnect_transport (hc);
-}
-
-/**
- * http error boilerplate
- */
-static const char *http_error_template = "HTTP/1.1 %s\r\n"
- "Date: %U GMT\r\n"
- "Connection: close\r\n"
- "Content-Length: 0\r\n\r\n";
-
-/**
- * http response boilerplate
- */
-static const char *http_response_template = "HTTP/1.1 %s\r\n"
- "Date: %U GMT\r\n"
- "Server: %v\r\n";
-
-static const char *content_len_template = "Content-Length: %llu\r\n";
-
-static const char *connection_upgrade_template = "Connection: upgrade\r\n"
- "Upgrade: %s\r\n";
-
-/**
- * http request boilerplate
- */
-static const char *http_get_request_template = "GET %s HTTP/1.1\r\n"
- "Host: %v\r\n"
- "User-Agent: %v\r\n";
-
-static const char *http_post_request_template = "POST %s HTTP/1.1\r\n"
- "Host: %v\r\n"
- "User-Agent: %v\r\n"
- "Content-Length: %llu\r\n";
-
-static u32
-http_send_data (http_conn_t *hc, u8 *data, u32 length)
-{
- const u32 max_burst = 64 << 10;
- session_t *ts;
- u32 to_send;
- int rv;
-
- ts = session_get_from_handle (hc->h_tc_session_handle);
+ hc_handle.as_u32 = ts->opaque;
- to_send = clib_min (length, max_burst);
- rv = svm_fifo_enqueue (ts->tx_fifo, to_send, data);
- if (rv <= 0)
- {
- clib_warning ("svm_fifo_enqueue failed, rv %d", rv);
- return 0;
- }
+ HTTP_DBG (1, "hc [%u]%x", ts->thread_index, hc_handle.conn_index);
- if (svm_fifo_set_event (ts->tx_fifo))
- session_program_tx_io_evt (ts->handle, SESSION_IO_EVT_TX);
+ hc = http_conn_get_w_thread (hc_handle.conn_index, ts->thread_index);
- return rv;
-}
+ hc->state = HTTP_CONN_STATE_CLOSED;
+ /* in case peer reset cleartext connection before send something */
+ if (PREDICT_FALSE (hc->version != HTTP_VERSION_NA))
+ http_vfts[hc->version].transport_reset_callback (hc);
-static void
-http_send_error (http_conn_t *hc, http_status_code_t ec)
-{
- http_main_t *hm = &http_main;
- u8 *data;
- f64 now;
-
- if (ec >= HTTP_N_STATUS)
- ec = HTTP_STATUS_INTERNAL_ERROR;
-
- now = clib_timebase_now (&hm->timebase);
- data = format (0, http_error_template, http_status_code_str[ec],
- format_clib_timebase_time, now);
- HTTP_DBG (3, "%v", data);
- http_send_data (hc, data, vec_len (data));
- vec_free (data);
+ http_disconnect_transport (hc);
}
static int
-http_read_message (http_conn_t *hc)
+http_ts_rx_callback (session_t *ts)
{
+ http_conn_t *hc;
+ http_conn_handle_t hc_handle;
u32 max_deq;
- session_t *ts;
- int n_read;
-
- ts = session_get_from_handle (hc->h_tc_session_handle);
-
- max_deq = svm_fifo_max_dequeue (ts->rx_fifo);
- if (PREDICT_FALSE (max_deq == 0))
- return -1;
-
- vec_validate (hc->req.rx_buf, max_deq - 1);
- n_read = svm_fifo_peek (ts->rx_fifo, 0, max_deq, hc->req.rx_buf);
- ASSERT (n_read == max_deq);
- HTTP_DBG (1, "read %u bytes from rx_fifo", n_read);
-
- return 0;
-}
-
-static void
-http_read_message_drop (http_conn_t *hc, u32 len)
-{
- session_t *ts;
-
- ts = session_get_from_handle (hc->h_tc_session_handle);
- svm_fifo_dequeue_drop (ts->rx_fifo, len);
- vec_reset_length (hc->req.rx_buf);
+ u8 *rx_buf;
- if (svm_fifo_is_empty (ts->rx_fifo))
- svm_fifo_unset_event (ts->rx_fifo);
-}
-
-static void
-http_read_message_drop_all (http_conn_t *hc)
-{
- session_t *ts;
-
- ts = session_get_from_handle (hc->h_tc_session_handle);
- svm_fifo_dequeue_drop_all (ts->rx_fifo);
- vec_reset_length (hc->req.rx_buf);
-
- if (svm_fifo_is_empty (ts->rx_fifo))
- svm_fifo_unset_event (ts->rx_fifo);
-}
-
-/**
- * @brief Find the first occurrence of the string in the vector.
- *
- * @param vec The vector to be scanned.
- * @param offset Search offset in the vector.
- * @param num Maximum number of characters to be searched if non-zero.
- * @param str The string to be searched.
- *
- * @return @c -1 if the string is not found within the vector; index otherwise.
- */
-static inline int
-v_find_index (u8 *vec, u32 offset, u32 num, char *str)
-{
- int start_index = offset;
- u32 slen = (u32) strnlen_s_inline (str, 16);
- u32 vlen = vec_len (vec);
-
- ASSERT (slen > 0);
-
- if (vlen <= slen)
- return -1;
-
- int end_index = vlen - slen;
- if (num)
- {
- if (num < slen)
- return -1;
- end_index = clib_min (end_index, offset + num - slen);
- }
-
- for (; start_index <= end_index; start_index++)
- {
- if (!memcmp (vec + start_index, str, slen))
- return start_index;
- }
-
- return -1;
-}
-
-static void
-http_identify_optional_query (http_req_t *req)
-{
- int i;
- for (i = req->target_path_offset;
- i < (req->target_path_offset + req->target_path_len); i++)
- {
- if (req->rx_buf[i] == '?')
- {
- req->target_query_offset = i + 1;
- req->target_query_len = req->target_path_offset +
- req->target_path_len -
- req->target_query_offset;
- req->target_path_len =
- req->target_path_len - req->target_query_len - 1;
- break;
- }
- }
-}
-
-static int
-http_parse_target (http_req_t *req)
-{
- int i;
- u8 *p, *end;
-
- /* asterisk-form = "*" */
- if ((req->rx_buf[req->target_path_offset] == '*') &&
- (req->target_path_len == 1))
- {
- req->target_form = HTTP_TARGET_ASTERISK_FORM;
- /* we do not support OPTIONS request */
- return -1;
- }
-
- /* origin-form = 1*( "/" segment ) [ "?" query ] */
- if (req->rx_buf[req->target_path_offset] == '/')
- {
- /* drop leading slash */
- req->target_path_len--;
- req->target_path_offset++;
- req->target_form = HTTP_TARGET_ORIGIN_FORM;
- http_identify_optional_query (req);
- /* can't be CONNECT method */
- return req->method == HTTP_REQ_CONNECT ? -1 : 0;
- }
-
- /* absolute-form =
- * scheme "://" host [ ":" port ] *( "/" segment ) [ "?" query ] */
- if (req->target_path_len > 8 &&
- !memcmp (req->rx_buf + req->target_path_offset, "http", 4))
- {
- req->scheme = HTTP_URL_SCHEME_HTTP;
- p = req->rx_buf + req->target_path_offset + 4;
- if (*p == 's')
- {
- p++;
- req->scheme = HTTP_URL_SCHEME_HTTPS;
- }
- if (*p++ == ':')
- {
- expect_char ('/');
- expect_char ('/');
- req->target_form = HTTP_TARGET_ABSOLUTE_FORM;
- req->target_authority_offset = p - req->rx_buf;
- req->target_authority_len = 0;
- end = req->rx_buf + req->target_path_offset + req->target_path_len;
- while (p < end)
- {
- if (*p == '/')
- {
- p++; /* drop leading slash */
- req->target_path_offset = p - req->rx_buf;
- req->target_path_len = end - p;
- break;
- }
- req->target_authority_len++;
- p++;
- }
- if (!req->target_path_len)
- {
- clib_warning ("zero length host");
- return -1;
- }
- http_identify_optional_query (req);
- /* can't be CONNECT method */
- return req->method == HTTP_REQ_CONNECT ? -1 : 0;
- }
- }
-
- /* authority-form = host ":" port */
- for (i = req->target_path_offset;
- i < (req->target_path_offset + req->target_path_len); i++)
- {
- if ((req->rx_buf[i] == ':') && (isdigit (req->rx_buf[i + 1])))
- {
- req->target_authority_len = req->target_path_len;
- req->target_path_len = 0;
- req->target_authority_offset = req->target_path_offset;
- req->target_path_offset = 0;
- req->target_form = HTTP_TARGET_AUTHORITY_FORM;
- /* "authority-form" is only used for CONNECT requests */
- return req->method == HTTP_REQ_CONNECT ? 0 : -1;
- }
- }
-
- return -1;
-}
+ hc_handle.as_u32 = ts->opaque;
-static int
-http_parse_request_line (http_req_t *req, http_status_code_t *ec)
-{
- int i, target_len;
- u32 next_line_offset, method_offset;
+ HTTP_DBG (1, "hc [%u]%x", ts->thread_index, hc_handle.conn_index);
- /* request-line = method SP request-target SP HTTP-version CRLF */
- i = v_find_index (req->rx_buf, 8, 0, "\r\n");
- if (i < 0)
- {
- clib_warning ("request line incomplete");
- *ec = HTTP_STATUS_BAD_REQUEST;
- return -1;
- }
- HTTP_DBG (2, "request line length: %d", i);
- req->control_data_len = i + 2;
- next_line_offset = req->control_data_len;
-
- /* there should be at least one more CRLF */
- if (vec_len (req->rx_buf) < (next_line_offset + 2))
- {
- clib_warning ("malformed message, too short");
- *ec = HTTP_STATUS_BAD_REQUEST;
- return -1;
- }
+ hc = http_conn_get_w_thread (hc_handle.conn_index, ts->thread_index);
- /*
- * RFC9112 2.2:
- * In the interest of robustness, a server that is expecting to receive and
- * parse a request-line SHOULD ignore at least one empty line (CRLF)
- * received prior to the request-line.
- */
- method_offset = req->rx_buf[0] == '\r' && req->rx_buf[1] == '\n' ? 2 : 0;
- /* parse method */
- if (!memcmp (req->rx_buf + method_offset, "GET ", 4))
- {
- HTTP_DBG (0, "GET method");
- req->method = HTTP_REQ_GET;
- req->target_path_offset = method_offset + 4;
- }
- else if (!memcmp (req->rx_buf + method_offset, "POST ", 5))
- {
- HTTP_DBG (0, "POST method");
- req->method = HTTP_REQ_POST;
- req->target_path_offset = method_offset + 5;
- }
- else if (!memcmp (req->rx_buf + method_offset, "CONNECT ", 8))
- {
- HTTP_DBG (0, "CONNECT method");
- req->method = HTTP_REQ_CONNECT;
- req->upgrade_proto = HTTP_UPGRADE_PROTO_NA;
- req->target_path_offset = method_offset + 8;
- req->is_tunnel = 1;
- }
- else
- {
- if (req->rx_buf[method_offset] - 'A' <= 'Z' - 'A')
- {
- clib_warning ("method not implemented: %8v", req->rx_buf);
- *ec = HTTP_STATUS_NOT_IMPLEMENTED;
- return -1;
- }
- else
- {
- clib_warning ("not method name: %8v", req->rx_buf);
- *ec = HTTP_STATUS_BAD_REQUEST;
- return -1;
- }
- }
-
- /* find version */
- i = v_find_index (req->rx_buf, next_line_offset - 11, 11, " HTTP/");
- if (i < 0)
- {
- clib_warning ("HTTP version not present");
- *ec = HTTP_STATUS_BAD_REQUEST;
- return -1;
- }
- /* verify major version */
- if (isdigit (req->rx_buf[i + 6]))
- {
- if (req->rx_buf[i + 6] != '1')
- {
- clib_warning ("HTTP major version '%c' not supported",
- req->rx_buf[i + 6]);
- *ec = HTTP_STATUS_HTTP_VERSION_NOT_SUPPORTED;
- return -1;
- }
- }
- else
- {
- clib_warning ("HTTP major version '%c' is not digit",
- req->rx_buf[i + 6]);
- *ec = HTTP_STATUS_BAD_REQUEST;
- return -1;
- }
-
- /* parse request-target */
- HTTP_DBG (2, "http at %d", i);
- target_len = i - req->target_path_offset;
- HTTP_DBG (2, "target_len %d", target_len);
- if (target_len < 1)
- {
- clib_warning ("request-target not present");
- *ec = HTTP_STATUS_BAD_REQUEST;
- return -1;
- }
- req->target_path_len = target_len;
- req->target_query_offset = 0;
- req->target_query_len = 0;
- req->target_authority_len = 0;
- req->target_authority_offset = 0;
- if (http_parse_target (req))
- {
- clib_warning ("invalid target");
- *ec = HTTP_STATUS_BAD_REQUEST;
- return -1;
- }
- HTTP_DBG (2, "request-target path length: %u", req->target_path_len);
- HTTP_DBG (2, "request-target path offset: %u", req->target_path_offset);
- HTTP_DBG (2, "request-target query length: %u", req->target_query_len);
- HTTP_DBG (2, "request-target query offset: %u", req->target_query_offset);
-
- /* set buffer offset to nex line start */
- req->rx_buf_offset = next_line_offset;
-
- return 0;
-}
-
-#define parse_int(val, mul) \
- do \
- { \
- if (!isdigit (*p)) \
- { \
- clib_warning ("expected digit"); \
- return -1; \
- } \
- val += mul * (*p++ - '0'); \
- } \
- while (0)
-
-static int
-http_parse_status_line (http_req_t *req)
-{
- int i;
- u32 next_line_offset;
- u8 *p, *end;
- u16 status_code = 0;
- http_main_t *hm = &http_main;
-
- i = v_find_index (req->rx_buf, 0, 0, "\r\n");
- /* status-line = HTTP-version SP status-code SP [ reason-phrase ] CRLF */
- if (i < 0)
- {
- clib_warning ("status line incomplete");
- return -1;
- }
- HTTP_DBG (2, "status line length: %d", i);
- if (i < 12)
- {
- clib_warning ("status line too short (%d)", i);
- return -1;
- }
- req->control_data_len = i + 2;
- next_line_offset = req->control_data_len;
- p = req->rx_buf;
- end = req->rx_buf + i;
-
- /* there should be at least one more CRLF */
- if (vec_len (req->rx_buf) < (next_line_offset + 2))
- {
- clib_warning ("malformed message, too short");
- return -1;
- }
-
- /* parse version */
- expect_char ('H');
- expect_char ('T');
- expect_char ('T');
- expect_char ('P');
- expect_char ('/');
- expect_char ('1');
- expect_char ('.');
- if (!isdigit (*p++))
- {
- clib_warning ("invalid HTTP minor version");
- return -1;
- }
-
- /* skip space(s) */
- if (*p != ' ')
- {
- clib_warning ("no space after HTTP version");
- return -1;
- }
- do
- {
- p++;
- if (p == end)
- {
- clib_warning ("no status code");
- return -1;
- }
- }
- while (*p == ' ');
-
- /* parse status code */
- if ((end - p) < 3)
- {
- clib_warning ("not enough characters for status code");
- return -1;
- }
- parse_int (status_code, 100);
- parse_int (status_code, 10);
- parse_int (status_code, 1);
- if (status_code < 100 || status_code > 599)
- {
- clib_warning ("invalid status code %d", status_code);
- return -1;
- }
- req->status_code = hm->sc_by_u16[status_code];
- HTTP_DBG (0, "status code: %d", status_code);
-
- /* set buffer offset to nex line start */
- req->rx_buf_offset = next_line_offset;
-
- return 0;
-}
-
-static int
-http_identify_headers (http_req_t *req, http_status_code_t *ec)
-{
- int rv;
- u8 *p, *end, *name_start, *value_start;
- u32 name_len, value_len;
- http_field_line_t *field_line;
- uword header_index;
-
- vec_reset_length (req->headers);
- req->content_len_header_index = ~0;
- req->connection_header_index = ~0;
- req->upgrade_header_index = ~0;
- req->host_header_index = ~0;
- req->headers_offset = req->rx_buf_offset;
-
- /* check if we have any header */
- if ((req->rx_buf[req->rx_buf_offset] == '\r') &&
- (req->rx_buf[req->rx_buf_offset + 1] == '\n'))
- {
- /* just another CRLF -> no headers */
- HTTP_DBG (2, "no headers");
- req->headers_len = 0;
- req->control_data_len += 2;
- return 0;
- }
-
- end = req->rx_buf + vec_len (req->rx_buf);
- p = req->rx_buf + req->rx_buf_offset;
-
- while (1)
- {
- rv = _parse_field_name (&p, end, &name_start, &name_len);
- if (rv != 0)
- {
- *ec = HTTP_STATUS_BAD_REQUEST;
- return -1;
- }
- rv = _parse_field_value (&p, end, &value_start, &value_len);
- if (rv != 0 || (end - p) < 2)
- {
- *ec = HTTP_STATUS_BAD_REQUEST;
- return -1;
- }
-
- vec_add2 (req->headers, field_line, 1);
- field_line->name_offset =
- (name_start - req->rx_buf) - req->headers_offset;
- field_line->name_len = name_len;
- field_line->value_offset =
- (value_start - req->rx_buf) - req->headers_offset;
- field_line->value_len = value_len;
- header_index = field_line - req->headers;
-
- /* find headers that will be used later in preprocessing */
- /* names are case-insensitive (RFC9110 section 5.1) */
- if (req->content_len_header_index == ~0 &&
- http_token_is_case (
- (const char *) name_start, name_len,
- http_header_name_token (HTTP_HEADER_CONTENT_LENGTH)))
- req->content_len_header_index = header_index;
- else if (req->connection_header_index == ~0 &&
- http_token_is_case (
- (const char *) name_start, name_len,
- http_header_name_token (HTTP_HEADER_CONNECTION)))
- req->connection_header_index = header_index;
- else if (req->upgrade_header_index == ~0 &&
- http_token_is_case (
- (const char *) name_start, name_len,
- http_header_name_token (HTTP_HEADER_UPGRADE)))
- req->upgrade_header_index = header_index;
- else if (req->host_header_index == ~0 &&
- http_token_is_case ((const char *) name_start, name_len,
- http_header_name_token (HTTP_HEADER_HOST)))
- req->host_header_index = header_index;
-
- /* are we done? */
- if (*p == '\r' && *(p + 1) == '\n')
- break;
- }
-
- req->headers_len = p - (req->rx_buf + req->headers_offset);
- req->control_data_len += (req->headers_len + 2);
- HTTP_DBG (2, "headers length: %u", req->headers_len);
- HTTP_DBG (2, "headers offset: %u", req->headers_offset);
-
- return 0;
-}
-
-static int
-http_identify_message_body (http_req_t *req, http_status_code_t *ec)
-{
- int i;
- u8 *p;
- u64 body_len = 0, digit;
- http_field_line_t *field_line;
-
- req->body_len = 0;
-
- if (req->headers_len == 0)
- {
- HTTP_DBG (2, "no header, no message-body");
- return 0;
- }
- if (req->is_tunnel)
- {
- HTTP_DBG (2, "tunnel, no message-body");
- return 0;
- }
-
- /* TODO check for chunked transfer coding */
-
- if (req->content_len_header_index == ~0)
+ if (hc->state == HTTP_CONN_STATE_CLOSED)
{
- HTTP_DBG (2, "Content-Length header not present, no message-body");
+ HTTP_DBG (1, "conn closed");
+ svm_fifo_dequeue_drop_all (ts->rx_fifo);
return 0;
}
- field_line = vec_elt_at_index (req->headers, req->content_len_header_index);
-
- p = req->rx_buf + req->headers_offset + field_line->value_offset;
- for (i = 0; i < field_line->value_len; i++)
- {
- /* check for digit */
- if (!isdigit (*p))
- {
- clib_warning ("expected digit");
- *ec = HTTP_STATUS_BAD_REQUEST;
- return -1;
- }
- digit = *p - '0';
- u64 new_body_len = body_len * 10 + digit;
- /* check for overflow */
- if (new_body_len < body_len)
- {
- clib_warning ("too big number, overflow");
- *ec = HTTP_STATUS_BAD_REQUEST;
- return -1;
- }
- body_len = new_body_len;
- p++;
- }
-
- req->body_len = body_len;
-
- req->body_offset = req->headers_offset + req->headers_len + 2;
- HTTP_DBG (2, "body length: %llu", req->body_len);
- HTTP_DBG (2, "body offset: %u", req->body_offset);
-
- return 0;
-}
-
-static http_sm_result_t
-http_req_state_wait_transport_reply (http_conn_t *hc,
- transport_send_params_t *sp)
-{
- int rv;
- http_msg_t msg = {};
- app_worker_t *app_wrk;
- session_t *as;
- u32 len, max_enq, body_sent;
- http_status_code_t ec;
-
- rv = http_read_message (hc);
-
- /* Nothing yet, wait for data or timer expire */
- if (rv)
- {
- HTTP_DBG (1, "no data to deq");
- return HTTP_SM_STOP;
- }
-
- HTTP_DBG (3, "%v", hc->req.rx_buf);
-
- if (vec_len (hc->req.rx_buf) < 8)
- {
- clib_warning ("response buffer too short");
- goto error;
- }
-
- rv = http_parse_status_line (&hc->req);
- if (rv)
- goto error;
-
- rv = http_identify_headers (&hc->req, &ec);
- if (rv)
- goto error;
-
- rv = http_identify_message_body (&hc->req, &ec);
- if (rv)
- goto error;
-
- /* send at least "control data" which is necessary minimum,
- * if there is some space send also portion of body */
- as = session_get_from_handle (hc->h_pa_session_handle);
- max_enq = svm_fifo_max_enqueue (as->rx_fifo);
- max_enq -= sizeof (msg);
- if (max_enq < hc->req.control_data_len)
- {
- clib_warning ("not enough room for control data in app's rx fifo");
- goto error;
- }
- len = clib_min (max_enq, vec_len (hc->req.rx_buf));
-
- msg.type = HTTP_MSG_REPLY;
- msg.code = hc->req.status_code;
- msg.data.headers_offset = hc->req.headers_offset;
- msg.data.headers_len = hc->req.headers_len;
- msg.data.body_offset = hc->req.body_offset;
- msg.data.body_len = hc->req.body_len;
- msg.data.type = HTTP_MSG_DATA_INLINE;
- msg.data.len = len;
- msg.data.headers_ctx = pointer_to_uword (hc->req.headers);
-
- svm_fifo_seg_t segs[2] = { { (u8 *) &msg, sizeof (msg) },
- { hc->req.rx_buf, len } };
-
- rv = svm_fifo_enqueue_segments (as->rx_fifo, segs, 2, 0 /* allow partial */);
- ASSERT (rv == (sizeof (msg) + len));
-
- http_read_message_drop (hc, len);
-
- body_sent = len - hc->req.control_data_len;
- hc->req.to_recv = hc->req.body_len - body_sent;
- if (hc->req.to_recv == 0)
- {
- /* all sent, we are done */
- http_req_state_change (hc, HTTP_REQ_STATE_WAIT_APP_METHOD);
- }
- else
- {
- /* stream rest of the response body */
- http_req_state_change (hc, HTTP_REQ_STATE_TRANSPORT_IO_MORE_DATA);
- }
-
- app_wrk = app_worker_get_if_valid (as->app_wrk_index);
- if (app_wrk)
- app_worker_rx_notify (app_wrk, as);
- return HTTP_SM_STOP;
-
-error:
- http_read_message_drop_all (hc);
- session_transport_closing_notify (&hc->connection);
- session_transport_closed_notify (&hc->connection);
- http_disconnect_transport (hc);
- return HTTP_SM_ERROR;
-}
-
-#define http_field_line_value_token(_fl, _req) \
- (const char *) ((_req)->rx_buf + (_req)->headers_offset + \
- (_fl)->value_offset), \
- (_fl)->value_len
-
-static void
-http_check_connection_upgrade (http_req_t *req)
-{
- http_field_line_t *connection, *upgrade;
- u8 skip;
-
- skip = (req->method != HTTP_REQ_GET) + (req->connection_header_index == ~0) +
- (req->upgrade_header_index == ~0);
- if (skip)
- return;
-
- connection = vec_elt_at_index (req->headers, req->connection_header_index);
- /* connection options are case-insensitive (RFC9110 7.6.1) */
- if (http_token_is_case (http_field_line_value_token (connection, req),
- http_token_lit ("upgrade")))
- {
- upgrade = vec_elt_at_index (req->headers, req->upgrade_header_index);
-
- /* check upgrade protocol, we want to ignore something like upgrade to
- * newer HTTP version, only tunnels are supported */
- if (0)
- ;
-#define _(sym, str) \
- else if (http_token_is_case (http_field_line_value_token (upgrade, req), \
- http_token_lit (str))) req->upgrade_proto = \
- HTTP_UPGRADE_PROTO_##sym;
- foreach_http_upgrade_proto
-#undef _
- else return;
-
- HTTP_DBG (1, "connection upgrade: %U", format_http_bytes,
- req->rx_buf + req->headers_offset + upgrade->value_offset,
- upgrade->value_len);
- req->is_tunnel = 1;
- req->method = HTTP_REQ_CONNECT;
- }
-}
-
-static void
-http_target_fixup (http_conn_t *hc)
-{
- http_field_line_t *host;
-
- if (hc->req.target_form == HTTP_TARGET_ABSOLUTE_FORM)
- return;
-
- /* scheme fixup */
- hc->req.scheme = session_get_transport_proto (session_get_from_handle (
- hc->h_tc_session_handle)) == TRANSPORT_PROTO_TLS ?
- HTTP_URL_SCHEME_HTTPS :
- HTTP_URL_SCHEME_HTTP;
-
- if (hc->req.target_form == HTTP_TARGET_AUTHORITY_FORM ||
- hc->req.connection_header_index == ~0)
- return;
-
- /* authority fixup */
- host = vec_elt_at_index (hc->req.headers, hc->req.connection_header_index);
- hc->req.target_authority_offset = host->value_offset;
- hc->req.target_authority_len = host->value_len;
-}
-
-static http_sm_result_t
-http_req_state_wait_transport_method (http_conn_t *hc,
- transport_send_params_t *sp)
-{
- http_status_code_t ec;
- app_worker_t *app_wrk;
- http_msg_t msg;
- session_t *as;
- int rv;
- u32 len, max_enq, body_sent;
- u64 max_deq;
-
- rv = http_read_message (hc);
-
- /* Nothing yet, wait for data or timer expire */
- if (rv)
- return HTTP_SM_STOP;
-
- HTTP_DBG (3, "%v", hc->req.rx_buf);
-
- if (vec_len (hc->req.rx_buf) < 8)
- {
- ec = HTTP_STATUS_BAD_REQUEST;
- goto error;
- }
-
- rv = http_parse_request_line (&hc->req, &ec);
- if (rv)
- goto error;
-
- rv = http_identify_headers (&hc->req, &ec);
- if (rv)
- goto error;
-
- http_target_fixup (hc);
- http_check_connection_upgrade (&hc->req);
-
- rv = http_identify_message_body (&hc->req, &ec);
- if (rv)
- goto error;
-
- /* send at least "control data" which is necessary minimum,
- * if there is some space send also portion of body */
- as = session_get_from_handle (hc->h_pa_session_handle);
- max_enq = svm_fifo_max_enqueue (as->rx_fifo);
- if (max_enq < hc->req.control_data_len)
- {
- clib_warning ("not enough room for control data in app's rx fifo");
- ec = HTTP_STATUS_INTERNAL_ERROR;
- goto error;
- }
- /* do not dequeue more than one HTTP request, we do not support pipelining */
- max_deq = clib_min (hc->req.control_data_len + hc->req.body_len,
- vec_len (hc->req.rx_buf));
- len = clib_min (max_enq, max_deq);
-
- msg.type = HTTP_MSG_REQUEST;
- msg.method_type = hc->req.method;
- msg.data.type = HTTP_MSG_DATA_INLINE;
- msg.data.len = len;
- msg.data.scheme = hc->req.scheme;
- msg.data.target_authority_offset = hc->req.target_authority_offset;
- msg.data.target_authority_len = hc->req.target_authority_len;
- msg.data.target_path_offset = hc->req.target_path_offset;
- msg.data.target_path_len = hc->req.target_path_len;
- msg.data.target_query_offset = hc->req.target_query_offset;
- msg.data.target_query_len = hc->req.target_query_len;
- msg.data.headers_offset = hc->req.headers_offset;
- msg.data.headers_len = hc->req.headers_len;
- msg.data.body_offset = hc->req.body_offset;
- msg.data.body_len = hc->req.body_len;
- msg.data.headers_ctx = pointer_to_uword (hc->req.headers);
- msg.data.upgrade_proto = hc->req.upgrade_proto;
-
- svm_fifo_seg_t segs[2] = { { (u8 *) &msg, sizeof (msg) },
- { hc->req.rx_buf, len } };
-
- rv = svm_fifo_enqueue_segments (as->rx_fifo, segs, 2, 0 /* allow partial */);
- ASSERT (rv == (sizeof (msg) + len));
-
- body_sent = len - hc->req.control_data_len;
- hc->req.to_recv = hc->req.body_len - body_sent;
- if (hc->req.to_recv == 0)
- {
- /* drop everything, we do not support pipelining */
- http_read_message_drop_all (hc);
- /* all sent, we are done */
- http_req_state_change (hc, HTTP_REQ_STATE_WAIT_APP_REPLY);
- }
- else
- {
- http_read_message_drop (hc, len);
- /* stream rest of the response body */
- http_req_state_change (hc, HTTP_REQ_STATE_TRANSPORT_IO_MORE_DATA);
- }
-
- app_wrk = app_worker_get_if_valid (as->app_wrk_index);
- if (app_wrk)
- app_worker_rx_notify (app_wrk, as);
-
- return HTTP_SM_STOP;
-
-error:
- http_read_message_drop_all (hc);
- http_send_error (hc, ec);
- session_transport_closing_notify (&hc->connection);
- http_disconnect_transport (hc);
-
- return HTTP_SM_ERROR;
-}
-
-static void
-http_write_app_headers (http_conn_t *hc, http_msg_t *msg, u8 **tx_buf)
-{
- http_main_t *hm = &http_main;
- session_t *as;
- u8 *app_headers, *p, *end;
- u32 *tmp;
- int rv;
-
- as = session_get_from_handle (hc->h_pa_session_handle);
-
- /* read app header list */
- if (msg->data.type == HTTP_MSG_DATA_PTR)
- {
- uword app_headers_ptr;
- rv = svm_fifo_dequeue (as->tx_fifo, sizeof (app_headers_ptr),
- (u8 *) &app_headers_ptr);
- ASSERT (rv == sizeof (app_headers_ptr));
- app_headers = uword_to_pointer (app_headers_ptr, u8 *);
- }
- else
- {
- app_headers = hm->app_header_lists[hc->c_thread_index];
- rv = svm_fifo_dequeue (as->tx_fifo, msg->data.headers_len, app_headers);
- ASSERT (rv == msg->data.headers_len);
- }
-
- /* serialize app headers to tx_buf */
- end = app_headers + msg->data.headers_len;
- while (app_headers < end)
- {
- /* custom header name? */
- tmp = (u32 *) app_headers;
- if (PREDICT_FALSE (*tmp & HTTP_CUSTOM_HEADER_NAME_BIT))
- {
- http_custom_token_t *name, *value;
- name = (http_custom_token_t *) app_headers;
- u32 name_len = name->len & ~HTTP_CUSTOM_HEADER_NAME_BIT;
- app_headers += sizeof (http_custom_token_t) + name_len;
- value = (http_custom_token_t *) app_headers;
- app_headers += sizeof (http_custom_token_t) + value->len;
- vec_add2 (*tx_buf, p, name_len + value->len + 4);
- clib_memcpy (p, name->token, name_len);
- p += name_len;
- *p++ = ':';
- *p++ = ' ';
- clib_memcpy (p, value->token, value->len);
- p += value->len;
- *p++ = '\r';
- *p++ = '\n';
- }
- else
- {
- http_app_header_t *header;
- header = (http_app_header_t *) app_headers;
- app_headers += sizeof (http_app_header_t) + header->value.len;
- http_token_t name = { http_header_name_token (header->name) };
- vec_add2 (*tx_buf, p, name.len + header->value.len + 4);
- clib_memcpy (p, name.base, name.len);
- p += name.len;
- *p++ = ':';
- *p++ = ' ';
- clib_memcpy (p, header->value.token, header->value.len);
- p += header->value.len;
- *p++ = '\r';
- *p++ = '\n';
- }
- }
-}
-
-static http_sm_result_t
-http_req_state_wait_app_reply (http_conn_t *hc, transport_send_params_t *sp)
-{
- http_main_t *hm = &http_main;
- u8 *response;
- u32 sent;
- f64 now;
- session_t *as;
- http_status_code_t sc;
- http_msg_t msg;
- int rv;
- http_sm_result_t sm_result = HTTP_SM_ERROR;
- http_req_state_t next_state = HTTP_REQ_STATE_WAIT_TRANSPORT_METHOD;
-
- as = session_get_from_handle (hc->h_pa_session_handle);
-
- rv = svm_fifo_dequeue (as->tx_fifo, sizeof (msg), (u8 *) &msg);
- ASSERT (rv == sizeof (msg));
-
- if (msg.data.type > HTTP_MSG_DATA_PTR)
- {
- clib_warning ("no data");
- sc = HTTP_STATUS_INTERNAL_ERROR;
- goto error;
- }
-
- if (msg.type != HTTP_MSG_REPLY)
- {
- clib_warning ("unexpected message type %d", msg.type);
- sc = HTTP_STATUS_INTERNAL_ERROR;
- goto error;
- }
-
- if (msg.code >= HTTP_N_STATUS)
- {
- clib_warning ("unsupported status code: %d", msg.code);
- return HTTP_SM_ERROR;
- }
-
- response = hm->tx_bufs[hc->c_thread_index];
- vec_reset_length (response);
- /*
- * Add "protocol layer" headers:
- * - current time
- * - server name
- * - data length
- */
- now = clib_timebase_now (&hm->timebase);
- response =
- format (response, http_response_template, http_status_code_str[msg.code],
- /* Date */
- format_clib_timebase_time, now,
- /* Server */
- hc->app_name);
-
- /* RFC9110 8.6: A server MUST NOT send Content-Length header field in a
- * 2xx (Successful) response to CONNECT or with a status code of 101
- * (Switching Protocols). */
- if (hc->req.is_tunnel && (http_status_code_str[msg.code][0] == '2' ||
- msg.code == HTTP_STATUS_SWITCHING_PROTOCOLS))
- {
- ASSERT (msg.data.body_len == 0);
- next_state = HTTP_REQ_STATE_TUNNEL;
- if (hc->req.upgrade_proto > HTTP_UPGRADE_PROTO_NA)
- {
- response = format (response, connection_upgrade_template,
- http_upgrade_proto_str[hc->req.upgrade_proto]);
- if (hc->req.upgrade_proto == HTTP_UPGRADE_PROTO_CONNECT_UDP &&
- hc->udp_tunnel_mode == HTTP_UDP_TUNNEL_DGRAM)
- next_state = HTTP_REQ_STATE_UDP_TUNNEL;
- }
- /* cleanup some stuff we don't need anymore in tunnel mode */
- vec_free (hc->req.rx_buf);
- vec_free (hc->req.headers);
- http_buffer_free (&hc->req.tx_buf);
- hc->req.to_skip = 0;
- }
- else
- response = format (response, content_len_template, msg.data.body_len);
-
- /* Add headers from app (if any) */
- if (msg.data.headers_len)
- {
- HTTP_DBG (0, "got headers from app, len %d", msg.data.headers_len);
- http_write_app_headers (hc, &msg, &response);
- }
- /* Add empty line after headers */
- response = format (response, "\r\n");
- HTTP_DBG (3, "%v", response);
-
- sent = http_send_data (hc, response, vec_len (response));
- if (sent != vec_len (response))
- {
- clib_warning ("sending status-line and headers failed!");
- sc = HTTP_STATUS_INTERNAL_ERROR;
- goto error;
- }
-
- if (msg.data.body_len)
- {
- /* Start sending the actual data */
- http_buffer_init (&hc->req.tx_buf, msg_to_buf_type[msg.data.type],
- as->tx_fifo, msg.data.body_len);
- next_state = HTTP_REQ_STATE_APP_IO_MORE_DATA;
- sm_result = HTTP_SM_CONTINUE;
- }
- else
- {
- /* No response body, we are done */
- sm_result = HTTP_SM_STOP;
- }
-
- http_req_state_change (hc, next_state);
-
- ASSERT (sp->max_burst_size >= sent);
- sp->max_burst_size -= sent;
- return sm_result;
-
-error:
- http_send_error (hc, sc);
- session_transport_closing_notify (&hc->connection);
- http_disconnect_transport (hc);
- return HTTP_SM_STOP;
-}
-
-static http_sm_result_t
-http_req_state_wait_app_method (http_conn_t *hc, transport_send_params_t *sp)
-{
- http_main_t *hm = &http_main;
- http_msg_t msg;
- session_t *as;
- u8 *target_buff = 0, *request = 0, *target;
- u32 sent;
- int rv;
- http_sm_result_t sm_result = HTTP_SM_ERROR;
- http_req_state_t next_state;
-
- as = session_get_from_handle (hc->h_pa_session_handle);
-
- rv = svm_fifo_dequeue (as->tx_fifo, sizeof (msg), (u8 *) &msg);
- ASSERT (rv == sizeof (msg));
-
- if (msg.data.type > HTTP_MSG_DATA_PTR)
- {
- clib_warning ("no data");
- goto error;
- }
-
- if (msg.type != HTTP_MSG_REQUEST)
- {
- clib_warning ("unexpected message type %d", msg.type);
- goto error;
- }
-
- /* read request target */
- if (msg.data.type == HTTP_MSG_DATA_PTR)
- {
- uword target_ptr;
- rv = svm_fifo_dequeue (as->tx_fifo, sizeof (target_ptr),
- (u8 *) &target_ptr);
- ASSERT (rv == sizeof (target_ptr));
- target = uword_to_pointer (target_ptr, u8 *);
- }
- else
- {
- vec_validate (target_buff, msg.data.target_path_len - 1);
- rv =
- svm_fifo_dequeue (as->tx_fifo, msg.data.target_path_len, target_buff);
- ASSERT (rv == msg.data.target_path_len);
- target = target_buff;
- }
-
- request = hm->tx_bufs[hc->c_thread_index];
- vec_reset_length (request);
- /* currently we support only GET and POST method */
- if (msg.method_type == HTTP_REQ_GET)
- {
- if (msg.data.body_len)
- {
- clib_warning ("GET request shouldn't include data");
- goto error;
- }
- /*
- * Add "protocol layer" headers:
- * - host
- * - user agent
- */
- request = format (request, http_get_request_template,
- /* target */
- target,
- /* Host */
- hc->host,
- /* User-Agent */
- hc->app_name);
-
- next_state = HTTP_REQ_STATE_WAIT_TRANSPORT_REPLY;
- sm_result = HTTP_SM_STOP;
- }
- else if (msg.method_type == HTTP_REQ_POST)
- {
- if (!msg.data.body_len)
- {
- clib_warning ("POST request should include data");
- goto error;
- }
- /*
- * Add "protocol layer" headers:
- * - host
- * - user agent
- * - content length
- */
- request = format (request, http_post_request_template,
- /* target */
- target,
- /* Host */
- hc->host,
- /* User-Agent */
- hc->app_name,
- /* Content-Length */
- msg.data.body_len);
-
- http_buffer_init (&hc->req.tx_buf, msg_to_buf_type[msg.data.type],
- as->tx_fifo, msg.data.body_len);
-
- next_state = HTTP_REQ_STATE_APP_IO_MORE_DATA;
- sm_result = HTTP_SM_CONTINUE;
- }
- else
- {
- clib_warning ("unsupported method %d", msg.method_type);
- goto error;
- }
-
- /* Add headers from app (if any) */
- if (msg.data.headers_len)
- {
- HTTP_DBG (0, "got headers from app, len %d", msg.data.headers_len);
- http_write_app_headers (hc, &msg, &request);
- }
- /* Add empty line after headers */
- request = format (request, "\r\n");
- HTTP_DBG (3, "%v", request);
-
- sent = http_send_data (hc, request, vec_len (request));
- if (sent != vec_len (request))
- {
- clib_warning ("sending request-line and headers failed!");
- sm_result = HTTP_SM_ERROR;
- goto error;
- }
-
- http_req_state_change (hc, next_state);
- goto done;
-
-error:
- svm_fifo_dequeue_drop_all (as->tx_fifo);
- session_transport_closing_notify (&hc->connection);
- session_transport_closed_notify (&hc->connection);
- http_disconnect_transport (hc);
-
-done:
- vec_free (target_buff);
- return sm_result;
-}
-
-static http_sm_result_t
-http_req_state_transport_io_more_data (http_conn_t *hc,
- transport_send_params_t *sp)
-{
- session_t *as, *ts;
- app_worker_t *app_wrk;
- svm_fifo_seg_t _seg, *seg = &_seg;
- u32 max_len, max_deq, max_enq, n_segs = 1;
- int rv, len;
-
- as = session_get_from_handle (hc->h_pa_session_handle);
- ts = session_get_from_handle (hc->h_tc_session_handle);
-
- max_deq = svm_fifo_max_dequeue (ts->rx_fifo);
- if (max_deq == 0)
- {
- HTTP_DBG (1, "no data to deq");
- return HTTP_SM_STOP;
- }
-
- max_enq = svm_fifo_max_enqueue (as->rx_fifo);
- if (max_enq == 0)
- {
- HTTP_DBG (1, "app's rx fifo full");
- svm_fifo_add_want_deq_ntf (as->rx_fifo, SVM_FIFO_WANT_DEQ_NOTIF);
- return HTTP_SM_STOP;
- }
-
- max_len = clib_min (max_enq, max_deq);
- len = svm_fifo_segments (ts->rx_fifo, 0, seg, &n_segs, max_len);
- if (len < 0)
- {
- HTTP_DBG (1, "svm_fifo_segments() len %d", len);
- return HTTP_SM_STOP;
- }
-
- rv = svm_fifo_enqueue_segments (as->rx_fifo, seg, 1, 0 /* allow partial */);
- if (rv < 0)
- {
- clib_warning ("data enqueue failed, rv: %d", rv);
- return HTTP_SM_ERROR;
- }
-
- svm_fifo_dequeue_drop (ts->rx_fifo, rv);
- if (rv > hc->req.to_recv)
- {
- clib_warning ("http protocol error: received more data than expected");
- session_transport_closing_notify (&hc->connection);
- http_disconnect_transport (hc);
- http_req_state_change (hc, HTTP_REQ_STATE_WAIT_APP_METHOD);
- return HTTP_SM_ERROR;
- }
- hc->req.to_recv -= rv;
- HTTP_DBG (1, "drained %d from ts; remains %lu", rv, hc->req.to_recv);
-
- /* Finished transaction:
- * server back to HTTP_REQ_STATE_WAIT_APP_REPLY
- * client to HTTP_REQ_STATE_WAIT_APP_METHOD */
- if (hc->req.to_recv == 0)
- http_req_state_change (hc, hc->is_server ? HTTP_REQ_STATE_WAIT_APP_REPLY :
- HTTP_REQ_STATE_WAIT_APP_METHOD);
-
- app_wrk = app_worker_get_if_valid (as->app_wrk_index);
- if (app_wrk)
- app_worker_rx_notify (app_wrk, as);
-
- if (svm_fifo_max_dequeue_cons (ts->rx_fifo))
- session_enqueue_notify (ts);
-
- return HTTP_SM_STOP;
-}
-
-static http_sm_result_t
-http_req_state_app_io_more_data (http_conn_t *hc, transport_send_params_t *sp)
-{
- u32 max_send = 64 << 10, n_segs;
- http_buffer_t *hb = &hc->req.tx_buf;
- svm_fifo_seg_t *seg;
- session_t *ts;
- int sent = 0;
-
- max_send = clib_min (max_send, sp->max_burst_size);
- ts = session_get_from_handle (hc->h_tc_session_handle);
- if ((seg = http_buffer_get_segs (hb, max_send, &n_segs)))
- sent = svm_fifo_enqueue_segments (ts->tx_fifo, seg, n_segs,
- 1 /* allow partial */);
-
- if (sent > 0)
- {
- /* Ask scheduler to notify app of deq event if needed */
- sp->bytes_dequeued += http_buffer_drain (hb, sent);
- sp->max_burst_size -= sent;
- }
-
- /* Not finished sending all data */
- if (!http_buffer_is_drained (hb))
- {
- if (sent && svm_fifo_set_event (ts->tx_fifo))
- session_program_tx_io_evt (ts->handle, SESSION_IO_EVT_TX);
-
- if (svm_fifo_max_enqueue (ts->tx_fifo) < HTTP_FIFO_THRESH)
- {
- /* Deschedule http session and wait for deq notification if
- * underlying ts tx fifo almost full */
- svm_fifo_add_want_deq_ntf (ts->tx_fifo, SVM_FIFO_WANT_DEQ_NOTIF);
- transport_connection_deschedule (&hc->connection);
- sp->flags |= TRANSPORT_SND_F_DESCHED;
- }
- }
- else
- {
- if (sent && svm_fifo_set_event (ts->tx_fifo))
- session_program_tx_io_evt (ts->handle, SESSION_IO_EVT_TX_FLUSH);
-
- /* Finished transaction:
- * server back to HTTP_REQ_STATE_WAIT_TRANSPORT_METHOD
- * client to HTTP_REQ_STATE_WAIT_TRANSPORT_REPLY */
- http_req_state_change (hc, hc->is_server ?
- HTTP_REQ_STATE_WAIT_TRANSPORT_METHOD :
- HTTP_REQ_STATE_WAIT_TRANSPORT_REPLY);
- http_buffer_free (hb);
- }
-
- return HTTP_SM_STOP;
-}
-
-static http_sm_result_t
-http_req_state_tunnel_rx (http_conn_t *hc, transport_send_params_t *sp)
-{
- u32 max_deq, max_enq, max_read, n_segs = 2;
- svm_fifo_seg_t segs[n_segs];
- int n_written = 0;
- session_t *as, *ts;
- app_worker_t *app_wrk;
-
- HTTP_DBG (1, "tunnel received data from client");
-
- as = session_get_from_handle (hc->h_pa_session_handle);
- ts = session_get_from_handle (hc->h_tc_session_handle);
-
- max_deq = svm_fifo_max_dequeue (ts->rx_fifo);
- if (PREDICT_FALSE (max_deq == 0))
- {
- HTTP_DBG (1, "max_deq == 0");
- return HTTP_SM_STOP;
- }
- max_enq = svm_fifo_max_enqueue (as->rx_fifo);
- if (max_enq == 0)
- {
- HTTP_DBG (1, "app's rx fifo full");
- svm_fifo_add_want_deq_ntf (as->rx_fifo, SVM_FIFO_WANT_DEQ_NOTIF);
- return HTTP_SM_STOP;
- }
- max_read = clib_min (max_enq, max_deq);
- svm_fifo_segments (ts->rx_fifo, 0, segs, &n_segs, max_read);
- n_written = svm_fifo_enqueue_segments (as->rx_fifo, segs, n_segs, 0);
- ASSERT (n_written > 0);
- HTTP_DBG (1, "transfered %u bytes", n_written);
- svm_fifo_dequeue_drop (ts->rx_fifo, n_written);
- app_wrk = app_worker_get_if_valid (as->app_wrk_index);
- if (app_wrk)
- app_worker_rx_notify (app_wrk, as);
- if (svm_fifo_max_dequeue_cons (ts->rx_fifo))
- session_program_rx_io_evt (session_handle (ts));
-
- return HTTP_SM_STOP;
-}
-
-static http_sm_result_t
-http_req_state_tunnel_tx (http_conn_t *hc, transport_send_params_t *sp)
-{
- u32 max_deq, max_enq, max_read, n_segs = 2;
- svm_fifo_seg_t segs[n_segs];
- session_t *as, *ts;
- int n_written = 0;
- HTTP_DBG (1, "tunnel received data from target");
-
- as = session_get_from_handle (hc->h_pa_session_handle);
- ts = session_get_from_handle (hc->h_tc_session_handle);
-
- max_deq = svm_fifo_max_dequeue_cons (as->tx_fifo);
- if (PREDICT_FALSE (max_deq == 0))
- {
- HTTP_DBG (1, "max_deq == 0");
- goto check_fifo;
- }
- max_enq = svm_fifo_max_enqueue_prod (ts->tx_fifo);
- if (max_enq == 0)
+ if (hc_handle.version == HTTP_VERSION_NA)
{
- HTTP_DBG (1, "ts tx fifo full");
- goto check_fifo;
- }
- max_read = clib_min (max_enq, max_deq);
- max_read = clib_min (max_read, sp->max_burst_size);
- svm_fifo_segments (as->tx_fifo, 0, segs, &n_segs, max_read);
- n_written = svm_fifo_enqueue_segments (ts->tx_fifo, segs, n_segs, 0);
- ASSERT (n_written > 0);
- HTTP_DBG (1, "transfered %u bytes", n_written);
- sp->bytes_dequeued += n_written;
- sp->max_burst_size -= n_written;
- svm_fifo_dequeue_drop (as->tx_fifo, n_written);
- if (svm_fifo_set_event (ts->tx_fifo))
- session_program_tx_io_evt (ts->handle, SESSION_IO_EVT_TX);
-
-check_fifo:
- /* Deschedule and wait for deq notification if ts fifo is almost full */
- if (svm_fifo_max_enqueue (ts->tx_fifo) < HTTP_FIFO_THRESH)
- {
- svm_fifo_add_want_deq_ntf (ts->tx_fifo, SVM_FIFO_WANT_DEQ_NOTIF);
- transport_connection_deschedule (&hc->connection);
- sp->flags |= TRANSPORT_SND_F_DESCHED;
- }
-
- return HTTP_SM_STOP;
-}
-
-static http_sm_result_t
-http_req_state_udp_tunnel_rx (http_conn_t *hc, transport_send_params_t *sp)
-{
- http_main_t *hm = &http_main;
- u32 to_deq, capsule_size, dgram_size, n_written = 0;
- int rv, n_read;
- session_t *as, *ts;
- app_worker_t *app_wrk;
- u8 payload_offset;
- u64 payload_len;
- session_dgram_hdr_t hdr;
- u8 *buf = 0;
-
- HTTP_DBG (1, "udp tunnel received data from client");
-
- as = session_get_from_handle (hc->h_pa_session_handle);
- ts = session_get_from_handle (hc->h_tc_session_handle);
- buf = hm->rx_bufs[hc->c_thread_index];
- to_deq = svm_fifo_max_dequeue_cons (ts->rx_fifo);
-
- while (to_deq > 0)
- {
- /* some bytes remaining to skip? */
- if (PREDICT_FALSE (hc->req.to_skip))
+ HTTP_DBG (1, "unknown http version");
+ max_deq = svm_fifo_max_dequeue_cons (ts->rx_fifo);
+ if (max_deq >= http2_conn_preface.len)
{
- if (hc->req.to_skip >= to_deq)
+ rx_buf = http_get_rx_buf (hc);
+ svm_fifo_peek (ts->rx_fifo, 0, http2_conn_preface.len, rx_buf);
+ if (memcmp (rx_buf, http2_conn_preface.base,
+ http2_conn_preface.len) == 0)
{
- svm_fifo_dequeue_drop (ts->rx_fifo, to_deq);
- hc->req.to_skip -= to_deq;
- goto done;
- }
- else
- {
- svm_fifo_dequeue_drop (ts->rx_fifo, hc->req.to_skip);
- hc->req.to_skip = 0;
- }
- }
- n_read =
- svm_fifo_peek (ts->rx_fifo, 0, HTTP_CAPSULE_HEADER_MAX_SIZE, buf);
- ASSERT (n_read > 0);
- rv = http_decap_udp_payload_datagram (buf, n_read, &payload_offset,
- &payload_len);
- HTTP_DBG (1, "rv=%d, payload_offset=%u, payload_len=%llu", rv,
- payload_offset, payload_len);
- if (PREDICT_FALSE (rv != 0))
- {
- if (rv < 0)
- {
- /* capsule datagram is invalid (session need to be aborted) */
+#if HTTP_2_ENABLE > 0
+ hc->version = HTTP_VERSION_2;
+ http_vfts[hc->version].conn_accept_callback (hc);
+#else
svm_fifo_dequeue_drop_all (ts->rx_fifo);
- session_transport_closing_notify (&hc->connection);
- session_transport_closed_notify (&hc->connection);
http_disconnect_transport (hc);
- return HTTP_SM_STOP;
+ return 0;
+#endif
}
else
- {
- /* unknown capsule should be skipped */
- if (payload_len <= to_deq)
- {
- svm_fifo_dequeue_drop (ts->rx_fifo, payload_len);
- to_deq -= payload_len;
- continue;
- }
- else
- {
- svm_fifo_dequeue_drop (ts->rx_fifo, to_deq);
- hc->req.to_skip = payload_len - to_deq;
- goto done;
- }
- }
- }
- capsule_size = payload_offset + payload_len;
- /* check if we have the full capsule */
- if (PREDICT_FALSE (to_deq < capsule_size))
- {
- HTTP_DBG (1, "capsule not complete");
- goto done;
- }
-
- dgram_size = sizeof (hdr) + payload_len;
- if (svm_fifo_max_enqueue_prod (as->rx_fifo) < dgram_size)
- {
- HTTP_DBG (1, "app's rx fifo full");
- svm_fifo_add_want_deq_ntf (as->rx_fifo, SVM_FIFO_WANT_DEQ_NOTIF);
- goto done;
- }
-
- /* read capsule payload */
- rv = svm_fifo_peek (ts->rx_fifo, payload_offset, payload_len, buf);
- ASSERT (rv == payload_len);
- svm_fifo_dequeue_drop (ts->rx_fifo, capsule_size);
-
- hdr.data_length = payload_len;
- hdr.data_offset = 0;
-
- /* send datagram header and payload */
- svm_fifo_seg_t segs[2] = { { (u8 *) &hdr, sizeof (hdr) },
- { buf, payload_len } };
- rv = svm_fifo_enqueue_segments (as->rx_fifo, segs, 2, 0);
- ASSERT (rv > 0);
-
- n_written += dgram_size;
- to_deq -= capsule_size;
- }
-
-done:
- HTTP_DBG (1, "written %lu bytes", n_written);
-
- if (n_written)
- {
- app_wrk = app_worker_get_if_valid (as->app_wrk_index);
- if (app_wrk)
- app_worker_rx_notify (app_wrk, as);
- }
- if (svm_fifo_max_dequeue_cons (ts->rx_fifo))
- session_program_rx_io_evt (session_handle (ts));
-
- return HTTP_SM_STOP;
-}
-
-static http_sm_result_t
-http_req_state_udp_tunnel_tx (http_conn_t *hc, transport_send_params_t *sp)
-{
- http_main_t *hm = &http_main;
- u32 to_deq, capsule_size, dgram_size, n_written = 0;
- session_t *as, *ts;
- int rv;
- session_dgram_pre_hdr_t hdr;
- u8 *buf;
- u8 *payload;
-
- HTTP_DBG (1, "udp tunnel received data from target");
-
- as = session_get_from_handle (hc->h_pa_session_handle);
- ts = session_get_from_handle (hc->h_tc_session_handle);
- buf = hm->tx_bufs[hc->c_thread_index];
- to_deq = svm_fifo_max_dequeue_cons (as->tx_fifo);
-
- while (to_deq > 0)
- {
- /* read datagram header */
- rv = svm_fifo_peek (as->tx_fifo, 0, sizeof (hdr), (u8 *) &hdr);
- ASSERT (rv == sizeof (hdr) &&
- hdr.data_length <= HTTP_UDP_PAYLOAD_MAX_LEN);
- ASSERT (to_deq >= hdr.data_length + SESSION_CONN_HDR_LEN);
- dgram_size = hdr.data_length + SESSION_CONN_HDR_LEN;
-
- if (svm_fifo_max_enqueue_prod (ts->tx_fifo) <
- (hdr.data_length + HTTP_UDP_PROXY_DATAGRAM_CAPSULE_OVERHEAD))
- {
- HTTP_DBG (1, "ts tx fifo full");
- goto done;
+ hc->version = HTTP_VERSION_1;
}
-
- /* create capsule header */
- payload = http_encap_udp_payload_datagram (buf, hdr.data_length);
- capsule_size = (payload - buf) + hdr.data_length;
- /* read payload */
- rv = svm_fifo_peek (as->tx_fifo, SESSION_CONN_HDR_LEN, hdr.data_length,
- payload);
- ASSERT (rv == hdr.data_length);
- svm_fifo_dequeue_drop (as->tx_fifo, dgram_size);
- /* send capsule */
- rv = svm_fifo_enqueue (ts->tx_fifo, capsule_size, buf);
- ASSERT (rv == capsule_size);
-
- n_written += capsule_size;
- to_deq -= dgram_size;
- }
-
-done:
- HTTP_DBG (1, "written %lu bytes", n_written);
- if (n_written)
- {
- if (svm_fifo_set_event (ts->tx_fifo))
- session_program_tx_io_evt (ts->handle, SESSION_IO_EVT_TX);
- }
-
- /* Deschedule and wait for deq notification if ts fifo is almost full */
- if (svm_fifo_max_enqueue (ts->tx_fifo) < HTTP_FIFO_THRESH)
- {
- svm_fifo_add_want_deq_ntf (ts->tx_fifo, SVM_FIFO_WANT_DEQ_NOTIF);
- transport_connection_deschedule (&hc->connection);
- sp->flags |= TRANSPORT_SND_F_DESCHED;
- }
-
- return HTTP_SM_STOP;
-}
-
-typedef http_sm_result_t (*http_sm_handler) (http_conn_t *,
- transport_send_params_t *sp);
-
-static http_sm_handler tx_state_funcs[HTTP_REQ_N_STATES] = {
- 0, /* idle */
- http_req_state_wait_app_method,
- 0, /* wait transport reply */
- 0, /* transport io more data */
- 0, /* wait transport method */
- http_req_state_wait_app_reply,
- http_req_state_app_io_more_data,
- http_req_state_tunnel_tx,
- http_req_state_udp_tunnel_tx,
-};
-
-static_always_inline int
-http_req_state_is_tx_valid (http_conn_t *hc)
-{
- return tx_state_funcs[hc->req.state] ? 1 : 0;
-}
-
-static http_sm_handler rx_state_funcs[HTTP_REQ_N_STATES] = {
- 0, /* idle */
- 0, /* wait app method */
- http_req_state_wait_transport_reply,
- http_req_state_transport_io_more_data,
- http_req_state_wait_transport_method,
- 0, /* wait app reply */
- 0, /* app io more data */
- http_req_state_tunnel_rx,
- http_req_state_udp_tunnel_rx,
-};
-
-static_always_inline int
-http_req_state_is_rx_valid (http_conn_t *hc)
-{
- return rx_state_funcs[hc->req.state] ? 1 : 0;
-}
-
-static_always_inline void
-http_req_run_state_machine (http_conn_t *hc, transport_send_params_t *sp,
- u8 is_tx)
-{
- http_sm_result_t res;
-
- do
- {
- if (is_tx)
- res = tx_state_funcs[hc->req.state](hc, sp);
else
- res = rx_state_funcs[hc->req.state](hc, sp);
- if (res == HTTP_SM_ERROR)
- {
- HTTP_DBG (1, "error in state machine %d", res);
- return;
- }
+ hc->version = HTTP_VERSION_1;
+
+ HTTP_DBG (1, "identified HTTP/%u",
+ hc->version == HTTP_VERSION_1 ? 1 : 2);
+ hc_handle.version = hc->version;
+ ts->opaque = hc_handle.as_u32;
}
- while (res == HTTP_SM_CONTINUE);
+ http_vfts[hc_handle.version].transport_rx_callback (hc);
- /* Reset the session expiration timer */
- http_conn_timer_update (hc);
+ if (hc->state == HTTP_CONN_STATE_TRANSPORT_CLOSED)
+ http_vfts[hc->version].transport_close_callback (hc);
+ return 0;
}
-static int
-http_ts_rx_callback (session_t *ts)
+int
+http_ts_builtin_tx_callback (session_t *ts)
{
http_conn_t *hc;
+ http_conn_handle_t hc_handle;
- HTTP_DBG (1, "hc [%u]%x", ts->thread_index, ts->opaque);
+ hc_handle.as_u32 = ts->opaque;
- hc = http_conn_get_w_thread (ts->opaque, ts->thread_index);
-
- if (hc->state == HTTP_CONN_STATE_CLOSED)
- {
- HTTP_DBG (1, "conn closed");
- svm_fifo_dequeue_drop_all (ts->rx_fifo);
- return 0;
- }
-
- if (!http_req_state_is_rx_valid (hc))
- {
- clib_warning ("hc [%u]%x invalid rx state: http req state "
- "'%U', session state '%U'",
- ts->thread_index, ts->opaque, format_http_req_state,
- hc->req.state, format_http_conn_state, hc);
- svm_fifo_dequeue_drop_all (ts->rx_fifo);
- return 0;
- }
-
- HTTP_DBG (1, "run state machine");
- http_req_run_state_machine (hc, 0, 0);
+ hc = http_conn_get_w_thread (hc_handle.conn_index, ts->thread_index);
+ HTTP_DBG (1, "transport connection reschedule");
+ http_vfts[hc->version].transport_conn_reschedule_callback (hc);
- if (hc->state == HTTP_CONN_STATE_TRANSPORT_CLOSED)
- {
- if (!svm_fifo_max_dequeue_cons (ts->rx_fifo))
- session_transport_closing_notify (&hc->connection);
- }
return 0;
}
-int
-http_ts_builtin_tx_callback (session_t *ts)
+static void
+http_ts_closed_callback (session_t *ts)
{
+ http_conn_handle_t hc_handle;
http_conn_t *hc;
- hc = http_conn_get_w_thread (ts->opaque, ts->thread_index);
- HTTP_DBG (1, "transport connection reschedule");
- transport_connection_reschedule (&hc->connection);
+ hc_handle.as_u32 = ts->opaque;
+ hc = http_conn_get_w_thread (hc_handle.conn_index, ts->thread_index);
- return 0;
+ http_disconnect_transport (hc);
+ hc->state = HTTP_CONN_STATE_CLOSED;
}
static void
http_ts_cleanup_callback (session_t *ts, session_cleanup_ntf_t ntf)
{
http_conn_t *hc;
+ http_conn_handle_t hc_handle;
if (ntf == SESSION_CLEANUP_TRANSPORT)
return;
- hc = http_conn_get_w_thread (ts->opaque, ts->thread_index);
+ hc_handle.as_u32 = ts->opaque;
+ hc = http_conn_get_w_thread (hc_handle.conn_index, ts->thread_index);
- HTTP_DBG (1, "going to free hc [%u]%x", ts->thread_index, ts->opaque);
+ HTTP_DBG (1, "going to free hc [%u]%x", ts->thread_index,
+ hc_handle.conn_index);
- vec_free (hc->req.rx_buf);
- vec_free (hc->req.headers);
-
- http_buffer_free (&hc->req.tx_buf);
-
- if (hc->pending_timer == 0)
+ if (!(hc->flags & HTTP_CONN_F_PENDING_TIMER))
http_conn_timer_stop (hc);
- session_transport_delete_notify (&hc->connection);
+ /* in case nothing received on cleartext connection */
+ if (PREDICT_FALSE (hc->version != HTTP_VERSION_NA))
+ http_vfts[hc->version].conn_cleanup_callback (hc);
- if (!hc->is_server)
+ if (!(hc->flags & HTTP_CONN_F_IS_SERVER))
{
vec_free (hc->app_name);
vec_free (hc->host);
@@ -2253,11 +705,8 @@ http_ts_cleanup_callback (session_t *ts, session_cleanup_ntf_t ntf)
static void
http_ts_ho_cleanup_callback (session_t *ts)
{
- http_conn_t *ho_hc;
HTTP_DBG (1, "half open: %x", ts->opaque);
- ho_hc = http_ho_conn_get (ts->opaque);
- session_half_open_delete_notify (&ho_hc->connection);
- http_ho_conn_free (ho_hc);
+ http_ho_try_free (ts->opaque);
}
int
@@ -2278,6 +727,7 @@ static session_cb_vft_t http_app_cb_vft = {
.session_disconnect_callback = http_ts_disconnect_callback,
.session_connected_callback = http_ts_connected_callback,
.session_reset_callback = http_ts_reset_callback,
+ .session_transport_closed_callback = http_ts_closed_callback,
.session_cleanup_callback = http_ts_cleanup_callback,
.half_open_cleanup_callback = http_ts_ho_cleanup_callback,
.add_segment_callback = http_add_segment_callback,
@@ -2286,6 +736,10 @@ static session_cb_vft_t http_app_cb_vft = {
.builtin_app_tx_callback = http_ts_builtin_tx_callback,
};
+/*********************************/
+/* transport proto VFT callbacks */
+/*********************************/
+
static clib_error_t *
http_transport_enable (vlib_main_t *vm, u8 is_en)
{
@@ -2295,6 +749,7 @@ http_transport_enable (vlib_main_t *vm, u8 is_en)
u64 options[APP_OPTIONS_N_OPTIONS];
http_main_t *hm = &http_main;
u32 num_threads, i;
+ http_engine_vft_t *http_version;
if (!is_en)
{
@@ -2351,6 +806,12 @@ http_transport_enable (vlib_main_t *vm, u8 is_en)
http_timers_init (vm, http_conn_timeout_cb, http_conn_invalidate_timer_cb);
hm->is_init = 1;
+ vec_foreach (http_version, http_vfts)
+ {
+ if (http_version->enable_callback)
+ http_version->enable_callback ();
+ }
+
return 0;
}
@@ -2377,9 +838,11 @@ http_transport_connect (transport_endpoint_cfg_t *tep)
hc_index = http_ho_conn_alloc ();
hc = http_ho_conn_get (hc_index);
- hc->h_pa_wrk_index = sep->app_wrk_index;
- hc->h_pa_app_api_ctx = sep->opaque;
+ hc->hc_pa_wrk_index = sep->app_wrk_index;
+ hc->hc_pa_app_api_ctx = sep->opaque;
hc->state = HTTP_CONN_STATE_CONNECTING;
+ /* TODO: set to HTTP_VERSION_NA in case of TLS */
+ hc->version = HTTP_VERSION_1;
cargs->api_context = hc_index;
ext_cfg = session_endpoint_get_ext_cfg (sep, TRANSPORT_ENDPT_EXT_CFG_HTTP);
@@ -2391,7 +854,12 @@ http_transport_connect (transport_endpoint_cfg_t *tep)
hc->timeout = http_cfg->timeout;
}
- hc->is_server = 0;
+ ext_cfg = session_endpoint_get_ext_cfg (sep, TRANSPORT_ENDPT_EXT_CFG_CRYPTO);
+ if (ext_cfg)
+ {
+ HTTP_DBG (1, "app set tls");
+ cargs->sep.transport_proto = TRANSPORT_PROTO_TLS;
+ }
if (vec_len (app->name))
hc->app_name = vec_dup (app->name);
@@ -2416,7 +884,7 @@ http_transport_connect (transport_endpoint_cfg_t *tep)
ho->opaque = sep->opaque;
ho->session_type =
session_type_from_proto_and_ip (TRANSPORT_PROTO_HTTP, sep->is_ip4);
- hc->h_tc_session_handle = cargs->sh;
+ hc->hc_tc_session_handle = cargs->sh;
hc->c_s_index = ho->session_index;
return 0;
@@ -2436,6 +904,7 @@ http_start_listen (u32 app_listener_index, transport_endpoint_cfg_t *tep)
http_conn_t *lhc;
u32 lhc_index;
transport_endpt_ext_cfg_t *ext_cfg;
+ segment_manager_props_t *props;
sep = (session_endpoint_cfg_t *) tep;
@@ -2471,19 +940,22 @@ http_start_listen (u32 app_listener_index, transport_endpoint_cfg_t *tep)
}
/* Grab transport connection listener and link to http listener */
- lhc->h_tc_session_handle = args->handle;
- al = app_listener_get_w_handle (lhc->h_tc_session_handle);
+ lhc->hc_tc_session_handle = args->handle;
+ al = app_listener_get_w_handle (lhc->hc_tc_session_handle);
ts_listener = app_listener_get_session (al);
ts_listener->opaque = lhc_index;
/* Grab application listener and link to http listener */
app_listener = listen_session_get (app_listener_index);
- lhc->h_pa_wrk_index = sep->app_wrk_index;
- lhc->h_pa_session_handle = listen_session_get_handle (app_listener);
+ lhc->hc_pa_wrk_index = sep->app_wrk_index;
+ lhc->hc_pa_session_handle = listen_session_get_handle (app_listener);
lhc->c_s_index = app_listener_index;
lhc->c_flags |= TRANSPORT_CONNECTION_F_NO_LOOKUP;
- lhc->is_server = 1;
+ lhc->flags |= HTTP_CONN_F_IS_SERVER;
+
+ props = application_segment_manager_properties (app);
+ lhc->app_rx_fifo_size = props->rx_fifo_size;
if (vec_len (app->name))
lhc->app_name = vec_dup (app->name);
@@ -2502,7 +974,7 @@ http_stop_listen (u32 listener_index)
lhc = http_listener_get (listener_index);
vnet_unlisten_args_t a = {
- .handle = lhc->h_tc_session_handle,
+ .handle = lhc->hc_tc_session_handle,
.app_index = http_main.app_index,
.wrk_map_index = 0 /* default wrk */
};
@@ -2516,16 +988,22 @@ http_stop_listen (u32 listener_index)
}
static void
-http_transport_close (u32 hc_index, u32 thread_index)
+http_transport_close (u32 rh, clib_thread_index_t thread_index)
{
- session_t *as;
http_conn_t *hc;
+ u32 hc_index;
+ http_req_handle_t hr_handle;
+ hr_handle.as_u32 = rh;
+
+ hc_index = http_vfts[hr_handle.version].hc_index_get_by_req_index (
+ hr_handle.req_index, thread_index);
HTTP_DBG (1, "App disconnecting [%u]%x", thread_index, hc_index);
hc = http_conn_get_w_thread (hc_index, thread_index);
if (hc->state == HTTP_CONN_STATE_CONNECTING)
{
+ HTTP_DBG (1, "in connecting state, close now");
hc->state = HTTP_CONN_STATE_APP_CLOSED;
http_disconnect_transport (hc);
return;
@@ -2535,26 +1013,42 @@ http_transport_close (u32 hc_index, u32 thread_index)
HTTP_DBG (1, "nothing to do, already closed");
return;
}
- as = session_get_from_handle (hc->h_pa_session_handle);
- /* Nothing more to send, confirm close */
- if (!svm_fifo_max_dequeue_cons (as->tx_fifo))
- {
- session_transport_closed_notify (&hc->connection);
- http_disconnect_transport (hc);
- }
- else
+ http_vfts[hc->version].app_close_callback (hc, hr_handle.req_index,
+ thread_index);
+}
+
+static void
+http_transport_reset (u32 rh, clib_thread_index_t thread_index)
+{
+ http_conn_t *hc;
+ u32 hc_index;
+ http_req_handle_t hr_handle;
+
+ hr_handle.as_u32 = rh;
+ hc_index = http_vfts[hr_handle.version].hc_index_get_by_req_index (
+ hr_handle.req_index, thread_index);
+ HTTP_DBG (1, "App disconnecting [%u]%x", thread_index, hc_index);
+
+ hc = http_conn_get_w_thread (hc_index, thread_index);
+ if (hc->state == HTTP_CONN_STATE_CLOSED)
{
- /* Wait for all data to be written to ts */
- hc->state = HTTP_CONN_STATE_APP_CLOSED;
+ HTTP_DBG (1, "nothing to do, already closed");
+ return;
}
+
+ http_vfts[hc->version].app_reset_callback (hc, hr_handle.req_index,
+ thread_index);
}
static transport_connection_t *
-http_transport_get_connection (u32 hc_index, u32 thread_index)
+http_transport_get_connection (u32 rh, clib_thread_index_t thread_index)
{
- http_conn_t *hc = http_conn_get_w_thread (hc_index, thread_index);
- return &hc->connection;
+ http_req_handle_t hr_handle;
+
+ hr_handle.as_u32 = rh;
+ return http_vfts[hr_handle.version].req_get_connection (hr_handle.req_index,
+ thread_index);
}
static transport_connection_t *
@@ -2568,46 +1062,32 @@ static int
http_app_tx_callback (void *session, transport_send_params_t *sp)
{
session_t *as = (session_t *) session;
- u32 max_burst_sz, sent;
+ u32 max_burst_sz, sent, hc_index;
http_conn_t *hc;
+ http_req_handle_t hr_handle;
+ hr_handle.as_u32 = as->connection_index;
- HTTP_DBG (1, "hc [%u]%x", as->thread_index, as->connection_index);
+ hc_index = http_vfts[hr_handle.version].hc_index_get_by_req_index (
+ hr_handle.req_index, as->thread_index);
+ HTTP_DBG (1, "hc [%u]%x", hc_index, as->connection_index);
- hc = http_conn_get_w_thread (as->connection_index, as->thread_index);
+ hc = http_conn_get_w_thread (hc_index, as->thread_index);
- max_burst_sz = sp->max_burst_size * TRANSPORT_PACER_MIN_MSS;
- sp->max_burst_size = max_burst_sz;
-
- if (!http_req_state_is_tx_valid (hc))
+ if (hc->state == HTTP_CONN_STATE_CLOSED)
{
- /* Sometimes the server apps can send the response earlier
- * than expected (e.g when rejecting a bad request)*/
- if (hc->req.state == HTTP_REQ_STATE_TRANSPORT_IO_MORE_DATA &&
- hc->is_server)
- {
- svm_fifo_dequeue_drop_all (as->rx_fifo);
- hc->req.state = HTTP_REQ_STATE_WAIT_APP_REPLY;
- }
- else
- {
- clib_warning ("hc [%u]%x invalid tx state: http req state "
- "'%U', session state '%U'",
- as->thread_index, as->connection_index,
- format_http_req_state, hc->req.state,
- format_http_conn_state, hc);
- svm_fifo_dequeue_drop_all (as->tx_fifo);
- return 0;
- }
+ HTTP_DBG (1, "conn closed");
+ svm_fifo_dequeue_drop_all (as->tx_fifo);
+ return 0;
}
- HTTP_DBG (1, "run state machine");
- http_req_run_state_machine (hc, sp, 1);
+ max_burst_sz = sp->max_burst_size * TRANSPORT_PACER_MIN_MSS;
+ sp->max_burst_size = max_burst_sz;
+
+ http_vfts[hc->version].app_tx_callback (hc, hr_handle.req_index, sp);
if (hc->state == HTTP_CONN_STATE_APP_CLOSED)
- {
- if (!svm_fifo_max_dequeue_cons (as->tx_fifo))
- http_disconnect_transport (hc);
- }
+ http_vfts[hc->version].app_close_callback (hc, hr_handle.req_index,
+ as->thread_index);
sent = max_burst_sz - sp->max_burst_size;
@@ -2617,38 +1097,36 @@ http_app_tx_callback (void *session, transport_send_params_t *sp)
static int
http_app_rx_evt_cb (transport_connection_t *tc)
{
- http_conn_t *hc = (http_conn_t *) tc;
- HTTP_DBG (1, "hc [%u]%x", vlib_get_thread_index (), hc->h_hc_index);
+ http_req_t *req = (http_req_t *) tc;
+ http_conn_t *hc;
+ http_req_handle_t hr_handle;
+
+ HTTP_DBG (1, "hc [%u]%x", vlib_get_thread_index (), req->hr_hc_index);
- if (hc->req.state == HTTP_REQ_STATE_TUNNEL)
- http_req_state_tunnel_rx (hc, 0);
+ hr_handle.as_u32 = req->hr_req_handle;
+ hc = http_conn_get_w_thread (req->hr_hc_index, req->c_thread_index);
+ http_vfts[hr_handle.version].app_rx_evt_callback (hc, hr_handle.req_index,
+ req->c_thread_index);
return 0;
}
static void
-http_transport_get_endpoint (u32 hc_index, u32 thread_index,
+http_transport_get_endpoint (u32 rh, clib_thread_index_t thread_index,
transport_endpoint_t *tep, u8 is_lcl)
{
- http_conn_t *hc = http_conn_get_w_thread (hc_index, thread_index);
- session_t *ts;
-
- ts = session_get_from_handle (hc->h_tc_session_handle);
- session_get_endpoint (ts, tep, is_lcl);
-}
-
-static u8 *
-format_http_connection (u8 *s, va_list *args)
-{
- http_conn_t *hc = va_arg (*args, http_conn_t *);
+ http_conn_t *hc;
session_t *ts;
+ u32 hc_index;
+ http_req_handle_t hr_handle;
- ts = session_get_from_handle (hc->h_tc_session_handle);
- s = format (s, "[%d:%d][H] app_wrk %u ts %d:%d", hc->c_thread_index,
- hc->c_s_index, hc->h_pa_wrk_index, ts->thread_index,
- ts->session_index);
+ hr_handle.as_u32 = rh;
+ hc_index = http_vfts[hr_handle.version].hc_index_get_by_req_index (
+ hr_handle.req_index, thread_index);
+ hc = http_conn_get_w_thread (hc_index, thread_index);
- return s;
+ ts = session_get_from_handle (hc->hc_tc_session_handle);
+ session_get_endpoint (ts, tep, is_lcl);
}
static u8 *
@@ -2658,10 +1136,10 @@ format_http_listener (u8 *s, va_list *args)
app_listener_t *al;
session_t *lts;
- al = app_listener_get_w_handle (lhc->h_tc_session_handle);
+ al = app_listener_get_w_handle (lhc->hc_tc_session_handle);
lts = app_listener_get_session (al);
s = format (s, "[%d:%d][H] app_wrk %u ts %d:%d", lhc->c_thread_index,
- lhc->c_s_index, lhc->h_pa_wrk_index, lts->thread_index,
+ lhc->c_s_index, lhc->hc_pa_wrk_index, lts->thread_index,
lts->session_index);
return s;
@@ -2670,22 +1148,18 @@ format_http_listener (u8 *s, va_list *args)
static u8 *
format_http_transport_connection (u8 *s, va_list *args)
{
- u32 tc_index = va_arg (*args, u32);
- u32 thread_index = va_arg (*args, u32);
+ http_req_handle_t rh = va_arg (*args, http_req_handle_t);
+ clib_thread_index_t thread_index = va_arg (*args, u32);
u32 verbose = va_arg (*args, u32);
+ u32 hc_index;
http_conn_t *hc;
- hc = http_conn_get_w_thread (tc_index, thread_index);
-
- s = format (s, "%-" SESSION_CLI_ID_LEN "U", format_http_connection, hc);
- if (verbose)
- {
- s =
- format (s, "%-" SESSION_CLI_STATE_LEN "U", format_http_conn_state, hc);
- if (verbose > 1)
- s = format (s, "\n");
- }
+ hc_index = http_vfts[rh.version].hc_index_get_by_req_index (rh.req_index,
+ thread_index);
+ hc = http_conn_get_w_thread (hc_index, thread_index);
+ s = format (s, "%U", http_vfts[rh.version].format_req, rh.req_index,
+ thread_index, hc, verbose);
return s;
}
@@ -2714,10 +1188,10 @@ format_http_transport_half_open (u8 *s, va_list *args)
session_t *tcp_ho;
ho_hc = http_ho_conn_get (ho_index);
- tcp_ho = session_get_from_handle (ho_hc->h_tc_session_handle);
+ tcp_ho = session_get_from_handle (ho_hc->hc_tc_session_handle);
s = format (s, "[%d:%d][H] half-open app_wrk %u ts %d:%d",
- ho_hc->c_thread_index, ho_hc->c_s_index, ho_hc->h_pa_wrk_index,
+ ho_hc->c_thread_index, ho_hc->c_s_index, ho_hc->hc_pa_wrk_index,
tcp_ho->thread_index, tcp_ho->session_index);
return s;
}
@@ -2739,7 +1213,13 @@ http_transport_cleanup_ho (u32 ho_hc_index)
HTTP_DBG (1, "half open: %x", ho_hc_index);
ho_hc = http_ho_conn_get (ho_hc_index);
- session_cleanup_half_open (ho_hc->h_tc_session_handle);
+ if (ho_hc->hc_tc_session_handle == SESSION_INVALID_HANDLE)
+ {
+ HTTP_DBG (1, "already pending cleanup");
+ ho_hc->flags |= HTTP_CONN_F_NO_APP_SESSION;
+ return;
+ }
+ session_cleanup_half_open (ho_hc->hc_tc_session_handle);
http_ho_conn_free (ho_hc);
}
@@ -2749,6 +1229,7 @@ static const transport_proto_vft_t http_proto = {
.start_listen = http_start_listen,
.stop_listen = http_stop_listen,
.close = http_transport_close,
+ .reset = http_transport_reset,
.cleanup_ho = http_transport_cleanup_ho,
.custom_tx = http_app_tx_callback,
.app_rx_evt = http_app_rx_evt_cb,
@@ -2807,6 +1288,28 @@ http_transport_init (vlib_main_t *vm)
VLIB_INIT_FUNCTION (http_transport_init);
+static uword
+unformat_http_version_cfg (unformat_input_t *input, va_list *va)
+{
+ http_engine_vft_t *http_version;
+ unformat_input_t sub_input;
+ int found = 0;
+
+ vec_foreach (http_version, http_vfts)
+ {
+ if (!unformat (input, http_version->name))
+ continue;
+
+ if (http_version->unformat_cfg_callback &&
+ unformat (input, "%U", unformat_vlib_cli_sub_input, &sub_input))
+ {
+ if (http_version->unformat_cfg_callback (&sub_input))
+ found = 1;
+ }
+ }
+ return found;
+}
+
static clib_error_t *
http_config_fn (vlib_main_t *vm, unformat_input_t *input)
{
@@ -2835,6 +1338,8 @@ http_config_fn (vlib_main_t *vm, unformat_input_t *input)
if (hm->fifo_size != mem_sz)
clib_warning ("invalid fifo size %lu", mem_sz);
}
+ else if (unformat (input, "%U", unformat_http_version_cfg))
+ ;
else
return clib_error_return (0, "unknown input `%U'",
format_unformat_error, input);
diff --git a/src/plugins/http/http.h b/src/plugins/http/http.h
index d1e81ab0617..434ff965b6a 100644
--- a/src/plugins/http/http.h
+++ b/src/plugins/http/http.h
@@ -17,15 +17,9 @@
#define SRC_PLUGINS_HTTP_HTTP_H_
#include <ctype.h>
-
#include <vnet/plugin/plugin.h>
-#include <vpp/app/version.h>
-
-#include <vppinfra/time_range.h>
-
-#include <vnet/session/application_interface.h>
-#include <vnet/session/application.h>
-#include <http/http_buffer.h>
+#include <vnet/ip/format.h>
+#include <vnet/ip/ip46_address.h>
#define HTTP_DEBUG 0
@@ -49,20 +43,6 @@ typedef struct transport_endpt_cfg_http
http_udp_tunnel_mode_t udp_tunnel_mode; /**< connect-udp mode */
} transport_endpt_cfg_http_t;
-typedef struct http_conn_id_
-{
- union
- {
- session_handle_t app_session_handle;
- u32 parent_app_api_ctx;
- };
- session_handle_t tc_session_handle;
- u32 parent_app_wrk_index;
-} http_conn_id_t;
-
-STATIC_ASSERT (sizeof (http_conn_id_t) <= TRANSPORT_CONN_ID_LEN,
- "ctx id must be less than TRANSPORT_CONN_ID_LEN");
-
typedef struct
{
char *base;
@@ -71,45 +51,12 @@ typedef struct
#define http_token_lit(s) (s), sizeof (s) - 1
-#define foreach_http_conn_state \
- _ (LISTEN, "LISTEN") \
- _ (CONNECTING, "CONNECTING") \
- _ (ESTABLISHED, "ESTABLISHED") \
- _ (TRANSPORT_CLOSED, "TRANSPORT-CLOSED") \
- _ (APP_CLOSED, "APP-CLOSED") \
- _ (CLOSED, "CLOSED")
-
-typedef enum http_conn_state_
-{
-#define _(s, str) HTTP_CONN_STATE_##s,
- foreach_http_conn_state
-#undef _
-} http_conn_state_t;
-
-#define foreach_http_req_state \
- _ (0, IDLE, "idle") \
- _ (1, WAIT_APP_METHOD, "wait app method") \
- _ (2, WAIT_TRANSPORT_REPLY, "wait transport reply") \
- _ (3, TRANSPORT_IO_MORE_DATA, "transport io more data") \
- _ (4, WAIT_TRANSPORT_METHOD, "wait transport method") \
- _ (5, WAIT_APP_REPLY, "wait app reply") \
- _ (6, APP_IO_MORE_DATA, "app io more data") \
- _ (7, TUNNEL, "tunnel") \
- _ (8, UDP_TUNNEL, "udp tunnel")
-
-typedef enum http_req_state_
-{
-#define _(n, s, str) HTTP_REQ_STATE_##s = n,
- foreach_http_req_state
-#undef _
- HTTP_REQ_N_STATES
-} http_req_state_t;
-
typedef enum http_req_method_
{
HTTP_REQ_GET = 0,
HTTP_REQ_POST,
HTTP_REQ_CONNECT,
+ HTTP_REQ_UNKNOWN, /* for internal use */
} http_req_method_t;
typedef enum http_msg_type_
@@ -118,14 +65,6 @@ typedef enum http_msg_type_
HTTP_MSG_REPLY
} http_msg_type_t;
-typedef enum http_target_form_
-{
- HTTP_TARGET_ORIGIN_FORM,
- HTTP_TARGET_ABSOLUTE_FORM,
- HTTP_TARGET_AUTHORITY_FORM,
- HTTP_TARGET_ASTERISK_FORM
-} http_target_form_t;
-
#define foreach_http_content_type \
_ (APP_7Z, ".7z", "application/x-7z-compressed") \
_ (APP_DOC, ".doc", "application/msword") \
@@ -271,96 +210,108 @@ typedef enum http_status_code_
} http_status_code_t;
#define foreach_http_header_name \
- _ (ACCEPT, "Accept") \
- _ (ACCEPT_CHARSET, "Accept-Charset") \
- _ (ACCEPT_ENCODING, "Accept-Encoding") \
- _ (ACCEPT_LANGUAGE, "Accept-Language") \
- _ (ACCEPT_RANGES, "Accept-Ranges") \
- _ (ACCESS_CONTROL_ALLOW_CREDENTIALS, "Access-Control-Allow-Credentials") \
- _ (ACCESS_CONTROL_ALLOW_HEADERS, "Access-Control-Allow-Headers") \
- _ (ACCESS_CONTROL_ALLOW_METHODS, "Access-Control-Allow-Methods") \
- _ (ACCESS_CONTROL_ALLOW_ORIGIN, "Access-Control-Allow-Origin") \
- _ (ACCESS_CONTROL_EXPOSE_HEADERS, "Access-Control-Expose-Headers") \
- _ (ACCESS_CONTROL_MAX_AGE, "Access-Control-Max-Age") \
- _ (ACCESS_CONTROL_REQUEST_HEADERS, "Access-Control-Request-Headers") \
- _ (ACCESS_CONTROL_REQUEST_METHOD, "Access-Control-Request-Method") \
- _ (AGE, "Age") \
- _ (ALLOW, "Allow") \
- _ (ALPN, "ALPN") \
- _ (ALT_SVC, "Alt-Svc") \
- _ (ALT_USED, "Alt-Used") \
- _ (ALTERNATES, "Alternates") \
- _ (AUTHENTICATION_CONTROL, "Authentication-Control") \
- _ (AUTHENTICATION_INFO, "Authentication-Info") \
- _ (AUTHORIZATION, "Authorization") \
- _ (CACHE_CONTROL, "Cache-Control") \
- _ (CACHE_STATUS, "Cache-Status") \
- _ (CAPSULE_PROTOCOL, "Capsule-Protocol") \
- _ (CDN_CACHE_CONTROL, "CDN-Cache-Control") \
- _ (CDN_LOOP, "CDN-Loop") \
- _ (CLIENT_CERT, "Client-Cert") \
- _ (CLIENT_CERT_CHAIN, "Client-Cert-Chain") \
- _ (CLOSE, "Close") \
- _ (CONNECTION, "Connection") \
- _ (CONTENT_DIGEST, "Content-Digest") \
- _ (CONTENT_DISPOSITION, "Content-Disposition") \
- _ (CONTENT_ENCODING, "Content-Encoding") \
- _ (CONTENT_LANGUAGE, "Content-Language") \
- _ (CONTENT_LENGTH, "Content-Length") \
- _ (CONTENT_LOCATION, "Content-Location") \
- _ (CONTENT_RANGE, "Content-Range") \
- _ (CONTENT_TYPE, "Content-Type") \
- _ (COOKIE, "Cookie") \
- _ (DATE, "Date") \
- _ (DIGEST, "Digest") \
- _ (DPOP, "DPoP") \
- _ (DPOP_NONCE, "DPoP-Nonce") \
- _ (EARLY_DATA, "Early-Data") \
- _ (ETAG, "ETag") \
- _ (EXPECT, "Expect") \
- _ (EXPIRES, "Expires") \
- _ (FORWARDED, "Forwarded") \
- _ (FROM, "From") \
- _ (HOST, "Host") \
- _ (IF_MATCH, "If-Match") \
- _ (IF_MODIFIED_SINCE, "If-Modified-Since") \
- _ (IF_NONE_MATCH, "If-None-Match") \
- _ (IF_RANGE, "If-Range") \
- _ (IF_UNMODIFIED_SINCE, "If-Unmodified-Since") \
- _ (KEEP_ALIVE, "Keep-Alive") \
- _ (LAST_MODIFIED, "Last-Modified") \
- _ (LINK, "Link") \
- _ (LOCATION, "Location") \
- _ (MAX_FORWARDS, "Max-Forwards") \
- _ (ORIGIN, "Origin") \
- _ (PRIORITY, "Priority") \
- _ (PROXY_AUTHENTICATE, "Proxy-Authenticate") \
- _ (PROXY_AUTHENTICATION_INFO, "Proxy-Authentication-Info") \
- _ (PROXY_AUTHORIZATION, "Proxy-Authorization") \
- _ (PROXY_STATUS, "Proxy-Status") \
- _ (RANGE, "Range") \
- _ (REFERER, "Referer") \
- _ (REPR_DIGEST, "Repr-Digest") \
- _ (SET_COOKIE, "Set-Cookie") \
- _ (SIGNATURE, "Signature") \
- _ (SIGNATURE_INPUT, "Signature-Input") \
- _ (STRICT_TRANSPORT_SECURITY, "Strict-Transport-Security") \
- _ (RETRY_AFTER, "Retry-After") \
- _ (SERVER, "Server") \
- _ (TE, "TE") \
- _ (TRAILER, "Trailer") \
- _ (TRANSFER_ENCODING, "Transfer-Encoding") \
- _ (UPGRADE, "Upgrade") \
- _ (USER_AGENT, "User-Agent") \
- _ (VARY, "Vary") \
- _ (VIA, "Via") \
- _ (WANT_CONTENT_DIGEST, "Want-Content-Digest") \
- _ (WANT_REPR_DIGEST, "Want-Repr-Digest") \
- _ (WWW_AUTHENTICATE, "WWW-Authenticate")
+ _ (ACCEPT_CHARSET, "Accept-Charset", "accept-charset", 15) \
+ _ (ACCEPT_ENCODING, "Accept-Encoding", "accept-encoding", 16) \
+ _ (ACCEPT_LANGUAGE, "Accept-Language", "accept-language", 17) \
+ _ (ACCEPT_RANGES, "Accept-Ranges", "accept-ranges", 18) \
+ _ (ACCEPT, "Accept", "accept", 19) \
+ _ (ACCESS_CONTROL_ALLOW_CREDENTIALS, "Access-Control-Allow-Credentials", \
+ "access-control-allow-credentials", 0) \
+ _ (ACCESS_CONTROL_ALLOW_HEADERS, "Access-Control-Allow-Headers", \
+ "access-control-allow-headers", 0) \
+ _ (ACCESS_CONTROL_ALLOW_METHODS, "Access-Control-Allow-Methods", \
+ "access-control-allow-methods", 0) \
+ _ (ACCESS_CONTROL_ALLOW_ORIGIN, "Access-Control-Allow-Origin", \
+ "access-control-allow-origin", 20) \
+ _ (ACCESS_CONTROL_EXPOSE_HEADERS, "Access-Control-Expose-Headers", \
+ "access-control-expose-headers", 0) \
+ _ (ACCESS_CONTROL_MAX_AGE, "Access-Control-Max-Age", \
+ "access-control-max-age", 0) \
+ _ (ACCESS_CONTROL_REQUEST_HEADERS, "Access-Control-Request-Headers", \
+ "access-control-request-headers", 0) \
+ _ (ACCESS_CONTROL_REQUEST_METHOD, "Access-Control-Request-Method", \
+ "access-control-request-method", 0) \
+ _ (AGE, "Age", "age", 21) \
+ _ (ALLOW, "Allow", "allow", 22) \
+ _ (ALPN, "ALPN", "alpn", 0) \
+ _ (ALT_SVC, "Alt-Svc", "alt-svc", 0) \
+ _ (ALT_USED, "Alt-Used", "alt-used", 0) \
+ _ (ALTERNATES, "Alternates", "alternates", 0) \
+ _ (AUTHENTICATION_CONTROL, "Authentication-Control", \
+ "authentication-control", 0) \
+ _ (AUTHENTICATION_INFO, "Authentication-Info", "authentication-info", 0) \
+ _ (AUTHORIZATION, "Authorization", "authorization", 23) \
+ _ (CACHE_CONTROL, "Cache-Control", "cache-control", 24) \
+ _ (CACHE_STATUS, "Cache-Status", "cache-status", 0) \
+ _ (CAPSULE_PROTOCOL, "Capsule-Protocol", "capsule-protocol", 0) \
+ _ (CDN_CACHE_CONTROL, "CDN-Cache-Control", "cdn-cache-control", 0) \
+ _ (CDN_LOOP, "CDN-Loop", "cdn-loop", 0) \
+ _ (CLIENT_CERT, "Client-Cert", "client-cert", 0) \
+ _ (CLIENT_CERT_CHAIN, "Client-Cert-Chain", "client-cert-chain", 0) \
+ _ (CLOSE, "Close", "close", 0) \
+ _ (CONNECTION, "Connection", "connection", 0) \
+ _ (CONTENT_DIGEST, "Content-Digest", "content-digest", 0) \
+ _ (CONTENT_DISPOSITION, "Content-Disposition", "content-disposition", 25) \
+ _ (CONTENT_ENCODING, "Content-Encoding", "content-encoding", 26) \
+ _ (CONTENT_LANGUAGE, "Content-Language", "content-language", 27) \
+ _ (CONTENT_LENGTH, "Content-Length", "content-length", 28) \
+ _ (CONTENT_LOCATION, "Content-Location", "content-location", 29) \
+ _ (CONTENT_RANGE, "Content-Range", "content-range", 30) \
+ _ (CONTENT_TYPE, "Content-Type", "content-type", 31) \
+ _ (COOKIE, "Cookie", "cookie", 32) \
+ _ (DATE, "Date", "date", 33) \
+ _ (DIGEST, "Digest", "digest", 0) \
+ _ (DPOP, "DPoP", "dpop", 0) \
+ _ (DPOP_NONCE, "DPoP-Nonce", "dpop-nonce", 0) \
+ _ (EARLY_DATA, "Early-Data", "early-data", 0) \
+ _ (ETAG, "ETag", "etag", 34) \
+ _ (EXPECT, "Expect", "expect", 35) \
+ _ (EXPIRES, "Expires", "expires", 36) \
+ _ (FORWARDED, "Forwarded", "forwarded", 0) \
+ _ (FROM, "From", "from", 37) \
+ _ (HOST, "Host", "host", 38) \
+ _ (IF_MATCH, "If-Match", "if-match", 39) \
+ _ (IF_MODIFIED_SINCE, "If-Modified-Since", "if-modified-since", 40) \
+ _ (IF_NONE_MATCH, "If-None-Match", "if-none-match", 41) \
+ _ (IF_RANGE, "If-Range", "if-range", 42) \
+ _ (IF_UNMODIFIED_SINCE, "If-Unmodified-Since", "if-unmodified-since", 43) \
+ _ (KEEP_ALIVE, "Keep-Alive", "keep-alive", 0) \
+ _ (LAST_MODIFIED, "Last-Modified", "last-modified", 44) \
+ _ (LINK, "Link", "link", 45) \
+ _ (LOCATION, "Location", "location", 46) \
+ _ (MAX_FORWARDS, "Max-Forwards", "max-forwards", 47) \
+ _ (ORIGIN, "Origin", "origin", 0) \
+ _ (PRIORITY, "Priority", "priority", 0) \
+ _ (PROXY_AUTHENTICATE, "Proxy-Authenticate", "proxy-authenticate", 48) \
+ _ (PROXY_AUTHENTICATION_INFO, "Proxy-Authentication-Info", \
+ "proxy-authentication-info", 0) \
+ _ (PROXY_AUTHORIZATION, "Proxy-Authorization", "proxy-authorization", 49) \
+ _ (PROXY_STATUS, "Proxy-Status", "proxy-status", 0) \
+ _ (RANGE, "Range", "range", 50) \
+ _ (REFERER, "Referer", "referer", 51) \
+ _ (REFRESH, "Refresh", "refresh", 52) \
+ _ (REPR_DIGEST, "Repr-Digest", "repr-digest", 0) \
+ _ (RETRY_AFTER, "Retry-After", "retry-after", 53) \
+ _ (SERVER, "Server", "server", 54) \
+ _ (SET_COOKIE, "Set-Cookie", "set-cookie", 55) \
+ _ (SIGNATURE, "Signature", "signature", 0) \
+ _ (SIGNATURE_INPUT, "Signature-Input", "signature-input", 0) \
+ _ (STRICT_TRANSPORT_SECURITY, "Strict-Transport-Security", \
+ "strict-transport-security", 56) \
+ _ (TE, "TE", "te", 0) \
+ _ (TRAILER, "Trailer", "trailer", 0) \
+ _ (TRANSFER_ENCODING, "Transfer-Encoding", "transfer-encoding", 57) \
+ _ (UPGRADE, "Upgrade", "upgrade", 0) \
+ _ (USER_AGENT, "User-Agent", "user-agent", 58) \
+ _ (VARY, "Vary", "vary", 59) \
+ _ (VIA, "Via", "via", 60) \
+ _ (WANT_CONTENT_DIGEST, "Want-Content-Digest", "want-content-digest", 0) \
+ _ (WANT_REPR_DIGEST, "Want-Repr-Digest", "want-repr-digest", 0) \
+ _ (WWW_AUTHENTICATE, "WWW-Authenticate", "www-authenticate", 61)
typedef enum http_header_name_
{
-#define _(sym, str) HTTP_HEADER_##sym,
+#define _(sym, str_canonical, str_lower, hpack_index) HTTP_HEADER_##sym,
foreach_http_header_name
#undef _
} http_header_name_t;
@@ -399,6 +350,7 @@ typedef enum http_url_scheme_
{
HTTP_URL_SCHEME_HTTP,
HTTP_URL_SCHEME_HTTPS,
+ HTTP_URL_SCHEME_UNKNOWN, /* for internal use */
} http_url_scheme_t;
typedef struct http_msg_data_
@@ -432,118 +384,6 @@ typedef struct http_msg_
http_msg_data_t data;
} http_msg_t;
-typedef struct http_req_
-{
- http_req_state_t state; /* state-machine state */
-
- http_buffer_t tx_buf; /* message body from app to be sent */
-
- /*
- * for parsing of incoming message from transport
- */
- u8 *rx_buf; /* this should hold at least control data */
- u32 rx_buf_offset; /* current offset during parsing */
- u32 control_data_len; /* start line + headers + empty line */
-
- union
- {
- u64 to_recv; /* remaining bytes of body to receive from transport */
- u64 to_skip; /* remaining bytes of capsule to skip */
- };
-
- u8 is_tunnel;
-
- /*
- * parsed metadata for app
- */
- union
- {
- http_status_code_t status_code;
- http_req_method_t method;
- };
-
- http_target_form_t target_form;
- http_url_scheme_t scheme;
- u32 target_authority_offset;
- u32 target_authority_len;
- u32 target_path_offset;
- u32 target_path_len;
- u32 target_query_offset;
- u32 target_query_len;
-
- u32 headers_offset;
- u32 headers_len;
-
- u32 body_offset;
- u64 body_len;
-
- http_field_line_t *headers;
- uword content_len_header_index;
- uword connection_header_index;
- uword upgrade_header_index;
- uword host_header_index;
-
- http_upgrade_proto_t upgrade_proto;
-} http_req_t;
-
-typedef struct http_tc_
-{
- union
- {
- transport_connection_t connection;
- http_conn_id_t c_http_conn_id;
- };
-#define h_tc_session_handle c_http_conn_id.tc_session_handle
-#define h_pa_wrk_index c_http_conn_id.parent_app_wrk_index
-#define h_pa_session_handle c_http_conn_id.app_session_handle
-#define h_pa_app_api_ctx c_http_conn_id.parent_app_api_ctx
-#define h_hc_index connection.c_index
-
- http_conn_state_t state;
- u32 timer_handle;
- u32 timeout;
- u8 pending_timer;
- u8 *app_name;
- u8 *host;
- u8 is_server;
- http_udp_tunnel_mode_t udp_tunnel_mode;
-
- http_req_t req;
-} http_conn_t;
-
-typedef struct http_worker_
-{
- http_conn_t *conn_pool;
-} http_worker_t;
-
-typedef struct http_main_
-{
- http_worker_t *wrk;
- http_conn_t *listener_pool;
- http_conn_t *ho_conn_pool;
- u32 app_index;
-
- u8 **rx_bufs;
- u8 **tx_bufs;
- u8 **app_header_lists;
-
- clib_timebase_t timebase;
-
- u16 *sc_by_u16;
- /*
- * Runtime config
- */
- u8 debug_level;
- u8 is_init;
-
- /*
- * Config
- */
- u64 first_seg_size;
- u64 add_seg_size;
- u32 fifo_size;
-} http_main_t;
-
always_inline u8 *
format_http_bytes (u8 *s, va_list *va)
{
@@ -669,7 +509,8 @@ http_percent_decode (u8 *src, u32 len)
}
/**
- * Remove dot segments from path (RFC3986 section 5.2.4)
+ * Sanitize HTTP path by squashing repeating slashes and removing
+ * dot segments from path (RFC3986 section 5.2.4)
*
* @param path Path to sanitize.
*
@@ -678,18 +519,18 @@ http_percent_decode (u8 *src, u32 len)
* The caller is always responsible to free the returned vector.
*/
always_inline u8 *
-http_path_remove_dot_segments (u8 *path)
+http_path_sanitize (u8 *path)
{
u32 *segments = 0, *segments_len = 0, segment_len;
u8 *new_path = 0;
int i, ii;
- if (!path)
+ if (!path || vec_len (path) == 0)
return vec_new (u8, 0);
segments = vec_new (u32, 1);
/* first segment */
- segments[0] = 0;
+ segments[0] = (path[0] == '/' ? 1 : 0);
/* find all segments */
for (i = 1; i < (vec_len (path) - 1); i++)
{
@@ -704,7 +545,8 @@ http_path_remove_dot_segments (u8 *path)
for (i = 0; i < vec_len (segments_len); i++)
{
segment_len = segments[i + 1] - segments[i];
- if (segment_len == 2 && path[segments[i]] == '.')
+ /* aside from dots, skip empty segments (double slashes) */
+ if ((segment_len == 2 && path[segments[i]] == '.') || segment_len == 1)
segment_len = 0;
else if (segment_len == 3 && path[segments[i]] == '.' &&
path[segments[i] + 1] == '.')
@@ -736,124 +578,6 @@ http_path_remove_dot_segments (u8 *path)
return new_path;
}
-always_inline int
-_parse_field_name (u8 **pos, u8 *end, u8 **field_name_start,
- u32 *field_name_len)
-{
- u32 name_len = 0;
- u8 *p;
-
- static uword tchar[4] = {
- /* !#$%'*+-.0123456789 */
- 0x03ff6cba00000000,
- /* ABCDEFGHIJKLMNOPQRSTUVWXYZ^_`abcdefghijklmnopqrstuvwxyz|~ */
- 0x57ffffffc7fffffe,
- 0x0000000000000000,
- 0x0000000000000000,
- };
-
- p = *pos;
-
- *field_name_start = p;
- while (p != end)
- {
- if (clib_bitmap_get_no_check (tchar, *p))
- {
- name_len++;
- p++;
- }
- else if (*p == ':')
- {
- if (name_len == 0)
- {
- clib_warning ("empty field name");
- return -1;
- }
- *field_name_len = name_len;
- p++;
- *pos = p;
- return 0;
- }
- else
- {
- clib_warning ("invalid character %d", *p);
- return -1;
- }
- }
- clib_warning ("field name end not found");
- return -1;
-}
-
-always_inline int
-_parse_field_value (u8 **pos, u8 *end, u8 **field_value_start,
- u32 *field_value_len)
-{
- u32 value_len = 0;
- u8 *p;
-
- p = *pos;
-
- /* skip leading whitespace */
- while (1)
- {
- if (p == end)
- {
- clib_warning ("field value not found");
- return -1;
- }
- else if (*p != ' ' && *p != '\t')
- {
- break;
- }
- p++;
- }
-
- *field_value_start = p;
- while (p != end)
- {
- if (*p == '\r')
- {
- if ((end - p) < 1)
- {
- clib_warning ("incorrect field line end");
- return -1;
- }
- p++;
- if (*p == '\n')
- {
- if (value_len == 0)
- {
- clib_warning ("empty field value");
- return -1;
- }
- p++;
- *pos = p;
- /* skip trailing whitespace */
- p = *field_value_start + value_len - 1;
- while (*p == ' ' || *p == '\t')
- {
- p--;
- value_len--;
- }
- *field_value_len = value_len;
- return 0;
- }
- clib_warning ("CR without LF");
- return -1;
- }
- if (*p < ' ' && *p != '\t')
- {
- clib_warning ("invalid character %d", *p);
- return -1;
- }
- p++;
- value_len++;
- }
-
- clib_warning ("field value end not found");
- return -1;
-}
-
typedef struct
{
http_token_t name;
@@ -873,6 +597,16 @@ typedef struct
.values = 0, .value_by_name = 0, .buf = 0, .concatenated_values = 0, \
}
+/**
+ * Case-sensitive comparison of two tokens.
+ *
+ * @param actual Pointer to the first token.
+ * @param actual_len Length of the first token.
+ * @param expected Pointer to the second token.
+ * @param expected_len Length of the second token.
+ *
 + * @return @c 1 if tokens are the same, @c 0 otherwise.
+ */
always_inline u8
http_token_is (const char *actual, uword actual_len, const char *expected,
uword expected_len)
@@ -903,6 +637,16 @@ http_tolower_word (uword x)
return (x | y);
}
+/**
+ * Case-insensitive comparison of two tokens.
+ *
+ * @param actual Pointer to the first token.
+ * @param actual_len Length of the first token.
+ * @param expected Pointer to the second token.
+ * @param expected_len Length of the second token.
+ *
 + * @return @c 1 if tokens are the same, @c 0 otherwise.
+ */
always_inline u8
http_token_is_case (const char *actual, uword actual_len, const char *expected,
uword expected_len)
@@ -934,6 +678,16 @@ http_token_is_case (const char *actual, uword actual_len, const char *expected,
return 1;
}
+/**
+ * Check if there is occurrence of token in another token.
+ *
+ * @param haystack Pointer to the token being searched.
+ * @param haystack_len Length of the token being searched.
+ * @param needle The token to search for.
+ * @param needle_len Length of the token to search for.
+ *
 + * @return @c 1 on success, @c 0 otherwise.
+ */
always_inline u8
http_token_contains (const char *haystack, uword haystack_len,
const char *needle, uword needle_len)
@@ -1158,6 +912,13 @@ typedef struct
/* Use high bit of header name length as custom header name bit. */
#define HTTP_CUSTOM_HEADER_NAME_BIT (1 << 31)
+/**
+ * Initialize headers list context.
+ *
+ * @param ctx Headers list context.
 + * @param buf Buffer, provided by app, which stores the headers list.
+ * @param len Length of headers list buffer.
+ */
always_inline void
http_init_headers_ctx (http_headers_ctx_t *ctx, u8 *buf, u32 len)
{
@@ -1166,30 +927,53 @@ http_init_headers_ctx (http_headers_ctx_t *ctx, u8 *buf, u32 len)
ctx->buf = buf;
}
-always_inline void
+/**
+ * Add header with predefined name to the headers list.
+ *
+ * @param ctx Headers list context.
+ * @param name Header name ID (see @ref http_header_name_t).
+ * @param value Header value pointer.
+ * @param value_len Header value length.
+ *
 + * @return @c 0 on success, @c -1 otherwise.
+ */
+always_inline int
http_add_header (http_headers_ctx_t *ctx, http_header_name_t name,
const char *value, uword value_len)
{
http_app_header_t *header;
- ASSERT ((ctx->tail_offset + sizeof (http_app_header_t) + value_len) <
- ctx->len);
+ if ((ctx->tail_offset + sizeof (http_app_header_t) + value_len) > ctx->len)
+ return -1;
header = (http_app_header_t *) (ctx->buf + ctx->tail_offset);
header->name = (u32) name;
header->value.len = (u32) value_len;
clib_memcpy (header->value.token, (u8 *) value, value_len);
ctx->tail_offset += sizeof (http_app_header_t) + value_len;
+ return 0;
}
-always_inline void
+/**
+ * Add header with custom name to the headers list.
+ *
+ * @param ctx Headers list context.
+ * @param name Header name pointer.
+ * @param name_len Header name length.
+ * @param value Header value pointer.
+ * @param value_len Header value length.
+ *
 + * @return @c 0 on success, @c -1 otherwise.
+ */
+always_inline int
http_add_custom_header (http_headers_ctx_t *ctx, const char *name,
uword name_len, const char *value, uword value_len)
{
http_custom_token_t *token;
- ASSERT ((ctx->tail_offset + 2 * sizeof (http_custom_token_t) + name_len +
- value_len) < ctx->len);
+ if ((ctx->tail_offset + 2 * sizeof (http_custom_token_t) + name_len +
+ value_len) > ctx->len)
+ return -1;
/* name */
token = (http_custom_token_t *) (ctx->buf + ctx->tail_offset);
@@ -1202,6 +986,18 @@ http_add_custom_header (http_headers_ctx_t *ctx, const char *name,
token->len = (u32) value_len;
clib_memcpy (token->token, (u8 *) value, token->len);
ctx->tail_offset += sizeof (http_custom_token_t) + value_len;
+ return 0;
+}
+
+/**
+ * Truncate the header list
+ *
+ * @param ctx Headers list context.
+ */
+always_inline void
+http_truncate_headers_list (http_headers_ctx_t *ctx)
+{
+ ctx->tail_offset = 0;
}
typedef enum http_uri_host_type_
@@ -1491,6 +1287,15 @@ http_parse_authority (u8 *authority, u32 authority_len,
return token_start == end ? 0 : -1;
}
+/**
+ * Format given authority (RFC3986 section 3.2)
+ *
+ * @param authority Authority to format.
+ *
 + * @return New vector with formatted authority.
+ *
+ * The caller is always responsible to free the returned vector.
+ */
always_inline u8 *
http_serialize_authority (http_uri_authority_t *authority)
{
diff --git a/src/plugins/http/http1.c b/src/plugins/http/http1.c
new file mode 100644
index 00000000000..5ecc1f52300
--- /dev/null
+++ b/src/plugins/http/http1.c
@@ -0,0 +1,1936 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2025 Cisco Systems, Inc.
+ */
+
+#include <vnet/session/application.h>
+
+#include <http/http.h>
+#include <http/http_header_names.h>
+#include <http/http_private.h>
+#include <http/http_status_codes.h>
+#include <http/http_timer.h>
+
+typedef struct http1_main_
+{
+ http_req_t **req_pool;
+} http1_main_t;
+
+static http1_main_t http1_main;
+
+const char *http1_upgrade_proto_str[] = { "",
+#define _(sym, str) str,
+ foreach_http_upgrade_proto
+#undef _
+};
+
+/**
+ * http error boilerplate
+ */
+static const char *error_template = "HTTP/1.1 %s\r\n"
+ "Date: %U GMT\r\n"
+ "Connection: close\r\n"
+ "Content-Length: 0\r\n\r\n";
+
+/**
+ * http response boilerplate
+ */
+static const char *response_template = "HTTP/1.1 %s\r\n"
+ "Date: %U GMT\r\n"
+ "Server: %v\r\n";
+
+static const char *content_len_template = "Content-Length: %llu\r\n";
+
+static const char *connection_upgrade_template = "Connection: upgrade\r\n"
+ "Upgrade: %s\r\n";
+
+/**
+ * http request boilerplate
+ */
+static const char *get_request_template = "GET %s HTTP/1.1\r\n"
+ "Host: %v\r\n"
+ "User-Agent: %v\r\n";
+
+static const char *post_request_template = "POST %s HTTP/1.1\r\n"
+ "Host: %v\r\n"
+ "User-Agent: %v\r\n"
+ "Content-Length: %llu\r\n";
+
+/* Allocate and initialize a request object for connection @a hc.
+ * HTTP/1 carries at most one in-flight request per connection, so the
+ * request pool index is stashed directly in hc->opaque and the connection
+ * is marked with HTTP_CONN_F_HAS_REQUEST. */
+always_inline http_req_t *
+http1_conn_alloc_req (http_conn_t *hc)
+{
+  http1_main_t *h1m = &http1_main;
+  http_req_t *req;
+  u32 req_index;
+  http_req_handle_t hr_handle;
+
+  pool_get_aligned_safe (h1m->req_pool[hc->c_thread_index], req,
+			 CLIB_CACHE_LINE_BYTES);
+  clib_memset (req, 0, sizeof (*req));
+  req->hr_pa_session_handle = SESSION_INVALID_HANDLE;
+  req_index = req - h1m->req_pool[hc->c_thread_index];
+  /* handle encodes protocol version + per-thread pool index */
+  hr_handle.version = HTTP_VERSION_1;
+  hr_handle.req_index = req_index;
+  req->hr_req_handle = hr_handle.as_u32;
+  req->hr_hc_index = hc->hc_hc_index;
+  req->c_thread_index = hc->c_thread_index;
+  req->c_flags |= TRANSPORT_CONNECTION_F_NO_LOOKUP;
+  hc->opaque = uword_to_pointer (req_index, void *);
+  hc->flags |= HTTP_CONN_F_HAS_REQUEST;
+  return req;
+}
+
+/* Return request object by pool index on @a thread_index; the index must
+ * be valid (no liveness check). */
+always_inline http_req_t *
+http1_req_get (u32 req_index, clib_thread_index_t thread_index)
+{
+  http1_main_t *h1m = &http1_main;
+
+  return pool_elt_at_index (h1m->req_pool[thread_index], req_index);
+}
+
+/* Like http1_req_get, but returns 0 instead of asserting when the pool
+ * slot has already been freed. */
+always_inline http_req_t *
+http1_req_get_if_valid (u32 req_index, clib_thread_index_t thread_index)
+{
+  http1_main_t *h1m = &http1_main;
+
+  if (pool_is_free_index (h1m->req_pool[thread_index], req_index))
+    return 0;
+  return pool_elt_at_index (h1m->req_pool[thread_index], req_index);
+}
+
+/* Return the request currently bound to @a hc; the pool index was stored
+ * in hc->opaque by http1_conn_alloc_req. */
+always_inline http_req_t *
+http1_conn_get_req (http_conn_t *hc)
+{
+  http1_main_t *h1m = &http1_main;
+  u32 req_index;
+
+  req_index = pointer_to_uword (hc->opaque);
+  return pool_elt_at_index (h1m->req_pool[hc->c_thread_index], req_index);
+}
+
+/* Free the request bound to @a hc, including request-owned vectors and the
+ * tx buffer, and clear the connection's has-request flag. */
+always_inline void
+http1_conn_free_req (http_conn_t *hc)
+{
+  http1_main_t *h1m = &http1_main;
+  http_req_t *req;
+  u32 req_index;
+
+  req_index = pointer_to_uword (hc->opaque);
+  req = pool_elt_at_index (h1m->req_pool[hc->c_thread_index], req_index);
+  vec_free (req->headers);
+  vec_free (req->target);
+  http_buffer_free (&req->tx_buf);
+  if (CLIB_DEBUG)
+    /* poison freed memory to catch use-after-free in debug images */
+    memset (req, 0xba, sizeof (*req));
+  pool_put (h1m->req_pool[hc->c_thread_index], req);
+  hc->flags &= ~HTTP_CONN_F_HAS_REQUEST;
+}
+
+/* Deschedule http session and wait for deq notification if underlying ts tx
+ * fifo almost full */
+/* Deschedule the http session and request a dequeue notification when the
+ * underlying transport session tx fifo is close to full. */
+static_always_inline void
+http1_check_and_deschedule (http_conn_t *hc, http_req_t *req,
+			    transport_send_params_t *sp)
+{
+  if (http_io_ts_check_write_thresh (hc))
+    {
+      http_req_deschedule (req, sp);
+      http_io_ts_add_want_deq_ntf (hc);
+    }
+}
+
+/* Write a minimal HTTP/1.1 error response (no body, Connection: close)
+ * directly to the transport session. Out-of-range @a ec falls back to
+ * 500 Internal Server Error. */
+static void
+http1_send_error (http_conn_t *hc, http_status_code_t ec,
+		  transport_send_params_t *sp)
+{
+  u8 *data;
+
+  if (ec >= HTTP_N_STATUS)
+    ec = HTTP_STATUS_INTERNAL_ERROR;
+
+  data = format (0, error_template, http_status_code_str[ec],
+		 format_http_time_now, hc);
+  HTTP_DBG (3, "%v", data);
+  http_io_ts_write (hc, data, vec_len (data), sp);
+  vec_free (data);
+  http_io_ts_after_write (hc, 0);
+}
+
+/* Peek all currently available transport data into @a rx_buf.
+ * Returns 0 on success, -1 when nothing is pending.
+ * NOTE(review): vec_validate may reallocate, but rx_buf is passed by
+ * value so the caller would not see a new pointer — presumably the buffer
+ * from http_get_rx_buf is preallocated with enough capacity; confirm. */
+static int
+http1_read_message (http_conn_t *hc, u8 *rx_buf)
+{
+  u32 max_deq;
+
+  max_deq = http_io_ts_max_read (hc);
+  if (PREDICT_FALSE (max_deq == 0))
+    return -1;
+
+  vec_validate (rx_buf, max_deq - 1);
+  /* peek (last arg 1): data stays in the ts fifo until drained */
+  http_io_ts_read (hc, rx_buf, max_deq, 1);
+
+  return 0;
+}
+
+/* Classify and parse the request-target (RFC9112 section 3.2) previously
+ * located by http1_parse_request_line. Determines the target form
+ * (asterisk/origin/absolute/authority), splits authority/path/query into
+ * offsets within @a rx_buf, and validates the form against the method
+ * (authority-form only for CONNECT, others never for CONNECT).
+ * Returns 0 on success, -1 on invalid/unsupported target. */
+static int
+http1_parse_target (http_req_t *req, u8 *rx_buf)
+{
+  int i;
+  u8 *p, *end;
+
+  /* asterisk-form = "*" */
+  if ((rx_buf[req->target_path_offset] == '*') && (req->target_path_len == 1))
+    {
+      req->target_form = HTTP_TARGET_ASTERISK_FORM;
+      /* we do not support OPTIONS request */
+      return -1;
+    }
+
+  /* origin-form = 1*( "/" segment ) [ "?" query ] */
+  if (rx_buf[req->target_path_offset] == '/')
+    {
+      /* drop leading slash */
+      req->target_path_len--;
+      req->target_path_offset++;
+      req->target_form = HTTP_TARGET_ORIGIN_FORM;
+      http_identify_optional_query (req, rx_buf);
+      /* can't be CONNECT method */
+      return req->method == HTTP_REQ_CONNECT ? -1 : 0;
+    }
+
+  /* absolute-form =
+   * scheme "://" host [ ":" port ] *( "/" segment ) [ "?" query ] */
+  if (req->target_path_len > 8 &&
+      !memcmp (rx_buf + req->target_path_offset, "http", 4))
+    {
+      req->scheme = HTTP_URL_SCHEME_HTTP;
+      p = rx_buf + req->target_path_offset + 4;
+      if (*p == 's')
+	{
+	  p++;
+	  req->scheme = HTTP_URL_SCHEME_HTTPS;
+	}
+      if (*p++ == ':')
+	{
+	  /* expect_char advances p and bails out on mismatch (macro from
+	   * http_private.h) */
+	  expect_char ('/');
+	  expect_char ('/');
+	  req->target_form = HTTP_TARGET_ABSOLUTE_FORM;
+	  req->target_authority_offset = p - rx_buf;
+	  req->target_authority_len = 0;
+	  end = rx_buf + req->target_path_offset + req->target_path_len;
+	  /* authority runs up to the first '/', the rest is the path */
+	  while (p < end)
+	    {
+	      if (*p == '/')
+		{
+		  p++; /* drop leading slash */
+		  req->target_path_offset = p - rx_buf;
+		  req->target_path_len = end - p;
+		  break;
+		}
+	      req->target_authority_len++;
+	      p++;
+	    }
+	  if (!req->target_path_len)
+	    {
+	      clib_warning ("zero length host");
+	      return -1;
+	    }
+	  http_identify_optional_query (req, rx_buf);
+	  /* can't be CONNECT method */
+	  return req->method == HTTP_REQ_CONNECT ? -1 : 0;
+	}
+    }
+
+  /* authority-form = host ":" port */
+  for (i = req->target_path_offset;
+       i < (req->target_path_offset + req->target_path_len); i++)
+    {
+      if ((rx_buf[i] == ':') && (isdigit (rx_buf[i + 1])))
+	{
+	  /* reinterpret the whole target as authority, clear the path */
+	  req->target_authority_len = req->target_path_len;
+	  req->target_path_len = 0;
+	  req->target_authority_offset = req->target_path_offset;
+	  req->target_path_offset = 0;
+	  req->target_form = HTTP_TARGET_AUTHORITY_FORM;
+	  /* "authority-form" is only used for CONNECT requests */
+	  return req->method == HTTP_REQ_CONNECT ? 0 : -1;
+	}
+    }
+
+  return -1;
+}
+
+/* Parse the request-line (RFC9112 section 3):
+ *   request-line = method SP request-target SP HTTP-version CRLF
+ * Fills method, target offsets/lengths and control_data_len in @a req and
+ * leaves req->rx_buf_offset at the start of the header section.
+ * On failure returns -1 and sets @a ec to the status code to send. */
+static int
+http1_parse_request_line (http_req_t *req, u8 *rx_buf, http_status_code_t *ec)
+{
+  int i, target_len;
+  u32 next_line_offset, method_offset;
+
+  /* request-line = method SP request-target SP HTTP-version CRLF */
+  i = http_v_find_index (rx_buf, 8, 0, "\r\n");
+  if (i < 0)
+    {
+      clib_warning ("request line incomplete");
+      *ec = HTTP_STATUS_BAD_REQUEST;
+      return -1;
+    }
+  HTTP_DBG (2, "request line length: %d", i);
+  req->control_data_len = i + 2;
+  next_line_offset = req->control_data_len;
+
+  /* there should be at least one more CRLF */
+  if (vec_len (rx_buf) < (next_line_offset + 2))
+    {
+      clib_warning ("malformed message, too short");
+      *ec = HTTP_STATUS_BAD_REQUEST;
+      return -1;
+    }
+
+  /*
+   * RFC9112 2.2:
+   * In the interest of robustness, a server that is expecting to receive and
+   * parse a request-line SHOULD ignore at least one empty line (CRLF)
+   * received prior to the request-line.
+   */
+  method_offset = rx_buf[0] == '\r' && rx_buf[1] == '\n' ? 2 : 0;
+  /* parse method */
+  if (!memcmp (rx_buf + method_offset, "GET ", 4))
+    {
+      HTTP_DBG (0, "GET method");
+      req->method = HTTP_REQ_GET;
+      req->target_path_offset = method_offset + 4;
+    }
+  else if (!memcmp (rx_buf + method_offset, "POST ", 5))
+    {
+      HTTP_DBG (0, "POST method");
+      req->method = HTTP_REQ_POST;
+      req->target_path_offset = method_offset + 5;
+    }
+  else if (!memcmp (rx_buf + method_offset, "CONNECT ", 8))
+    {
+      HTTP_DBG (0, "CONNECT method");
+      req->method = HTTP_REQ_CONNECT;
+      req->upgrade_proto = HTTP_UPGRADE_PROTO_NA;
+      req->target_path_offset = method_offset + 8;
+      req->is_tunnel = 1;
+    }
+  else
+    {
+      /* uppercase first letter -> looks like a real (but unimplemented)
+       * method token -> 501, anything else -> 400.
+       * NOTE(review): the u8 operand promotes to int, so bytes below 'A'
+       * also satisfy this test and yield 501 — confirm this is intended. */
+      if (rx_buf[method_offset] - 'A' <= 'Z' - 'A')
+	{
+	  *ec = HTTP_STATUS_NOT_IMPLEMENTED;
+	  return -1;
+	}
+      else
+	{
+	  *ec = HTTP_STATUS_BAD_REQUEST;
+	  return -1;
+	}
+    }
+
+  /* find version */
+  i = http_v_find_index (rx_buf, next_line_offset - 11, 11, " HTTP/");
+  if (i < 0)
+    {
+      clib_warning ("HTTP version not present");
+      *ec = HTTP_STATUS_BAD_REQUEST;
+      return -1;
+    }
+  /* verify major version */
+  if (isdigit (rx_buf[i + 6]))
+    {
+      if (rx_buf[i + 6] != '1')
+	{
+	  clib_warning ("HTTP major version '%c' not supported",
+			rx_buf[i + 6]);
+	  *ec = HTTP_STATUS_HTTP_VERSION_NOT_SUPPORTED;
+	  return -1;
+	}
+    }
+  else
+    {
+      clib_warning ("HTTP major version '%c' is not digit", rx_buf[i + 6]);
+      *ec = HTTP_STATUS_BAD_REQUEST;
+      return -1;
+    }
+
+  /* parse request-target */
+  HTTP_DBG (2, "http at %d", i);
+  target_len = i - req->target_path_offset;
+  HTTP_DBG (2, "target_len %d", target_len);
+  if (target_len < 1)
+    {
+      clib_warning ("request-target not present");
+      *ec = HTTP_STATUS_BAD_REQUEST;
+      return -1;
+    }
+  req->target_path_len = target_len;
+  req->target_query_offset = 0;
+  req->target_query_len = 0;
+  req->target_authority_len = 0;
+  req->target_authority_offset = 0;
+  if (http1_parse_target (req, rx_buf))
+    {
+      clib_warning ("invalid target");
+      *ec = HTTP_STATUS_BAD_REQUEST;
+      return -1;
+    }
+  HTTP_DBG (2, "request-target path length: %u", req->target_path_len);
+  HTTP_DBG (2, "request-target path offset: %u", req->target_path_offset);
+  HTTP_DBG (2, "request-target query length: %u", req->target_query_len);
+  HTTP_DBG (2, "request-target query offset: %u", req->target_query_offset);
+
+  /* set buffer offset to next line start */
+  req->rx_buf_offset = next_line_offset;
+
+  return 0;
+}
+
+/* Parse a response status-line (RFC9112 section 4):
+ *   status-line = HTTP-version SP status-code SP [ reason-phrase ] CRLF
+ * Validates version "HTTP/1.x" and a 3-digit status code in [100,599],
+ * stores the mapped code in req->status_code and leaves rx_buf_offset at
+ * the start of the header section. Returns 0 on success, -1 otherwise. */
+static int
+http1_parse_status_line (http_req_t *req, u8 *rx_buf)
+{
+  int i;
+  u32 next_line_offset;
+  u8 *p, *end;
+  u16 status_code = 0;
+
+  i = http_v_find_index (rx_buf, 0, 0, "\r\n");
+  /* status-line = HTTP-version SP status-code SP [ reason-phrase ] CRLF */
+  if (i < 0)
+    {
+      clib_warning ("status line incomplete");
+      return -1;
+    }
+  HTTP_DBG (2, "status line length: %d", i);
+  /* minimum is "HTTP/1.x NNN" = 12 chars */
+  if (i < 12)
+    {
+      clib_warning ("status line too short (%d)", i);
+      return -1;
+    }
+  req->control_data_len = i + 2;
+  next_line_offset = req->control_data_len;
+  p = rx_buf;
+  end = rx_buf + i;
+
+  /* there should be at least one more CRLF */
+  if (vec_len (rx_buf) < (next_line_offset + 2))
+    {
+      clib_warning ("malformed message, too short");
+      return -1;
+    }
+
+  /* parse version (expect_char/parse_int are helper macros that advance p
+   * and return -1 on mismatch) */
+  expect_char ('H');
+  expect_char ('T');
+  expect_char ('T');
+  expect_char ('P');
+  expect_char ('/');
+  expect_char ('1');
+  expect_char ('.');
+  if (!isdigit (*p++))
+    {
+      clib_warning ("invalid HTTP minor version");
+      return -1;
+    }
+
+  /* skip space(s) */
+  if (*p != ' ')
+    {
+      clib_warning ("no space after HTTP version");
+      return -1;
+    }
+  do
+    {
+      p++;
+      if (p == end)
+	{
+	  clib_warning ("no status code");
+	  return -1;
+	}
+    }
+  while (*p == ' ');
+
+  /* parse status code */
+  if ((end - p) < 3)
+    {
+      clib_warning ("not enough characters for status code");
+      return -1;
+    }
+  parse_int (status_code, 100);
+  parse_int (status_code, 10);
+  parse_int (status_code, 1);
+  if (status_code < 100 || status_code > 599)
+    {
+      clib_warning ("invalid status code %d", status_code);
+      return -1;
+    }
+  req->status_code = http_sc_by_u16 (status_code);
+  HTTP_DBG (0, "status code: %d", status_code);
+
+  /* set buffer offset to next line start */
+  req->rx_buf_offset = next_line_offset;
+
+  return 0;
+}
+
+/* Parse a header field name terminated by ':' starting at *pos.
+ * Valid name characters are RFC9110 tchars, checked via a 256-bit bitmap.
+ * On success advances *pos past the ':' and returns 0 with the name span
+ * in field_name_start/field_name_len; returns -1 on malformed input. */
+always_inline int
+http1_parse_field_name (u8 **pos, u8 *end, u8 **field_name_start,
+			u32 *field_name_len)
+{
+  u32 name_len = 0;
+  u8 *p;
+
+  /* bitmap of allowed tchar bytes, indexed by character value */
+  static uword tchar[4] = {
+    /* !#$%'*+-.0123456789 */
+    0x03ff6cba00000000,
+    /* ABCDEFGHIJKLMNOPQRSTUVWXYZ^_`abcdefghijklmnopqrstuvwxyz|~ */
+    0x57ffffffc7fffffe,
+    0x0000000000000000,
+    0x0000000000000000,
+  };
+
+  p = *pos;
+
+  *field_name_start = p;
+  while (p != end)
+    {
+      if (clib_bitmap_get_no_check (tchar, *p))
+	{
+	  name_len++;
+	  p++;
+	}
+      else if (*p == ':')
+	{
+	  if (name_len == 0)
+	    {
+	      clib_warning ("empty field name");
+	      return -1;
+	    }
+	  *field_name_len = name_len;
+	  p++;
+	  *pos = p;
+	  return 0;
+	}
+      else
+	{
+	  clib_warning ("invalid character %d", *p);
+	  return -1;
+	}
+    }
+  clib_warning ("field name end not found");
+  return -1;
+}
+
+/* Parse a header field value terminated by CRLF starting at *pos.
+ * Leading and trailing whitespace (SP/HTAB) is excluded from the reported
+ * span; empty values and bare CR are rejected. On success advances *pos
+ * past the CRLF and returns 0; returns -1 on malformed input. */
+always_inline int
+http1_parse_field_value (u8 **pos, u8 *end, u8 **field_value_start,
+			 u32 *field_value_len)
+{
+  u32 value_len = 0;
+  u8 *p;
+
+  p = *pos;
+
+  /* skip leading whitespace */
+  while (1)
+    {
+      if (p == end)
+	{
+	  clib_warning ("field value not found");
+	  return -1;
+	}
+      else if (*p != ' ' && *p != '\t')
+	{
+	  break;
+	}
+      p++;
+    }
+
+  *field_value_start = p;
+  while (p != end)
+    {
+      if (*p == '\r')
+	{
+	  if ((end - p) < 1)
+	    {
+	      clib_warning ("incorrect field line end");
+	      return -1;
+	    }
+	  p++;
+	  if (*p == '\n')
+	    {
+	      if (value_len == 0)
+		{
+		  clib_warning ("empty field value");
+		  return -1;
+		}
+	      p++;
+	      *pos = p;
+	      /* skip trailing whitespace: walk back from the last byte of
+	       * the raw value and shrink the reported length */
+	      p = *field_value_start + value_len - 1;
+	      while (*p == ' ' || *p == '\t')
+		{
+		  p--;
+		  value_len--;
+		}
+	      *field_value_len = value_len;
+	      return 0;
+	    }
+	  clib_warning ("CR without LF");
+	  return -1;
+	}
+      /* reject control characters other than HTAB */
+      if (*p < ' ' && *p != '\t')
+	{
+	  clib_warning ("invalid character %d", *p);
+	  return -1;
+	}
+      p++;
+      value_len++;
+    }
+
+  clib_warning ("field value end not found");
+  return -1;
+}
+
+/* Walk the header section starting at req->rx_buf_offset, recording each
+ * field line (name/value offsets relative to headers_offset) into
+ * req->headers. Also remembers the indices of Content-Length, Connection,
+ * Upgrade and Host for later processing. Sets headers_offset/headers_len
+ * and extends control_data_len past the terminating CRLF.
+ * Returns 0 on success; -1 with *ec set on malformed headers. */
+static int
+http1_identify_headers (http_req_t *req, u8 *rx_buf, http_status_code_t *ec)
+{
+  int rv;
+  u8 *p, *end, *name_start, *value_start;
+  u32 name_len, value_len;
+  http_field_line_t *field_line;
+  uword header_index;
+
+  vec_reset_length (req->headers);
+  req->content_len_header_index = ~0;
+  req->connection_header_index = ~0;
+  req->upgrade_header_index = ~0;
+  req->host_header_index = ~0;
+  req->headers_offset = req->rx_buf_offset;
+
+  /* check if we have any header */
+  if ((rx_buf[req->rx_buf_offset] == '\r') &&
+      (rx_buf[req->rx_buf_offset + 1] == '\n'))
+    {
+      /* just another CRLF -> no headers */
+      HTTP_DBG (2, "no headers");
+      req->headers_len = 0;
+      req->control_data_len += 2;
+      return 0;
+    }
+
+  end = vec_end (rx_buf);
+  p = rx_buf + req->rx_buf_offset;
+
+  while (1)
+    {
+      rv = http1_parse_field_name (&p, end, &name_start, &name_len);
+      if (rv != 0)
+	{
+	  *ec = HTTP_STATUS_BAD_REQUEST;
+	  return -1;
+	}
+      /* need at least 2 more bytes after the value for the final CRLF */
+      rv = http1_parse_field_value (&p, end, &value_start, &value_len);
+      if (rv != 0 || (end - p) < 2)
+	{
+	  *ec = HTTP_STATUS_BAD_REQUEST;
+	  return -1;
+	}
+
+      vec_add2 (req->headers, field_line, 1);
+      field_line->name_offset = (name_start - rx_buf) - req->headers_offset;
+      field_line->name_len = name_len;
+      field_line->value_offset = (value_start - rx_buf) - req->headers_offset;
+      field_line->value_len = value_len;
+      header_index = field_line - req->headers;
+
+      /* find headers that will be used later in preprocessing */
+      /* names are case-insensitive (RFC9110 section 5.1) */
+      if (req->content_len_header_index == ~0 &&
+	  http_token_is_case (
+	    (const char *) name_start, name_len,
+	    http_header_name_token (HTTP_HEADER_CONTENT_LENGTH)))
+	req->content_len_header_index = header_index;
+      else if (req->connection_header_index == ~0 &&
+	       http_token_is_case (
+		 (const char *) name_start, name_len,
+		 http_header_name_token (HTTP_HEADER_CONNECTION)))
+	req->connection_header_index = header_index;
+      else if (req->upgrade_header_index == ~0 &&
+	       http_token_is_case (
+		 (const char *) name_start, name_len,
+		 http_header_name_token (HTTP_HEADER_UPGRADE)))
+	req->upgrade_header_index = header_index;
+      else if (req->host_header_index == ~0 &&
+	       http_token_is_case ((const char *) name_start, name_len,
+				   http_header_name_token (HTTP_HEADER_HOST)))
+	req->host_header_index = header_index;
+
+      /* are we done? */
+      if (*p == '\r' && *(p + 1) == '\n')
+	break;
+    }
+
+  req->headers_len = p - (rx_buf + req->headers_offset);
+  req->control_data_len += (req->headers_len + 2);
+  HTTP_DBG (2, "headers length: %u", req->headers_len);
+  HTTP_DBG (2, "headers offset: %u", req->headers_offset);
+
+  return 0;
+}
+
+/* Determine message body length and offset from the parsed headers.
+ * Tunnels and header-less messages have no body; otherwise the length
+ * comes from the Content-Length header (chunked coding not yet handled).
+ * Returns 0 on success; -1 with *ec set on an invalid Content-Length. */
+static int
+http1_identify_message_body (http_req_t *req, u8 *rx_buf,
+			     http_status_code_t *ec)
+{
+  int rv;
+
+  req->body_len = 0;
+
+  if (req->headers_len == 0)
+    {
+      HTTP_DBG (2, "no header, no message-body");
+      return 0;
+    }
+  if (req->is_tunnel)
+    {
+      HTTP_DBG (2, "tunnel, no message-body");
+      return 0;
+    }
+
+  /* TODO check for chunked transfer coding */
+
+  if (req->content_len_header_index == ~0)
+    {
+      HTTP_DBG (2, "Content-Length header not present, no message-body");
+      return 0;
+    }
+
+  rv = http_parse_content_length (req, rx_buf);
+  if (rv)
+    {
+      *ec = HTTP_STATUS_BAD_REQUEST;
+      return rv;
+    }
+
+  /* body starts right after the header section's terminating CRLF */
+  req->body_offset = req->headers_offset + req->headers_len + 2;
+  HTTP_DBG (2, "body length: %llu", req->body_len);
+  HTTP_DBG (2, "body offset: %u", req->body_offset);
+
+  return 0;
+}
+
+/* Detect "Connection: upgrade" + "Upgrade: <proto>" on a GET request and,
+ * for supported tunnel protocols, convert the request to an internal
+ * CONNECT with req->upgrade_proto set. Unknown upgrade targets (e.g. a
+ * newer HTTP version) are ignored. */
+static void
+http1_check_connection_upgrade (http_req_t *req, u8 *rx_buf)
+{
+  http_field_line_t *connection, *upgrade;
+  u8 skip;
+
+  /* any nonzero term means a precondition is missing */
+  skip = (req->method != HTTP_REQ_GET) + (req->connection_header_index == ~0) +
+	 (req->upgrade_header_index == ~0);
+  if (skip)
+    return;
+
+  connection = vec_elt_at_index (req->headers, req->connection_header_index);
+  /* connection options are case-insensitive (RFC9110 7.6.1) */
+  if (http_token_is_case (
+	http_field_line_value_token (connection, req, rx_buf),
+	http_token_lit ("upgrade")))
+    {
+      upgrade = vec_elt_at_index (req->headers, req->upgrade_header_index);
+
+      /* check upgrade protocol, we want to ignore something like upgrade to
+       * newer HTTP version, only tunnels are supported */
+      if (0)
+	;
+#define _(sym, str)                                                           \
+  else if (http_token_is_case (                                               \
+	     http_field_line_value_token (upgrade, req, rx_buf),              \
+	     http_token_lit (str))) req->upgrade_proto =                      \
+    HTTP_UPGRADE_PROTO_##sym;
+      foreach_http_upgrade_proto
+#undef _
+      else return;
+
+      req->is_tunnel = 1;
+      req->method = HTTP_REQ_CONNECT;
+    }
+}
+
+/* Fix up request scheme and authority for non-absolute-form targets.
+ *
+ * When the request-target is not in absolute-form, the effective scheme
+ * is derived from the transport (TLS => https) and, unless the target is
+ * in authority-form, the authority is taken from the Host header field
+ * (RFC9112 section 3.3). */
+static void
+http1_target_fixup (http_conn_t *hc, http_req_t *req)
+{
+  http_field_line_t *host;
+
+  if (req->target_form == HTTP_TARGET_ABSOLUTE_FORM)
+    return;
+
+  /* scheme fixup */
+  req->scheme = http_get_transport_proto (hc) == TRANSPORT_PROTO_TLS ?
+		  HTTP_URL_SCHEME_HTTPS :
+		  HTTP_URL_SCHEME_HTTP;
+
+  /* BUGFIX: must test/use the Host header index here, not the Connection
+   * header index — otherwise the authority would be read from the
+   * Connection header's value. */
+  if (req->target_form == HTTP_TARGET_AUTHORITY_FORM ||
+      req->host_header_index == ~0)
+    return;
+
+  /* authority fixup */
+  host = vec_elt_at_index (req->headers, req->host_header_index);
+  req->target_authority_offset = host->value_offset;
+  req->target_authority_len = host->value_len;
+}
+
+/* Serialize the app-provided header list (http_headers_ctx_t wire format)
+ * into "*tx_buf" as "Name: value\r\n" lines. Entries are either a known
+ * header enum (http_app_header_t) or, when HTTP_CUSTOM_HEADER_NAME_BIT is
+ * set in the leading u32, a pair of custom name/value tokens. */
+static void
+http1_write_app_headers (http_req_t *req, http_msg_t *msg, u8 **tx_buf)
+{
+  u8 *app_headers, *p, *end;
+  u32 *tmp;
+
+  /* read app header list */
+  app_headers = http_get_app_header_list (req, msg);
+
+  /* serialize app headers to tx_buf */
+  end = app_headers + msg->data.headers_len;
+  while (app_headers < end)
+    {
+      /* custom header name? */
+      tmp = (u32 *) app_headers;
+      if (PREDICT_FALSE (*tmp & HTTP_CUSTOM_HEADER_NAME_BIT))
+	{
+	  http_custom_token_t *name, *value;
+	  name = (http_custom_token_t *) app_headers;
+	  /* mask off the custom-name marker to get the real length */
+	  u32 name_len = name->len & ~HTTP_CUSTOM_HEADER_NAME_BIT;
+	  app_headers += sizeof (http_custom_token_t) + name_len;
+	  value = (http_custom_token_t *) app_headers;
+	  app_headers += sizeof (http_custom_token_t) + value->len;
+	  /* +4 for ": " and "\r\n" */
+	  vec_add2 (*tx_buf, p, name_len + value->len + 4);
+	  clib_memcpy (p, name->token, name_len);
+	  p += name_len;
+	  *p++ = ':';
+	  *p++ = ' ';
+	  clib_memcpy (p, value->token, value->len);
+	  p += value->len;
+	  *p++ = '\r';
+	  *p++ = '\n';
+	}
+      else
+	{
+	  http_app_header_t *header;
+	  header = (http_app_header_t *) app_headers;
+	  app_headers += sizeof (http_app_header_t) + header->value.len;
+	  /* translate the well-known header enum to its name token */
+	  http_token_t name = { http_header_name_token (header->name) };
+	  vec_add2 (*tx_buf, p, name.len + header->value.len + 4);
+	  clib_memcpy (p, name.base, name.len);
+	  p += name.len;
+	  *p++ = ':';
+	  *p++ = ' ';
+	  clib_memcpy (p, header->value.token, header->value.len);
+	  p += header->value.len;
+	  *p++ = '\r';
+	  *p++ = '\n';
+	}
+    }
+}
+
+/*************************************/
+/* request state machine handlers RX */
+/*************************************/
+
+/* Client-side state handler: parse a server response (status line, headers,
+ * body metadata) from the transport, forward an HTTP_MSG_REPLY plus as much
+ * data as fits to the app, and transition to WAIT_APP_METHOD (all received)
+ * or TRANSPORT_IO_MORE_DATA (body streaming continues). Any parse error
+ * tears the connection down. */
+static http_sm_result_t
+http1_req_state_wait_transport_reply (http_conn_t *hc, http_req_t *req,
+				      transport_send_params_t *sp)
+{
+  int rv;
+  http_msg_t msg = {};
+  u32 len, max_enq, body_sent;
+  http_status_code_t ec;
+  u8 *rx_buf;
+
+  rx_buf = http_get_rx_buf (hc);
+  rv = http1_read_message (hc, rx_buf);
+
+  /* Nothing yet, wait for data or timer expire */
+  if (rv)
+    {
+      HTTP_DBG (1, "no data to deq");
+      return HTTP_SM_STOP;
+    }
+
+  HTTP_DBG (3, "%v", rx_buf);
+
+  if (vec_len (rx_buf) < 8)
+    {
+      clib_warning ("response buffer too short");
+      goto error;
+    }
+
+  rv = http1_parse_status_line (req, rx_buf);
+  if (rv)
+    goto error;
+
+  rv = http1_identify_headers (req, rx_buf, &ec);
+  if (rv)
+    goto error;
+
+  rv = http1_identify_message_body (req, rx_buf, &ec);
+  if (rv)
+    goto error;
+
+  /* send at least "control data" which is necessary minimum,
+   * if there is some space send also portion of body */
+  max_enq = http_io_as_max_write (req);
+  max_enq -= sizeof (msg);
+  if (max_enq < req->control_data_len)
+    {
+      clib_warning ("not enough room for control data in app's rx fifo");
+      goto error;
+    }
+  len = clib_min (max_enq, vec_len (rx_buf));
+
+  msg.type = HTTP_MSG_REPLY;
+  msg.code = req->status_code;
+  msg.data.headers_offset = req->headers_offset;
+  msg.data.headers_len = req->headers_len;
+  msg.data.body_offset = req->body_offset;
+  msg.data.body_len = req->body_len;
+  msg.data.type = HTTP_MSG_DATA_INLINE;
+  msg.data.len = len;
+  msg.data.headers_ctx = pointer_to_uword (req->headers);
+
+  /* msg header followed by the raw message bytes */
+  svm_fifo_seg_t segs[2] = { { (u8 *) &msg, sizeof (msg) }, { rx_buf, len } };
+
+  http_io_as_write_segs (req, segs, 2);
+
+  body_sent = len - req->control_data_len;
+  req->to_recv = req->body_len - body_sent;
+  if (req->to_recv == 0)
+    {
+      /* all sent, we are done */
+      http_req_state_change (req, HTTP_REQ_STATE_WAIT_APP_METHOD);
+    }
+  else
+    {
+      /* stream rest of the response body */
+      http_req_state_change (req, HTTP_REQ_STATE_TRANSPORT_IO_MORE_DATA);
+    }
+
+  http_io_ts_drain (hc, len);
+  http_io_ts_after_read (hc, 1);
+  http_app_worker_rx_notify (req);
+  return HTTP_SM_STOP;
+
+error:
+  http_io_ts_drain_all (hc);
+  http_io_ts_after_read (hc, 1);
+  session_transport_closing_notify (&req->connection);
+  session_transport_closed_notify (&req->connection);
+  http_disconnect_transport (hc);
+  return HTTP_SM_ERROR;
+}
+
+/* Server-side state handler: parse an incoming request (request line,
+ * headers, upgrade detection, body metadata), forward an HTTP_MSG_REQUEST
+ * plus as much data as fits to the app, and transition to WAIT_APP_REPLY
+ * or TRANSPORT_IO_MORE_DATA. On parse failure an error response is sent
+ * and the transport is disconnected. */
+static http_sm_result_t
+http1_req_state_wait_transport_method (http_conn_t *hc, http_req_t *req,
+				       transport_send_params_t *sp)
+{
+  http_status_code_t ec;
+  http_msg_t msg;
+  int rv;
+  u32 len, max_enq, body_sent;
+  u64 max_deq;
+  u8 *rx_buf;
+
+  rx_buf = http_get_rx_buf (hc);
+  rv = http1_read_message (hc, rx_buf);
+
+  /* Nothing yet, wait for data or timer expire */
+  if (rv)
+    return HTTP_SM_STOP;
+
+  HTTP_DBG (3, "%v", rx_buf);
+
+  if (vec_len (rx_buf) < 8)
+    {
+      ec = HTTP_STATUS_BAD_REQUEST;
+      goto error;
+    }
+
+  rv = http1_parse_request_line (req, rx_buf, &ec);
+  if (rv)
+    goto error;
+
+  rv = http1_identify_headers (req, rx_buf, &ec);
+  if (rv)
+    goto error;
+
+  http1_target_fixup (hc, req);
+  http1_check_connection_upgrade (req, rx_buf);
+
+  rv = http1_identify_message_body (req, rx_buf, &ec);
+  if (rv)
+    goto error;
+
+  /* send at least "control data" which is necessary minimum,
+   * if there is some space send also portion of body */
+  max_enq = http_io_as_max_write (req);
+  max_enq -= sizeof (msg);
+  if (max_enq < req->control_data_len)
+    {
+      clib_warning ("not enough room for control data in app's rx fifo");
+      ec = HTTP_STATUS_INTERNAL_ERROR;
+      goto error;
+    }
+  /* do not dequeue more than one HTTP request, we do not support pipelining */
+  max_deq = clib_min (req->control_data_len + req->body_len, vec_len (rx_buf));
+  len = clib_min (max_enq, max_deq);
+
+  msg.type = HTTP_MSG_REQUEST;
+  msg.method_type = req->method;
+  msg.data.type = HTTP_MSG_DATA_INLINE;
+  msg.data.len = len;
+  msg.data.scheme = req->scheme;
+  msg.data.target_authority_offset = req->target_authority_offset;
+  msg.data.target_authority_len = req->target_authority_len;
+  msg.data.target_path_offset = req->target_path_offset;
+  msg.data.target_path_len = req->target_path_len;
+  msg.data.target_query_offset = req->target_query_offset;
+  msg.data.target_query_len = req->target_query_len;
+  msg.data.headers_offset = req->headers_offset;
+  msg.data.headers_len = req->headers_len;
+  msg.data.body_offset = req->body_offset;
+  msg.data.body_len = req->body_len;
+  msg.data.headers_ctx = pointer_to_uword (req->headers);
+  msg.data.upgrade_proto = req->upgrade_proto;
+
+  /* msg header followed by the raw message bytes */
+  svm_fifo_seg_t segs[2] = { { (u8 *) &msg, sizeof (msg) }, { rx_buf, len } };
+
+  http_io_as_write_segs (req, segs, 2);
+
+  body_sent = len - req->control_data_len;
+  req->to_recv = req->body_len - body_sent;
+  if (req->to_recv == 0)
+    {
+      /* drop everything, we do not support pipelining */
+      http_io_ts_drain_all (hc);
+      /* all sent, we are done */
+      http_req_state_change (req, HTTP_REQ_STATE_WAIT_APP_REPLY);
+    }
+  else
+    {
+      http_io_ts_drain (hc, len);
+      /* stream rest of the response body */
+      http_req_state_change (req, HTTP_REQ_STATE_TRANSPORT_IO_MORE_DATA);
+    }
+
+  http_app_worker_rx_notify (req);
+  http_io_ts_after_read (hc, 1);
+
+  return HTTP_SM_STOP;
+
+error:
+  http_io_ts_drain_all (hc);
+  http_io_ts_after_read (hc, 1);
+  http1_send_error (hc, ec, 0);
+  session_transport_closing_notify (&req->connection);
+  http_disconnect_transport (hc);
+
+  return HTTP_SM_ERROR;
+}
+
+/* State handler: stream remaining message body bytes from the transport
+ * to the app fifo until req->to_recv reaches zero, then flip back to
+ * WAIT_APP_REPLY (server) or WAIT_APP_METHOD (client). Receiving more
+ * than expected is treated as a protocol error. */
+static http_sm_result_t
+http1_req_state_transport_io_more_data (http_conn_t *hc, http_req_t *req,
+					transport_send_params_t *sp)
+{
+  u32 max_len, max_deq, max_enq, n_segs = 2;
+  svm_fifo_seg_t segs[n_segs];
+  int n_written;
+
+  max_deq = http_io_ts_max_read (hc);
+  if (max_deq == 0)
+    {
+      HTTP_DBG (1, "no data to deq");
+      return HTTP_SM_STOP;
+    }
+
+  max_enq = http_io_as_max_write (req);
+  if (max_enq == 0)
+    {
+      HTTP_DBG (1, "app's rx fifo full");
+      http_io_as_add_want_deq_ntf (req);
+      return HTTP_SM_STOP;
+    }
+
+  /* zero-copy: pass fifo segments straight through to the app */
+  max_len = clib_min (max_enq, max_deq);
+  http_io_ts_read_segs (hc, segs, &n_segs, max_len);
+
+  n_written = http_io_as_write_segs (req, segs, n_segs);
+
+  if (n_written > req->to_recv)
+    {
+      clib_warning ("http protocol error: received more data than expected");
+      session_transport_closing_notify (&req->connection);
+      http_disconnect_transport (hc);
+      http_req_state_change (req, HTTP_REQ_STATE_WAIT_APP_METHOD);
+      return HTTP_SM_ERROR;
+    }
+  req->to_recv -= n_written;
+  http_io_ts_drain (hc, n_written);
+  HTTP_DBG (1, "drained %d from ts; remains %lu", n_written, req->to_recv);
+
+  /* Finished transaction:
+   * server back to HTTP_REQ_STATE_WAIT_APP_REPLY
+   * client to HTTP_REQ_STATE_WAIT_APP_METHOD */
+  if (req->to_recv == 0)
+    http_req_state_change (req, (hc->flags & HTTP_CONN_F_IS_SERVER) ?
+				  HTTP_REQ_STATE_WAIT_APP_REPLY :
+				  HTTP_REQ_STATE_WAIT_APP_METHOD);
+
+  http_app_worker_rx_notify (req);
+
+  http_io_ts_after_read (hc, 0);
+
+  return HTTP_SM_STOP;
+}
+
+/* State handler for an established (TCP) tunnel: opaquely relay whatever
+ * is readable from the transport into the app fifo, bounded by app fifo
+ * space, and notify the app worker. */
+static http_sm_result_t
+http1_req_state_tunnel_rx (http_conn_t *hc, http_req_t *req,
+			   transport_send_params_t *sp)
+{
+  u32 max_deq, max_enq, max_read, n_segs = 2;
+  svm_fifo_seg_t segs[n_segs];
+  int n_written = 0;
+
+  HTTP_DBG (1, "tunnel received data from client");
+
+  max_deq = http_io_ts_max_read (hc);
+  if (PREDICT_FALSE (max_deq == 0))
+    {
+      HTTP_DBG (1, "max_deq == 0");
+      return HTTP_SM_STOP;
+    }
+  max_enq = http_io_as_max_write (req);
+  if (max_enq == 0)
+    {
+      HTTP_DBG (1, "app's rx fifo full");
+      http_io_as_add_want_deq_ntf (req);
+      return HTTP_SM_STOP;
+    }
+  /* zero-copy segment pass-through, no HTTP framing in tunnel mode */
+  max_read = clib_min (max_enq, max_deq);
+  http_io_ts_read_segs (hc, segs, &n_segs, max_read);
+  n_written = http_io_as_write_segs (req, segs, n_segs);
+  http_io_ts_drain (hc, n_written);
+  HTTP_DBG (1, "transfered %u bytes", n_written);
+  http_app_worker_rx_notify (req);
+  http_io_ts_after_read (hc, 0);
+
+  return HTTP_SM_STOP;
+}
+
+/* State handler for a connect-udp tunnel in datagram mode: decapsulate
+ * RFC9297 capsules from the transport stream and forward each complete
+ * UDP payload to the app as a session datagram (header + payload).
+ * Unknown capsule types are skipped (possibly across several reads via
+ * req->to_skip); an invalid capsule aborts the session. */
+static http_sm_result_t
+http1_req_state_udp_tunnel_rx (http_conn_t *hc, http_req_t *req,
+			       transport_send_params_t *sp)
+{
+  u32 to_deq, capsule_size, dgram_size, n_read, n_written = 0;
+  int rv;
+  u8 payload_offset = 0;
+  u64 payload_len = 0;
+  session_dgram_hdr_t hdr;
+  u8 *buf = 0;
+
+  HTTP_DBG (1, "udp tunnel received data from client");
+
+  buf = http_get_rx_buf (hc);
+  to_deq = http_io_ts_max_read (hc);
+
+  while (to_deq > 0)
+    {
+      /* some bytes remaining to skip? */
+      if (PREDICT_FALSE (req->to_skip))
+	{
+	  if (req->to_skip >= to_deq)
+	    {
+	      http_io_ts_drain (hc, to_deq);
+	      req->to_skip -= to_deq;
+	      goto done;
+	    }
+	  else
+	    {
+	      http_io_ts_drain (hc, req->to_skip);
+	      req->to_skip = 0;
+	    }
+	}
+      /* peek enough bytes to parse the capsule header */
+      n_read = http_io_ts_read (hc, buf, HTTP_CAPSULE_HEADER_MAX_SIZE, 1);
+      rv = http_decap_udp_payload_datagram (buf, n_read, &payload_offset,
+					    &payload_len);
+      HTTP_DBG (1, "rv=%d, payload_offset=%u, payload_len=%llu", rv,
+		payload_offset, payload_len);
+      if (PREDICT_FALSE (rv != 0))
+	{
+	  if (rv < 0)
+	    {
+	      /* capsule datagram is invalid (session need to be aborted) */
+	      http_io_ts_drain_all (hc);
+	      session_transport_closing_notify (&req->connection);
+	      session_transport_closed_notify (&req->connection);
+	      http_disconnect_transport (hc);
+	      return HTTP_SM_STOP;
+	    }
+	  else
+	    {
+	      /* unknown capsule should be skipped */
+	      if (payload_len <= to_deq)
+		{
+		  http_io_ts_drain (hc, payload_len);
+		  to_deq -= payload_len;
+		  continue;
+		}
+	      else
+		{
+		  /* rest of the capsule not here yet, skip it later */
+		  http_io_ts_drain (hc, to_deq);
+		  req->to_skip = payload_len - to_deq;
+		  goto done;
+		}
+	    }
+	}
+      capsule_size = payload_offset + payload_len;
+      /* check if we have the full capsule */
+      if (PREDICT_FALSE (to_deq < capsule_size))
+	{
+	  HTTP_DBG (1, "capsule not complete");
+	  goto done;
+	}
+
+      dgram_size = sizeof (hdr) + payload_len;
+      if (http_io_as_max_write (req) < dgram_size)
+	{
+	  HTTP_DBG (1, "app's rx fifo full");
+	  http_io_as_add_want_deq_ntf (req);
+	  goto done;
+	}
+
+      /* drop the capsule header, keep only the payload */
+      http_io_ts_drain (hc, payload_offset);
+
+      /* read capsule payload */
+      http_io_ts_read (hc, buf, payload_len, 0);
+
+      hdr.data_length = payload_len;
+      hdr.data_offset = 0;
+
+      /* send datagram header and payload */
+      svm_fifo_seg_t segs[2] = { { (u8 *) &hdr, sizeof (hdr) },
+				 { buf, payload_len } };
+      http_io_as_write_segs (req, segs, 2);
+
+      n_written += dgram_size;
+      to_deq -= capsule_size;
+    }
+
+done:
+  HTTP_DBG (1, "written %lu bytes", n_written);
+
+  if (n_written)
+    http_app_worker_rx_notify (req);
+
+  http_io_ts_after_read (hc, 0);
+
+  return HTTP_SM_STOP;
+}
+
+/*************************************/
+/* request state machine handlers TX */
+/*************************************/
+
+/* Server-side state handler: take the app's HTTP_MSG_REPLY, build the
+ * status line plus protocol headers (Date, Server, Content-Length or
+ * Connection/Upgrade for tunnels) and app headers, write them to the
+ * transport and move to APP_IO_MORE_DATA (body follows), TUNNEL /
+ * UDP_TUNNEL, or back to WAIT_TRANSPORT_METHOD. */
+static http_sm_result_t
+http1_req_state_wait_app_reply (http_conn_t *hc, http_req_t *req,
+				transport_send_params_t *sp)
+{
+  u8 *response;
+  u32 max_enq;
+  http_status_code_t sc;
+  http_msg_t msg;
+  http_sm_result_t sm_result = HTTP_SM_ERROR;
+  http_req_state_t next_state = HTTP_REQ_STATE_WAIT_TRANSPORT_METHOD;
+
+  http_get_app_msg (req, &msg);
+
+  if (msg.data.type > HTTP_MSG_DATA_PTR)
+    {
+      clib_warning ("no data");
+      sc = HTTP_STATUS_INTERNAL_ERROR;
+      goto error;
+    }
+
+  if (msg.type != HTTP_MSG_REPLY)
+    {
+      clib_warning ("unexpected message type %d", msg.type);
+      sc = HTTP_STATUS_INTERNAL_ERROR;
+      goto error;
+    }
+
+  if (msg.code >= HTTP_N_STATUS)
+    {
+      clib_warning ("unsupported status code: %d", msg.code);
+      return HTTP_SM_ERROR;
+    }
+
+  response = http_get_tx_buf (hc);
+  /*
+   * Add "protocol layer" headers:
+   * - current time
+   * - server name
+   * - data length
+   */
+  response =
+    format (response, response_template, http_status_code_str[msg.code],
+	    /* Date */
+	    format_http_time_now, hc,
+	    /* Server */
+	    hc->app_name);
+
+  /* RFC9110 8.6: A server MUST NOT send Content-Length header field in a
+   * 2xx (Successful) response to CONNECT or with a status code of 101
+   * (Switching Protocols). */
+  if (req->is_tunnel && (http_status_code_str[msg.code][0] == '2' ||
+			 msg.code == HTTP_STATUS_SWITCHING_PROTOCOLS))
+    {
+      ASSERT (msg.data.body_len == 0);
+      next_state = HTTP_REQ_STATE_TUNNEL;
+      if (req->upgrade_proto > HTTP_UPGRADE_PROTO_NA)
+	{
+	  response = format (response, connection_upgrade_template,
+			     http1_upgrade_proto_str[req->upgrade_proto]);
+	  if (req->upgrade_proto == HTTP_UPGRADE_PROTO_CONNECT_UDP &&
+	      hc->udp_tunnel_mode == HTTP_UDP_TUNNEL_DGRAM)
+	    next_state = HTTP_REQ_STATE_UDP_TUNNEL;
+	}
+      /* cleanup some stuff we don't need anymore in tunnel mode */
+      vec_free (req->headers);
+      http_buffer_free (&req->tx_buf);
+      req->to_skip = 0;
+    }
+  else
+    response = format (response, content_len_template, msg.data.body_len);
+
+  /* Add headers from app (if any) */
+  if (msg.data.headers_len)
+    {
+      HTTP_DBG (0, "got headers from app, len %d", msg.data.headers_len);
+      http1_write_app_headers (req, &msg, &response);
+    }
+  /* Add empty line after headers */
+  response = format (response, "\r\n");
+  HTTP_DBG (3, "%v", response);
+
+  /* control data must fit in the ts tx fifo in one piece */
+  max_enq = http_io_ts_max_write (hc, sp);
+  if (max_enq < vec_len (response))
+    {
+      clib_warning ("sending status-line and headers failed!");
+      sc = HTTP_STATUS_INTERNAL_ERROR;
+      goto error;
+    }
+  http_io_ts_write (hc, response, vec_len (response), sp);
+
+  if (msg.data.body_len)
+    {
+      /* Start sending the actual data */
+      http_req_tx_buffer_init (req, &msg);
+      next_state = HTTP_REQ_STATE_APP_IO_MORE_DATA;
+      sm_result = HTTP_SM_CONTINUE;
+    }
+  else
+    {
+      /* No response body, we are done */
+      sm_result = HTTP_SM_STOP;
+    }
+
+  http_req_state_change (req, next_state);
+
+  http_io_ts_after_write (hc, 0);
+  return sm_result;
+
+error:
+  http1_send_error (hc, sc, sp);
+  session_transport_closing_notify (&req->connection);
+  http_disconnect_transport (hc);
+  return HTTP_SM_STOP;
+}
+
/* Client TX: app enqueued a request message; build and send the request
 * line and headers (only GET and POST are supported), then either wait for
 * the reply or start streaming the POST body. */
static http_sm_result_t
http1_req_state_wait_app_method (http_conn_t *hc, http_req_t *req,
				 transport_send_params_t *sp)
{
  http_msg_t msg;
  u8 *request = 0, *target;
  u32 max_enq;
  http_sm_result_t sm_result = HTTP_SM_ERROR;
  http_req_state_t next_state;

  http_get_app_msg (req, &msg);

  if (msg.data.type > HTTP_MSG_DATA_PTR)
    {
      clib_warning ("no data");
      goto error;
    }

  if (msg.type != HTTP_MSG_REQUEST)
    {
      clib_warning ("unexpected message type %d", msg.type);
      goto error;
    }

  /* read request target */
  target = http_get_app_target (req, &msg);

  request = http_get_tx_buf (hc);
  /* currently we support only GET and POST method */
  if (msg.method_type == HTTP_REQ_GET)
    {
      if (msg.data.body_len)
	{
	  clib_warning ("GET request shouldn't include data");
	  goto error;
	}
      /*
       * Add "protocol layer" headers:
       * - host
       * - user agent
       */
      request = format (request, get_request_template,
			/* target */
			target,
			/* Host */
			hc->host,
			/* User-Agent */
			hc->app_name);

      next_state = HTTP_REQ_STATE_WAIT_TRANSPORT_REPLY;
      sm_result = HTTP_SM_STOP;
    }
  else if (msg.method_type == HTTP_REQ_POST)
    {
      if (!msg.data.body_len)
	{
	  clib_warning ("POST request should include data");
	  goto error;
	}
      /*
       * Add "protocol layer" headers:
       * - host
       * - user agent
       * - content length
       */
      request = format (request, post_request_template,
			/* target */
			target,
			/* Host */
			hc->host,
			/* User-Agent */
			hc->app_name,
			/* Content-Length */
			msg.data.body_len);

      http_req_tx_buffer_init (req, &msg);

      next_state = HTTP_REQ_STATE_APP_IO_MORE_DATA;
      sm_result = HTTP_SM_CONTINUE;
    }
  else
    {
      clib_warning ("unsupported method %d", msg.method_type);
      goto error;
    }

  /* Add headers from app (if any) */
  if (msg.data.headers_len)
    {
      HTTP_DBG (0, "got headers from app, len %d", msg.data.headers_len);
      http1_write_app_headers (req, &msg, &request);
    }
  /* Add empty line after headers */
  request = format (request, "\r\n");
  HTTP_DBG (3, "%v", request);

  max_enq = http_io_ts_max_write (hc, sp);
  if (max_enq < vec_len (request))
    {
      /* request line + headers must fit into the ts tx fifo in one piece */
      clib_warning ("sending request-line and headers failed!");
      sm_result = HTTP_SM_ERROR;
      goto error;
    }
  http_io_ts_write (hc, request, vec_len (request), sp);

  http_req_state_change (req, next_state);

  http_io_ts_after_write (hc, 0);
  goto done;

error:
  /* client side sends no error response: drop app data and tear down */
  http_io_as_drain_all (req);
  session_transport_closing_notify (&req->connection);
  session_transport_closed_notify (&req->connection);
  http_disconnect_transport (hc);

done:
  return sm_result;
}
+
/* TX: stream the message body from the app's tx buffer to the transport
 * session, as much as the ts tx fifo currently allows. */
static http_sm_result_t
http1_req_state_app_io_more_data (http_conn_t *hc, http_req_t *req,
				  transport_send_params_t *sp)
{
  u32 max_write, n_read, n_segs, n_written = 0;
  http_buffer_t *hb = &req->tx_buf;
  svm_fifo_seg_t *seg;
  u8 finished = 0;

  ASSERT (http_buffer_bytes_left (hb) > 0);
  max_write = http_io_ts_max_write (hc, sp);
  if (max_write == 0)
    {
      HTTP_DBG (1, "ts tx fifo full");
      goto check_fifo;
    }

  n_read = http_buffer_get_segs (hb, max_write, &seg, &n_segs);
  if (n_read == 0)
    {
      HTTP_DBG (1, "no data to deq");
      goto check_fifo;
    }

  n_written = http_io_ts_write_segs (hc, seg, n_segs, sp);

  http_buffer_drain (hb, n_written);
  finished = http_buffer_bytes_left (hb) == 0;

  if (finished)
    {
      /* Finished transaction:
       * server back to HTTP_REQ_STATE_WAIT_TRANSPORT_METHOD
       * client to HTTP_REQ_STATE_WAIT_TRANSPORT_REPLY */
      http_req_state_change (req, (hc->flags & HTTP_CONN_F_IS_SERVER) ?
				    HTTP_REQ_STATE_WAIT_TRANSPORT_METHOD :
				    HTTP_REQ_STATE_WAIT_TRANSPORT_REPLY);
      http_buffer_free (hb);
    }
  http_io_ts_after_write (hc, finished);

check_fifo:
  http1_check_and_deschedule (hc, req, sp);
  return HTTP_SM_STOP;
}
+
+static http_sm_result_t
+http1_req_state_tunnel_tx (http_conn_t *hc, http_req_t *req,
+ transport_send_params_t *sp)
+{
+ u32 max_deq, max_enq, max_read, n_segs = 2;
+ svm_fifo_seg_t segs[n_segs];
+ int n_written = 0;
+
+ HTTP_DBG (1, "tunnel received data from target");
+
+ max_deq = http_io_as_max_read (req);
+ if (PREDICT_FALSE (max_deq == 0))
+ {
+ HTTP_DBG (1, "max_deq == 0");
+ goto check_fifo;
+ }
+ max_enq = http_io_ts_max_write (hc, sp);
+ if (max_enq == 0)
+ {
+ HTTP_DBG (1, "ts tx fifo full");
+ goto check_fifo;
+ }
+ max_read = clib_min (max_enq, max_deq);
+ http_io_as_read_segs (req, segs, &n_segs, max_read);
+ n_written = http_io_ts_write_segs (hc, segs, n_segs, sp);
+ http_io_as_drain (req, n_written);
+ http_io_ts_after_write (hc, 0);
+
+check_fifo:
+ http1_check_and_deschedule (hc, req, sp);
+ return HTTP_SM_STOP;
+}
+
/* TX for connect-udp in datagram mode: wrap each datagram queued by the
 * app into an HTTP capsule and forward it to the transport session. */
static http_sm_result_t
http1_req_state_udp_tunnel_tx (http_conn_t *hc, http_req_t *req,
			       transport_send_params_t *sp)
{
  u32 to_deq, capsule_size, dgram_size;
  u8 written = 0;
  session_dgram_hdr_t hdr;
  u8 *buf;
  u8 *payload;

  HTTP_DBG (1, "udp tunnel received data from target");

  buf = http_get_tx_buf (hc);
  to_deq = http_io_as_max_read (req);

  while (to_deq > 0)
    {
      /* read datagram header (last arg presumably a peek flag — drained
       * below together with the payload; confirm) */
      http_io_as_read (req, (u8 *) &hdr, sizeof (hdr), 1);
      ASSERT (hdr.data_length <= HTTP_UDP_PAYLOAD_MAX_LEN);
      dgram_size = hdr.data_length + SESSION_CONN_HDR_LEN;
      ASSERT (to_deq >= dgram_size);

      if (http_io_ts_max_write (hc, sp) <
	  (hdr.data_length + HTTP_UDP_PROXY_DATAGRAM_CAPSULE_OVERHEAD))
	{
	  /* the whole capsule must fit; retry when the fifo drains */
	  HTTP_DBG (1, "ts tx fifo full");
	  goto done;
	}

      /* create capsule header */
      payload = http_encap_udp_payload_datagram (buf, hdr.data_length);
      capsule_size = (payload - buf) + hdr.data_length;
      /* read payload */
      http_io_as_read (req, payload, hdr.data_length, 1);
      http_io_as_drain (req, dgram_size);
      /* send capsule */
      http_io_ts_write (hc, buf, capsule_size, sp);

      written = 1;
      to_deq -= dgram_size;
    }

done:
  if (written)
    http_io_ts_after_write (hc, 0);
  http1_check_and_deschedule (hc, req, sp);
  return HTTP_SM_STOP;
}
+
+/*************************/
+/* request state machine */
+/*************************/
+
/* TX handlers indexed by http_req_state_t; 0 marks states in which app tx
 * events are invalid */
static http_sm_handler tx_state_funcs[HTTP_REQ_N_STATES] = {
  0, /* idle */
  http1_req_state_wait_app_method,
  0, /* wait transport reply */
  0, /* transport io more data */
  0, /* wait transport method */
  http1_req_state_wait_app_reply,
  http1_req_state_app_io_more_data,
  http1_req_state_tunnel_tx,
  http1_req_state_udp_tunnel_tx,
};
+
/* RX handlers indexed by http_req_state_t; 0 marks states in which
 * transport rx events are invalid */
static http_sm_handler rx_state_funcs[HTTP_REQ_N_STATES] = {
  0, /* idle */
  0, /* wait app method */
  http1_req_state_wait_transport_reply,
  http1_req_state_transport_io_more_data,
  http1_req_state_wait_transport_method,
  0, /* wait app reply */
  0, /* app io more data */
  http1_req_state_tunnel_rx,
  http1_req_state_udp_tunnel_rx,
};
+
+static_always_inline int
+http1_req_state_is_tx_valid (http_req_t *req)
+{
+ return tx_state_funcs[req->state] ? 1 : 0;
+}
+
+static_always_inline int
+http1_req_state_is_rx_valid (http_req_t *req)
+{
+ return rx_state_funcs[req->state] ? 1 : 0;
+}
+
+static_always_inline void
+http1_req_run_state_machine (http_conn_t *hc, http_req_t *req,
+ transport_send_params_t *sp, u8 is_tx)
+{
+ http_sm_result_t res;
+
+ do
+ {
+ if (is_tx)
+ res = tx_state_funcs[req->state](hc, req, sp);
+ else
+ res = rx_state_funcs[req->state](hc, req, 0);
+ if (res == HTTP_SM_ERROR)
+ {
+ HTTP_DBG (1, "error in state machine %d", res);
+ return;
+ }
+ }
+ while (res == HTTP_SM_CONTINUE);
+
+ /* Reset the session expiration timer */
+ http_conn_timer_update (hc);
+}
+
+/*****************/
+/* http core VFT */
+/*****************/
+
+static u32
+http1_hc_index_get_by_req_index (u32 req_index,
+ clib_thread_index_t thread_index)
+{
+ http_req_t *req;
+
+ req = http1_req_get (req_index, thread_index);
+ return req->hr_hc_index;
+}
+
+static transport_connection_t *
+http1_req_get_connection (u32 req_index, clib_thread_index_t thread_index)
+{
+ http_req_t *req;
+ req = http1_req_get (req_index, thread_index);
+ return &req->connection;
+}
+
/* Format helper: one-line summary of a request and its transport session,
 * used by http1_format_req for session CLI output. */
static u8 *
format_http1_req (u8 *s, va_list *args)
{
  http_req_t *req = va_arg (*args, http_req_t *);
  http_conn_t *hc = va_arg (*args, http_conn_t *);
  session_t *ts;

  ts = session_get_from_handle (hc->hc_tc_session_handle);
  s = format (s, "[%d:%d][H1] app_wrk %u hc_index %u ts %d:%d",
	      req->c_thread_index, req->c_s_index, req->hr_pa_wrk_index,
	      req->hr_hc_index, ts->thread_index, ts->session_index);

  return s;
}
+
/* Engine VFT format hook: render one request line for the session CLI,
 * appending connection state when verbose. */
static u8 *
http1_format_req (u8 *s, va_list *args)
{
  u32 req_index = va_arg (*args, u32);
  clib_thread_index_t thread_index = va_arg (*args, u32);
  http_conn_t *hc = va_arg (*args, http_conn_t *);
  u32 verbose = va_arg (*args, u32);
  http_req_t *req;

  req = http1_req_get (req_index, thread_index);

  s = format (s, "%-" SESSION_CLI_ID_LEN "U", format_http1_req, req, hc);
  if (verbose)
    {
      s =
	format (s, "%-" SESSION_CLI_STATE_LEN "U", format_http_conn_state, hc);
      if (verbose > 1)
	s = format (s, "\n");
    }

  return s;
}
+
/* App produced tx data for this request: validate the request tx state
 * (tolerating early server replies) and run the TX state machine. */
static void
http1_app_tx_callback (http_conn_t *hc, u32 req_index,
		       transport_send_params_t *sp)
{
  http_req_t *req;

  req = http1_req_get (req_index, hc->c_thread_index);

  if (!http1_req_state_is_tx_valid (req))
    {
      /* Sometimes the server apps can send the response earlier
       * than expected (e.g when rejecting a bad request)*/
      if (req->state == HTTP_REQ_STATE_TRANSPORT_IO_MORE_DATA &&
	  (hc->flags & HTTP_CONN_F_IS_SERVER))
	{
	  /* drop the remaining request body and accept the early reply */
	  http_io_ts_drain_all (hc);
	  http_req_state_change (req, HTTP_REQ_STATE_WAIT_APP_REPLY);
	}
      else
	{
	  clib_warning ("hc [%u]%x invalid tx state: http req state "
			"'%U', session state '%U'",
			hc->c_thread_index, hc->hc_hc_index,
			format_http_req_state, req->state,
			format_http_conn_state, hc);
	  /* nothing can be done with app data in this state, discard it */
	  http_io_as_drain_all (req);
	  return;
	}
    }

  HTTP_DBG (1, "run state machine");
  http1_req_run_state_machine (hc, req, sp, 1);
}
+
+static void
+http1_app_rx_evt_callback (http_conn_t *hc, u32 req_index,
+ clib_thread_index_t thread_index)
+{
+ http_req_t *req;
+
+ req = http1_req_get (req_index, thread_index);
+
+ if (req->state == HTTP_REQ_STATE_TUNNEL)
+ http1_req_state_tunnel_rx (hc, req, 0);
+}
+
/* App initiated close: confirm immediately if there is nothing left to
 * send (or transport already closed), otherwise let tx drain first. */
static void
http1_app_close_callback (http_conn_t *hc, u32 req_index,
			  clib_thread_index_t thread_index)
{
  http_req_t *req;

  req = http1_req_get_if_valid (req_index, thread_index);
  if (!req)
    {
      HTTP_DBG (1, "req already deleted");
      return;
    }
  /* Nothing more to send, confirm close */
  if (!http_io_as_max_read (req) || hc->state == HTTP_CONN_STATE_CLOSED)
    {
      HTTP_DBG (1, "nothing more to send, confirm close");
      session_transport_closed_notify (&req->connection);
      http_disconnect_transport (hc);
    }
  else
    {
      /* Wait for all data to be written to ts */
      hc->state = HTTP_CONN_STATE_APP_CLOSED;
    }
}
+
+static void
+http1_app_reset_callback (http_conn_t *hc, u32 req_index,
+ clib_thread_index_t thread_index)
+{
+ http_req_t *req;
+ req = http1_req_get (req_index, thread_index);
+ session_transport_closed_notify (&req->connection);
+ http_disconnect_transport (hc);
+}
+
+static int
+http1_transport_connected_callback (http_conn_t *hc)
+{
+ http_req_t *req;
+
+ ASSERT (hc->flags & HTTP_CONN_F_NO_APP_SESSION);
+
+ req = http1_conn_alloc_req (hc);
+ http_req_state_change (req, HTTP_REQ_STATE_WAIT_APP_METHOD);
+ return http_conn_established (hc, req);
+}
+
/* Data arrived from the transport: lazily create the request context on
 * the first server-side rx, validate state, then run the RX state machine. */
static void
http1_transport_rx_callback (http_conn_t *hc)
{
  http_req_t *req;

  if (!(hc->flags & HTTP_CONN_F_HAS_REQUEST))
    {
      ASSERT (hc->flags & HTTP_CONN_F_IS_SERVER);
      /* first request - create request ctx and notify app about new conn */
      req = http1_conn_alloc_req (hc);
      http_conn_accept_request (hc, req);
      http_req_state_change (req, HTTP_REQ_STATE_WAIT_TRANSPORT_METHOD);
      hc->flags &= ~HTTP_CONN_F_NO_APP_SESSION;
    }
  else
    req = http1_conn_get_req (hc);

  if (!http1_req_state_is_rx_valid (req))
    {
      /* warn only when there was actual data to consume */
      if (http_io_ts_max_read (hc))
	clib_warning ("hc [%u]%x invalid rx state: http req state "
		      "'%U', session state '%U'",
		      hc->c_thread_index, hc->hc_hc_index,
		      format_http_req_state, req->state,
		      format_http_conn_state, hc);
      http_io_ts_drain_all (hc);
      return;
    }

  HTTP_DBG (1, "run state machine");
  http1_req_run_state_machine (hc, req, 0, 0);
}
+
+static void
+http1_transport_close_callback (http_conn_t *hc)
+{
+ if (!(hc->flags & HTTP_CONN_F_HAS_REQUEST))
+ return;
+ /* Nothing more to rx, propagate to app */
+ if (!http_io_ts_max_read (hc))
+ {
+ http_req_t *req = http1_conn_get_req (hc);
+ session_transport_closing_notify (&req->connection);
+ }
+}
+
+static void
+http1_transport_reset_callback (http_conn_t *hc)
+{
+ if (!(hc->flags & HTTP_CONN_F_HAS_REQUEST))
+ return;
+ http_req_t *req = http1_conn_get_req (hc);
+ session_transport_reset_notify (&req->connection);
+}
+
+static void
+http1_transport_conn_reschedule_callback (http_conn_t *hc)
+{
+ ASSERT (hc->flags & HTTP_CONN_F_HAS_REQUEST);
+ http_req_t *req = http1_conn_get_req (hc);
+ transport_connection_reschedule (&req->connection);
+}
+
+static void
+http1_conn_cleanup_callback (http_conn_t *hc)
+{
+ http_req_t *req;
+ if (!(hc->flags & HTTP_CONN_F_HAS_REQUEST))
+ return;
+
+ req = http1_conn_get_req (hc);
+ session_transport_delete_notify (&req->connection);
+ http1_conn_free_req (hc);
+}
+
+static void
+http1_enable_callback (void)
+{
+ http1_main_t *h1m = &http1_main;
+ vlib_thread_main_t *vtm = vlib_get_thread_main ();
+ u32 num_threads;
+
+ num_threads = 1 /* main thread */ + vtm->n_threads;
+
+ vec_validate (h1m->req_pool, num_threads - 1);
+}
+
/* HTTP/1 engine virtual function table registered with the http core */
const static http_engine_vft_t http1_engine = {
  .name = "http1",
  .hc_index_get_by_req_index = http1_hc_index_get_by_req_index,
  .req_get_connection = http1_req_get_connection,
  .format_req = http1_format_req,
  .app_tx_callback = http1_app_tx_callback,
  .app_rx_evt_callback = http1_app_rx_evt_callback,
  .app_close_callback = http1_app_close_callback,
  .app_reset_callback = http1_app_reset_callback,
  .transport_connected_callback = http1_transport_connected_callback,
  .transport_rx_callback = http1_transport_rx_callback,
  .transport_close_callback = http1_transport_close_callback,
  .transport_conn_reschedule_callback =
    http1_transport_conn_reschedule_callback,
  .transport_reset_callback = http1_transport_reset_callback,
  .conn_cleanup_callback = http1_conn_cleanup_callback,
  .enable_callback = http1_enable_callback,
};
+
/* Plugin init: register the HTTP/1 engine with the http transport core,
 * after the core itself has initialized */
static clib_error_t *
http1_init (vlib_main_t *vm)
{
  http_register_engine (&http1_engine, HTTP_VERSION_1);
  return 0;
}

VLIB_INIT_FUNCTION (http1_init) = {
  .runs_after = VLIB_INITS ("http_transport_init"),
};
diff --git a/src/plugins/http/http2/frame.c b/src/plugins/http/http2/frame.c
new file mode 100644
index 00000000000..580ffff22c5
--- /dev/null
+++ b/src/plugins/http/http2/frame.c
@@ -0,0 +1,339 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2025 Cisco Systems, Inc.
+ */
+
+#include <vppinfra/string.h>
+#include <http/http2/frame.h>
+
+#define MAX_U24 0xFFFFFF
+
+static_always_inline u8 *
+http2_decode_u24 (u8 *src, u32 *value)
+{
+ *value = 0;
+ *value = (u32) (src[0] << 16) | (u32) (src[1] << 8) | (u32) src[2];
+ return src + 3;
+}
+
+static_always_inline u8 *
+http2_encode_u24 (u8 *dst, u32 value)
+{
+ ASSERT (value <= MAX_U24);
+ *dst++ = (value >> 16) & 0xFF;
+ *dst++ = (value >> 8) & 0xFF;
+ *dst++ = value & 0xFF;
+ return dst;
+}
+
+/*
+ * RFC9113 section 4.1
+ *
+ * HTTP Frame {
+ * Length (24),
+ * Type (8),
+ * Flags (8),
+ * Reserved (1),
+ * Stream Identifier (31),
+ * Frame Payload (..),
+ * }
+ */
+
+__clib_export void
+http2_frame_header_read (u8 *src, http2_frame_header_t *fh)
+{
+ u32 *stream_id;
+ src = http2_decode_u24 (src, &fh->length);
+ fh->type = *src++;
+ fh->flags = *src++;
+ stream_id = (u32 *) src;
+ fh->stream_id = clib_net_to_host_u32 (*stream_id) & 0x7FFFFFFF;
+}
+
+static void
+http2_frame_header_write (http2_frame_header_t *fh, u8 *dst)
+{
+ u32 stream_id;
+
+ dst = http2_encode_u24 (dst, fh->length);
+ *dst++ = fh->type;
+ *dst++ = fh->flags;
+ stream_id = clib_host_to_net_u32 (fh->stream_id);
+ clib_memcpy_fast (dst, &stream_id, sizeof (stream_id));
+}
+
/* Parse a SETTINGS frame payload: a sequence of 6-byte id/value entries
 * (RFC9113 section 6.5). Known identifiers are range-checked and stored;
 * unknown identifiers are ignored, as the RFC requires. */
__clib_export http2_error_t
http2_frame_read_settings (http2_conn_settings_t *settings, u8 *payload,
			   u32 payload_len)
{
  http2_settings_entry_t *entry;
  u32 value;

  while (payload_len >= sizeof (*entry))
    {
      entry = (http2_settings_entry_t *) payload;
      switch (clib_net_to_host_u16 (entry->identifier))
	{
#define _(v, label, member, min, max, default_value, err_code)                \
  case HTTP2_SETTINGS_##label:                                                \
    value = clib_net_to_host_u32 (entry->value);                              \
    if (!(value >= min && value <= max))                                      \
      return err_code;                                                        \
    settings->member = value;                                                 \
    break;
	  foreach_http2_settings
#undef _
	  /* ignore unknown or unsupported identifier */
	  default : break;
	}
      payload_len -= sizeof (*entry);
      payload += sizeof (*entry);
    }

  /* RFC9113 6.5: payload length must be a multiple of 6 */
  if (payload_len != 0)
    return HTTP2_ERROR_FRAME_SIZE_ERROR;

  return HTTP2_ERROR_NO_ERROR;
}
+
+__clib_export void
+http2_frame_write_settings_ack (u8 **dst)
+{
+ http2_frame_header_t fh = { .flags = HTTP2_FRAME_FLAG_ACK,
+ .type = HTTP2_FRAME_TYPE_SETTINGS };
+ u8 *p = http2_frame_header_alloc (dst);
+ http2_frame_header_write (&fh, p);
+}
+
+__clib_export void
+http2_frame_write_settings (http2_settings_entry_t *settings, u8 **dst)
+{
+ u8 *p;
+ u32 length;
+ http2_settings_entry_t *entry, e;
+
+ ASSERT (settings);
+ ASSERT (vec_len (settings) > 0);
+
+ length = vec_len (settings) * sizeof (*entry);
+ http2_frame_header_t fh = { .type = HTTP2_FRAME_TYPE_SETTINGS,
+ .length = length };
+ p = http2_frame_header_alloc (dst);
+ http2_frame_header_write (&fh, p);
+
+ vec_add2 (*dst, p, length);
+ vec_foreach (entry, settings)
+ {
+ e.identifier = clib_host_to_net_u16 (entry->identifier);
+ e.value = clib_host_to_net_u32 (entry->value);
+ clib_memcpy_fast (p, &e, sizeof (e));
+ p += sizeof (e);
+ }
+}
+
+#define WINDOW_UPDATE_LENGTH 4
+
+__clib_export http2_error_t
+http2_frame_read_window_update (u32 *increment, u8 *payload, u32 payload_len)
+{
+ u32 *value;
+
+ if (payload_len != WINDOW_UPDATE_LENGTH)
+ return HTTP2_ERROR_FRAME_SIZE_ERROR;
+
+ value = (u32 *) payload;
+
+ if (*value == 0)
+ return HTTP2_ERROR_PROTOCOL_ERROR;
+
+ *increment = clib_net_to_host_u32 (*value) & 0x7FFFFFFF;
+ return HTTP2_ERROR_NO_ERROR;
+}
+
+__clib_export void
+http2_frame_write_window_update (u32 increment, u32 stream_id, u8 **dst)
+{
+ u8 *p;
+ u32 value;
+
+ ASSERT (increment > 0 && increment <= 0x7FFFFFFF);
+
+ http2_frame_header_t fh = { .type = HTTP2_FRAME_TYPE_WINDOW_UPDATE,
+ .length = WINDOW_UPDATE_LENGTH,
+ .stream_id = stream_id };
+ p = http2_frame_header_alloc (dst);
+ http2_frame_header_write (&fh, p);
+
+ vec_add2 (*dst, p, WINDOW_UPDATE_LENGTH);
+ value = clib_host_to_net_u32 (increment);
+ clib_memcpy_fast (p, &value, WINDOW_UPDATE_LENGTH);
+}
+
+#define RST_STREAM_LENGTH 4
+
+__clib_export http2_error_t
+http2_frame_read_rst_stream (u32 *error_code, u8 *payload, u32 payload_len)
+{
+ u32 *value;
+
+ if (payload_len != RST_STREAM_LENGTH)
+ return HTTP2_ERROR_FRAME_SIZE_ERROR;
+
+ value = (u32 *) payload;
+
+ *error_code = clib_net_to_host_u32 (*value);
+ return HTTP2_ERROR_NO_ERROR;
+}
+
+__clib_export void
+http2_frame_write_rst_stream (http2_error_t error_code, u32 stream_id,
+ u8 **dst)
+{
+ u8 *p;
+ u32 value;
+
+ ASSERT (stream_id > 0 && stream_id <= 0x7FFFFFFF);
+
+ http2_frame_header_t fh = { .type = HTTP2_FRAME_TYPE_RST_STREAM,
+ .length = RST_STREAM_LENGTH,
+ .stream_id = stream_id };
+ p = http2_frame_header_alloc (dst);
+ http2_frame_header_write (&fh, p);
+
+ vec_add2 (*dst, p, RST_STREAM_LENGTH);
+ value = clib_host_to_net_u32 ((u32) error_code);
+ clib_memcpy_fast (p, &value, RST_STREAM_LENGTH);
+}
+
+#define GOAWAY_MIN_SIZE 8
+
+__clib_export http2_error_t
+http2_frame_read_goaway (u32 *error_code, u32 *last_stream_id, u8 *payload,
+ u32 payload_len)
+{
+ u32 *value;
+
+ if (payload_len < GOAWAY_MIN_SIZE)
+ return HTTP2_ERROR_FRAME_SIZE_ERROR;
+
+ value = (u32 *) payload;
+ *last_stream_id = clib_net_to_host_u32 (*value) & 0x7FFFFFFF;
+ payload += 4;
+
+ value = (u32 *) payload;
+ *error_code = clib_net_to_host_u32 (*value);
+
+ /* TODO: Additional Debug Data */
+ return HTTP2_ERROR_NO_ERROR;
+}
+
+__clib_export void
+http2_frame_write_goaway (http2_error_t error_code, u32 last_stream_id,
+ u8 **dst)
+{
+ u8 *p;
+ u32 value;
+
+ ASSERT (last_stream_id <= 0x7FFFFFFF);
+
+ http2_frame_header_t fh = { .type = HTTP2_FRAME_TYPE_GOAWAY,
+ .length = GOAWAY_MIN_SIZE };
+ p = http2_frame_header_alloc (dst);
+ http2_frame_header_write (&fh, p);
+
+ vec_add2 (*dst, p, GOAWAY_MIN_SIZE);
+ value = clib_host_to_net_u32 (last_stream_id);
+ clib_memcpy_fast (p, &value, 4);
+ p += 4;
+ value = clib_host_to_net_u32 ((u32) error_code);
+ clib_memcpy_fast (p, &value, 4);
+ /* TODO: Additional Debug Data */
+}
+
+void
+http2_frame_write_ping (u8 is_resp, u8 *payload, u8 **dst)
+{
+ u8 *p;
+ http2_frame_header_t fh = {
+ .type = HTTP2_FRAME_TYPE_PING,
+ .length = HTTP2_PING_PAYLOAD_LEN,
+ .flags = is_resp ? HTTP2_FRAME_FLAG_ACK : 0,
+ };
+
+ p = http2_frame_header_alloc (dst);
+ http2_frame_header_write (&fh, p);
+ vec_add2 (*dst, p, HTTP2_PING_PAYLOAD_LEN);
+ clib_memcpy_fast (p, payload, HTTP2_PING_PAYLOAD_LEN);
+}
+
+#define PRIORITY_DATA_LEN 5
+
/* Locate the header block fragment within a HEADERS frame payload,
 * skipping optional padding and the deprecated priority fields. */
__clib_export http2_error_t
http2_frame_read_headers (u8 **headers, u32 *headers_len, u8 *payload,
			  u32 payload_len, u8 flags)
{
  *headers_len = payload_len;

  if (flags & HTTP2_FRAME_FLAG_PADED)
    {
      /* first payload byte is the pad length (RFC9113 6.2) */
      u8 pad_len = *payload++;
      /* RFC9113 4.2: padding that is the payload length or more is a
       * protocol error */
      if ((u32) pad_len >= payload_len)
	return HTTP2_ERROR_PROTOCOL_ERROR;
      *headers_len -= (pad_len + 1);
    }

  if (flags & HTTP2_FRAME_FLAG_PRIORITY)
    {
      if (*headers_len <= PRIORITY_DATA_LEN)
	return HTTP2_ERROR_FRAME_SIZE_ERROR;
      /* just skip, priority scheme defined in RFC7540 is deprecated */
      *headers_len -= PRIORITY_DATA_LEN;
      payload += PRIORITY_DATA_LEN;
    }

  *headers = payload;

  return HTTP2_ERROR_NO_ERROR;
}
+
+__clib_export void
+http2_frame_write_headers_header (u32 headers_len, u32 stream_id, u8 flags,
+ u8 *dst)
+{
+ ASSERT (stream_id > 0 && stream_id <= 0x7FFFFFFF);
+
+ http2_frame_header_t fh = { .type = HTTP2_FRAME_TYPE_HEADERS,
+ .length = headers_len,
+ .flags = flags,
+ .stream_id = stream_id };
+ http2_frame_header_write (&fh, dst);
+}
+
+__clib_export http2_error_t
+http2_frame_read_data (u8 **data, u32 *data_len, u8 *payload, u32 payload_len,
+ u8 flags)
+{
+ *data_len = payload_len;
+
+ if (flags & HTTP2_FRAME_FLAG_PADED)
+ {
+ u8 pad_len = *payload++;
+ if ((u32) pad_len >= payload_len)
+ return HTTP2_ERROR_PROTOCOL_ERROR;
+ *data_len -= (pad_len + 1);
+ }
+
+ *data = payload;
+ return HTTP2_ERROR_NO_ERROR;
+}
+
+__clib_export void
+http2_frame_write_data_header (u32 data_len, u32 stream_id, u8 flags, u8 *dst)
+{
+ ASSERT (stream_id > 0 && stream_id <= 0x7FFFFFFF);
+
+ http2_frame_header_t fh = { .type = HTTP2_FRAME_TYPE_DATA,
+ .length = data_len,
+ .flags = flags,
+ .stream_id = stream_id };
+ http2_frame_header_write (&fh, dst);
+}
diff --git a/src/plugins/http/http2/frame.h b/src/plugins/http/http2/frame.h
new file mode 100644
index 00000000000..53a37c1aa0a
--- /dev/null
+++ b/src/plugins/http/http2/frame.h
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2025 Cisco Systems, Inc.
+ */
+
+#ifndef SRC_PLUGINS_HTTP_HTTP2_FRAME_H_
+#define SRC_PLUGINS_HTTP_HTTP2_FRAME_H_
+
+#include <vppinfra/error.h>
+#include <vppinfra/types.h>
+#include <http/http2/http2.h>
+
+#define HTTP2_FRAME_HEADER_SIZE 9
+#define HTTP2_PING_PAYLOAD_LEN 8
+
+#define foreach_http2_frame_type \
+ _ (0x00, DATA, "DATA") \
+ _ (0x01, HEADERS, "HEADERS") \
+ _ (0x02, PRIORITY, "PRIORITY") \
+ _ (0x03, RST_STREAM, "RST_STREAM") \
+ _ (0x04, SETTINGS, "SETTINGS") \
+ _ (0x05, PUSH_PROMISE, "PUSH_PROMISE") \
+ _ (0x06, PING, "PING") \
+ _ (0x07, GOAWAY, "GOAWAY") \
+ _ (0x08, WINDOW_UPDATE, "WINDOW_UPDATE") \
+ _ (0x09, CONTINUATION, "CONTINUATION")
+
+typedef enum
+{
+#define _(v, n, s) HTTP2_FRAME_TYPE_##n = v,
+ foreach_http2_frame_type
+#undef _
+} __clib_packed http2_frame_type_t;
+
+STATIC_ASSERT_SIZEOF (http2_frame_type_t, 1);
+
+#define foreach_http2_frame_flag \
+ _ (0, NONE) \
+ _ (1, END_STREAM) \
+ _ (1, ACK) \
+ _ (1 << 2, END_HEADERS) \
+ _ (1 << 3, PADED) \
+ _ (1 << 5, PRIORITY)
+
+typedef enum
+{
+#define _(v, n) HTTP2_FRAME_FLAG_##n = v,
+ foreach_http2_frame_flag
+#undef _
+} __clib_packed http2_frame_flag_t;
+
+STATIC_ASSERT_SIZEOF (http2_frame_flag_t, 1);
+
+typedef struct
+{
+ u32 length;
+ http2_frame_type_t type;
+ u8 flags;
+ u32 stream_id;
+} http2_frame_header_t;
+
+typedef struct
+{
+ u16 identifier;
+ u32 value;
+} __clib_packed http2_settings_entry_t;
+
+/**
+ * Parse frame header
+ *
+ * @param src Pointer to the beginning of the frame
+ * @param fh Parsed frame header
+ */
+void http2_frame_header_read (u8 *src, http2_frame_header_t *fh);
+
+/**
+ * Add 9 bytes (frame header size) to the end of given vector
+ *
+ * @param dst Pointer to vector
+ *
+ * @return Pointer to the frame header beginning
+ */
+static_always_inline u8 *
+http2_frame_header_alloc (u8 **dst)
+{
+ u8 *p;
+
+ vec_add2 (*dst, p, HTTP2_FRAME_HEADER_SIZE);
+ return p;
+}
+
+/**
+ * Parse SETTINGS frame payload
+ *
+ * @param settings Vector of HTTP/2 settings
+ * @param payload Payload to parse
+ * @param payload_len Payload length
+ *
+ * @return @c HTTP2_ERROR_NO_ERROR on success, error otherwise
+ */
+http2_error_t http2_frame_read_settings (http2_conn_settings_t *settings,
+ u8 *payload, u32 payload_len);
+
+/**
+ * Write SETTINGS ACK frame to the end of given vector
+ *
+ * @param dst Vector where SETTINGS ACK frame will be written
+ */
+void http2_frame_write_settings_ack (u8 **dst);
+
+/**
+ * Write SETTINGS frame to the end of given vector
+ *
+ * @param settings Vector of HTTP/2 settings
+ * @param dst Vector where SETTINGS frame will be written
+ */
+void http2_frame_write_settings (http2_settings_entry_t *settings, u8 **dst);
+
+/**
+ * Parse WINDOW_UPDATE frame payload
+ *
+ * @param increment Parsed window increment value
+ * @param payload Payload to parse
+ * @param payload_len Payload length
+ *
+ * @return @c HTTP2_ERROR_NO_ERROR on success, error otherwise
+ */
+http2_error_t http2_frame_read_window_update (u32 *increment, u8 *payload,
+ u32 payload_len);
+
+/**
+ * Write WINDOW_UPDATE frame to the end of given vector
+ *
+ * @param increment Window increment value
+ * @param stream_id Stream ID
+ * @param dst Vector where WINDOW_UPDATE frame will be written
+ */
+void http2_frame_write_window_update (u32 increment, u32 stream_id, u8 **dst);
+
+/**
+ * Parse RST_STREAM frame payload
+ *
+ * @param error_code Parsed error code
+ * @param payload Payload to parse
+ * @param payload_len Payload length
+ *
+ * @return @c HTTP2_ERROR_NO_ERROR on success, error otherwise
+ */
+http2_error_t http2_frame_read_rst_stream (u32 *error_code, u8 *payload,
+ u32 payload_len);
+
+/**
+ * Write RST_STREAM frame to the end of given vector
+ *
+ * @param error_code Error code
+ * @param stream_id Stream ID, except 0
+ * @param dst Vector where RST_STREAM frame will be written
+ */
+void http2_frame_write_rst_stream (http2_error_t error_code, u32 stream_id,
+ u8 **dst);
+
+/**
+ * Parse GOAWAY frame payload
+ *
+ * @param last_stream_id Parsed last stream ID
+ * @param error_code Parsed error code
+ * @param payload Payload to parse
+ * @param payload_len Payload length
+ *
+ * @return @c HTTP2_ERROR_NO_ERROR on success, error otherwise
+ */
+http2_error_t http2_frame_read_goaway (u32 *last_stream_id, u32 *error_code,
+ u8 *payload, u32 payload_len);
+
+/**
+ * Write GOAWAY frame to the end of given vector
+ *
+ * @param error_code Error code
+ * @param last_stream_id Last stream ID
+ * @param dst Vector where GOAWAY frame will be written
+ */
+void http2_frame_write_goaway (http2_error_t error_code, u32 last_stream_id,
+ u8 **dst);
+
+/**
+ * Write PING frame to the end of given vector
+ *
+ * @param is_resp Indicate that this is PING response
+ * @param payload 8 bytes of opaque data to send
+ * @param dst Vector where PING frame will be written
+ */
+void http2_frame_write_ping (u8 is_resp, u8 *payload, u8 **dst);
+
+/**
+ * Parse HEADERS frame payload
+ *
+ * @param headers Pointer to header block fragment
+ * @param headers_len Header block fragment length
+ * @param payload Payload to parse
+ * @param payload_len Payload length
+ * @param flags Flag field of frame header
+ *
+ * @return @c HTTP2_ERROR_NO_ERROR on success, error otherwise
+ */
+http2_error_t http2_frame_read_headers (u8 **headers, u32 *headers_len,
+ u8 *payload, u32 payload_len,
+ u8 flags);
+
+/**
+ * Write HEADERS frame header
+ *
+ * @param headers_len Header block fragment length
+ * @param stream_id Stream ID, except 0
+ * @param flags Frame header flags
+ * @param dst Pointer where frame header will be written
+ *
+ * @note Use @c http2_frame_header_alloc before
+ */
+void http2_frame_write_headers_header (u32 headers_len, u32 stream_id,
+ u8 flags, u8 *dst);
+
+/**
+ * Parse DATA frame payload
+ *
+ * @param data Pointer to data
+ * @param data_len Data length
+ * @param payload Payload to parse
+ * @param payload_len Payload length
+ * @param flags Flag field of frame header
+ *
+ * @return @c HTTP2_ERROR_NO_ERROR on success, error otherwise
+ */
+http2_error_t http2_frame_read_data (u8 **data, u32 *data_len, u8 *payload,
+ u32 payload_len, u8 flags);
+
+/**
+ * Write DATA frame header
+ *
+ * @param data_len Data length
+ * @param stream_id Stream ID, except 0
+ * @param flags Frame header flags
+ * @param dst Pointer where frame header will be written
+ */
+void http2_frame_write_data_header (u32 data_len, u32 stream_id, u8 flags,
+ u8 *dst);
+
+#endif /* SRC_PLUGINS_HTTP_HTTP2_FRAME_H_ */
diff --git a/src/plugins/http/http2/hpack.c b/src/plugins/http/http2/hpack.c
new file mode 100644
index 00000000000..76021ae14a6
--- /dev/null
+++ b/src/plugins/http/http2/hpack.c
@@ -0,0 +1,1173 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2025 Cisco Systems, Inc.
+ */
+
+#include <vppinfra/error.h>
+#include <vppinfra/ring.h>
+#include <http/http2/hpack.h>
+#include <http/http2/huffman_table.h>
+#include <http/http_status_codes.h>
+
/* number of entries in the HPACK static table (RFC7541 Appendix A) */
#define HPACK_STATIC_TABLE_SIZE 61

/* one static table entry: header name/value with precomputed lengths */
typedef struct
{
  char *name;
  uword name_len;
  char *value;
  uword value_len;
} hpack_static_table_entry_t;

/* expand a pair of string literals into name, name_len, value, value_len
 * initializers (lengths exclude the terminating NUL) */
#define name_val_token_lit(name, value) \
  (name), sizeof (name) - 1, (value), sizeof (value) - 1
+
+static hpack_static_table_entry_t
+ hpack_static_table[HPACK_STATIC_TABLE_SIZE] = {
+ { name_val_token_lit (":authority", "") },
+ { name_val_token_lit (":method", "GET") },
+ { name_val_token_lit (":method", "POST") },
+ { name_val_token_lit (":path", "/") },
+ { name_val_token_lit (":path", "/index.html") },
+ { name_val_token_lit (":scheme", "http") },
+ { name_val_token_lit (":scheme", "https") },
+ { name_val_token_lit (":status", "200") },
+ { name_val_token_lit (":status", "204") },
+ { name_val_token_lit (":status", "206") },
+ { name_val_token_lit (":status", "304") },
+ { name_val_token_lit (":status", "400") },
+ { name_val_token_lit (":status", "404") },
+ { name_val_token_lit (":status", "500") },
+ { name_val_token_lit ("accept-charset", "") },
+ { name_val_token_lit ("accept-encoding", "gzip, deflate") },
+ { name_val_token_lit ("accept-language", "") },
+ { name_val_token_lit ("accept-ranges", "") },
+ { name_val_token_lit ("accept", "") },
+ { name_val_token_lit ("access-control-allow-origin", "") },
+ { name_val_token_lit ("age", "") },
+ { name_val_token_lit ("allow", "") },
+ { name_val_token_lit ("authorization", "") },
+ { name_val_token_lit ("cache-control", "") },
+ { name_val_token_lit ("content-disposition", "") },
+ { name_val_token_lit ("content-encoding", "") },
+ { name_val_token_lit ("content-language", "") },
+ { name_val_token_lit ("content-length", "") },
+ { name_val_token_lit ("content-location", "") },
+ { name_val_token_lit ("content-range", "") },
+ { name_val_token_lit ("content-type", "") },
+ { name_val_token_lit ("cookie", "") },
+ { name_val_token_lit ("date", "") },
+ { name_val_token_lit ("etag", "") },
+ { name_val_token_lit ("etag", "") },
+ { name_val_token_lit ("expires", "") },
+ { name_val_token_lit ("from", "") },
+ { name_val_token_lit ("host", "") },
+ { name_val_token_lit ("if-match", "") },
+ { name_val_token_lit ("if-modified-since", "") },
+ { name_val_token_lit ("if-none-match", "") },
+ { name_val_token_lit ("if-range", "") },
+ { name_val_token_lit ("if-unmodified-since", "") },
+ { name_val_token_lit ("last-modified", "") },
+ { name_val_token_lit ("link", "") },
+ { name_val_token_lit ("location", "") },
+ { name_val_token_lit ("max-forwards", "") },
+ { name_val_token_lit ("proxy-authenticate", "") },
+ { name_val_token_lit ("proxy-authorization", "") },
+ { name_val_token_lit ("range", "") },
+ { name_val_token_lit ("referer", "") },
+ { name_val_token_lit ("refresh", "") },
+ { name_val_token_lit ("retry-after", "") },
+ { name_val_token_lit ("server", "") },
+ { name_val_token_lit ("set-cookie", "") },
+ { name_val_token_lit ("strict-transport-security", "") },
+ { name_val_token_lit ("transfer-encoding", "") },
+ { name_val_token_lit ("user-agent", "") },
+ { name_val_token_lit ("vary", "") },
+ { name_val_token_lit ("via", "") },
+ { name_val_token_lit ("www-authenticate", "") },
+ };
+
/* well-known header name with its HPACK static table index
 * (static_table_index of 0 means the name has no static table entry) */
typedef struct
{
  char *base;
  uword len;
  u8 static_table_index;
} hpack_token_t;

/* lookup table indexed by http_header_name_t, generated from
 * foreach_http_header_name (lower-case wire form + static table index) */
static hpack_token_t hpack_headers[] = {
#define _(sym, str_canonical, str_lower, hpack_index) \
  { http_token_lit (str_lower), hpack_index },
  foreach_http_header_name
#undef _
};
+
+__clib_export uword
+hpack_decode_int (u8 **src, u8 *end, u8 prefix_len)
+{
+ uword value, new_value;
+ u8 *p, shift = 0, byte;
+ u16 prefix_max;
+
+ ASSERT (*src < end);
+ ASSERT (prefix_len >= 1 && prefix_len <= 8);
+
+ p = *src;
+ prefix_max = (1 << prefix_len) - 1;
+ value = *p & (u8) prefix_max;
+ p++;
+ /* if integer value is less than 2^prefix_len-1 it's encoded within prefix */
+ if (value != prefix_max)
+ {
+ *src = p;
+ return value;
+ }
+
+ while (p != end)
+ {
+ byte = *p;
+ p++;
+ new_value = value + ((uword) (byte & 0x7F) << shift);
+ shift += 7;
+ /* check for overflow */
+ if (new_value < value)
+ return HPACK_INVALID_INT;
+ value = new_value;
+ /* MSB of the last byte is zero */
+ if ((byte & 0x80) == 0)
+ {
+ *src = p;
+ return value;
+ }
+ }
+
+ return HPACK_INVALID_INT;
+}
+
/**
 * Huffman-decode a span of octets (RFC7541 section 5.2 / Appendix B)
 *
 * @param src     Pointer to start of Huffman-coded input
 * @param end     End of the coded input
 * @param buf     Output cursor, advanced as symbols are written
 * @param buf_len Remaining output space, decreased as symbols are written
 *
 * @return @c HTTP2_ERROR_NO_ERROR on success,
 *         @c HTTP2_ERROR_COMPRESSION_ERROR on invalid coding/padding,
 *         @c HTTP2_ERROR_INTERNAL_ERROR when the output buffer is too small
 */
http2_error_t
hpack_decode_huffman (u8 **src, u8 *end, u8 **buf, uword *buf_len)
{
  u64 accumulator = 0;	  /* bit reservoir, codes are consumed from the top */
  u8 accumulator_len = 0; /* number of valid bits in the reservoir */
  u8 *p;
  hpack_huffman_code_t *code;

  p = *src;
  while (1)
    {
      /* out of space? */
      if (*buf_len == 0)
	return HTTP2_ERROR_INTERNAL_ERROR;
      /* refill the reservoir one octet at a time while it fits */
      while (p < end && accumulator_len <= 56)
	{
	  accumulator <<= 8;
	  accumulator_len += 8;
	  accumulator |= (u64) *p++;
	}
      /* first try short codes (5 - 8 bits) via direct 8-bit lookup */
      code =
	&huff_code_table_fast[(u8) (accumulator >> (accumulator_len - 8))];
      /* zero code length means no luck, code is longer than 8 bits */
      if (PREDICT_TRUE (code->code_len))
	{
	  **buf = code->symbol;
	  (*buf)++;
	  (*buf_len)--;
	  accumulator_len -= code->code_len;
	}
      else
	{
	  /* slow path / long codes (10 - 30 bits) */
	  u32 tmp;
	  /* group boundaries are aligned to 32 bits */
	  if (accumulator_len < 32)
	    tmp = accumulator << (32 - accumulator_len);
	  else
	    tmp = accumulator >> (accumulator_len - 32);
	  /* figure out which interval code falls into, this is possible
	   * because HPACK use canonical Huffman codes
	   * see Schwartz, E. and B. Kallick, “Generating a canonical prefix
	   * encoding”
	   */
	  hpack_huffman_group_t *hg = hpack_huffman_get_group (tmp);
	  /* trim code to correct length */
	  u32 code = (accumulator >> (accumulator_len - hg->code_len)) &
		     ((1 << hg->code_len) - 1);
	  if (!code)
	    return HTTP2_ERROR_COMPRESSION_ERROR;
	  /* find symbol in the list */
	  **buf = hg->symbols[code - hg->first_code];
	  (*buf)++;
	  (*buf_len)--;
	  accumulator_len -= hg->code_len;
	}
      /* all done */
      if (p == end && accumulator_len < 8)
	{
	  /* there might be one more symbol encoded with short code */
	  if (accumulator_len >= 5)
	    {
	      /* first check EOF case: remaining bits all ones = EOS padding */
	      if (((1 << accumulator_len) - 1) ==
		  (accumulator & ((1 << accumulator_len) - 1)))
		break;

	      /* out of space? */
	      if (*buf_len == 0)
		return HTTP2_ERROR_INTERNAL_ERROR;

	      /* if bogus EOF check bellow will fail */
	      code = &huff_code_table_fast[(u8) (accumulator
						 << (8 - accumulator_len))];
	      **buf = code->symbol;
	      (*buf)++;
	      (*buf_len)--;
	      accumulator_len -= code->code_len;
	      /* end at byte boundary? */
	      if (accumulator_len == 0)
		break;
	    }
	  /* we must end with EOF here: padding must be all ones (RFC7541
	   * section 5.2), anything else is a compression error */
	  if (((1 << accumulator_len) - 1) !=
	      (accumulator & ((1 << accumulator_len) - 1)))
	    return HTTP2_ERROR_COMPRESSION_ERROR;
	  break;
	}
    }
  return HTTP2_ERROR_NO_ERROR;
}
+
+__clib_export http2_error_t
+hpack_decode_string (u8 **src, u8 *end, u8 **buf, uword *buf_len)
+{
+ u8 *p, is_huffman;
+ uword len;
+
+ if (*src == end)
+ return HTTP2_ERROR_COMPRESSION_ERROR;
+
+ p = *src;
+ /* H flag in first bit */
+ is_huffman = *p & 0x80;
+
+ /* length is integer with 7 bit prefix */
+ len = hpack_decode_int (&p, end, 7);
+ if (PREDICT_FALSE (len == HPACK_INVALID_INT))
+ return HTTP2_ERROR_COMPRESSION_ERROR;
+
+ /* do we have everything? */
+ if (len > (end - p))
+ return HTTP2_ERROR_COMPRESSION_ERROR;
+
+ if (is_huffman)
+ {
+ *src = (p + len);
+ return hpack_decode_huffman (&p, p + len, buf, buf_len);
+ }
+ else
+ {
+ /* enough space? */
+ if (len > *buf_len)
+ return HTTP2_ERROR_INTERNAL_ERROR;
+
+ clib_memcpy (*buf, p, len);
+ *buf_len -= len;
+ *buf += len;
+ *src = (p + len);
+ return HTTP2_ERROR_NO_ERROR;
+ }
+}
+
+__clib_export u8 *
+hpack_encode_int (u8 *dst, uword value, u8 prefix_len)
+{
+ u16 prefix_max;
+
+ ASSERT (prefix_len >= 1 && prefix_len <= 8);
+
+ prefix_max = (1 << prefix_len) - 1;
+
+ /* if integer value is less than 2^prefix_len-1 it's encoded within prefix */
+ if (value < prefix_max)
+ {
+ *dst++ |= (u8) value;
+ return dst;
+ }
+
+ /* otherwise all bits of the prefix are set to 1 */
+ *dst++ |= (u8) prefix_max;
+ /* and the value is decreased by 2^prefix_len-1 */
+ value -= prefix_max;
+ /* MSB of each byte is used as continuation flag */
+ for (; value >= 0x80; value >>= 7)
+ *dst++ = 0x80 | (value & 0x7F);
+ /* except for the last byte */
+ *dst++ = (u8) value;
+
+ return dst;
+}
+
+uword
+hpack_huffman_encoded_len (const u8 *value, uword value_len)
+{
+ uword len = 0;
+ u8 *end;
+ hpack_huffman_symbol_t *sym;
+
+ end = (u8 *) value + value_len;
+ while (value != end)
+ {
+ sym = &huff_sym_table[*value++];
+ len += sym->code_len;
+ }
+ /* round up to byte boundary */
+ return (len + 7) / 8;
+}
+
/**
 * Huffman-encode a string (RFC7541 Appendix B)
 *
 * Caller must reserve at least @c hpack_huffman_encoded_len octets at dst.
 *
 * @return Pointer past the last written octet
 */
u8 *
hpack_encode_huffman (u8 *dst, const u8 *value, uword value_len)
{
  u8 *end;
  hpack_huffman_symbol_t *sym;
  u8 accumulator_len = 40; /* leftover (1 byte) + max code_len (4 bytes) */
  u64 accumulator = 0;	   /* to fit leftover and current code */

  end = (u8 *) value + value_len;

  while (value != end)
    {
      sym = &huff_sym_table[*value++];
      /* add current code to leftover of previous one */
      accumulator |= (u64) sym->code << (accumulator_len - sym->code_len);
      accumulator_len -= sym->code_len;
      /* write only fully occupied bytes (max 4); the cases intentionally
       * fall through, each flushing one more byte from the top */
      switch (accumulator_len)
	{
	case 1 ... 8:
#define WRITE_BYTE() \
  *dst = (u8) (accumulator >> 32); \
  accumulator_len += 8; \
  accumulator <<= 8; \
  dst++;
	  WRITE_BYTE ();
	case 9 ... 16:
	  WRITE_BYTE ();
	case 17 ... 24:
	  WRITE_BYTE ();
	case 25 ... 32:
	  WRITE_BYTE ();
	default:
	  break;
	}
    }

  /* padding (0-7 bits): remaining bits are filled with the EOS prefix
   * (all ones), per RFC7541 section 5.2 */
  ASSERT (accumulator_len > 32 && accumulator_len <= 40);
  if (accumulator_len != 40)
    {
      accumulator |= (u64) 0x7F << (accumulator_len - 7);
      *dst = (u8) (accumulator >> 32);
      dst++;
    }
  return dst;
}
+
+__clib_export u8 *
+hpack_encode_string (u8 *dst, const u8 *value, uword value_len)
+{
+ uword huff_len;
+
+ huff_len = hpack_huffman_encoded_len (value, value_len);
+ /* raw bytes might take fewer bytes */
+ if (huff_len >= value_len)
+ {
+ *dst = 0; /* clear H flag */
+ dst = hpack_encode_int (dst, value_len, 7);
+ clib_memcpy (dst, value, value_len);
+ return dst + value_len;
+ }
+
+ *dst = 0x80; /* set H flag */
+ dst = hpack_encode_int (dst, huff_len, 7);
+ dst = hpack_encode_huffman (dst, value, value_len);
+
+ return dst;
+}
+
+__clib_export void
+hpack_dynamic_table_init (hpack_dynamic_table_t *table, u32 max_size)
+{
+ table->max_size = max_size;
+ table->size = max_size;
+ table->used = 0;
+ clib_ring_new (table->entries,
+ max_size / HPACK_DYNAMIC_TABLE_ENTRY_OVERHEAD);
+}
+
+__clib_export void
+hpack_dynamic_table_free (hpack_dynamic_table_t *table)
+{
+ hpack_dynamic_table_entry_t *e;
+
+ while ((e = clib_ring_deq (table->entries)) != 0)
+ vec_free (e->buf);
+
+ clib_ring_free (table->entries);
+}
+
/* entry buffer layout is name octets immediately followed by value octets;
 * name_len marks the boundary */
#define hpack_dynamic_table_entry_value_base(e) \
  ((char *) ((e)->buf + (e)->name_len))
#define hpack_dynamic_table_entry_value_len(e) \
  (vec_len ((e)->buf) - (e)->name_len)
+
+always_inline hpack_dynamic_table_entry_t *
+hpack_dynamic_table_get (hpack_dynamic_table_t *table, uword index)
+{
+ if (index > clib_ring_n_enq (table->entries))
+ return 0;
+
+ hpack_dynamic_table_entry_t *first = clib_ring_get_first (table->entries);
+ u32 first_index = first - table->entries;
+ u32 entry_index =
+ (first_index + (clib_ring_n_enq (table->entries) - 1 - (u32) index)) %
+ vec_len (table->entries);
+ return table->entries + entry_index;
+}
+
+__clib_export u8 *
+format_hpack_dynamic_table (u8 *s, va_list *args)
+{
+ hpack_dynamic_table_t *table = va_arg (*args, hpack_dynamic_table_t *);
+ u32 i;
+ hpack_dynamic_table_entry_t *e;
+
+ s = format (s, "HPACK dynamic table:\n");
+ for (i = 0; i < clib_ring_n_enq (table->entries); i++)
+ {
+ e = hpack_dynamic_table_get (table, i);
+ s = format (s, "\t[%u] %U: %U\n", i, format_http_bytes, e->buf,
+ e->name_len, format_http_bytes,
+ hpack_dynamic_table_entry_value_base (e),
+ hpack_dynamic_table_entry_value_len (e));
+ }
+ return s;
+}
+
/* Evict the oldest entry and release its size from the usage counter;
 * the entry buffer is only reset (not freed) so the ring slot can reuse
 * the allocation on a later insert */
static inline void
hpack_dynamic_table_evict_one (hpack_dynamic_table_t *table)
{
  u32 entry_size;
  hpack_dynamic_table_entry_t *e;

  e = clib_ring_deq (table->entries);
  ASSERT (e);
  HTTP_DBG (2, "%U: %U", format_http_bytes, e->buf, e->name_len,
	    format_http_bytes, hpack_dynamic_table_entry_value_base (e),
	    hpack_dynamic_table_entry_value_len (e));
  /* entry cost is name len + value len + 32 octets (RFC7541 section 4.1) */
  entry_size = vec_len (e->buf) + HPACK_DYNAMIC_TABLE_ENTRY_OVERHEAD;
  table->used -= entry_size;
  vec_reset_length (e->buf);
}
+
/* Insert a new entry into the dynamic table (RFC7541 section 4.4):
 * oldest entries are evicted until the new one fits; an entry larger than
 * the whole table just empties it and is not inserted (not an error) */
static void
hpack_dynamic_table_add (hpack_dynamic_table_t *table, http_token_t *name,
			 http_token_t *value)
{
  u32 entry_size;
  hpack_dynamic_table_entry_t *e;

  entry_size = name->len + value->len + HPACK_DYNAMIC_TABLE_ENTRY_OVERHEAD;

  /* make space or evict all */
  while (clib_ring_n_enq (table->entries) &&
	 (table->used + entry_size > table->size))
    hpack_dynamic_table_evict_one (table);

  /* attempt to add entry larger than the maximum size is not error */
  if (entry_size > table->size)
    return;

  e = clib_ring_enq (table->entries);
  ASSERT (e);
  /* entry buffer holds name octets immediately followed by value octets */
  vec_validate (e->buf, name->len + value->len - 1);
  clib_memcpy (e->buf, name->base, name->len);
  clib_memcpy (e->buf + name->len, value->base, value->len);
  e->name_len = name->len;
  table->used += entry_size;

  HTTP_DBG (2, "%U: %U", format_http_bytes, e->buf, e->name_len,
	    format_http_bytes, hpack_dynamic_table_entry_value_base (e),
	    hpack_dynamic_table_entry_value_len (e));
}
+
+static http2_error_t
+hpack_get_table_entry (uword index, http_token_t *name, http_token_t *value,
+ u8 value_is_indexed, hpack_dynamic_table_t *dt)
+{
+ if (index <= HPACK_STATIC_TABLE_SIZE)
+ {
+ hpack_static_table_entry_t *e = &hpack_static_table[index - 1];
+ name->base = e->name;
+ name->len = e->name_len;
+ if (value_is_indexed)
+ {
+ value->base = e->value;
+ value->len = e->value_len;
+ }
+ HTTP_DBG (2, "[%llu] %U: %U", index, format_http_bytes, e->name,
+ e->name_len, format_http_bytes, e->value, e->value_len);
+ return HTTP2_ERROR_NO_ERROR;
+ }
+ else
+ {
+ hpack_dynamic_table_entry_t *e =
+ hpack_dynamic_table_get (dt, index - HPACK_STATIC_TABLE_SIZE - 1);
+ if (PREDICT_FALSE (!e))
+ {
+ HTTP_DBG (1, "index %llu not in dynamic table", index);
+ return HTTP2_ERROR_COMPRESSION_ERROR;
+ }
+ name->base = (char *) e->buf;
+ name->len = e->name_len;
+ value->base = hpack_dynamic_table_entry_value_base (e);
+ value->len = hpack_dynamic_table_entry_value_len (e);
+ HTTP_DBG (2, "[%llu] %U: %U", index, format_http_bytes, name->base,
+ name->len, format_http_bytes, value->base, value->len);
+ return HTTP2_ERROR_NO_ERROR;
+ }
+}
+
+__clib_export http2_error_t
+hpack_decode_header (u8 **src, u8 *end, u8 **buf, uword *buf_len,
+ u32 *name_len, u32 *value_len, hpack_dynamic_table_t *dt)
+{
+ u8 *p;
+ u8 value_is_indexed = 0, add_new_entry = 0;
+ uword old_len, new_max, index = 0;
+ http_token_t name, value;
+ http2_error_t rv;
+
+ ASSERT (*src < end);
+ p = *src;
+
+ /* dynamic table size update */
+ while ((*p & 0xE0) == 0x20)
+ {
+ new_max = hpack_decode_int (&p, end, 5);
+ if (p == end || new_max > (uword) dt->max_size)
+ {
+ HTTP_DBG (1, "invalid dynamic table size update");
+ return HTTP2_ERROR_COMPRESSION_ERROR;
+ }
+ while (clib_ring_n_enq (dt->entries) && new_max > dt->used)
+ hpack_dynamic_table_evict_one (dt);
+ dt->size = (u32) new_max;
+ }
+
+ if (*p & 0x80) /* indexed header field */
+ {
+ index = hpack_decode_int (&p, end, 7);
+ /* index value of 0 is not used */
+ if (index == 0 || index == HPACK_INVALID_INT)
+ {
+ HTTP_DBG (1, "invalid index");
+ return HTTP2_ERROR_COMPRESSION_ERROR;
+ }
+ value_is_indexed = 1;
+ }
+ else if (*p > 0x40) /* incremental indexing - indexed name */
+ {
+ index = hpack_decode_int (&p, end, 6);
+ /* index value of 0 is not used */
+ if (index == 0 || index == HPACK_INVALID_INT)
+ {
+ HTTP_DBG (1, "invalid index");
+ return HTTP2_ERROR_COMPRESSION_ERROR;
+ }
+ add_new_entry = 1;
+ }
+ else if (*p == 0x40) /* incremental indexing - new name */
+ {
+ add_new_entry = 1;
+ p++;
+ }
+ else /* without indexing / never indexed */
+ {
+ if ((*p & 0x0F) == 0) /* new name */
+ p++;
+ else /* indexed name */
+ {
+ index = hpack_decode_int (&p, end, 4);
+ /* index value of 0 is not used */
+ if (index == 0 || index == HPACK_INVALID_INT)
+ {
+ HTTP_DBG (1, "invalid index");
+ return HTTP2_ERROR_COMPRESSION_ERROR;
+ }
+ }
+ }
+
+ if (index)
+ {
+ rv = hpack_get_table_entry (index, &name, &value, value_is_indexed, dt);
+ if (rv != HTTP2_ERROR_NO_ERROR)
+ {
+ HTTP_DBG (1, "entry index %llu error", index);
+ return rv;
+ }
+ if (name.len > *buf_len)
+ {
+ HTTP_DBG (1, "not enough space");
+ return HTTP2_ERROR_INTERNAL_ERROR;
+ }
+ clib_memcpy (*buf, name.base, name.len);
+ *buf_len -= name.len;
+ *buf += name.len;
+ *name_len = name.len;
+ if (value_is_indexed)
+ {
+ if (value.len > *buf_len)
+ {
+ HTTP_DBG (1, "not enough space");
+ return HTTP2_ERROR_INTERNAL_ERROR;
+ }
+ clib_memcpy (*buf, value.base, value.len);
+ *buf_len -= value.len;
+ *buf += value.len;
+ *value_len = value.len;
+ }
+ }
+ else
+ {
+ old_len = *buf_len;
+ name.base = (char *) *buf;
+ rv = hpack_decode_string (&p, end, buf, buf_len);
+ if (rv != HTTP2_ERROR_NO_ERROR)
+ {
+ HTTP_DBG (1, "invalid header name");
+ return rv;
+ }
+ *name_len = old_len - *buf_len;
+ name.len = *name_len;
+ }
+
+ if (!value_is_indexed)
+ {
+ old_len = *buf_len;
+ value.base = (char *) *buf;
+ rv = hpack_decode_string (&p, end, buf, buf_len);
+ if (rv != HTTP2_ERROR_NO_ERROR)
+ {
+ HTTP_DBG (1, "invalid header value");
+ return rv;
+ }
+ *value_len = old_len - *buf_len;
+ value.len = *value_len;
+ }
+
+ if (add_new_entry)
+ hpack_dynamic_table_add (dt, &name, &value);
+
+ *src = p;
+ return HTTP2_ERROR_NO_ERROR;
+}
+
/* Validate header field name octets against the allowed token characters
 * (lower-case only, since HTTP/2 requires lower-case field names per
 * RFC9113 section 8.2.1); lookup is a 256-bit allow-list bitmap */
static inline u8
hpack_header_name_is_valid (u8 *name, u32 name_len)
{
  u32 i;
  static uword tchar[4] = {
    /* !#$%'*+-.0123456789 */
    0x03ff6cba00000000,
    /* ^_`abcdefghijklmnopqrstuvwxyz|~ */
    0x57ffffffc0000000,
    0x0000000000000000,
    0x0000000000000000,
  };
  for (i = 0; i < name_len; i++)
    {
      if (!clib_bitmap_get_no_check (tchar, name[i]))
	return 0;
    }
  return 1;
}
+
/* Validate header field value octets: visible chars, SP, HTAB and opaque
 * high octets are allowed, but the value must not start or end with
 * SP/HTAB (RFC9113 section 8.2.1) */
static inline u8
hpack_header_value_is_valid (u8 *value, u32 value_len)
{
  u32 i;
  /* VCHAR / SP / HTAB / %x80-FF */
  static uword tchar[4] = {
    0xffffffff00000200,
    0x7fffffffffffffff,
    0xffffffffffffffff,
    0xffffffffffffffff,
  };

  /* empty value is fine */
  if (value_len == 0)
    return 1;

  /* must not start or end with SP or HTAB */
  if ((value[0] == 0x20 || value[0] == 0x09 || value[value_len - 1] == 0x20 ||
       value[value_len - 1] == 0x09))
    return 0;

  for (i = 0; i < value_len; i++)
    {
      if (!clib_bitmap_get_no_check (tchar, value[i]))
	return 0;
    }
  return 1;
}
+
+static inline http_req_method_t
+hpack_parse_method (u8 *value, u32 value_len)
+{
+ switch (value_len)
+ {
+ case 3:
+ if (!memcmp (value, "GET", 3))
+ return HTTP_REQ_GET;
+ break;
+ case 4:
+ if (!memcmp (value, "POST", 4))
+ return HTTP_REQ_POST;
+ break;
+ case 7:
+ if (!memcmp (value, "CONNECT", 7))
+ return HTTP_REQ_CONNECT;
+ break;
+ default:
+ break;
+ }
+ /* HPACK should return only connection errors, this one is stream error */
+ return HTTP_REQ_UNKNOWN;
+}
+
+static inline http_url_scheme_t
+hpack_parse_scheme (u8 *value, u32 value_len)
+{
+ switch (value_len)
+ {
+ case 4:
+ if (!memcmp (value, "http", 4))
+ return HTTP_URL_SCHEME_HTTP;
+ break;
+ case 5:
+ if (!memcmp (value, "https", 5))
+ return HTTP_URL_SCHEME_HTTPS;
+ break;
+ default:
+ break;
+ }
+ /* HPACK should return only connection errors, this one is stream error */
+ return HTTP_URL_SCHEME_UNKNOWN;
+}
+
/**
 * Parse one request pseudo-header (:path, :method, :scheme, :authority);
 * name includes the leading colon
 *
 * Duplicate, empty-:path and unknown pseudo-headers are protocol errors
 * (RFC9113 section 8.3).
 */
static http2_error_t
hpack_parse_req_pseudo_header (u8 *name, u32 name_len, u8 *value,
			       u32 value_len,
			       hpack_request_control_data_t *control_data)
{
  HTTP_DBG (2, "%U: %U", format_http_bytes, name, name_len, format_http_bytes,
	    value, value_len);
  /* dispatch on total name length (colon included), then verify the rest */
  switch (name_len)
    {
    case 5:
      if (!memcmp (name + 1, "path", 4))
	{
	  /* duplicate or empty :path is malformed */
	  if (control_data->parsed_bitmap & HPACK_PSEUDO_HEADER_PATH_PARSED ||
	      value_len == 0)
	    return HTTP2_ERROR_PROTOCOL_ERROR;
	  control_data->parsed_bitmap |= HPACK_PSEUDO_HEADER_PATH_PARSED;
	  control_data->path = value;
	  control_data->path_len = value_len;
	  break;
	}
      return HTTP2_ERROR_PROTOCOL_ERROR;
    case 7:
      switch (name[1])
	{
	case 'm':
	  if (!memcmp (name + 2, "ethod", 5))
	    {
	      if (control_data->parsed_bitmap &
		  HPACK_PSEUDO_HEADER_METHOD_PARSED)
		return HTTP2_ERROR_PROTOCOL_ERROR;
	      control_data->parsed_bitmap |= HPACK_PSEUDO_HEADER_METHOD_PARSED;
	      control_data->method = hpack_parse_method (value, value_len);
	      break;
	    }
	  return HTTP2_ERROR_PROTOCOL_ERROR;
	case 's':
	  if (!memcmp (name + 2, "cheme", 5))
	    {
	      if (control_data->parsed_bitmap &
		  HPACK_PSEUDO_HEADER_SCHEME_PARSED)
		return HTTP2_ERROR_PROTOCOL_ERROR;
	      control_data->parsed_bitmap |= HPACK_PSEUDO_HEADER_SCHEME_PARSED;
	      control_data->scheme = hpack_parse_scheme (value, value_len);
	      break;
	    }
	  return HTTP2_ERROR_PROTOCOL_ERROR;
	default:
	  return HTTP2_ERROR_PROTOCOL_ERROR;
	}
      break;
    case 10:
      if (!memcmp (name + 1, "authority", 9))
	{
	  if (control_data->parsed_bitmap &
	      HPACK_PSEUDO_HEADER_AUTHORITY_PARSED)
	    return HTTP2_ERROR_PROTOCOL_ERROR;
	  control_data->parsed_bitmap |= HPACK_PSEUDO_HEADER_AUTHORITY_PARSED;
	  control_data->authority = value;
	  control_data->authority_len = value_len;
	  break;
	}
      return HTTP2_ERROR_PROTOCOL_ERROR;
    default:
      return HTTP2_ERROR_PROTOCOL_ERROR;
    }

  return HTTP2_ERROR_NO_ERROR;
}
+
+/* Special treatment for headers like:
+ *
+ * RFC9113 8.2.2: any message containing connection-specific header
+ * fields MUST be treated as malformed (connection, upgrade, keep-alive,
+ * proxy-connection, transfer-encoding), TE header MUST NOT contain any value
+ * other than "trailers"
+ *
+ * find headers that will be used later in preprocessing (content-length)
+ */
+always_inline http2_error_t
+hpack_preprocess_header (u8 *name, u32 name_len, u8 *value, u32 value_len,
+ uword index,
+ hpack_request_control_data_t *control_data)
+{
+ switch (name_len)
+ {
+ case 2:
+ if (name[0] == 't' && name[1] == 'e' &&
+ !http_token_is_case ((const char *) value, value_len,
+ http_token_lit ("trailers")))
+ return HTTP2_ERROR_PROTOCOL_ERROR;
+ break;
+ case 7:
+ if (!memcmp (name, "upgrade", 7))
+ return HTTP2_ERROR_PROTOCOL_ERROR;
+ break;
+ case 10:
+ switch (name[0])
+ {
+ case 'c':
+ if (!memcmp (name + 1, "onnection", 9))
+ return HTTP2_ERROR_PROTOCOL_ERROR;
+ break;
+ case 'k':
+ if (!memcmp (name + 1, "eep-alive", 9))
+ return HTTP2_ERROR_PROTOCOL_ERROR;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 14:
+ if (!memcmp (name, "content-length", 7) &&
+ control_data->content_len_header_index == ~0)
+ control_data->content_len_header_index = index;
+ break;
+ case 16:
+ if (!memcmp (name, "proxy-connection", 16))
+ return HTTP2_ERROR_PROTOCOL_ERROR;
+ break;
+ case 17:
+ if (!memcmp (name, "transfer-encoding", 17))
+ return HTTP2_ERROR_PROTOCOL_ERROR;
+ break;
+ default:
+ break;
+ }
+ return HTTP2_ERROR_NO_ERROR;
+}
+
/**
 * Parse an HPACK-encoded request header block
 *
 * Decoded octets are written to dst; pseudo-headers are collected into
 * control_data, regular headers are appended to *headers as offsets into
 * the decoded buffer (control_data->headers points at its start).
 *
 * @return @c HTTP2_ERROR_NO_ERROR on success, connection error otherwise
 */
__clib_export http2_error_t
hpack_parse_request (u8 *src, u32 src_len, u8 *dst, u32 dst_len,
		     hpack_request_control_data_t *control_data,
		     http_field_line_t **headers,
		     hpack_dynamic_table_t *dynamic_table)
{
  u8 *p, *end, *b, *name, *value;
  u8 regular_header_parsed = 0;
  u32 name_len, value_len;
  uword b_left;
  http_field_line_t *header;
  http2_error_t rv;

  p = src;
  end = src + src_len;
  b = dst;
  b_left = dst_len;
  control_data->parsed_bitmap = 0;
  control_data->headers_len = 0;
  control_data->content_len_header_index = ~0;

  while (p != end)
    {
      /* each decoded field lands at the current buffer cursor: name first,
       * value immediately after */
      name = b;
      rv = hpack_decode_header (&p, end, &b, &b_left, &name_len, &value_len,
				dynamic_table);
      if (rv != HTTP2_ERROR_NO_ERROR)
	{
	  HTTP_DBG (1, "hpack_decode_header: %U", format_http2_error, rv);
	  return rv;
	}
      value = name + name_len;

      /* pseudo header */
      if (name[0] == ':')
	{
	  /* all pseudo-headers must be before regular headers */
	  if (regular_header_parsed)
	    {
	      HTTP_DBG (1, "pseudo-headers after regular header");
	      return HTTP2_ERROR_PROTOCOL_ERROR;
	    }
	  rv = hpack_parse_req_pseudo_header (name, name_len, value, value_len,
					      control_data);
	  if (rv != HTTP2_ERROR_NO_ERROR)
	    {
	      HTTP_DBG (1, "hpack_parse_req_pseudo_header: %U",
			format_http2_error, rv);
	      return rv;
	    }
	  continue;
	}
      else
	{
	  if (!hpack_header_name_is_valid (name, name_len))
	    return HTTP2_ERROR_PROTOCOL_ERROR;
	  /* first regular header marks the start of the header section */
	  if (!regular_header_parsed)
	    {
	      regular_header_parsed = 1;
	      control_data->headers = name;
	    }
	}
      if (!hpack_header_value_is_valid (value, value_len))
	return HTTP2_ERROR_PROTOCOL_ERROR;
      vec_add2 (*headers, header, 1);
      HTTP_DBG (2, "%U: %U", format_http_bytes, name, name_len,
		format_http_bytes, value, value_len);
      /* field lines store offsets relative to the header section start */
      header->name_offset = name - control_data->headers;
      header->name_len = name_len;
      header->value_offset = value - control_data->headers;
      header->value_len = value_len;
      control_data->headers_len += name_len;
      control_data->headers_len += value_len;
      if (regular_header_parsed)
	{
	  rv = hpack_preprocess_header (name, name_len, value, value_len,
					header - *headers, control_data);
	  if (rv != HTTP2_ERROR_NO_ERROR)
	    {
	      HTTP_DBG (1, "connection-specific header present");
	      return rv;
	    }
	}
    }
  control_data->control_data_len = dst_len - b_left;
  HTTP_DBG (2, "%U", format_hpack_dynamic_table, dynamic_table);
  return HTTP2_ERROR_NO_ERROR;
}
+
/**
 * Encode a header whose name is in the hpack_headers lookup table
 *
 * Uses "Literal Header Field without Indexing" with indexed name when the
 * name also has a static table entry, with new (literal) name otherwise.
 * Vector space is over-allocated for the worst case and trimmed afterwards.
 *
 * @return Updated vector
 */
static inline u8 *
hpack_encode_header (u8 *dst, http_header_name_t name, const u8 *value,
		     u32 value_len)
{
  hpack_token_t *name_token;
  u8 *a, *b;
  u32 orig_len, actual_size;

  orig_len = vec_len (dst);
  name_token = &hpack_headers[name];
  if (name_token->static_table_index)
    {
      /* static table index with 4 bit prefix is max 2 bytes */
      vec_add2 (dst, a, 2 + value_len + HPACK_ENCODED_INT_MAX_LEN);
      /* Literal Header Field without Indexing — Indexed Name */
      *a = 0x00; /* zero first 4 bits */
      b = hpack_encode_int (a, name_token->static_table_index, 4);
    }
  else
    {
      /* one extra byte for 4 bit prefix */
      vec_add2 (dst, a,
		name_token->len + value_len + HPACK_ENCODED_INT_MAX_LEN * 2 +
		  1);
      b = a;
      /* Literal Header Field without Indexing — New Name */
      *b++ = 0x00;
      b = hpack_encode_string (b, (const u8 *) name_token->base,
			       name_token->len);
    }
  b = hpack_encode_string (b, value, value_len);

  /* trim the over-allocation down to what was actually written */
  actual_size = b - a;
  vec_set_len (dst, orig_len + actual_size);
  return dst;
}
+
+static inline u8 *
+hpack_encode_custom_header (u8 *dst, const u8 *name, u32 name_len,
+ const u8 *value, u32 value_len)
+{
+ u32 orig_len, actual_size;
+ u8 *a, *b;
+
+ orig_len = vec_len (dst);
+ /* one extra byte for 4 bit prefix */
+ vec_add2 (dst, a, name_len + value_len + HPACK_ENCODED_INT_MAX_LEN * 2 + 1);
+ b = a;
+ /* Literal Header Field without Indexing — New Name */
+ *b++ = 0x00;
+ b = hpack_encode_string (b, name, name_len);
+ b = hpack_encode_string (b, value, value_len);
+ actual_size = b - a;
+ vec_set_len (dst, orig_len + actual_size);
+ return dst;
+}
+
/* Encode :status pseudo-header: common status codes use their full static
 * table entry (single octet, indexed header field), everything else is a
 * literal with indexed name (:status is static table index 8) */
static inline u8 *
hpack_encode_status_code (u8 *dst, http_status_code_t sc)
{
  u32 orig_len, actual_size;
  u8 *a, *b;

/* emit an indexed header field referencing a full static table entry */
#define encode_common_sc(_index) \
  vec_add2 (dst, a, 1); \
  *a++ = 0x80 | _index;

  switch (sc)
    {
    case HTTP_STATUS_OK:
      encode_common_sc (8);
      break;
    case HTTP_STATUS_NO_CONTENT:
      encode_common_sc (9);
      break;
    case HTTP_STATUS_PARTIAL_CONTENT:
      encode_common_sc (10);
      break;
    case HTTP_STATUS_NOT_MODIFIED:
      encode_common_sc (11);
      break;
    case HTTP_STATUS_BAD_REQUEST:
      encode_common_sc (12);
      break;
    case HTTP_STATUS_NOT_FOUND:
      encode_common_sc (13);
      break;
    case HTTP_STATUS_INTERNAL_ERROR:
      encode_common_sc (14);
      break;
    default:
      /* 1 prefix octet + string literal header + 3 digit octets */
      orig_len = vec_len (dst);
      vec_add2 (dst, a, 5);
      b = a;
      /* Literal Header Field without Indexing — Indexed Name */
      *b++ = 8;
      b = hpack_encode_string (b, (const u8 *) http_status_code_str[sc], 3);
      actual_size = b - a;
      vec_set_len (dst, orig_len + actual_size);
      break;
    }
  return dst;
}
+
/* Encode content-length header from an integer value; the name is sent as
 * static table index 28 ("content-length") in literal-without-indexing
 * form: 0x0F 0x0D is index 28 encoded with a 4 bit prefix */
static inline u8 *
hpack_encode_content_len (u8 *dst, u64 content_len)
{
  u8 digit_buffer[20]; /* max u64 value has 20 decimal digits */
  u8 *d = digit_buffer + sizeof (digit_buffer);
  u32 orig_len, actual_size;
  u8 *a, *b;

  orig_len = vec_len (dst);
  vec_add2 (dst, a, 3 + sizeof (digit_buffer));
  b = a;

  /* static table index 28 */
  *b++ = 0x0F;
  *b++ = 0x0D;
  /* render decimal digits right-to-left into the scratch buffer */
  do
    {
      *--d = '0' + content_len % 10;
      content_len /= 10;
    }
  while (content_len);

  b = hpack_encode_string (b, d, digit_buffer + sizeof (digit_buffer) - d);
  /* trim the over-allocation down to what was actually written */
  actual_size = b - a;
  vec_set_len (dst, orig_len + actual_size);
  return dst;
}
+
/**
 * Serialize response control data and app-provided headers into an HPACK
 * header block appended to *dst
 *
 * app_headers is the packed app header list: http_app_header_t records for
 * well-known names, http_custom_token_t name/value pairs (name length
 * tagged with HTTP_CUSTOM_HEADER_NAME_BIT) for custom names.
 */
__clib_export void
hpack_serialize_response (u8 *app_headers, u32 app_headers_len,
			  hpack_response_control_data_t *control_data,
			  u8 **dst)
{
  u8 *p, *end;

  p = *dst;

  /* status code must be first since it is pseudo-header */
  p = hpack_encode_status_code (p, control_data->sc);

  /* server name */
  p = hpack_encode_header (p, HTTP_HEADER_SERVER, control_data->server_name,
			   control_data->server_name_len);

  /* date */
  p = hpack_encode_header (p, HTTP_HEADER_DATE, control_data->date,
			   control_data->date_len);

  /* content length if any */
  if (control_data->content_len != HPACK_ENCODER_SKIP_CONTENT_LEN)
    p = hpack_encode_content_len (p, control_data->content_len);

  if (!app_headers_len)
    {
      *dst = p;
      return;
    }

  /* walk the packed app header list */
  end = app_headers + app_headers_len;
  while (app_headers < end)
    {
      /* custom header name? */
      u32 *tmp = (u32 *) app_headers;
      if (PREDICT_FALSE (*tmp & HTTP_CUSTOM_HEADER_NAME_BIT))
	{
	  http_custom_token_t *name, *value;
	  name = (http_custom_token_t *) app_headers;
	  /* name length carries the custom-name tag bit, mask it off */
	  u32 name_len = name->len & ~HTTP_CUSTOM_HEADER_NAME_BIT;
	  app_headers += sizeof (http_custom_token_t) + name_len;
	  value = (http_custom_token_t *) app_headers;
	  app_headers += sizeof (http_custom_token_t) + value->len;
	  p = hpack_encode_custom_header (p, name->token, name_len,
					  value->token, value->len);
	}
      else
	{
	  http_app_header_t *header;
	  header = (http_app_header_t *) app_headers;
	  app_headers += sizeof (http_app_header_t) + header->value.len;
	  p = hpack_encode_header (p, header->name, header->value.token,
				   header->value.len);
	}
    }

  *dst = p;
}
diff --git a/src/plugins/http/http2/hpack.h b/src/plugins/http/http2/hpack.h
new file mode 100644
index 00000000000..69144de133a
--- /dev/null
+++ b/src/plugins/http/http2/hpack.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2025 Cisco Systems, Inc.
+ */
+
+#ifndef SRC_PLUGINS_HTTP_HPACK_H_
+#define SRC_PLUGINS_HTTP_HPACK_H_
+
+#include <vppinfra/types.h>
+#include <http/http2/http2.h>
+#include <http/http.h>
+
+#define HPACK_INVALID_INT CLIB_UWORD_MAX
+#if uword_bits == 64
+#define HPACK_ENCODED_INT_MAX_LEN 10
+#else
+#define HPACK_ENCODED_INT_MAX_LEN 6
+#endif
+
+#define HPACK_DEFAULT_HEADER_TABLE_SIZE 4096
+#define HPACK_DYNAMIC_TABLE_ENTRY_OVERHEAD 32
+#define HPACK_ENCODER_SKIP_CONTENT_LEN ((u64) ~0)
+
+/* One HPACK dynamic table entry: name and value stored in a single
+ * buffer, with name_len marking where the name ends. */
+typedef struct
+{
+ u8 *buf;
+ uword name_len;
+} hpack_dynamic_table_entry_t;
+
+typedef struct
+{
+ /* SETTINGS_HEADER_TABLE_SIZE */
+ u32 max_size;
+ /* dynamic table size update */
+ u32 size;
+ /* current usage (each entry = 32 + name len + value len) */
+ u32 used;
+ /* ring buffer */
+ hpack_dynamic_table_entry_t *entries;
+} hpack_dynamic_table_t;
+
+/* Bitmap flags recording which pseudo-headers were seen while parsing a
+ * request header block (one bit per foreach_http2_pseudo_header entry). */
+enum
+{
+#define _(bit, name, str) HPACK_PSEUDO_HEADER_##name##_PARSED = (1 << bit),
+ foreach_http2_pseudo_header
+#undef _
+};
+
+/* Pseudo-headers and framing offsets extracted by the request parser.
+ * Pointers reference the caller-provided decode buffer; parsed_bitmap
+ * holds HPACK_PSEUDO_HEADER_*_PARSED flags. */
+typedef struct
+{
+ http_req_method_t method;
+ http_url_scheme_t scheme;
+ u8 *authority;
+ u32 authority_len;
+ u8 *path;
+ u32 path_len;
+ u8 *headers;
+ uword content_len_header_index;
+ u32 headers_len;
+ u32 control_data_len;
+ u16 parsed_bitmap;
+} hpack_request_control_data_t;
+
+/* Header values supplied by the protocol layer when serializing a
+ * response; content_len set to HPACK_ENCODER_SKIP_CONTENT_LEN omits
+ * the Content-Length header. */
+typedef struct
+{
+ http_status_code_t sc;
+ u64 content_len;
+ u8 *server_name;
+ u32 server_name_len;
+ u8 *date;
+ u32 date_len;
+} hpack_response_control_data_t;
+
+/**
+ * Decode unsigned variable-length integer (RFC7541 section 5.1)
+ *
+ * @param src Pointer to source buffer which will be advanced
+ * @param end End of the source buffer
+ * @param prefix_len Number of bits of the prefix (between 1 and 8)
+ *
+ * @return Decoded integer or @c HPACK_INVALID_INT in case of error
+ */
+uword hpack_decode_int (u8 **src, u8 *end, u8 prefix_len);
+
+/**
+ * Encode given value as unsigned variable-length integer (RFC7541 section 5.1)
+ *
+ * @param dst Pointer to destination buffer, should have enough space
+ * @param value Integer value to encode (up to @c CLIB_WORD_MAX)
+ * @param prefix_len Number of bits of the prefix (between 1 and 8)
+ *
+ * @return Advanced pointer to the destination buffer
+ *
+ * @note Encoded integer will take maximum @c HPACK_ENCODED_INT_MAX_LEN bytes
+ */
+u8 *hpack_encode_int (u8 *dst, uword value, u8 prefix_len);
+
+/**
+ * Decode Huffman-encoded string (RFC7541 section 5.2)
+ *
+ * @param src Pointer to source buffer which will be advanced
+ * @param end End of the source buffer
+ * @param buf Pointer to the buffer where string is decoded which will be
+ * advanced by number of written bytes
+ * @param buf_len Length of the buffer, will be decreased by number of
+ * written bytes
+ *
+ * @return @c HTTP2_ERROR_NO_ERROR on success
+ *
+ * @note Caller is responsible to check if there is something left in the
+ * source buffer first
+ */
+http2_error_t hpack_decode_huffman (u8 **src, u8 *end, u8 **buf,
+ uword *buf_len);
+
+/**
+ * Encode given string in Huffman codes.
+ *
+ * @param dst Pointer to destination buffer, should have enough space
+ * @param value String to encode
+ * @param value_len Length of the string
+ *
+ * @return Advanced pointer to the destination buffer
+ */
+u8 *hpack_encode_huffman (u8 *dst, const u8 *value, uword value_len);
+
+/**
+ * Number of bytes required to encode given string in Huffman codes
+ *
+ * @param value Pointer to buffer with string to encode
+ * @param value_len Length of the string
+ *
+ * @return number of bytes required to encode string in Huffman codes, round up
+ * to byte boundary
+ */
+uword hpack_huffman_encoded_len (const u8 *value, uword value_len);
+
+/**
+ * Initialize HPACK dynamic table
+ *
+ * @param table Dynamic table to initialize
+ * @param max_size Maximum table size (SETTINGS_HEADER_TABLE_SIZE)
+ */
+void hpack_dynamic_table_init (hpack_dynamic_table_t *table, u32 max_size);
+
+/**
+ * Free HPACK dynamic table
+ *
+ * @param table Dynamic table to free
+ */
+void hpack_dynamic_table_free (hpack_dynamic_table_t *table);
+
+u8 *format_hpack_dynamic_table (u8 *s, va_list *args);
+
+/**
+ * Request parser
+ *
+ * @param src Header block to parse
+ * @param src_len Length of header block
+ * @param dst Buffer where headers will be decoded
+ * @param dst_len Length of buffer for decoded headers
+ * @param control_data Preparsed pseudo-headers
+ * @param headers List of regular headers
+ * @param dynamic_table Decoder dynamic table
+ *
+ * @return @c HTTP2_ERROR_NO_ERROR on success, connection error otherwise
+ */
+http2_error_t hpack_parse_request (u8 *src, u32 src_len, u8 *dst, u32 dst_len,
+ hpack_request_control_data_t *control_data,
+ http_field_line_t **headers,
+ hpack_dynamic_table_t *dynamic_table);
+
+/**
+ * Serialize response
+ *
+ * @param app_headers App header list
+ * @param app_headers_len App header list length
+ * @param control_data Header values set by protocol layer
+ * @param dst Vector where serialized headers will be added
+ */
+void hpack_serialize_response (u8 *app_headers, u32 app_headers_len,
+ hpack_response_control_data_t *control_data,
+ u8 **dst);
+
+#endif /* SRC_PLUGINS_HTTP_HPACK_H_ */
diff --git a/src/plugins/http/http2/http2.c b/src/plugins/http/http2/http2.c
new file mode 100644
index 00000000000..6c420c59625
--- /dev/null
+++ b/src/plugins/http/http2/http2.c
@@ -0,0 +1,1771 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2025 Cisco Systems, Inc.
+ */
+
+#include <vppinfra/llist.h>
+#include <http/http2/hpack.h>
+#include <http/http2/frame.h>
+#include <http/http_private.h>
+#include <http/http_timer.h>
+
+#ifndef HTTP_2_ENABLE
+#define HTTP_2_ENABLE 0
+#endif
+
+#define HTTP2_WIN_SIZE_MAX 0x7FFFFFFF
+#define HTTP2_INITIAL_WIN_SIZE 65535
+/* connection-level flow control window kind of mirrors TCP flow control */
+/* TODO: configurable? */
+#define HTTP2_CONNECTION_WINDOW_SIZE (10 << 20)
+
+#define foreach_http2_stream_state \
+ _ (IDLE, "IDLE") \
+ _ (OPEN, "OPEN") \
+ _ (HALF_CLOSED, "HALF-CLOSED") \
+ _ (CLOSED, "CLOSED")
+
+typedef enum http2_stream_state_
+{
+#define _(s, str) HTTP2_STREAM_STATE_##s,
+ foreach_http2_stream_state
+#undef _
+} http2_stream_state_t;
+
+#define foreach_http2_req_flags \
+ _ (APP_CLOSED, "app-closed") \
+ _ (NEED_WINDOW_UPDATE, "need-window-update")
+
+typedef enum http2_req_flags_bit_
+{
+#define _(sym, str) HTTP2_REQ_F_BIT_##sym,
+ foreach_http2_req_flags
+#undef _
+} http2_req_flags_bit_t;
+
+typedef enum http2_req_flags_
+{
+#define _(sym, str) HTTP2_REQ_F_##sym = 1 << HTTP2_REQ_F_BIT_##sym,
+ foreach_http2_req_flags
+#undef _
+} __clib_packed http2_req_flags_t;
+
+typedef struct http2_req_
+{
+ http_req_t base;
+ http2_stream_state_t stream_state;
+ u8 flags;
+ u32 stream_id;
+ i32 peer_window; /* can become negative after settings change */
+ u32 our_window;
+ u8 *payload;
+ u32 payload_len;
+ clib_llist_anchor_t resume_list;
+} http2_req_t;
+
+#define foreach_http2_conn_flags \
+ _ (EXPECT_PREFACE, "expect-preface") \
+ _ (PREFACE_VERIFIED, "preface-verified")
+
+typedef enum http2_conn_flags_bit_
+{
+#define _(sym, str) HTTP2_CONN_F_BIT_##sym,
+ foreach_http2_conn_flags
+#undef _
+} http2_conn_flags_bit_t;
+
+typedef enum http2_conn_flags_
+{
+#define _(sym, str) HTTP2_CONN_F_##sym = 1 << HTTP2_CONN_F_BIT_##sym,
+ foreach_http2_conn_flags
+#undef _
+} __clib_packed http2_conn_flags_t;
+
+typedef struct http2_conn_ctx_
+{
+ http2_conn_settings_t peer_settings;
+ hpack_dynamic_table_t decoder_dynamic_table;
+ u8 flags;
+ u32 last_opened_stream_id;
+ u32 last_processed_stream_id;
+ u32 peer_window;
+ u32 our_window;
+ uword *req_by_stream_id;
+ clib_llist_index_t streams_to_resume;
+ http2_conn_settings_t settings;
+} http2_conn_ctx_t;
+
+typedef struct http2_main_
+{
+ http2_conn_ctx_t **conn_pool;
+ http2_req_t **req_pool;
+ http2_conn_settings_t settings;
+} http2_main_t;
+
+static http2_main_t http2_main;
+
+/* Allocate and initialize a per-thread HTTP/2 connection context for hc.
+ * Stores the context's pool index in hc->opaque for later lookup by
+ * http2_conn_ctx_get_w_thread(). */
+http2_conn_ctx_t *
+http2_conn_ctx_alloc_w_thread (http_conn_t *hc)
+{
+ http2_main_t *h2m = &http2_main;
+ http2_conn_ctx_t *h2c;
+
+ pool_get_aligned_safe (h2m->conn_pool[hc->c_thread_index], h2c,
+ CLIB_CACHE_LINE_BYTES);
+ clib_memset (h2c, 0, sizeof (*h2c));
+ /* peer settings stay at defaults until its SETTINGS frame arrives */
+ h2c->peer_settings = http2_default_conn_settings;
+ h2c->peer_window = HTTP2_INITIAL_WIN_SIZE;
+ h2c->our_window = HTTP2_CONNECTION_WINDOW_SIZE;
+ h2c->settings = h2m->settings;
+ /* adjust settings according to app rx_fifo size */
+ h2c->settings.initial_window_size =
+ clib_min (h2c->settings.initial_window_size, hc->app_rx_fifo_size);
+ h2c->req_by_stream_id = hash_create (0, sizeof (uword));
+ h2c->streams_to_resume =
+ clib_llist_make_head (h2m->req_pool[hc->c_thread_index], resume_list);
+ hc->opaque =
+ uword_to_pointer (h2c - h2m->conn_pool[hc->c_thread_index], void *);
+ HTTP_DBG (1, "h2c [%u]%x", hc->c_thread_index,
+ h2c - h2m->conn_pool[hc->c_thread_index]);
+ return h2c;
+}
+
+/* Look up the HTTP/2 connection context whose pool index is stored in
+ * hc->opaque, on the connection's own thread. */
+static inline http2_conn_ctx_t *
+http2_conn_ctx_get_w_thread (http_conn_t *hc)
+{
+ u32 ctx_index = pointer_to_uword (hc->opaque);
+
+ return pool_elt_at_index (http2_main.conn_pool[hc->c_thread_index],
+ ctx_index);
+}
+
+/* Free the connection context and its per-connection state.
+ * The HPACK decoder dynamic table is allocated lazily on the first
+ * request, so it is freed only if HTTP_CONN_F_HAS_REQUEST is set.
+ * In debug builds the memory is poisoned to catch use-after-free. */
+static inline void
+http2_conn_ctx_free (http_conn_t *hc)
+{
+ http2_main_t *h2m = &http2_main;
+ http2_conn_ctx_t *h2c;
+
+ h2c = http2_conn_ctx_get_w_thread (hc);
+ HTTP_DBG (1, "h2c [%u]%x", hc->c_thread_index,
+ h2c - h2m->conn_pool[hc->c_thread_index]);
+ hash_free (h2c->req_by_stream_id);
+ if (hc->flags & HTTP_CONN_F_HAS_REQUEST)
+ hpack_dynamic_table_free (&h2c->decoder_dynamic_table);
+ if (CLIB_DEBUG)
+ memset (h2c, 0xba, sizeof (*h2c));
+ pool_put (h2m->conn_pool[hc->c_thread_index], h2c);
+}
+
+/* Allocate a request (stream) object for the given stream id, initialize
+ * its flow-control windows from the negotiated settings, and register it
+ * in the connection's stream-id hash. */
+static inline http2_req_t *
+http2_conn_alloc_req (http_conn_t *hc, u32 stream_id)
+{
+ http2_main_t *h2m = &http2_main;
+ http2_conn_ctx_t *h2c;
+ http2_req_t *req;
+ u32 req_index;
+ http_req_handle_t hr_handle;
+
+ pool_get_aligned_safe (h2m->req_pool[hc->c_thread_index], req,
+ CLIB_CACHE_LINE_BYTES);
+ clib_memset (req, 0, sizeof (*req));
+ req->base.hr_pa_session_handle = SESSION_INVALID_HANDLE;
+ req_index = req - h2m->req_pool[hc->c_thread_index];
+ /* handle encodes protocol version so generic code can dispatch */
+ hr_handle.version = HTTP_VERSION_2;
+ hr_handle.req_index = req_index;
+ req->base.hr_req_handle = hr_handle.as_u32;
+ req->base.hr_hc_index = hc->hc_hc_index;
+ req->base.c_thread_index = hc->c_thread_index;
+ req->stream_id = stream_id;
+ req->stream_state = HTTP2_STREAM_STATE_IDLE;
+ req->resume_list.next = CLIB_LLIST_INVALID_INDEX;
+ req->resume_list.prev = CLIB_LLIST_INVALID_INDEX;
+ h2c = http2_conn_ctx_get_w_thread (hc);
+ HTTP_DBG (1, "h2c [%u]%x req_index %x stream_id %u", hc->c_thread_index,
+ h2c - h2m->conn_pool[hc->c_thread_index], req_index, stream_id);
+ /* initial windows come from each side's SETTINGS */
+ req->peer_window = h2c->peer_settings.initial_window_size;
+ req->our_window = h2c->settings.initial_window_size;
+ hash_set (h2c->req_by_stream_id, stream_id, req_index);
+ return req;
+}
+
+/* Tear down a request: unlink it from the resume list if queued, release
+ * its buffers, remove it from the stream-id hash and return it to the
+ * pool (poisoned in debug builds). */
+static inline void
+http2_conn_free_req (http2_conn_ctx_t *h2c, http2_req_t *req,
+ clib_thread_index_t thread_index)
+{
+ http2_main_t *h2m = &http2_main;
+
+ HTTP_DBG (1, "h2c [%u]%x req_index %x stream_id %u", thread_index,
+ h2c - h2m->conn_pool[thread_index],
+ ((http_req_handle_t) req->base.hr_req_handle).req_index,
+ req->stream_id);
+ if (clib_llist_elt_is_linked (req, resume_list))
+ clib_llist_remove (h2m->req_pool[thread_index], resume_list, req);
+ vec_free (req->base.headers);
+ vec_free (req->base.target);
+ http_buffer_free (&req->base.tx_buf);
+ hash_unset (h2c->req_by_stream_id, req->stream_id);
+ if (CLIB_DEBUG)
+ memset (req, 0xba, sizeof (*req));
+ pool_put (h2m->req_pool[thread_index], req);
+}
+
+/* Look up the request object for a stream id on this connection.
+ * Returns 0 if the stream is unknown (never opened or already freed). */
+http2_req_t *
+http2_conn_get_req (http_conn_t *hc, u32 stream_id)
+{
+ http2_main_t *h2m = &http2_main;
+ http2_conn_ctx_t *h2c;
+ uword *p;
+
+ h2c = http2_conn_ctx_get_w_thread (hc);
+ p = hash_get (h2c->req_by_stream_id, stream_id);
+ if (p)
+ {
+ return pool_elt_at_index (h2m->req_pool[hc->c_thread_index], p[0]);
+ }
+ else
+ {
+ HTTP_DBG (1, "hc [%u]%x streamId %u not found", hc->c_thread_index,
+ hc->hc_hc_index, stream_id);
+ return 0;
+ }
+}
+
+/* Fetch a request object by pool index on the given thread. */
+always_inline http2_req_t *
+http2_req_get (u32 req_index, clib_thread_index_t thread_index)
+{
+ return pool_elt_at_index (http2_main.req_pool[thread_index], req_index);
+}
+
+/* Apply a WINDOW_UPDATE delta to the stream-level peer window.
+ * Returns -1 if the result would exceed the 2^31-1 maximum (a flow
+ * control error per RFC 9113), 0 on success. The window is signed and
+ * may legitimately go negative after a settings change. */
+always_inline int
+http2_req_update_peer_window (http2_req_t *req, i64 delta)
+{
+ i64 updated = delta + (i64) req->peer_window;
+
+ if (updated > HTTP2_WIN_SIZE_MAX)
+ return -1;
+
+ req->peer_window = (i32) updated;
+ HTTP_DBG (1, "new window size %d", req->peer_window);
+ return 0;
+}
+
+/* Queue a stream blocked on the connection-level window; it will be
+ * rescheduled by http2_resume_list_process() once window opens up.
+ * Clears the per-stream window-update flag since the stream now waits
+ * on the connection window instead. */
+always_inline void
+http2_req_add_to_resume_list (http2_conn_ctx_t *h2c, http2_req_t *req)
+{
+ http2_main_t *h2m = &http2_main;
+ http2_req_t *he;
+
+ req->flags &= ~HTTP2_REQ_F_NEED_WINDOW_UPDATE;
+ he = clib_llist_elt (h2m->req_pool[req->base.c_thread_index],
+ h2c->streams_to_resume);
+ clib_llist_add_tail (h2m->req_pool[req->base.c_thread_index], resume_list,
+ req, he);
+}
+
+/* If the connection window has space, dequeue the first blocked stream
+ * and reschedule its app session. Only one stream is resumed per call;
+ * the rest are picked up on subsequent invocations. */
+always_inline void
+http2_resume_list_process (http_conn_t *hc)
+{
+ http2_main_t *h2m = &http2_main;
+ http2_req_t *he, *req;
+ http2_conn_ctx_t *h2c;
+
+ h2c = http2_conn_ctx_get_w_thread (hc);
+ he =
+ clib_llist_elt (h2m->req_pool[hc->c_thread_index], h2c->streams_to_resume);
+
+ /* check if something in list and reschedule first app session from list if
+ * we have some space in connection window */
+ if (h2c->peer_window > 0 &&
+ !clib_llist_is_empty (h2m->req_pool[hc->c_thread_index], resume_list,
+ he))
+ {
+ req =
+ clib_llist_next (h2m->req_pool[hc->c_thread_index], resume_list, he);
+ clib_llist_remove (h2m->req_pool[hc->c_thread_index], resume_list, req);
+ transport_connection_reschedule (&req->base.connection);
+ }
+}
+
+/* send GOAWAY frame and close TCP connection */
+/* Connection-level fatal error handling (RFC 9113 section 5.4.1):
+ * report last_processed_stream_id in the GOAWAY so the peer knows which
+ * streams may have been acted upon, reset-notify all live streams, then
+ * shut the transport down. */
+always_inline void
+http2_connection_error (http_conn_t *hc, http2_error_t error,
+ transport_send_params_t *sp)
+{
+ u8 *response;
+ u32 req_index, stream_id;
+ http2_conn_ctx_t *h2c;
+ http2_req_t *req;
+
+ h2c = http2_conn_ctx_get_w_thread (hc);
+
+ response = http_get_tx_buf (hc);
+ http2_frame_write_goaway (error, h2c->last_processed_stream_id, &response);
+ http_io_ts_write (hc, response, vec_len (response), sp);
+ http_io_ts_after_write (hc, 1);
+
+ hash_foreach (stream_id, req_index, h2c->req_by_stream_id, ({
+ req = http2_req_get (req_index, hc->c_thread_index);
+ if (req->stream_state != HTTP2_STREAM_STATE_CLOSED)
+ session_transport_reset_notify (&req->base.connection);
+ }));
+ http_shutdown_transport (hc);
+}
+
+/* Send an RST_STREAM frame for the given stream id; used both for streams
+ * with and without an allocated request object. */
+always_inline void
+http2_send_stream_error (http_conn_t *hc, u32 stream_id, http2_error_t error,
+ transport_send_params_t *sp)
+{
+ u8 *response;
+
+ response = http_get_tx_buf (hc);
+ http2_frame_write_rst_stream (error, stream_id, &response);
+ http_io_ts_write (hc, response, vec_len (response), sp);
+ http_io_ts_after_write (hc, 1);
+}
+
+/* send RST_STREAM frame and notify app */
+/* Stream-level error: reset the stream and move it to CLOSED. The app is
+ * told "closed" if it already initiated close, "closing" otherwise. */
+always_inline void
+http2_stream_error (http_conn_t *hc, http2_req_t *req, http2_error_t error,
+ transport_send_params_t *sp)
+{
+ ASSERT (req->stream_state > HTTP2_STREAM_STATE_IDLE);
+
+ http2_send_stream_error (hc, req->stream_id, error, sp);
+ req->stream_state = HTTP2_STREAM_STATE_CLOSED;
+ if (req->flags & HTTP2_REQ_F_APP_CLOSED)
+ session_transport_closed_notify (&req->base.connection);
+ else
+ session_transport_closing_notify (&req->base.connection);
+}
+
+/* Orderly stream close (END_STREAM sent/received): transition to CLOSED
+ * and notify the app — confirmation if it closed first, otherwise a
+ * closing notification. */
+always_inline void
+http2_stream_close (http2_req_t *req)
+{
+ req->stream_state = HTTP2_STREAM_STATE_CLOSED;
+ if (req->flags & HTTP2_REQ_F_APP_CLOSED)
+ {
+ HTTP_DBG (1, "req [%u]%x app already closed, confirm",
+ req->base.c_thread_index,
+ ((http_req_handle_t) req->base.hr_req_handle).req_index);
+ session_transport_closed_notify (&req->base.connection);
+ }
+ else
+ {
+ HTTP_DBG (1, "req [%u]%x all done closing, notify app",
+ req->base.c_thread_index,
+ ((http_req_handle_t) req->base.hr_req_handle).req_index);
+ session_transport_closing_notify (&req->base.connection);
+ }
+}
+
+/* Send the server connection preface: a SETTINGS frame containing only
+ * the settings that differ from protocol defaults, followed by a
+ * connection-level WINDOW_UPDATE raising our receive window above the
+ * 65535-octet initial value (RFC 9113 section 3.4). */
+always_inline void
+http2_send_server_preface (http_conn_t *hc)
+{
+ u8 *response;
+ http2_settings_entry_t *setting, *settings_list = 0;
+ http2_conn_ctx_t *h2c = http2_conn_ctx_get_w_thread (hc);
+
+#define _(v, label, member, min, max, default_value, err_code) \
+ if (h2c->settings.member != default_value) \
+ { \
+ vec_add2 (settings_list, setting, 1); \
+ setting->identifier = HTTP2_SETTINGS_##label; \
+ setting->value = h2c->settings.member; \
+ }
+ foreach_http2_settings
+#undef _
+
+ response = http_get_tx_buf (hc);
+ http2_frame_write_settings (settings_list, &response);
+ /* send also connection window update */
+ http2_frame_write_window_update (h2c->our_window - HTTP2_INITIAL_WIN_SIZE, 0,
+ &response);
+ http_io_ts_write (hc, response, vec_len (response), 0);
+ http_io_ts_after_write (hc, 1);
+}
+
+/*************************************/
+/* request state machine handlers RX */
+/*************************************/
+
+/* RX handler: parse a complete request header block, validate mandatory
+ * pseudo-headers (RFC 9113 section 8.3.1), and hand the request message
+ * to the app. Transitions the request to TRANSPORT_IO_MORE_DATA when a
+ * body is announced via content-length, WAIT_APP_REPLY otherwise. */
+static http_sm_result_t
+http2_req_state_wait_transport_method (http_conn_t *hc, http2_req_t *req,
+ transport_send_params_t *sp,
+ http2_error_t *error)
+{
+ http2_conn_ctx_t *h2c;
+ hpack_request_control_data_t control_data;
+ u8 *buf = 0;
+ http_msg_t msg;
+ int rv;
+ http_req_state_t new_state = HTTP_REQ_STATE_WAIT_APP_REPLY;
+
+ h2c = http2_conn_ctx_get_w_thread (hc);
+
+ /* TODO: configurable buf size with bigger default value */
+ vec_validate_init_empty (buf, 1023, 0);
+ *error = hpack_parse_request (req->payload, req->payload_len, buf, 1023,
+ &control_data, &req->base.headers,
+ &h2c->decoder_dynamic_table);
+ if (*error != HTTP2_ERROR_NO_ERROR)
+ {
+ HTTP_DBG (1, "hpack_parse_request failed");
+ return HTTP_SM_ERROR;
+ }
+
+ /* missing or malformed mandatory pseudo-headers are stream errors */
+ if (!(control_data.parsed_bitmap & HPACK_PSEUDO_HEADER_METHOD_PARSED))
+ {
+ HTTP_DBG (1, ":method pseudo-header missing in request");
+ http2_stream_error (hc, req, HTTP2_ERROR_PROTOCOL_ERROR, sp);
+ return HTTP_SM_STOP;
+ }
+ if (control_data.method == HTTP_REQ_UNKNOWN ||
+ control_data.method == HTTP_REQ_CONNECT)
+ {
+ HTTP_DBG (1, "unsupported method");
+ http2_stream_error (hc, req, HTTP2_ERROR_PROTOCOL_ERROR, sp);
+ return HTTP_SM_STOP;
+ }
+ if (!(control_data.parsed_bitmap & HPACK_PSEUDO_HEADER_SCHEME_PARSED) &&
+ control_data.method != HTTP_REQ_CONNECT)
+ {
+ HTTP_DBG (1, ":scheme pseudo-header missing in request");
+ http2_stream_error (hc, req, HTTP2_ERROR_PROTOCOL_ERROR, sp);
+ return HTTP_SM_STOP;
+ }
+ if (control_data.scheme == HTTP_URL_SCHEME_UNKNOWN)
+ {
+ HTTP_DBG (1, "unsupported scheme");
+ http2_stream_error (hc, req, HTTP2_ERROR_INTERNAL_ERROR, sp);
+ return HTTP_SM_STOP;
+ }
+ if (!(control_data.parsed_bitmap & HPACK_PSEUDO_HEADER_PATH_PARSED) &&
+ control_data.method != HTTP_REQ_CONNECT)
+ {
+ HTTP_DBG (1, ":path pseudo-header missing in request");
+ http2_stream_error (hc, req, HTTP2_ERROR_PROTOCOL_ERROR, sp);
+ return HTTP_SM_STOP;
+ }
+ if (!(control_data.parsed_bitmap & HPACK_PSEUDO_HEADER_AUTHORITY_PARSED) &&
+ control_data.method != HTTP_REQ_CONNECT)
+ {
+ /* fixed copy-paste bug: this branch checks :authority, not :path */
+ HTTP_DBG (1, ":authority pseudo-header missing in request");
+ http2_stream_error (hc, req, HTTP2_ERROR_PROTOCOL_ERROR, sp);
+ return HTTP_SM_STOP;
+ }
+
+ req->base.control_data_len = control_data.control_data_len;
+ req->base.headers_offset = control_data.headers - buf;
+ req->base.headers_len = control_data.headers_len;
+ if (control_data.content_len_header_index != ~0)
+ {
+ req->base.content_len_header_index =
+ control_data.content_len_header_index;
+ rv = http_parse_content_length (&req->base, buf);
+ if (rv)
+ {
+ http2_stream_error (hc, req, HTTP2_ERROR_PROTOCOL_ERROR, sp);
+ return HTTP_SM_STOP;
+ }
+ /* body announced: keep reading DATA frames before replying */
+ new_state = HTTP_REQ_STATE_TRANSPORT_IO_MORE_DATA;
+ http_io_as_add_want_read_ntf (&req->base);
+ }
+ /* TODO: message framing without content length using END_STREAM flag */
+ if (req->base.body_len == 0 && req->stream_state == HTTP2_STREAM_STATE_OPEN)
+ {
+ HTTP_DBG (1, "no content-length and DATA frame expected");
+ *error = HTTP2_ERROR_INTERNAL_ERROR;
+ return HTTP_SM_ERROR;
+ }
+ req->base.to_recv = req->base.body_len;
+
+ req->base.target_path_len = control_data.path_len;
+ req->base.target_path_offset = control_data.path - buf;
+ /* drop leading slash */
+ req->base.target_path_offset++;
+ req->base.target_path_len--;
+ req->base.target_query_offset = 0;
+ req->base.target_query_len = 0;
+ http_identify_optional_query (&req->base, buf);
+
+ /* build the app-facing request message; offsets index into buf */
+ msg.type = HTTP_MSG_REQUEST;
+ msg.method_type = control_data.method;
+ msg.data.type = HTTP_MSG_DATA_INLINE;
+ msg.data.len = req->base.connection_header_index;
+ msg.data.scheme = control_data.scheme;
+ msg.data.target_authority_offset = control_data.authority - buf;
+ msg.data.target_authority_len = control_data.authority_len;
+ msg.data.target_path_offset = req->base.target_path_offset;
+ msg.data.target_path_len = req->base.target_path_len;
+ msg.data.target_query_offset = req->base.target_query_offset;
+ msg.data.target_query_len = req->base.target_query_len;
+ msg.data.headers_offset = req->base.headers_offset;
+ msg.data.headers_len = req->base.headers_len;
+ msg.data.headers_ctx = pointer_to_uword (req->base.headers);
+ msg.data.upgrade_proto = HTTP_UPGRADE_PROTO_NA;
+ msg.data.body_offset = req->base.control_data_len;
+ msg.data.body_len = req->base.body_len;
+
+ svm_fifo_seg_t segs[2] = { { (u8 *) &msg, sizeof (msg) },
+ { buf, req->base.control_data_len } };
+ HTTP_DBG (3, "%U", format_http_bytes, buf, req->base.control_data_len);
+ http_io_as_write_segs (&req->base, segs, 2);
+ http_req_state_change (&req->base, new_state);
+ http_app_worker_rx_notify (&req->base);
+
+ if (req->stream_id > h2c->last_processed_stream_id)
+ h2c->last_processed_stream_id = req->stream_id;
+
+ return HTTP_SM_STOP;
+}
+
+/* RX handler: forward a DATA frame payload to the app's rx fifo while
+ * enforcing that the peer does not send more than the announced
+ * content-length and does not end the stream before delivering it all.
+ * Switches back to WAIT_APP_REPLY once the full body has arrived. */
+static http_sm_result_t
+http2_req_state_transport_io_more_data (http_conn_t *hc, http2_req_t *req,
+ transport_send_params_t *sp,
+ http2_error_t *error)
+{
+ u32 max_enq;
+
+ if (req->payload_len > req->base.to_recv)
+ {
+ HTTP_DBG (1, "received more data than expected");
+ http2_stream_error (hc, req, HTTP2_ERROR_PROTOCOL_ERROR, sp);
+ return HTTP_SM_STOP;
+ }
+ req->base.to_recv -= req->payload_len;
+ if (req->stream_state == HTTP2_STREAM_STATE_HALF_CLOSED &&
+ req->base.to_recv != 0)
+ {
+ HTTP_DBG (1, "peer closed stream but don't send all data");
+ http2_stream_error (hc, req, HTTP2_ERROR_PROTOCOL_ERROR, sp);
+ return HTTP_SM_STOP;
+ }
+ max_enq = http_io_as_max_write (&req->base);
+ if (max_enq < req->payload_len)
+ {
+ /* flow control should prevent this; treat overflow as fatal */
+ clib_warning ("app's rx fifo full");
+ http2_stream_error (hc, req, HTTP2_ERROR_INTERNAL_ERROR, sp);
+ return HTTP_SM_STOP;
+ }
+ if (req->base.to_recv == 0)
+ http_req_state_change (&req->base, HTTP_REQ_STATE_WAIT_APP_REPLY);
+ http_io_as_write (&req->base, req->payload, req->payload_len);
+ http_app_worker_rx_notify (&req->base);
+
+ return HTTP_SM_STOP;
+}
+
+/*************************************/
+/* request state machine handlers TX */
+/*************************************/
+
+/* TX handler: read the app's reply message, HPACK-serialize the response
+ * headers and send them in a HEADERS frame. If the reply has a body,
+ * continue in APP_IO_MORE_DATA; otherwise set END_STREAM and close. */
+static http_sm_result_t
+http2_req_state_wait_app_reply (http_conn_t *hc, http2_req_t *req,
+ transport_send_params_t *sp,
+ http2_error_t *error)
+{
+ http_msg_t msg;
+ u8 *response, *date, *app_headers = 0;
+ u8 fh[HTTP2_FRAME_HEADER_SIZE];
+ hpack_response_control_data_t control_data;
+ u8 flags = HTTP2_FRAME_FLAG_END_HEADERS;
+ http_sm_result_t sm_result = HTTP_SM_ERROR;
+ u32 n_written;
+ http2_conn_ctx_t *h2c;
+
+ http_get_app_msg (&req->base, &msg);
+ ASSERT (msg.type == HTTP_MSG_REPLY);
+
+ response = http_get_tx_buf (hc);
+ date = format (0, "%U", format_http_time_now, hc);
+
+ control_data.sc = msg.code;
+ control_data.content_len = msg.data.body_len;
+ control_data.server_name = hc->app_name;
+ control_data.server_name_len = vec_len (hc->app_name);
+ control_data.date = date;
+ control_data.date_len = vec_len (date);
+
+ if (msg.data.headers_len)
+ app_headers = http_get_app_header_list (&req->base, &msg);
+
+ hpack_serialize_response (app_headers, msg.data.headers_len, &control_data,
+ &response);
+ vec_free (date);
+
+ h2c = http2_conn_ctx_get_w_thread (hc);
+ if (vec_len (response) > h2c->peer_settings.max_frame_size)
+ {
+ /* TODO: CONTINUATION (headers fragmentation) */
+ clib_warning ("resp headers greater than SETTINGS_MAX_FRAME_SIZE");
+ *error = HTTP2_ERROR_INTERNAL_ERROR;
+ return HTTP_SM_ERROR;
+ }
+
+ if (msg.data.body_len)
+ {
+ /* start sending the actual data */
+ http_req_tx_buffer_init (&req->base, &msg);
+ http_req_state_change (&req->base, HTTP_REQ_STATE_APP_IO_MORE_DATA);
+ sm_result = HTTP_SM_CONTINUE;
+ }
+ else
+ {
+ /* no response body, we are done */
+ flags |= HTTP2_FRAME_FLAG_END_STREAM;
+ sm_result = HTTP_SM_STOP;
+ http2_stream_close (req);
+ }
+
+ /* frame header is built last since it needs the payload length */
+ http2_frame_write_headers_header (vec_len (response), req->stream_id, flags,
+ fh);
+ svm_fifo_seg_t segs[2] = { { fh, HTTP2_FRAME_HEADER_SIZE },
+ { response, vec_len (response) } };
+ n_written = http_io_ts_write_segs (hc, segs, 2, sp);
+ ASSERT (n_written == (HTTP2_FRAME_HEADER_SIZE + vec_len (response)));
+ http_io_ts_after_write (hc, 0);
+
+ return sm_result;
+}
+
+/* TX handler: stream response body as DATA frames, bounded by the
+ * stream window, the connection window, the peer's max frame size and
+ * transport tx fifo space. Blocked streams are descheduled: on stream
+ * window they wait for WINDOW_UPDATE, on connection window they are
+ * queued on the resume list. */
+static http_sm_result_t
+http2_req_state_app_io_more_data (http_conn_t *hc, http2_req_t *req,
+ transport_send_params_t *sp,
+ http2_error_t *error)
+{
+ u32 max_write, max_read, n_segs, n_read, n_written = 0;
+ svm_fifo_seg_t *app_segs, *segs = 0;
+ http_buffer_t *hb = &req->base.tx_buf;
+ u8 fh[HTTP2_FRAME_HEADER_SIZE];
+ u8 finished = 0, flags = 0;
+ http2_conn_ctx_t *h2c;
+
+ ASSERT (http_buffer_bytes_left (hb) > 0);
+
+ if (req->peer_window <= 0)
+ {
+ HTTP_DBG (1, "stream window is full");
+ /* mark that we need window update on stream */
+ req->flags |= HTTP2_REQ_F_NEED_WINDOW_UPDATE;
+ http_req_deschedule (&req->base, sp);
+ return HTTP_SM_STOP;
+ }
+ h2c = http2_conn_ctx_get_w_thread (hc);
+ if (h2c->peer_window == 0)
+ {
+ HTTP_DBG (1, "connection window is full");
+ /* add to waiting queue */
+ http2_req_add_to_resume_list (h2c, req);
+ http_req_deschedule (&req->base, sp);
+ return HTTP_SM_STOP;
+ }
+
+ max_write = http_io_ts_max_write (hc, sp);
+ if (max_write <= HTTP2_FRAME_HEADER_SIZE)
+ {
+ HTTP_DBG (1, "ts tx fifo full");
+ goto check_fifo;
+ }
+ /* payload budget = min of fifo space, both windows, max frame size */
+ max_write -= HTTP2_FRAME_HEADER_SIZE;
+ max_write = clib_min (max_write, (u32) req->peer_window);
+ max_write = clib_min (max_write, h2c->peer_window);
+ max_write = clib_min (max_write, h2c->peer_settings.max_frame_size);
+
+ max_read = http_buffer_bytes_left (hb);
+
+ n_read = http_buffer_get_segs (hb, max_write, &app_segs, &n_segs);
+ if (n_read == 0)
+ {
+ HTTP_DBG (1, "no data to deq");
+ goto check_fifo;
+ }
+
+ /* last chunk of the body carries END_STREAM */
+ finished = (max_read - n_read) == 0;
+ flags = finished ? HTTP2_FRAME_FLAG_END_STREAM : 0;
+ http2_frame_write_data_header (n_read, req->stream_id, flags, fh);
+ vec_validate (segs, 0);
+ segs[0].len = HTTP2_FRAME_HEADER_SIZE;
+ segs[0].data = fh;
+ vec_append (segs, app_segs);
+
+ n_written = http_io_ts_write_segs (hc, segs, n_segs + 1, sp);
+ ASSERT (n_written == (HTTP2_FRAME_HEADER_SIZE + n_read));
+ vec_free (segs);
+ http_buffer_drain (hb, n_read);
+ /* charge both flow-control windows */
+ req->peer_window -= n_read;
+ h2c->peer_window -= n_read;
+
+ if (finished)
+ {
+ http_buffer_free (hb);
+ if (hc->flags & HTTP_CONN_F_IS_SERVER)
+ http2_stream_close (req);
+ else
+ req->stream_state = HTTP2_STREAM_STATE_HALF_CLOSED;
+ }
+ http_io_ts_after_write (hc, finished);
+
+check_fifo:
+ /* if ts tx fifo is filling up, wait for dequeue notification */
+ if (http_io_ts_check_write_thresh (hc))
+ {
+ http_io_ts_add_want_deq_ntf (hc);
+ http_req_deschedule (&req->base, sp);
+ }
+ return HTTP_SM_STOP;
+}
+
+/*************************/
+/* request state machine */
+/*************************/
+
+/* Per-state handler tables for the request state machine; a null entry
+ * means the event is invalid in that state. Indexed by http_req_state_t,
+ * so entry order must match the enum. */
+typedef http_sm_result_t (*http2_sm_handler) (http_conn_t *hc,
+ http2_req_t *req,
+ transport_send_params_t *sp,
+ http2_error_t *error);
+
+static http2_sm_handler tx_state_funcs[HTTP_REQ_N_STATES] = {
+ 0, /* idle */
+ 0, /* wait app method */
+ 0, /* wait transport reply */
+ 0, /* transport io more data */
+ 0, /* wait transport method */
+ http2_req_state_wait_app_reply,
+ http2_req_state_app_io_more_data,
+ 0, /* tunnel */
+ 0, /* udp tunnel */
+};
+
+static http2_sm_handler rx_state_funcs[HTTP_REQ_N_STATES] = {
+ 0, /* idle */
+ 0, /* wait app method */
+ 0, /* wait transport reply */
+ http2_req_state_transport_io_more_data,
+ http2_req_state_wait_transport_method,
+ 0, /* wait app reply */
+ 0, /* app io more data */
+ 0, /* tunnel */
+ 0, /* udp tunnel */
+};
+
+/* A TX event is valid only in states that have a registered TX handler. */
+static_always_inline int
+http2_req_state_is_tx_valid (http2_req_t *req)
+{
+ return tx_state_funcs[req->base.state] != 0;
+}
+
+/* Drive the request state machine in the given direction until a handler
+ * returns something other than HTTP_SM_CONTINUE. On HTTP_SM_ERROR the
+ * http2_error_t is propagated as a connection error. A request left in
+ * CLOSED stream state is freed here after notifying the app. */
+static_always_inline http2_error_t
+http2_req_run_state_machine (http_conn_t *hc, http2_req_t *req,
+ transport_send_params_t *sp, u8 is_tx)
+{
+ http_sm_result_t res;
+ http2_error_t error;
+ http2_conn_ctx_t *h2c;
+
+ do
+ {
+ if (is_tx)
+ res = tx_state_funcs[req->base.state](hc, req, sp, &error);
+ else
+ res = rx_state_funcs[req->base.state](hc, req, 0, &error);
+
+ if (res == HTTP_SM_ERROR)
+ {
+ HTTP_DBG (1, "protocol error %U", format_http2_error, error);
+ return error;
+ }
+ }
+ while (res == HTTP_SM_CONTINUE);
+
+ if (req->stream_state == HTTP2_STREAM_STATE_CLOSED)
+ {
+ h2c = http2_conn_ctx_get_w_thread (hc);
+ session_transport_delete_notify (&req->base.connection);
+ http2_conn_free_req (h2c, req, hc->c_thread_index);
+ }
+
+ return HTTP2_ERROR_NO_ERROR;
+}
+
+/******************/
+/* frame handlers */
+/******************/
+
+static http2_error_t
+http2_handle_headers_frame (http_conn_t *hc, http2_frame_header_t *fh)
+{
+ http2_req_t *req;
+ u8 *rx_buf;
+ http2_error_t rv;
+ http2_conn_ctx_t *h2c;
+
+ if (!(fh->flags & HTTP2_FRAME_FLAG_END_HEADERS))
+ {
+ /* TODO: fragmented headers */
+ return HTTP2_ERROR_INTERNAL_ERROR;
+ }
+
+ if (hc->flags & HTTP_CONN_F_IS_SERVER)
+ {
+ h2c = http2_conn_ctx_get_w_thread (hc);
+ /* streams initiated by client must use odd-numbered stream id */
+ if ((fh->stream_id & 1) == 0)
+ {
+ HTTP_DBG (1, "invalid stream id %u", fh->stream_id);
+ return HTTP2_ERROR_PROTOCOL_ERROR;
+ }
+ /* stream id must be greater than all streams that client has opened */
+ if (fh->stream_id <= h2c->last_opened_stream_id)
+ {
+ HTTP_DBG (1, "closed stream id %u", fh->stream_id);
+ return HTTP2_ERROR_STREAM_CLOSED;
+ }
+ h2c->last_opened_stream_id = fh->stream_id;
+ if (hash_elts (h2c->req_by_stream_id) ==
+ h2c->settings.max_concurrent_streams)
+ {
+ HTTP_DBG (1, "SETTINGS_MAX_CONCURRENT_STREAMS exceeded");
+ http_io_ts_drain (hc, fh->length);
+ http2_send_stream_error (hc, fh->stream_id,
+ HTTP2_ERROR_REFUSED_STREAM, 0);
+ return HTTP2_ERROR_NO_ERROR;
+ }
+ req = http2_conn_alloc_req (hc, fh->stream_id);
+ http_conn_accept_request (hc, &req->base);
+ http_req_state_change (&req->base, HTTP_REQ_STATE_WAIT_TRANSPORT_METHOD);
+ req->stream_state = HTTP2_STREAM_STATE_OPEN;
+ hc->flags &= ~HTTP_CONN_F_NO_APP_SESSION;
+ if (!(hc->flags & HTTP_CONN_F_HAS_REQUEST))
+ {
+ hc->flags |= HTTP_CONN_F_HAS_REQUEST;
+ hpack_dynamic_table_init (
+ &h2c->decoder_dynamic_table,
+ http2_default_conn_settings.header_table_size);
+ }
+ if (fh->flags & HTTP2_FRAME_FLAG_END_STREAM)
+ req->stream_state = HTTP2_STREAM_STATE_HALF_CLOSED;
+ }
+ else
+ {
+ /* TODO: client */
+ return HTTP2_ERROR_INTERNAL_ERROR;
+ }
+
+ rx_buf = http_get_rx_buf (hc);
+ vec_validate (rx_buf, fh->length - 1);
+ http_io_ts_read (hc, rx_buf, fh->length, 0);
+
+ rv = http2_frame_read_headers (&req->payload, &req->payload_len, rx_buf,
+ fh->length, fh->flags);
+ if (rv != HTTP2_ERROR_NO_ERROR)
+ return rv;
+
+ HTTP_DBG (1, "run state machine");
+ return http2_req_run_state_machine (hc, req, 0, 0);
+}
+
+static http2_error_t
+http2_handle_data_frame (http_conn_t *hc, http2_frame_header_t *fh)
+{
+ http2_req_t *req;
+ u8 *rx_buf;
+ http2_error_t rv;
+ http2_conn_ctx_t *h2c;
+
+ req = http2_conn_get_req (hc, fh->stream_id);
+ h2c = http2_conn_ctx_get_w_thread (hc);
+
+ if (!req)
+ {
+ if (fh->stream_id == 0)
+ {
+ HTTP_DBG (1, "DATA frame with stream id 0");
+ return HTTP2_ERROR_PROTOCOL_ERROR;
+ }
+ if (fh->stream_id <= h2c->last_opened_stream_id)
+ {
+ HTTP_DBG (1, "stream closed, ignoring frame");
+ http2_send_stream_error (hc, fh->stream_id,
+ HTTP2_ERROR_STREAM_CLOSED, 0);
+ return HTTP2_ERROR_NO_ERROR;
+ }
+ else
+ return HTTP2_ERROR_PROTOCOL_ERROR;
+ }
+
+ /* bogus state */
+ if (hc->flags & HTTP_CONN_F_IS_SERVER &&
+ req->stream_state != HTTP2_STREAM_STATE_OPEN)
+ {
+ HTTP_DBG (1, "error: stream already half-closed");
+ http2_stream_error (hc, req, HTTP2_ERROR_STREAM_CLOSED, 0);
+ return HTTP2_ERROR_NO_ERROR;
+ }
+
+ if (fh->length > req->our_window)
+ {
+ HTTP_DBG (1, "error: peer violated stream flow control");
+ http2_stream_error (hc, req, HTTP2_ERROR_FLOW_CONTROL_ERROR, 0);
+ return HTTP2_ERROR_NO_ERROR;
+ }
+ if (fh->length > h2c->our_window)
+ {
+ HTTP_DBG (1, "error: peer violated connection flow control");
+ return HTTP2_ERROR_FLOW_CONTROL_ERROR;
+ }
+
+ if (fh->flags & HTTP2_FRAME_FLAG_END_STREAM)
+ req->stream_state = HTTP2_STREAM_STATE_HALF_CLOSED;
+
+ rx_buf = http_get_rx_buf (hc);
+ vec_validate (rx_buf, fh->length - 1);
+ http_io_ts_read (hc, rx_buf, fh->length, 0);
+
+ rv = http2_frame_read_data (&req->payload, &req->payload_len, rx_buf,
+ fh->length, fh->flags);
+ if (rv != HTTP2_ERROR_NO_ERROR)
+ return rv;
+
+ req->our_window -= fh->length;
+ h2c->our_window -= fh->length;
+
+ HTTP_DBG (1, "run state machine");
+ return http2_req_run_state_machine (hc, req, 0, 0);
+}
+
+/* Process a WINDOW_UPDATE frame (RFC 9113 section 6.9): grow the
+ * connection-level (stream id 0) or stream-level peer window and, if a
+ * stream was blocked waiting for window, queue it on the resume list.
+ * Malformed updates on a stream are answered with RST_STREAM; on the
+ * connection window they are connection errors. */
+static http2_error_t
+http2_handle_window_update_frame (http_conn_t *hc, http2_frame_header_t *fh)
+{
+  u8 *rx_buf;
+  u32 win_increment;
+  http2_error_t rv;
+  http2_req_t *req;
+  http2_conn_ctx_t *h2c;
+
+  h2c = http2_conn_ctx_get_w_thread (hc);
+
+  /* NOTE(review): fh->length - 1 underflows if length is 0 — the frame
+   * reader would reject it, but vec_validate runs first; confirm */
+  rx_buf = http_get_rx_buf (hc);
+  vec_validate (rx_buf, fh->length - 1);
+  http_io_ts_read (hc, rx_buf, fh->length, 0);
+
+  rv = http2_frame_read_window_update (&win_increment, rx_buf, fh->length);
+  if (rv != HTTP2_ERROR_NO_ERROR)
+    {
+      HTTP_DBG (1, "invalid WINDOW_UPDATE frame (stream id %u)",
+		fh->stream_id);
+      /* error on the connection flow-control window is connection error */
+      if (fh->stream_id == 0)
+	return rv;
+      /* otherwise it is stream error */
+      req = http2_conn_get_req (hc, fh->stream_id);
+      if (!req)
+	http2_send_stream_error (hc, fh->stream_id, rv, 0);
+      else
+	http2_stream_error (hc, req, rv, 0);
+      return HTTP2_ERROR_NO_ERROR;
+    }
+
+  HTTP_DBG (1, "WINDOW_UPDATE %u (stream id %u)", win_increment,
+	    fh->stream_id);
+  if (fh->stream_id == 0)
+    {
+      /* connection window must not exceed the protocol maximum */
+      if (win_increment > (HTTP2_WIN_SIZE_MAX - h2c->peer_window))
+	return HTTP2_ERROR_FLOW_CONTROL_ERROR;
+      h2c->peer_window += win_increment;
+    }
+  else
+    {
+      req = http2_conn_get_req (hc, fh->stream_id);
+      if (!req)
+	{
+	  if (fh->stream_id > h2c->last_opened_stream_id)
+	    {
+	      HTTP_DBG (
+		1,
+		"received WINDOW_UPDATE frame on idle stream (stream id %u)",
+		fh->stream_id);
+	      return HTTP2_ERROR_PROTOCOL_ERROR;
+	    }
+	  /* ignore window update on closed stream */
+	  return HTTP2_ERROR_NO_ERROR;
+	}
+      if (req->stream_state != HTTP2_STREAM_STATE_CLOSED)
+	{
+	  /* overflow of the stream window is a stream-level error */
+	  if (http2_req_update_peer_window (req, win_increment))
+	    {
+	      http2_stream_error (hc, req, HTTP2_ERROR_FLOW_CONTROL_ERROR, 0);
+	      return HTTP2_ERROR_NO_ERROR;
+	    }
+	  if (req->flags & HTTP2_REQ_F_NEED_WINDOW_UPDATE)
+	    http2_req_add_to_resume_list (h2c, req);
+	}
+    }
+
+  return HTTP2_ERROR_NO_ERROR;
+}
+
+/* Process a SETTINGS frame (RFC 9113 section 6.5): either a peer ACK of
+ * our settings (must be empty), or a new settings payload which is ACKed
+ * and applied. A change of SETTINGS_INITIAL_WINDOW_SIZE retroactively
+ * adjusts every open stream's peer window by the delta. */
+static http2_error_t
+http2_handle_settings_frame (http_conn_t *hc, http2_frame_header_t *fh)
+{
+  u8 *rx_buf, *resp = 0;
+  http2_error_t rv;
+  http2_conn_settings_t new_settings;
+  http2_conn_ctx_t *h2c;
+  http2_req_t *req;
+  u32 stream_id, req_index;
+  i32 win_size_delta;
+
+  if (fh->stream_id != 0)
+    return HTTP2_ERROR_PROTOCOL_ERROR;
+
+  /* NOTE(review): exact compare — an ACK with any extra (unused) flag
+   * bits set would fall into the payload branch; RFC 9113 says unused
+   * flags must be ignored, so (flags & ACK) may be intended; confirm */
+  if (fh->flags == HTTP2_FRAME_FLAG_ACK)
+    {
+      if (fh->length != 0)
+	return HTTP2_ERROR_FRAME_SIZE_ERROR;
+      /* TODO: we can start using non-default settings */
+    }
+  else
+    {
+      if (fh->length < sizeof (http2_settings_entry_t))
+	return HTTP2_ERROR_FRAME_SIZE_ERROR;
+
+      rx_buf = http_get_rx_buf (hc);
+      vec_validate (rx_buf, fh->length - 1);
+      http_io_ts_read (hc, rx_buf, fh->length, 0);
+
+      /* parse on top of current peer settings so unknown/absent entries
+       * keep their previous values */
+      h2c = http2_conn_ctx_get_w_thread (hc);
+      new_settings = h2c->peer_settings;
+      rv = http2_frame_read_settings (&new_settings, rx_buf, fh->length);
+      if (rv != HTTP2_ERROR_NO_ERROR)
+	return rv;
+
+      /* ACK peer settings */
+      http2_frame_write_settings_ack (&resp);
+      http_io_ts_write (hc, resp, vec_len (resp), 0);
+      vec_free (resp);
+      http_io_ts_after_write (hc, 0);
+
+      /* change of SETTINGS_INITIAL_WINDOW_SIZE, we must adjust the size of all
+       * stream flow-control windows */
+      if (h2c->peer_settings.initial_window_size !=
+	  new_settings.initial_window_size)
+	{
+	  win_size_delta = (i32) new_settings.initial_window_size -
+			   (i32) h2c->peer_settings.initial_window_size;
+	  hash_foreach (
+	    stream_id, req_index, h2c->req_by_stream_id, ({
+	      req = http2_req_get (req_index, hc->c_thread_index);
+	      if (req->stream_state != HTTP2_STREAM_STATE_CLOSED)
+		{
+		  if (http2_req_update_peer_window (req, win_size_delta))
+		    http2_stream_error (hc, req,
+					HTTP2_ERROR_FLOW_CONTROL_ERROR, 0);
+		  if (req->flags & HTTP2_REQ_F_NEED_WINDOW_UPDATE)
+		    http2_req_add_to_resume_list (h2c, req);
+		}
+	    }));
+	}
+      h2c->peer_settings = new_settings;
+    }
+
+  return HTTP2_ERROR_NO_ERROR;
+}
+
+/* Process a RST_STREAM frame (RFC 9113 section 6.4): mark the stream
+ * closed and notify the session layer of the transport reset. Frames on
+ * already-closed streams are ignored; RST_STREAM on an idle stream or
+ * stream 0 is a connection error. */
+static http2_error_t
+http2_handle_rst_stream_frame (http_conn_t *hc, http2_frame_header_t *fh)
+{
+  u8 *rx_buf;
+  http2_error_t rv;
+  http2_req_t *req;
+  u32 error_code;
+  http2_conn_ctx_t *h2c;
+
+  if (fh->stream_id == 0)
+    return HTTP2_ERROR_PROTOCOL_ERROR;
+
+  rx_buf = http_get_rx_buf (hc);
+  vec_validate (rx_buf, fh->length - 1);
+  http_io_ts_read (hc, rx_buf, fh->length, 0);
+
+  rv = http2_frame_read_rst_stream (&error_code, rx_buf, fh->length);
+  if (rv != HTTP2_ERROR_NO_ERROR)
+    return rv;
+
+  req = http2_conn_get_req (hc, fh->stream_id);
+  if (!req)
+    {
+      h2c = http2_conn_ctx_get_w_thread (hc);
+      if (fh->stream_id <= h2c->last_opened_stream_id)
+	{
+	  /* we reset stream, but peer might send something meanwhile */
+	  HTTP_DBG (1, "stream closed, ignoring frame");
+	  return HTTP2_ERROR_NO_ERROR;
+	}
+      else
+	return HTTP2_ERROR_PROTOCOL_ERROR;
+    }
+
+  req->stream_state = HTTP2_STREAM_STATE_CLOSED;
+  session_transport_reset_notify (&req->base.connection);
+
+  return HTTP2_ERROR_NO_ERROR;
+}
+
+/* Process a GOAWAY frame (RFC 9113 section 6.8). With NO_ERROR the peer
+ * asks for graceful shutdown (not implemented yet); with any other code
+ * every stream gets a transport reset notification and the connection
+ * transport is shut down. */
+static http2_error_t
+http2_handle_goaway_frame (http_conn_t *hc, http2_frame_header_t *fh)
+{
+  u8 *rx_buf;
+  http2_error_t rv;
+  u32 error_code, last_stream_id, req_index, stream_id;
+  http2_conn_ctx_t *h2c;
+  http2_req_t *req;
+
+  if (fh->stream_id != 0)
+    return HTTP2_ERROR_PROTOCOL_ERROR;
+
+  rx_buf = http_get_rx_buf (hc);
+  vec_validate (rx_buf, fh->length - 1);
+  http_io_ts_read (hc, rx_buf, fh->length, 0);
+
+  rv =
+    http2_frame_read_goaway (&error_code, &last_stream_id, rx_buf, fh->length);
+  if (rv != HTTP2_ERROR_NO_ERROR)
+    return rv;
+
+  HTTP_DBG (1, "received GOAWAY %U", format_http2_error, error_code);
+
+  if (error_code == HTTP2_ERROR_NO_ERROR)
+    {
+      /* TODO: graceful shutdown (no new streams) */
+    }
+  else
+    {
+      /* connection error */
+      h2c = http2_conn_ctx_get_w_thread (hc);
+      hash_foreach (stream_id, req_index, h2c->req_by_stream_id, ({
+		      req = http2_req_get (req_index, hc->c_thread_index);
+		      session_transport_reset_notify (&req->base.connection);
+		    }));
+      http_shutdown_transport (hc);
+    }
+
+  return HTTP2_ERROR_NO_ERROR;
+}
+
+/* Process a PING frame (RFC 9113 section 6.7): echo the 8-byte opaque
+ * payload back with the ACK flag set; incoming ACKs are consumed and
+ * ignored. PING must be on stream 0 with exactly 8 bytes of payload. */
+static http2_error_t
+http2_handle_ping_frame (http_conn_t *hc, http2_frame_header_t *fh)
+{
+  u8 *rx_buf, *resp = 0;
+
+  if (fh->stream_id != 0 || fh->length != HTTP2_PING_PAYLOAD_LEN)
+    return HTTP2_ERROR_PROTOCOL_ERROR;
+
+  rx_buf = http_get_rx_buf (hc);
+  vec_validate (rx_buf, fh->length - 1);
+  http_io_ts_read (hc, rx_buf, fh->length, 0);
+
+  /* RFC9113 6.7: The endpoint MUST NOT respond to PING frames with ACK */
+  if (fh->flags & HTTP2_FRAME_FLAG_ACK)
+    return HTTP2_ERROR_NO_ERROR;
+
+  /* reply with ACK (first arg) carrying the same opaque payload */
+  http2_frame_write_ping (1, rx_buf, &resp);
+  http_io_ts_write (hc, resp, vec_len (resp), 0);
+  vec_free (resp);
+  http_io_ts_after_write (hc, 1);
+
+  return HTTP2_ERROR_NO_ERROR;
+}
+
+/* Process a PUSH_PROMISE frame (RFC 9113 section 6.6): a server must
+ * never receive one (connection error); client-side handling is not
+ * implemented yet. */
+static http2_error_t
+http2_handle_push_promise (http_conn_t *hc, http2_frame_header_t *fh)
+{
+  if (hc->flags & HTTP_CONN_F_IS_SERVER)
+    {
+      HTTP_DBG (1, "error: server received PUSH_PROMISE");
+      return HTTP2_ERROR_PROTOCOL_ERROR;
+    }
+  /* TODO: client */
+  return HTTP2_ERROR_INTERNAL_ERROR;
+}
+
+/* Verify the HTTP/2 client connection preface ("PRI * HTTP/2.0..."),
+ * server side only. Returns 0 on match, non-zero otherwise. The preface
+ * bytes are only peeked here; the caller drains them from the ts fifo. */
+static_always_inline int
+http2_expect_preface (http_conn_t *hc, http2_conn_ctx_t *h2c)
+{
+  u8 *rx_buf;
+
+  ASSERT (hc->flags & HTTP_CONN_F_IS_SERVER);
+  h2c->flags &= ~HTTP2_CONN_F_EXPECT_PREFACE;
+
+  /* already done in http core */
+  if (h2c->flags & HTTP2_CONN_F_PREFACE_VERIFIED)
+    return 0;
+
+  rx_buf = http_get_rx_buf (hc);
+  http_io_ts_read (hc, rx_buf, http2_conn_preface.len, 1);
+  return memcmp (rx_buf, http2_conn_preface.base, http2_conn_preface.len);
+}
+
+/*****************/
+/* http core VFT */
+/*****************/
+
+/* VFT: map a request pool index to its owning http connection index. */
+static u32
+http2_hc_index_get_by_req_index (u32 req_index,
+				 clib_thread_index_t thread_index)
+{
+  http2_req_t *req;
+
+  req = http2_req_get (req_index, thread_index);
+  return req->base.hr_hc_index;
+}
+
+/* VFT: return the embedded transport connection for a request index. */
+static transport_connection_t *
+http2_req_get_connection (u32 req_index, clib_thread_index_t thread_index)
+{
+  http2_req_t *req;
+  req = http2_req_get (req_index, thread_index);
+  return &(req->base.connection);
+}
+
+/* Format helper: one-line summary of an HTTP/2 request (stream id, app
+ * worker, connection index and underlying transport session). Args:
+ * http2_req_t *, http_conn_t *. */
+static u8 *
+format_http2_req (u8 *s, va_list *args)
+{
+  http2_req_t *req = va_arg (*args, http2_req_t *);
+  http_conn_t *hc = va_arg (*args, http_conn_t *);
+  session_t *ts;
+
+  ts = session_get_from_handle (hc->hc_tc_session_handle);
+  s = format (s, "[%d:%d][H2] stream_id %u app_wrk %u hc_index %u ts %d:%d",
+	      req->base.c_thread_index, req->base.c_s_index, req->stream_id,
+	      req->base.hr_pa_wrk_index, req->base.hr_hc_index,
+	      ts->thread_index, ts->session_index);
+
+  return s;
+}
+
+/* VFT: CLI formatter for a request; verbose adds connection state.
+ * Args: u32 req_index, u32 thread_index, http_conn_t *, u32 verbose. */
+static u8 *
+http2_format_req (u8 *s, va_list *args)
+{
+  u32 req_index = va_arg (*args, u32);
+  clib_thread_index_t thread_index = va_arg (*args, u32);
+  http_conn_t *hc = va_arg (*args, http_conn_t *);
+  u32 verbose = va_arg (*args, u32);
+  http2_req_t *req;
+
+  req = http2_req_get (req_index, thread_index);
+
+  s = format (s, "%-" SESSION_CLI_ID_LEN "U", format_http2_req, req, hc);
+  if (verbose)
+    {
+      s =
+	format (s, "%-" SESSION_CLI_STATE_LEN "U", format_http_conn_state, hc);
+      if (verbose > 1)
+	s = format (s, "\n");
+    }
+
+  return s;
+}
+
+/* VFT: app has data to send on a stream. Validates the request tx state,
+ * runs the tx side of the request state machine, then tries to resume
+ * other window-blocked streams and refreshes the connection timer.
+ * Invalid state resets the stream; a state-machine connection error
+ * tears down the whole connection. */
+static void
+http2_app_tx_callback (http_conn_t *hc, u32 req_index,
+		       transport_send_params_t *sp)
+{
+  http2_req_t *req;
+  http2_error_t rv;
+
+  HTTP_DBG (1, "hc [%u]%x req_index %u", hc->c_thread_index, hc->hc_hc_index,
+	    req_index);
+  req = http2_req_get (req_index, hc->c_thread_index);
+
+  if (!http2_req_state_is_tx_valid (req))
+    {
+      if (req->base.state == HTTP_REQ_STATE_TRANSPORT_IO_MORE_DATA &&
+	  (hc->flags & HTTP_CONN_F_IS_SERVER))
+	{
+	  /* server app might send error earlier */
+	  http_req_state_change (&req->base, HTTP_REQ_STATE_WAIT_APP_REPLY);
+	}
+      else
+	{
+	  clib_warning ("hc [%u]%x invalid tx state: http req state "
+			"'%U', session state '%U'",
+			hc->c_thread_index, hc->hc_hc_index,
+			format_http_req_state, req->base.state,
+			format_http_conn_state, hc);
+	  http2_stream_error (hc, req, HTTP2_ERROR_INTERNAL_ERROR, sp);
+	  return;
+	}
+    }
+
+  /* peer reset stream, but app might send something meanwhile */
+  if (req->stream_state == HTTP2_STREAM_STATE_CLOSED)
+    {
+      HTTP_DBG (1, "stream closed, ignoring app data");
+      http_io_as_drain_all (&req->base);
+      return;
+    }
+
+  HTTP_DBG (1, "run state machine");
+  rv = http2_req_run_state_machine (hc, req, sp, 1);
+  if (rv != HTTP2_ERROR_NO_ERROR)
+    {
+      http2_connection_error (hc, rv, sp);
+      return;
+    }
+
+  /* maybe we can continue sending data on some stream */
+  http2_resume_list_process (hc);
+
+  /* reset http connection expiration timer */
+  http_conn_timer_update (hc);
+}
+
+/* VFT: app consumed data from its rx fifo. While the stream is still
+ * open, grow the stream receive window by the space the app freed and
+ * advertise it to the peer with a WINDOW_UPDATE. */
+static void
+http2_app_rx_evt_callback (http_conn_t *hc, u32 req_index,
+			   clib_thread_index_t thread_index)
+{
+  /* TODO: continue tunnel RX */
+  http2_req_t *req;
+  u8 *response;
+  u32 increment;
+
+  req = http2_req_get (req_index, thread_index);
+  if (!req)
+    {
+      HTTP_DBG (1, "req already deleted");
+      return;
+    }
+  HTTP_DBG (1, "received app read notification stream id %u", req->stream_id);
+  /* send stream window update if app read data in rx fifo and we expect more
+   * data (stream is still open) */
+  if (req->stream_state == HTTP2_STREAM_STATE_OPEN)
+    {
+      http_io_as_reset_has_read_ntf (&req->base);
+      response = http_get_tx_buf (hc);
+      /* new window = free space in the app rx fifo */
+      increment = http_io_as_max_write (&req->base) - req->our_window;
+      HTTP_DBG (1, "stream window increment %u", increment);
+      req->our_window += increment;
+      http2_frame_write_window_update (increment, req->stream_id, &response);
+      http_io_ts_write (hc, response, vec_len (response), 0);
+      http_io_ts_after_write (hc, 0);
+    }
+}
+
+/* VFT: app closed its side of a stream. If the stream (or whole
+ * connection) is already closed, confirm immediately; otherwise flag the
+ * request so close completes once pending data is flushed to ts. */
+static void
+http2_app_close_callback (http_conn_t *hc, u32 req_index,
+			  clib_thread_index_t thread_index)
+{
+  http2_req_t *req;
+
+  HTTP_DBG (1, "hc [%u]%x req_index %u", hc->c_thread_index, hc->hc_hc_index,
+	    req_index);
+  req = http2_req_get (req_index, thread_index);
+  if (!req)
+    {
+      HTTP_DBG (1, "req already deleted");
+      return;
+    }
+
+  if (req->stream_state == HTTP2_STREAM_STATE_CLOSED ||
+      hc->state == HTTP_CONN_STATE_CLOSED)
+    {
+      HTTP_DBG (1, "nothing more to send, confirm close");
+      session_transport_closed_notify (&req->base.connection);
+    }
+  else
+    {
+      HTTP_DBG (1, "wait for all data to be written to ts");
+      req->flags |= HTTP2_REQ_F_APP_CLOSED;
+    }
+}
+
+/* VFT: app reset a stream — mark it app-closed and send RST_STREAM with
+ * INTERNAL_ERROR to the peer. */
+static void
+http2_app_reset_callback (http_conn_t *hc, u32 req_index,
+			  clib_thread_index_t thread_index)
+{
+  http2_req_t *req;
+
+  HTTP_DBG (1, "hc [%u]%x req_index %u", hc->c_thread_index, hc->hc_hc_index,
+	    req_index);
+  req = http2_req_get (req_index, thread_index);
+  req->flags |= HTTP2_REQ_F_APP_CLOSED;
+  http2_stream_error (hc, req, HTTP2_ERROR_INTERNAL_ERROR, 0);
+}
+
+/* VFT: client-side connect is not implemented yet; always fails. */
+static int
+http2_transport_connected_callback (http_conn_t *hc)
+{
+  /* TODO */
+  return -1;
+}
+
+/* VFT: main receive entry point. Verifies the client preface on first
+ * data, then loops over complete frames in the ts rx fifo, dispatching
+ * each to its handler. Any handler returning a connection-level error
+ * ends the connection (GOAWAY). Afterwards the connection receive window
+ * is topped up if more than half consumed, window-blocked streams are
+ * resumed and the connection timer is refreshed. */
+static void
+http2_transport_rx_callback (http_conn_t *hc)
+{
+  http2_frame_header_t fh;
+  u32 to_deq;
+  u8 *rx_buf;
+  http2_error_t rv;
+  http2_conn_ctx_t *h2c;
+
+  HTTP_DBG (1, "hc [%u]%x", hc->c_thread_index, hc->hc_hc_index);
+
+  to_deq = http_io_ts_max_read (hc);
+
+  if (PREDICT_FALSE (to_deq == 0))
+    {
+      HTTP_DBG (1, "no data to deq");
+      return;
+    }
+
+  h2c = http2_conn_ctx_get_w_thread (hc);
+  if (h2c->flags & HTTP2_CONN_F_EXPECT_PREFACE)
+    {
+      /* preface must arrive in one piece; a partial one is treated as a
+       * broken client and the transport is disconnected */
+      if (to_deq < http2_conn_preface.len)
+	{
+	  HTTP_DBG (1, "to_deq %u is less than conn preface size", to_deq);
+	  http_disconnect_transport (hc);
+	  return;
+	}
+      if (http2_expect_preface (hc, h2c))
+	{
+	  HTTP_DBG (1, "conn preface verification failed");
+	  http_disconnect_transport (hc);
+	  return;
+	}
+      http2_send_server_preface (hc);
+      http_io_ts_drain (hc, http2_conn_preface.len);
+      to_deq -= http2_conn_preface.len;
+      if (to_deq == 0)
+	return;
+    }
+
+  if (PREDICT_FALSE (to_deq < HTTP2_FRAME_HEADER_SIZE))
+    {
+      HTTP_DBG (1, "to_deq %u is less than frame header size", to_deq);
+      http2_connection_error (hc, HTTP2_ERROR_PROTOCOL_ERROR, 0);
+      return;
+    }
+
+  while (to_deq >= HTTP2_FRAME_HEADER_SIZE)
+    {
+      /* peek the 9-byte frame header; drained only once the full
+       * payload is known to be in the fifo */
+      rx_buf = http_get_rx_buf (hc);
+      http_io_ts_read (hc, rx_buf, HTTP2_FRAME_HEADER_SIZE, 1);
+      to_deq -= HTTP2_FRAME_HEADER_SIZE;
+      http2_frame_header_read (rx_buf, &fh);
+      if (fh.length > h2c->settings.max_frame_size)
+	{
+	  HTTP_DBG (1, "frame length %lu exceeded SETTINGS_MAX_FRAME_SIZE %lu",
+		    fh.length, h2c->settings.max_frame_size);
+	  http2_connection_error (hc, HTTP2_ERROR_FRAME_SIZE_ERROR, 0);
+	  return;
+	}
+      if (fh.length > to_deq)
+	{
+	  HTTP_DBG (
+	    1, "frame payload not yet received, to deq %lu, frame length %lu",
+	    to_deq, fh.length);
+	  /* frame that can never fit in the fifo would stall forever */
+	  if (http_io_ts_fifo_size (hc, 1) <
+	      (fh.length + HTTP2_FRAME_HEADER_SIZE))
+	    {
+	      clib_warning ("ts rx fifo too small to hold frame (%u)",
+			    fh.length + HTTP2_FRAME_HEADER_SIZE);
+	      http2_connection_error (hc, HTTP2_ERROR_PROTOCOL_ERROR, 0);
+	    }
+	  return;
+	}
+      http_io_ts_drain (hc, HTTP2_FRAME_HEADER_SIZE);
+      to_deq -= fh.length;
+
+      HTTP_DBG (1, "frame type 0x%02x", fh.type);
+      switch (fh.type)
+	{
+	case HTTP2_FRAME_TYPE_HEADERS:
+	  rv = http2_handle_headers_frame (hc, &fh);
+	  break;
+	case HTTP2_FRAME_TYPE_DATA:
+	  rv = http2_handle_data_frame (hc, &fh);
+	  break;
+	case HTTP2_FRAME_TYPE_WINDOW_UPDATE:
+	  rv = http2_handle_window_update_frame (hc, &fh);
+	  break;
+	case HTTP2_FRAME_TYPE_SETTINGS:
+	  rv = http2_handle_settings_frame (hc, &fh);
+	  break;
+	case HTTP2_FRAME_TYPE_RST_STREAM:
+	  rv = http2_handle_rst_stream_frame (hc, &fh);
+	  break;
+	case HTTP2_FRAME_TYPE_GOAWAY:
+	  rv = http2_handle_goaway_frame (hc, &fh);
+	  break;
+	case HTTP2_FRAME_TYPE_PING:
+	  rv = http2_handle_ping_frame (hc, &fh);
+	  break;
+	case HTTP2_FRAME_TYPE_CONTINUATION:
+	  /* TODO */
+	  rv = HTTP2_ERROR_INTERNAL_ERROR;
+	  break;
+	case HTTP2_FRAME_TYPE_PUSH_PROMISE:
+	  rv = http2_handle_push_promise (hc, &fh);
+	  break;
+	case HTTP2_FRAME_TYPE_PRIORITY: /* deprecated */
+	default:
+	  /* ignore unknown frame type */
+	  http_io_ts_drain (hc, fh.length);
+	  rv = HTTP2_ERROR_NO_ERROR;
+	  break;
+	}
+
+      if (rv != HTTP2_ERROR_NO_ERROR)
+	{
+	  http2_connection_error (hc, rv, 0);
+	  return;
+	}
+    }
+
+  /* send connection window update if more than half consumed */
+  if (h2c->our_window < HTTP2_CONNECTION_WINDOW_SIZE / 2)
+    {
+      HTTP_DBG (1, "connection window increment %u",
+		HTTP2_CONNECTION_WINDOW_SIZE - h2c->our_window);
+      u8 *response = http_get_tx_buf (hc);
+      http2_frame_write_window_update (
+	HTTP2_CONNECTION_WINDOW_SIZE - h2c->our_window, 0, &response);
+      http_io_ts_write (hc, response, vec_len (response), 0);
+      http_io_ts_after_write (hc, 0);
+      h2c->our_window = HTTP2_CONNECTION_WINDOW_SIZE;
+    }
+  /* maybe we can continue sending data on some stream */
+  http2_resume_list_process (hc);
+
+  /* reset http connection expiration timer */
+  http_conn_timer_update (hc);
+}
+
+/* VFT: underlying transport is closing. Notify every open stream; if
+ * none are open, disconnect the transport immediately. */
+static void
+http2_transport_close_callback (http_conn_t *hc)
+{
+  u32 req_index, stream_id, n_open_streams = 0;
+  http2_req_t *req;
+  http2_conn_ctx_t *h2c;
+
+  HTTP_DBG (1, "hc [%u]%x", hc->c_thread_index, hc->hc_hc_index);
+
+  if (!(hc->flags & HTTP_CONN_F_HAS_REQUEST))
+    {
+      HTTP_DBG (1, "no request");
+      return;
+    }
+
+  h2c = http2_conn_ctx_get_w_thread (hc);
+  hash_foreach (stream_id, req_index, h2c->req_by_stream_id, ({
+		  req = http2_req_get (req_index, hc->c_thread_index);
+		  if (req->stream_state != HTTP2_STREAM_STATE_CLOSED)
+		    {
+		      HTTP_DBG (1, "req_index %u", req_index);
+		      session_transport_closing_notify (&req->base.connection);
+		      n_open_streams++;
+		    }
+		}));
+  if (n_open_streams == 0)
+    {
+      HTTP_DBG (1, "no open stream disconnecting");
+      http_disconnect_transport (hc);
+    }
+}
+
+/* VFT: underlying transport was reset — propagate a reset notification
+ * to every stream that is not already closed. */
+static void
+http2_transport_reset_callback (http_conn_t *hc)
+{
+  u32 req_index, stream_id;
+  http2_req_t *req;
+  http2_conn_ctx_t *h2c;
+
+  HTTP_DBG (1, "hc [%u]%x", hc->c_thread_index, hc->hc_hc_index);
+
+  if (!(hc->flags & HTTP_CONN_F_HAS_REQUEST))
+    return;
+
+  h2c = http2_conn_ctx_get_w_thread (hc);
+  hash_foreach (stream_id, req_index, h2c->req_by_stream_id, ({
+		  req = http2_req_get (req_index, hc->c_thread_index);
+		  if (req->stream_state != HTTP2_STREAM_STATE_CLOSED)
+		    {
+		      HTTP_DBG (1, "req_index %u", req_index);
+		      session_transport_reset_notify (&req->base.connection);
+		    }
+		}));
+}
+
+/* VFT: transport tx space is available again — reschedule every open
+ * stream whose connection was descheduled. */
+static void
+http2_transport_conn_reschedule_callback (http_conn_t *hc)
+{
+  u32 req_index, stream_id;
+  http2_req_t *req;
+  http2_conn_ctx_t *h2c;
+
+  HTTP_DBG (1, "hc [%u]%x", hc->c_thread_index, hc->hc_hc_index);
+  ASSERT (hc->flags & HTTP_CONN_F_HAS_REQUEST);
+
+  /* defensive for release builds where the ASSERT above is compiled out */
+  if (!(hc->flags & HTTP_CONN_F_HAS_REQUEST))
+    return;
+
+  h2c = http2_conn_ctx_get_w_thread (hc);
+  hash_foreach (
+    stream_id, req_index, h2c->req_by_stream_id, ({
+      req = http2_req_get (req_index, hc->c_thread_index);
+      if (req->stream_state != HTTP2_STREAM_STATE_CLOSED &&
+	  transport_connection_is_descheduled (&req->base.connection))
+	{
+	  HTTP_DBG (1, "req_index %u", req_index);
+	  transport_connection_reschedule (&req->base.connection);
+	}
+    }));
+}
+
+/* VFT: new server connection accepted — allocate the per-connection
+ * HTTP/2 context and arm preface verification (skipped when http core
+ * already consumed the preface on plain TCP). */
+static void
+http2_conn_accept_callback (http_conn_t *hc)
+{
+  http2_conn_ctx_t *h2c;
+
+  HTTP_DBG (1, "hc [%u]%x", hc->c_thread_index, hc->hc_hc_index);
+  h2c = http2_conn_ctx_alloc_w_thread (hc);
+  h2c->flags |= HTTP2_CONN_F_EXPECT_PREFACE;
+  /* already done in http core */
+  if (http_get_transport_proto (hc) == TRANSPORT_PROTO_TCP)
+    h2c->flags |= HTTP2_CONN_F_PREFACE_VERIFIED;
+}
+
+/* VFT: final connection cleanup — notify and free every remaining
+ * request, then free the HTTP/2 connection context. Request indices are
+ * collected first because freeing mutates req_by_stream_id, which must
+ * not happen inside hash_foreach. */
+static void
+http2_conn_cleanup_callback (http_conn_t *hc)
+{
+  u32 req_index, stream_id, *req_index_p, *req_indices = 0;
+  http2_req_t *req;
+  http2_conn_ctx_t *h2c;
+
+  HTTP_DBG (1, "hc [%u]%x", hc->c_thread_index, hc->hc_hc_index);
+  h2c = http2_conn_ctx_get_w_thread (hc);
+  hash_foreach (stream_id, req_index, h2c->req_by_stream_id,
+		({ vec_add1 (req_indices, req_index); }));
+
+  vec_foreach (req_index_p, req_indices)
+    {
+      req = http2_req_get (*req_index_p, hc->c_thread_index);
+      if (req->stream_state != HTTP2_STREAM_STATE_CLOSED)
+	session_transport_delete_notify (&req->base.connection);
+      http2_conn_free_req (h2c, req, hc->c_thread_index);
+    }
+
+  vec_free (req_indices);
+  http2_conn_ctx_free (hc);
+}
+
+/* VFT: plugin enable — size the per-thread connection and request pools
+ * (one pool per worker plus the main thread). */
+static void
+http2_enable_callback (void)
+{
+  http2_main_t *h2m = &http2_main;
+  vlib_thread_main_t *vtm = vlib_get_thread_main ();
+  u32 num_threads;
+
+  num_threads = 1 /* main thread */ + vtm->n_threads;
+
+  vec_validate (h2m->conn_pool, num_threads - 1);
+  vec_validate (h2m->req_pool, num_threads - 1);
+}
+
+/* Set one local HTTP/2 setting, range-checked against the min/max bounds
+ * declared in foreach_http2_settings. Returns 0 on success, -1 for an
+ * unknown setting or out-of-range value. */
+static int
+http2_update_settings (http_settings_t type, u32 value)
+{
+  http2_main_t *h2m = &http2_main;
+
+  switch (type)
+    {
+#define _(v, label, member, min, max, default_value, err_code) \
+  case HTTP2_SETTINGS_##label: \
+    if (!(value >= min && value <= max)) \
+      return -1; \
+    h2m->settings.member = value; \
+    return 0;
+      foreach_http2_settings
+#undef _
+	default : return -1;
+    }
+}
+
+/* VFT: parse the startup-config http2 stanza (initial-window-size,
+ * max-frame-size, max-header-list-size, header-table-size). Returns 1 on
+ * success, 0 on an unknown token or out-of-range value. */
+static uword
+http2_unformat_config_callback (unformat_input_t *input)
+{
+  u32 value;
+
+  if (!input)
+    return 0;
+
+  unformat_skip_white_space (input);
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "initial-window-size %u", &value))
+	{
+	  if (http2_update_settings (HTTP2_SETTINGS_INITIAL_WINDOW_SIZE,
+				     value))
+	    return 0;
+	}
+      else if (unformat (input, "max-frame-size %u", &value))
+	{
+	  if (http2_update_settings (HTTP2_SETTINGS_MAX_FRAME_SIZE, value))
+	    return 0;
+	}
+      else if (unformat (input, "max-header-list-size %u", &value))
+	{
+	  if (http2_update_settings (HTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
+				     value))
+	    return 0;
+	}
+      else if (unformat (input, "header-table-size %u", &value))
+	{
+	  if (http2_update_settings (HTTP2_SETTINGS_HEADER_TABLE_SIZE, value))
+	    return 0;
+	}
+      else
+	return 0;
+    }
+  return 1;
+}
+
+/* http core engine virtual function table for HTTP/2; registered with
+ * http_register_engine() in http2_init(). */
+const static http_engine_vft_t http2_engine = {
+  .name = "http2",
+  .hc_index_get_by_req_index = http2_hc_index_get_by_req_index,
+  .req_get_connection = http2_req_get_connection,
+  .format_req = http2_format_req,
+  .app_tx_callback = http2_app_tx_callback,
+  .app_rx_evt_callback = http2_app_rx_evt_callback,
+  .app_close_callback = http2_app_close_callback,
+  .app_reset_callback = http2_app_reset_callback,
+  .transport_connected_callback = http2_transport_connected_callback,
+  .transport_rx_callback = http2_transport_rx_callback,
+  .transport_close_callback = http2_transport_close_callback,
+  .transport_reset_callback = http2_transport_reset_callback,
+  .transport_conn_reschedule_callback =
+    http2_transport_conn_reschedule_callback,
+  .conn_accept_callback = http2_conn_accept_callback,
+  .conn_cleanup_callback = http2_conn_cleanup_callback,
+  .enable_callback = http2_enable_callback,
+  .unformat_cfg_callback = http2_unformat_config_callback,
+};
+
+/* Plugin init: seed local settings from the protocol defaults and
+ * register the HTTP/2 engine with http core. Compiled in only when
+ * HTTP_2_ENABLE is set. */
+clib_error_t *
+http2_init (vlib_main_t *vm)
+{
+  http2_main_t *h2m = &http2_main;
+
+  /* NOTE(review): clib_warning on every init is noisy for a non-error —
+   * consider a debug-level log; confirm intent */
+  clib_warning ("http/2 enabled");
+  h2m->settings = http2_default_conn_settings;
+  h2m->settings.max_concurrent_streams = 100; /* by default unlimited */
+  http_register_engine (&http2_engine, HTTP_VERSION_2);
+
+  return 0;
+}
+
+#if HTTP_2_ENABLE > 0
+VLIB_INIT_FUNCTION (http2_init) = {
+  .runs_after = VLIB_INITS ("http_transport_init"),
+};
+#endif
diff --git a/src/plugins/http/http2/http2.h b/src/plugins/http/http2/http2.h
new file mode 100644
index 00000000000..9fc95344771
--- /dev/null
+++ b/src/plugins/http/http2/http2.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2025 Cisco Systems, Inc.
+ */
+
+#ifndef SRC_PLUGINS_HTTP_HTTP2_H_
+#define SRC_PLUGINS_HTTP_HTTP2_H_
+
+#include <vppinfra/format.h>
+#include <vppinfra/types.h>
+
+/* RFC9113 section 7 */
+#define foreach_http2_error \
+ _ (NO_ERROR, "NO_ERROR") \
+ _ (PROTOCOL_ERROR, "PROTOCOL_ERROR") \
+ _ (INTERNAL_ERROR, "INTERNAL_ERROR") \
+ _ (FLOW_CONTROL_ERROR, "FLOW_CONTROL_ERROR") \
+ _ (SETTINGS_TIMEOUT, "SETTINGS_TIMEOUT") \
+ _ (STREAM_CLOSED, "STREAM_CLOSED") \
+ _ (FRAME_SIZE_ERROR, "FRAME_SIZE_ERROR") \
+ _ (REFUSED_STREAM, "REFUSED_STREAM") \
+ _ (CANCEL, "CANCEL") \
+ _ (COMPRESSION_ERROR, "COMPRESSION_ERROR") \
+ _ (CONNECT_ERROR, "CONNECT_ERROR") \
+ _ (ENHANCE_YOUR_CALM, "ENHANCE_YOUR_CALM") \
+ _ (INADEQUATE_SECURITY, "INADEQUATE_SECURITY") \
+ _ (HTTP_1_1_REQUIRED, "HTTP_1_1_REQUIRED")
+
+typedef enum http2_error_
+{
+#define _(s, str) HTTP2_ERROR_##s,
+ foreach_http2_error
+#undef _
+} http2_error_t;
+
+/* Format helper: render an http2_error_t as its RFC 9113 error name.
+ * Args: http2_error_t. Unknown values print "BUG: unknown". */
+static inline u8 *
+format_http2_error (u8 *s, va_list *va)
+{
+  http2_error_t e = va_arg (*va, http2_error_t);
+  u8 *t = 0;
+
+  switch (e)
+    {
+#define _(s, str) \
+  case HTTP2_ERROR_##s: \
+    t = (u8 *) str; \
+    break;
+      foreach_http2_error
+#undef _
+    default : return format (s, "BUG: unknown");
+    }
+  return format (s, "%s", t);
+}
+
+#define foreach_http2_pseudo_header \
+ _ (0, METHOD, "method") \
+ _ (1, SCHEME, "scheme") \
+ _ (2, AUTHORITY, "authority") \
+ _ (3, PATH, "path") \
+ _ (4, STATUS, "status")
+
+/* value, label, member, min, max, default_value, err_code */
+#define foreach_http2_settings \
+ _ (1, HEADER_TABLE_SIZE, header_table_size, 0, CLIB_U32_MAX, 4096, \
+ HTTP2_ERROR_NO_ERROR) \
+ _ (2, ENABLE_PUSH, enable_push, 0, 1, 1, HTTP2_ERROR_PROTOCOL_ERROR) \
+ _ (3, MAX_CONCURRENT_STREAMS, max_concurrent_streams, 0, CLIB_U32_MAX, \
+ CLIB_U32_MAX, HTTP2_ERROR_NO_ERROR) \
+ _ (4, INITIAL_WINDOW_SIZE, initial_window_size, 0, 0x7FFFFFFF, 65535, \
+ HTTP2_ERROR_FLOW_CONTROL_ERROR) \
+ _ (5, MAX_FRAME_SIZE, max_frame_size, 16384, 16777215, 16384, \
+ HTTP2_ERROR_PROTOCOL_ERROR) \
+ _ (6, MAX_HEADER_LIST_SIZE, max_header_list_size, 0, CLIB_U32_MAX, \
+ CLIB_U32_MAX, HTTP2_ERROR_NO_ERROR)
+
+typedef enum
+{
+#define _(value, label, member, min, max, default_value, err_code) \
+ HTTP2_SETTINGS_##label = value,
+ foreach_http2_settings
+#undef _
+} http_settings_t;
+
+typedef struct
+{
+#define _(value, label, member, min, max, default_value, err_code) u32 member;
+ foreach_http2_settings
+#undef _
+} http2_conn_settings_t;
+
+static const http2_conn_settings_t http2_default_conn_settings = {
+#define _(value, label, member, min, max, default_value, err_code) \
+ default_value,
+ foreach_http2_settings
+#undef _
+};
+
+#endif /* SRC_PLUGINS_HTTP_HTTP2_H_ */
diff --git a/src/plugins/http/http2/huffman_table.h b/src/plugins/http/http2/huffman_table.h
new file mode 100644
index 00000000000..66afffbc54a
--- /dev/null
+++ b/src/plugins/http/http2/huffman_table.h
@@ -0,0 +1,319 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2025 Cisco Systems, Inc.
+ */
+
+/* generated by mk_huffman_table.py */
+
+#ifndef SRC_PLUGINS_HTTP_HUFFMAN_TABLE_H_
+#define SRC_PLUGINS_HTTP_HUFFMAN_TABLE_H_
+
+#include <vppinfra/types.h>
+
+typedef struct
+{
+ u8 code_len;
+ u32 code;
+} hpack_huffman_symbol_t;
+
+static hpack_huffman_symbol_t huff_sym_table[] = {
+ { 13, 0x1ff8 }, { 23, 0x7fffd8 }, { 28, 0xfffffe2 }, { 28, 0xfffffe3 },
+ { 28, 0xfffffe4 }, { 28, 0xfffffe5 }, { 28, 0xfffffe6 }, { 28, 0xfffffe7 },
+ { 28, 0xfffffe8 }, { 24, 0xffffea }, { 30, 0x3ffffffc }, { 28, 0xfffffe9 },
+ { 28, 0xfffffea }, { 30, 0x3ffffffd }, { 28, 0xfffffeb }, { 28, 0xfffffec },
+ { 28, 0xfffffed }, { 28, 0xfffffee }, { 28, 0xfffffef }, { 28, 0xffffff0 },
+ { 28, 0xffffff1 }, { 28, 0xffffff2 }, { 30, 0x3ffffffe }, { 28, 0xffffff3 },
+ { 28, 0xffffff4 }, { 28, 0xffffff5 }, { 28, 0xffffff6 }, { 28, 0xffffff7 },
+ { 28, 0xffffff8 }, { 28, 0xffffff9 }, { 28, 0xffffffa }, { 28, 0xffffffb },
+ { 6, 0x14 }, { 10, 0x3f8 }, { 10, 0x3f9 }, { 12, 0xffa },
+ { 13, 0x1ff9 }, { 6, 0x15 }, { 8, 0xf8 }, { 11, 0x7fa },
+ { 10, 0x3fa }, { 10, 0x3fb }, { 8, 0xf9 }, { 11, 0x7fb },
+ { 8, 0xfa }, { 6, 0x16 }, { 6, 0x17 }, { 6, 0x18 },
+ { 5, 0x0 }, { 5, 0x1 }, { 5, 0x2 }, { 6, 0x19 },
+ { 6, 0x1a }, { 6, 0x1b }, { 6, 0x1c }, { 6, 0x1d },
+ { 6, 0x1e }, { 6, 0x1f }, { 7, 0x5c }, { 8, 0xfb },
+ { 15, 0x7ffc }, { 6, 0x20 }, { 12, 0xffb }, { 10, 0x3fc },
+ { 13, 0x1ffa }, { 6, 0x21 }, { 7, 0x5d }, { 7, 0x5e },
+ { 7, 0x5f }, { 7, 0x60 }, { 7, 0x61 }, { 7, 0x62 },
+ { 7, 0x63 }, { 7, 0x64 }, { 7, 0x65 }, { 7, 0x66 },
+ { 7, 0x67 }, { 7, 0x68 }, { 7, 0x69 }, { 7, 0x6a },
+ { 7, 0x6b }, { 7, 0x6c }, { 7, 0x6d }, { 7, 0x6e },
+ { 7, 0x6f }, { 7, 0x70 }, { 7, 0x71 }, { 7, 0x72 },
+ { 8, 0xfc }, { 7, 0x73 }, { 8, 0xfd }, { 13, 0x1ffb },
+ { 19, 0x7fff0 }, { 13, 0x1ffc }, { 14, 0x3ffc }, { 6, 0x22 },
+ { 15, 0x7ffd }, { 5, 0x3 }, { 6, 0x23 }, { 5, 0x4 },
+ { 6, 0x24 }, { 5, 0x5 }, { 6, 0x25 }, { 6, 0x26 },
+ { 6, 0x27 }, { 5, 0x6 }, { 7, 0x74 }, { 7, 0x75 },
+ { 6, 0x28 }, { 6, 0x29 }, { 6, 0x2a }, { 5, 0x7 },
+ { 6, 0x2b }, { 7, 0x76 }, { 6, 0x2c }, { 5, 0x8 },
+ { 5, 0x9 }, { 6, 0x2d }, { 7, 0x77 }, { 7, 0x78 },
+ { 7, 0x79 }, { 7, 0x7a }, { 7, 0x7b }, { 15, 0x7ffe },
+ { 11, 0x7fc }, { 14, 0x3ffd }, { 13, 0x1ffd }, { 28, 0xffffffc },
+ { 20, 0xfffe6 }, { 22, 0x3fffd2 }, { 20, 0xfffe7 }, { 20, 0xfffe8 },
+ { 22, 0x3fffd3 }, { 22, 0x3fffd4 }, { 22, 0x3fffd5 }, { 23, 0x7fffd9 },
+ { 22, 0x3fffd6 }, { 23, 0x7fffda }, { 23, 0x7fffdb }, { 23, 0x7fffdc },
+ { 23, 0x7fffdd }, { 23, 0x7fffde }, { 24, 0xffffeb }, { 23, 0x7fffdf },
+ { 24, 0xffffec }, { 24, 0xffffed }, { 22, 0x3fffd7 }, { 23, 0x7fffe0 },
+ { 24, 0xffffee }, { 23, 0x7fffe1 }, { 23, 0x7fffe2 }, { 23, 0x7fffe3 },
+ { 23, 0x7fffe4 }, { 21, 0x1fffdc }, { 22, 0x3fffd8 }, { 23, 0x7fffe5 },
+ { 22, 0x3fffd9 }, { 23, 0x7fffe6 }, { 23, 0x7fffe7 }, { 24, 0xffffef },
+ { 22, 0x3fffda }, { 21, 0x1fffdd }, { 20, 0xfffe9 }, { 22, 0x3fffdb },
+ { 22, 0x3fffdc }, { 23, 0x7fffe8 }, { 23, 0x7fffe9 }, { 21, 0x1fffde },
+ { 23, 0x7fffea }, { 22, 0x3fffdd }, { 22, 0x3fffde }, { 24, 0xfffff0 },
+ { 21, 0x1fffdf }, { 22, 0x3fffdf }, { 23, 0x7fffeb }, { 23, 0x7fffec },
+ { 21, 0x1fffe0 }, { 21, 0x1fffe1 }, { 22, 0x3fffe0 }, { 21, 0x1fffe2 },
+ { 23, 0x7fffed }, { 22, 0x3fffe1 }, { 23, 0x7fffee }, { 23, 0x7fffef },
+ { 20, 0xfffea }, { 22, 0x3fffe2 }, { 22, 0x3fffe3 }, { 22, 0x3fffe4 },
+ { 23, 0x7ffff0 }, { 22, 0x3fffe5 }, { 22, 0x3fffe6 }, { 23, 0x7ffff1 },
+ { 26, 0x3ffffe0 }, { 26, 0x3ffffe1 }, { 20, 0xfffeb }, { 19, 0x7fff1 },
+ { 22, 0x3fffe7 }, { 23, 0x7ffff2 }, { 22, 0x3fffe8 }, { 25, 0x1ffffec },
+ { 26, 0x3ffffe2 }, { 26, 0x3ffffe3 }, { 26, 0x3ffffe4 }, { 27, 0x7ffffde },
+ { 27, 0x7ffffdf }, { 26, 0x3ffffe5 }, { 24, 0xfffff1 }, { 25, 0x1ffffed },
+ { 19, 0x7fff2 }, { 21, 0x1fffe3 }, { 26, 0x3ffffe6 }, { 27, 0x7ffffe0 },
+ { 27, 0x7ffffe1 }, { 26, 0x3ffffe7 }, { 27, 0x7ffffe2 }, { 24, 0xfffff2 },
+ { 21, 0x1fffe4 }, { 21, 0x1fffe5 }, { 26, 0x3ffffe8 }, { 26, 0x3ffffe9 },
+ { 28, 0xffffffd }, { 27, 0x7ffffe3 }, { 27, 0x7ffffe4 }, { 27, 0x7ffffe5 },
+ { 20, 0xfffec }, { 24, 0xfffff3 }, { 20, 0xfffed }, { 21, 0x1fffe6 },
+ { 22, 0x3fffe9 }, { 21, 0x1fffe7 }, { 21, 0x1fffe8 }, { 23, 0x7ffff3 },
+ { 22, 0x3fffea }, { 22, 0x3fffeb }, { 25, 0x1ffffee }, { 25, 0x1ffffef },
+ { 24, 0xfffff4 }, { 24, 0xfffff5 }, { 26, 0x3ffffea }, { 23, 0x7ffff4 },
+ { 26, 0x3ffffeb }, { 27, 0x7ffffe6 }, { 26, 0x3ffffec }, { 26, 0x3ffffed },
+ { 27, 0x7ffffe7 }, { 27, 0x7ffffe8 }, { 27, 0x7ffffe9 }, { 27, 0x7ffffea },
+ { 27, 0x7ffffeb }, { 28, 0xffffffe }, { 27, 0x7ffffec }, { 27, 0x7ffffed },
+ { 27, 0x7ffffee }, { 27, 0x7ffffef }, { 27, 0x7fffff0 }, { 26, 0x3ffffee },
+};
+
+typedef struct
+{
+ u8 symbol;
+ u8 code_len;
+} hpack_huffman_code_t;
+
+static hpack_huffman_code_t huff_code_table_fast[] = {
+ { 0x30, 5 }, { 0x30, 5 }, { 0x30, 5 }, { 0x30, 5 }, { 0x30, 5 }, { 0x30, 5 },
+ { 0x30, 5 }, { 0x30, 5 }, { 0x31, 5 }, { 0x31, 5 }, { 0x31, 5 }, { 0x31, 5 },
+ { 0x31, 5 }, { 0x31, 5 }, { 0x31, 5 }, { 0x31, 5 }, { 0x32, 5 }, { 0x32, 5 },
+ { 0x32, 5 }, { 0x32, 5 }, { 0x32, 5 }, { 0x32, 5 }, { 0x32, 5 }, { 0x32, 5 },
+ { 0x61, 5 }, { 0x61, 5 }, { 0x61, 5 }, { 0x61, 5 }, { 0x61, 5 }, { 0x61, 5 },
+ { 0x61, 5 }, { 0x61, 5 }, { 0x63, 5 }, { 0x63, 5 }, { 0x63, 5 }, { 0x63, 5 },
+ { 0x63, 5 }, { 0x63, 5 }, { 0x63, 5 }, { 0x63, 5 }, { 0x65, 5 }, { 0x65, 5 },
+ { 0x65, 5 }, { 0x65, 5 }, { 0x65, 5 }, { 0x65, 5 }, { 0x65, 5 }, { 0x65, 5 },
+ { 0x69, 5 }, { 0x69, 5 }, { 0x69, 5 }, { 0x69, 5 }, { 0x69, 5 }, { 0x69, 5 },
+ { 0x69, 5 }, { 0x69, 5 }, { 0x6F, 5 }, { 0x6F, 5 }, { 0x6F, 5 }, { 0x6F, 5 },
+ { 0x6F, 5 }, { 0x6F, 5 }, { 0x6F, 5 }, { 0x6F, 5 }, { 0x73, 5 }, { 0x73, 5 },
+ { 0x73, 5 }, { 0x73, 5 }, { 0x73, 5 }, { 0x73, 5 }, { 0x73, 5 }, { 0x73, 5 },
+ { 0x74, 5 }, { 0x74, 5 }, { 0x74, 5 }, { 0x74, 5 }, { 0x74, 5 }, { 0x74, 5 },
+ { 0x74, 5 }, { 0x74, 5 }, { 0x20, 6 }, { 0x20, 6 }, { 0x20, 6 }, { 0x20, 6 },
+ { 0x25, 6 }, { 0x25, 6 }, { 0x25, 6 }, { 0x25, 6 }, { 0x2D, 6 }, { 0x2D, 6 },
+ { 0x2D, 6 }, { 0x2D, 6 }, { 0x2E, 6 }, { 0x2E, 6 }, { 0x2E, 6 }, { 0x2E, 6 },
+ { 0x2F, 6 }, { 0x2F, 6 }, { 0x2F, 6 }, { 0x2F, 6 }, { 0x33, 6 }, { 0x33, 6 },
+ { 0x33, 6 }, { 0x33, 6 }, { 0x34, 6 }, { 0x34, 6 }, { 0x34, 6 }, { 0x34, 6 },
+ { 0x35, 6 }, { 0x35, 6 }, { 0x35, 6 }, { 0x35, 6 }, { 0x36, 6 }, { 0x36, 6 },
+ { 0x36, 6 }, { 0x36, 6 }, { 0x37, 6 }, { 0x37, 6 }, { 0x37, 6 }, { 0x37, 6 },
+ { 0x38, 6 }, { 0x38, 6 }, { 0x38, 6 }, { 0x38, 6 }, { 0x39, 6 }, { 0x39, 6 },
+ { 0x39, 6 }, { 0x39, 6 }, { 0x3D, 6 }, { 0x3D, 6 }, { 0x3D, 6 }, { 0x3D, 6 },
+ { 0x41, 6 }, { 0x41, 6 }, { 0x41, 6 }, { 0x41, 6 }, { 0x5F, 6 }, { 0x5F, 6 },
+ { 0x5F, 6 }, { 0x5F, 6 }, { 0x62, 6 }, { 0x62, 6 }, { 0x62, 6 }, { 0x62, 6 },
+ { 0x64, 6 }, { 0x64, 6 }, { 0x64, 6 }, { 0x64, 6 }, { 0x66, 6 }, { 0x66, 6 },
+ { 0x66, 6 }, { 0x66, 6 }, { 0x67, 6 }, { 0x67, 6 }, { 0x67, 6 }, { 0x67, 6 },
+ { 0x68, 6 }, { 0x68, 6 }, { 0x68, 6 }, { 0x68, 6 }, { 0x6C, 6 }, { 0x6C, 6 },
+ { 0x6C, 6 }, { 0x6C, 6 }, { 0x6D, 6 }, { 0x6D, 6 }, { 0x6D, 6 }, { 0x6D, 6 },
+ { 0x6E, 6 }, { 0x6E, 6 }, { 0x6E, 6 }, { 0x6E, 6 }, { 0x70, 6 }, { 0x70, 6 },
+ { 0x70, 6 }, { 0x70, 6 }, { 0x72, 6 }, { 0x72, 6 }, { 0x72, 6 }, { 0x72, 6 },
+ { 0x75, 6 }, { 0x75, 6 }, { 0x75, 6 }, { 0x75, 6 }, { 0x3A, 7 }, { 0x3A, 7 },
+ { 0x42, 7 }, { 0x42, 7 }, { 0x43, 7 }, { 0x43, 7 }, { 0x44, 7 }, { 0x44, 7 },
+ { 0x45, 7 }, { 0x45, 7 }, { 0x46, 7 }, { 0x46, 7 }, { 0x47, 7 }, { 0x47, 7 },
+ { 0x48, 7 }, { 0x48, 7 }, { 0x49, 7 }, { 0x49, 7 }, { 0x4A, 7 }, { 0x4A, 7 },
+ { 0x4B, 7 }, { 0x4B, 7 }, { 0x4C, 7 }, { 0x4C, 7 }, { 0x4D, 7 }, { 0x4D, 7 },
+ { 0x4E, 7 }, { 0x4E, 7 }, { 0x4F, 7 }, { 0x4F, 7 }, { 0x50, 7 }, { 0x50, 7 },
+ { 0x51, 7 }, { 0x51, 7 }, { 0x52, 7 }, { 0x52, 7 }, { 0x53, 7 }, { 0x53, 7 },
+ { 0x54, 7 }, { 0x54, 7 }, { 0x55, 7 }, { 0x55, 7 }, { 0x56, 7 }, { 0x56, 7 },
+ { 0x57, 7 }, { 0x57, 7 }, { 0x59, 7 }, { 0x59, 7 }, { 0x6A, 7 }, { 0x6A, 7 },
+ { 0x6B, 7 }, { 0x6B, 7 }, { 0x71, 7 }, { 0x71, 7 }, { 0x76, 7 }, { 0x76, 7 },
+ { 0x77, 7 }, { 0x77, 7 }, { 0x78, 7 }, { 0x78, 7 }, { 0x79, 7 }, { 0x79, 7 },
+ { 0x7A, 7 }, { 0x7A, 7 }, { 0x26, 8 }, { 0x2A, 8 }, { 0x2C, 8 }, { 0x3B, 8 },
+ { 0x58, 8 }, { 0x5A, 8 }, { 0x00, 0 }, { 0x00, 0 },
+};
+
+typedef struct
+{
+ u32 first_code;
+ u8 code_len;
+ u8 symbols[29];
+} hpack_huffman_group_t;
+
+/* clang-format off */
+
+static hpack_huffman_group_t huff_code_table_slow[] = {
+ {
+ 0x3f8, /* first_code */
+ 10, /* code_len */
+ {
+ 0x21, 0x22, 0x28, 0x29, 0x3F,
+ } /* symbols */
+ },
+ {
+ 0x7fa, /* first_code */
+ 11, /* code_len */
+ {
+ 0x27, 0x2B, 0x7C,
+ } /* symbols */
+ },
+ {
+ 0xffa, /* first_code */
+ 12, /* code_len */
+ {
+ 0x23, 0x3E,
+ } /* symbols */
+ },
+ {
+ 0x1ff8, /* first_code */
+ 13, /* code_len */
+ {
+ 0x00, 0x24, 0x40, 0x5B, 0x5D, 0x7E,
+ } /* symbols */
+ },
+ {
+ 0x3ffc, /* first_code */
+ 14, /* code_len */
+ {
+ 0x5E, 0x7D,
+ } /* symbols */
+ },
+ {
+ 0x7ffc, /* first_code */
+ 15, /* code_len */
+ {
+ 0x3C, 0x60, 0x7B,
+ } /* symbols */
+ },
+ {
+ 0x7fff0, /* first_code */
+ 19, /* code_len */
+ {
+ 0x5C, 0xC3, 0xD0,
+ } /* symbols */
+ },
+ {
+ 0xfffe6, /* first_code */
+ 20, /* code_len */
+ {
+ 0x80, 0x82, 0x83, 0xA2, 0xB8, 0xC2, 0xE0, 0xE2,
+ } /* symbols */
+ },
+ {
+ 0x1fffdc, /* first_code */
+ 21, /* code_len */
+ {
+ 0x99, 0xA1, 0xA7, 0xAC, 0xB0, 0xB1, 0xB3, 0xD1, 0xD8, 0xD9,
+ 0xE3, 0xE5, 0xE6,
+ } /* symbols */
+ },
+ {
+ 0x3fffd2, /* first_code */
+ 22, /* code_len */
+ {
+ 0x81, 0x84, 0x85, 0x86, 0x88, 0x92, 0x9A, 0x9C, 0xA0, 0xA3,
+ 0xA4, 0xA9, 0xAA, 0xAD, 0xB2, 0xB5, 0xB9, 0xBA, 0xBB, 0xBD,
+ 0xBE, 0xC4, 0xC6, 0xE4, 0xE8, 0xE9,
+ } /* symbols */
+ },
+ {
+ 0x7fffd8, /* first_code */
+ 23, /* code_len */
+ {
+ 0x01, 0x87, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8F, 0x93, 0x95,
+ 0x96, 0x97, 0x98, 0x9B, 0x9D, 0x9E, 0xA5, 0xA6, 0xA8, 0xAE,
+ 0xAF, 0xB4, 0xB6, 0xB7, 0xBC, 0xBF, 0xC5, 0xE7, 0xEF,
+ } /* symbols */
+ },
+ {
+ 0xffffea, /* first_code */
+ 24, /* code_len */
+ {
+ 0x09, 0x8E, 0x90, 0x91, 0x94, 0x9F, 0xAB, 0xCE, 0xD7, 0xE1,
+ 0xEC, 0xED,
+ } /* symbols */
+ },
+ {
+ 0x1ffffec, /* first_code */
+ 25, /* code_len */
+ {
+ 0xC7, 0xCF, 0xEA, 0xEB,
+ } /* symbols */
+ },
+ {
+ 0x3ffffe0, /* first_code */
+ 26, /* code_len */
+ {
+ 0xC0, 0xC1, 0xC8, 0xC9, 0xCA, 0xCD, 0xD2, 0xD5, 0xDA, 0xDB,
+ 0xEE, 0xF0, 0xF2, 0xF3, 0xFF,
+ } /* symbols */
+ },
+ {
+ 0x7ffffde, /* first_code */
+ 27, /* code_len */
+ {
+ 0xCB, 0xCC, 0xD3, 0xD4, 0xD6, 0xDD, 0xDE, 0xDF, 0xF1, 0xF4,
+ 0xF5, 0xF6, 0xF7, 0xF8, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE,
+ } /* symbols */
+ },
+ {
+ 0xfffffe2, /* first_code */
+ 28, /* code_len */
+ {
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x0B, 0x0C, 0x0E,
+ 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x17, 0x18, 0x19,
+ 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x7F, 0xDC, 0xF9,
+ } /* symbols */
+ },
+ {
+ 0x3ffffffc, /* first_code */
+ 30, /* code_len */
+ {
+ 0x0A, 0x0D, 0x16,
+ } /* symbols */
+ },
+};
+
+/* clang-format on */
+
+always_inline hpack_huffman_group_t *
+hpack_huffman_get_group (u32 value)
+{
+ if (value < 0xFF400000)
+ return &huff_code_table_slow[0];
+ else if (value < 0xFFA00000)
+ return &huff_code_table_slow[1];
+ else if (value < 0xFFC00000)
+ return &huff_code_table_slow[2];
+ else if (value < 0xFFF00000)
+ return &huff_code_table_slow[3];
+ else if (value < 0xFFF80000)
+ return &huff_code_table_slow[4];
+ else if (value < 0xFFFE0000)
+ return &huff_code_table_slow[5];
+ else if (value < 0xFFFE6000)
+ return &huff_code_table_slow[6];
+ else if (value < 0xFFFEE000)
+ return &huff_code_table_slow[7];
+ else if (value < 0xFFFF4800)
+ return &huff_code_table_slow[8];
+ else if (value < 0xFFFFB000)
+ return &huff_code_table_slow[9];
+ else if (value < 0xFFFFEA00)
+ return &huff_code_table_slow[10];
+ else if (value < 0xFFFFF600)
+ return &huff_code_table_slow[11];
+ else if (value < 0xFFFFF800)
+ return &huff_code_table_slow[12];
+ else if (value < 0xFFFFFBC0)
+ return &huff_code_table_slow[13];
+ else if (value < 0xFFFFFE20)
+ return &huff_code_table_slow[14];
+ else if (value < 0xFFFFFFF0)
+ return &huff_code_table_slow[15];
+ else
+ return &huff_code_table_slow[16];
+}
+
+#endif /* SRC_PLUGINS_HTTP_HUFFMAN_TABLE_H_ */
diff --git a/src/plugins/http/http_buffer.c b/src/plugins/http/http_buffer.c
index bc1b8c08630..fd90fbfed8c 100644
--- a/src/plugins/http/http_buffer.c
+++ b/src/plugins/http/http_buffer.c
@@ -57,8 +57,9 @@ buf_fifo_free (http_buffer_t *hb)
vec_free (bf->segs);
}
-static svm_fifo_seg_t *
-buf_fifo_get_segs (http_buffer_t *hb, u32 max_len, u32 *n_segs)
+static u32
+buf_fifo_get_segs (http_buffer_t *hb, u32 max_len, svm_fifo_seg_t **fs,
+ u32 *n_segs)
{
http_buffer_fifo_t *bf = (http_buffer_fifo_t *) &hb->data;
@@ -67,7 +68,7 @@ buf_fifo_get_segs (http_buffer_t *hb, u32 max_len, u32 *n_segs)
max_len = clib_min (bf->len - bf->offset, (u64) max_len);
- vec_validate (bf->segs, _n_segs);
+ vec_validate (bf->segs, _n_segs - 1);
len = svm_fifo_segments (bf->src, 0, bf->segs, &_n_segs, max_len);
if (len < 0)
@@ -77,7 +78,8 @@ buf_fifo_get_segs (http_buffer_t *hb, u32 max_len, u32 *n_segs)
HTTP_DBG (1, "available to send %u n_segs %u", len, *n_segs);
- return bf->segs;
+ *fs = bf->segs;
+ return len;
}
static u32
@@ -92,13 +94,13 @@ buf_fifo_drain (http_buffer_t *hb, u32 len)
return len;
}
-static u8
-buf_fifo_is_drained (http_buffer_t *hb)
+static u64
+buf_fifo_bytes_left (http_buffer_t *hb)
{
http_buffer_fifo_t *bf = (http_buffer_fifo_t *) &hb->data;
ASSERT (bf->offset <= bf->len);
- return (bf->offset == bf->len);
+ return (bf->len - bf->offset);
}
const static http_buffer_vft_t buf_fifo_vft = {
@@ -106,7 +108,7 @@ const static http_buffer_vft_t buf_fifo_vft = {
.free = buf_fifo_free,
.get_segs = buf_fifo_get_segs,
.drain = buf_fifo_drain,
- .is_drained = buf_fifo_is_drained,
+ .bytes_left = buf_fifo_bytes_left,
};
HTTP_BUFFER_REGISTER_VFT (HTTP_BUFFER_FIFO, buf_fifo_vft);
@@ -115,6 +117,7 @@ typedef struct http_buffer_ptr_
{
svm_fifo_seg_t *segs;
svm_fifo_t *f;
+ u64 len;
} http_buffer_ptr_t;
STATIC_ASSERT (sizeof (http_buffer_ptr_t) <= HTTP_BUFFER_DATA_SZ, "buf data");
@@ -135,12 +138,11 @@ buf_ptr_init (http_buffer_t *hb, void *data, u64 len)
bf->f = f;
bf->segs = 0;
- vec_validate (bf->segs, 1);
+ vec_validate (bf->segs, 0);
bf->segs[0].data = uword_to_pointer (ptr, u8 *);
- bf->segs[0].len = len;
- bf->segs[1] = bf->segs[0];
+ bf->len = len;
}
static void
@@ -152,15 +154,17 @@ buf_ptr_free (http_buffer_t *hb)
vec_free (bf->segs);
}
-static svm_fifo_seg_t *
-buf_ptr_get_segs (http_buffer_t *hb, u32 max_len, u32 *n_segs)
+static u32
+buf_ptr_get_segs (http_buffer_t *hb, u32 max_len, svm_fifo_seg_t **fs,
+ u32 *n_segs)
{
http_buffer_ptr_t *bf = (http_buffer_ptr_t *) &hb->data;
*n_segs = 1;
- bf->segs[1].len = clib_min (bf->segs[0].len, max_len);
+ bf->segs[0].len = clib_min (bf->len, (u64) max_len);
- return &bf->segs[1];
+ *fs = bf->segs;
+ return bf->segs[0].len;
}
static u32
@@ -168,14 +172,14 @@ buf_ptr_drain (http_buffer_t *hb, u32 len)
{
http_buffer_ptr_t *bf = (http_buffer_ptr_t *) &hb->data;
- ASSERT (bf->segs[0].len >= len);
+ ASSERT (bf->len >= len);
- bf->segs[1].data += len;
- bf->segs[0].len -= len;
+ bf->segs[0].data += len;
+ bf->len -= len;
- HTTP_DBG (1, "drained %u left %u", len, bf->segs[0].len);
+ HTTP_DBG (1, "drained %u left %u", len, bf->len);
- if (!bf->segs[0].len)
+ if (!bf->len)
{
svm_fifo_dequeue_drop (bf->f, sizeof (uword));
return sizeof (uword);
@@ -184,12 +188,12 @@ buf_ptr_drain (http_buffer_t *hb, u32 len)
return 0;
}
-static u8
-buf_ptr_is_drained (http_buffer_t *hb)
+static u64
+buf_ptr_bytes_left (http_buffer_t *hb)
{
http_buffer_ptr_t *bf = (http_buffer_ptr_t *) &hb->data;
- return (bf->segs[0].len == 0);
+ return bf->len;
}
const static http_buffer_vft_t buf_ptr_vft = {
@@ -197,7 +201,7 @@ const static http_buffer_vft_t buf_ptr_vft = {
.free = buf_ptr_free,
.get_segs = buf_ptr_get_segs,
.drain = buf_ptr_drain,
- .is_drained = buf_ptr_is_drained,
+ .bytes_left = buf_ptr_bytes_left,
};
HTTP_BUFFER_REGISTER_VFT (HTTP_BUFFER_PTR, buf_ptr_vft);
diff --git a/src/plugins/http/http_buffer.h b/src/plugins/http/http_buffer.h
index 1140be42d6e..01b37d4173b 100644
--- a/src/plugins/http/http_buffer.h
+++ b/src/plugins/http/http_buffer.h
@@ -38,9 +38,10 @@ struct http_buffer_vft_
{
void (*init) (http_buffer_t *, void *data, u64 len);
void (*free) (http_buffer_t *);
- svm_fifo_seg_t *(*get_segs) (http_buffer_t *, u32 max_len, u32 *n_segs);
+ u32 (*get_segs) (http_buffer_t *, u32 max_len, svm_fifo_seg_t **fs,
+ u32 *n_segs);
u32 (*drain) (http_buffer_t *, u32 len);
- u8 (*is_drained) (http_buffer_t *);
+ u64 (*bytes_left) (http_buffer_t *);
};
void http_buffer_init (http_buffer_t *hb, http_buffer_type_t type,
@@ -53,10 +54,11 @@ http_buffer_free (http_buffer_t *hb)
hb->vft->free (hb);
}
-static inline svm_fifo_seg_t *
-http_buffer_get_segs (http_buffer_t *hb, u32 max_len, u32 *n_segs)
+static inline u32
+http_buffer_get_segs (http_buffer_t *hb, u32 max_len, svm_fifo_seg_t **fs,
+ u32 *n_segs)
{
- return hb->vft->get_segs (hb, max_len, n_segs);
+ return hb->vft->get_segs (hb, max_len, fs, n_segs);
}
static inline u32
@@ -65,10 +67,10 @@ http_buffer_drain (http_buffer_t *hb, u32 len)
return hb->vft->drain (hb, len);
}
-static inline u8
-http_buffer_is_drained (http_buffer_t *hb)
+static inline u64
+http_buffer_bytes_left (http_buffer_t *hb)
{
- return hb->vft->is_drained (hb);
+ return hb->vft->bytes_left (hb);
}
#endif /* SRC_PLUGINS_HTTP_HTTP_BUFFER_H_ */
diff --git a/src/plugins/http/http_header_names.h b/src/plugins/http/http_header_names.h
index 99acac786db..1778daf10d9 100644
--- a/src/plugins/http/http_header_names.h
+++ b/src/plugins/http/http_header_names.h
@@ -8,7 +8,8 @@
#include <http/http.h>
static http_token_t http_header_names[] = {
-#define _(sym, str) { http_token_lit (str) },
+#define _(sym, str_canonical, str_lower, hpack_index) \
+ { http_token_lit (str_canonical) },
foreach_http_header_name
#undef _
};
diff --git a/src/plugins/http/http_plugin.rst b/src/plugins/http/http_plugin.rst
index 995e55e6f0f..4154a413726 100644
--- a/src/plugins/http/http_plugin.rst
+++ b/src/plugins/http/http_plugin.rst
@@ -15,7 +15,7 @@ Usage
-----
The plugin exposes following inline functions: ``http_validate_abs_path_syntax``, ``http_validate_query_syntax``,
-``http_percent_decode``, ``http_path_remove_dot_segments``, ``http_build_header_table``, ``http_get_header``,
+``http_percent_decode``, ``http_path_sanitize``, ``http_build_header_table``, ``http_get_header``,
``http_reset_header_table``, ``http_free_header_table``, ``http_init_headers_ctx``, ``http_add_header``,
``http_add_custom_header``, ``http_validate_target_syntax``, ``http_parse_authority``, ``http_serialize_authority``,
``http_parse_masque_host_port``, ``http_decap_udp_payload_datagram``, ``http_encap_udp_payload_datagram``,
@@ -231,6 +231,12 @@ Now we can start reading body content, following block of code could be executed
u64 curr = vec_len (ctx->resp_body);
rv = svm_fifo_dequeue (ts->rx_fifo, n_deq, ctx->resp_body + curr);
ASSERT (rv == n_deq);
+ /* notify http transport that we read data if requested */
+ if (svm_fifo_needs_deq_ntf (ts->rx_fifo, n_deq))
+ {
+ svm_fifo_clear_deq_ntf (ts->rx_fifo);
+ session_program_transport_io_evt (ts->handle, SESSION_IO_EVT_RX);
+ }
/* update length of the vector */
vec_set_len (ctx->resp_body, curr + n_deq);
/* update number of remaining bytes to receive */
@@ -242,6 +248,9 @@ Now we can start reading body content, following block of code could be executed
/* send 200 OK response */
}
+.. note::
+ When body content is read from the ``rx_fifo`` app need to send notification to HTTP layer if requested, it is required for HTTP/2 flow control.
+
Sending data
""""""""""""""
diff --git a/src/plugins/http/http_private.h b/src/plugins/http/http_private.h
new file mode 100644
index 00000000000..662be060341
--- /dev/null
+++ b/src/plugins/http/http_private.h
@@ -0,0 +1,901 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2025 Cisco Systems, Inc.
+ */
+
+#ifndef SRC_PLUGINS_HTTP_HTTP_PRIVATE_H_
+#define SRC_PLUGINS_HTTP_HTTP_PRIVATE_H_
+
+#include <vppinfra/time_range.h>
+#include <vnet/session/application.h>
+#include <vnet/session/session.h>
+#include <vnet/session/transport.h>
+#include <http/http.h>
+#include <http/http_buffer.h>
+
+#define HTTP_FIFO_THRESH (16 << 10)
+
+static const http_token_t http2_conn_preface = { http_token_lit (
+ "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n") };
+
+typedef union
+{
+ struct
+ {
+ u32 version : 3;
+ u32 conn_index : 29;
+ };
+ u32 as_u32;
+} http_conn_handle_t;
+
+STATIC_ASSERT (sizeof (http_conn_handle_t) == sizeof (u32), "must fit in u32");
+
+typedef union
+{
+ struct
+ {
+ u32 version : 3;
+ u32 req_index : 29;
+ };
+ u32 as_u32;
+} http_req_handle_t;
+
+STATIC_ASSERT (sizeof (http_req_handle_t) == sizeof (u32), "must fit in u32");
+
+#define foreach_http_conn_state \
+ _ (LISTEN, "LISTEN") \
+ _ (CONNECTING, "CONNECTING") \
+ _ (ESTABLISHED, "ESTABLISHED") \
+ _ (TRANSPORT_CLOSED, "TRANSPORT-CLOSED") \
+ _ (APP_CLOSED, "APP-CLOSED") \
+ _ (CLOSED, "CLOSED")
+
+typedef enum http_conn_state_
+{
+#define _(s, str) HTTP_CONN_STATE_##s,
+ foreach_http_conn_state
+#undef _
+} http_conn_state_t;
+
+#define foreach_http_req_state \
+ _ (0, IDLE, "idle") \
+ _ (1, WAIT_APP_METHOD, "wait app method") \
+ _ (2, WAIT_TRANSPORT_REPLY, "wait transport reply") \
+ _ (3, TRANSPORT_IO_MORE_DATA, "transport io more data") \
+ _ (4, WAIT_TRANSPORT_METHOD, "wait transport method") \
+ _ (5, WAIT_APP_REPLY, "wait app reply") \
+ _ (6, APP_IO_MORE_DATA, "app io more data") \
+ _ (7, TUNNEL, "tunnel") \
+ _ (8, UDP_TUNNEL, "udp tunnel")
+
+typedef enum http_req_state_
+{
+#define _(n, s, str) HTTP_REQ_STATE_##s = n,
+ foreach_http_req_state
+#undef _
+ HTTP_REQ_N_STATES
+} http_req_state_t;
+
+typedef enum http_target_form_
+{
+ HTTP_TARGET_ORIGIN_FORM,
+ HTTP_TARGET_ABSOLUTE_FORM,
+ HTTP_TARGET_AUTHORITY_FORM,
+ HTTP_TARGET_ASTERISK_FORM
+} http_target_form_t;
+
+typedef enum http_version_
+{
+ HTTP_VERSION_1,
+ HTTP_VERSION_2,
+ HTTP_VERSION_3,
+ HTTP_VERSION_NA = 7,
+} http_version_t;
+
+typedef struct http_req_id_
+{
+ session_handle_t app_session_handle;
+ u32 parent_app_wrk_index;
+ u32 hc_index;
+} http_req_id_t;
+
+STATIC_ASSERT (sizeof (http_req_id_t) <= TRANSPORT_CONN_ID_LEN,
+ "ctx id must be less than TRANSPORT_CONN_ID_LEN");
+
+typedef struct http_req_
+{
+ union
+ {
+ transport_connection_t connection;
+ http_req_id_t c_http_req_id;
+ };
+#define hr_pa_wrk_index c_http_req_id.parent_app_wrk_index
+#define hr_pa_session_handle c_http_req_id.app_session_handle
+#define hr_hc_index c_http_req_id.hc_index
+#define hr_req_handle connection.c_index
+
+ u32 as_fifo_offset; /* for peek */
+
+ http_req_state_t state; /* state-machine state */
+
+ http_buffer_t tx_buf; /* message body from app to be sent */
+
+ /*
+ * for parsing of incoming message from transport
+ */
+ u32 rx_buf_offset; /* current offset during parsing */
+ u32 control_data_len; /* start line + headers + empty line */
+
+ union
+ {
+ u64 to_recv; /* remaining bytes of body to receive from transport */
+ u64 to_skip; /* remaining bytes of capsule to skip */
+ };
+
+ u8 is_tunnel;
+
+ /*
+ * parsed metadata for app
+ */
+ union
+ {
+ http_status_code_t status_code;
+ http_req_method_t method;
+ };
+
+ http_target_form_t target_form;
+ u8 *target;
+ http_url_scheme_t scheme;
+ u32 target_authority_offset;
+ u32 target_authority_len;
+ u32 target_path_offset;
+ u32 target_path_len;
+ u32 target_query_offset;
+ u32 target_query_len;
+
+ u32 headers_offset;
+ u32 headers_len;
+
+ u32 body_offset;
+ u64 body_len;
+
+ http_field_line_t *headers;
+ uword content_len_header_index;
+ uword connection_header_index;
+ uword upgrade_header_index;
+ uword host_header_index;
+
+ http_upgrade_proto_t upgrade_proto;
+} http_req_t;
+
+#define foreach_http_conn_flags \
+ _ (HO_DONE, "ho-done") \
+ _ (NO_APP_SESSION, "no-app-session") \
+ _ (PENDING_TIMER, "pending-timer") \
+ _ (IS_SERVER, "is-server") \
+ _ (HAS_REQUEST, "has-request")
+
+typedef enum http_conn_flags_bit_
+{
+#define _(sym, str) HTTP_CONN_F_BIT_##sym,
+ foreach_http_conn_flags
+#undef _
+} http_conn_flags_bit_t;
+
+typedef enum http_conn_flags_
+{
+#define _(sym, str) HTTP_CONN_F_##sym = 1 << HTTP_CONN_F_BIT_##sym,
+ foreach_http_conn_flags
+#undef _
+} __clib_packed http_conn_flags_t;
+
+typedef struct http_conn_id_
+{
+ union
+ {
+ session_handle_t app_session_handle;
+ u32 parent_app_api_ctx;
+ };
+ session_handle_t tc_session_handle;
+ u32 parent_app_wrk_index;
+} http_conn_id_t;
+
+STATIC_ASSERT (sizeof (http_conn_id_t) <= TRANSPORT_CONN_ID_LEN,
+ "ctx id must be less than TRANSPORT_CONN_ID_LEN");
+
+typedef struct http_tc_
+{
+ union
+ {
+ transport_connection_t connection;
+ http_conn_id_t c_http_conn_id;
+ };
+#define hc_tc_session_handle c_http_conn_id.tc_session_handle
+#define hc_pa_wrk_index c_http_conn_id.parent_app_wrk_index
+#define hc_pa_session_handle c_http_conn_id.app_session_handle
+#define hc_pa_app_api_ctx c_http_conn_id.parent_app_api_ctx
+#define hc_hc_index connection.c_index
+
+ http_version_t version;
+ http_conn_state_t state;
+ u32 timer_handle;
+ u32 timeout;
+ u32 app_rx_fifo_size;
+ u8 *app_name;
+ u8 *host;
+ http_conn_flags_t flags;
+ http_udp_tunnel_mode_t udp_tunnel_mode;
+
+ void *opaque; /* version specific data */
+} http_conn_t;
+
+typedef struct http_worker_
+{
+ http_conn_t *conn_pool;
+} http_worker_t;
+
+typedef struct http_main_
+{
+ http_worker_t *wrk;
+ http_conn_t *listener_pool;
+ http_conn_t *ho_conn_pool;
+ u32 *postponed_ho_free;
+ u32 *ho_free_list;
+ u32 app_index;
+
+ u8 **rx_bufs;
+ u8 **tx_bufs;
+ u8 **app_header_lists;
+
+ clib_timebase_t timebase;
+
+ http_status_code_t *sc_by_u16;
+ /*
+ * Runtime config
+ */
+ u8 is_init;
+
+ /*
+ * Config
+ */
+ u64 first_seg_size;
+ u64 add_seg_size;
+ u32 fifo_size;
+} http_main_t;
+
+typedef struct http_engine_vft_
+{
+ const char *name;
+ u32 (*hc_index_get_by_req_index) (u32 req_index,
+ clib_thread_index_t thread_index);
+ transport_connection_t *(*req_get_connection) (
+ u32 req_index, clib_thread_index_t thread_index);
+ u8 *(*format_req) (u8 *s, va_list *args);
+ void (*app_tx_callback) (http_conn_t *hc, u32 req_index,
+ transport_send_params_t *sp);
+ void (*app_rx_evt_callback) (http_conn_t *hc, u32 req_index,
+ clib_thread_index_t thread_index);
+ void (*app_close_callback) (http_conn_t *hc, u32 req_index,
+ clib_thread_index_t thread_index);
+ void (*app_reset_callback) (http_conn_t *hc, u32 req_index,
+ clib_thread_index_t thread_index);
+ int (*transport_connected_callback) (http_conn_t *hc);
+ void (*transport_rx_callback) (http_conn_t *hc);
+ void (*transport_close_callback) (http_conn_t *hc);
+ void (*transport_reset_callback) (http_conn_t *hc);
+ void (*transport_conn_reschedule_callback) (http_conn_t *hc);
+ void (*conn_accept_callback) (http_conn_t *hc); /* optional */
+ void (*conn_cleanup_callback) (http_conn_t *hc);
+ void (*enable_callback) (void); /* optional */
+ uword (*unformat_cfg_callback) (unformat_input_t *input); /* optional */
+} http_engine_vft_t;
+
+void http_register_engine (const http_engine_vft_t *vft,
+ http_version_t version);
+
+/* HTTP state machine result */
+typedef enum http_sm_result_t_
+{
+ HTTP_SM_STOP = 0,
+ HTTP_SM_CONTINUE = 1,
+ HTTP_SM_ERROR = -1,
+} http_sm_result_t;
+
+typedef http_sm_result_t (*http_sm_handler) (http_conn_t *hc, http_req_t *req,
+ transport_send_params_t *sp);
+
+#define expect_char(c) \
+ if (*p++ != c) \
+ { \
+ clib_warning ("unexpected character"); \
+ return -1; \
+ }
+
+#define parse_int(val, mul) \
+ do \
+ { \
+ if (!isdigit (*p)) \
+ { \
+ clib_warning ("expected digit"); \
+ return -1; \
+ } \
+ val += mul * (*p++ - '0'); \
+ } \
+ while (0)
+
+#define http_field_line_value_token(_fl, _req, _rx_buf) \
+ (const char *) ((_rx_buf) + (_req)->headers_offset + (_fl)->value_offset), \
+ (_fl)->value_len
+
+u8 *format_http_req_state (u8 *s, va_list *va);
+u8 *format_http_conn_state (u8 *s, va_list *args);
+u8 *format_http_time_now (u8 *s, va_list *args);
+
+/**
+ * @brief Find the first occurrence of the string in the vector.
+ *
+ * @param vec The vector to be scanned.
+ * @param offset Search offset in the vector.
+ * @param num Maximum number of characters to be searched if non-zero.
+ * @param str The string to be searched.
+ *
+ * @return @c -1 if the string is not found within the vector; index otherwise.
+ */
+int http_v_find_index (u8 *vec, u32 offset, u32 num, char *str);
+
+/**
+ * Disconnect HTTP connection.
+ *
+ * @param hc HTTP connection to disconnect.
+ */
+void http_disconnect_transport (http_conn_t *hc);
+
+/**
+ * Shutdown HTTP connection.
+ *
+ * Close TX side of the underlying transport.
+ *
+ * @param hc HTTP connection to shutdown.
+ */
+void http_shutdown_transport (http_conn_t *hc);
+
+/**
+ * Convert numeric representation of status code to @c http_status_code_t.
+ *
+ * @param status_code Status code within the range of 100 to 599, inclusive.
+ *
+ * @return Registered status code or in case of unrecognized status code as
+ * equivalent to the x00 status code of that class.
+ */
+http_status_code_t http_sc_by_u16 (u16 status_code);
+
+/**
+ * Read header list sent by app.
+ *
+ * @param req HTTP request.
+ * @param msg HTTP msg sent by app.
+ *
+ * @return Pointer to the header list.
+ *
+ * @note For immediate processing, not for buffering.
+ */
+u8 *http_get_app_header_list (http_req_t *req, http_msg_t *msg);
+
+/**
+ * Get pre-allocated TX buffer/vector.
+ *
+ * @param hc HTTP connection.
+ *
+ * @return Pointer to the vector.
+ *
+ * @note Vector length is reset to zero, use as temporary storage.
+ */
+u8 *http_get_tx_buf (http_conn_t *hc);
+
+/**
+ * Get pre-allocated RX buffer/vector.
+ *
+ * @param hc HTTP connection.
+ *
+ * @return Pointer to the vector.
+ *
+ * @note Vector length is reset to zero, use as temporary storage.
+ */
+u8 *http_get_rx_buf (http_conn_t *hc);
+
+/**
+ * Read request target path sent by app.
+ *
+ * @param req HTTP request.
+ * @param msg HTTP msg sent by app.
+ *
+ * @return Pointer to the target path.
+ *
+ * @note Valid only with request lifetime.
+ */
+u8 *http_get_app_target (http_req_t *req, http_msg_t *msg);
+
+/**
+ * Initialize per-request HTTP TX buffer.
+ *
+ * @param req HTTP request.
+ * @param msg HTTP msg sent by app.
+ *
+ * @note Use for streaming of body sent by app.
+ */
+void http_req_tx_buffer_init (http_req_t *req, http_msg_t *msg);
+
+/**
+ * Change state of given HTTP request.
+ *
+ * @param req HTTP request.
+ * @param state New state.
+ */
+always_inline void
+http_req_state_change (http_req_t *req, http_req_state_t state)
+{
+ HTTP_DBG (1, "changing http req state: %U -> %U", format_http_req_state,
+ req->state, format_http_req_state, state);
+ ASSERT (req->state != HTTP_REQ_STATE_TUNNEL);
+ req->state = state;
+}
+
+/**
+ * Send RX event to the app worker.
+ *
+ * @param req HTTP request.
+ */
+always_inline void
+http_app_worker_rx_notify (http_req_t *req)
+{
+ session_t *as;
+ app_worker_t *app_wrk;
+
+ as = session_get_from_handle (req->hr_pa_session_handle);
+ if (!(as->flags & SESSION_F_RX_EVT))
+ {
+ app_wrk = app_worker_get_if_valid (as->app_wrk_index);
+ if (app_wrk)
+ {
+ as->flags |= SESSION_F_RX_EVT;
+ app_worker_rx_notify (app_wrk, as);
+ }
+ }
+}
+
+/**
+ * Get underlying transport protocol of the HTTP connection.
+ *
+ * @param hc HTTP connection.
+ *
+ * @return Transport protocol, @ref transport_proto_t.
+ */
+always_inline transport_proto_t
+http_get_transport_proto (http_conn_t *hc)
+{
+ return session_get_transport_proto (
+ session_get_from_handle (hc->hc_tc_session_handle));
+}
+
+/**
+ * Read HTTP msg sent by app.
+ *
+ * @param req HTTP request.
+ * @param msg HTTP msg will be stored here.
+ */
+always_inline void
+http_get_app_msg (http_req_t *req, http_msg_t *msg)
+{
+ session_t *as;
+ int rv;
+
+ as = session_get_from_handle (req->hr_pa_session_handle);
+ rv = svm_fifo_dequeue (as->tx_fifo, sizeof (*msg), (u8 *) msg);
+ ASSERT (rv == sizeof (*msg));
+}
+
+always_inline void
+http_identify_optional_query (http_req_t *req, u8 *rx_buf)
+{
+ int i;
+ for (i = req->target_path_offset;
+ i < (req->target_path_offset + req->target_path_len); i++)
+ {
+ if (rx_buf[i] == '?')
+ {
+ req->target_query_offset = i + 1;
+ req->target_query_len = req->target_path_offset +
+ req->target_path_len -
+ req->target_query_offset;
+ req->target_path_len =
+ req->target_path_len - req->target_query_len - 1;
+ break;
+ }
+ }
+}
+
+always_inline int
+http_parse_content_length (http_req_t *req, u8 *rx_buf)
+{
+ int i;
+ http_field_line_t *field_line;
+ u8 *p;
+ u64 body_len = 0, digit;
+
+ field_line = vec_elt_at_index (req->headers, req->content_len_header_index);
+ p = rx_buf + req->headers_offset + field_line->value_offset;
+ for (i = 0; i < field_line->value_len; i++)
+ {
+ /* check for digit */
+ if (!isdigit (*p))
+ {
+ HTTP_DBG (1, "expected digit");
+ return -1;
+ }
+ digit = *p - '0';
+ u64 new_body_len = body_len * 10 + digit;
+ /* check for overflow */
+ if (new_body_len < body_len)
+ {
+ HTTP_DBG (1, "content-length value too big number, overflow");
+ return -1;
+ }
+ body_len = new_body_len;
+ p++;
+ }
+
+ req->body_len = body_len;
+
+ return 0;
+}
+
+always_inline void
+http_req_deschedule (http_req_t *req, transport_send_params_t *sp)
+{
+ transport_connection_deschedule (&req->connection);
+ sp->flags |= TRANSPORT_SND_F_DESCHED;
+}
+
+/* Abstraction of app session fifo operations */
+
+always_inline void
+http_io_as_add_want_deq_ntf (http_req_t *req)
+{
+ session_t *as = session_get_from_handle (req->hr_pa_session_handle);
+ svm_fifo_add_want_deq_ntf (as->rx_fifo, SVM_FIFO_WANT_DEQ_NOTIF);
+}
+
+always_inline void
+http_io_as_add_want_read_ntf (http_req_t *req)
+{
+ session_t *as = session_get_from_handle (req->hr_pa_session_handle);
+ svm_fifo_add_want_deq_ntf (as->rx_fifo, SVM_FIFO_WANT_DEQ_NOTIF_IF_FULL |
+ SVM_FIFO_WANT_DEQ_NOTIF_IF_EMPTY);
+}
+
+always_inline void
+http_io_as_reset_has_read_ntf (http_req_t *req)
+{
+ session_t *as = session_get_from_handle (req->hr_pa_session_handle);
+ svm_fifo_reset_has_deq_ntf (as->rx_fifo);
+}
+
+always_inline u32
+http_io_as_max_write (http_req_t *req)
+{
+ session_t *as = session_get_from_handle (req->hr_pa_session_handle);
+ return svm_fifo_max_enqueue_prod (as->rx_fifo);
+}
+
+always_inline u32
+http_io_as_max_read (http_req_t *req)
+{
+ session_t *as = session_get_from_handle (req->hr_pa_session_handle);
+ return svm_fifo_max_dequeue_cons (as->tx_fifo);
+}
+
+always_inline void
+http_io_as_write (http_req_t *req, u8 *data, u32 len)
+{
+ int n_written;
+ session_t *as = session_get_from_handle (req->hr_pa_session_handle);
+
+ n_written = svm_fifo_enqueue (as->rx_fifo, len, data);
+ ASSERT (n_written == len);
+}
+
+always_inline u32
+http_io_as_write_segs (http_req_t *req, const svm_fifo_seg_t segs[],
+ u32 n_segs)
+{
+ int n_written;
+ session_t *as = session_get_from_handle (req->hr_pa_session_handle);
+ n_written = svm_fifo_enqueue_segments (as->rx_fifo, segs, n_segs, 0);
+ ASSERT (n_written > 0);
+ return (u32) n_written;
+}
+
+always_inline u32
+http_io_as_read (http_req_t *req, u8 *buf, u32 len, u8 peek)
+{
+ int n_read;
+ session_t *as = session_get_from_handle (req->hr_pa_session_handle);
+
+ if (peek)
+ {
+ n_read = svm_fifo_peek (as->tx_fifo, req->as_fifo_offset, len, buf);
+ ASSERT (n_read > 0);
+ req->as_fifo_offset += len;
+ return (u32) n_read;
+ }
+
+ n_read = svm_fifo_dequeue (as->tx_fifo, len, buf);
+ ASSERT (n_read == len);
+ return (u32) n_read;
+}
+
+always_inline void
+http_io_as_read_segs (http_req_t *req, svm_fifo_seg_t *segs, u32 *n_segs,
+ u32 max_bytes)
+{
+ int n_read;
+ session_t *as = session_get_from_handle (req->hr_pa_session_handle);
+ n_read = svm_fifo_segments (as->tx_fifo, 0, segs, n_segs, max_bytes);
+ ASSERT (n_read > 0);
+}
+
+always_inline void
+http_io_as_drain (http_req_t *req, u32 len)
+{
+ session_t *as = session_get_from_handle (req->hr_pa_session_handle);
+ svm_fifo_dequeue_drop (as->tx_fifo, len);
+ req->as_fifo_offset = 0;
+}
+
+always_inline void
+http_io_as_drain_all (http_req_t *req)
+{
+ session_t *as = session_get_from_handle (req->hr_pa_session_handle);
+ svm_fifo_dequeue_drop_all (as->tx_fifo);
+ req->as_fifo_offset = 0;
+}
+
+/* Abstraction of transport session fifo operations */
+
+always_inline u32
+http_io_ts_fifo_size (http_conn_t *hc, u8 is_rx)
+{
+ session_t *ts = session_get_from_handle (hc->hc_tc_session_handle);
+ if (is_rx)
+ return svm_fifo_size (ts->rx_fifo);
+ else
+ return svm_fifo_size (ts->tx_fifo);
+}
+
+always_inline u32
+http_io_ts_max_read (http_conn_t *hc)
+{
+ session_t *ts = session_get_from_handle (hc->hc_tc_session_handle);
+ return svm_fifo_max_dequeue_cons (ts->rx_fifo);
+}
+
+always_inline u32
+http_io_ts_max_write (http_conn_t *hc, transport_send_params_t *sp)
+{
+ session_t *ts = session_get_from_handle (hc->hc_tc_session_handle);
+ return clib_min (svm_fifo_max_enqueue_prod (ts->tx_fifo),
+ sp->max_burst_size);
+}
+
+always_inline int
+http_io_ts_check_write_thresh (http_conn_t *hc)
+{
+ session_t *ts = session_get_from_handle (hc->hc_tc_session_handle);
+ return (svm_fifo_max_enqueue_prod (ts->tx_fifo) < HTTP_FIFO_THRESH);
+}
+
+always_inline void
+http_io_ts_add_want_deq_ntf (http_conn_t *hc)
+{
+ session_t *ts = session_get_from_handle (hc->hc_tc_session_handle);
+ svm_fifo_add_want_deq_ntf (ts->tx_fifo, SVM_FIFO_WANT_DEQ_NOTIF);
+}
+
+always_inline u32
+http_io_ts_read (http_conn_t *hc, u8 *buf, u32 len, u8 peek)
+{
+ int n_read;
+ session_t *ts = session_get_from_handle (hc->hc_tc_session_handle);
+
+ if (peek)
+ {
+ n_read = svm_fifo_peek (ts->rx_fifo, 0, len, buf);
+ ASSERT (n_read > 0);
+ return (u32) n_read;
+ }
+
+ n_read = svm_fifo_dequeue (ts->rx_fifo, len, buf);
+ ASSERT (n_read == len);
+ return (u32) n_read;
+}
+
+always_inline void
+http_io_ts_read_segs (http_conn_t *hc, svm_fifo_seg_t *segs, u32 *n_segs,
+ u32 max_bytes)
+{
+ int n_read;
+ session_t *ts = session_get_from_handle (hc->hc_tc_session_handle);
+ n_read = svm_fifo_segments (ts->rx_fifo, 0, segs, n_segs, max_bytes);
+ ASSERT (n_read > 0);
+}
+
+always_inline void
+http_io_ts_drain (http_conn_t *hc, u32 len)
+{
+ session_t *ts = session_get_from_handle (hc->hc_tc_session_handle);
+ svm_fifo_dequeue_drop (ts->rx_fifo, len);
+}
+
+always_inline void
+http_io_ts_drain_all (http_conn_t *hc)
+{
+ session_t *ts = session_get_from_handle (hc->hc_tc_session_handle);
+ svm_fifo_dequeue_drop_all (ts->rx_fifo);
+}
+
+always_inline void
+http_io_ts_after_read (http_conn_t *hc, u8 clear_evt)
+{
+ session_t *ts = session_get_from_handle (hc->hc_tc_session_handle);
+ if (clear_evt)
+ {
+ if (svm_fifo_is_empty_cons (ts->rx_fifo))
+ svm_fifo_unset_event (ts->rx_fifo);
+ }
+ else
+ {
+ if (svm_fifo_max_dequeue_cons (ts->rx_fifo))
+ session_program_rx_io_evt (hc->hc_tc_session_handle);
+ }
+}
+
+always_inline void
+http_io_ts_write (http_conn_t *hc, u8 *data, u32 len,
+ transport_send_params_t *sp)
+{
+ int n_written;
+ session_t *ts = session_get_from_handle (hc->hc_tc_session_handle);
+
+ n_written = svm_fifo_enqueue (ts->tx_fifo, len, data);
+ ASSERT (n_written == len);
+ if (sp)
+ {
+ ASSERT (sp->max_burst_size >= len);
+ sp->bytes_dequeued += len;
+ sp->max_burst_size -= len;
+ }
+}
+
+always_inline u32
+http_io_ts_write_segs (http_conn_t *hc, const svm_fifo_seg_t segs[],
+ u32 n_segs, transport_send_params_t *sp)
+{
+ int n_written;
+ session_t *ts = session_get_from_handle (hc->hc_tc_session_handle);
+ n_written = svm_fifo_enqueue_segments (ts->tx_fifo, segs, n_segs, 0);
+ ASSERT (n_written > 0);
+ sp->bytes_dequeued += n_written;
+ sp->max_burst_size -= n_written;
+ return (u32) n_written;
+}
+
+always_inline void
+http_io_ts_after_write (http_conn_t *hc, u8 flush)
+{
+ session_t *ts = session_get_from_handle (hc->hc_tc_session_handle);
+
+ if (!flush)
+ {
+ if (svm_fifo_set_event (ts->tx_fifo))
+ session_program_tx_io_evt (ts->handle, SESSION_IO_EVT_TX);
+ }
+ else
+ {
+ if (svm_fifo_set_event (ts->tx_fifo))
+ session_program_tx_io_evt (ts->handle, SESSION_IO_EVT_TX_FLUSH);
+ }
+}
+
+always_inline int
+http_conn_accept_request (http_conn_t *hc, http_req_t *req)
+{
+ session_t *as, *asl;
+ app_worker_t *app_wrk;
+ int rv;
+
+ HTTP_DBG (1, "hc [%u]%x req %x", hc->hc_hc_index, hc->c_thread_index,
+ req->hr_req_handle);
+
+ /* allocate app session and initialize */
+ as = session_alloc (hc->c_thread_index);
+ HTTP_DBG (1, "allocated session 0x%lx", session_handle (as));
+ req->c_s_index = as->session_index;
+ as->app_wrk_index = hc->hc_pa_wrk_index;
+ as->connection_index = req->hr_req_handle;
+ as->session_state = SESSION_STATE_ACCEPTING;
+ asl = listen_session_get_from_handle (hc->hc_pa_session_handle);
+ as->session_type = asl->session_type;
+ as->listener_handle = hc->hc_pa_session_handle;
+
+ /* init session fifos and notify app */
+ if ((rv = app_worker_init_accepted (as)))
+ {
+ HTTP_DBG (1, "failed to allocate fifos");
+ req->hr_pa_session_handle = SESSION_INVALID_HANDLE;
+ session_free (as);
+ hc->flags |= HTTP_CONN_F_NO_APP_SESSION;
+ return rv;
+ }
+
+ req->hr_pa_session_handle = session_handle (as);
+ req->hr_pa_wrk_index = as->app_wrk_index;
+
+ app_wrk = app_worker_get (as->app_wrk_index);
+
+ if ((rv = app_worker_accept_notify (app_wrk, as)))
+ {
+ HTTP_DBG (1, "app accept returned");
+ req->hr_pa_session_handle = SESSION_INVALID_HANDLE;
+ session_free (as);
+ hc->flags |= HTTP_CONN_F_NO_APP_SESSION;
+ return rv;
+ }
+
+ return 0;
+}
+
+always_inline int
+http_conn_established (http_conn_t *hc, http_req_t *req)
+{
+ session_t *as;
+ app_worker_t *app_wrk;
+ session_t *ts;
+ int rv;
+
+ /* allocate app session and initialize */
+ as = session_alloc (hc->c_thread_index);
+ HTTP_DBG (1, "allocated session 0x%lx", session_handle (as));
+ req->c_s_index = as->session_index;
+ as->app_wrk_index = hc->hc_pa_wrk_index;
+ as->connection_index = req->hr_req_handle;
+ as->session_state = SESSION_STATE_READY;
+ as->opaque = hc->hc_pa_app_api_ctx;
+ ts = session_get_from_handle (hc->hc_tc_session_handle);
+ as->session_type = session_type_from_proto_and_ip (
+ TRANSPORT_PROTO_HTTP, session_type_is_ip4 (ts->session_type));
+
+ /* init session fifos and notify app */
+ app_wrk = app_worker_get_if_valid (hc->hc_pa_wrk_index);
+ if (!app_wrk)
+ {
+ HTTP_DBG (1, "no app worker");
+ hc->flags |= HTTP_CONN_F_NO_APP_SESSION;
+ return -1;
+ }
+
+ if ((rv = app_worker_init_connected (app_wrk, as)))
+ {
+ HTTP_DBG (1, "failed to allocate fifos");
+ session_free (as);
+ hc->flags |= HTTP_CONN_F_NO_APP_SESSION;
+ return rv;
+ }
+
+ app_worker_connect_notify (app_wrk, as, 0, hc->hc_pa_app_api_ctx);
+
+ req->hr_pa_session_handle = session_handle (as);
+ req->hr_pa_wrk_index = as->app_wrk_index;
+
+ return 0;
+}
+
+#endif /* SRC_PLUGINS_HTTP_HTTP_PRIVATE_H_ */
diff --git a/src/plugins/http/http_timer.h b/src/plugins/http/http_timer.h
index 43d20d004d8..5ce42032f20 100644
--- a/src/plugins/http/http_timer.h
+++ b/src/plugins/http/http_timer.h
@@ -16,7 +16,7 @@
#ifndef SRC_PLUGINS_HTTP_HTTP_TIMER_H_
#define SRC_PLUGINS_HTTP_HTTP_TIMER_H_
-#include <http/http.h>
+#include <http/http_private.h>
#include <vppinfra/tw_timer_2t_1w_2048sl.h>
#define HTTP_CONN_TIMEOUT 60
@@ -45,7 +45,8 @@ http_conn_timer_start (http_conn_t *hc)
u32 hs_handle;
ASSERT (hc->timer_handle == HTTP_TIMER_HANDLE_INVALID);
- hs_handle = hc->c_thread_index << 24 | hc->c_c_index;
+ ASSERT (hc->hc_hc_index <= 0x00FFFFFF);
+ hs_handle = hc->c_thread_index << 24 | hc->hc_hc_index;
clib_spinlock_lock (&twc->tw_lock);
hc->timer_handle =
@@ -58,7 +59,7 @@ http_conn_timer_stop (http_conn_t *hc)
{
http_tw_ctx_t *twc = &http_tw_ctx;
- hc->pending_timer = 0;
+ hc->flags &= ~HTTP_CONN_F_PENDING_TIMER;
if (hc->timer_handle == HTTP_TIMER_HANDLE_INVALID)
return;
@@ -79,7 +80,8 @@ http_conn_timer_update (http_conn_t *hc)
tw_timer_update_2t_1w_2048sl (&twc->tw, hc->timer_handle, hc->timeout);
else
{
- hs_handle = hc->c_thread_index << 24 | hc->c_c_index;
+ ASSERT (hc->hc_hc_index <= 0x00FFFFFF);
+ hs_handle = hc->c_thread_index << 24 | hc->hc_hc_index;
hc->timer_handle =
tw_timer_start_2t_1w_2048sl (&twc->tw, hs_handle, 0, hc->timeout);
}
diff --git a/src/plugins/http/test/http_test.c b/src/plugins/http/test/http_test.c
index bfaa285eb35..f44d3cbd31b 100644
--- a/src/plugins/http/test/http_test.c
+++ b/src/plugins/http/test/http_test.c
@@ -6,6 +6,8 @@
#include <vpp/app/version.h>
#include <http/http.h>
#include <http/http_header_names.h>
+#include <http/http2/hpack.h>
+#include <http/http2/frame.h>
#define HTTP_TEST_I(_cond, _comment, _args...) \
({ \
@@ -533,6 +535,771 @@ http_test_http_header_table (vlib_main_t *vm)
return 0;
}
+static int
+http_test_parse_request (const char *first_req, uword first_req_len,
+ const char *second_req, uword second_req_len,
+ const char *third_req, uword third_req_len,
+ hpack_dynamic_table_t *dynamic_table)
+{
+ http2_error_t rv;
+ u8 *buf = 0;
+ hpack_request_control_data_t control_data;
+ http_field_line_t *headers = 0;
+ u16 parsed_bitmap = 0;
+
+ static http2_error_t (*_hpack_parse_request) (
+ u8 * src, u32 src_len, u8 * dst, u32 dst_len,
+ hpack_request_control_data_t * control_data, http_field_line_t * *headers,
+ hpack_dynamic_table_t * dynamic_table);
+
+ _hpack_parse_request =
+ vlib_get_plugin_symbol ("http_plugin.so", "hpack_parse_request");
+
+ parsed_bitmap =
+ HPACK_PSEUDO_HEADER_METHOD_PARSED | HPACK_PSEUDO_HEADER_SCHEME_PARSED |
+ HPACK_PSEUDO_HEADER_PATH_PARSED | HPACK_PSEUDO_HEADER_AUTHORITY_PARSED;
+
+ /* first request */
+ vec_validate_init_empty (buf, 254, 0);
+ memset (&control_data, 0, sizeof (control_data));
+ rv = _hpack_parse_request ((u8 *) first_req, (u32) first_req_len, buf, 254,
+ &control_data, &headers, dynamic_table);
+ if (rv != HTTP2_ERROR_NO_ERROR ||
+ control_data.parsed_bitmap != parsed_bitmap ||
+ control_data.method != HTTP_REQ_GET ||
+ control_data.scheme != HTTP_URL_SCHEME_HTTP ||
+ control_data.path_len != 1 || control_data.authority_len != 15 ||
+ dynamic_table->used != 57 || vec_len (headers) != 0)
+ return 1;
+ if (memcmp (control_data.path, "/", 1))
+ return 1;
+ if (memcmp (control_data.authority, "www.example.com", 15))
+ return 1;
+ vec_free (headers);
+ vec_free (buf);
+
+ /* second request */
+ vec_validate_init_empty (buf, 254, 0);
+ memset (&control_data, 0, sizeof (control_data));
+ rv = _hpack_parse_request ((u8 *) second_req, (u32) second_req_len, buf, 254,
+ &control_data, &headers, dynamic_table);
+ if (rv != HTTP2_ERROR_NO_ERROR ||
+ control_data.parsed_bitmap != parsed_bitmap ||
+ control_data.method != HTTP_REQ_GET ||
+ control_data.scheme != HTTP_URL_SCHEME_HTTP ||
+ control_data.path_len != 1 || control_data.authority_len != 15 ||
+ dynamic_table->used != 110 || vec_len (headers) != 1 ||
+ control_data.headers_len != 21)
+ return 2;
+ if (memcmp (control_data.path, "/", 1))
+ return 2;
+ if (memcmp (control_data.authority, "www.example.com", 15))
+ return 2;
+ if (headers[0].name_len != 13 || headers[0].value_len != 8)
+ return 2;
+ if (memcmp (control_data.headers + headers[0].name_offset, "cache-control",
+ 13))
+ return 2;
+ if (memcmp (control_data.headers + headers[0].value_offset, "no-cache", 8))
+ return 2;
+ vec_free (headers);
+ vec_free (buf);
+
+ /* third request */
+ vec_validate_init_empty (buf, 254, 0);
+ memset (&control_data, 0, sizeof (control_data));
+ rv = _hpack_parse_request ((u8 *) third_req, (u32) third_req_len, buf, 254,
+ &control_data, &headers, dynamic_table);
+ if (rv != HTTP2_ERROR_NO_ERROR ||
+ control_data.parsed_bitmap != parsed_bitmap ||
+ control_data.method != HTTP_REQ_GET ||
+ control_data.scheme != HTTP_URL_SCHEME_HTTPS ||
+ control_data.path_len != 11 || control_data.authority_len != 15 ||
+ dynamic_table->used != 164 || vec_len (headers) != 1 ||
+ control_data.headers_len != 22)
+ return 3;
+ if (memcmp (control_data.path, "/index.html", 11))
+ return 3;
+ if (memcmp (control_data.authority, "www.example.com", 15))
+ return 3;
+ if (headers[0].name_len != 10 || headers[0].value_len != 12)
+ return 3;
+ if (memcmp (control_data.headers + headers[0].name_offset, "custom-key", 10))
+ return 3;
+ if (memcmp (control_data.headers + headers[0].value_offset, "custom-value",
+ 12))
+ return 3;
+ vec_free (headers);
+ vec_free (buf);
+
+ return 0;
+}
+
+static int
+http_test_hpack (vlib_main_t *vm)
+{
+ vlib_cli_output (vm, "hpack_decode_int");
+
+ static uword (*_hpack_decode_int) (u8 * *pos, u8 * end, u8 prefix_len);
+ _hpack_decode_int =
+ vlib_get_plugin_symbol ("http_plugin.so", "hpack_decode_int");
+
+ u8 *pos, *end, *input = 0;
+ uword value;
+#define TEST(i, pl, e) \
+ vec_validate (input, sizeof (i) - 2); \
+ memcpy (input, i, sizeof (i) - 1); \
+ pos = input; \
+ end = vec_end (input); \
+ value = _hpack_decode_int (&pos, end, (u8) pl); \
+ HTTP_TEST ((value == (uword) e && pos == end), \
+ "%U with prefix length %u is %llu", format_hex_bytes, input, \
+ vec_len (input), (u8) pl, value); \
+ vec_free (input);
+
+ TEST ("\x00", 8, 0);
+ TEST ("\x2A", 8, 42);
+ TEST ("\x72", 4, 2);
+ TEST ("\x7F\x00", 7, 127);
+ TEST ("\x7F\x01", 7, 128);
+ TEST ("\x9F\x9A\x0A", 5, 1337);
+ TEST ("\xFF\x80\x01", 7, 255);
+ /* max value to decode is CLIB_WORD_MAX, CLIB_UWORD_MAX is error */
+ TEST ("\x7F\x80\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x7F", 7, CLIB_WORD_MAX);
+
+#undef TEST
+
+#define N_TEST(i, pl) \
+ vec_validate (input, sizeof (i) - 2); \
+ memcpy (input, i, sizeof (i) - 1); \
+ pos = input; \
+ end = vec_end (input); \
+ value = _hpack_decode_int (&pos, end, (u8) pl); \
+ HTTP_TEST ((value == HPACK_INVALID_INT), \
+ "%U with prefix length %u should be invalid", format_hex_bytes, \
+ input, vec_len (input), (u8) pl); \
+ vec_free (input);
+
+ /* incomplete */
+ N_TEST ("\x7F", 7);
+ N_TEST ("\x0F\xFF\xFF", 4);
+ /* overflow */
+ N_TEST ("\x0F\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x00", 4);
+ N_TEST ("\x0F\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x00", 4);
+
+#undef N_TEST
+
+ vlib_cli_output (vm, "hpack_encode_int");
+
+ static u8 *(*_hpack_encode_int) (u8 * dst, uword value, u8 prefix_len);
+ _hpack_encode_int =
+ vlib_get_plugin_symbol ("http_plugin.so", "hpack_encode_int");
+
+ u8 *buf = 0;
+ u8 *p;
+
+#define TEST(v, pl, e) \
+ vec_validate_init_empty (buf, 15, 0); \
+ p = _hpack_encode_int (buf, v, (u8) pl); \
+ HTTP_TEST (((p - buf) == (sizeof (e) - 1) && !memcmp (buf, e, p - buf)), \
+ "%llu with prefix length %u is encoded as %U", v, (u8) pl, \
+ format_hex_bytes, buf, p - buf); \
+ vec_free (buf);
+
+ TEST (0, 8, "\x00");
+ TEST (2, 4, "\x02");
+ TEST (42, 8, "\x2A");
+ TEST (127, 7, "\x7F\x00");
+ TEST (128, 7, "\x7F\x01");
+ TEST (255, 7, "\x7F\x80\x01");
+ TEST (1337, 5, "\x1F\x9A\x0A");
+ TEST (CLIB_WORD_MAX, 7, "\x7F\x80\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x7F");
+#undef TEST
+
+ vlib_cli_output (vm, "hpack_decode_string");
+
+ static http2_error_t (*_hpack_decode_string) (u8 * *src, u8 * end, u8 * *buf,
+ uword * buf_len);
+ _hpack_decode_string =
+ vlib_get_plugin_symbol ("http_plugin.so", "hpack_decode_string");
+
+ u8 *bp;
+ uword blen, len;
+ http2_error_t rv;
+
+#define TEST(i, e) \
+ vec_validate (input, sizeof (i) - 2); \
+ memcpy (input, i, sizeof (i) - 1); \
+ pos = input; \
+ vec_validate_init_empty (buf, 63, 0); \
+ bp = buf; \
+ blen = vec_len (buf); \
+ rv = _hpack_decode_string (&pos, vec_end (input), &bp, &blen); \
+ len = vec_len (buf) - blen; \
+ HTTP_TEST ((len == strlen (e) && !memcmp (buf, e, len) && \
+ pos == vec_end (input) && bp == buf + len && \
+ rv == HTTP2_ERROR_NO_ERROR), \
+ "%U is decoded as %U", format_hex_bytes, input, vec_len (input), \
+ format_http_bytes, buf, len); \
+ vec_free (input); \
+ vec_free (buf);
+
+ /* raw coding */
+ TEST ("\x07private", "private");
+ /* Huffman coding */
+ TEST ("\x85\xAE\xC3\x77\x1A\x4B", "private");
+ TEST ("\x86\xA8\xEB\x10\x64\x9C\xBF", "no-cache");
+ TEST ("\x8C\xF1\xE3\xC2\xE5\xF2\x3A\x6B\xA0\xAB\x90\xF4\xFF",
+ "www.example.com");
+ TEST ("\x96\xD0\x7A\xBE\x94\x10\x54\xD4\x44\xA8\x20\x05\x95\x04\x0B\x81\x66"
+ "\xE0\x82\xA6\x2D\x1B\xFF",
+ "Mon, 21 Oct 2013 20:13:21 GMT")
+ TEST ("\xAD\x94\xE7\x82\x1D\xD7\xF2\xE6\xC7\xB3\x35\xDF\xDF\xCD\x5B\x39\x60"
+ "\xD5\xAF\x27\x08\x7F\x36\x72\xC1\xAB\x27\x0F\xB5\x29\x1F\x95\x87\x31"
+ "\x60\x65\xC0\x03\xED\x4E\xE5\xB1\x06\x3D\x50\x07",
+ "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1");
+ TEST ("\x8A\x9C\xB4\x50\x75\x3C\x1E\xCA\x24\xFE\x3F", "hello world!")
+ TEST ("\x8A\xFF\xFE\x03\x18\xC6\x31\x8C\x63\x18\xC7", "\\aaaaaaaaaaaa");
+ TEST ("\x8C\x1F\xFF\xF0\x18\xC6\x31\x80\x03\x18\xC6\x31\x8F",
+ "a\\aaaaa00aaaaaaa");
+ TEST ("\x87\x1F\xFF\xF0\xFF\xFE\x11\xFF", "a\\\\b");
+ TEST ("\x84\x1F\xF9\xFE\xA3", "a?'b");
+ TEST ("\x84\x1F\xFA\xFF\x23", "a'?b");
+ TEST ("\x8D\x1F\xFF\xFF\xFF\x0C\x63\x18\xC0\x01\x8C\x63\x18\xC7",
+ "\x61\xF9\x61\x61\x61\x61\x61\x30\x30\x61\x61\x61\x61\x61\x61\x61")
+#undef TEST
+
+#define N_TEST(i, e) \
+ vec_validate (input, sizeof (i) - 2); \
+ memcpy (input, i, sizeof (i) - 1); \
+ pos = input; \
+ vec_validate_init_empty (buf, 15, 0); \
+ bp = buf; \
+ blen = vec_len (buf); \
+ rv = _hpack_decode_string (&pos, vec_end (input), &bp, &blen); \
+ HTTP_TEST ((rv == e), "%U should be invalid (%U)", format_hex_bytes, input, \
+ vec_len (input), format_http2_error, rv); \
+ vec_free (input); \
+ vec_free (buf);
+
+ /* incomplete */
+ N_TEST ("\x87", HTTP2_ERROR_COMPRESSION_ERROR);
+ N_TEST ("\x07priv", HTTP2_ERROR_COMPRESSION_ERROR);
+ /* invalid length */
+ N_TEST ("\x7Fprivate", HTTP2_ERROR_COMPRESSION_ERROR);
+ /* invalid EOF */
+ N_TEST ("\x81\x8C", HTTP2_ERROR_COMPRESSION_ERROR);
+ /* not enough space for decoding */
+ N_TEST (
+ "\x96\xD0\x7A\xBE\x94\x10\x54\xD4\x44\xA8\x20\x05\x95\x04\x0B\x81\x66"
+ "\xE0\x82\xA6\x2D\x1B\xFF",
+ HTTP2_ERROR_INTERNAL_ERROR);
+#undef N_TEST
+
+ vlib_cli_output (vm, "hpack_encode_string");
+
+ static u8 *(*_hpack_encode_string) (u8 * dst, const u8 *value,
+ uword value_len);
+ _hpack_encode_string =
+ vlib_get_plugin_symbol ("http_plugin.so", "hpack_encode_string");
+
+#define TEST(i, e) \
+ vec_validate (input, sizeof (i) - 2); \
+ memcpy (input, i, sizeof (i) - 1); \
+ pos = input; \
+ vec_validate_init_empty (buf, 63, 0); \
+ p = _hpack_encode_string (buf, input, vec_len (input)); \
+ HTTP_TEST (((p - buf) == (sizeof (e) - 1) && !memcmp (buf, e, p - buf)), \
+ "%v is encoded as %U", input, format_hex_bytes, buf, p - buf); \
+ vec_free (input); \
+ vec_free (buf);
+
+ /* Huffman coding */
+ TEST ("private", "\x85\xAE\xC3\x77\x1A\x4B");
+ TEST ("no-cache", "\x86\xA8\xEB\x10\x64\x9C\xBF");
+ TEST ("www.example.com",
+ "\x8C\xF1\xE3\xC2\xE5\xF2\x3A\x6B\xA0\xAB\x90\xF4\xFF");
+ TEST ("Mon, 21 Oct 2013 20:13:21 GMT",
+ "\x96\xD0\x7A\xBE\x94\x10\x54\xD4\x44\xA8\x20\x05\x95\x04\x0B\x81\x66"
+ "\xE0\x82\xA6\x2D\x1B\xFF")
+ TEST ("foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1",
+ "\xAD\x94\xE7\x82\x1D\xD7\xF2\xE6\xC7\xB3\x35\xDF\xDF\xCD\x5B\x39\x60"
+ "\xD5\xAF\x27\x08\x7F\x36\x72\xC1\xAB\x27\x0F\xB5\x29\x1F\x95\x87\x31"
+ "\x60\x65\xC0\x03\xED\x4E\xE5\xB1\x06\x3D\x50\x07");
+ TEST ("hello world!", "\x8A\x9C\xB4\x50\x75\x3C\x1E\xCA\x24\xFE\x3F")
+ TEST ("\\aaaaaaaaaaaa", "\x8A\xFF\xFE\x03\x18\xC6\x31\x8C\x63\x18\xC7");
+ /* raw coding */
+ TEST ("[XZ]", "\x4[XZ]");
+#undef TEST
+
+ vlib_cli_output (vm, "hpack_decode_header");
+
+ static http2_error_t (*_hpack_decode_header) (
+ u8 * *src, u8 * end, u8 * *buf, uword * buf_len, u32 * name_len,
+ u32 * value_len, hpack_dynamic_table_t * dt);
+
+ _hpack_decode_header =
+ vlib_get_plugin_symbol ("http_plugin.so", "hpack_decode_header");
+
+ static void (*_hpack_dynamic_table_init) (hpack_dynamic_table_t * table,
+ u32 max_size);
+
+ _hpack_dynamic_table_init =
+ vlib_get_plugin_symbol ("http_plugin.so", "hpack_dynamic_table_init");
+
+ static void (*_hpack_dynamic_table_free) (hpack_dynamic_table_t * table);
+
+ _hpack_dynamic_table_free =
+ vlib_get_plugin_symbol ("http_plugin.so", "hpack_dynamic_table_free");
+
+ u32 name_len, value_len;
+ hpack_dynamic_table_t table;
+
+ _hpack_dynamic_table_init (&table, 128);
+
+#define TEST(i, e_name, e_value, dt_size) \
+ vec_validate (input, sizeof (i) - 2); \
+ memcpy (input, i, sizeof (i) - 1); \
+ pos = input; \
+ vec_validate_init_empty (buf, 63, 0); \
+ bp = buf; \
+ blen = vec_len (buf); \
+ rv = _hpack_decode_header (&pos, vec_end (input), &bp, &blen, &name_len, \
+ &value_len, &table); \
+ len = vec_len (buf) - blen; \
+ HTTP_TEST ((rv == HTTP2_ERROR_NO_ERROR && table.used == dt_size && \
+ name_len == strlen (e_name) && value_len == strlen (e_value) && \
+ !memcmp (buf, e_name, name_len) && \
+ !memcmp (buf + name_len, e_value, value_len) && \
+ vec_len (buf) == (blen + name_len + value_len) && \
+ pos == vec_end (input) && bp == buf + name_len + value_len), \
+ "%U is decoded as '%U: %U'", format_hex_bytes, input, \
+ vec_len (input), format_http_bytes, buf, name_len, \
+ format_http_bytes, buf + name_len, value_len); \
+ vec_free (input); \
+ vec_free (buf);
+
+ /* C.2.1. Literal Header Field with Indexing */
+ TEST ("\x40\x0A\x63\x75\x73\x74\x6F\x6D\x2D\x6B\x65\x79\x0D\x63\x75\x73\x74"
+ "\x6F\x6D\x2D\x68\x65\x61\x64\x65\x72",
+ "custom-key", "custom-header", 55);
+ /* C.2.2. Literal Header Field without Indexing */
+ TEST ("\x04\x0C\x2F\x73\x61\x6D\x70\x6C\x65\x2F\x70\x61\x74\x68", ":path",
+ "/sample/path", 55);
+ /* C.2.3. Literal Header Field Never Indexed */
+ TEST ("\x10\x08\x70\x61\x73\x73\x77\x6F\x72\x64\x06\x73\x65\x63\x72\x65\x74",
+ "password", "secret", 55);
+ /* C.2.4. Indexed Header Field */
+ TEST ("\x82", ":method", "GET", 55);
+ TEST ("\xBE", "custom-key", "custom-header", 55);
+ /* Literal Header Field with Indexing - enough space in dynamic table */
+ TEST ("\x41\x0F\x77\x77\x77\x2E\x65\x78\x61\x6D\x70\x6C\x65\x2E\x63\x6F\x6D",
+ ":authority", "www.example.com", 112);
+ /* verification */
+ TEST ("\xBE", ":authority", "www.example.com", 112);
+ TEST ("\xBF", "custom-key", "custom-header", 112);
+ /* Literal Header Field with Indexing - eviction */
+ TEST ("\x58\x08\x6E\x6F\x2D\x63\x61\x63\x68\x65", "cache-control",
+ "no-cache", 110);
+ /* verification */
+ TEST ("\xBE", "cache-control", "no-cache", 110);
+ TEST ("\xBF", ":authority", "www.example.com", 110);
+ /* Literal Header Field with Indexing - eviction */
+ TEST ("\x40\x0A\x63\x75\x73\x74\x6F\x6D\x2D\x6B\x65\x79\x0D\x63\x75\x73\x74"
+ "\x6F\x6D\x2D\x68\x65\x61\x64\x65\x72",
+ "custom-key", "custom-header", 108);
+ /* verification */
+ TEST ("\xBE", "custom-key", "custom-header", 108);
+ TEST ("\xBF", "cache-control", "no-cache", 108);
+ /* Literal Header Field with Indexing - eviction */
+ TEST ("\x41\x0F\x77\x77\x77\x2E\x65\x78\x61\x6D\x70\x6C\x65\x2E\x63\x6F\x6D",
+ ":authority", "www.example.com", 112);
+ /* verification */
+ TEST ("\xBE", ":authority", "www.example.com", 112);
+ TEST ("\xBF", "custom-key", "custom-header", 112);
+ /* Literal Header Field with Indexing - eviction with reference */
+ TEST ("\x7F\x00\x0C\x63\x75\x73\x74\x6F\x6D\x2D\x76\x61\x6C\x75\x65",
+ "custom-key", "custom-value", 111);
+ /* verification */
+ TEST ("\xBE", "custom-key", "custom-value", 111);
+ TEST ("\xBF", ":authority", "www.example.com", 111);
+#undef TEST
+
+ _hpack_dynamic_table_free (&table);
+
+ vlib_cli_output (vm, "hpack_parse_request");
+
+ int result;
+ /* C.3. Request Examples without Huffman Coding */
+ _hpack_dynamic_table_init (&table, HPACK_DEFAULT_HEADER_TABLE_SIZE);
+ result = http_test_parse_request (
+ http_token_lit ("\x82\x86\x84\x41\x0F\x77\x77\x77\x2E\x65\x78\x61"
+ "\x6D\x70\x6C\x65\x2E\x63\x6F\x6D"),
+ http_token_lit (
+ "\x82\x86\x84\xBE\x58\x08\x6E\x6F\x2D\x63\x61\x63\x68\x65"),
+ http_token_lit (
+ "\x82\x87\x85\xBF\x40\x0A\x63\x75\x73\x74\x6F\x6D\x2D\x6B"
+ "\x65\x79\x0C\x63\x75\x73\x74\x6F\x6D\x2D\x76\x61\x6C\x75\x65"),
+ &table);
+ _hpack_dynamic_table_free (&table);
+ HTTP_TEST ((result == 0), "request without Huffman Coding (result=%d)",
+ result);
+ /* C.4. Request Examples with Huffman Coding */
+ _hpack_dynamic_table_init (&table, HPACK_DEFAULT_HEADER_TABLE_SIZE);
+ result = http_test_parse_request (
+ http_token_lit (
+ "\x82\x86\x84\x41\x8C\xF1\xE3\xC2\xE5\xF2\x3A\x6B\xA0\xAB\x90\xF4\xFF"),
+ http_token_lit ("\x82\x86\x84\xBE\x58\x86\xA8\xEB\x10\x64\x9C\xBF"),
+ http_token_lit ("\x82\x87\x85\xBF\x40\x88\x25\xA8\x49\xE9\x5B\xA9\x7D\x7F"
+ "\x89\x25\xA8\x49\xE9\x5B\xB8\xE8\xB4\xBF"),
+ &table);
+ _hpack_dynamic_table_free (&table);
+ HTTP_TEST ((result == 0), "request with Huffman Coding (result=%d)", result);
+
+ vlib_cli_output (vm, "hpack_serialize_response");
+
+ hpack_response_control_data_t resp_cd;
+ u8 *server_name;
+ u8 *date;
+
+ static void (*_hpack_serialize_response) (
+ u8 * app_headers, u32 app_headers_len,
+ hpack_response_control_data_t * control_data, u8 * *dst);
+
+ _hpack_serialize_response =
+ vlib_get_plugin_symbol ("http_plugin.so", "hpack_serialize_response");
+
+ server_name = format (0, "http unit tests");
+ date = format (0, "Mon, 21 Oct 2013 20:13:21 GMT");
+
+ vec_validate (buf, 127);
+ vec_reset_length (buf);
+ resp_cd.sc = HTTP_STATUS_GATEWAY_TIMEOUT;
+ resp_cd.content_len = HPACK_ENCODER_SKIP_CONTENT_LEN;
+ resp_cd.server_name = server_name;
+ resp_cd.server_name_len = vec_len (server_name);
+ resp_cd.date = date;
+ resp_cd.date_len = vec_len (date);
+ u8 expected1[] =
+ "\x08\x03\x35\x30\x34\x0F\x27\x8B\x9D\x29\xAD\x4B\x6A\x32\x54\x49\x50\x94"
+ "\x7F\x0F\x12\x96\xD0\x7A\xBE\x94\x10\x54\xD4\x44\xA8\x20\x05\x95\x04\x0B"
+ "\x81\x66\xE0\x82\xA6\x2D\x1B\xFF";
+ _hpack_serialize_response (0, 0, &resp_cd, &buf);
+ HTTP_TEST ((vec_len (buf) == (sizeof (expected1) - 1) &&
+ !memcmp (buf, expected1, sizeof (expected1) - 1)),
+ "response encoded as %U", format_hex_bytes, buf, vec_len (buf));
+ vec_reset_length (buf);
+
+ resp_cd.sc = HTTP_STATUS_OK;
+ resp_cd.content_len = 1024;
+ http_headers_ctx_t headers;
+ u8 *headers_buf = 0;
+ vec_validate (headers_buf, 127);
+ http_init_headers_ctx (&headers, headers_buf, vec_len (headers_buf));
+ http_add_header (&headers, HTTP_HEADER_CONTENT_TYPE,
+ http_token_lit ("text/plain"));
+ http_add_header (&headers, HTTP_HEADER_CACHE_STATUS,
+ http_token_lit ("ExampleCache; hit"));
+ http_add_custom_header (&headers, http_token_lit ("sandwich"),
+ http_token_lit ("spam"));
+ u8 expected2[] =
+ "\x88\x0F\x27\x8B\x9D\x29\xAD\x4B\x6A\x32\x54\x49\x50\x94\x7F\x0F\x12\x96"
+ "\xD0\x7A\xBE\x94\x10\x54\xD4\x44\xA8\x20\x05\x95\x04\x0B\x81\x66\xE0\x82"
+ "\xA6\x2D\x1B\xFF\x0F\x0D\x83\x08\x04\xD7\x0F\x10\x87\x49\x7C\xA5\x8A\xE8"
+ "\x19\xAA\x00\x88\x20\xC9\x39\x56\x42\x46\x9B\x51\x8D\xC1\xE4\x74\xD7\x41"
+ "\x6F\x0C\x93\x97\xED\x49\xCC\x9F\x00\x86\x40\xEA\x93\xC1\x89\x3F\x83\x45"
+ "\x63\xA7";
+ _hpack_serialize_response (headers_buf, headers.tail_offset, &resp_cd, &buf);
+ HTTP_TEST ((vec_len (buf) == (sizeof (expected2) - 1) &&
+ !memcmp (buf, expected2, sizeof (expected2) - 1)),
+ "response encoded as %U", format_hex_bytes, buf, vec_len (buf));
+ vec_free (buf);
+ vec_free (headers_buf);
+ vec_free (server_name);
+ vec_free (date);
+
+ return 0;
+}
+
+static int
+http_test_h2_frame (vlib_main_t *vm)
+{
+ static void (*_http2_frame_header_read) (u8 * src,
+ http2_frame_header_t * fh);
+
+ _http2_frame_header_read =
+ vlib_get_plugin_symbol ("http_plugin.so", "http2_frame_header_read");
+
+ vlib_cli_output (vm, "http2_frame_read_settings");
+
+ static http2_error_t (*_http2_frame_read_settings) (
+ http2_conn_settings_t * settings, u8 * payload, u32 payload_len);
+
+ _http2_frame_read_settings =
+ vlib_get_plugin_symbol ("http_plugin.so", "http2_frame_read_settings");
+
+ http2_error_t rv;
+ http2_frame_header_t fh = { 0 };
+ http2_conn_settings_t conn_settings = http2_default_conn_settings;
+
+ u8 settings[] = { 0x0, 0x0, 0x12, 0x4, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x3, 0x0, 0x0, 0x0, 0x64, 0x0, 0x4, 0x40,
+ 0x0, 0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x0 };
+ _http2_frame_header_read (settings, &fh);
+ HTTP_TEST ((fh.flags == 0 && fh.type == HTTP2_FRAME_TYPE_SETTINGS &&
+ fh.stream_id == 0 && fh.length == 18),
+ "frame identified as SETTINGS");
+
+ rv = _http2_frame_read_settings (
+ &conn_settings, settings + HTTP2_FRAME_HEADER_SIZE, fh.length);
+ HTTP_TEST ((rv == HTTP2_ERROR_NO_ERROR &&
+ conn_settings.max_concurrent_streams == 100 &&
+ conn_settings.initial_window_size == 1073741824 &&
+ conn_settings.enable_push == 0),
+ "SETTINGS frame payload parsed")
+
+ u8 settings_ack[] = { 0x0, 0x0, 0x0, 0x4, 0x1, 0x0, 0x0, 0x0, 0x0 };
+ _http2_frame_header_read (settings_ack, &fh);
+ HTTP_TEST ((fh.flags == HTTP2_FRAME_FLAG_ACK &&
+ fh.type == HTTP2_FRAME_TYPE_SETTINGS && fh.stream_id == 0 &&
+ fh.length == 0),
+ "frame identified as SETTINGS ACK");
+
+ vlib_cli_output (vm, "http2_frame_write_settings_ack");
+
+ static void (*_http2_frame_write_settings_ack) (u8 * *dst);
+
+ _http2_frame_write_settings_ack = vlib_get_plugin_symbol (
+ "http_plugin.so", "http2_frame_write_settings_ack");
+
+ u8 *buf = 0;
+
+ _http2_frame_write_settings_ack (&buf);
+ HTTP_TEST ((vec_len (buf) == sizeof (settings_ack)) &&
+ !memcmp (buf, settings_ack, sizeof (settings_ack)),
+ "SETTINGS ACK frame written");
+ vec_free (buf);
+
+ vlib_cli_output (vm, "http2_frame_write_settings");
+
+ static void (*_http2_frame_write_settings) (
+ http2_settings_entry_t * settings, u8 * *dst);
+
+ _http2_frame_write_settings =
+ vlib_get_plugin_symbol ("http_plugin.so", "http2_frame_write_settings");
+
+ http2_settings_entry_t *settings_list = 0;
+ vec_validate (settings_list, 2);
+ settings_list[0].identifier = HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS;
+ settings_list[0].value = 100;
+ settings_list[1].identifier = HTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
+ settings_list[1].value = 1073741824;
+ settings_list[2].identifier = HTTP2_SETTINGS_ENABLE_PUSH;
+ settings_list[2].value = 0;
+
+ _http2_frame_write_settings (settings_list, &buf);
+ HTTP_TEST ((vec_len (buf) == sizeof (settings) &&
+ !memcmp (buf, settings, sizeof (settings))),
+ "SETTINGS frame written");
+ vec_free (settings_list);
+ vec_free (buf);
+
+ vlib_cli_output (vm, "http2_frame_read_window_update");
+
+ static http2_error_t (*_http2_frame_read_window_update) (
+ u32 * increment, u8 * payload, u32 payload_len);
+
+ _http2_frame_read_window_update = vlib_get_plugin_symbol (
+ "http_plugin.so", "http2_frame_read_window_update");
+
+ u32 win_increment;
+ u8 win_update[] = { 0x0, 0x0, 0x4, 0x8, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x3f, 0xff, 0x0, 0x1 };
+ _http2_frame_header_read (win_update, &fh);
+ HTTP_TEST ((fh.flags == 0 && fh.type == HTTP2_FRAME_TYPE_WINDOW_UPDATE &&
+ fh.stream_id == 0 && fh.length == 4),
+ "frame identified as WINDOW_UPDATE");
+
+ rv = _http2_frame_read_window_update (
+ &win_increment, win_update + HTTP2_FRAME_HEADER_SIZE, fh.length);
+ HTTP_TEST ((rv == HTTP2_ERROR_NO_ERROR && win_increment == 1073676289),
+ "WINDOW_UPDATE frame payload parsed")
+
+ vlib_cli_output (vm, "http2_frame_write_window_update");
+
+ static void (*_http2_frame_write_window_update) (u32 increment,
+ u32 stream_id, u8 * *dst);
+
+ _http2_frame_write_window_update = vlib_get_plugin_symbol (
+ "http_plugin.so", "http2_frame_write_window_update");
+
+ _http2_frame_write_window_update (1073676289, 0, &buf);
+ HTTP_TEST ((vec_len (buf) == sizeof (win_update) &&
+ !memcmp (buf, win_update, sizeof (win_update))),
+ "WINDOW_UPDATE frame written");
+ vec_free (buf);
+
+ vlib_cli_output (vm, "http2_frame_read_rst_stream");
+
+ static http2_error_t (*_http2_frame_read_rst_stream) (
+ u32 * error_code, u8 * payload, u32 payload_len);
+
+ _http2_frame_read_rst_stream =
+ vlib_get_plugin_symbol ("http_plugin.so", "http2_frame_read_rst_stream");
+
+ u32 error_code;
+ u8 rst_stream[] = { 0x0, 0x0, 0x4, 0x3, 0x0, 0x0, 0x0,
+ 0x0, 0x5, 0x0, 0x0, 0x0, 0x01 };
+ _http2_frame_header_read (rst_stream, &fh);
+ HTTP_TEST ((fh.flags == 0 && fh.type == HTTP2_FRAME_TYPE_RST_STREAM &&
+ fh.stream_id == 5 && fh.length == 4),
+ "frame identified as RST_STREAM");
+
+ rv = _http2_frame_read_rst_stream (
+ &error_code, rst_stream + HTTP2_FRAME_HEADER_SIZE, fh.length);
+ HTTP_TEST (
+ (rv == HTTP2_ERROR_NO_ERROR && error_code == HTTP2_ERROR_PROTOCOL_ERROR),
+ "RST_STREAM frame payload parsed")
+
+ vlib_cli_output (vm, "http2_frame_write_rst_stream");
+
+ static void (*_http2_frame_write_rst_stream) (u32 increment, u32 stream_id,
+ u8 * *dst);
+
+ _http2_frame_write_rst_stream =
+ vlib_get_plugin_symbol ("http_plugin.so", "http2_frame_write_rst_stream");
+
+ _http2_frame_write_rst_stream (HTTP2_ERROR_PROTOCOL_ERROR, 5, &buf);
+ HTTP_TEST ((vec_len (buf) == sizeof (rst_stream) &&
+ !memcmp (buf, rst_stream, sizeof (rst_stream))),
+ "RST_STREAM frame written");
+ vec_free (buf);
+
+ vlib_cli_output (vm, "http2_frame_read_goaway");
+
+ static http2_error_t (*_http2_frame_read_goaway) (
+ u32 * error_code, u32 * last_stream_id, u8 * payload, u32 payload_len);
+
+ _http2_frame_read_goaway =
+ vlib_get_plugin_symbol ("http_plugin.so", "http2_frame_read_goaway");
+
+ u32 last_stream_id;
+ u8 goaway[] = { 0x0, 0x0, 0x8, 0x7, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x5, 0x0, 0x0, 0x0, 0x2 };
+
+ _http2_frame_header_read (goaway, &fh);
+ HTTP_TEST ((fh.flags == 0 && fh.type == HTTP2_FRAME_TYPE_GOAWAY &&
+ fh.stream_id == 0 && fh.length == 8),
+ "frame identified as GOAWAY");
+
+ rv = _http2_frame_read_goaway (&error_code, &last_stream_id,
+ goaway + HTTP2_FRAME_HEADER_SIZE, fh.length);
+ HTTP_TEST ((rv == HTTP2_ERROR_NO_ERROR &&
+ error_code == HTTP2_ERROR_INTERNAL_ERROR && last_stream_id == 5),
+ "GOAWAY frame payload parsed")
+
+ vlib_cli_output (vm, "http2_frame_write_goaway");
+
+ static void (*_http2_frame_write_goaway) (http2_error_t error_code,
+ u32 last_stream_id, u8 * *dst);
+
+ _http2_frame_write_goaway =
+ vlib_get_plugin_symbol ("http_plugin.so", "http2_frame_write_goaway");
+
+ _http2_frame_write_goaway (HTTP2_ERROR_INTERNAL_ERROR, 5, &buf);
+ HTTP_TEST ((vec_len (buf) == sizeof (goaway) &&
+ !memcmp (buf, goaway, sizeof (goaway))),
+ "GOAWAY frame written");
+ vec_free (buf);
+
+ vlib_cli_output (vm, "http2_frame_read_headers");
+
+ static http2_error_t (*_http2_frame_read_headers) (
+ u8 * *headers, u32 * headers_len, u8 * payload, u32 payload_len, u8 flags);
+
+ _http2_frame_read_headers =
+ vlib_get_plugin_symbol ("http_plugin.so", "http2_frame_read_headers");
+
+ u8 *h;
+ u32 h_len;
+ u8 headers[] = { 0x0, 0x0, 0x28, 0x1, 0x5, 0x0, 0x0, 0x0, 0x3, 0x3f,
+ 0xe1, 0x1f, 0x82, 0x4, 0x88, 0x62, 0x7b, 0x69, 0x1d, 0x48,
+ 0x5d, 0x3e, 0x53, 0x86, 0x41, 0x88, 0xaa, 0x69, 0xd2, 0x9a,
+ 0xc4, 0xb9, 0xec, 0x9b, 0x7a, 0x88, 0x25, 0xb6, 0x50, 0xc3,
+ 0xab, 0xb8, 0x15, 0xc1, 0x53, 0x3, 0x2a, 0x2f, 0x2a };
+
+ _http2_frame_header_read (headers, &fh);
+ HTTP_TEST ((fh.flags ==
+ (HTTP2_FRAME_FLAG_END_HEADERS | HTTP2_FRAME_FLAG_END_STREAM) &&
+ fh.type == HTTP2_FRAME_TYPE_HEADERS && fh.stream_id == 3 &&
+ fh.length == 40),
+ "frame identified as HEADERS");
+
+ rv = _http2_frame_read_headers (
+ &h, &h_len, headers + HTTP2_FRAME_HEADER_SIZE, fh.length, fh.flags);
+ HTTP_TEST ((rv == HTTP2_ERROR_NO_ERROR && h_len == 40 &&
+ *h == headers[HTTP2_FRAME_HEADER_SIZE]),
+ "HEADERS frame payload parsed")
+
+ vlib_cli_output (vm, "http2_frame_write_headers_header");
+
+ static void (*_http2_frame_write_headers_header) (
+ u32 headers_len, u32 stream_id, u8 flags, u8 * dst);
+
+ _http2_frame_write_headers_header = vlib_get_plugin_symbol (
+ "http_plugin.so", "http2_frame_write_headers_header");
+
+ u8 *p = http2_frame_header_alloc (&buf);
+ _http2_frame_write_headers_header (
+ 40, 3, HTTP2_FRAME_FLAG_END_HEADERS | HTTP2_FRAME_FLAG_END_STREAM, p);
+ HTTP_TEST ((vec_len (buf) == HTTP2_FRAME_HEADER_SIZE &&
+ !memcmp (buf, headers, HTTP2_FRAME_HEADER_SIZE)),
+ "HEADERS frame header written");
+ vec_free (buf);
+
+ vlib_cli_output (vm, "http2_frame_read_data");
+
+ static http2_error_t (*_http2_frame_read_data) (
+ u8 * *data, u32 * data_len, u8 * payload, u32 payload_len, u8 flags);
+
+ _http2_frame_read_data =
+ vlib_get_plugin_symbol ("http_plugin.so", "http2_frame_read_data");
+
+ u8 *d;
+ u32 d_len;
+ u8 data[] = { 0x0, 0x0, 0x9, 0x0, 0x1, 0x0, 0x0, 0x0, 0x3,
+ 0x6e, 0x6f, 0x74, 0x20, 0x66, 0x6f, 0x75, 0x6e, 0x64 };
+
+ _http2_frame_header_read (data, &fh);
+ HTTP_TEST ((fh.flags == HTTP2_FRAME_FLAG_END_STREAM &&
+ fh.type == HTTP2_FRAME_TYPE_DATA && fh.stream_id == 3 &&
+ fh.length == 9),
+ "frame identified as DATA");
+
+ rv = _http2_frame_read_data (&d, &d_len, data + HTTP2_FRAME_HEADER_SIZE,
+ fh.length, fh.flags);
+ HTTP_TEST ((rv == HTTP2_ERROR_NO_ERROR && d_len == 9 &&
+ *d == data[HTTP2_FRAME_HEADER_SIZE]),
+ "DATA frame payload parsed")
+
+ vlib_cli_output (vm, "http2_frame_write_data_header");
+
+ static void (*_http2_frame_write_data_header) (
+ u32 headers_len, u32 stream_id, u8 flags, u8 * dst);
+
+ _http2_frame_write_data_header =
+ vlib_get_plugin_symbol ("http_plugin.so", "http2_frame_write_data_header");
+
+ p = http2_frame_header_alloc (&buf);
+ _http2_frame_write_data_header (9, 3, HTTP2_FRAME_FLAG_END_STREAM, p);
+ HTTP_TEST ((vec_len (buf) == HTTP2_FRAME_HEADER_SIZE &&
+ !memcmp (buf, data, HTTP2_FRAME_HEADER_SIZE)),
+ "DATA frame header written");
+ vec_free (buf);
+
+ return 0;
+}
+
static clib_error_t *
test_http_command_fn (vlib_main_t *vm, unformat_input_t *input,
vlib_cli_command_t *cmd)
@@ -550,6 +1317,10 @@ test_http_command_fn (vlib_main_t *vm, unformat_input_t *input,
res = http_test_http_token_is_case (vm);
else if (unformat (input, "header-table"))
res = http_test_http_header_table (vm);
+ else if (unformat (input, "hpack"))
+ res = http_test_hpack (vm);
+ else if (unformat (input, "h2-frame"))
+ res = http_test_h2_frame (vm);
else if (unformat (input, "all"))
{
if ((res = http_test_parse_authority (vm)))
@@ -562,6 +1333,10 @@ test_http_command_fn (vlib_main_t *vm, unformat_input_t *input,
goto done;
if ((res = http_test_http_header_table (vm)))
goto done;
+ if ((res = http_test_hpack (vm)))
+ goto done;
+ if ((res = http_test_h2_frame (vm)))
+ goto done;
}
else
break;
diff --git a/src/plugins/http_static/http_cache.c b/src/plugins/http_static/http_cache.c
index 2e63e335d47..61f1f50ea3b 100644
--- a/src/plugins/http_static/http_cache.c
+++ b/src/plugins/http_static/http_cache.c
@@ -400,6 +400,14 @@ hss_cache_init (hss_cache_t *hc, uword cache_size, u8 debug_level)
hc->first_index = hc->last_index = ~0;
}
+void
+hss_cache_free (hss_cache_t *hc)
+{
+ hss_cache_clear (hc);
+ BV (clib_bihash_free) (&hc->name_to_data);
+ clib_spinlock_free (&hc->cache_lock);
+}
+
/** \brief format a file cache entry
*/
static u8 *
diff --git a/src/plugins/http_static/http_cache.h b/src/plugins/http_static/http_cache.h
index 21f71a924d5..c1e363443ee 100644
--- a/src/plugins/http_static/http_cache.h
+++ b/src/plugins/http_static/http_cache.h
@@ -67,6 +67,7 @@ u32 hss_cache_add_and_attach (hss_cache_t *hc, u8 *path, u8 **data,
void hss_cache_detach_entry (hss_cache_t *hc, u32 ce_index);
u32 hss_cache_clear (hss_cache_t *hc);
void hss_cache_init (hss_cache_t *hc, uword cache_size, u8 debug_level);
+void hss_cache_free (hss_cache_t *hc);
u8 *format_hss_cache (u8 *s, va_list *args);
diff --git a/src/plugins/http_static/http_static.api b/src/plugins/http_static/http_static.api
index bd0cebc45d2..5c1eaf7b9d2 100644
--- a/src/plugins/http_static/http_static.api
+++ b/src/plugins/http_static/http_static.api
@@ -3,41 +3,7 @@
This file defines static http server control-plane API messages
*/
-option version = "2.4.0";
-
-/** \brief Configure and enable the static http server
- @param client_index - opaque cookie to identify the sender
- @param context - sender context, to match reply w/ request
- @param fifo_size - size (in bytes) of the session FIFOs
- @param cache_size_limit - size (in bytes) of the in-memory file data cache
- @param max_age - how long a response is considered fresh (in seconds)
- @param prealloc_fifos - number of preallocated fifos (usually 0)
- @param private_segment_size - fifo segment size (usually 0)
- @param www_root - html root path
- @param uri - bind URI, defaults to "tcp://0.0.0.0/80"
-*/
-
-autoreply define http_static_enable_v2 {
- option deprecated;
-
- /* Client identifier, set from api_main.my_client_index */
- u32 client_index;
-
- /* Arbitrary context, so client can match reply to request */
- u32 context;
- /* Typical options */
- u32 fifo_size;
- u32 cache_size_limit;
- u32 max_age [default=600];
- /* Unusual options */
- u32 prealloc_fifos;
- u32 private_segment_size;
-
- /* Root of the html path */
- string www_root[256];
- /* The bind URI */
- string uri[256];
-};
+option version = "2.5.0";
/** \brief Configure and enable the static http server
@param client_index - opaque cookie to identify the sender
@@ -45,6 +11,7 @@ autoreply define http_static_enable_v2 {
@param fifo_size - size (in bytes) of the session FIFOs
@param cache_size_limit - size (in bytes) of the in-memory file data cache
@param max_age - how long a response is considered fresh (in seconds)
+ @param max_body_size - maximum size of a request body (in bytes)
@param keepalive_timeout - timeout during which client connection will stay open (in seconds)
@param prealloc_fifos - number of preallocated fifos (usually 0)
@param private_segment_size - fifo segment size (usually 0)
@@ -52,7 +19,7 @@ autoreply define http_static_enable_v2 {
@param uri - bind URI, defaults to "tcp://0.0.0.0/80"
*/
-autoreply define http_static_enable_v3 {
+autoreply define http_static_enable_v4 {
option deprecated;
/* Client identifier, set from api_main.my_client_index */
@@ -65,6 +32,7 @@ autoreply define http_static_enable_v3 {
u32 cache_size_limit;
u32 max_age [default=600];
u32 keepalive_timeout [default=60];
+ u64 max_body_size [default=8000];
/* Unusual options */
u32 prealloc_fifos;
u32 private_segment_size;
@@ -76,12 +44,14 @@ autoreply define http_static_enable_v3 {
};
/** \brief Configure and enable the static http server
+
@param client_index - opaque cookie to identify the sender
@param context - sender context, to match reply w/ request
@param fifo_size - size (in bytes) of the session FIFOs
@param cache_size_limit - size (in bytes) of the in-memory file data cache
@param max_age - how long a response is considered fresh (in seconds)
@param max_body_size - maximum size of a request body (in bytes)
+ @param rx_buff_thresh - maximum size of a large memory allocation (in bytes)
@param keepalive_timeout - timeout during which client connection will stay open (in seconds)
@param prealloc_fifos - number of preallocated fifos (usually 0)
@param private_segment_size - fifo segment size (usually 0)
@@ -89,7 +59,7 @@ autoreply define http_static_enable_v3 {
@param uri - bind URI, defaults to "tcp://0.0.0.0/80"
*/
-autoreply define http_static_enable_v4 {
+autoreply define http_static_enable_v5 {
/* Client identifier, set from api_main.my_client_index */
u32 client_index;
@@ -100,7 +70,8 @@ autoreply define http_static_enable_v4 {
u32 cache_size_limit;
u32 max_age [default=600];
u32 keepalive_timeout [default=60];
- u64 max_body_size [default=8000];
+ u64 max_body_size [default=8192];
+ u32 rx_buff_thresh [default=1048576];
/* Unusual options */
u32 prealloc_fifos;
u32 private_segment_size;
diff --git a/src/plugins/http_static/http_static.c b/src/plugins/http_static/http_static.c
index 7a12f37b8d3..85b044fb860 100644
--- a/src/plugins/http_static/http_static.c
+++ b/src/plugins/http_static/http_static.c
@@ -67,22 +67,25 @@ hss_register_url_handler (hss_url_handler_fn fp, const char *url,
static int
hss_enable_api (u32 fifo_size, u32 cache_limit, u32 prealloc_fifos,
u32 private_segment_size, u8 *www_root, u8 *uri, u32 max_age,
- u32 keepalive_timeout, u64 max_body_size)
+ u32 keepalive_timeout, u64 max_body_size, u32 rx_buff_thresh)
{
hss_main_t *hsm = &hss_main;
int rv;
hsm->fifo_size = fifo_size;
- hsm->cache_size = cache_limit;
hsm->prealloc_fifos = prealloc_fifos;
hsm->private_segment_size = private_segment_size;
- hsm->www_root = format (0, "%s%c", www_root, 0);
- hsm->uri = format (0, "%s%c", uri, 0);
- hsm->max_age = max_age;
- hsm->max_body_size = max_body_size;
- hsm->keepalive_timeout = keepalive_timeout;
-
- if (vec_len (hsm->www_root) < 2)
+ if (uri && parse_uri ((char *) uri, &hsm->default_listener.sep))
+ return VNET_API_ERROR_INVALID_VALUE;
+ hsm->default_listener.www_root = format (0, "%s%c", www_root, 0);
+ hsm->default_listener.cache_size = cache_limit;
+ hsm->default_listener.max_age = max_age;
+ hsm->default_listener.max_body_size = max_body_size;
+ hsm->default_listener.rx_buff_thresh = rx_buff_thresh;
+ hsm->default_listener.keepalive_timeout = keepalive_timeout;
+ hsm->have_default_listener = 1;
+
+ if (vec_len (hsm->default_listener.www_root) < 2)
return VNET_API_ERROR_INVALID_VALUE;
if (hsm->app_index != ~0)
@@ -99,8 +102,7 @@ hss_enable_api (u32 fifo_size, u32 cache_limit, u32 prealloc_fifos,
case 0:
break;
default:
- vec_free (hsm->www_root);
- vec_free (hsm->uri);
+ vec_free (hsm->default_listener.www_root);
return VNET_API_ERROR_INIT_FAILED;
}
return 0;
@@ -108,49 +110,29 @@ hss_enable_api (u32 fifo_size, u32 cache_limit, u32 prealloc_fifos,
/* API message handler */
static void
-vl_api_http_static_enable_v2_t_handler (vl_api_http_static_enable_v2_t *mp)
-{
- vl_api_http_static_enable_v2_reply_t *rmp;
- hss_main_t *hsm = &hss_main;
- int rv;
-
- mp->uri[ARRAY_LEN (mp->uri) - 1] = 0;
- mp->www_root[ARRAY_LEN (mp->www_root) - 1] = 0;
-
- rv = hss_enable_api (ntohl (mp->fifo_size), ntohl (mp->cache_size_limit),
- ntohl (mp->prealloc_fifos),
- ntohl (mp->private_segment_size), mp->www_root, mp->uri,
- ntohl (mp->max_age), HSS_DEFAULT_KEEPALIVE_TIMEOUT,
- HSS_DEFAULT_MAX_BODY_SIZE);
-
- REPLY_MACRO (VL_API_HTTP_STATIC_ENABLE_V2_REPLY);
-}
-
-/* API message handler */
-static void
-vl_api_http_static_enable_v3_t_handler (vl_api_http_static_enable_v3_t *mp)
+vl_api_http_static_enable_v4_t_handler (vl_api_http_static_enable_v4_t *mp)
{
- vl_api_http_static_enable_v3_reply_t *rmp;
+ vl_api_http_static_enable_v4_reply_t *rmp;
hss_main_t *hsm = &hss_main;
int rv;
mp->uri[ARRAY_LEN (mp->uri) - 1] = 0;
mp->www_root[ARRAY_LEN (mp->www_root) - 1] = 0;
- rv = hss_enable_api (ntohl (mp->fifo_size), ntohl (mp->cache_size_limit),
- ntohl (mp->prealloc_fifos),
- ntohl (mp->private_segment_size), mp->www_root, mp->uri,
- ntohl (mp->max_age), ntohl (mp->keepalive_timeout),
- HSS_DEFAULT_MAX_BODY_SIZE);
+ rv = hss_enable_api (
+ ntohl (mp->fifo_size), ntohl (mp->cache_size_limit),
+ ntohl (mp->prealloc_fifos), ntohl (mp->private_segment_size), mp->www_root,
+ mp->uri, ntohl (mp->max_age), ntohl (mp->keepalive_timeout),
+ ntohl (mp->max_body_size), HSS_DEFAULT_RX_BUFFER_THRESH);
- REPLY_MACRO (VL_API_HTTP_STATIC_ENABLE_V3_REPLY);
+ REPLY_MACRO (VL_API_HTTP_STATIC_ENABLE_V4_REPLY);
}
/* API message handler */
static void
-vl_api_http_static_enable_v4_t_handler (vl_api_http_static_enable_v4_t *mp)
+vl_api_http_static_enable_v5_t_handler (vl_api_http_static_enable_v5_t *mp)
{
- vl_api_http_static_enable_v4_reply_t *rmp;
+ vl_api_http_static_enable_v5_reply_t *rmp;
hss_main_t *hsm = &hss_main;
int rv;
@@ -161,9 +143,9 @@ vl_api_http_static_enable_v4_t_handler (vl_api_http_static_enable_v4_t *mp)
ntohl (mp->prealloc_fifos),
ntohl (mp->private_segment_size), mp->www_root, mp->uri,
ntohl (mp->max_age), ntohl (mp->keepalive_timeout),
- ntohl (mp->max_body_size));
+ ntohl (mp->max_body_size), ntohl (mp->rx_buff_thresh));
- REPLY_MACRO (VL_API_HTTP_STATIC_ENABLE_V4_REPLY);
+ REPLY_MACRO (VL_API_HTTP_STATIC_ENABLE_V5_REPLY);
}
#include <http_static/http_static.api.c>
diff --git a/src/plugins/http_static/http_static.h b/src/plugins/http_static/http_static.h
index e158a32dbc9..2b5c065e287 100644
--- a/src/plugins/http_static/http_static.h
+++ b/src/plugins/http_static/http_static.h
@@ -25,6 +25,7 @@
#define HSS_DEFAULT_MAX_AGE 600
#define HSS_DEFAULT_MAX_BODY_SIZE 8192
+#define HSS_DEFAULT_RX_BUFFER_THRESH 1 << 20
#define HSS_DEFAULT_KEEPALIVE_TIMEOUT 60
/** @file http_static.h
@@ -33,15 +34,20 @@
/** \brief Application session
*/
-typedef struct
+typedef struct hss_session_
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
u32 session_index;
/** rx thread index */
- u32 thread_index;
+ clib_thread_index_t thread_index;
/** vpp session index, handle */
u32 vpp_session_index;
session_handle_t vpp_session_handle;
+ /** Index of listener for which connection was accepted */
+ u32 listener_index;
+ u8 *target_path;
+ u8 *target_query;
+ http_req_method_t rt;
/** Fully-resolved file path */
u8 *path;
/** Data to send */
@@ -58,6 +64,15 @@ typedef struct
http_headers_ctx_t resp_headers;
/** Response header buffer */
u8 *headers_buf;
+ /** RX buffer (POST body) */
+ u8 *rx_buff;
+ /** Current RX buffer offset */
+ u64 rx_buff_offset;
+ /** POST body left to receive */
+ u64 left_recv;
+ /** threshold for switching to pointers */
+ u64 use_ptr_thresh;
+ int (*read_body_handler) (struct hss_session_ *hs, session_t *ts);
} hss_session_t;
typedef struct hss_session_handle_
@@ -67,7 +82,7 @@ typedef struct hss_session_handle_
struct
{
u32 session_index;
- u32 thread_index;
+ clib_thread_index_t thread_index;
};
u64 as_u64;
};
@@ -113,6 +128,36 @@ typedef hss_url_handler_rc_t (*hss_url_handler_fn) (hss_url_handler_args_t *);
typedef void (*hss_register_url_fn) (hss_url_handler_fn, char *, int);
typedef void (*hss_session_send_fn) (hss_url_handler_args_t *args);
+typedef struct hss_listener_
+{
+ /** Path to file hash table */
+ hss_cache_t cache;
+ /** The bind session endpoint e.g., tcp://0.0.0.0:80 */
+ session_endpoint_cfg_t sep;
+ /** root path to be served */
+ u8 *www_root;
+ /** Threshold for switching to ptr data in http msgs */
+ u64 use_ptr_thresh;
+ /** Max cache size before LRU occurs */
+ u64 cache_size;
+ /** Maximum size of a request body (in bytes) **/
+ u64 max_body_size;
+ /** Maximum size of a large memory allocation */
+ u32 rx_buff_thresh;
+ /** Timeout during which client connection will stay open */
+ u32 keepalive_timeout;
+ /** How long a response is considered fresh (in seconds) */
+ u32 max_age;
+ /** Formatted max_age: "max-age=xyz" */
+ u8 *max_age_formatted;
+ /** Enable the use of builtinurls */
+ u8 enable_url_handlers;
+ /** Index in listener pool */
+ u32 l_index;
+ /** Listener session handle */
+ session_handle_t session_handle;
+} hss_listener_t;
+
/** \brief Main data structure
*/
typedef struct
@@ -120,15 +165,13 @@ typedef struct
/** Per thread vector of session pools */
hss_session_t **sessions;
+ /** Listeners pool */
+ hss_listener_t *listeners;
+
/** Hash tables for built-in GET and POST handlers */
uword *get_url_handlers;
uword *post_url_handlers;
- hss_cache_t cache;
-
- /** root path to be served */
- u8 *www_root;
-
/** Application index */
u32 app_index;
@@ -144,6 +187,11 @@ typedef struct
* Config
*/
+ /** Listener configured with server, if any */
+ hss_listener_t default_listener;
+ u8 have_default_listener;
+ u8 is_init;
+
/** Enable debug messages */
int debug_level;
/** Number of preallocated fifos, usually 0 */
@@ -152,22 +200,6 @@ typedef struct
u64 private_segment_size;
/** Size of the allocated rx, tx fifos, roughly 8K or so */
u32 fifo_size;
- /** The bind URI, defaults to tcp://0.0.0.0/80 */
- u8 *uri;
- /** Threshold for switching to ptr data in http msgs */
- u64 use_ptr_thresh;
- /** Enable the use of builtinurls */
- u8 enable_url_handlers;
- /** Max cache size before LRU occurs */
- u64 cache_size;
- /** How long a response is considered fresh (in seconds) */
- u32 max_age;
- /** Maximum size of a request body (in bytes) **/
- u64 max_body_size;
- /** Formatted max_age: "max-age=xyz" */
- u8 *max_age_formatted;
- /** Timeout during which client connection will stay open */
- u32 keepalive_timeout;
/** hash table of file extensions to mime types string indices */
uword *mime_type_indices_by_file_extensions;
@@ -177,6 +209,16 @@ extern hss_main_t hss_main;
int hss_create (vlib_main_t *vm);
+static inline hss_listener_t *
+hss_listener_get (u32 l_index)
+{
+ hss_main_t *hsm = &hss_main;
+
+ if (pool_is_free_index (hsm->listeners, l_index))
+ return 0;
+ return pool_elt_at_index (hsm->listeners, l_index);
+}
+
/**
* Register a GET or POST URL handler
*/
@@ -184,7 +226,8 @@ void hss_register_url_handler (hss_url_handler_fn fp, const char *url,
http_req_method_t type);
void hss_session_send_data (hss_url_handler_args_t *args);
void hss_builtinurl_json_handlers_init (void);
-hss_session_t *hss_session_get (u32 thread_index, u32 hs_index);
+hss_session_t *hss_session_get (clib_thread_index_t thread_index,
+ u32 hs_index);
#endif /* __included_http_static_h__ */
diff --git a/src/plugins/http_static/http_static_test.c b/src/plugins/http_static/http_static_test.c
index 56487893220..aba7bc4ffbf 100644
--- a/src/plugins/http_static/http_static_test.c
+++ b/src/plugins/http_static/http_static_test.c
@@ -39,100 +39,10 @@ http_static_test_main_t http_static_test_main;
#include <vlibapi/vat_helper_macros.h>
static int
-api_http_static_enable_v2 (vat_main_t *vam)
-{
- unformat_input_t *line_input = vam->input;
- vl_api_http_static_enable_v2_t *mp;
- u64 tmp;
- u8 *www_root = 0;
- u8 *uri = 0;
- u32 prealloc_fifos = 0;
- u32 private_segment_size = 0;
- u32 fifo_size = 8 << 10;
- u32 cache_size_limit = 1 << 20;
- u32 max_age = HSS_DEFAULT_MAX_AGE;
- int ret;
-
- /* Parse args required to build the message */
- while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
- {
- if (unformat (line_input, "www-root %s", &www_root))
- ;
- else if (unformat (line_input, "prealloc-fifos %d", &prealloc_fifos))
- ;
- else if (unformat (line_input, "private-segment-size %U",
- unformat_memory_size, &tmp))
- {
- if (tmp >= 0x100000000ULL)
- {
- errmsg ("private segment size %llu, too large", tmp);
- return -99;
- }
- private_segment_size = (u32) tmp;
- }
- else if (unformat (line_input, "fifo-size %U", unformat_memory_size,
- &tmp))
- {
- if (tmp >= 0x100000000ULL)
- {
- errmsg ("fifo-size %llu, too large", tmp);
- return -99;
- }
- fifo_size = (u32) tmp;
- }
- else if (unformat (line_input, "cache-size %U", unformat_memory_size,
- &tmp))
- {
- if (tmp < (128ULL << 10))
- {
- errmsg ("cache-size must be at least 128kb");
- return -99;
- }
- cache_size_limit = (u32) tmp;
- }
- else if (unformat (line_input, "max-age %d", &max_age))
- ;
- else if (unformat (line_input, "uri %s", &uri))
- ;
- else
- {
- errmsg ("unknown input `%U'", format_unformat_error, line_input);
- return -99;
- }
- }
-
- if (www_root == 0)
- {
- errmsg ("Must specify www-root");
- return -99;
- }
-
- if (uri == 0)
- uri = format (0, "tcp://0.0.0.0/80%c", 0);
-
- /* Construct the API message */
- M (HTTP_STATIC_ENABLE_V2, mp);
- strncpy_s ((char *) mp->www_root, 256, (const char *) www_root, 256);
- strncpy_s ((char *) mp->uri, 256, (const char *) uri, 256);
- mp->fifo_size = ntohl (fifo_size);
- mp->cache_size_limit = ntohl (cache_size_limit);
- mp->prealloc_fifos = ntohl (prealloc_fifos);
- mp->private_segment_size = ntohl (private_segment_size);
- mp->max_age = ntohl (max_age);
-
- /* send it... */
- S (mp);
-
- /* Wait for a reply... */
- W (ret);
- return ret;
-}
-
-static int
-api_http_static_enable_v3 (vat_main_t *vam)
+api_http_static_enable_v4 (vat_main_t *vam)
{
unformat_input_t *line_input = vam->input;
- vl_api_http_static_enable_v3_t *mp;
+ vl_api_http_static_enable_v4_t *mp;
u64 tmp;
u8 *www_root = 0;
u8 *uri = 0;
@@ -142,6 +52,7 @@ api_http_static_enable_v3 (vat_main_t *vam)
u32 cache_size_limit = 1 << 20;
u32 max_age = HSS_DEFAULT_MAX_AGE;
u32 keepalive_timeout = HSS_DEFAULT_KEEPALIVE_TIMEOUT;
+ u64 max_body_size = HSS_DEFAULT_MAX_BODY_SIZE;
int ret;
/* Parse args required to build the message */
@@ -188,6 +99,8 @@ api_http_static_enable_v3 (vat_main_t *vam)
;
else if (unformat (line_input, "uri %s", &uri))
;
+ else if (unformat (line_input, "max-body-size %llu", &max_body_size))
+ ;
else
{
errmsg ("unknown input `%U'", format_unformat_error, line_input);
@@ -205,7 +118,7 @@ api_http_static_enable_v3 (vat_main_t *vam)
uri = format (0, "tcp://0.0.0.0/80%c", 0);
/* Construct the API message */
- M (HTTP_STATIC_ENABLE_V3, mp);
+ M (HTTP_STATIC_ENABLE_V4, mp);
strncpy_s ((char *) mp->www_root, 256, (const char *) www_root, 256);
strncpy_s ((char *) mp->uri, 256, (const char *) uri, 256);
mp->fifo_size = ntohl (fifo_size);
@@ -214,6 +127,8 @@ api_http_static_enable_v3 (vat_main_t *vam)
mp->private_segment_size = ntohl (private_segment_size);
mp->max_age = ntohl (max_age);
mp->keepalive_timeout = ntohl (keepalive_timeout);
+ mp->max_body_size = ntohl (max_body_size);
+
/* send it... */
S (mp);
@@ -223,10 +138,10 @@ api_http_static_enable_v3 (vat_main_t *vam)
}
static int
-api_http_static_enable_v4 (vat_main_t *vam)
+api_http_static_enable_v5 (vat_main_t *vam)
{
unformat_input_t *line_input = vam->input;
- vl_api_http_static_enable_v4_t *mp;
+ vl_api_http_static_enable_v5_t *mp;
u64 tmp;
u8 *www_root = 0;
u8 *uri = 0;
@@ -237,6 +152,7 @@ api_http_static_enable_v4 (vat_main_t *vam)
u32 max_age = HSS_DEFAULT_MAX_AGE;
u32 keepalive_timeout = HSS_DEFAULT_KEEPALIVE_TIMEOUT;
u64 max_body_size = HSS_DEFAULT_MAX_BODY_SIZE;
+ u32 rx_buff_thresh = HSS_DEFAULT_RX_BUFFER_THRESH;
int ret;
/* Parse args required to build the message */
@@ -283,7 +199,11 @@ api_http_static_enable_v4 (vat_main_t *vam)
;
else if (unformat (line_input, "uri %s", &uri))
;
- else if (unformat (line_input, "max-body-size %llu", &max_body_size))
+ else if (unformat (line_input, "max-body-size %U", unformat_memory_size,
+ &max_body_size))
+ ;
+ else if (unformat (line_input, "rx-buff-thresh %U", unformat_memory_size,
+ &rx_buff_thresh))
;
else
{
diff --git a/src/plugins/http_static/static_server.c b/src/plugins/http_static/static_server.c
index 074416873e3..692cb53abe3 100644
--- a/src/plugins/http_static/static_server.c
+++ b/src/plugins/http_static/static_server.c
@@ -14,12 +14,14 @@
*/
#include <http_static/http_static.h>
+#include <vnet/session/application.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <http/http_content_types.h>
+#include <http/http_status_codes.h>
/** @file static_server.c
* Static http server, sufficient to serve .html / .css / .js content.
@@ -27,11 +29,59 @@
/*? %%clicmd:group_label Static HTTP Server %% ?*/
#define HSS_FIFO_THRESH (16 << 10)
-
+#define HSS_HEADER_BUF_MAX_SIZE 16192
hss_main_t hss_main;
+static int file_handler_discard_body (hss_session_t *hs, session_t *ts);
+static int url_handler_read_body (hss_session_t *hs, session_t *ts);
+
+static int
+hss_add_header (hss_session_t *hs, http_header_name_t name, const char *value,
+ uword value_len)
+{
+ u32 needed_size = 0;
+ while (http_add_header (&hs->resp_headers, name, value, value_len) == -1)
+ {
+ if (needed_size)
+ {
+ http_truncate_headers_list (&hs->resp_headers);
+ hs->data_len = 0;
+ return -1;
+ }
+ else
+ needed_size = hs->resp_headers.tail_offset +
+ sizeof (http_app_header_t) + value_len;
+ if (needed_size < HSS_HEADER_BUF_MAX_SIZE)
+ {
+ vec_resize (hs->headers_buf, sizeof (http_app_header_t) + value_len);
+ hs->resp_headers.len = needed_size;
+ hs->resp_headers.buf = hs->headers_buf;
+ }
+ else
+ {
+ http_truncate_headers_list (&hs->resp_headers);
+ hs->data_len = 0;
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static_always_inline void
+hss_confirm_data_read (hss_session_t *hs, u32 n_last_deq)
+{
+ session_t *ts;
+
+ ts = session_get (hs->vpp_session_index, hs->thread_index);
+ if (svm_fifo_needs_deq_ntf (ts->rx_fifo, n_last_deq))
+ {
+ svm_fifo_clear_deq_ntf (ts->rx_fifo);
+ session_program_transport_io_evt (ts->handle, SESSION_IO_EVT_RX);
+ }
+}
+
static hss_session_t *
-hss_session_alloc (u32 thread_index)
+hss_session_alloc (clib_thread_index_t thread_index)
{
hss_main_t *hsm = &hss_main;
hss_session_t *hs;
@@ -46,7 +96,7 @@ hss_session_alloc (u32 thread_index)
}
__clib_export hss_session_t *
-hss_session_get (u32 thread_index, u32 hs_index)
+hss_session_get (clib_thread_index_t thread_index, u32 hs_index)
{
hss_main_t *hsm = &hss_main;
if (pool_is_free_index (hsm->sessions[thread_index], hs_index))
@@ -85,6 +135,7 @@ hss_session_disconnect_transport (hss_session_t *hs)
static void
start_send_data (hss_session_t *hs, http_status_code_t status)
{
+ hss_main_t *hsm = &hss_main;
http_msg_t msg;
session_t *ts;
u32 n_enq;
@@ -93,6 +144,9 @@ start_send_data (hss_session_t *hs, http_status_code_t status)
ts = session_get (hs->vpp_session_index, hs->thread_index);
+ if (hsm->debug_level > 0)
+ clib_warning ("status code: %U", format_http_status_code, status);
+
msg.type = HTTP_MSG_REPLY;
msg.code = status;
msg.data.body_len = hs->data_len;
@@ -100,7 +154,7 @@ start_send_data (hss_session_t *hs, http_status_code_t status)
msg.data.headers_len = hs->resp_headers.tail_offset;
msg.data.len = msg.data.body_len + msg.data.headers_len;
- if (msg.data.len > hss_main.use_ptr_thresh)
+ if (msg.data.len > hs->use_ptr_thresh)
{
msg.data.type = HTTP_MSG_DATA_PTR;
rv = svm_fifo_enqueue (ts->tx_fifo, sizeof (msg), (u8 *) &msg);
@@ -175,8 +229,9 @@ hss_session_send_data (hss_url_handler_args_t *args)
/* Set content type only if we have some response data */
if (hs->data_len)
- http_add_header (&hs->resp_headers, HTTP_HEADER_CONTENT_TYPE,
- http_content_type_token (args->ct));
+ if (hss_add_header (hs, HTTP_HEADER_CONTENT_TYPE,
+ http_content_type_token (args->ct)))
+ args->sc = HTTP_STATUS_INTERNAL_ERROR;
start_send_data (hs, args->sc);
}
@@ -247,15 +302,20 @@ content_type_from_request (u8 *request)
}
static int
-try_url_handler (hss_main_t *hsm, hss_session_t *hs, http_req_method_t rt,
- u8 *target_path, u8 *target_query, u8 *data)
+try_url_handler (hss_session_t *hs)
{
+ hss_main_t *hsm = &hss_main;
http_status_code_t sc = HTTP_STATUS_OK;
hss_url_handler_args_t args = {};
uword *p, *url_table;
+ session_t *ts;
+ u32 max_deq;
+ u8 *target_path;
int rv;
- if (!hsm->enable_url_handlers || !target_path)
+ target_path = hs->target_path;
+
+ if (!target_path)
return -1;
/* zero-length? try "index.html" */
@@ -266,28 +326,69 @@ try_url_handler (hss_main_t *hsm, hss_session_t *hs, http_req_method_t rt,
/* Look for built-in GET / POST handlers */
url_table =
- (rt == HTTP_REQ_GET) ? hsm->get_url_handlers : hsm->post_url_handlers;
+ (hs->rt == HTTP_REQ_GET) ? hsm->get_url_handlers : hsm->post_url_handlers;
p = hash_get_mem (url_table, target_path);
if (!p)
return -1;
+ hs->rx_buff = 0;
+
+ /* Read request body */
+ if (hs->left_recv)
+ {
+ hss_listener_t *l = hss_listener_get (hs->listener_index);
+ if (hs->left_recv > l->rx_buff_thresh)
+ {
+ /* TODO: large body (not buffered in memory) */
+ clib_warning ("data length %u above threshold %u", hs->left_recv,
+ l->rx_buff_thresh);
+ hs->left_recv = 0;
+ start_send_data (hs, HTTP_STATUS_INTERNAL_ERROR);
+ hss_session_disconnect_transport (hs);
+ return 0;
+ }
+ hs->rx_buff_offset = 0;
+ vec_validate (hs->rx_buff, hs->left_recv - 1);
+ ts = session_get (hs->vpp_session_index, hs->thread_index);
+ max_deq = svm_fifo_max_dequeue (ts->rx_fifo);
+ if (max_deq < hs->left_recv)
+ {
+ hs->read_body_handler = url_handler_read_body;
+ if (max_deq == 0)
+ return 0;
+ rv = svm_fifo_dequeue (ts->rx_fifo, max_deq, hs->rx_buff);
+ ASSERT (rv == max_deq);
+ hs->rx_buff_offset = max_deq;
+ hs->left_recv -= max_deq;
+ hss_confirm_data_read (hs, max_deq);
+ return 0;
+ }
+ rv = svm_fifo_dequeue (ts->rx_fifo, hs->left_recv,
+ hs->rx_buff + hs->rx_buff_offset);
+ ASSERT (rv == hs->left_recv);
+ hss_confirm_data_read (hs, hs->left_recv);
+ hs->left_recv = 0;
+ }
+
hs->path = 0;
hs->data_offset = 0;
hs->cache_pool_index = ~0;
if (hsm->debug_level > 0)
- clib_warning ("%s '%s'", (rt == HTTP_REQ_GET) ? "GET" : "POST",
+ clib_warning ("%s '%s'", (hs->rt == HTTP_REQ_GET) ? "GET" : "POST",
target_path);
- args.req_type = rt;
- args.query = target_query;
- args.req_data = data;
+ args.req_type = hs->rt;
+ args.query = hs->target_query;
+ args.req_data = hs->rx_buff;
args.sh.thread_index = hs->thread_index;
args.sh.session_index = hs->session_index;
rv = ((hss_url_handler_fn) p[0]) (&args);
+ vec_free (hs->rx_buff);
+
/* Wait for data from handler */
if (rv == HSS_URL_HANDLER_ASYNC)
return 0;
@@ -295,7 +396,7 @@ try_url_handler (hss_main_t *hsm, hss_session_t *hs, http_req_method_t rt,
if (rv == HSS_URL_HANDLER_ERROR)
{
clib_warning ("builtin handler %llx hit on %s '%s' but failed!", p[0],
- (rt == HTTP_REQ_GET) ? "GET" : "POST", target_path);
+ (hs->rt == HTTP_REQ_GET) ? "GET" : "POST", target_path);
sc = HTTP_STATUS_BAD_GATEWAY;
}
@@ -305,8 +406,9 @@ try_url_handler (hss_main_t *hsm, hss_session_t *hs, http_req_method_t rt,
/* Set content type only if we have some response data */
if (hs->data_len)
- http_add_header (&hs->resp_headers, HTTP_HEADER_CONTENT_TYPE,
- http_content_type_token (args.ct));
+ if (hss_add_header (hs, HTTP_HEADER_CONTENT_TYPE,
+ http_content_type_token (args.ct)))
+ sc = HTTP_STATUS_INTERNAL_ERROR;
start_send_data (hs, sc);
@@ -329,8 +431,9 @@ file_path_is_valid (u8 *path)
}
static u32
-try_index_file (hss_main_t *hsm, hss_session_t *hs, u8 *path)
+try_index_file (hss_listener_t *l, hss_session_t *hs, u8 *path)
{
+ hss_main_t *hsm = &hss_main;
u8 *port_str = 0, *redirect;
transport_endpoint_t endpt;
transport_proto_t proto;
@@ -358,7 +461,7 @@ try_index_file (hss_main_t *hsm, hss_session_t *hs, u8 *path)
/*
* We found an index.html file, build a redirect
*/
- vec_delete (path, vec_len (hsm->www_root) - 1, 0);
+ vec_delete (path, vec_len (l->www_root) - 1, 0);
ts = session_get (hs->vpp_session_index, hs->thread_index);
session_get_endpoint (ts, &endpt, 1 /* is_local */);
@@ -383,8 +486,10 @@ try_index_file (hss_main_t *hsm, hss_session_t *hs, u8 *path)
vec_free (port_str);
- http_add_header (&hs->resp_headers, HTTP_HEADER_LOCATION,
- (const char *) redirect, vec_len (redirect));
+ if (hss_add_header (hs, HTTP_HEADER_LOCATION, (const char *) redirect,
+ vec_len (redirect)))
+ return HTTP_STATUS_INTERNAL_ERROR;
+
vec_free (redirect);
hs->data_len = 0;
hs->free_data = 1;
@@ -393,39 +498,61 @@ try_index_file (hss_main_t *hsm, hss_session_t *hs, u8 *path)
}
static int
-try_file_handler (hss_main_t *hsm, hss_session_t *hs, http_req_method_t rt,
- u8 *target)
+try_file_handler (hss_session_t *hs)
{
+ hss_main_t *hsm = &hss_main;
http_status_code_t sc = HTTP_STATUS_OK;
u8 *path, *sanitized_path;
- u32 ce_index;
+ u32 ce_index, max_dequeue;
http_content_type_t type;
u8 *last_modified;
+ hss_listener_t *l;
+ session_t *ts;
+
+ l = hss_listener_get (hs->listener_index);
/* Feature not enabled */
- if (!hsm->www_root)
+ if (!l->www_root)
return -1;
- /* Remove dot segments to prevent path traversal */
- sanitized_path = http_path_remove_dot_segments (target);
+ /* Discard request body */
+ if (hs->left_recv)
+ {
+ ts = session_get (hs->vpp_session_index, hs->thread_index);
+ max_dequeue = svm_fifo_max_dequeue (ts->rx_fifo);
+ if (max_dequeue < hs->left_recv)
+ {
+ svm_fifo_dequeue_drop (ts->rx_fifo, max_dequeue);
+ hs->left_recv -= max_dequeue;
+ hs->read_body_handler = file_handler_discard_body;
+ hss_confirm_data_read (hs, max_dequeue);
+ return 0;
+ }
+ svm_fifo_dequeue_drop (ts->rx_fifo, hs->left_recv);
+ hss_confirm_data_read (hs, hs->left_recv);
+ hs->left_recv = 0;
+ }
+
+ /* Sanitize received path */
+ sanitized_path = http_path_sanitize (hs->target_path);
/*
* Construct the file to open
*/
- if (!target)
- path = format (0, "%s%c", hsm->www_root, 0);
+ if (!sanitized_path)
+ path = format (0, "%s%c", l->www_root, 0);
else
- path = format (0, "%s/%s%c", hsm->www_root, sanitized_path, 0);
+ path = format (0, "%s/%s%c", l->www_root, sanitized_path, 0);
if (hsm->debug_level > 0)
- clib_warning ("%s '%s'", (rt == HTTP_REQ_GET) ? "GET" : "POST", path);
+ clib_warning ("%s '%s'", (hs->rt == HTTP_REQ_GET) ? "GET" : "POST", path);
if (hs->data && hs->free_data)
vec_free (hs->data);
hs->data_offset = 0;
- ce_index = hss_cache_lookup_and_attach (&hsm->cache, path, &hs->data,
+ ce_index = hss_cache_lookup_and_attach (&l->cache, path, &hs->data,
&hs->data_len, &last_modified);
if (ce_index == ~0)
{
@@ -442,10 +569,10 @@ try_file_handler (hss_main_t *hsm, hss_session_t *hs, http_req_method_t rt,
sc = HTTP_STATUS_NOT_FOUND;
goto done;
}
- sc = try_index_file (hsm, hs, path);
+ sc = try_index_file (l, hs, path);
goto done;
}
- ce_index = hss_cache_add_and_attach (&hsm->cache, path, &hs->data,
+ ce_index = hss_cache_add_and_attach (&l->cache, path, &hs->data,
&hs->data_len, &last_modified);
if (ce_index == ~0)
{
@@ -462,14 +589,17 @@ try_file_handler (hss_main_t *hsm, hss_session_t *hs, http_req_method_t rt,
* Cache-Control max-age
* Last-Modified
*/
- type = content_type_from_request (target);
- http_add_header (&hs->resp_headers, HTTP_HEADER_CONTENT_TYPE,
- http_content_type_token (type));
- http_add_header (&hs->resp_headers, HTTP_HEADER_CACHE_CONTROL,
- (const char *) hsm->max_age_formatted,
- vec_len (hsm->max_age_formatted));
- http_add_header (&hs->resp_headers, HTTP_HEADER_LAST_MODIFIED,
- (const char *) last_modified, vec_len (last_modified));
+ type = content_type_from_request (sanitized_path);
+ if (hss_add_header (hs, HTTP_HEADER_CONTENT_TYPE,
+ http_content_type_token (type)) ||
+ hss_add_header (hs, HTTP_HEADER_CACHE_CONTROL,
+ (const char *) l->max_age_formatted,
+ vec_len (l->max_age_formatted)) ||
+ hss_add_header (hs, HTTP_HEADER_LAST_MODIFIED,
+ (const char *) last_modified, vec_len (last_modified)))
+ {
+ sc = HTTP_STATUS_INTERNAL_ERROR;
+ }
done:
vec_free (sanitized_path);
@@ -481,15 +611,23 @@ done:
}
static void
-handle_request (hss_session_t *hs, http_req_method_t rt, u8 *target_path,
- u8 *target_query, u8 *data)
+handle_request (hss_session_t *hs)
{
- hss_main_t *hsm = &hss_main;
+ hss_listener_t *l;
+
+ l = hss_listener_get (hs->listener_index);
+
+ if (hs->left_recv > l->max_body_size)
+ {
+ start_send_data (hs, HTTP_STATUS_CONTENT_TOO_LARGE);
+ hss_session_disconnect_transport (hs);
+ return;
+ }
- if (!try_url_handler (hsm, hs, rt, target_path, target_query, data))
+ if (l->enable_url_handlers && !try_url_handler (hs))
return;
- if (!try_file_handler (hsm, hs, rt, target_path))
+ if (!try_file_handler (hs))
return;
/* Handler did not find anything return 404 */
@@ -498,18 +636,60 @@ handle_request (hss_session_t *hs, http_req_method_t rt, u8 *target_path,
}
static int
+file_handler_discard_body (hss_session_t *hs, session_t *ts)
+{
+ u32 max_dequeue, to_discard;
+
+ max_dequeue = svm_fifo_max_dequeue (ts->rx_fifo);
+ to_discard = clib_min (max_dequeue, hs->left_recv);
+ svm_fifo_dequeue_drop (ts->rx_fifo, to_discard);
+ hs->left_recv -= to_discard;
+ hss_confirm_data_read (hs, to_discard);
+ if (hs->left_recv == 0)
+ return try_file_handler (hs);
+ return 0;
+}
+
+static int
+url_handler_read_body (hss_session_t *hs, session_t *ts)
+{
+ u32 max_dequeue, to_read;
+ int rv;
+
+ max_dequeue = svm_fifo_max_dequeue (ts->rx_fifo);
+ to_read = clib_min (max_dequeue, hs->left_recv);
+ rv =
+ svm_fifo_dequeue (ts->rx_fifo, to_read, hs->rx_buff + hs->rx_buff_offset);
+ ASSERT (rv == to_read);
+ hs->rx_buff_offset += to_read;
+ hs->left_recv -= to_read;
+ hss_confirm_data_read (hs, to_read);
+ if (hs->left_recv == 0)
+ return try_url_handler (hs);
+ return 0;
+}
+
+static int
hss_ts_rx_callback (session_t *ts)
{
- hss_main_t *hsm = &hss_main;
hss_session_t *hs;
- u8 *target_path = 0, *target_query = 0, *data = 0;
http_msg_t msg;
int rv;
hs = hss_session_get (ts->thread_index, ts->opaque);
+ if (hs->left_recv != 0)
+ {
+ ASSERT (hs->read_body_handler);
+ return hs->read_body_handler (hs, ts);
+ }
+
if (hs->free_data)
vec_free (hs->data);
+
hs->data = 0;
+ hs->data_len = 0;
+ vec_free (hs->target_path);
+ vec_free (hs->target_query);
http_init_headers_ctx (&hs->resp_headers, hs->headers_buf,
vec_len (hs->headers_buf));
@@ -520,72 +700,59 @@ hss_ts_rx_callback (session_t *ts)
if (msg.type != HTTP_MSG_REQUEST ||
(msg.method_type != HTTP_REQ_GET && msg.method_type != HTTP_REQ_POST))
{
- http_add_header (&hs->resp_headers, HTTP_HEADER_ALLOW,
- http_token_lit ("GET, POST"));
- start_send_data (hs, HTTP_STATUS_METHOD_NOT_ALLOWED);
+ if (hss_add_header (hs, HTTP_HEADER_ALLOW, http_token_lit ("GET, POST")))
+ start_send_data (hs, HTTP_STATUS_INTERNAL_ERROR);
+ else
+ start_send_data (hs, HTTP_STATUS_METHOD_NOT_ALLOWED);
goto err_done;
}
+ hs->rt = msg.method_type;
+
/* Read target path */
if (msg.data.target_path_len)
{
- vec_validate (target_path, msg.data.target_path_len - 1);
+ vec_validate (hs->target_path, msg.data.target_path_len - 1);
rv = svm_fifo_peek (ts->rx_fifo, msg.data.target_path_offset,
- msg.data.target_path_len, target_path);
+ msg.data.target_path_len, hs->target_path);
ASSERT (rv == msg.data.target_path_len);
- if (http_validate_abs_path_syntax (target_path, 0))
+ if (http_validate_abs_path_syntax (hs->target_path, 0))
{
start_send_data (hs, HTTP_STATUS_BAD_REQUEST);
goto err_done;
}
/* Target path must be a proper C-string in addition to a vector */
- vec_add1 (target_path, 0);
+ vec_add1 (hs->target_path, 0);
}
/* Read target query */
if (msg.data.target_query_len)
{
- vec_validate (target_query, msg.data.target_query_len - 1);
+ vec_validate (hs->target_query, msg.data.target_query_len - 1);
rv = svm_fifo_peek (ts->rx_fifo, msg.data.target_query_offset,
- msg.data.target_query_len, target_query);
+ msg.data.target_query_len, hs->target_query);
ASSERT (rv == msg.data.target_query_len);
- if (http_validate_query_syntax (target_query, 0))
+ if (http_validate_query_syntax (hs->target_query, 0))
{
start_send_data (hs, HTTP_STATUS_BAD_REQUEST);
goto err_done;
}
}
- /* Read request body for POST requests */
if (msg.data.body_len && msg.method_type == HTTP_REQ_POST)
{
- if (msg.data.body_len > hsm->max_body_size)
- {
- start_send_data (hs, HTTP_STATUS_CONTENT_TOO_LARGE);
- goto err_done;
- }
- if (svm_fifo_max_dequeue (ts->rx_fifo) - msg.data.body_offset <
- msg.data.body_len)
- {
- start_send_data (hs, HTTP_STATUS_INTERNAL_ERROR);
- goto err_done;
- }
- vec_validate (data, msg.data.body_len - 1);
- rv = svm_fifo_peek (ts->rx_fifo, msg.data.body_offset, msg.data.body_len,
- data);
- ASSERT (rv == msg.data.body_len);
+ hs->left_recv = msg.data.body_len;
+ /* drop everything up to body */
+ svm_fifo_dequeue_drop (ts->rx_fifo, msg.data.body_offset);
}
/* Find and send data */
- handle_request (hs, msg.method_type, target_path, target_query, data);
+ handle_request (hs);
goto done;
err_done:
hss_session_disconnect_transport (hs);
done:
- vec_free (target_path);
- vec_free (target_query);
- vec_free (data);
svm_fifo_dequeue_drop (ts->rx_fifo, msg.data.len);
return 0;
}
@@ -631,6 +798,7 @@ static int
hss_ts_accept_callback (session_t *ts)
{
hss_session_t *hs;
+ session_t *ls;
u32 thresh;
hs = hss_session_alloc (ts->thread_index);
@@ -638,6 +806,11 @@ hss_ts_accept_callback (session_t *ts)
hs->vpp_session_index = ts->session_index;
hs->vpp_session_handle = session_handle (ts);
+ /* Link to listener context */
+ ls = listen_session_get_from_handle (ts->listener_handle);
+ hs->listener_index = ls->opaque;
+ hs->use_ptr_thresh = hss_listener_get (hs->listener_index)->use_ptr_thresh;
+
/* The application sets a threshold for it's fifo to get notified when
* additional data can be enqueued. We want to keep the TX fifo reasonably
* full, however avoid entering a state where the
@@ -693,7 +866,6 @@ hss_add_segment_callback (u32 client_index, u64 segment_handle)
static void
hss_ts_cleanup (session_t *s, session_cleanup_ntf_t ntf)
{
- hss_main_t *hsm = &hss_main;
hss_session_t *hs;
if (ntf == SESSION_CLEANUP_TRANSPORT)
@@ -705,7 +877,9 @@ hss_ts_cleanup (session_t *s, session_cleanup_ntf_t ntf)
if (hs->cache_pool_index != ~0)
{
- hss_cache_detach_entry (&hsm->cache, hs->cache_pool_index);
+ hss_listener_t *l = hss_listener_get (hs->listener_index);
+ if (l)
+ hss_cache_detach_entry (&l->cache, hs->cache_pool_index);
hs->cache_pool_index = ~0;
}
@@ -716,6 +890,8 @@ hss_ts_cleanup (session_t *s, session_cleanup_ntf_t ntf)
hs->free_data = 0;
vec_free (hs->headers_buf);
vec_free (hs->path);
+ vec_free (hs->target_path);
+ vec_free (hs->target_query);
hss_session_free (hs);
}
@@ -788,30 +964,22 @@ hss_transport_needs_crypto (transport_proto_t proto)
}
static int
-hss_listen (void)
+hss_listen (hss_listener_t *l, session_handle_t *lh)
{
hss_main_t *hsm = &hss_main;
- session_endpoint_cfg_t sep = SESSION_ENDPOINT_CFG_NULL;
vnet_listen_args_t _a, *a = &_a;
- char *uri = "tcp://0.0.0.0/80";
u8 need_crypto;
transport_endpt_ext_cfg_t *ext_cfg;
int rv;
- transport_endpt_cfg_http_t http_cfg = { hsm->keepalive_timeout, 0 };
+ transport_endpt_cfg_http_t http_cfg = { l->keepalive_timeout, 0 };
clib_memset (a, 0, sizeof (*a));
a->app_index = hsm->app_index;
- if (hsm->uri)
- uri = (char *) hsm->uri;
-
- if (parse_uri (uri, &sep))
- return -1;
-
- need_crypto = hss_transport_needs_crypto (sep.transport_proto);
+ need_crypto = hss_transport_needs_crypto (l->sep.transport_proto);
- sep.transport_proto = TRANSPORT_PROTO_HTTP;
- clib_memcpy (&a->sep_ext, &sep, sizeof (sep));
+ l->sep.transport_proto = TRANSPORT_PROTO_HTTP;
+ clib_memcpy (&a->sep_ext, &l->sep, sizeof (l->sep));
ext_cfg = session_endpoint_add_ext_cfg (
&a->sep_ext, TRANSPORT_ENDPT_EXT_CFG_HTTP, sizeof (http_cfg));
@@ -825,7 +993,8 @@ hss_listen (void)
ext_cfg->crypto.ckpair_index = hsm->ckpair_index;
}
- rv = vnet_listen (a);
+ if (!(rv = vnet_listen (a)))
+ *lh = a->handle;
session_endpoint_free_ext_cfgs (&a->sep_ext);
@@ -835,13 +1004,75 @@ hss_listen (void)
static void
hss_url_handlers_init (hss_main_t *hsm)
{
- if (!hsm->get_url_handlers)
+ if (hsm->get_url_handlers)
+ return;
+
+ hsm->get_url_handlers = hash_create_string (0, sizeof (uword));
+ hsm->post_url_handlers = hash_create_string (0, sizeof (uword));
+ hss_builtinurl_json_handlers_init ();
+}
+
+int
+hss_listener_add (hss_listener_t *l_cfg)
+{
+ hss_main_t *hsm = &hss_main;
+ session_handle_t lh;
+ app_listener_t *al;
+ hss_listener_t *l;
+ session_t *ls;
+
+ if (hss_listen (l_cfg, &lh))
{
- hsm->get_url_handlers = hash_create_string (0, sizeof (uword));
- hsm->post_url_handlers = hash_create_string (0, sizeof (uword));
+ clib_warning ("failed to start listening");
+ return -1;
}
- hss_builtinurl_json_handlers_init ();
+ pool_get (hsm->listeners, l);
+ *l = *l_cfg;
+ l->l_index = l - hsm->listeners;
+ l->session_handle = lh;
+
+ al = app_listener_get_w_handle (lh);
+ ls = app_listener_get_session (al);
+ ls->opaque = l->l_index;
+
+ if (l->www_root)
+ hss_cache_init (&l->cache, l->cache_size, hsm->debug_level);
+ if (l->enable_url_handlers)
+ hss_url_handlers_init (hsm);
+
+ l->max_age_formatted = format (0, "max-age=%d", l->max_age);
+
+ return 0;
+}
+
+int
+hss_listener_del (hss_listener_t *l_cfg)
+{
+ hss_main_t *hsm = &hss_main;
+ hss_listener_t *l;
+ u8 found = 0;
+
+ pool_foreach (l, hsm->listeners)
+ {
+ if (clib_memcmp (&l_cfg->sep, &l->sep, sizeof (l_cfg->sep)) == 0)
+ {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ return -1;
+
+ vnet_unlisten_args_t args = { .handle = l->session_handle, hsm->app_index };
+
+ vec_free (l->www_root);
+ vec_free (l->max_age_formatted);
+ hss_cache_free (&l->cache);
+ pool_put (hsm->listeners, l);
+
+ return vnet_unlisten (&args);
}
int
@@ -854,24 +1085,25 @@ hss_create (vlib_main_t *vm)
num_threads = 1 /* main thread */ + vtm->n_threads;
vec_validate (hsm->sessions, num_threads - 1);
+ /* Make sure session layer is enabled */
+ session_enable_disable_args_t args = { .is_en = 1,
+ .rt_engine_type =
+ RT_BACKEND_ENGINE_RULE_TABLE };
+ vnet_session_enable_disable (vm, &args);
+
if (hss_attach ())
{
clib_warning ("failed to attach server");
return -1;
}
- if (hss_listen ())
+
+ if (hsm->have_default_listener && hss_listener_add (&hsm->default_listener))
{
clib_warning ("failed to start listening");
return -1;
}
- if (hsm->www_root)
- hss_cache_init (&hsm->cache, hsm->cache_size, hsm->debug_level);
-
- if (hsm->enable_url_handlers)
- hss_url_handlers_init (hsm);
-
- hsm->max_age_formatted = format (0, "max-age=%d", hsm->max_age);
+ hsm->is_init = 1;
return 0;
}
@@ -882,20 +1114,24 @@ hss_create_command_fn (vlib_main_t *vm, unformat_input_t *input,
{
unformat_input_t _line_input, *line_input = &_line_input;
hss_main_t *hsm = &hss_main;
+ hss_listener_t *l = &hsm->default_listener;
clib_error_t *error = 0;
+ char *uri = 0;
u64 seg_size;
int rv;
if (hsm->app_index != (u32) ~0)
- return clib_error_return (0, "http server already running...");
+ return clib_error_return (0, "http static server already initialized...");
hsm->prealloc_fifos = 0;
hsm->private_segment_size = 0;
hsm->fifo_size = 0;
- hsm->cache_size = 10 << 20;
- hsm->max_age = HSS_DEFAULT_MAX_AGE;
- hsm->max_body_size = HSS_DEFAULT_MAX_BODY_SIZE;
- hsm->keepalive_timeout = HSS_DEFAULT_KEEPALIVE_TIMEOUT;
+
+ l->cache_size = 10 << 20;
+ l->max_age = HSS_DEFAULT_MAX_AGE;
+ l->max_body_size = HSS_DEFAULT_MAX_BODY_SIZE;
+ l->rx_buff_thresh = HSS_DEFAULT_RX_BUFFER_THRESH;
+ l->keepalive_timeout = HSS_DEFAULT_KEEPALIVE_TIMEOUT;
/* Get a line of input. */
if (!unformat_user (input, unformat_line_input, line_input))
@@ -903,37 +1139,43 @@ hss_create_command_fn (vlib_main_t *vm, unformat_input_t *input,
while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
{
- if (unformat (line_input, "www-root %s", &hsm->www_root))
- ;
- else
- if (unformat (line_input, "prealloc-fifos %d", &hsm->prealloc_fifos))
- ;
- else if (unformat (line_input, "private-segment-size %U",
- unformat_memory_size, &seg_size))
+ /* Server config */
+ if (unformat (line_input, "private-segment-size %U",
+ unformat_memory_size, &seg_size))
hsm->private_segment_size = seg_size;
- else if (unformat (line_input, "fifo-size %d", &hsm->fifo_size))
- hsm->fifo_size <<= 10;
- else if (unformat (line_input, "cache-size %U", unformat_memory_size,
- &hsm->cache_size))
+ else if (unformat (line_input, "fifo-size %U", unformat_memory_size,
+ &hsm->fifo_size))
;
- else if (unformat (line_input, "uri %s", &hsm->uri))
+ else if (unformat (line_input, "prealloc-fifos %d",
+ &hsm->prealloc_fifos))
;
else if (unformat (line_input, "debug %d", &hsm->debug_level))
;
- else if (unformat (line_input, "keepalive-timeout %d",
- &hsm->keepalive_timeout))
- ;
else if (unformat (line_input, "debug"))
hsm->debug_level = 1;
- else if (unformat (line_input, "ptr-thresh %U", unformat_memory_size,
- &hsm->use_ptr_thresh))
+ /* Default listener parameters */
+ else if (unformat (line_input, "uri %s", &uri))
+ ;
+ else if (unformat (line_input, "www-root %s", &l->www_root))
;
else if (unformat (line_input, "url-handlers"))
- hsm->enable_url_handlers = 1;
- else if (unformat (line_input, "max-age %d", &hsm->max_age))
+ l->enable_url_handlers = 1;
+ else if (unformat (line_input, "cache-size %U", unformat_memory_size,
+ &l->cache_size))
+ ;
+ else if (unformat (line_input, "max-age %d", &l->max_age))
;
else if (unformat (line_input, "max-body-size %U", unformat_memory_size,
- &hsm->max_body_size))
+ &l->max_body_size))
+ ;
+ else if (unformat (line_input, "rx-buff-thresh %U", unformat_memory_size,
+ &l->rx_buff_thresh))
+ ;
+ else if (unformat (line_input, "keepalive-timeout %d",
+ &l->keepalive_timeout))
+ ;
+ else if (unformat (line_input, "ptr-thresh %U", unformat_memory_size,
+ &l->use_ptr_thresh))
;
else
{
@@ -950,28 +1192,33 @@ no_input:
if (error)
goto done;
- if (hsm->www_root == 0 && !hsm->enable_url_handlers)
+ if (l->www_root)
{
- error = clib_error_return (0, "Must set www-root or url-handlers");
- goto done;
+ /* Maintain legacy default uri behavior */
+ if (!uri)
+ uri = "tcp://0.0.0.0:80";
+ if (l->cache_size < (128 << 10))
+ {
+ error = clib_error_return (0, "cache-size must be at least 128kb");
+ vec_free (l->www_root);
+ goto done;
+ }
}
- if (hsm->cache_size < (128 << 10))
+ if (uri)
{
- error = clib_error_return (0, "cache-size must be at least 128kb");
- vec_free (hsm->www_root);
- goto done;
+ if (parse_uri (uri, &l->sep))
+ {
+ error = clib_error_return (0, "failed to parse uri %s", uri);
+ goto done;
+ }
+ hsm->have_default_listener = 1;
}
- session_enable_disable_args_t args = { .is_en = 1,
- .rt_engine_type =
- RT_BACKEND_ENGINE_RULE_TABLE };
- vnet_session_enable_disable (vm, &args);
-
if ((rv = hss_create (vm)))
{
error = clib_error_return (0, "server_create returned %d", rv);
- vec_free (hsm->www_root);
+ vec_free (l->www_root);
}
done:
@@ -995,13 +1242,123 @@ done:
VLIB_CLI_COMMAND (hss_create_command, static) = {
.path = "http static server",
.short_help =
- "http static server www-root <path> [prealloc-fifos <nn>]\n"
+ "http static server [www-root <path>] [url-handlers]\n"
"[private-segment-size <nnMG>] [fifo-size <nbytes>] [max-age <nseconds>]\n"
- "[uri <uri>] [ptr-thresh <nn>] [url-handlers] [debug [nn]]\n"
+ "[uri <uri>] [ptr-thresh <nn>] [prealloc-fifos <nn>] [debug [nn]]\n"
"[keepalive-timeout <nn>] [max-body-size <nn>]\n",
.function = hss_create_command_fn,
};
+static clib_error_t *
+hss_add_del_listener_command_fn (vlib_main_t *vm, unformat_input_t *input,
+ vlib_cli_command_t *cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ hss_main_t *hsm = &hss_main;
+ clib_error_t *error = 0;
+ hss_listener_t _l = {}, *l = &_l;
+ u8 is_add = 1;
+ char *uri = 0;
+
+ if (!hsm->is_init)
+ return clib_error_return (0, "Static server not initialized");
+
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return clib_error_return (0, "No input provided");
+
+ l->cache_size = 10 << 20;
+ l->max_age = HSS_DEFAULT_MAX_AGE;
+ l->max_body_size = HSS_DEFAULT_MAX_BODY_SIZE;
+ l->rx_buff_thresh = HSS_DEFAULT_RX_BUFFER_THRESH;
+ l->keepalive_timeout = HSS_DEFAULT_KEEPALIVE_TIMEOUT;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "add"))
+ is_add = 1;
+ else if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "uri %s", &uri))
+ ;
+ else if (unformat (line_input, "www-root %s", &l->www_root))
+ ;
+ else if (unformat (line_input, "url-handlers"))
+ l->enable_url_handlers = 1;
+ else if (unformat (line_input, "cache-size %U", unformat_memory_size,
+ &l->cache_size))
+ ;
+ else if (unformat (line_input, "keepalive-timeout %d",
+ &l->keepalive_timeout))
+ ;
+ else if (unformat (line_input, "ptr-thresh %U", unformat_memory_size,
+ &l->use_ptr_thresh))
+ ;
+ else if (unformat (line_input, "max-age %d", &l->max_age))
+ ;
+ else if (unformat (line_input, "max-body-size %U", unformat_memory_size,
+ &l->max_body_size))
+ ;
+ else if (unformat (line_input, "rx-buff-thresh %U", unformat_memory_size,
+ &l->rx_buff_thresh))
+ ;
+ else
+ {
+ error = clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, line_input);
+ break;
+ }
+ }
+ unformat_free (line_input);
+
+ if (!uri)
+ {
+ error = clib_error_return (0, "Must set uri");
+ goto done;
+ }
+
+ if (parse_uri (uri, &l->sep))
+ {
+ error = clib_error_return (0, "failed to parse uri %s", uri);
+ goto done;
+ }
+
+ if (!is_add)
+ {
+ hss_listener_del (l);
+ goto done;
+ }
+
+ if (l->www_root == 0 && !l->enable_url_handlers)
+ {
+ error = clib_error_return (0, "Must set www-root or url-handlers");
+ goto done;
+ }
+
+ if (l->cache_size < (128 << 10))
+ {
+ error = clib_error_return (0, "cache-size must be at least 128kb");
+ goto done;
+ }
+
+ if (hss_listener_add (l))
+ {
+ error = clib_error_return (0, "failed to create listener");
+ goto done;
+ }
+
+done:
+
+ vec_free (uri);
+ return error;
+}
+
+VLIB_CLI_COMMAND (hss_add_del_listener_command, static) = {
+ .path = "http static listener",
+ .short_help = "http static listener [add|del] uri <uri>\n"
+ "[www-root <path>] [url-handlers] \n",
+ .function = hss_add_del_listener_command_fn,
+};
+
static u8 *
format_hss_session (u8 *s, va_list *args)
{
@@ -1014,14 +1371,29 @@ format_hss_session (u8 *s, va_list *args)
return s;
}
+static u8 *
+format_hss_listener (u8 *s, va_list *args)
+{
+ hss_listener_t *l = va_arg (*args, hss_listener_t *);
+ int __clib_unused verbose = va_arg (*args, int);
+
+ s = format (
+ s, "listener %d, uri %U:%u, www-root %s, cache-size %U url-handlers %d",
+ l->l_index, format_ip46_address, &l->sep.ip, l->sep.is_ip4,
+ clib_net_to_host_u16 (l->sep.port), l->www_root, format_memory_size,
+ l->cache_size, l->enable_url_handlers);
+ return s;
+}
+
static clib_error_t *
hss_show_command_fn (vlib_main_t *vm, unformat_input_t *input,
vlib_cli_command_t *cmd)
{
- int verbose = 0, show_cache = 0, show_sessions = 0;
+ int verbose = 0, show_cache = 0, show_sessions = 0, show_listeners = 0;
+ u32 l_index = 0;
hss_main_t *hsm = &hss_main;
- if (hsm->www_root == 0)
+ if (!hsm->is_init)
return clib_error_return (0, "Static server disabled");
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
@@ -1032,17 +1404,26 @@ hss_show_command_fn (vlib_main_t *vm, unformat_input_t *input,
verbose = 1;
else if (unformat (input, "cache"))
show_cache = 1;
+ else if (unformat (input, "cache %u", &l_index))
+ show_cache = 1;
else if (unformat (input, "sessions"))
show_sessions = 1;
+ else if (unformat (input, "listeners"))
+ show_listeners = 1;
else
break;
}
- if ((show_cache + show_sessions) == 0)
+ if ((show_cache + show_sessions + show_listeners) == 0)
return clib_error_return (0, "specify one or more of cache, sessions");
if (show_cache)
- vlib_cli_output (vm, "%U", format_hss_cache, &hsm->cache, verbose);
+ {
+ hss_listener_t *l = hss_listener_get (l_index);
+ if (l == 0)
+ return clib_error_return (0, "listener %d not found", l_index);
+ vlib_cli_output (vm, "%U", format_hss_cache, &l->cache, verbose);
+ }
if (show_sessions)
{
@@ -1067,6 +1448,15 @@ hss_show_command_fn (vlib_main_t *vm, unformat_input_t *input,
}
vec_free (session_indices);
}
+
+ if (show_listeners)
+ {
+ hss_listener_t *l;
+ pool_foreach (l, hsm->listeners)
+ {
+ vlib_cli_output (vm, "%U", format_hss_listener, l, verbose);
+ }
+ }
return 0;
}
@@ -1082,7 +1472,8 @@ hss_show_command_fn (vlib_main_t *vm, unformat_input_t *input,
?*/
VLIB_CLI_COMMAND (hss_show_command, static) = {
.path = "show http static server",
- .short_help = "show http static server sessions cache [verbose [<nn>]]",
+ .short_help = "show http static server [sessions] [cache] [listeners] "
+ "[verbose [<nn>]]",
.function = hss_show_command_fn,
};
@@ -1091,12 +1482,28 @@ hss_clear_cache_command_fn (vlib_main_t *vm, unformat_input_t *input,
vlib_cli_command_t *cmd)
{
hss_main_t *hsm = &hss_main;
- u32 busy_items = 0;
+ u32 busy_items = 0, l_index = 0;
+ hss_listener_t *l;
- if (hsm->www_root == 0)
+ if (!hsm->is_init)
return clib_error_return (0, "Static server disabled");
- busy_items = hss_cache_clear (&hsm->cache);
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "index %u", &l_index))
+ ;
+ else
+ {
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ }
+
+ l = hss_listener_get (l_index);
+ if (l == 0)
+ return clib_error_return (0, "listener %d not found", l_index);
+
+ busy_items = hss_cache_clear (&l->cache);
if (busy_items > 0)
vlib_cli_output (vm, "Note: %d busy items still in cache...", busy_items);
@@ -1118,7 +1525,7 @@ hss_clear_cache_command_fn (vlib_main_t *vm, unformat_input_t *input,
?*/
VLIB_CLI_COMMAND (clear_hss_cache_command, static) = {
.path = "clear http static cache",
- .short_help = "clear http static cache",
+ .short_help = "clear http static cache [index <index>]",
.function = hss_clear_cache_command_fn,
};
diff --git a/src/plugins/ikev2/ikev2.c b/src/plugins/ikev2/ikev2.c
index 94de4f81b0e..c8183feddfd 100644
--- a/src/plugins/ikev2/ikev2.c
+++ b/src/plugins/ikev2/ikev2.c
@@ -2223,7 +2223,7 @@ ikev2_create_tunnel_interface (vlib_main_t *vm, ikev2_sa_t *sa,
ikev2_child_sa_t *child, u32 sa_index,
u32 child_index, u8 is_rekey, u8 kex)
{
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
ikev2_main_t *km = &ikev2_main;
ipsec_crypto_alg_t encr_type;
ipsec_integ_alg_t integ_type;
@@ -3190,7 +3190,7 @@ ikev2_node_internal (vlib_main_t *vm, vlib_node_runtime_t *node,
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
ikev2_main_per_thread_data_t *ptd = ikev2_get_per_thread_data ();
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
ikev2_stats_t _stats, *stats = &_stats;
int res;
diff --git a/src/plugins/ikev2/ikev2_priv.h b/src/plugins/ikev2/ikev2_priv.h
index 2751657bff9..58da36d9d59 100644
--- a/src/plugins/ikev2/ikev2_priv.h
+++ b/src/plugins/ikev2/ikev2_priv.h
@@ -661,7 +661,7 @@ clib_error_t *ikev2_profile_natt_disable (u8 * name);
static_always_inline ikev2_main_per_thread_data_t *
ikev2_get_per_thread_data ()
{
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
return vec_elt_at_index (ikev2_main.per_thread_data, thread_index);
}
#endif /* __included_ikev2_priv_h__ */
diff --git a/src/plugins/ioam/export-vxlan-gpe/vxlan_gpe_ioam_export.c b/src/plugins/ioam/export-vxlan-gpe/vxlan_gpe_ioam_export.c
index 1606f72224f..ca6483b3329 100644
--- a/src/plugins/ioam/export-vxlan-gpe/vxlan_gpe_ioam_export.c
+++ b/src/plugins/ioam/export-vxlan-gpe/vxlan_gpe_ioam_export.c
@@ -21,7 +21,7 @@
#include <vnet/vnet.h>
#include <vnet/plugin/plugin.h>
#include <ioam/export-common/ioam_export.h>
-#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <plugins/vxlan-gpe/vxlan_gpe.h>
#include <vlibapi/api.h>
#include <vlibmemory/api.h>
diff --git a/src/plugins/ioam/export-vxlan-gpe/vxlan_gpe_node.c b/src/plugins/ioam/export-vxlan-gpe/vxlan_gpe_node.c
index 839fd80b443..17084767c1e 100644
--- a/src/plugins/ioam/export-vxlan-gpe/vxlan_gpe_node.c
+++ b/src/plugins/ioam/export-vxlan-gpe/vxlan_gpe_node.c
@@ -16,8 +16,8 @@
#include <vnet/vnet.h>
#include <vppinfra/error.h>
#include <vnet/ip/ip.h>
-#include <vnet/vxlan-gpe/vxlan_gpe.h>
-#include <vnet/vxlan-gpe/vxlan_gpe_packet.h>
+#include <plugins/vxlan-gpe/vxlan_gpe.h>
+#include <plugins/vxlan-gpe/vxlan_gpe_packet.h>
#include <ioam/export-common/ioam_export.h>
typedef struct
diff --git a/src/plugins/ioam/ip6/ioam_cache_tunnel_select_node.c b/src/plugins/ioam/ip6/ioam_cache_tunnel_select_node.c
index 61476ebd85c..a4deae2ca60 100644
--- a/src/plugins/ioam/ip6/ioam_cache_tunnel_select_node.c
+++ b/src/plugins/ioam/ip6/ioam_cache_tunnel_select_node.c
@@ -646,7 +646,7 @@ vlib_node_registration_t ioam_cache_ts_timer_tick_node;
typedef struct
{
- u32 thread_index;
+ clib_thread_index_t thread_index;
} ioam_cache_ts_timer_tick_trace_t;
/* packet trace format function */
@@ -696,7 +696,7 @@ expired_cache_ts_timer_callback (u32 * expired_timers)
ioam_cache_main_t *cm = &ioam_cache_main;
int i;
u32 pool_index;
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
u32 count = 0;
for (i = 0; i < vec_len (expired_timers); i++)
diff --git a/src/plugins/ioam/lib-vxlan-gpe/ioam_decap.c b/src/plugins/ioam/lib-vxlan-gpe/ioam_decap.c
index 801faa98066..d8d52e9f0a1 100644
--- a/src/plugins/ioam/lib-vxlan-gpe/ioam_decap.c
+++ b/src/plugins/ioam/lib-vxlan-gpe/ioam_decap.c
@@ -17,8 +17,7 @@
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
-#include <vnet/vxlan-gpe/vxlan_gpe.h>
-#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <plugins/vxlan-gpe/vxlan_gpe.h>
#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam_packet.h>
#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>
#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam_util.h>
@@ -68,8 +67,8 @@ vxlan_gpe_decap_ioam (vlib_main_t * vm,
vlib_frame_t * from_frame, u8 is_ipv6)
{
u32 n_left_from, next_index, *from, *to_next;
- vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+ vxlan_gpe_main_t *ngm = hm->gpe_main;
from = vlib_frame_vector_args (from_frame);
n_left_from = from_frame->n_vectors;
diff --git a/src/plugins/ioam/lib-vxlan-gpe/ioam_encap.c b/src/plugins/ioam/lib-vxlan-gpe/ioam_encap.c
index de375df4f7c..9c742d8c293 100644
--- a/src/plugins/ioam/lib-vxlan-gpe/ioam_encap.c
+++ b/src/plugins/ioam/lib-vxlan-gpe/ioam_encap.c
@@ -17,7 +17,7 @@
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
-#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <plugins/vxlan-gpe/vxlan_gpe.h>
#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam_packet.h>
#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>
#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam_util.h>
@@ -71,7 +71,8 @@ vxlan_gpe_encap_ioam_v4 (vlib_main_t * vm,
vlib_frame_t * from_frame)
{
u32 n_left_from, next_index, *from, *to_next;
- vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
+ vxlan_gpe_ioam_main_t *sm = &vxlan_gpe_ioam_main;
+ vxlan_gpe_main_t *ngm = sm->gpe_main;
from = vlib_frame_vector_args (from_frame);
n_left_from = from_frame->n_vectors;
diff --git a/src/plugins/ioam/lib-vxlan-gpe/ioam_pop.c b/src/plugins/ioam/lib-vxlan-gpe/ioam_pop.c
index 2fa0aa29450..a80662b9d12 100644
--- a/src/plugins/ioam/lib-vxlan-gpe/ioam_pop.c
+++ b/src/plugins/ioam/lib-vxlan-gpe/ioam_pop.c
@@ -17,7 +17,7 @@
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
-#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <plugins/vxlan-gpe/vxlan_gpe.h>
#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>
/* Statistics (not really errors) */
@@ -231,7 +231,8 @@ vxlan_gpe_pop_ioam (vlib_main_t * vm,
vlib_frame_t * from_frame, u8 is_ipv6)
{
u32 n_left_from, next_index, *from, *to_next;
- vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
+ vxlan_gpe_ioam_main_t *sm = &vxlan_gpe_ioam_main;
+ vxlan_gpe_main_t *ngm = sm->gpe_main;
from = vlib_frame_vector_args (from_frame);
n_left_from = from_frame->n_vectors;
diff --git a/src/plugins/ioam/lib-vxlan-gpe/ioam_transit.c b/src/plugins/ioam/lib-vxlan-gpe/ioam_transit.c
index e3c82725e26..02233cf9841 100644
--- a/src/plugins/ioam/lib-vxlan-gpe/ioam_transit.c
+++ b/src/plugins/ioam/lib-vxlan-gpe/ioam_transit.c
@@ -18,7 +18,7 @@
#include <vnet/ip/ip.h>
#include <vnet/udp/udp_local.h>
#include <vnet/ethernet/ethernet.h>
-#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <plugins/vxlan-gpe/vxlan_gpe.h>
#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam_packet.h>
#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>
#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam_util.h>
diff --git a/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_api.c b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_api.c
index d61832d975a..6de1760b6b7 100644
--- a/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_api.c
+++ b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_api.c
@@ -80,9 +80,9 @@ static void vl_api_vxlan_gpe_ioam_vni_enable_t_handler
clib_error_t *error;
vxlan4_gpe_tunnel_key_t key4;
uword *p = NULL;
- vxlan_gpe_main_t *gm = &vxlan_gpe_main;
vxlan_gpe_tunnel_t *t = 0;
vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+ vxlan_gpe_main_t *gm = hm->gpe_main;
u32 vni;
@@ -130,7 +130,8 @@ static void vl_api_vxlan_gpe_ioam_vni_disable_t_handler
clib_error_t *error;
vxlan4_gpe_tunnel_key_t key4;
uword *p = NULL;
- vxlan_gpe_main_t *gm = &vxlan_gpe_main;
+ vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+ vxlan_gpe_main_t *gm = hm->gpe_main;
vxlan_gpe_tunnel_t *t = 0;
u32 vni;
@@ -214,6 +215,13 @@ ioam_vxlan_gpe_init (vlib_main_t * vm)
vlib_node_t *vxlan_gpe_decap_node = NULL;
uword next_node = 0;
+ sm->gpe_main =
+ vlib_get_plugin_symbol ("vxlan-gpe_plugin.so", "vxlan_gpe_main");
+ if (sm->gpe_main == 0)
+ {
+ return clib_error_return (0, "vxlan-gpe_plugin.so is not loaded");
+ }
+
sm->vlib_main = vm;
sm->vnet_main = vnet_get_main ();
sm->unix_time_0 = (u32) time (0); /* Store starting time */
@@ -231,7 +239,7 @@ ioam_vxlan_gpe_init (vlib_main_t * vm)
vlib_get_node_by_name (vm, (u8 *) "vxlan4-gpe-input");
next_node =
vlib_node_add_next (vm, vxlan_gpe_decap_node->index, decap_node_index);
- vxlan_gpe_register_decap_protocol (VXLAN_GPE_PROTOCOL_IOAM, next_node);
+ sm->gpe_main->register_decap_protocol (VXLAN_GPE_PROTOCOL_IOAM, next_node);
vec_new (vxlan_gpe_ioam_sw_interface_t, pool_elts (sm->sw_interfaces));
sm->dst_by_ip4 = hash_create_mem (0, sizeof (fib_prefix_t), sizeof (uword));
@@ -243,7 +251,9 @@ ioam_vxlan_gpe_init (vlib_main_t * vm)
return 0;
}
-VLIB_INIT_FUNCTION (ioam_vxlan_gpe_init);
+VLIB_INIT_FUNCTION (ioam_vxlan_gpe_init) = {
+ .runs_after = VLIB_INITS ("vxlan_gpe_init"),
+};
/*
* fd.io coding-style-patch-verification: ON
diff --git a/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.c b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.c
index 327afc3fb61..f83c6e1ecc3 100644
--- a/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.c
+++ b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.c
@@ -12,8 +12,8 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#include <vnet/vxlan-gpe/vxlan_gpe.h>
-#include <vnet/vxlan-gpe/vxlan_gpe_packet.h>
+#include <plugins/vxlan-gpe/vxlan_gpe.h>
+#include <plugins/vxlan-gpe/vxlan_gpe_packet.h>
#include <vnet/ip/format.h>
#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>
#include <vnet/dpo/load_balance.h>
@@ -423,7 +423,7 @@ vxlan_gpe_set_ioam_rewrite_command_fn (vlib_main_t *
vxlan4_gpe_tunnel_key_t key4;
vxlan6_gpe_tunnel_key_t key6;
uword *p;
- vxlan_gpe_main_t *gm = &vxlan_gpe_main;
+ vxlan_gpe_main_t *gm = hm->gpe_main;
vxlan_gpe_tunnel_t *t = 0;
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
diff --git a/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h
index 0711b87abbe..f9374c9bb95 100644
--- a/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h
+++ b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h
@@ -15,12 +15,11 @@
#ifndef __included_vxlan_gpe_ioam_h__
#define __included_vxlan_gpe_ioam_h__
-#include <vnet/vxlan-gpe/vxlan_gpe.h>
-#include <vnet/vxlan-gpe/vxlan_gpe_packet.h>
+#include <plugins/vxlan-gpe/vxlan_gpe.h>
+#include <plugins/vxlan-gpe/vxlan_gpe_packet.h>
#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam_packet.h>
#include <vnet/ip/ip.h>
-
typedef struct vxlan_gpe_sw_interface_
{
u32 sw_if_index;
@@ -100,7 +99,8 @@ typedef struct vxlan_gpe_ioam_main_
vlib_main_t *vlib_main;
/** State convenience vnet_main_t */
vnet_main_t *vnet_main;
-
+ /** State convenience vxlan_gpe_main_t */
+ vxlan_gpe_main_t *gpe_main;
} vxlan_gpe_ioam_main_t;
extern vxlan_gpe_ioam_main_t vxlan_gpe_ioam_main;
diff --git a/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_packet.h b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_packet.h
index a7ef859ec58..515529ce794 100644
--- a/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_packet.h
+++ b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_packet.h
@@ -15,8 +15,8 @@
#ifndef __included_vxlan_gpe_ioam_packet_h__
#define __included_vxlan_gpe_ioam_packet_h__
-#include <vnet/vxlan-gpe/vxlan_gpe.h>
-#include <vnet/vxlan-gpe/vxlan_gpe_packet.h>
+#include <plugins/vxlan-gpe/vxlan_gpe.h>
+#include <plugins/vxlan-gpe/vxlan_gpe_packet.h>
#include <vnet/ip/ip.h>
diff --git a/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_trace.c b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_trace.c
index 9c783c747d0..9b1b8b824ff 100644
--- a/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_trace.c
+++ b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_trace.c
@@ -16,8 +16,8 @@
#include <vnet/vnet.h>
#include <vppinfra/error.h>
-#include <vnet/vxlan-gpe/vxlan_gpe.h>
-#include <vnet/vxlan-gpe/vxlan_gpe_packet.h>
+#include <plugins/vxlan-gpe/vxlan_gpe.h>
+#include <plugins/vxlan-gpe/vxlan_gpe_packet.h>
#include <vppinfra/hash.h>
#include <vppinfra/error.h>
diff --git a/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_util.h b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_util.h
index c0ad8d9d03a..db7fd5651b1 100644
--- a/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_util.h
+++ b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_util.h
@@ -15,8 +15,8 @@
#ifndef __included_vxlan_gpe_ioam_util_h__
#define __included_vxlan_gpe_ioam_util_h__
-#include <vnet/vxlan-gpe/vxlan_gpe.h>
-#include <vnet/vxlan-gpe/vxlan_gpe_packet.h>
+#include <plugins/vxlan-gpe/vxlan_gpe.h>
+#include <plugins/vxlan-gpe/vxlan_gpe_packet.h>
#include <vnet/ip/ip.h>
diff --git a/src/plugins/ip_session_redirect/api.c b/src/plugins/ip_session_redirect/api.c
index 1d17d55b5b4..5c7bc65771f 100644
--- a/src/plugins/ip_session_redirect/api.c
+++ b/src/plugins/ip_session_redirect/api.c
@@ -12,8 +12,10 @@
* limitations under the License. */
#include <vlib/vlib.h>
+#include <vnet/fib/fib_path_list.h>
#include <vnet/fib/fib_api.h>
#include <vnet/ip/ip_format_fns.h>
+#include <vnet/classify/vnet_classify.h>
#include <vlibmemory/api.h>
#include <vlibapi/api.h>
@@ -105,6 +107,68 @@ vl_api_ip_session_redirect_del_t_handler (vl_api_ip_session_redirect_del_t *mp)
REPLY_MACRO (VL_API_IP_SESSION_REDIRECT_DEL_REPLY);
}
+static void
+send_ip_session_redirect_details (vl_api_registration_t *reg, u32 table_index,
+ u32 context)
+{
+ ip_session_redirect_main_t *im = &ip_session_redirect_main;
+ ip_session_redirect_t *ipr;
+ ip_session_redirect_t *iprs = im->pool;
+
+ pool_foreach (ipr, iprs)
+ {
+ if (~0 == table_index || ipr->table_index == table_index)
+ {
+ vl_api_ip_session_redirect_details_t *rmp;
+ vl_api_fib_path_t *fp;
+ fib_route_path_t *rpath;
+ fib_path_encode_ctx_t walk_ctx = {
+ .rpaths = NULL,
+ };
+ u8 n_paths = fib_path_list_get_n_paths (ipr->pl);
+ /* match_len is computed without table index at the end of the match
+ * string */
+ u32 match_len = vec_len (ipr->match_and_table_index) - 4;
+
+ rmp = vl_msg_api_alloc_zero (sizeof (*rmp) +
+ sizeof (rmp->paths[0]) * n_paths);
+ rmp->_vl_msg_id =
+ ntohs (REPLY_MSG_ID_BASE + VL_API_IP_SESSION_REDIRECT_DETAILS);
+ rmp->context = context;
+ rmp->opaque_index = htonl (ipr->opaque_index);
+ rmp->table_index = htonl (ipr->table_index);
+ rmp->match_length = htonl (match_len);
+ rmp->is_punt = ipr->is_punt;
+ rmp->is_ip6 = ipr->is_ip6;
+ clib_memcpy (rmp->match, ipr->match_and_table_index, match_len);
+ rmp->n_paths = n_paths;
+ fp = rmp->paths;
+ rmp->retval = 0;
+ fib_path_list_walk_w_ext (ipr->pl, NULL, fib_path_encode, &walk_ctx);
+ vec_foreach (rpath, walk_ctx.rpaths)
+ {
+ fib_api_path_encode (rpath, fp);
+ fp++;
+ }
+
+ vl_api_send_msg (reg, (u8 *) rmp);
+ }
+ }
+}
+
+static void
+vl_api_ip_session_redirect_dump_t_handler (
+ vl_api_ip_session_redirect_dump_t *mp)
+{
+ vl_api_registration_t *reg;
+ u32 table_index = ntohl (mp->table_index);
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (reg == 0)
+ return;
+
+ send_ip_session_redirect_details (reg, table_index, mp->context);
+}
+
#include "ip_session_redirect.api.c"
static clib_error_t *
ip_session_redirect_plugin_api_hookup (vlib_main_t *vm)
diff --git a/src/plugins/ip_session_redirect/ip_session_redirect.api b/src/plugins/ip_session_redirect/ip_session_redirect.api
index 2bf2373dbd2..769d15b4751 100644
--- a/src/plugins/ip_session_redirect/ip_session_redirect.api
+++ b/src/plugins/ip_session_redirect/ip_session_redirect.api
@@ -99,6 +99,47 @@ autoreply define ip_session_redirect_del
option status="in_progress";
};
+/** \brief Dump available session redirections
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param table_index - classifier table index
+*/
+
+define ip_session_redirect_dump
+{
+ u32 client_index;
+ u32 context;
+ u32 table_index;
+};
+
+/** \brief Session redirection operational state response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+ @param table_index - classifier table index
+ @param opaque_index - classifier session opaque index
+ @param is_punt - true = punted traffic, false = forwarded traffic
+ @param is_ip6 - true = payload proto is ip6, false = payload proto is ip4
+ @param match_length - classifier session match length in bytes (max is 80-bytes)
+ @param match - classifier session match
+ @param n_paths - number of paths
+ @param paths - the paths of the redirect
+*/
+
+define ip_session_redirect_details
+{
+ u32 context;
+ i32 retval;
+ u32 table_index;
+ u32 opaque_index;
+ bool is_punt;
+ bool is_ip6;
+ u32 match_length;
+ u8 match[80];
+ u8 n_paths;
+ vl_api_fib_path_t paths[n_paths];
+};
+
+
/*
* Local Variables:
* eval: (c-set-style "gnu")
diff --git a/src/plugins/ip_session_redirect/ip_session_redirect.h b/src/plugins/ip_session_redirect/ip_session_redirect.h
index 45f64eebba1..800527618f4 100644
--- a/src/plugins/ip_session_redirect/ip_session_redirect.h
+++ b/src/plugins/ip_session_redirect/ip_session_redirect.h
@@ -16,6 +16,30 @@
#include <vnet/fib/fib_node.h>
+typedef struct
+{
+ u8 *match_and_table_index;
+ dpo_id_t dpo; /* forwarding dpo */
+ fib_node_t node; /* linkage into the FIB graph */
+ fib_node_index_t pl;
+ u32 sibling;
+ u32 parent_node_index;
+ u32 opaque_index;
+ u32 table_index;
+ fib_forward_chain_type_t payload_type;
+ u8 is_punt : 1;
+ u8 is_ip6 : 1;
+} ip_session_redirect_t;
+
+typedef struct
+{
+ ip_session_redirect_t *pool;
+ u32 *session_by_match_and_table_index;
+ fib_node_type_t fib_node_type;
+} ip_session_redirect_main_t;
+
+extern ip_session_redirect_main_t ip_session_redirect_main;
+
int ip_session_redirect_add (vlib_main_t *vm, u32 table_index,
u32 opaque_index, dpo_proto_t proto, int is_punt,
const u8 *match, const fib_route_path_t *rpaths);
diff --git a/src/plugins/ip_session_redirect/redirect.c b/src/plugins/ip_session_redirect/redirect.c
index ea18182e309..b8442ef8c67 100644
--- a/src/plugins/ip_session_redirect/redirect.c
+++ b/src/plugins/ip_session_redirect/redirect.c
@@ -18,29 +18,7 @@
#include <vpp/app/version.h>
#include "ip_session_redirect.h"
-typedef struct
-{
- u8 *match_and_table_index;
- dpo_id_t dpo; /* forwarding dpo */
- fib_node_t node; /* linkage into the FIB graph */
- fib_node_index_t pl;
- u32 sibling;
- u32 parent_node_index;
- u32 opaque_index;
- u32 table_index;
- fib_forward_chain_type_t payload_type;
- u8 is_punt : 1;
- u8 is_ip6 : 1;
-} ip_session_redirect_t;
-
-typedef struct
-{
- ip_session_redirect_t *pool;
- u32 *session_by_match_and_table_index;
- fib_node_type_t fib_node_type;
-} ip_session_redirect_main_t;
-
-static ip_session_redirect_main_t ip_session_redirect_main;
+ip_session_redirect_main_t ip_session_redirect_main;
static int
ip_session_redirect_stack (ip_session_redirect_t *ipr)
diff --git a/src/plugins/ip_session_redirect/test_api.c b/src/plugins/ip_session_redirect/test_api.c
index e4026a673ff..850a61d5f32 100644
--- a/src/plugins/ip_session_redirect/test_api.c
+++ b/src/plugins/ip_session_redirect/test_api.c
@@ -14,6 +14,7 @@
#include <vlib/vlib.h>
#include <vnet/fib/fib_api.h>
#include <vnet/ip/ip_format_fns.h>
+#include <vnet/fib/fib_path_list.h>
#include <vnet/classify/vnet_classify.h>
#include <vat/vat.h>
#include <vlibapi/api.h>
@@ -184,6 +185,79 @@ api_ip_session_redirect_del (vat_main_t *vam)
return ret;
}
+static int
+api_ip_session_redirect_dump (vat_main_t *vam)
+{
+ unformat_input_t *i = vam->input;
+ vl_api_ip_session_redirect_dump_t *mp;
+ u32 table_index = ~0;
+ int ret;
+
+ /* Parse args required to build the message */
+ while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (i, "table %d", &table_index))
+ ;
+ else
+ break;
+ }
+
+ /* Construct the API message */
+ M (IP_SESSION_REDIRECT_DUMP, mp);
+ mp->table_index = htonl (table_index);
+
+ S (mp)
+
+ /* Wait for a reply... */
+ W (ret);
+ return ret;
+}
+
+static void
+vl_api_ip_session_redirect_details_t_handler (
+ vl_api_ip_session_redirect_details_t *mp)
+{
+ vat_main_t *vam = ip_session_redirect_test_main.vat_main;
+ int rv;
+ u32 table_index;
+ u32 opaque_index;
+ u32 match_len;
+ u8 match[80];
+ u8 n_paths;
+ fib_route_path_t *paths_ = 0;
+ u8 *out = 0;
+
+ table_index = ntohl (mp->table_index);
+ opaque_index = ntohl (mp->opaque_index);
+ match_len = ntohl (mp->match_length);
+ const char *type = mp->is_punt ? "[punt]" : "[acl]";
+ const char *ip = mp->is_ip6 ? "[ip6]" : "[ip4]";
+ clib_memcpy (match, mp->match, match_len);
+ n_paths = mp->n_paths;
+
+ for (int i = 0; i < n_paths; i++)
+ {
+ fib_route_path_t path;
+ if ((rv = fib_api_path_decode (&mp->paths[i], &path)))
+ goto err;
+ vec_add1 (paths_, path);
+ }
+
+ out =
+ format (out, "table %d match %U %s %s opaque_index 0x%x\n", table_index,
+ format_hex_bytes, match, match_len, type, ip, opaque_index);
+ out = format (out, " via:\n");
+ for (int i = 0; i < n_paths; i++)
+ {
+ fib_route_path_t *path = &paths_[i];
+ out = format (out, " %U", format_fib_route_path, path);
+ }
+
+ fformat (vam->ofp, (char *) out);
+err:
+ vec_free (out);
+}
+
#include "ip_session_redirect.api_test.c"
/*
diff --git a/src/plugins/l2tp/l2tp.c b/src/plugins/l2tp/l2tp.c
index 907468b5900..cada9dc2656 100644
--- a/src/plugins/l2tp/l2tp.c
+++ b/src/plugins/l2tp/l2tp.c
@@ -151,7 +151,7 @@ test_counters_command_fn (vlib_main_t * vm,
u32 session_index;
u32 counter_index;
u32 nincr = 0;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
pool_foreach (session, lm->sessions)
{
diff --git a/src/plugins/lb/lb.c b/src/plugins/lb/lb.c
index 7ae1884ff31..0c4f21a4a78 100644
--- a/src/plugins/lb/lb.c
+++ b/src/plugins/lb/lb.c
@@ -108,7 +108,7 @@ u8 *format_lb_main (u8 * s, va_list * args)
s = format(s, " #vips: %u\n", pool_elts(lbm->vips));
s = format(s, " #ass: %u\n", pool_elts(lbm->ass) - 1);
- u32 thread_index;
+ clib_thread_index_t thread_index;
for(thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++ ) {
lb_hash_t *h = lbm->per_cpu[thread_index].sticky_ht;
if (h) {
@@ -764,7 +764,7 @@ next:
int
lb_flush_vip_as (u32 vip_index, u32 as_index)
{
- u32 thread_index;
+ clib_thread_index_t thread_index;
vlib_thread_main_t *tm = vlib_get_thread_main();
lb_main_t *lbm = &lb_main;
diff --git a/src/plugins/lb/node.c b/src/plugins/lb/node.c
index a37fe11a9b4..1ddc556a8bf 100644
--- a/src/plugins/lb/node.c
+++ b/src/plugins/lb/node.c
@@ -124,7 +124,7 @@ format_lb_nat_trace (u8 * s, va_list * args)
}
lb_hash_t *
-lb_get_sticky_table (u32 thread_index)
+lb_get_sticky_table (clib_thread_index_t thread_index)
{
lb_main_t *lbm = &lb_main;
lb_hash_t *sticky_ht = lbm->per_cpu[thread_index].sticky_ht;
@@ -282,7 +282,7 @@ lb_node_fn (vlib_main_t * vm,
{
lb_main_t *lbm = &lb_main;
u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 lb_time = lb_hash_time_now (vm);
lb_hash_t *sticky_ht = lb_get_sticky_table (thread_index);
diff --git a/src/plugins/linux-cp/lcp.api b/src/plugins/linux-cp/lcp.api
index e7eaa5a3669..8b0fdb5eb53 100644
--- a/src/plugins/linux-cp/lcp.api
+++ b/src/plugins/linux-cp/lcp.api
@@ -177,6 +177,42 @@ autoendian define lcp_itf_pair_details
option in_progress;
};
+/** \brief Enable linux-cp-punt-xc for a given ethertype
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param ethertype - the ethertype to enable
+*/
+autoreply define lcp_ethertype_enable
+{
+ u32 client_index;
+ u32 context;
+ u16 ethertype;
+};
+
+/** \brief Get the enabled ethertypes for linux-cp-punt-xc
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+define lcp_ethertype_get
+{
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Reply to get the enabled ethertypes for linux-cp-punt-xc
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+ @param count - number of enabled ethertypes
+ @param ethertypes - array of enabled ethertypes
+*/
+define lcp_ethertype_get_reply
+{
+ u32 context;
+ i32 retval;
+ u16 count;
+ u16 ethertypes[count];
+};
+
service {
rpc lcp_itf_pair_get returns lcp_itf_pair_get_reply
stream lcp_itf_pair_details;
diff --git a/src/plugins/linux-cp/lcp_api.c b/src/plugins/linux-cp/lcp_api.c
index 74421230e9d..0db502988d7 100644
--- a/src/plugins/linux-cp/lcp_api.c
+++ b/src/plugins/linux-cp/lcp_api.c
@@ -280,6 +280,40 @@ vl_api_lcp_itf_pair_replace_end_t_handler (
REPLY_MACRO (VL_API_LCP_ITF_PAIR_REPLACE_END_REPLY);
}
+static void
+vl_api_lcp_ethertype_enable_t_handler (vl_api_lcp_ethertype_enable_t *mp)
+{
+ vl_api_lcp_ethertype_enable_reply_t *rmp;
+ int rv;
+
+ rv = lcp_ethertype_enable (mp->ethertype);
+
+ REPLY_MACRO (VL_API_LCP_ETHERTYPE_ENABLE_REPLY);
+}
+
+static void
+vl_api_lcp_ethertype_get_t_handler (vl_api_lcp_ethertype_get_t *mp)
+{
+ vl_api_lcp_ethertype_get_reply_t *rmp;
+ ethernet_type_t *ethertypes = vec_new (ethernet_type_t, 0);
+ u16 count = 0;
+ int rv = 0;
+
+ rv = lcp_ethertype_get_enabled (&ethertypes);
+ if (!rv)
+ count = vec_len (ethertypes);
+
+ REPLY_MACRO3 (VL_API_LCP_ETHERTYPE_GET_REPLY, sizeof (u16) * count, ({
+ rmp->count = htons (count);
+ for (int i = 0; i < count; i++)
+ {
+ rmp->ethertypes[i] = htons (ethertypes[i]);
+ }
+ }));
+
+ vec_free (ethertypes);
+}
+
/*
* Set up the API message handling tables
*/
diff --git a/src/plugins/linux-cp/lcp_cli.c b/src/plugins/linux-cp/lcp_cli.c
index 0dcf600b301..e89afd2a753 100644
--- a/src/plugins/linux-cp/lcp_cli.c
+++ b/src/plugins/linux-cp/lcp_cli.c
@@ -337,6 +337,62 @@ VLIB_CLI_COMMAND (lcp_itf_pair_show_cmd_node, static) = {
.is_mp_safe = 1,
};
+static clib_error_t *
+lcp_ethertype_enable_cmd (vlib_main_t *vm, unformat_input_t *input,
+ vlib_cli_command_t *cmd)
+{
+ ethernet_type_t ethertype;
+ int rv;
+
+ if (!unformat (input, "%U", unformat_ethernet_type_host_byte_order,
+ &ethertype))
+ return clib_error_return (0, "Invalid ethertype");
+
+ rv = lcp_ethertype_enable (ethertype);
+ if (rv)
+ return clib_error_return (0, "Failed to enable ethertype (%d)", rv);
+
+ return 0;
+}
+
+VLIB_CLI_COMMAND (lcp_ethertype_enable_command, static) = {
+ .path = "lcp ethertype enable",
+ .short_help =
+ "lcp ethertype enable (<hex_ethertype_num>|<uc_ethertype_name>)",
+ .function = lcp_ethertype_enable_cmd,
+};
+
+static clib_error_t *
+lcp_ethertype_show_cmd (vlib_main_t *vm, unformat_input_t *input,
+ vlib_cli_command_t *cmd)
+{
+ ethernet_type_t *ethertypes = vec_new (ethernet_type_t, 0);
+ ethernet_type_t *etype;
+ int rv;
+
+ rv = lcp_ethertype_get_enabled (&ethertypes);
+ if (rv)
+ {
+ vec_free (ethertypes);
+ return clib_error_return (0, "Failed to get enabled ethertypes (%d)",
+ rv);
+ }
+
+ vec_foreach (etype, ethertypes)
+ {
+ vlib_cli_output (vm, "0x%04x", *etype);
+ }
+
+ vec_free (ethertypes);
+ return 0;
+}
+
+VLIB_CLI_COMMAND (lcp_ethertype_show_command, static) = {
+ .path = "show lcp ethertype",
+ .short_help = "show lcp ethertype",
+ .function = lcp_ethertype_show_cmd,
+};
+
clib_error_t *
lcp_cli_init (vlib_main_t *vm)
{
diff --git a/src/plugins/linux-cp/lcp_interface.c b/src/plugins/linux-cp/lcp_interface.c
index 9a6b9b11be5..31864f791af 100644
--- a/src/plugins/linux-cp/lcp_interface.c
+++ b/src/plugins/linux-cp/lcp_interface.c
@@ -1230,6 +1230,53 @@ lcp_itf_pair_link_up_down (vnet_main_t *vnm, u32 hw_if_index, u32 flags)
return 0;
}
+int
+lcp_ethertype_enable (ethernet_type_t ethertype)
+{
+ ethernet_main_t *em = &ethernet_main;
+ ethernet_type_info_t *eti;
+ vlib_main_t *vm = vlib_get_main ();
+ vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "linux-cp-punt-xc");
+
+ if (!node)
+ return VNET_API_ERROR_UNIMPLEMENTED;
+
+ eti = ethernet_get_type_info (em, ethertype);
+ if (!eti)
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ if (eti->node_index != ~0 && eti->node_index != node->index)
+ return VNET_API_ERROR_INVALID_REGISTRATION;
+
+ ethernet_register_input_type (vm, ethertype, node->index);
+ return 0;
+}
+
+int
+lcp_ethertype_get_enabled (ethernet_type_t **ethertypes_vec)
+{
+ ethernet_main_t *em = &ethernet_main;
+ ethernet_type_info_t *eti;
+ vlib_main_t *vm = vlib_get_main ();
+ vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "linux-cp-punt-xc");
+
+ if (!ethertypes_vec)
+ return VNET_API_ERROR_INVALID_ARGUMENT;
+
+ if (!node)
+ return VNET_API_ERROR_UNIMPLEMENTED;
+
+ vec_foreach (eti, em->type_infos)
+ {
+ if (eti->node_index == node->index)
+ {
+ vec_add1 (*ethertypes_vec, eti->type);
+ }
+ }
+
+ return 0;
+}
+
VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION (lcp_itf_pair_link_up_down);
static clib_error_t *
diff --git a/src/plugins/linux-cp/lcp_interface.h b/src/plugins/linux-cp/lcp_interface.h
index cfcd3925a15..8cf6d3f4da1 100644
--- a/src/plugins/linux-cp/lcp_interface.h
+++ b/src/plugins/linux-cp/lcp_interface.h
@@ -18,6 +18,7 @@
#include <vnet/dpo/dpo.h>
#include <vnet/adj/adj.h>
#include <vnet/ip/ip_types.h>
+#include <vnet/ethernet/ethernet.h>
#include <plugins/linux-cp/lcp.h>
@@ -198,6 +199,18 @@ void lcp_itf_pair_sync_state (lcp_itf_pair_t *lip);
void lcp_itf_pair_sync_state_hw (vnet_hw_interface_t *hi);
void lcp_itf_pair_sync_state_all ();
+/**
+ * Enable linux-cp-punt-xc for a given ethertype.
+ * @param ethertype - ethertype to enable
+ */
+int lcp_ethertype_enable (ethernet_type_t ethertype);
+
+/**
+ * Get the list of ethertypes enabled for linux-cp-punt-xc.
+ * @param ethertypes_vec - pointer to a vector to store the list of ethertypes
+ */
+int lcp_ethertype_get_enabled (ethernet_type_t **ethertypes_vec);
+
/*
* fd.io coding-style-patch-verification: ON
*
diff --git a/src/plugins/linux-cp/lcp_nl.c b/src/plugins/linux-cp/lcp_nl.c
index 916877939f0..55d2ea54245 100644
--- a/src/plugins/linux-cp/lcp_nl.c
+++ b/src/plugins/linux-cp/lcp_nl.c
@@ -29,7 +29,7 @@
#include <netlink/route/addr.h>
#include <vlib/vlib.h>
-#include <vlib/unix/unix.h>
+#include <vlib/file.h>
#include <vppinfra/error.h>
#include <vppinfra/linux/netns.h>
diff --git a/src/plugins/linux-cp/lcp_node.c b/src/plugins/linux-cp/lcp_node.c
index 241cc5e4bff..9fa1aa5bd66 100644
--- a/src/plugins/linux-cp/lcp_node.c
+++ b/src/plugins/linux-cp/lcp_node.c
@@ -39,40 +39,51 @@
typedef enum
{
-#define _(sym, str) LIP_PUNT_NEXT_##sym,
+#define _(sym, str) LIP_PUNT_XC_NEXT_##sym,
foreach_lip_punt
#undef _
- LIP_PUNT_N_NEXT,
-} lip_punt_next_t;
+ LIP_PUNT_XC_N_NEXT,
+} lip_punt_xc_next_t;
-typedef struct lip_punt_trace_t_
+typedef struct lip_punt_xc_trace_t_
{
+ bool is_xc;
u32 phy_sw_if_index;
u32 host_sw_if_index;
-} lip_punt_trace_t;
+} lip_punt_xc_trace_t;
/* packet trace format function */
static u8 *
-format_lip_punt_trace (u8 *s, va_list *args)
+format_lip_punt_xc_trace (u8 *s, va_list *args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- lip_punt_trace_t *t = va_arg (*args, lip_punt_trace_t *);
+ lip_punt_xc_trace_t *t = va_arg (*args, lip_punt_xc_trace_t *);
- s =
- format (s, "lip-punt: %u -> %u", t->phy_sw_if_index, t->host_sw_if_index);
+ if (t->is_xc)
+ {
+ s = format (s, "lip-xc: %u -> %u", t->host_sw_if_index,
+ t->phy_sw_if_index);
+ }
+ else
+ {
+ s = format (s, "lip-punt: %u -> %u", t->phy_sw_if_index,
+ t->host_sw_if_index);
+ }
return s;
}
/**
* Pass punted packets from the PHY to the HOST.
+ * Conditionally x-connect packets from the HOST to the PHY.
*/
-VLIB_NODE_FN (lip_punt_node)
-(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+static_always_inline u32
+lip_punt_xc_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame, bool check_xc)
{
u32 n_left_from, *from, *to_next, n_left_to_next;
- lip_punt_next_t next_index;
+ lip_punt_xc_next_t next_index;
next_index = node->cached_next_index;
n_left_from = frame->n_vectors;
@@ -89,6 +100,7 @@ VLIB_NODE_FN (lip_punt_node)
u32 next0 = ~0;
u32 bi0, lipi0;
u32 sw_if_index0;
+ bool is_xc0 = 0;
u8 len0;
bi0 = to_next[0] = from[0];
@@ -97,18 +109,33 @@ VLIB_NODE_FN (lip_punt_node)
to_next += 1;
n_left_from -= 1;
n_left_to_next -= 1;
- next0 = LIP_PUNT_NEXT_DROP;
+ next0 = LIP_PUNT_XC_NEXT_DROP;
b0 = vlib_get_buffer (vm, bi0);
sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
lipi0 = lcp_itf_pair_find_by_phy (sw_if_index0);
- if (PREDICT_FALSE (lipi0 == INDEX_INVALID))
- goto trace0;
+
+ /*
+ * lip_punt_node: expect sw_if_index0 is phy in an itf pair
+ * lip_punt_xc_node: if sw_if_index0 is not phy, expect it is host
+ */
+ if (!check_xc && (PREDICT_FALSE (lipi0 == INDEX_INVALID)))
+ {
+ goto trace0;
+ }
+ else if (check_xc && (lipi0 == INDEX_INVALID))
+ {
+ is_xc0 = 1;
+ lipi0 = lcp_itf_pair_find_by_host (sw_if_index0);
+ if (PREDICT_FALSE (lipi0 == INDEX_INVALID))
+ goto trace0;
+ }
lip0 = lcp_itf_pair_get (lipi0);
- next0 = LIP_PUNT_NEXT_IO;
- vnet_buffer (b0)->sw_if_index[VLIB_TX] = lip0->lip_host_sw_if_index;
+ next0 = LIP_PUNT_XC_NEXT_IO;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] =
+ is_xc0 ? lip0->lip_phy_sw_if_index : lip0->lip_host_sw_if_index;
if (PREDICT_TRUE (lip0->lip_host_type == LCP_ITF_HOST_TAP))
{
@@ -129,10 +156,22 @@ VLIB_NODE_FN (lip_punt_node)
trace0:
if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
{
- lip_punt_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
- t->phy_sw_if_index = sw_if_index0;
- t->host_sw_if_index =
- (lipi0 == INDEX_INVALID) ? ~0 : lip0->lip_host_sw_if_index;
+ lip_punt_xc_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+
+ t->is_xc = is_xc0;
+ if (is_xc0)
+ {
+ t->phy_sw_if_index =
+ (lipi0 == INDEX_INVALID) ? ~0 : lip0->lip_phy_sw_if_index;
+ t->host_sw_if_index = sw_if_index0;
+ }
+ else
+ {
+ t->phy_sw_if_index = sw_if_index0;
+ t->host_sw_if_index =
+ (lipi0 == INDEX_INVALID) ? ~0 : lip0->lip_host_sw_if_index;
+ }
}
vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
@@ -145,16 +184,41 @@ VLIB_NODE_FN (lip_punt_node)
return frame->n_vectors;
}
+VLIB_NODE_FN (lip_punt_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+{
+ return (lip_punt_xc_inline (vm, node, frame, false /* xc */));
+}
+
+VLIB_NODE_FN (lip_punt_xc_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+{
+ return (lip_punt_xc_inline (vm, node, frame, true /* xc */));
+}
+
VLIB_REGISTER_NODE (lip_punt_node) = {
.name = "linux-cp-punt",
.vector_size = sizeof (u32),
- .format_trace = format_lip_punt_trace,
+ .format_trace = format_lip_punt_xc_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_next_nodes = LIP_PUNT_XC_N_NEXT,
+ .next_nodes = {
+ [LIP_PUNT_XC_NEXT_DROP] = "error-drop",
+ [LIP_PUNT_XC_NEXT_IO] = "interface-output",
+ },
+};
+
+VLIB_REGISTER_NODE (lip_punt_xc_node) = {
+ .name = "linux-cp-punt-xc",
+ .vector_size = sizeof (u32),
+ .format_trace = format_lip_punt_xc_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_next_nodes = LIP_PUNT_N_NEXT,
+ .n_next_nodes = LIP_PUNT_XC_N_NEXT,
.next_nodes = {
- [LIP_PUNT_NEXT_DROP] = "error-drop",
- [LIP_PUNT_NEXT_IO] = "interface-output",
+ [LIP_PUNT_XC_NEXT_DROP] = "error-drop",
+ [LIP_PUNT_XC_NEXT_IO] = "interface-output",
},
};
@@ -190,7 +254,7 @@ VLIB_NODE_FN (lcp_punt_l3_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
u32 n_left_from, *from, *to_next, n_left_to_next;
- lip_punt_next_t next_index;
+ lip_punt_xc_next_t next_index;
next_index = node->cached_next_index;
n_left_from = frame->n_vectors;
diff --git a/src/plugins/linux-cp/lcp_router.c b/src/plugins/linux-cp/lcp_router.c
index 0efd53e64ef..27f53357a4d 100644
--- a/src/plugins/linux-cp/lcp_router.c
+++ b/src/plugins/linux-cp/lcp_router.c
@@ -17,7 +17,6 @@
#include <linux/if.h>
#include <linux/mpls.h>
-//#include <vlib/vlib.h>
#include <vlib/unix/plugin.h>
#include <linux-cp/lcp_nl.h>
#include <linux-cp/lcp_interface.h>
@@ -1543,6 +1542,12 @@ const nl_vft_t lcp_router_vft = {
.cb = lcp_router_route_sync_end },
};
+static void
+lcp_lcp_router_interface_del_cb (lcp_itf_pair_t *lip)
+{
+ lcp_router_ip6_mroutes_add_del (lip->lip_phy_sw_if_index, 0);
+}
+
static clib_error_t *
lcp_router_init (vlib_main_t *vm)
{
@@ -1550,6 +1555,12 @@ lcp_router_init (vlib_main_t *vm)
nl_register_vft (&lcp_router_vft);
+ lcp_itf_pair_vft_t lcp_router_interface_del_vft = {
+ .pair_del_fn = lcp_lcp_router_interface_del_cb,
+ };
+
+ lcp_itf_pair_register_vft (&lcp_router_interface_del_vft);
+
/*
* allocate 2 route sources. The low priority source will be for
* dynamic routes. If a dynamic route daemon (FRR) tries to remove its
diff --git a/src/plugins/lisp/lisp-gpe/decap.c b/src/plugins/lisp/lisp-gpe/decap.c
index 18e32675a32..b568fef24fa 100644
--- a/src/plugins/lisp/lisp-gpe/decap.c
+++ b/src/plugins/lisp/lisp-gpe/decap.c
@@ -102,9 +102,9 @@ next_index_to_iface (lisp_gpe_main_t * lgm, u32 next_index)
}
static_always_inline void
-incr_decap_stats (vnet_main_t * vnm, u32 thread_index, u32 length,
- u32 sw_if_index, u32 * last_sw_if_index, u32 * n_packets,
- u32 * n_bytes)
+incr_decap_stats (vnet_main_t *vnm, clib_thread_index_t thread_index,
+ u32 length, u32 sw_if_index, u32 *last_sw_if_index,
+ u32 *n_packets, u32 *n_bytes)
{
vnet_interface_main_t *im;
diff --git a/src/plugins/lisp/lisp-gpe/interface.c b/src/plugins/lisp/lisp-gpe/interface.c
index ed2b08f9aaf..5d3ad9463ea 100644
--- a/src/plugins/lisp/lisp-gpe/interface.c
+++ b/src/plugins/lisp/lisp-gpe/interface.c
@@ -233,7 +233,7 @@ l2_lisp_gpe_interface_tx (vlib_main_t * vm, vlib_node_runtime_t * node,
{
u32 n_left_from, next_index, *from, *to_next;
lisp_gpe_main_t *lgm = &lisp_gpe_main;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
vlib_combined_counter_main_t *cm = &load_balance_main.lbm_to_counters;
from = vlib_frame_vector_args (from_frame);
diff --git a/src/plugins/mactime/node.c b/src/plugins/mactime/node.c
index fad487e666e..dfe7a26c2af 100644
--- a/src/plugins/mactime/node.c
+++ b/src/plugins/mactime/node.c
@@ -87,7 +87,7 @@ mactime_node_inline (vlib_main_t * vm,
clib_bihash_8_8_t *lut = &mm->lookup_table;
u32 packets_ok = 0;
f64 now;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
vnet_main_t *vnm = vnet_get_main ();
vnet_interface_main_t *im = &vnm->interface_main;
u8 arc = im->output_feature_arc_index;
diff --git a/src/plugins/map/ip4_map.c b/src/plugins/map/ip4_map.c
index 652808e6d37..1ff585ceb3a 100644
--- a/src/plugins/map/ip4_map.c
+++ b/src/plugins/map/ip4_map.c
@@ -154,7 +154,7 @@ ip4_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
next_index = node->cached_next_index;
map_main_t *mm = &map_main;
vlib_combined_counter_main_t *cm = mm->domain_counters;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 *buffer0 = 0;
while (n_left_from > 0)
diff --git a/src/plugins/map/ip4_map_t.c b/src/plugins/map/ip4_map_t.c
index fe29af458a2..ec89056e0cc 100644
--- a/src/plugins/map/ip4_map_t.c
+++ b/src/plugins/map/ip4_map_t.c
@@ -117,7 +117,7 @@ ip4_map_t_icmp (vlib_main_t * vm,
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
vlib_combined_counter_main_t *cm = map_main.domain_counters;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
while (n_left_from > 0)
{
@@ -549,7 +549,7 @@ ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
vlib_combined_counter_main_t *cm = map_main.domain_counters;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
while (n_left_from > 0)
{
diff --git a/src/plugins/map/ip6_map.c b/src/plugins/map/ip6_map.c
index 3d9b21dfcd9..33d5a0ebbd3 100644
--- a/src/plugins/map/ip6_map.c
+++ b/src/plugins/map/ip6_map.c
@@ -166,7 +166,7 @@ ip6_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
vlib_node_get_runtime (vm, ip6_map_node.index);
map_main_t *mm = &map_main;
vlib_combined_counter_main_t *cm = mm->domain_counters;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
@@ -559,7 +559,7 @@ ip6_map_post_ip4_reass (vlib_main_t * vm,
vlib_node_get_runtime (vm, ip6_map_post_ip4_reass_node.index);
map_main_t *mm = &map_main;
vlib_combined_counter_main_t *cm = mm->domain_counters;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
@@ -651,7 +651,7 @@ ip6_map_icmp_relay (vlib_main_t * vm,
vlib_node_runtime_t *error_node =
vlib_node_get_runtime (vm, ip6_map_icmp_relay_node.index);
map_main_t *mm = &map_main;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u16 *fragment_ids, *fid;
from = vlib_frame_vector_args (frame);
diff --git a/src/plugins/map/ip6_map_t.c b/src/plugins/map/ip6_map_t.c
index f8d894a013a..854410eb462 100644
--- a/src/plugins/map/ip6_map_t.c
+++ b/src/plugins/map/ip6_map_t.c
@@ -118,7 +118,7 @@ ip6_map_t_icmp (vlib_main_t * vm,
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
vlib_combined_counter_main_t *cm = map_main.domain_counters;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
while (n_left_from > 0)
{
@@ -494,7 +494,7 @@ ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
vlib_node_get_runtime (vm, ip6_map_t_node.index);
map_main_t *mm = &map_main;
vlib_combined_counter_main_t *cm = map_main.domain_counters;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
diff --git a/src/plugins/memif/memif.c b/src/plugins/memif/memif.c
index 7e3dd44db2c..ad8512ac81e 100644
--- a/src/plugins/memif/memif.c
+++ b/src/plugins/memif/memif.c
@@ -31,7 +31,7 @@
#include <limits.h>
#include <vlib/vlib.h>
-#include <vlib/unix/unix.h>
+#include <vlib/file.h>
#include <vnet/plugin/plugin.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/interface/rx_queue_funcs.h>
@@ -379,6 +379,12 @@ memif_connect (memif_if_t * mif)
CLIB_CACHE_LINE_BYTES);
vec_foreach (dma_info, mq->dma_info)
{
+ vlib_buffer_t *bt = &dma_info->data.buffer_template;
+
+ clib_memset (bt, 0, sizeof (*bt));
+ bt->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
+ bt->total_length_not_including_first_buffer = 0;
+ vnet_buffer (bt)->sw_if_index[VLIB_TX] = (u32) ~0;
vec_validate_aligned (dma_info->data.desc_data,
pow2_mask (max_log2_ring_sz),
CLIB_CACHE_LINE_BYTES);
diff --git a/src/plugins/memif/node.c b/src/plugins/memif/node.c
index d483f92b2fe..08b248df534 100644
--- a/src/plugins/memif/node.c
+++ b/src/plugins/memif/node.c
@@ -504,7 +504,7 @@ memif_device_input_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
u32 n_left_to_next;
u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
vlib_buffer_t *buffer_ptrs[MEMIF_RX_VECTOR_SZ];
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
memif_per_thread_data_t *ptd =
vec_elt_at_index (mm->per_thread_data, thread_index);
u16 cur_slot, ring_size, n_slots, mask;
@@ -763,7 +763,7 @@ memif_device_input_zc_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
u16 slot, s0;
memif_desc_t *d0;
vlib_buffer_t *b0, *b1, *b2, *b3;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
memif_per_thread_data_t *ptd = vec_elt_at_index (mm->per_thread_data,
thread_index);
u16 cur_slot, last_slot, ring_size, n_slots, mask, head;
@@ -1061,7 +1061,7 @@ CLIB_MARCH_FN (memif_dma_completion_cb, void, vlib_main_t *vm,
{
memif_main_t *mm = &memif_main;
memif_if_t *mif = vec_elt_at_index (mm->interfaces, b->cookie >> 16);
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 n_left_to_next = 0;
u16 nexts[MEMIF_RX_VECTOR_SZ], *next;
u32 _to_next_bufs[MEMIF_RX_VECTOR_SZ], *to_next_bufs = _to_next_bufs, *bi;
diff --git a/src/plugins/memif/private.h b/src/plugins/memif/private.h
index 43455d00522..af82a8bfaa3 100644
--- a/src/plugins/memif/private.h
+++ b/src/plugins/memif/private.h
@@ -76,7 +76,7 @@
#define memif_file_del(a) \
do \
{ \
- memif_log_debug (0, "clib_file_del idx %u", a - file_main.file_pool); \
+ memif_log_debug (0, "clib_file_del idx %u", (a)->index); \
clib_file_del (&file_main, a); \
} \
while (0)
diff --git a/src/plugins/memif/socket.c b/src/plugins/memif/socket.c
index 001f26f13ef..c2b11fc2ecb 100644
--- a/src/plugins/memif/socket.c
+++ b/src/plugins/memif/socket.c
@@ -30,7 +30,7 @@
#include <limits.h>
#include <vlib/vlib.h>
-#include <vlib/unix/unix.h>
+#include <vlib/file.h>
#include <vnet/plugin/plugin.h>
#include <vnet/ethernet/ethernet.h>
#include <vpp/app/version.h>
diff --git a/src/plugins/nat/det44/det44.h b/src/plugins/nat/det44/det44.h
index e576bfb65e8..683f554f03c 100644
--- a/src/plugins/nat/det44/det44.h
+++ b/src/plugins/nat/det44/det44.h
@@ -38,7 +38,6 @@
#include <vnet/ip/reass/ip4_sv_reass.h>
#include <nat/lib/lib.h>
-#include <nat/lib/inlines.h>
#include <nat/lib/ipfix_logging.h>
#include <nat/lib/nat_proto.h>
diff --git a/src/plugins/nat/det44/det44_in2out.c b/src/plugins/nat/det44/det44_in2out.c
index 3f5e05a064c..39a9ecabac7 100644
--- a/src/plugins/nat/det44/det44_in2out.c
+++ b/src/plugins/nat/det44/det44_in2out.c
@@ -21,6 +21,7 @@
#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
+#include <vnet/ip/ip4_to_ip6.h>
#include <vnet/fib/ip4_fib.h>
#include <vppinfra/error.h>
#include <vppinfra/elog.h>
@@ -29,7 +30,6 @@
#include <nat/det44/det44_inlines.h>
#include <nat/lib/lib.h>
-#include <nat/lib/inlines.h>
#include <nat/lib/nat_inlines.h>
typedef enum
diff --git a/src/plugins/nat/det44/det44_out2in.c b/src/plugins/nat/det44/det44_out2in.c
index ab6acd4f8e9..dd89606ff10 100644
--- a/src/plugins/nat/det44/det44_out2in.c
+++ b/src/plugins/nat/det44/det44_out2in.c
@@ -21,6 +21,7 @@
#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
+#include <vnet/ip/ip4_to_ip6.h>
#include <vnet/fib/ip4_fib.h>
#include <vppinfra/error.h>
#include <vppinfra/elog.h>
@@ -29,7 +30,6 @@
#include <nat/det44/det44_inlines.h>
#include <nat/lib/lib.h>
-#include <nat/lib/inlines.h>
#include <nat/lib/nat_inlines.h>
typedef enum
diff --git a/src/plugins/nat/dslite/dslite.h b/src/plugins/nat/dslite/dslite.h
index f05670c9bf5..979afb476b7 100644
--- a/src/plugins/nat/dslite/dslite.h
+++ b/src/plugins/nat/dslite/dslite.h
@@ -22,7 +22,6 @@
#include <nat/lib/lib.h>
#include <nat/lib/alloc.h>
-#include <nat/lib/inlines.h>
typedef struct
{
diff --git a/src/plugins/nat/dslite/dslite_in2out.c b/src/plugins/nat/dslite/dslite_in2out.c
index 522c3cf4123..806969f5f4d 100644
--- a/src/plugins/nat/dslite/dslite_in2out.c
+++ b/src/plugins/nat/dslite/dslite_in2out.c
@@ -12,6 +12,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+#include <vnet/ip/ip4_to_ip6.h>
#include <nat/dslite/dslite.h>
#include <nat/lib/nat_syslog.h>
diff --git a/src/plugins/nat/dslite/dslite_out2in.c b/src/plugins/nat/dslite/dslite_out2in.c
index 531bbb468bb..9ec48d458e5 100644
--- a/src/plugins/nat/dslite/dslite_out2in.c
+++ b/src/plugins/nat/dslite/dslite_out2in.c
@@ -12,6 +12,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+#include <vnet/ip/ip4_to_ip6.h>
#include <nat/dslite/dslite.h>
typedef enum
diff --git a/src/plugins/nat/lib/inlines.h b/src/plugins/nat/lib/inlines.h
deleted file mode 100644
index 24e3ba83a5b..00000000000
--- a/src/plugins/nat/lib/inlines.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2020 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/**
- * @brief Common NAT inline functions
- */
-#ifndef included_nat_inlines_h__
-#define included_nat_inlines_h__
-
-#include <vnet/ip/icmp46_packet.h>
-
-static_always_inline u64
-icmp_type_is_error_message (u8 icmp_type)
-{
- int bmp = 0;
- bmp |= 1 << ICMP4_destination_unreachable;
- bmp |= 1 << ICMP4_time_exceeded;
- bmp |= 1 << ICMP4_parameter_problem;
- bmp |= 1 << ICMP4_source_quench;
- bmp |= 1 << ICMP4_redirect;
- bmp |= 1 << ICMP4_alternate_host_address;
-
- return (1ULL << icmp_type) & bmp;
-}
-
-#endif /* included_nat_inlines_h__ */
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
diff --git a/src/plugins/nat/lib/ipfix_logging.c b/src/plugins/nat/lib/ipfix_logging.c
index 593fa09f7e2..f569ccd1918 100644
--- a/src/plugins/nat/lib/ipfix_logging.c
+++ b/src/plugins/nat/lib/ipfix_logging.c
@@ -22,7 +22,6 @@
#include <vlibmemory/api.h>
#include <vppinfra/atomics.h>
#include <nat/lib/ipfix_logging.h>
-#include <nat/lib/inlines.h>
vlib_node_registration_t nat_ipfix_flush_node;
nat_ipfix_logging_main_t nat_ipfix_logging_main;
diff --git a/src/plugins/nat/lib/nat_syslog.c b/src/plugins/nat/lib/nat_syslog.c
index 98777ebf280..93756a561bc 100644
--- a/src/plugins/nat/lib/nat_syslog.c
+++ b/src/plugins/nat/lib/nat_syslog.c
@@ -21,7 +21,6 @@
#include <vnet/syslog/syslog.h>
#include <nat/lib/nat_syslog.h>
-#include <nat/lib/inlines.h>
#include <nat/lib/nat_syslog_constants.h>
diff --git a/src/plugins/nat/nat44-ed/nat44_ed.h b/src/plugins/nat/nat44-ed/nat44_ed.h
index 706511475cf..c3a959b0635 100644
--- a/src/plugins/nat/nat44-ed/nat44_ed.h
+++ b/src/plugins/nat/nat44-ed/nat44_ed.h
@@ -31,7 +31,6 @@
#include <vlibapi/api.h>
#include <nat/lib/lib.h>
-#include <nat/lib/inlines.h>
/* default number of worker handoff frame queue elements */
#define NAT_FQ_NELTS_DEFAULT 64
diff --git a/src/plugins/nat/nat44-ed/nat44_ed_inlines.h b/src/plugins/nat/nat44-ed/nat44_ed_inlines.h
index 04e5236b7f9..8cd93f263c6 100644
--- a/src/plugins/nat/nat44-ed/nat44_ed_inlines.h
+++ b/src/plugins/nat/nat44-ed/nat44_ed_inlines.h
@@ -27,6 +27,7 @@
#include <nat/lib/log.h>
#include <nat/lib/ipfix_logging.h>
#include <nat/nat44-ed/nat44_ed.h>
+#include <vnet/ip/ip4_to_ip6.h>
always_inline void
init_ed_k (clib_bihash_kv_16_8_t *kv, u32 l_addr, u16 l_port, u32 r_addr,
diff --git a/src/plugins/nat/nat44-ei/nat44_ei.c b/src/plugins/nat/nat44-ei/nat44_ei.c
index e16625a2946..d1959f72ae7 100644
--- a/src/plugins/nat/nat44-ei/nat44_ei.c
+++ b/src/plugins/nat/nat44-ei/nat44_ei.c
@@ -21,6 +21,7 @@
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ip/ip4.h>
+#include <vnet/ip/ip4_to_ip6.h>
#include <vnet/ip/ip_table.h>
#include <vnet/ip/reass/ip4_sv_reass.h>
#include <vnet/fib/fib_table.h>
diff --git a/src/plugins/nat/nat44-ei/nat44_ei.h b/src/plugins/nat/nat44-ei/nat44_ei.h
index b4aa0f26c0b..786fb0cfc2c 100644
--- a/src/plugins/nat/nat44-ei/nat44_ei.h
+++ b/src/plugins/nat/nat44-ei/nat44_ei.h
@@ -35,7 +35,6 @@
#include <vppinfra/hash.h>
#include <nat/lib/lib.h>
-#include <nat/lib/inlines.h>
#include <nat/lib/nat_proto.h>
/* default number of worker handoff frame queue elements */
diff --git a/src/plugins/nat/nat44-ei/nat44_ei_in2out.c b/src/plugins/nat/nat44-ei/nat44_ei_in2out.c
index 3b981d69986..2fbf2832d5e 100644
--- a/src/plugins/nat/nat44-ei/nat44_ei_in2out.c
+++ b/src/plugins/nat/nat44-ei/nat44_ei_in2out.c
@@ -21,6 +21,7 @@
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
+#include <vnet/ip/ip4_to_ip6.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/udp/udp_local.h>
#include <vnet/fib/ip4_fib.h>
diff --git a/src/plugins/nat/nat44-ei/nat44_ei_out2in.c b/src/plugins/nat/nat44-ei/nat44_ei_out2in.c
index 5d91cb04f7c..805a6962868 100644
--- a/src/plugins/nat/nat44-ei/nat44_ei_out2in.c
+++ b/src/plugins/nat/nat44-ei/nat44_ei_out2in.c
@@ -21,6 +21,7 @@
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
+#include <vnet/ip/ip4_to_ip6.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/udp/udp_local.h>
#include <vnet/fib/ip4_fib.h>
diff --git a/src/plugins/nat/nat64/nat64.c b/src/plugins/nat/nat64/nat64.c
index 950eea60e5e..c59cfbbbd55 100644
--- a/src/plugins/nat/nat64/nat64.c
+++ b/src/plugins/nat/nat64/nat64.c
@@ -15,6 +15,7 @@
#include <vppinfra/crc32.h>
#include <vnet/fib/ip4_fib.h>
+#include <vnet/ip/ip4_to_ip6.h>
#include <vnet/ip/reass/ip4_sv_reass.h>
#include <vnet/ip/reass/ip6_sv_reass.h>
diff --git a/src/plugins/nat/nat64/nat64.h b/src/plugins/nat/nat64/nat64.h
index 9eb8d915390..2577880c7a4 100644
--- a/src/plugins/nat/nat64/nat64.h
+++ b/src/plugins/nat/nat64/nat64.h
@@ -30,7 +30,6 @@
#include <vnet/ip/reass/ip4_sv_reass.h>
#include <nat/lib/lib.h>
-#include <nat/lib/inlines.h>
#include <nat/lib/nat_inlines.h>
#include <nat/nat64/nat64_db.h>
diff --git a/src/plugins/nat/nat64/nat64_db.c b/src/plugins/nat/nat64/nat64_db.c
index e4e9febcb12..6ba77c58965 100644
--- a/src/plugins/nat/nat64/nat64_db.c
+++ b/src/plugins/nat/nat64/nat64_db.c
@@ -16,7 +16,6 @@
#include <vnet/fib/fib_table.h>
#include <nat/lib/ipfix_logging.h>
#include <nat/lib/nat_syslog.h>
-#include <nat/lib/inlines.h>
#include <nat/nat64/nat64_db.h>
int
diff --git a/src/plugins/nat/pnat/pnat.api b/src/plugins/nat/pnat/pnat.api
index de555c41412..82c2de49682 100644
--- a/src/plugins/nat/pnat/pnat.api
+++ b/src/plugins/nat/pnat/pnat.api
@@ -165,6 +165,23 @@ define pnat_interfaces_details
vl_api_pnat_mask_t lookup_mask[2]; /* PNAT_ATTACHMENT_POINT_MAX */
};
+
+autoendian define pnat_flow_lookup
+{
+ u32 client_index;
+ u32 context;
+ vl_api_interface_index_t sw_if_index;
+ vl_api_pnat_attachment_point_t attachment;
+ vl_api_pnat_match_tuple_t match;
+};
+
+autoendian define pnat_flow_lookup_reply
+{
+ u32 context;
+ i32 retval;
+ u32 binding_index;
+};
+
counters pnat {
none {
severity info;
diff --git a/src/plugins/nat/pnat/pnat_api.c b/src/plugins/nat/pnat/pnat_api.c
index a4e7ff192bf..f627307628d 100644
--- a/src/plugins/nat/pnat/pnat_api.c
+++ b/src/plugins/nat/pnat/pnat_api.c
@@ -93,6 +93,20 @@ static void vl_api_pnat_binding_del_t_handler(vl_api_pnat_binding_del_t *mp) {
REPLY_MACRO_END(VL_API_PNAT_BINDING_DEL_REPLY);
}
+static void vl_api_pnat_flow_lookup_t_handler(vl_api_pnat_flow_lookup_t *mp) {
+ pnat_main_t *pm = &pnat_main;
+ vl_api_pnat_flow_lookup_reply_t *rmp;
+ u32 binding_index;
+ int rv = 0;
+ binding_index =
+ pnat_flow_lookup(mp->sw_if_index, mp->attachment, &mp->match);
+ if (binding_index == ~0) {
+ rv = -1;
+ }
+ REPLY_MACRO2_END(VL_API_PNAT_FLOW_LOOKUP_REPLY,
+ ({ rmp->binding_index = binding_index; }));
+}
+
/*
* Workaround for a bug in vppapigen that doesn't register the endian handler
* for _details messages. When that's fixed it should be possible to use
diff --git a/src/plugins/netmap/netmap.c b/src/plugins/netmap/netmap.c
index ebef215eb3b..0d92d03151c 100644
--- a/src/plugins/netmap/netmap.c
+++ b/src/plugins/netmap/netmap.c
@@ -22,7 +22,7 @@
#include <fcntl.h>
#include <vlib/vlib.h>
-#include <vlib/unix/unix.h>
+#include <vlib/file.h>
#include <vnet/ethernet/ethernet.h>
#include <netmap/net_netmap.h>
@@ -53,7 +53,7 @@ close_netmap_if (netmap_main_t * nm, netmap_if_t * nif)
{
if (nif->clib_file_index != ~0)
{
- clib_file_del (&file_main, file_main.file_pool + nif->clib_file_index);
+ clib_file_del_by_index (&file_main, nif->clib_file_index);
nif->clib_file_index = ~0;
}
else if (nif->fd > -1)
diff --git a/src/plugins/netmap/node.c b/src/plugins/netmap/node.c
index 6169847fa79..85e7db5808b 100644
--- a/src/plugins/netmap/node.c
+++ b/src/plugins/netmap/node.c
@@ -98,7 +98,7 @@ netmap_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
u32 n_free_bufs;
struct netmap_ring *ring;
int cur_ring;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 n_buffer_bytes = vlib_buffer_get_default_data_size (vm);
if (nif->per_interface_next_index != ~0)
@@ -255,7 +255,7 @@ VLIB_NODE_FN (netmap_input_node) (vlib_main_t * vm,
{
int i;
u32 n_rx_packets = 0;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
netmap_main_t *nm = &netmap_main;
netmap_if_t *nmi;
diff --git a/src/plugins/nsh/nsh.c b/src/plugins/nsh/nsh.c
index a2c24e27b26..06dd45be944 100644
--- a/src/plugins/nsh/nsh.c
+++ b/src/plugins/nsh/nsh.c
@@ -20,7 +20,7 @@
#include <nsh/nsh.h>
#include <gre/gre.h>
#include <vxlan/vxlan.h>
-#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <plugins/vxlan-gpe/vxlan_gpe.h>
#include <vnet/l2/l2_classify.h>
#include <vnet/adj/adj.h>
#include <vpp/app/version.h>
@@ -182,7 +182,8 @@ nsh_md2_set_next_ioam_export_override (uword next)
clib_error_t *
nsh_init (vlib_main_t * vm)
{
- vlib_node_t *node, *gre4_input, *gre6_input;
+ vlib_node_t *node, *gre4_input, *gre6_input, *vxlan4_gpe_input,
+ *vxlan6_gpe_input;
nsh_main_t *nm = &nsh_main;
clib_error_t *error = 0;
uword next_node;
@@ -222,20 +223,24 @@ nsh_init (vlib_main_t * vm)
/* Add dispositions to nodes that feed nsh-input */
//alagalah - validate we don't really need to use the node value
+ vxlan4_gpe_input = vlib_get_node_by_name (vm, (u8 *) "vxlan4-gpe-input");
+ vxlan6_gpe_input = vlib_get_node_by_name (vm, (u8 *) "vxlan6-gpe-input");
+ nm->vgm = vlib_get_plugin_symbol ("vxlan-gpe_plugin.so", "vxlan_gpe_main");
+ if (vxlan4_gpe_input == 0 || vxlan6_gpe_input == 0 || nm->vgm == 0)
+ {
+ error = clib_error_return (0, "vxlan_gpe_plugin.so is not loaded");
+ return error;
+ }
next_node =
- vlib_node_add_next (vm, vxlan4_gpe_input_node.index,
- nm->nsh_input_node_index);
- vlib_node_add_next (vm, vxlan4_gpe_input_node.index,
- nm->nsh_proxy_node_index);
- vlib_node_add_next (vm, vxlan4_gpe_input_node.index,
+ vlib_node_add_next (vm, vxlan4_gpe_input->index, nm->nsh_input_node_index);
+ vlib_node_add_next (vm, vxlan4_gpe_input->index, nm->nsh_proxy_node_index);
+ vlib_node_add_next (vm, vxlan4_gpe_input->index,
nsh_aware_vnf_proxy_node.index);
- vxlan_gpe_register_decap_protocol (VXLAN_GPE_PROTOCOL_NSH, next_node);
+ nm->vgm->register_decap_protocol (VXLAN_GPE_PROTOCOL_NSH, next_node);
- vlib_node_add_next (vm, vxlan6_gpe_input_node.index,
- nm->nsh_input_node_index);
- vlib_node_add_next (vm, vxlan6_gpe_input_node.index,
- nm->nsh_proxy_node_index);
- vlib_node_add_next (vm, vxlan6_gpe_input_node.index,
+ vlib_node_add_next (vm, vxlan6_gpe_input->index, nm->nsh_input_node_index);
+ vlib_node_add_next (vm, vxlan6_gpe_input->index, nm->nsh_proxy_node_index);
+ vlib_node_add_next (vm, vxlan6_gpe_input->index,
nsh_aware_vnf_proxy_node.index);
gre4_input = vlib_get_node_by_name (vm, (u8 *) "gre4-input");
@@ -280,7 +285,9 @@ nsh_init (vlib_main_t * vm)
return error;
}
-VLIB_INIT_FUNCTION (nsh_init);
+VLIB_INIT_FUNCTION (nsh_init) = {
+ .runs_after = VLIB_INITS ("vxlan_gpe_init"),
+};
VLIB_PLUGIN_REGISTER () = {
.version = VPP_BUILD_VER,
diff --git a/src/plugins/nsh/nsh.h b/src/plugins/nsh/nsh.h
index 86a9a7e95c3..c408ddb99a2 100644
--- a/src/plugins/nsh/nsh.h
+++ b/src/plugins/nsh/nsh.h
@@ -18,6 +18,7 @@
#include <vnet/vnet.h>
#include <nsh/nsh_packet.h>
#include <vnet/ip/ip4_packet.h>
+#include <plugins/vxlan-gpe/vxlan_gpe.h>
typedef struct {
u16 class;
@@ -166,6 +167,10 @@ typedef struct {
/* convenience */
vlib_main_t * vlib_main;
vnet_main_t * vnet_main;
+
+ /* vxlan gpe plugin */
+ vxlan_gpe_main_t *vgm;
+
} nsh_main_t;
extern nsh_main_t nsh_main;
diff --git a/src/plugins/nsh/nsh_pop.c b/src/plugins/nsh/nsh_pop.c
index 8de319e158b..d66cfc9de27 100644
--- a/src/plugins/nsh/nsh_pop.c
+++ b/src/plugins/nsh/nsh_pop.c
@@ -19,7 +19,7 @@
#include <vnet/plugin/plugin.h>
#include <nsh/nsh.h>
#include <vnet/gre/packet.h>
-#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <plugins/vxlan-gpe/vxlan_gpe.h>
#include <vnet/l2/l2_classify.h>
#include <vlibapi/api.h>
diff --git a/src/plugins/ping/ping_api.c b/src/plugins/ping/ping_api.c
index 5578fa560f2..a5af1033d0e 100644
--- a/src/plugins/ping/ping_api.c
+++ b/src/plugins/ping/ping_api.c
@@ -122,16 +122,22 @@ vl_api_want_ping_finished_events_t_handler (
while ((sleep_interval =
time_ping_sent + ping_interval - vlib_time_now (vm)) > 0.0)
{
- uword event_type;
+ uword event_count;
vlib_process_wait_for_event_or_clock (vm, sleep_interval);
- event_type = vlib_process_get_events (vm, 0);
- if (event_type == ~0)
+ if (dst_addr.version == AF_IP4)
+ event_count =
+ vlib_process_get_events_with_type (vm, 0, PING_RESPONSE_IP4);
+ else if (dst_addr.version == AF_IP6)
+ event_count =
+ vlib_process_get_events_with_type (vm, 0, PING_RESPONSE_IP6);
+ else
break;
- if (event_type == PING_RESPONSE_IP4 ||
- event_type == PING_RESPONSE_IP6)
- reply_count += 1;
+ if (event_count == 0)
+ break;
+
+ reply_count += 1;
}
}
diff --git a/src/plugins/pppoe/pppoe_cp_node.c b/src/plugins/pppoe/pppoe_cp_node.c
index c96559679f0..efafc448f98 100644
--- a/src/plugins/pppoe/pppoe_cp_node.c
+++ b/src/plugins/pppoe/pppoe_cp_node.c
@@ -73,7 +73,7 @@ VLIB_NODE_FN (pppoe_cp_dispatch_node) (vlib_main_t * vm,
vnet_main_t * vnm = pem->vnet_main;
vnet_interface_main_t * im = &vnm->interface_main;
u32 pkts_decapsulated = 0;
- u32 thread_index = vlib_get_thread_index();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
pppoe_entry_key_t cached_key;
pppoe_entry_result_t cached_result;
diff --git a/src/plugins/pppoe/pppoe_decap.c b/src/plugins/pppoe/pppoe_decap.c
index 854364b1aca..17fbeaabb43 100644
--- a/src/plugins/pppoe/pppoe_decap.c
+++ b/src/plugins/pppoe/pppoe_decap.c
@@ -54,7 +54,7 @@ VLIB_NODE_FN (pppoe_input_node) (vlib_main_t * vm,
vnet_main_t * vnm = pem->vnet_main;
vnet_interface_main_t * im = &vnm->interface_main;
u32 pkts_decapsulated = 0;
- u32 thread_index = vlib_get_thread_index();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
pppoe_entry_key_t cached_key;
pppoe_entry_result_t cached_result;
diff --git a/src/plugins/prom/prom.c b/src/plugins/prom/prom.c
index 475e98b1038..0ddc96f7474 100644
--- a/src/plugins/prom/prom.c
+++ b/src/plugins/prom/prom.c
@@ -382,13 +382,16 @@ prom_stat_segment_client_init (void)
stat_segment_adjust (scm, (void *) scm->shared_header->directory_vector);
}
-void
+clib_error_t *
prom_enable (vlib_main_t *vm)
{
prom_main_t *pm = &prom_main;
pm->register_url = vlib_get_plugin_symbol ("http_static_plugin.so",
"hss_register_url_handler");
+ if (pm->register_url == 0)
+ return clib_error_return (0, "http_static_plugin.so not loaded");
+
pm->send_data =
vlib_get_plugin_symbol ("http_static_plugin.so", "hss_session_send_data");
pm->register_url (prom_stats_dump, "stats.prom", HTTP_REQ_GET);
@@ -400,6 +403,8 @@ prom_enable (vlib_main_t *vm)
prom_scraper_process_enable (vm);
prom_stat_segment_client_init ();
+
+ return 0;
}
static clib_error_t *
diff --git a/src/plugins/prom/prom.h b/src/plugins/prom/prom.h
index 898e4c209d1..a06302c1ff9 100644
--- a/src/plugins/prom/prom.h
+++ b/src/plugins/prom/prom.h
@@ -44,7 +44,7 @@ typedef enum prom_process_evt_codes_
PROM_SCRAPER_EVT_RUN,
} prom_process_evt_codes_t;
-void prom_enable (vlib_main_t *vm);
+clib_error_t *prom_enable (vlib_main_t *vm);
prom_main_t *prom_get_main (void);
void prom_stat_patterns_set (u8 **patterns);
diff --git a/src/plugins/prom/prom_cli.c b/src/plugins/prom/prom_cli.c
index 705e54ac1b8..09407d46235 100644
--- a/src/plugins/prom/prom_cli.c
+++ b/src/plugins/prom/prom_cli.c
@@ -131,7 +131,7 @@ prom_command_fn (vlib_main_t *vm, unformat_input_t *input,
no_input:
if (is_enable && !pm->is_enabled)
- prom_enable (vm);
+ return prom_enable (vm);
return 0;
}
diff --git a/src/plugins/pvti/input.c b/src/plugins/pvti/input.c
index 6a8806e2795..d7727153aa7 100644
--- a/src/plugins/pvti/input.c
+++ b/src/plugins/pvti/input.c
@@ -124,7 +124,7 @@ pvti_input_node_common (vlib_main_t *vm, vlib_node_runtime_t *node,
pvti_main_t *pvm = &pvti_main;
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
pvti_per_thread_data_t *ptd =
vec_elt_at_index (pvm->per_thread_data[is_ip6], thread_index);
diff --git a/src/plugins/pvti/output.c b/src/plugins/pvti/output.c
index 1939c6f585a..5fb6263795e 100644
--- a/src/plugins/pvti/output.c
+++ b/src/plugins/pvti/output.c
@@ -340,7 +340,7 @@ pvti_output_node_common (vlib_main_t *vm, vlib_node_runtime_t *node,
u8 stream_index = pvti_get_stream_index (is_ip6);
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
pvti_per_thread_data_t *ptd =
vec_elt_at_index (pvm->per_thread_data[is_ip6], thread_index);
diff --git a/src/plugins/pvti/pvti.h b/src/plugins/pvti/pvti.h
index ac097c5ecca..608610362d7 100644
--- a/src/plugins/pvti/pvti.h
+++ b/src/plugins/pvti/pvti.h
@@ -223,7 +223,7 @@ extern vlib_node_registration_t pvti_periodic_node;
always_inline u8
pvti_get_stream_index (int is_ip6)
{
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
ASSERT ((thread_index & 0xffffff80) == 0);
diff --git a/src/plugins/quic/quic.c b/src/plugins/quic/quic.c
index 10651f10e7e..77d35634fa9 100644
--- a/src/plugins/quic/quic.c
+++ b/src/plugins/quic/quic.c
@@ -51,7 +51,8 @@ static void quic_update_timer (quic_ctx_t * ctx);
static void quic_check_quic_session_connected (quic_ctx_t * ctx);
static int quic_reset_connection (u64 udp_session_handle,
quic_rx_packet_ctx_t * pctx);
-static void quic_proto_on_close (u32 ctx_index, u32 thread_index);
+static void quic_proto_on_close (u32 ctx_index,
+ clib_thread_index_t thread_index);
static quicly_stream_open_t on_stream_open;
static quicly_closed_by_remote_t on_closed_by_remote;
@@ -133,7 +134,7 @@ quic_crypto_context_alloc (u8 thread_index)
}
static crypto_context_t *
-quic_crypto_context_get (u32 cr_index, u32 thread_index)
+quic_crypto_context_get (u32 cr_index, clib_thread_index_t thread_index)
{
quic_main_t *qm = &quic_main;
ASSERT (cr_index >> 24 == thread_index);
@@ -381,7 +382,7 @@ error:
/* Helper functions */
static u32
-quic_ctx_alloc (u32 thread_index)
+quic_ctx_alloc (clib_thread_index_t thread_index)
{
quic_main_t *qm = &quic_main;
quic_ctx_t *ctx;
@@ -401,7 +402,7 @@ static void
quic_ctx_free (quic_ctx_t * ctx)
{
QUIC_DBG (2, "Free ctx %u %x", ctx->c_thread_index, ctx->c_c_index);
- u32 thread_index = ctx->c_thread_index;
+ clib_thread_index_t thread_index = ctx->c_thread_index;
QUIC_ASSERT (ctx->timer_handle == QUIC_TIMER_HANDLE_INVALID);
if (CLIB_DEBUG)
clib_memset (ctx, 0xfb, sizeof (*ctx));
@@ -409,13 +410,13 @@ quic_ctx_free (quic_ctx_t * ctx)
}
static quic_ctx_t *
-quic_ctx_get (u32 ctx_index, u32 thread_index)
+quic_ctx_get (u32 ctx_index, clib_thread_index_t thread_index)
{
return pool_elt_at_index (quic_main.ctx_pool[thread_index], ctx_index);
}
static quic_ctx_t *
-quic_ctx_get_if_valid (u32 ctx_index, u32 thread_index)
+quic_ctx_get_if_valid (u32 ctx_index, clib_thread_index_t thread_index)
{
if (pool_is_free_index (quic_main.ctx_pool[thread_index], ctx_index))
return 0;
@@ -1100,7 +1101,7 @@ quic_get_time (quicly_now_t * self)
}
static u32
-quic_set_time_now (u32 thread_index)
+quic_set_time_now (clib_thread_index_t thread_index)
{
vlib_main_t *vlib_main = vlib_get_main ();
f64 time = vlib_time_now (vlib_main);
@@ -1396,7 +1397,7 @@ quic_connect (transport_endpoint_cfg_t * tep)
}
static void
-quic_proto_on_close (u32 ctx_index, u32 thread_index)
+quic_proto_on_close (u32 ctx_index, clib_thread_index_t thread_index)
{
int err;
quic_ctx_t *ctx = quic_ctx_get_if_valid (ctx_index, thread_index);
@@ -1548,7 +1549,7 @@ quic_stop_listen (u32 lctx_index)
}
static transport_connection_t *
-quic_connection_get (u32 ctx_index, u32 thread_index)
+quic_connection_get (u32 ctx_index, clib_thread_index_t thread_index)
{
quic_ctx_t *ctx;
ctx = quic_ctx_get (ctx_index, thread_index);
@@ -1600,7 +1601,7 @@ static u8 *
format_quic_connection (u8 * s, va_list * args)
{
u32 qc_index = va_arg (*args, u32);
- u32 thread_index = va_arg (*args, u32);
+ clib_thread_index_t thread_index = va_arg (*args, u32);
u32 verbose = va_arg (*args, u32);
quic_ctx_t *ctx = quic_ctx_get (qc_index, thread_index);
s = format (s, "%U", format_quic_ctx, ctx, verbose);
@@ -1611,7 +1612,7 @@ static u8 *
format_quic_half_open (u8 * s, va_list * args)
{
u32 qc_index = va_arg (*args, u32);
- u32 thread_index = va_arg (*args, u32);
+ clib_thread_index_t thread_index = va_arg (*args, u32);
quic_ctx_t *ctx = quic_ctx_get (qc_index, thread_index);
s = format (s, "[#%d][Q] half-open app %u", thread_index,
ctx->parent_app_id);
@@ -1623,7 +1624,7 @@ static u8 *
format_quic_listener (u8 * s, va_list * args)
{
u32 tci = va_arg (*args, u32);
- u32 thread_index = va_arg (*args, u32);
+ clib_thread_index_t thread_index = va_arg (*args, u32);
u32 verbose = va_arg (*args, u32);
quic_ctx_t *ctx = quic_ctx_get (tci, thread_index);
s = format (s, "%U", format_quic_ctx, ctx, verbose);
@@ -1660,7 +1661,7 @@ quic_on_quic_session_connected (quic_ctx_t * ctx)
session_t *quic_session;
app_worker_t *app_wrk;
u32 ctx_id = ctx->c_c_index;
- u32 thread_index = ctx->c_thread_index;
+ clib_thread_index_t thread_index = ctx->c_thread_index;
int rv;
quic_session = session_alloc (thread_index);
@@ -1775,7 +1776,7 @@ static void
quic_transfer_connection (u32 ctx_index, u32 dest_thread)
{
quic_ctx_t *ctx, *temp_ctx;
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
QUIC_DBG (2, "Transferring conn %u to thread %u", ctx_index, dest_thread);
@@ -1811,7 +1812,7 @@ quic_udp_session_connected_callback (u32 quic_app_index, u32 ctx_index,
app_worker_t *app_wrk;
quicly_conn_t *conn;
quic_ctx_t *ctx;
- u32 thread_index;
+ clib_thread_index_t thread_index;
int ret;
quicly_context_t *quicly_ctx;
@@ -1918,7 +1919,7 @@ quic_udp_session_accepted_callback (session_t * udp_session)
u32 ctx_index;
quic_ctx_t *ctx, *lctx;
session_t *udp_listen_session;
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
udp_listen_session =
listen_session_get_from_handle (udp_session->listener_handle);
@@ -2199,7 +2200,7 @@ quic_process_one_rx_packet (u64 udp_session_handle, svm_fifo_t * f,
{
size_t plen;
u32 full_len, ret;
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
u32 cur_deq = svm_fifo_max_dequeue (f) - fifo_offset;
quicly_context_t *quicly_ctx;
session_t *udp_session;
@@ -2281,7 +2282,7 @@ quic_udp_session_rx_callback (session_t * udp_session)
u32 max_deq;
u64 udp_session_handle = session_handle (udp_session);
int rv = 0;
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
u32 cur_deq, fifo_offset, max_packets, i;
quic_rx_packet_ctx_t packets_ctx[QUIC_RCV_MAX_PACKETS];
@@ -2306,7 +2307,7 @@ rx_start:
#endif
for (i = 0; i < max_packets; i++)
{
- packets_ctx[i].thread_index = UINT32_MAX;
+ packets_ctx[i].thread_index = CLIB_INVALID_THREAD_INDEX;
packets_ctx[i].ctx_index = UINT32_MAX;
packets_ctx[i].ptype = QUIC_PACKET_TYPE_DROP;
@@ -2421,8 +2422,8 @@ quic_get_transport_listener_endpoint (u32 listener_index,
}
static void
-quic_get_transport_endpoint (u32 ctx_index, u32 thread_index,
- transport_endpoint_t * tep, u8 is_lcl)
+quic_get_transport_endpoint (u32 ctx_index, clib_thread_index_t thread_index,
+ transport_endpoint_t *tep, u8 is_lcl)
{
quic_ctx_t *ctx;
ctx = quic_ctx_get (ctx_index, thread_index);
diff --git a/src/plugins/quic/quic.h b/src/plugins/quic/quic.h
index 081bcb120e9..4474aa15e75 100644
--- a/src/plugins/quic/quic.h
+++ b/src/plugins/quic/quic.h
@@ -205,7 +205,7 @@ typedef struct quic_session_cache_
typedef struct quic_stream_data_
{
u32 ctx_id;
- u32 thread_index;
+ clib_thread_index_t thread_index;
u32 app_rx_data_len; /**< bytes received, to be read by external app */
u32 app_tx_data_len; /**< bytes sent */
} quic_stream_data_t;
@@ -232,7 +232,7 @@ typedef struct quic_rx_packet_ctx_
quicly_decoded_packet_t packet;
u8 data[QUIC_MAX_PACKET_SIZE];
u32 ctx_index;
- u32 thread_index;
+ clib_thread_index_t thread_index;
union
{
struct sockaddr sa;
diff --git a/src/plugins/quic/quic_crypto.c b/src/plugins/quic/quic_crypto.c
index 9e2c915daaa..4e11eff2431 100644
--- a/src/plugins/quic/quic_crypto.c
+++ b/src/plugins/quic/quic_crypto.c
@@ -248,8 +248,7 @@ quic_crypto_decrypt_packet (quic_ctx_t *qctx, quic_rx_packet_ctx_t *pctx)
pctx->packet.octets.len - aead_off, pn, pctx->packet.octets.base,
aead_off)) == SIZE_MAX)
{
- fprintf (stderr, "%s: aead decryption failure (pn: %d)\n", __FUNCTION__,
- pn);
+ fprintf (stderr, "%s: aead decryption failure (pn: %d)\n", __func__, pn);
return;
}
@@ -349,8 +348,7 @@ quic_crypto_cipher_setup_crypto (ptls_cipher_context_t *_ctx, int is_enc,
}
else
{
- QUIC_DBG (1, "%s, Invalid crypto cipher : ", __FUNCTION__,
- _ctx->algo->name);
+ QUIC_DBG (1, "%s, Invalid crypto cipher : ", __func__, _ctx->algo->name);
assert (0);
}
@@ -405,8 +403,7 @@ quic_crypto_aead_setup_crypto (ptls_aead_context_t *_ctx, int is_enc,
}
else
{
- QUIC_DBG (1, "%s, invalied aead cipher %s", __FUNCTION__,
- _ctx->algo->name);
+ QUIC_DBG (1, "%s, invalied aead cipher %s", __func__, _ctx->algo->name);
assert (0);
}
diff --git a/src/plugins/rdma/device.c b/src/plugins/rdma/device.c
index 8aeb586a42d..a4dbdb02831 100644
--- a/src/plugins/rdma/device.c
+++ b/src/plugins/rdma/device.c
@@ -23,7 +23,7 @@
#include <vppinfra/linux/sysfs.h>
#include <vlib/vlib.h>
-#include <vlib/unix/unix.h>
+#include <vlib/file.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/interface/rx_queue_funcs.h>
diff --git a/src/plugins/sflow/sflow.c b/src/plugins/sflow/sflow.c
index 02a74d2c7f5..14d07d69233 100644
--- a/src/plugins/sflow/sflow.c
+++ b/src/plugins/sflow/sflow.c
@@ -246,7 +246,8 @@ total_drops (sflow_main_t *smp)
{
// sum sendmsg and worker-fifo drops
u32 all_drops = smp->psample_send_drops;
- for (u32 thread_index = 0; thread_index < smp->total_threads; thread_index++)
+ for (clib_thread_index_t thread_index = 0; thread_index < smp->total_threads;
+ thread_index++)
{
sflow_per_thread_data_t *sfwk =
vec_elt_at_index (smp->per_thread_data, thread_index);
@@ -321,8 +322,8 @@ read_worker_fifos (sflow_main_t *smp)
for (; batch < SFLOW_READ_BATCH; batch++)
{
u32 psample_send = 0, psample_send_fail = 0;
- for (u32 thread_index = 0; thread_index < smp->total_threads;
- thread_index++)
+ for (clib_thread_index_t thread_index = 0;
+ thread_index < smp->total_threads; thread_index++)
{
sflow_per_thread_data_t *sfwk =
vec_elt_at_index (smp->per_thread_data, thread_index);
@@ -388,7 +389,8 @@ read_node_counters (sflow_main_t *smp, sflow_err_ctrs_t *ctrs)
{
for (u32 ec = 0; ec < SFLOW_N_ERROR; ec++)
ctrs->counters[ec] = 0;
- for (u32 thread_index = 0; thread_index < smp->total_threads; thread_index++)
+ for (clib_thread_index_t thread_index = 0; thread_index < smp->total_threads;
+ thread_index++)
{
sflow_per_thread_data_t *sfwk =
vec_elt_at_index (smp->per_thread_data, thread_index);
@@ -487,7 +489,8 @@ sflow_set_worker_sampling_state (sflow_main_t *smp)
vlib_thread_main_t *tm = &vlib_thread_main;
smp->total_threads = 1 + tm->n_threads;
vec_validate (smp->per_thread_data, smp->total_threads);
- for (u32 thread_index = 0; thread_index < smp->total_threads; thread_index++)
+ for (clib_thread_index_t thread_index = 0; thread_index < smp->total_threads;
+ thread_index++)
{
sflow_per_thread_data_t *sfwk =
vec_elt_at_index (smp->per_thread_data, thread_index);
diff --git a/src/plugins/snort/enqueue.c b/src/plugins/snort/enqueue.c
index 84efb4d432f..ae04c58bba0 100644
--- a/src/plugins/snort/enqueue.c
+++ b/src/plugins/snort/enqueue.c
@@ -93,7 +93,7 @@ snort_enq_node_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
snort_main_t *sm = &snort_main;
snort_instance_t *si = 0;
snort_qpair_t *qp = 0;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 n_left = frame->n_vectors;
u32 n_trace = 0;
u32 total_enq = 0, n_unprocessed = 0;
diff --git a/src/plugins/snort/main.c b/src/plugins/snort/main.c
index 9bab1185b60..c87ecfd7ebd 100644
--- a/src/plugins/snort/main.c
+++ b/src/plugins/snort/main.c
@@ -392,6 +392,18 @@ snort_instance_create (vlib_main_t *vm, char *name, u8 log2_queue_sz,
u8 align = CLIB_CACHE_LINE_BYTES;
int rv = 0;
+ if (sm->listener == 0)
+ {
+ clib_error_t *err;
+ err = snort_listener_init (vm);
+ if (err)
+ {
+ log_err ("listener init failed: %U", format_clib_error, err);
+ clib_error_free (err);
+ return VNET_API_ERROR_INIT_FAILED;
+ }
+ }
+
if (snort_get_instance_by_name (name))
return VNET_API_ERROR_ENTRY_ALREADY_EXISTS;
@@ -831,7 +843,7 @@ snort_init (vlib_main_t *vm)
if (!sm->socket_name)
snort_set_default_socket (sm, 0);
- return snort_listener_init (vm);
+ return 0;
}
VLIB_INIT_FUNCTION (snort_init);
diff --git a/src/plugins/srtp/srtp.c b/src/plugins/srtp/srtp.c
index 5426b7aa03f..f86b7be980e 100644
--- a/src/plugins/srtp/srtp.c
+++ b/src/plugins/srtp/srtp.c
@@ -19,11 +19,11 @@
static srtp_main_t srtp_main;
-static void srtp_disconnect (u32 ctx_handle, u32 thread_index);
+static void srtp_disconnect (u32 ctx_handle, clib_thread_index_t thread_index);
static void srtp_disconnect_transport (srtp_tc_t *ctx);
static inline u32
-srtp_ctx_alloc_w_thread (u32 thread_index)
+srtp_ctx_alloc_w_thread (clib_thread_index_t thread_index)
{
srtp_tc_t *ctx;
pool_get_aligned_safe (srtp_main.ctx_pool[thread_index], ctx,
@@ -36,7 +36,7 @@ srtp_ctx_alloc_w_thread (u32 thread_index)
}
static inline srtp_tc_t *
-srtp_ctx_get_w_thread (u32 ctx_index, u32 thread_index)
+srtp_ctx_get_w_thread (u32 ctx_index, clib_thread_index_t thread_index)
{
return pool_elt_at_index (srtp_main.ctx_pool[thread_index], ctx_index);
}
@@ -82,7 +82,7 @@ srtp_ctx_free (srtp_tc_t *ctx)
}
static inline u32
-srtp_ctx_attach (u32 thread_index, void *ctx_ptr)
+srtp_ctx_attach (clib_thread_index_t thread_index, void *ctx_ptr)
{
srtp_tc_t *ctx;
@@ -688,7 +688,7 @@ srtp_disconnect_transport (srtp_tc_t *ctx)
}
static void
-srtp_disconnect (u32 ctx_handle, u32 thread_index)
+srtp_disconnect (u32 ctx_handle, clib_thread_index_t thread_index)
{
session_t *app_session;
srtp_tc_t *ctx;
@@ -801,7 +801,7 @@ srtp_stop_listen (u32 lctx_index)
}
transport_connection_t *
-srtp_connection_get (u32 ctx_index, u32 thread_index)
+srtp_connection_get (u32 ctx_index, clib_thread_index_t thread_index)
{
srtp_tc_t *ctx;
ctx = srtp_ctx_get_w_thread (ctx_index, thread_index);
@@ -895,7 +895,7 @@ u8 *
format_srtp_connection (u8 *s, va_list *args)
{
u32 ctx_index = va_arg (*args, u32);
- u32 thread_index = va_arg (*args, u32);
+ clib_thread_index_t thread_index = va_arg (*args, u32);
u32 verbose = va_arg (*args, u32);
srtp_tc_t *ctx;
@@ -935,7 +935,7 @@ format_srtp_half_open (u8 *s, va_list *args)
}
static void
-srtp_transport_endpoint_get (u32 ctx_handle, u32 thread_index,
+srtp_transport_endpoint_get (u32 ctx_handle, clib_thread_index_t thread_index,
transport_endpoint_t *tep, u8 is_lcl)
{
srtp_tc_t *ctx = srtp_ctx_get_w_thread (ctx_handle, thread_index);
diff --git a/src/plugins/srv6-ad-flow/node.c b/src/plugins/srv6-ad-flow/node.c
index 66be2dc7972..28fbc105d84 100644
--- a/src/plugins/srv6-ad-flow/node.c
+++ b/src/plugins/srv6-ad-flow/node.c
@@ -583,7 +583,7 @@ srv6_ad_flow_localsid_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
ip6_sr_main_t *srm = &sr_main;
f64 now = vlib_time_now (vm);
u32 n_left_from, next_index, *from, *to_next, n_left_to_next;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
diff --git a/src/plugins/srv6-am/node.c b/src/plugins/srv6-am/node.c
index beef6a30910..012afda581b 100644
--- a/src/plugins/srv6-am/node.c
+++ b/src/plugins/srv6-am/node.c
@@ -147,7 +147,7 @@ srv6_am_localsid_fn (vlib_main_t * vm,
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
while (n_left_from > 0)
{
diff --git a/src/plugins/srv6-mobile/node.c b/src/plugins/srv6-mobile/node.c
index ed0697a8009..c8f619cf044 100644
--- a/src/plugins/srv6-mobile/node.c
+++ b/src/plugins/srv6-mobile/node.c
@@ -325,7 +325,7 @@ VLIB_NODE_FN (srv6_end_m_gtp4_e)
srv6_end_main_v4_t *sm = &srv6_end_main_v4;
ip6_sr_main_t *sm2 = &sr_main;
u32 n_left_from, next_index, *from, *to_next;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 good_n = 0, bad_n = 0;
@@ -1327,7 +1327,7 @@ VLIB_NODE_FN (srv6_end_m_gtp6_e)
srv6_end_main_v6_t *sm = &srv6_end_main_v6;
ip6_sr_main_t *sm2 = &sr_main;
u32 n_left_from, next_index, *from, *to_next;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 good_n = 0, bad_n = 0;
@@ -2088,7 +2088,7 @@ VLIB_NODE_FN (srv6_end_m_gtp6_d)
srv6_end_main_v6_decap_t *sm = &srv6_end_main_v6_decap;
ip6_sr_main_t *sm2 = &sr_main;
u32 n_left_from, next_index, *from, *to_next;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
ip6_sr_localsid_t *ls0;
srv6_end_gtp6_d_param_t *ls_param;
@@ -2238,7 +2238,7 @@ VLIB_NODE_FN (srv6_end_m_gtp6_d_di)
srv6_end_main_v6_decap_di_t *sm = &srv6_end_main_v6_decap_di;
ip6_sr_main_t *sm2 = &sr_main;
u32 n_left_from, next_index, *from, *to_next;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
srv6_end_gtp6_d_param_t *ls_param;
u32 good_n = 0, bad_n = 0;
@@ -2686,7 +2686,7 @@ VLIB_NODE_FN (srv6_end_m_gtp6_dt)
srv6_end_main_v6_dt_t *sm = &srv6_end_main_v6_dt;
ip6_sr_main_t *sm2 = &sr_main;
u32 n_left_from, next_index, *from, *to_next;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 good_n = 0, bad_n = 0;
diff --git a/src/plugins/tlsmbedtls/tls_mbedtls.c b/src/plugins/tlsmbedtls/tls_mbedtls.c
index 2f4757e28a1..44d48b1edb4 100644
--- a/src/plugins/tlsmbedtls/tls_mbedtls.c
+++ b/src/plugins/tlsmbedtls/tls_mbedtls.c
@@ -127,7 +127,7 @@ mbedtls_ctx_get_w_thread (u32 ctx_index, u8 thread_index)
static int
tls_init_ctr_seed_drbgs (void)
{
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
mbedtls_main_t *tm = &mbedtls_main;
u8 *pers;
int rv;
diff --git a/src/plugins/tlsopenssl/tls_async.c b/src/plugins/tlsopenssl/tls_async.c
index cd08da5d9ea..ecc24dea236 100644
--- a/src/plugins/tlsopenssl/tls_async.c
+++ b/src/plugins/tlsopenssl/tls_async.c
@@ -82,7 +82,8 @@ typedef struct openssl_async_queue_
typedef struct openssl_async_
{
openssl_evt_t ***evt_pool;
- openssl_async_queue_t *queue;
+ openssl_async_queue_t *queue_rd;
+ openssl_async_queue_t *queue_wr;
openssl_async_queue_t *queue_in_init;
void (*polling) (void);
u8 start_polling;
@@ -114,8 +115,8 @@ openssl_async_t openssl_async_main;
static vlib_node_registration_t tls_async_process_node;
/* to avoid build warning */
-void session_send_rpc_evt_to_thread (u32 thread_index, void *fp,
- void *rpc_args);
+void session_send_rpc_evt_to_thread (clib_thread_index_t thread_index,
+ void *fp, void *rpc_args);
void
evt_pool_init (vlib_main_t * vm)
@@ -129,7 +130,8 @@ evt_pool_init (vlib_main_t * vm)
TLS_DBG (2, "Totally there is %d thread\n", num_threads);
vec_validate (om->evt_pool, num_threads - 1);
- vec_validate (om->queue, num_threads - 1);
+ vec_validate (om->queue_rd, num_threads - 1);
+ vec_validate (om->queue_wr, num_threads - 1);
vec_validate (om->queue_in_init, num_threads - 1);
om->start_polling = 0;
@@ -137,9 +139,13 @@ evt_pool_init (vlib_main_t * vm)
for (i = 0; i < num_threads; i++)
{
- om->queue[i].evt_run_head = -1;
- om->queue[i].evt_run_tail = -1;
- om->queue[i].depth = 0;
+ om->queue_rd[i].evt_run_head = -1;
+ om->queue_rd[i].evt_run_tail = -1;
+ om->queue_rd[i].depth = 0;
+
+ om->queue_wr[i].evt_run_head = -1;
+ om->queue_wr[i].evt_run_tail = -1;
+ om->queue_wr[i].depth = 0;
om->queue_in_init[i].evt_run_head = -1;
om->queue_in_init[i].evt_run_tail = -1;
@@ -150,6 +156,48 @@ evt_pool_init (vlib_main_t * vm)
return;
}
+void
+tls_async_evts_init_list (tls_async_ctx_t *ctx)
+{
+ clib_llist_index_t head_idx;
+ head_idx = clib_llist_make_head (ctx->rd_evt_list, anchor);
+
+ ctx->rd_evt_head_index = head_idx;
+ head_idx = clib_llist_make_head (ctx->wr_evt_list, anchor);
+ ctx->wr_evt_head_index = head_idx;
+ head_idx = clib_llist_make_head (ctx->hs_evt_list, anchor);
+ ctx->hs_evt_head_index = head_idx;
+}
+
+#define REMOVE_ASYNC_EVTS_FROM_LIST(EVT_LIST, LIST_HEAD, HEAD_IDX) \
+ LIST_HEAD = clib_llist_elt (EVT_LIST, HEAD_IDX); \
+ clib_llist_foreach (EVT_LIST, anchor, LIST_HEAD, elt, ({ \
+ TLS_DBG (3, "Removing Read EIDx: %d", elt->eidx); \
+ openssl_evt_free (elt->eidx, ctx->c_thread_index); \
+ clib_llist_remove (EVT_LIST, anchor, elt); \
+ clib_llist_put (EVT_LIST, elt); \
+ }));
+
+void
+tls_async_evts_free_list (tls_ctx_t *ctx)
+{
+ async_evt_list *elt;
+ async_evt_list *list_head;
+ openssl_ctx_t *oc = (openssl_ctx_t *) ctx;
+ tls_async_ctx_t *async_ctx = &oc->async_ctx;
+
+ REMOVE_ASYNC_EVTS_FROM_LIST (async_ctx->rd_evt_list, list_head,
+ async_ctx->rd_evt_head_index)
+ REMOVE_ASYNC_EVTS_FROM_LIST (async_ctx->wr_evt_list, list_head,
+ async_ctx->wr_evt_head_index)
+ REMOVE_ASYNC_EVTS_FROM_LIST (async_ctx->hs_evt_list, list_head,
+ async_ctx->hs_evt_head_index)
+
+ clib_llist_free (async_ctx->rd_evt_list);
+ clib_llist_free (async_ctx->wr_evt_list);
+ clib_llist_free (async_ctx->hs_evt_list);
+}
+
int
openssl_engine_register (char *engine_name, char *algorithm, int async)
{
@@ -289,14 +337,25 @@ tls_async_openssl_callback (SSL * s, void *cb_arg)
ssl_async_evt_type_t evt_type = args->async_evt_type;
int *evt_run_tail, *evt_run_head;
- TLS_DBG (2, "Set event %d to run\n", event_index);
+ TLS_DBG (1, "Event Type: %d event index %d is ready to run\n", evt_type,
+ event_index);
event = openssl_evt_get_w_thread (event_index, thread_index);
- if (evt_type == SSL_ASYNC_EVT_INIT)
- queue = om->queue_in_init;
- else
- queue = om->queue;
-
+ switch (evt_type)
+ {
+ case SSL_ASYNC_EVT_INIT:
+ queue = om->queue_in_init;
+ break;
+ case SSL_ASYNC_EVT_RD:
+ queue = om->queue_rd;
+ break;
+ case SSL_ASYNC_EVT_WR:
+ queue = om->queue_wr;
+ break;
+ default:
+ clib_warning ("Invalid evt type:");
+ return 0;
+ }
evt_run_tail = &queue[thread_index].evt_run_tail;
evt_run_head = &queue[thread_index].evt_run_head;
@@ -343,7 +402,7 @@ openssl_async_write_from_fifo_into_ssl (svm_fifo_t *f, SSL *ssl,
wrote = SSL_write (ssl, NULL, 0);
ossl_check_err_is_fatal (ssl, wrote);
- oc->total_async_write -= wrote;
+ oc->async_ctx.total_async_write -= wrote;
svm_fifo_dequeue_drop (f, wrote);
return wrote;
@@ -363,6 +422,14 @@ openssl_async_read_from_ssl_into_fifo (svm_fifo_t *f, SSL *ssl)
return read;
}
+#define ADD_ASYNC_EVT_TO_LIST(LIST, HEAD_IDX) \
+ head_idx = HEAD_IDX; \
+ clib_llist_get (LIST, elt); \
+ list_head = clib_llist_elt (LIST, head_idx); \
+ elt->eidx = eidx; \
+ clib_llist_add (LIST, anchor, elt, list_head); \
+ HEAD_IDX = head_idx;
+
int
vpp_tls_async_init_event (tls_ctx_t *ctx, openssl_resume_handler *handler,
session_t *session, ssl_async_evt_type_t evt_type,
@@ -370,25 +437,34 @@ vpp_tls_async_init_event (tls_ctx_t *ctx, openssl_resume_handler *handler,
{
u32 eidx;
openssl_evt_t *event = NULL;
+ clib_llist_index_t head_idx;
+ async_evt_list *elt;
+ async_evt_list *list_head;
openssl_ctx_t *oc = (openssl_ctx_t *) ctx;
u32 thread_id = ctx->c_thread_index;
- if (oc->evt_alloc_flag[evt_type])
+ eidx = openssl_evt_alloc ();
+ TLS_DBG (1, "Event: %d for Type %d allocated", eidx, evt_type);
+ if (evt_type == SSL_ASYNC_EVT_RD)
{
- eidx = oc->evt_index[evt_type];
- if (evt_type == SSL_ASYNC_EVT_WR)
- {
- event = openssl_evt_get (eidx);
- goto update_wr_evnt;
- }
- return 1;
+ ADD_ASYNC_EVT_TO_LIST (oc->async_ctx.rd_evt_list,
+ oc->async_ctx.rd_evt_head_index)
+ }
+ else if (evt_type == SSL_ASYNC_EVT_WR)
+ {
+ ADD_ASYNC_EVT_TO_LIST (oc->async_ctx.wr_evt_list,
+ oc->async_ctx.wr_evt_head_index)
+ }
+ else if (evt_type == SSL_ASYNC_EVT_INIT)
+ {
+ ADD_ASYNC_EVT_TO_LIST (oc->async_ctx.hs_evt_list,
+ oc->async_ctx.hs_evt_head_index)
}
else
{
- eidx = openssl_evt_alloc ();
- oc->evt_alloc_flag[evt_type] = true;
+ clib_warning ("INVALID EVENT");
+ return 0;
}
-
event = openssl_evt_get (eidx);
event->ctx_index = oc->openssl_ctx_index;
/* async call back args */
@@ -397,38 +473,49 @@ vpp_tls_async_init_event (tls_ctx_t *ctx, openssl_resume_handler *handler,
event->async_event_type = evt_type;
event->async_evt_handler = handler;
event->session_index = session->session_index;
- event->status = SSL_ASYNC_INVALID_STATUS;
- oc->evt_index[evt_type] = eidx;
+ event->status = SSL_ASYNC_INFLIGHT;
#ifdef HAVE_OPENSSL_ASYNC
SSL_set_async_callback_arg (oc->ssl, &event->cb_args);
#endif
-update_wr_evnt:
if (evt_type == SSL_ASYNC_EVT_WR)
{
transport_connection_deschedule (&ctx->connection);
sp->flags |= TRANSPORT_SND_F_DESCHED;
- oc->total_async_write = wr_size;
+ oc->async_ctx.total_async_write = wr_size;
}
event->tran_sp = sp;
return 1;
}
+/* Iterates through the list and checks if the async event status is
+ * in flight. If the event is inflight, returns 1.
+ */
+#define CHECK_EVT_IS_INFLIGHT_IN_LIST(EVT_LIST, HEAD_IDX) \
+ list_head = clib_llist_elt (EVT_LIST, HEAD_IDX); \
+ clib_llist_foreach (EVT_LIST, anchor, list_head, elt, ({ \
+ event = openssl_evt_get (elt->eidx); \
+ if (event->status == SSL_ASYNC_INFLIGHT) \
+ return 1; \
+ }));
+
int
vpp_openssl_is_inflight (tls_ctx_t *ctx)
{
- u32 eidx;
openssl_ctx_t *oc = (openssl_ctx_t *) ctx;
openssl_evt_t *event;
- int i;
-
- for (i = SSL_ASYNC_EVT_INIT; i < SSL_ASYNC_EVT_MAX; i++)
- {
- eidx = oc->evt_index[i];
- event = openssl_evt_get (eidx);
-
- if (event->status == SSL_ASYNC_INFLIGHT)
- return 1;
- }
+ async_evt_list *elt;
+ async_evt_list *list_head;
+ tls_async_ctx_t *async_ctx = &oc->async_ctx;
+
+ /* Check for read events */
+ CHECK_EVT_IS_INFLIGHT_IN_LIST (async_ctx->rd_evt_list,
+ async_ctx->rd_evt_head_index)
+ /* Check for write events */
+ CHECK_EVT_IS_INFLIGHT_IN_LIST (async_ctx->wr_evt_list,
+ async_ctx->wr_evt_head_index)
+ /* Check for Handshake events */
+ CHECK_EVT_IS_INFLIGHT_IN_LIST (async_ctx->hs_evt_list,
+ async_ctx->hs_evt_head_index)
return 0;
}
@@ -528,7 +615,7 @@ openssl_async_node_enable_disable (u8 is_en)
}
int
-tls_async_do_job (int eidx, u32 thread_index)
+tls_async_do_job (int eidx, clib_thread_index_t thread_index)
{
tls_ctx_t *ctx;
openssl_evt_t *event;
@@ -593,18 +680,28 @@ resume_handshake_events (int thread_index)
}
void
-resume_read_write_events (int thread_index)
+resume_read_events (int thread_index)
+{
+ openssl_async_t *om = &openssl_async_main;
+
+ openssl_async_queue_t *queue = om->queue_rd;
+ handle_async_cb_events (queue, thread_index);
+}
+
+void
+resume_write_events (int thread_index)
{
openssl_async_t *om = &openssl_async_main;
- openssl_async_queue_t *queue = om->queue;
+ openssl_async_queue_t *queue = om->queue_wr;
handle_async_cb_events (queue, thread_index);
}
int
tls_resume_from_crypto (int thread_index)
{
- resume_read_write_events (thread_index);
+ resume_read_events (thread_index);
+ resume_write_events (thread_index);
resume_handshake_events (thread_index);
return 0;
}
@@ -624,11 +721,11 @@ tls_async_handshake_event_handler (void *async_evt, void *unused)
openssl_ctx_t *oc;
tls_ctx_t *ctx;
int rv, err;
+ session_t *tls_session;
ASSERT (thread_index == vlib_get_thread_index ());
ctx = openssl_ctx_get_w_thread (event->ctx_index, thread_index);
oc = (openssl_ctx_t *) ctx;
- session_t *tls_session = session_get_from_handle (ctx->tls_session_handle);
if (!SSL_in_init (oc->ssl))
{
@@ -637,6 +734,14 @@ tls_async_handshake_event_handler (void *async_evt, void *unused)
return 0;
}
+ tls_session = session_get_from_handle (ctx->tls_session_handle);
+ if (tls_session->session_state >= SESSION_STATE_APP_CLOSED)
+ {
+ SSL_shutdown (oc->ssl);
+ TLS_DBG (2, "Session State: App Closed");
+ return 0;
+ }
+
if (ctx->flags & TLS_CONN_F_RESUME)
{
ctx->flags &= ~TLS_CONN_F_RESUME;
@@ -686,6 +791,9 @@ tls_async_handshake_event_handler (void *async_evt, void *unused)
ctx->flags |= TLS_CONN_F_HS_DONE;
+ /* Read early data */
+ openssl_ctx_read_tls (ctx, tls_session);
+
return 1;
}
@@ -698,6 +806,7 @@ tls_async_read_event_handler (void *async_evt, void *unused)
openssl_ctx_t *oc;
tls_ctx_t *ctx;
SSL *ssl;
+ const u32 max_len = 128 << 10;
ASSERT (thread_index == vlib_get_thread_index ());
ctx = openssl_ctx_get_w_thread (event->ctx_index, thread_index);
@@ -710,9 +819,21 @@ tls_async_read_event_handler (void *async_evt, void *unused)
int read, err;
app_session = session_get_from_handle (ctx->app_session_handle);
+ if ((app_session->flags & SESSION_F_APP_CLOSED))
+ {
+ TLS_DBG (1, "App Closed");
+ SSL_shutdown (oc->ssl);
+ return 0;
+ }
app_rx_fifo = app_session->rx_fifo;
tls_session = session_get_from_handle (ctx->tls_session_handle);
+ if (tls_session->session_state >= SESSION_STATE_APP_CLOSED)
+ {
+ SSL_shutdown (oc->ssl);
+ TLS_DBG (2, "Session State: App Closed");
+ return 0;
+ }
tls_rx_fifo = tls_session->rx_fifo;
/* continue the paused job */
@@ -743,6 +864,27 @@ tls_async_read_event_handler (void *async_evt, void *unused)
if (app_session->session_state >= SESSION_STATE_READY)
tls_notify_app_enqueue (ctx, app_session);
+ /* Try to read more data */
+ while (read > 0)
+ {
+ read = openssl_read_from_ssl_into_fifo (app_rx_fifo, ctx, max_len);
+ if (!read)
+ goto ev_rd_done;
+
+ /* Unrecoverable protocol error. Reset connection */
+ if (PREDICT_FALSE ((read < 0) &&
+ (SSL_get_error (ssl, read) == SSL_ERROR_SSL)))
+ {
+ TLS_DBG (2, "Unrecoverable protocol error");
+ goto ev_rd_done;
+ }
+
+ /* If handshake just completed, session may still be in accepting
+ * state */
+ if (read >= 0 && app_session->session_state >= SESSION_STATE_READY)
+ tls_notify_app_enqueue (ctx, app_session);
+ }
+
ev_rd_done:
/* read done */
ctx->flags &= ~TLS_CONN_F_ASYNC_RD;
@@ -767,7 +909,6 @@ tls_async_write_event_handler (void *async_evt, void *unused)
ctx = openssl_ctx_get_w_thread (event->ctx_index, thread_index);
oc = (openssl_ctx_t *) ctx;
ssl = oc->ssl;
-
/* write event */
int wrote = 0;
u32 space, enq_buf;
@@ -775,10 +916,17 @@ tls_async_write_event_handler (void *async_evt, void *unused)
transport_send_params_t *sp = event->tran_sp;
app_session = session_get_from_handle (ctx->app_session_handle);
+ if (app_session->flags & SESSION_F_APP_CLOSED)
+ {
+ TLS_DBG (2, "Session State: App Closed");
+ SSL_shutdown (oc->ssl);
+ return 0;
+ }
+
app_tx_fifo = app_session->tx_fifo;
/* Check if already data write is completed or not */
- if (oc->total_async_write == 0)
+ if (oc->async_ctx.total_async_write == 0)
return 0;
wrote = openssl_async_write_from_fifo_into_ssl (app_tx_fifo, ssl, oc);
@@ -788,6 +936,13 @@ tls_async_write_event_handler (void *async_evt, void *unused)
return 0;
}
+ tls_session = session_get_from_handle (ctx->tls_session_handle);
+ if (tls_session->session_state >= SESSION_STATE_APP_CLOSED)
+ {
+ TLS_DBG (2, "Session state: App Closed");
+ SSL_shutdown (oc->ssl);
+ return 0;
+ }
/* Unrecoverable protocol error. Reset connection */
if (PREDICT_FALSE (wrote < 0))
{
@@ -795,7 +950,6 @@ tls_async_write_event_handler (void *async_evt, void *unused)
return 0;
}
- tls_session = session_get_from_handle (ctx->tls_session_handle);
tls_tx_fifo = tls_session->tx_fifo;
/* prepare for remaining write(s) */
@@ -807,7 +961,7 @@ tls_async_write_event_handler (void *async_evt, void *unused)
session_dequeue_notify (app_session);
/* we got here, async write is done */
- oc->total_async_write = 0;
+ oc->async_ctx.total_async_write = 0;
if (PREDICT_FALSE (ctx->flags & TLS_CONN_F_APP_CLOSED &&
BIO_ctrl_pending (oc->rbio) <= 0))
diff --git a/src/plugins/tlsopenssl/tls_openssl.c b/src/plugins/tlsopenssl/tls_openssl.c
index f0be025a207..b7486ddfba1 100644
--- a/src/plugins/tlsopenssl/tls_openssl.c
+++ b/src/plugins/tlsopenssl/tls_openssl.c
@@ -35,7 +35,7 @@
openssl_main_t openssl_main;
static u32
-openssl_ctx_alloc_w_thread (u32 thread_index)
+openssl_ctx_alloc_w_thread (clib_thread_index_t thread_index)
{
openssl_main_t *om = &openssl_main;
openssl_ctx_t **ctx;
@@ -50,6 +50,11 @@ openssl_ctx_alloc_w_thread (u32 thread_index)
(*ctx)->ctx.tls_ctx_engine = CRYPTO_ENGINE_OPENSSL;
(*ctx)->ctx.app_session_handle = SESSION_INVALID_HANDLE;
(*ctx)->openssl_ctx_index = ctx - om->ctx_pool[thread_index];
+
+ /* Initialize llists for async events */
+ if (openssl_main.async)
+ tls_async_evts_init_list (&((*ctx)->async_ctx));
+
return ((*ctx)->openssl_ctx_index);
}
@@ -71,19 +76,12 @@ openssl_ctx_free (tls_ctx_t * ctx)
!(ctx->flags & TLS_CONN_F_PASSIVE_CLOSE))
SSL_shutdown (oc->ssl);
+ if (openssl_main.async)
+ tls_async_evts_free_list (ctx);
+
SSL_free (oc->ssl);
vec_free (ctx->srv_hostname);
SSL_CTX_free (oc->client_ssl_ctx);
-
- if (openssl_main.async)
- {
- openssl_evt_free (oc->evt_index[SSL_ASYNC_EVT_INIT],
- ctx->c_thread_index);
- openssl_evt_free (oc->evt_index[SSL_ASYNC_EVT_RD],
- ctx->c_thread_index);
- openssl_evt_free (oc->evt_index[SSL_ASYNC_EVT_WR],
- ctx->c_thread_index);
- }
}
pool_put_index (openssl_main.ctx_pool[ctx->c_thread_index],
@@ -102,7 +100,7 @@ openssl_ctx_detach (tls_ctx_t *ctx)
}
static u32
-openssl_ctx_attach (u32 thread_index, void *ctx_ptr)
+openssl_ctx_attach (clib_thread_index_t thread_index, void *ctx_ptr)
{
openssl_main_t *om = &openssl_main;
session_handle_t sh;
@@ -176,9 +174,6 @@ openssl_read_from_ssl_into_fifo (svm_fifo_t *f, tls_ctx_t *ctx, u32 max_len)
u32 max_enq;
SSL *ssl = oc->ssl;
- if (ctx->flags & TLS_CONN_F_ASYNC_RD)
- return 0;
-
max_enq = svm_fifo_max_enqueue_prod (f);
if (!max_enq)
return 0;
@@ -548,7 +543,7 @@ openssl_ctx_write (tls_ctx_t *ctx, session_t *app_session,
return openssl_ctx_write_dtls (ctx, app_session, sp);
}
-static inline int
+int
openssl_ctx_read_tls (tls_ctx_t *ctx, session_t *tls_session)
{
openssl_ctx_t *oc = (openssl_ctx_t *) ctx;
@@ -1090,7 +1085,10 @@ static int
openssl_transport_close (tls_ctx_t * ctx)
{
if (openssl_main.async && vpp_openssl_is_inflight (ctx))
- return 0;
+ {
+ TLS_DBG (2, "Close Transport but evts inflight: Flags: %ld", ctx->flags);
+ return 0;
+ }
if (!(ctx->flags & TLS_CONN_F_HS_DONE))
{
diff --git a/src/plugins/tlsopenssl/tls_openssl.h b/src/plugins/tlsopenssl/tls_openssl.h
index 1a566f35fa6..d52524161ab 100644
--- a/src/plugins/tlsopenssl/tls_openssl.h
+++ b/src/plugins/tlsopenssl/tls_openssl.h
@@ -33,15 +33,30 @@
if (PREDICT_FALSE (_rv < 0 && SSL_get_error (_ssl, _rv) == SSL_ERROR_SSL)) \
return -1;
+typedef struct tls_async_evt_
+{
+ clib_llist_anchor_t anchor;
+ u32 eidx;
+} async_evt_list;
+
+typedef struct tls_async_ctx_
+{
+ async_evt_list *hs_evt_list;
+ async_evt_list *rd_evt_list;
+ async_evt_list *wr_evt_list;
+ clib_llist_index_t rd_evt_head_index;
+ clib_llist_index_t wr_evt_head_index;
+ clib_llist_index_t hs_evt_head_index;
+ u32 total_async_write;
+} tls_async_ctx_t;
+
typedef struct tls_ctx_openssl_
{
tls_ctx_t ctx; /**< First */
u32 openssl_ctx_index;
SSL_CTX *client_ssl_ctx;
SSL *ssl;
- u32 evt_index[SSL_ASYNC_EVT_MAX];
- bool evt_alloc_flag[SSL_ASYNC_EVT_MAX];
- u32 total_async_write;
+ tls_async_ctx_t async_ctx;
BIO *rbio;
BIO *wbio;
} openssl_ctx_t;
@@ -99,6 +114,9 @@ void openssl_confirm_app_close (tls_ctx_t *ctx);
int tls_async_write_event_handler (void *event, void *session);
int tls_async_read_event_handler (void *event, void *session);
int tls_async_handshake_event_handler (void *event, void *session);
+int openssl_ctx_read_tls (tls_ctx_t *ctx, session_t *tls_session);
+void tls_async_evts_init_list (tls_async_ctx_t *ctx);
+void tls_async_evts_free_list (tls_ctx_t *ctx);
#endif /* SRC_PLUGINS_TLSOPENSSL_TLS_OPENSSL_H_ */
/*
diff --git a/src/plugins/tlspicotls/pico_vpp_crypto.c b/src/plugins/tlspicotls/pico_vpp_crypto.c
index 3d28d50b352..e8e4a875e33 100644
--- a/src/plugins/tlspicotls/pico_vpp_crypto.c
+++ b/src/plugins/tlspicotls/pico_vpp_crypto.c
@@ -107,8 +107,7 @@ ptls_vpp_crypto_cipher_setup_crypto (ptls_cipher_context_t * _ctx, int is_enc,
}
else
{
- TLS_DBG (1, "%s, Invalid crypto cipher : ", __FUNCTION__,
- _ctx->algo->name);
+ TLS_DBG (1, "%s, Invalid crypto cipher : %s", __func__, _ctx->algo->name);
assert (0);
}
@@ -226,8 +225,7 @@ ptls_vpp_crypto_aead_setup_crypto (ptls_aead_context_t *_ctx, int is_enc,
}
else
{
- TLS_DBG (1, "%s, invalied aead cipher %s", __FUNCTION__,
- _ctx->algo->name);
+ TLS_DBG (1, "%s, invalid aead cipher %s", __func__, _ctx->algo->name);
return -1;
}
diff --git a/src/plugins/unittest/ipsec_test.c b/src/plugins/unittest/ipsec_test.c
index 23867e1b043..869d53367b6 100644
--- a/src/plugins/unittest/ipsec_test.c
+++ b/src/plugins/unittest/ipsec_test.c
@@ -50,21 +50,15 @@ test_ipsec_command_fn (vlib_main_t *vm, unformat_input_t *input,
ort = ipsec_sa_get_outb_rt (sa);
if (ort)
- {
- ort->seq = seq_num & 0xffffffff;
- ort->seq_hi = seq_num >> 32;
- }
+ ort->seq64 = seq_num;
if (irt)
{
- irt->seq = seq_num & 0xffffffff;
- irt->seq_hi = seq_num >> 32;
+ irt->seq64 = seq_num;
/* clear the window */
- if (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa))
- clib_bitmap_zero (irt->replay_window_huge);
- else
- irt->replay_window = 0;
+ uword_bitmap_clear (irt->replay_window,
+ irt->anti_replay_window_size / uword_bits);
}
ipsec_sa_unlock (sa_index);
diff --git a/src/plugins/unittest/session_test.c b/src/plugins/unittest/session_test.c
index 993f1be41a9..667851901c4 100644
--- a/src/plugins/unittest/session_test.c
+++ b/src/plugins/unittest/session_test.c
@@ -16,6 +16,7 @@
#include <arpa/inet.h>
#include <vnet/session/application.h>
#include <vnet/session/session.h>
+#include <vnet/session/transport.h>
#include <sys/epoll.h>
#include <vnet/session/session_rules_table.h>
@@ -50,6 +51,11 @@ placeholder_session_reset_callback (session_t * s)
volatile u32 connected_session_index = ~0;
volatile u32 connected_session_thread = ~0;
+static u32 placeholder_accept;
+volatile u32 accepted_session_index;
+volatile u32 accepted_session_thread;
+volatile int app_session_error = 0;
+
int
placeholder_session_connected_callback (u32 app_index, u32 api_context,
session_t * s, session_error_t err)
@@ -81,13 +87,22 @@ placeholder_del_segment_callback (u32 client_index, u64 segment_handle)
void
placeholder_session_disconnect_callback (session_t * s)
{
- clib_warning ("called...");
+ if (!(s->session_index == connected_session_index &&
+ s->thread_index == connected_session_thread) &&
+ !(s->session_index == accepted_session_index &&
+ s->thread_index == accepted_session_thread))
+ {
+ clib_warning (0, "unexpected disconnect s %u thread %u",
+ s->session_index, s->thread_index);
+ app_session_error = 1;
+ }
+ vnet_disconnect_args_t da = {
+ .handle = session_handle (s),
+ .app_index = app_worker_get (s->app_wrk_index)->app_index
+ };
+ vnet_disconnect_session (&da);
}
-static u32 placeholder_accept;
-volatile u32 accepted_session_index;
-volatile u32 accepted_session_thread;
-
int
placeholder_session_accept_callback (session_t * s)
{
@@ -105,12 +120,39 @@ placeholder_server_rx_callback (session_t * s)
return -1;
}
+void
+placeholder_cleanup_callback (session_t *s, session_cleanup_ntf_t ntf)
+{
+ if (ntf == SESSION_CLEANUP_TRANSPORT)
+ return;
+
+ if (s->session_index == connected_session_index &&
+ s->thread_index == connected_session_thread)
+ {
+ connected_session_index = ~0;
+ connected_session_thread = ~0;
+ }
+ else if (s->session_index == accepted_session_index &&
+ s->thread_index == accepted_session_thread)
+ {
+ accepted_session_index = ~0;
+ accepted_session_thread = ~0;
+ }
+ else
+ {
+ clib_warning (0, "unexpected cleanup s %u thread %u", s->session_index,
+ s->thread_index);
+ app_session_error = 1;
+ }
+}
+
static session_cb_vft_t placeholder_session_cbs = {
.session_reset_callback = placeholder_session_reset_callback,
.session_connected_callback = placeholder_session_connected_callback,
.session_accept_callback = placeholder_session_accept_callback,
.session_disconnect_callback = placeholder_session_disconnect_callback,
.builtin_app_rx_callback = placeholder_server_rx_callback,
+ .session_cleanup_callback = placeholder_cleanup_callback,
.add_segment_callback = placeholder_add_segment_callback,
.del_segment_callback = placeholder_del_segment_callback,
};
@@ -278,6 +320,7 @@ session_test_endpoint_cfg (vlib_main_t * vm, unformat_input_t * input)
u64 options[APP_OPTIONS_N_OPTIONS], placeholder_secret = 1234;
u16 placeholder_server_port = 1234, placeholder_client_port = 5678;
session_endpoint_cfg_t server_sep = SESSION_ENDPOINT_CFG_NULL;
+ u32 client_vrf = 0, server_vrf = 1;
ip4_address_t intf_addr[3];
transport_connection_t *tc;
session_t *s;
@@ -288,25 +331,25 @@ session_test_endpoint_cfg (vlib_main_t * vm, unformat_input_t * input)
* Create the loopbacks
*/
intf_addr[0].as_u32 = clib_host_to_net_u32 (0x01010101);
- session_create_lookpback (0, &sw_if_index[0], &intf_addr[0]);
+ session_create_lookpback (client_vrf, &sw_if_index[0], &intf_addr[0]);
intf_addr[1].as_u32 = clib_host_to_net_u32 (0x02020202);
- session_create_lookpback (1, &sw_if_index[1], &intf_addr[1]);
+ session_create_lookpback (server_vrf, &sw_if_index[1], &intf_addr[1]);
- session_add_del_route_via_lookup_in_table (0, 1, &intf_addr[1], 32,
- 1 /* is_add */ );
- session_add_del_route_via_lookup_in_table (1, 0, &intf_addr[0], 32,
- 1 /* is_add */ );
+ session_add_del_route_via_lookup_in_table (
+ client_vrf, server_vrf, &intf_addr[1], 32, 1 /* is_add */);
+ session_add_del_route_via_lookup_in_table (
+ server_vrf, client_vrf, &intf_addr[0], 32, 1 /* is_add */);
/*
* Insert namespace
*/
- appns_id = format (0, "appns1");
+ appns_id = format (0, "appns_server");
vnet_app_namespace_add_del_args_t ns_args = {
.ns_id = appns_id,
.secret = placeholder_secret,
- .sw_if_index = sw_if_index[1],
- .ip4_fib_id = 0,
+ .sw_if_index = sw_if_index[1], /* server interface*/
+ .ip4_fib_id = 0, /* sw_if_index takes precedence */
.is_add = 1
};
error = vnet_app_namespace_add_del (&ns_args);
@@ -357,10 +400,10 @@ session_test_endpoint_cfg (vlib_main_t * vm, unformat_input_t * input)
* Connect and force lcl ip
*/
client_sep.is_ip4 = 1;
- client_sep.ip.ip4.as_u32 = clib_host_to_net_u32 (0x02020202);
+ client_sep.ip.ip4.as_u32 = intf_addr[1].as_u32;
client_sep.port = placeholder_server_port;
client_sep.peer.is_ip4 = 1;
- client_sep.peer.ip.ip4.as_u32 = clib_host_to_net_u32 (0x01010101);
+ client_sep.peer.ip.ip4.as_u32 = intf_addr[0].as_u32;
client_sep.peer.port = placeholder_client_port;
client_sep.transport_proto = TRANSPORT_PROTO_TCP;
@@ -401,6 +444,35 @@ session_test_endpoint_cfg (vlib_main_t * vm, unformat_input_t * input)
SESSION_TEST ((tc->lcl_port == placeholder_client_port),
"ports should be equal");
+ /* Disconnect server session, should lead to faster port cleanup on client */
+ vnet_disconnect_args_t disconnect_args = {
+ .handle =
+ session_make_handle (accepted_session_index, accepted_session_thread),
+ .app_index = server_index,
+ };
+
+ error = vnet_disconnect_session (&disconnect_args);
+ SESSION_TEST ((error == 0), "disconnect should work");
+
+ /* wait for stuff to happen */
+ tries = 0;
+ while (connected_session_index != ~0 && ++tries < 100)
+ {
+ vlib_worker_thread_barrier_release (vm);
+ vlib_process_suspend (vm, 100e-3);
+ vlib_worker_thread_barrier_sync (vm);
+ }
+
+ /* Active closes take longer to cleanup, don't wait */
+
+ clib_warning ("waited %.1f seconds for disconnect", tries / 10.0);
+ SESSION_TEST ((connected_session_index == ~0), "session should not exist");
+ SESSION_TEST ((connected_session_thread == ~0), "thread should not exist");
+ SESSION_TEST (transport_port_local_in_use () == 0,
+ "port should be cleaned up");
+ SESSION_TEST ((app_session_error == 0), "no app session errors");
+
+ /* Start cleanup by detaching apps */
vnet_app_detach_args_t detach_args = {
.app_index = server_index,
.api_client_index = ~0,
@@ -416,13 +488,167 @@ session_test_endpoint_cfg (vlib_main_t * vm, unformat_input_t * input)
/* Allow the disconnects to finish before removing the routes. */
vlib_process_suspend (vm, 10e-3);
- session_add_del_route_via_lookup_in_table (0, 1, &intf_addr[1], 32,
- 0 /* is_add */ );
- session_add_del_route_via_lookup_in_table (1, 0, &intf_addr[0], 32,
- 0 /* is_add */ );
+ session_add_del_route_via_lookup_in_table (
+ client_vrf, server_vrf, &intf_addr[1], 32, 0 /* is_add */);
+ session_add_del_route_via_lookup_in_table (
+ server_vrf, client_vrf, &intf_addr[0], 32, 0 /* is_add */);
+
+ session_delete_loopback (sw_if_index[0]);
+ session_delete_loopback (sw_if_index[1]);
+
+ /*
+ * Redo the test but with client in the non-default namespace
+ */
+
+ /* Create the loopbacks */
+ client_vrf = 1;
+ server_vrf = 0;
+ session_create_lookpback (client_vrf, &sw_if_index[0], &intf_addr[0]);
+ session_create_lookpback (server_vrf, &sw_if_index[1], &intf_addr[1]);
+
+ session_add_del_route_via_lookup_in_table (
+ client_vrf, server_vrf, &intf_addr[1], 32, 1 /* is_add */);
+ session_add_del_route_via_lookup_in_table (
+ server_vrf, client_vrf, &intf_addr[0], 32, 1 /* is_add */);
+
+ /* Insert new client namespace */
+ vec_free (appns_id);
+ appns_id = format (0, "appns_client");
+ ns_args.ns_id = appns_id;
+ ns_args.sw_if_index = sw_if_index[0]; /* client interface*/
+ ns_args.is_add = 1;
+
+ error = vnet_app_namespace_add_del (&ns_args);
+ SESSION_TEST ((error == 0), "app ns insertion should succeed: %U",
+ format_session_error, error);
+
+ /* Attach client */
+ attach_args.name = format (0, "session_test_client");
+ attach_args.namespace_id = appns_id;
+ attach_args.options[APP_OPTIONS_ADD_SEGMENT_SIZE] = 0;
+ attach_args.options[APP_OPTIONS_NAMESPACE_SECRET] = placeholder_secret;
+ attach_args.api_client_index = ~0;
+
+ error = vnet_application_attach (&attach_args);
+ SESSION_TEST ((error == 0), "client app attached: %U", format_session_error,
+ error);
+ client_index = attach_args.app_index;
+ vec_free (attach_args.name);
+
+ /* Attach server */
+ attach_args.name = format (0, "session_test_server");
+ attach_args.namespace_id = 0;
+ attach_args.options[APP_OPTIONS_ADD_SEGMENT_SIZE] = 32 << 20;
+ attach_args.options[APP_OPTIONS_NAMESPACE_SECRET] = 0;
+ attach_args.api_client_index = ~0;
+ error = vnet_application_attach (&attach_args);
+ SESSION_TEST ((error == 0), "server app attached: %U", format_session_error,
+ error);
+ vec_free (attach_args.name);
+ server_index = attach_args.app_index;
+
+ /* Bind server */
+ clib_memset (&server_sep, 0, sizeof (server_sep));
+ server_sep.is_ip4 = 1;
+ server_sep.port = placeholder_server_port;
+ bind_args.sep_ext = server_sep;
+ bind_args.app_index = server_index;
+ error = vnet_listen (&bind_args);
+ SESSION_TEST ((error == 0), "server bind should work: %U",
+ format_session_error, error);
+
+ /* Connect client */
+ connected_session_index = connected_session_thread = ~0;
+ accepted_session_index = accepted_session_thread = ~0;
+ clib_memset (&client_sep, 0, sizeof (client_sep));
+ client_sep.is_ip4 = 1;
+ client_sep.ip.ip4.as_u32 = intf_addr[1].as_u32;
+ client_sep.port = placeholder_server_port;
+ client_sep.peer.is_ip4 = 1;
+ client_sep.peer.ip.ip4.as_u32 = intf_addr[0].as_u32;
+ client_sep.peer.port = placeholder_client_port;
+ client_sep.transport_proto = TRANSPORT_PROTO_TCP;
+
+ connect_args.sep_ext = client_sep;
+ connect_args.app_index = client_index;
+ error = vnet_connect (&connect_args);
+ SESSION_TEST ((error == 0), "connect should work");
+
+ /* wait for stuff to happen */
+ while (connected_session_index == ~0 && ++tries < 100)
+ {
+ vlib_worker_thread_barrier_release (vm);
+ vlib_process_suspend (vm, 100e-3);
+ vlib_worker_thread_barrier_sync (vm);
+ }
+ while (accepted_session_index == ~0 && ++tries < 100)
+ {
+ vlib_worker_thread_barrier_release (vm);
+ vlib_process_suspend (vm, 100e-3);
+ vlib_worker_thread_barrier_sync (vm);
+ }
+
+ clib_warning ("waited %.1f seconds for connections", tries / 10.0);
+ SESSION_TEST ((connected_session_index != ~0), "session should exist");
+ SESSION_TEST ((connected_session_thread != ~0), "thread should exist");
+ SESSION_TEST ((accepted_session_index != ~0), "session should exist");
+ SESSION_TEST ((accepted_session_thread != ~0), "thread should exist");
+ s = session_get (connected_session_index, connected_session_thread);
+ tc = session_get_transport (s);
+ SESSION_TEST ((tc != 0), "transport should exist");
+ SESSION_TEST (
+ (memcmp (&tc->lcl_ip, &client_sep.peer.ip, sizeof (tc->lcl_ip)) == 0),
+ "ips should be equal");
+ SESSION_TEST ((tc->lcl_port == placeholder_client_port),
+ "ports should be equal");
+
+ /* Disconnect server session, for faster port cleanup on client */
+ disconnect_args.app_index = server_index;
+ disconnect_args.handle =
+ session_make_handle (accepted_session_index, accepted_session_thread);
+
+ error = vnet_disconnect_session (&disconnect_args);
+ SESSION_TEST ((error == 0), "disconnect should work");
+
+ /* wait for stuff to happen */
+ tries = 0;
+ while (connected_session_index != ~0 && ++tries < 100)
+ {
+ vlib_worker_thread_barrier_release (vm);
+ vlib_process_suspend (vm, 100e-3);
+ vlib_worker_thread_barrier_sync (vm);
+ }
+
+ /* Active closes take longer to cleanup, don't wait */
+
+ clib_warning ("waited %.1f seconds for disconnect", tries / 10.0);
+ SESSION_TEST ((connected_session_index == ~0), "session should not exist");
+ SESSION_TEST ((connected_session_thread == ~0), "thread should not exist");
+ SESSION_TEST ((app_session_error == 0), "no app session errors");
+ SESSION_TEST (transport_port_local_in_use () == 0,
+ "port should be cleaned up");
+
+ /* Start cleanup by detaching apps */
+ detach_args.app_index = server_index;
+ vnet_application_detach (&detach_args);
+ detach_args.app_index = client_index;
+ vnet_application_detach (&detach_args);
+
+ ns_args.is_add = 0;
+ error = vnet_app_namespace_add_del (&ns_args);
+ SESSION_TEST ((error == 0), "app ns delete should succeed: %d", error);
+
+ /* Allow the disconnects to finish before removing the routes. */
+ vlib_process_suspend (vm, 10e-3);
+
+ session_add_del_route_via_lookup_in_table (
+ client_vrf, server_vrf, &intf_addr[1], 32, 0 /* is_add */);
+ session_add_del_route_via_lookup_in_table (
+ server_vrf, client_vrf, &intf_addr[0], 32, 0 /* is_add */);
session_delete_loopback (sw_if_index[0]);
session_delete_loopback (sw_if_index[1]);
+
return 0;
}
@@ -1781,6 +2007,11 @@ session_test_proxy (vlib_main_t * vm, unformat_input_t * input)
unformat_free (&tmp_input);
vec_free (attach_args.name);
session_delete_loopback (sw_if_index);
+
+ /* Revert default appns sw_if_index */
+ app_ns = app_namespace_get_default ();
+ app_ns->sw_if_index = ~0;
+
return 0;
}
@@ -2131,7 +2362,10 @@ session_get_memory_usage (void)
s = format (s, "%U\n", format_clib_mem_heap, heap, 0);
ss = strstr ((char *) s, "used:");
if (ss)
- sscanf (ss, "used: %f", &used);
+ {
+ if (sscanf (ss, "used: %f", &used) != 1)
+ clib_warning ("invalid 'used' value");
+ }
else
clib_warning ("substring 'used:' not found from show memory");
vec_free (s);
@@ -2501,6 +2735,8 @@ session_test (vlib_main_t * vm,
done:
if (res)
return clib_error_return (0, "Session unit test failed");
+
+ vlib_cli_output (vm, "SUCCESS");
return 0;
}
diff --git a/src/plugins/unittest/svm_fifo_test.c b/src/plugins/unittest/svm_fifo_test.c
index 9feb37cbc25..c6031c59987 100644
--- a/src/plugins/unittest/svm_fifo_test.c
+++ b/src/plugins/unittest/svm_fifo_test.c
@@ -2856,6 +2856,8 @@ svm_fifo_test (vlib_main_t * vm, unformat_input_t * input,
done:
if (res)
return clib_error_return (0, "svm fifo unit test failed");
+
+ vlib_cli_output (vm, "SUCCESS");
return 0;
}
diff --git a/src/plugins/unittest/tcp_test.c b/src/plugins/unittest/tcp_test.c
index bd39474ce93..6236ccdfe08 100644
--- a/src/plugins/unittest/tcp_test.c
+++ b/src/plugins/unittest/tcp_test.c
@@ -1002,16 +1002,16 @@ tbt_seq_lt (u32 a, u32 b)
}
static void
-tcp_test_set_time (u32 thread_index, u32 val)
+tcp_test_set_time (clib_thread_index_t thread_index, u32 val)
{
session_main.wrk[thread_index].last_vlib_time = val;
- tcp_set_time_now (&tcp_main.wrk_ctx[thread_index], val);
+ tcp_set_time_now (&tcp_main.wrk[thread_index], val);
}
static int
tcp_test_delivery (vlib_main_t * vm, unformat_input_t * input)
{
- u32 thread_index = 0, snd_una, *min_seqs = 0;
+ clib_thread_index_t thread_index = 0, snd_una, *min_seqs = 0;
tcp_rate_sample_t _rs = { 0 }, *rs = &_rs;
tcp_connection_t _tc, *tc = &_tc;
sack_scoreboard_t *sb = &tc->sack_sb;
@@ -1337,7 +1337,7 @@ tcp_test_delivery (vlib_main_t * vm, unformat_input_t * input)
static int
tcp_test_bt (vlib_main_t * vm, unformat_input_t * input)
{
- u32 thread_index = 0;
+ clib_thread_index_t thread_index = 0;
tcp_rate_sample_t _rs = { 0 }, *rs = &_rs;
tcp_connection_t _tc, *tc = &_tc;
int __clib_unused verbose = 0, i;
@@ -1594,6 +1594,8 @@ tcp_test (vlib_main_t * vm,
done:
if (res)
return clib_error_return (0, "TCP unit test failed");
+
+ vlib_cli_output (vm, "SUCCESS");
return 0;
}
diff --git a/src/plugins/urpf/urpf_dp.h b/src/plugins/urpf/urpf_dp.h
index b17fed7e04b..edb4ec79171 100644
--- a/src/plugins/urpf/urpf_dp.h
+++ b/src/plugins/urpf/urpf_dp.h
@@ -98,8 +98,8 @@ urpf_perform_check_x1 (ip_address_family_t af, vlib_dir_t dir,
lb_index = ip4_fib_forwarding_lookup (fib_index, &ip->src_address);
/* Pass multicast. */
- lpass = (ip4_address_is_multicast (&ip->src_address) ||
- ip4_address_is_global_broadcast (&ip->src_address));
+ lpass = (ip4_address_is_multicast (&ip->dst_address) ||
+ ip4_address_is_global_broadcast (&ip->dst_address));
}
else
{
@@ -108,7 +108,7 @@ urpf_perform_check_x1 (ip_address_family_t af, vlib_dir_t dir,
ip = (ip6_header_t *) h;
lb_index = ip6_fib_table_fwding_lookup (fib_index, &ip->src_address);
- lpass = ip6_address_is_multicast (&ip->src_address);
+ lpass = ip6_address_is_multicast (&ip->dst_address);
}
llb = load_balance_get (lb_index);
@@ -157,10 +157,10 @@ urpf_perform_check_x2 (ip_address_family_t af, vlib_dir_t dir,
ip4_fib_forwarding_lookup_x2 (fib_index0, fib_index1, &ip0->src_address,
&ip1->src_address, &lb_index0, &lb_index1);
/* Pass multicast. */
- lpass0 = (ip4_address_is_multicast (&ip0->src_address) ||
- ip4_address_is_global_broadcast (&ip0->src_address));
- lpass1 = (ip4_address_is_multicast (&ip1->src_address) ||
- ip4_address_is_global_broadcast (&ip1->src_address));
+ lpass0 = (ip4_address_is_multicast (&ip0->dst_address) ||
+ ip4_address_is_global_broadcast (&ip0->dst_address));
+ lpass1 = (ip4_address_is_multicast (&ip1->dst_address) ||
+ ip4_address_is_global_broadcast (&ip1->dst_address));
}
else
{
@@ -171,8 +171,8 @@ urpf_perform_check_x2 (ip_address_family_t af, vlib_dir_t dir,
lb_index0 = ip6_fib_table_fwding_lookup (fib_index0, &ip0->src_address);
lb_index1 = ip6_fib_table_fwding_lookup (fib_index1, &ip1->src_address);
- lpass0 = ip6_address_is_multicast (&ip0->src_address);
- lpass1 = ip6_address_is_multicast (&ip1->src_address);
+ lpass0 = ip6_address_is_multicast (&ip0->dst_address);
+ lpass1 = ip6_address_is_multicast (&ip1->dst_address);
}
llb0 = load_balance_get (lb_index0);
diff --git a/src/plugins/vhost/vhost_user.c b/src/plugins/vhost/vhost_user.c
index fdee984f97b..592a126c683 100644
--- a/src/plugins/vhost/vhost_user.c
+++ b/src/plugins/vhost/vhost_user.c
@@ -31,7 +31,7 @@
#include <linux/if_tun.h>
#include <vlib/vlib.h>
-#include <vlib/unix/unix.h>
+#include <vlib/file.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
@@ -325,15 +325,13 @@ vhost_user_vring_close (vhost_user_intf_t * vui, u32 qid)
if (vring->kickfd_idx != ~0)
{
- clib_file_t *uf = pool_elt_at_index (file_main.file_pool,
- vring->kickfd_idx);
+ clib_file_t *uf = clib_file_get (&file_main, vring->kickfd_idx);
clib_file_del (&file_main, uf);
vring->kickfd_idx = ~0;
}
if (vring->callfd_idx != ~0)
{
- clib_file_t *uf = pool_elt_at_index (file_main.file_pool,
- vring->callfd_idx);
+ clib_file_t *uf = clib_file_get (&file_main, vring->callfd_idx);
clib_file_del (&file_main, uf);
vring->callfd_idx = ~0;
}
@@ -349,7 +347,7 @@ vhost_user_vring_close (vhost_user_intf_t * vui, u32 qid)
u16 q = vui->vrings[qid].qid;
u32 queue_index = vui->vrings[qid].queue_index;
u32 mode = vui->vrings[qid].mode;
- u32 thread_index = vui->vrings[qid].thread_index;
+ clib_thread_index_t thread_index = vui->vrings[qid].thread_index;
vhost_user_vring_init (vui, qid);
vui->vrings[qid].qid = q;
vui->vrings[qid].queue_index = queue_index;
@@ -367,7 +365,7 @@ vhost_user_if_disconnect (vhost_user_intf_t * vui)
if (vui->clib_file_index != ~0)
{
- clib_file_del (&file_main, file_main.file_pool + vui->clib_file_index);
+ clib_file_del_by_index (&file_main, vui->clib_file_index);
vui->clib_file_index = ~0;
}
@@ -750,8 +748,8 @@ vhost_user_socket_read (clib_file_t * uf)
/* if there is old fd, delete and close it */
if (vui->vrings[q].callfd_idx != ~0)
{
- clib_file_t *uf = pool_elt_at_index (file_main.file_pool,
- vui->vrings[q].callfd_idx);
+ clib_file_t *uf =
+ clib_file_get (&file_main, vui->vrings[q].callfd_idx);
clib_file_del (&file_main, uf);
vui->vrings[q].callfd_idx = ~0;
}
@@ -823,8 +821,8 @@ vhost_user_socket_read (clib_file_t * uf)
if (vui->vrings[q].kickfd_idx != ~0)
{
- clib_file_t *uf = pool_elt_at_index (file_main.file_pool,
- vui->vrings[q].kickfd_idx);
+ clib_file_t *uf =
+ clib_file_get (&file_main, vui->vrings[q].kickfd_idx);
clib_file_del (&file_main, uf);
vui->vrings[q].kickfd_idx = ~0;
}
@@ -1148,7 +1146,7 @@ vhost_user_socksvr_accept_ready (clib_file_t * uf)
{
vu_log_debug (vui, "Close client socket for vhost interface %d, fd %d",
vui->sw_if_index, UNIX_GET_FD (vui->clib_file_index));
- clib_file_del (&file_main, file_main.file_pool + vui->clib_file_index);
+ clib_file_del_by_index (&file_main, vui->clib_file_index);
}
vu_log_debug (vui, "New client socket for vhost interface %d, fd %d",
@@ -1408,8 +1406,7 @@ vhost_user_term_if (vhost_user_intf_t * vui)
if (vui->unix_server_index != ~0)
{
//Close server socket
- clib_file_t *uf = pool_elt_at_index (file_main.file_pool,
- vui->unix_server_index);
+ clib_file_t *uf = clib_file_get (&file_main, vui->unix_server_index);
clib_file_del (&file_main, uf);
vui->unix_server_index = ~0;
unlink (vui->sock_filename);
@@ -1444,7 +1441,7 @@ vhost_user_delete_if (vnet_main_t * vnm, vlib_main_t * vm, u32 sw_if_index)
vhost_user_vring_t *txvq = &vui->vrings[qid];
if ((txvq->mode == VNET_HW_IF_RX_MODE_POLLING) &&
- (txvq->thread_index != ~0))
+ (txvq->thread_index != CLIB_INVALID_THREAD_INDEX))
{
vhost_cpu_t *cpu = vec_elt_at_index (vum->cpus, txvq->thread_index);
ASSERT (cpu->polling_q_count != 0);
diff --git a/src/plugins/vhost/vhost_user.h b/src/plugins/vhost/vhost_user.h
index a3582affb4b..9e461979007 100644
--- a/src/plugins/vhost/vhost_user.h
+++ b/src/plugins/vhost/vhost_user.h
@@ -62,11 +62,13 @@
dev->hw_if_index, ##__VA_ARGS__); \
};
-#define UNIX_GET_FD(unixfd_idx) ({ \
- typeof(unixfd_idx) __unixfd_idx = (unixfd_idx); \
- (__unixfd_idx != ~0) ? \
- pool_elt_at_index (file_main.file_pool, \
- __unixfd_idx)->file_descriptor : -1; })
+#define UNIX_GET_FD(unixfd_idx) \
+ ({ \
+ typeof (unixfd_idx) __unixfd_idx = (unixfd_idx); \
+ (__unixfd_idx != ~0) ? \
+ clib_file_get (&file_main, __unixfd_idx)->file_descriptor : \
+ -1; \
+ })
#define foreach_virtio_trace_flags \
_ (SIMPLE_CHAINED, 0, "Simple descriptor chaining") \
@@ -229,7 +231,7 @@ typedef struct
u16 last_kick;
u8 first_kick;
u32 queue_index;
- u32 thread_index;
+ clib_thread_index_t thread_index;
} vhost_user_vring_t;
#define VHOST_USER_EVENT_START_TIMER 1
diff --git a/src/plugins/vhost/vhost_user_input.c b/src/plugins/vhost/vhost_user_input.c
index ca5072485ff..5dc1eedf52a 100644
--- a/src/plugins/vhost/vhost_user_input.c
+++ b/src/plugins/vhost/vhost_user_input.c
@@ -31,7 +31,7 @@
#include <linux/if_tun.h>
#include <vlib/vlib.h>
-#include <vlib/unix/unix.h>
+#include <vlib/file.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
diff --git a/src/plugins/vhost/vhost_user_output.c b/src/plugins/vhost/vhost_user_output.c
index 58fd4309f8c..3052ae39ec1 100644
--- a/src/plugins/vhost/vhost_user_output.c
+++ b/src/plugins/vhost/vhost_user_output.c
@@ -32,7 +32,7 @@
#include <linux/if_tun.h>
#include <vlib/vlib.h>
-#include <vlib/unix/unix.h>
+#include <vlib/file.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
@@ -382,7 +382,7 @@ vhost_user_device_class_packed (vlib_main_t *vm, vlib_node_runtime_t *node,
vhost_user_main_t *vum = &vhost_user_main;
u32 qid = rxvq->qid;
u8 error;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
vhost_cpu_t *cpu = &vum->cpus[thread_index];
u32 map_hint = 0;
u8 retry = 8;
@@ -698,7 +698,7 @@ VNET_DEVICE_CLASS_TX_FN (vhost_user_device_class) (vlib_main_t * vm,
u32 qid;
vhost_user_vring_t *rxvq;
u8 error;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
vhost_cpu_t *cpu = &vum->cpus[thread_index];
u32 map_hint = 0;
u8 retry = 8;
@@ -1051,7 +1051,7 @@ vhost_user_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index,
return clib_error_return (0, "unsupported");
}
- if (txvq->thread_index == ~0)
+ if (txvq->thread_index == CLIB_INVALID_THREAD_INDEX)
return clib_error_return (0, "Queue initialization is not finished yet");
cpu = vec_elt_at_index (vum->cpus, txvq->thread_index);
diff --git a/src/plugins/vmxnet3/input.c b/src/plugins/vmxnet3/input.c
index 25632546b6d..55fb418e501 100644
--- a/src/plugins/vmxnet3/input.c
+++ b/src/plugins/vmxnet3/input.c
@@ -203,7 +203,7 @@ vmxnet3_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
vmxnet3_rx_comp *rx_comp;
u32 desc_idx;
vmxnet3_rxq_t *rxq;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 buffer_indices[VLIB_FRAME_SIZE], *bi;
u16 nexts[VLIB_FRAME_SIZE], *next;
vmxnet3_rx_ring *ring;
diff --git a/src/plugins/vmxnet3/vmxnet3.h b/src/plugins/vmxnet3/vmxnet3.h
index 89602f8ee9e..8de992eaffe 100644
--- a/src/plugins/vmxnet3/vmxnet3.h
+++ b/src/plugins/vmxnet3/vmxnet3.h
@@ -523,7 +523,7 @@ typedef struct
u32 mode;
u8 buffer_pool_index;
u32 queue_index;
- u32 thread_index;
+ clib_thread_index_t thread_index;
vmxnet3_rx_ring rx_ring[VMXNET3_RX_RING_SIZE];
vmxnet3_rx_desc *rx_desc[VMXNET3_RX_RING_SIZE];
vmxnet3_rx_comp *rx_comp;
diff --git a/src/plugins/vrrp/vrrp_periodic.c b/src/plugins/vrrp/vrrp_periodic.c
index 5f9d7ae938e..e3a374a112d 100644
--- a/src/plugins/vrrp/vrrp_periodic.c
+++ b/src/plugins/vrrp/vrrp_periodic.c
@@ -187,7 +187,19 @@ vrrp_periodic_process (vlib_main_t * vm,
timer = pool_elt_at_index (pm->vr_timers, next_timer);
timeout = timer->expire_time - now;
- vlib_process_wait_for_event_or_clock (vm, timeout);
+ /*
+ * Adding a virtual MAC to some NICs can take a significant amount
+ * of time (~1s). If a lot of VRs enter the master state around the
+ * same time, the process node can stay active for a very long time
+ * processing all of the transitions.
+ *
+ * Try to force a 10us sleep between processing events to ensure
+ * that the process node does not prevent API messages and RPCs
+ * from being handled for an extended period. This prevents
+ * vlib_process_wait_for_event_or_clock() from returning
+ * immediately.
+ */
+ vlib_process_wait_for_event_or_clock (vm, clib_max (timeout, 10e-6));
}
event_type = vlib_process_get_events (vm, (uword **) & event_data);
diff --git a/src/plugins/vxlan-gpe/CMakeLists.txt b/src/plugins/vxlan-gpe/CMakeLists.txt
new file mode 100644
index 00000000000..987ebcc2df9
--- /dev/null
+++ b/src/plugins/vxlan-gpe/CMakeLists.txt
@@ -0,0 +1,32 @@
+# Copyright (c) 2024 OpenInfra Foundation Europe
+# Copyright (c) 2025 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+add_vpp_plugin(vxlan-gpe
+ SOURCES
+ encap.c
+ decap.c
+ vxlan_gpe.c
+ vxlan_gpe_api.c
+ vxlan_gpe_packet.h
+ plugin.c
+
+ INSTALL_HEADERS
+ vxlan_gpe.h
+
+ MULTIARCH_SOURCES
+ decap.c
+
+ API_FILES
+ vxlan_gpe.api
+)
diff --git a/src/plugins/vxlan-gpe/FEATURE.yaml b/src/plugins/vxlan-gpe/FEATURE.yaml
new file mode 100644
index 00000000000..f4ec2f4c517
--- /dev/null
+++ b/src/plugins/vxlan-gpe/FEATURE.yaml
@@ -0,0 +1,10 @@
+---
+name: VxLAN-GPE
+maintainer: Hongjun Ni <hongjun.ni@intel.com>
+features:
+ - VxLAN-GPE decapsulation
+ - VxLAN-GPE encapsulation
+
+description: "VxLAN-GPE tunnel handling"
+state: production
+properties: [API, CLI, MULTITHREAD]
diff --git a/src/plugins/vxlan-gpe/decap.c b/src/plugins/vxlan-gpe/decap.c
new file mode 100644
index 00000000000..80f2facef29
--- /dev/null
+++ b/src/plugins/vxlan-gpe/decap.c
@@ -0,0 +1,1167 @@
+/*
+ * decap.c - decapsulate VXLAN GPE
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief Functions for decapsulating VXLAN GPE tunnels
+ *
+*/
+
+#include <vlib/vlib.h>
+#include <vnet/udp/udp_local.h>
+#include <vxlan-gpe/vxlan_gpe.h>
+
+/**
+ * @brief Struct for VXLAN GPE decap packet tracing
+ *
+ */
+typedef struct
+{
+ u32 next_index;
+ u32 tunnel_index;
+ u32 error;
+} vxlan_gpe_rx_trace_t;
+
+/**
+ * @brief Tracing function for VXLAN GPE packet decapsulation
+ *
+ * @param *s
+ * @param *args
+ *
+ * @return *s
+ *
+ */
+static u8 *
+format_vxlan_gpe_rx_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ vxlan_gpe_rx_trace_t *t = va_arg (*args, vxlan_gpe_rx_trace_t *);
+
+ if (t->tunnel_index != ~0)
+ {
+ s = format (s, "VXLAN-GPE: tunnel %d next %d error %d", t->tunnel_index,
+ t->next_index, t->error);
+ }
+ else
+ {
+ s = format (s, "VXLAN-GPE: no tunnel next %d error %d\n", t->next_index,
+ t->error);
+ }
+ return s;
+}
+
/**
 * @brief Buffer formatter registered as .format_buffer for the
 * vxlan[46]-gpe-input nodes.
 *
 * NOTE(review): despite its name, this function formats nothing and returns
 * the string unchanged - the "with length" output was apparently never
 * implemented. Confirm whether buffer formatting output is expected here.
 *
 * @param *s
 * @param *args
 *
 * @return *s unchanged
 *
 */
static u8 *
format_vxlan_gpe_with_length (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);

  return s;
}
+
/** One-entry cache of the most recent IPv4 tunnel lookup (key + result),
 * kept on the stack of each dispatch function to avoid repeated hash
 * lookups when consecutive packets belong to the same tunnel. */
typedef struct
{
  vxlan4_gpe_tunnel_key_t key;
  vxlan_gpe_decap_info_t val;
} vxlan4_gpe_tunnel_cache_t;

/** Lookup result returned when no tunnel matches: drop the packet and
 * count it against the no-such-tunnel error. */
static const vxlan_gpe_decap_info_t decap_not_found = {
  .tunnel_index = ~0,
  .next_index = VXLAN_GPE_INPUT_NEXT_DROP,
  .error = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL
};
+
/**
 * @brief Resolve the IPv4 VXLAN-GPE tunnel for a received packet.
 *
 * Builds a lookup key from the outer IPv4 source/destination, UDP
 * destination port and VNI, consults the caller-provided one-entry cache
 * first, and falls back to the per-main hash table on a miss. On a hash
 * hit the cache is refreshed; on a complete miss the shared
 * decap_not_found result (drop + no-such-tunnel error) is returned and
 * the cache is left untouched.
 *
 * @param *nngm    vxlan-gpe main
 * @param *cache   per-call lookup cache, updated on hash hits
 * @param *iuvn4_0 outer ip4+udp+vxlan-gpe header of the packet
 *
 * @return decap info: tunnel index, next node, error code
 */
always_inline vxlan_gpe_decap_info_t
vxlan4_gpe_find_tunnel (vxlan_gpe_main_t *nngm,
			vxlan4_gpe_tunnel_cache_t *cache,
			ip4_vxlan_gpe_header_t *iuvn4_0)
{
  /* Make sure VXLAN GPE tunnel exist according to packet S/D IP, UDP port and
   * VNI */
  vxlan4_gpe_tunnel_key_t key4 = {
    .local = iuvn4_0->ip4.dst_address.as_u32,
    .remote = iuvn4_0->ip4.src_address.as_u32,
    .vni = iuvn4_0->vxlan.vni_res,
    .port = (u32) iuvn4_0->udp.dst_port,
  };

  /* Key fits in two u64 words; compare word-wise instead of memcmp */
  if (PREDICT_TRUE (key4.as_u64[0] == cache->key.as_u64[0] &&
		    key4.as_u64[1] == cache->key.as_u64[1]))
    {
      /* cache hit */
      return cache->val;
    }

  uword *p = hash_get_mem (nngm->vxlan4_gpe_tunnel_by_key, &key4);
  if (PREDICT_TRUE (p != 0))
    {
      /* Next node is selected by the GPE protocol field; out-of-range
       * protocols are dropped */
      u32 next = (iuvn4_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
	nngm->decap_next_node_list[iuvn4_0->vxlan.protocol] :
	VXLAN_GPE_INPUT_NEXT_DROP;

      cache->key.as_u64[0] = key4.as_u64[0];
      cache->key.as_u64[1] = key4.as_u64[1];

      cache->val.error = 0;
      cache->val.tunnel_index = p[0];
      cache->val.next_index = next;

      return cache->val;
    }

  return decap_not_found;
}
+
/** One-entry cache of the most recent IPv6 tunnel lookup (key + result),
 * mirroring vxlan4_gpe_tunnel_cache_t for the IPv6 path. */
typedef struct
{
  vxlan6_gpe_tunnel_key_t key;
  vxlan_gpe_decap_info_t val;
} vxlan6_gpe_tunnel_cache_t;
+
/**
 * @brief Resolve the IPv6 VXLAN-GPE tunnel for a received packet.
 *
 * IPv6 counterpart of vxlan4_gpe_find_tunnel(): key is built from the
 * outer IPv6 addresses, UDP destination port and VNI; a one-entry cache
 * is consulted before the hash table.
 *
 * NOTE(review): the cache comparison uses memcmp over a stack-allocated
 * key that is filled field-by-field - correctness relies on
 * vxlan6_gpe_tunnel_key_t having no padding (or on the callers memsetting
 * the cache). Confirm the key layout is packed.
 *
 * @param *nngm    vxlan-gpe main
 * @param *cache   per-call lookup cache, updated on hash hits
 * @param *iuvn6_0 outer ip6+udp+vxlan-gpe header of the packet
 *
 * @return decap info: tunnel index, next node, error code
 */
always_inline vxlan_gpe_decap_info_t
vxlan6_gpe_find_tunnel (vxlan_gpe_main_t *nngm,
			vxlan6_gpe_tunnel_cache_t *cache,
			ip6_vxlan_gpe_header_t *iuvn6_0)
{
  /* Make sure VXLAN GPE tunnel exist according to packet S/D IP, UDP port and
   * VNI */
  vxlan6_gpe_tunnel_key_t key6;

  ip6_address_copy (&key6.local, &iuvn6_0->ip6.dst_address);
  ip6_address_copy (&key6.remote, &iuvn6_0->ip6.src_address);
  key6.vni = iuvn6_0->vxlan.vni_res;
  key6.port = iuvn6_0->udp.dst_port;

  if (PREDICT_TRUE (memcmp (&key6, &cache->key, sizeof (cache->key)) == 0))
    {
      /* cache hit */
      return cache->val;
    }

  uword *p = hash_get_mem (nngm->vxlan6_gpe_tunnel_by_key, &key6);
  if (PREDICT_TRUE (p != 0))
    {
      /* Next node is selected by the GPE protocol field; out-of-range
       * protocols are dropped */
      u32 next = (iuvn6_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
	nngm->decap_next_node_list[iuvn6_0->vxlan.protocol] :
	VXLAN_GPE_INPUT_NEXT_DROP;

      clib_memcpy_fast (&cache->key, &key6, sizeof (key6));
      cache->val.error = 0;
      cache->val.tunnel_index = p[0];
      cache->val.next_index = next;

      return cache->val;
    }

  return decap_not_found;
}
+
+/**
+ * @brief Common processing for IPv4 and IPv6 VXLAN GPE decap dispatch functions
+ *
+ * It is worth noting that other than trivial UDP forwarding (transit), VXLAN GPE
+ * tunnels are "terminate local". This means that there is no "TX" interface for this
+ * decap case, so that field in the buffer_metadata can be "used for something else".
+ * The something else in this case is, for the IPv4/IPv6 inner-packet type case, the
+ * FIB index used to look up the inner-packet's adjacency.
+ *
+ * vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
+ *
+ * @param *vm
+ * @param *node
+ * @param *from_frame
+ * @param is_ip4
+ *
+ * @return from_frame->n_vectors
+ *
+ */
+always_inline uword
+vxlan_gpe_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame, u8 is_ip4)
+{
+ u32 n_left_from, next_index, *from, *to_next;
+ vxlan_gpe_main_t *nngm = &vxlan_gpe_main;
+ vnet_main_t *vnm = nngm->vnet_main;
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vxlan4_gpe_tunnel_cache_t last4;
+ vxlan6_gpe_tunnel_cache_t last6;
+ u32 pkts_decapsulated = 0;
+ clib_thread_index_t thread_index = vm->thread_index;
+ u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
+
+ if (is_ip4)
+ clib_memset (&last4, 0xff, sizeof (last4));
+ else
+ clib_memset (&last6, 0xff, sizeof (last6));
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+ stats_sw_if_index = node->runtime_data[0];
+ stats_n_packets = stats_n_bytes = 0;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ u32 next0, next1;
+ ip4_vxlan_gpe_header_t *iuvn4_0, *iuvn4_1;
+ ip6_vxlan_gpe_header_t *iuvn6_0, *iuvn6_1;
+ vxlan_gpe_decap_info_t di0, di1;
+ vxlan_gpe_tunnel_t *t0, *t1;
+ u32 error0, error1;
+ u32 sw_if_index0, sw_if_index1, len0, len1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ if (is_ip4)
+ {
+ /* udp leaves current_data pointing at the vxlan-gpe header */
+ vlib_buffer_advance (b0,
+ -(word) (sizeof (udp_header_t) +
+ sizeof (ip4_header_t)));
+ vlib_buffer_advance (b1,
+ -(word) (sizeof (udp_header_t) +
+ sizeof (ip4_header_t)));
+
+ iuvn4_0 = vlib_buffer_get_current (b0);
+ iuvn4_1 = vlib_buffer_get_current (b1);
+
+ /* pop (ip, udp, vxlan) */
+ vlib_buffer_advance (b0, sizeof (*iuvn4_0));
+ vlib_buffer_advance (b1, sizeof (*iuvn4_1));
+
+ di0 = vxlan4_gpe_find_tunnel (nngm, &last4, iuvn4_0);
+ di1 = vxlan4_gpe_find_tunnel (nngm, &last4, iuvn4_1);
+ }
+ else
+ {
+ /* udp leaves current_data pointing at the vxlan-gpe header */
+ vlib_buffer_advance (b0,
+ -(word) (sizeof (udp_header_t) +
+ sizeof (ip6_header_t)));
+ vlib_buffer_advance (b1,
+ -(word) (sizeof (udp_header_t) +
+ sizeof (ip6_header_t)));
+
+ iuvn6_0 = vlib_buffer_get_current (b0);
+ iuvn6_1 = vlib_buffer_get_current (b1);
+
+ /* pop (ip, udp, vxlan) */
+ vlib_buffer_advance (b0, sizeof (*iuvn6_0));
+ vlib_buffer_advance (b1, sizeof (*iuvn6_1));
+
+ di0 = vxlan6_gpe_find_tunnel (nngm, &last6, iuvn6_0);
+ di1 = vxlan6_gpe_find_tunnel (nngm, &last6, iuvn6_1);
+ }
+
+ /* Process packet 0 */
+ next0 = di0.next_index;
+ error0 = di0.error;
+ if (error0 != 0)
+ {
+ goto trace0;
+ }
+
+ t0 = pool_elt_at_index (nngm->tunnels, di0.tunnel_index);
+
+ sw_if_index0 = t0->sw_if_index;
+ len0 = vlib_buffer_length_in_chain (vm, b0);
+
+ /* Required to make the l2 tag push / pop code work on l2 subifs */
+ vnet_update_l2_len (b0);
+
+ /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
+
+ /**
+ * ip[46] lookup in the configured FIB
+ */
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
+
+ pkts_decapsulated++;
+ stats_n_packets += 1;
+ stats_n_bytes += len0;
+
+ if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
+ {
+ stats_n_packets -= 1;
+ stats_n_bytes -= len0;
+ if (stats_n_packets)
+ vlib_increment_combined_counter (im->combined_sw_if_counters +
+ VNET_INTERFACE_COUNTER_RX,
+ thread_index,
+ stats_sw_if_index,
+ stats_n_packets,
+ stats_n_bytes);
+ stats_n_packets = 1;
+ stats_n_bytes = len0;
+ stats_sw_if_index = sw_if_index0;
+ }
+
+ trace0:b0->error = error0 ? node->errors[error0] : 0;
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_gpe_rx_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->next_index = next0;
+ tr->error = error0;
+ tr->tunnel_index = di0.tunnel_index;
+ }
+
+ /* Process packet 1 */
+ next1 = di1.next_index;
+ error1 = di1.error;
+ if (error1 != 0)
+ {
+ goto trace1;
+ }
+
+ t1 = pool_elt_at_index (nngm->tunnels, di1.tunnel_index);
+
+ sw_if_index1 = t1->sw_if_index;
+ len1 = vlib_buffer_length_in_chain (vm, b1);
+
+ /* Required to make the l2 tag push / pop code work on l2 subifs */
+ vnet_update_l2_len (b1);
+
+ /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
+ vnet_buffer (b1)->sw_if_index[VLIB_RX] = t1->sw_if_index;
+
+ /*
+ * ip[46] lookup in the configured FIB
+ */
+ vnet_buffer (b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
+
+ pkts_decapsulated++;
+ stats_n_packets += 1;
+ stats_n_bytes += len1;
+
+ /* Batch stats increment on the same vxlan tunnel so counter
+ is not incremented per packet */
+ if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
+ {
+ stats_n_packets -= 1;
+ stats_n_bytes -= len1;
+ if (stats_n_packets)
+ vlib_increment_combined_counter (im->combined_sw_if_counters +
+ VNET_INTERFACE_COUNTER_RX,
+ thread_index,
+ stats_sw_if_index,
+ stats_n_packets,
+ stats_n_bytes);
+ stats_n_packets = 1;
+ stats_n_bytes = len1;
+ stats_sw_if_index = sw_if_index1;
+ }
+ vnet_buffer (b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
+
+ trace1:b1->error = error1 ? node->errors[error1] : 0;
+
+ if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_gpe_rx_trace_t *tr =
+ vlib_add_trace (vm, node, b1, sizeof (*tr));
+ tr->next_index = next1;
+ tr->error = error1;
+ tr->tunnel_index = di1.tunnel_index;
+ }
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, bi1, next0,
+ next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0;
+ ip4_vxlan_gpe_header_t *iuvn4_0;
+ ip6_vxlan_gpe_header_t *iuvn6_0;
+ vxlan_gpe_decap_info_t di0;
+ vxlan_gpe_tunnel_t *t0;
+ u32 error0;
+ u32 sw_if_index0, len0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ if (is_ip4)
+ {
+ /* udp leaves current_data pointing at the vxlan-gpe header */
+ vlib_buffer_advance (b0,
+ -(word) (sizeof (udp_header_t) +
+ sizeof (ip4_header_t)));
+
+ iuvn4_0 = vlib_buffer_get_current (b0);
+
+ /* pop (ip, udp, vxlan) */
+ vlib_buffer_advance (b0, sizeof (*iuvn4_0));
+
+ di0 = vxlan4_gpe_find_tunnel (nngm, &last4, iuvn4_0);
+ }
+ else
+ {
+ /* udp leaves current_data pointing at the vxlan-gpe header */
+ vlib_buffer_advance (b0,
+ -(word) (sizeof (udp_header_t) +
+ sizeof (ip6_header_t)));
+
+ iuvn6_0 = vlib_buffer_get_current (b0);
+
+ /* pop (ip, udp, vxlan) */
+ vlib_buffer_advance (b0, sizeof (*iuvn6_0));
+
+ di0 = vxlan6_gpe_find_tunnel (nngm, &last6, iuvn6_0);
+ }
+
+ next0 = di0.next_index;
+ error0 = di0.error;
+ if (error0 != 0)
+ {
+ goto trace00;
+ }
+
+ t0 = pool_elt_at_index (nngm->tunnels, di0.tunnel_index);
+
+ sw_if_index0 = t0->sw_if_index;
+ len0 = vlib_buffer_length_in_chain (vm, b0);
+
+ /* Required to make the l2 tag push / pop code work on l2 subifs */
+ vnet_update_l2_len (b0);
+
+ /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
+
+ /*
+ * ip[46] lookup in the configured FIB
+ */
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
+
+ pkts_decapsulated++;
+ stats_n_packets += 1;
+ stats_n_bytes += len0;
+
+ /* Batch stats increment on the same vxlan-gpe tunnel so counter
+ is not incremented per packet */
+ if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
+ {
+ stats_n_packets -= 1;
+ stats_n_bytes -= len0;
+ if (stats_n_packets)
+ vlib_increment_combined_counter (im->combined_sw_if_counters +
+ VNET_INTERFACE_COUNTER_RX,
+ thread_index,
+ stats_sw_if_index,
+ stats_n_packets,
+ stats_n_bytes);
+ stats_n_packets = 1;
+ stats_n_bytes = len0;
+ stats_sw_if_index = sw_if_index0;
+ }
+
+ trace00:b0->error = error0 ? node->errors[error0] : 0;
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_gpe_rx_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->next_index = next0;
+ tr->error = error0;
+ tr->tunnel_index = di0.tunnel_index;
+ }
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm,
+ is_ip4 ? vxlan4_gpe_input_node.index :
+ vxlan6_gpe_input_node.index,
+ VXLAN_GPE_ERROR_DECAPSULATED,
+ pkts_decapsulated);
+
+ /* Increment any remaining batch stats */
+ if (stats_n_packets)
+ {
+ vlib_increment_combined_counter (im->combined_sw_if_counters +
+ VNET_INTERFACE_COUNTER_RX,
+ thread_index, stats_sw_if_index,
+ stats_n_packets, stats_n_bytes);
+ node->runtime_data[0] = stats_sw_if_index;
+ }
+ return from_frame->n_vectors;
+}
+
/**
 * @brief Graph processing dispatch function for IPv4 VXLAN GPE
 *
 * Thin wrapper that invokes the shared vxlan_gpe_input() with is_ip4 = 1.
 *
 * @node vxlan4-gpe-input
 * @param *vm
 * @param *node
 * @param *from_frame
 *
 * @return from_frame->n_vectors
 *
 */
VLIB_NODE_FN (vxlan4_gpe_input_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */ 1);
}
+
+#ifndef CLIB_MARCH_VARIANT
+void
+vxlan_gpe_register_decap_protocol (u8 protocol_id, uword next_node_index)
+{
+ vxlan_gpe_main_t *hm = &vxlan_gpe_main;
+ hm->decap_next_node_list[protocol_id] = next_node_index;
+ return;
+}
+
+void
+vxlan_gpe_unregister_decap_protocol (u8 protocol_id, uword next_node_index)
+{
+ vxlan_gpe_main_t *hm = &vxlan_gpe_main;
+ hm->decap_next_node_list[protocol_id] = VXLAN_GPE_INPUT_NEXT_DROP;
+ return;
+}
+#endif /* CLIB_MARCH_VARIANT */
+
/**
 * @brief Graph processing dispatch function for IPv6 VXLAN GPE
 *
 * Thin wrapper that invokes the shared vxlan_gpe_input() with is_ip4 = 0.
 *
 * @node vxlan6-gpe-input
 * @param *vm
 * @param *node
 * @param *from_frame
 *
 * @return from_frame->n_vectors - uword
 *
 */
VLIB_NODE_FN (vxlan6_gpe_input_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */ 0);
}
+
+/**
+ * @brief VXLAN GPE error strings
+ */
+static char *vxlan_gpe_error_strings[] = {
+#define vxlan_gpe_error(n,s) s,
+#include <vxlan-gpe/vxlan_gpe_error.def>
+#undef vxlan_gpe_error
+#undef _
+};
+
/** Node registration for the IPv4 VXLAN-GPE decap node. */
VLIB_REGISTER_NODE (vxlan4_gpe_input_node) = {
  .name = "vxlan4-gpe-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_gpe_error_strings),
  .error_strings = vxlan_gpe_error_strings,

  .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
    foreach_vxlan_gpe_input_next
#undef _
  },

  .format_buffer = format_vxlan_gpe_with_length,
  .format_trace = format_vxlan_gpe_rx_trace,
  // $$$$ .unformat_buffer = unformat_vxlan_gpe_header,
};
+
/** Node registration for the IPv6 VXLAN-GPE decap node. */
VLIB_REGISTER_NODE (vxlan6_gpe_input_node) = {
  .name = "vxlan6-gpe-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_gpe_error_strings),
  .error_strings = vxlan_gpe_error_strings,

  .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
    foreach_vxlan_gpe_input_next
#undef _
  },

  .format_buffer = format_vxlan_gpe_with_length,
  .format_trace = format_vxlan_gpe_rx_trace,
  // $$$$ .unformat_buffer = unformat_vxlan_gpe_header,
};
+
/** Next nodes for the ip[46]-vxlan-gpe-bypass feature nodes. */
typedef enum
{
  IP_VXLAN_BYPASS_NEXT_DROP,	/**< drop the packet */
  IP_VXLAN_BYPASS_NEXT_VXLAN,	/**< hand off to vxlan[46]-gpe-input */
  IP_VXLAN_BYPASS_N_NEXT,
} ip_vxlan_bypass_next_t;
+
/**
 * @brief ip[46]-vxlan-gpe-bypass feature: short-circuit locally-terminated
 * VXLAN-GPE packets straight to vxlan[46]-gpe-input, skipping the full IP
 * lookup.
 *
 * For each packet: default to the next IP feature, then - if the packet is
 * UDP, matches a configured tunnel, is addressed to a local VTEP, and passes
 * UDP length/checksum validation - advance past the outer IP+UDP headers and
 * redirect it to the VXLAN-GPE input node instead.
 *
 * NOTE(review): error_node is taken from ip4_input_node and
 * ip4_forward_next_trace() is used even on the IPv6 path (is_ip4 == 0) -
 * confirm the IP6_ERROR_* codes index correctly into that node's error
 * table, or whether the ip6 equivalents were intended.
 *
 * @param *vm
 * @param *node
 * @param *frame
 * @param is_ip4 1 for the ip4 feature node, 0 for ip6
 *
 * @return frame->n_vectors
 */
always_inline uword
ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
			    vlib_node_runtime_t * node,
			    vlib_frame_t * frame, u32 is_ip4)
{
  vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
  u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_input_node.index);
  vtep4_key_t last_vtep4;	/* last IPv4 address / fib index
				   matching a local VTEP address */
  vtep6_key_t last_vtep6;	/* last IPv6 address / fib index
				   matching a local VTEP address */
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;

  vxlan4_gpe_tunnel_cache_t last4;
  vxlan6_gpe_tunnel_cache_t last6;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  vlib_get_buffers (vm, from, bufs, n_left_from);

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    ip4_forward_next_trace (vm, node, frame, VLIB_TX);

  /* Invalidate the VTEP and tunnel lookup caches */
  if (is_ip4)
    {
      vtep4_key_init (&last_vtep4);
      clib_memset (&last4, 0xff, sizeof last4);
    }
  else
    {
      vtep6_key_init (&last_vtep6);
      clib_memset (&last6, 0xff, sizeof last6);
    }

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      /* Dual-packet loop */
      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  vlib_buffer_t *b0, *b1;
	  ip4_header_t *ip40, *ip41;
	  ip6_header_t *ip60, *ip61;
	  udp_header_t *udp0, *udp1;
	  ip4_vxlan_gpe_header_t *iuvn4_0, *iuvn4_1;
	  ip6_vxlan_gpe_header_t *iuvn6_0, *iuvn6_1;
	  vxlan_gpe_decap_info_t di0, di1;
	  u32 bi0, ip_len0, udp_len0, flags0, next0;
	  u32 bi1, ip_len1, udp_len1, flags1, next1;
	  i32 len_diff0, len_diff1;
	  u8 error0, good_udp0, proto0;
	  u8 error1, good_udp1, proto1;

	  /* Prefetch next iteration. */
	  {
	    vlib_prefetch_buffer_header (b[2], LOAD);
	    vlib_prefetch_buffer_header (b[3], LOAD);

	    CLIB_PREFETCH (b[2]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
	    CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
	  }

	  bi0 = to_next[0] = from[0];
	  bi1 = to_next[1] = from[1];
	  from += 2;
	  n_left_from -= 2;
	  to_next += 2;
	  n_left_to_next -= 2;

	  b0 = b[0];
	  b1 = b[1];
	  b += 2;
	  if (is_ip4)
	    {
	      ip40 = vlib_buffer_get_current (b0);
	      ip41 = vlib_buffer_get_current (b1);
	    }
	  else
	    {
	      ip60 = vlib_buffer_get_current (b0);
	      ip61 = vlib_buffer_get_current (b1);
	    }

	  /* Setup packet for next IP feature */
	  vnet_feature_next (&next0, b0);
	  vnet_feature_next (&next1, b1);

	  if (is_ip4)
	    {
	      proto0 = ip40->protocol;
	      proto1 = ip41->protocol;
	    }
	  else
	    {
	      proto0 = ip60->protocol;
	      proto1 = ip61->protocol;
	    }

	  /* Process packet 0 */
	  if (proto0 != IP_PROTOCOL_UDP)
	    goto exit0;		/* not UDP packet */

	  if (is_ip4)
	    {
	      udp0 = ip4_next_header (ip40);
	      iuvn4_0 = vlib_buffer_get_current (b0);
	      di0 = vxlan4_gpe_find_tunnel (ngm, &last4, iuvn4_0);
	    }
	  else
	    {
	      udp0 = ip6_next_header (ip60);
	      iuvn6_0 = vlib_buffer_get_current (b0);
	      di0 = vxlan6_gpe_find_tunnel (ngm, &last6, iuvn6_0);
	    }

	  if (PREDICT_FALSE (di0.tunnel_index == ~0))
	    goto exit0;		/* unknown interface */

	  /* Validate DIP against VTEPs */
	  if (is_ip4)
	    {
#ifdef CLIB_HAVE_VEC512
	      if (!vtep4_check_vector (&ngm->vtep_table, b0, ip40, &last_vtep4,
				       &ngm->vtep4_u512))
#else
	      if (!vtep4_check (&ngm->vtep_table, b0, ip40, &last_vtep4))
#endif
		goto exit0;	/* no local VTEP for VXLAN packet */
	    }
	  else
	    {
	      if (!vtep6_check (&ngm->vtep_table, b0, ip60, &last_vtep6))
		goto exit0;	/* no local VTEP for VXLAN packet */
	    }

	  flags0 = b0->flags;
	  good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

	  /* Don't verify UDP checksum for packets with explicit zero checksum. */
	  good_udp0 |= udp0->checksum == 0;

	  /* Verify UDP length */
	  if (is_ip4)
	    ip_len0 = clib_net_to_host_u16 (ip40->length);
	  else
	    ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
	  udp_len0 = clib_net_to_host_u16 (udp0->length);
	  len_diff0 = ip_len0 - udp_len0;

	  /* Verify UDP checksum */
	  if (PREDICT_FALSE (!good_udp0))
	    {
	      if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
		{
		  if (is_ip4)
		    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
		  else
		    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
		  good_udp0 =
		    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
		}
	    }

	  if (is_ip4)
	    {
	      error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
	      error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
	    }
	  else
	    {
	      error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
	      error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
	    }

	  next0 = error0 ?
	    IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
	  b0->error = error0 ? error_node->errors[error0] : 0;

	  /* vxlan_gpe-input node expect current at VXLAN header */
	  if (is_ip4)
	    vlib_buffer_advance (b0,
				 sizeof (ip4_header_t) +
				 sizeof (udp_header_t));
	  else
	    vlib_buffer_advance (b0,
				 sizeof (ip6_header_t) +
				 sizeof (udp_header_t));

	exit0:
	  /* Process packet 1 */
	  if (proto1 != IP_PROTOCOL_UDP)
	    goto exit1;		/* not UDP packet */

	  if (is_ip4)
	    {
	      udp1 = ip4_next_header (ip41);
	      iuvn4_1 = vlib_buffer_get_current (b1);
	      di1 = vxlan4_gpe_find_tunnel (ngm, &last4, iuvn4_1);
	    }
	  else
	    {
	      udp1 = ip6_next_header (ip61);
	      iuvn6_1 = vlib_buffer_get_current (b1);
	      di1 = vxlan6_gpe_find_tunnel (ngm, &last6, iuvn6_1);
	    }

	  if (PREDICT_FALSE (di1.tunnel_index == ~0))
	    goto exit1;		/* unknown interface */

	  /* Validate DIP against VTEPs */
	  if (is_ip4)
	    {
#ifdef CLIB_HAVE_VEC512
	      if (!vtep4_check_vector (&ngm->vtep_table, b1, ip41, &last_vtep4,
				       &ngm->vtep4_u512))
#else
	      if (!vtep4_check (&ngm->vtep_table, b1, ip41, &last_vtep4))
#endif
		goto exit1;	/* no local VTEP for VXLAN packet */
	    }
	  else
	    {
	      if (!vtep6_check (&ngm->vtep_table, b1, ip61, &last_vtep6))
		goto exit1;	/* no local VTEP for VXLAN packet */
	    }

	  flags1 = b1->flags;
	  good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

	  /* Don't verify UDP checksum for packets with explicit zero checksum. */
	  good_udp1 |= udp1->checksum == 0;

	  /* Verify UDP length */
	  if (is_ip4)
	    ip_len1 = clib_net_to_host_u16 (ip41->length);
	  else
	    ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
	  udp_len1 = clib_net_to_host_u16 (udp1->length);
	  len_diff1 = ip_len1 - udp_len1;

	  /* Verify UDP checksum */
	  if (PREDICT_FALSE (!good_udp1))
	    {
	      if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
		{
		  if (is_ip4)
		    flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
		  else
		    flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
		  good_udp1 =
		    (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
		}
	    }

	  if (is_ip4)
	    {
	      error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
	      error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
	    }
	  else
	    {
	      error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
	      error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
	    }

	  next1 = error1 ?
	    IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
	  b1->error = error1 ? error_node->errors[error1] : 0;

	  /* vxlan_gpe-input node expect current at VXLAN header */
	  if (is_ip4)
	    vlib_buffer_advance (b1,
				 sizeof (ip4_header_t) +
				 sizeof (udp_header_t));
	  else
	    vlib_buffer_advance (b1,
				 sizeof (ip6_header_t) +
				 sizeof (udp_header_t));

	exit1:
	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, bi1, next0, next1);
	}

      /* Single-packet loop */
      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  vlib_buffer_t *b0;
	  ip4_header_t *ip40;
	  ip6_header_t *ip60;
	  udp_header_t *udp0;
	  ip4_vxlan_gpe_header_t *iuvn4_0;
	  ip6_vxlan_gpe_header_t *iuvn6_0;
	  vxlan_gpe_decap_info_t di0;
	  u32 bi0, ip_len0, udp_len0, flags0, next0;
	  i32 len_diff0;
	  u8 error0, good_udp0, proto0;

	  bi0 = to_next[0] = from[0];
	  from += 1;
	  n_left_from -= 1;
	  to_next += 1;
	  n_left_to_next -= 1;

	  b0 = b[0];
	  b++;
	  if (is_ip4)
	    ip40 = vlib_buffer_get_current (b0);
	  else
	    ip60 = vlib_buffer_get_current (b0);

	  /* Setup packet for next IP feature */
	  vnet_feature_next (&next0, b0);

	  if (is_ip4)
	    proto0 = ip40->protocol;
	  else
	    proto0 = ip60->protocol;

	  if (proto0 != IP_PROTOCOL_UDP)
	    goto exit;		/* not UDP packet */

	  if (is_ip4)
	    {
	      udp0 = ip4_next_header (ip40);
	      iuvn4_0 = vlib_buffer_get_current (b0);
	      di0 = vxlan4_gpe_find_tunnel (ngm, &last4, iuvn4_0);
	    }
	  else
	    {
	      udp0 = ip6_next_header (ip60);
	      iuvn6_0 = vlib_buffer_get_current (b0);
	      di0 = vxlan6_gpe_find_tunnel (ngm, &last6, iuvn6_0);
	    }

	  if (PREDICT_FALSE (di0.tunnel_index == ~0))
	    goto exit;		/* unknown interface */

	  /* Validate DIP against VTEPs */

	  if (is_ip4)
	    {
#ifdef CLIB_HAVE_VEC512
	      if (!vtep4_check_vector (&ngm->vtep_table, b0, ip40, &last_vtep4,
				       &ngm->vtep4_u512))
#else
	      if (!vtep4_check (&ngm->vtep_table, b0, ip40, &last_vtep4))
#endif
		goto exit;	/* no local VTEP for VXLAN packet */
	    }
	  else
	    {
	      if (!vtep6_check (&ngm->vtep_table, b0, ip60, &last_vtep6))
		goto exit;	/* no local VTEP for VXLAN packet */
	    }

	  flags0 = b0->flags;
	  good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

	  /* Don't verify UDP checksum for packets with explicit zero checksum. */
	  good_udp0 |= udp0->checksum == 0;

	  /* Verify UDP length */
	  if (is_ip4)
	    ip_len0 = clib_net_to_host_u16 (ip40->length);
	  else
	    ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
	  udp_len0 = clib_net_to_host_u16 (udp0->length);
	  len_diff0 = ip_len0 - udp_len0;

	  /* Verify UDP checksum */
	  if (PREDICT_FALSE (!good_udp0))
	    {
	      if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
		{
		  if (is_ip4)
		    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
		  else
		    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
		  good_udp0 =
		    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
		}
	    }

	  if (is_ip4)
	    {
	      error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
	      error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
	    }
	  else
	    {
	      error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
	      error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
	    }

	  next0 = error0 ?
	    IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
	  b0->error = error0 ? error_node->errors[error0] : 0;

	  /* vxlan_gpe-input node expect current at VXLAN header */
	  if (is_ip4)
	    vlib_buffer_advance (b0,
				 sizeof (ip4_header_t) +
				 sizeof (udp_header_t));
	  else
	    vlib_buffer_advance (b0,
				 sizeof (ip6_header_t) +
				 sizeof (udp_header_t));

	exit:
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, next0);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
+
/** @brief IPv4 dispatch wrapper for the vxlan-gpe bypass feature. */
VLIB_NODE_FN (ip4_vxlan_gpe_bypass_node) (vlib_main_t * vm,
					  vlib_node_runtime_t * node,
					  vlib_frame_t * frame)
{
  return ip_vxlan_gpe_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
}
+
/** Node registration for the ip4-vxlan-gpe-bypass feature node. */
VLIB_REGISTER_NODE (ip4_vxlan_gpe_bypass_node) = {
  .name = "ip4-vxlan-gpe-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan4-gpe-input",
  },

  .format_buffer = format_ip4_header,
  .format_trace = format_ip4_forward_next_trace,
};
+
#ifndef CLIB_MARCH_VARIANT
/* Dummy init function to get us linked in. */
clib_error_t *
ip4_vxlan_gpe_bypass_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (ip4_vxlan_gpe_bypass_init);
#endif /* CLIB_MARCH_VARIANT */
+
/** @brief IPv6 dispatch wrapper for the vxlan-gpe bypass feature. */
VLIB_NODE_FN (ip6_vxlan_gpe_bypass_node) (vlib_main_t * vm,
					  vlib_node_runtime_t * node,
					  vlib_frame_t * frame)
{
  return ip_vxlan_gpe_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
}
+
/** Node registration for the ip6-vxlan-gpe-bypass feature node. */
VLIB_REGISTER_NODE (ip6_vxlan_gpe_bypass_node) = {
  .name = "ip6-vxlan-gpe-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan6-gpe-input",
  },

  .format_buffer = format_ip6_header,
  .format_trace = format_ip6_forward_next_trace,
};
+
#ifndef CLIB_MARCH_VARIANT
/* Dummy init function to get us linked in. */
clib_error_t *
ip6_vxlan_gpe_bypass_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (ip6_vxlan_gpe_bypass_init);
#endif /* CLIB_MARCH_VARIANT */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/vxlan-gpe/dir.dox b/src/plugins/vxlan-gpe/dir.dox
new file mode 100644
index 00000000000..c154733b21f
--- /dev/null
+++ b/src/plugins/vxlan-gpe/dir.dox
@@ -0,0 +1,32 @@
+/*
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ @dir
+ @brief VXLAN GPE
+
+ Based on IETF: draft-quinn-vxlan-gpe-03.txt
+
+Abstract
+
+ This draft describes extending Virtual eXtensible Local Area Network
+ (VXLAN), via changes to the VXLAN header, with three new
+ capabilities: support for multi-protocol encapsulation, operations,
+ administration and management (OAM) signaling and explicit
+ versioning.
+
+ See file: vxlan-gpe-rfc.txt
+
+*/ \ No newline at end of file
diff --git a/src/plugins/vxlan-gpe/encap.c b/src/plugins/vxlan-gpe/encap.c
new file mode 100644
index 00000000000..701c3af55b5
--- /dev/null
+++ b/src/plugins/vxlan-gpe/encap.c
@@ -0,0 +1,433 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief Functions for encapsulating VXLAN GPE tunnels
+ *
+*/
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/udp/udp_inlines.h>
+#include <vxlan-gpe/vxlan_gpe.h>
+
+/** Statistics (not really errors) */
+#define foreach_vxlan_gpe_encap_error \
+_(ENCAPSULATED, "good packets encapsulated")
+
+/**
+ * @brief VXLAN GPE encap error strings
+ */
+static char *vxlan_gpe_encap_error_strings[] = {
+#define _(sym,string) string,
+ foreach_vxlan_gpe_encap_error
+#undef _
+};
+
+/**
+ * @brief Struct for VXLAN GPE errors/counters
+ */
+typedef enum
+{
+#define _(sym,str) VXLAN_GPE_ENCAP_ERROR_##sym,
+ foreach_vxlan_gpe_encap_error
+#undef _
+ VXLAN_GPE_ENCAP_N_ERROR,
+} vxlan_gpe_encap_error_t;
+
+/**
+ * @brief Struct for tracing VXLAN GPE encapsulated packets
+ */
+typedef struct
+{
+ u32 tunnel_index;
+} vxlan_gpe_encap_trace_t;
+
+/**
+ * @brief Trace of packets encapsulated in VXLAN GPE
+ *
+ * @param *s
+ * @param *args
+ *
+ * @return *s
+ *
+ */
+u8 *
+format_vxlan_gpe_encap_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ vxlan_gpe_encap_trace_t *t = va_arg (*args, vxlan_gpe_encap_trace_t *);
+
+ s = format (s, "VXLAN-GPE-ENCAP: tunnel %d", t->tunnel_index);
+ return s;
+}
+
+/**
+ * @brief Instantiates UDP + VXLAN-GPE header then set next node to IP4|6 lookup
+ *
+ * @param *ngm
+ * @param *b0
+ * @param *t0 contains rewrite header
+ * @param *next0 relative index of next dispatch function (next node)
+ * @param af Address family of the outer header (AF_IP4 or AF_IP6)
+ *
+ */
+always_inline void
+vxlan_gpe_encap_one_inline (vxlan_gpe_main_t *ngm, vlib_buffer_t *b0,
+ vxlan_gpe_tunnel_t *t0, u32 *next0,
+ ip_address_family_t af)
+{
+ ASSERT (sizeof (ip4_vxlan_gpe_header_t) == 36);
+ ASSERT (sizeof (ip6_vxlan_gpe_header_t) == 56);
+
+ ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, t0->rewrite_size, af,
+ N_AF, UDP_ENCAP_FIXUP_NONE);
+ next0[0] = t0->encap_next_node;
+}
+
+/**
+ * @brief Instantiates UDP + VXLAN-GPE header then set next node to IP4|6 lookup for two packets
+ *
+ * @param *ngm
+ * @param *b0 Packet0
+ * @param *b1 Packet1
+ * @param *t0 contains rewrite header for Packet0
+ * @param *t1 contains rewrite header for Packet1
+ * @param *next0 relative index of next dispatch function (next node) for Packet0
+ * @param *next1 relative index of next dispatch function (next node) for Packet1
+ * @param af Address family of the outer headers (AF_IP4 or AF_IP6)
+ *
+ */
+always_inline void
+vxlan_gpe_encap_two_inline (vxlan_gpe_main_t *ngm, vlib_buffer_t *b0,
+ vlib_buffer_t *b1, vxlan_gpe_tunnel_t *t0,
+ vxlan_gpe_tunnel_t *t1, u32 *next0, u32 *next1,
+ ip_address_family_t af)
+{
+ ASSERT (sizeof (ip4_vxlan_gpe_header_t) == 36);
+ ASSERT (sizeof (ip6_vxlan_gpe_header_t) == 56);
+
+ ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, t0->rewrite_size, af,
+ N_AF, UDP_ENCAP_FIXUP_NONE);
+ ip_udp_encap_one (ngm->vlib_main, b1, t1->rewrite, t1->rewrite_size, af,
+ N_AF, UDP_ENCAP_FIXUP_NONE);
+ next0[0] = next1[0] = t0->encap_next_node;
+}
+
+/**
+ * @brief Common processing for IPv4 and IPv6 VXLAN GPE encap dispatch functions
+ *
+ * It is worth noting that other than trivial UDP forwarding (transit), VXLAN GPE
+ * tunnels are "establish local". This means that we don't have a TX interface as yet
+ * as we need to look up where the outer-header dest is. By setting the TX index in the
+ * buffer metadata to the encap FIB, we can do a lookup to get the adjacency and real TX.
+ *
+ * vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
+ *
+ * @node vxlan-gpe-encap
+ * @param *vm
+ * @param *node
+ * @param *from_frame
+ *
+ * @return from_frame->n_vectors
+ *
+ */
+static uword
+vxlan_gpe_encap (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+{
+ u32 n_left_from, next_index, *from, *to_next;
+ vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
+ vnet_main_t *vnm = ngm->vnet_main;
+ vnet_interface_main_t *im = &vnm->interface_main;
+ u32 pkts_encapsulated = 0;
+ clib_thread_index_t thread_index = vm->thread_index;
+ u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+ stats_sw_if_index = node->runtime_data[0];
+ stats_n_packets = stats_n_bytes = 0;
+ vlib_get_buffers (vm, from, bufs, n_left_from);
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+ u32 sw_if_index0 = ~0, sw_if_index1 = ~0, len0, len1;
+ vnet_hw_interface_t *hi0, *hi1;
+ vxlan_gpe_tunnel_t *t0 = NULL, *t1 = NULL;
+ ip_address_family_t af_0 = AF_IP4, af_1 = AF_IP4;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ u32 next0, next1;
+
+ next0 = next1 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_prefetch_buffer_header (b[2], LOAD);
+ vlib_prefetch_buffer_header (b[3], LOAD);
+
+ CLIB_PREFETCH (b[2]->data - CLIB_CACHE_LINE_BYTES,
+ 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (b[3]->data - CLIB_CACHE_LINE_BYTES,
+ 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ /* get "af_0" */
+ if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
+ {
+ sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
+ hi0 =
+ vnet_get_sup_hw_interface (vnm,
+ vnet_buffer (b[0])->sw_if_index
+ [VLIB_TX]);
+ t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);
+ af_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4 ? AF_IP4 : AF_IP6);
+ }
+
+ /* get "af_1" */
+ if (sw_if_index1 != vnet_buffer (b[1])->sw_if_index[VLIB_TX])
+ {
+ if (sw_if_index0 == vnet_buffer (b[1])->sw_if_index[VLIB_TX])
+ {
+ sw_if_index1 = sw_if_index0;
+ hi1 = hi0;
+ t1 = t0;
+ af_1 = af_0;
+ }
+ else
+ {
+ sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
+ hi1 =
+ vnet_get_sup_hw_interface (vnm,
+ vnet_buffer (b[1])->sw_if_index
+ [VLIB_TX]);
+ t1 = pool_elt_at_index (ngm->tunnels, hi1->dev_instance);
+ af_1 =
+ (t1->flags & VXLAN_GPE_TUNNEL_IS_IPV4 ? AF_IP4 : AF_IP6);
+ }
+ }
+
+ if (PREDICT_TRUE (af_0 == af_1))
+ {
+ vxlan_gpe_encap_two_inline (ngm, b[0], b[1], t0, t1, &next0,
+ &next1, af_0);
+ }
+ else
+ {
+ vxlan_gpe_encap_one_inline (ngm, b[0], t0, &next0, af_0);
+ vxlan_gpe_encap_one_inline (ngm, b[1], t1, &next1, af_1);
+ }
+
+ /* Reset to look up tunnel partner in the configured FIB */
+ vnet_buffer (b[0])->sw_if_index[VLIB_TX] = t0->encap_fib_index;
+ vnet_buffer (b[1])->sw_if_index[VLIB_TX] = t1->encap_fib_index;
+ vnet_buffer (b[0])->sw_if_index[VLIB_RX] = sw_if_index0;
+ vnet_buffer (b[1])->sw_if_index[VLIB_RX] = sw_if_index1;
+ pkts_encapsulated += 2;
+
+ len0 = vlib_buffer_length_in_chain (vm, b[0]);
+ len1 = vlib_buffer_length_in_chain (vm, b[1]);
+ stats_n_packets += 2;
+ stats_n_bytes += len0 + len1;
+
+ /* Batch stats increment on the same vxlan tunnel so counter is not
+ incremented per packet. Note stats are still incremented for deleted
+ and admin-down tunnel where packets are dropped. It is not worthwhile
+ to check for this rare case and affect normal path performance. */
+ if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index)
+ || (sw_if_index1 != stats_sw_if_index)))
+ {
+ stats_n_packets -= 2;
+ stats_n_bytes -= len0 + len1;
+ if (sw_if_index0 == sw_if_index1)
+ {
+ if (stats_n_packets)
+ vlib_increment_combined_counter
+ (im->combined_sw_if_counters +
+ VNET_INTERFACE_COUNTER_TX, thread_index,
+ stats_sw_if_index, stats_n_packets, stats_n_bytes);
+ stats_sw_if_index = sw_if_index0;
+ stats_n_packets = 2;
+ stats_n_bytes = len0 + len1;
+ }
+ else
+ {
+ vlib_increment_combined_counter (im->combined_sw_if_counters
+ +
+ VNET_INTERFACE_COUNTER_TX,
+ thread_index, sw_if_index0,
+ 1, len0);
+ vlib_increment_combined_counter (im->combined_sw_if_counters
+ +
+ VNET_INTERFACE_COUNTER_TX,
+ thread_index, sw_if_index1,
+ 1, len1);
+ }
+ }
+
+ if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_gpe_encap_trace_t *tr =
+ vlib_add_trace (vm, node, b[0], sizeof (*tr));
+ tr->tunnel_index = t0 - ngm->tunnels;
+ }
+
+ if (PREDICT_FALSE (b[1]->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_gpe_encap_trace_t *tr = vlib_add_trace (vm, node, b[1],
+ sizeof (*tr));
+ tr->tunnel_index = t1 - ngm->tunnels;
+ }
+ b += 2;
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, bi1, next0,
+ next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ u32 next0 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ /* get "af_0" */
+ if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
+ {
+ sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
+ hi0 =
+ vnet_get_sup_hw_interface (vnm,
+ vnet_buffer (b[0])->sw_if_index
+ [VLIB_TX]);
+
+ t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);
+
+ af_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4 ? AF_IP4 : AF_IP6);
+ }
+
+ vxlan_gpe_encap_one_inline (ngm, b[0], t0, &next0, af_0);
+
+ /* Reset to look up tunnel partner in the configured FIB */
+ vnet_buffer (b[0])->sw_if_index[VLIB_TX] = t0->encap_fib_index;
+ vnet_buffer (b[0])->sw_if_index[VLIB_RX] = sw_if_index0;
+ pkts_encapsulated++;
+
+ len0 = vlib_buffer_length_in_chain (vm, b[0]);
+ stats_n_packets += 1;
+ stats_n_bytes += len0;
+
+ /* Batch stats increment on the same vxlan tunnel so counter is not
+ * incremented per packet. Note stats are still incremented for deleted
+ * and admin-down tunnel where packets are dropped. It is not worthwhile
+ * to check for this rare case and affect normal path performance. */
+ if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
+ {
+ stats_n_packets -= 1;
+ stats_n_bytes -= len0;
+ if (stats_n_packets)
+ vlib_increment_combined_counter (im->combined_sw_if_counters +
+ VNET_INTERFACE_COUNTER_TX,
+ thread_index,
+ stats_sw_if_index,
+ stats_n_packets,
+ stats_n_bytes);
+ stats_n_packets = 1;
+ stats_n_bytes = len0;
+ stats_sw_if_index = sw_if_index0;
+ }
+ if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vxlan_gpe_encap_trace_t *tr = vlib_add_trace (vm, node, b[0],
+ sizeof (*tr));
+ tr->tunnel_index = t0 - ngm->tunnels;
+ }
+ b += 1;
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ vlib_node_increment_counter (vm, node->node_index,
+ VXLAN_GPE_ENCAP_ERROR_ENCAPSULATED,
+ pkts_encapsulated);
+ /* Increment any remaining batch stats */
+ if (stats_n_packets)
+ {
+ vlib_increment_combined_counter (im->combined_sw_if_counters +
+ VNET_INTERFACE_COUNTER_TX,
+ thread_index, stats_sw_if_index,
+ stats_n_packets, stats_n_bytes);
+ node->runtime_data[0] = stats_sw_if_index;
+ }
+
+ return from_frame->n_vectors;
+}
+
+VLIB_REGISTER_NODE (vxlan_gpe_encap_node) = {
+ .function = vxlan_gpe_encap,
+ .name = "vxlan-gpe-encap",
+ .vector_size = sizeof (u32),
+ .format_trace = format_vxlan_gpe_encap_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(vxlan_gpe_encap_error_strings),
+ .error_strings = vxlan_gpe_encap_error_strings,
+
+ .n_next_nodes = VXLAN_GPE_ENCAP_N_NEXT,
+
+ .next_nodes = {
+ [VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP] = "ip4-lookup",
+ [VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP] = "ip6-lookup",
+ [VXLAN_GPE_ENCAP_NEXT_DROP] = "error-drop",
+ },
+};
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/vxlan-gpe/plugin.c b/src/plugins/vxlan-gpe/plugin.c
new file mode 100644
index 00000000000..5a711a39d78
--- /dev/null
+++ b/src/plugins/vxlan-gpe/plugin.c
@@ -0,0 +1,26 @@
+/*
+ * plugin.c: vxlan-gpe
+ *
+ * Copyright (c) OpenInfra Foundation Europe.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/plugin/plugin.h>
+#include <vpp/app/version.h>
+// register a plugin
+
+VLIB_PLUGIN_REGISTER () = {
+ .version = VPP_BUILD_VER,
+ .description = "VxLan GPE Tunnels",
+};
diff --git a/src/plugins/vxlan-gpe/vxlan-gpe-rfc.txt b/src/plugins/vxlan-gpe/vxlan-gpe-rfc.txt
new file mode 100644
index 00000000000..35cee50f573
--- /dev/null
+++ b/src/plugins/vxlan-gpe/vxlan-gpe-rfc.txt
@@ -0,0 +1,868 @@
+Network Working Group P. Quinn
+Internet-Draft Cisco Systems, Inc.
+Intended status: Experimental P. Agarwal
+Expires: January 4, 2015 Broadcom
+ R. Fernando
+ L. Kreeger
+ D. Lewis
+ F. Maino
+ M. Smith
+ N. Yadav
+ Cisco Systems, Inc.
+ L. Yong
+ Huawei USA
+ X. Xu
+ Huawei Technologies
+ U. Elzur
+ Intel
+ P. Garg
+ Microsoft
+ July 3, 2014
+
+
+ Generic Protocol Extension for VXLAN
+ draft-quinn-vxlan-gpe-03.txt
+
+Abstract
+
+ This draft describes extending Virtual eXtensible Local Area Network
+ (VXLAN), via changes to the VXLAN header, with three new
+ capabilities: support for multi-protocol encapsulation, operations,
+ administration and management (OAM) signaling and explicit
+ versioning.
+
+Status of this Memo
+
+ This Internet-Draft is submitted in full conformance with the
+ provisions of BCP 78 and BCP 79.
+
+ Internet-Drafts are working documents of the Internet Engineering
+ Task Force (IETF). Note that other groups may also distribute
+ working documents as Internet-Drafts. The list of current Internet-
+ Drafts is at http://datatracker.ietf.org/drafts/current/.
+
+ Internet-Drafts are draft documents valid for a maximum of six months
+ and may be updated, replaced, or obsoleted by other documents at any
+ time. It is inappropriate to use Internet-Drafts as reference
+ material or to cite them other than as "work in progress."
+
+
+
+
+Quinn, et al. Expires January 4, 2015 [Page 1]
+
+Internet-Draft Generic Protocol Extension for VXLAN July 2014
+
+
+ This Internet-Draft will expire on January 4, 2015.
+
+Copyright Notice
+
+ Copyright (c) 2014 IETF Trust and the persons identified as the
+ document authors. All rights reserved.
+
+ This document is subject to BCP 78 and the IETF Trust's Legal
+ Provisions Relating to IETF Documents
+ (http://trustee.ietf.org/license-info) in effect on the date of
+ publication of this document. Please review these documents
+ carefully, as they describe your rights and restrictions with respect
+ to this document. Code Components extracted from this document must
+ include Simplified BSD License text as described in Section 4.e of
+ the Trust Legal Provisions and are provided without warranty as
+ described in the Simplified BSD License.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Quinn, et al. Expires January 4, 2015 [Page 2]
+
+Internet-Draft Generic Protocol Extension for VXLAN July 2014
+
+
+Table of Contents
+
+ 1. Introduction . . . . . . . . . . . . . . . . . . . . . . . . . 4
+ 2. VXLAN Without Protocol Extension . . . . . . . . . . . . . . . 5
+ 3. Generic Protocol Extension VXLAN (VXLAN-gpe) . . . . . . . . . 6
+ 3.1. Multi Protocol Support . . . . . . . . . . . . . . . . . . 6
+ 3.2. OAM Support . . . . . . . . . . . . . . . . . . . . . . . 7
+ 3.3. Version Bits . . . . . . . . . . . . . . . . . . . . . . . 7
+ 4. Backward Compatibility . . . . . . . . . . . . . . . . . . . . 8
+ 4.1. VXLAN VTEP to VXLAN-gpe VTEP . . . . . . . . . . . . . . . 8
+ 4.2. VXLAN-gpe VTEP to VXLAN VTEP . . . . . . . . . . . . . . . 8
+ 4.3. VXLAN-gpe UDP Ports . . . . . . . . . . . . . . . . . . . 8
+ 4.4. VXLAN-gpe and Encapsulated IP Header Fields . . . . . . . 8
+ 5. VXLAN-gpe Examples . . . . . . . . . . . . . . . . . . . . . . 9
+ 6. Security Considerations . . . . . . . . . . . . . . . . . . . 11
+ 7. Acknowledgments . . . . . . . . . . . . . . . . . . . . . . . 12
+ 8. IANA Considerations . . . . . . . . . . . . . . . . . . . . . 13
+ 8.1. UDP Port . . . . . . . . . . . . . . . . . . . . . . . . . 13
+ 8.2. VXLAN-gpe Next Protocol . . . . . . . . . . . . . . . . . 13
+ 8.3. VXLAN-gpe Reserved Bits . . . . . . . . . . . . . . . . . 13
+ 9. References . . . . . . . . . . . . . . . . . . . . . . . . . . 14
+ 9.1. Normative References . . . . . . . . . . . . . . . . . . . 14
+ 9.2. Informative References . . . . . . . . . . . . . . . . . . 14
+ Authors' Addresses . . . . . . . . . . . . . . . . . . . . . . . . 15
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Quinn, et al. Expires January 4, 2015 [Page 3]
+
+Internet-Draft Generic Protocol Extension for VXLAN July 2014
+
+
+1. Introduction
+
+ Virtual eXtensible Local Area Network [VXLAN] defines an
+ encapsulation format that encapsulates Ethernet frames in an outer
+ UDP/IP transport. As data centers evolve, the need to carry other
+ protocols encapsulated in an IP packet is required, as well as the
+ need to provide increased visibility and diagnostic capabilities
+ within the overlay. The VXLAN header does not specify the protocol
+ being encapsulated and therefore is currently limited to
+ encapsulating only Ethernet frame payload, nor does it provide the
+ ability to define OAM protocols. Rather than defining yet another
+ encapsulation, VXLAN is extended to provide protocol typing and OAM
+ capabilities.
+
+ This document describes extending VXLAN via the following changes:
+
+ Next Protocol Bit (P bit): A reserved flag bit is allocated, and set
+ in the VXLAN-gpe header to indicate that a next protocol field is
+ present.
+
+ OAM Flag Bit (O bit): A reserved flag bit is allocated, and set in
+ the VXLAN-gpe header, to indicate that the packet is an OAM
+ packet.
+
+ Version: Two reserved bits are allocated, and set in the VXLAN-gpe
+ header, to indicate VXLAN-gpe protocol version.
+
+ Next Protocol: A 8 bit next protocol field is present in the VXLAN-
+ gpe header.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Quinn, et al. Expires January 4, 2015 [Page 4]
+
+Internet-Draft Generic Protocol Extension for VXLAN July 2014
+
+
+2. VXLAN Without Protocol Extension
+
+ As described in the introduction, the VXLAN header has no protocol
+ identifier that indicates the type of payload being carried by VXLAN.
+ Because of this, VXLAN is limited to an Ethernet payload.
+ Furthermore, the VXLAN header has no mechanism to signal OAM packets.
+
+ The VXLAN header defines bits 0-7 as flags (some defined, some
+ reserved), the VXLAN network identifier (VNI) field and several
+ reserved bits. The flags provide flexibility to define how the
+ reserved bits can be used to change the definition of the VXLAN
+ header.
+
+
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |R|R|R|R|I|R|R|R| Reserved |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | VXLAN Network Identifier (VNI) | Reserved |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+
+ Figure 1: VXLAN Header
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Quinn, et al. Expires January 4, 2015 [Page 5]
+
+Internet-Draft Generic Protocol Extension for VXLAN July 2014
+
+
+3. Generic Protocol Extension VXLAN (VXLAN-gpe)
+
+3.1. Multi Protocol Support
+
+ This draft defines the following two changes to the VXLAN header in
+ order to support multi-protocol encapsulation:
+
+ P Bit: Flag bit 5 is defined as the Next Protocol bit. The P bit
+ MUST be set to 1 to indicate the presence of the 8 bit next
+ protocol field.
+
+ P = 0 indicates that the payload MUST conform to VXLAN as defined
+ in [VXLAN].
+
+ Flag bit 5 was chosen as the P bit because this flag bit is
+ currently reserved in VXLAN.
+
+ Next Protocol Field: The lower 8 bits of the first word are used to
+ carry a next protocol. This next protocol field contains the
+ protocol of the encapsulated payload packet. A new protocol
+ registry will be requested from IANA.
+
+ This draft defines the following Next Protocol values:
+
+ 0x1 : IPv4
+ 0x2 : IPv6
+ 0x3 : Ethernet
+ 0x4 : Network Service Header [NSH]
+
+
+
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |R|R|R|R|I|P|R|R| Reserved |Next Protocol |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | VXLAN Network Identifier (VNI) | Reserved |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+
+
+ Figure 2: VXLAN-gpe Next Protocol
+
+
+
+
+
+
+
+
+Quinn, et al. Expires January 4, 2015 [Page 6]
+
+Internet-Draft Generic Protocol Extension for VXLAN July 2014
+
+
+3.2. OAM Support
+
+ Flag bit 7 is defined as the O bit. When the O bit is set to 1, the
+ packet is an OAM packet and OAM processing MUST occur. The OAM
+ protocol details are out of scope for this document. As with the
+ P-bit, bit 7 is currently a reserved flag in VXLAN.
+
+
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |R|R|R|R|I|P|R|O| Reserved |Next Protocol |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | VXLAN Network Identifier (VNI) | Reserved |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+
+
+ Figure 3: VXLAN-gpe OAM Bit
+
+3.3. Version Bits
+
+ VXLAN-gpe bits 8 and 9 are defined as version bits. These bits are
+ reserved in VXLAN. The version field is used to ensure backward
+ compatibility going forward with future VXLAN-gpe updates.
+
+ The initial version for VXLAN-gpe is 0.
+
+
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |R|R|R|R|I|P|R|O|Ver| Reserved |Next Protocol |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | VXLAN Network Identifier (VNI) | Reserved |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+
+
+
+
+ Figure 4: VXLAN-gpe Version Bits
+
+
+
+
+
+
+
+Quinn, et al. Expires January 4, 2015 [Page 7]
+
+Internet-Draft Generic Protocol Extension for VXLAN July 2014
+
+
+4. Backward Compatibility
+
+4.1. VXLAN VTEP to VXLAN-gpe VTEP
+
+ As per VXLAN, reserved bits 5 and 7, VXLAN-gpe P and O-bits
+ respectively must be set to zero. The remaining reserved bits must
+ be zero, including the VXLAN-gpe version field, bits 8 and 9. The
+ encapsulated payload MUST be Ethernet.
+
+4.2. VXLAN-gpe VTEP to VXLAN VTEP
+
+ A VXLAN-gpe VTEP MUST NOT encapsulate non-Ethernet frames to a VXLAN
+ VTEP. When encapsulating Ethernet frames to a VXLAN VTEP, the VXLAN-
+ gpe VTEP will set the P bit to 0, the Next Protocol to 0 and use UDP
+ destination port 4789. A VXLAN-gpe VTEP MUST also set O = 0 and Ver
+ = 0 when encapsulating Ethernet frames to VXLAN VTEP. The receiving
+ VXLAN VTEP will treat this packet as a VXLAN packet.
+
+ A method for determining the capabilities of a VXLAN VTEP (gpe or
+ non-gpe) is out of the scope of this draft.
+
+4.3. VXLAN-gpe UDP Ports
+
+ VXLAN-gpe uses a new UDP destination port (to be assigned by IANA)
+ when sending traffic to VXLAN-gpe VTEPs.
+
+4.4. VXLAN-gpe and Encapsulated IP Header Fields
+
+ When encapsulating and decapsulating IPv4 and IPv6 packets, certain
+ fields, such as IPv4 Time to Live (TTL) from the inner IP header need
+ to be considered. VXLAN-gpe IP encapsulation and decapsulation
+ utilizes the techniques described in [RFC6830], section 5.3.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Quinn, et al. Expires January 4, 2015 [Page 8]
+
+Internet-Draft Generic Protocol Extension for VXLAN July 2014
+
+
+5. VXLAN-gpe Examples
+
+ This section provides three examples of protocols encapsulated using
+ the Generic Protocol Extension for VXLAN described in this document.
+
+
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |R|R|R|R|I|1|R|0|0|0| Reserved | NP = IPv4 |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | VXLAN Network Identifier (VNI) | Reserved |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Original IPv4 Packet |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+
+
+ Figure 5: IPv4 and VXLAN-gpe
+
+
+
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |R|R|R|R|I|1|R|0|0|0| Reserved | NP = IPv6 |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | VXLAN Network Identifier (VNI) | Reserved |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Original IPv6 Packet |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+
+
+ Figure 6: IPv6 and VXLAN-gpe
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Quinn, et al. Expires January 4, 2015 [Page 9]
+
+Internet-Draft Generic Protocol Extension for VXLAN July 2014
+
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |R|R|R|R|I|1|R|0|0|0| Reserved |NP = Ethernet |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | VXLAN Network Identifier (VNI) | Reserved |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Original Ethernet Frame |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+
+
+ Figure 7: Ethernet and VXLAN-gpe
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Quinn, et al. Expires January 4, 2015 [Page 10]
+
+Internet-Draft Generic Protocol Extension for VXLAN July 2014
+
+
+6. Security Considerations
+
+ VXLAN's security is focused on issues around L2 encapsulation into
+ L3. With VXLAN-gpe, issues such as spoofing, flooding, and traffic
+ redirection are dependent on the particular protocol payload
+ encapsulated.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Quinn, et al. Expires January 4, 2015 [Page 11]
+
+Internet-Draft Generic Protocol Extension for VXLAN July 2014
+
+
+7. Acknowledgments
+
+ A special thank you goes to Dino Farinacci for his guidance and
+ detailed review.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Quinn, et al. Expires January 4, 2015 [Page 12]
+
+Internet-Draft Generic Protocol Extension for VXLAN July 2014
+
+
+8. IANA Considerations
+
+8.1. UDP Port
+
+ A new UDP port will be requested from IANA.
+
+8.2. VXLAN-gpe Next Protocol
+
+ IANA is requested to set up a registry of "Next Protocol". These are
+ 8-bit values. Next Protocol values 0, 1, 2, 3 and 4 are defined in
+ this draft. New values are assigned via Standards Action [RFC5226].
+
+ +---------------+-------------+---------------+
+ | Next Protocol | Description | Reference |
+ +---------------+-------------+---------------+
+ | 0 | Reserved | This document |
+ | | | |
+ | 1 | IPv4 | This document |
+ | | | |
+ | 2 | IPv6 | This document |
+ | | | |
+ | 3 | Ethernet | This document |
+ | | | |
+ | 4 | NSH | This document |
+ | | | |
+ | 5..253 | Unassigned | |
+ +---------------+-------------+---------------+
+
+ Table 1
+
+8.3. VXLAN-gpe Reserved Bits
+
+ There are ten bits at the beginning of the VXLAN-gpe header. New
+ bits are assigned via Standards Action [RFC5226].
+
+ Bits 0-3 - Reserved
+ Bit 4 - Instance ID (I bit)
+ Bit 5 - Next Protocol (P bit)
+ Bit 6 - Reserved
+ Bit 7 - OAM (O bit)
+ Bits 8-9 - Version
+
+
+
+
+
+
+
+
+
+
+Quinn, et al. Expires January 4, 2015 [Page 13]
+
+Internet-Draft Generic Protocol Extension for VXLAN July 2014
+
+
+9. References
+
+9.1. Normative References
+
+ [RFC0768] Postel, J., "User Datagram Protocol", STD 6, RFC 768,
+ August 1980.
+
+ [RFC0791] Postel, J., "Internet Protocol", STD 5, RFC 791,
+ September 1981.
+
+ [RFC2119] Bradner, S., "Key words for use in RFCs to Indicate
+ Requirement Levels", BCP 14, RFC 2119, March 1997.
+
+ [RFC5226] Narten, T. and H. Alvestrand, "Guidelines for Writing an
+ IANA Considerations Section in RFCs", BCP 26, RFC 5226,
+ May 2008.
+
+9.2. Informative References
+
+ [NSH] Quinn, P. and et al. , "Network Service Header", 2014.
+
+ [RFC1700] Reynolds, J. and J. Postel, "Assigned Numbers", RFC 1700,
+ October 1994.
+
+ [RFC6830] Farinacci, D., Fuller, V., Meyer, D., and D. Lewis, "The
+ Locator/ID Separation Protocol (LISP)", RFC 6830,
+ January 2013.
+
+ [VXLAN] Dutt, D., Mahalingam, M., Duda, K., Agarwal, P., Kreeger,
+ L., Sridhar, T., Bursell, M., and C. Wright, "VXLAN: A
+ Framework for Overlaying Virtualized Layer 2 Networks over
+ Layer 3 Networks", 2013.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Quinn, et al. Expires January 4, 2015 [Page 14]
+
+Internet-Draft Generic Protocol Extension for VXLAN July 2014
+
+
+Authors' Addresses
+
+ Paul Quinn
+ Cisco Systems, Inc.
+
+ Email: paulq@cisco.com
+
+
+ Puneet Agarwal
+ Broadcom
+
+ Email: pagarwal@broadcom.com
+
+
+ Rex Fernando
+ Cisco Systems, Inc.
+
+ Email: rex@cisco.com
+
+
+ Larry Kreeger
+ Cisco Systems, Inc.
+
+ Email: kreeger@cisco.com
+
+
+ Darrel Lewis
+ Cisco Systems, Inc.
+
+ Email: darlewis@cisco.com
+
+
+ Fabio Maino
+ Cisco Systems, Inc.
+
+ Email: kreeger@cisco.com
+
+
+ Michael Smith
+ Cisco Systems, Inc.
+
+ Email: michsmit@cisco.com
+
+
+
+
+
+
+
+
+
+Quinn, et al. Expires January 4, 2015 [Page 15]
+
+Internet-Draft Generic Protocol Extension for VXLAN July 2014
+
+
+ Navindra Yadav
+ Cisco Systems, Inc.
+
+ Email: nyadav@cisco.com
+
+
+ Lucy Yong
+ Huawei USA
+
+ Email: lucy.yong@huawei.com
+
+
+ Xiaohu Xu
+ Huawei Technologies
+
+ Email: xuxiaohu@huawei.com
+
+
+ Uri Elzur
+ Intel
+
+ Email: uri.elzur@intel.com
+
+
+ Pankaj Garg
+ Microsoft
+
+ Email: Garg.Pankaj@microsoft.com
diff --git a/src/plugins/vxlan-gpe/vxlan_gpe.api b/src/plugins/vxlan-gpe/vxlan_gpe.api
new file mode 100644
index 00000000000..3cbd7ab7f71
--- /dev/null
+++ b/src/plugins/vxlan-gpe/vxlan_gpe.api
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2015-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+option version = "2.1.0";
+
+import "vnet/interface_types.api";
+import "vnet/ip/ip_types.api";
+
+/** \brief Create or delete a VXLAN-GPE tunnel (original message; superseded
+    by vxlan_gpe_add_del_tunnel_v2 which adds configurable UDP ports)
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+    @param local - Source IP address
+    @param remote - Destination IP address, can be multicast
+    @param mcast_sw_if_index - Interface for multicast destination
+    @param encap_vrf_id - Encap route table FIB index
+    @param decap_vrf_id - Decap route table FIB index
+    @param protocol - Encapsulated protocol
+    @param vni - The VXLAN Network Identifier, uint24
+    @param is_add - Use 1 to create the tunnel, 0 to remove it
+*/
+define vxlan_gpe_add_del_tunnel
+{
+  u32 client_index;
+  u32 context;
+  vl_api_address_t local;
+  vl_api_address_t remote;
+  vl_api_interface_index_t mcast_sw_if_index;
+  u32 encap_vrf_id;
+  u32 decap_vrf_id;
+  vl_api_ip_proto_t protocol;
+  u32 vni;
+  bool is_add [default=true];
+};
+
+/** \brief Create or delete a VXLAN-GPE tunnel
+    (v2 of vxlan_gpe_add_del_tunnel: adds local_port/remote_port so the
+    UDP ports need not be the well-known VXLAN-GPE ports)
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+    @param local - Source IP address
+    @param remote - Destination IP address, can be multicast
+    @param local_port - Source UDP port. It is not included in sent packets. Used only for port registration
+    @param remote_port - Destination UDP port
+    @param mcast_sw_if_index - Interface for multicast destination
+    @param encap_vrf_id - Encap route table FIB index
+    @param decap_vrf_id - Decap route table FIB index
+    @param protocol - Encapsulated protocol
+    @param vni - The VXLAN Network Identifier, uint24
+    @param is_add - Use 1 to create the tunnel, 0 to remove it
+*/
+define vxlan_gpe_add_del_tunnel_v2
+{
+  u32 client_index;
+  u32 context;
+  vl_api_address_t local;
+  vl_api_address_t remote;
+  u16 local_port;
+  u16 remote_port;
+  vl_api_interface_index_t mcast_sw_if_index;
+  u32 encap_vrf_id;
+  u32 decap_vrf_id;
+  vl_api_ip_proto_t protocol;
+  u32 vni;
+  bool is_add [default=true];
+};
+
+/** \brief Reply to vxlan_gpe_add_del_tunnel
+    @param context - sender context, to match reply w/ request
+    @param retval - return code for the request
+    @param sw_if_index - software index of the created/affected tunnel interface
+*/
+define vxlan_gpe_add_del_tunnel_reply
+{
+  u32 context;
+  i32 retval;
+  vl_api_interface_index_t sw_if_index;
+};
+/** \brief Reply to vxlan_gpe_add_del_tunnel_v2 (same layout as the v1 reply) */
+define vxlan_gpe_add_del_tunnel_v2_reply
+{
+  u32 context;
+  i32 retval;
+  vl_api_interface_index_t sw_if_index;
+};
+
+/** \brief Dump VXLAN-GPE tunnels
+    @param sw_if_index - tunnel interface to report on; presumably ~0
+                         requests all tunnels - confirm against handler
+*/
+define vxlan_gpe_tunnel_dump
+{
+  u32 client_index;
+  u32 context;
+  vl_api_interface_index_t sw_if_index;
+};
+/** \brief Dump VXLAN-GPE tunnels (v2: details include the UDP ports) */
+define vxlan_gpe_tunnel_v2_dump
+{
+  u32 client_index;
+  u32 context;
+  vl_api_interface_index_t sw_if_index;
+};
+
+/** \brief Per-tunnel record returned for vxlan_gpe_tunnel_dump */
+define vxlan_gpe_tunnel_details
+{
+  u32 context;
+  vl_api_interface_index_t sw_if_index;
+  vl_api_address_t local;
+  vl_api_address_t remote;
+  u32 vni;
+  vl_api_ip_proto_t protocol;
+  vl_api_interface_index_t mcast_sw_if_index;
+  u32 encap_vrf_id;
+  u32 decap_vrf_id;
+  bool is_ipv6;
+};
+/** \brief Per-tunnel record returned for vxlan_gpe_tunnel_v2_dump
+    (adds local_port/remote_port to the v1 details) */
+define vxlan_gpe_tunnel_v2_details
+{
+  u32 context;
+  vl_api_interface_index_t sw_if_index;
+  vl_api_address_t local;
+  vl_api_address_t remote;
+  u16 local_port;
+  u16 remote_port;
+  u32 vni;
+  vl_api_ip_proto_t protocol;
+  vl_api_interface_index_t mcast_sw_if_index;
+  u32 encap_vrf_id;
+  u32 decap_vrf_id;
+  bool is_ipv6;
+};
+
+/** \brief Interface set vxlan-gpe-bypass request
+    (autoreply: the generated reply carries only retval)
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+    @param sw_if_index - interface used to reach neighbor
+    @param is_ipv6 - if non-zero, enable ipv6-vxlan-bypass, else ipv4-vxlan-bypass
+    @param enable - if non-zero enable, else disable (defaults to enable)
+*/
+autoreply define sw_interface_set_vxlan_gpe_bypass
+{
+  u32 client_index;
+  u32 context;
+  vl_api_interface_index_t sw_if_index;
+  bool is_ipv6;
+  bool enable [default=true];
+};
+
+/*
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/vxlan-gpe/vxlan_gpe.c b/src/plugins/vxlan-gpe/vxlan_gpe.c
new file mode 100644
index 00000000000..abb2049a356
--- /dev/null
+++ b/src/plugins/vxlan-gpe/vxlan_gpe.c
@@ -0,0 +1,1259 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief Common utility functions for IPv4 and IPv6 VXLAN GPE tunnels
+ *
+*/
+#include <vxlan-gpe/vxlan_gpe.h>
+#include <vnet/fib/fib.h>
+#include <vnet/ip/format.h>
+#include <vnet/fib/fib_entry.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/fib/fib_entry_track.h>
+#include <vnet/mfib/mfib_table.h>
+#include <vnet/adj/adj_mcast.h>
+#include <vnet/interface.h>
+#include <vnet/udp/udp_local.h>
+#include <vlib/vlib.h>
+
+/**
+ * @file
+ * @brief VXLAN-GPE.
+ *
+ * VXLAN-GPE provides the features needed to allow L2 bridge domains (BDs)
+ * to span multiple servers. This is done by building an L2 overlay on
+ * top of an L3 network underlay using VXLAN-GPE tunnels.
+ *
+ * This makes it possible for servers to be co-located in the same data
+ * center or be separated geographically as long as they are reachable
+ * through the underlay L3 network.
+ *
+ * You can refer to this kind of L2 overlay bridge domain as a VXLAN-GPE segment.
+ */
+
+vxlan_gpe_main_t vxlan_gpe_main __clib_export;
+
+static u8 *
+format_decap_next (u8 * s, va_list * args)
+{
+  vxlan_gpe_tunnel_t *t = va_arg (*args, vxlan_gpe_tunnel_t *);
+
+  /* Render the tunnel's inner-protocol decap description. */
+  if (t->protocol == VXLAN_GPE_PROTOCOL_IP4)
+    s = format (s, "protocol ip4 fib-idx %d", t->decap_fib_index);
+  else if (t->protocol == VXLAN_GPE_PROTOCOL_IP6)
+    s = format (s, "protocol ip6 fib-idx %d", t->decap_fib_index);
+  else if (t->protocol == VXLAN_GPE_PROTOCOL_ETHERNET)
+    s = format (s, "protocol ethernet");
+  else if (t->protocol == VXLAN_GPE_PROTOCOL_NSH)
+    s = format (s, "protocol nsh");
+  else
+    s = format (s, "protocol unknown %d", t->protocol);
+
+  return s;
+}
+
+/**
+ * @brief Format function for VXLAN GPE tunnel
+ *
+ * @param *s formatting string
+ * @param *args tunnel (vxlan_gpe_tunnel_t *) to render
+ *
+ * @return *s formatted string
+ *
+ */
+u8 *
+format_vxlan_gpe_tunnel (u8 * s, va_list * args)
+{
+  vxlan_gpe_tunnel_t *t = va_arg (*args, vxlan_gpe_tunnel_t *);
+  vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
+
+  s = format (s,
+              "[%d] lcl %U rmt %U lcl_port %d rmt_port %d vni %d "
+              "fib-idx %d sw-if-idx %d ",
+              t - ngm->tunnels, format_ip46_address, &t->local, IP46_TYPE_ANY,
+              format_ip46_address, &t->remote, IP46_TYPE_ANY, t->local_port,
+              t->remote_port, t->vni, t->encap_fib_index, t->sw_if_index);
+
+#if 0
+  /* next_dpo not yet used by vxlan-gpe-encap node */
+  s = format (s, "encap-dpo-idx %d ", t->next_dpo.dpoi_index);
+#endif
+  s = format (s, "decap-next-%U ", format_decap_next, t);
+
+  if (PREDICT_FALSE (ip46_address_is_multicast (&t->remote)))
+    s = format (s, "mcast-sw-if-idx %d ", t->mcast_sw_if_index);
+
+  return s;
+}
+
+/**
+ * @brief Interface-name formatter for VXLAN GPE tunnels
+ *
+ * @param *s formatting string
+ * @param *args device instance (u32)
+ *
+ * @return *s formatted string "vxlan_gpe_tunnel<N>"
+ *
+ */
+static u8 *
+format_vxlan_gpe_name (u8 * s, va_list * args)
+{
+  u32 instance = va_arg (*args, u32);
+
+  return format (s, "vxlan_gpe_tunnel%d", instance);
+}
+
+/**
+ * @brief Admin up/down callback for VXLAN GPE tunnel interfaces
+ *
+ * Mirrors the software admin state onto the hardware link state, since
+ * a tunnel has no physical carrier of its own.
+ *
+ * @param *vnm vnet main
+ * @param hw_if_index hardware interface index
+ * @param flags VNET_SW_INTERFACE_FLAG_* requested state
+ *
+ * @return 0 (never fails)
+ *
+ */
+static clib_error_t *
+vxlan_gpe_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
+                                   u32 flags)
+{
+  if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
+    vnet_hw_interface_set_flags (vnm, hw_if_index,
+                                 VNET_HW_INTERFACE_FLAG_LINK_UP);
+  else
+    vnet_hw_interface_set_flags (vnm, hw_if_index, 0);
+
+  return 0;
+}
+
+/* Device class registration: naming, tx tracing and admin-state handling
+ * for vxlan_gpe_tunnel<N> interfaces. */
+VNET_DEVICE_CLASS (vxlan_gpe_device_class,static) = {
+  .name = "VXLAN_GPE",
+  .format_device_name = format_vxlan_gpe_name,
+  .format_tx_trace = format_vxlan_gpe_encap_trace,
+  .admin_up_down_function = vxlan_gpe_interface_admin_up_down,
+};
+
+
+/**
+ * @brief Header formatter for the VXLAN GPE hw interface class
+ *
+ * Placeholder only: header formatting is not implemented for this class,
+ * so the output just names the device instance.
+ *
+ * @param *s formatting string
+ * @param *args device instance (u32)
+ *
+ * @return *s formatted string
+ *
+ */
+static u8 *
+format_vxlan_gpe_header_with_length (u8 * s, va_list * args)
+{
+  u32 instance = va_arg (*args, u32);
+
+  return format (s, "unimplemented dev %u", instance);
+}
+
+/* Hardware interface class registration for VXLAN GPE tunnels. */
+VNET_HW_INTERFACE_CLASS (vxlan_gpe_hw_class) = {
+  .name = "VXLAN_GPE",
+  .format_header = format_vxlan_gpe_header_with_length,
+  .build_rewrite = default_build_rewrite,
+};
+
+static void
+vxlan_gpe_tunnel_restack_dpo (vxlan_gpe_tunnel_t * t)
+{
+  /* Re-resolve the tunnel's output DPO from the current forwarding of the
+   * destination FIB entry and stack it on the vxlan-gpe-encap node. */
+  dpo_id_t tmp = DPO_INVALID;
+  fib_forward_chain_type_t fct;
+
+  fct = ip46_address_is_ip4 (&t->remote) ? FIB_FORW_CHAIN_TYPE_UNICAST_IP4 :
+    FIB_FORW_CHAIN_TYPE_UNICAST_IP6;
+  fib_entry_contribute_forwarding (t->fib_entry_index, fct, &tmp);
+  dpo_stack_from_node (vxlan_gpe_encap_node.index, &t->next_dpo, &tmp);
+  dpo_reset (&tmp);
+}
+
+static vxlan_gpe_tunnel_t *
+vxlan_gpe_tunnel_from_fib_node (fib_node_t * node)
+{
+  /* Recover the enclosing tunnel from its embedded FIB-node member. */
+  char *base;
+
+  ASSERT (FIB_NODE_TYPE_VXLAN_GPE_TUNNEL == node->fn_type);
+  base = (char *) node - STRUCT_OFFSET_OF (vxlan_gpe_tunnel_t, node);
+  return (vxlan_gpe_tunnel_t *) base;
+}
+
+/**
+ * FIB back-walk: forwarding for the tracked destination entry changed,
+ * so re-stack the tunnel's encap DPO on the new forwarding.
+ */
+static fib_node_back_walk_rc_t
+vxlan_gpe_tunnel_back_walk (fib_node_t * node, fib_node_back_walk_ctx_t * ctx)
+{
+  vxlan_gpe_tunnel_t *tun = vxlan_gpe_tunnel_from_fib_node (node);
+
+  vxlan_gpe_tunnel_restack_dpo (tun);
+  return (FIB_NODE_BACK_WALK_CONTINUE);
+}
+
+/**
+ * Function definition to get a FIB node from its index.
+ * The FIB node index for this node type is the tunnel pool index.
+ */
+static fib_node_t *
+vxlan_gpe_tunnel_fib_node_get (fib_node_index_t index)
+{
+  vxlan_gpe_main_t *gm = &vxlan_gpe_main;
+  vxlan_gpe_tunnel_t *tun = pool_elt_at_index (gm->tunnels, index);
+
+  return (&tun->node);
+}
+
+/**
+ * Function definition to inform the FIB node that its last lock has gone.
+ */
+static void
+vxlan_gpe_tunnel_last_lock_gone (fib_node_t * node)
+{
+  /*
+   * The VXLAN_GPE tunnel is a root of the graph. As such
+   * it never has children and thus is never locked.
+   * Reaching here indicates a FIB graph invariant violation.
+   */
+  ASSERT (0);
+}
+
+/*
+ * Virtual function table registered by VXLAN_GPE tunnels
+ * for participation in the FIB object graph (node lookup,
+ * last-lock and back-walk callbacks).
+ */
+const static fib_node_vft_t vxlan_gpe_vft = {
+  .fnv_get = vxlan_gpe_tunnel_fib_node_get,
+  .fnv_last_lock = vxlan_gpe_tunnel_last_lock_gone,
+  .fnv_back_walk = vxlan_gpe_tunnel_back_walk,
+};
+
+/* Scalar fields copied verbatim between the add/del args and the tunnel;
+ * expanded with a caller-supplied _(x) macro. */
+#define foreach_gpe_copy_field \
+  _ (vni)                      \
+  _ (protocol)                 \
+  _ (mcast_sw_if_index)        \
+  _ (encap_fib_index)          \
+  _ (decap_fib_index)          \
+  _ (local_port)               \
+  _ (remote_port)
+
+/* Address words copied for an IPv4 tunnel. */
+#define foreach_copy_ipv4 { \
+  _(local.ip4.as_u32) \
+  _(remote.ip4.as_u32) \
+}
+
+/* Address words copied for an IPv6 tunnel. */
+#define foreach_copy_ipv6 { \
+  _(local.ip6.as_u64[0]) \
+  _(local.ip6.as_u64[1]) \
+  _(remote.ip6.as_u64[0]) \
+  _(remote.ip6.as_u64[1]) \
+}
+
+
+/**
+ * @brief Calculate IPv4 VXLAN GPE rewrite header
+ *
+ * Builds the tunnel's precomputed encap string: outer IPv4 + UDP +
+ * VXLAN-GPE header, plus extension_size spare bytes after the GPE header.
+ *
+ * @param *t tunnel whose rewrite string is (re)built
+ * @param extension_size extra bytes reserved after the GPE header
+ * @param protocol_override if non-zero, used instead of t->protocol
+ * @param encap_next_node next node index stored for the encap path
+ *
+ * @return 0 (always succeeds)
+ *
+ */
+int
+vxlan4_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
+                    u8 protocol_override, uword encap_next_node)
+{
+  u8 *rw = 0;
+  ip4_header_t *ip0;
+  ip4_vxlan_gpe_header_t *h0;
+  int len;
+
+  len = sizeof (*h0) + extension_size;
+
+  /* Replace any previous rewrite (re-add / reconfigure case). */
+  vec_free (t->rewrite);
+  vec_validate_aligned (rw, len - 1, CLIB_CACHE_LINE_BYTES);
+
+  h0 = (ip4_vxlan_gpe_header_t *) rw;
+
+  /* Fixed portion of the (outer) ip4 header */
+  ip0 = &h0->ip4;
+  ip0->ip_version_and_header_length = 0x45;
+  ip0->ttl = 254;
+  ip0->protocol = IP_PROTOCOL_UDP;
+
+  /* we fix up the ip4 header length and checksum after-the-fact */
+  /* NOTE(review): the checksum below is computed while ip0->length is
+   * still 0; presumably the encap node patches length and checksum
+   * per-packet - confirm against vxlan-gpe-encap. */
+  ip0->src_address.as_u32 = t->local.ip4.as_u32;
+  ip0->dst_address.as_u32 = t->remote.ip4.as_u32;
+  ip0->checksum = ip4_header_checksum (ip0);
+
+  /* UDP header, randomize src port on something, maybe? */
+  h0->udp.src_port = clib_host_to_net_u16 (t->local_port);
+  h0->udp.dst_port = clib_host_to_net_u16 (t->remote_port);
+
+  /* VXLAN header. Are we having fun yet? */
+  h0->vxlan.flags = VXLAN_GPE_FLAGS_I | VXLAN_GPE_FLAGS_P;
+  h0->vxlan.ver_res = VXLAN_GPE_VERSION;
+  if (protocol_override)
+    {
+      h0->vxlan.protocol = protocol_override;
+    }
+  else
+    {
+      h0->vxlan.protocol = t->protocol;
+    }
+  /* rewrite_size == len: full outer header plus extension bytes. */
+  t->rewrite_size = sizeof (ip4_vxlan_gpe_header_t) + extension_size;
+  h0->vxlan.vni_res = clib_host_to_net_u32 (t->vni << 8);
+
+  t->rewrite = rw;
+  t->encap_next_node = encap_next_node;
+  return (0);
+}
+
+/**
+ * @brief Calculate IPv6 VXLAN GPE rewrite header
+ *
+ * Builds the tunnel's precomputed encap string: outer IPv6 + UDP +
+ * VXLAN-GPE header, plus extension_size spare bytes after the GPE header.
+ *
+ * @param *t tunnel whose rewrite string is (re)built
+ * @param extension_size extra bytes reserved after the GPE header
+ * @param protocol_override if non-zero, used instead of t->protocol
+ * @param encap_next_node next node index stored for the encap path
+ *
+ * @return 0 (always succeeds)
+ *
+ */
+int
+vxlan6_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
+                    u8 protocol_override, uword encap_next_node)
+{
+  u8 *rw = 0;
+  ip6_header_t *ip0;
+  ip6_vxlan_gpe_header_t *h0;
+  int len;
+
+  len = sizeof (*h0) + extension_size;
+
+  /* Replace any previous rewrite (re-add / reconfigure case). */
+  vec_free (t->rewrite);
+  vec_validate_aligned (rw, len - 1, CLIB_CACHE_LINE_BYTES);
+
+  h0 = (ip6_vxlan_gpe_header_t *) rw;
+
+  /* Fixed portion of the (outer) ip6 header */
+  ip0 = &h0->ip6;
+  ip0->ip_version_traffic_class_and_flow_label =
+    clib_host_to_net_u32 (6 << 28);
+  ip0->hop_limit = 255;
+  ip0->protocol = IP_PROTOCOL_UDP;
+
+  ip0->src_address.as_u64[0] = t->local.ip6.as_u64[0];
+  ip0->src_address.as_u64[1] = t->local.ip6.as_u64[1];
+  ip0->dst_address.as_u64[0] = t->remote.ip6.as_u64[0];
+  ip0->dst_address.as_u64[1] = t->remote.ip6.as_u64[1];
+
+  /* UDP header, randomize src port on something, maybe? */
+  h0->udp.src_port = clib_host_to_net_u16 (t->local_port);
+  h0->udp.dst_port = clib_host_to_net_u16 (t->remote_port);
+
+  /* VXLAN header. Are we having fun yet? */
+  h0->vxlan.flags = VXLAN_GPE_FLAGS_I | VXLAN_GPE_FLAGS_P;
+  h0->vxlan.ver_res = VXLAN_GPE_VERSION;
+  /* Fixed: branches were inverted relative to vxlan4_gpe_rewrite -
+   * the override, when given, must win over t->protocol. */
+  if (protocol_override)
+    {
+      h0->vxlan.protocol = protocol_override;
+    }
+  else
+    {
+      h0->vxlan.protocol = t->protocol;
+    }
+  /* Fixed: was sizeof (ip4_vxlan_gpe_header_t), under-counting the
+   * rewrite by the v6/v4 outer-header size difference. */
+  t->rewrite_size = sizeof (ip6_vxlan_gpe_header_t) + extension_size;
+  h0->vxlan.vni_res = clib_host_to_net_u32 (t->vni << 8);
+
+  t->rewrite = rw;
+  t->encap_next_node = encap_next_node;
+  return (0);
+}
+
+/* Per-mcast-group state shared by all tunnels using that group address;
+ * packed into a u64 so it fits directly in a uword hash value. */
+typedef CLIB_PACKED(union {
+  struct {
+    fib_node_index_t mfib_entry_index;
+    adj_index_t mcast_adj_index;
+  };
+  u64 as_u64;
+}) mcast_shared_t;
+
+static inline mcast_shared_t
+mcast_shared_get (ip46_address_t * ip)
+{
+  /* Look up the shared mcast state keyed by group address; must exist. */
+  mcast_shared_t ep;
+  uword *entry;
+
+  ASSERT (ip46_address_is_multicast (ip));
+  entry = hash_get_mem (vxlan_gpe_main.mcast_shared, ip);
+  ALWAYS_ASSERT (entry);
+  ep.as_u64 = *entry;
+  return ep;
+}
+
+static inline void
+mcast_shared_add (ip46_address_t * remote,
+                  fib_node_index_t mfei, adj_index_t ai)
+{
+  /* Record the shared (mfib entry, mcast adjacency) pair for this group. */
+  mcast_shared_t ep;
+
+  ep.mfib_entry_index = mfei;
+  ep.mcast_adj_index = ai;
+  hash_set_mem_alloc (&vxlan_gpe_main.mcast_shared, remote, ep.as_u64);
+}
+
+static inline void
+mcast_shared_remove (ip46_address_t * remote)
+{
+  /* Release the adjacency and (*,G) entry, then forget the group. */
+  mcast_shared_t ep = mcast_shared_get (remote);
+
+  adj_unlock (ep.mcast_adj_index);
+  mfib_table_entry_delete_index (ep.mfib_entry_index, MFIB_SOURCE_VXLAN_GPE);
+  hash_unset_mem_free (&vxlan_gpe_main.mcast_shared, remote);
+}
+
+/**
+ * @brief Add or Del a VXLAN GPE tunnel
+ *
+ * Looks the tunnel up by (local, remote, vni, local_port) in the per-AF
+ * key hash, then either creates a tunnel interface (reusing a previously
+ * freed hw interface when available) or tears an existing one down.
+ *
+ * @param *a tunnel arguments; zero local_port/remote_port are replaced
+ *           with the well-known VXLAN-GPE UDP port for the address family
+ * @param *sw_if_indexp out: sw_if_index of the affected tunnel (may be NULL)
+ *
+ * @return 0 on success, VNET_API_ERROR_* on failure
+ *
+ */
+int vnet_vxlan_gpe_add_del_tunnel
+  (vnet_vxlan_gpe_add_del_tunnel_args_t * a, u32 * sw_if_indexp)
+{
+  vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
+  vxlan_gpe_tunnel_t *t = 0;
+  vnet_main_t *vnm = ngm->vnet_main;
+  vnet_hw_interface_t *hi;
+  uword *p;
+  u32 hw_if_index = ~0;
+  u32 sw_if_index = ~0;
+  int rv;
+  vxlan4_gpe_tunnel_key_t key4, *key4_copy;
+  vxlan6_gpe_tunnel_key_t key6, *key6_copy;
+  u32 is_ip6 = a->is_ip6;
+
+  /* Set udp-ports */
+  if (a->local_port == 0)
+    a->local_port = is_ip6 ? UDP_DST_PORT_VXLAN6_GPE : UDP_DST_PORT_VXLAN_GPE;
+
+  if (a->remote_port == 0)
+    a->remote_port = is_ip6 ? UDP_DST_PORT_VXLAN6_GPE : UDP_DST_PORT_VXLAN_GPE;
+
+  /* Build the AF-specific lookup key; only local_port participates. */
+  if (!is_ip6)
+    {
+      key4.local = a->local.ip4.as_u32;
+      key4.remote = a->remote.ip4.as_u32;
+      key4.vni = clib_host_to_net_u32 (a->vni << 8);
+      key4.port = (u32) clib_host_to_net_u16 (a->local_port);
+
+      p = hash_get_mem (ngm->vxlan4_gpe_tunnel_by_key, &key4);
+    }
+  else
+    {
+      key6.local.as_u64[0] = a->local.ip6.as_u64[0];
+      key6.local.as_u64[1] = a->local.ip6.as_u64[1];
+      key6.remote.as_u64[0] = a->remote.ip6.as_u64[0];
+      key6.remote.as_u64[1] = a->remote.ip6.as_u64[1];
+      key6.vni = clib_host_to_net_u32 (a->vni << 8);
+      key6.port = (u32) clib_host_to_net_u16 (a->local_port);
+
+      p = hash_get_mem (ngm->vxlan6_gpe_tunnel_by_key, &key6);
+    }
+
+  if (a->is_add)
+    {
+      l2input_main_t *l2im = &l2input_main;
+
+      /* adding a tunnel: tunnel must not already exist */
+      if (p)
+	return VNET_API_ERROR_TUNNEL_EXIST;
+
+      pool_get_aligned (ngm->tunnels, t, CLIB_CACHE_LINE_BYTES);
+      clib_memset (t, 0, sizeof (*t));
+
+      /* copy from arg structure */
+#define _(x) t->x = a->x;
+      foreach_gpe_copy_field;
+      if (!a->is_ip6)
+	foreach_copy_ipv4
+      else
+	foreach_copy_ipv6
+#undef _
+
+      if (!a->is_ip6)
+	t->flags |= VXLAN_GPE_TUNNEL_IS_IPV4;
+
+      /* Precompute the encap rewrite string for the tunnel's AF. */
+      if (!a->is_ip6)
+	{
+	  rv = vxlan4_gpe_rewrite (t, 0, 0, VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP);
+	}
+      else
+	{
+	  rv = vxlan6_gpe_rewrite (t, 0, 0, VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP);
+	}
+
+      if (rv)
+	{
+	  pool_put (ngm->tunnels, t);
+	  return rv;
+	}
+
+      /* Index the new tunnel by a heap-allocated copy of its key. */
+      if (!is_ip6)
+	{
+	  key4_copy = clib_mem_alloc (sizeof (*key4_copy));
+	  clib_memcpy_fast (key4_copy, &key4, sizeof (*key4_copy));
+	  hash_set_mem (ngm->vxlan4_gpe_tunnel_by_key, key4_copy,
+			t - ngm->tunnels);
+	}
+      else
+	{
+	  key6_copy = clib_mem_alloc (sizeof (*key6_copy));
+	  clib_memcpy_fast (key6_copy, &key6, sizeof (*key6_copy));
+	  hash_set_mem (ngm->vxlan6_gpe_tunnel_by_key, key6_copy,
+			t - ngm->tunnels);
+	}
+
+      /* Reuse a previously deleted tunnel's hw interface if one exists,
+       * otherwise register a fresh one. */
+      if (vec_len (ngm->free_vxlan_gpe_tunnel_hw_if_indices) > 0)
+	{
+	  vnet_interface_main_t *im = &vnm->interface_main;
+	  hw_if_index = ngm->free_vxlan_gpe_tunnel_hw_if_indices
+	    [vec_len (ngm->free_vxlan_gpe_tunnel_hw_if_indices) - 1];
+	  vec_dec_len (ngm->free_vxlan_gpe_tunnel_hw_if_indices, 1);
+
+	  hi = vnet_get_hw_interface (vnm, hw_if_index);
+	  hi->dev_instance = t - ngm->tunnels;
+	  hi->hw_instance = hi->dev_instance;
+	  /* clear old stats of freed tunnel before reuse */
+	  sw_if_index = hi->sw_if_index;
+	  vnet_interface_counter_lock (im);
+	  vlib_zero_combined_counter
+	    (&im->combined_sw_if_counters[VNET_INTERFACE_COUNTER_TX],
+	     sw_if_index);
+	  vlib_zero_combined_counter (&im->combined_sw_if_counters
+				      [VNET_INTERFACE_COUNTER_RX],
+				      sw_if_index);
+	  vlib_zero_simple_counter (&im->sw_if_counters
+				    [VNET_INTERFACE_COUNTER_DROP],
+				    sw_if_index);
+	  vnet_interface_counter_unlock (im);
+	}
+      else
+	{
+	  hw_if_index = vnet_register_interface
+	    (vnm, vxlan_gpe_device_class.index, t - ngm->tunnels,
+	     vxlan_gpe_hw_class.index, t - ngm->tunnels);
+	  hi = vnet_get_hw_interface (vnm, hw_if_index);
+	}
+
+      /* Set vxlan-gpe tunnel output node */
+      u32 encap_index = vxlan_gpe_encap_node.index;
+      vnet_set_interface_output_node (vnm, hw_if_index, encap_index);
+
+      t->hw_if_index = hw_if_index;
+      t->sw_if_index = sw_if_index = hi->sw_if_index;
+      vec_validate_init_empty (ngm->tunnel_index_by_sw_if_index, sw_if_index,
+			       ~0);
+      ngm->tunnel_index_by_sw_if_index[sw_if_index] = t - ngm->tunnels;
+
+      /* setup l2 input config with l2 feature and bd 0 to drop packet */
+      vec_validate (l2im->configs, sw_if_index);
+      l2im->configs[sw_if_index].feature_bitmap = L2INPUT_FEAT_DROP;
+      l2im->configs[sw_if_index].bd_index = 0;
+
+      vnet_sw_interface_t *si = vnet_get_sw_interface (vnm, sw_if_index);
+      si->flags &= ~VNET_SW_INTERFACE_FLAG_HIDDEN;
+      vnet_sw_interface_set_flags (vnm, hi->sw_if_index,
+				   VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+      fib_node_init (&t->node, FIB_NODE_TYPE_VXLAN_GPE_TUNNEL);
+      fib_prefix_t tun_remote_pfx;
+      vnet_flood_class_t flood_class = VNET_FLOOD_CLASS_TUNNEL_NORMAL;
+
+      fib_protocol_t fp = fib_ip_proto (is_ip6);
+      fib_prefix_from_ip46_addr (fp, &t->remote, &tun_remote_pfx);
+      if (!ip46_address_is_multicast (&t->remote))
+	{
+	  /* Unicast tunnel -
+	   * source the FIB entry for the tunnel's destination
+	   * and become a child thereof. The tunnel will then get poked
+	   * when the forwarding for the entry updates, and the tunnel can
+	   * re-stack accordingly
+	   */
+	  vtep_addr_ref (&ngm->vtep_table, t->encap_fib_index, &t->local);
+	  t->fib_entry_index = fib_entry_track (t->encap_fib_index,
+						&tun_remote_pfx,
+						FIB_NODE_TYPE_VXLAN_GPE_TUNNEL,
+						t - ngm->tunnels,
+						&t->sibling_index);
+	  vxlan_gpe_tunnel_restack_dpo (t);
+	}
+      else
+	{
+	  /* Multicast tunnel -
+	   * as the same mcast group can be used for multiple mcast tunnels
+	   * with different VNIs, create the output fib adjacency only if
+	   * it does not already exist
+	   */
+	  if (vtep_addr_ref (&ngm->vtep_table,
+			     t->encap_fib_index, &t->remote) == 1)
+	    {
+	      fib_node_index_t mfei;
+	      adj_index_t ai;
+	      fib_route_path_t path = {
+		.frp_proto = fib_proto_to_dpo (fp),
+		.frp_addr = zero_addr,
+		.frp_sw_if_index = 0xffffffff,
+		.frp_fib_index = ~0,
+		.frp_weight = 1,
+		.frp_flags = FIB_ROUTE_PATH_LOCAL,
+		.frp_mitf_flags = MFIB_ITF_FLAG_FORWARD,
+	      };
+	      const mfib_prefix_t mpfx = {
+		.fp_proto = fp,
+		.fp_len = (is_ip6 ? 128 : 32),
+		.fp_grp_addr = tun_remote_pfx.fp_addr,
+	      };
+
+	      /*
+	       * Setup the (*,G) to receive traffic on the mcast group
+	       * - the forwarding interface is for-us
+	       * - the accepting interface is that from the API
+	       */
+	      mfib_table_entry_path_update (t->encap_fib_index, &mpfx,
+					    MFIB_SOURCE_VXLAN_GPE,
+					    MFIB_ENTRY_FLAG_NONE, &path);
+
+	      path.frp_sw_if_index = a->mcast_sw_if_index;
+	      path.frp_flags = FIB_ROUTE_PATH_FLAG_NONE;
+	      path.frp_mitf_flags = MFIB_ITF_FLAG_ACCEPT;
+	      mfei = mfib_table_entry_path_update (
+		t->encap_fib_index, &mpfx, MFIB_SOURCE_VXLAN_GPE,
+		MFIB_ENTRY_FLAG_NONE, &path);
+
+	      /*
+	       * Create the mcast adjacency to send traffic to the group
+	       */
+	      ai = adj_mcast_add_or_lock (fp,
+					  fib_proto_to_link (fp),
+					  a->mcast_sw_if_index);
+
+	      /*
+	       * create a new end-point
+	       */
+	      mcast_shared_add (&t->remote, mfei, ai);
+	    }
+
+	  dpo_id_t dpo = DPO_INVALID;
+	  mcast_shared_t ep = mcast_shared_get (&t->remote);
+
+	  /* Stack shared mcast remote mac addr rewrite on encap */
+	  dpo_set (&dpo, DPO_ADJACENCY_MCAST,
+		   fib_proto_to_dpo (fp), ep.mcast_adj_index);
+
+	  dpo_stack_from_node (encap_index, &t->next_dpo, &dpo);
+	  dpo_reset (&dpo);
+	  flood_class = VNET_FLOOD_CLASS_TUNNEL_MASTER;
+	}
+
+      vnet_get_sw_interface (vnet_get_main (), sw_if_index)->flood_class =
+	flood_class;
+    }
+  else
+    {
+      /* deleting a tunnel: tunnel must exist */
+      if (!p)
+	return VNET_API_ERROR_NO_SUCH_ENTRY;
+
+      t = pool_elt_at_index (ngm->tunnels, p[0]);
+
+      sw_if_index = t->sw_if_index;
+      vnet_sw_interface_set_flags (vnm, t->sw_if_index, 0 /* down */ );
+      vnet_sw_interface_t *si = vnet_get_sw_interface (vnm, t->sw_if_index);
+      si->flags |= VNET_SW_INTERFACE_FLAG_HIDDEN;
+      set_int_l2_mode (ngm->vlib_main, vnm, MODE_L3, t->sw_if_index, 0,
+		       L2_BD_PORT_TYPE_NORMAL, 0, 0);
+      vec_add1 (ngm->free_vxlan_gpe_tunnel_hw_if_indices, t->hw_if_index);
+
+      ngm->tunnel_index_by_sw_if_index[t->sw_if_index] = ~0;
+
+      /* NOTE(review): the v4 add path inserts with hash_set_mem and a
+       * heap-allocated key copy, but the delete path removes by value
+       * (key4.as_u64) - looks like the stored key copy is leaked and the
+       * key forms may not match; compare with the v6 path's
+       * hash_unset_mem_free - TODO confirm against
+       * vxlan4_gpe_tunnel_key_t's definition. */
+      if (!is_ip6)
+	hash_unset (ngm->vxlan4_gpe_tunnel_by_key, key4.as_u64);
+      else
+	hash_unset_mem_free (&ngm->vxlan6_gpe_tunnel_by_key, &key6);
+
+      if (!ip46_address_is_multicast (&t->remote))
+	{
+	  vtep_addr_unref (&ngm->vtep_table, t->encap_fib_index, &t->local);
+	  fib_entry_untrack (t->fib_entry_index, t->sibling_index);
+	}
+      else if (vtep_addr_unref (&ngm->vtep_table,
+				t->encap_fib_index, &t->remote) == 0)
+	{
+	  mcast_shared_remove (&t->remote);
+	}
+
+      fib_node_deinit (&t->node);
+      vec_free (t->rewrite);
+      pool_put (ngm->tunnels, t);
+    }
+
+  if (sw_if_indexp)
+    *sw_if_indexp = sw_if_index;
+
+  if (a->is_add)
+    {
+      /* register udp ports */
+      /* NOTE(review): v4 checks/registers local_port while v6 uses
+       * remote_port - asymmetric; confirm this is intended. */
+      if (!is_ip6 && !udp_is_valid_dst_port (a->local_port, 1))
+	udp_register_dst_port (ngm->vlib_main, a->local_port,
+			       vxlan4_gpe_input_node.index, 1 /* is_ip4 */);
+      if (is_ip6 && !udp_is_valid_dst_port (a->remote_port, 0))
+	udp_register_dst_port (ngm->vlib_main, a->remote_port,
+			       vxlan6_gpe_input_node.index, 0 /* is_ip4 */);
+    }
+
+  return 0;
+}
+
+/**
+ * @brief CLI handler for "create vxlan-gpe tunnel"
+ *
+ * Parses local/remote (or group) addresses, vni, optional vrf ids, UDP
+ * ports and next-protocol, validates the combination, then calls
+ * vnet_vxlan_gpe_add_del_tunnel.
+ */
+static clib_error_t *
+vxlan_gpe_add_del_tunnel_command_fn (vlib_main_t * vm,
+				     unformat_input_t * input,
+				     vlib_cli_command_t * cmd)
+{
+  unformat_input_t _line_input, *line_input = &_line_input;
+  u8 is_add = 1;
+  ip46_address_t local, remote;
+  u8 local_set = 0;
+  u8 remote_set = 0;
+  u8 grp_set = 0;
+  u8 ipv4_set = 0;
+  u8 ipv6_set = 0;
+  u32 mcast_sw_if_index = ~0;
+  u32 encap_fib_index = 0;
+  u32 decap_fib_index = 0;
+  u8 protocol = VXLAN_GPE_PROTOCOL_IP4;
+  u32 vni;
+  u8 vni_set = 0;
+  u32 local_port = 0;
+  u32 remote_port = 0;
+  int rv;
+  u32 tmp;
+  vnet_vxlan_gpe_add_del_tunnel_args_t _a, *a = &_a;
+  u32 sw_if_index;
+  clib_error_t *error = NULL;
+
+  /* Get a line of input. */
+  if (!unformat_user (input, unformat_line_input, line_input))
+    return 0;
+
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (line_input, "del"))
+	is_add = 0;
+      else if (unformat (line_input, "local %U",
+			 unformat_ip4_address, &local.ip4))
+	{
+	  local_set = 1;
+	  ipv4_set = 1;
+	}
+      else if (unformat (line_input, "remote %U",
+			 unformat_ip4_address, &remote.ip4))
+	{
+	  remote_set = 1;
+	  ipv4_set = 1;
+	}
+      else if (unformat (line_input, "local %U",
+			 unformat_ip6_address, &local.ip6))
+	{
+	  local_set = 1;
+	  ipv6_set = 1;
+	}
+      else if (unformat (line_input, "remote %U",
+			 unformat_ip6_address, &remote.ip6))
+	{
+	  remote_set = 1;
+	  ipv6_set = 1;
+	}
+      else if (unformat (line_input, "group %U %U",
+			 unformat_ip4_address, &remote.ip4,
+			 unformat_vnet_sw_interface,
+			 vnet_get_main (), &mcast_sw_if_index))
+	{
+	  grp_set = remote_set = 1;
+	  ipv4_set = 1;
+	}
+      else if (unformat (line_input, "group %U %U",
+			 unformat_ip6_address, &remote.ip6,
+			 unformat_vnet_sw_interface,
+			 vnet_get_main (), &mcast_sw_if_index))
+	{
+	  grp_set = remote_set = 1;
+	  ipv6_set = 1;
+	}
+      else if (unformat (line_input, "encap-vrf-id %d", &tmp))
+	{
+	  /* NOTE(review): the v4/v6 table choice depends on ipv6_set as
+	   * parsed so far; "encap-vrf-id" given before an IPv6
+	   * local/remote consults the IPv4 table - confirm intended. */
+	  if (ipv6_set)
+	    encap_fib_index = fib_table_find (FIB_PROTOCOL_IP6, tmp);
+	  else
+	    encap_fib_index = fib_table_find (FIB_PROTOCOL_IP4, tmp);
+
+	  if (encap_fib_index == ~0)
+	    {
+	      error =
+		clib_error_return (0, "nonexistent encap fib id %d", tmp);
+	      goto done;
+	    }
+	}
+      else if (unformat (line_input, "decap-vrf-id %d", &tmp))
+	{
+	  if (ipv6_set)
+	    decap_fib_index = fib_table_find (FIB_PROTOCOL_IP6, tmp);
+	  else
+	    decap_fib_index = fib_table_find (FIB_PROTOCOL_IP4, tmp);
+
+	  if (decap_fib_index == ~0)
+	    {
+	      error =
+		clib_error_return (0, "nonexistent decap fib id %d", tmp);
+	      goto done;
+	    }
+	}
+      else if (unformat (line_input, "vni %d", &vni))
+	vni_set = 1;	    /* NOTE(review): no check that vni fits 24 bits */
+      else if (unformat (line_input, "local_port %d", &local_port))
+	;
+      else if (unformat (line_input, "remote_port %d", &remote_port))
+	;
+      else if (unformat (line_input, "next-ip4"))
+	protocol = VXLAN_GPE_PROTOCOL_IP4;
+      else if (unformat (line_input, "next-ip6"))
+	protocol = VXLAN_GPE_PROTOCOL_IP6;
+      else if (unformat (line_input, "next-ethernet"))
+	protocol = VXLAN_GPE_PROTOCOL_ETHERNET;
+      else if (unformat (line_input, "next-nsh"))
+	protocol = VXLAN_GPE_PROTOCOL_NSH;
+      else
+	{
+	  error = clib_error_return (0, "parse error: '%U'",
+				     format_unformat_error, line_input);
+	  goto done;
+	}
+    }
+
+  /* Cross-field validation before touching any state. */
+  if (local_set == 0)
+    {
+      error = clib_error_return (0, "tunnel local address not specified");
+      goto done;
+    }
+
+  if (remote_set == 0)
+    {
+      error = clib_error_return (0, "tunnel remote address not specified");
+      goto done;
+    }
+
+  if (grp_set && !ip46_address_is_multicast (&remote))
+    {
+      error = clib_error_return (0, "tunnel group address not multicast");
+      goto done;
+    }
+
+  if (grp_set == 0 && ip46_address_is_multicast (&remote))
+    {
+      error = clib_error_return (0, "remote address must be unicast");
+      goto done;
+    }
+
+  if (grp_set && mcast_sw_if_index == ~0)
+    {
+      error = clib_error_return (0, "tunnel nonexistent multicast device");
+      goto done;
+    }
+  if (ipv4_set && ipv6_set)
+    {
+      error = clib_error_return (0, "both IPv4 and IPv6 addresses specified");
+      goto done;
+    }
+
+  if ((ipv4_set && memcmp (&local.ip4, &remote.ip4, sizeof (local.ip4)) == 0)
+      || (ipv6_set
+	  && memcmp (&local.ip6, &remote.ip6, sizeof (local.ip6)) == 0))
+    {
+      error = clib_error_return (0, "src and remote addresses are identical");
+      goto done;
+    }
+
+  if (vni_set == 0)
+    {
+      error = clib_error_return (0, "vni not specified");
+      goto done;
+    }
+
+  clib_memset (a, 0, sizeof (*a));
+
+  a->is_add = is_add;
+  a->is_ip6 = ipv6_set;
+
+  /* Copy parsed locals of the same names into the args structure. */
+#define _(x) a->x = x;
+  foreach_gpe_copy_field;
+  if (ipv4_set)
+    foreach_copy_ipv4
+  else
+    foreach_copy_ipv6
+#undef _
+
+  rv = vnet_vxlan_gpe_add_del_tunnel (a, &sw_if_index);
+
+  switch (rv)
+    {
+    case 0:
+      vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name,
+		       vnet_get_main (), sw_if_index);
+      break;
+    case VNET_API_ERROR_INVALID_DECAP_NEXT:
+      error = clib_error_return (0, "invalid decap-next...");
+      goto done;
+
+    case VNET_API_ERROR_TUNNEL_EXIST:
+      error = clib_error_return (0, "tunnel already exists...");
+      goto done;
+
+    case VNET_API_ERROR_NO_SUCH_ENTRY:
+      error = clib_error_return (0, "tunnel does not exist...");
+      goto done;
+
+    default:
+      error = clib_error_return
+	(0, "vnet_vxlan_gpe_add_del_tunnel returned %d", rv);
+      goto done;
+    }
+
+done:
+  unformat_free (line_input);
+
+  return error;
+}
+
+/*?
+ * Add or delete a VXLAN-GPE Tunnel.
+ *
+ * VXLAN-GPE provides the features needed to allow L2 bridge domains (BDs)
+ * to span multiple servers. This is done by building an L2 overlay on
+ * top of an L3 network underlay using VXLAN-GPE tunnels.
+ *
+ * This makes it possible for servers to be co-located in the same data
+ * center or be separated geographically as long as they are reachable
+ * through the underlay L3 network.
+ *
+ * You can refer to this kind of L2 overlay bridge domain as a VXLAN-GPE segment.
+ *
+ * @cliexpar
+ * Example of how to create a VXLAN-GPE Tunnel:
+ * @cliexcmd{create vxlan-gpe tunnel local 10.0.3.1 remote 10.0.3.3 vni 13 encap-vrf-id 7}
+ * Example of how to delete a VXLAN-GPE Tunnel:
+ * @cliexcmd{create vxlan-gpe tunnel local 10.0.3.1 remote 10.0.3.3 vni 13 del}
+ ?*/
+VLIB_CLI_COMMAND (create_vxlan_gpe_tunnel_command, static) = {
+  .path = "create vxlan-gpe tunnel",
+  /* short_help now lists the local_port/remote_port options the parser
+   * actually accepts. */
+  .short_help =
+    "create vxlan-gpe tunnel local <local-addr> "
+    " {remote <remote-addr>|group <mcast-addr> <intf-name>}"
+    " vni <nn> [next-ip4][next-ip6][next-ethernet][next-nsh]"
+    " [encap-vrf-id <nn>] [decap-vrf-id <nn>]"
+    " [local_port <nn>] [remote_port <nn>] [del]\n",
+  .function = vxlan_gpe_add_del_tunnel_command_fn,
+};
+
+/**
+ * @brief CLI function for showing VXLAN GPE tunnels
+ *
+ * Prints one formatted line per tunnel in the pool, or a notice when
+ * none are configured.
+ *
+ * @param *vm vlib main
+ * @param *input CLI input (unused)
+ * @param *cmd CLI command (unused)
+ *
+ * @return 0 (never fails)
+ *
+ */
+static clib_error_t *
+show_vxlan_gpe_tunnel_command_fn (vlib_main_t * vm,
+				  unformat_input_t * input,
+				  vlib_cli_command_t * cmd)
+{
+  vxlan_gpe_main_t *gm = &vxlan_gpe_main;
+  vxlan_gpe_tunnel_t *tun;
+
+  if (pool_elts (gm->tunnels) == 0)
+    vlib_cli_output (vm, "No vxlan-gpe tunnels configured.");
+
+  pool_foreach (tun, gm->tunnels)
+    {
+      vlib_cli_output (vm, "%U", format_vxlan_gpe_tunnel, tun);
+    }
+
+  return 0;
+}
+
+/*?
+ * Display all the VXLAN-GPE Tunnel entries.
+ *
+ * @cliexpar
+ * Example of how to display the VXLAN-GPE Tunnel entries:
+ * @cliexstart{show vxlan-gpe tunnel}
+ * [0] local 10.0.3.1 remote 10.0.3.3 vni 13 encap_fib_index 0 sw_if_index 5 decap_next l2
+ * @cliexend
+ ?*/
+VLIB_CLI_COMMAND (show_vxlan_gpe_tunnel_command, static) = {
+  .path = "show vxlan-gpe",
+  /* Added: previously no short_help, so "help" gave no usage hint. */
+  .short_help = "show vxlan-gpe",
+  .function = show_vxlan_gpe_tunnel_command_fn,
+};
+
+void
+vnet_int_vxlan_gpe_bypass_mode (u32 sw_if_index, u8 is_ip6, u8 is_enable)
+{
+  /* Toggle the vxlan-gpe bypass feature on the matching unicast arc. */
+  const char *arc = is_ip6 ? "ip6-unicast" : "ip4-unicast";
+  const char *node = is_ip6 ? "ip6-vxlan-gpe-bypass" : "ip4-vxlan-gpe-bypass";
+
+  vnet_feature_enable_disable (arc, node, sw_if_index, is_enable, 0, 0);
+}
+
+
+/**
+ * @brief Shared CLI worker for "set interface ip[6] vxlan-gpe-bypass"
+ *
+ * Parses "<interface> [del]" and enables (default) or disables the
+ * bypass feature for the given address family.
+ *
+ * @param is_ip6 non-zero selects the IPv6 arc
+ * @param *input CLI input
+ * @param *cmd CLI command (unused)
+ *
+ * @return NULL on success, parse/validation error otherwise
+ */
+static clib_error_t *
+set_ip_vxlan_gpe_bypass (u32 is_ip6,
+			 unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+  unformat_input_t _line_input, *line_input = &_line_input;
+  vnet_main_t *vnm = vnet_get_main ();
+  clib_error_t *error = 0;
+  u32 sw_if_index, is_enable;
+
+  sw_if_index = ~0;
+  is_enable = 1;
+
+  if (!unformat_user (input, unformat_line_input, line_input))
+    return 0;
+
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      /* Interface name is tried before "del" - keep this order. */
+      if (unformat_user
+	  (line_input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+	;
+      else if (unformat (line_input, "del"))
+	is_enable = 0;
+      else
+	{
+	  error = unformat_parse_error (line_input);
+	  goto done;
+	}
+    }
+
+  if (~0 == sw_if_index)
+    {
+      error = clib_error_return (0, "unknown interface `%U'",
+				 format_unformat_error, line_input);
+      goto done;
+    }
+
+  vnet_int_vxlan_gpe_bypass_mode (sw_if_index, is_ip6, is_enable);
+
+done:
+  unformat_free (line_input);
+
+  return error;
+}
+
+/* Thin wrapper: dispatch the shared parser with is_ip6 = 0 (IPv4 arc). */
+static clib_error_t *
+set_ip4_vxlan_gpe_bypass (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ return set_ip_vxlan_gpe_bypass (0, input, cmd);
+}
+
+/*?
+ * This command adds the 'ip4-vxlan-gpe-bypass' graph node for a given
+ * interface. By adding the IPv4 vxlan-gpe-bypass graph node to an interface,
+ * the node checks for and validates input vxlan_gpe packets and bypasses
+ * the ip4-lookup, ip4-local, ip4-udp-lookup nodes to speed up vxlan_gpe
+ * packet forwarding. This node causes extra overhead for non-vxlan_gpe
+ * packets, which is kept to a minimum.
+ *
+ * @cliexpar
+ * @parblock
+ * Example of graph node before ip4-vxlan-gpe-bypass is enabled:
+ * @cliexstart{show vlib graph ip4-vxlan-gpe-bypass}
+ * Name Next Previous
+ * ip4-vxlan-gpe-bypass error-drop [0]
+ * vxlan4-gpe-input [1]
+ * ip4-lookup [2]
+ * @cliexend
+ *
+ * Example of how to enable ip4-vxlan-gpe-bypass on an interface:
+ * @cliexcmd{set interface ip vxlan-gpe-bypass GigabitEthernet2/0/0}
+ *
+ * Example of graph node after ip4-vxlan-gpe-bypass is enabled:
+ * @cliexstart{show vlib graph ip4-vxlan-gpe-bypass}
+ * Name Next Previous
+ * ip4-vxlan-gpe-bypass error-drop [0] ip4-input
+ * vxlan4-gpe-input [1] ip4-input-no-checksum
+ * ip4-lookup [2]
+ * @cliexend
+ *
+ * Example of how to display the feature enabled on an interface:
+ * @cliexstart{show ip interface features GigabitEthernet2/0/0}
+ * IP feature paths configured on GigabitEthernet2/0/0...
+ * ...
+ * ipv4 unicast:
+ * ip4-vxlan-gpe-bypass
+ * ip4-lookup
+ * ...
+ * @cliexend
+ *
+ * Example of how to disable ip4-vxlan-gpe-bypass on an interface:
+ * @cliexcmd{set interface ip vxlan-gpe-bypass GigabitEthernet2/0/0 del}
+ * @endparblock
+?*/
+VLIB_CLI_COMMAND (set_interface_ip_vxlan_gpe_bypass_command, static) = {
+ .path = "set interface ip vxlan-gpe-bypass",
+ .function = set_ip4_vxlan_gpe_bypass,
+ .short_help = "set interface ip vxlan-gpe-bypass <interface> [del]",
+};
+
+/* Thin wrapper: dispatch the shared parser with is_ip6 = 1 (IPv6 arc). */
+static clib_error_t *
+set_ip6_vxlan_gpe_bypass (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ return set_ip_vxlan_gpe_bypass (1, input, cmd);
+}
+
+/*?
+ * This command adds the 'ip6-vxlan-gpe-bypass' graph node for a given
+ * interface. By adding the IPv6 vxlan-gpe-bypass graph node to an interface,
+ * the node checks for and validates input vxlan_gpe packets and bypasses
+ * the ip6-lookup, ip6-local, ip6-udp-lookup nodes to speed up vxlan_gpe
+ * packet forwarding. This node causes extra overhead for non-vxlan_gpe
+ * packets, which is kept to a minimum.
+ *
+ * @cliexpar
+ * @parblock
+ * Example of graph node before ip6-vxlan-gpe-bypass is enabled:
+ * @cliexstart{show vlib graph ip6-vxlan-gpe-bypass}
+ * Name Next Previous
+ * ip6-vxlan-gpe-bypass error-drop [0]
+ * vxlan6-gpe-input [1]
+ * ip6-lookup [2]
+ * @cliexend
+ *
+ * Example of how to enable ip6-vxlan-gpe-bypass on an interface:
+ * @cliexcmd{set interface ip6 vxlan-gpe-bypass GigabitEthernet2/0/0}
+ *
+ * Example of graph node after ip6-vxlan-gpe-bypass is enabled:
+ * @cliexstart{show vlib graph ip6-vxlan-gpe-bypass}
+ * Name Next Previous
+ * ip6-vxlan-gpe-bypass error-drop [0] ip6-input
+ * vxlan6-gpe-input [1] ip6-input-no-checksum
+ * ip6-lookup [2]
+ * @cliexend
+ *
+ * Example of how to display the feature enabled on an interface:
+ * @cliexstart{show ip interface features GigabitEthernet2/0/0}
+ * IP feature paths configured on GigabitEthernet2/0/0...
+ * ...
+ * ipv6 unicast:
+ * ip6-vxlan-gpe-bypass
+ * ip6-lookup
+ * ...
+ * @cliexend
+ *
+ * Example of how to disable ip6-vxlan-gpe-bypass on an interface:
+ * @cliexcmd{set interface ip6 vxlan-gpe-bypass GigabitEthernet2/0/0 del}
+ * @endparblock
+?*/
+VLIB_CLI_COMMAND (set_interface_ip6_vxlan_gpe_bypass_command, static) = {
+ .path = "set interface ip6 vxlan-gpe-bypass",
+ .function = set_ip6_vxlan_gpe_bypass,
+ .short_help = "set interface ip6 vxlan-gpe-bypass <interface> [del]",
+};
+
+/* Register the bypass nodes on the IPv4/IPv6 unicast feature arcs so they
+ * run before the corresponding ip[46]-lookup nodes. */
+VNET_FEATURE_INIT (ip4_vxlan_gpe_bypass, static) =
+{
+ .arc_name = "ip4-unicast",
+ .node_name = "ip4-vxlan-gpe-bypass",
+ .runs_before = VNET_FEATURES ("ip4-lookup"),
+};
+
+VNET_FEATURE_INIT (ip6_vxlan_gpe_bypass, static) =
+{
+ .arc_name = "ip6-unicast",
+ .node_name = "ip6-vxlan-gpe-bypass",
+ .runs_before = VNET_FEATURES ("ip6-lookup"),
+};
+
+/**
+ * @brief Feature init function for VXLAN GPE
+ *
+ * @param *vm
+ *
+ * @return error
+ *
+ */
+__clib_export clib_error_t *
+vxlan_gpe_init (vlib_main_t *vm)
+{
+ vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
+
+ /* Export decap protocol (un)registration hooks via the main struct. */
+ ngm->register_decap_protocol = vxlan_gpe_register_decap_protocol;
+ ngm->unregister_decap_protocol = vxlan_gpe_unregister_decap_protocol;
+ ngm->vnet_main = vnet_get_main ();
+ ngm->vlib_main = vm;
+
+ /* Tunnel lookup tables keyed by the full v4/v6 tunnel key structs. */
+ ngm->vxlan4_gpe_tunnel_by_key
+ = hash_create_mem (0, sizeof (vxlan4_gpe_tunnel_key_t), sizeof (uword));
+
+ ngm->vxlan6_gpe_tunnel_by_key
+ = hash_create_mem (0, sizeof (vxlan6_gpe_tunnel_key_t), sizeof (uword));
+
+
+ /* Shared multicast state, keyed on the mcast ip46 address. */
+ ngm->mcast_shared = hash_create_mem (0,
+ sizeof (ip46_address_t),
+ sizeof (mcast_shared_t));
+ ngm->vtep_table = vtep_table_create ();
+
+ /* Register the list of standard decap protocols supported */
+ vxlan_gpe_register_decap_protocol (VXLAN_GPE_PROTOCOL_IP4,
+ VXLAN_GPE_INPUT_NEXT_IP4_INPUT);
+ vxlan_gpe_register_decap_protocol (VXLAN_GPE_PROTOCOL_IP6,
+ VXLAN_GPE_INPUT_NEXT_IP6_INPUT);
+ vxlan_gpe_register_decap_protocol (VXLAN_GPE_PROTOCOL_ETHERNET,
+ VXLAN_GPE_INPUT_NEXT_L2_INPUT);
+
+ /* Make the tunnel type known to the FIB object graph. */
+ fib_node_register_type (FIB_NODE_TYPE_VXLAN_GPE_TUNNEL, &vxlan_gpe_vft);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (vxlan_gpe_init);
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/vxlan-gpe/vxlan_gpe.h b/src/plugins/vxlan-gpe/vxlan_gpe.h
new file mode 100644
index 00000000000..138ae840ef5
--- /dev/null
+++ b/src/plugins/vxlan-gpe/vxlan_gpe.h
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief VXLAN GPE definitions
+ *
+*/
+#ifndef included_vnet_vxlan_gpe_h
+#define included_vnet_vxlan_gpe_h
+
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ip/vtep.h>
+#include <vnet/l2/l2_input.h>
+#include <vnet/l2/l2_output.h>
+#include <vnet/l2/l2_bd.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vxlan-gpe/vxlan_gpe_packet.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/udp/udp_packet.h>
+#include <vnet/dpo/dpo.h>
+#include <vnet/adj/adj_types.h>
+
+/**
+ * @brief VXLAN GPE header struct
+ *
+ */
+typedef CLIB_PACKED (struct {
+ /** 20 bytes */
+ ip4_header_t ip4;
+ /** 8 bytes */
+ udp_header_t udp;
+ /** 8 bytes */
+ vxlan_gpe_header_t vxlan;
+}) ip4_vxlan_gpe_header_t;
+
+typedef CLIB_PACKED (struct {
+ /** 40 bytes */
+ ip6_header_t ip6;
+ /** 8 bytes */
+ udp_header_t udp;
+ /** 8 bytes */
+ vxlan_gpe_header_t vxlan;
+}) ip6_vxlan_gpe_header_t;
+
+/**
+ * @brief Key struct for IPv4 VXLAN GPE tunnel.
+ * Key fields: local remote, vni, udp-port
+ * all fields in NET byte order
+ * VNI shifted 8 bits
+ */
+typedef CLIB_PACKED(struct {
+ union {
+ struct {
+ u32 local;
+ u32 remote;
+
+ u32 vni;
+ u32 port;
+ };
+ u64 as_u64[2];
+ };
+}) vxlan4_gpe_tunnel_key_t;
+
+/**
+ * @brief Key struct for IPv6 VXLAN GPE tunnel.
+ * Key fields: local remote, vni, udp-port
+ * all fields in NET byte order
+ * VNI shifted 8 bits
+ */
+typedef CLIB_PACKED(struct {
+ ip6_address_t local;
+ ip6_address_t remote;
+ u32 vni;
+ u32 port;
+}) vxlan6_gpe_tunnel_key_t;
+
+/**
+ * @brief Per-packet decap result (tunnel index, next node, error code)
+ * packed into a single u64 so it can be copied/compared as one word.
+ */
+typedef union
+{
+ struct
+ {
+ u32 tunnel_index;
+ u16 next_index;
+ u8 error;
+ };
+ u64 as_u64;
+} vxlan_gpe_decap_info_t;
+
+/**
+ * @brief Struct for VXLAN GPE tunnel
+ */
+typedef struct
+{
+ /* Required for pool_get_aligned */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
+ /** Rewrite string. $$$$ embed vnet_rewrite header */
+ u8 *rewrite;
+
+ /** encapsulated protocol */
+ u8 protocol;
+
+ /* FIB DPO for IP forwarding of VXLAN-GPE encap packet */
+ dpo_id_t next_dpo;
+ /** tunnel local address */
+ ip46_address_t local;
+ /** tunnel remote address */
+ ip46_address_t remote;
+ /** local udp-port **/
+ u16 local_port;
+ /** remote udp-port **/
+ u16 remote_port;
+
+ /* mcast packet output intfc index (used only if dst is mcast) */
+ u32 mcast_sw_if_index;
+
+ /** FIB indices - tunnel partner lookup here */
+ u32 encap_fib_index;
+ /** FIB indices - inner IP packet lookup here */
+ u32 decap_fib_index;
+
+ /** VXLAN GPE VNI in HOST byte order, shifted left 8 bits */
+ u32 vni;
+
+ /** vnet intfc hw_if_index */
+ u32 hw_if_index;
+ /** vnet intfc sw_if_index */
+ u32 sw_if_index;
+
+ /** flags */
+ u32 flags;
+
+ /** rewrite size for dynamic plugins like iOAM */
+ u8 rewrite_size;
+
+ /** Next node after VxLAN-GPE encap */
+ uword encap_next_node;
+
+ /**
+ * Linkage into the FIB object graph
+ */
+ fib_node_t node;
+
+ /*
+ * The FIB entry for (depending on VXLAN-GPE tunnel is unicast or mcast)
+ * sending unicast VXLAN-GPE encap packets or receiving mcast VXLAN-GPE packets
+ */
+ fib_node_index_t fib_entry_index;
+ adj_index_t mcast_adj_index;
+
+ /**
+ * The tunnel is a child of the FIB entry for its destination. This is
+ * so it receives updates when the forwarding information for that entry
+ * changes.
+ * The tunnels sibling index on the FIB entry's dependency list.
+ */
+ u32 sibling_index;
+
+} vxlan_gpe_tunnel_t;
+
+/** Flags for vxlan_gpe_tunnel_t */
+#define VXLAN_GPE_TUNNEL_IS_IPV4 1
+
+/** next nodes for VXLAN GPE input */
+#define foreach_vxlan_gpe_input_next \
+_(DROP, "error-drop") \
+_(IP4_INPUT, "ip4-input") \
+_(IP6_INPUT, "ip6-input") \
+_(L2_INPUT, "l2-input")
+
+/** struct for next nodes for VXLAN GPE input */
+typedef enum
+{
+#define _(s,n) VXLAN_GPE_INPUT_NEXT_##s,
+ foreach_vxlan_gpe_input_next
+#undef _
+ VXLAN_GPE_INPUT_N_NEXT,
+} vxlan_gpe_input_next_t;
+
+/** struct for VXLAN GPE errors */
+typedef enum
+{
+#define vxlan_gpe_error(n,s) VXLAN_GPE_ERROR_##n,
+#include <plugins/vxlan-gpe/vxlan_gpe_error.def>
+#undef vxlan_gpe_error
+ VXLAN_GPE_N_ERROR,
+} vxlan_gpe_input_error_t;
+
+typedef void (*vxlan_gpe_register_decap_protocol_callback_t) (
+ u8 protocol_id, uword next_node_index);
+typedef void (*vxlan_gpe_unregister_decap_protocol_callback_t) (
+ u8 protocol_id, uword next_node_index);
+
+/** Struct for VXLAN GPE node state */
+typedef struct
+{
+ /** vector of encap tunnel instances */
+ vxlan_gpe_tunnel_t *tunnels;
+
+ /** lookup IPv4 VXLAN GPE tunnel by key */
+ uword *vxlan4_gpe_tunnel_by_key;
+ /** lookup IPv6 VXLAN GPE tunnel by key */
+ uword *vxlan6_gpe_tunnel_by_key;
+
+ /* local VTEP IPs ref count used by vxlan-bypass node to check if
+ received VXLAN packet DIP matches any local VTEP address */
+ vtep_table_t vtep_table;
+ /* mcast shared info */
+ uword *mcast_shared; /* keyed on mcast ip46 addr */
+ /** Free vlib hw_if_indices */
+ u32 *free_vxlan_gpe_tunnel_hw_if_indices;
+
+ /** Mapping from sw_if_index to tunnel index */
+ u32 *tunnel_index_by_sw_if_index;
+
+ /** State convenience vlib_main_t */
+ vlib_main_t *vlib_main;
+ /** State convenience vnet_main_t */
+ vnet_main_t *vnet_main;
+
+ /* cache for last 8 vxlan_gpe tunnel */
+ vtep4_cache_t vtep4_u512;
+
+ /** List of next nodes for the decap indexed on protocol */
+ uword decap_next_node_list[VXLAN_GPE_PROTOCOL_MAX];
+
+ /* export callbacks to register/unregister decapsulation protocol */
+ vxlan_gpe_register_decap_protocol_callback_t register_decap_protocol;
+ vxlan_gpe_unregister_decap_protocol_callback_t unregister_decap_protocol;
+} vxlan_gpe_main_t;
+
+extern vxlan_gpe_main_t vxlan_gpe_main;
+
+extern vlib_node_registration_t vxlan_gpe_encap_node;
+extern vlib_node_registration_t vxlan4_gpe_input_node;
+extern vlib_node_registration_t vxlan6_gpe_input_node;
+
+u8 *format_vxlan_gpe_encap_trace (u8 * s, va_list * args);
+
+/** Struct for VXLAN GPE add/del args */
+typedef struct
+{
+ u8 is_add;
+ u8 is_ip6;
+ ip46_address_t local, remote;
+ u8 protocol;
+ u32 mcast_sw_if_index;
+ u32 encap_fib_index;
+ u32 decap_fib_index;
+ u32 vni;
+ u16 local_port;
+ u16 remote_port;
+} vnet_vxlan_gpe_add_del_tunnel_args_t;
+
+
+int vnet_vxlan_gpe_add_del_tunnel
+ (vnet_vxlan_gpe_add_del_tunnel_args_t * a, u32 * sw_if_indexp);
+
+
+int vxlan4_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
+ u8 protocol_override, uword encap_next_node);
+int vxlan6_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
+ u8 protocol_override, uword encap_next_node);
+
+/**
+ * @brief Struct for defining VXLAN GPE next nodes
+ */
+typedef enum
+{
+ VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP,
+ VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP,
+ VXLAN_GPE_ENCAP_NEXT_DROP,
+ VXLAN_GPE_ENCAP_N_NEXT
+} vxlan_gpe_encap_next_t;
+
+void vxlan_gpe_register_decap_protocol (u8 protocol_id, uword next_node_index);
+void vxlan_gpe_unregister_decap_protocol (u8 protocol_id,
+ uword next_node_index);
+
+void vnet_int_vxlan_gpe_bypass_mode (u32 sw_if_index, u8 is_ip6,
+ u8 is_enable);
+
+#endif /* included_vnet_vxlan_gpe_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/vxlan-gpe/vxlan_gpe_api.c b/src/plugins/vxlan-gpe/vxlan_gpe_api.c
new file mode 100644
index 00000000000..e82445498e8
--- /dev/null
+++ b/src/plugins/vxlan-gpe/vxlan_gpe_api.c
@@ -0,0 +1,360 @@
+/*
+ *------------------------------------------------------------------
+ * vxlan_gpe_api.c - vxlan_gpe api
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/interface.h>
+#include <vnet/api_errno.h>
+#include <vnet/feature/feature.h>
+#include <vxlan-gpe/vxlan_gpe.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/format_fns.h>
+
+#include <vnet/ip/ip_types_api.h>
+#include <vxlan-gpe/vxlan_gpe.api_enum.h>
+#include <vxlan-gpe/vxlan_gpe.api_types.h>
+
+#define REPLY_MSG_ID_BASE msg_id_base
+#include <vlibapi/api_helper_macros.h>
+
+static u16 msg_id_base;
+
+/* API handler: enable/disable the vxlan-gpe-bypass feature on an
+ * interface (mp->is_ipv6 selects the IPv4 or IPv6 arc). */
+static void
+ vl_api_sw_interface_set_vxlan_gpe_bypass_t_handler
+ (vl_api_sw_interface_set_vxlan_gpe_bypass_t * mp)
+{
+ vl_api_sw_interface_set_vxlan_gpe_bypass_reply_t *rmp;
+ int rv = 0;
+ u32 sw_if_index = ntohl (mp->sw_if_index);
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+ vnet_int_vxlan_gpe_bypass_mode (sw_if_index, mp->is_ipv6, mp->enable);
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_SW_INTERFACE_SET_VXLAN_GPE_BYPASS_REPLY);
+}
+
+/* API handler (v1): add or delete a VXLAN-GPE tunnel.
+ * Ports are not settable in v1; the zeroed args struct leaves them at
+ * their defaults. Replies with the tunnel's sw_if_index on add. */
+static void
+ vl_api_vxlan_gpe_add_del_tunnel_t_handler
+ (vl_api_vxlan_gpe_add_del_tunnel_t * mp)
+{
+ vl_api_vxlan_gpe_add_del_tunnel_reply_t *rmp;
+ int rv = 0;
+ vnet_vxlan_gpe_add_del_tunnel_args_t _a, *a = &_a;
+ u32 encap_fib_index, decap_fib_index;
+ u8 protocol;
+ uword *p;
+ ip4_main_t *im = &ip4_main;
+ u32 sw_if_index = ~0;
+
+ /* NOTE(review): the encap VRF id is resolved via the IPv4 fib table-id
+ * map even when the tunnel addresses are IPv6 — confirm intended. */
+ p = hash_get (im->fib_index_by_table_id, ntohl (mp->encap_vrf_id));
+ if (!p)
+ {
+ rv = VNET_API_ERROR_NO_SUCH_FIB;
+ goto out;
+ }
+ encap_fib_index = p[0];
+
+ protocol = mp->protocol;
+
+ /* Interpret decap_vrf_id as an opaque if sending to other-than-ip4-input */
+ if (protocol == VXLAN_GPE_INPUT_NEXT_IP4_INPUT)
+ {
+ p = hash_get (im->fib_index_by_table_id, ntohl (mp->decap_vrf_id));
+ if (!p)
+ {
+ rv = VNET_API_ERROR_NO_SUCH_INNER_FIB;
+ goto out;
+ }
+ decap_fib_index = p[0];
+ }
+ else
+ {
+ decap_fib_index = ntohl (mp->decap_vrf_id);
+ }
+
+
+ clib_memset (a, 0, sizeof (*a));
+
+ a->is_add = mp->is_add;
+ ip_address_decode (&mp->local, &a->local);
+ ip_address_decode (&mp->remote, &a->remote);
+
+ /* Check src & dst are different */
+ if (ip46_address_is_equal (&a->local, &a->remote))
+ {
+ rv = VNET_API_ERROR_SAME_SRC_DST;
+ goto out;
+ }
+
+ a->is_ip6 = !ip46_address_is_ip4 (&a->local);
+ a->mcast_sw_if_index = ntohl (mp->mcast_sw_if_index);
+ a->encap_fib_index = encap_fib_index;
+ a->decap_fib_index = decap_fib_index;
+ a->protocol = protocol;
+ a->vni = ntohl (mp->vni);
+ rv = vnet_vxlan_gpe_add_del_tunnel (a, &sw_if_index);
+
+out:
+ REPLY_MACRO2(VL_API_VXLAN_GPE_ADD_DEL_TUNNEL_REPLY,
+ ({
+ rmp->sw_if_index = ntohl (sw_if_index);
+ }));
+}
+
+/* API handler (v2): same as v1 plus configurable local/remote UDP ports.
+ * NOTE(review): largely duplicates the v1 handler — candidate for a
+ * shared helper in a follow-up change. */
+static void
+vl_api_vxlan_gpe_add_del_tunnel_v2_t_handler (
+ vl_api_vxlan_gpe_add_del_tunnel_v2_t *mp)
+{
+ vl_api_vxlan_gpe_add_del_tunnel_v2_reply_t *rmp;
+ int rv = 0;
+ vnet_vxlan_gpe_add_del_tunnel_args_t _a, *a = &_a;
+ u32 encap_fib_index, decap_fib_index;
+ u8 protocol;
+ uword *p;
+ ip4_main_t *im = &ip4_main;
+ u32 sw_if_index = ~0;
+
+ p = hash_get (im->fib_index_by_table_id, ntohl (mp->encap_vrf_id));
+ if (!p)
+ {
+ rv = VNET_API_ERROR_NO_SUCH_FIB;
+ goto out;
+ }
+ encap_fib_index = p[0];
+
+ protocol = mp->protocol;
+
+ /* Interpret decap_vrf_id as an opaque if sending to other-than-ip4-input */
+ if (protocol == VXLAN_GPE_INPUT_NEXT_IP4_INPUT)
+ {
+ p = hash_get (im->fib_index_by_table_id, ntohl (mp->decap_vrf_id));
+ if (!p)
+ {
+ rv = VNET_API_ERROR_NO_SUCH_INNER_FIB;
+ goto out;
+ }
+ decap_fib_index = p[0];
+ }
+ else
+ {
+ decap_fib_index = ntohl (mp->decap_vrf_id);
+ }
+
+ clib_memset (a, 0, sizeof (*a));
+
+ a->is_add = mp->is_add;
+ ip_address_decode (&mp->local, &a->local);
+ ip_address_decode (&mp->remote, &a->remote);
+
+ /* Check src & dst are different */
+ if (ip46_address_is_equal (&a->local, &a->remote))
+ {
+ rv = VNET_API_ERROR_SAME_SRC_DST;
+ goto out;
+ }
+
+ /* v2 extension: caller-specified UDP ports. */
+ a->local_port = ntohs (mp->local_port);
+ a->remote_port = ntohs (mp->remote_port);
+ a->is_ip6 = !ip46_address_is_ip4 (&a->local);
+ a->mcast_sw_if_index = ntohl (mp->mcast_sw_if_index);
+ a->encap_fib_index = encap_fib_index;
+ a->decap_fib_index = decap_fib_index;
+ a->protocol = protocol;
+ a->vni = ntohl (mp->vni);
+ rv = vnet_vxlan_gpe_add_del_tunnel (a, &sw_if_index);
+
+out:
+ REPLY_MACRO2 (VL_API_VXLAN_GPE_ADD_DEL_TUNNEL_V2_REPLY,
+ ({ rmp->sw_if_index = ntohl (sw_if_index); }));
+}
+
+/* Encode one tunnel into a v1 details message and send it to the client. */
+static void send_vxlan_gpe_tunnel_details
+ (vxlan_gpe_tunnel_t * t, vl_api_registration_t * reg, u32 context)
+{
+ vl_api_vxlan_gpe_tunnel_details_t *rmp;
+ ip4_main_t *im4 = &ip4_main;
+ ip6_main_t *im6 = &ip6_main;
+ /* Address family is recorded in the tunnel flags at create time. */
+ u8 is_ipv6 = !(t->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ clib_memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id =
+ ntohs (REPLY_MSG_ID_BASE + VL_API_VXLAN_GPE_TUNNEL_DETAILS);
+
+ ip_address_encode (&t->local, is_ipv6 ? IP46_TYPE_IP6 : IP46_TYPE_IP4,
+ &rmp->local);
+ ip_address_encode (&t->remote, is_ipv6 ? IP46_TYPE_IP6 : IP46_TYPE_IP4,
+ &rmp->remote);
+
+ /* Translate internal fib indices back to user-visible table ids. */
+ if (ip46_address_is_ip4 (&t->local))
+ {
+ rmp->encap_vrf_id = htonl (im4->fibs[t->encap_fib_index].ft_table_id);
+ rmp->decap_vrf_id = htonl (im4->fibs[t->decap_fib_index].ft_table_id);
+ }
+ else
+ {
+ rmp->encap_vrf_id = htonl (im6->fibs[t->encap_fib_index].ft_table_id);
+ rmp->decap_vrf_id = htonl (im6->fibs[t->decap_fib_index].ft_table_id);
+ }
+ rmp->mcast_sw_if_index = htonl (t->mcast_sw_if_index);
+ rmp->vni = htonl (t->vni);
+ rmp->protocol = t->protocol;
+ rmp->sw_if_index = htonl (t->sw_if_index);
+ rmp->context = context;
+
+ vl_api_send_msg (reg, (u8 *) rmp);
+}
+
+/* API handler: dump tunnel details. sw_if_index == ~0 dumps every
+ * tunnel; otherwise only the tunnel owning that interface (silently
+ * nothing if the index is unknown). */
+static void vl_api_vxlan_gpe_tunnel_dump_t_handler
+ (vl_api_vxlan_gpe_tunnel_dump_t * mp)
+{
+ vl_api_registration_t *reg;
+ vxlan_gpe_main_t *vgm = &vxlan_gpe_main;
+ vxlan_gpe_tunnel_t *t;
+ u32 sw_if_index;
+
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
+ return;
+
+ sw_if_index = ntohl (mp->sw_if_index);
+
+ if (~0 == sw_if_index)
+ {
+ pool_foreach (t, vgm->tunnels)
+ {
+ send_vxlan_gpe_tunnel_details (t, reg, mp->context);
+ }
+ }
+ else
+ {
+ /* Bounds-check before indexing the sw_if_index -> tunnel map. */
+ if ((sw_if_index >= vec_len (vgm->tunnel_index_by_sw_if_index)) ||
+ (~0 == vgm->tunnel_index_by_sw_if_index[sw_if_index]))
+ {
+ return;
+ }
+ t = &vgm->tunnels[vgm->tunnel_index_by_sw_if_index[sw_if_index]];
+ send_vxlan_gpe_tunnel_details (t, reg, mp->context);
+ }
+}
+
+/* Encode one tunnel into a v2 details message (adds local/remote UDP
+ * ports over v1) and send it to the client. */
+static void
+send_vxlan_gpe_tunnel_v2_details (vxlan_gpe_tunnel_t *t,
+ vl_api_registration_t *reg, u32 context)
+{
+ vl_api_vxlan_gpe_tunnel_v2_details_t *rmp;
+ ip4_main_t *im4 = &ip4_main;
+ ip6_main_t *im6 = &ip6_main;
+ /* Address family is recorded in the tunnel flags at create time. */
+ u8 is_ipv6 = !(t->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ clib_memset (rmp, 0, sizeof (*rmp));
+ rmp->_vl_msg_id =
+ ntohs (REPLY_MSG_ID_BASE + VL_API_VXLAN_GPE_TUNNEL_V2_DETAILS);
+
+ ip_address_encode (&t->local, is_ipv6 ? IP46_TYPE_IP6 : IP46_TYPE_IP4,
+ &rmp->local);
+ ip_address_encode (&t->remote, is_ipv6 ? IP46_TYPE_IP6 : IP46_TYPE_IP4,
+ &rmp->remote);
+ rmp->local_port = htons (t->local_port);
+ rmp->remote_port = htons (t->remote_port);
+
+ /* Translate internal fib indices back to user-visible table ids. */
+ if (ip46_address_is_ip4 (&t->local))
+ {
+ rmp->encap_vrf_id = htonl (im4->fibs[t->encap_fib_index].ft_table_id);
+ rmp->decap_vrf_id = htonl (im4->fibs[t->decap_fib_index].ft_table_id);
+ }
+ else
+ {
+ rmp->encap_vrf_id = htonl (im6->fibs[t->encap_fib_index].ft_table_id);
+ rmp->decap_vrf_id = htonl (im6->fibs[t->decap_fib_index].ft_table_id);
+ }
+ rmp->mcast_sw_if_index = htonl (t->mcast_sw_if_index);
+ rmp->vni = htonl (t->vni);
+ rmp->protocol = t->protocol;
+ rmp->sw_if_index = htonl (t->sw_if_index);
+ rmp->context = context;
+
+ vl_api_send_msg (reg, (u8 *) rmp);
+}
+
+/* API handler: v2 tunnel dump. Same selection semantics as the v1 dump
+ * (sw_if_index == ~0 means all tunnels). */
+static void
+vl_api_vxlan_gpe_tunnel_v2_dump_t_handler (
+ vl_api_vxlan_gpe_tunnel_v2_dump_t *mp)
+{
+ vl_api_registration_t *reg;
+ vxlan_gpe_main_t *vgm = &vxlan_gpe_main;
+ vxlan_gpe_tunnel_t *t;
+ u32 sw_if_index;
+
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
+ return;
+
+ sw_if_index = ntohl (mp->sw_if_index);
+
+ if (~0 == sw_if_index)
+ {
+ pool_foreach (t, vgm->tunnels)
+ {
+ send_vxlan_gpe_tunnel_v2_details (t, reg, mp->context);
+ }
+ }
+ else
+ {
+ /* Bounds-check before indexing the sw_if_index -> tunnel map. */
+ if ((sw_if_index >= vec_len (vgm->tunnel_index_by_sw_if_index)) ||
+ (~0 == vgm->tunnel_index_by_sw_if_index[sw_if_index]))
+ {
+ return;
+ }
+ t = &vgm->tunnels[vgm->tunnel_index_by_sw_if_index[sw_if_index]];
+ send_vxlan_gpe_tunnel_v2_details (t, reg, mp->context);
+ }
+}
+
+#include <vxlan-gpe/vxlan_gpe.api.c>
+
+/* Plugin API init: enlarge the trace buffer for the add/del message and
+ * register this plugin's message-id table with the API infrastructure. */
+static clib_error_t *
+vxlan_gpe_api_hookup (vlib_main_t * vm)
+{
+ api_main_t *am = vlibapi_get_main ();
+
+ vl_api_increase_msg_trace_size (am, VL_API_VXLAN_GPE_ADD_DEL_TUNNEL,
+ 17 * sizeof (u32));
+
+ /*
+ * Set up the (msg_name, crc, message-id) table
+ */
+ msg_id_base = setup_message_id_table ();
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (vxlan_gpe_api_hookup);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/vxlan-gpe/vxlan_gpe_error.def b/src/plugins/vxlan-gpe/vxlan_gpe_error.def
new file mode 100644
index 00000000000..9cf1b1cb656
--- /dev/null
+++ b/src/plugins/vxlan-gpe/vxlan_gpe_error.def
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+vxlan_gpe_error (DECAPSULATED, "good packets decapsulated")
+vxlan_gpe_error (NO_SUCH_TUNNEL, "no such tunnel packets")
diff --git a/src/plugins/vxlan-gpe/vxlan_gpe_packet.h b/src/plugins/vxlan-gpe/vxlan_gpe_packet.h
new file mode 100644
index 00000000000..f5e5ddc2347
--- /dev/null
+++ b/src/plugins/vxlan-gpe/vxlan_gpe_packet.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief VXLAN GPE packet header structure
+ *
+*/
+#ifndef included_vxlan_gpe_packet_h
+#define included_vxlan_gpe_packet_h
+
+/**
+ * From draft-quinn-vxlan-gpe-03.txt
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |R|R|R|R|I|P|R|O|Ver| Reserved |Next Protocol |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | VXLAN Network Identifier (VNI) | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * I Bit: Flag bit 4 indicates that the VNI is valid.
+ *
+ * P Bit: Flag bit 5 is defined as the Next Protocol bit. The P bit
+ * MUST be set to 1 to indicate the presence of the 8 bit next
+ * protocol field.
+ *
+ * O Bit: Flag bit 7 is defined as the O bit. When the O bit is set to 1,
+ *
+ * the packet is an OAM packet and OAM processing MUST occur. The OAM
+ * protocol details are out of scope for this document. As with the
+ * P-bit, bit 7 is currently a reserved flag in VXLAN.
+ *
+ * VXLAN-gpe bits 8 and 9 are defined as version bits. These bits are
+ * reserved in VXLAN. The version field is used to ensure backward
+ * compatibility going forward with future VXLAN-gpe updates.
+ *
+ * The initial version for VXLAN-gpe is 0.
+ *
+ * This draft defines the following Next Protocol values:
+ *
+ * 0x1 : IPv4
+ * 0x2 : IPv6
+ * 0x3 : Ethernet
+ * 0x4 : Network Service Header [NSH]
+ */
+
+/**
+ * @brief VXLAN GPE support inner protocol definition.
+ * 1 - IP4
+ * 2 - IP6
+ * 3 - ETHERNET
+ * 4 - NSH
+ */
+#define foreach_vxlan_gpe_protocol \
+_ (0x01, IP4) \
+_ (0x02, IP6) \
+_ (0x03, ETHERNET) \
+_ (0x04, NSH) \
+_ (0x05, IOAM)
+
+
+/**
+ * @brief Struct for VXLAN GPE support inner protocol definition.
+ * 1 - IP4
+ * 2 - IP6
+ * 3 - ETHERNET
+ * 4 - NSH
+ * 5 - IOAM
+ */
+typedef enum
+{
+#define _(n,f) VXLAN_GPE_PROTOCOL_##f = n,
+ foreach_vxlan_gpe_protocol
+#undef _
+ VXLAN_GPE_PROTOCOL_MAX,
+} vxlan_gpe_protocol_t;
+
+/**
+ * @brief VXLAN GPE Header definition
+ */
+typedef struct
+{
+ /** Flag bits: I (VNI valid), P (next-protocol present), O (OAM) */
+ u8 flags;
+ /** Version and Reserved */
+ u8 ver_res;
+ /** Reserved */
+ u8 res;
+ /** see vxlan_gpe_protocol_t */
+ u8 protocol;
+ /** VNI and Reserved */
+ u32 vni_res;
+} vxlan_gpe_header_t;
+
+#define VXLAN_GPE_FLAGS_I 0x08
+#define VXLAN_GPE_FLAGS_P 0x04
+#define VXLAN_GPE_FLAGS_O 0x01
+#define VXLAN_GPE_VERSION 0x0
+
+#endif /* included_vxlan_gpe_packet_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/vxlan/decap.c b/src/plugins/vxlan/decap.c
index 5f28c5e97bb..4ad35bc2d5d 100644
--- a/src/plugins/vxlan/decap.c
+++ b/src/plugins/vxlan/decap.c
@@ -193,7 +193,7 @@ vxlan_input (vlib_main_t * vm,
last_tunnel_cache4 last4;
last_tunnel_cache6 last6;
u32 pkts_dropped = 0;
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
if (is_ip4)
clib_memset (&last4, 0xff, sizeof last4);
@@ -1039,7 +1039,7 @@ VLIB_NODE_FN (vxlan4_flow_input_node) (vlib_main_t * vm,
[VXLAN_FLOW_NEXT_L2_INPUT] =
im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
};
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
u32 *from = vlib_frame_vector_args (f);
u32 n_left_from = f->n_vectors;
diff --git a/src/plugins/vxlan/encap.c b/src/plugins/vxlan/encap.c
index 98464d809ba..60181bff451 100644
--- a/src/plugins/vxlan/encap.c
+++ b/src/plugins/vxlan/encap.c
@@ -78,7 +78,7 @@ vxlan_encap_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
vlib_combined_counter_main_t *tx_counter =
im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
u32 pkts_encapsulated = 0;
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
u32 sw_if_index0 = 0, sw_if_index1 = 0;
u32 next0 = 0, next1 = 0;
vxlan_tunnel_t *t0 = NULL, *t1 = NULL;
diff --git a/src/plugins/wireguard/wireguard_input.c b/src/plugins/wireguard/wireguard_input.c
index 1eb7fbfed0b..0ae0480fc2c 100644
--- a/src/plugins/wireguard/wireguard_input.c
+++ b/src/plugins/wireguard/wireguard_input.c
@@ -698,7 +698,7 @@ wg_input_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
vlib_buffer_t *lb;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
vnet_crypto_op_t **crypto_ops;
const u16 drop_next = WG_INPUT_NEXT_PUNT;
message_type_t header_type;
diff --git a/src/plugins/wireguard/wireguard_output_tun.c b/src/plugins/wireguard/wireguard_output_tun.c
index c9411f6ff20..7bbec11fdcb 100644
--- a/src/plugins/wireguard/wireguard_output_tun.c
+++ b/src/plugins/wireguard/wireguard_output_tun.c
@@ -436,7 +436,7 @@ wg_output_tun_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
vnet_crypto_op_t **crypto_ops;
u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u16 n_sync = 0;
const u16 drop_next = WG_OUTPUT_NEXT_ERROR;
const u8 is_async = wg_op_mode_is_set_ASYNC ();