path: root/src/vnet/gre/node.c
author    Damjan Marion <damarion@cisco.com>    2016-12-19 23:05:39 +0100
committer Damjan Marion <damarion@cisco.com>    2016-12-28 12:25:14 +0100
commit    7cd468a3d7dee7d6c92f69a0bb7061ae208ec727 (patch)
tree      5de62f8dbd3a752f5a676ca600e43d2652d1ff1a /src/vnet/gre/node.c
parent    696f1adec0df3b8f161862566dd9c86174302658 (diff)
Reorganize source tree to use single autotools instance
Change-Id: I7b51f88292e057c6443b12224486f2d0c9f8ae23
Signed-off-by: Damjan Marion <damarion@cisco.com>
Diffstat (limited to 'src/vnet/gre/node.c')
-rw-r--r--  src/vnet/gre/node.c  531
1 file changed, 531 insertions, 0 deletions
diff --git a/src/vnet/gre/node.c b/src/vnet/gre/node.c
new file mode 100644
index 00000000000..86f7a6eeea4
--- /dev/null
+++ b/src/vnet/gre/node.c
@@ -0,0 +1,531 @@
+/*
+ * node.c: gre packet processing
+ *
+ * Copyright (c) 2012 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/pg/pg.h>
+#include <vnet/gre/gre.h>
+#include <vnet/mpls/mpls.h>
+#include <vppinfra/sparse_vec.h>
+
+#define foreach_gre_input_next \
+_(PUNT, "error-punt") \
+_(DROP, "error-drop") \
+_(ETHERNET_INPUT, "ethernet-input") \
+_(IP4_INPUT, "ip4-input") \
+_(IP6_INPUT, "ip6-input") \
+_(MPLS_INPUT, "mpls-input")
+
+typedef enum {
+#define _(s,n) GRE_INPUT_NEXT_##s,
+ foreach_gre_input_next
+#undef _
+ GRE_INPUT_N_NEXT,
+} gre_input_next_t;
+
+typedef struct {
+ u32 tunnel_id;
+ u32 length;
+ ip4_address_t src;
+ ip4_address_t dst;
+} gre_rx_trace_t;
+
+u8 * format_gre_rx_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ gre_rx_trace_t * t = va_arg (*args, gre_rx_trace_t *);
+
+ s = format (s, "GRE: tunnel %d len %d src %U dst %U",
+ t->tunnel_id, clib_net_to_host_u16(t->length),
+ format_ip4_address, &t->src.as_u8,
+ format_ip4_address, &t->dst.as_u8);
+ return s;
+}
+
+typedef struct {
+ /* Sparse vector mapping gre protocol in network byte order
+ to next index. */
+ u16 * next_by_protocol;
+} gre_input_runtime_t;
+
+static uword
+gre_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ gre_main_t * gm = &gre_main;
+ gre_input_runtime_t * rt = (void *) node->runtime_data;
+ __attribute__((unused)) u32 n_left_from, next_index, * from, * to_next;
+ u64 cached_tunnel_key = (u64) ~0;
+ u32 cached_tunnel_sw_if_index = 0, tunnel_sw_if_index = 0;
+
+ u32 cpu_index = os_get_cpu_number();
+ u32 len;
+ vnet_interface_main_t *im = &gm->vnet_main->interface_main;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 bi0, bi1;
+ vlib_buffer_t * b0, * b1;
+ gre_header_t * h0, * h1;
+ u16 version0, version1;
+ int verr0, verr1;
+ u32 i0, i1, next0, next1, protocol0, protocol1;
+ ip4_header_t *ip0, *ip1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, * p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, sizeof (h0[0]), LOAD);
+ CLIB_PREFETCH (p3->data, sizeof (h1[0]), LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ /* ip4_local hands us the ip header, not the gre header */
+ ip0 = vlib_buffer_get_current (b0);
+ ip1 = vlib_buffer_get_current (b1);
+
+ /* Save src + dst ip4 address, e.g. for mpls-o-gre */
+ vnet_buffer(b0)->gre.src = ip0->src_address.as_u32;
+ vnet_buffer(b0)->gre.dst = ip0->dst_address.as_u32;
+ vnet_buffer(b1)->gre.src = ip1->src_address.as_u32;
+ vnet_buffer(b1)->gre.dst = ip1->dst_address.as_u32;
+
+ vlib_buffer_advance (b0, sizeof (*ip0));
+ vlib_buffer_advance (b1, sizeof (*ip1));
+
+ h0 = vlib_buffer_get_current (b0);
+ h1 = vlib_buffer_get_current (b1);
+
+ /* Index sparse array with network byte order. */
+ protocol0 = h0->protocol;
+ protocol1 = h1->protocol;
+ sparse_vec_index2 (rt->next_by_protocol, protocol0, protocol1,
+ &i0, &i1);
+ next0 = vec_elt(rt->next_by_protocol, i0);
+ next1 = vec_elt(rt->next_by_protocol, i1);
+
+ b0->error = node->errors[i0 == SPARSE_VEC_INVALID_INDEX ? GRE_ERROR_UNKNOWN_PROTOCOL : GRE_ERROR_NONE];
+ b1->error = node->errors[i1 == SPARSE_VEC_INVALID_INDEX ? GRE_ERROR_UNKNOWN_PROTOCOL : GRE_ERROR_NONE];
+
+ version0 = clib_net_to_host_u16 (h0->flags_and_version);
+ verr0 = version0 & GRE_VERSION_MASK;
+ version1 = clib_net_to_host_u16 (h1->flags_and_version);
+ verr1 = version1 & GRE_VERSION_MASK;
+
+ b0->error = verr0 ? node->errors[GRE_ERROR_UNSUPPORTED_VERSION]
+ : b0->error;
+ next0 = verr0 ? GRE_INPUT_NEXT_DROP : next0;
+ b1->error = verr1 ? node->errors[GRE_ERROR_UNSUPPORTED_VERSION]
+ : b1->error;
+ next1 = verr1 ? GRE_INPUT_NEXT_DROP : next1;
+
+
+ /* RPF check for ip4/ip6 input */
+ if (PREDICT_TRUE(next0 == GRE_INPUT_NEXT_IP4_INPUT
+ || next0 == GRE_INPUT_NEXT_IP6_INPUT
+ || next0 == GRE_INPUT_NEXT_ETHERNET_INPUT
+ || next0 == GRE_INPUT_NEXT_MPLS_INPUT))
+ {
+ u64 key = ((u64)(vnet_buffer(b0)->gre.dst) << 32) |
+ (u64)(vnet_buffer(b0)->gre.src);
+
+ if (cached_tunnel_key != key)
+ {
+ vnet_hw_interface_t * hi;
+ gre_tunnel_t * t;
+ uword * p;
+
+ p = hash_get (gm->tunnel_by_key, key);
+ if (!p)
+ {
+ next0 = GRE_INPUT_NEXT_DROP;
+ b0->error = node->errors[GRE_ERROR_NO_SUCH_TUNNEL];
+ goto drop0;
+ }
+ t = pool_elt_at_index (gm->tunnels, p[0]);
+ hi = vnet_get_hw_interface (gm->vnet_main,
+ t->hw_if_index);
+ tunnel_sw_if_index = hi->sw_if_index;
+
+ cached_tunnel_sw_if_index = tunnel_sw_if_index;
+ }
+ else
+ {
+ tunnel_sw_if_index = cached_tunnel_sw_if_index;
+ }
+ }
+ else
+ {
+ next0 = GRE_INPUT_NEXT_DROP;
+ goto drop0;
+ }
+ len = vlib_buffer_length_in_chain (vm, b0);
+ vlib_increment_combined_counter (im->combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX,
+ cpu_index,
+ tunnel_sw_if_index,
+ 1 /* packets */,
+ len /* bytes */);
+
+ vnet_buffer(b0)->sw_if_index[VLIB_RX] = tunnel_sw_if_index;
+
+drop0:
+ if (PREDICT_TRUE(next1 == GRE_INPUT_NEXT_IP4_INPUT
+ || next1 == GRE_INPUT_NEXT_IP6_INPUT
+ || next1 == GRE_INPUT_NEXT_ETHERNET_INPUT
+ || next1 == GRE_INPUT_NEXT_MPLS_INPUT))
+ {
+ u64 key = ((u64)(vnet_buffer(b1)->gre.dst) << 32) |
+ (u64)(vnet_buffer(b1)->gre.src);
+
+ if (cached_tunnel_key != key)
+ {
+ vnet_hw_interface_t * hi;
+ gre_tunnel_t * t;
+ uword * p;
+
+ p = hash_get (gm->tunnel_by_key, key);
+ if (!p)
+ {
+ next1 = GRE_INPUT_NEXT_DROP;
+ b1->error = node->errors[GRE_ERROR_NO_SUCH_TUNNEL];
+ goto drop1;
+ }
+ t = pool_elt_at_index (gm->tunnels, p[0]);
+ hi = vnet_get_hw_interface (gm->vnet_main,
+ t->hw_if_index);
+ tunnel_sw_if_index = hi->sw_if_index;
+
+ cached_tunnel_sw_if_index = tunnel_sw_if_index;
+ }
+ else
+ {
+ tunnel_sw_if_index = cached_tunnel_sw_if_index;
+ }
+ }
+ else
+ {
+ next1 = GRE_INPUT_NEXT_DROP;
+ goto drop1;
+ }
+ len = vlib_buffer_length_in_chain (vm, b1);
+ vlib_increment_combined_counter (im->combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX,
+ cpu_index,
+ tunnel_sw_if_index,
+ 1 /* packets */,
+ len /* bytes */);
+
+ vnet_buffer(b1)->sw_if_index[VLIB_RX] = tunnel_sw_if_index;
+
+drop1:
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ gre_rx_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->tunnel_id = tunnel_sw_if_index;
+ tr->length = ip0->length;
+ tr->src.as_u32 = ip0->src_address.as_u32;
+ tr->dst.as_u32 = ip0->dst_address.as_u32;
+ }
+
+ if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ gre_rx_trace_t *tr = vlib_add_trace (vm, node,
+ b1, sizeof (*tr));
+ tr->tunnel_id = tunnel_sw_if_index;
+ tr->length = ip1->length;
+ tr->src.as_u32 = ip1->src_address.as_u32;
+ tr->dst.as_u32 = ip1->dst_address.as_u32;
+ }
+
+ vlib_buffer_advance (b0, sizeof (*h0));
+ vlib_buffer_advance (b1, sizeof (*h1));
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t * b0;
+ gre_header_t * h0;
+ ip4_header_t * ip0;
+ u16 version0;
+ int verr0;
+ u32 i0, next0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ ip0 = vlib_buffer_get_current (b0);
+
+ vnet_buffer(b0)->gre.src = ip0->src_address.as_u32;
+ vnet_buffer(b0)->gre.dst = ip0->dst_address.as_u32;
+
+ vlib_buffer_advance (b0, sizeof (*ip0));
+
+ h0 = vlib_buffer_get_current (b0);
+
+ i0 = sparse_vec_index (rt->next_by_protocol, h0->protocol);
+ next0 = vec_elt(rt->next_by_protocol, i0);
+
+ b0->error =
+ node->errors[i0 == SPARSE_VEC_INVALID_INDEX
+ ? GRE_ERROR_UNKNOWN_PROTOCOL : GRE_ERROR_NONE];
+
+ version0 = clib_net_to_host_u16 (h0->flags_and_version);
+ verr0 = version0 & GRE_VERSION_MASK;
+ b0->error = verr0 ? node->errors[GRE_ERROR_UNSUPPORTED_VERSION]
+ : b0->error;
+ next0 = verr0 ? GRE_INPUT_NEXT_DROP : next0;
+
+
+ /* For IP payload we need to find source interface
+ so we can increase counters and help forward node to
+ pick right FIB */
+ /* RPF check for ip4/ip6 input */
+ if (PREDICT_TRUE(next0 == GRE_INPUT_NEXT_IP4_INPUT
+ || next0 == GRE_INPUT_NEXT_IP6_INPUT
+ || next0 == GRE_INPUT_NEXT_ETHERNET_INPUT
+ || next0 == GRE_INPUT_NEXT_MPLS_INPUT))
+ {
+ u64 key = ((u64)(vnet_buffer(b0)->gre.dst) << 32) |
+ (u64)(vnet_buffer(b0)->gre.src);
+
+ if (cached_tunnel_key != key)
+ {
+ vnet_hw_interface_t * hi;
+ gre_tunnel_t * t;
+ uword * p;
+
+ p = hash_get (gm->tunnel_by_key, key);
+ if (!p)
+ {
+ next0 = GRE_INPUT_NEXT_DROP;
+ b0->error = node->errors[GRE_ERROR_NO_SUCH_TUNNEL];
+ goto drop;
+ }
+ t = pool_elt_at_index (gm->tunnels, p[0]);
+ hi = vnet_get_hw_interface (gm->vnet_main,
+ t->hw_if_index);
+ tunnel_sw_if_index = hi->sw_if_index;
+
+ cached_tunnel_sw_if_index = tunnel_sw_if_index;
+ }
+ else
+ {
+ tunnel_sw_if_index = cached_tunnel_sw_if_index;
+ }
+ }
+ else
+ {
+ next0 = GRE_INPUT_NEXT_DROP;
+ goto drop;
+ }
+ len = vlib_buffer_length_in_chain (vm, b0);
+ vlib_increment_combined_counter (im->combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX,
+ cpu_index,
+ tunnel_sw_if_index,
+ 1 /* packets */,
+ len /* bytes */);
+
+ vnet_buffer(b0)->sw_if_index[VLIB_RX] = tunnel_sw_if_index;
+
+drop:
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ gre_rx_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->tunnel_id = tunnel_sw_if_index;
+ tr->length = ip0->length;
+ tr->src.as_u32 = ip0->src_address.as_u32;
+ tr->dst.as_u32 = ip0->dst_address.as_u32;
+ }
+
+ vlib_buffer_advance (b0, sizeof (*h0));
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ vlib_node_increment_counter (vm, gre_input_node.index,
+ GRE_ERROR_PKTS_DECAP, from_frame->n_vectors);
+ return from_frame->n_vectors;
+}
+
+static char * gre_error_strings[] = {
+#define gre_error(n,s) s,
+#include "error.def"
+#undef gre_error
+};
+
+VLIB_REGISTER_NODE (gre_input_node) = {
+ .function = gre_input,
+ .name = "gre-input",
+ /* Takes a vector of packets. */
+ .vector_size = sizeof (u32),
+
+ .runtime_data_bytes = sizeof (gre_input_runtime_t),
+
+ .n_errors = GRE_N_ERROR,
+ .error_strings = gre_error_strings,
+
+ .n_next_nodes = GRE_INPUT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [GRE_INPUT_NEXT_##s] = n,
+ foreach_gre_input_next
+#undef _
+ },
+
+ .format_buffer = format_gre_header_with_length,
+ .format_trace = format_gre_rx_trace,
+ .unformat_buffer = unformat_gre_header,
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (gre_input_node, gre_input)
+
+void
+gre_register_input_protocol (vlib_main_t * vm,
+ gre_protocol_t protocol,
+ u32 node_index)
+{
+ gre_main_t * em = &gre_main;
+ gre_protocol_info_t * pi;
+ gre_input_runtime_t * rt;
+ u16 * n;
+
+ {
+ clib_error_t * error = vlib_call_init_function (vm, gre_input_init);
+ if (error)
+ clib_error_report (error);
+ }
+
+ pi = gre_get_protocol_info (em, protocol);
+ pi->node_index = node_index;
+ pi->next_index = vlib_node_add_next (vm,
+ gre_input_node.index,
+ node_index);
+
+ /* Setup gre protocol -> next index sparse vector mapping. */
+ rt = vlib_node_get_runtime_data (vm, gre_input_node.index);
+ n = sparse_vec_validate (rt->next_by_protocol,
+ clib_host_to_net_u16 (protocol));
+ n[0] = pi->next_index;
+}
+
+static void
+gre_setup_node (vlib_main_t * vm, u32 node_index)
+{
+ vlib_node_t * n = vlib_get_node (vm, node_index);
+ pg_node_t * pn = pg_get_node (node_index);
+
+ n->format_buffer = format_gre_header_with_length;
+ n->unformat_buffer = unformat_gre_header;
+ pn->unformat_edit = unformat_pg_gre_header;
+}
+
+static clib_error_t * gre_input_init (vlib_main_t * vm)
+{
+ gre_input_runtime_t * rt;
+ vlib_node_t *ethernet_input, *ip4_input, *ip6_input, *mpls_unicast_input;
+
+ {
+ clib_error_t * error;
+ error = vlib_call_init_function (vm, gre_init);
+ if (error)
+ clib_error_report (error);
+ }
+
+ gre_setup_node (vm, gre_input_node.index);
+
+ rt = vlib_node_get_runtime_data (vm, gre_input_node.index);
+
+ rt->next_by_protocol = sparse_vec_new
+ (/* elt bytes */ sizeof (rt->next_by_protocol[0]),
+ /* bits in index */ BITS (((gre_header_t *) 0)->protocol));
+
+ /* These could be moved to the supported protocol input node defn's */
+ ethernet_input = vlib_get_node_by_name (vm, (u8 *)"ethernet-input");
+ ASSERT(ethernet_input);
+ ip4_input = vlib_get_node_by_name (vm, (u8 *)"ip4-input");
+ ASSERT(ip4_input);
+ ip6_input = vlib_get_node_by_name (vm, (u8 *)"ip6-input");
+ ASSERT(ip6_input);
+ mpls_unicast_input = vlib_get_node_by_name (vm, (u8 *)"mpls-input");
+ ASSERT(mpls_unicast_input);
+
+ gre_register_input_protocol (vm, GRE_PROTOCOL_teb,
+ ethernet_input->index);
+
+ gre_register_input_protocol (vm, GRE_PROTOCOL_ip4,
+ ip4_input->index);
+
+ gre_register_input_protocol (vm, GRE_PROTOCOL_ip6,
+ ip6_input->index);
+
+ gre_register_input_protocol (vm, GRE_PROTOCOL_mpls_unicast,
+ mpls_unicast_input->index);
+
+ ip4_register_protocol (IP_PROTOCOL_GRE, gre_input_node.index);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (gre_input_init);
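
The decap logic above keys everything off a sparse vector: the 16-bit GRE
protocol field (kept in network byte order) indexes rt->next_by_protocol, a
miss comes back as SPARSE_VEC_INVALID_INDEX, and gre_register_input_protocol()
is how payload nodes (ethernet-input, ip4-input, ip6-input, mpls-input) get
their slots. Below is a minimal Python sketch of that dispatch idea; the
protocol values and node names are illustrative stand-ins, not the real VPP
structures:

    # Toy model of gre_input's protocol -> next-node dispatch (not VPP code).
    INVALID_INDEX = object()        # stands in for SPARSE_VEC_INVALID_INDEX

    next_by_protocol = {}           # protocol value -> next node name

    def register_input_protocol(protocol, next_node):
        next_by_protocol[protocol] = next_node

    def dispatch(protocol):
        """Return (next_node, error) for one GRE payload type."""
        nxt = next_by_protocol.get(protocol, INVALID_INDEX)
        if nxt is INVALID_INDEX:
            return "error-drop", "GRE_ERROR_UNKNOWN_PROTOCOL"
        return nxt, None

    register_input_protocol(0x0800, "ip4-input")       # GRE_PROTOCOL_ip4
    register_input_protocol(0x86DD, "ip6-input")       # GRE_PROTOCOL_ip6
    register_input_protocol(0x6558, "ethernet-input")  # GRE_PROTOCOL_teb
    print(dispatch(0x0800))  # ('ip4-input', None)
    print(dispatch(0x1234))  # ('error-drop', 'GRE_ERROR_UNKNOWN_PROTOCOL')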
# Copyright (c) 2017 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Implementation of keywords for Honeycomb setup."""

from json import loads
from time import time, sleep

from ipaddress import IPv6Address, AddressValueError

from robot.api import logger

from resources.libraries.python.HTTPRequest import HTTPRequest, HTTPCodes, \
    HTTPRequestError
from resources.libraries.python.constants import Constants as Const
from resources.libraries.python.honeycomb.HoneycombUtil import HoneycombError
from resources.libraries.python.honeycomb.HoneycombUtil \
    import HoneycombUtil as HcUtil
from resources.libraries.python.ssh import SSH
from resources.libraries.python.topology import NodeType


class HoneycombSetup(object):
    """Implements keywords for Honeycomb setup.

    The keywords implemented in this class make it possible to:
    - start Honeycomb,
    - stop Honeycomb,
    - check the Honeycomb start-up state,
    - check the Honeycomb shutdown state,
    - add VPP to the topology.
    """

    def __init__(self):
        pass

    @staticmethod
    def start_honeycomb_on_duts(*nodes):
        """Start Honeycomb on specified DUT nodes.

        This keyword starts the Honeycomb service on the specified DUTs. It
        only starts Honeycomb and does not check its startup state; use the
        keyword "Check Honeycomb Startup State" to check whether Honeycomb is
        up and running.
        Honeycomb must be installed in the "/opt" directory, otherwise the
        start will fail.
        :param nodes: List of nodes to start Honeycomb on.
        :type nodes: list
        :raises HoneycombError: If Honeycomb fails to start.
        """

        HoneycombSetup.print_environment(nodes)

        logger.console("\n(re)Starting Honeycomb service ...")

        cmd = "sudo service honeycomb start"

        for node in nodes:
            if node['type'] == NodeType.DUT:
                ssh = SSH()
                ssh.connect(node)
                (ret_code, _, _) = ssh.exec_command_sudo(cmd)
                if int(ret_code) != 0:
                    raise HoneycombError('Node {0} failed to start Honeycomb.'.
                                         format(node['host']))
                else:
                    logger.info("Starting the Honeycomb service on node {0} is "
                                "in progress ...".format(node['host']))

    @staticmethod
    def stop_honeycomb_on_duts(*nodes):
        """Stop the Honeycomb service on specified DUT nodes.

        This keyword stops the Honeycomb service on the specified nodes. It
        only stops Honeycomb and does not check its shutdown state; use the
        keyword "Check Honeycomb Shutdown State" to check whether Honeycomb
        has stopped.
        :param nodes: List of nodes to stop Honeycomb on.
        :type nodes: list
        :raises HoneycombError: If Honeycomb failed to stop.
        """
        logger.console("\nShutting down Honeycomb service ...")

        cmd = "sudo service honeycomb stop"
        errors = []

        for node in nodes:
            if node['type'] == NodeType.DUT:
                ssh = SSH()
                ssh.connect(node)
                (ret_code, _, _) = ssh.exec_command_sudo(cmd)
                if int(ret_code) != 0:
                    errors.append(node['host'])
                else:
                    logger.info("Stopping the Honeycomb service on node {0} is "
                                "in progress ...".format(node['host']))
        if errors:
            raise HoneycombError('Node(s) {0} failed to stop Honeycomb.'.
                                 format(errors))

    @staticmethod
    def restart_honeycomb_on_dut(node):
        """Restart Honeycomb on specified DUT nodes.

        This keyword restarts the Honeycomb service on specified DUTs. Use the
        keyword "Check Honeycomb Startup State" to check if the Honeycomb is up
        and running.

        :param node: Node to restart Honeycomb on.
        :type node: dict
        :raises HoneycombError: If Honeycomb fails to start.
        """

        logger.console("\n(re)Starting Honeycomb service ...")

        cmd = "sudo service honeycomb restart"

        ssh = SSH()
        ssh.connect(node)
        (ret_code, _, _) = ssh.exec_command_sudo(cmd)
        if int(ret_code) != 0:
            raise HoneycombError('Node {0} failed to restart Honeycomb.'.
                                 format(node['host']))
        else:
            logger.info(
                "Honeycomb service restart is in progress on node {0}".format(
                    node['host']))

    @staticmethod
    def check_honeycomb_startup_state(node, timeout=360, retries=20,
                                      interval=15):
        """Repeatedly check the status of Honeycomb startup until it is fully
        started or until timeout or max retries is reached.

        :param node: Honeycomb node.
        :param timeout: Timeout value in seconds.
        :param retries: Max number of retries.
        :param interval: Interval between checks, in seconds.
        :type node: dict
        :type timeout: int
        :type retries: int
        :type interval: int
        :raises HoneycombError: If the Honeycomb process PID cannot be found,
            or if the timeout or maximum number of retries is exceeded.
        """

        ssh = SSH()
        ssh.connect(node)
        ret_code, pid, _ = ssh.exec_command("pgrep honeycomb")
        if ret_code != 0:
            raise HoneycombError("No process named 'honeycomb' found.")

        pid = int(pid)
        count = 0
        start = time()
        while time() - start < timeout and count < retries:
            count += 1
            ret_code, _, _ = ssh.exec_command(
                " | ".join([
                    "sudo tail -n 1000 /var/log/syslog",
                    "grep {pid}".format(pid=pid),
                    "grep 'Honeycomb started successfully!'"])
            )
            if ret_code != 0:
                logger.debug(
                    "Attempt #{count} failed on log check.".format(
                        count=count))
                sleep(interval)
                continue
            status_code_version, _ = HcUtil.get_honeycomb_data(
                node, "oper_vpp_version")
            status_code_if_cfg, _ = HcUtil.get_honeycomb_data(
                node, "config_vpp_interfaces")
            status_code_if_oper, _ = HcUtil.get_honeycomb_data(
                node, "oper_vpp_interfaces")
            if status_code_version == HTTPCodes.OK\
                    and status_code_if_cfg == HTTPCodes.OK\
                    and status_code_if_oper == HTTPCodes.OK:
                logger.info("Check successful, Honeycomb is up and running.")
                break
            else:
                logger.debug(
                    "Attempt ${count} failed on Restconf check. Status codes:\n"
                    "Version: {version}\n"
                    "Interface config: {if_cfg}\n"
                    "Interface operational: {if_oper}".format(
                        count=count,
                        version=status_code_version,
                        if_cfg=status_code_if_cfg,
                        if_oper=status_code_if_oper))
                sleep(interval)
                continue
        else:
            _, vpp_status, _ = ssh.exec_command("service vpp status")
            ret_code, hc_log, _ = ssh.exec_command(
                " | ".join([
                    "sudo tail -n 1000 /var/log/syslog",
                    "grep {pid}".format(pid=pid)]))
            raise HoneycombError(
                "Timeout or max retries exceeded. Status of VPP:\n"
                "{vpp_status}\n"
                "Syslog entries filtered by Honeycomb's pid:\n"
                "{hc_log}".format(vpp_status=vpp_status, hc_log=hc_log))

    @staticmethod
    def check_honeycomb_shutdown_state(node):
        """Check state of Honeycomb service during shutdown on specified nodes.

        Honeycomb nodes reply with connection refused or the following status
        codes depending on shutdown progress: codes 200, 404.

        :param node: List of DUT nodes stopping Honeycomb.
        :type node: dict
        :return: True if all GETs fail to connect.
        :rtype bool
        """
        cmd = "pgrep honeycomb"

        ssh = SSH()
        ssh.connect(node)
        (ret_code, _, _) = ssh.exec_command_sudo(cmd)
        if ret_code == 0:
            raise HoneycombError('Honeycomb on node {0} is still '
                                 'running.'.format(node['host']),
                                 enable_logging=False)
        else:
            logger.info("Honeycomb on node {0} has stopped".
                        format(node['host']))
        return True

    @staticmethod
    def configure_restconf_binding_address(node):
        """Configure Honeycomb to accept restconf requests from all IP
        addresses. IP version is determined by node data.

        :param node: Information about a DUT node.
        :type node: dict
        :raises HoneycombError: If the configuration could not be changed.
        """

        find = "restconf-binding-address"
        try:
            IPv6Address(unicode(node["host"]))
            # if management IP of the node is in IPv6 format
            replace = '\\"restconf-binding-address\\": \\"0::0\\",'
        except (AttributeError, AddressValueError):
            replace = '\\"restconf-binding-address\\": \\"0.0.0.0\\",'

        argument = '"/{0}/c\\ {1}"'.format(find, replace)
        path = "{0}/config/restconf.json".format(Const.REMOTE_HC_DIR)
        command = "sed -i {0} {1}".format(argument, path)

        ssh = SSH()
        ssh.connect(node)
        (ret_code, _, stderr) = ssh.exec_command_sudo(command)
        if ret_code != 0:
            raise HoneycombError("Failed to modify configuration on "
                                 "node {0}, {1}".format(node, stderr))

    @staticmethod
    def configure_jvpp_timeout(node, timeout=10):
        """Configure timeout value for Java API commands Honeycomb sends to VPP.

        :param node: Information about a DUT node.
        :param timeout: Timeout value in seconds.
        :type node: dict
        :type timeout: int
        :raises HoneycombError: If the configuration could not be changed.
        """

        find = "jvpp-request-timeout"
        replace = '\\"jvpp-request-timeout\\": {0}'.format(timeout)

        argument = '"/{0}/c\\ {1}"'.format(find, replace)
        path = "{0}/config/jvpp.json".format(Const.REMOTE_HC_DIR)
        command = "sed -i {0} {1}".format(argument, path)

        ssh = SSH()
        ssh.connect(node)
        (ret_code, _, stderr) = ssh.exec_command_sudo(command)
        if ret_code != 0:
            raise HoneycombError("Failed to modify configuration on "
                                 "node {0}, {1}".format(node, stderr))

    @staticmethod
    def print_environment(nodes):
        """Print information about the nodes to log. The information is defined
        by commands in cmds tuple at the beginning of this method.

        :param nodes: List of DUT nodes to get information about.
        :type nodes: list
        """

        # TODO: When everything is set and running in VIRL env, transform this
        # method to a keyword checking the environment.

        cmds = ("uname -a",
                "df -lh",
                "echo $JAVA_HOME",
                "echo $PATH",
                "which java",
                "java -version",
                "dpkg --list | grep openjdk",
                "ls -la /opt/honeycomb")

        for node in nodes:
            if node['type'] == NodeType.DUT:
                logger.info("Checking node {} ...".format(node['host']))
                for cmd in cmds:
                    logger.info("Command: {}".format(cmd))
                    ssh = SSH()
                    ssh.connect(node)
                    ssh.exec_command_sudo(cmd)

    @staticmethod
    def print_ports(node):
        """Uses "sudo netstat -anp | grep java" to print port where a java
        application listens.

        :param node: Honeycomb node where we want to print the ports.
        :type node: dict
        """

        cmds = ("netstat -anp | grep java",
                "ps -ef | grep [h]oneycomb")

        logger.info("Checking node {} ...".format(node['host']))
        for cmd in cmds:
            logger.info("Command: {}".format(cmd))
            ssh = SSH()
            ssh.connect(node)
            ssh.exec_command_sudo(cmd)

    @staticmethod
    def configure_log_level(node, level):
        """Set Honeycomb logging to the specified level.

        :param node: Honeycomb node.
        :param level: Log level (INFO, DEBUG, TRACE).
        :type node: dict
        :type level: str
        """

        find = 'logger name=\\"io.fd\\"'
        replace = '<logger name=\\"io.fd\\" level=\\"{0}\\"/>'.format(level)

        argument = '"/{0}/c\\ {1}"'.format(find, replace)
        path = "{0}/config/logback.xml".format(Const.REMOTE_HC_DIR)
        command = "sed -i {0} {1}".format(argument, path)

        ssh = SSH()
        ssh.connect(node)
        (ret_code, _, stderr) = ssh.exec_command_sudo(command)
        if ret_code != 0:
            raise HoneycombError("Failed to modify configuration on "
                                 "node {0}, {1}".format(node, stderr))

    @staticmethod
    def manage_honeycomb_features(node, feature, disable=False):
        """Configure Honeycomb to use features that are disabled by default, or
        disable previously enabled features.

        .. note:: If the module is not enabled in VPP, Honeycomb will be
            unable to establish a VPP connection.

        :param node: Honeycomb node.
        :param feature: Feature to enable or disable.
        :param disable: Disable the specified feature instead of enabling it.
        :type node: dict
        :type feature: str
        :type disable: bool
        :raises HoneycombError: If the configuration could not be changed.
        """

        disabled_features = {
            "NSH": "io.fd.hc2vpp.vppnsh.impl.VppNshModule"
        }

        ssh = SSH()
        ssh.connect(node)

        if feature in disabled_features.keys():
            # uncomment by replacing the entire line
            find = replace = "{0}".format(disabled_features[feature])
            if disable:
                replace = "// {0}".format(find)

            argument = '"/{0}/c\\ {1}"'.format(find, replace)
            path = "{0}/modules/*module-config"\
                .format(Const.REMOTE_HC_DIR)
            command = "sed -i {0} {1}".format(argument, path)

            (ret_code, _, stderr) = ssh.exec_command_sudo(command)
            if ret_code != 0:
                raise HoneycombError("Failed to modify configuration on "
                                     "node {0}, {1}".format(node, stderr))
        else:
            raise HoneycombError(
                "Unrecognized feature {0}.".format(feature))

    @staticmethod
    def copy_java_libraries(node):
        """Copy Java libraries installed by vpp-api-java package to honeycomb
        lib folder.

        This is a (temporary?) workaround for jvpp version mismatches.

        :param node: Honeycomb node.
        :type node: dict
        """

        ssh = SSH()
        ssh.connect(node)
        (_, stdout, _) = ssh.exec_command_sudo(
            "ls /usr/share/java | grep ^jvpp-*")

        files = stdout.split("\n")[:-1]
        for item in files:
            # example filenames:
            # jvpp-registry-17.04.jar
            # jvpp-core-17.04.jar

            parts = item.split("-")
            version = "{0}-SNAPSHOT".format(parts[2][:5])
            artifact_id = "{0}-{1}".format(parts[0], parts[1])

            directory = "{0}/lib/io/fd/vpp/{1}/{2}".format(
                Const.REMOTE_HC_DIR, artifact_id, version)
            cmd = "sudo mkdir -p {0}; " \
                  "sudo cp /usr/share/java/{1} {0}/{2}-{3}.jar".format(
                      directory, item, artifact_id, version)

            (ret_code, _, stderr) = ssh.exec_command(cmd)
            if ret_code != 0:
                raise HoneycombError("Failed to copy JVPP libraries on "
                                     "node {0}, {1}".format(node, stderr))

    @staticmethod
    def copy_odl_client(node, odl_name, src_path, dst_path):
        """Copy ODL Client from source path to destination path.

        :param node: Honeycomb node.
        :param odl_name: Name of ODL client version to use.
        :param src_path: Source path where to find the ODL client.
        :param dst_path: Destination path.
        :type node: dict
        :type odl_name: str
        :type src_path: str
        :type dst_path: str
        :raises HoneycombError: If the operation fails.
        """

        ssh = SSH()
        ssh.connect(node)

        cmd = "sudo rm -rf {dst}/*karaf_{odl_name} && " \
              "cp -r {src}/*karaf_{odl_name}* {dst}".format(
                  src=src_path, odl_name=odl_name, dst=dst_path)

        ret_code, _, _ = ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret_code) != 0:
            raise HoneycombError(
                "Failed to copy ODL client on node {0}".format(node["host"]))

    @staticmethod
    def setup_odl_client(node, path):
        """Start ODL client on the specified node.

        Karaf should be located in the provided path, and VPP and Honeycomb
        should already be running, otherwise the start will fail.

        :param node: Node to start ODL client on.
        :param path: Path to ODL client on node.
        :type node: dict
        :type path: str
        :raises HoneycombError: If Honeycomb fails to start.
        """

        logger.console("\nStarting ODL client ...")
        ssh = SSH()
        ssh.connect(node)

        cmd = "{path}/*karaf*/bin/start clean".format(path=path)
        ret_code, _, _ = ssh.exec_command_sudo(cmd)

        if int(ret_code) != 0:
            raise HoneycombError('Node {0} failed to start ODL.'.
                                 format(node['host']))
        else:
            logger.info("Starting the ODL client on node {0} is "
                        "in progress ...".format(node['host']))

    @staticmethod
    def install_odl_features(node, path, *features):
        """Install required features on a running ODL client.

        :param node: Honeycomb node.
        :param path: Path to ODL client on node.
        :param features: Optional, list of additional features to install.
        :type node: dict
        :type path: str
        :type features: list
        """

        ssh = SSH()
        ssh.connect(node)

        cmd = "{path}/*karaf*/bin/client -u karaf feature:install " \
              "odl-restconf-all " \
              "odl-netconf-connector-all " \
              "odl-netconf-topology".format(path=path)
        for feature in features:
            cmd += " {0}".format(feature)

        ret_code, _, _ = ssh.exec_command_sudo(cmd, timeout=250)

        if int(ret_code) != 0:
            raise HoneycombError("Feature install did not succeed.")

    @staticmethod
    def check_odl_startup_state(node):
        """Check the status of ODL client startup.

        :param node: Honeycomb node.
        :type node: dict
        :returns: True when ODL is started.
        :rtype: bool
        :raises HoneycombError: When the response is not code 200: OK.
        """

        path = HcUtil.read_path_from_url_file(
            "odl_client/odl_netconf_connector")
        expected_status_codes = (HTTPCodes.UNAUTHORIZED,
                                 HTTPCodes.FORBIDDEN,
                                 HTTPCodes.NOT_FOUND,
                                 HTTPCodes.SERVICE_UNAVAILABLE,
                                 HTTPCodes.INTERNAL_SERVER_ERROR)

        status_code, _ = HTTPRequest.get(node, path, timeout=10,
                                         enable_logging=False)
        if status_code == HTTPCodes.OK:
            logger.info("ODL client on node {0} is up and running".
                        format(node['host']))
        elif status_code in expected_status_codes:
            if status_code == HTTPCodes.UNAUTHORIZED:
                logger.info('Unauthorized. If this triggers keyword '
                            'timeout, verify username and password.')
            raise HoneycombError('ODL client on node {0} running but '
                                 'not yet ready.'.format(node['host']),
                                 enable_logging=False)
        else:
            raise HoneycombError('Unexpected return code: {0}.'.
                                 format(status_code))
        return True

    @staticmethod
    def check_odl_shutdown_state(node):
        """Check the status of ODL client shutdown.

        :param node: Honeycomb node.
        :type node: dict
        :returns: True when ODL is stopped.
        :rtype: bool
        :raises HoneycombError: When the response is not code 200: OK.
        """

        cmd = "pgrep -f karaf"
        path = HcUtil.read_path_from_url_file(
            "odl_client/odl_netconf_connector")

        try:
            HTTPRequest.get(node, path, timeout=10, enable_logging=False)
            raise HoneycombError("ODL client is still running.")
        except HTTPRequestError:
            logger.debug("Connection refused, checking process state....")
            ssh = SSH()
            ssh.connect(node)
            ret_code, _, _ = ssh.exec_command(cmd)
            if ret_code == 0:
                raise HoneycombError("ODL client is still running.")

        return True

    @staticmethod
    def mount_honeycomb_on_odl(node):
        """Tell ODL client to mount Honeycomb instance over netconf.

        :param node: Honeycomb node.
        :type node: dict
        :raises HoneycombError: When the response is not code 200: OK.
        """

        path = HcUtil.read_path_from_url_file(
            "odl_client/odl_netconf_connector")

        url_file = "{0}/{1}".format(Const.RESOURCES_TPL_HC,
                                    "odl_client/mount_honeycomb.json")

        with open(url_file) as template:
            data = template.read()

        data = loads(data)

        status_code, _ = HTTPRequest.post(
            node,
            path,
            headers={"Content-Type": "application/json",
                     "Accept": "text/plain"},
            json=data,
            timeout=10,
            enable_logging=False)

        if status_code == HTTPCodes.OK:
            logger.info("ODL mount point configured successfully.")
        elif status_code == HTTPCodes.CONFLICT:
            logger.info("ODL mount point was already configured.")
        else:
            raise HoneycombError('Mount point configuration not successful')

    @staticmethod
    def stop_odl_client(node, path):
        """Stop ODL client service on the specified node.

        :param node: Node to stop ODL client on.
        :param path: Path to ODL client.
        :type node: dict
        :type path: str
        :raises HoneycombError: If ODL client fails to stop.
        """

        ssh = SSH()
        ssh.connect(node)

        cmd = "{0}/*karaf*/bin/stop".format(path)

        ret_code, _, _ = ssh.exec_command_sudo(cmd)
        if int(ret_code) != 0:
            logger.debug("ODL Client refused to shut down.")
            cmd = "pkill -f 'karaf'"
            (ret_code, _, _) = ssh.exec_command_sudo(cmd)
            if int(ret_code) != 0:
                raise HoneycombError('Node {0} failed to stop ODL.'.
                                     format(node['host']))

        logger.info("ODL client service stopped.")

    @staticmethod
    def stop_vpp_service(node):
        """Stop VPP service on the specified node.

        :param node: VPP node.
        :type node: dict
        :raises RuntimeError: If VPP fails to stop.
        """

        ssh = SSH()
        ssh.connect(node)
        cmd = "service vpp stop"
        ret_code, _, _ = ssh.exec_command_sudo(cmd, timeout=80)
        if int(ret_code) != 0:
            logger.debug("VPP service refused to shut down.")


class HoneycombStartupConfig(object):
    """Generator for Honeycomb startup configuration.
    """
    def __init__(self):
        """Initializer."""

        self.template = """
        #!/bin/sh -
        STATUS=100

        while [ $STATUS -eq 100 ]
        do
          {java_call} -jar $(dirname $0)/{jar_filename}
          STATUS=$?
          echo "Honeycomb exited with status: $STATUS"
          if [ $STATUS -eq 100 ]
          then
            echo "Restarting..."
          fi
        done
        """

        self.java_call = "{scheduler} {affinity} java{jit_mode}{params}"

        self.scheduler = ""
        self.core_affinity = ""
        self.jit_mode = ""
        self.params = ""
        self.numa = ""

        self.config = ""
        self.ssh = SSH()
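
    # The setters below fill in the pieces of java_call; e.g. with a FIFO
    # scheduler, cores 2-3 and server JIT mode the call renders roughly as:
    #
    #   chrt -f 99 taskset -c 2-3 java -server -jar $(dirname $0)/<jar>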

    def apply_config(self, node):
        """Generate configuration file /opt/honeycomb/honeycomb on the specified
         node.

         :param node: Honeycomb node.
         :type node: dict
         """

        self.ssh.connect(node)
        _, filename, _ = self.ssh.exec_command("ls /opt/honeycomb | grep .jar")

        java_call = self.java_call.format(scheduler=self.scheduler,
                                          affinity=self.core_affinity,
                                          jit_mode=self.jit_mode,
                                          params=self.params)
        self.config = self.template.format(java_call=java_call,
                                           jar_filename=filename)

        self.ssh.connect(node)
        cmd = "echo '{config}' > /tmp/honeycomb " \
              "&& chmod +x /tmp/honeycomb " \
              "&& sudo mv -f /tmp/honeycomb /opt/honeycomb".\
            format(config=self.config)
        self.ssh.exec_command(cmd)

    def set_cpu_scheduler(self, scheduler="FIFO"):
        """Use alternate CPU scheduler.

        Note: OTHER scheduler doesn't load-balance over isolcpus.

        :param scheduler: CPU scheduler to use.
        :type scheduler: str
        """

        schedulers = {"FIFO": "-f 99",  # First In, First Out
                      "RR": "-r 99",  # Round Robin
                      "OTHER": "-o",  # Ubuntu default
                     }
        self.scheduler = "chrt {0}".format(schedulers[scheduler])

    def set_cpu_core_affinity(self, low, high=None):
        """Set core affinity for the honeycomb process and subprocesses.

        :param low: Lowest core ID number.
        :param high: Highest core ID number. Leave empty to use a single core.
        :type low: int
        :type high: int
        """

        self.core_affinity = "taskset -c {low}-{high}".format(
            low=low, high=high if high else low)

    def set_jit_compiler_mode(self, jit_mode):
        """Set running mode for Java's JIT compiler.

        :param jit_mode: Desired JIT mode.
        :type jit_mode: str
        """

        modes = {"client": " -client",  # Default
                 "server": " -server",  # Higher performance but longer warmup
                 "classic": " -classic"  # Disables JIT compiler
                }

        self.jit_mode = modes[jit_mode]

    def set_memory_size(self, mem_min, mem_max=None):
        """Set minimum and maximum memory use for the JVM.

        :param mem_min: Minimum amount of memory (MB).
        :param mem_max: Maximum amount of memory (MB). Default is 4 times the
            minimum value.
        :type mem_min: int
        :type mem_max: int
        """

        self.params += " -Xms{min}m -Xmx{max}m".format(
            min=mem_min, max=mem_max if mem_max else mem_min*4)

    def set_metaspace_size(self, mem_min, mem_max=None):
        """Set minimum and maximum memory used for class metadata in the JVM.

        :param mem_min: Minimum metaspace size (MB).
        :param mem_max: Maximum metaspace size (MB). Default is 4 times the
            minimum value.
        :type mem_min: int
        :type mem_max: int
        """

        self.params += " -XX:MetaspaceSize={min}m " \
                       "-XX:MaxMetaspaceSize={max}m".format(
                           min=mem_min, max=mem_max if mem_max else mem_min*4)

    def set_numa_optimization(self):
        """Use optimization of memory use and garbage collection for NUMA
        architectures."""

        self.params += " -XX:+UseNUMA -XX:+UseParallelGC"

    def set_ssh_security_provider(self):
        """Disables BouncyCastle for SSHD."""
        # Workaround for issue described in:
        # https://wiki.fd.io/view/Honeycomb/Releases/1609/Honeycomb_and_ODL

        self.params += " -Dorg.apache.sshd.registerBouncyCastle=false"