/* * Copyright (c) 2016 Cisco and/or its affiliates. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #include #include #include #include #include #include #include #define MFIB_TEST_I(_cond, _comment, _args...) \ ({ \ int _evald = (_cond); \ if (!(_evald)) { \ fformat(stderr, "FAIL:%d: " _comment "\n", \ __LINE__, ##_args); \ res = 1; \ } \ res; \ }) #define MFIB_TEST(_cond, _comment, _args...) \ { \ if (MFIB_TEST_I(_cond, _comment, ##_args)) { \ return 1; \ ASSERT(!("FAIL: " _comment)); \ } \ } #define MFIB_TEST_NS(_cond) \ { \ if (MFIB_TEST_I(_cond, "")) { \ return 1; \ ASSERT(!("FAIL: ")); \ } \ } /** * A 'i'm not fussed is this is not efficient' store of test data */ typedef struct test_main_t_ { /** * HW if indicies */ u32 hw_if_indicies[4]; /** * HW interfaces */ vnet_hw_interface_t * hw[4]; } test_main_t; static test_main_t test_main; /* fake ethernet device class, distinct from "fake-ethX" */ static u8 * format_test_interface_name (u8 * s, va_list * args) { u32 dev_instance = va_arg (*args, u32); return format (s, "test-eth%d", dev_instance); } static uword dummy_interface_tx (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { clib_warning ("you shouldn't be here, leaking buffers..."); return frame->n_vectors; } static clib_error_t * test_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags) { u32 hw_flags = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ? 
VNET_HW_INTERFACE_FLAG_LINK_UP : 0; vnet_hw_interface_set_flags (vnm, hw_if_index, hw_flags); return 0; } VNET_DEVICE_CLASS (test_interface_device_class,static) = { .name = "Test interface", .format_device_name = format_test_interface_name, .tx_function = dummy_interface_tx, .admin_up_down_function = test_interface_admin_up_down, }; static u8 *hw_address; static int mfib_test_mk_intf (u32 ninterfaces) { clib_error_t * error = NULL; test_main_t *tm = &test_main; u8 byte; int res; u32 i; res = 0; ASSERT(ninterfaces <= ARRAY_LEN(tm->hw_if_indicies)); for (i=0; i<6; i++) { byte = 0xd0+i; vec_add1(hw_address, byte); } for (i = 0; i < ninterfaces; i++) { hw_address[5] = i; error = ethernet_register_interface(vnet_get_main(), test_interface_device_class.index, i /* instance */, hw_address, &tm->hw_if_indicies[i], /* flag change */ 0); MFIB_TEST((NULL == error), "ADD interface %d", i); error = vnet_hw_interface_set_flags(vnet_get_main(), tm->hw_if_indicies[i], VNET_HW_INTERFACE_FLAG_LINK_UP); tm->hw[i] = vnet_get_hw_interface(vnet_get_main(), tm->hw_if_indicies[i]); vec_validate (ip4_main.fib_index_by_sw_if_index, tm->hw[i]->sw_if_index); vec_validate (ip6_main.fib_index_by_sw_if_index, tm->hw[i]->sw_if_index); ip4_main.fib_index_by_sw_if_index[tm->hw[i]->sw_if_index] = 0; ip6_main.fib_index_by_sw_if_index[tm->hw[i]->sw_if_index] = 0; vec_validate (ip4_main.mfib_index_by_sw_if_index, tm->hw[i]->sw_if_index); vec_validate (ip6_main.mfib_index_by_sw_if_index, tm->hw[i]->sw_if_index); ip4_main.mfib_index_by_sw_if_index[tm->hw[i]->sw_if_index] = 0; ip6_main.mfib_index_by_sw_if_index[tm->hw[i]->sw_if_index] = 0; error = vnet_sw_interface_set_flags(vnet_get_main(), tm->hw[i]->sw_if_index, VNET_SW_INTERFACE_FLAG_ADMIN_UP); MFIB_TEST((NULL == error), "UP interface %d", i); } /* * re-eval after the inevitable realloc */ for (i = 0; i < ninterfaces; i++) { tm->hw[i] = vnet_get_hw_interface(vnet_get_main(), tm->hw_if_indicies[i]); } return (res); } #define MFIB_TEST_REP(_cond, 
_comment, _args...) \ { \ if (MFIB_TEST_I(_cond, _comment, ##_args)) { \ return (1); \ } \ } static int mfib_test_validate_rep_v (const replicate_t *rep, u16 n_buckets, va_list *ap) { const dpo_id_t *dpo; adj_index_t ai; dpo_type_t dt; int bucket; int res; res = 0; MFIB_TEST_REP((n_buckets == rep->rep_n_buckets), "n_buckets = %d", rep->rep_n_buckets); for (bucket = 0; bucket < n_buckets; bucket++) { dt = va_arg(*ap, int); // type promotion ai = va_arg(*ap, adj_index_t); dpo = replicate_get_bucket_i(rep, bucket); MFIB_TEST_REP((dt == dpo->dpoi_type), "bucket %d stacks on %U", bucket, format_dpo_type, dpo->dpoi_type); if (DPO_RECEIVE != dt) { MFIB_TEST_REP((ai == dpo->dpoi_index), "bucket %d [exp:%d] stacks on %U", bucket, ai, format_dpo_id, dpo, 0); } } return (res); } static int mfib_test_entry (fib_node_index_t fei, mfib_entry_flags_t eflags, int n_buckets, ...) { const mfib_prefix_t *pfx; const mfib_entry_t *mfe; const replicate_t *rep; va_list ap; int res; res = 0; mfe = mfib_entry_get(fei); pfx = mfib_entry_get_prefix(fei); MFIB_TEST_REP((eflags == mfe->mfe_flags), "%U has %U expect %U", format_mfib_prefix, &pfx, format_mfib_entry_flags, mfe->mfe_flags, format_mfib_entry_flags, eflags); if (0 == n_buckets) { MFIB_TEST_REP((DPO_DROP == mfe->mfe_rep.dpoi_type), "%U links to %U", format_mfib_prefix, &pfx, format_dpo_id, &mfe->mfe_rep, 0); } else { dpo_id_t tmp = DPO_INVALID; mfib_entry_contribute_forwarding( fei, mfib_forw_chain_type_from_fib_proto(pfx->fp_proto), MFIB_ENTRY_FWD_FLAG_NONE, &tmp); rep = replicate_get(tmp.dpoi_index); MFIB_TEST_REP((DPO_REPLICATE == tmp.dpoi_type), "%U links to %U", format_mfib_prefix, &pfx, format_dpo_type, tmp.dpoi_type); va_start(ap, n_buckets); res = mfib_test_validate_rep_v(rep, n_buckets, &ap); va_end(ap); dpo_reset(&tmp); } return (res); } static int mfib_test_entry_itf (fib_node_index_t fei, u32 sw_if_index, mfib_itf_flags_t flags) { const mfib_prefix_t *pfx; const mfib_entry_t *mfe; const mfib_itf_t *mfi; int res; res = 0; 
mfe = mfib_entry_get(fei); mfi = mfib_entry_get_itf(mfe, sw_if_index); pfx = mfib_entry_get_prefix(fei); MFIB_TEST_REP((NULL != mfi), "%U has interface %d", format_mfib_prefix, &pfx, sw_if_index); MFIB_TEST_REP((flags == mfi->mfi_flags), "%U interface %d has flags %U expect %U", format_mfib_prefix, &pfx, sw_if_index, format_mfib_itf_flags, flags, format_mfib_itf_flags, mfi->mfi_flags); return (res); } static int mfib_test_entry_no_itf (fib_node_index_t fei, u32 sw_if_index) { const mfib_prefix_t *pfx; const mfib_entry_t *mfe; const mfib_itf_t *mfi; int res; res = 0; mfe = mfib_entry_get(fei); mfi = mfib_entry_get_itf(mfe, sw_if_index); pfx = mfib_entry_get_prefix(fei); MFIB_TEST_REP((NULL == mfi), "%U has no interface %d", format_mfib_prefix, pfx, sw_if_index); return (res); } static int mfib_test_i (fib_protocol_t PROTO, vnet_link_t LINKT, const mfib_prefix_t *pfx_no_forward, const mfib_prefix_t *pfx_s_g, const mfib_prefix_t *pfx_star_g_1, const mfib_prefix_t *pfx_star_g_2, const mfib_prefix_t *pfx_star_g_3, const mfib_prefix_t *pfx_star_g_slash_m, const fib_prefix_t *pfx_itf, const ip46_address_t *addr_nbr1, const ip46_address_t *addr_nbr2) { fib_node_index_t mfei, mfei_dflt, mfei_no_f, mfei_s_g, mfei_g_1, mfei_g_2, mfei_g_3, mfei_g_m; u32 fib_index, n_entries, n_itfs, n_reps, n_pls; fib_node_index_t ai_1, ai_2, ai_3, ai_nbr1, ai_nbr2; test_main_t *tm; int res; mfib_prefix_t all_1s; clib_memset(&all_1s, 0xfd, sizeof(all_1s)); res = 0; n_entries = pool_elts(mfib_entry_pool); n_itfs = pool_elts(mfib_itf_pool); n_reps = pool_elts(replicate_pool); n_pls = fib_path_list_pool_size(); tm = &test_main; ai_1 = adj_mcast_add_or_lock(PROTO, LINKT, tm->hw[1]->sw_if_index); ai_2 = adj_mcast_add_or_lock(PROTO, LINKT, tm->hw[2]->sw_if_index); ai_3 = adj_mcast_add_or_lock(PROTO, LINKT, tm->hw[3]->sw_if_index); ai_nbr1 = adj_nbr_add_or_lock(PROTO, LINKT, addr_nbr1, tm->hw[0]->sw_if_index); ai_nbr2 = adj_nbr_add_or_lock(PROTO, LINKT, addr_nbr2, tm->hw[0]->sw_if_index); MFIB_TEST(3 
== adj_mcast_db_size(), "3 MCAST adjs"); /* Find or create FIB table 11 */ fib_index = mfib_table_find_or_create_and_lock(PROTO, 11, MFIB_SOURCE_API); fib_table_entry_update_one_path(0, pfx_itf, FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_ATTACHED), DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index 1, // weight NULL, FIB_ROUTE_PATH_FLAG_NONE); mfib_prefix_t pfx_dft = { .fp_len = 0, .fp_proto = PROTO, }; mfei_dflt = mfib_table_lookup_exact_match(fib_index, &pfx_dft); MFIB_TEST(FIB_NODE_INDEX_INVALID != mfei_dflt, "(*,*) presnet"); MFIB_TEST(!mfib_test_entry(mfei_dflt, MFIB_ENTRY_FLAG_DROP, 0), "(*,*) no replcaitions"); MFIB_TEST(FIB_NODE_INDEX_INVALID != mfei_dflt, "(*,*) presnet"); MFIB_TEST(!mfib_test_entry(mfei_dflt, MFIB_ENTRY_FLAG_DROP, 0), "(*,*) no replcaitions"); fib_route_path_t path_via_if0 = { .frp_proto = fib_proto_to_dpo(PROTO), .frp_addr = zero_addr, .frp_sw_if_index = tm->hw[0]->sw_if_index, .frp_fib_index = ~0, .frp_weight = 0, .frp_flags = 0, }; mfib_table_entry_path_update(fib_index, pfx_no_forward, MFIB_SOURCE_API, &path_via_if0, MFIB_ITF_FLAG_ACCEPT); mfei_no_f = mfib_table_lookup_exact_match(fib_index, pfx_no_forward); MFIB_TEST(!mfib_test_entry(mfei_no_f, MFIB_ENTRY_FLAG_NONE, 0), "%U no replcaitions", format_mfib_prefix, pfx_no_forward); MFIB_TEST_NS(!mfib_test_entry_itf(mfei_no_f, tm->hw[0]->sw_if_index, MFIB_ITF_FLAG_ACCEPT)); fib_route_path_t path_via_if1 = { .frp_proto = fib_proto_to_dpo(PROTO), .frp_addr = zero_addr, .frp_sw_if_index = tm->hw[1]->sw_if_index, .frp_fib_index = ~0, .frp_weight = 0, .frp_flags = 0, }; fib_route_path_t path_via_if2 = { .frp_proto = fib_proto_to_dpo(PROTO), .frp_addr = zero_addr, .frp_sw_if_index = tm->hw[2]->sw_if_index, .frp_fib_index = ~0, .frp_weight = 0, .frp_flags = 0, }; fib_route_path_t path_via_if3 = { .frp_proto = fib_proto_to_dpo(PROTO), .frp_addr = zero_addr, .frp_sw_if_index = tm->hw[3]->sw_if_index, .frp_fib_index = ~0, .frp_weight = 0, .frp_flags = 0, }; 
fib_route_path_t path_for_us = { .frp_proto = fib_proto_to_dpo(PROTO), .frp_addr = zero_addr, .frp_sw_if_index = 0xffffffff, .frp_fib_index = ~0, .frp_weight = 0, .frp_flags = FIB_ROUTE_PATH_LOCAL, }; /* * An (S,G) with 1 accepting and 3 forwarding paths */ mfib_table_entry_path_update(fib_index, pfx_s_g, MFIB_SOURCE_API, &path_via_if0, MFIB_ITF_FLAG_ACCEPT); mfib_table_entry_path_update(fib_index, pfx_s_g, MFIB_SOURCE_API, &path_via_if1, MFIB_ITF_FLAG_FORWARD); mfib_table_entry_path_update(fib_index, pfx_s_g, MFIB_SOURCE_API, &path_via_if2, MFIB_ITF_FLAG_FORWARD); mfib_table_entry_path_update(fib_index, pfx_s_g, MFIB_SOURCE_API, &path_via_if3, (MFIB_ITF_FLAG_FORWARD | MFIB_ITF_FLAG_NEGATE_SIGNAL)); mfei_s_g = mfib_table_lookup_exact_match(fib_index, pfx_s_g); MFIB_TEST(FIB_NODE_INDEX_INVALID != mfei_s_g, "%U present", format_mfib_prefix, pfx_s_g); MFIB_TEST(!mfib_test_entry(mfei_s_g, MFIB_ENTRY_FLAG_NONE, 3, DPO_ADJACENCY_MCAST, ai_1, DPO_ADJACENCY_MCAST, ai_2, DPO_ADJACENCY_MCAST, ai_3), "%U replicate ok", format_mfib_prefix, pfx_s_g); MFIB_TEST_NS(!mfib_test_entry_itf(mfei_s_g, tm->hw[0]->sw_if_index, MFIB_ITF_FLAG_ACCEPT)); MFIB_TEST_NS(!mfib_test_entry_itf(mfei_s_g, tm->hw[1]->sw_if_index, MFIB_ITF_FLAG_FORWARD)); MFIB_TEST_NS(!mfib_test_entry_itf(mfei_s_g, tm->hw[2]->sw_if_index, MFIB_ITF_FLAG_FORWARD)); MFIB_TEST_NS(!mfib_test_entry_itf(mfei_s_g, tm->hw[3]->sw_if_index, (MFIB_ITF_FLAG_FORWARD | MFIB_ITF_FLAG_NEGATE_SIGNAL))); /* * A (*,G), which the same G as the (S,G). * different paths. test our LPM. 
*/ mfei_g_1 = mfib_table_entry_path_update(fib_index, pfx_star_g_1, MFIB_SOURCE_API, &path_via_if0, MFIB_ITF_FLAG_ACCEPT); mfib_table_entry_path_update(fib_index, pfx_star_g_1, MFIB_SOURCE_API, &path_via_if1, MFIB_ITF_FLAG_FORWARD); /* * test we find the *,G and S,G via LPM and exact matches */ mfei = mfib_table_lookup_exact_match(fib_index, pfx_star_g_1); MFIB_TEST(mfei == mfei_g_1, "%U found via exact match", format_mfib_prefix, pfx_star_g_1); MFIB_TEST(!mfib_test_entry(mfei, MFIB_ENTRY_FLAG_NONE, 1, DPO_ADJACENCY_MCAST, ai_1), "%U replicate ok", format_mfib_prefix, pfx_star_g_1); mfei = mfib_table_lookup(fib_index, pfx_star_g_1); MFIB_TEST(mfei == mfei_g_1, "[e:%d a:%d] %U found via LP match", mfei, mfei_g_1, format_mfib_prefix, pfx_star_g_1); MFIB_TEST(!mfib_test_entry(mfei, MFIB_ENTRY_FLAG_NONE, 1, DPO_ADJACENCY_MCAST, ai_1), "%U replicate ok", format_mfib_prefix, pfx_star_g_1); mfei = mfib_table_lookup_exact_match(fib_index, pfx_s_g); MFIB_TEST(mfei == mfei_s_g, "%U found via exact match", format_mfib_prefix, pfx_s_g); MFIB_TEST(!mfib_test_entry(mfei, MFIB_ENTRY_FLAG_NONE, 3, DPO_ADJACENCY_MCAST, ai_1, DPO_ADJACENCY_MCAST, ai_2, DPO_ADJACENCY_MCAST, ai_3), "%U replicate OK", format_mfib_prefix, pfx_s_g); mfei = mfib_table_lookup(fib_index, pfx_s_g); MFIB_TEST(mfei == mfei_s_g, "%U found via LP match", format_mfib_prefix, pfx_s_g); MFIB_TEST(!mfib_test_entry(mfei, MFIB_ENTRY_FLAG_NONE, 3, DPO_ADJACENCY_MCAST, ai_1, DPO_ADJACENCY_MCAST, ai_2, DPO_ADJACENCY_MCAST, ai_3), "%U replicate OK", format_mfib_prefix, pfx_s_g); /* * A (*,G/m), which the same root G as the (*,G). * different paths. test our LPM. 
*/ mfei_g_m = mfib_table_entry_path_update(fib_index, pfx_star_g_slash_m, MFIB_SOURCE_API, &path_via_if2, MFIB_ITF_FLAG_ACCEPT); mfib_table_entry_path_update(fib_index, pfx_star_g_slash_m, MFIB_SOURCE_API, &path_via_if3, MFIB_ITF_FLAG_FORWARD); /* * test we find the (*,G/m), (*,G) and (S,G) via LPM and exact matches */ mfei = mfib_table_lookup_exact_match(fib_index, pfx_star_g_1); MFIB_TEST((mfei_g_1 == mfei), "%U found via DP LPM: %d", format_mfib_prefix, pfx_star_g_1, mfei); MFIB_TEST(!mfib_test_entry(mfei, MFIB_ENTRY_FLAG_NONE, 1, DPO_ADJACENCY_MCAST, ai_1), "%U replicate ok", format_mfib_prefix, pfx_star_g_1); mfei = mfib_table_lookup(fib_index, pfx_star_g_1); MFIB_TEST(!mfib_test_entry(mfei, MFIB_ENTRY_FLAG_NONE, 1, DPO_ADJACENCY_MCAST, ai_1), "%U replicate ok", format_mfib_prefix, pfx_star_g_1); mfei = mfib_table_lookup_exact_match(fib_index, pfx_s_g); MFIB_TEST(!mfib_test_entry(mfei, MFIB_ENTRY_FLAG_NONE, 3, DPO_ADJACENCY_MCAST, ai_1, DPO_ADJACENCY_MCAST, ai_2, DPO_ADJACENCY_MCAST, ai_3), "%U replicate OK", format_mfib_prefix, pfx_s_g); mfei = mfib_table_lookup(fib_index, pfx_s_g); MFIB_TEST(!mfib_test_entry(mfei, MFIB_ENTRY_FLAG_NONE, 3, DPO_ADJACENCY_MCAST, ai_1, DPO_ADJACENCY_MCAST, ai_2, DPO_ADJACENCY_MCAST, ai_3), "%U replicate OK", format_mfib_prefix, pfx_s_g); mfei = mfib_table_lookup_exact_match(fib_index, pfx_star_g_slash_m); MFIB_TEST(mfei = mfei_g_m, "%U Found via exact match", format_mfib_prefix, pfx_star_g_slash_m); MFIB_TEST(!mfib_test_entry(mfei, MFIB_ENTRY_FLAG_NONE, 1, DPO_ADJACENCY_MCAST, ai_3), "%U replicate OK", format_mfib_prefix, pfx_star_g_slash_m); MFIB_TEST(mfei_g_m == mfib_table_lookup(fib_index, pfx_star_g_slash_m), "%U found via LPM", format_mfib_prefix, pfx_star_g_slash_m); /* * Add a for-us path */ mfei = mfib_table_entry_path_update(fib_index, pfx_s_g, MFIB_SOURCE_API, &path_for_us, MFIB_ITF_FLAG_FORWARD); MFIB_TEST(!mfib_test_entry(mfei, MFIB_ENTRY_FLAG_NONE, 4, DPO_ADJACENCY_MCAST, ai_1, DPO_ADJACENCY_MCAST, ai_2, 
DPO_ADJACENCY_MCAST, ai_3, DPO_RECEIVE, 0), "%U replicate OK", format_mfib_prefix, pfx_s_g); /* * remove a for-us path */ mfib_table_entry_path_remove(fib_index, pfx_s_g, MFIB_SOURCE_API, &path_for_us); MFIB_TEST(!mfib_test_entry(mfei, MFIB_ENTRY_FLAG_NONE, 3, DPO_ADJACENCY_MCAST, ai_1, DPO_ADJACENCY_MCAST, ai_2, DPO_ADJACENCY_MCAST, ai_3), "%U replicate OK", format_mfib_prefix, pfx_s_g); /* * update an existing forwarding path to be only accepting * - expect it to be removed from the replication set. */ mfib_table_entry_path_update(fib_index, pfx_s_g, MFIB_SOURCE_API, &path_via_if3, MFIB_ITF_FLAG_ACCEPT); MFIB_TEST(!mfib_test_entry(mfei, MFIB_ENTRY_FLAG_NONE, 2, DPO_ADJACENCY_MCAST, ai_1, DPO_ADJACENCY_MCAST, ai_2), "%U replicate OK", format_mfib_prefix, pfx_s_g); MFIB_TEST_NS(!mfib_test_entry_itf(mfei, tm->hw[0]->sw_if_index, MFIB_ITF_FLAG_ACCEPT)); MFIB_TEST_NS(!mfib_test_entry_itf(mfei, tm->hw[1]->sw_if_index, MFIB_ITF_FLAG_FORWARD)); MFIB_TEST_NS(!mfib_test_entry_itf(mfei, tm->hw[2]->sw_if_index, MFIB_ITF_FLAG_FORWARD)); MFIB_TEST_NS(!mfib_test_entry_itf(mfei, tm->hw[3]->sw_if_index, MFIB_ITF_FLAG_ACCEPT)); /* * Make the path forwarding again * - expect it to be added back to the replication set */ mfib_table_entry_path_update(fib_index, pfx_s_g, MFIB_SOURCE_API, &path_via_if3, (MFIB_ITF_FLAG_FORWARD | MFIB_ITF_FLAG_ACCEPT | MFIB_ITF_FLAG_NEGATE_SIGNAL)); mfei = mfib_table_lookup_exact_match(fib_index, pfx_s_g); MFIB_TEST(!mfib_test_entry(mfei, MFIB_ENTRY_FLAG_NONE, 3, DPO_ADJACENCY_MCAST, ai_1, DPO_ADJACENCY_MCAST, ai_2, DPO_ADJACENCY_MCAST, ai_3), "%U replicate OK", format_mfib_prefix, pfx_s_g); MFIB_TEST_NS(!mfib_test_entry_itf(mfei, tm->hw[0]->sw_if_index, MFIB_ITF_FLAG_ACCEPT)); MFIB_TEST_NS(!mfib_test_entry_itf(mfei, tm->hw[1]->sw_if_index, MFIB_ITF_FLAG_FORWARD)); MFIB_TEST_NS(!mfib_test_entry_itf(mfei, tm->hw[2]->sw_if_index, MFIB_ITF_FLAG_FORWARD)); MFIB_TEST_NS(!mfib_test_entry_itf(mfei, tm->hw[3]->sw_if_index, (MFIB_ITF_FLAG_FORWARD | 
MFIB_ITF_FLAG_ACCEPT | MFIB_ITF_FLAG_NEGATE_SIGNAL))); /* * update flags on the entry */ mfib_table_entry_update(fib_index, pfx_s_g, MFIB_SOURCE_API, MFIB_RPF_ID_NONE, MFIB_ENTRY_FLAG_SIGNAL); MFIB_TEST(!mfib_test_entry(mfei, MFIB_ENTRY_FLAG_SIGNAL, 3, DPO_ADJACENCY_MCAST, ai_1, DPO_ADJACENCY_MCAST, ai_2, DPO_ADJACENCY_MCAST, ai_3), "%U replicate OK", format_mfib_prefix, pfx_s_g); /* * remove paths */ mfib_table_entry_path_remove(fib_index, pfx_s_g, MFIB_SOURCE_API, &path_via_if3); MFIB_TEST(!mfib_test_entry(mfei, MFIB_ENTRY_FLAG
# Copyright (c) 2016 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""QEMU utilities library."""

from time import time, sleep
import json
import logging

from vpplib.VPPUtil import VPPUtil
from vpplib.constants import Constants


class NodeType(object):
    """Node type constants used in topology dictionaries."""

    #: Device Under Test (this node has VPP running on it).
    DUT = 'DUT'
    #: Traffic Generator (this node has traffic generator on it).
    TG = 'TG'
    #: Virtual Machine (this node running on DUT node).
    VM = 'VM'


class QemuUtils(object):
    """QEMU utilities."""

    # noinspection PyDictCreation
    def __init__(self, qemu_id=1):
        """Set up defaults for one QEMU instance.

        :param qemu_id: Instance number; used to derive per-instance socket
            paths, PID file, forwarded SSH port, serial port and the MAC
            octet of the management NIC.
        :type qemu_id: int
        """
        self._qemu_id = qemu_id
        # Path to QEMU binary.
        self._qemu_bin = '/usr/bin/qemu-system-x86_64'
        # QEMU Machine Protocol socket.
        self._qmp_sock = '/tmp/qmp{0}.sock'.format(self._qemu_id)
        # QEMU Guest Agent socket.
        self._qga_sock = '/tmp/qga{0}.sock'.format(self._qemu_id)
        # QEMU PID file.
        self._pid_file = '/tmp/qemu{0}.pid'.format(self._qemu_id)
        self._qemu_opt = {
            # Default 1 CPU.
            'smp': '-smp 1,sockets=1,cores=1,threads=1',
            # Daemonize the QEMU process after initialization. Default one
            # management interface.
            'options': '-cpu host -daemonize -enable-kvm '
                       '-machine pc,accel=kvm,usb=off,mem-merge=off '
                       '-net nic,macaddr=52:54:00:00:{0:02x}:ff -balloon none'
                       .format(self._qemu_id),
            # Host port forwarded to the guest SSH port.
            'ssh_fwd_port': 10021 + qemu_id,
            # Default serial console port.
            'serial_port': 4555 + qemu_id,
            # Default 512MB virtual RAM.
            'mem_size': 512,
            # Default huge page mount point, required for Vhost-user
            # interfaces.
            'huge_mnt': '/mnt/huge',
            # Default do not allocate huge pages.
            'huge_allocate': False,
            # Default image for CSIT virl setup.
            'disk_image': '/var/lib/vm/vhost-nested.img',
        }
        # VM node info dict.
        self._vm_info = {
            'type': NodeType.VM,
            'port': self._qemu_opt['ssh_fwd_port'],
            'username': 'cisco',
            'password': 'cisco',
            'interfaces': {},
        }
        # Virtio queue count.
        self._qemu_opt['queues'] = 1
        self._vhost_id = 0
        self._ssh = None
        self._node = None
        # Sockets to remove when the VM is torn down.
        self._socks = [self._qmp_sock, self._qga_sock]

    def qemu_set_bin(self, path):
        """Set binary path for QEMU.

        :param path: Absolute path in filesystem.
        :type path: str
        """
        self._qemu_bin = path

    def qemu_set_smp(self, cpus, cores, threads, sockets):
        """Set SMP option for QEMU.

        :param cpus: Number of CPUs.
        :param cores: Number of CPU cores on one socket.
        :param threads: Number of threads on one CPU core.
        :param sockets: Number of discrete sockets in the system.
        :type cpus: int
        :type cores: int
        :type threads: int
        :type sockets: int
        """
        self._qemu_opt['smp'] = '-smp {},cores={},threads={},sockets={}'.format(
            cpus, cores, threads, sockets)

    def qemu_set_ssh_fwd_port(self, fwd_port):
        """Set host port for guest SSH forwarding.

        :param fwd_port: Port number on host for guest SSH forwarding.
        :type fwd_port: int
        """
        self._qemu_opt['ssh_fwd_port'] = fwd_port
        self._vm_info['port'] = fwd_port

    def qemu_set_serial_port(self, port):
        """Set serial console port.

        :param port: Serial console port.
        :type port: int
        """
        self._qemu_opt['serial_port'] = port

    def qemu_set_mem_size(self, mem_size):
        """Set virtual RAM size.

        :param mem_size: RAM size in Mega Bytes.
        :type mem_size: int
        """
        self._qemu_opt['mem_size'] = int(mem_size)

    def qemu_set_huge_mnt(self, huge_mnt):
        """Set hugefile mount point.

        :param huge_mnt: System hugefile mount point.
        :type huge_mnt: int
        """
        self._qemu_opt['huge_mnt'] = huge_mnt

    def qemu_set_huge_allocate(self):
        """Set flag to allocate more huge pages if needed."""
        self._qemu_opt['huge_allocate'] = True

    def qemu_set_disk_image(self, disk_image):
        """Set disk image.

        :param disk_image: Path of the disk image.
        :type disk_image: str
        """
        self._qemu_opt['disk_image'] = disk_image

    def qemu_set_affinity(self, *host_cpus):
        """Set qemu affinity by getting thread PIDs via QMP and taskset to list
        of CPU cores.

        :param host_cpus: List of CPU cores.
        :type host_cpus: list
        """
        qemu_cpus = self._qemu_qmp_exec('query-cpus')['return']

        if len(qemu_cpus) != len(host_cpus):
            logging.debug('Host CPU count {0}, Qemu Thread count {1}'.format(
                len(host_cpus), len(qemu_cpus)))
            raise ValueError('Host CPU count must match Qemu Thread count')

        for qemu_cpu, host_cpu in zip(qemu_cpus, host_cpus):
            cmd = 'taskset -pc {0} {1}'.format(host_cpu, qemu_cpu['thread_id'])
            (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
            if int(ret_code) != 0:
                logging.debug('Set affinity failed {0}'.format(stderr))
                raise RuntimeError('Set affinity failed on {0}'.format(
                    self._node['host']))

    def qemu_set_scheduler_policy(self):
        """Set scheduler policy to SCHED_RR with priority 1 for all Qemu CPU
        processes.

       :raises RuntimeError: Set scheduler policy failed.
        """
        qemu_cpus = self._qemu_qmp_exec('query-cpus')['return']

        for qemu_cpu in qemu_cpus:
            cmd = 'chrt -r -p 1 {0}'.format(qemu_cpu['thread_id'])
            (ret_code, _, stderr) = self._ssh.exec_command_sudo(cmd)
            if int(ret_code) != 0:
                logging.debug('Set SCHED_RR failed {0}'.format(stderr))
                raise RuntimeError('Set SCHED_RR failed on {0}'.format(
                    self._node['host']))

    def qemu_set_node(self, node):
        """Set node to run QEMU on.

        :param node: Node to run QEMU on.
        :type node: dict
        """
        self._node = node
        self._vm_info['host'] = node['host']

    def qemu_add_vhost_user_if(self, socket, server=True, mac=None):
        """Add Vhost-user interface.

        :param socket: Path of the unix socket.
        :param server: If True the socket shall be a listening socket.
        :param mac: Vhost-user interface MAC address (optional, otherwise is
            used auto-generated MAC 52:54:00:00:xx:yy).
        :type socket: str
        :type server: bool
        :type mac: str
        """
        self._vhost_id += 1
        # Create unix socket character device.
        chardev = ' -chardev socket,id=char{0},path={1}'.format(self._vhost_id,
                                                                socket)
        if server is True:
            chardev += ',server'
        self._qemu_opt['options'] += chardev
        # Create Vhost-user network backend.
        netdev = (' -netdev vhost-user,id=vhost{0},chardev=char{0},queues={1}'
                  .format(self._vhost_id, self._qemu_opt['queues']))
        self._qemu_opt['options'] += netdev
        # If MAC is not specified use auto-generated MAC address based on
        # template 52:54:00:00:<qemu_id>:<vhost_id>, e.g. vhost1 MAC of QEMU
        #  with ID 1 is 52:54:00:00:01:01
        if mac is None:
            mac = '52:54:00:00:{0:02x}:{1:02x}'.\
                format(self._qemu_id, self._vhost_id)
        extend_options = 'mq=on,csum=off,gso=off,guest_tso4=off,'\
            'guest_tso6=off,guest_ecn=off,mrg_rxbuf=off'
        # Create Virtio network device.
        device = ' -device virtio-net-pci,netdev=vhost{0},mac={1},{2}'.format(
            self._vhost_id, mac, extend_options)
        self._qemu_opt['options'] += device
        # Add interface MAC and socket to the node dict
        if_data = {'mac_address': mac, 'socket': socket}
        if_name = 'vhost{}'.format(self._vhost_id)
        self._vm_info['interfaces'][if_name] = if_data
        # Add socket to the socket list
        self._socks.append(socket)

    def _qemu_qmp_exec(self, cmd):
        """Execute QMP command.

        QMP is JSON based protocol which allows to control QEMU instance.

        :param cmd: QMP command to execute.
        :type cmd: str
        :return: Command output in python representation of JSON format. The
            { "return": {} } response is QMP's success response. An error
            response will contain the "error" keyword instead of "return".
        """
        # To enter command mode, the qmp_capabilities command must be issued.
        qmp_cmd = 'echo "{ \\"execute\\": \\"qmp_capabilities\\" }' \
                  '{ \\"execute\\": \\"' + cmd + \
                  '\\" }" | sudo -S socat - UNIX-CONNECT:' + self._qmp_sock

        (ret_code, stdout, stderr) = self._ssh.exec_command(qmp_cmd)
        if int(ret_code) != 0:
            logging.debug('QMP execute failed {0}'.format(stderr))
            raise RuntimeError('QMP execute "{0}"'
                               ' failed on {1}'.format(cmd, self._node['host']))
        logging.debug(stdout)
        # Skip capab