path: root/src/vpp-api/vom/tap_interface.hpp
/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __VOM_TAP_INTERFACE_H__
#define __VOM_TAP_INTERFACE_H__

#include "vom/interface.hpp"

namespace VOM {
/**
 * A tap-interface, i.e. a VPP interface backed by a host tap device
 */
class tap_interface : public interface
{
public:
  tap_interface(const std::string& name,
                admin_state_t state,
                route::prefix_t prefix);

  tap_interface(const std::string& name,
                admin_state_t state,
                route::prefix_t prefix,
                const l2_address_t& l2_address);

  ~tap_interface();
  tap_interface(const tap_interface& o);

  /**
   * Return the matching 'singular instance' of the TAP interface
   */
  std::shared_ptr<tap_interface> singular() const;

private:
  /**
   * Class definition for listeners to OM events
   */
  class event_handler : public OM::listener, public inspect::command_handler
  {
  public:
    event_handler();
    virtual ~event_handler() = default;

    /**
     * Handle a populate event
     */
    void handle_populate(const client_db::key_t& key);

    /**
     * Handle a replay event
     */
    void handle_replay();

    /**
     * Show the object in the Singular DB
     */
    void show(std::ostream& os);

    /**
     * Get the sortable Id of the listener
     */
    dependency_t order() const;
  };
  static event_handler m_evh;

  /**
   * The IP prefix assigned to the interface
   */
  route::prefix_t m_prefix;

  /**
   * The L2 (MAC) address of the interface
   */
  l2_address_t m_l2_address;

  /**
   * interface is a friend so it can construct with handles
   */
  friend class interface;

  /**
   * Return the matching 'singular' instance of the tap-interface;
   * an over-ride of the base class method
   */
  std::shared_ptr<interface> singular_i() const;

  /**
   * Virtual function to construct the interface's create commands.
   */
  virtual std::queue<cmd*>& mk_create_cmd(std::queue<cmd*>& cmds);

  /**
   * Virtual function to construct the interface's delete commands.
   */
  virtual std::queue<cmd*>& mk_delete_cmd(std::queue<cmd*>& cmds);

  /*
   * It's the OM class that calls singular()
   */
  friend class OM;
};
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "mozilla")
 * End:
 */

#endif
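
/*
 * Usage sketch (not part of the header above): the snippet below shows how
 * a client might create the tap interface declared in this file, assuming
 * the usual VOM workflow of HW::init(), OM::populate() and OM::write().
 * The client key "my-client" and device name "tap0" are illustrative
 * values, route::prefix_t::ZERO is used as a placeholder prefix, and the
 * connection handling to VPP is elided.
 */
#include "vom/hw.hpp"
#include "vom/om.hpp"
#include "vom/tap_interface.hpp"

using namespace VOM;

static void
create_tap_example()
{
  /* one-time library setup; connecting to VPP is elided here */
  HW::init();

  /* load any state already owned by this client key */
  OM::populate("my-client");

  /* describe the desired tap interface */
  tap_interface tap("tap0", interface::admin_state_t::UP,
                    route::prefix_t::ZERO);

  /* commit it; OM keeps the singular instance keyed by "my-client" */
  OM::write("my-client", tap);

  /* later, withdraw everything owned by the client key */
  OM::remove("my-client");
}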
# The leading lines of this module (license header and most of its imports)
# are missing from the dump; the imports below are filled in from usage,
# with module paths assuming the standard CSIT resources/libraries/python
# layout.
from time import time

from resources.libraries.python.Constants import Constants
from resources.libraries.python.DUTSetup import DUTSetup
from resources.libraries.python.LimitUtil import LimitUtil
from resources.libraries.python.ssh import exec_cmd_no_error, scp_node
from resources.libraries.python.SysctlUtil import SysctlUtil
from resources.libraries.python.topology import NodeType

__all__ = ["CoreDumpUtil"]


class CoreDumpUtil(object):
    """Class contains methods for processing core dumps."""

    # Use one instance of the class for all tests. If the functionality
    # should be enabled per suite or per test case, change the scope to
    # "TEST SUITE" or "TEST CASE" respectively.
    ROBOT_LIBRARY_SCOPE = 'GLOBAL'

    def __init__(self):
        """Initialize CoreDumpUtil class."""
        # Corekeeper is configured.
        self._corekeeper_configured = False
        # Enable setting the core limit for a process. Disabling this can be
        # used to prevent the library from setting the core limit again and
        # causing unwanted behavior.
        self._core_limit_enabled = True

    def set_core_limit_enabled(self):
        """Enable setting of core limit for PID."""
        self._core_limit_enabled = True

    def set_core_limit_disabled(self):
        """Disable setting of core limit for PID."""
        self._core_limit_enabled = False

    def is_core_limit_enabled(self):
        """Check if core limit is set for process.

        :returns: True if core limit is set for process.
        :rtype: bool
        """
        return self._corekeeper_configured and self._core_limit_enabled

    def setup_corekeeper_on_all_nodes(self, nodes):
        """Setup core dumps system wide on all nodes.

        :param nodes: Nodes in the topology.
        :type nodes: dict
        """
        for node in nodes.values():
            # Any binary which normally would not be dumped is dumped anyway,
            # but only if the "core_pattern" kernel sysctl is set to either a
            # pipe handler or a fully qualified path. (For more details on
            # this limitation, see CVE-2006-2451.) This mode is appropriate
            # when administrators are attempting to debug problems in a
            # normal environment, and either have a core dump pipe handler
            # that knows to treat privileged core dumps with care, or a
            # specific directory defined for catching core dumps. If a core
            # dump happens without a pipe handler or fully qualified path, a
            # message will be emitted to syslog warning about the lack of a
            # correct setting.
            SysctlUtil.set_sysctl_value(node, 'fs.suid_dumpable', 2)

            # Specify a core dumpfile pattern name (for the output filename).
            # %p  pid
            # %u  uid (in initial user namespace)
            # %g  gid (in initial user namespace)
            # %s  signal number
            # %t  UNIX time of dump
            # %h  hostname
            # %e  executable filename (may be shortened)
            SysctlUtil.set_sysctl_value(node, 'kernel.core_pattern',
                                        Constants.KERNEL_CORE_PATTERN)

        self._corekeeper_configured = True

    @staticmethod
    def enable_coredump_limit(node, pid):
        """Enable coredump for PID(s) by setting no core limits.

        :param node: Node in the topology.
        :param pid: Process ID(s) to set core dump limit to unlimited.
        :type node: dict
        :type pid: list or int
        """
        if isinstance(pid, list):
            for item in pid:
                LimitUtil.set_pid_limit(node, item, 'core', 'unlimited')
                LimitUtil.get_pid_limit(node, item)
        else:
            LimitUtil.set_pid_limit(node, pid, 'core', 'unlimited')
            LimitUtil.get_pid_limit(node, pid)

    def enable_coredump_limit_vpp_on_all_duts(self, nodes):
        """Enable coredump for all VPP PIDs by setting no core limits on all
        DUTs if setting of core limit by this library is enabled.

        :param nodes: Nodes in the topology.
        :type nodes: dict
        """
        for node in nodes.values():
            if node['type'] == NodeType.DUT and self.is_core_limit_enabled():
                vpp_pid = DUTSetup.get_vpp_pid(node)
                self.enable_coredump_limit(node, vpp_pid)

    def get_core_files_on_all_nodes(self, nodes, disable_on_success=True):
        """Compress all core files into a single file and remove the
        original core files on all nodes.

        :param nodes: Nodes in the topology.
        :param disable_on_success: If True, disable setting of core limit by
            this instance of the library. Default: True
        :type nodes: dict
        :type disable_on_success: bool
        """
        for node in nodes.values():
            uuid = str(time()).replace('.', '')
            name = '{uuid}.tar.lzo.lrz.xz'.format(uuid=uuid)

            # Compress all *.core files found in the core-dump directory
            # into a single archive, then remove the originals.
            command = ('[ -e {dir}/*.core ] && cd {dir} && '
                       'sudo tar c *.core | '
                       'lzop -1 | '
                       'lrzip -n -T -p 1 -w 5 | '
                       'xz -9e > {name} && '
                       'sudo rm -f *.core'
                       .format(dir=Constants.CORE_DUMP_DIR, name=name))
            try:
                exec_cmd_no_error(node, command, timeout=3600)
                if disable_on_success:
                    self.set_core_limit_disabled()
            except RuntimeError:
                # If compression was not successful, ignore the error and
                # skip further processing.
                continue

            # Fetch the archive to the local "archive/" directory and clean
            # it up on the remote node.
            local_path = 'archive/{name}'.format(name=name)
            remote_path = '{dir}/{name}'.format(dir=Constants.CORE_DUMP_DIR,
                                                name=name)
            try:
                scp_node(node, local_path, remote_path, get=True,
                         timeout=3600)
                command = ('rm -f {dir}/{name}'
                           .format(dir=Constants.CORE_DUMP_DIR, name=name))
                exec_cmd_no_error(node, command, sudo=True)
            except RuntimeError:
                pass
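
# For orientation, a minimal sketch of how the library above might be
# driven directly from Python (in Robot Framework suites the same methods
# are imported as keywords). The import path, node attributes and the
# example core pattern below are assumptions for illustration, not taken
# from this file.

from resources.libraries.python.CoreDumpUtil import CoreDumpUtil
from resources.libraries.python.topology import NodeType

# Illustrative topology dict; real suites load it from a topology YAML file
# and the entries carry full SSH credentials.
nodes = {
    'DUT1': {'type': NodeType.DUT, 'host': '10.0.0.1', 'port': 22},
    'TG1': {'type': NodeType.TG, 'host': '10.0.0.2', 'port': 22},
}

core_util = CoreDumpUtil()

# One-time, system-wide setup: fs.suid_dumpable and kernel.core_pattern
# (e.g. a pattern such as "/tmp/%p-%u-%g-%s-%t-%h-%e.core").
core_util.setup_corekeeper_on_all_nodes(nodes)

# Lift the core-size limit for the VPP process(es) on every DUT.
core_util.enable_coredump_limit_vpp_on_all_duts(nodes)

# After the test run: compress any *.core files, download them to archive/
# and delete them on the remote nodes.
core_util.get_core_files_on_all_nodes(nodes)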