path: root/src/vnet/bier/bier_lookup.c
/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/buffer.h>
#include <vnet/vnet.h>

#include <vnet/bier/bier_fmask.h>
#include <vnet/bier/bier_hdr_inlines.h>
#include <vnet/bier/bier_table.h>

/**
 * Struct maintaining the per-worker thread data for BIER lookups
 */
typedef struct bier_lookup_main_t_
{
    /* per-cpu vector of cloned packets */
    u32 **blm_clones;
    /* per-cpu vector of BIER fmasks */
    u32 **blm_fmasks;
} bier_lookup_main_t;
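
/*
 * The per-thread vectors above are sized once in bier_lookup_module_init()
 * and reset (length zeroed, memory retained) per packet, so the forwarding
 * path does not allocate in the common case.
 */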

/**
 * Single instance of the lookup main
 */
static bier_lookup_main_t bier_lookup_main;

static char * bier_lookup_error_strings[] = {
#define bier_error(n,s) s,
#include <vnet/bier/bier_lookup_error.def>
#undef bier_error
};

/*
 * Next nodes for BIER lookup; keep these semantically in sync with
 * the next_nodes in the node registration below.
 */
#define foreach_bier_lookup_next                \
    _(DROP, "bier-drop")                        \
    _(OUTPUT, "bier-output")

typedef enum {
#define _(s,n) BIER_LOOKUP_NEXT_##s,
    foreach_bier_lookup_next
#undef _
    BIER_LOOKUP_N_NEXT,
} bier_lookup_next_t;

typedef enum {
#define bier_error(n,s) BIER_LOOKUP_ERROR_##n,
#include <vnet/bier/bier_lookup_error.def>
#undef bier_error
    BIER_LOOKUP_N_ERROR,
} bier_lookup_error_t;

vlib_node_registration_t bier_lookup_node;

/**
 * @brief Packet trace record for a BIER lookup
 */
typedef struct bier_lookup_trace_t_
{
    /** next-node index chosen for the packet */
    u32 next_index;
    /** index of the BIER table used for the lookup */
    index_t bt_index;
    /** index of the fmask the packet (or clone) was sent towards */
    index_t bfm_index;
} bier_lookup_trace_t;

static uword
bier_lookup (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
    u32 n_left_from, next_index, * from, * to_next;
    bier_lookup_main_t *blm = &bier_lookup_main;
    u32 thread_index = vlib_get_thread_index();
    bier_bit_mask_bucket_t buckets_copy[BIER_HDR_BUCKETS_4096];
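    /* buckets_copy is stack scratch sized for the largest (4096 bit)
     * header; each packet's bit-string is copied into it for the walk */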

    from = vlib_frame_vector_args (from_frame);
    n_left_from = from_frame->n_vectors;
    next_index = BIER_LOOKUP_NEXT_DROP;
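
    /*
     * next_index starts on the drop arc; vlib_validate_buffer_enqueue_x1()
     * in the loop below re-homes any packet whose next0 differs
     */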

    while (n_left_from > 0)
    {
        u32 n_left_to_next;

        vlib_get_next_frame (vm, node, next_index,
                             to_next, n_left_to_next);

        while (n_left_from > 0 && n_left_to_next > 0)
        {
            u32 next0, bi0, n_bytes, bti0, bfmi0;
            const bier_fmask_t *bfm0;
            const bier_table_t *bt0;
            u16 index, num_buckets;
            const bier_hdr_t *bh0;
            bier_bit_string_t bbs;
            vlib_buffer_t *b0;
            bier_bp_t fbs;
            int bucket;

            bi0 = from[0];
            from += 1;
            n_left_from -= 1;

            b0 = vlib_get_buffer (vm, bi0);
            bh0 = vlib_buffer_get_current (b0);
            bti0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];

            /*
             * default to drop so that if no bits match,
             * the packet is dropped
             */
            next0 = BIER_LOOKUP_NEXT_DROP;

            /*
             * At the imposition or input node the BIER table index
             * was stored in the TX adjacency
             */
            bt0 = bier_table_get(bti0);

            /*
             * forwarding is always done via one of the ECMP sub-tables,
             * never via the main table
             */
            ASSERT(!bier_table_is_main(bt0));

            /*
             * n_bytes is the length of the bit-string in (one byte)
             * buckets; num_buckets counts the integer-sized chunks
             * the walk below uses
             */
            n_bytes = bier_hdr_len_id_to_num_buckets(bt0->bt_id.bti_hdr_len);
            vnet_buffer(b0)->mpls.bier.n_bytes = n_bytes;
            vnet_buffer(b0)->sw_if_index[VLIB_TX] = ~0;
            num_buckets = n_bytes / sizeof(int);
            bier_bit_string_init(&bbs,
                                 bt0->bt_id.bti_hdr_len,
                                 buckets_copy);
            memcpy(bbs.bbs_buckets, bh0->bh_bit_string, bbs.bbs_len);
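            /*
             * the loop below clears bits in this stack copy as they
             * are processed; the packet's own header is left intact
             * for the clones
             */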

            /*
             * reset the fmask storage vector
             */
            vec_reset_length (blm->blm_fmasks[thread_index]);

            /*
             * Loop through the buckets in the header
             */
            for (index = 0; index < num_buckets; index++) {
                /*
                 * loop through each bit in the bucket
                 */
                bucket = ((int*)bbs.bbs_buckets)[index];

                while (bucket) {
                    fbs  = bier_find_first_bit_string_set(bucket);
                    fbs += (((num_buckets - 1) - index) *
                            BIER_BIT_MASK_BITS_PER_INT);

                    bfmi0 = bier_table_fwd_lookup(bt0, fbs);

                    /*
                     * whatever happens, the bit we just looked up
                     * MUST be cleared from the bit-string copy,
                     * otherwise this loop would never terminate
                     */
                    bier_bit_string_clear_bit(&bbs, fbs);

                    if (PREDICT_TRUE(INDEX_INVALID != bfmi0))
                    {
                        bfm0 = bier_fmask_get(bfmi0);

                        /*
                         * use the bit-string on the fmask to reset
                         * the bits in the copy we are walking, so every
                         * bit-position reached via this fmask is handled
                         * by a single clone
                         */
                        bier_bit_string_clear_string(
                            &bfm0->bfm_bits.bfmb_input_reset_string,
                            &bbs);
                        bucket = ((int*)bbs.bbs_buckets)[index];

                        /*
                         * the fmask is resolved, so replicate a
                         * packet towards it
                         */
                        next0 = BIER_LOOKUP_NEXT_OUTPUT;

                        vec_add1 (blm->blm_fmasks[thread_index], bfmi0);
                    } else {
                        /*
                         * the fmask is unresolved; count the error and
                         * move on to the next bit-position set
                         */
                        vlib_node_increment_counter(
                            vm, node->node_index,
                            BIER_LOOKUP_ERROR_FMASK_UNRES, 1);
                        bucket = ((int*)bbs.bbs_buckets)[index];
                    }
                }
            }
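
            /*
             * Worked example (illustrative, assuming 32-bit ints): for a
             * 128-bit string num_buckets is 4, and a bit found in bucket
             * index 0 has ((4 - 1) - 0) * 32 = 96 added to its in-bucket
             * position, i.e. bucket 0 holds the most significant bits.
             */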

            /*
             * The full bit-string has now been processed.
             * Create one clone per fmask we are replicating to.
             */
            u16 num_cloned, clone;
            u32 n_clones;

            n_clones = vec_len(blm->blm_fmasks[thread_index]);

            if (PREDICT_TRUE(0 != n_clones))
            {
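                /*
                 * vlib_buffer_clone copies only the head of the buffer
                 * (up to VLIB_BUFFER_CLONE_HEAD_SIZE bytes); the clones
                 * share the remaining payload, so per-clone rewrites are
                 * limited to the head and buffer metadata such as the
                 * TX adjacency set below.
                 */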
                vec_set_len(blm->blm_clones[thread_index], n_clones);
                num_cloned = vlib_buffer_clone(vm, bi0,
                                               blm->blm_clones[thread_index],
                                               n_clones,
                                               VLIB_BUFFER_CLONE_HEAD_SIZE);

                if (num_cloned != n_clones)
                {
                    vec_set_len(blm->blm_clones[thread_index], num_cloned);
                    vlib_node_increment_counter
                        (vm, node->node_index,
                         BIER_LOOKUP_ERROR_BUFFER_ALLOCATION_FAILURE, 1);
                }

                for (clone = 0; clone < num_cloned; clone++)
                {
                    vlib_buffer_t *c0;
                    u32 ci0;

                    ci0 = blm->blm_clones[thread_index][clone];
                    c0 = vlib_get_buffer(vm, ci0);
                    vnet_buffer(c0)->ip.adj_index[VLIB_TX] =
                        blm->blm_fmasks[thread_index][clone];

                    to_next[0] = ci0;
                    to_next += 1;
                    n_left_to_next -= 1;

                    if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
                    {
                        bier_lookup_trace_t *tr;

                        tr = vlib_add_trace (vm, node, c0, sizeof (*tr));
                        tr->next_index = next0;
                        tr->bt_index = bti0;
                        tr->bfm_index = blm->blm_fmasks[thread_index][clone];
                    }

                    vlib_validate_buffer_enqueue_x1(vm, node, next_index,
                                                    to_next, n_left_to_next,
                                                    ci0, next0);

                    /*
                     * After the enqueue it is possible that we overflow the
                     * frame of the to-next node. When this happens we need to
                     * 'put' that full frame to the node and get a fresh, empty
                     * one. Note that these are macros with side effects that
                     * change to_next & n_left_to_next
                     */
                    if (PREDICT_FALSE(0 == n_left_to_next))
                    {
                        vlib_put_next_frame (vm, node, next_index,
                                             n_left_to_next);
                        vlib_get_next_frame (vm, node, next_index,
                                             to_next, n_left_to_next);
                    }
                }
            }
            else
            {
                /*
                 * no clones/replications required. drop this packet
                 */
                next0 = BIER_LOOKUP_NEXT_DROP;
                to_next[0] = bi0;
                to_next += 1;
                n_left_to_next -= 1;

                if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
                {
                    bier_lookup_trace_t *tr;

                    tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
                    tr->next_index = next0;
                    tr->bt_index = bti0;
                    tr->bfm_index = ~0;
                }

                vlib_validate_buffer_enqueue_x1(vm, node, next_index,
                                                to_next, n_left_to_next,
                                                bi0, next0);
            }
        }

        vlib_put_next_frame(vm, node, next_index, n_left_to_next);
    }

    vlib_node_increment_counter(vm, bier_lookup_node.index,
                                BIER_LOOKUP_ERROR_NONE,
                                from_frame->n_vectors);
    return (from_frame->n_vectors);
}

static u8 *
format_bier_lookup_trace (u8 * s, va_list * args)
{
    CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
    CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
    bier_lookup_trace_t * t = va_arg (*args, bier_lookup_trace_t *);

    s = format (s, "BIER: next [%d], tbl:%d BFM:%d",
                t->next_index,
                t->bt_index,
                t->bfm_index);
    return s;
}

VLIB_REGISTER_NODE (bier_lookup_node) = {
    .function = bier_lookup,
    .name = "bier-lookup",
    /* Takes a vector of packets. */
    .vector_size = sizeof (u32),

    .n_errors = BIER_LOOKUP_N_ERROR,
    .error_strings = bier_lookup_error_strings,

    .format_trace = format_bier_lookup_trace,
    .n_next_nodes = BIER_LOOKUP_N_NEXT,
    .next_nodes = {
        [BIER_LOOKUP_NEXT_DROP] = "bier-drop",
        [BIER_LOOKUP_NEXT_OUTPUT] = "bier-output",
    },
};

clib_error_t *
bier_lookup_module_init (vlib_main_t * vm)
{
    bier_lookup_main_t *blm = &bier_lookup_main;
    u32 thread_index;

    vec_validate (blm->blm_clones, vlib_num_workers());
    vec_validate (blm->blm_fmasks, vlib_num_workers());

    for (thread_index = 0;
         thread_index <= vlib_num_workers();
         thread_index++)
    {
        /*
         * 1024 entries is the most we will ever need, i.e. one per
         * bit-position of a Bit-Mask length of 1024
         */
        vec_validate(blm->blm_fmasks[thread_index], 1023);
        vec_validate(blm->blm_clones[thread_index], 1023);
    }

    return 0;
}

VLIB_INIT_FUNCTION (bier_lookup_module_init);