/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/

#error do not #include this file!

/** \file

    Cooperative multi-tasking thread support.

    Vlib provides a lightweight cooperative multi-tasking thread
    model. Context switching costs a setjmp/longjmp pair. It's not
    unreasonable to put vlib threads to sleep for 10us.

    The graph node scheduler invokes these processes in much the same
    way as traditional vector-processing run-to-completion graph
    nodes; plus-or-minus a setjmp/longjmp pair required to switch
    stacks. Simply set the vlib_node_registration_t type field to
    VLIB_NODE_TYPE_PROCESS. Process is a misnomer; these are threads.

    As of this writing, the default stack size is 2^15 bytes
    (32 KB). Initialize the node registration's
    process_log2_n_stack_bytes member as needed. The graph node
    dispatcher makes some effort to detect stack overrun: we map a
    no-access page below each thread stack.
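
    For example, to give a process a 64 KB stack, set the log2 stack
    size at registration time. A minimal sketch; the node and function
    names here are hypothetical:

    <code><pre>
    static VLIB_REGISTER_NODE (big_stack_node) = {
      .function = big_stack_process,
      .type = VLIB_NODE_TYPE_PROCESS,
      .name = "big-stack-process",
      .process_log2_n_stack_bytes = 16,  // 2^16 = 64 KB stack
    };
    </pre></code>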

    Process node dispatch functions are expected to be while(1) { }
    loops which suspend when not otherwise occupied, and which must
    not run for unreasonably long periods of time.  Unreasonably long
    is an application-dependent concept. Over the years, we have
    constructed frame-size sensitive control-plane nodes which will
    use a much higher fraction of the available CPU bandwidth when the
    frame size is low. Classic example: modifying forwarding
    tables. So long as the table-builder leaves the forwarding tables
    in a valid state, one can suspend the table builder to avoid
    dropping packets as a result of control-plane activity.
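
    As a sketch of that pattern: a table builder might yield briefly
    between batches of updates, assuming each individual update leaves
    the table in a valid state. The routes_to_add vector and the
    add_one_route function below are hypothetical:

    <code><pre>
    for (i = 0; i < vec_len (routes_to_add); i++)
      {
        // Each call is assumed to leave the FIB consistent
        add_one_route (vm, routes_to_add[i]);

        // Yield to the data plane for 100us every 100 updates
        if ((i % 100) == 0)
          vlib_process_suspend (vm, 100e-6);
      }
    </pre></code>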

    Process nodes can suspend for fixed amounts of time, or until another
    entity signals an event, or both. See the example below.

    When running in VLIB process context, one must pay strict
    attention to loop-invariant issues. If one walks a data structure
    and calls a function which may suspend, one had best know by
    construction that the structure cannot change. Often, it's best to
    simply make a snapshot copy of the data structure, walk the copy
    at leisure, then free the copy.
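
    A minimal sketch of the snapshot pattern, assuming a vector of
    indices and a hypothetical process_one function which may suspend:

    <code><pre>
    u32 *snapshot = vec_dup (interesting_indices);

    // Walk the copy: the original may change while we sleep
    for (i = 0; i < vec_len (snapshot); i++)
      process_one (vm, snapshot[i]);  // may suspend
    vec_free (snapshot);
    </pre></code>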

    Here's a complete example of a process node:

    <code><pre>
    \#define EXAMPLE_POLL_PERIOD 10.0

    static uword
    example_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
                     vlib_frame_t * f)
    {
      f64 poll_time_remaining;
      uword event_type, *event_data = 0;

      poll_time_remaining = EXAMPLE_POLL_PERIOD;
      while (1)
        {
          int i;

          // Sleep until the next periodic call is due,
          // or until we receive event(s)
          poll_time_remaining =
            vlib_process_wait_for_event_or_clock (vm, poll_time_remaining);

          event_type = vlib_process_get_events (vm, &event_data);
          switch (event_type)
            {
            case ~0:            // no events => timeout
              break;

            case EVENT1:
              for (i = 0; i < vec_len (event_data); i++)
                handle_event1 (vm, event_data[i]);
              break;

            case EVENT2:
              for (i = 0; i < vec_len (event_data); i++)
                handle_event2 (vm, event_data[i]);
              break;

            // ... and so forth for each event type

            default:
              // This should never happen...
              clib_warning ("BUG: unhandled event type %d", event_type);
              break;
            }
          vec_reset_length (event_data);

          // Timer expired, call the periodic function
          if (vlib_process_suspend_time_is_zero (poll_time_remaining))
            {
              example_periodic (vm);
              poll_time_remaining = EXAMPLE_POLL_PERIOD;
            }
        }
      // NOTREACHED
      return 0;
    }

    static VLIB_REGISTER_NODE (example_node) = {
      .function = example_process,
      .type = VLIB_NODE_TYPE_PROCESS,
      .name = "example-process",
    };
    </pre></code>

    In this example, the VLIB process node waits for an event to
    occur, or for 10 seconds to elapse. The code demuxes on the event
    type, calling the appropriate handler function.

    Each call to vlib_process_get_events returns a vector of
    per-event-type data passed to successive vlib_process_signal_event
    calls; vec_len (event_data) >= 1.  It is an error to process only
    event_data[0].
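
    On the signaling side, another node or thread delivers events with
    vlib_process_signal_event. A minimal sketch, reusing the node name
    and the EVENT1 code from the example above:

    <code><pre>
    vlib_node_t *n = vlib_get_node_by_name (vm, (u8 *) "example-process");

    // Wake the process (if suspended) and queue one datum; several
    // signals may coalesce into a single event_data vector
    vlib_process_signal_event (vm, n->index, EVENT1, 42 /* data */);
    </pre></code>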

    Resetting the event_data vector length to 0 by calling
    vec_reset_length (event_data), instead of calling vec_free (...),
    means that the event scheme doesn't burn cycles continuously
    allocating and freeing the event data vector. This is a common
    coding pattern, well worth using when appropriate.
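
    A sketch of the two idioms side by side:

    <code><pre>
    vec_reset_length (event_data);  // len = 0; memory retained for reuse
    vec_free (event_data);          // memory returned; event_data = 0
    </pre></code>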
*/

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */