# Copyright (c) 2022 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Module for converting in-memory data into JSON output.

CSIT and VPP PAPI use custom data types that are not directly serializable
into JSON.

Thus, before writing the output onto disk, the data is recursively converted to
equivalent serializable types, in extreme cases replaced by string
representation.

Validation is outside the scope of this module, as it should use the JSON data
read from disk.
"""

import json
import os

from collections.abc import Iterable, Mapping, Set
from enum import IntFlag
from dateutil.parser import parse


def _pre_serialize_recursive(data):
    """Recursively sort and convert to a more serializable form.

    VPP PAPI code can give data with its own MACAddress type,
    or various other enum and flag types.
    The default json.JSONEncoder method raises TypeError on those.
    The first task of this function is to apply str() or repr()
    to leaf values that need it.

    Also, PAPI responses are namedtuples, which confuses
    the json.JSONEncoder method (so it does not recurse).
    Dictization (see PapiExecutor) helps somewhat, but it turns namedtuple
    into a UserDict, which also confuses json.JSONEncoder.
    Therefore, we recursively convert any Mapping into an ordinary dict.

    We also convert iterables to list (sorted if the iterable was a set),
    and prevent numbers from getting converted to strings.

    As we are doing such low level operations,
    we also convert mapping keys to strings
    and sort the mapping items alphabetically by key,
    except that the "type" field is moved to the start
    and the "data" field is moved to the end.

    :param data: Object to make serializable, dictized when applicable.
    :type data: object
    :returns: Serializable equivalent of the argument.
    :rtype: object
    :raises ValueError: If the argument does not support string conversion.
    """
    # Recursion ends at scalar values, first handle irregular ones.
    if isinstance(data, IntFlag):
        return repr(data)
    if isinstance(data, bytes):
        return data.hex()
    # The regular ones are good to go.
    if isinstance(data, (str, int, float, bool)):
        return data
    # Recurse over, convert and sort mappings.
    if isinstance(data, Mapping):
        # Convert and sort alphabetically.
        ret = {
            str(key): _pre_serialize_recursive(data[key])
            for key in sorted(data.keys())
        }
        # If exists, move "data" field to the end.
        if u"data" in ret:
            data_value = ret.pop(u"data")
            ret[u"data"] = data_value
        # If exists, move "type" field at the start.
        if u"type" in ret:
            type_value = ret.pop(u"type")
            ret_old = ret
            ret = dict(type=type_value)
            ret.update(ret_old)
        return ret
    # Recurse over and convert iterables.
    if isinstance(data, Iterable):
        list_data = [_pre_serialize_recursive(item) for item in data]
        # Additionally, sets are exported as sorted.
        if isinstance(data, Set):
            list_data = sorted(list_data)
        return list_data
    # Unknown structure, attempt str().
    return str(data)
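
# Illustrative behavior on a hypothetical PAPI-like structure, shown as an
# interpreter transcript in comments so that importing this module stays
# free of side effects:
#
#     >>> _pre_serialize_recursive(
#     ...     {u"type": u"sample", u"data": u"payload",
#     ...      u"b": {3, 1, 2}, u"a": b"\x01\xff"}
#     ... )
#     {'type': 'sample', 'a': '01ff', 'b': [1, 2, 3], 'data': 'payload'}
#
# Keys become strings and are sorted, bytes turn into a hex string, the set
# becomes a sorted list, "type" moves to the start and "data" to the end.
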


def _pre_serialize_root(data):
    """Recursively convert to a more serializable form, tweak order.

    See _pre_serialize_recursive for most of the changes this does.

    The logic here (outside the recursive function) only affects
    field ordering in the root mapping,
    to make it more human friendly.
    We are moving "version" to the top,
    followed by start time and end time,
    and various long fields to the bottom.

    Some edits are done in-place; do not trust the argument value after calling.

    :param data: Root data to make serializable, dictized when applicable.
    :type data: dict
    :returns: Order-tweaked version of the argument.
    :rtype: dict
    :raises KeyError: If the data does not contain required fields.
    :raises TypeError: If the argument is not a dict.
    :raises ValueError: If the argument does not support string conversion.
    """
    if not isinstance(data, dict):
        raise TypeError(f"Root data object needs to be a dict: {data!r}")
    data = _pre_serialize_recursive(data)
    new_data = dict(version=data.pop(u"version"))
    new_data[u"start_time"] = data.pop(u"start_time")
    new_data[u"end_time"] = data.pop(u"end_time")
    new_data.update(data)
    return new_data
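
# For example (hypothetical values), a root input of
#     {u"end_time": u"e", u"version": u"v", u"start_time": u"s", u"foo": 1}
# comes out as
#     {'version': 'v', 'start_time': 's', 'end_time': 'e', 'foo': 1}
# so the identifying fields lead when a human reads the resulting JSON.
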


def _merge_into_suite_info_file(teardown_path):
    """Move setup and teardown data into a singe file, remove old files.

    The caller has to confirm the argument is correct, e.g. ending in
    "/teardown.info.json".

    :param teardown_path: Local filesystem path to teardown file.
    :type teardown_path: str
    :returns: Local filesystem path to newly created suite file.
    :rtype: str
    """
    # Manual right replace: https://stackoverflow.com/a/9943875
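    # E.g. a hypothetical "logs/teardown_x/teardown.info.json" becomes
    # "logs/teardown_x/setup.info.json": only the last occurrence
    # of "teardown" is replaced, so directory names survive intact.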
    setup_path = u"setup".join(teardown_path.rsplit(u"teardown", 1))
    with open(teardown_path, u"rt", encoding="utf-8") as file_in:
        teardown_data = json.load(file_in)
    # Transforming setup data into suite data.
    with open(setup_path, u"rt", encoding="utf-8") as file_in:
        suite_data = json.load(file_in)

    end_time = teardown_data[u"end_time"]
    suite_data[u"end_time"] = end_time
    start_float = parse(suite_data[u"start_time"]).timestamp()
    end_float = parse(suite_data[u"end_time"]).timestamp()
    suite_data[u"duration"] = end_float - start_float
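    # E.g. hypothetical timestamps "2022-01-01T00:00:00Z" and
    # "2022-01-01T00:00:02.5Z" yield a duration of 2.5 seconds.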
    setup_telemetry = suite_data.pop(u"telemetry")
    suite_data[u"setup_telemetry"] = setup_telemetry
    suite_data[u"teardown_telemetry"] = teardown_data[u"telemetry"]

    suite_path = u"suite".join(teardown_path.rsplit(u"teardown", 1))
    with open(suite_path, u"wt", encoding="utf-8") as file_out:
        json.dump(suite_data, file_out, indent=1)
    # We moved everything useful from temporary setup/teardown info files.
    os.remove(setup_path)
    os.remove(teardown_path)

    return suite_path
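
# Net effect on a suite directory (hypothetical file names):
#     before: .../suite1/setup.info.json and .../suite1/teardown.info.json
#     after:  .../suite1/suite.info.json
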


def write_output(file_path, data):
    """Prepare data for serialization and dump into a file.

    Ancestor directories are created if needed.

    If the file is a teardown info file, it is merged with the corresponding
    setup info file into a single suite info file.

    :param file_path: Local filesystem path, including the file name.
    :param data: Root data to make serializable, dictized when applicable.
    :type file_path: str
    :type data: dict
    :returns: Resulting file path (the suite file if merging happened).
    :rtype: str
    """
    data = _pre_serialize_root(data)

    # Let's move telemetry to the end, as it tends to be long.
    telemetry = data.pop(u"telemetry")
    data[u"telemetry"] = telemetry

    os.makedirs(os.path.dirname(file_path), exist_ok=True)
    with open(file_path, u"wt", encoding="utf-8") as file_out:
        json.dump(data, file_out, indent=1)

    if file_path.endswith(u"/teardown.info.json"):
        file_path = _merge_into_suite_info_file(file_path)

    return file_path
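

if __name__ == "__main__":
    # A minimal, self-contained smoke test sketch. The data shape here is
    # hypothetical; real CSIT exports carry many more fields. It only needs
    # the keys this module itself requires: version, start_time, end_time
    # and telemetry.
    import tempfile

    demo_data = {
        u"version": u"0.1.0",
        u"start_time": u"2022-01-01T00:00:00Z",
        u"end_time": u"2022-01-01T00:00:01Z",
        u"telemetry": [u"example metric line"],
        u"data": b"\x00\x01",
    }
    with tempfile.TemporaryDirectory() as tmp_dir:
        out_path = write_output(
            os.path.join(tmp_dir, u"demo.info.json"), demo_data
        )
        with open(out_path, u"rt", encoding="utf-8") as file_in:
            print(file_in.read())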
s="p">, 0x243185be4ee4b28c, 0x550c7dc3d5ffb4e2, 0x72be5d74f27b896f, 0x80deb1fe3b1696b1, 0x9bdc06a725c71235, 0xc19bf174cf692694, 0xe49b69c19ef14ad2, 0xefbe4786384f25e3, 0x0fc19dc68b8cd5b5, 0x240ca1cc77ac9c65, 0x2de92c6f592b0275, 0x4a7484aa6ea6e483, 0x5cb0a9dcbd41fbd4, 0x76f988da831153b5, 0x983e5152ee66dfab, 0xa831c66d2db43210, 0xb00327c898fb213f, 0xbf597fc7beef0ee4, 0xc6e00bf33da88fc2, 0xd5a79147930aa725, 0x06ca6351e003826f, 0x142929670a0e6e70, 0x27b70a8546d22ffc, 0x2e1b21385c26c926, 0x4d2c6dfc5ac42aed, 0x53380d139d95b3df, 0x650a73548baf63de, 0x766a0abb3c77b2a8, 0x81c2c92e47edaee6, 0x92722c851482353b, 0xa2bfe8a14cf10364, 0xa81a664bbc423001, 0xc24b8b70d0f89791, 0xc76c51a30654be30, 0xd192e819d6ef5218, 0xd69906245565a910, 0xf40e35855771202a, 0x106aa07032bbd1b8, 0x19a4c116b8d2d0c8, 0x1e376c085141ab53, 0x2748774cdf8eeb99, 0x34b0bcb5e19b48a8, 0x391c0cb3c5c95a63, 0x4ed8aa4ae3418acb, 0x5b9cca4f7763e373, 0x682e6ff3d6b2b8a3, 0x748f82ee5defb2fc, 0x78a5636f43172f60, 0x84c87814a1f0ab72, 0x8cc702081a6439ec, 0x90befffa23631e28, 0xa4506cebde82bde9, 0xbef9a3f7b2c67915, 0xc67178f2e372532b, 0xca273eceea26619c, 0xd186b8c721c0c207, 0xeada7dd6cde0eb1e, 0xf57d4f7fee6ed178, 0x06f067aa72176fba, 0x0a637dc5a2c898a6, 0x113f9804bef90dae, 0x1b710b35131c471b, 0x28db77f523047d84, 0x32caab7b40c72493, 0x3c9ebe0a15c9bebc, 0x431d67c49c100d4c, 0x4cc5d4becb3e42b6, 0x597f299cfc657e2a, 0x5fcb6fab3ad6faec, 0x6c44198c4a475817 }; typedef enum { CLIB_SHA2_224, CLIB_SHA2_256, CLIB_SHA2_384, CLIB_SHA2_512, CLIB_SHA2_512_224, CLIB_SHA2_512_256, } clib_sha2_type_t; #define SHA2_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE #define SHA2_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE typedef struct { u64 total_bytes; u16 n_pending; u8 block_size; u8 digest_size; union { u32 h32[8]; u64 h64[8]; #if defined(__SHA__) && defined (__x86_64__) u32x4 h32x4[2]; #endif }; union { u8 as_u8[SHA2_MAX_BLOCK_SIZE]; u64 as_u64[SHA2_MAX_BLOCK_SIZE / sizeof (u64)]; uword as_uword[SHA2_MAX_BLOCK_SIZE / sizeof (uword)]; } pending; } clib_sha2_ctx_t; static_always_inline void clib_sha2_init (clib_sha2_ctx_t * ctx, clib_sha2_type_t type) { const u32 *h32 = 0; const u64 *h64 = 0; ctx->total_bytes = 0; ctx->n_pending = 0; switch (type) { case CLIB_SHA2_224: h32 = sha224_h; ctx->block_size = SHA224_BLOCK_SIZE; ctx->digest_size = SHA224_DIGEST_SIZE; break; case CLIB_SHA2_256: h32 = sha256_h; ctx->block_size = SHA256_BLOCK_SIZE; ctx->digest_size = SHA256_DIGEST_SIZE; break; case CLIB_SHA2_384: h64 = sha384_h; ctx->block_size = SHA384_BLOCK_SIZE; ctx->digest_size = SHA384_DIGEST_SIZE; break; case CLIB_SHA2_512: h64 = sha512_h; ctx->block_size = SHA512_BLOCK_SIZE; ctx->digest_size = SHA512_DIGEST_SIZE; break; case CLIB_SHA2_512_224: h64 = sha512_224_h; ctx->block_size = SHA512_224_BLOCK_SIZE; ctx->digest_size = SHA512_224_DIGEST_SIZE; break; case CLIB_SHA2_512_256: h64 = sha512_256_h; ctx->block_size = SHA512_256_BLOCK_SIZE; ctx->digest_size = SHA512_256_DIGEST_SIZE; break; } if (h32) for (int i = 0; i < 8; i++) ctx->h32[i] = h32[i]; if (h64) for (int i = 0; i < 8; i++) ctx->h64[i] = h64[i]; } #if defined(__SHA__) && defined (__x86_64__) static inline void shani_sha256_cycle_w (u32x4 cw[], u8 a, u8 b, u8 c, u8 d) { cw[a] = (u32x4) _mm_sha256msg1_epu32 ((__m128i) cw[a], (__m128i) cw[b]); cw[a] += (u32x4) _mm_alignr_epi8 ((__m128i) cw[d], (__m128i) cw[c], 4); cw[a] = (u32x4) _mm_sha256msg2_epu32 ((__m128i) cw[a], (__m128i) cw[d]); } static inline void shani_sha256_4_rounds (u32x4 cw, u8 n, u32x4 s[]) { u32x4 r = *(u32x4 *) (sha256_k + 4 * n) + cw; s[0] = (u32x4) _mm_sha256rnds2_epu32 
((__m128i) s[0], (__m128i) s[1], (__m128i) r); r = (u32x4) u64x2_interleave_hi ((u64x2) r, (u64x2) r); s[1] = (u32x4) _mm_sha256rnds2_epu32 ((__m128i) s[1], (__m128i) s[0], (__m128i) r); } static inline void shani_sha256_shuffle (u32x4 d[2], u32x4 s[2]) { /* {0, 1, 2, 3}, {4, 5, 6, 7} -> {7, 6, 3, 2}, {5, 4, 1, 0} */ d[0] = (u32x4) _mm_shuffle_ps ((__m128) s[1], (__m128) s[0], 0xbb); d[1] = (u32x4) _mm_shuffle_ps ((__m128) s[1], (__m128) s[0], 0x11); } #endif void clib_sha256_block (clib_sha2_ctx_t * ctx, const u8 * msg, uword n_blocks) { #if defined(__SHA__) && defined (__x86_64__) u32x4 h[2], s[2], w[4]; shani_sha256_shuffle (h, ctx->h32x4); while (n_blocks) { w[0] = u32x4_byte_swap (u32x4_load_unaligned ((u8 *) msg + 0)); w[1] = u32x4_byte_swap (u32x4_load_unaligned ((u8 *) msg + 16)); w[2] = u32x4_byte_swap (u32x4_load_unaligned ((u8 *) msg + 32)); w[3] = u32x4_byte_swap (u32x4_load_unaligned ((u8 *) msg + 48)); s[0] = h[0]; s[1] = h[1]; shani_sha256_4_rounds (w[0], 0, s); shani_sha256_4_rounds (w[1], 1, s); shani_sha256_4_rounds (w[2], 2, s); shani_sha256_4_rounds (w[3], 3, s); shani_sha256_cycle_w (w, 0, 1, 2, 3); shani_sha256_4_rounds (w[0], 4, s); shani_sha256_cycle_w (w, 1, 2, 3, 0); shani_sha256_4_rounds (w[1], 5, s); shani_sha256_cycle_w (w, 2, 3, 0, 1); shani_sha256_4_rounds (w[2], 6, s); shani_sha256_cycle_w (w, 3, 0, 1, 2); shani_sha256_4_rounds (w[3], 7, s); shani_sha256_cycle_w (w, 0, 1, 2, 3); shani_sha256_4_rounds (w[0], 8, s); shani_sha256_cycle_w (w, 1, 2, 3, 0); shani_sha256_4_rounds (w[1], 9, s); shani_sha256_cycle_w (w, 2, 3, 0, 1); shani_sha256_4_rounds (w[2], 10, s); shani_sha256_cycle_w (w, 3, 0, 1, 2); shani_sha256_4_rounds (w[3], 11, s); shani_sha256_cycle_w (w, 0, 1, 2, 3); shani_sha256_4_rounds (w[0], 12, s); shani_sha256_cycle_w (w, 1, 2, 3, 0); shani_sha256_4_rounds (w[1], 13, s); shani_sha256_cycle_w (w, 2, 3, 0, 1); shani_sha256_4_rounds (w[2], 14, s); shani_sha256_cycle_w (w, 3, 0, 1, 2); shani_sha256_4_rounds (w[3], 15, s); h[0] += s[0]; h[1] += s[1]; /* next */ msg += SHA256_BLOCK_SIZE; n_blocks--; } shani_sha256_shuffle (ctx->h32x4, h); #else u32 w[64], s[8], i; while (n_blocks) { for (i = 0; i < 8; i++) s[i] = ctx->h32[i]; for (i = 0; i < 16; i++) { w[i] = clib_net_to_host_u32 (*((u32 *) msg + i)); SHA256_TRANSFORM (s, w, i, sha256_k[i]); } for (i = 16; i < 64; i++) { SHA256_MSG_SCHED (w, i); SHA256_TRANSFORM (s, w, i, sha256_k[i]); } for (i = 0; i < 8; i++) ctx->h32[i] += s[i]; /* next */ msg += SHA256_BLOCK_SIZE; n_blocks--; } #endif } static_always_inline void clib_sha512_block (clib_sha2_ctx_t * ctx, const u8 * msg, uword n_blocks) { u64 w[80], s[8], i; while (n_blocks) { for (i = 0; i < 8; i++) s[i] = ctx->h64[i]; for (i = 0; i < 16; i++) { w[i] = clib_net_to_host_u64 (*((u64 *) msg + i)); SHA512_TRANSFORM (s, w, i, sha512_k[i]); } for (i = 16; i < 80; i++) { SHA512_MSG_SCHED (w, i); SHA512_TRANSFORM (s, w, i, sha512_k[i]); } for (i = 0; i < 8; i++) ctx->h64[i] += s[i]; /* next */ msg += SHA512_BLOCK_SIZE; n_blocks--; } } static_always_inline void clib_sha2_update (clib_sha2_ctx_t * ctx, const u8 * msg, uword n_bytes) { uword n_blocks; if (ctx->n_pending) { uword n_left = ctx->block_size - ctx->n_pending; if (n_bytes < n_left) { clib_memcpy_fast (ctx->pending.as_u8 + ctx->n_pending, msg, n_bytes); ctx->n_pending += n_bytes; return; } else { clib_memcpy_fast (ctx->pending.as_u8 + ctx->n_pending, msg, n_left); if (ctx->block_size == SHA512_BLOCK_SIZE) clib_sha512_block (ctx, ctx->pending.as_u8, 1); else clib_sha256_block (ctx, ctx->pending.as_u8, 
1); ctx->n_pending = 0; ctx->total_bytes += ctx->block_size; n_bytes -= n_left; msg += n_left; } } if ((n_blocks = n_bytes / ctx->block_size)) { if (ctx->block_size == SHA512_BLOCK_SIZE) clib_sha512_block (ctx, msg, n_blocks); else clib_sha256_block (ctx, msg, n_blocks); n_bytes -= n_blocks * ctx->block_size; msg += n_blocks * ctx->block_size; ctx->total_bytes += n_blocks * ctx->block_size; } if (n_bytes) { clib_memset_u8 (ctx->pending.as_u8, 0, ctx->block_size); clib_memcpy_fast (ctx->pending.as_u8, msg, n_bytes); ctx->n_pending = n_bytes; } else ctx->n_pending = 0; } static_always_inline void clib_sha2_final (clib_sha2_ctx_t * ctx, u8 * digest) { int i; ctx->total_bytes += ctx->n_pending; if (ctx->n_pending == 0) { clib_memset (ctx->pending.as_u8, 0, ctx->block_size); ctx->pending.as_u8[0] = 0x80; } else if (ctx->n_pending + sizeof (u64) + sizeof (u8) > ctx->block_size) { ctx->pending.as_u8[ctx->n_pending] = 0x80; if (ctx->block_size == SHA512_BLOCK_SIZE) clib_sha512_block (ctx, ctx->pending.as_u8, 1); else clib_sha256_block (ctx, ctx->pending.as_u8, 1); clib_memset (ctx->pending.as_u8, 0, ctx->block_size); } else ctx->pending.as_u8[ctx->n_pending] = 0x80; ctx->pending.as_u64[ctx->block_size / 8 - 1] = clib_net_to_host_u64 (ctx->total_bytes * 8); if (ctx->block_size == SHA512_BLOCK_SIZE) clib_sha512_block (ctx, ctx->pending.as_u8, 1); else clib_sha256_block (ctx, ctx->pending.as_u8, 1); if (ctx->block_size == SHA512_BLOCK_SIZE) { for (i = 0; i < ctx->digest_size / sizeof (u64); i++) *((u64 *) digest + i) = clib_net_to_host_u64 (ctx->h64[i]); /* sha512-224 case - write half of u64 */ if (i * sizeof (u64) < ctx->digest_size) *((u32 *) digest + 2 * i) = clib_net_to_host_u32 (ctx->h64[i] >> 32); } else for (i = 0; i < ctx->digest_size / sizeof (u32); i++) *((u32 *) digest + i) = clib_net_to_host_u32 (ctx->h32[i]); } static_always_inline void clib_sha2 (clib_sha2_type_t type, const u8 * msg, uword len, u8 * digest) { clib_sha2_ctx_t ctx; clib_sha2_init (&ctx, type); clib_sha2_update (&ctx, msg, len); clib_sha2_final (&ctx, digest); } #define clib_sha224(...) clib_sha2 (CLIB_SHA2_224, __VA_ARGS__) #define clib_sha256(...) clib_sha2 (CLIB_SHA2_256, __VA_ARGS__) #define clib_sha384(...) clib_sha2 (CLIB_SHA2_384, __VA_ARGS__) #define clib_sha512(...) clib_sha2 (CLIB_SHA2_512, __VA_ARGS__) #define clib_sha512_224(...) clib_sha2 (CLIB_SHA2_512_224, __VA_ARGS__) #define clib_sha512_256(...) 
clib_sha2 (CLIB_SHA2_512_256, __VA_ARGS__) static_always_inline void clib_hmac_sha2 (clib_sha2_type_t type, const u8 * key, uword key_len, const u8 * msg, uword len, u8 * digest) { clib_sha2_ctx_t _ctx, *ctx = &_ctx; uword key_data[SHA2_MAX_BLOCK_SIZE / sizeof (uword)]; u8 i_digest[SHA2_MAX_DIGEST_SIZE]; int i, n_words; clib_sha2_init (ctx, type); n_words = ctx->block_size / sizeof (uword); /* key */ if (key_len > ctx->block_size) { /* key is longer than block, calculate hash of key */ clib_sha2_update (ctx, key, key_len); for (i = (ctx->digest_size / sizeof (uword)) / 2; i < n_words; i++) key_data[i] = 0; clib_sha2_final (ctx, (u8 *) key_data); clib_sha2_init (ctx, type); } else { for (i = 0; i < n_words; i++) key_data[i] = 0; clib_memcpy_fast (key_data, key, key_len); } /* ipad */ for (i = 0; i < n_words; i++) ctx->pending.as_uword[i] = key_data[i] ^ (uword) 0x3636363636363636; if (ctx->block_size == SHA512_BLOCK_SIZE) clib_sha512_block (ctx, ctx->pending.as_u8, 1); else clib_sha256_block (ctx, ctx->pending.as_u8, 1); ctx->total_bytes += ctx->block_size; /* message */ clib_sha2_update (ctx, msg, len); clib_sha2_final (ctx, i_digest); /* opad */ clib_sha2_init (ctx, type); for (i = 0; i < n_words; i++) ctx->pending.as_uword[i] = key_data[i] ^ (uword) 0x5c5c5c5c5c5c5c5c; if (ctx->block_size == SHA512_BLOCK_SIZE) clib_sha512_block (ctx, ctx->pending.as_u8, 1); else clib_sha256_block (ctx, ctx->pending.as_u8, 1); ctx->total_bytes += ctx->block_size; /* digest */ clib_sha2_update (ctx, i_digest, ctx->digest_size); clib_sha2_final (ctx, digest); } #define clib_hmac_sha224(...) clib_hmac_sha2 (CLIB_SHA2_224, __VA_ARGS__) #define clib_hmac_sha256(...) clib_hmac_sha2 (CLIB_SHA2_256, __VA_ARGS__) #define clib_hmac_sha384(...) clib_hmac_sha2 (CLIB_SHA2_384, __VA_ARGS__) #define clib_hmac_sha512(...) clib_hmac_sha2 (CLIB_SHA2_512, __VA_ARGS__) #define clib_hmac_sha512_224(...) clib_hmac_sha2 (CLIB_SHA2_512_224, __VA_ARGS__) #define clib_hmac_sha512_256(...) clib_hmac_sha2 (CLIB_SHA2_512_256, __VA_ARGS__) #endif /* included_sha2_h */ /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */