/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */
#include <string.h>
#include <stdio.h>

#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "rte_table_hash.h"
#include "rte_lru.h"

#define KEYS_PER_BUCKET	4

#ifdef RTE_TABLE_STATS_COLLECT

#define RTE_TABLE_HASH_LRU_STATS_PKTS_IN_ADD(table, val) \
	table->stats.n_pkts_in += val
#define RTE_TABLE_HASH_LRU_STATS_PKTS_LOOKUP_MISS(table, val) \
	table->stats.n_pkts_lookup_miss += val

#else

#define RTE_TABLE_HASH_LRU_STATS_PKTS_IN_ADD(table, val)
#define RTE_TABLE_HASH_LRU_STATS_PKTS_LOOKUP_MISS(table, val)

#endif

struct bucket {
	union {
		struct bucket *next;
		uint64_t lru_list;
	};
	uint16_t sig[KEYS_PER_BUCKET];
	uint32_t key_pos[KEYS_PER_BUCKET];
};

struct grinder {
	struct bucket *bkt;
	uint64_t sig;
	uint64_t match;
	uint64_t match_pos;
	uint32_t key_index;
};

struct rte_table_hash {
	struct rte_table_stats stats;

	/* Input parameters */
	uint32_t key_size;
	uint32_t entry_size;
	uint32_t n_keys;
	uint32_t n_buckets;
	rte_table_hash_op_hash f_hash;
	uint64_t seed;
	uint32_t key_offset;

	/* Internal */
	uint64_t bucket_mask;
	uint32_t key_size_shl;
	uint32_t data_size_shl;
	uint32_t key_stack_tos;

	/* Grinder */
	struct grinder grinders[RTE_PORT_IN_BURST_SIZE_MAX];

	/* Tables */
	uint64_t *key_mask;
	struct bucket *buckets;
	uint8_t *key_mem;
	uint8_t *data_mem;
	uint32_t *key_stack; /* LIFO free list of key indices */

	/* Table memory */
	uint8_t memory[0] __rte_cache_aligned;
};

static int
keycmp(void *a, void *b, void *b_mask, uint32_t n_bytes)
{
	uint64_t *a64 = a, *b64 = b, *b_mask64 = b_mask;
	uint32_t i;

	for (i = 0; i < n_bytes / sizeof(uint64_t); i++)
		if (a64[i] != (b64[i] & b_mask64[i]))
			return 1;

	return 0;
}

static void
keycpy(void *dst, void *src, void *src_mask, uint32_t n_bytes)
{
	uint64_t *dst64 = dst, *src64 = src, *src_mask64 = src_mask;
	uint32_t i;

	for (i = 0; i < n_bytes / sizeof(uint64_t); i++)
		dst64[i] = src64[i] & src_mask64[i];
}

static int
check_params_create(struct rte_table_hash_params *params)
{
	/* name */
	if (params->name == NULL) {
		RTE_LOG(ERR, TABLE, "%s: name invalid value\n", __func__);
		return -EINVAL;
	}

	/* key_size */
	if ((params->key_size < sizeof(uint64_t)) ||
		(!rte_is_power_of_2(params->key_size))) {
		RTE_LOG(ERR, TABLE, "%s: key_size invalid value\n", __func__);
		return -EINVAL;
	}

	/* n_keys */
	if (params->n_keys == 0) {
		RTE_LOG(ERR, TABLE, "%s: n_keys invalid value\n", __func__);
		return -EINVAL;
	}

	/* n_buckets */
	if ((params->n_buckets == 0) ||
		(!rte_is_power_of_2(params->n_buckets))) {
		RTE_LOG(ERR, TABLE, "%s: n_buckets invalid value\n",
			__func__);
		return -EINVAL;
	}

	/* f_hash */
	if (params->f_hash == NULL) {
		RTE_LOG(ERR, TABLE, "%s: f_hash invalid value\n", __func__);
		return -EINVAL;
	}

	return 0;
}

static void *
rte_table_hash_lru_create(void *params, int socket_id, uint32_t entry_size)
{
	struct rte_table_hash_params *p = params;
	struct rte_table_hash *t;
	uint64_t table_meta_sz, key_mask_sz, bucket_sz, key_sz, key_stack_sz;
	uint64_t data_sz, total_size;
	uint64_t key_mask_offset, bucket_offset, key_offset, key_stack_offset;
	uint64_t data_offset;
	uint32_t n_buckets, i;

	/* Check input parameters */
	if ((check_params_create(p) != 0) ||
		(!rte_is_power_of_2(entry_size)) ||
		((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
		(sizeof(struct bucket) != (RTE_CACHE_LINE_SIZE / 2))) {
		return NULL;
	}

	/*
	 * Table dimensioning
	 *
	 * Objective: Pick the number of buckets (n_buckets) so that there is
	 * a chance to store n_keys keys in the table.
	 *
	 * Note: Since the buckets do not get extended, it is not possible to
	 * guarantee that n_keys keys can be stored in the table at any time.
	 * In the worst case, when all n_keys fall into the same bucket, only
	 * a maximum of KEYS_PER_BUCKET keys will be stored in the table. This
	 * case defeats the purpose of the hash table and indicates an
	 * unsuitable f_hash or n_keys to n_buckets ratio.
	 *
	 * MIN(n_buckets) = (n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET
	 *
	 * E.g. n_keys = 1000 gives MIN(n_buckets) = 250, which
	 * rte_align32pow2() then rounds up to 256.
	 */
	n_buckets = rte_align32pow2((p->n_keys + KEYS_PER_BUCKET - 1) /
		KEYS_PER_BUCKET);
	n_buckets = RTE_MAX(n_buckets, p->n_buckets);

	/* Memory allocation */
	table_meta_sz = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_table_hash));
	key_mask_sz = RTE_CACHE_LINE_ROUNDUP(p->key_size);
	bucket_sz = RTE_CACHE_LINE_ROUNDUP(n_buckets * sizeof(struct bucket));
	key_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * p->key_size);
	key_stack_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * sizeof(uint32_t));
	data_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * entry_size);
	total_size = table_meta_sz + key_mask_sz + bucket_sz + key_sz +
		key_stack_sz + data_sz;

	if (total_size > SIZE_MAX) {
		RTE_LOG(ERR, TABLE,
			"%s: Cannot allocate %" PRIu64 " bytes for hash "
			"table %s\n",
			__func__, total_size, p->name);
		return NULL;
	}

	t = rte_zmalloc_socket(p->name,
		(size_t)total_size,
		RTE_CACHE_LINE_SIZE,
		socket_id);
	if (t == NULL) {
		RTE_LOG(ERR, TABLE,
			"%s: Cannot allocate %" PRIu64 " bytes for hash "
			"table %s\n",
			__func__, total_size, p->name);
		return NULL;
	}

	RTE_LOG(INFO, TABLE, "%s (%u-byte key): Hash table %s memory footprint"
		" is %" PRIu64 " bytes\n",
		__func__, p->key_size, p->name, total_size);

	/* Memory initialization */
	t->key_size = p->key_size;
	t->entry_size = entry_size;
	t->n_keys = p->n_keys;
	t->n_buckets = n_buckets;
	t->f_hash = p->f_hash;
	t->seed = p->seed;
	t->key_offset = p->key_offset;

	/* Internal */
	t->bucket_mask = t->n_buckets - 1;
	t->key_size_shl = __builtin_ctzl(p->key_size);
	t->data_size_shl = __builtin_ctzl(entry_size);

	/* Tables */
	key_mask_offset = 0;
	bucket_offset = key_mask_offset + key_mask_sz;
	key_offset = bucket_offset + bucket_sz;
	key_stack_offset = key_offset + key_sz;
	data_offset = key_stack_offset + key_stack_sz;

	t->key_mask = (uint64_t *) &t->memory[key_mask_offset];
	t->buckets = (struct bucket *) &t->memory[bucket_offset];
	t->key_mem = &t->memory[key_offset];
	t->key_stack = (uint32_t *) &t->memory[key_stack_offset];
	t->data_mem = &t->memory[data_offset];

	/* Key mask */
	if (p->key_mask == NULL)
		memset(t->key_mask, 0xFF, p->key_size);
	else
		memcpy(t->key_mask, p->key_mask, p->key_size);

	/* Key stack */
	for (i = 0; i < t->n_keys; i++)
		t->key_stack[i] = t->n_keys - 1 - i;
	t->key_stack_tos = t->n_keys;

	/* LRU */
	for (i = 0; i < t->n_buckets; i++) {
		struct bucket *bkt = &t->buckets[i];

		lru_init(bkt);
	}

	return t;
}

static int
rte_table_hash_lru_free(void *table)
{
	struct rte_table_hash *t = table;

	/* Check input parameters */
	if (t == NULL)
		return -EINVAL;

	rte_free(t);
	return 0;
}

static int
rte_table_hash_lru_entry_add(void *table, void *key, void *entry,
	int *key_found, void **entry_ptr)
{
	struct rte_table_hash *t = table;
	struct bucket *bkt;
	uint64_t sig;
	uint32_t bkt_index, i;

	sig = t->f_hash(key, t->key_mask, t->key_size, t->seed);
	bkt_index = sig & t->bucket_mask;
	bkt = &t->buckets[bkt_index];
	/* Force a non-zero signature so it can never match an empty slot */
	sig = (sig >> 16) | 1LLU;
/* 
 *------------------------------------------------------------------
 * Copyright (c) 2005-2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef _CPEL_H_
#define _CPEL_H_ 1

typedef struct cpel_file_header_ {
    unsigned char endian_version;  /* bit 7: endianness flag, bits 0-6: version */
    unsigned char pad;
    unsigned short nsections;
    unsigned int file_date;
} cpel_file_header_t;

#define CPEL_FILE_LITTLE_ENDIAN	0x80
#define CPEL_FILE_VERSION       0x01
#define CPEL_FILE_VERSION_MASK  0x7F

typedef struct cpel_section_header_ {
    unsigned int section_type;
    unsigned int data_length;        /* does NOT include type and itself */
} cpel_section_header_t;

#define CPEL_SECTION_STRTAB	1
/* string at offset 0 is the name of the table */

#define CPEL_SECTION_SYMTAB     2
#define CPEL_SECTION_EVTDEF     3

typedef struct event_definition_section_header_ {
    char string_table_name[64];
    unsigned int number_of_event_definitions;
} event_definition_section_header_t;

typedef struct event_definition_ {
    unsigned int event;
    unsigned int event_format;
    unsigned int datum_format;
} event_definition_t;

#define CPEL_SECTION_TRACKDEF   4

typedef struct track_definition_section_header_ {
    char string_table_name[64];
    unsigned int number_of_track_definitions;
} track_definition_section_header_t;

typedef struct track_definition_ {
    unsigned int track;
    unsigned int track_format;
} track_definition_t;

#define CPEL_SECTION_EVENT      5

typedef struct event_section_header_ {
    char string_table_name[64];
    unsigned int number_of_events;
    unsigned int clock_ticks_per_second;
} event_section_header_t;

typedef struct event_entry_ {
    unsigned int time[2];          /* 64-bit timestamp: time[0] high word, time[1] low word */
    unsigned int track;
    unsigned int event_code;
    unsigned int event_datum;
} event_entry_t;

#define CPEL_NUM_SECTION_TYPES 5

#endif /* _CPEL_H_ */
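
/*
 * A minimal sketch (not part of cpel.h) of how the definitions above
 * compose into an on-disk CPEL file: one cpel_file_header_t, then
 * nsections sections, each a cpel_section_header_t followed by
 * data_length bytes of payload. Multi-byte fields are written in network
 * (big-endian) byte order here, on the assumption that a clear
 * CPEL_FILE_LITTLE_ENDIAN bit means a big-endian file. The function name
 * write_cpel_skeleton and the example string table are illustrative only.
 */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>		/* htons(), htonl() */

static int
write_cpel_skeleton(const char *path)
{
	FILE *fp = fopen(path, "wb");
	cpel_file_header_t fh;
	cpel_section_header_t sh;
	/* Per the note above: the string at offset 0 names the table */
	static const char strtab[] = "example_strtab\0hello";

	if (fp == NULL)
		return -1;

	/* File header: version in the low 7 bits, endian flag clear */
	memset(&fh, 0, sizeof(fh));
	fh.endian_version = CPEL_FILE_VERSION & CPEL_FILE_VERSION_MASK;
	fh.nsections = htons(1);
	fh.file_date = htonl(0);	/* e.g. a 32-bit UNIX timestamp */
	fwrite(&fh, sizeof(fh), 1, fp);

	/* One string-table section; data_length excludes this header */
	sh.section_type = htonl(CPEL_SECTION_STRTAB);
	sh.data_length = htonl(sizeof(strtab));
	fwrite(&sh, sizeof(sh), 1, fp);
	fwrite(strtab, sizeof(strtab), 1, fp);

	fclose(fp);
	return 0;
}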
	/* Pipeline stage 0 */
	lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);

	/* Pipeline stage 1 */
	lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);

	/* Pipeline stage 2 */
	lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);

	/*
	 * Pipeline run
	 *
	 */
	for ( ; pkts_mask; ) {
		/* Pipeline feed */
		pkt30_index = pkt20_index;
		pkt31_index = pkt21_index;
		pkt20_index = pkt10_index;
		pkt21_index = pkt11_index;
		pkt10_index = pkt00_index;
		pkt11_index = pkt01_index;

		/* Pipeline stage 0 */
		lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask,
			pkt00_index, pkt01_index);

		/* Pipeline stage 1 */
		lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);

		/* Pipeline stage 2 */
		lookup2_stage2(t, g, pkt20_index, pkt21_index,
			pkts_mask_match_many);

		/* Pipeline stage 3 */
		lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index,
			pkts_mask_out, entries);
	}

	/* Pipeline feed */
	pkt30_index = pkt20_index;
	pkt31_index = pkt21_index;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 1 */
	lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);

	/* Pipeline stage 2 */
	lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);

	/* Pipeline stage 3 */
	lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
		entries);

	/* Pipeline feed */
	pkt30_index = pkt20_index;
	pkt31_index = pkt21_index;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;

	/* Pipeline stage 2 */
	lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);

	/* Pipeline stage 3 */
	lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
		entries);

	/* Pipeline feed */
	pkt30_index = pkt20_index;
	pkt31_index = pkt21_index;

	/* Pipeline stage 3 */
	lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
		entries);

	/* Slow path: keys that matched more than one bucket signature */
	pkts_mask_match_many &= ~pkts_mask_out;
	if (pkts_mask_match_many) {
		uint64_t pkts_mask_out_slow = 0;

		status = rte_table_hash_lru_lookup_unoptimized(table, pkts,
			pkts_mask_match_many, &pkts_mask_out_slow, entries);
		pkts_mask_out |= pkts_mask_out_slow;
	}

	*lookup_hit_mask = pkts_mask_out;
	RTE_TABLE_HASH_LRU_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in -
		__builtin_popcountll(pkts_mask_out));
	return status;
}

static int
rte_table_hash_lru_stats_read(void *table, struct rte_table_stats *stats,
	int clear)
{
	struct rte_table_hash *t = table;

	if (stats != NULL)
		memcpy(stats, &t->stats, sizeof(t->stats));

	if (clear)
		memset(&t->stats, 0, sizeof(t->stats));

	return 0;
}

struct rte_table_ops rte_table_hash_lru_ops = {
	.f_create = rte_table_hash_lru_create,
	.f_free = rte_table_hash_lru_free,
	.f_add = rte_table_hash_lru_entry_add,
	.f_delete = rte_table_hash_lru_entry_delete,
	.f_add_bulk = NULL,
	.f_delete_bulk = NULL,
	.f_lookup = rte_table_hash_lru_lookup,
	.f_stats = rte_table_hash_lru_stats_read,
};
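
/*
 * An illustrative usage sketch (not part of this file), assuming the
 * rte_table_hash_params layout from rte_table_hash.h and an initialized
 * EAL. trivial_hash and hash_lru_example are hypothetical names; a real
 * application would plug in one of the DPDK-provided hash functions
 * instead of this toy fold.
 */
static uint64_t
trivial_hash(void *key, void *key_mask, uint32_t key_size, uint64_t seed)
{
	uint64_t *k = key, *m = key_mask;
	uint64_t hash = seed;
	uint32_t i;

	/* XOR-fold the masked key words; illustration only */
	for (i = 0; i < key_size / sizeof(uint64_t); i++)
		hash ^= k[i] & m[i];

	return hash;
}

static int
hash_lru_example(int socket_id)
{
	struct rte_table_hash_params params = {
		.name = "lru_example",
		.key_size = 16,			/* power of 2, >= 8 */
		.key_offset = 0,
		.key_mask = NULL,		/* match on the full key */
		.n_keys = 1 << 10,
		.n_buckets = 1 << 8,		/* power of 2 */
		.f_hash = trivial_hash,
		.seed = 0,
	};
	uint64_t key[2] = { 0x0102030405060708ULL, 0 };
	uint64_t entry = 0xABCD;	/* entry_size must also be a power of 2 */
	void *table, *entry_ptr;
	int key_found, status;

	table = rte_table_hash_lru_ops.f_create(&params, socket_id,
		sizeof(entry));
	if (table == NULL)
		return -1;

	status = rte_table_hash_lru_ops.f_add(table, key, &entry,
		&key_found, &entry_ptr);

	rte_table_hash_lru_ops.f_free(table);
	return status;
}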