author | Christian Ehrhardt <christian.ehrhardt@canonical.com> | 2017-09-21 11:34:38 +0200
---|---|---
committer | Christian Ehrhardt <christian.ehrhardt@canonical.com> | 2017-09-21 11:37:31 +0200
commit | 90fb1fd9c01fbb2f44af75c63adb65d721da88ff (patch) |
tree | 45252ac845d75865125a918863ee6897aa7a805e /lib |
parent | bf7567fd2a5b0b28ab724046143c24561d38d015 (diff) |
Imported Upstream version 17.05.2 (upstream-17.05-stable)
Change-Id: I562c7c338dad65639f764aea8b598ff6711acd54
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Diffstat (limited to 'lib')
24 files changed, 316 insertions, 130 deletions
diff --git a/lib/librte_cmdline/cmdline_parse.c b/lib/librte_cmdline/cmdline_parse.c index b8148808..56491eac 100644 --- a/lib/librte_cmdline/cmdline_parse.c +++ b/lib/librte_cmdline/cmdline_parse.c @@ -139,6 +139,21 @@ nb_common_chars(const char * s1, const char * s2) return i; } +/** Retrieve either static or dynamic token at a given index. */ +static cmdline_parse_token_hdr_t * +get_token(cmdline_parse_inst_t *inst, unsigned int index) +{ + cmdline_parse_token_hdr_t *token_p; + + /* check presence of static tokens first */ + if (inst->tokens[0] || !inst->f) + return inst->tokens[index]; + /* generate dynamic token */ + token_p = NULL; + inst->f(&token_p, NULL, &inst->tokens[index]); + return token_p; +} + /** * try to match the buffer with an instruction (only the first * nb_match_token tokens if != 0). Return 0 if we match all the @@ -146,27 +161,20 @@ nb_common_chars(const char * s1, const char * s2) */ static int match_inst(cmdline_parse_inst_t *inst, const char *buf, - unsigned int nb_match_token, void *resbuf, unsigned resbuf_size, - cmdline_parse_token_hdr_t - *(*dyn_tokens)[CMDLINE_PARSE_DYNAMIC_TOKENS]) + unsigned int nb_match_token, void *resbuf, unsigned resbuf_size) { - unsigned int token_num=0; cmdline_parse_token_hdr_t * token_p; unsigned int i=0; int n = 0; struct cmdline_token_hdr token_hdr; - token_p = inst->tokens[token_num]; - if (!token_p && dyn_tokens && inst->f) { - if (!(*dyn_tokens)[0]) - inst->f(&(*dyn_tokens)[0], NULL, dyn_tokens); - token_p = (*dyn_tokens)[0]; - } - if (token_p) + /* check if we match all tokens of inst */ + while (!nb_match_token || i < nb_match_token) { + token_p = get_token(inst, i); + if (!token_p) + break; memcpy(&token_hdr, token_p, sizeof(token_hdr)); - /* check if we match all tokens of inst */ - while (token_p && (!nb_match_token || i<nb_match_token)) { debug_printf("TK\n"); /* skip spaces */ while (isblank2(*buf)) { @@ -201,21 +209,6 @@ match_inst(cmdline_parse_inst_t *inst, const char *buf, debug_printf("TK parsed (len=%d)\n", n); i++; buf += n; - - token_num ++; - if (!inst->tokens[0]) { - if (token_num < (CMDLINE_PARSE_DYNAMIC_TOKENS - 1)) { - if (!(*dyn_tokens)[token_num]) - inst->f(&(*dyn_tokens)[token_num], - NULL, - dyn_tokens); - token_p = (*dyn_tokens)[token_num]; - } else - token_p = NULL; - } else - token_p = inst->tokens[token_num]; - if (token_p) - memcpy(&token_hdr, token_p, sizeof(token_hdr)); } /* does not match */ @@ -259,7 +252,6 @@ cmdline_parse(struct cmdline *cl, const char * buf) char buf[CMDLINE_PARSE_RESULT_BUFSIZE]; long double align; /* strong alignment constraint for buf */ } result, tmp_result; - cmdline_parse_token_hdr_t *dyn_tokens[CMDLINE_PARSE_DYNAMIC_TOKENS]; void (*f)(void *, struct cmdline *, void *) = NULL; void *data = NULL; int comment = 0; @@ -276,7 +268,6 @@ cmdline_parse(struct cmdline *cl, const char * buf) return CMDLINE_PARSE_BAD_ARGS; ctx = cl->ctx; - memset(&dyn_tokens, 0, sizeof(dyn_tokens)); /* * - look if the buffer contains at least one line @@ -322,7 +313,7 @@ cmdline_parse(struct cmdline *cl, const char * buf) /* fully parsed */ tok = match_inst(inst, buf, 0, tmp_result.buf, - sizeof(tmp_result.buf), &dyn_tokens); + sizeof(tmp_result.buf)); if (tok > 0) /* we matched at least one token */ err = CMDLINE_PARSE_BAD_ARGS; @@ -380,7 +371,6 @@ cmdline_complete(struct cmdline *cl, const char *buf, int *state, cmdline_parse_token_hdr_t *token_p; struct cmdline_token_hdr token_hdr; char tmpbuf[CMDLINE_BUFFER_SIZE], comp_buf[CMDLINE_BUFFER_SIZE]; - cmdline_parse_token_hdr_t 
*dyn_tokens[CMDLINE_PARSE_DYNAMIC_TOKENS]; unsigned int partial_tok_len; int comp_len = -1; int tmp_len = -1; @@ -400,7 +390,6 @@ cmdline_complete(struct cmdline *cl, const char *buf, int *state, debug_printf("%s called\n", __func__); memset(&token_hdr, 0, sizeof(token_hdr)); - memset(&dyn_tokens, 0, sizeof(dyn_tokens)); /* count the number of complete token to parse */ for (i=0 ; buf[i] ; i++) { @@ -424,23 +413,11 @@ cmdline_complete(struct cmdline *cl, const char *buf, int *state, while (inst) { /* parse the first tokens of the inst */ if (nb_token && - match_inst(inst, buf, nb_token, NULL, 0, - &dyn_tokens)) + match_inst(inst, buf, nb_token, NULL, 0)) goto next; debug_printf("instruction match\n"); - if (!inst->tokens[0]) { - if (nb_token < - (CMDLINE_PARSE_DYNAMIC_TOKENS - 1)) { - if (!dyn_tokens[nb_token]) - inst->f(&dyn_tokens[nb_token], - NULL, - &dyn_tokens); - token_p = dyn_tokens[nb_token]; - } else - token_p = NULL; - } else - token_p = inst->tokens[nb_token]; + token_p = get_token(inst, nb_token); if (token_p) memcpy(&token_hdr, token_p, sizeof(token_hdr)); @@ -531,20 +508,10 @@ cmdline_complete(struct cmdline *cl, const char *buf, int *state, inst = ctx[inst_num]; if (nb_token && - match_inst(inst, buf, nb_token, NULL, 0, &dyn_tokens)) + match_inst(inst, buf, nb_token, NULL, 0)) goto next2; - if (!inst->tokens[0]) { - if (nb_token < (CMDLINE_PARSE_DYNAMIC_TOKENS - 1)) { - if (!dyn_tokens[nb_token]) - inst->f(&dyn_tokens[nb_token], - NULL, - &dyn_tokens); - token_p = dyn_tokens[nb_token]; - } else - token_p = NULL; - } else - token_p = inst->tokens[nb_token]; + token_p = get_token(inst, nb_token); if (token_p) memcpy(&token_hdr, token_p, sizeof(token_hdr)); diff --git a/lib/librte_cmdline/cmdline_parse.h b/lib/librte_cmdline/cmdline_parse.h index 65b18d4f..13e086f2 100644 --- a/lib/librte_cmdline/cmdline_parse.h +++ b/lib/librte_cmdline/cmdline_parse.h @@ -83,9 +83,6 @@ extern "C" { /* maximum buffer size for parsed result */ #define CMDLINE_PARSE_RESULT_BUFSIZE 8192 -/* maximum number of dynamic tokens */ -#define CMDLINE_PARSE_DYNAMIC_TOKENS 128 - /** * Stores a pointer to the ops struct, and the offset: the place to * write the parsed result in the destination structure. @@ -137,20 +134,53 @@ struct cmdline; * When no tokens are defined (tokens[0] == NULL), they are retrieved * dynamically by calling f() as follows: * - * f((struct cmdline_token_hdr **)&token_hdr, - * NULL, - * (struct cmdline_token_hdr *[])tokens)); + * @code + * + * f((struct cmdline_token_hdr **)&token_p, + * NULL, + * (struct cmdline_token_hdr **)&inst->tokens[num]); + * + * @endcode * * The address of the resulting token is expected at the location pointed by * the first argument. Can be set to NULL to end the list. * * The cmdline argument (struct cmdline *) is always NULL. * - * The last argument points to the NULL-terminated list of dynamic tokens - * defined so far. Since token_hdr points to an index of that list, the - * current index can be derived as follows: + * The last argument points to the inst->tokens[] entry to retrieve, which + * is not necessarily inside allocated memory and should neither be read nor + * written. Its sole purpose is to deduce the token entry index of interest + * as described in the example below. + * + * Note about constraints: + * + * - Only the address of these tokens is dynamic, their storage should be + * static like normal tokens. + * - Dynamic token lists that need to maintain an internal context (e.g. 
in + * order to determine the next token) must store it statically also. This + * context must be reinitialized when the first token is requested, that + * is, when &inst->tokens[0] is provided as the third argument. + * - Dynamic token lists must be NULL-terminated to generate usable + * commands. + * + * @code + * + * // Assuming first and third arguments are respectively named "token_p" + * // and "token": + * + * int index = token - inst->tokens; + * + * if (!index) { + * [...] // Clean up internal context if any. + * } + * [...] // Then set up dyn_token according to index. + * + * if (no_more_tokens) + * *token_p = NULL; + * else + * *token_p = &dyn_token; * - * int index = token_hdr - &(*tokens)[0]; + * @endcode */ struct cmdline_inst { /* f(parsed_struct, data) */ diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c index b65cd9ce..bac6bdca 100644 --- a/lib/librte_cryptodev/rte_cryptodev.c +++ b/lib/librte_cryptodev/rte_cryptodev.c @@ -523,7 +523,7 @@ rte_cryptodev_count_devtype(enum rte_cryptodev_type type) } uint8_t -rte_cryptodev_devices_get(const char *dev_name, uint8_t *devices, +rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices, uint8_t nb_devices) { uint8_t i, count = 0; @@ -538,10 +538,10 @@ rte_cryptodev_devices_get(const char *dev_name, uint8_t *devices, if (drv) cmp = strncmp(drv->pci_drv.driver.name, - dev_name, strlen(dev_name)); + driver_name, strlen(driver_name)); else cmp = strncmp(devs[i].data->name, - dev_name, strlen(dev_name)); + driver_name, strlen(driver_name)); if (cmp == 0) devices[count++] = devs[i].data->dev_id; @@ -1032,8 +1032,8 @@ rte_cryptodev_stop(uint8_t dev_id) return; } - dev->data->dev_started = 0; (*dev->dev_ops->dev_stop)(dev); + dev->data->dev_started = 0; } int diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h index 88aeb873..af935ab9 100644 --- a/lib/librte_cryptodev/rte_cryptodev.h +++ b/lib/librte_cryptodev/rte_cryptodev.h @@ -463,9 +463,10 @@ extern uint8_t rte_cryptodev_count_devtype(enum rte_cryptodev_type type); /** - * Get number and identifiers of attached crypto device. + * Get number and identifiers of attached crypto devices that + * use the same crypto driver. * - * @param dev_name device name. + * @param driver_name driver name. * @param devices output devices identifiers. * @param nb_devices maximal number of devices. * @@ -473,7 +474,7 @@ rte_cryptodev_count_devtype(enum rte_cryptodev_type type); * Returns number of attached crypto device. 
*/ uint8_t -rte_cryptodev_devices_get(const char *dev_name, uint8_t *devices, +rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices, uint8_t nb_devices); /* * Return the NUMA socket to which a device is connected diff --git a/lib/librte_eal/bsdapp/contigmem/contigmem.c b/lib/librte_eal/bsdapp/contigmem/contigmem.c index da971deb..e8fb9087 100644 --- a/lib/librte_eal/bsdapp/contigmem/contigmem.c +++ b/lib/librte_eal/bsdapp/contigmem/contigmem.c @@ -50,24 +50,37 @@ __FBSDID("$FreeBSD$"); #include <vm/vm.h> #include <vm/pmap.h> +#include <vm/vm_param.h> #include <vm/vm_object.h> #include <vm/vm_page.h> #include <vm/vm_pager.h> +#include <vm/vm_phys.h> + +struct contigmem_buffer { + void *addr; + int refcnt; + struct mtx mtx; +}; + +struct contigmem_vm_handle { + int buffer_index; +}; static int contigmem_load(void); static int contigmem_unload(void); static int contigmem_physaddr(SYSCTL_HANDLER_ARGS); -static d_mmap_t contigmem_mmap; static d_mmap_single_t contigmem_mmap_single; static d_open_t contigmem_open; +static d_close_t contigmem_close; static int contigmem_num_buffers = RTE_CONTIGMEM_DEFAULT_NUM_BUFS; static int64_t contigmem_buffer_size = RTE_CONTIGMEM_DEFAULT_BUF_SIZE; static eventhandler_tag contigmem_eh_tag; -static void *contigmem_buffers[RTE_CONTIGMEM_MAX_NUM_BUFS]; +static struct contigmem_buffer contigmem_buffers[RTE_CONTIGMEM_MAX_NUM_BUFS]; static struct cdev *contigmem_cdev = NULL; +static int contigmem_refcnt; TUNABLE_INT("hw.contigmem.num_buffers", &contigmem_num_buffers); TUNABLE_QUAD("hw.contigmem.buffer_size", &contigmem_buffer_size); @@ -78,6 +91,8 @@ SYSCTL_INT(_hw_contigmem, OID_AUTO, num_buffers, CTLFLAG_RD, &contigmem_num_buffers, 0, "Number of contigmem buffers allocated"); SYSCTL_QUAD(_hw_contigmem, OID_AUTO, buffer_size, CTLFLAG_RD, &contigmem_buffer_size, 0, "Size of each contiguous buffer"); +SYSCTL_INT(_hw_contigmem, OID_AUTO, num_references, CTLFLAG_RD, + &contigmem_refcnt, 0, "Number of references to contigmem"); static SYSCTL_NODE(_hw_contigmem, OID_AUTO, physaddr, CTLFLAG_RD, 0, "physaddr"); @@ -114,42 +129,49 @@ MODULE_VERSION(contigmem, 1); static struct cdevsw contigmem_ops = { .d_name = "contigmem", .d_version = D_VERSION, - .d_mmap = contigmem_mmap, + .d_flags = D_TRACKCLOSE, .d_mmap_single = contigmem_mmap_single, .d_open = contigmem_open, + .d_close = contigmem_close, }; static int contigmem_load() { char index_string[8], description[32]; - int i; + int i, error = 0; + void *addr; if (contigmem_num_buffers > RTE_CONTIGMEM_MAX_NUM_BUFS) { printf("%d buffers requested is greater than %d allowed\n", contigmem_num_buffers, RTE_CONTIGMEM_MAX_NUM_BUFS); - return EINVAL; + error = EINVAL; + goto error; } if (contigmem_buffer_size < PAGE_SIZE || (contigmem_buffer_size & (contigmem_buffer_size - 1)) != 0) { printf("buffer size 0x%lx is not greater than PAGE_SIZE and " "power of two\n", contigmem_buffer_size); - return EINVAL; + error = EINVAL; + goto error; } for (i = 0; i < contigmem_num_buffers; i++) { - contigmem_buffers[i] = - contigmalloc(contigmem_buffer_size, M_CONTIGMEM, M_ZERO, 0, - BUS_SPACE_MAXADDR, contigmem_buffer_size, 0); - - if (contigmem_buffers[i] == NULL) { + addr = contigmalloc(contigmem_buffer_size, M_CONTIGMEM, M_ZERO, + 0, BUS_SPACE_MAXADDR, contigmem_buffer_size, 0); + if (addr == NULL) { printf("contigmalloc failed for buffer %d\n", i); - return ENOMEM; + error = ENOMEM; + goto error; } - printf("%2u: virt=%p phys=%p\n", i, contigmem_buffers[i], - (void *)pmap_kextract((vm_offset_t)contigmem_buffers[i])); + 
printf("%2u: virt=%p phys=%p\n", i, addr, + (void *)pmap_kextract((vm_offset_t)addr)); + + mtx_init(&contigmem_buffers[i].mtx, "contigmem", NULL, MTX_DEF); + contigmem_buffers[i].addr = addr; + contigmem_buffers[i].refcnt = 0; snprintf(index_string, sizeof(index_string), "%d", i); snprintf(description, sizeof(description), @@ -165,6 +187,17 @@ contigmem_load() GID_WHEEL, 0600, "contigmem"); return 0; + +error: + for (i = 0; i < contigmem_num_buffers; i++) { + if (contigmem_buffers[i].addr != NULL) + contigfree(contigmem_buffers[i].addr, + contigmem_buffer_size, M_CONTIGMEM); + if (mtx_initialized(&contigmem_buffers[i].mtx)) + mtx_destroy(&contigmem_buffers[i].mtx); + } + + return error; } static int @@ -172,16 +205,22 @@ contigmem_unload() { int i; + if (contigmem_refcnt > 0) + return EBUSY; + if (contigmem_cdev != NULL) destroy_dev(contigmem_cdev); if (contigmem_eh_tag != NULL) EVENTHANDLER_DEREGISTER(process_exit, contigmem_eh_tag); - for (i = 0; i < RTE_CONTIGMEM_MAX_NUM_BUFS; i++) - if (contigmem_buffers[i] != NULL) - contigfree(contigmem_buffers[i], contigmem_buffer_size, - M_CONTIGMEM); + for (i = 0; i < RTE_CONTIGMEM_MAX_NUM_BUFS; i++) { + if (contigmem_buffers[i].addr != NULL) + contigfree(contigmem_buffers[i].addr, + contigmem_buffer_size, M_CONTIGMEM); + if (mtx_initialized(&contigmem_buffers[i].mtx)) + mtx_destroy(&contigmem_buffers[i].mtx); + } return 0; } @@ -192,7 +231,7 @@ contigmem_physaddr(SYSCTL_HANDLER_ARGS) uint64_t physaddr; int index = (int)(uintptr_t)arg1; - physaddr = (uint64_t)vtophys(contigmem_buffers[index]); + physaddr = (uint64_t)vtophys(contigmem_buffers[index].addr); return sysctl_handle_64(oidp, &physaddr, 0, req); } @@ -200,22 +239,121 @@ static int contigmem_open(struct cdev *cdev, int fflags, int devtype, struct thread *td) { + + atomic_add_int(&contigmem_refcnt, 1); + return 0; } static int -contigmem_mmap(struct cdev *cdev, vm_ooffset_t offset, vm_paddr_t *paddr, - int prot, vm_memattr_t *memattr) +contigmem_close(struct cdev *cdev, int fflags, int devtype, + struct thread *td) { - *paddr = offset; + atomic_subtract_int(&contigmem_refcnt, 1); + return 0; } static int +contigmem_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, + vm_ooffset_t foff, struct ucred *cred, u_short *color) +{ + struct contigmem_vm_handle *vmh = handle; + struct contigmem_buffer *buf; + + buf = &contigmem_buffers[vmh->buffer_index]; + + atomic_add_int(&contigmem_refcnt, 1); + + mtx_lock(&buf->mtx); + if (buf->refcnt == 0) + memset(buf->addr, 0, contigmem_buffer_size); + buf->refcnt++; + mtx_unlock(&buf->mtx); + + return 0; +} + +static void +contigmem_cdev_pager_dtor(void *handle) +{ + struct contigmem_vm_handle *vmh = handle; + struct contigmem_buffer *buf; + + buf = &contigmem_buffers[vmh->buffer_index]; + + mtx_lock(&buf->mtx); + buf->refcnt--; + mtx_unlock(&buf->mtx); + + free(vmh, M_CONTIGMEM); + + atomic_subtract_int(&contigmem_refcnt, 1); +} + +static int +contigmem_cdev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot, + vm_page_t *mres) +{ + vm_paddr_t paddr; + vm_page_t m_paddr, page; + vm_memattr_t memattr, memattr1; + + memattr = object->memattr; + + VM_OBJECT_WUNLOCK(object); + + paddr = offset; + + m_paddr = vm_phys_paddr_to_vm_page(paddr); + if (m_paddr != NULL) { + memattr1 = pmap_page_get_memattr(m_paddr); + if (memattr1 != memattr) + memattr = memattr1; + } + + if (((*mres)->flags & PG_FICTITIOUS) != 0) { + /* + * If the passed in result page is a fake page, update it with + * the new physical address. 
+ */ + page = *mres; + VM_OBJECT_WLOCK(object); + vm_page_updatefake(page, paddr, memattr); + } else { + vm_page_t mret; + /* + * Replace the passed in reqpage page with our own fake page and + * free up the original page. + */ + page = vm_page_getfake(paddr, memattr); + VM_OBJECT_WLOCK(object); + mret = vm_page_replace(page, object, (*mres)->pindex); + KASSERT(mret == *mres, + ("invalid page replacement, old=%p, ret=%p", *mres, mret)); + vm_page_lock(mret); + vm_page_free(mret); + vm_page_unlock(mret); + *mres = page; + } + + page->valid = VM_PAGE_BITS_ALL; + + return VM_PAGER_OK; +} + +static struct cdev_pager_ops contigmem_cdev_pager_ops = { + .cdev_pg_ctor = contigmem_cdev_pager_ctor, + .cdev_pg_dtor = contigmem_cdev_pager_dtor, + .cdev_pg_fault = contigmem_cdev_pager_fault, +}; + +static int contigmem_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size, struct vm_object **obj, int nprot) { + struct contigmem_vm_handle *vmh; uint64_t buffer_index; /* @@ -227,10 +365,17 @@ contigmem_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size, if (buffer_index >= contigmem_num_buffers) return EINVAL; - memset(contigmem_buffers[buffer_index], 0, contigmem_buffer_size); - *offset = (vm_ooffset_t)vtophys(contigmem_buffers[buffer_index]); - *obj = vm_pager_allocate(OBJT_DEVICE, cdev, size, nprot, *offset, - curthread->td_ucred); + if (size > contigmem_buffer_size) + return EINVAL; + + vmh = malloc(sizeof(*vmh), M_CONTIGMEM, M_NOWAIT | M_ZERO); + if (vmh == NULL) + return ENOMEM; + vmh->buffer_index = buffer_index; + + *offset = (vm_ooffset_t)vtophys(contigmem_buffers[buffer_index].addr); + *obj = cdev_pager_allocate(vmh, OBJT_DEVICE, &contigmem_cdev_pager_ops, + size, nprot, *offset, curthread->td_ucred); return 0; } diff --git a/lib/librte_eal/common/eal_common_proc.c b/lib/librte_eal/common/eal_common_proc.c index 12e0fcac..60526cad 100644 --- a/lib/librte_eal/common/eal_common_proc.c +++ b/lib/librte_eal/common/eal_common_proc.c @@ -46,10 +46,10 @@ rte_eal_primary_proc_alive(const char *config_file_path) if (config_file_path) config_fd = open(config_file_path, O_RDONLY); else { - char default_path[PATH_MAX+1]; - snprintf(default_path, PATH_MAX, RUNTIME_CONFIG_FMT, - default_config_dir, "rte"); - config_fd = open(default_path, O_RDONLY); + const char *path; + + path = eal_runtime_config_path(); + config_fd = open(path, O_RDONLY); } if (config_fd < 0) return 0; diff --git a/lib/librte_eal/common/include/rte_malloc.h b/lib/librte_eal/common/include/rte_malloc.h index 008ce134..946db6f7 100644 --- a/lib/librte_eal/common/include/rte_malloc.h +++ b/lib/librte_eal/common/include/rte_malloc.h @@ -329,7 +329,7 @@ rte_malloc_set_limit(const char *type, size_t max); * @param addr * Adress obtained from a previous rte_malloc call * @return - * NULL on error + * RTE_BAD_PHYS_ADDR on error * otherwise return physical address of the buffer */ phys_addr_t diff --git a/lib/librte_eal/common/include/rte_version.h b/lib/librte_eal/common/include/rte_version.h index c36d8526..93cc0995 100644 --- a/lib/librte_eal/common/include/rte_version.h +++ b/lib/librte_eal/common/include/rte_version.h @@ -66,7 +66,7 @@ extern "C" { /** * Patch level number i.e. 
the z in yy.mm.z */ -#define RTE_VER_MINOR 1 +#define RTE_VER_MINOR 2 /** * Extra string to be appended to version number diff --git a/lib/librte_eal/common/rte_malloc.c b/lib/librte_eal/common/rte_malloc.c index f4a88352..5c0627bf 100644 --- a/lib/librte_eal/common/rte_malloc.c +++ b/lib/librte_eal/common/rte_malloc.c @@ -253,6 +253,8 @@ rte_malloc_virt2phy(const void *addr) { const struct malloc_elem *elem = malloc_elem_from_data(addr); if (elem == NULL) - return 0; + return RTE_BAD_PHYS_ADDR; + if (elem->ms->phys_addr == RTE_BAD_PHYS_ADDR) + return RTE_BAD_PHYS_ADDR; return elem->ms->phys_addr + ((uintptr_t)addr - (uintptr_t)elem->ms->addr); } diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c index ebe06833..cdce8459 100644 --- a/lib/librte_eal/linuxapp/eal/eal_memory.c +++ b/lib/librte_eal/linuxapp/eal/eal_memory.c @@ -137,6 +137,13 @@ test_phys_addrs_available(void) if (rte_xen_dom0_supported()) return; + if (!rte_eal_has_hugepages()) { + RTE_LOG(ERR, EAL, + "Started without hugepages support, physical addresses not available\n"); + phys_addrs_available = false; + return; + } + physaddr = rte_mem_virt2phy(&tmp); if (physaddr == RTE_BAD_PHYS_ADDR) { RTE_LOG(ERR, EAL, @@ -995,7 +1002,7 @@ rte_eal_hugepage_init(void) strerror(errno)); return -1; } - mcfg->memseg[0].phys_addr = (phys_addr_t)(uintptr_t)addr; + mcfg->memseg[0].phys_addr = RTE_BAD_PHYS_ADDR; mcfg->memseg[0].addr = addr; mcfg->memseg[0].hugepage_sz = RTE_PGSIZE_4K; mcfg->memseg[0].len = internal_config.memory; diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c index 83898a8f..2d442a9b 100644 --- a/lib/librte_ether/rte_ethdev.c +++ b/lib/librte_ether/rte_ethdev.c @@ -2351,6 +2351,7 @@ get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr) struct rte_eth_dev *dev = &rte_eth_devices[port_id]; unsigned i; + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); rte_eth_dev_info_get(port_id, &dev_info); for (i = 0; i < dev_info.max_mac_addrs; i++) diff --git a/lib/librte_ether/rte_ethdev_pci.h b/lib/librte_ether/rte_ethdev_pci.h index d3bc03cf..60730073 100644 --- a/lib/librte_ether/rte_ethdev_pci.h +++ b/lib/librte_ether/rte_ethdev_pci.h @@ -134,6 +134,12 @@ rte_eth_dev_pci_release(struct rte_eth_dev *eth_dev) eth_dev->data->dev_private = NULL; + /* + * Secondary process will check the name to attach. + * Clear this field to avoid attaching a released ports. 
+ */ + eth_dev->data->name[0] = '\0'; + eth_dev->device = NULL; eth_dev->intr_handle = NULL; } diff --git a/lib/librte_ether/rte_ether_version.map b/lib/librte_ether/rte_ether_version.map index d6726bb1..894b5349 100644 --- a/lib/librte_ether/rte_ether_version.map +++ b/lib/librte_ether/rte_ether_version.map @@ -151,6 +151,7 @@ DPDK_17.05 { rte_eth_dev_attach_secondary; rte_eth_find_next; + rte_eth_tx_done_cleanup; rte_eth_xstats_get_by_id; rte_eth_xstats_get_id_by_name; rte_eth_xstats_get_names_by_id; diff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c index 20afc3f0..755f9f74 100644 --- a/lib/librte_eventdev/rte_eventdev.c +++ b/lib/librte_eventdev/rte_eventdev.c @@ -301,7 +301,7 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports) sizeof(dev->data->links_map[0]) * nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV, RTE_CACHE_LINE_SIZE); - if (dev->data->links_map == NULL) { + if (links_map == NULL) { dev->data->nb_ports = 0; RTE_EDEV_LOG_ERR("failed to realloc mem for port_map," "nb_ports %u", nb_ports); diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c index 645c0cfa..37a81105 100644 --- a/lib/librte_hash/rte_cuckoo_hash.c +++ b/lib/librte_hash/rte_cuckoo_hash.c @@ -538,8 +538,10 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key, n_slots = rte_ring_mc_dequeue_burst(h->free_slots, cached_free_slots->objs, LCORE_CACHE_SIZE, NULL); - if (n_slots == 0) - return -ENOSPC; + if (n_slots == 0) { + ret = -ENOSPC; + goto failure; + } cached_free_slots->len += n_slots; } @@ -548,8 +550,10 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key, cached_free_slots->len--; slot_id = cached_free_slots->objs[cached_free_slots->len]; } else { - if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0) - return -ENOSPC; + if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0) { + ret = -ENOSPC; + goto failure; + } } new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size); @@ -659,6 +663,7 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key, /* Error in addition, store new slot back in the ring and return error */ enqueue_slot_back(h, cached_free_slots, (void *)((uintptr_t) new_idx)); +failure: if (h->add_key == ADD_KEY_MULTIWRITER) rte_spinlock_unlock(h->multiwriter_lock); return ret; diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h index 1cb03109..1276b3c4 100644 --- a/lib/librte_mbuf/rte_mbuf.h +++ b/lib/librte_mbuf/rte_mbuf.h @@ -1136,6 +1136,7 @@ static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp) * Array size * @return * - 0: Success + * - -ENOENT: Not enough entries in the mempool; no mbufs are retrieved. 
*/ static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs, unsigned count) @@ -1453,7 +1454,7 @@ static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v) */ static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m) { - __rte_mbuf_sanity_check(m, 1); + __rte_mbuf_sanity_check(m, 0); return m->data_off; } @@ -1467,7 +1468,7 @@ static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m) */ static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m) { - __rte_mbuf_sanity_check(m, 1); + __rte_mbuf_sanity_check(m, 0); return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) - m->data_len); } diff --git a/lib/librte_mbuf/rte_mbuf_ptype.h b/lib/librte_mbuf/rte_mbuf_ptype.h index a3269c4c..acd70bb6 100644 --- a/lib/librte_mbuf/rte_mbuf_ptype.h +++ b/lib/librte_mbuf/rte_mbuf_ptype.h @@ -341,11 +341,11 @@ extern "C" { * Packet format: * <'ether type'=0x0800 * | 'version'=4, 'protocol'=17 - * | 'destination port'=4798> + * | 'destination port'=4789> * or, * <'ether type'=0x86DD * | 'version'=6, 'next header'=17 - * | 'destination port'=4798> + * | 'destination port'=4789> */ #define RTE_PTYPE_TUNNEL_VXLAN 0x00003000 /** diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c index f65310f6..6fc3c9c7 100644 --- a/lib/librte_mempool/rte_mempool.c +++ b/lib/librte_mempool/rte_mempool.c @@ -476,7 +476,7 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr, /* required for xen_dom0 to get the machine address */ paddr = rte_mem_phy2mch(-1, paddr); - if (paddr == RTE_BAD_PHYS_ADDR) { + if (paddr == RTE_BAD_PHYS_ADDR && rte_eal_has_hugepages()) { ret = -EINVAL; goto fail; } diff --git a/lib/librte_metrics/rte_metrics.c b/lib/librte_metrics/rte_metrics.c index e9a122c1..dbbad328 100644 --- a/lib/librte_metrics/rte_metrics.c +++ b/lib/librte_metrics/rte_metrics.c @@ -144,6 +144,8 @@ rte_metrics_reg_names(const char * const *names, uint16_t cnt_names) entry = &stats->metadata[idx_name + stats->cnt_stats]; strncpy(entry->name, names[idx_name], RTE_METRICS_MAX_NAME_LEN); + /* Enforce NULL-termination */ + entry->name[RTE_METRICS_MAX_NAME_LEN - 1] = '\0'; memset(entry->value, 0, sizeof(entry->value)); entry->idx_next_stat = idx_name + stats->cnt_stats + 1; } diff --git a/lib/librte_metrics/rte_metrics.h b/lib/librte_metrics/rte_metrics.h index 0fa3104e..297300ad 100644 --- a/lib/librte_metrics/rte_metrics.h +++ b/lib/librte_metrics/rte_metrics.h @@ -118,7 +118,8 @@ void rte_metrics_init(int socket_id); * is required for updating said metric's value. * * @param name - * Metric name + * Metric name. If this exceeds RTE_METRICS_MAX_NAME_LEN (including + * the NULL terminator), it is truncated. * * @return * - Zero or positive: Success (index key of new metric) diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c index 5f98c33f..6f58fafe 100644 --- a/lib/librte_ring/rte_ring.c +++ b/lib/librte_ring/rte_ring.c @@ -189,7 +189,8 @@ rte_ring_create(const char *name, unsigned count, int socket_id, /* reserve a memory zone for this ring. 
If we can't get rte_config or * we are secondary process, the memzone_reserve function will set * rte_errno for us appropriately - hence no check in this this function */ - mz = rte_memzone_reserve(mz_name, ring_size, socket_id, mz_flags); + mz = rte_memzone_reserve_aligned(mz_name, ring_size, socket_id, + mz_flags, __alignof__(*r)); if (mz != NULL) { r = mz->addr; /* no need to check return value here, we already checked the diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h index 97f025a1..3400ed88 100644 --- a/lib/librte_ring/rte_ring.h +++ b/lib/librte_ring/rte_ring.h @@ -801,7 +801,7 @@ rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n, static inline int __attribute__((always_inline)) rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p) { - return rte_ring_mc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOBUFS; + return rte_ring_mc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT; } /** @@ -819,7 +819,7 @@ rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p) static inline int __attribute__((always_inline)) rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p) { - return rte_ring_sc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOBUFS; + return rte_ring_sc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT; } /** diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c index 1f565fbb..9ba4d3d7 100644 --- a/lib/librte_vhost/vhost.c +++ b/lib/librte_vhost/vhost.c @@ -272,7 +272,7 @@ rte_vhost_get_mtu(int vid, uint16_t *mtu) if (!(dev->flags & VIRTIO_DEV_READY)) return -EAGAIN; - if (!(dev->features & VIRTIO_NET_F_MTU)) + if (!(dev->features & (1ULL << VIRTIO_NET_F_MTU))) return -ENOTSUP; *mtu = dev->mtu; diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c index 48219e05..f8da78ab 100644 --- a/lib/librte_vhost/virtio_net.c +++ b/lib/librte_vhost/virtio_net.c @@ -114,11 +114,16 @@ update_shadow_used_ring(struct vhost_virtqueue *vq, static void virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr) { - if (m_buf->ol_flags & PKT_TX_L4_MASK) { + uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK; + + if (m_buf->ol_flags & PKT_TX_TCP_SEG) + csum_l4 |= PKT_TX_TCP_CKSUM; + + if (csum_l4) { net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len; - switch (m_buf->ol_flags & PKT_TX_L4_MASK) { + switch (csum_l4) { case PKT_TX_TCP_CKSUM: net_hdr->csum_offset = (offsetof(struct tcp_hdr, cksum)); @@ -138,6 +143,15 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr) ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0); } + /* IP cksum verification cannot be bypassed, then calculate here */ + if (m_buf->ol_flags & PKT_TX_IP_CKSUM) { + struct ipv4_hdr *ipv4_hdr; + + ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct ipv4_hdr *, + m_buf->l2_len); + ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr); + } + if (m_buf->ol_flags & PKT_TX_TCP_SEG) { if (m_buf->ol_flags & PKT_TX_IPV4) net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; @@ -601,9 +615,11 @@ static inline bool virtio_net_with_host_offload(struct virtio_net *dev) { if (dev->features & - (VIRTIO_NET_F_CSUM | VIRTIO_NET_F_HOST_ECN | - VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 | - VIRTIO_NET_F_HOST_UFO)) + ((1ULL << VIRTIO_NET_F_CSUM) | + (1ULL << VIRTIO_NET_F_HOST_ECN) | + (1ULL << VIRTIO_NET_F_HOST_TSO4) | + (1ULL << VIRTIO_NET_F_HOST_TSO6) | + (1ULL << VIRTIO_NET_F_HOST_UFO))) return true; return false; |
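The reworked comment in cmdline_parse.h above spells out the contract for dynamically generated tokens: when tokens[0] is NULL, the instruction's f() callback is invoked with a result pointer, a NULL cmdline, and the address of the inst->tokens[] entry being requested, and it must store either the address of a statically stored token or NULL to terminate the list. Below is a minimal sketch of a generator that follows that contract; every `my_*` name, the keyword list, and the `cl == NULL` dispatch are illustrative assumptions, not part of this patch.

```c
#include <stddef.h>
#include <cmdline.h>
#include <cmdline_parse.h>
#include <cmdline_parse_string.h>
#include <rte_common.h>

/* Result layout for the hypothetical command "foo bar baz". */
struct my_cmd_result {
	cmdline_fixed_string_t word;
};

static void my_cmd_cb(void *arg0, struct cmdline *cl, void *arg2);

/* tokens[0] == NULL, so the parser asks my_cmd_cb() for every token. */
static cmdline_parse_inst_t my_cmd = {
	.f = my_cmd_cb,
	.data = NULL,
	.help_str = "example command built from dynamically generated tokens",
	.tokens = { NULL },
};

/* Only the token's address is dynamic; its storage must be static. */
static cmdline_parse_token_string_t my_dyn_token;

static const char *const my_words[] = { "foo", "bar", "baz" };

static void
my_cmd_tok(cmdline_parse_token_hdr_t **token_p,
	   cmdline_parse_token_hdr_t **token)
{
	/* Deduce the requested entry index as documented above. */
	int index = token - my_cmd.tokens;

	if (index == 0) {
		/* First token requested: reset any internal context here. */
	}
	if (index >= (int)RTE_DIM(my_words)) {
		*token_p = NULL;	/* NULL-terminate the dynamic list. */
		return;
	}
	my_dyn_token.hdr.ops = &cmdline_token_string_ops;
	my_dyn_token.hdr.offset = offsetof(struct my_cmd_result, word);
	my_dyn_token.string_data.str = my_words[index];
	*token_p = (cmdline_parse_token_hdr_t *)&my_dyn_token;
}

static void
my_cmd_cb(void *arg0, struct cmdline *cl, void *arg2)
{
	/* Token-generation calls always pass a NULL cmdline. */
	if (cl == NULL) {
		my_cmd_tok(arg0, arg2);
		return;
	}
	cmdline_printf(cl, "my_cmd matched: %s\n",
		       ((struct my_cmd_result *)arg0)->word);
}
```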
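Several hunks above also change failure reporting from 0/NULL to RTE_BAD_PHYS_ADDR (rte_malloc_virt2phy() and the --no-huge memseg), so callers should test against that constant rather than NULL. A small hedged usage sketch follows; the helper name and error handling are illustrative.

```c
#include <rte_malloc.h>
#include <rte_memory.h>

/* Store the physical address of an rte_malloc()'d buffer in *pa and
 * return 0, or return -1 when no physical address is available
 * (e.g. when the EAL was started without hugepage support). */
static int
my_virt2phy(const void *buf, phys_addr_t *pa)
{
	phys_addr_t addr = rte_malloc_virt2phy(buf);

	if (addr == RTE_BAD_PHYS_ADDR)
		return -1;
	*pa = addr;
	return 0;
}
```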