Diffstat (limited to 'lib')
42 files changed, 72 insertions, 54 deletions
diff --git a/lib/librte_distributor/rte_distributor.c b/lib/librte_distributor/rte_distributor.c
index 57ad3397..6ad23013 100644
--- a/lib/librte_distributor/rte_distributor.c
+++ b/lib/librte_distributor/rte_distributor.c
@@ -442,7 +442,7 @@ rte_distributor_process_v1705(struct rte_distributor *d,
 /*
  * Uncommenting the next line will cause the find_match
- * function to be optimised out, making this function
+ * function to be optimized out, making this function
  * do parallel (non-atomic) distribution
  */
 /* matches[j] = 0; */
@@ -536,7 +536,7 @@ MAP_STATIC_SYMBOL(int rte_distributor_returned_pkts(struct rte_distributor *d,
 /*
  * Return the number of packets in-flight in a distributor, i.e. packets
- * being workered on or queued up in a backlog.
+ * being worked on or queued up in a backlog.
  */
 static inline unsigned int
 total_outstanding(const struct rte_distributor *d)
@@ -663,7 +663,7 @@ rte_distributor_create_v1705(const char *name,
 #endif
 /*
- * Set up the backog tags so they're pointing at the second cache
+ * Set up the backlog tags so they're pointing at the second cache
 * line for performance during flow matching
 */
 for (i = 0 ; i < num_workers ; i++)
diff --git a/lib/librte_distributor/rte_distributor.h b/lib/librte_distributor/rte_distributor.h
index 9b9efdbe..cbeed04d 100644
--- a/lib/librte_distributor/rte_distributor.h
+++ b/lib/librte_distributor/rte_distributor.h
@@ -71,7 +71,7 @@ struct rte_mbuf;
 * @param alg_type
 *   Call the legacy API, or use the new burst API. legacy uses 32-bit
 *   flow ID, and works on a single packet at a time. Latest uses 15-
- *   bit flow ID and works on up to 8 packets at a time to worers.
+ *   bit flow ID and works on up to 8 packets at a time to workers.
 * @return
 *   The newly created distributor instance
 */
diff --git a/lib/librte_distributor/rte_distributor_private.h b/lib/librte_distributor/rte_distributor_private.h
index 250b23e1..24f41b95 100644
--- a/lib/librte_distributor/rte_distributor_private.h
+++ b/lib/librte_distributor/rte_distributor_private.h
@@ -90,7 +90,7 @@ union rte_distributor_buffer_v20 {
 /*
  * Transfer up to 8 mbufs at a time to/from workers, and
- * flow matching algorithm optimised for 8 flow IDs at a time
+ * flow matching algorithm optimized for 8 flow IDs at a time
  */
 #define RTE_DIST_BURST_SIZE 8
diff --git a/lib/librte_distributor/rte_distributor_v20.c b/lib/librte_distributor/rte_distributor_v20.c
index 9adda52b..5be6efd4 100644
--- a/lib/librte_distributor/rte_distributor_v20.c
+++ b/lib/librte_distributor/rte_distributor_v20.c
@@ -345,7 +345,8 @@ rte_distributor_returned_pkts_v20(struct rte_distributor_v20 *d,
 VERSION_SYMBOL(rte_distributor_returned_pkts, _v20, 2.0);
 /* return the number of packets in-flight in a distributor, i.e. packets
- * being workered on or queued up in a backlog. */
+ * being worked on or queued up in a backlog.
+ */
 static inline unsigned
 total_outstanding(const struct rte_distributor_v20 *d)
 {
diff --git a/lib/librte_eal/common/eal_common_log.c b/lib/librte_eal/common/eal_common_log.c
index be404136..e894b75e 100644
--- a/lib/librte_eal/common/eal_common_log.c
+++ b/lib/librte_eal/common/eal_common_log.c
@@ -249,7 +249,7 @@ static const struct logtype logtype_strings[] = {
 	{RTE_LOGTYPE_USER8, "user8"}
 };
-/* Logging should be first initialzer (before drivers and bus) */
+/* Logging should be first initializer (before drivers and bus) */
 RTE_INIT_PRIO(rte_log_init, 101);
 static void
 rte_log_init(void)
diff --git a/lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h b/lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h
index c3a26192..e4dafda1 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h
@@ -225,7 +225,7 @@ rte_memcpy_func(void *dst, const void *src, size_t n)
 * We split the remaining bytes (which will be less than 256) into
 * 64byte (2^6) chunks.
 * Using incrementing integers in the case labels of a switch statement
- * enourages the compiler to use a jump table. To get incrementing
+ * encourages the compiler to use a jump table. To get incrementing
 * integers, we shift the 2 relevant bits to the LSB position to first
 * get decrementing integers, and then subtract.
 */
diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_memcpy.h b/lib/librte_eal/common/include/arch/ppc_64/rte_memcpy.h
index ca9d1dc5..75f74897 100644
--- a/lib/librte_eal/common/include/arch/ppc_64/rte_memcpy.h
+++ b/lib/librte_eal/common/include/arch/ppc_64/rte_memcpy.h
@@ -164,7 +164,7 @@ rte_memcpy_func(void *dst, const void *src, size_t n)
 * We split the remaining bytes (which will be less than 256) into
 * 64byte (2^6) chunks.
 * Using incrementing integers in the case labels of a switch statement
- * enourages the compiler to use a jump table. To get incrementing
+ * encourages the compiler to use a jump table. To get incrementing
 * integers, we shift the 2 relevant bits to the LSB position to first
 * get decrementing integers, and then subtract.
 */
diff --git a/lib/librte_eal/common/include/rte_eal.h b/lib/librte_eal/common/include/rte_eal.h
index 09b66819..8e4e71cc 100644
--- a/lib/librte_eal/common/include/rte_eal.h
+++ b/lib/librte_eal/common/include/rte_eal.h
@@ -217,7 +217,7 @@ int rte_eal_primary_proc_alive(const char *config_file_path);
 /**
 * Usage function typedef used by the application usage function.
 *
- * Use this function typedef to define and call rte_set_applcation_usage_hook()
+ * Use this function typedef to define and call rte_set_application_usage_hook()
 * routine.
 */
 typedef void (*rte_usage_hook_t)(const char * prgname);
diff --git a/lib/librte_eal/common/include/rte_log.h b/lib/librte_eal/common/include/rte_log.h
index 16564d41..6c2d3566 100644
--- a/lib/librte_eal/common/include/rte_log.h
+++ b/lib/librte_eal/common/include/rte_log.h
@@ -218,7 +218,7 @@ int rte_log_cur_msg_logtype(void);
 *   The string identifying the log type.
 * @return
 *   - >0: success, the returned value is the log type identifier.
- *   - (-ENONEM): cannot allocate memory.
+ *   - (-ENOMEM): cannot allocate memory.
 */
 int rte_log_register(const char *name);
diff --git a/lib/librte_eal/common/include/rte_random.h b/lib/librte_eal/common/include/rte_random.h
index 24ae8363..aeff1f05 100644
--- a/lib/librte_eal/common/include/rte_random.h
+++ b/lib/librte_eal/common/include/rte_random.h
@@ -88,4 +88,4 @@ rte_rand(void)
 #endif
-#endif /* _RTE_PER_LCORE_H_ */
+#endif /* _RTE_RANDOM_H_ */
diff --git a/lib/librte_eal/common/include/rte_version.h b/lib/librte_eal/common/include/rte_version.h
index d08cf48a..fa018074 100644
--- a/lib/librte_eal/common/include/rte_version.h
+++ b/lib/librte_eal/common/include/rte_version.h
@@ -78,7 +78,7 @@ extern "C" {
 * 0-15 = release candidates
 * 16 = release
 */
-#define RTE_VER_RELEASE 3
+#define RTE_VER_RELEASE 4
diff --git a/lib/librte_eal/common/malloc_elem.c b/lib/librte_eal/common/malloc_elem.c
index 889dffd2..98bcd37b 100644
--- a/lib/librte_eal/common/malloc_elem.c
+++ b/lib/librte_eal/common/malloc_elem.c
@@ -252,7 +252,7 @@ malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
 }
 /*
- * joing two struct malloc_elem together. elem1 and elem2 must
+ * join two struct malloc_elem together. elem1 and elem2 must
 * be contiguous in memory.
 */
 static inline void
diff --git a/lib/librte_eal/common/rte_service.c b/lib/librte_eal/common/rte_service.c
index 09b758c9..ae97e6b7 100644
--- a/lib/librte_eal/common/rte_service.c
+++ b/lib/librte_eal/common/rte_service.c
@@ -153,7 +153,7 @@ service_valid(uint32_t id)
 	service = &rte_services[id]; \
 } while (0)
-/* returns 1 if statistics should be colleced for service
+/* returns 1 if statistics should be collected for service
 * Returns 0 if statistics should not be collected for service
 */
 static inline int
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index a54b822a..16a181c3 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -344,7 +344,7 @@ void numa_error(char *where)
 * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
 * virtual address is stored in hugepg_tbl[i].orig_va, else it is stored
 * in hugepg_tbl[i].final_va. The second mapping (when orig is 0) tries to
- * map continguous physical blocks in contiguous virtual blocks.
+ * map contiguous physical blocks in contiguous virtual blocks.
 */
 static unsigned
 map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
diff --git a/lib/librte_eal/linuxapp/eal/eal_timer.c b/lib/librte_eal/linuxapp/eal/eal_timer.c
index 24349dab..a616928b 100644
--- a/lib/librte_eal/linuxapp/eal/eal_timer.c
+++ b/lib/librte_eal/linuxapp/eal/eal_timer.c
@@ -113,7 +113,7 @@ static pthread_t msb_inc_thread_id;
 /*
 * This function runs on a specific thread to update a global variable
- * containing used to process MSB of the HPET (unfortunatelly, we need
+ * containing used to process MSB of the HPET (unfortunately, we need
 * this because hpet is 32 bits by default under linux).
 */
 static void
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c b/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c
index 1c30d12b..5da7f91f 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c
+++ b/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c
@@ -241,7 +241,7 @@ static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
 	else
 		phy->ops.get_cable_length = e1000_get_cable_length_m88;
 	phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
-	/* Check if this PHY is confgured for media swap. */
+	/* Check if this PHY is configured for media swap. */
 	if (phy->id == M88E1112_E_PHY_ID) {
 		u16 data;
diff --git a/lib/librte_efd/rte_efd.c b/lib/librte_efd/rte_efd.c
index 8771d042..7d0b5cc6 100644
--- a/lib/librte_efd/rte_efd.c
+++ b/lib/librte_efd/rte_efd.c
@@ -952,7 +952,7 @@ revert_groups(struct efd_offline_group_rules *previous_group,
 *   This operation was still successful, and entry contains a valid update
 * RTE_EFD_UPDATE_FAILED
 *   Either the EFD failed to find a suitable perfect hash or the group was full
- *   This is a fatal error, and the table is now in an indeterminite state
+ *   This is a fatal error, and the table is now in an indeterminate state
 * RTE_EFD_UPDATE_NO_CHANGE
 *   Operation resulted in no change to the table (same value already exists)
 * 0
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 18e474db..341c2d62 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -1062,7 +1062,7 @@ struct rte_eth_rxq_info {
 /**
 * Ethernet device TX queue information structure.
- * Used to retieve information about configured queue.
+ * Used to retrieve information about configured queue.
 */
 struct rte_eth_txq_info {
 	struct rte_eth_txconf conf; /**< queue config parameters. */
diff --git a/lib/librte_ether/rte_tm_driver.h b/lib/librte_ether/rte_tm_driver.h
index b2e8ccf8..2376943d 100644
--- a/lib/librte_ether/rte_tm_driver.h
+++ b/lib/librte_ether/rte_tm_driver.h
@@ -183,7 +183,7 @@ typedef int (*rte_tm_node_stats_update_t)(struct rte_eth_dev *dev,
 typedef int (*rte_tm_node_wfq_weight_mode_update_t)(
 	struct rte_eth_dev *dev,
 	uint32_t node_id,
-	int *wfq_weigth_mode,
+	int *wfq_weight_mode,
 	uint32_t n_sp_priorities,
 	struct rte_tm_error *error);
diff --git a/lib/librte_gro/gro_tcp4.h b/lib/librte_gro/gro_tcp4.h
index f41dcee3..0a817162 100644
--- a/lib/librte_gro/gro_tcp4.h
+++ b/lib/librte_gro/gro_tcp4.h
@@ -116,7 +116,7 @@ struct gro_tcp4_tbl {
 * This function creates a TCP/IPv4 reassembly table.
 *
 * @param socket_id
- *  socket index for allocating TCP/IPv4 reassemblt table
+ *  socket index for allocating TCP/IPv4 reassemble table
 * @param max_flow_num
 *  the maximum number of flows in the TCP/IPv4 GRO table
 * @param max_item_per_flow
diff --git a/lib/librte_gso/rte_gso.h b/lib/librte_gso/rte_gso.h
index 4b77176f..dbaedec7 100644
--- a/lib/librte_gso/rte_gso.h
+++ b/lib/librte_gso/rte_gso.h
@@ -103,7 +103,7 @@ struct rte_gso_ctx {
 * Before calling rte_gso_segment(), applications must set proper ol_flags
 * for the packet. The GSO library uses the same macros as that of TSO.
 * For example, set PKT_TX_TCP_SEG and PKT_TX_IPV4 in ol_flags to segment
- * a TCP/IPv4 packet. If rte_gso_segment() succceds, the PKT_TX_TCP_SEG
+ * a TCP/IPv4 packet. If rte_gso_segment() succeeds, the PKT_TX_TCP_SEG
 * flag is removed for all GSO segments and the input packet.
 *
 * Each of the newly-created GSO segments is organized as a two-segment
diff --git a/lib/librte_ip_frag/ip_frag_internal.c b/lib/librte_ip_frag/ip_frag_internal.c
index 09b755c9..46c44fff 100644
--- a/lib/librte_ip_frag/ip_frag_internal.c
+++ b/lib/librte_ip_frag/ip_frag_internal.c
@@ -160,7 +160,7 @@ ip_frag_process(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr,
 	}
 	/*
-	 * errorneous packet: either exceeed max allowed number of fragments,
+	 * erroneous packet: either exceed max allowed number of fragments,
 	 * or duplicate first/last fragment encountered.
 	 */
 	if (idx >= sizeof (fp->frags) / sizeof (fp->frags[0])) {
diff --git a/lib/librte_ip_frag/rte_ip_frag.h b/lib/librte_ip_frag/rte_ip_frag.h
index 35d0ecc3..9f8cede8 100644
--- a/lib/librte_ip_frag/rte_ip_frag.h
+++ b/lib/librte_ip_frag/rte_ip_frag.h
@@ -70,7 +70,7 @@ struct ip_frag {
 	struct rte_mbuf *mb; /**< fragment mbuf */
 };
-/** @internal <src addr, dst_addr, id> to uniquely indetify fragmented datagram. */
+/** @internal <src addr, dst_addr, id> to uniquely identify fragmented datagram. */
 struct ip_frag_key {
 	uint64_t src_dst[4]; /**< src address, first 8 bytes used for IPv4 */
 	uint32_t id; /**< dst address */
@@ -118,7 +118,7 @@ struct rte_ip_frag_tbl {
 	uint32_t entry_mask; /**< hash value mask. */
 	uint32_t max_entries; /**< max entries allowed. */
 	uint32_t use_entries; /**< entries in use. */
-	uint32_t bucket_entries; /**< hash assocaitivity. */
+	uint32_t bucket_entries; /**< hash associativity. */
 	uint32_t nb_entries; /**< total size of the table. */
 	uint32_t nb_buckets; /**< num of associativity lines. */
 	struct ip_frag_pkt *last; /**< last used entry. */
@@ -303,7 +303,7 @@ int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
 * @param ip_hdr
 *   Pointer to the IPV4 header inside the fragment.
 * @return
- *   Pointer to mbuf for reassebled packet, or NULL if:
+ *   Pointer to mbuf for reassembled packet, or NULL if:
 *   - an error occurred.
 *   - not all fragments of the packet are collected yet.
 */
diff --git a/lib/librte_ip_frag/rte_ipv4_reassembly.c b/lib/librte_ip_frag/rte_ipv4_reassembly.c
index b1330896..040bd70a 100644
--- a/lib/librte_ip_frag/rte_ipv4_reassembly.c
+++ b/lib/librte_ip_frag/rte_ipv4_reassembly.c
@@ -93,7 +93,7 @@ ipv4_frag_reassemble(struct ip_frag_pkt *fp)
 	/* update mbuf fields for reassembled packet. */
 	m->ol_flags |= PKT_TX_IP_CKSUM;
-	/* update ipv4 header for the reassmebled packet */
+	/* update ipv4 header for the reassembled packet */
 	ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
 	ip_hdr->total_length = rte_cpu_to_be_16((uint16_t)(fp->total_size +
@@ -117,7 +117,7 @@ ipv4_frag_reassemble(struct ip_frag_pkt *fp)
 * @param ip_hdr
 *   Pointer to the IPV4 header inside the fragment.
 * @return
- *   Pointer to mbuf for reassebled packet, or NULL if:
+ *   Pointer to mbuf for reassembled packet, or NULL if:
 *   - an error occurred.
 *   - not all fragments of the packet are collected yet.
 */
diff --git a/lib/librte_jobstats/rte_jobstats.h b/lib/librte_jobstats/rte_jobstats.h
index 70e034ca..e1591562 100644
--- a/lib/librte_jobstats/rte_jobstats.h
+++ b/lib/librte_jobstats/rte_jobstats.h
@@ -313,7 +313,7 @@ rte_jobstats_set_max(struct rte_jobstats *job, uint64_t period);
 *
 * @param job
 *  Job object.
- * @param update_pedriod_cb
+ * @param update_period_cb
 *  Callback to set. If NULL restore default update function.
 */
 void
diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
index 5ee38e9a..8eca8c03 100644
--- a/lib/librte_kni/rte_kni.c
+++ b/lib/librte_kni/rte_kni.c
@@ -340,7 +340,7 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
 	/* Get an available slot from the pool */
 	slot = kni_memzone_pool_alloc();
 	if (!slot) {
-		RTE_LOG(ERR, KNI, "Cannot allocate more KNI interfaces; increase the number of max_kni_ifaces(current %d) or release unusued ones.\n",
+		RTE_LOG(ERR, KNI, "Cannot allocate more KNI interfaces; increase the number of max_kni_ifaces(current %d) or release unused ones.\n",
 			kni_memzone_pool.max_ifaces);
 		return NULL;
 	}
@@ -659,7 +659,7 @@ kni_allocate_mbufs(struct rte_kni *kni)
 		phys[i] = va2pa(pkts[i]);
 	}
-	/* No pkt mbuf alocated */
+	/* No pkt mbuf allocated */
 	if (i <= 0)
 		return;
diff --git a/lib/librte_kni/rte_kni.h b/lib/librte_kni/rte_kni.h
index d1950791..d43b5b28 100644
--- a/lib/librte_kni/rte_kni.h
+++ b/lib/librte_kni/rte_kni.h
@@ -228,7 +228,7 @@ const char *rte_kni_get_name(const struct rte_kni *kni);
 * @param kni
 *  pointer to struct rte_kni.
 * @param ops
- *  ponter to struct rte_kni_ops.
+ *  pointer to struct rte_kni_ops.
 *
 * @return
 *  On success: 0
diff --git a/lib/librte_kni/rte_kni_fifo.h b/lib/librte_kni/rte_kni_fifo.h
index c7cd5c26..6f2c3cb3 100644
--- a/lib/librte_kni/rte_kni_fifo.h
+++ b/lib/librte_kni/rte_kni_fifo.h
@@ -73,7 +73,7 @@ kni_fifo_put(struct rte_kni_fifo *fifo, void **data, unsigned num)
 }
 /**
- * Get up to num elements from the fifo. Return the number actully read
+ * Get up to num elements from the fifo. Return the number actually read
 */
 static inline unsigned
 kni_fifo_get(struct rte_kni_fifo *fifo, void **data, unsigned num)
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 6d91f7d3..7e326bbc 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -850,10 +850,10 @@ rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
 } while (0)
 /**
- * Allocate an unitialized mbuf from mempool *mp*.
+ * Allocate an uninitialized mbuf from mempool *mp*.
 *
 * This function can be used by PMDs (especially in RX functions) to
- * allocate an unitialized mbuf. The driver is responsible of
+ * allocate an uninitialized mbuf. The driver is responsible of
 * initializing all the required fields. See rte_pktmbuf_reset().
 * For standard needs, prefer rte_pktmbuf_alloc().
 *
@@ -1778,7 +1778,7 @@ const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
 * @param len
 *   The amount of bytes to read.
 * @param buf
- *   The buffer where data is copied if it is not contigous in mbuf
+ *   The buffer where data is copied if it is not contiguous in mbuf
 *   data. Its length should be at least equal to the len parameter.
 * @return
 *   The pointer to the data, either in the mbuf if it is contiguous,
diff --git a/lib/librte_net/net_crc_neon.h b/lib/librte_net/net_crc_neon.h
index 201b2c88..cb8f63d9 100644
--- a/lib/librte_net/net_crc_neon.h
+++ b/lib/librte_net/net_crc_neon.h
@@ -64,7 +64,7 @@ struct crc_pmull_ctx crc16_ccitt_pmull __rte_aligned(16);
 * FOLD = XOR(T1, T2, DATA)
 *
 * @param data_block 16 byte data block
- * @param precomp precomputed rk1 constanst
+ * @param precomp precomputed rk1 constant
 * @param fold running 16 byte folded data
 *
 * @return New 16 byte folded data
diff --git a/lib/librte_net/net_crc_sse.h b/lib/librte_net/net_crc_sse.h
index ac93637b..7eae1479 100644
--- a/lib/librte_net/net_crc_sse.h
+++ b/lib/librte_net/net_crc_sse.h
@@ -66,7 +66,7 @@ struct crc_pclmulqdq_ctx crc16_ccitt_pclmulqdq __rte_aligned(16);
 * @param data_block
 *   16 byte data block
 * @param precomp
- *   Precomputed rk1 constanst
+ *   Precomputed rk1 constant
 * @param fold
 *   Current16 byte folded data
 *
diff --git a/lib/librte_net/rte_ip.h b/lib/librte_net/rte_ip.h
index 4491b86e..73ec398f 100644
--- a/lib/librte_net/rte_ip.h
+++ b/lib/librte_net/rte_ip.h
@@ -237,7 +237,7 @@ rte_raw_cksum(const void *buf, size_t len)
 * @param off
 *   The offset in bytes to start the checksum.
 * @param len
- *   The length in bytes of the data to ckecksum.
+ *   The length in bytes of the data to checksum.
 * @param cksum
 *   A pointer to the checksum, filled on success.
 * @return
diff --git a/lib/librte_pdump/rte_pdump.c b/lib/librte_pdump/rte_pdump.c
index e6182d35..29a6c99b 100644
--- a/lib/librte_pdump/rte_pdump.c
+++ b/lib/librte_pdump/rte_pdump.c
@@ -153,6 +153,8 @@ pdump_pktmbuf_copy(struct rte_mbuf *m, struct rte_mempool *mp)
 	do {
 		nseg++;
 		if (pdump_pktmbuf_copy_data(seg, m) < 0) {
+			if (seg != m_dup)
+				rte_pktmbuf_free_seg(seg);
 			rte_pktmbuf_free(m_dup);
 			return NULL;
 		}
@@ -225,7 +227,7 @@ pdump_tx(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
 }
 static int
-pdump_regitser_rx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
+pdump_register_rx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
 		struct rte_ring *ring, struct rte_mempool *mp,
 		uint16_t operation)
 {
@@ -279,7 +281,7 @@ pdump_regitser_rx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
 }
 static int
-pdump_regitser_tx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
+pdump_register_tx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
 		struct rte_ring *ring, struct rte_mempool *mp,
 		uint16_t operation)
 {
@@ -400,7 +402,7 @@ set_pdump_rxtx_cbs(struct pdump_request *p)
 	/* register RX callback */
 	if (flags & RTE_PDUMP_FLAG_RX) {
 		end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_rx_q : queue + 1;
-		ret = pdump_regitser_rx_callbacks(end_q, port, queue, ring, mp,
+		ret = pdump_register_rx_callbacks(end_q, port, queue, ring, mp,
 						operation);
 		if (ret < 0)
 			return ret;
@@ -409,7 +411,7 @@ set_pdump_rxtx_cbs(struct pdump_request *p)
 	/* register TX callback */
 	if (flags & RTE_PDUMP_FLAG_TX) {
 		end_q = (queue == RTE_PDUMP_ALL_QUEUES) ?
 				nb_tx_q : queue + 1;
-		ret = pdump_regitser_tx_callbacks(end_q, port, queue, ring, mp,
+		ret = pdump_register_tx_callbacks(end_q, port, queue, ring, mp,
 						operation);
 		if (ret < 0)
 			return ret;
diff --git a/lib/librte_pipeline/rte_pipeline.h b/lib/librte_pipeline/rte_pipeline.h
index f3663483..fdc44a79 100644
--- a/lib/librte_pipeline/rte_pipeline.h
+++ b/lib/librte_pipeline/rte_pipeline.h
@@ -483,7 +483,7 @@ int rte_pipeline_table_entry_delete(struct rte_pipeline *p,
 * @param keys
 *   Array containing table entry keys
 * @param entries
- *   Array containung new contents for every table entry identified by key
+ *   Array containing new contents for every table entry identified by key
 * @param n_keys
 *   Number of keys to add
 * @param key_found
diff --git a/lib/librte_power/rte_power_acpi_cpufreq.c b/lib/librte_power/rte_power_acpi_cpufreq.c
index 01ac5acb..6b0cdb2e 100644
--- a/lib/librte_power/rte_power_acpi_cpufreq.c
+++ b/lib/librte_power/rte_power_acpi_cpufreq.c
@@ -267,7 +267,7 @@ power_get_available_freqs(struct rte_power_info *pi)
 	}
 	ret = 0;
-	POWER_DEBUG_TRACE("%d frequencie(s) of lcore %u are available\n",
+	POWER_DEBUG_TRACE("%d frequency(s) of lcore %u are available\n",
 			count, pi->lcore_id);
 out:
 	fclose(f);
@@ -359,7 +359,7 @@ rte_power_acpi_cpufreq_init(unsigned lcore_id)
 	}
 	RTE_LOG(INFO, POWER, "Initialized successfully for lcore %u "
-			"power manamgement\n", lcore_id);
+			"power management\n", lcore_id);
 	rte_atomic32_cmpset(&(pi->state), POWER_ONGOING, POWER_USED);
 	return 0;
diff --git a/lib/librte_power/rte_power_acpi_cpufreq.h b/lib/librte_power/rte_power_acpi_cpufreq.h
index eee0ca0a..bc20dfd6 100644
--- a/lib/librte_power/rte_power_acpi_cpufreq.h
+++ b/lib/librte_power/rte_power_acpi_cpufreq.h
@@ -180,7 +180,7 @@ int rte_power_acpi_cpufreq_freq_max(unsigned lcore_id);
 *
 * @return
 *  - 1 on success with frequency changed.
- *  - 0 on success without frequency chnaged.
+ *  - 0 on success without frequency changed.
 *  - Negative on error.
 */
 int rte_power_acpi_cpufreq_freq_min(unsigned lcore_id);
diff --git a/lib/librte_reorder/rte_reorder.h b/lib/librte_reorder/rte_reorder.h
index 4cd8de76..dc83f8e6 100644
--- a/lib/librte_reorder/rte_reorder.h
+++ b/lib/librte_reorder/rte_reorder.h
@@ -147,9 +147,9 @@ rte_reorder_free(struct rte_reorder_buffer *b);
 *   -1 on error
 *   On error case, rte_errno will be set appropriately:
 *   - ENOSPC - Cannot move existing mbufs from reorder buffer to accommodate
- *     ealry mbuf, but it can be accommodated by performing drain and then insert.
+ *     early mbuf, but it can be accommodated by performing drain and then insert.
 *   - ERANGE - Too early or late mbuf which is vastly out of range of expected
- *     window should be ingnored without any handling.
+ *     window should be ignored without any handling.
 */
 int rte_reorder_insert(struct rte_reorder_buffer *b, struct rte_mbuf *mbuf);
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 5e9b3b7b..e9244381 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -409,6 +409,12 @@ __rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
 		n = max;
 		*old_head = r->prod.head;
+
+		/* add rmb barrier to avoid load/load reorder in weak
+		 * memory model. It is noop on x86
+		 */
+		rte_smp_rmb();
+
 		const uint32_t cons_tail = r->cons.tail;
 		/*
 		 * The subtraction is done between two unsigned 32bits value
@@ -517,6 +523,12 @@ __rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
 		n = max;
 		*old_head = r->cons.head;
+
+		/* add rmb barrier to avoid load/load reorder in weak
+		 * memory model. It is noop on x86
+		 */
+		rte_smp_rmb();
+
 		const uint32_t prod_tail = r->prod.tail;
 		/* The subtraction is done between two unsigned 32bits value
 		 * (the result is always modulo 32 bits even if we have
diff --git a/lib/librte_sched/rte_red.h b/lib/librte_sched/rte_red.h
index ca122275..6edf914f 100644
--- a/lib/librte_sched/rte_red.h
+++ b/lib/librte_sched/rte_red.h
@@ -139,7 +139,7 @@ rte_red_config_init(struct rte_red_config *red_cfg,
 /**
 * @brief Generate random number for RED
 *
- * Implemenetation based on:
+ * Implementation based on:
 * http://software.intel.com/en-us/articles/fast-random-number-generator-on-the-intel-pentiumr-4-processor/
 *
 * 10 bit shift has been found through empirical tests (was 16).
@@ -200,7 +200,7 @@ __rte_red_calc_qempty_factor(uint8_t wq_log2, uint16_t m)
 	*  Now using basic math we compute 2^n:
 	*  2^(f+n) = 2^f * 2^n
 	*  2^f - we use lookup table
-	*  2^n - can be replaced with bit shift right oeprations
+	*  2^n - can be replaced with bit shift right operations
 	*/
 	f = (n >> 6) & 0xf;
diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
index a2d0d685..7252f850 100644
--- a/lib/librte_sched/rte_sched.c
+++ b/lib/librte_sched/rte_sched.c
@@ -1020,7 +1020,7 @@ rte_sched_subport_read_stats(struct rte_sched_port *port,
 	memcpy(stats, &s->stats, sizeof(struct rte_sched_subport_stats));
 	memset(&s->stats, 0, sizeof(struct rte_sched_subport_stats));
-	/* Subport TC ovesubscription status */
+	/* Subport TC oversubscription status */
 	*tc_ov = s->tc_ov;
 	return 0;
diff --git a/lib/librte_security/rte_security.h b/lib/librte_security/rte_security.h
index 7e687d29..653929b9 100644
--- a/lib/librte_security/rte_security.h
+++ b/lib/librte_security/rte_security.h
@@ -266,6 +266,7 @@ struct rte_security_session_conf {
 	/**< Type of action to be performed on the session */
 	enum rte_security_session_protocol protocol;
 	/**< Security protocol to be configured */
+	RTE_STD_C11
 	union {
 		struct rte_security_ipsec_xform ipsec;
 		struct rte_security_macsec_xform macsec;
@@ -406,6 +407,7 @@ struct rte_security_stats {
 	enum rte_security_session_protocol protocol;
 	/**< Security protocol to be configured */
+	RTE_STD_C11
 	union {
 		struct rte_security_macsec_stats macsec;
 		struct rte_security_ipsec_stats ipsec;
@@ -486,6 +488,7 @@ struct rte_security_capability_idx {
 	enum rte_security_session_action_type action;
 	enum rte_security_session_protocol protocol;
+	RTE_STD_C11
 	union {
 		struct {
 			enum rte_security_ipsec_sa_protocol proto;
diff --git a/lib/librte_timer/rte_timer.c b/lib/librte_timer/rte_timer.c
index 28decc39..88826f57 100644
--- a/lib/librte_timer/rte_timer.c
+++ b/lib/librte_timer/rte_timer.c
@@ -195,7 +195,7 @@ timer_set_running_state(struct rte_timer *tim)
 /*
 * Return a skiplist level for a new entry.
- * This probabalistically gives a level with p=1/4 that an entry at level n
+ * This probabilistically gives a level with p=1/4 that an entry at level n
 * will also appear at level n+1.
 */
 static uint32_t