path: root/src/vnet/interface_output.c
Age | Commit message | Author | Files | Lines
2021-04-19 | ip: fix offload flags handling | Aloys Augustin | 1 | -3/+2
2021-04-19 | interface: interface-output end node rework | Damjan Marion | 1 | -53/+43
2021-04-17 | classify: honor pcap interface filter also when classify filter is used | Benoît Ganne | 1 | -107/+74
2021-03-30 | interface: fix coverity | Damjan Marion | 1 | -2/+2
2021-03-24 | interface: do subif or feature arc work in interface-output only if needed | Damjan Marion | 1 | -42/+61
2021-03-23 | interface: simplify and optimize interface-output node | Damjan Marion | 1 | -204/+121
2021-03-23 | interface: reduce duplicate code in the interface-output node | Damjan Marion | 1 | -42/+41
2021-03-19 | interface: add capabilities flags | Mohsin Kazmi | 1 | -1/+1
2021-03-10 | interface: fix interface-output and interface-tx multiarch selection | Damjan Marion | 1 | -15/+6
2021-03-05 | interface: move vnet_pcap_t to vnet | Damjan Marion | 1 | -2/+4
2021-02-15 | vlib: refactor checksum offload support | Mohsin Kazmi | 1 | -13/+6
2021-02-04 | vlib: "revert startup multi-arch variant configuration fix for interfaces" | Damjan Marion | 1 | -28/+2
2021-01-25 | vlib: startup multi-arch variant configuration fix for interfaces | Radu Nicolau | 1 | -2/+28
2020-11-13 | interface: add multiarch support for per-interface tx node | Benoît Ganne | 1 | -5/+15
2020-11-09 | vlib: fix trace number accounting | Benoît Ganne | 1 | -13/+15
2020-10-21 | misc: minimize dependencies on udp.h | Florin Coras | 1 | -0/+1
2020-10-13 | stats: counters data model | Ole Troan | 1 | -2/+2
2020-06-16 | interface: add minimal vpp pkt trace for error-drop | Dave Barach | 1 | -7/+124
2020-06-08 | vxlan: Fixed checksum caclculation offset | Vladimir Isaev | 1 | -5/+5
2020-05-13 | interface: fix the checksum offload in quad loop | Mohsin Kazmi | 1 | -20/+25
2020-04-15 | misc: refactor calc_checksums | Dave Barach | 1 | -66/+21
2020-01-27 | interface: Add missing ip4 udp->checksum = 0 prior to computing checksum | Steven Luong | 1 | -1/+4
2019-12-05 | gso: add protocol header parser | Mohsin Kazmi | 1 | -4/+9
2019-11-20 | classify: per-interface rx/tx pcap capture filters | Dave Barach | 1 | -1/+10
2019-10-14 | gso: Add gso feature arc | Mohsin Kazmi | 1 | -377/+14
2019-09-26 | misc: add vnet classify filter set support | Dave Barach | 1 | -7/+15
2019-09-23 | misc: improve pcap drop trace output | Dave Barach | 1 | -0/+54
2019-09-23 | misc: unify pcap rx / tx / drop trace | Dave Barach | 1 | -150/+15
2019-09-23 | interface: use the correct condition for checking if the pcap fd is open | Andrew Yourtchenko | 1 | -1/+1
2019-09-20 | misc: classifier-based packet trace filter | Dave Barach | 1 | -2/+17
2019-09-20 | interface: fix pcap drop trace in case of vlan | Benoît Ganne | 1 | -1/+12
2019-09-02 | ipsec ip tcp l2: multiarch nodes cannot be declared as static | Damjan Marion | 1 | -1/+1
2019-08-27 | interface: Remove residual dpdk bonding code | Steven Luong | 1 | -2/+1
2019-08-26 | gso: fix segmentation when gso_size greater than vlib buffer size | Mohsin Kazmi | 1 | -1/+4
2019-08-20 | fix pcap_write function | Jack Xu | 1 | -0/+2
2019-08-17 | gso: fix the segmentation to use current_data offset | Mohsin Kazmi | 1 | -15/+16
2019-08-07 | interface: fix checksum flags | Florin Coras | 1 | -0/+3
2019-08-01 | interface: fix pcap tx/rx trace cli handling | John Lo | 1 | -1/+1
2019-07-31 | devices interface tests: vhosst GSO support | Steven Luong | 1 | -4/+0
2019-07-15 | interface: fix issue that pcap rx/tx trace not available when there are worke... | Wei CHEN | 1 | -4/+5
2019-06-20 | interface: fix the incorrect sizes/offsets in the tso segmentation | Andrew Yourtchenko | 1 | -2/+2
2019-06-19 | interface: Fix the tso segmentation | Mohsin Kazmi | 1 | -10/+9
2019-05-27 | rewrite vnet_interface_output_node_inline_gso | Zhiyong Yang | 1 | -59/+47
2019-04-23 | vnet: clean up calc_checksums() | Zhiyong Yang | 1 | -7/+8
2019-04-17 | Duplicate pcap tx trace fix | Neale Ranns | 1 | -3/+0
2019-03-21 | error-drop; print interface by name | Neale Ranns | 1 | -1/+2
2019-03-13 | Move the punt/drop nodes into vlib | Neale Ranns | 1 | -286/+91
2019-03-11 | disable multiarch for error-drop | Filip Tehlar | 1 | -2/+4
2019-03-06 | interface: migrate old MULTIARCH macros to VLIB_NODE_FN | Filip Tehlar | 1 | -23/+19
2019-02-27 | VPP-1576: fix Coverity issues | Dave Barach | 1 | -1/+2
path: root/src/svm/message_queue.h

/**
 * Allocate message queue
 *
 * @param cfg		configuration options: queue len, consumer pid,
 *			ring configs
 * @return		message queue
 */
svm_msg_q_shared_t *svm_msg_q_alloc (svm_msg_q_cfg_t *cfg);
svm_msg_q_shared_t *svm_msg_q_init (void *base, svm_msg_q_cfg_t *cfg);
uword svm_msg_q_size_to_alloc (svm_msg_q_cfg_t *cfg);
void svm_msg_q_attach (svm_msg_q_t *mq, void *smq_base);

/**
 * Cleanup mq's private data
 */
void svm_msg_q_cleanup (svm_msg_q_t *mq);

/**
 * Free message queue
 *
 * @param mq		message queue to be freed
 */
void svm_msg_q_free (svm_msg_q_t * mq);

/**
 * Allocate message buffer
 *
 * Message is allocated on the first available ring capable of holding
 * the requested number of bytes.
 *
 * @param mq		message queue
 * @param nbytes	number of bytes needed for message
 * @return		message structure pointing to the ring and position
 *			allocated
 */
svm_msg_q_msg_t svm_msg_q_alloc_msg (svm_msg_q_t * mq, u32 nbytes);

/**
 * Allocate message buffer on ring
 *
 * Message is allocated on the requested ring. The caller MUST check that
 * the ring is not full.
 *
 * @param mq		message queue
 * @param ring_index	ring on which the allocation should occur
 * @return		message structure pointing to the ring and position
 *			allocated
 */
svm_msg_q_msg_t svm_msg_q_alloc_msg_w_ring (svm_msg_q_t * mq, u32 ring_index);

/**
 * Lock message queue and allocate message buffer on ring
 *
 * This should be used when multiple writers/readers are expected to
 * compete for the rings/queue. Message should be enqueued by calling
 * @ref svm_msg_q_add_w_lock and the caller MUST unlock the queue once
 * the message is enqueued.
 *
 * @param mq		message queue
 * @param ring_index	ring on which the allocation should occur
 * @param noblock	flag that indicates if request should block
 * @param msg		pointer to message to be filled in
 * @return		0 on success, negative number otherwise
 */
int svm_msg_q_lock_and_alloc_msg_w_ring (svm_msg_q_t * mq, u32 ring_index,
					 u8 noblock, svm_msg_q_msg_t * msg);

/**
 * Free message buffer
 *
 * Marks message buffer on ring as free.
 *
 * @param mq		message queue
 * @param msg		message to be freed
 */
void svm_msg_q_free_msg (svm_msg_q_t * mq, svm_msg_q_msg_t * msg);

/**
 * Producer enqueue one message to queue
 *
 * Prior to calling this, the producer should've obtained a message buffer
 * from one of the rings by calling @ref svm_msg_q_alloc_msg.
 *
 * @param mq		message queue
 * @param msg		message (pointer to ring position) to be enqueued
 * @param nowait	flag to indicate if request is blocking or not
 * @return		success status
 */
int svm_msg_q_add (svm_msg_q_t * mq, svm_msg_q_msg_t * msg, int nowait);

/**
 * Producer enqueue one message to queue with mutex held
 *
 * Prior to calling this, the producer should've obtained a message buffer
 * from one of the rings by calling @ref svm_msg_q_alloc_msg. It assumes
 * the queue mutex is held.
 *
 * @param mq		message queue
 * @param msg		message (pointer to ring position) to be enqueued
 */
void svm_msg_q_add_and_unlock (svm_msg_q_t * mq, svm_msg_q_msg_t * msg);
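The allocation and enqueue declarations above make up the producer side of the API. The following is a minimal sketch, not part of the header: the payload type my_evt_t and helper produce_one are hypothetical, and nowait is assumed to take 0 for blocking behavior.

typedef struct { u32 session_index; u32 event; } my_evt_t; /* hypothetical payload */

static int
produce_one (svm_msg_q_t *mq, u32 session_index, u32 event)
{
  svm_msg_q_msg_t msg;
  my_evt_t *e;

  /* allocate on the first ring that can hold sizeof (my_evt_t) bytes */
  msg = svm_msg_q_alloc_msg (mq, sizeof (my_evt_t));
  if (svm_msg_q_msg_is_invalid (&msg))
    return -1;			/* no ring could hold the message */

  /* fill in the payload through the ring slot */
  e = (my_evt_t *) svm_msg_q_msg_data (mq, &msg);
  e->session_index = session_index;
  e->event = event;

  /* nowait = 0: assumed to block if the queue itself is full */
  return svm_msg_q_add (mq, &msg, 0);
}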
/**
 * Consumer dequeue one message from queue
 *
 * This returns the message pointing to the data in the message rings.
 * Should only be used in single consumer scenarios as no locks are grabbed.
 * The consumer is expected to call @ref svm_msg_q_free_msg once it
 * finishes processing/copying the message data.
 *
 * @param mq		message queue
 * @param msg		pointer to structure where message is to be received
 * @param cond		flag that indicates if request should block or not
 * @param time		time to wait if condition is SVM_Q_TIMEDWAIT
 * @return		success status
 */
int svm_msg_q_sub (svm_msg_q_t * mq, svm_msg_q_msg_t * msg,
		   svm_q_conditional_wait_t cond, u32 time);

/**
 * Consumer dequeue one message from queue
 *
 * Returns the message pointing to the data in the message rings. Should only
 * be used in single consumer scenarios as no locks are grabbed. The consumer
 * is expected to call @ref svm_msg_q_free_msg once it finishes
 * processing/copying the message data.
 *
 * @param mq		message queue
 * @param msg		pointer to structure where message is to be received
 * @return		success status
 */
int svm_msg_q_sub_raw (svm_msg_q_t *mq, svm_msg_q_msg_t *elem);

/**
 * Consumer dequeue multiple messages from queue
 *
 * Returns the messages pointing to the data in the message rings. Should only
 * be used in single consumer scenarios as no locks are grabbed. The consumer
 * is expected to call @ref svm_msg_q_free_msg once it finishes
 * processing/copying the message data.
 *
 * @param mq		message queue
 * @param msg_buf	pointer to array of messages to be received
 * @param n_msgs	length of msg_buf array
 * @return		number of messages dequeued
 */
int svm_msg_q_sub_raw_batch (svm_msg_q_t *mq, svm_msg_q_msg_t *msg_buf,
			     u32 n_msgs);

/**
 * Get data for message in queue
 *
 * @param mq		message queue
 * @param msg		message for which the data is requested
 * @return		pointer to data
 */
void *svm_msg_q_msg_data (svm_msg_q_t * mq, svm_msg_q_msg_t * msg);

/**
 * Get message queue ring
 *
 * @param mq		message queue
 * @param ring_index	index of ring
 * @return		pointer to ring
 */
svm_msg_q_ring_t *svm_msg_q_ring (svm_msg_q_t * mq, u32 ring_index);
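Mirroring the producer sketch above, a minimal single-consumer helper (again hypothetical, reusing my_evt_t) could drain one message at a time; SVM_Q_WAIT is assumed to be the blocking member of svm_q_conditional_wait_t and 0 is treated as the success status.

static int
consume_one (svm_msg_q_t *mq, my_evt_t *out)
{
  svm_msg_q_msg_t msg;

  /* block until a message is available (timeout unused for SVM_Q_WAIT) */
  if (svm_msg_q_sub (mq, &msg, SVM_Q_WAIT, 0))
    return -1;

  /* copy the payload out, then release the ring slot */
  *out = *(my_evt_t *) svm_msg_q_msg_data (mq, &msg);
  svm_msg_q_free_msg (mq, &msg);
  return 0;
}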
/**
 * Set event fd for queue
 *
 * If set, queue will exclusively use eventfds for signaling. Moreover,
 * afterwards, the queue should only be used in non-blocking mode. Waiting
 * for events should be done externally using something like epoll.
 *
 * @param mq		message queue
 * @param fd		consumer eventfd
 */
void svm_msg_q_set_eventfd (svm_msg_q_t *mq, int fd);

/**
 * Allocate event fd for queue
 */
int svm_msg_q_alloc_eventfd (svm_msg_q_t *mq);

/**
 * Format message queue, shows msg count for each ring
 */
u8 *format_svm_msg_q (u8 *s, va_list *args);

/**
 * Check length of message queue
 */
static inline u32
svm_msg_q_size (svm_msg_q_t *mq)
{
  return clib_atomic_load_relax_n (&mq->q.shr->cursize);
}

/**
 * Check if message queue is full
 */
static inline u8
svm_msg_q_is_full (svm_msg_q_t * mq)
{
  return (svm_msg_q_size (mq) == mq->q.shr->maxsize);
}

static inline u8
svm_msg_q_ring_is_full (svm_msg_q_t * mq, u32 ring_index)
{
  svm_msg_q_ring_t *ring = vec_elt_at_index (mq->rings, ring_index);
  return (clib_atomic_load_relax_n (&ring->shr->cursize) >= ring->nitems);
}

static inline u8
svm_msg_q_or_ring_is_full (svm_msg_q_t *mq, u32 ring_index)
{
  return (svm_msg_q_is_full (mq) || svm_msg_q_ring_is_full (mq, ring_index));
}

/**
 * Check if message queue is empty
 */
static inline u8
svm_msg_q_is_empty (svm_msg_q_t * mq)
{
  return (svm_msg_q_size (mq) == 0);
}

/**
 * Check if message is invalid
 */
static inline u8
svm_msg_q_msg_is_invalid (svm_msg_q_msg_t * msg)
{
  return (msg->as_u64 == (u64) ~ 0);
}

/**
 * Try locking message queue
 */
static inline int
svm_msg_q_try_lock (svm_msg_q_t * mq)
{
  if (mq->q.evtfd == -1)
    {
      int rv = pthread_mutex_trylock (&mq->q.shr->mutex);
      if (PREDICT_FALSE (rv == EOWNERDEAD))
	rv = pthread_mutex_consistent (&mq->q.shr->mutex);
      return rv;
    }
  else
    {
      return !clib_spinlock_trylock (&mq->q.lock);
    }
}

/**
 * Lock, or block trying, the message queue
 */
static inline int
svm_msg_q_lock (svm_msg_q_t * mq)
{
  if (mq->q.evtfd == -1)
    {
      int rv = pthread_mutex_lock (&mq->q.shr->mutex);
      if (PREDICT_FALSE (rv == EOWNERDEAD))
	rv = pthread_mutex_consistent (&mq->q.shr->mutex);
      return rv;
    }
  else
    {
      clib_spinlock_lock (&mq->q.lock);
      return 0;
    }
}

/**
 * Unlock message queue
 */
static inline void
svm_msg_q_unlock (svm_msg_q_t * mq)
{
  if (mq->q.evtfd == -1)
    {
      pthread_mutex_unlock (&mq->q.shr->mutex);
    }
  else
    {
      clib_spinlock_unlock (&mq->q.lock);
    }
}

/**
 * Wait for message queue event
 *
 * When eventfds are not configured, the shared memory mutex is locked
 * before waiting on the condvar. Typically called by consumers.
 */
int svm_msg_q_wait (svm_msg_q_t *mq, svm_msg_q_wait_type_t type);

/**
 * Wait for message queue event as producer
 *
 * Similar to @ref svm_msg_q_wait but lock (mutex or spinlock) must
 * be held. Should only be called by producers.
 */
int svm_msg_q_wait_prod (svm_msg_q_t *mq);

/**
 * Wait for message queue or ring event as producer
 *
 * Similar to @ref svm_msg_q_wait but lock (mutex or spinlock) must
 * be held. Should only be called by producers.
 */
int svm_msg_q_or_ring_wait_prod (svm_msg_q_t *mq, u32 ring_index);

/**
 * Timed wait for message queue event
 *
 * Must be called with mutex held.
 *
 * @param mq		message queue
 * @param timeout	time in seconds
 */
int svm_msg_q_timedwait (svm_msg_q_t *mq, double timeout);

static inline int
svm_msg_q_get_eventfd (svm_msg_q_t *mq)
{
  return mq->q.evtfd;
}

#endif /* SRC_SVM_MESSAGE_QUEUE_H_ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
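As a closing illustration tying together the eventfd and batch-dequeue declarations above, here is a sketch of an epoll-driven consumer loop. It is illustrative only: svm_msg_q_alloc_eventfd is assumed to return 0 on success and to attach the fd to the queue, and message processing is elided.

#include <sys/epoll.h>
#include <unistd.h>

static void
poll_mq (svm_msg_q_t *mq)
{
  struct epoll_event ev = { .events = EPOLLIN };
  svm_msg_q_msg_t msgs[32];
  int epfd, n, i;
  u64 clear;

  if (svm_msg_q_alloc_eventfd (mq))	/* assumed: 0 on success */
    return;

  epfd = epoll_create1 (0);
  epoll_ctl (epfd, EPOLL_CTL_ADD, svm_msg_q_get_eventfd (mq), &ev);

  while (1)
    {
      if (epoll_wait (epfd, &ev, 1, -1 /* wait forever */) < 1)
	continue;
      /* consume the eventfd notification before draining the queue */
      (void) read (svm_msg_q_get_eventfd (mq), &clear, sizeof (clear));

      /* single-consumer, lock-free batch dequeue */
      while ((n = svm_msg_q_sub_raw_batch (mq, msgs, 32)) > 0)
	for (i = 0; i < n; i++)
	  {
	    /* ... process svm_msg_q_msg_data (mq, &msgs[i]) ... */
	    svm_msg_q_free_msg (mq, &msgs[i]);
	  }
    }
}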