/*
 * Copyright (c) 2011-2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @file
 * @brief BFD protocol implementation
 */
#include <vnet/bfd/bfd_protocol.h>
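
/*
 * Accessors for the BFD control packet header fields defined in RFC 5880,
 * section 4.1: the first octet carries the protocol version (upper 3 bits)
 * and the diagnostic code (lower 5 bits); the second octet carries the
 * session state (upper 2 bits) followed by the Poll, Final, Control Plane
 * Independent, Authentication Present, Demand and Multipoint flag bits.
 */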

u8
bfd_pkt_get_version (const bfd_pkt_t * pkt)
{
  return pkt->head.vers_diag >> 5;
}

void
bfd_pkt_set_version (bfd_pkt_t * pkt, int version)
{
  pkt->head.vers_diag =
    (version << 5) | (pkt->head.vers_diag & ((1 << 5) - 1));
}

u8
bfd_pkt_get_diag_code (const bfd_pkt_t * pkt)
{
  return pkt->head.vers_diag & ((1 << 5) - 1);
}

void
bfd_pkt_set_diag_code (bfd_pkt_t * pkt, int value)
{
  pkt->head.vers_diag =
    (pkt->head.vers_diag & ~((1 << 5) - 1)) | (value & ((1 << 5) - 1));
}

u8
bfd_pkt_get_state (const bfd_pkt_t * pkt)
{
  return pkt->head.sta_flags >> 6;
}

void
bfd_pkt_set_state (bfd_pkt_t * pkt, int value)
{
  pkt->head.sta_flags = (value << 6) | (pkt->head.sta_flags & ((1 << 6) - 1));
}

u8
bfd_pkt_get_poll (const bfd_pkt_t * pkt)
{
  return (pkt->head.sta_flags >> 5) & 1;
}

void
bfd_pkt_set_poll (bfd_pkt_t * pkt)
{
  pkt->head.sta_flags |= 1 << 5;
}

u8
bfd_pkt_get_final (const bfd_pkt_t * pkt)
{
  return (pkt->head.sta_flags >> 4) & 1;
}

void
bfd_pkt_set_final (bfd_pkt_t * pkt)
{
  pkt->head.sta_flags |= 1 << 4;
}

u8
bfd_pkt_get_control_plane_independent (const bfd_pkt_t * pkt)
{
  return (pkt->head.sta_flags >> 3) & 1;
}

#if 0
void
bfd_pkt_set_control_plane_independent (bfd_pkt_t * pkt)
{
  pkt->head.sta_flags |= 1 << 3;
}
#endif

u8
bfd_pkt_get_auth_present (const bfd_pkt_t * pkt)
{
  return (pkt->head.sta_flags >> 2) & 1;
}

void
bfd_pkt_set_auth_present (bfd_pkt_t * pkt)
{
  pkt->head.sta_flags |= 1 << 2;
}

u8
bfd_pkt_get_demand (const bfd_pkt_t * pkt)
{
  return (pkt->head.sta_flags >> 1) & 1;
}

#if 0
void
bfd_pkt_set_demand (bfd_pkt_t * pkt)
{
  pkt->head.sta_flags |= 1 << 1;
}
#endif

u8
bfd_pkt_get_multipoint (const bfd_pkt_t * pkt)
{
  return (pkt->head.sta_flags >> 0) & 1;
}

#if 0
void
bfd_pkt_set_multipoint (bfd_pkt_t * pkt)
{
  pkt->head.sta_flags |= 1 << 0;
}
#endif
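
/*
 * foreach_bfd_auth_type (bfd_protocol.h) expands F once per authentication
 * type; of its four arguments only the type, the maximum key length and the
 * printable name are used by the lookups below.
 */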

u32
bfd_max_key_len_for_auth_type (bfd_auth_type_e auth_type)
{
#define F(t, l, n, s) \
  if (auth_type == t) \
    {                 \
      return l;       \
    }
  foreach_bfd_auth_type (F);
#undef F
  return 0;
}

const char *
bfd_auth_type_str (bfd_auth_type_e auth_type)
{
#define F(t, l, n, s) \
  if (auth_type == t) \
    {                 \
      return s;       \
    }
  foreach_bfd_auth_type (F);
#undef F
  return "UNKNOWN";
}
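
/*
 * foreach_bfd_diag_code and foreach_bfd_state follow the same pattern,
 * expanding F with the numeric value, the token consumed by the
 * BFD_DIAG_CODE_NAME / BFD_STATE_NAME macros and a human-readable string.
 */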

const char *
bfd_diag_code_string (bfd_diag_code_e diag)
{
#define F(n, t, s)             \
  case BFD_DIAG_CODE_NAME (t): \
    return s;
  switch (diag)
    {
    foreach_bfd_diag_code (F)}
  return "UNKNOWN";
#undef F
}

const char *
bfd_state_string (bfd_state_e state)
{
#define F(n, t, s)         \
  case BFD_STATE_NAME (t): \
    return s;
  switch (state)
    {
    foreach_bfd_state (F)}
  return "UNKNOWN";
#undef F
}
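
/*
 * Illustrative sketch (not part of the original file): how the accessors
 * above compose when filling in a control packet header. The helper name,
 * the packet pointer and the field values are hypothetical; 3 is the Up
 * state per RFC 5880, section 4.1.
 */
#if 0
static void
bfd_pkt_example_fill (bfd_pkt_t * pkt)
{
  pkt->head.vers_diag = 0;
  pkt->head.sta_flags = 0;
  bfd_pkt_set_version (pkt, 1);	  /* BFD protocol version 1 */
  bfd_pkt_set_diag_code (pkt, 0); /* no diagnostic */
  bfd_pkt_set_state (pkt, 3);	  /* session state Up */
  bfd_pkt_set_poll (pkt);	  /* ask the peer to respond with Final */
}
#endif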

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */