1 /*
2 * Copyright (c) 2015-2018 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /*
30 * Copyright (C) 2013-2014 Vincenzo Maffione. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
42 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
45 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51 * SUCH DAMAGE.
52 */
53
54 #ifndef _SKYWALK_NEXUS_MBQ_H_
55 #define _SKYWALK_NEXUS_MBQ_H_
56
57 #include <sys/mbuf.h>
58 #include <kern/locks.h>
59 #include <net/classq/classq.h>
60
61 #define NX_MBQ_NO_LIMIT ((uint32_t)-1)
62
63 /*
64 * These function implement an mbuf tailq with an optional lock.
65 * The base functions act ONLY ON THE QUEUE, whereas the "safe"
66 * variants (nx_mbq_safe_*) also handle the lock.
67 */
68
/*
 * A FIFO queue of mbufs with an optional lock.
 *
 * The lock-related fields are only meaningful for queues set up via
 * nx_mbq_safe_init(); the plain nx_mbq_init() path uses the queue
 * without the lock (see the nx_mbq_* vs nx_mbq_safe_* variants below).
 */
struct nx_mbq {
	decl_lck_mtx_data(, nx_mbq_lock);	/* optional queue mutex */
	class_queue_t nx_mbq_q;			/* underlying classq FIFO */
	lck_grp_t *nx_mbq_grp;			/* lock group for nx_mbq_lock */
	struct __kern_channel_ring *nx_mbq_kring; /* ring passed to nx_mbq_safe_init() */
};
76
77 __attribute__((always_inline))
78 static inline void
nx_mbq_lock(struct nx_mbq * q)79 nx_mbq_lock(struct nx_mbq *q)
80 {
81 lck_mtx_lock(&q->nx_mbq_lock);
82 }
83
84 __attribute__((always_inline))
85 static inline void
nx_mbq_lock_spin(struct nx_mbq * q)86 nx_mbq_lock_spin(struct nx_mbq *q)
87 {
88 lck_mtx_lock_spin(&q->nx_mbq_lock);
89 }
90
91 __attribute__((always_inline))
92 static inline void
nx_mbq_convert_spin(struct nx_mbq * q)93 nx_mbq_convert_spin(struct nx_mbq *q)
94 {
95 lck_mtx_convert_spin(&q->nx_mbq_lock);
96 }
97
98 __attribute__((always_inline))
99 static inline void
nx_mbq_unlock(struct nx_mbq * q)100 nx_mbq_unlock(struct nx_mbq *q)
101 {
102 lck_mtx_unlock(&q->nx_mbq_lock);
103 }
104
105 __attribute__((always_inline))
106 static inline struct mbuf *
nx_mbq_peek(struct nx_mbq * q)107 nx_mbq_peek(struct nx_mbq *q)
108 {
109 return qhead(&q->nx_mbq_q);
110 }
111
112 __attribute__((always_inline))
113 static inline unsigned int
nx_mbq_len(struct nx_mbq * q)114 nx_mbq_len(struct nx_mbq *q)
115 {
116 return qlen(&q->nx_mbq_q);
117 }
118
119 __attribute__((always_inline))
120 static inline size_t
nx_mbq_size(struct nx_mbq * q)121 nx_mbq_size(struct nx_mbq *q)
122 {
123 u_int64_t qsize = qsize(&q->nx_mbq_q);
124 VERIFY(qsize <= UINT_MAX);
125 return (size_t)qsize;
126 }
127
128 __attribute__((always_inline))
129 static inline unsigned int
nx_mbq_limit(struct nx_mbq * q)130 nx_mbq_limit(struct nx_mbq *q)
131 {
132 return qlimit(&q->nx_mbq_q);
133 }
134
135 __attribute__((always_inline))
136 static inline void
__nx_mbq_enq(struct nx_mbq * q,struct mbuf * m)137 __nx_mbq_enq(struct nx_mbq *q, struct mbuf *m)
138 {
139 classq_pkt_t pkt;
140
141 CLASSQ_PKT_INIT_MBUF(&pkt, m);
142 _addq(&q->nx_mbq_q, &pkt);
143 }
144
/*
 * Append a single mbuf to the tail of the queue, taking and releasing
 * the queue mutex around the operation.
 */
__attribute__((always_inline))
static inline void
nx_mbq_safe_enq(struct nx_mbq *mbq, struct mbuf *m)
{
	nx_mbq_lock(mbq);
	__nx_mbq_enq(mbq, m);
	nx_mbq_unlock(mbq);
}
153
/*
 * Append a single mbuf to the tail of the queue; caller handles locking.
 */
__attribute__((always_inline))
static inline void
nx_mbq_enq(struct nx_mbq *mbq, struct mbuf *m)
{
	__nx_mbq_enq(mbq, m);
}
160
161 __attribute__((always_inline))
162 static inline void
__nx_mbq_enq_multi(struct nx_mbq * q,struct mbuf * m_head,struct mbuf * m_tail,uint32_t cnt,uint32_t size)163 __nx_mbq_enq_multi(struct nx_mbq *q, struct mbuf *m_head, struct mbuf *m_tail,
164 uint32_t cnt, uint32_t size)
165 {
166 classq_pkt_t head, tail;
167
168 CLASSQ_PKT_INIT_MBUF(&head, m_head);
169 CLASSQ_PKT_INIT_MBUF(&tail, m_tail);
170 _addq_multi(&q->nx_mbq_q, &head, &tail, cnt, size);
171 }
172
173 __attribute__((always_inline))
174 static inline void
nx_mbq_safe_enq_multi(struct nx_mbq * q,struct mbuf * m_head,struct mbuf * m_tail,uint32_t cnt,uint32_t size)175 nx_mbq_safe_enq_multi(struct nx_mbq *q, struct mbuf *m_head,
176 struct mbuf *m_tail, uint32_t cnt, uint32_t size)
177 {
178 nx_mbq_lock(q);
179 __nx_mbq_enq_multi(q, m_head, m_tail, cnt, size);
180 nx_mbq_unlock(q);
181 }
182
183 __attribute__((always_inline))
184 static inline void
nx_mbq_enq_multi(struct nx_mbq * q,struct mbuf * m_head,struct mbuf * m_tail,uint32_t cnt,uint32_t size)185 nx_mbq_enq_multi(struct nx_mbq *q, struct mbuf *m_head, struct mbuf *m_tail,
186 uint32_t cnt, uint32_t size)
187 {
188 __nx_mbq_enq_multi(q, m_head, m_tail, cnt, size);
189 }
190
191 __attribute__((always_inline))
192 static inline struct mbuf *
__mbq_deq(struct nx_mbq * q)193 __mbq_deq(struct nx_mbq *q)
194 {
195 classq_pkt_t pkt = CLASSQ_PKT_INITIALIZER(pkt);
196
197 _getq(&q->nx_mbq_q, &pkt);
198 ASSERT((pkt.cp_mbuf == NULL) || (pkt.cp_ptype == QP_MBUF));
199 return pkt.cp_mbuf;
200 }
201
/*
 * Dequeue and return the mbuf at the head of the queue, taking and
 * releasing the queue mutex around the operation.  Returns NULL when
 * the queue is empty.
 */
__attribute__((always_inline))
static inline struct mbuf *
nx_mbq_safe_deq(struct nx_mbq *mbq)
{
	struct mbuf *m;

	nx_mbq_lock(mbq);
	m = __mbq_deq(mbq);
	nx_mbq_unlock(mbq);
	return m;
}
214
/*
 * Dequeue and return the mbuf at the head of the queue; caller handles
 * locking.  Returns NULL when the queue is empty.
 */
__attribute__((always_inline))
static inline struct mbuf *
nx_mbq_deq(struct nx_mbq *mbq)
{
	return __mbq_deq(mbq);
}
221
222 __attribute__((always_inline))
223 static inline struct mbuf *
__mbq_deq_all(struct nx_mbq * q,struct mbuf ** mlast,uint32_t * qlenp,uint64_t * qsizep)224 __mbq_deq_all(struct nx_mbq *q, struct mbuf **mlast, uint32_t *qlenp,
225 uint64_t *qsizep)
226 {
227 classq_pkt_t first = CLASSQ_PKT_INITIALIZER(first);
228 classq_pkt_t last = CLASSQ_PKT_INITIALIZER(last);
229
230 _getq_all(&q->nx_mbq_q, &first, &last, qlenp, qsizep);
231 *mlast = last.cp_mbuf;
232 ASSERT((first.cp_mbuf == NULL) || (first.cp_ptype == QP_MBUF));
233 return first.cp_mbuf;
234 }
235
236 __attribute__((always_inline))
237 static inline struct mbuf *
nx_mbq_safe_deq_all(struct nx_mbq * q,struct mbuf ** last,uint32_t * qlenp,uint64_t * qsizep)238 nx_mbq_safe_deq_all(struct nx_mbq *q, struct mbuf **last, uint32_t *qlenp,
239 uint64_t *qsizep)
240 {
241 struct mbuf *ret;
242
243 nx_mbq_lock(q);
244 ret = __mbq_deq_all(q, last, qlenp, qsizep);
245 nx_mbq_unlock(q);
246
247 return ret;
248 }
249
250 __attribute__((always_inline))
251 static inline struct mbuf *
nx_mbq_deq_all(struct nx_mbq * q,struct mbuf ** last,uint32_t * qlenp,uint64_t * qsizep)252 nx_mbq_deq_all(struct nx_mbq *q, struct mbuf **last, uint32_t *qlenp,
253 uint64_t *qsizep)
254 {
255 return __mbq_deq_all(q, last, qlenp, qsizep);
256 }
257
__BEGIN_DECLS
/* Unlocked-queue lifecycle: init with a packet limit, concat, test, teardown. */
extern void nx_mbq_init(struct nx_mbq *q, uint32_t lim);
extern void nx_mbq_concat(struct nx_mbq *, struct nx_mbq *);
extern boolean_t nx_mbq_empty(struct nx_mbq *);
extern void nx_mbq_destroy(struct nx_mbq *q);
extern void nx_mbq_purge(struct nx_mbq *q);

/* Lock-enabled lifecycle: also sets up nx_mbq_lock/grp and the kring backref. */
extern void nx_mbq_safe_init(struct __kern_channel_ring *kr, struct nx_mbq *q,
    uint32_t lim, lck_grp_t *lck_grp, lck_attr_t *lck_attr);
extern void nx_mbq_safe_destroy(struct nx_mbq *q);
extern void nx_mbq_safe_purge(struct nx_mbq *q);
__END_DECLS
270 #endif /* _SKYWALK_NEXUS_MBQ_H_ */
271