/*
 * Copyright (c) 2000-2024 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998-2002 Luigi Rizzo, Universita` di Pisa
 * Portions Copyright (c) 2000 Akamba Corp.
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/ip_dummynet.c,v 1.84 2004/08/25 09:31:30 pjd Exp $
 */

#define DUMMYNET_DEBUG 0

/*
 * This module implements IP dummynet, a bandwidth limiter/delay emulator
 * Description of the data structures used is in ip_dummynet.h
 * Here you mainly find the following blocks of code:
 *  + variable declarations;
 *  + heap management functions;
 *  + scheduler and dummynet functions;
 *  + configuration and initialization.
 *
 * NOTA BENE: critical sections are protected by the "dummynet lock".
 *
 * Most important Changes:
 *
 * 010124: Fixed WF2Q behaviour
 * 010122: Fixed spl protection.
 * 000601: WF2Q support
 * 000106: large rewrite, use heaps to handle very many pipes.
 * 980513: initial release
 *
 * include files marked with XXX are probably not needed
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>                  /* XXX */
#include <sys/kernel.h>
#include <sys/random.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/time.h>
#include <sys/sysctl.h>
#include <kern/debug.h>
#include <net/if.h>
#include <net/route.h>
#include <net/kpi_protocol.h>
#if DUMMYNET
#include <net/kpi_protocol.h>
#endif /* DUMMYNET */
#include <net/nwk_wq.h>
#include <net/pfvar.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_dummynet.h>
#include <netinet/ip_var.h>

#include <netinet/ip6.h>        /* for ip6_input, ip6_output prototypes */
#include <netinet6/ip6_var.h>

#include <stdbool.h>
#include <net/sockaddr_utils.h>

/*
 * We keep a private variable for the simulation time, but we could
 * probably use an existing one ("softticks" in sys/kern/kern_timer.c)
 */
static dn_key curr_time = 0; /* current simulation time */

/* this is for the timer that fires to call dummynet() - we only enable the timer when
 * there are packets to process, otherwise it's disabled */
static int timer_enabled = 0;

static int dn_hash_size = 64;   /* default hash size */

/* statistics on number of queue searches and search steps */
static int searches, search_steps;
static int pipe_expire = 1;     /* expire queue if empty */
static int dn_max_ratio = 16;   /* max queues/buckets ratio */

static int red_lookup_depth = 256;      /* RED - default lookup table depth */
static int red_avg_pkt_size = 512;      /* RED - default medium packet size */
static int red_max_pkt_size = 1500;     /* RED - default max packet size */

static int serialize = 0;

/*
 * Three heaps contain queues and pipes that the scheduler handles:
 *
 * ready_heap contains all dn_flow_queue related to fixed-rate pipes.
 *
 * wfq_ready_heap contains the pipes associated with WF2Q flows
 *
 * extract_heap contains pipes associated with delay lines.
 *
 */
static struct dn_heap ready_heap, extract_heap, wfq_ready_heap;

static int heap_init(struct dn_heap *h, int size);
static int heap_insert(struct dn_heap *h, dn_key key1, void *__sized_by_or_null(size)p, size_t size);
static void heap_extract(struct dn_heap *h, void *__sized_by_or_null(size)obj, size_t size);


static void transmit_event(struct dn_pipe *pipe, struct mbuf **head,
    struct mbuf **tail);
static void ready_event(struct dn_flow_queue *q, struct mbuf **head,
    struct mbuf **tail);
static void ready_event_wfq(struct dn_pipe *p, struct mbuf **head,
    struct mbuf **tail);

#ifdef SYSCTL_NODE
/*
 * Sysctl handler for reading the heap sizes.
 */
static int dn_heap_size_sysctl SYSCTL_HANDLER_ARGS;
#endif /* SYSCTL_NODE */

/*
 * Packets are retrieved from queues in Dummynet in chains instead of
 * packet-by-packet. The entire list of packets is first dequeued and
 * sent out by the following function.
 */
static void dummynet_send(struct mbuf *m);

#define HASHSIZE        16
#define HASH(num)       ((((num) >> 8) ^ ((num) >> 4) ^ (num)) & 0x0f)
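
/*
 * Illustration: HASH() folds a pipe/flowset number into one of the 16
 * buckets below. E.g. for num = 0x1234:
 *      (0x12 ^ 0x123 ^ 0x1234) & 0x0f == 0x1305 & 0x0f == 5
 * so pipe number 0x1234 lands in bucket 5.
 */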
static struct dn_pipe_head      pipehash[HASHSIZE];     /* all pipes */
static struct dn_flow_set_head  flowsethash[HASHSIZE];  /* all flowsets */

#ifdef SYSCTL_NODE

const caddr_t SYSCTL_READY_HEAP_ARG = __unsafe_forge_single(caddr_t, &ready_heap);
const caddr_t SYSCTL_EXTRACT_HEAP_ARG = __unsafe_forge_single(caddr_t, &extract_heap);

static int
dn_heap_size_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg2)
	int error = 0;
	int heap_size;
	const caddr_t heap_selector =
	    __unsafe_forge_single(const caddr_t, arg1);

	if (heap_selector == SYSCTL_READY_HEAP_ARG) {
		heap_size = ready_heap.size;
	} else if (heap_selector == SYSCTL_EXTRACT_HEAP_ARG) {
		heap_size = extract_heap.size;
	} else {
		error = EINVAL;
		os_log_error(OS_LOG_DEFAULT, "Unrecognized heap selector");
	}

	if (error == 0) {
		error = SYSCTL_OUT(req, &heap_size, sizeof(heap_size));
	}

	return error;
}

SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Dummynet");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, hash_size,
    CTLFLAG_RW | CTLFLAG_LOCKED, &dn_hash_size, 0, "Default hash table size");
SYSCTL_QUAD(_net_inet_ip_dummynet, OID_AUTO, curr_time,
    CTLFLAG_RD | CTLFLAG_LOCKED, &curr_time, "Current tick");
SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, ready_heap,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    SYSCTL_READY_HEAP_ARG, 0, dn_heap_size_sysctl,
    "I", "Size of ready heap");
SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, extract_heap,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    SYSCTL_EXTRACT_HEAP_ARG, 0, dn_heap_size_sysctl,
    "I", "Size of extract heap");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, searches,
    CTLFLAG_RD | CTLFLAG_LOCKED, &searches, 0, "Number of queue searches");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, search_steps,
    CTLFLAG_RD | CTLFLAG_LOCKED, &search_steps, 0, "Number of queue search steps");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire,
    CTLFLAG_RW | CTLFLAG_LOCKED, &pipe_expire, 0, "Expire queue if empty");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, max_chain_len,
    CTLFLAG_RW | CTLFLAG_LOCKED, &dn_max_ratio, 0,
    "Max ratio between dynamic queues and buckets");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth,
    CTLFLAG_RD | CTLFLAG_LOCKED, &red_lookup_depth, 0, "Depth of RED lookup table");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size,
    CTLFLAG_RD | CTLFLAG_LOCKED, &red_avg_pkt_size, 0, "RED Medium packet size");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size,
    CTLFLAG_RD | CTLFLAG_LOCKED, &red_max_pkt_size, 0, "RED Max packet size");
#endif

#ifdef DUMMYNET_DEBUG
int     dummynet_debug = 0;
#ifdef SYSCTL_NODE
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED, &dummynet_debug,
    0, "control debugging printfs");
#endif
#define DPRINTF(X)      if (dummynet_debug) printf X
#define IP_DN_DBG_LOG(fmt, ...) os_log_error(OS_LOG_DEFAULT, "ip_dn_debug %s:%d : " fmt "\n", __func__, __LINE__, ## __VA_ARGS__)
#else
#define DPRINTF(X)
#define IP_DN_DBG_LOG(fmt, ...) do {} while (0)
#endif
#define IP_DN_ERR_LOG(fmt, ...) os_log_error(OS_LOG_DEFAULT, fmt, ## __VA_ARGS__)

/* dummynet lock */
static LCK_GRP_DECLARE(dn_mutex_grp, "dn");
static LCK_MTX_DECLARE(dn_mutex, &dn_mutex_grp);

static int config_pipe(struct dn_pipe *p);
static int ip_dn_ctl(struct sockopt *sopt);

static void dummynet(void *);
static void dummynet_flush(void);
void dummynet_drain(void);
static ip_dn_io_t dummynet_io;

/*
 * Functions to copy data from/to userspace.
 * Functions that operate with buffers use `__ended_by' sentinel
 * to avoid out-of-bounds memory accesses.
 */
static void cp_flow_set_to_64_user(struct dn_flow_set *set, struct dn_flow_set_64 *fs_bp);
static void cp_queue_to_64_user( struct dn_flow_queue *q, struct dn_flow_queue_64 *qp);
static char *cp_pipe_to_64_user(struct dn_pipe *p, char *bp __ended_by(bp_end), char *bp_end);
static char *dn_copy_set_64(struct dn_flow_set *set, char *bp __ended_by(bp_end), char *bp_end);
static int cp_pipe_from_user_64( struct sockopt *sopt, struct dn_pipe *p );

static void cp_flow_set_to_32_user(struct dn_flow_set *set, struct dn_flow_set_32 *fs_bp);
static void cp_queue_to_32_user( struct dn_flow_queue *q, struct dn_flow_queue_32 *qp);
static char *cp_pipe_to_32_user(struct dn_pipe *p, char *bp __ended_by(bp_end), char *bp_end);
static char *dn_copy_set_32(struct dn_flow_set *set, char *bp __ended_by(bp_end), char *bp_end);
static int cp_pipe_from_user_32( struct sockopt *sopt, struct dn_pipe *p );

static struct m_tag * m_tag_kalloc_dummynet(u_int32_t id, u_int16_t type, uint16_t len, int wait);
static void m_tag_kfree_dummynet(struct m_tag *tag);

struct eventhandler_lists_ctxt dummynet_evhdlr_ctxt;

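/*
 * Illustration: my_random() below returns a 31-bit random value (the
 * sign bit is cleared). Callers compare it against scaled thresholds,
 * e.g. the random packet-loss check in dummynet_io() drops when
 * my_random() < fs->plr, and RED keeps only the low 16 bits for its
 * drop-probability test.
 */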
uint32_t
my_random(void)
{
	uint32_t val;
	read_frandom(&val, sizeof(val));
	val &= 0x7FFFFFFF;

	return val;
}

/*
 * Heap management functions.
 *
 * In the heap, first node is element 0. Children of i are 2i+1 and 2i+2.
 * Some macros help finding parent/children so we can optimize them.
 *
 * heap_init() is called to expand the heap when needed.
 * Increment size in blocks of 16 entries.
 * XXX failure to allocate a new element is a pretty bad failure
 * as we basically stall a whole queue forever!!
 * Returns nonzero (ENOMEM) on error, 0 on success
 */
#define HEAP_FATHER(x)  ( ( (x) - 1 ) / 2 )
#define HEAP_LEFT(x)    ( 2*(x) + 1 )
#define HEAP_IS_LEFT(x) ( (x) & 1 )
#define HEAP_RIGHT(x)   ( 2*(x) + 2 )
#define HEAP_SWAP(a, b, buffer) { buffer = a ; a = b ; b = buffer ; }
#define HEAP_INCREMENT  15
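
/*
 * Illustration of the index arithmetic above: for node x = 3,
 * HEAP_FATHER(3) == 1, HEAP_LEFT(3) == 7 and HEAP_RIGHT(3) == 8.
 * Left children always get odd indices (2x+1), which is why
 * HEAP_IS_LEFT() only needs to test the low bit.
 */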


int
cp_pipe_from_user_32( struct sockopt *sopt, struct dn_pipe *p )
{
	struct dn_pipe_32 user_pipe_32;
	int error = 0;

	error = sooptcopyin(sopt, &user_pipe_32, sizeof(struct dn_pipe_32), sizeof(struct dn_pipe_32));
	if (!error) {
		p->pipe_nr = user_pipe_32.pipe_nr;
		p->bandwidth = user_pipe_32.bandwidth;
		p->delay = user_pipe_32.delay;
		p->V = user_pipe_32.V;
		p->sum = user_pipe_32.sum;
		p->numbytes = user_pipe_32.numbytes;
		p->sched_time = user_pipe_32.sched_time;
		bcopy( user_pipe_32.if_name, p->if_name, IFNAMSIZ);
		p->if_name[IFNAMSIZ - 1] = '\0';
		p->ready = user_pipe_32.ready;

		p->fs.fs_nr = user_pipe_32.fs.fs_nr;
		p->fs.flags_fs = user_pipe_32.fs.flags_fs;
		p->fs.parent_nr = user_pipe_32.fs.parent_nr;
		p->fs.weight = user_pipe_32.fs.weight;
		p->fs.qsize = user_pipe_32.fs.qsize;
		p->fs.plr = user_pipe_32.fs.plr;
		p->fs.flow_mask = user_pipe_32.fs.flow_mask;
		p->fs.rq = NULL;
		p->fs.rq_size = user_pipe_32.fs.rq_size;
		p->fs.rq_elements = user_pipe_32.fs.rq_elements;
		p->fs.last_expired = user_pipe_32.fs.last_expired;
		p->fs.backlogged = user_pipe_32.fs.backlogged;
		p->fs.w_q = user_pipe_32.fs.w_q;
		p->fs.max_th = user_pipe_32.fs.max_th;
		p->fs.min_th = user_pipe_32.fs.min_th;
		p->fs.max_p = user_pipe_32.fs.max_p;
		p->fs.c_1 = user_pipe_32.fs.c_1;
		p->fs.c_2 = user_pipe_32.fs.c_2;
		p->fs.c_3 = user_pipe_32.fs.c_3;
		p->fs.c_4 = user_pipe_32.fs.c_4;
		p->fs.w_q_lookup = NULL;
		p->fs.lookup_depth = user_pipe_32.fs.lookup_depth;
		p->fs.lookup_step = user_pipe_32.fs.lookup_step;
		p->fs.lookup_weight = user_pipe_32.fs.lookup_weight;
		p->fs.avg_pkt_size = user_pipe_32.fs.avg_pkt_size;
		p->fs.max_pkt_size = user_pipe_32.fs.max_pkt_size;
	}
	return error;
}


int
cp_pipe_from_user_64( struct sockopt *sopt, struct dn_pipe *p )
{
	struct dn_pipe_64 user_pipe_64;
	int error = 0;

	error = sooptcopyin(sopt, &user_pipe_64, sizeof(struct dn_pipe_64), sizeof(struct dn_pipe_64));
	if (!error) {
		p->pipe_nr = user_pipe_64.pipe_nr;
		p->bandwidth = user_pipe_64.bandwidth;
		p->delay = user_pipe_64.delay;
		p->V = user_pipe_64.V;
		p->sum = user_pipe_64.sum;
		p->numbytes = user_pipe_64.numbytes;
		p->sched_time = user_pipe_64.sched_time;
		bcopy( user_pipe_64.if_name, p->if_name, IFNAMSIZ);
		p->if_name[IFNAMSIZ - 1] = '\0';
		p->ready = user_pipe_64.ready;

		p->fs.fs_nr = user_pipe_64.fs.fs_nr;
		p->fs.flags_fs = user_pipe_64.fs.flags_fs;
		p->fs.parent_nr = user_pipe_64.fs.parent_nr;
		p->fs.weight = user_pipe_64.fs.weight;
		p->fs.qsize = user_pipe_64.fs.qsize;
		p->fs.plr = user_pipe_64.fs.plr;
		p->fs.flow_mask = user_pipe_64.fs.flow_mask;
		p->fs.rq = NULL;
		p->fs.rq_size = user_pipe_64.fs.rq_size;
		p->fs.rq_elements = user_pipe_64.fs.rq_elements;
		p->fs.last_expired = user_pipe_64.fs.last_expired;
		p->fs.backlogged = user_pipe_64.fs.backlogged;
		p->fs.w_q = user_pipe_64.fs.w_q;
		p->fs.max_th = user_pipe_64.fs.max_th;
		p->fs.min_th = user_pipe_64.fs.min_th;
		p->fs.max_p = user_pipe_64.fs.max_p;
		p->fs.c_1 = user_pipe_64.fs.c_1;
		p->fs.c_2 = user_pipe_64.fs.c_2;
		p->fs.c_3 = user_pipe_64.fs.c_3;
		p->fs.c_4 = user_pipe_64.fs.c_4;
		p->fs.w_q_lookup = NULL;
		p->fs.lookup_depth = user_pipe_64.fs.lookup_depth;
		p->fs.lookup_step = user_pipe_64.fs.lookup_step;
		p->fs.lookup_weight = user_pipe_64.fs.lookup_weight;
		p->fs.avg_pkt_size = user_pipe_64.fs.avg_pkt_size;
		p->fs.max_pkt_size = user_pipe_64.fs.max_pkt_size;
	} else {
		IP_DN_ERR_LOG("Error copying pipe from user, %d", error);
	}
	return error;
}

static void
cp_flow_set_to_32_user(struct dn_flow_set *set, struct dn_flow_set_32 *fs_bp)
{
	fs_bp->fs_nr = set->fs_nr;
	fs_bp->flags_fs = set->flags_fs;
	fs_bp->parent_nr = set->parent_nr;
	fs_bp->weight = set->weight;
	fs_bp->qsize = set->qsize;
	fs_bp->plr = set->plr;
	fs_bp->flow_mask = set->flow_mask;
	fs_bp->rq_size = set->rq_size;
	fs_bp->rq_elements = set->rq_elements;
	fs_bp->last_expired = set->last_expired;
	fs_bp->backlogged = set->backlogged;
	fs_bp->w_q = set->w_q;
	fs_bp->max_th = set->max_th;
	fs_bp->min_th = set->min_th;
	fs_bp->max_p = set->max_p;
	fs_bp->c_1 = set->c_1;
	fs_bp->c_2 = set->c_2;
	fs_bp->c_3 = set->c_3;
	fs_bp->c_4 = set->c_4;
	fs_bp->w_q_lookup = CAST_DOWN_EXPLICIT(user32_addr_t, VM_KERNEL_ADDRHIDE(set->w_q_lookup));
	fs_bp->lookup_depth = set->lookup_depth;
	fs_bp->lookup_step = set->lookup_step;
	fs_bp->lookup_weight = set->lookup_weight;
	fs_bp->avg_pkt_size = set->avg_pkt_size;
	fs_bp->max_pkt_size = set->max_pkt_size;
}

static void
cp_flow_set_to_64_user(struct dn_flow_set *set, struct dn_flow_set_64 *fs_bp)
{
	fs_bp->fs_nr = set->fs_nr;
	fs_bp->flags_fs = set->flags_fs;
	fs_bp->parent_nr = set->parent_nr;
	fs_bp->weight = set->weight;
	fs_bp->qsize = set->qsize;
	fs_bp->plr = set->plr;
	fs_bp->flow_mask = set->flow_mask;
	fs_bp->rq_size = set->rq_size;
	fs_bp->rq_elements = set->rq_elements;
	fs_bp->last_expired = set->last_expired;
	fs_bp->backlogged = set->backlogged;
	fs_bp->w_q = set->w_q;
	fs_bp->max_th = set->max_th;
	fs_bp->min_th = set->min_th;
	fs_bp->max_p = set->max_p;
	fs_bp->c_1 = set->c_1;
	fs_bp->c_2 = set->c_2;
	fs_bp->c_3 = set->c_3;
	fs_bp->c_4 = set->c_4;
	fs_bp->w_q_lookup = CAST_DOWN(user64_addr_t, VM_KERNEL_ADDRHIDE(set->w_q_lookup));
	fs_bp->lookup_depth = set->lookup_depth;
	fs_bp->lookup_step = set->lookup_step;
	fs_bp->lookup_weight = set->lookup_weight;
	fs_bp->avg_pkt_size = set->avg_pkt_size;
	fs_bp->max_pkt_size = set->max_pkt_size;
}

static
void
cp_queue_to_32_user( struct dn_flow_queue *q, struct dn_flow_queue_32 *qp)
{
	qp->id = q->id;
	qp->len = q->len;
	qp->len_bytes = q->len_bytes;
	qp->numbytes = q->numbytes;
	qp->tot_pkts = q->tot_pkts;
	qp->tot_bytes = q->tot_bytes;
	qp->drops = q->drops;
	qp->hash_slot = q->hash_slot;
	qp->avg = q->avg;
	qp->count = q->count;
	qp->random = q->random;
	qp->q_time = (u_int32_t)q->q_time;
	qp->heap_pos = q->heap_pos;
	qp->sched_time = q->sched_time;
	qp->S = q->S;
	qp->F = q->F;
}

static
void
cp_queue_to_64_user( struct dn_flow_queue *q, struct dn_flow_queue_64 *qp)
{
	qp->id = q->id;
	qp->len = q->len;
	qp->len_bytes = q->len_bytes;
	qp->numbytes = q->numbytes;
	qp->tot_pkts = q->tot_pkts;
	qp->tot_bytes = q->tot_bytes;
	qp->drops = q->drops;
	qp->hash_slot = q->hash_slot;
	qp->avg = q->avg;
	qp->count = q->count;
	qp->random = q->random;
	qp->q_time = (u_int32_t)q->q_time;
	qp->heap_pos = q->heap_pos;
	qp->sched_time = q->sched_time;
	qp->S = q->S;
	qp->F = q->F;
}

static
char *
cp_pipe_to_32_user(struct dn_pipe *p, char *bp0 __ended_by(bp_end), char *bp_end)
{
	struct dn_pipe_32 *pipe_bp = NULL;      /* Pipe header */
	caddr_t bp;     /* Pointer after the pipe header */

	/* Check for overflow, and assign `pipe_bp' and `bp' pointers, if safe */
	if (bp_end - bp0 < sizeof(*pipe_bp)) {
		/* Will overflow, can not proceed */
		return NULL;
	}
	pipe_bp = __unsafe_forge_single(struct dn_pipe_32 *, bp0);
	bp = bp0 + sizeof(*pipe_bp);

	pipe_bp->pipe_nr = p->pipe_nr;
	pipe_bp->bandwidth = p->bandwidth;
	pipe_bp->delay = p->delay;
	bcopy( &(p->scheduler_heap), &(pipe_bp->scheduler_heap), sizeof(struct dn_heap_32));
	pipe_bp->scheduler_heap.p = CAST_DOWN_EXPLICIT(user32_addr_t, VM_KERNEL_ADDRHIDE(pipe_bp->scheduler_heap.p));
	bcopy( &(p->not_eligible_heap), &(pipe_bp->not_eligible_heap), sizeof(struct dn_heap_32));
	pipe_bp->not_eligible_heap.p = CAST_DOWN_EXPLICIT(user32_addr_t, VM_KERNEL_ADDRHIDE(pipe_bp->not_eligible_heap.p));
	bcopy( &(p->idle_heap), &(pipe_bp->idle_heap), sizeof(struct dn_heap_32));
	pipe_bp->idle_heap.p = CAST_DOWN_EXPLICIT(user32_addr_t, VM_KERNEL_ADDRHIDE(pipe_bp->idle_heap.p));
	pipe_bp->V = p->V;
	pipe_bp->sum = p->sum;
	pipe_bp->numbytes = p->numbytes;
	pipe_bp->sched_time = p->sched_time;
	bcopy( p->if_name, pipe_bp->if_name, IFNAMSIZ);
	pipe_bp->ifp = CAST_DOWN_EXPLICIT(user32_addr_t, VM_KERNEL_ADDRHIDE(p->ifp));
	pipe_bp->ready = p->ready;

	cp_flow_set_to_32_user( &(p->fs), &(pipe_bp->fs));

	pipe_bp->delay = (pipe_bp->delay * 1000) / (hz * 10);
	/*
	 * XXX the following is a hack based on ->next being the
	 * first field in dn_pipe and dn_flow_set. The correct
	 * solution would be to move the dn_flow_set to the beginning
	 * of struct dn_pipe.
	 */
	pipe_bp->next = CAST_DOWN_EXPLICIT( user32_addr_t, DN_IS_PIPE );
	/* clean pointers */
	pipe_bp->head = pipe_bp->tail = (user32_addr_t) 0;
	pipe_bp->fs.next = (user32_addr_t)0;
	pipe_bp->fs.pipe = (user32_addr_t)0;
	pipe_bp->fs.rq = (user32_addr_t)0;
	return dn_copy_set_32( &(p->fs), bp, bp_end);
}

/*
 * Returns NULL on error.
 */
static
char *
cp_pipe_to_64_user(struct dn_pipe *p, char *bp0 __ended_by(bp_end), char *bp_end)
{
	struct dn_pipe_64 *pipe_bp = NULL;      /* Pipe header */
	caddr_t bp;     /* Pointer after the pipe header */

	/* Check for overflow, and assign `pipe_bp' and `bp' pointers, if safe */
	if (bp_end - bp0 < sizeof(*pipe_bp)) {
		IP_DN_DBG_LOG("Would overflow: available=%ld", bp_end - bp0);
		/* Will overflow, can not proceed */
		return NULL;
	}
	pipe_bp = __unsafe_forge_single(struct dn_pipe_64 *, bp0);
	bp = bp0 + sizeof(*pipe_bp);

	/* Populate the pipe header */
	pipe_bp->pipe_nr = p->pipe_nr;
	pipe_bp->bandwidth = p->bandwidth;
	pipe_bp->delay = p->delay;
	bcopy( &(p->scheduler_heap), &(pipe_bp->scheduler_heap), sizeof(struct dn_heap_64));
	pipe_bp->scheduler_heap.p = CAST_DOWN(user64_addr_t, VM_KERNEL_ADDRHIDE(pipe_bp->scheduler_heap.p));
	bcopy( &(p->not_eligible_heap), &(pipe_bp->not_eligible_heap), sizeof(struct dn_heap_64));
	pipe_bp->not_eligible_heap.p = CAST_DOWN(user64_addr_t, VM_KERNEL_ADDRHIDE(pipe_bp->not_eligible_heap.p));
	bcopy( &(p->idle_heap), &(pipe_bp->idle_heap), sizeof(struct dn_heap_64));
	pipe_bp->idle_heap.p = CAST_DOWN(user64_addr_t, VM_KERNEL_ADDRHIDE(pipe_bp->idle_heap.p));
	pipe_bp->V = p->V;
	pipe_bp->sum = p->sum;
	pipe_bp->numbytes = p->numbytes;
	pipe_bp->sched_time = p->sched_time;
	bcopy( p->if_name, pipe_bp->if_name, IFNAMSIZ);
	pipe_bp->ifp = CAST_DOWN(user64_addr_t, VM_KERNEL_ADDRHIDE(p->ifp));
	pipe_bp->ready = p->ready;

	cp_flow_set_to_64_user( &(p->fs), &(pipe_bp->fs));

	pipe_bp->delay = (pipe_bp->delay * 1000) / (hz * 10);
	/*
	 * XXX the following is a hack based on ->next being the
	 * first field in dn_pipe and dn_flow_set. The correct
	 * solution would be to move the dn_flow_set to the beginning
	 * of struct dn_pipe.
	 */
	pipe_bp->next = CAST_DOWN( user64_addr_t, DN_IS_PIPE );
	/* clean pointers */
	pipe_bp->head = pipe_bp->tail = USER_ADDR_NULL;
	pipe_bp->fs.next = USER_ADDR_NULL;
	pipe_bp->fs.pipe = USER_ADDR_NULL;
	pipe_bp->fs.rq = USER_ADDR_NULL;
	return dn_copy_set_64( &(p->fs), bp, bp_end);
}

static int
heap_init(struct dn_heap *h, int new_size)
{
	struct dn_heap_entry *p;

	if (h->size >= new_size) {
		printf("dummynet: heap_init, Bogus call, have %d want %d\n",
		    h->size, new_size);
		return 0;
	}
	new_size = (new_size + HEAP_INCREMENT) & ~HEAP_INCREMENT;
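	/* e.g. a request to grow to 20 entries is rounded up to 32: (20 + 15) & ~15 == 32 */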
	p = krealloc_type(struct dn_heap_entry, h->size, new_size,
	    h->p, Z_NOWAIT | Z_ZERO);
	if (p == NULL) {
		printf("dummynet: heap_init, resize %d failed\n", new_size );
		return ENOMEM;  /* error */
	}
	h->p = p;
	h->size = new_size;
	return 0;
}

/*
 * Routines to safely manage node offsets.
 *
 * If `heap->offset' > 0, the offset of the node in the heap
 * is also stored in the heap entries at the given offset.
 *
 * Otherwise, the heap entries do not contain heap offsets.
 */
static inline void
dn_heap_update_node_offset(struct dn_heap *heap, int node, int pos)
{
	caddr_t offset_ptr, obj_bytes;
	struct dn_heap_entry *ent;

	if (heap->offset <= 0) {
		/* Heap entries don't contain offsets, nothing to do. */
		return;
	}
	VERIFY(node < heap->size);
	ent = &(heap->p[node]);
	VERIFY(heap->offset + sizeof(int) <= ent->obj_size);
	obj_bytes = ent->object;
	offset_ptr = obj_bytes + heap->offset;

	memcpy(offset_ptr, &pos, sizeof(int));
}

#define DN_INVALID_OFFSET (-1)

static inline int
dn_heap_get_node_offset(struct dn_heap *heap, char *obj_bytes __sized_by(obj_size), size_t obj_size)
{
	int obj_offset;
	caddr_t offset_ptr;
	if (heap->offset <= 0) {
		/* Heap entries don't contain offsets, nothing to do. */
		os_log(OS_LOG_DEFAULT,
		    "dummynet: dn_heap_get_node_offset from middle not supported on this heap!!!");
		return DN_INVALID_OFFSET;
	}
	VERIFY(heap->offset + sizeof(int) <= obj_size);
	offset_ptr = obj_bytes + heap->offset;
	memcpy(&obj_offset, offset_ptr, sizeof(int));
	if (obj_offset < 0 || obj_offset >= heap->elements) {
		printf("dummynet: dn_heap_get_node_offset, offset %d out of bound 0..%d\n",
		    obj_offset, heap->elements);
		return DN_INVALID_OFFSET;
	}
	return obj_offset;
}

#define GET_OFFSET(heap, obj, obj_size) dn_heap_get_node_offset((heap), (obj), (obj_size))

#define SET_OFFSET(heap, node) dn_heap_update_node_offset((heap), (node), (node))

/*
 * RESET_OFFSET is used for sanity checks. It sets offset to an invalid value.
 */
#define RESET_OFFSET(heap, node) dn_heap_update_node_offset((heap), (node), DN_INVALID_OFFSET)
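
/*
 * Illustration: storing the heap position inside the queued object is
 * what lets heap_extract() remove a *specific* element (not just the
 * top) in O(log n). Each dn_flow_queue/dn_pipe carries an int at byte
 * offset `heap->offset'; SET_OFFSET() keeps it in sync on every swap,
 * and GET_OFFSET() reads it back instead of doing a linear search.
 */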

/*
 * Insert element in heap. Normally, p != NULL, we insert p in
 * a new position and bubble up. If p == NULL, then the element is
 * already in place, and key is the position where to start the
 * bubble-up.
 * Returns 1 on failure (cannot allocate new heap entry)
 *
 * If offset > 0 the position (index, int) of the element in the heap is
 * also stored in the element itself at the given offset in bytes.
 */
static int
heap_insert(struct dn_heap *h, dn_key key1, void *__sized_by_or_null(obj_size)object, size_t obj_size)
{
	int son = h->elements;

	if (object == NULL) { /* data already there, set starting point */
		VERIFY(key1 < INT_MAX);
		son = (int)key1;
	} else { /* insert new element at the end, possibly resize */
		son = h->elements;
		if (son == h->size) { /* need resize... */
			if (heap_init(h, h->elements + 1)) {
				return 1; /* failure... */
			}
		}
		h->p[son].object = object;
		h->p[son].obj_size = obj_size;
		h->p[son].key = key1;
		h->elements++;
	}
	while (son > 0) { /* bubble up */
		int father = HEAP_FATHER(son);
		struct dn_heap_entry tmp;

		if (DN_KEY_LT( h->p[father].key, h->p[son].key )) {
			break; /* found right position */
		}
		/* son smaller than father, swap and repeat */
		HEAP_SWAP(h->p[son], h->p[father], tmp);
		SET_OFFSET(h, son);
		son = father;
	}
	SET_OFFSET(h, son);
	return 0;
}

/*
 * remove top element from heap, or obj if obj != NULL
 */
static void
heap_extract(struct dn_heap *h, void *__sized_by_or_null(obj_size)obj, size_t obj_size)
{
	int child, father, maxelt = h->elements - 1;

	if (maxelt < 0) {
		printf("dummynet: warning, extract from empty heap 0x%llx\n",
		    (uint64_t)VM_KERNEL_ADDRPERM(h));
		return;
	}
	father = 0; /* default: move up smallest child */
	if (obj != NULL) { /* extract specific element, index is at offset */
		father = GET_OFFSET(h, obj, obj_size);
		if (father == DN_INVALID_OFFSET) {
			panic("dummynet: heap_extract");
		}
	}
	RESET_OFFSET(h, father);
	child = HEAP_LEFT(father); /* left child */
	while (child <= maxelt) { /* valid entry */
		if (child != maxelt && DN_KEY_LT(h->p[child + 1].key, h->p[child].key)) {
			child = child + 1; /* take right child, otherwise left */
		}
		h->p[father] = h->p[child];
		SET_OFFSET(h, father);
		father = child;
		child = HEAP_LEFT(child); /* left child for next loop */
	}
	h->elements--;
	if (father != maxelt) {
		/*
		 * Fill hole with last entry and bubble up, reusing the insert code
		 */
		h->p[father] = h->p[maxelt];
		heap_insert(h, father, NULL, 0); /* this one cannot fail */
	}
}

/*
 * heapify() will reorganize data inside an array to maintain the
 * heap property. It is needed when we delete a bunch of entries.
 */
static void
heapify(struct dn_heap *h)
{
	int i;

	for (i = 0; i < h->elements; i++) {
		heap_insert(h, i, NULL, 0);
	}
}

/*
 * cleanup the heap and free data structure
 */
static void
heap_free(struct dn_heap *h)
{
	kfree_type_counted_by(struct dn_heap_entry, h->size, h->p);
	bzero(h, sizeof(*h));
}

/*
 * --- end of heap management functions ---
 */

/*
 * Return the mbuf tag holding the dummynet state. As an optimization
 * this is assumed to be the first tag on the list. If this turns out
 * wrong we'll need to search the list.
 */
static struct dn_pkt_tag *
dn_tag_get(struct mbuf *m)
{
	struct m_tag *mtag = m_tag_first(m);

	if (!(mtag != NULL &&
	    mtag->m_tag_id == KERNEL_MODULE_TAG_ID &&
	    mtag->m_tag_type == KERNEL_TAG_TYPE_DUMMYNET &&
	    sizeof(struct dn_pkt_tag) <= mtag->m_tag_len)) {
		panic("packet on dummynet queue w/o dummynet tag: 0x%llx",
		    (uint64_t)VM_KERNEL_ADDRPERM(m));
	}

	return (struct dn_pkt_tag *)(mtag->m_tag_data);
}

/*
 * Scheduler functions:
 *
 * transmit_event() is called when the delay-line needs to enter
 * the scheduler, either because of existing pkts getting ready,
 * or new packets entering the queue. The event handled is the delivery
 * time of the packet.
 *
 * ready_event() does something similar with fixed-rate queues, and the
 * event handled is the finish time of the head pkt.
 *
 * ready_event_wfq() does something similar with WF2Q queues, and the
 * event handled is the start time of the head pkt.
 *
 * In all cases, we make sure that the data structures are consistent
 * before passing pkts out, because this might trigger recursive
 * invocations of the procedures.
 */
static void
transmit_event(struct dn_pipe *pipe, struct mbuf **head, struct mbuf **tail)
{
	struct mbuf *m;
	struct dn_pkt_tag *pkt = NULL;
	u_int64_t schedule_time;

	LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);
	ASSERT(serialize >= 0);
	if (serialize == 0) {
		while ((m = pipe->head) != NULL) {
			pkt = dn_tag_get(m);
			if (!DN_KEY_LEQ(pkt->dn_output_time, curr_time)) {
				break;
			}

			pipe->head = m->m_nextpkt;
			if (*tail != NULL) {
				(*tail)->m_nextpkt = m;
			} else {
				*head = m;
			}
			*tail = m;
		}

		if (*tail != NULL) {
			(*tail)->m_nextpkt = NULL;
		}
	}

	schedule_time = pkt == NULL || DN_KEY_LEQ(pkt->dn_output_time, curr_time) ?
	    curr_time + 1 : pkt->dn_output_time;

	/* if there are leftover packets, put the pipe into the heap for next ready event */
	if ((m = pipe->head) != NULL) {
		pkt = dn_tag_get(m);
		/* XXX should check errors on heap_insert, by draining the
		 * whole pipe p and hoping in the future we are more successful
		 */
		heap_insert(&extract_heap, schedule_time, pipe, sizeof(struct dn_pipe));
	}
}

/*
 * the following macro computes how many ticks we have to wait
 * before being able to transmit a packet. The credit is taken from
 * either a pipe (WF2Q) or a flow_queue (per-flow queueing)
 */

/* hz is 100, which gives a granularity of 10ms in the old timer.
 * The timer has been changed to fire every 1ms, so the use of
 * hz has been modified here. All instances of hz have been left
 * in place but adjusted by a factor of 10 so that hz is functionally
 * equal to 1000.
 */
#define SET_TICKS(_m, q, p) \
	((_m)->m_pkthdr.len*8*(hz*10) - (q)->numbytes + p->bandwidth - 1 ) / \
	p->bandwidth ;
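
/*
 * Worked example for SET_TICKS(): credit and packet cost are kept in
 * bit-ticks-per-second units (one tick = 1ms, hz*10 == 1000). For a
 * 1000-byte packet on a 1 Mbit/s pipe with no accumulated credit:
 *      cost  = 1000 * 8 * 1000 = 8,000,000
 *      ticks = ceil(8,000,000 / 1,000,000) = 8
 * i.e. 8ms, which is indeed the serialization time of 8000 bits
 * at 1 Mbit/s.
 */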

/*
 * extract pkt from queue, compute output time (could be now)
 * and put into delay line (p_queue)
 */
static void
move_pkt(struct mbuf *pkt, struct dn_flow_queue *q,
    struct dn_pipe *p, int len)
{
	struct dn_pkt_tag *dt = dn_tag_get(pkt);

	q->head = pkt->m_nextpkt;
	q->len--;
	q->len_bytes -= len;

	dt->dn_output_time = curr_time + p->delay;

	if (p->head == NULL) {
		p->head = pkt;
	} else {
		p->tail->m_nextpkt = pkt;
	}
	p->tail = pkt;
	p->tail->m_nextpkt = NULL;
}

/*
 * ready_event() is invoked every time the queue must enter the
 * scheduler, either because the first packet arrives, or because
 * a previously scheduled event fired.
 * On invocation, drain as many pkts as possible (could be 0) and then
 * if there are leftover packets reinsert the pkt in the scheduler.
 */
static void
ready_event(struct dn_flow_queue *q, struct mbuf **head, struct mbuf **tail)
{
	struct mbuf *pkt;
	struct dn_pipe *p = q->fs->pipe;
	int p_was_empty;

	LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);

	if (p == NULL) {
		printf("dummynet: ready_event pipe is gone\n");
		return;
	}
	p_was_empty = (p->head == NULL);

	/*
	 * schedule fixed-rate queues linked to this pipe:
	 * Account for the bw accumulated since last scheduling, then
	 * drain as many pkts as allowed by q->numbytes and move to
	 * the delay line (in p) computing output time.
	 * bandwidth==0 (no limit) means we can drain the whole queue,
	 * setting len_scaled = 0 does the job.
	 */
	q->numbytes += (curr_time - q->sched_time) * p->bandwidth;
	while ((pkt = q->head) != NULL) {
		int len = pkt->m_pkthdr.len;
		int len_scaled = p->bandwidth ? len * 8 * (hz * 10) : 0;
		if (len_scaled > q->numbytes) {
			break;
		}
		q->numbytes -= len_scaled;
		move_pkt(pkt, q, p, len);
	}
	/*
	 * If we have more packets queued, schedule next ready event
	 * (can only occur when bandwidth != 0, otherwise we would have
	 * flushed the whole queue in the previous loop).
	 * To this purpose we record the current time and compute how many
	 * ticks to go for the finish time of the packet.
	 */
	if ((pkt = q->head) != NULL) { /* this implies bandwidth != 0 */
		dn_key t = SET_TICKS(pkt, q, p); /* ticks i have to wait */
		q->sched_time = curr_time;
		heap_insert(&ready_heap, curr_time + t, q, sizeof(struct dn_flow_queue));
		/* XXX should check errors on heap_insert, and drain the whole
		 * queue on error hoping next time we are luckier.
		 */
	} else { /* RED needs to know when the queue becomes empty */
		q->q_time = curr_time;
		q->numbytes = 0;
	}
	/*
	 * If the delay line was empty call transmit_event(p) now.
	 * Otherwise, the scheduler will take care of it.
	 */
	if (p_was_empty) {
		transmit_event(p, head, tail);
	}
}

/*
 * Called when we can transmit packets on WF2Q queues. Take pkts out of
 * the queues at their start time, and enqueue into the delay line.
 * Packets are drained until p->numbytes < 0. As long as
 * len_scaled >= p->numbytes, the packet goes into the delay line
 * with a deadline p->delay. For the last packet, if p->numbytes<0,
 * there is an additional delay.
 */
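/*
 * Illustration: with two backlogged flows of weights 2 and 1 on the
 * same pipe, each dequeue advances the flow's finish time F by
 * (len << MY_M) / weight, so the weight-2 flow is picked roughly
 * twice as often and receives about 2/3 of the pipe bandwidth, as
 * WF2Q+ prescribes.
 */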
static void
ready_event_wfq(struct dn_pipe *p, struct mbuf **head, struct mbuf **tail)
{
	int p_was_empty = (p->head == NULL);
	struct dn_heap *sch = &(p->scheduler_heap);
	struct dn_heap *neh = &(p->not_eligible_heap);
	int64_t p_numbytes = p->numbytes;

	LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);

	if (p->if_name[0] == 0) { /* tx clock is simulated */
		p_numbytes += (curr_time - p->sched_time) * p->bandwidth;
	} else { /* tx clock is for real, the ifq must be empty or this is a NOP */
		if (p->ifp && !IFCQ_IS_EMPTY(p->ifp->if_snd)) {
			return;
		} else {
			DPRINTF(("dummynet: pipe %d ready from %s --\n",
			    p->pipe_nr, p->if_name));
		}
	}

	/*
	 * While we have backlogged traffic AND credit, we need to do
	 * something on the queue.
	 */
	while (p_numbytes >= 0 && (sch->elements > 0 || neh->elements > 0)) {
		if (sch->elements > 0) { /* have some eligible pkts to send out */
			struct dn_flow_queue *q = sch->p[0].object;
			struct mbuf *pkt = q->head;
			struct dn_flow_set *fs = q->fs;
			u_int32_t len = pkt->m_pkthdr.len;
			u_int64_t len_scaled = p->bandwidth ? len * 8 * (hz * 10) : 0;

			heap_extract(sch, NULL, 0); /* remove queue from heap */
			p_numbytes -= len_scaled;
			move_pkt(pkt, q, p, len);

			p->V += (len << MY_M) / p->sum; /* update V */
			q->S = q->F; /* update start time */
			if (q->len == 0) { /* Flow not backlogged any more */
				fs->backlogged--;
				heap_insert(&(p->idle_heap), q->F, q, sizeof(struct dn_flow_queue));
			} else { /* still backlogged */
				/*
				 * update F and position in backlogged queue, then
				 * put flow in not_eligible_heap (we will fix this later).
				 */
				len = (q->head)->m_pkthdr.len;
				q->F += (len << MY_M) / (u_int64_t) fs->weight;
				if (DN_KEY_LEQ(q->S, p->V)) {
					heap_insert(neh, q->S, q, sizeof(struct dn_flow_queue));
				} else {
					heap_insert(sch, q->F, q, sizeof(struct dn_flow_queue));
				}
			}
		}
		/*
		 * now compute V = max(V, min(S_i)). Remember that all elements in sch
		 * have by definition S_i <= V so if sch is not empty, V is surely
		 * the max and we must not update it. Conversely, if sch is empty
		 * we only need to look at neh.
		 */
		if (sch->elements == 0 && neh->elements > 0) {
			p->V = MAX64( p->V, neh->p[0].key );
		}
		/* move from neh to sch any packets that have become eligible */
		while (neh->elements > 0 && DN_KEY_LEQ(neh->p[0].key, p->V)) {
			struct dn_flow_queue *q = neh->p[0].object;
			heap_extract(neh, NULL, 0);
			heap_insert(sch, q->F, q, sizeof(struct dn_flow_queue));
		}

		if (p->if_name[0] != '\0') {/* tx clock is from a real thing */
			p_numbytes = -1; /* mark not ready for I/O */
			break;
		}
	}
	if (sch->elements == 0 && neh->elements == 0 && p_numbytes >= 0
	    && p->idle_heap.elements > 0) {
		/*
		 * no traffic and no events scheduled. We can get rid of idle-heap.
		 */
		int i;

		for (i = 0; i < p->idle_heap.elements; i++) {
			struct dn_flow_queue *q = p->idle_heap.p[i].object;

			q->F = 0;
			q->S = q->F + 1;
		}
		p->sum = 0;
		p->V = 0;
		p->idle_heap.elements = 0;
	}
	/*
	 * If we are getting clocks from dummynet (not a real interface) and
	 * If we are under credit, schedule the next ready event.
	 * Also fix the delivery time of the last packet.
	 */
	if (p->if_name[0] == 0 && p_numbytes < 0) { /* this implies bandwidth >0 */
		dn_key t = 0; /* number of ticks i have to wait */

		if (p->bandwidth > 0) {
			t = (p->bandwidth - 1 - p_numbytes) / p->bandwidth;
		}
		dn_tag_get(p->tail)->dn_output_time += t;
		p->sched_time = curr_time;
		heap_insert(&wfq_ready_heap, curr_time + t, p, sizeof(struct dn_pipe));
		/* XXX should check errors on heap_insert, and drain the whole
		 * queue on error hoping next time we are luckier.
		 */
	}

	/* Fit (adjust if necessary) 64bit result into 32bit variable. */
	if (p_numbytes > INT_MAX) {
		p->numbytes = INT_MAX;
	} else if (p_numbytes < INT_MIN) {
		p->numbytes = INT_MIN;
	} else {
		p->numbytes = (int)p_numbytes;
	}

	/*
	 * If the delay line was empty call transmit_event(p) now.
	 * Otherwise, the scheduler will take care of it.
	 */
	if (p_was_empty) {
		transmit_event(p, head, tail);
	}
}

/*
 * This is called every 1ms. It is used to
 * increment the current tick counter and schedule expired events.
 */
static void
dummynet(__unused void * unused)
{
	void *p; /* generic parameter to handler */
	struct dn_heap *h;
	struct dn_heap *heaps[3];
	struct mbuf * __single head = NULL, * __single tail = NULL;
	int i;
	struct dn_pipe *pe;
	struct timespec ts;
	struct timeval tv;

	heaps[0] = &ready_heap;         /* fixed-rate queues */
	heaps[1] = &wfq_ready_heap;     /* wfq queues */
	heaps[2] = &extract_heap;       /* delay line */

	lck_mtx_lock(&dn_mutex);

	/* make all time measurements in milliseconds (ms) -
	 * here we convert secs and usecs to msecs (just divide the
	 * usecs and take the closest whole number).
	 */
	microuptime(&tv);
	curr_time = (tv.tv_sec * 1000) + (tv.tv_usec / 1000);

	for (i = 0; i < 3; i++) {
		h = heaps[i];
		while (h->elements > 0 && DN_KEY_LEQ(h->p[0].key, curr_time)) {
			if (h->p[0].key > curr_time) {
				printf("dummynet: warning, heap %d is %d ticks late\n",
				    i, (int)(curr_time - h->p[0].key));
			}
			p = h->p[0].object; /* store a copy before heap_extract */
			heap_extract(h, NULL, 0); /* need to extract before processing */
			if (i == 0) {
				ready_event(p, &head, &tail);
			} else if (i == 1) {
				struct dn_pipe *pipe = p;
				if (pipe->if_name[0] != '\0') {
					printf("dummynet: bad ready_event_wfq for pipe %s\n",
					    pipe->if_name);
				} else {
					ready_event_wfq(p, &head, &tail);
				}
			} else {
				transmit_event(p, &head, &tail);
			}
		}
	}
	/* sweep pipes trying to expire idle flow_queues */
	for (i = 0; i < HASHSIZE; i++) {
		SLIST_FOREACH(pe, &pipehash[i], next) {
			if (pe->idle_heap.elements > 0 &&
			    DN_KEY_LT(pe->idle_heap.p[0].key, pe->V)) {
				struct dn_flow_queue *q = pe->idle_heap.p[0].object;

				heap_extract(&(pe->idle_heap), NULL, 0);
				q->S = q->F + 1; /* mark timestamp as invalid */
				pe->sum -= q->fs->weight;
			}
		}
	}

	/* check the heaps to see if there's still stuff in there, and
	 * only set the timer if there are packets to process
	 */
	timer_enabled = 0;
	for (i = 0; i < 3; i++) {
		h = heaps[i];
		if (h->elements > 0) { // set the timer
			ts.tv_sec = 0;
			ts.tv_nsec = 1 * 1000000;       // 1ms
			timer_enabled = 1;
			bsd_timeout(dummynet, NULL, &ts);
			break;
		}
	}

	if (head != NULL) {
		serialize++;
	}

	lck_mtx_unlock(&dn_mutex);

	/* Send out the de-queued list of ready-to-send packets */
	if (head != NULL) {
		dummynet_send(head);
		lck_mtx_lock(&dn_mutex);
		serialize--;
		lck_mtx_unlock(&dn_mutex);
	}
}


static void
dummynet_send(struct mbuf *m)
{
	struct dn_pkt_tag *pkt;
	struct mbuf *n;

	for (; m != NULL; m = n) {
		n = m->m_nextpkt;
		m->m_nextpkt = NULL;
		pkt = dn_tag_get(m);

		DPRINTF(("dummynet_send m: 0x%llx dn_dir: %d dn_flags: 0x%x\n",
		    (uint64_t)VM_KERNEL_ADDRPERM(m), pkt->dn_dir,
		    pkt->dn_flags));

		switch (pkt->dn_dir) {
		case DN_TO_IP_OUT: {
			struct route tmp_rt;

			/* route is already in the packet's dn_ro */
			bzero(&tmp_rt, sizeof(tmp_rt));

			/* Force IP_RAWOUTPUT as the IP header is fully formed */
			pkt->dn_flags |= IP_RAWOUTPUT | IP_FORWARDING;
			(void)ip_output(m, NULL, &tmp_rt, pkt->dn_flags, NULL, NULL);
			ROUTE_RELEASE(&tmp_rt);
			break;
		}
		case DN_TO_IP_IN:
			proto_inject(PF_INET, m);
			break;
		case DN_TO_IP6_OUT: {
			/* routes already in the packet's dn_{ro6,pmtu} */
			if (pkt->dn_origifp != NULL) {
				ip6_output_setsrcifscope(m, pkt->dn_origifp->if_index, NULL);
				ip6_output_setdstifscope(m, pkt->dn_origifp->if_index, NULL);
			} else {
				ip6_output_setsrcifscope(m, IFSCOPE_UNKNOWN, NULL);
				ip6_output_setdstifscope(m, IFSCOPE_UNKNOWN, NULL);
			}

			ip6_output(m, NULL, NULL, IPV6_FORWARDING, NULL, NULL, NULL);
			break;
		}
		case DN_TO_IP6_IN:
			proto_inject(PF_INET6, m);
			break;
		default:
			printf("dummynet: bad switch %d!\n", pkt->dn_dir);
			m_freem(m);
			break;
		}
	}
}

/*
 * Unconditionally expire empty queues in case of shortage.
 * Returns the number of queues freed.
 */
static int
expire_queues(struct dn_flow_set *fs)
{
	struct dn_flow_queue *q, *prev;
	int i, initial_elements = fs->rq_elements;
	struct timeval timenow;

	/* reviewed for getmicrotime usage */
	getmicrotime(&timenow);

	if (fs->last_expired == timenow.tv_sec) {
		return 0;
	}
	fs->last_expired = (int)timenow.tv_sec;
	for (i = 0; i <= fs->rq_size; i++) { /* last one is overflow */
		for (prev = NULL, q = fs->rq[i]; q != NULL;) {
			if (q->head != NULL || q->S != q->F + 1) {
				prev = q;
				q = q->next;
			} else { /* entry is idle, expire it */
				struct dn_flow_queue *old_q = q;

				if (prev != NULL) {
					prev->next = q = q->next;
				} else {
					fs->rq[i] = q = q->next;
				}
				fs->rq_elements--;
				kfree_type(struct dn_flow_queue, old_q);
			}
		}
	}
	return initial_elements - fs->rq_elements;
}

/*
 * If room, create a new queue and put at head of slot i;
 * otherwise, create or use the default queue.
 */
static struct dn_flow_queue *
create_queue(struct dn_flow_set *fs, int i)
{
	struct dn_flow_queue *q;

	if (fs->rq_elements > fs->rq_size * dn_max_ratio &&
	    expire_queues(fs) == 0) {
		/*
		 * No way to get room, use or create overflow queue.
		 */
		i = fs->rq_size;
		if (fs->rq[i] != NULL) {
			return fs->rq[i];
		}
	}
	q = kalloc_type(struct dn_flow_queue, Z_NOWAIT | Z_ZERO);
	if (q == NULL) {
		printf("dummynet: sorry, cannot allocate queue for new flow\n");
		return NULL;
	}
	q->fs = fs;
	q->hash_slot = i;
	q->next = fs->rq[i];
	q->S = q->F + 1; /* hack - mark timestamp as invalid */
	fs->rq[i] = q;
	fs->rq_elements++;
	return q;
}

/*
 * Given a flow_set and a pkt in last_pkt, find a matching queue
 * after appropriate masking. The queue is moved to front
 * so that further searches take less time.
 */
static struct dn_flow_queue *
find_queue(struct dn_flow_set *fs, struct ip_flow_id *id)
{
	int i = 0; /* we need i and q for new allocations */
	struct dn_flow_queue *q, *prev;
	int is_v6 = IS_IP6_FLOW_ID(id);

	if (!(fs->flags_fs & DN_HAVE_FLOW_MASK)) {
		q = fs->rq[0];
	} else {
		/* first, do the masking, then hash */
		id->dst_port &= fs->flow_mask.dst_port;
		id->src_port &= fs->flow_mask.src_port;
		id->proto &= fs->flow_mask.proto;
		id->flags = 0; /* we don't care about this one */
		if (is_v6) {
			APPLY_MASK(&id->dst_ip6, &fs->flow_mask.dst_ip6);
			APPLY_MASK(&id->src_ip6, &fs->flow_mask.src_ip6);
			id->flow_id6 &= fs->flow_mask.flow_id6;

			i = ((id->dst_ip6.__u6_addr.__u6_addr32[0]) & 0xffff) ^
			    ((id->dst_ip6.__u6_addr.__u6_addr32[1]) & 0xffff) ^
			    ((id->dst_ip6.__u6_addr.__u6_addr32[2]) & 0xffff) ^
			    ((id->dst_ip6.__u6_addr.__u6_addr32[3]) & 0xffff) ^

			    ((id->dst_ip6.__u6_addr.__u6_addr32[0] >> 15) & 0xffff) ^
			    ((id->dst_ip6.__u6_addr.__u6_addr32[1] >> 15) & 0xffff) ^
			    ((id->dst_ip6.__u6_addr.__u6_addr32[2] >> 15) & 0xffff) ^
			    ((id->dst_ip6.__u6_addr.__u6_addr32[3] >> 15) & 0xffff) ^

			    ((id->src_ip6.__u6_addr.__u6_addr32[0] << 1) & 0xfffff) ^
			    ((id->src_ip6.__u6_addr.__u6_addr32[1] << 1) & 0xfffff) ^
			    ((id->src_ip6.__u6_addr.__u6_addr32[2] << 1) & 0xfffff) ^
			    ((id->src_ip6.__u6_addr.__u6_addr32[3] << 1) & 0xfffff) ^

			    ((id->src_ip6.__u6_addr.__u6_addr32[0] >> 16) & 0xffff) ^
			    ((id->src_ip6.__u6_addr.__u6_addr32[1] >> 16) & 0xffff) ^
			    ((id->src_ip6.__u6_addr.__u6_addr32[2] >> 16) & 0xffff) ^
			    ((id->src_ip6.__u6_addr.__u6_addr32[3] >> 16) & 0xffff) ^

			    (id->dst_port << 1) ^ (id->src_port) ^
			    (id->proto) ^
			    (id->flow_id6);
		} else {
			id->dst_ip &= fs->flow_mask.dst_ip;
			id->src_ip &= fs->flow_mask.src_ip;

			i = ((id->dst_ip) & 0xffff) ^
			    ((id->dst_ip >> 15) & 0xffff) ^
			    ((id->src_ip << 1) & 0xffff) ^
			    ((id->src_ip >> 16) & 0xffff) ^
			    (id->dst_port << 1) ^ (id->src_port) ^
			    (id->proto);
		}
		i = i % fs->rq_size;
		/* finally, scan the current list for a match */
		searches++;
		for (prev = NULL, q = fs->rq[i]; q;) {
			search_steps++;
			if (is_v6 &&
			    IN6_ARE_ADDR_EQUAL(&id->dst_ip6, &q->id.dst_ip6) &&
			    IN6_ARE_ADDR_EQUAL(&id->src_ip6, &q->id.src_ip6) &&
			    id->dst_port == q->id.dst_port &&
			    id->src_port == q->id.src_port &&
			    id->proto == q->id.proto &&
			    id->flags == q->id.flags &&
			    id->flow_id6 == q->id.flow_id6) {
				break; /* found */
			}
			if (!is_v6 && id->dst_ip == q->id.dst_ip &&
			    id->src_ip == q->id.src_ip &&
			    id->dst_port == q->id.dst_port &&
			    id->src_port == q->id.src_port &&
			    id->proto == q->id.proto &&
			    id->flags == q->id.flags) {
				break; /* found */
			}
			/* No match. Check if we can expire the entry */
			if (pipe_expire && q->head == NULL && q->S == q->F + 1) {
				/* entry is idle and not in any heap, expire it */
				struct dn_flow_queue *old_q = q;

				if (prev != NULL) {
					prev->next = q = q->next;
				} else {
					fs->rq[i] = q = q->next;
				}
				fs->rq_elements--;
				kfree_type(struct dn_flow_queue, old_q);
				continue;
			}
			prev = q;
			q = q->next;
		}
		if (q && prev != NULL) { /* found and not in front */
			prev->next = q->next;
			q->next = fs->rq[i];
			fs->rq[i] = q;
		}
	}
	if (q == NULL) { /* no match, need to allocate a new entry */
		q = create_queue(fs, i);
		if (q != NULL) {
			q->id = *id;
		}
	}
	return q;
}

static int
red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len)
{
	/*
	 * RED algorithm
	 *
	 * RED calculates the average queue size (avg) using a low-pass filter
	 * with an exponential weighted (w_q) moving average:
	 *      avg <- (1-w_q) * avg + w_q * q_size
	 * where q_size is the queue length (measured in bytes or packets).
	 *
	 * If q_size == 0, we compute the idle time for the link, and set
	 *      avg = (1 - w_q)^(idle/s)
	 * where s is the time needed for transmitting a medium-sized packet.
	 *
	 * Now, if avg < min_th the packet is enqueued.
	 * If avg > max_th the packet is dropped. Otherwise, the packet is
	 * dropped with probability P function of avg.
	 *
	 */
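
	/*
	 * Worked example of the EWMA above: with w_q = 0.002 and a current
	 * avg of 10 packets, a sampled queue length of 20 moves the average
	 * to 10 + (20 - 10) * 0.002 = 10.02, so short bursts move the
	 * average only slightly and do not trigger drops. In the code both
	 * avg and w_q are kept in fixed point and combined with SCALE_MUL().
	 */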
1514
1515 int64_t p_b = 0;
1516 /* queue in bytes or packets ? */
1517 u_int q_size = (fs->flags_fs & DN_QSIZE_IS_BYTES) ? q->len_bytes : q->len;
1518
1519 DPRINTF(("\ndummynet: %d q: %2u ", (int) curr_time, q_size));
1520
1521 /* average queue size estimation */
1522 if (q_size != 0) {
1523 /*
1524 * queue is not empty, avg <- avg + (q_size - avg) * w_q
1525 */
1526 int diff = SCALE(q_size) - q->avg;
1527 int64_t v = SCALE_MUL((int64_t) diff, (int64_t) fs->w_q);
1528
1529 q->avg += (int) v;
1530 } else {
1531 /*
1532 * queue is empty, find for how long the queue has been
1533 * empty and use a lookup table for computing
1534 * (1 - * w_q)^(idle_time/s) where s is the time to send a
1535 * (small) packet.
1536 * XXX check wraps...
1537 */
1538 if (q->avg) {
1539 u_int64_t t = (curr_time - q->q_time) / fs->lookup_step;
1540
1541 q->avg = (t < fs->lookup_depth) ?
1542 SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
1543 }
1544 }
1545 DPRINTF(("dummynet: avg: %u ", SCALE_VAL(q->avg)));
1546
1547 /* should i drop ? */
1548
1549 if (q->avg < fs->min_th) {
1550 q->count = -1;
1551 return 0; /* accept packet ; */
1552 }
1553 if (q->avg >= fs->max_th) { /* average queue >= max threshold */
1554 if (fs->flags_fs & DN_IS_GENTLE_RED) {
1555 /*
1556 * According to Gentle-RED, if avg is greater than max_th the
1557 * packet is dropped with a probability
1558 * p_b = c_3 * avg - c_4
1559 * where c_3 = (1 - max_p) / max_th, and c_4 = 1 - 2 * max_p
1560 */
1561 p_b = SCALE_MUL((int64_t) fs->c_3, (int64_t) q->avg) - fs->c_4;
1562 } else {
1563 q->count = -1;
1564 DPRINTF(("dummynet: - drop"));
1565 return 1;
1566 }
1567 } else if (q->avg > fs->min_th) {
1568 /*
1569 * we compute p_b using the linear dropping function p_b = c_1 *
1570 * avg - c_2, where c_1 = max_p / (max_th - min_th), and c_2 =
1571 * max_p * min_th / (max_th - min_th)
1572 */
1573 p_b = SCALE_MUL((int64_t) fs->c_1, (int64_t) q->avg) - fs->c_2;
1574 }
1575 if (fs->flags_fs & DN_QSIZE_IS_BYTES) {
1576 p_b = (p_b * len) / fs->max_pkt_size;
1577 }
1578 if (++q->count == 0) {
1579 q->random = (my_random() & 0xffff);
1580 } else {
1581 /*
1582 * q->count counts packets arrived since last drop, so a greater
1583 * value of q->count means a greater packet drop probability.
1584 */
        if (SCALE_MUL(p_b, SCALE((int64_t) q->count)) > q->random) {
            q->count = 0;
            DPRINTF(("dummynet: - red drop"));
            /* after a drop we calculate a new random value */
            q->random = (my_random() & 0xffff);
            return 1; /* drop */
        }
    }
    /* end of RED algorithm */
    return 0; /* accept */
}

static __inline
struct dn_flow_set *
locate_flowset(int fs_nr)
{
    struct dn_flow_set *fs;
    SLIST_FOREACH(fs, &flowsethash[HASH(fs_nr)], next) {
        if (fs->fs_nr == fs_nr) {
            return fs;
        }
    }

    return NULL;
}

static __inline struct dn_pipe *
locate_pipe(int pipe_nr)
{
    struct dn_pipe *pipe;

    SLIST_FOREACH(pipe, &pipehash[HASH(pipe_nr)], next) {
        if (pipe->pipe_nr == pipe_nr) {
            return pipe;
        }
    }

    return NULL;
}
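
/*
 * Editorial note: both lookups above are expected O(1): HASH() selects a
 * bucket in flowsethash[]/pipehash[] and the SLIST chain is scanned
 * linearly, so the cost only degrades when many pipe/flowset numbers
 * collide in the same bucket.
 */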


/*
 * dummynet hook for packets. Below 'pipe' is a pipe or a queue
 * depending on whether WF2Q or fixed bandwidth is used.
 *
 * pipe_nr  pipe or queue the packet is destined for.
 * dir      where to send the packet after dummynet.
 * m        the mbuf with the packet
 * ifp      the 'ifp' parameter from the caller.
 *          NULL in ip_input, destination interface in ip_output,
 *          real_dst in bdg_forward
 * ro       route parameter (only used in ip_output, NULL otherwise)
 * dst      destination address, only used by ip_output
 * rule     matching rule, in case of multiple passes
 * flags    flags from the caller, only used in ip_output
 *
 */
static int
dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa)
{
    struct mbuf *__single head = NULL, *__single tail = NULL;
    struct dn_pkt_tag *pkt;
    struct m_tag *mtag;
    struct dn_flow_set *fs = NULL;
    struct dn_pipe *pipe;
    u_int32_t len = m->m_pkthdr.len;
    struct dn_flow_queue *q = NULL;
    int is_pipe = 0;
    struct timespec ts;
    struct timeval tv;

    DPRINTF(("dummynet_io m: 0x%llx pipe: %d dir: %d\n",
        (uint64_t)VM_KERNEL_ADDRPERM(m), pipe_nr, dir));

#if DUMMYNET
    is_pipe = fwa->fwa_flags == DN_IS_PIPE ? 1 : 0;
#endif /* DUMMYNET */

    pipe_nr &= 0xffff;

    lck_mtx_lock(&dn_mutex);

    /* make all time measurements in milliseconds (ms) -
     * here we convert secs and usecs to msecs (the usecs are simply
     * divided, truncating the remainder).
     */
    microuptime(&tv);
    curr_time = (tv.tv_sec * 1000) + (tv.tv_usec / 1000);

    /*
     * This is a dummynet rule, so we expect an O_PIPE or O_QUEUE rule.
     */
    if (is_pipe) {
        pipe = locate_pipe(pipe_nr);
        if (pipe != NULL) {
            fs = &(pipe->fs);
        }
    } else {
        fs = locate_flowset(pipe_nr);
    }

    if (fs == NULL) {
        goto dropit;    /* this queue/pipe does not exist! */
    }
    pipe = fs->pipe;
    if (pipe == NULL) { /* must be a queue, try to find a matching pipe */
        pipe = locate_pipe(fs->parent_nr);

        if (pipe != NULL) {
            fs->pipe = pipe;
        } else {
            printf("dummynet: no pipe %d for queue %d, drop pkt\n",
                fs->parent_nr, fs->fs_nr);
            goto dropit;
        }
    }
    q = find_queue(fs, &(fwa->fwa_id));
    if (q == NULL) {
        goto dropit;    /* cannot allocate queue */
    }
    /*
     * update statistics, then check reasons to drop pkt
     */
    q->tot_bytes += len;
    q->tot_pkts++;
    if (fs->plr && (my_random() < fs->plr)) {
        goto dropit;    /* random pkt drop */
    }
    if (fs->flags_fs & DN_QSIZE_IS_BYTES) {
        if (q->len_bytes > fs->qsize) {
            goto dropit;    /* queue size overflow */
        }
    } else {
        if (q->len >= fs->qsize) {
            goto dropit;    /* queue count overflow */
        }
    }
    if (fs->flags_fs & DN_IS_RED && red_drops(fs, q, len)) {
        goto dropit;
    }

    /* XXX expensive to zero, see if we can remove it */
    mtag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DUMMYNET,
        sizeof(struct dn_pkt_tag), M_NOWAIT, m);
    if (mtag == NULL) {
        goto dropit;    /* cannot allocate packet header */
    }
    m_tag_prepend(m, mtag);     /* attach to mbuf chain */

    pkt = (struct dn_pkt_tag *)(mtag->m_tag_data);
    bzero(pkt, sizeof(struct dn_pkt_tag));
    /* ok, we can handle the pkt now... */
    /* build and enqueue packet + parameters */
    pkt->dn_pf_rule = fwa->fwa_pf_rule;
    pkt->dn_dir = dir;

    pkt->dn_ifp = fwa->fwa_oif;
    if (dir == DN_TO_IP_OUT) {
        /*
         * We need to copy *ro because for ICMP pkts (and maybe others)
         * the caller passed a pointer into the stack; dst might also be
         * a pointer into *ro so it needs to be updated.
         */
        if (fwa->fwa_ro) {
            route_copyout(&pkt->dn_ro, fwa->fwa_ro, sizeof(pkt->dn_ro));
        }
        if (fwa->fwa_dst) {
            if (fwa->fwa_dst == SIN(&fwa->fwa_ro->ro_dst)) { /* dst points into ro */
                fwa->fwa_dst = SIN(&(pkt->dn_ro.ro_dst));
            }

            SOCKADDR_COPY(fwa->fwa_dst, &pkt->dn_dst, sizeof(pkt->dn_dst));
        }
    } else if (dir == DN_TO_IP6_OUT) {
        if (fwa->fwa_ro6) {
            route_copyout((struct route *)&pkt->dn_ro6,
                (struct route *)fwa->fwa_ro6, sizeof(pkt->dn_ro6));
        }
        if (fwa->fwa_ro6_pmtu) {
            route_copyout((struct route *)&pkt->dn_ro6_pmtu,
                (struct route *)fwa->fwa_ro6_pmtu, sizeof(pkt->dn_ro6_pmtu));
        }
        if (fwa->fwa_dst6) {
            if (fwa->fwa_dst6 == SIN6(&fwa->fwa_ro6->ro_dst)) { /* dst points into ro */
                fwa->fwa_dst6 = SIN6(&(pkt->dn_ro6.ro_dst));
            }

            SOCKADDR_COPY(fwa->fwa_dst6, &pkt->dn_dst6, sizeof(pkt->dn_dst6));
        }
        pkt->dn_origifp = fwa->fwa_origifp;
        pkt->dn_mtu = fwa->fwa_mtu;
        pkt->dn_unfragpartlen = fwa->fwa_unfragpartlen;
        if (fwa->fwa_exthdrs) {
            bcopy(fwa->fwa_exthdrs, &pkt->dn_exthdrs, sizeof(pkt->dn_exthdrs));
            /*
             * Need to zero out the source structure so the mbufs
             * won't be freed by ip6_output()
             */
            bzero(fwa->fwa_exthdrs, sizeof(struct ip6_exthdrs));
        }
    }
    if (dir == DN_TO_IP_OUT || dir == DN_TO_IP6_OUT) {
        pkt->dn_flags = fwa->fwa_oflags;
        if (fwa->fwa_ipoa != NULL) {
            pkt->dn_ipoa = *(fwa->fwa_ipoa);
        }
    }
    if (q->head == NULL) {
        q->head = m;
    } else {
        q->tail->m_nextpkt = m;
    }
    q->tail = m;
    q->len++;
    q->len_bytes += len;

    if (q->head != m) { /* flow was not idle, we are done */
        goto done;
    }
    /*
     * If we reach this point the flow was previously idle, so we need
     * to schedule it. This involves different actions for fixed-rate or
     * WF2Q queues.
     */
    if (is_pipe) {
        /*
         * Fixed-rate queue: just insert into the ready_heap.
         */
        dn_key t = 0;
        if (pipe->bandwidth) {
            t = SET_TICKS(m, q, pipe);
        }
        q->sched_time = curr_time;
        if (t == 0) { /* must process it now */
            ready_event(q, &head, &tail);
        } else {
            heap_insert(&ready_heap, curr_time + t, q, sizeof(struct dn_flow_queue));
        }
    } else {
        /*
         * WF2Q. First, compute start time S: if the flow was idle (S=F+1)
         * set S to the virtual time V for the controlling pipe, and update
         * the sum of weights for the pipe; otherwise, remove flow from
         * idle_heap and set S to max(F,V).
         * Second, compute finish time F = S + len/weight.
         * Third, if pipe was idle, update V = max(S, V).
         * Fourth, count one more backlogged flow.
         */
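        /*
         * Editorial note, a worked example with illustrative numbers:
         * suppose the pipe's virtual time is V = 100 and the flow was
         * idle, so S <- V = 100. If the scaled length term
         * (len << MY_M) is 5000 and the flow weight is 10, then
         * F = 100 + 5000 / 10 = 600, and the flow competes in the
         * scheduler_heap keyed on that finish time, as computed below.
         */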
        if (DN_KEY_GT(q->S, q->F)) { /* means timestamps are invalid */
            q->S = pipe->V;
            pipe->sum += fs->weight; /* add weight of new queue */
        } else {
            heap_extract(&(pipe->idle_heap), q, sizeof(struct dn_flow_queue));
            q->S = MAX64(q->F, pipe->V);
        }
        q->F = q->S + (len << MY_M) / (u_int64_t) fs->weight;

        if (pipe->not_eligible_heap.elements == 0 &&
            pipe->scheduler_heap.elements == 0) {
            pipe->V = MAX64(q->S, pipe->V);
        }
        fs->backlogged++;
        /*
         * Look at eligibility. A flow is not eligible if S>V (when
         * this happens, it means that there is some other flow already
         * scheduled for the same pipe, so the scheduler_heap cannot be
         * empty). If the flow is not eligible we just store it in the
         * not_eligible_heap. Otherwise, we store it in the scheduler_heap
         * and possibly invoke ready_event_wfq() right now if there is
         * leftover credit.
         * Note that for all flows in scheduler_heap (SCH), S_i <= V,
         * and for all flows in not_eligible_heap (NEH), S_i > V.
         * So when we need to compute max(V, min(S_i)) for all i in
         * SCH+NEH, we only need to look into NEH.
         */
        if (DN_KEY_GT(q->S, pipe->V)) { /* not eligible */
            if (pipe->scheduler_heap.elements == 0) {
                printf("dummynet: ++ ouch! not eligible but empty scheduler!\n");
            }
            heap_insert(&(pipe->not_eligible_heap), q->S, q, sizeof(struct dn_flow_queue));
        } else {
            heap_insert(&(pipe->scheduler_heap), q->F, q, sizeof(struct dn_flow_queue));
            if (pipe->numbytes >= 0) { /* pipe is idle */
                if (pipe->scheduler_heap.elements != 1) {
                    printf("dummynet: OUCH! pipe should have been idle!\n");
                }
                DPRINTF(("dummynet: waking up pipe %d at %d\n",
                    pipe->pipe_nr, (int)(q->F >> MY_M)));
                pipe->sched_time = curr_time;
                ready_event_wfq(pipe, &head, &tail);
            }
        }
    }
done:
    /* start the timer and set global if not already set */
    if (!timer_enabled) {
        ts.tv_sec = 0;
        ts.tv_nsec = 1 * 1000000;       // 1ms
        timer_enabled = 1;
        bsd_timeout(dummynet, NULL, &ts);
    }

    lck_mtx_unlock(&dn_mutex);

    if (head != NULL) {
        dummynet_send(head);
    }

    return 0;

dropit:
    if (q) {
        q->drops++;
    }
    lck_mtx_unlock(&dn_mutex);
    m_freem(m);
    return (fs && (fs->flags_fs & DN_NOERROR)) ? 0 : ENOBUFS;
}

/*
 * Below, the ROUTE_RELEASE is only needed when (pkt->dn_dir == DN_TO_IP_OUT).
 * Checking for that would probably save us the initial bzero of dn_pkt.
 */
#define DN_FREE_PKT(_m) do {                                            \
    struct m_tag *tag = m_tag_locate(                                   \
        (_m), KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DUMMYNET);          \
    if (tag) {                                                          \
        VERIFY(sizeof(struct dn_pkt_tag) <= tag->m_tag_len);            \
        struct dn_pkt_tag *n = (struct dn_pkt_tag *)(tag->m_tag_data);  \
        ROUTE_RELEASE(&n->dn_ro);                                       \
    }                                                                   \
    m_tag_delete((_m), tag);                                            \
    m_freem((_m));                                                      \
} while (0)

/*
 * Dispose of all packets and flow_queues on a flow_set.
 * If all=1, also remove the RED lookup table and other storage,
 * including the descriptor itself.
 * For the flow_set embedded in a dn_pipe, the caller MUST also clean up
 * the ready_heap...
 */
static void
purge_flow_set(struct dn_flow_set *fs, int all)
{
    struct dn_flow_queue *q, *qn;
    int i;

    LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);

    for (i = 0; i <= fs->rq_size; i++) {
        for (q = fs->rq[i]; q; q = qn) {
            struct mbuf *m, *mnext;

            mnext = q->head;
            while ((m = mnext) != NULL) {
                mnext = m->m_nextpkt;
                DN_FREE_PKT(m);
            }
            qn = q->next;
            kfree_type(struct dn_flow_queue, q);
        }
        fs->rq[i] = NULL;
    }
    fs->rq_elements = 0;
    if (all) {
        /* RED - free lookup table */
        if (fs->w_q_lookup) {
            kfree_data_counted_by(fs->w_q_lookup, fs->lookup_depth);
        }
        /*
         * Cannot use `kfree_type_counted_by' for `fs->rq',
         * because the latter is counted by `fs->rq_size + 1'
         */
        struct dn_flow_queue **rq = fs->rq;
        kfree_type(struct dn_flow_queue *, fs->rq_size + 1, rq);
        fs->rq = NULL;
        fs->rq_size = 0;
        /* if this fs is not part of a pipe, free it */
        if (fs->pipe && fs != &(fs->pipe->fs)) {
            kfree_type(struct dn_flow_set, fs);
        }
    }
}

/*
 * Dispose of all packets queued on a pipe (not a flow_set).
 * Also free all resources associated with a pipe, which is about
 * to be deleted.
 */
static void
purge_pipe(struct dn_pipe *pipe)
{
    struct mbuf *m, *mnext;

    purge_flow_set(&(pipe->fs), 1);

    mnext = pipe->head;
    while ((m = mnext) != NULL) {
        mnext = m->m_nextpkt;
        DN_FREE_PKT(m);
    }

    heap_free(&(pipe->scheduler_heap));
    heap_free(&(pipe->not_eligible_heap));
    heap_free(&(pipe->idle_heap));
}

/*
 * Delete all pipes and heaps, returning the memory.
 */
static void
dummynet_flush(void)
{
    struct dn_pipe *pipe, *pipe1;
    struct dn_flow_set *fs, *fs1;
    int i;

    lck_mtx_lock(&dn_mutex);

    /* Free heaps so we don't have unwanted events. */
    heap_free(&ready_heap);
    heap_free(&wfq_ready_heap);
    heap_free(&extract_heap);

    /*
     * Now purge all queued pkts and delete all pipes.
     *
     * XXXGL: can we merge the for(;;) cycles into one or not?
     */
    for (i = 0; i < HASHSIZE; i++) {
        SLIST_FOREACH_SAFE(fs, &flowsethash[i], next, fs1) {
            SLIST_REMOVE(&flowsethash[i], fs, dn_flow_set, next);
            purge_flow_set(fs, 1);
        }
    }
    for (i = 0; i < HASHSIZE; i++) {
        SLIST_FOREACH_SAFE(pipe, &pipehash[i], next, pipe1) {
            SLIST_REMOVE(&pipehash[i], pipe, dn_pipe, next);
            purge_pipe(pipe);
            kfree_type(struct dn_pipe, pipe);
        }
    }
    lck_mtx_unlock(&dn_mutex);
}

/*
 * setup RED parameters
 */
static int
config_red(struct dn_flow_set *p, struct dn_flow_set *x)
{
    int i;

    x->w_q = p->w_q;
    x->min_th = SCALE(p->min_th);
    x->max_th = SCALE(p->max_th);
    x->max_p = p->max_p;

    x->c_1 = p->max_p / (p->max_th - p->min_th);
    x->c_2 = SCALE_MUL(x->c_1, SCALE(p->min_th));
    if (x->flags_fs & DN_IS_GENTLE_RED) {
        x->c_3 = (SCALE(1) - p->max_p) / p->max_th;
        x->c_4 = (SCALE(1) - 2 * p->max_p);
    }

    /* if the lookup table already exists, free it and create it again */
    if (x->w_q_lookup) {
        kfree_data_counted_by(x->w_q_lookup, x->lookup_depth);
    }
    if (red_lookup_depth == 0) {
        printf("\ndummynet: net.inet.ip.dummynet.red_lookup_depth must be > 0\n");
        return EINVAL;
    }
    x->w_q_lookup = (u_int *) kalloc_data(red_lookup_depth * sizeof(int),
        Z_WAITOK | Z_ZERO);
    x->lookup_depth = red_lookup_depth;
    if (x->w_q_lookup == NULL) {
        printf("dummynet: sorry, cannot allocate red lookup table\n");
        return ENOMEM;
    }

    /* fill the lookup table with (1 - w_q)^x */
    x->lookup_step = p->lookup_step;
    x->lookup_weight = p->lookup_weight;
    x->w_q_lookup[0] = SCALE(1) - x->w_q;
    for (i = 1; i < x->lookup_depth; i++) {
        x->w_q_lookup[i] = SCALE_MUL(x->w_q_lookup[i - 1], x->lookup_weight);
    }
    if (red_avg_pkt_size < 1) {
        red_avg_pkt_size = 512;
    }
    x->avg_pkt_size = red_avg_pkt_size;
    if (red_max_pkt_size < 1) {
        red_max_pkt_size = 1500;
    }
    x->max_pkt_size = red_max_pkt_size;
    return 0;
}
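
/*
 * Editorial note on how the table above is consumed (see red_drops()):
 * after the queue has been idle, t = (curr_time - q_time) / lookup_step
 * indexes w_q_lookup[], and avg is multiplied by the entry, i.e. the
 * average decays geometrically as (1 - w_q) * lookup_weight^t with
 * increasing idle time.
 */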

static int
alloc_hash(struct dn_flow_set *x, struct dn_flow_set *pfs)
{
    if (x->flags_fs & DN_HAVE_FLOW_MASK) { /* allocate some slots */
        int l = pfs->rq_size;

        if (l == 0) {
            l = dn_hash_size;
        }
        if (l < 4) {
            l = 4;
        } else if (l > DN_MAX_HASH_SIZE) {
            l = DN_MAX_HASH_SIZE;
        }
        x->rq = kalloc_type(struct dn_flow_queue *, l + 1,
            Z_NOWAIT | Z_ZERO);
        x->rq_size = l;
    } else { /* one is enough for null mask */
        x->rq = kalloc_type(struct dn_flow_queue *, 1 + 1,
            Z_NOWAIT | Z_ZERO);
        x->rq_size = 1;
    }

    if (x->rq == NULL) {
        printf("dummynet: sorry, cannot allocate queue\n");
        return ENOMEM;
    }
    x->rq_elements = 0;
    return 0;
}

static int
set_fs_parms(struct dn_flow_set *x, struct dn_flow_set *src)
{
    x->flags_fs = src->flags_fs;
    x->qsize = src->qsize;
    x->plr = src->plr;
    x->flow_mask = src->flow_mask;
    if (x->flags_fs & DN_QSIZE_IS_BYTES) {
        if (x->qsize > 1024 * 1024) {
            x->qsize = 1024 * 1024;
        }
    } else {
        if (x->qsize == 0) {
            x->qsize = 50;
        }
        if (x->qsize > 100) {
            x->qsize = 50;
        }
    }
    /* configuring RED */
    if (x->flags_fs & DN_IS_RED) {
        return config_red(src, x); /* XXX should check errors */
    }
    return 0;
}

/*
 * setup pipe or queue parameters.
 */
static int
config_pipe(struct dn_pipe *p)
{
    int i, r;
    struct dn_flow_set *pfs = &(p->fs);
    struct dn_flow_queue *q;
    bool is_new = false;

    /*
     * The config program passes parameters as follows:
     * bw = bits/second (0 means no limits),
     * delay = ms, must be translated into ticks.
     * qsize = slots/bytes
     */
    p->delay = (p->delay * (hz * 10)) / 1000;
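    /*
     * Editorial note, arithmetic example: with hz = 100, a 50 ms delay
     * becomes 50 * (100 * 10) / 1000 = 50 internal ticks; other hz
     * values scale the result proportionally.
     */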
    /* We need either a pipe number or a flow_set number */
    if (p->pipe_nr == 0 && pfs->fs_nr == 0) {
        return EINVAL;
    }
    if (p->pipe_nr != 0 && pfs->fs_nr != 0) {
        return EINVAL;
    }
    if (p->pipe_nr != 0) { /* this is a pipe */
        struct dn_pipe *x, *b;
        struct dummynet_event dn_event;
        lck_mtx_lock(&dn_mutex);

        /* locate pipe */
        b = locate_pipe(p->pipe_nr);

        if (b == NULL || b->pipe_nr != p->pipe_nr) { /* new pipe */
            is_new = true;
            x = kalloc_type(struct dn_pipe, Z_WAITOK | Z_ZERO);
            if (x == NULL) {
                lck_mtx_unlock(&dn_mutex);
                printf("dummynet: no memory for new pipe\n");
                return ENOMEM;
            }
            x->pipe_nr = p->pipe_nr;
            x->fs.pipe = x;
            /*
             * idle_heap is the only heap we ever extract from the
             * middle of.
             */
            x->idle_heap.p = NULL;
            x->idle_heap.size = x->idle_heap.elements = 0;
            x->idle_heap.offset = offsetof(struct dn_flow_queue, heap_pos);
        } else {
            x = b;
            /* Flush accumulated credit for all queues */
            for (i = 0; i <= x->fs.rq_size; i++) {
                for (q = x->fs.rq[i]; q; q = q->next) {
                    q->numbytes = 0;
                }
            }
        }

        x->bandwidth = p->bandwidth;
        x->numbytes = 0; /* just in case... */
        bcopy(p->if_name, x->if_name, sizeof(p->if_name));
        x->ifp = NULL; /* reset interface ptr */
        x->delay = p->delay;
        r = set_fs_parms(&(x->fs), pfs);
        if (r != 0) {
            lck_mtx_unlock(&dn_mutex);
            if (is_new) { /* a new pipe */
                kfree_type(struct dn_pipe, x);
            }
            return r;
        }

        if (x->fs.rq == NULL) { /* a new pipe */
            r = alloc_hash(&(x->fs), pfs);
            if (r) {
                lck_mtx_unlock(&dn_mutex);
                if (is_new) {
                    kfree_type(struct dn_pipe, x);
                }
                return r;
            }
            SLIST_INSERT_HEAD(&pipehash[HASH(x->pipe_nr)],
                x, next);
        }
        lck_mtx_unlock(&dn_mutex);

        bzero(&dn_event, sizeof(dn_event));
        dn_event.dn_event_code = DUMMYNET_PIPE_CONFIG;
        dn_event.dn_event_pipe_config.bandwidth = p->bandwidth;
        dn_event.dn_event_pipe_config.delay = p->delay;
        dn_event.dn_event_pipe_config.plr = pfs->plr;

        dummynet_event_enqueue_nwk_wq_entry(&dn_event);
    } else { /* config queue */
        struct dn_flow_set *x, *b;

        lck_mtx_lock(&dn_mutex);
        /* locate flow_set */
        b = locate_flowset(pfs->fs_nr);

        if (b == NULL || b->fs_nr != pfs->fs_nr) { /* new */
            is_new = true;
            if (pfs->parent_nr == 0) { /* need link to a pipe */
                lck_mtx_unlock(&dn_mutex);
                return EINVAL;
            }
            x = kalloc_type(struct dn_flow_set, Z_WAITOK | Z_ZERO);
            if (x == NULL) {
                lck_mtx_unlock(&dn_mutex);
                printf("dummynet: no memory for new flow_set\n");
                return ENOMEM;
            }
            x->fs_nr = pfs->fs_nr;
            x->parent_nr = pfs->parent_nr;
            x->weight = pfs->weight;
            if (x->weight == 0) {
                x->weight = 1;
            } else if (x->weight > 100) {
                x->weight = 100;
            }
        } else {
            /* Changing the parent pipe is not allowed; must delete and recreate */
            if (pfs->parent_nr != 0 && b->parent_nr != pfs->parent_nr) {
                lck_mtx_unlock(&dn_mutex);
                return EINVAL;
            }
            x = b;
        }
        r = set_fs_parms(x, pfs);
        if (r != 0) {
            lck_mtx_unlock(&dn_mutex);
            printf("dummynet: cannot configure flow_set parameters\n");
            if (is_new) {
                kfree_type(struct dn_flow_set, x);
            }
            return r;
        }

        if (x->rq == NULL) { /* a new flow_set */
            r = alloc_hash(x, pfs);
            if (r) {
                lck_mtx_unlock(&dn_mutex);
                kfree_type(struct dn_flow_set, x);
                return r;
            }
            SLIST_INSERT_HEAD(&flowsethash[HASH(x->fs_nr)],
                x, next);
        }
        lck_mtx_unlock(&dn_mutex);
    }
    return 0;
}

/*
 * Helper function to remove from a heap queues which are linked to
 * a flow_set about to be deleted.
 */
static void
fs_remove_from_heap(struct dn_heap *h, struct dn_flow_set *fs)
{
    int i = 0, found = 0;
    for (; i < h->elements;) {
        if (((struct dn_flow_queue *)h->p[i].object)->fs == fs) {
            h->elements--;
            h->p[i] = h->p[h->elements];
            found++;
        } else {
            i++;
        }
    }
    if (found) {
        heapify(h);
    }
}

/*
 * helper function to remove a pipe from a heap (can be there at most once)
 */
static void
pipe_remove_from_heap(struct dn_heap *h, struct dn_pipe *p)
{
    if (h->elements > 0) {
        int i = 0;
        for (i = 0; i < h->elements; i++) {
            if (h->p[i].object == p) { /* found it */
                h->elements--;
                h->p[i] = h->p[h->elements];
                heapify(h);
                break;
            }
        }
    }
}

/*
 * drain all queues. Called in case of severe mbuf shortage.
 */
void
dummynet_drain(void)
{
    struct dn_flow_set *fs;
    struct dn_pipe *p;
    struct mbuf *m, *mnext;
    int i;

    LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);

    heap_free(&ready_heap);
    heap_free(&wfq_ready_heap);
    heap_free(&extract_heap);
    /* drop all queued packets on every flow_set */
    for (i = 0; i < HASHSIZE; i++) {
        SLIST_FOREACH(fs, &flowsethash[i], next) {
            purge_flow_set(fs, 0);
        }
    }

    for (i = 0; i < HASHSIZE; i++) {
        SLIST_FOREACH(p, &pipehash[i], next) {
            purge_flow_set(&(p->fs), 0);

            mnext = p->head;
            while ((m = mnext) != NULL) {
                mnext = m->m_nextpkt;
                DN_FREE_PKT(m);
            }
            p->head = p->tail = NULL;
        }
    }
}

/*
 * Fully delete a pipe or a queue, cleaning up associated info.
 */
static int
delete_pipe(struct dn_pipe *p)
{
    if (p->pipe_nr == 0 && p->fs.fs_nr == 0) {
        return EINVAL;
    }
    if (p->pipe_nr != 0 && p->fs.fs_nr != 0) {
        return EINVAL;
    }
    if (p->pipe_nr != 0) { /* this is an old-style pipe */
        struct dn_pipe *b;
        struct dn_flow_set *fs;
        int i;

        lck_mtx_lock(&dn_mutex);
        /* locate pipe */
        b = locate_pipe(p->pipe_nr);
        if (b == NULL) {
            lck_mtx_unlock(&dn_mutex);
            return EINVAL; /* not found */
        }

        /* Unlink from list of pipes. */
        SLIST_REMOVE(&pipehash[HASH(b->pipe_nr)], b, dn_pipe, next);

        /* Remove all references to this pipe from flow_sets. */
        for (i = 0; i < HASHSIZE; i++) {
            SLIST_FOREACH(fs, &flowsethash[i], next) {
                if (fs->pipe == b) {
                    printf("dummynet: ++ ref to pipe %d from fs %d\n",
                        p->pipe_nr, fs->fs_nr);
                    fs->pipe = NULL;
                    purge_flow_set(fs, 0);
                }
            }
        }
        fs_remove_from_heap(&ready_heap, &(b->fs));

        purge_pipe(b); /* remove all data associated with this pipe */
        /* remove references to this pipe from extract_heap and wfq_ready_heap */
        pipe_remove_from_heap(&extract_heap, b);
        pipe_remove_from_heap(&wfq_ready_heap, b);
        lck_mtx_unlock(&dn_mutex);

        kfree_type(struct dn_pipe, b);
    } else { /* this is a WF2Q queue (dn_flow_set) */
        struct dn_flow_set *b;

        lck_mtx_lock(&dn_mutex);
        /* locate set */
        b = locate_flowset(p->fs.fs_nr);
        if (b == NULL) {
            lck_mtx_unlock(&dn_mutex);
            return EINVAL; /* not found */
        }

        /* Unlink from list of flowsets. */
        SLIST_REMOVE(&flowsethash[HASH(b->fs_nr)], b, dn_flow_set, next);

        if (b->pipe != NULL) {
            /* Update total weight on parent pipe and cleanup parent heaps */
            b->pipe->sum -= b->weight * b->backlogged;
            fs_remove_from_heap(&(b->pipe->not_eligible_heap), b);
            fs_remove_from_heap(&(b->pipe->scheduler_heap), b);
#if 1 /* XXX should i remove from idle_heap as well ? */
            fs_remove_from_heap(&(b->pipe->idle_heap), b);
#endif
        }
        purge_flow_set(b, 1);
        lck_mtx_unlock(&dn_mutex);
    }
    return 0;
}

/*
 * helper function used to copy data from kernel in DUMMYNET_GET
 */
static
char*
dn_copy_set_32(struct dn_flow_set *set, char *bp __ended_by(bp_end), char *bp_end)
{
    int i, copied = 0, overflow = 0;
    struct dn_flow_queue *q;
    struct dn_flow_queue_32 *qp;
    caddr_t bp_inner = bp;

    IP_DN_DBG_LOG("Asked to copy set_32: available=%ld, required=%lu",
        bp_end - bp_inner, sizeof(*qp));
    if (bp_end - bp_inner < sizeof(*qp)) {
        IP_DN_DBG_LOG("Insufficient space to copy: available=%ld, required=%lu",
            bp_end - bp_inner, sizeof(*qp));
        return NULL;
    }

    LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);

    for (i = 0; i <= set->rq_size && !overflow; i++) {
        for (q = set->rq[i]; q && !overflow; q = q->next) {
            /* Check for overflow before attempting to write to `bp_inner' */
            if (bp_end - bp_inner < sizeof(*qp)) {
                os_log_error(OS_LOG_DEFAULT,
                    "Stopping flow set copy after %d elements to avoid overflow; "
                    "mem: available=%lu, required=%lu", copied, bp_end - bp_inner, sizeof(*qp));
                overflow = 1;
                break;
            }

            if (q->hash_slot != i) {
                printf("dummynet: ++ at %d: wrong slot (have %d, "
                    "should be %d)\n", copied, q->hash_slot, i);
            }
            if (q->fs != set) {
                printf("dummynet: ++ at %d: wrong fs ptr "
                    "(have 0x%llx, should be 0x%llx)\n", i,
                    (uint64_t)VM_KERNEL_ADDRPERM(q->fs),
                    (uint64_t)VM_KERNEL_ADDRPERM(set));
            }
            copied++;
            qp = (struct dn_flow_queue_32 *)(void *)bp_inner;
            bp_inner += sizeof(*qp);
            IP_DN_DBG_LOG("Advancing bp_inner old=0x%llx new=0x%llx end=0x%llx available=%lu",
                (uint64_t)VM_KERNEL_ADDRPERM(qp),
                (uint64_t)VM_KERNEL_ADDRPERM(bp_inner),
                (uint64_t)VM_KERNEL_ADDRPERM(bp_end),
                bp_end - bp_inner);
            cp_queue_to_32_user(q, qp);
            /* cleanup pointers */
            qp->next = (user32_addr_t)0;
            qp->head = qp->tail = (user32_addr_t)0;
            qp->fs = (user32_addr_t)0;
        }
    }
    if (copied != set->rq_elements) {
        printf("dummynet: ++ wrong count, have %d should be %d\n",
            copied, set->rq_elements);
    }
    return overflow ? NULL : bp_inner;
}

/*
 * Returns NULL on overflow.
 */
static
char*
dn_copy_set_64(struct dn_flow_set *set, char *bp __ended_by(bp_end), char *bp_end)
{
    int i, copied = 0, overflow = 0, total_required = 0;
    struct dn_flow_queue *q;
    struct dn_flow_queue_64 *qp;
    caddr_t bp_inner = bp;

    IP_DN_DBG_LOG("Asked to copy set_64: available=%ld", bp_end - bp_inner);
    if (bp_end - bp_inner < sizeof(*qp)) {
        IP_DN_DBG_LOG("Insufficient space to copy: available=%ld, required=%lu",
            bp_end - bp_inner, sizeof(*qp));
        return NULL;
    }

    LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);

    for (i = 0; i <= set->rq_size && !overflow; i++) {
        for (q = set->rq[i]; q && !overflow; q = q->next) {
            IP_DN_DBG_LOG("Iter=%d to copied=%d available=%ld",
                i, copied, bp_end - bp_inner);
            /* Check for overflow before attempting to write to `bp_inner' */
            if (bp_end - bp_inner < sizeof(*qp)) {
                os_log_error(OS_LOG_DEFAULT,
                    "Stopping flow set copy after %d elements to avoid overflow; "
                    "mem: available=%lu, required=%lu", copied, bp_end - bp_inner, sizeof(*qp));
                overflow = 1;
            }
            /* If the overflow bit is set, continue the iteration only to count the total required memory */
            total_required += sizeof(*qp);
            if (overflow) {
                continue; /* to next q, we are only counting memory here. */
            }

            if (q->hash_slot != i) {
                printf("dummynet: ++ at %d: wrong slot (have %d, "
                    "should be %d)\n", copied, q->hash_slot, i);
            }
            if (q->fs != set) {
                printf("dummynet: ++ at %d: wrong fs ptr "
                    "(have 0x%llx, should be 0x%llx)\n", i,
                    (uint64_t)VM_KERNEL_ADDRPERM(q->fs),
                    (uint64_t)VM_KERNEL_ADDRPERM(set));
            }
            copied++;
            //bcopy(q, qp, sizeof(*q));
            qp = (struct dn_flow_queue_64 *)(void *)bp_inner;
            bp_inner += sizeof(*qp);
            IP_DN_DBG_LOG("Advancing bp_inner old=0x%llx new=0x%llx end=0x%llx available=%lu",
                (uint64_t)VM_KERNEL_ADDRPERM(qp),
                (uint64_t)VM_KERNEL_ADDRPERM(bp_inner),
                (uint64_t)VM_KERNEL_ADDRPERM(bp_end),
                bp_end - bp_inner);
            cp_queue_to_64_user(q, qp);
            /* cleanup pointers */
            qp->next = USER_ADDR_NULL;
            qp->head = qp->tail = USER_ADDR_NULL;
            qp->fs = USER_ADDR_NULL;
        }
    }
    if (copied != set->rq_elements) {
        printf("dummynet: ++ wrong count, have %d should be %d\n",
            copied, set->rq_elements);
    }

    char *ret = overflow ? NULL : bp_inner;
    IP_DN_DBG_LOG("Returning 0x%llx overflow=%d bp=0x%llx bp_inner=0x%llx bp_end=0x%llx available=%ld total_required=%d total_available=%lu",
        (uint64_t)VM_KERNEL_ADDRPERM(ret),
        overflow,
        (uint64_t)VM_KERNEL_ADDRPERM(bp),
        (uint64_t)VM_KERNEL_ADDRPERM(bp_inner),
        (uint64_t)VM_KERNEL_ADDRPERM(bp_end),
        bp_end - bp_inner,
        total_required,
        bp_end - bp);
    return ret;
}

static size_t
dn_calc_size(int is64user)
{
    struct dn_flow_set *set;
    struct dn_pipe *p;
    size_t size = 0;
    size_t pipesize;
    size_t queuesize;
    size_t setsize;
    int i;

#if DUMMYNET_DEBUG
    unsigned pq_count = 0, pqe_count = 0;
    unsigned fsq_count = 0, fsqe_count = 0;
#endif /* DUMMYNET_DEBUG */

    LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);
    if (is64user) {
        pipesize = sizeof(struct dn_pipe_64);
        queuesize = sizeof(struct dn_flow_queue_64);
        setsize = sizeof(struct dn_flow_set_64);
    } else {
        pipesize = sizeof(struct dn_pipe_32);
        queuesize = sizeof(struct dn_flow_queue_32);
        setsize = sizeof(struct dn_flow_set_32);
    }
    /*
     * compute size of data structures: list of pipes and flow_sets.
     */
    for (i = 0; i < HASHSIZE; i++) {
        SLIST_FOREACH(p, &pipehash[i], next) {
#if DUMMYNET_DEBUG
            pq_count++;
            pqe_count += p->fs.rq_elements;
#endif /* DUMMYNET_DEBUG */
            size += sizeof(*p) +
                p->fs.rq_elements * sizeof(struct dn_flow_queue);
        }
        SLIST_FOREACH(set, &flowsethash[i], next) {
#if DUMMYNET_DEBUG
            fsq_count++;
            fsqe_count += set->rq_elements;
#endif /* DUMMYNET_DEBUG */
            size += sizeof(*set) +
                set->rq_elements * sizeof(struct dn_flow_queue);
        }
    }

#if DUMMYNET_DEBUG
    IP_DN_DBG_LOG("dn_size=%lu (pipe queues: count=%u elements=%u; flowset queues: count=%u elements=%u)",
        size, pq_count, pqe_count, fsq_count, fsqe_count);
#endif /* DUMMYNET_DEBUG */

    return size;
}

static int
dummynet_get(struct sockopt *sopt)
{
    char *buf = NULL;
    char *bp_end = NULL;                  /* sentinel pointer */
    char *bp __ended_by(bp_end) = bp_end; /* "copy-pointer" */
    char *bp_next = NULL;                 /* next value for copy-pointer, may overflow */
    size_t size = 0;
    struct dn_flow_set *set;
    struct dn_pipe *p;
    int error = 0, i;
    int overflow = 0;
    int is64user = 0;

    /* XXX lock held too long */
    lck_mtx_lock(&dn_mutex);
    /*
     * XXX: Ugly, but we need to allocate memory with the Z_WAITOK flag
     * and we cannot use that flag while holding a mutex.
     */
    if (proc_is64bit(sopt->sopt_p)) {
        is64user = 1;
    }
    for (i = 0; i < 10; i++) {
        size = dn_calc_size(is64user);
        lck_mtx_unlock(&dn_mutex);
        buf = kalloc_data(size, Z_WAITOK | Z_ZERO);
        if (buf == NULL) {
            return ENOMEM;
        }
        lck_mtx_lock(&dn_mutex);
        if (size == dn_calc_size(is64user)) {
            /*
             * This check is necessary, because pipes/flowsets can change
             * while we weren't holding the lock
             */
            break;
        }
        kfree_data(buf, size);
        buf = NULL;
    }
    if (buf == NULL) {
        lck_mtx_unlock(&dn_mutex);
        return ENOMEM;
    }

    /* TODO: follow up why without this forge `bp' gets fishy bounds. */
    bp = __unsafe_forge_bidi_indexable(char *, buf, size);
    bp_end = buf + size;

    for (i = 0; i < HASHSIZE && !overflow; i++) {
        SLIST_FOREACH(p, &pipehash[i], next) {
            /*
             * copy pipe descriptor into *bp, convert delay
             * back to ms, then copy the flow_set descriptor(s)
             * one at a time. After each flow_set, copy the
             * queue descriptor it owns.
             */
            if (is64user) {
                bp_next = cp_pipe_to_64_user(p, bp, bp_end);
            } else {
                bp_next = cp_pipe_to_32_user(p, bp, bp_end);
            }
            if (bp_next == NULL) {
                IP_DN_DBG_LOG("Overflow detected old=0x%llx end=0x%llx",
                    (uint64_t)VM_KERNEL_ADDRPERM(bp),
                    (uint64_t)VM_KERNEL_ADDRPERM(bp_end));
                overflow = 1;
                break;
            }
            IP_DN_DBG_LOG("advancing bp: old=0x%llx new=0x%llx end=0x%llx",
                (uint64_t)VM_KERNEL_ADDRPERM(bp),
                (uint64_t)VM_KERNEL_ADDRPERM(bp_next),
                (uint64_t)VM_KERNEL_ADDRPERM(bp_end));
            bp = bp_next;
            bp_end = bp_end;
        }
    }

#if DUMMYNET_DEBUG
    if (overflow) {
        IP_DN_DBG_LOG("Overflow detected old=0x%llx end=0x%llx",
            (uint64_t)VM_KERNEL_ADDRPERM(bp),
            (uint64_t)VM_KERNEL_ADDRPERM(bp_end));
    }
#endif

    for (i = 0; i < HASHSIZE && !overflow; i++) {
        SLIST_FOREACH(set, &flowsethash[i], next) {
            struct dn_flow_set_64 *fs_bp;
            bp_next = bp + sizeof(*fs_bp);
            if (bp_end < bp_next) {
                IP_DN_DBG_LOG("Overflow detected old=0x%llx end=0x%llx available=%lu required=%lu",
                    (uint64_t)VM_KERNEL_ADDRPERM(bp),
                    (uint64_t)VM_KERNEL_ADDRPERM(bp_end),
                    bp_end - bp,
                    sizeof(*fs_bp));
                overflow = 1;
                break;
            }
            fs_bp = (struct dn_flow_set_64 *)(void *)bp;
            bp = bp_next;
            bp_end = bp_end;
            cp_flow_set_to_64_user(set, fs_bp);
            /* XXX same hack as above */
            fs_bp->next = CAST_DOWN(user64_addr_t, DN_IS_QUEUE);
            fs_bp->pipe = USER_ADDR_NULL;
            fs_bp->rq = USER_ADDR_NULL;
            bp_next = dn_copy_set_64(set, bp, bp_end);
            if (bp_next == NULL) {
                overflow = 1;
                break;
            }
            bp = bp_next;
            bp_end = bp_end;
        }
    }
    lck_mtx_unlock(&dn_mutex);
    error = sooptcopyout(sopt, buf, size);
    kfree_data(buf, size);
    return error;
}

/*
 * Handler for the various dummynet socket options (get, flush, config, del)
 */
static int
ip_dn_ctl(struct sockopt *sopt)
{
    int error = 0;
    struct dn_pipe *p, tmp_pipe;

    /* Disallow sets in really-really secure mode. */
    if (sopt->sopt_dir == SOPT_SET && securelevel >= 3) {
        return EPERM;
    }

    switch (sopt->sopt_name) {
    default:
        printf("dummynet: -- unknown option %d", sopt->sopt_name);
        return EINVAL;

    case IP_DUMMYNET_GET:
        error = dummynet_get(sopt);
        break;

    case IP_DUMMYNET_FLUSH:
        dummynet_flush();
        break;

    case IP_DUMMYNET_CONFIGURE:
        p = &tmp_pipe;
        if (proc_is64bit(sopt->sopt_p)) {
            error = cp_pipe_from_user_64(sopt, p);
        } else {
            error = cp_pipe_from_user_32(sopt, p);
        }

        if (error) {
            break;
        }
        error = config_pipe(p);
        break;

    case IP_DUMMYNET_DEL: /* remove a pipe or queue */
        p = &tmp_pipe;
        if (proc_is64bit(sopt->sopt_p)) {
            error = cp_pipe_from_user_64(sopt, p);
        } else {
            error = cp_pipe_from_user_32(sopt, p);
        }
        if (error) {
            break;
        }

        error = delete_pipe(p);
        break;
    }
    return error;
}
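
/*
 * Editorial note: userland reaches ip_dn_ctl() through the raw-IP
 * socket option interface. A minimal sketch, with illustrative values
 * only (the real ipfw/dnctl tools fill in a complete struct dn_pipe):
 *
 *     struct dn_pipe pipe = { 0 };
 *     pipe.pipe_nr = 1;
 *     pipe.bandwidth = 1000000;    // bits/sec
 *     pipe.delay = 50;             // ms, converted in config_pipe()
 *     int s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
 *     setsockopt(s, IPPROTO_IP, IP_DUMMYNET_CONFIGURE,
 *         &pipe, sizeof(pipe));
 */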

void
dummynet_init(void)
{
    eventhandler_lists_ctxt_init(&dummynet_evhdlr_ctxt);
}

void
ip_dn_init(void)
{
    /* setup the heaps and the I/O and control entry points */
    ready_heap.p = NULL;
    ready_heap.size = ready_heap.elements = 0;
    ready_heap.offset = 0;

    wfq_ready_heap.p = NULL;
    wfq_ready_heap.size = wfq_ready_heap.elements = 0;
    wfq_ready_heap.offset = 0;

    extract_heap.p = NULL;
    extract_heap.size = extract_heap.elements = 0;
    extract_heap.offset = 0;
    ip_dn_ctl_ptr = ip_dn_ctl;
    ip_dn_io_ptr = dummynet_io;
}

struct dn_event_nwk_wq_entry {
    struct nwk_wq_entry nwk_wqe;
    struct dummynet_event dn_ev_arg;
};

static void
dummynet_event_callback(struct nwk_wq_entry *nwk_item)
{
    struct dn_event_nwk_wq_entry *p_ev;

    p_ev = __container_of(nwk_item, struct dn_event_nwk_wq_entry, nwk_wqe);

    EVENTHANDLER_INVOKE(&dummynet_evhdlr_ctxt, dummynet_event, &p_ev->dn_ev_arg);

    kfree_type(struct dn_event_nwk_wq_entry, p_ev);
}

void
dummynet_event_enqueue_nwk_wq_entry(struct dummynet_event *p_dn_event)
{
    struct dn_event_nwk_wq_entry *p_ev = NULL;

    evhlog(debug, "%s: eventhandler enqueuing event of type=dummynet_event event_code=%s",
        __func__, dummynet_event2str(p_dn_event->dn_event_code));

    p_ev = kalloc_type(struct dn_event_nwk_wq_entry,
        Z_WAITOK | Z_ZERO | Z_NOFAIL);
    p_ev->nwk_wqe.func = dummynet_event_callback;
    p_ev->dn_ev_arg = *p_dn_event;
    nwk_wq_enqueue(&p_ev->nwk_wqe);
}

const char *
dummynet_event2str(int event)
{
    switch (event) {
#define DUMMYNET_EVENT_TO_STRING(type) case type: return #type;
        DUMMYNET_EVENT_TO_STRING(DUMMYNET_RULE_CONFIG)
        DUMMYNET_EVENT_TO_STRING(DUMMYNET_RULE_DELETE)
        DUMMYNET_EVENT_TO_STRING(DUMMYNET_PIPE_CONFIG)
        DUMMYNET_EVENT_TO_STRING(DUMMYNET_PIPE_DELETE)
        DUMMYNET_EVENT_TO_STRING(DUMMYNET_NLC_DISABLED)
#undef DUMMYNET_EVENT_TO_STRING
    }
    return "UNKNOWN_DUMMYNET_EVENT";
}

struct dummynet_tag_container {
    struct m_tag dtc_m_tag;
    struct dn_pkt_tag dtc_dn_pkt_tag;
};
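
/*
 * Editorial note: dtc_m_tag must remain the first member of the
 * container above so that the m_tag pointer and the container share
 * the same address; m_tag_kfree_dummynet() relies on __container_of()
 * to recover the container, and the assert3p() below checks the layout.
 */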

struct m_tag *
m_tag_kalloc_dummynet(u_int32_t id, u_int16_t type, uint16_t len, int wait)
{
    struct dummynet_tag_container *tag_container;
    struct m_tag *tag = NULL;

    assert3u(id, ==, KERNEL_MODULE_TAG_ID);
    assert3u(type, ==, KERNEL_TAG_TYPE_DUMMYNET);
    assert3u(len, ==, sizeof(struct dn_pkt_tag));

    if (len != sizeof(struct dn_pkt_tag)) {
        return NULL;
    }

    tag_container = kalloc_type(struct dummynet_tag_container, wait | M_ZERO);
    if (tag_container != NULL) {
        tag = &tag_container->dtc_m_tag;

        assert3p(tag, ==, tag_container);

        M_TAG_INIT(tag, id, type, len, &tag_container->dtc_dn_pkt_tag, NULL);
    }

    return tag;
}

void
m_tag_kfree_dummynet(struct m_tag *tag)
{
    struct dummynet_tag_container *tag_container;

    assert3u(tag->m_tag_len, ==, sizeof(struct dn_pkt_tag));
    tag_container = __container_of(tag, struct dummynet_tag_container, dtc_m_tag);
    kfree_type(struct dummynet_tag_container, tag_container);
}

void
dummynet_register_m_tag(void)
{
    int error;

    error = m_register_internal_tag_type(KERNEL_TAG_TYPE_DUMMYNET, sizeof(struct dn_pkt_tag),
        m_tag_kalloc_dummynet, m_tag_kfree_dummynet);

    assert3u(error, ==, 0);
}