xref: /xnu-8020.140.41/bsd/netinet/ip_dummynet.c (revision 27b03b360a988dfd3dfdf34262bb0042026747cc)
1 /*
2  * Copyright (c) 2000-2022 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * Copyright (c) 1998-2002 Luigi Rizzo, Universita` di Pisa
30  * Portions Copyright (c) 2000 Akamba Corp.
31  * All rights reserved
32  *
33  * Redistribution and use in source and binary forms, with or without
34  * modification, are permitted provided that the following conditions
35  * are met:
36  * 1. Redistributions of source code must retain the above copyright
37  *    notice, this list of conditions and the following disclaimer.
38  * 2. Redistributions in binary form must reproduce the above copyright
39  *    notice, this list of conditions and the following disclaimer in the
40  *    documentation and/or other materials provided with the distribution.
41  *
42  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52  * SUCH DAMAGE.
53  *
54  * $FreeBSD: src/sys/netinet/ip_dummynet.c,v 1.84 2004/08/25 09:31:30 pjd Exp $
55  */
56 
57 #define DUMMYNET_DEBUG
58 
59 /*
60  * This module implements IP dummynet, a bandwidth limiter/delay emulator
61  * Description of the data structures used is in ip_dummynet.h
62  * Here you mainly find the following blocks of code:
63  *  + variable declarations;
64  *  + heap management functions;
65  *  + scheduler and dummynet functions;
66  *  + configuration and initialization.
67  *
68  * NOTA BENE: critical sections are protected by the "dummynet lock".
69  *
70  * Most important Changes:
71  *
72  * 010124: Fixed WF2Q behaviour
73  * 010122: Fixed spl protection.
74  * 000601: WF2Q support
75  * 000106: large rewrite, use heaps to handle very many pipes.
76  * 980513:	initial release
77  *
78  * include files marked with XXX are probably not needed
79  */
80 
81 #include <sys/param.h>
82 #include <sys/systm.h>
83 #include <sys/malloc.h>
84 #include <sys/mbuf.h>
85 #include <sys/queue.h>                  /* XXX */
86 #include <sys/kernel.h>
87 #include <sys/random.h>
88 #include <sys/socket.h>
89 #include <sys/socketvar.h>
90 #include <sys/time.h>
91 #include <sys/sysctl.h>
92 #include <net/if.h>
93 #include <net/route.h>
94 #include <net/kpi_protocol.h>
98 #include <net/nwk_wq.h>
99 #include <net/pfvar.h>
100 #include <netinet/in.h>
101 #include <netinet/in_systm.h>
102 #include <netinet/in_var.h>
103 #include <netinet/ip.h>
104 #include <netinet/ip_dummynet.h>
105 #include <netinet/ip_var.h>
106 
107 #include <netinet/ip6.h>       /* for ip6_input, ip6_output prototypes */
108 #include <netinet6/ip6_var.h>
109 
110 #include <stdbool.h>
111 
112 /*
113  * We keep a private variable for the simulation time, but we could
114  * probably use an existing one ("softticks" in sys/kern/kern_timer.c)
115  */
116 static dn_key curr_time = 0;  /* current simulation time */
117 
118 /* This is for the timer that fires to call dummynet(). We only enable the
119  * timer when there are packets to process; otherwise it is disabled. */
120 static int timer_enabled = 0;
121 
122 static int dn_hash_size = 64;   /* default hash size */
123 
124 /* statistics on number of queue searches and search steps */
125 static int searches, search_steps;
126 static int pipe_expire = 1;    /* expire queue if empty */
127 static int dn_max_ratio = 16;  /* max queues/buckets ratio */
128 
129 static int red_lookup_depth = 256;      /* RED - default lookup table depth */
130 static int red_avg_pkt_size = 512;      /* RED - default medium packet size */
131 static int red_max_pkt_size = 1500;     /* RED - default max packet size */
132 
133 static int serialize = 0;
134 
135 /*
136  * Three heaps contain queues and pipes that the scheduler handles:
137  *
138  * ready_heap contains all the dn_flow_queues related to fixed-rate pipes.
139  *
140  * wfq_ready_heap contains the pipes associated with WF2Q flows.
141  *
142  * extract_heap contains pipes associated with delay lines.
143  *
144  */
145 static struct dn_heap ready_heap, extract_heap, wfq_ready_heap;
146 
147 static int heap_init(struct dn_heap *h, int size);
148 static int heap_insert(struct dn_heap *h, dn_key key1, void *p);
149 static void heap_extract(struct dn_heap *h, void *obj);
150 
151 
152 static void     transmit_event(struct dn_pipe *pipe, struct mbuf **head,
153     struct mbuf **tail);
154 static void     ready_event(struct dn_flow_queue *q, struct mbuf **head,
155     struct mbuf **tail);
156 static void     ready_event_wfq(struct dn_pipe *p, struct mbuf **head,
157     struct mbuf **tail);
158 
159 /*
160  * Packets are retrieved from queues in Dummynet in chains instead of
161  * packet-by-packet.  The entire list of packets is first dequeued and
162  * sent out by the following function.
163  */
164 static void dummynet_send(struct mbuf *m);
165 
166 #define HASHSIZE        16
167 #define HASH(num)       ((((num) >> 8) ^ ((num) >> 4) ^ (num)) & 0x0f)
168 static struct dn_pipe_head      pipehash[HASHSIZE];     /* all pipes */
169 static struct dn_flow_set_head  flowsethash[HASHSIZE];  /* all flowsets */
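/*
 * Worked example (illustrative): pipe number 0x123 hashes as
 *	((0x123 >> 8) ^ (0x123 >> 4) ^ 0x123) & 0x0f
 *	= (0x001 ^ 0x012 ^ 0x123) & 0x0f = 0x130 & 0x0f = 0,
 * so that pipe lands in pipehash[0].
 */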
170 
171 #ifdef SYSCTL_NODE
172 SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet,
173     CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Dummynet");
174 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, hash_size,
175     CTLFLAG_RW | CTLFLAG_LOCKED, &dn_hash_size, 0, "Default hash table size");
176 SYSCTL_QUAD(_net_inet_ip_dummynet, OID_AUTO, curr_time,
177     CTLFLAG_RD | CTLFLAG_LOCKED, &curr_time, "Current tick");
178 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, ready_heap,
179     CTLFLAG_RD | CTLFLAG_LOCKED, &ready_heap.size, 0, "Size of ready heap");
180 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, extract_heap,
181     CTLFLAG_RD | CTLFLAG_LOCKED, &extract_heap.size, 0, "Size of extract heap");
182 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, searches,
183     CTLFLAG_RD | CTLFLAG_LOCKED, &searches, 0, "Number of queue searches");
184 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, search_steps,
185     CTLFLAG_RD | CTLFLAG_LOCKED, &search_steps, 0, "Number of queue search steps");
186 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire,
187     CTLFLAG_RW | CTLFLAG_LOCKED, &pipe_expire, 0, "Expire queue if empty");
188 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, max_chain_len,
189     CTLFLAG_RW | CTLFLAG_LOCKED, &dn_max_ratio, 0,
190     "Max ratio between dynamic queues and buckets");
191 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth,
192     CTLFLAG_RD | CTLFLAG_LOCKED, &red_lookup_depth, 0, "Depth of RED lookup table");
193 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size,
194     CTLFLAG_RD | CTLFLAG_LOCKED, &red_avg_pkt_size, 0, "RED Medium packet size");
195 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size,
196     CTLFLAG_RD | CTLFLAG_LOCKED, &red_max_pkt_size, 0, "RED Max packet size");
197 #endif
198 
199 #ifdef DUMMYNET_DEBUG
200 int     dummynet_debug = 0;
201 #ifdef SYSCTL_NODE
202 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED, &dummynet_debug,
203     0, "control debugging printfs");
204 #endif
205 #define DPRINTF(X)      if (dummynet_debug) printf X
206 #else
207 #define DPRINTF(X)
208 #endif
209 
210 /* dummynet lock */
211 static LCK_GRP_DECLARE(dn_mutex_grp, "dn");
212 static LCK_MTX_DECLARE(dn_mutex, &dn_mutex_grp);
213 
214 static int config_pipe(struct dn_pipe *p);
215 static int ip_dn_ctl(struct sockopt *sopt);
216 
217 static void dummynet(void *);
218 static void dummynet_flush(void);
219 void dummynet_drain(void);
220 static ip_dn_io_t dummynet_io;
221 
222 static void cp_flow_set_to_64_user(struct dn_flow_set *set, struct dn_flow_set_64 *fs_bp);
223 static void cp_queue_to_64_user( struct dn_flow_queue *q, struct dn_flow_queue_64 *qp);
224 static char *cp_pipe_to_64_user(struct dn_pipe *p, struct dn_pipe_64 *pipe_bp);
225 static char* dn_copy_set_64(struct dn_flow_set *set, char *bp);
226 static int cp_pipe_from_user_64( struct sockopt *sopt, struct dn_pipe *p );
227 
228 static void cp_flow_set_to_32_user(struct dn_flow_set *set, struct dn_flow_set_32 *fs_bp);
229 static void cp_queue_to_32_user( struct dn_flow_queue *q, struct dn_flow_queue_32 *qp);
230 static char *cp_pipe_to_32_user(struct dn_pipe *p, struct dn_pipe_32 *pipe_bp);
231 static char* dn_copy_set_32(struct dn_flow_set *set, char *bp);
232 static int cp_pipe_from_user_32( struct sockopt *sopt, struct dn_pipe *p );
233 
234 struct eventhandler_lists_ctxt dummynet_evhdlr_ctxt;
235 
236 uint32_t
237 my_random(void)
238 {
239 	uint32_t val;
240 	read_frandom(&val, sizeof(val));
241 	val &= 0x7FFFFFFF;
242 
243 	return val;
244 }
245 
246 /*
247  * Heap management functions.
248  *
249  * In the heap, the first node is element 0. Children of i are 2i+1 and 2i+2.
250  * Some macros help with finding parent/children so we can optimize them.
251  *
252  * heap_init() is called to expand the heap when needed.
253  * Increment size in blocks of 16 entries.
254  * XXX failure to allocate a new element is a pretty bad failure
255  * as we basically stall a whole queue forever!!
256  * Returns 1 on error, 0 on success
257  */
258 #define HEAP_FATHER(x) ( ( (x) - 1 ) / 2 )
259 #define HEAP_LEFT(x) ( 2*(x) + 1 )
260 #define HEAP_IS_LEFT(x) ( (x) & 1 )
261 #define HEAP_RIGHT(x) ( 2*(x) + 2 )
262 #define HEAP_SWAP(a, b, buffer) { buffer = a ; a = b ; b = buffer ; }
263 #define HEAP_INCREMENT  15
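/*
 * Worked example (illustrative) of the index arithmetic above: for
 * node i = 4, HEAP_FATHER(4) = (4 - 1) / 2 = 1, HEAP_LEFT(4) = 9 and
 * HEAP_RIGHT(4) = 10; HEAP_IS_LEFT(4) is false, since left children
 * always have odd indices (2i + 1). A request to grow a heap to 20
 * entries is rounded up by heap_init() to (20 + 15) & ~15 = 32.
 */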
264 
265 
266 int
267 cp_pipe_from_user_32( struct sockopt *sopt, struct dn_pipe *p )
268 {
269 	struct dn_pipe_32 user_pipe_32;
270 	int error = 0;
271 
272 	error = sooptcopyin(sopt, &user_pipe_32, sizeof(struct dn_pipe_32), sizeof(struct dn_pipe_32));
273 	if (!error) {
274 		p->pipe_nr = user_pipe_32.pipe_nr;
275 		p->bandwidth = user_pipe_32.bandwidth;
276 		p->delay = user_pipe_32.delay;
277 		p->V = user_pipe_32.V;
278 		p->sum = user_pipe_32.sum;
279 		p->numbytes = user_pipe_32.numbytes;
280 		p->sched_time = user_pipe_32.sched_time;
281 		bcopy( user_pipe_32.if_name, p->if_name, IFNAMSIZ);
282 		p->ready = user_pipe_32.ready;
283 
284 		p->fs.fs_nr = user_pipe_32.fs.fs_nr;
285 		p->fs.flags_fs = user_pipe_32.fs.flags_fs;
286 		p->fs.parent_nr = user_pipe_32.fs.parent_nr;
287 		p->fs.weight = user_pipe_32.fs.weight;
288 		p->fs.qsize = user_pipe_32.fs.qsize;
289 		p->fs.plr = user_pipe_32.fs.plr;
290 		p->fs.flow_mask = user_pipe_32.fs.flow_mask;
291 		p->fs.rq_size = user_pipe_32.fs.rq_size;
292 		p->fs.rq_elements = user_pipe_32.fs.rq_elements;
293 		p->fs.last_expired = user_pipe_32.fs.last_expired;
294 		p->fs.backlogged = user_pipe_32.fs.backlogged;
295 		p->fs.w_q = user_pipe_32.fs.w_q;
296 		p->fs.max_th = user_pipe_32.fs.max_th;
297 		p->fs.min_th = user_pipe_32.fs.min_th;
298 		p->fs.max_p = user_pipe_32.fs.max_p;
299 		p->fs.c_1 = user_pipe_32.fs.c_1;
300 		p->fs.c_2 = user_pipe_32.fs.c_2;
301 		p->fs.c_3 = user_pipe_32.fs.c_3;
302 		p->fs.c_4 = user_pipe_32.fs.c_4;
303 		p->fs.lookup_depth = user_pipe_32.fs.lookup_depth;
304 		p->fs.lookup_step = user_pipe_32.fs.lookup_step;
305 		p->fs.lookup_weight = user_pipe_32.fs.lookup_weight;
306 		p->fs.avg_pkt_size = user_pipe_32.fs.avg_pkt_size;
307 		p->fs.max_pkt_size = user_pipe_32.fs.max_pkt_size;
308 	}
309 	return error;
310 }
311 
312 
313 int
314 cp_pipe_from_user_64( struct sockopt *sopt, struct dn_pipe *p )
315 {
316 	struct dn_pipe_64 user_pipe_64;
317 	int error = 0;
318 
319 	error = sooptcopyin(sopt, &user_pipe_64, sizeof(struct dn_pipe_64), sizeof(struct dn_pipe_64));
320 	if (!error) {
321 		p->pipe_nr = user_pipe_64.pipe_nr;
322 		p->bandwidth = user_pipe_64.bandwidth;
323 		p->delay = user_pipe_64.delay;
324 		p->V = user_pipe_64.V;
325 		p->sum = user_pipe_64.sum;
326 		p->numbytes = user_pipe_64.numbytes;
327 		p->sched_time = user_pipe_64.sched_time;
328 		bcopy( user_pipe_64.if_name, p->if_name, IFNAMSIZ);
329 		p->ready = user_pipe_64.ready;
330 
331 		p->fs.fs_nr = user_pipe_64.fs.fs_nr;
332 		p->fs.flags_fs = user_pipe_64.fs.flags_fs;
333 		p->fs.parent_nr = user_pipe_64.fs.parent_nr;
334 		p->fs.weight = user_pipe_64.fs.weight;
335 		p->fs.qsize = user_pipe_64.fs.qsize;
336 		p->fs.plr = user_pipe_64.fs.plr;
337 		p->fs.flow_mask = user_pipe_64.fs.flow_mask;
338 		p->fs.rq_size = user_pipe_64.fs.rq_size;
339 		p->fs.rq_elements = user_pipe_64.fs.rq_elements;
340 		p->fs.last_expired = user_pipe_64.fs.last_expired;
341 		p->fs.backlogged = user_pipe_64.fs.backlogged;
342 		p->fs.w_q = user_pipe_64.fs.w_q;
343 		p->fs.max_th = user_pipe_64.fs.max_th;
344 		p->fs.min_th = user_pipe_64.fs.min_th;
345 		p->fs.max_p = user_pipe_64.fs.max_p;
346 		p->fs.c_1 = user_pipe_64.fs.c_1;
347 		p->fs.c_2 = user_pipe_64.fs.c_2;
348 		p->fs.c_3 = user_pipe_64.fs.c_3;
349 		p->fs.c_4 = user_pipe_64.fs.c_4;
350 		p->fs.lookup_depth = user_pipe_64.fs.lookup_depth;
351 		p->fs.lookup_step = user_pipe_64.fs.lookup_step;
352 		p->fs.lookup_weight = user_pipe_64.fs.lookup_weight;
353 		p->fs.avg_pkt_size = user_pipe_64.fs.avg_pkt_size;
354 		p->fs.max_pkt_size = user_pipe_64.fs.max_pkt_size;
355 	}
356 	return error;
357 }
358 
359 static void
360 cp_flow_set_to_32_user(struct dn_flow_set *set, struct dn_flow_set_32 *fs_bp)
361 {
362 	fs_bp->fs_nr = set->fs_nr;
363 	fs_bp->flags_fs = set->flags_fs;
364 	fs_bp->parent_nr = set->parent_nr;
365 	fs_bp->weight = set->weight;
366 	fs_bp->qsize = set->qsize;
367 	fs_bp->plr = set->plr;
368 	fs_bp->flow_mask = set->flow_mask;
369 	fs_bp->rq_size = set->rq_size;
370 	fs_bp->rq_elements = set->rq_elements;
371 	fs_bp->last_expired = set->last_expired;
372 	fs_bp->backlogged = set->backlogged;
373 	fs_bp->w_q = set->w_q;
374 	fs_bp->max_th = set->max_th;
375 	fs_bp->min_th = set->min_th;
376 	fs_bp->max_p = set->max_p;
377 	fs_bp->c_1 = set->c_1;
378 	fs_bp->c_2 = set->c_2;
379 	fs_bp->c_3 = set->c_3;
380 	fs_bp->c_4 = set->c_4;
381 	fs_bp->w_q_lookup = CAST_DOWN_EXPLICIT(user32_addr_t, set->w_q_lookup);
382 	fs_bp->lookup_depth = set->lookup_depth;
383 	fs_bp->lookup_step = set->lookup_step;
384 	fs_bp->lookup_weight = set->lookup_weight;
385 	fs_bp->avg_pkt_size = set->avg_pkt_size;
386 	fs_bp->max_pkt_size = set->max_pkt_size;
387 }
388 
389 static void
390 cp_flow_set_to_64_user(struct dn_flow_set *set, struct dn_flow_set_64 *fs_bp)
391 {
392 	fs_bp->fs_nr = set->fs_nr;
393 	fs_bp->flags_fs = set->flags_fs;
394 	fs_bp->parent_nr = set->parent_nr;
395 	fs_bp->weight = set->weight;
396 	fs_bp->qsize = set->qsize;
397 	fs_bp->plr = set->plr;
398 	fs_bp->flow_mask = set->flow_mask;
399 	fs_bp->rq_size = set->rq_size;
400 	fs_bp->rq_elements = set->rq_elements;
401 	fs_bp->last_expired = set->last_expired;
402 	fs_bp->backlogged = set->backlogged;
403 	fs_bp->w_q = set->w_q;
404 	fs_bp->max_th = set->max_th;
405 	fs_bp->min_th = set->min_th;
406 	fs_bp->max_p = set->max_p;
407 	fs_bp->c_1 = set->c_1;
408 	fs_bp->c_2 = set->c_2;
409 	fs_bp->c_3 = set->c_3;
410 	fs_bp->c_4 = set->c_4;
411 	fs_bp->w_q_lookup = CAST_DOWN(user64_addr_t, set->w_q_lookup);
412 	fs_bp->lookup_depth = set->lookup_depth;
413 	fs_bp->lookup_step = set->lookup_step;
414 	fs_bp->lookup_weight = set->lookup_weight;
415 	fs_bp->avg_pkt_size = set->avg_pkt_size;
416 	fs_bp->max_pkt_size = set->max_pkt_size;
417 }
418 
419 static
420 void
421 cp_queue_to_32_user( struct dn_flow_queue *q, struct dn_flow_queue_32 *qp)
422 {
423 	qp->id = q->id;
424 	qp->len = q->len;
425 	qp->len_bytes = q->len_bytes;
426 	qp->numbytes = q->numbytes;
427 	qp->tot_pkts = q->tot_pkts;
428 	qp->tot_bytes = q->tot_bytes;
429 	qp->drops = q->drops;
430 	qp->hash_slot = q->hash_slot;
431 	qp->avg = q->avg;
432 	qp->count = q->count;
433 	qp->random = q->random;
434 	qp->q_time = (u_int32_t)q->q_time;
435 	qp->heap_pos = q->heap_pos;
436 	qp->sched_time = q->sched_time;
437 	qp->S = q->S;
438 	qp->F = q->F;
439 }
440 
441 static
442 void
443 cp_queue_to_64_user( struct dn_flow_queue *q, struct dn_flow_queue_64 *qp)
444 {
445 	qp->id = q->id;
446 	qp->len = q->len;
447 	qp->len_bytes = q->len_bytes;
448 	qp->numbytes = q->numbytes;
449 	qp->tot_pkts = q->tot_pkts;
450 	qp->tot_bytes = q->tot_bytes;
451 	qp->drops = q->drops;
452 	qp->hash_slot = q->hash_slot;
453 	qp->avg = q->avg;
454 	qp->count = q->count;
455 	qp->random = q->random;
456 	qp->q_time = (u_int32_t)q->q_time;
457 	qp->heap_pos = q->heap_pos;
458 	qp->sched_time = q->sched_time;
459 	qp->S = q->S;
460 	qp->F = q->F;
461 }
462 
463 static
464 char *
465 cp_pipe_to_32_user(struct dn_pipe *p, struct dn_pipe_32 *pipe_bp)
466 {
467 	char    *bp;
468 
469 	pipe_bp->pipe_nr = p->pipe_nr;
470 	pipe_bp->bandwidth = p->bandwidth;
471 	pipe_bp->delay = p->delay;
472 	bcopy( &(p->scheduler_heap), &(pipe_bp->scheduler_heap), sizeof(struct dn_heap_32));
473 	pipe_bp->scheduler_heap.p = CAST_DOWN_EXPLICIT(user32_addr_t, pipe_bp->scheduler_heap.p);
474 	bcopy( &(p->not_eligible_heap), &(pipe_bp->not_eligible_heap), sizeof(struct dn_heap_32));
475 	pipe_bp->not_eligible_heap.p = CAST_DOWN_EXPLICIT(user32_addr_t, pipe_bp->not_eligible_heap.p);
476 	bcopy( &(p->idle_heap), &(pipe_bp->idle_heap), sizeof(struct dn_heap_32));
477 	pipe_bp->idle_heap.p = CAST_DOWN_EXPLICIT(user32_addr_t, pipe_bp->idle_heap.p);
478 	pipe_bp->V = p->V;
479 	pipe_bp->sum = p->sum;
480 	pipe_bp->numbytes = p->numbytes;
481 	pipe_bp->sched_time = p->sched_time;
482 	bcopy( p->if_name, pipe_bp->if_name, IFNAMSIZ);
483 	pipe_bp->ifp = CAST_DOWN_EXPLICIT(user32_addr_t, p->ifp);
484 	pipe_bp->ready = p->ready;
485 
486 	cp_flow_set_to_32_user( &(p->fs), &(pipe_bp->fs));
487 
488 	pipe_bp->delay = (pipe_bp->delay * 1000) / (hz * 10);
489 	/*
490 	 * XXX the following is a hack based on ->next being the
491 	 * first field in dn_pipe and dn_flow_set. The correct
492 	 * solution would be to move the dn_flow_set to the beginning
493 	 * of struct dn_pipe.
494 	 */
495 	pipe_bp->next = CAST_DOWN_EXPLICIT( user32_addr_t, DN_IS_PIPE );
496 	/* clean pointers */
497 	pipe_bp->head = pipe_bp->tail = (user32_addr_t) 0;
498 	pipe_bp->fs.next = (user32_addr_t)0;
499 	pipe_bp->fs.pipe = (user32_addr_t)0;
500 	pipe_bp->fs.rq = (user32_addr_t)0;
501 	bp = ((char *)pipe_bp) + sizeof(struct dn_pipe_32);
502 	return dn_copy_set_32( &(p->fs), bp);
503 }
504 
505 static
506 char *
507 cp_pipe_to_64_user(struct dn_pipe *p, struct dn_pipe_64 *pipe_bp)
508 {
509 	char    *bp;
510 
511 	pipe_bp->pipe_nr = p->pipe_nr;
512 	pipe_bp->bandwidth = p->bandwidth;
513 	pipe_bp->delay = p->delay;
514 	bcopy( &(p->scheduler_heap), &(pipe_bp->scheduler_heap), sizeof(struct dn_heap_64));
515 	pipe_bp->scheduler_heap.p = CAST_DOWN(user64_addr_t, pipe_bp->scheduler_heap.p);
516 	bcopy( &(p->not_eligible_heap), &(pipe_bp->not_eligible_heap), sizeof(struct dn_heap_64));
517 	pipe_bp->not_eligible_heap.p = CAST_DOWN(user64_addr_t, pipe_bp->not_eligible_heap.p);
518 	bcopy( &(p->idle_heap), &(pipe_bp->idle_heap), sizeof(struct dn_heap_64));
519 	pipe_bp->idle_heap.p = CAST_DOWN(user64_addr_t, pipe_bp->idle_heap.p);
520 	pipe_bp->V = p->V;
521 	pipe_bp->sum = p->sum;
522 	pipe_bp->numbytes = p->numbytes;
523 	pipe_bp->sched_time = p->sched_time;
524 	bcopy( p->if_name, pipe_bp->if_name, IFNAMSIZ);
525 	pipe_bp->ifp = CAST_DOWN(user64_addr_t, p->ifp);
526 	pipe_bp->ready = p->ready;
527 
528 	cp_flow_set_to_64_user( &(p->fs), &(pipe_bp->fs));
529 
530 	pipe_bp->delay = (pipe_bp->delay * 1000) / (hz * 10);
531 	/*
532 	 * XXX the following is a hack based on ->next being the
533 	 * first field in dn_pipe and dn_flow_set. The correct
534 	 * solution would be to move the dn_flow_set to the beginning
535 	 * of struct dn_pipe.
536 	 */
537 	pipe_bp->next = CAST_DOWN( user64_addr_t, DN_IS_PIPE );
538 	/* clean pointers */
539 	pipe_bp->head = pipe_bp->tail = USER_ADDR_NULL;
540 	pipe_bp->fs.next = USER_ADDR_NULL;
541 	pipe_bp->fs.pipe = USER_ADDR_NULL;
542 	pipe_bp->fs.rq = USER_ADDR_NULL;
543 	bp = ((char *)pipe_bp) + sizeof(struct dn_pipe_64);
544 	return dn_copy_set_64( &(p->fs), bp);
545 }
546 
547 static int
548 heap_init(struct dn_heap *h, int new_size)
549 {
550 	struct dn_heap_entry *p;
551 
552 	if (h->size >= new_size) {
553 		printf("dummynet: heap_init, Bogus call, have %d want %d\n",
554 		    h->size, new_size);
555 		return 0;
556 	}
557 	new_size = (new_size + HEAP_INCREMENT) & ~HEAP_INCREMENT;
558 	p = krealloc_type(struct dn_heap_entry, h->size, new_size,
559 	    h->p, Z_NOWAIT | Z_ZERO);
560 	if (p == NULL) {
561 		printf("dummynet: heap_init, resize %d failed\n", new_size );
562 		return 1; /* error */
563 	}
564 	h->p = p;
565 	h->size = new_size;
566 	return 0;
567 }
568 
569 /*
570  * Insert element in heap. Normally p != NULL: we insert p in
571  * a new position and bubble up. If p == NULL, the element is
572  * already in place, and key1 is the position where to start the
573  * bubble-up.
574  * Returns 1 on failure (cannot allocate new heap entry)
575  *
576  * If offset > 0 the position (index, int) of the element in the heap is
577  * also stored in the element itself at the given offset in bytes.
578  */
579 #define SET_OFFSET(heap, node) \
580     if (heap->offset > 0) \
581 	    *((int *)(void *)((char *)(heap->p[node].object) + heap->offset)) = node ;
582 /*
583  * RESET_OFFSET is used for sanity checks. It sets offset to an invalid value.
584  */
585 #define RESET_OFFSET(heap, node) \
586     if (heap->offset > 0) \
587 	    *((int *)(void *)((char *)(heap->p[node].object) + heap->offset)) = -1 ;
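/*
 * Illustrative sketch, assuming a heap configured with a non-zero
 * offset: if the stored objects embed their own heap position, e.g.
 *
 *	h->offset = offsetof(struct dn_flow_queue, heap_pos);
 *
 * then SET_OFFSET() keeps q->heap_pos in sync on every move, which is
 * what later lets heap_extract(h, q) remove q from the middle of the
 * heap without a linear search.
 */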
588 static int
589 heap_insert(struct dn_heap *h, dn_key key1, void *p)
590 {
591 	int son = h->elements;
592 
593 	if (p == NULL) { /* data already there, set starting point */
594 		VERIFY(key1 < INT_MAX);
595 		son = (int)key1;
596 	} else {        /* insert new element at the end, possibly resize */
597 		son = h->elements;
598 		if (son == h->size) { /* need resize... */
599 			if (heap_init(h, h->elements + 1)) {
600 				return 1; /* failure... */
601 			}
602 		}
603 		h->p[son].object = p;
604 		h->p[son].key = key1;
605 		h->elements++;
606 	}
607 	while (son > 0) {                       /* bubble up */
608 		int father = HEAP_FATHER(son);
609 		struct dn_heap_entry tmp;
610 
611 		if (DN_KEY_LT( h->p[father].key, h->p[son].key )) {
612 			break; /* found right position */
613 		}
614 		/* son smaller than father, swap and repeat */
615 		HEAP_SWAP(h->p[son], h->p[father], tmp);
616 		SET_OFFSET(h, son);
617 		son = father;
618 	}
619 	SET_OFFSET(h, son);
620 	return 0;
621 }
622 
623 /*
624  * remove top element from heap, or obj if obj != NULL
625  */
626 static void
627 heap_extract(struct dn_heap *h, void *obj)
628 {
629 	int child, father, maxelt = h->elements - 1;
630 
631 	if (maxelt < 0) {
632 		printf("dummynet: warning, extract from empty heap 0x%llx\n",
633 		    (uint64_t)VM_KERNEL_ADDRPERM(h));
634 		return;
635 	}
636 	father = 0; /* default: move up smallest child */
637 	if (obj != NULL) { /* extract specific element, index is at offset */
638 		if (h->offset <= 0) {
639 			panic("dummynet: heap_extract from middle not supported on this heap!!!");
640 		}
641 		father = *((int *)(void *)((char *)obj + h->offset));
642 		if (father < 0 || father >= h->elements) {
643 			printf("dummynet: heap_extract, father %d out of bound 0..%d\n",
644 			    father, h->elements);
645 			panic("dummynet: heap_extract");
646 		}
647 	}
648 	RESET_OFFSET(h, father);
649 	child = HEAP_LEFT(father);      /* left child */
650 	while (child <= maxelt) {       /* valid entry */
651 		if (child != maxelt && DN_KEY_LT(h->p[child + 1].key, h->p[child].key)) {
652 			child = child + 1; /* take right child, otherwise left */
653 		}
654 		h->p[father] = h->p[child];
655 		SET_OFFSET(h, father);
656 		father = child;
657 		child = HEAP_LEFT(child); /* left child for next loop */
658 	}
659 	h->elements--;
660 	if (father != maxelt) {
661 		/*
662 		 * Fill hole with last entry and bubble up, reusing the insert code
663 		 */
664 		h->p[father] = h->p[maxelt];
665 		heap_insert(h, father, NULL); /* this one cannot fail */
666 	}
667 }
668 
669 /*
670  * heapify() will reorganize data inside an array to maintain the
671  * heap property. It is needed when we delete a bunch of entries.
672  */
673 static void
674 heapify(struct dn_heap *h)
675 {
676 	int i;
677 
678 	for (i = 0; i < h->elements; i++) {
679 		heap_insert(h, i, NULL);
680 	}
681 }
682 
683 /*
684  * cleanup the heap and free data structure
685  */
686 static void
687 heap_free(struct dn_heap *h)
688 {
689 	kfree_type(struct dn_heap_entry, h->size, h->p);
690 	bzero(h, sizeof(*h));
691 }
692 
693 /*
694  * --- end of heap management functions ---
695  */
696 
697 /*
698  * Return the mbuf tag holding the dummynet state.  As an optimization
699  * this is assumed to be the first tag on the list.  If this turns out
700  * wrong we'll need to search the list.
701  */
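/*
 * Layout note (illustrative): m_tag_create() allocates the m_tag
 * header and its payload contiguously, so the dummynet state sits
 * immediately after the header and is reached with the
 * (struct dn_pkt_tag *)(mtag + 1) idiom used below.
 */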
702 static struct dn_pkt_tag *
703 dn_tag_get(struct mbuf *m)
704 {
705 	struct m_tag *mtag = m_tag_first(m);
706 
707 	if (!(mtag != NULL &&
708 	    mtag->m_tag_id == KERNEL_MODULE_TAG_ID &&
709 	    mtag->m_tag_type == KERNEL_TAG_TYPE_DUMMYNET)) {
710 		panic("packet on dummynet queue w/o dummynet tag: 0x%llx",
711 		    (uint64_t)VM_KERNEL_ADDRPERM(m));
712 	}
713 
714 	return (struct dn_pkt_tag *)(mtag + 1);
715 }
716 
717 /*
718  * Scheduler functions:
719  *
720  * transmit_event() is called when the delay-line needs to enter
721  * the scheduler, either because of existing pkts getting ready,
722  * or new packets entering the queue. The event handled is the delivery
723  * time of the packet.
724  *
725  * ready_event() does something similar with fixed-rate queues, and the
726  * event handled is the finish time of the head pkt.
727  *
728  * ready_event_wfq() does something similar with WF2Q queues, and the
729  * event handled is the start time of the head pkt.
730  *
731  * In all cases, we make sure that the data structures are consistent
732  * before passing pkts out, because this might trigger recursive
733  * invocations of the procedures.
734  */
735 static void
736 transmit_event(struct dn_pipe *pipe, struct mbuf **head, struct mbuf **tail)
737 {
738 	struct mbuf *m;
739 	struct dn_pkt_tag *pkt = NULL;
740 	u_int64_t schedule_time;
741 
742 	LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);
743 	ASSERT(serialize >= 0);
744 	if (serialize == 0) {
745 		while ((m = pipe->head) != NULL) {
746 			pkt = dn_tag_get(m);
747 			if (!DN_KEY_LEQ(pkt->dn_output_time, curr_time)) {
748 				break;
749 			}
750 
751 			pipe->head = m->m_nextpkt;
752 			if (*tail != NULL) {
753 				(*tail)->m_nextpkt = m;
754 			} else {
755 				*head = m;
756 			}
757 			*tail = m;
758 		}
759 
760 		if (*tail != NULL) {
761 			(*tail)->m_nextpkt = NULL;
762 		}
763 	}
764 
765 	schedule_time = pkt == NULL || DN_KEY_LEQ(pkt->dn_output_time, curr_time) ?
766 	    curr_time + 1 : pkt->dn_output_time;
767 
768 	/* if there are leftover packets, put the pipe into the heap for next ready event */
769 	if ((m = pipe->head) != NULL) {
770 		pkt = dn_tag_get(m);
771 		/* XXX should check errors on heap_insert, by draining the
772 		 * whole pipe p and hoping in the future we are more successful
773 		 */
774 		heap_insert(&extract_heap, schedule_time, pipe);
775 	}
776 }
777 
778 /*
779  * the following macro computes how many ticks we have to wait
780  * before being able to transmit a packet. The credit is taken from
781  * either a pipe (WF2Q) or a flow_queue (per-flow queueing)
782  */
783 
784 /* hz is 100, which gives a granularity of 10ms in the old timer.
785  * The timer has been changed to fire every 1ms, so the use of
786  * hz has been modified here. All instances of hz have been left
787  * in place but adjusted by a factor of 10 so that hz is functionally
788  * equal to 1000.
789  */
790 #define SET_TICKS(_m, q, p)     \
791     ((_m)->m_pkthdr.len*8*(hz*10) - (q)->numbytes + p->bandwidth - 1 ) / \
792 	    p->bandwidth ;
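/*
 * Worked example (illustrative): with bandwidth in bit/s and the
 * adjusted clock at hz*10 = 1000 ticks/s, a 1500-byte packet on a
 * 1 Mbit/s pipe with no accumulated credit needs
 *	(1500*8*1000 + 1000000 - 1) / 1000000 = 12 ticks,
 * i.e. 12 ms, matching the packet's wire time.
 */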
793 
794 /*
795  * extract pkt from queue, compute output time (could be now)
796  * and put into delay line (p_queue)
797  */
798 static void
799 move_pkt(struct mbuf *pkt, struct dn_flow_queue *q,
800     struct dn_pipe *p, int len)
801 {
802 	struct dn_pkt_tag *dt = dn_tag_get(pkt);
803 
804 	q->head = pkt->m_nextpkt;
805 	q->len--;
806 	q->len_bytes -= len;
807 
808 	dt->dn_output_time = curr_time + p->delay;
809 
810 	if (p->head == NULL) {
811 		p->head = pkt;
812 	} else {
813 		p->tail->m_nextpkt = pkt;
814 	}
815 	p->tail = pkt;
816 	p->tail->m_nextpkt = NULL;
817 }
818 
819 /*
820  * ready_event() is invoked every time the queue must enter the
821  * scheduler, either because the first packet arrives, or because
822  * a previously scheduled event fired.
823  * On invocation, drain as many pkts as possible (could be 0) and then
824  * if there are leftover packets, reinsert the pkt in the scheduler.
825  */
826 static void
827 ready_event(struct dn_flow_queue *q, struct mbuf **head, struct mbuf **tail)
828 {
829 	struct mbuf *pkt;
830 	struct dn_pipe *p = q->fs->pipe;
831 	int p_was_empty;
832 
833 	LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);
834 
835 	if (p == NULL) {
836 		printf("dummynet: ready_event pipe is gone\n");
837 		return;
838 	}
839 	p_was_empty = (p->head == NULL);
840 
841 	/*
842 	 * schedule fixed-rate queues linked to this pipe:
843 	 * Account for the bw accumulated since last scheduling, then
844 	 * drain as many pkts as allowed by q->numbytes and move to
845 	 * the delay line (in p) computing output time.
846 	 * bandwidth==0 (no limit) means we can drain the whole queue,
847 	 * setting len_scaled = 0 does the job.
848 	 */
849 	q->numbytes += (curr_time - q->sched_time) * p->bandwidth;
850 	while ((pkt = q->head) != NULL) {
851 		int len = pkt->m_pkthdr.len;
852 		int len_scaled = p->bandwidth ? len * 8 * (hz * 10) : 0;
853 		if (len_scaled > q->numbytes) {
854 			break;
855 		}
856 		q->numbytes -= len_scaled;
857 		move_pkt(pkt, q, p, len);
858 	}
859 	/*
860 	 * If we have more packets queued, schedule next ready event
861 	 * (can only occur when bandwidth != 0, otherwise we would have
862 	 * flushed the whole queue in the previous loop).
863 	 * To this purpose we record the current time and compute how many
864 	 * ticks to go for the finish time of the packet.
865 	 */
866 	if ((pkt = q->head) != NULL) { /* this implies bandwidth != 0 */
867 		dn_key t = SET_TICKS(pkt, q, p); /* ticks i have to wait */
868 		q->sched_time = curr_time;
869 		heap_insert(&ready_heap, curr_time + t, (void *)q );
870 		/* XXX should check errors on heap_insert, and drain the whole
871 		 * queue on error hoping next time we are luckier.
872 		 */
873 	} else { /* RED needs to know when the queue becomes empty */
874 		q->q_time = curr_time;
875 		q->numbytes = 0;
876 	}
877 	/*
878 	 * If the delay line was empty call transmit_event(p) now.
879 	 * Otherwise, the scheduler will take care of it.
880 	 */
881 	if (p_was_empty) {
882 		transmit_event(p, head, tail);
883 	}
884 }
885 
886 /*
887  * Called when we can transmit packets on WF2Q queues. Take pkts out of
888  * the queues at their start time, and enqueue into the delay line.
889  * Packets are drained until p->numbytes < 0. As long as
890  * len_scaled >= p->numbytes, the packet goes into the delay line
891  * with a deadline p->delay. For the last packet, if p->numbytes<0,
892  * there is an additional delay.
893  */
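/*
 * Worked example (illustrative, ignoring the MY_M fixed-point
 * scaling): two backlogged flows with weights 2 and 1 give
 * p->sum = 3. Each transmitted packet of length len advances the
 * virtual time V by len/3, while the sender's finish time F advances
 * by len/weight, so the weight-2 flow is scheduled twice as often.
 * A queue stays eligible (in scheduler_heap rather than
 * not_eligible_heap) only while S <= V.
 */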
894 static void
895 ready_event_wfq(struct dn_pipe *p, struct mbuf **head, struct mbuf **tail)
896 {
897 	int p_was_empty = (p->head == NULL);
898 	struct dn_heap *sch = &(p->scheduler_heap);
899 	struct dn_heap *neh = &(p->not_eligible_heap);
900 	int64_t p_numbytes = p->numbytes;
901 
902 	LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);
903 
904 	if (p->if_name[0] == 0) { /* tx clock is simulated */
905 		p_numbytes += (curr_time - p->sched_time) * p->bandwidth;
906 	} else { /* tx clock is for real, the ifq must be empty or this is a NOP */
907 		if (p->ifp && !IFCQ_IS_EMPTY(p->ifp->if_snd)) {
908 			return;
909 		} else {
910 			DPRINTF(("dummynet: pipe %d ready from %s --\n",
911 			    p->pipe_nr, p->if_name));
912 		}
913 	}
914 
915 	/*
916 	 * While we have backlogged traffic AND credit, we need to do
917 	 * something on the queue.
918 	 */
919 	while (p_numbytes >= 0 && (sch->elements > 0 || neh->elements > 0)) {
920 		if (sch->elements > 0) { /* have some eligible pkts to send out */
921 			struct dn_flow_queue *q = sch->p[0].object;
922 			struct mbuf *pkt = q->head;
923 			struct dn_flow_set *fs = q->fs;
924 			u_int32_t len = pkt->m_pkthdr.len;
925 			u_int64_t len_scaled = p->bandwidth ? len * 8 * (hz * 10) : 0;
926 
927 			heap_extract(sch, NULL); /* remove queue from heap */
928 			p_numbytes -= len_scaled;
929 			move_pkt(pkt, q, p, len);
930 
931 			p->V += (len << MY_M) / p->sum; /* update V */
932 			q->S = q->F; /* update start time */
933 			if (q->len == 0) { /* Flow not backlogged any more */
934 				fs->backlogged--;
935 				heap_insert(&(p->idle_heap), q->F, q);
936 			} else { /* still backlogged */
937 				/*
938 				 * update F and position in backlogged queue, then
939 				 * put flow in not_eligible_heap (we will fix this later).
940 				 */
941 				len = (q->head)->m_pkthdr.len;
942 				q->F += (len << MY_M) / (u_int64_t) fs->weight;
943 				if (DN_KEY_LEQ(q->S, p->V)) {
944 					heap_insert(neh, q->S, q);
945 				} else {
946 					heap_insert(sch, q->F, q);
947 				}
948 			}
949 		}
950 		/*
951 		 * now compute V = max(V, min(S_i)). Remember that all elements in sch
952 		 * have by definition S_i <= V so if sch is not empty, V is surely
953 		 * the max and we must not update it. Conversely, if sch is empty
954 		 * we only need to look at neh.
955 		 */
956 		if (sch->elements == 0 && neh->elements > 0) {
957 			p->V = MAX64( p->V, neh->p[0].key );
958 		}
959 		/* move from neh to sch any queues that have become eligible */
960 		while (neh->elements > 0 && DN_KEY_LEQ(neh->p[0].key, p->V)) {
961 			struct dn_flow_queue *q = neh->p[0].object;
962 			heap_extract(neh, NULL);
963 			heap_insert(sch, q->F, q);
964 		}
965 
966 		if (p->if_name[0] != '\0') {/* tx clock is from a real thing */
967 			p_numbytes = -1; /* mark not ready for I/O */
968 			break;
969 		}
970 	}
971 	if (sch->elements == 0 && neh->elements == 0 && p_numbytes >= 0
972 	    && p->idle_heap.elements > 0) {
973 		/*
974 		 * no traffic and no events scheduled. We can get rid of idle-heap.
975 		 */
976 		int i;
977 
978 		for (i = 0; i < p->idle_heap.elements; i++) {
979 			struct dn_flow_queue *q = p->idle_heap.p[i].object;
980 
981 			q->F = 0;
982 			q->S = q->F + 1;
983 		}
984 		p->sum = 0;
985 		p->V = 0;
986 		p->idle_heap.elements = 0;
987 	}
988 	/*
989 	 * If we are getting clocks from dummynet (not a real interface) and
990 	 * if we are under credit, schedule the next ready event.
991 	 * Also fix the delivery time of the last packet.
992 	 */
993 	if (p->if_name[0] == 0 && p_numbytes < 0) { /* this implies bandwidth >0 */
994 		dn_key t = 0; /* number of ticks i have to wait */
995 
996 		if (p->bandwidth > 0) {
997 			t = (p->bandwidth - 1 - p_numbytes) / p->bandwidth;
998 		}
999 		dn_tag_get(p->tail)->dn_output_time += t;
1000 		p->sched_time = curr_time;
1001 		heap_insert(&wfq_ready_heap, curr_time + t, (void *)p);
1002 		/* XXX should check errors on heap_insert, and drain the whole
1003 		 * queue on error hoping next time we are luckier.
1004 		 */
1005 	}
1006 
1007 	/* Fit (adjust if necessary) 64bit result into 32bit variable. */
1008 	if (p_numbytes > INT_MAX) {
1009 		p->numbytes = INT_MAX;
1010 	} else if (p_numbytes < INT_MIN) {
1011 		p->numbytes = INT_MIN;
1012 	} else {
1013 		p->numbytes = (int)p_numbytes;
1014 	}
1015 
1016 	/*
1017 	 * If the delay line was empty call transmit_event(p) now.
1018 	 * Otherwise, the scheduler will take care of it.
1019 	 */
1020 	if (p_was_empty) {
1021 		transmit_event(p, head, tail);
1022 	}
1023 }
1024 
1025 /*
1026  * This is called every 1ms. It is used to
1027  * increment the current tick counter and schedule expired events.
1028  */
1029 static void
1030 dummynet(__unused void * unused)
1031 {
1032 	void *p; /* generic parameter to handler */
1033 	struct dn_heap *h;
1034 	struct dn_heap *heaps[3];
1035 	struct mbuf *head = NULL, *tail = NULL;
1036 	int i;
1037 	struct dn_pipe *pe;
1038 	struct timespec ts;
1039 	struct timeval      tv;
1040 
1041 	heaps[0] = &ready_heap;         /* fixed-rate queues */
1042 	heaps[1] = &wfq_ready_heap;     /* wfq queues */
1043 	heaps[2] = &extract_heap;       /* delay line */
1044 
1045 	lck_mtx_lock(&dn_mutex);
1046 
1047 	/* make all time measurements in milliseconds (ms) -
1048 	 * here we convert secs and usecs to msecs (just divide the
1049 	 * usecs by 1000 and truncate to a whole number).
1050 	 */
1051 	microuptime(&tv);
1052 	curr_time = (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
1053 
1054 	for (i = 0; i < 3; i++) {
1055 		h = heaps[i];
1056 		while (h->elements > 0 && DN_KEY_LEQ(h->p[0].key, curr_time)) {
1057 			if (h->p[0].key > curr_time) {
1058 				printf("dummynet: warning, heap %d is %d ticks late\n",
1059 				    i, (int)(curr_time - h->p[0].key));
1060 			}
1061 			p = h->p[0].object; /* store a copy before heap_extract */
1062 			heap_extract(h, NULL); /* need to extract before processing */
1063 			if (i == 0) {
1064 				ready_event(p, &head, &tail);
1065 			} else if (i == 1) {
1066 				struct dn_pipe *pipe = p;
1067 				if (pipe->if_name[0] != '\0') {
1068 					printf("dummynet: bad ready_event_wfq for pipe %s\n",
1069 					    pipe->if_name);
1070 				} else {
1071 					ready_event_wfq(p, &head, &tail);
1072 				}
1073 			} else {
1074 				transmit_event(p, &head, &tail);
1075 			}
1076 		}
1077 	}
1078 	/* sweep pipes trying to expire idle flow_queues */
1079 	for (i = 0; i < HASHSIZE; i++) {
1080 		SLIST_FOREACH(pe, &pipehash[i], next) {
1081 			if (pe->idle_heap.elements > 0 &&
1082 			    DN_KEY_LT(pe->idle_heap.p[0].key, pe->V)) {
1083 				struct dn_flow_queue *q = pe->idle_heap.p[0].object;
1084 
1085 				heap_extract(&(pe->idle_heap), NULL);
1086 				q->S = q->F + 1; /* mark timestamp as invalid */
1087 				pe->sum -= q->fs->weight;
1088 			}
1089 		}
1090 	}
1091 
1092 	/* check the heaps to see if there's still stuff in there, and
1093 	 * only set the timer if there are packets to process
1094 	 */
1095 	timer_enabled = 0;
1096 	for (i = 0; i < 3; i++) {
1097 		h = heaps[i];
1098 		if (h->elements > 0) { // set the timer
1099 			ts.tv_sec = 0;
1100 			ts.tv_nsec = 1 * 1000000;       // 1ms
1101 			timer_enabled = 1;
1102 			bsd_timeout(dummynet, NULL, &ts);
1103 			break;
1104 		}
1105 	}
1106 
1107 	if (head != NULL) {
1108 		serialize++;
1109 	}
1110 
1111 	lck_mtx_unlock(&dn_mutex);
1112 
1113 	/* Send out the de-queued list of ready-to-send packets */
1114 	if (head != NULL) {
1115 		dummynet_send(head);
1116 		lck_mtx_lock(&dn_mutex);
1117 		serialize--;
1118 		lck_mtx_unlock(&dn_mutex);
1119 	}
1120 }
1121 
1122 
1123 static void
1124 dummynet_send(struct mbuf *m)
1125 {
1126 	struct dn_pkt_tag *pkt;
1127 	struct mbuf *n;
1128 
1129 	for (; m != NULL; m = n) {
1130 		n = m->m_nextpkt;
1131 		m->m_nextpkt = NULL;
1132 		pkt = dn_tag_get(m);
1133 
1134 		DPRINTF(("dummynet_send m: 0x%llx dn_dir: %d dn_flags: 0x%x\n",
1135 		    (uint64_t)VM_KERNEL_ADDRPERM(m), pkt->dn_dir,
1136 		    pkt->dn_flags));
1137 
1138 		switch (pkt->dn_dir) {
1139 		case DN_TO_IP_OUT: {
1140 			struct route tmp_rt;
1141 
1142 			/* route is already in the packet's dn_ro */
1143 			bzero(&tmp_rt, sizeof(tmp_rt));
1144 
1145 			/* Force IP_RAWOUTPUT as the IP header is fully formed */
1146 			pkt->dn_flags |= IP_RAWOUTPUT | IP_FORWARDING;
1147 			(void)ip_output(m, NULL, &tmp_rt, pkt->dn_flags, NULL, NULL);
1148 			ROUTE_RELEASE(&tmp_rt);
1149 			break;
1150 		}
1151 		case DN_TO_IP_IN:
1152 			proto_inject(PF_INET, m);
1153 			break;
1154 		case DN_TO_IP6_OUT: {
1155 			/* routes already in the packet's dn_{ro6,pmtu} */
1156 			if (pkt->dn_origifp != NULL) {
1157 				ip6_output_setsrcifscope(m, pkt->dn_origifp->if_index, NULL);
1158 				ip6_output_setdstifscope(m, pkt->dn_origifp->if_index, NULL);
1159 			} else {
1160 				ip6_output_setsrcifscope(m, IFSCOPE_UNKNOWN, NULL);
1161 				ip6_output_setdstifscope(m, IFSCOPE_UNKNOWN, NULL);
1162 			}
1163 
1164 			ip6_output(m, NULL, NULL, IPV6_FORWARDING, NULL, NULL, NULL);
1165 			break;
1166 		}
1167 		case DN_TO_IP6_IN:
1168 			proto_inject(PF_INET6, m);
1169 			break;
1170 		default:
1171 			printf("dummynet: bad switch %d!\n", pkt->dn_dir);
1172 			m_freem(m);
1173 			break;
1174 		}
1175 	}
1176 }
1177 
1178 /*
1179  * Unconditionally expire empty queues in case of shortage.
1180  * Returns the number of queues freed.
1181  */
1182 static int
1183 expire_queues(struct dn_flow_set *fs)
1184 {
1185 	struct dn_flow_queue *q, *prev;
1186 	int i, initial_elements = fs->rq_elements;
1187 	struct timeval timenow;
1188 
1189 	/* reviewed for getmicrotime usage */
1190 	getmicrotime(&timenow);
1191 
1192 	if (fs->last_expired == timenow.tv_sec) {
1193 		return 0;
1194 	}
1195 	fs->last_expired = (int)timenow.tv_sec;
1196 	for (i = 0; i <= fs->rq_size; i++) { /* last one is overflow */
1197 		for (prev = NULL, q = fs->rq[i]; q != NULL;) {
1198 			if (q->head != NULL || q->S != q->F + 1) {
1199 				prev = q;
1200 				q = q->next;
1201 			} else { /* entry is idle, expire it */
1202 				struct dn_flow_queue *old_q = q;
1203 
1204 				if (prev != NULL) {
1205 					prev->next = q = q->next;
1206 				} else {
1207 					fs->rq[i] = q = q->next;
1208 				}
1209 				fs->rq_elements--;
1210 				kfree_type(struct dn_flow_queue, old_q);
1211 			}
1212 		}
1213 	}
1214 	return initial_elements - fs->rq_elements;
1215 }
1216 
1217 /*
1218  * If room, create a new queue and put at head of slot i;
1219  * otherwise, create or use the default queue.
1220  */
1221 static struct dn_flow_queue *
1222 create_queue(struct dn_flow_set *fs, int i)
1223 {
1224 	struct dn_flow_queue *q;
1225 
1226 	if (fs->rq_elements > fs->rq_size * dn_max_ratio &&
1227 	    expire_queues(fs) == 0) {
1228 		/*
1229 		 * No way to get room, use or create overflow queue.
1230 		 */
1231 		i = fs->rq_size;
1232 		if (fs->rq[i] != NULL) {
1233 			return fs->rq[i];
1234 		}
1235 	}
1236 	q = kalloc_type(struct dn_flow_queue, Z_NOWAIT | Z_ZERO);
1237 	if (q == NULL) {
1238 		printf("dummynet: sorry, cannot allocate queue for new flow\n");
1239 		return NULL;
1240 	}
1241 	q->fs = fs;
1242 	q->hash_slot = i;
1243 	q->next = fs->rq[i];
1244 	q->S = q->F + 1; /* hack - mark timestamp as invalid */
1245 	fs->rq[i] = q;
1246 	fs->rq_elements++;
1247 	return q;
1248 }
1249 
1250 /*
1251  * Given a flow_set and a pkt in last_pkt, find a matching queue
1252  * after appropriate masking. The queue is moved to front
1253  * so that further searches take less time.
1254  */
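/*
 * Masking example (illustrative): a flow_mask whose dst_port is
 * 0xffff and whose other fields are 0 zeroes everything but the
 * destination port before hashing, so all traffic to a given port
 * shares one dn_flow_queue regardless of addresses or protocol.
 */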
1255 static struct dn_flow_queue *
1256 find_queue(struct dn_flow_set *fs, struct ip_flow_id *id)
1257 {
1258 	int i = 0; /* we need i and q for new allocations */
1259 	struct dn_flow_queue *q, *prev;
1260 	int is_v6 = IS_IP6_FLOW_ID(id);
1261 
1262 	if (!(fs->flags_fs & DN_HAVE_FLOW_MASK)) {
1263 		q = fs->rq[0];
1264 	} else {
1265 		/* first, do the masking, then hash */
1266 		id->dst_port &= fs->flow_mask.dst_port;
1267 		id->src_port &= fs->flow_mask.src_port;
1268 		id->proto &= fs->flow_mask.proto;
1269 		id->flags = 0; /* we don't care about this one */
1270 		if (is_v6) {
1271 			APPLY_MASK(&id->dst_ip6, &fs->flow_mask.dst_ip6);
1272 			APPLY_MASK(&id->src_ip6, &fs->flow_mask.src_ip6);
1273 			id->flow_id6 &= fs->flow_mask.flow_id6;
1274 
1275 			i = ((id->dst_ip6.__u6_addr.__u6_addr32[0]) & 0xffff) ^
1276 			    ((id->dst_ip6.__u6_addr.__u6_addr32[1]) & 0xffff) ^
1277 			    ((id->dst_ip6.__u6_addr.__u6_addr32[2]) & 0xffff) ^
1278 			    ((id->dst_ip6.__u6_addr.__u6_addr32[3]) & 0xffff) ^
1279 
1280 			    ((id->dst_ip6.__u6_addr.__u6_addr32[0] >> 15) & 0xffff) ^
1281 			    ((id->dst_ip6.__u6_addr.__u6_addr32[1] >> 15) & 0xffff) ^
1282 			    ((id->dst_ip6.__u6_addr.__u6_addr32[2] >> 15) & 0xffff) ^
1283 			    ((id->dst_ip6.__u6_addr.__u6_addr32[3] >> 15) & 0xffff) ^
1284 
1285 			    ((id->src_ip6.__u6_addr.__u6_addr32[0] << 1) & 0xfffff) ^
1286 			    ((id->src_ip6.__u6_addr.__u6_addr32[1] << 1) & 0xfffff) ^
1287 			    ((id->src_ip6.__u6_addr.__u6_addr32[2] << 1) & 0xfffff) ^
1288 			    ((id->src_ip6.__u6_addr.__u6_addr32[3] << 1) & 0xfffff) ^
1289 
1290 			    ((id->src_ip6.__u6_addr.__u6_addr32[0] >> 16) & 0xffff) ^
1291 			    ((id->src_ip6.__u6_addr.__u6_addr32[1] >> 16) & 0xffff) ^
1292 			    ((id->src_ip6.__u6_addr.__u6_addr32[2] >> 16) & 0xffff) ^
1293 			    ((id->src_ip6.__u6_addr.__u6_addr32[3] >> 16) & 0xffff) ^
1294 
1295 			    (id->dst_port << 1) ^ (id->src_port) ^
1296 			    (id->proto) ^
1297 			    (id->flow_id6);
1298 		} else {
1299 			id->dst_ip &= fs->flow_mask.dst_ip;
1300 			id->src_ip &= fs->flow_mask.src_ip;
1301 
1302 			i = ((id->dst_ip) & 0xffff) ^
1303 			    ((id->dst_ip >> 15) & 0xffff) ^
1304 			    ((id->src_ip << 1) & 0xffff) ^
1305 			    ((id->src_ip >> 16) & 0xffff) ^
1306 			    (id->dst_port << 1) ^ (id->src_port) ^
1307 			    (id->proto);
1308 		}
1309 		i = i % fs->rq_size;
1310 		/* finally, scan the current list for a match */
1311 		searches++;
1312 		for (prev = NULL, q = fs->rq[i]; q;) {
1313 			search_steps++;
1314 			if (is_v6 &&
1315 			    IN6_ARE_ADDR_EQUAL(&id->dst_ip6, &q->id.dst_ip6) &&
1316 			    IN6_ARE_ADDR_EQUAL(&id->src_ip6, &q->id.src_ip6) &&
1317 			    id->dst_port == q->id.dst_port &&
1318 			    id->src_port == q->id.src_port &&
1319 			    id->proto == q->id.proto &&
1320 			    id->flags == q->id.flags &&
1321 			    id->flow_id6 == q->id.flow_id6) {
1322 				break; /* found */
1323 			}
1324 			if (!is_v6 && id->dst_ip == q->id.dst_ip &&
1325 			    id->src_ip == q->id.src_ip &&
1326 			    id->dst_port == q->id.dst_port &&
1327 			    id->src_port == q->id.src_port &&
1328 			    id->proto == q->id.proto &&
1329 			    id->flags == q->id.flags) {
1330 				break; /* found */
1331 			}
1332 			/* No match. Check if we can expire the entry */
1333 			if (pipe_expire && q->head == NULL && q->S == q->F + 1) {
1334 				/* entry is idle and not in any heap, expire it */
1335 				struct dn_flow_queue *old_q = q;
1336 
1337 				if (prev != NULL) {
1338 					prev->next = q = q->next;
1339 				} else {
1340 					fs->rq[i] = q = q->next;
1341 				}
1342 				fs->rq_elements--;
1343 				kfree_type(struct dn_flow_queue, old_q);
1344 				continue;
1345 			}
1346 			prev = q;
1347 			q = q->next;
1348 		}
1349 		if (q && prev != NULL) { /* found and not in front */
1350 			prev->next = q->next;
1351 			q->next = fs->rq[i];
1352 			fs->rq[i] = q;
1353 		}
1354 	}
1355 	if (q == NULL) { /* no match, need to allocate a new entry */
1356 		q = create_queue(fs, i);
1357 		if (q != NULL) {
1358 			q->id = *id;
1359 		}
1360 	}
1361 	return q;
1362 }
1363 
1364 static int
1365 red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len)
1366 {
1367 	/*
1368 	 * RED algorithm
1369 	 *
1370 	 * RED calculates the average queue size (avg) using a low-pass filter
1371 	 * with an exponential weighted (w_q) moving average:
1372 	 *      avg  <-  (1-w_q) * avg + w_q * q_size
1373	 * where q_size is the queue length (measured in bytes or packets).
1374 	 *
1375 	 * If q_size == 0, we compute the idle time for the link, and set
1376 	 *	avg = (1 - w_q)^(idle/s)
1377 	 * where s is the time needed for transmitting a medium-sized packet.
1378 	 *
1379 	 * Now, if avg < min_th the packet is enqueued.
1380 	 * If avg > max_th the packet is dropped. Otherwise, the packet is
1381	 * dropped with a probability P that is a function of avg.
1382 	 *
1383 	 */
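	/*
	 * Worked example (illustrative, in unscaled terms): with
	 * min_th = 5 packets, max_th = 15 and max_p = 0.1, the
	 * precomputed constants are c_1 = max_p/(max_th - min_th) = 0.01
	 * and c_2 = max_p*min_th/(max_th - min_th) = 0.05, so at
	 * avg = 10 the drop probability is p_b = 0.01*10 - 0.05 = 0.05.
	 */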
1384 
1385 	int64_t p_b = 0;
1386 	/* queue in bytes or packets ? */
1387 	u_int q_size = (fs->flags_fs & DN_QSIZE_IS_BYTES) ? q->len_bytes : q->len;
1388 
1389 	DPRINTF(("\ndummynet: %d q: %2u ", (int) curr_time, q_size));
1390 
1391 	/* average queue size estimation */
1392 	if (q_size != 0) {
1393 		/*
1394 		 * queue is not empty, avg <- avg + (q_size - avg) * w_q
1395 		 */
1396 		int diff = SCALE(q_size) - q->avg;
1397 		int64_t v = SCALE_MUL((int64_t) diff, (int64_t) fs->w_q);
1398 
1399 		q->avg += (int) v;
1400 	} else {
1401 		/*
1402 	 * queue is empty, compute how long the queue has been
1403 	 * empty and use a lookup table for computing
1404 	 * (1 - w_q)^(idle_time/s) where s is the time to send a
1405 	 * (small) packet.
1406 		 * XXX check wraps...
1407 		 */
1408 		if (q->avg) {
1409 			u_int64_t t = (curr_time - q->q_time) / fs->lookup_step;
1410 
1411 			q->avg = (t < fs->lookup_depth) ?
1412 			    SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
1413 		}
1414 	}
1415 	DPRINTF(("dummynet: avg: %u ", SCALE_VAL(q->avg)));
1416 
1417 	/* should i drop ? */
1418 
1419 	if (q->avg < fs->min_th) {
1420 		q->count = -1;
1421 		return 0; /* accept packet ; */
1422 	}
1423 	if (q->avg >= fs->max_th) { /* average queue >=  max threshold */
1424 		if (fs->flags_fs & DN_IS_GENTLE_RED) {
1425 			/*
1426 			 * According to Gentle-RED, if avg is greater than max_th the
1427 			 * packet is dropped with a probability
1428 			 *	p_b = c_3 * avg - c_4
1429 			 * where c_3 = (1 - max_p) / max_th, and c_4 = 1 - 2 * max_p
1430 			 */
1431 			p_b = SCALE_MUL((int64_t) fs->c_3, (int64_t) q->avg) - fs->c_4;
1432 		} else {
1433 			q->count = -1;
1434 			DPRINTF(("dummynet: - drop"));
1435 			return 1;
1436 		}
1437 	} else if (q->avg > fs->min_th) {
1438 		/*
1439 		 * we compute p_b using the linear dropping function p_b = c_1 *
1440 		 * avg - c_2, where c_1 = max_p / (max_th - min_th), and c_2 =
1441 		 * max_p * min_th / (max_th - min_th)
1442 		 */
1443 		p_b = SCALE_MUL((int64_t) fs->c_1, (int64_t) q->avg) - fs->c_2;
1444 	}
1445 	if (fs->flags_fs & DN_QSIZE_IS_BYTES) {
1446 		p_b = (p_b * len) / fs->max_pkt_size;
1447 	}
1448 	if (++q->count == 0) {
1449 		q->random = (my_random() & 0xffff);
1450 	} else {
1451 		/*
1452 		 * q->count counts packets arrived since last drop, so a greater
1453 		 * value of q->count means a greater packet drop probability.
1454 		 */
1455 		if (SCALE_MUL(p_b, SCALE((int64_t) q->count)) > q->random) {
1456 			q->count = 0;
1457 			DPRINTF(("dummynet: - red drop"));
1458 			/* after a drop we calculate a new random value */
1459 			q->random = (my_random() & 0xffff);
1460 			return 1; /* drop */
1461 		}
1462 	}
1463 	/* end of RED algorithm */
1464 	return 0; /* accept */
1465 }
1466 
1467 static __inline
1468 struct dn_flow_set *
1469 locate_flowset(int fs_nr)
1470 {
1471 	struct dn_flow_set *fs;
1472 	SLIST_FOREACH(fs, &flowsethash[HASH(fs_nr)], next) {
1473 		if (fs->fs_nr == fs_nr) {
1474 			return fs;
1475 		}
1476 	}
1477 
1478 	return NULL;
1479 }
1480 
1481 static __inline struct dn_pipe *
1482 locate_pipe(int pipe_nr)
1483 {
1484 	struct dn_pipe *pipe;
1485 
1486 	SLIST_FOREACH(pipe, &pipehash[HASH(pipe_nr)], next) {
1487 		if (pipe->pipe_nr == pipe_nr) {
1488 			return pipe;
1489 		}
1490 	}
1491 
1492 	return NULL;
1493 }
1494 
1495 
1496 
1497 /*
1498  * dummynet hook for packets. Below 'pipe' is a pipe or a queue
1499  * depending on whether WF2Q or fixed bw is used.
1500  *
1501  * pipe_nr	pipe or queue the packet is destined for.
1502  * dir		where to send the packet after dummynet.
1503  * m		the mbuf with the packet
1504  * ifp		the 'ifp' parameter from the caller.
1505  *		NULL in ip_input, destination interface in ip_output,
1506  *		real_dst in bdg_forward
1507  * ro		route parameter (only used in ip_output, NULL otherwise)
1508  * dst		destination address, only used by ip_output
1509  * rule		matching rule, in case of multiple passes
1510  * flags	flags from the caller, only used in ip_output
1511  *
1512  */
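/*
 * Illustrative call (an assumption, mirroring how dummynet_io is
 * registered as the ip_dn_io_t hook): a caller that matched a packet
 * against pipe 5 on the output path would invoke
 *	dummynet_io(m, 5, DN_TO_IP_OUT, fwa);
 * after which the mbuf is owned by dummynet (queued, or freed on drop).
 */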
1513 static int
1514 dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa)
1515 {
1516 	struct mbuf *head = NULL, *tail = NULL;
1517 	struct dn_pkt_tag *pkt;
1518 	struct m_tag *mtag;
1519 	struct dn_flow_set *fs = NULL;
1520 	struct dn_pipe *pipe;
1521 	u_int32_t len = m->m_pkthdr.len;
1522 	struct dn_flow_queue *q = NULL;
1523 	int is_pipe = 0;
1524 	struct timespec ts;
1525 	struct timeval      tv;
1526 
1527 	DPRINTF(("dummynet_io m: 0x%llx pipe: %d dir: %d\n",
1528 	    (uint64_t)VM_KERNEL_ADDRPERM(m), pipe_nr, dir));
1529 
1530 
1531 #if DUMMYNET
1532 	is_pipe = fwa->fwa_flags == DN_IS_PIPE ? 1 : 0;
1533 #endif /* DUMMYNET */
1534 
1535 	pipe_nr &= 0xffff;
1536 
1537 	lck_mtx_lock(&dn_mutex);
1538 
1539 	/* make all time measurements in milliseconds (ms) -
1540 	 * here we convert secs and usecs to msecs (just divide the
1541 	 * usecs by 1000 and truncate to a whole number).
1542 	 */
1543 	microuptime(&tv);
1544 	curr_time = (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
1545 
1546 	/*
1547 	 * This is a dummynet rule, so we expect an O_PIPE or O_QUEUE rule.
1548 	 */
1549 	if (is_pipe) {
1550 		pipe = locate_pipe(pipe_nr);
1551 		if (pipe != NULL) {
1552 			fs = &(pipe->fs);
1553 		}
1554 	} else {
1555 		fs = locate_flowset(pipe_nr);
1556 	}
1557 
1558 
1559 	if (fs == NULL) {
1560 		goto dropit; /* this queue/pipe does not exist! */
1561 	}
1562 	pipe = fs->pipe;
1563 	if (pipe == NULL) { /* must be a queue, try find a matching pipe */
1564 		pipe = locate_pipe(fs->parent_nr);
1565 
1566 		if (pipe != NULL) {
1567 			fs->pipe = pipe;
1568 		} else {
1569 			printf("dummynet: no pipe %d for queue %d, drop pkt\n",
1570 			    fs->parent_nr, fs->fs_nr);
1571 			goto dropit;
1572 		}
1573 	}
1574 	q = find_queue(fs, &(fwa->fwa_id));
1575 	if (q == NULL) {
1576 		goto dropit;    /* cannot allocate queue		*/
1577 	}
1578 	/*
1579 	 * update statistics, then check reasons to drop pkt
1580 	 */
1581 	q->tot_bytes += len;
1582 	q->tot_pkts++;
1583 	if (fs->plr && (my_random() < fs->plr)) {
1584 		goto dropit;    /* random pkt drop			*/
1585 	}
1586 	if (fs->flags_fs & DN_QSIZE_IS_BYTES) {
1587 		if (q->len_bytes > fs->qsize) {
1588 			goto dropit; /* queue size overflow			*/
1589 		}
1590 	} else {
1591 		if (q->len >= fs->qsize) {
1592 			goto dropit; /* queue count overflow			*/
1593 		}
1594 	}
1595 	if (fs->flags_fs & DN_IS_RED && red_drops(fs, q, len)) {
1596 		goto dropit;
1597 	}
1598 
1599 	/* XXX expensive to zero, see if we can remove it */
1600 	mtag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DUMMYNET,
1601 	    sizeof(struct dn_pkt_tag), M_NOWAIT, m);
1602 	if (mtag == NULL) {
1603 		goto dropit;            /* cannot allocate packet header	*/
1604 	}
1605 	m_tag_prepend(m, mtag); /* attach to mbuf chain */
1606 
1607 	pkt = (struct dn_pkt_tag *)(mtag + 1);
1608 	bzero(pkt, sizeof(struct dn_pkt_tag));
1609 	/* ok, i can handle the pkt now... */
1610 	/* build and enqueue packet + parameters */
1611 	pkt->dn_pf_rule = fwa->fwa_pf_rule;
1612 	pkt->dn_dir = dir;
1613 
1614 	pkt->dn_ifp = fwa->fwa_oif;
1615 	if (dir == DN_TO_IP_OUT) {
1616 		/*
1617 		 * We need to copy *ro because for ICMP pkts (and maybe others)
1618 		 * the caller passed a pointer into the stack; dst might also be
1619 		 * a pointer into *ro so it needs to be updated.
1620 		 */
1621 		if (fwa->fwa_ro) {
1622 			route_copyout(&pkt->dn_ro, fwa->fwa_ro, sizeof(pkt->dn_ro));
1623 		}
1624 		if (fwa->fwa_dst) {
1625 			if (fwa->fwa_dst == (struct sockaddr_in *)(void *)&fwa->fwa_ro->ro_dst) { /* dst points into ro */
1626 				fwa->fwa_dst = (struct sockaddr_in *)(void *)&(pkt->dn_ro.ro_dst);
1627 			}
1628 
1629 			bcopy(fwa->fwa_dst, &pkt->dn_dst, sizeof(pkt->dn_dst));
1630 		}
1631 	} else if (dir == DN_TO_IP6_OUT) {
1632 		if (fwa->fwa_ro6) {
1633 			route_copyout((struct route *)&pkt->dn_ro6,
1634 			    (struct route *)fwa->fwa_ro6, sizeof(pkt->dn_ro6));
1635 		}
1636 		if (fwa->fwa_ro6_pmtu) {
1637 			route_copyout((struct route *)&pkt->dn_ro6_pmtu,
1638 			    (struct route *)fwa->fwa_ro6_pmtu, sizeof(pkt->dn_ro6_pmtu));
1639 		}
1640 		if (fwa->fwa_dst6) {
1641 			if (fwa->fwa_dst6 == (struct sockaddr_in6 *)&fwa->fwa_ro6->ro_dst) { /* dst points into ro */
1642 				fwa->fwa_dst6 = (struct sockaddr_in6 *)&(pkt->dn_ro6.ro_dst);
1643 			}
1644 
1645 			bcopy(fwa->fwa_dst6, &pkt->dn_dst6, sizeof(pkt->dn_dst6));
1646 		}
1647 		pkt->dn_origifp = fwa->fwa_origifp;
1648 		pkt->dn_mtu = fwa->fwa_mtu;
1649 		pkt->dn_unfragpartlen = fwa->fwa_unfragpartlen;
1650 		if (fwa->fwa_exthdrs) {
1651 			bcopy(fwa->fwa_exthdrs, &pkt->dn_exthdrs, sizeof(pkt->dn_exthdrs));
1652 			/*
1653 			 * Need to zero out the source structure so the mbufs
1654 			 * won't be freed by ip6_output()
1655 			 */
1656 			bzero(fwa->fwa_exthdrs, sizeof(struct ip6_exthdrs));
1657 		}
1658 	}
1659 	if (dir == DN_TO_IP_OUT || dir == DN_TO_IP6_OUT) {
1660 		pkt->dn_flags = fwa->fwa_oflags;
1661 		if (fwa->fwa_ipoa != NULL) {
1662 			pkt->dn_ipoa = *(fwa->fwa_ipoa);
1663 		}
1664 	}
1665 	if (q->head == NULL) {
1666 		q->head = m;
1667 	} else {
1668 		q->tail->m_nextpkt = m;
1669 	}
1670 	q->tail = m;
1671 	q->len++;
1672 	q->len_bytes += len;
1673 
1674 	if (q->head != m) {     /* flow was not idle, we are done */
1675 		goto done;
1676 	}
1677 	/*
1678 	 * If we reach this point the flow was previously idle, so we need
1679 	 * to schedule it. This involves different actions for fixed-rate or
1680 	 * WF2Q queues.
1681 	 */
1682 	if (is_pipe) {
1683 		/*
1684 		 * Fixed-rate queue: just insert into the ready_heap.
1685 		 */
1686 		dn_key t = 0;
1687 		if (pipe->bandwidth) {
1688 			t = SET_TICKS(m, q, pipe);
1689 		}
1690 		q->sched_time = curr_time;
1691 		if (t == 0) { /* must process it now */
1692 			ready_event( q, &head, &tail );
1693 		} else {
1694 			heap_insert(&ready_heap, curr_time + t, q );
1695 		}
1696 	} else {
1697 		/*
1698 		 * WF2Q. First, compute start time S: if the flow was idle (S=F+1)
1699 		 * set S to the virtual time V for the controlling pipe, and update
1700 		 * the sum of weights for the pipe; otherwise, remove flow from
1701 		 * idle_heap and set S to max(F,V).
1702 		 * Second, compute finish time F = S + len/weight.
1703 		 * Third, if pipe was idle, update V=max(S, V).
1704 		 * Fourth, count one more backlogged flow.
1705 		 */
1706 		if (DN_KEY_GT(q->S, q->F)) { /* means timestamps are invalid */
1707 			q->S = pipe->V;
1708 			pipe->sum += fs->weight; /* add weight of new queue */
1709 		} else {
1710 			heap_extract(&(pipe->idle_heap), q);
1711 			q->S = MAX64(q->F, pipe->V );
1712 		}
1713 		q->F = q->S + (len << MY_M) / (u_int64_t) fs->weight;
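		/*
		 * Editorial example: for a previously idle flow S = V, and
		 * with len = 1500 and weight = 3 the finish time becomes
		 * F = V + (1500 << MY_M) / 3; a larger weight yields an
		 * earlier finish time and hence a larger share of service.
		 */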
1714 
1715 		if (pipe->not_eligible_heap.elements == 0 &&
1716 		    pipe->scheduler_heap.elements == 0) {
1717 			pipe->V = MAX64( q->S, pipe->V );
1718 		}
1719 		fs->backlogged++;
1720 		/*
1721 		 * Look at eligibility. A flow is not eligible if S>V (when
1722 		 * this happens, it means that there is some other flow already
1723 		 * scheduled for the same pipe, so the scheduler_heap cannot be
1724 		 * empty). If the flow is not eligible we just store it in the
1725 		 * not_eligible_heap. Otherwise, we store in the scheduler_heap
1726 		 * and possibly invoke ready_event_wfq() right now if there is
1727 		 * leftover credit.
1728 		 * Note that for all flows in scheduler_heap (SCH), S_i <= V,
1729 		 * and for all flows in not_eligible_heap (NEH), S_i > V.
1730 		 * So when we need to compute max( V, min(S_i) ) for all i in SCH+NEH,
1731 		 * we only need to look into NEH.
1732 		 */
1733 		if (DN_KEY_GT(q->S, pipe->V)) { /* not eligible */
1734 			if (pipe->scheduler_heap.elements == 0) {
1735 				printf("dummynet: ++ ouch! not eligible but empty scheduler!\n");
1736 			}
1737 			heap_insert(&(pipe->not_eligible_heap), q->S, q);
1738 		} else {
1739 			heap_insert(&(pipe->scheduler_heap), q->F, q);
1740 			if (pipe->numbytes >= 0) { /* pipe is idle */
1741 				if (pipe->scheduler_heap.elements != 1) {
1742 					printf("dummynet: OUCH! pipe should have been idle!\n");
1743 				}
1744 				DPRINTF(("dummynet: waking up pipe %d at %d\n",
1745 				    pipe->pipe_nr, (int)(q->F >> MY_M)));
1746 				pipe->sched_time = curr_time;
1747 				ready_event_wfq(pipe, &head, &tail);
1748 			}
1749 		}
1750 	}
1751 done:
1752 	/* start the timer and set global if not already set */
1753 	if (!timer_enabled) {
1754 		ts.tv_sec = 0;
1755 		ts.tv_nsec = 1 * 1000000;       // 1ms
1756 		timer_enabled = 1;
1757 		bsd_timeout(dummynet, NULL, &ts);
1758 	}
1759 
1760 	lck_mtx_unlock(&dn_mutex);
1761 
1762 	if (head != NULL) {
1763 		dummynet_send(head);
1764 	}
1765 
1766 	return 0;
1767 
1768 dropit:
1769 	if (q) {
1770 		q->drops++;
1771 	}
1772 	lck_mtx_unlock(&dn_mutex);
1773 	m_freem(m);
1774 	return (fs && (fs->flags_fs & DN_NOERROR)) ? 0 : ENOBUFS;
1775 }
1776 
1777 /*
1778  * The ROUTE_RELEASE below is only needed when pkt->dn_dir == DN_TO_IP_OUT;
1779  * checking the direction first would probably save the initial bzero of dn_pkt.
1780  */
1781 #define DN_FREE_PKT(_m) do {                                    \
1782 	struct m_tag *tag = m_tag_locate(_m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DUMMYNET, NULL); \
1783 	if (tag) {                                              \
1784 	        struct dn_pkt_tag *n = (struct dn_pkt_tag *)(tag+1);    \
1785 	        ROUTE_RELEASE(&n->dn_ro);                       \
1786 	        m_tag_delete(_m, tag);                          \
1787 	}                                                       \
1788 	m_freem(_m);                                            \
1789 } while (0)
1790 
1791 /*
1792  * Dispose all packets and flow_queues on a flow_set.
1793  * If all=1, also remove the RED lookup table and other storage,
1794  * including the descriptor itself.
1795  * For the flow_set embedded in a dn_pipe, the caller MUST also clean up ready_heap...
1796  */
1797 static void
1798 purge_flow_set(struct dn_flow_set *fs, int all)
1799 {
1800 	struct dn_flow_queue *q, *qn;
1801 	int i;
1802 
1803 	LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);
1804 
1805 	for (i = 0; i <= fs->rq_size; i++) {
1806 		for (q = fs->rq[i]; q; q = qn) {
1807 			struct mbuf *m, *mnext;
1808 
1809 			mnext = q->head;
1810 			while ((m = mnext) != NULL) {
1811 				mnext = m->m_nextpkt;
1812 				DN_FREE_PKT(m);
1813 			}
1814 			qn = q->next;
1815 			kfree_type(struct dn_flow_queue, q);
1816 		}
1817 		fs->rq[i] = NULL;
1818 	}
1819 	fs->rq_elements = 0;
1820 	if (all) {
1821 		/* RED - free lookup table */
1822 		if (fs->w_q_lookup) {
1823 			kfree_data(fs->w_q_lookup, fs->lookup_depth * sizeof(int));
1824 		}
1825 		kfree_type(struct dn_flow_queue *, fs->rq_size + 1, fs->rq);
1826 		/* if this fs is not part of a pipe, free it */
1827 		if (fs->pipe && fs != &(fs->pipe->fs)) {
1828 			kfree_type(struct dn_flow_set, fs);
1829 		}
1830 	}
1831 }
1832 
1833 /*
1834  * Dispose all packets queued on a pipe (not a flow_set).
1835  * Also free all resources associated with the pipe, which is about
1836  * to be deleted.
1837  */
1838 static void
1839 purge_pipe(struct dn_pipe *pipe)
1840 {
1841 	struct mbuf *m, *mnext;
1842 
1843 	purge_flow_set( &(pipe->fs), 1 );
1844 
1845 	mnext = pipe->head;
1846 	while ((m = mnext) != NULL) {
1847 		mnext = m->m_nextpkt;
1848 		DN_FREE_PKT(m);
1849 	}
1850 
1851 	heap_free( &(pipe->scheduler_heap));
1852 	heap_free( &(pipe->not_eligible_heap));
1853 	heap_free( &(pipe->idle_heap));
1854 }
1855 
1856 /*
1857  * Delete all pipes and heaps, returning their memory.
1858  */
1859 static void
1860 dummynet_flush(void)
1861 {
1862 	struct dn_pipe *pipe, *pipe1;
1863 	struct dn_flow_set *fs, *fs1;
1864 	int i;
1865 
1866 	lck_mtx_lock(&dn_mutex);
1867 
1868 
1869 	/* Free heaps so we don't have unwanted events. */
1870 	heap_free(&ready_heap);
1871 	heap_free(&wfq_ready_heap);
1872 	heap_free(&extract_heap);
1873 
1874 	/*
1875 	 * Now purge all queued pkts and delete all pipes.
1876 	 *
1877  * XXXGL: can we merge the two for() loops into one?
1878 	 */
1879 	for (i = 0; i < HASHSIZE; i++) {
1880 		SLIST_FOREACH_SAFE(fs, &flowsethash[i], next, fs1) {
1881 			SLIST_REMOVE(&flowsethash[i], fs, dn_flow_set, next);
1882 			purge_flow_set(fs, 1);
1883 		}
1884 	}
1885 	for (i = 0; i < HASHSIZE; i++) {
1886 		SLIST_FOREACH_SAFE(pipe, &pipehash[i], next, pipe1) {
1887 			SLIST_REMOVE(&pipehash[i], pipe, dn_pipe, next);
1888 			purge_pipe(pipe);
1889 			kfree_type(struct dn_pipe, pipe);
1890 		}
1891 	}
1892 	lck_mtx_unlock(&dn_mutex);
1893 }
1894 
1895 /*
1896  * setup RED parameters
1897  */
1898 static int
1899 config_red(struct dn_flow_set *p, struct dn_flow_set *x)
1900 {
1901 	int i;
1902 
1903 	x->w_q = p->w_q;
1904 	x->min_th = SCALE(p->min_th);
1905 	x->max_th = SCALE(p->max_th);
1906 	x->max_p = p->max_p;
1907 
1908 	x->c_1 = p->max_p / (p->max_th - p->min_th);
1909 	x->c_2 = SCALE_MUL(x->c_1, SCALE(p->min_th));
1910 	if (x->flags_fs & DN_IS_GENTLE_RED) {
1911 		x->c_3 = (SCALE(1) - p->max_p) / p->max_th;
1912 		x->c_4 = (SCALE(1) - 2 * p->max_p);
1913 	}
1914 
1915 	/* if the lookup table already exists, free it and create it again */
1916 	if (x->w_q_lookup) {
1917 		kfree_data(x->w_q_lookup, x->lookup_depth * sizeof(int));
1918 		x->w_q_lookup = NULL;
1919 	}
1920 	if (red_lookup_depth == 0) {
1921 		printf("\ndummynet: net.inet.ip.dummynet.red_lookup_depth must be > 0\n");
1922 		return EINVAL;
1923 	}
1924 	x->lookup_depth = red_lookup_depth;
1925 	x->w_q_lookup = (u_int *) kalloc_data(x->lookup_depth * sizeof(int),
1926 	    Z_NOWAIT);
1927 	if (x->w_q_lookup == NULL) {
1928 		printf("dummynet: sorry, cannot allocate red lookup table\n");
1929 		return ENOSPC;
1930 	}
1931 
1932 	/* fill the lookup table with (1 - w_q)^x */
1933 	x->lookup_step = p->lookup_step;
1934 	x->lookup_weight = p->lookup_weight;
1935 	x->w_q_lookup[0] = SCALE(1) - x->w_q;
1936 	for (i = 1; i < x->lookup_depth; i++) {
1937 		x->w_q_lookup[i] = SCALE_MUL(x->w_q_lookup[i - 1], x->lookup_weight);
1938 	}
1939 	if (red_avg_pkt_size < 1) {
1940 		red_avg_pkt_size = 512;
1941 	}
1942 	x->avg_pkt_size = red_avg_pkt_size;
1943 	if (red_max_pkt_size < 1) {
1944 		red_max_pkt_size = 1500;
1945 	}
1946 	x->max_pkt_size = red_max_pkt_size;
1947 	return 0;
1948 }
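/*
 * Editorial note (not in the original source): the table-fill loop in
 * config_red() above builds a geometric sequence in fixed point, where each
 * entry is the previous one scaled by lookup_weight, starting from
 * SCALE(1) - w_q.  A guarded userland sketch of the same fill follows; the
 * names fill_red_lookup() and SKETCH_* are hypothetical.
 */
#if 0 /* illustrative sketch, never compiled */
#include <stdint.h>

#define SKETCH_SCALE_SHIFT     16
#define SKETCH_SCALE_MUL(a, b) (((a) * (b)) >> SKETCH_SCALE_SHIFT)

/* Fill table[0..depth-1]: table[i] = table[0] scaled by weight^i. */
static void
fill_red_lookup(int64_t *table, int depth, int64_t one_minus_wq, int64_t weight)
{
	int i;

	table[0] = one_minus_wq;        /* SCALE(1) - w_q */
	for (i = 1; i < depth; i++) {
		table[i] = SKETCH_SCALE_MUL(table[i - 1], weight);
	}
}
#endif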
1949 
1950 static int
1951 alloc_hash(struct dn_flow_set *x, struct dn_flow_set *pfs)
1952 {
1953 	if (x->flags_fs & DN_HAVE_FLOW_MASK) { /* allocate some slots */
1954 		int l = pfs->rq_size;
1955 
1956 		if (l == 0) {
1957 			l = dn_hash_size;
1958 		}
1959 		if (l < 4) {
1960 			l = 4;
1961 		} else if (l > DN_MAX_HASH_SIZE) {
1962 			l = DN_MAX_HASH_SIZE;
1963 		}
1964 		x->rq_size = l;
1965 	} else {            /* one is enough for null mask */
1966 		x->rq_size = 1;
1967 	}
1968 	x->rq = kalloc_type(struct dn_flow_queue *, x->rq_size + 1,
1969 	    Z_NOWAIT | Z_ZERO);
1970 	if (x->rq == NULL) {
1971 		printf("dummynet: sorry, cannot allocate queue\n");
1972 		return ENOSPC;
1973 	}
1974 	x->rq_elements = 0;
1975 	return 0;
1976 }
1977 
1978 static int
1979 set_fs_parms(struct dn_flow_set *x, struct dn_flow_set *src)
1980 {
1981 	x->flags_fs = src->flags_fs;
1982 	x->qsize = src->qsize;
1983 	x->plr = src->plr;
1984 	x->flow_mask = src->flow_mask;
1985 	if (x->flags_fs & DN_QSIZE_IS_BYTES) {
1986 		if (x->qsize > 1024 * 1024) {
1987 			x->qsize = 1024 * 1024;
1988 		}
1989 	} else {
1990 		if (x->qsize == 0) {
1991 			x->qsize = 50;
1992 		}
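		/*
		 * Editorial note: slot counts above 100 fall back to the
		 * 50-slot default below rather than being clamped to 100.
		 */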
1993 		if (x->qsize > 100) {
1994 			x->qsize = 50;
1995 		}
1996 	}
1997 	/* configuring RED */
1998 	if (x->flags_fs & DN_IS_RED) {
1999 		return config_red(src, x); /* result is checked by callers */
2000 	}
2001 	return 0;
2002 }
2003 
2004 /*
2005  * setup pipe or queue parameters.
2006  */
2007 static int
2008 config_pipe(struct dn_pipe *p)
2009 {
2010 	int i, r;
2011 	struct dn_flow_set *pfs = &(p->fs);
2012 	struct dn_flow_queue *q;
2013 	bool is_new = false;
2014 
2015 	/*
2016 	 * The config program passes parameters as follows:
2017 	 * bw = bits/second (0 means no limits),
2018 	 * delay = ms, must be translated into ticks.
2019 	 * qsize = slots/bytes
2020 	 */
2021 	p->delay = (p->delay * (hz * 10)) / 1000;
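	/*
	 * Editorial example: with hz = 100, a requested delay of 100 ms
	 * becomes (100 * (100 * 10)) / 1000 = 100 ticks.
	 */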
2022 	/* We need either a pipe number or a flow_set number */
2023 	if (p->pipe_nr == 0 && pfs->fs_nr == 0) {
2024 		return EINVAL;
2025 	}
2026 	if (p->pipe_nr != 0 && pfs->fs_nr != 0) {
2027 		return EINVAL;
2028 	}
2029 	if (p->pipe_nr != 0) { /* this is a pipe */
2030 		struct dn_pipe *x, *b;
2031 		struct dummynet_event dn_event;
2032 		lck_mtx_lock(&dn_mutex);
2033 
2034 		/* locate pipe */
2035 		b = locate_pipe(p->pipe_nr);
2036 
2037 		if (b == NULL || b->pipe_nr != p->pipe_nr) { /* new pipe */
2038 			is_new = true;
2039 			x = kalloc_type(struct dn_pipe, Z_NOWAIT | Z_ZERO);
2040 			if (x == NULL) {
2041 				lck_mtx_unlock(&dn_mutex);
2042 				printf("dummynet: no memory for new pipe\n");
2043 				return ENOSPC;
2044 			}
2045 			x->pipe_nr = p->pipe_nr;
2046 			x->fs.pipe = x;
2047 			/* idle_heap is the only heap from which we extract
2048 			 * entries from the middle. */
2049 			x->idle_heap.size = x->idle_heap.elements = 0;
2050 			x->idle_heap.offset = offsetof(struct dn_flow_queue, heap_pos);
2051 		} else {
2052 			x = b;
2053 			/* Flush accumulated credit for all queues */
2054 			for (i = 0; i <= x->fs.rq_size; i++) {
2055 				for (q = x->fs.rq[i]; q; q = q->next) {
2056 					q->numbytes = 0;
2057 				}
2058 			}
2059 		}
2060 
2061 		x->bandwidth = p->bandwidth;
2062 		x->numbytes = 0; /* just in case... */
2063 		bcopy(p->if_name, x->if_name, sizeof(p->if_name));
2064 		x->ifp = NULL; /* reset interface ptr */
2065 		x->delay = p->delay;
2066 		r = set_fs_parms(&(x->fs), pfs);
2067 		if (r != 0) {
2068 			lck_mtx_unlock(&dn_mutex);
2069 			if (is_new) { /* a new pipe */
2070 				kfree_type(struct dn_pipe, x);
2071 			}
2072 			return r;
2073 		}
2074 
2075 		if (x->fs.rq == NULL) { /* a new pipe */
2076 			r = alloc_hash(&(x->fs), pfs);
2077 			if (r) {
2078 				lck_mtx_unlock(&dn_mutex);
2079 				if (is_new) {
2080 					kfree_type(struct dn_pipe, x);
2081 				}
2082 				return r;
2083 			}
2084 			SLIST_INSERT_HEAD(&pipehash[HASH(x->pipe_nr)],
2085 			    x, next);
2086 		}
2087 		lck_mtx_unlock(&dn_mutex);
2088 
2089 		bzero(&dn_event, sizeof(dn_event));
2090 		dn_event.dn_event_code = DUMMYNET_PIPE_CONFIG;
2091 		dn_event.dn_event_pipe_config.bandwidth = p->bandwidth;
2092 		dn_event.dn_event_pipe_config.delay = p->delay;
2093 		dn_event.dn_event_pipe_config.plr = pfs->plr;
2094 
2095 		dummynet_event_enqueue_nwk_wq_entry(&dn_event);
2096 	} else { /* config queue */
2097 		struct dn_flow_set *x, *b;
2098 
2099 		lck_mtx_lock(&dn_mutex);
2100 		/* locate flow_set */
2101 		b = locate_flowset(pfs->fs_nr);
2102 
2103 		if (b == NULL || b->fs_nr != pfs->fs_nr) { /* new flow_set */
2104 			is_new = true;
2105 			if (pfs->parent_nr == 0) { /* need link to a pipe */
2106 				lck_mtx_unlock(&dn_mutex);
2107 				return EINVAL;
2108 			}
2109 			x = kalloc_type(struct dn_flow_set, Z_NOWAIT | Z_ZERO);
2110 			if (x == NULL) {
2111 				lck_mtx_unlock(&dn_mutex);
2112 				printf("dummynet: no memory for new flow_set\n");
2113 				return ENOSPC;
2114 			}
2115 			x->fs_nr = pfs->fs_nr;
2116 			x->parent_nr = pfs->parent_nr;
2117 			x->weight = pfs->weight;
2118 			if (x->weight == 0) {
2119 				x->weight = 1;
2120 			} else if (x->weight > 100) {
2121 				x->weight = 100;
2122 			}
2123 		} else {
2124 			/* Changing the parent pipe is not allowed; delete and recreate the queue */
2125 			if (pfs->parent_nr != 0 && b->parent_nr != pfs->parent_nr) {
2126 				lck_mtx_unlock(&dn_mutex);
2127 				return EINVAL;
2128 			}
2129 			x = b;
2130 		}
2131 		r = set_fs_parms(x, pfs);
2132 		if (r != 0) {
2133 			lck_mtx_unlock(&dn_mutex);
2134 			printf("dummynet: cannot configure flow_set\n");
2135 			if (is_new) {
2136 				kfree_type(struct dn_flow_set, x);
2137 			}
2138 			return r;
2139 		}
2140 
2141 		if (x->rq == NULL) { /* a new flow_set */
2142 			r = alloc_hash(x, pfs);
2143 			if (r) {
2144 				lck_mtx_unlock(&dn_mutex);
2145 				kfree_type(struct dn_flow_set, x);
2146 				return r;
2147 			}
2148 			SLIST_INSERT_HEAD(&flowsethash[HASH(x->fs_nr)],
2149 			    x, next);
2150 		}
2151 		lck_mtx_unlock(&dn_mutex);
2152 	}
2153 	return 0;
2154 }
2155 
2156 /*
2157  * Helper function to remove from a heap queues which are linked to
2158  * a flow_set about to be deleted.
2159  */
2160 static void
2161 fs_remove_from_heap(struct dn_heap *h, struct dn_flow_set *fs)
2162 {
2163 	int i = 0, found = 0;
2164 	for (; i < h->elements;) {
2165 		if (((struct dn_flow_queue *)h->p[i].object)->fs == fs) {
2166 			h->elements--;
2167 			h->p[i] = h->p[h->elements];
2168 			found++;
2169 		} else {
2170 			i++;
2171 		}
2172 	}
2173 	if (found) {
2174 		heapify(h);
2175 	}
2176 }
2177 
2178 /*
2179  * helper function to remove a pipe from a heap (can be there at most once)
2180  */
2181 static void
2182 pipe_remove_from_heap(struct dn_heap *h, struct dn_pipe *p)
2183 {
2184 	if (h->elements > 0) {
2185 		int i = 0;
2186 		for (i = 0; i < h->elements; i++) {
2187 			if (h->p[i].object == p) { /* found it */
2188 				h->elements--;
2189 				h->p[i] = h->p[h->elements];
2190 				heapify(h);
2191 				break;
2192 			}
2193 		}
2194 	}
2195 }
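/*
 * Editorial note: both helpers above delete heap entries by overwriting the
 * vacated slot with the heap's last element and then restoring the heap
 * invariant with a single heapify() pass, instead of extracting elements
 * one at a time.
 */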
2196 
2197 /*
2198  * drain all queues. Called in case of severe mbuf shortage.
2199  */
2200 void
2201 dummynet_drain(void)
2202 {
2203 	struct dn_flow_set *fs;
2204 	struct dn_pipe *p;
2205 	struct mbuf *m, *mnext;
2206 	int i;
2207 
2208 	LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);
2209 
2210 	heap_free(&ready_heap);
2211 	heap_free(&wfq_ready_heap);
2212 	heap_free(&extract_heap);
2213 	/* purge all queued packets on the flow_sets */
2214 	for (i = 0; i < HASHSIZE; i++) {
2215 		SLIST_FOREACH(fs, &flowsethash[i], next) {
2216 			purge_flow_set(fs, 0);
2217 		}
2218 	}
2219 
2220 	for (i = 0; i < HASHSIZE; i++) {
2221 		SLIST_FOREACH(p, &pipehash[i], next) {
2222 			purge_flow_set(&(p->fs), 0);
2223 
2224 			mnext = p->head;
2225 			while ((m = mnext) != NULL) {
2226 				mnext = m->m_nextpkt;
2227 				DN_FREE_PKT(m);
2228 			}
2229 			p->head = p->tail = NULL;
2230 		}
2231 	}
2232 }
2233 
2234 /*
2235  * Fully delete a pipe or a queue, cleaning up associated info.
2236  */
2237 static int
2238 delete_pipe(struct dn_pipe *p)
2239 {
2240 	if (p->pipe_nr == 0 && p->fs.fs_nr == 0) {
2241 		return EINVAL;
2242 	}
2243 	if (p->pipe_nr != 0 && p->fs.fs_nr != 0) {
2244 		return EINVAL;
2245 	}
2246 	if (p->pipe_nr != 0) { /* this is an old-style pipe */
2247 		struct dn_pipe *b;
2248 		struct dn_flow_set *fs;
2249 		int i;
2250 
2251 		lck_mtx_lock(&dn_mutex);
2252 		/* locate pipe */
2253 		b = locate_pipe(p->pipe_nr);
2254 		if (b == NULL) {
2255 			lck_mtx_unlock(&dn_mutex);
2256 			return EINVAL; /* not found */
2257 		}
2258 
2259 		/* Unlink from list of pipes. */
2260 		SLIST_REMOVE(&pipehash[HASH(b->pipe_nr)], b, dn_pipe, next);
2261 
2262 
2263 		/* Remove all references to this pipe from flow_sets. */
2264 		for (i = 0; i < HASHSIZE; i++) {
2265 			SLIST_FOREACH(fs, &flowsethash[i], next) {
2266 				if (fs->pipe == b) {
2267 					printf("dummynet: ++ ref to pipe %d from fs %d\n",
2268 					    p->pipe_nr, fs->fs_nr);
2269 					fs->pipe = NULL;
2270 					purge_flow_set(fs, 0);
2271 				}
2272 			}
2273 		}
2274 		fs_remove_from_heap(&ready_heap, &(b->fs));
2275 
2276 		purge_pipe(b); /* remove all data associated to this pipe */
2277 		/* remove reference to here from extract_heap and wfq_ready_heap */
2278 		pipe_remove_from_heap(&extract_heap, b);
2279 		pipe_remove_from_heap(&wfq_ready_heap, b);
2280 		lck_mtx_unlock(&dn_mutex);
2281 
2282 		kfree_type(struct dn_pipe, b);
2283 	} else { /* this is a WF2Q queue (dn_flow_set) */
2284 		struct dn_flow_set *b;
2285 
2286 		lck_mtx_lock(&dn_mutex);
2287 		/* locate set */
2288 		b = locate_flowset(p->fs.fs_nr);
2289 		if (b == NULL) {
2290 			lck_mtx_unlock(&dn_mutex);
2291 			return EINVAL; /* not found */
2292 		}
2293 
2294 
2295 		/* Unlink from list of flowsets. */
2296 		SLIST_REMOVE( &flowsethash[HASH(b->fs_nr)], b, dn_flow_set, next);
2297 
2298 		if (b->pipe != NULL) {
2299 			/* Update total weight on parent pipe and cleanup parent heaps */
2300 			b->pipe->sum -= b->weight * b->backlogged;
2301 			fs_remove_from_heap(&(b->pipe->not_eligible_heap), b);
2302 			fs_remove_from_heap(&(b->pipe->scheduler_heap), b);
2303 #if 1   /* XXX should i remove from idle_heap as well ? */
2304 			fs_remove_from_heap(&(b->pipe->idle_heap), b);
2305 #endif
2306 		}
2307 		purge_flow_set(b, 1);
2308 		lck_mtx_unlock(&dn_mutex);
2309 	}
2310 	return 0;
2311 }
2312 
2313 /*
2314  * helper function used to copy queue data to userland for IP_DUMMYNET_GET
2315  */
2316 static
2317 char*
2318 dn_copy_set_32(struct dn_flow_set *set, char *bp)
2319 {
2320 	int i, copied = 0;
2321 	struct dn_flow_queue *q;
2322 	struct dn_flow_queue_32 *qp = (struct dn_flow_queue_32 *)(void *)bp;
2323 
2324 	LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);
2325 
2326 	for (i = 0; i <= set->rq_size; i++) {
2327 		for (q = set->rq[i]; q; q = q->next, qp++) {
2328 			if (q->hash_slot != i) {
2329 				printf("dummynet: ++ at %d: wrong slot (have %d, "
2330 				    "should be %d)\n", copied, q->hash_slot, i);
2331 			}
2332 			if (q->fs != set) {
2333 				printf("dummynet: ++ at %d: wrong fs ptr "
2334 				    "(have 0x%llx, should be 0x%llx)\n", i,
2335 				    (uint64_t)VM_KERNEL_ADDRPERM(q->fs),
2336 				    (uint64_t)VM_KERNEL_ADDRPERM(set));
2337 			}
2338 			copied++;
2339 			cp_queue_to_32_user( q, qp );
2340 			/* cleanup pointers */
2341 			qp->next = (user32_addr_t)0;
2342 			qp->head = qp->tail = (user32_addr_t)0;
2343 			qp->fs = (user32_addr_t)0;
2344 		}
2345 	}
2346 	if (copied != set->rq_elements) {
2347 		printf("dummynet: ++ wrong count, have %d should be %d\n",
2348 		    copied, set->rq_elements);
2349 	}
2350 	return (char *)qp;
2351 }
2352 
2353 static
2354 char*
2355 dn_copy_set_64(struct dn_flow_set *set, char *bp)
2356 {
2357 	int i, copied = 0;
2358 	struct dn_flow_queue *q;
2359 	struct dn_flow_queue_64 *qp = (struct dn_flow_queue_64 *)(void *)bp;
2360 
2361 	LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);
2362 
2363 	for (i = 0; i <= set->rq_size; i++) {
2364 		for (q = set->rq[i]; q; q = q->next, qp++) {
2365 			if (q->hash_slot != i) {
2366 				printf("dummynet: ++ at %d: wrong slot (have %d, "
2367 				    "should be %d)\n", copied, q->hash_slot, i);
2368 			}
2369 			if (q->fs != set) {
2370 				printf("dummynet: ++ at %d: wrong fs ptr "
2371 				    "(have 0x%llx, should be 0x%llx)\n", i,
2372 				    (uint64_t)VM_KERNEL_ADDRPERM(q->fs),
2373 				    (uint64_t)VM_KERNEL_ADDRPERM(set));
2374 			}
2375 			copied++;
2377 			cp_queue_to_64_user( q, qp );
2378 			/* cleanup pointers */
2379 			qp->next = USER_ADDR_NULL;
2380 			qp->head = qp->tail = USER_ADDR_NULL;
2381 			qp->fs = USER_ADDR_NULL;
2382 		}
2383 	}
2384 	if (copied != set->rq_elements) {
2385 		printf("dummynet: ++ wrong count, have %d should be %d\n",
2386 		    copied, set->rq_elements);
2387 	}
2388 	return (char *)qp;
2389 }
2390 
2391 static size_t
2392 dn_calc_size(int is64user)
2393 {
2394 	struct dn_flow_set *set;
2395 	struct dn_pipe *p;
2396 	size_t size = 0;
2397 	size_t pipesize;
2398 	size_t queuesize;
2399 	size_t setsize;
2400 	int i;
2401 
2402 	LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);
2403 	if (is64user) {
2404 		pipesize = sizeof(struct dn_pipe_64);
2405 		queuesize = sizeof(struct dn_flow_queue_64);
2406 		setsize = sizeof(struct dn_flow_set_64);
2407 	} else {
2408 		pipesize = sizeof(struct dn_pipe_32);
2409 		queuesize = sizeof(struct dn_flow_queue_32);
2410 		setsize = sizeof(struct dn_flow_set_32);
2411 	}
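	/*
	 * Editorial note: pipesize/queuesize/setsize computed above are not
	 * used by the sizing loop below, which measures the kernel structure
	 * layouts instead; the estimate stays safe only while the kernel
	 * layouts are no smaller than the exported 32/64-bit layouts.
	 */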
2412 	/*
2413 	 * compute size of data structures: list of pipes and flow_sets.
2414 	 */
2415 	for (i = 0; i < HASHSIZE; i++) {
2416 		SLIST_FOREACH(p, &pipehash[i], next) {
2417 			size += sizeof(*p) +
2418 			    p->fs.rq_elements * sizeof(struct dn_flow_queue);
2419 		}
2420 		SLIST_FOREACH(set, &flowsethash[i], next) {
2421 			size += sizeof(*set) +
2422 			    set->rq_elements * sizeof(struct dn_flow_queue);
2423 		}
2424 	}
2425 	return size;
2426 }
2427 
2428 static int
2429 dummynet_get(struct sockopt *sopt)
2430 {
2431 	char *buf = NULL, *bp = NULL; /* bp is the "copy-pointer" */
2432 	size_t size = 0;
2433 	struct dn_flow_set *set;
2434 	struct dn_pipe *p;
2435 	int error = 0, i;
2436 	int is64user = 0;
2437 
2438 	/* XXX lock held too long */
2439 	lck_mtx_lock(&dn_mutex);
2440 	/*
2441 	 * XXX: Ugly, but we need to allocate memory with the Z_WAITOK flag
2442 	 * and we cannot block on that while holding a mutex.
2443 	 */
2444 	if (proc_is64bit(sopt->sopt_p)) {
2445 		is64user = 1;
2446 	}
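	/*
	 * Recompute the size after reacquiring the lock: pipes or flow_sets
	 * may have been added while the lock was dropped for the blocking
	 * allocation, in which case the buffer is freed and sizing retried.
	 */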
2447 	for (i = 0; i < 10; i++) {
2448 		size = dn_calc_size(is64user);
2449 		lck_mtx_unlock(&dn_mutex);
2450 		buf = kalloc_data(size, Z_WAITOK | Z_ZERO);
2451 		if (buf == NULL) {
2452 			return ENOBUFS;
2453 		}
2454 		lck_mtx_lock(&dn_mutex);
2455 		if (size == dn_calc_size(is64user)) {
2456 			break;
2457 		}
2458 		kfree_data(buf, size);
2459 		buf = NULL;
2460 	}
2461 	if (buf == NULL) {
2462 		lck_mtx_unlock(&dn_mutex);
2463 		return ENOBUFS;
2464 	}
2465 
2466 	bp = buf;
2467 	for (i = 0; i < HASHSIZE; i++) {
2468 		SLIST_FOREACH(p, &pipehash[i], next) {
2469 			/*
2470 			 * copy pipe descriptor into *bp, convert delay
2471 			 * back to ms, then copy the flow_set descriptor(s)
2472 			 * one at a time. After each flow_set, copy the
2473 			 * queue descriptors it owns.
2474 			 */
2475 			if (is64user) {
2476 				bp = cp_pipe_to_64_user(p,
2477 				    (struct dn_pipe_64 *)(void *)bp);
2478 			} else {
2479 				bp = cp_pipe_to_32_user(p,
2480 				    (struct dn_pipe_32 *)(void *)bp);
2481 			}
2482 		}
2483 	}
2484 	for (i = 0; i < HASHSIZE; i++) {
2485 		SLIST_FOREACH(set, &flowsethash[i], next) {
2486 			struct dn_flow_set_64 *fs_bp =
2487 			    (struct dn_flow_set_64 *)(void *)bp;
2488 			cp_flow_set_to_64_user(set, fs_bp);
2489 			/* XXX same hack as above: mark this entry as a queue via ->next */
2490 			fs_bp->next = CAST_DOWN(user64_addr_t,
2491 			    DN_IS_QUEUE);
2492 			fs_bp->pipe = USER_ADDR_NULL;
2493 			fs_bp->rq = USER_ADDR_NULL;
2494 			bp += sizeof(struct dn_flow_set_64);
2495 			bp = dn_copy_set_64( set, bp );
2496 		}
2497 	}
2498 	lck_mtx_unlock(&dn_mutex);
2499 	error = sooptcopyout(sopt, buf, size);
2500 	kfree_data(buf, size);
2501 	return error;
2502 }
2503 
2504 /*
2505  * Handler for the various dummynet socket options (get, flush, config, del)
2506  */
2507 static int
2508 ip_dn_ctl(struct sockopt *sopt)
2509 {
2510 	int error = 0;
2511 	struct dn_pipe *p, tmp_pipe;
2512 
2513 	/* Disallow sets in really-really secure mode. */
2514 	if (sopt->sopt_dir == SOPT_SET && securelevel >= 3) {
2515 		return EPERM;
2516 	}
2517 
2518 	switch (sopt->sopt_name) {
2519 	default:
2520 		printf("dummynet: -- unknown option %d\n", sopt->sopt_name);
2521 		return EINVAL;
2522 
2523 	case IP_DUMMYNET_GET:
2524 		error = dummynet_get(sopt);
2525 		break;
2526 
2527 	case IP_DUMMYNET_FLUSH:
2528 		dummynet_flush();
2529 		break;
2530 
2531 	case IP_DUMMYNET_CONFIGURE:
2532 		p = &tmp_pipe;
2533 		if (proc_is64bit(sopt->sopt_p)) {
2534 			error = cp_pipe_from_user_64( sopt, p );
2535 		} else {
2536 			error = cp_pipe_from_user_32( sopt, p );
2537 		}
2538 
2539 		if (error) {
2540 			break;
2541 		}
2542 		error = config_pipe(p);
2543 		break;
2544 
2545 	case IP_DUMMYNET_DEL:   /* remove a pipe or queue */
2546 		p = &tmp_pipe;
2547 		if (proc_is64bit(sopt->sopt_p)) {
2548 			error = cp_pipe_from_user_64( sopt, p );
2549 		} else {
2550 			error = cp_pipe_from_user_32( sopt, p );
2551 		}
2552 		if (error) {
2553 			break;
2554 		}
2555 
2556 		error = delete_pipe(p);
2557 		break;
2558 	}
2559 	return error;
2560 }
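/*
 * Editorial note (not in the original source): ip_dn_ctl() is reached
 * through the raw-IP socket option path, so a privileged userland tool
 * would drive it roughly as sketched below.  The pipe structure layout is
 * defined in ip_dummynet.h; the function name sketch_config_pipe() is
 * hypothetical and the sketch assumes only the option names seen above.
 */
#if 0 /* illustrative userland sketch, never compiled */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip_dummynet.h>
#include <string.h>
#include <unistd.h>

/* Configure (or reconfigure) a pipe; requires root for the raw socket. */
static int
sketch_config_pipe(int pipe_nr, int bandwidth, int delay_ms)
{
	struct dn_pipe pipe;
	int s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);

	if (s < 0) {
		return -1;
	}
	memset(&pipe, 0, sizeof(pipe));
	pipe.pipe_nr = pipe_nr;        /* which pipe to (re)configure */
	pipe.bandwidth = bandwidth;    /* bits/second, 0 = unlimited */
	pipe.delay = delay_ms;         /* milliseconds, converted in the kernel */
	if (setsockopt(s, IPPROTO_IP, IP_DUMMYNET_CONFIGURE,
	    &pipe, sizeof(pipe)) < 0) {
		close(s);
		return -1;
	}
	close(s);
	return 0;
}
#endif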
2561 
2562 void
2563 dummynet_init(void)
2564 {
2565 	eventhandler_lists_ctxt_init(&dummynet_evhdlr_ctxt);
2566 }
2567 
2568 void
2569 ip_dn_init(void)
2570 {
2571 	/* setup locks */
2572 	/* initialize the heaps and install the dummynet hooks */
2573 	ready_heap.offset = 0;
2574 
2575 	wfq_ready_heap.size = wfq_ready_heap.elements = 0;
2576 	wfq_ready_heap.offset = 0;
2577 
2578 	extract_heap.size = extract_heap.elements = 0;
2579 	extract_heap.offset = 0;
2580 	ip_dn_ctl_ptr = ip_dn_ctl;
2581 	ip_dn_io_ptr = dummynet_io;
2582 }
2583 
2584 struct dn_event_nwk_wq_entry {
2585 	struct nwk_wq_entry nwk_wqe;
2586 	struct dummynet_event dn_ev_arg;
2587 };
2588 
2589 static void
2590 dummynet_event_callback(struct nwk_wq_entry *nwk_item)
2591 {
2592 	struct dn_event_nwk_wq_entry *p_ev;
2593 
2594 	p_ev = __container_of(nwk_item, struct dn_event_nwk_wq_entry, nwk_wqe);
2595 
2596 	EVENTHANDLER_INVOKE(&dummynet_evhdlr_ctxt, dummynet_event, &p_ev->dn_ev_arg);
2597 
2598 	kfree_type(struct dn_event_nwk_wq_entry, p_ev);
2599 }
2600 
2601 void
2602 dummynet_event_enqueue_nwk_wq_entry(struct dummynet_event *p_dn_event)
2603 {
2604 	struct dn_event_nwk_wq_entry *p_ev = NULL;
2605 
2606 	p_ev = kalloc_type(struct dn_event_nwk_wq_entry,
2607 	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
2608 	p_ev->nwk_wqe.func = dummynet_event_callback;
2609 	p_ev->dn_ev_arg = *p_dn_event;
2610 	nwk_wq_enqueue(&p_ev->nwk_wqe);
2611 }
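/*
 * Editorial note (not in the original source): __container_of() used in
 * dummynet_event_callback() recovers the enclosing dn_event_nwk_wq_entry
 * from the embedded nwk_wq_entry pointer, i.e. the classic pattern
 * sketched below with a hypothetical macro name.
 */
#if 0 /* illustrative sketch, never compiled */
#include <stddef.h>

#define sketch_container_of(ptr, type, member) \
	((type *)(void *)((char *)(ptr) - offsetof(type, member)))
#endif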
2612