1 /*
2 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1998-2002 Luigi Rizzo, Universita` di Pisa
30 * Portions Copyright (c) 2000 Akamba Corp.
31 * All rights reserved
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 *
42 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52 * SUCH DAMAGE.
53 *
54 * $FreeBSD: src/sys/netinet/ip_dummynet.c,v 1.84 2004/08/25 09:31:30 pjd Exp $
55 */
56
57 #define DUMMYNET_DEBUG
58
59 /*
60 * This module implements IP dummynet, a bandwidth limiter/delay emulator
61 * Description of the data structures used is in ip_dummynet.h
62 * Here you mainly find the following blocks of code:
63 * + variable declarations;
64 * + heap management functions;
65 * + scheduler and dummynet functions;
66 * + configuration and initialization.
67 *
68 * NOTA BENE: critical sections are protected by the "dummynet lock".
69 *
70 * Most important Changes:
71 *
72 * 010124: Fixed WF2Q behaviour
73 * 010122: Fixed spl protection.
74 * 000601: WF2Q support
75 * 000106: large rewrite, use heaps to handle very many pipes.
76 * 980513: initial release
77 *
78 * include files marked with XXX are probably not needed
79 */
80
81 #include <sys/param.h>
82 #include <sys/systm.h>
83 #include <sys/malloc.h>
84 #include <sys/mbuf.h>
85 #include <sys/queue.h> /* XXX */
86 #include <sys/kernel.h>
87 #include <sys/random.h>
88 #include <sys/socket.h>
89 #include <sys/socketvar.h>
90 #include <sys/time.h>
91 #include <sys/sysctl.h>
92 #include <net/if.h>
93 #include <net/route.h>
94 #include <net/kpi_protocol.h>
95 #if DUMMYNET
96 #include <net/kpi_protocol.h>
97 #endif /* DUMMYNET */
98 #include <net/nwk_wq.h>
99 #include <net/pfvar.h>
100 #include <netinet/in.h>
101 #include <netinet/in_systm.h>
102 #include <netinet/in_var.h>
103 #include <netinet/ip.h>
104 #include <netinet/ip_dummynet.h>
105 #include <netinet/ip_var.h>
106
107 #include <netinet/ip6.h> /* for ip6_input, ip6_output prototypes */
108 #include <netinet6/ip6_var.h>
109
110 /*
111 * We keep a private variable for the simulation time, but we could
112 * probably use an existing one ("softticks" in sys/kern/kern_timer.c)
113 */
114 static dn_key curr_time = 0; /* current simulation time */
115
116 /* this is for the timer that fires to call dummynet() - we only enable the timer when
117 * there are packets to process, otherwise it's disabled */
118 static int timer_enabled = 0;
119
120 static int dn_hash_size = 64; /* default hash size */
121
122 /* statistics on number of queue searches and search steps */
123 static int searches, search_steps;
124 static int pipe_expire = 1; /* expire queue if empty */
125 static int dn_max_ratio = 16; /* max queues/buckets ratio */
126
127 static int red_lookup_depth = 256; /* RED - default lookup table depth */
128 static int red_avg_pkt_size = 512; /* RED - default medium packet size */
129 static int red_max_pkt_size = 1500; /* RED - default max packet size */
130
131 static int serialize = 0;
132
133 /*
134 * Three heaps contain queues and pipes that the scheduler handles:
135 *
136 * ready_heap contains all dn_flow_queue related to fixed-rate pipes.
137 *
138 * wfq_ready_heap contains the pipes associated with WF2Q flows
139 *
140 * extract_heap contains pipes associated with delay lines.
141 *
142 */
143 static struct dn_heap ready_heap, extract_heap, wfq_ready_heap;
144
145 static int heap_init(struct dn_heap *h, int size);
146 static int heap_insert(struct dn_heap *h, dn_key key1, void *p);
147 static void heap_extract(struct dn_heap *h, void *obj);
148
149
150 static void transmit_event(struct dn_pipe *pipe, struct mbuf **head,
151 struct mbuf **tail);
152 static void ready_event(struct dn_flow_queue *q, struct mbuf **head,
153 struct mbuf **tail);
154 static void ready_event_wfq(struct dn_pipe *p, struct mbuf **head,
155 struct mbuf **tail);
156
157 /*
158 * Packets are retrieved from queues in Dummynet in chains instead of
159 * packet-by-packet. The entire list of packets is first dequeued and
160 * sent out by the following function.
161 */
162 static void dummynet_send(struct mbuf *m);
163
164 #define HASHSIZE 16
165 #define HASH(num) ((((num) >> 8) ^ ((num) >> 4) ^ (num)) & 0x0f)
166 static struct dn_pipe_head pipehash[HASHSIZE]; /* all pipes */
167 static struct dn_flow_set_head flowsethash[HASHSIZE]; /* all flowsets */
168
169 #ifdef SYSCTL_NODE
170 SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet,
171 CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Dummynet");
172 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, hash_size,
173 CTLFLAG_RW | CTLFLAG_LOCKED, &dn_hash_size, 0, "Default hash table size");
174 SYSCTL_QUAD(_net_inet_ip_dummynet, OID_AUTO, curr_time,
175 CTLFLAG_RD | CTLFLAG_LOCKED, &curr_time, "Current tick");
176 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, ready_heap,
177 CTLFLAG_RD | CTLFLAG_LOCKED, &ready_heap.size, 0, "Size of ready heap");
178 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, extract_heap,
179 CTLFLAG_RD | CTLFLAG_LOCKED, &extract_heap.size, 0, "Size of extract heap");
180 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, searches,
181 CTLFLAG_RD | CTLFLAG_LOCKED, &searches, 0, "Number of queue searches");
182 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, search_steps,
183 CTLFLAG_RD | CTLFLAG_LOCKED, &search_steps, 0, "Number of queue search steps");
184 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire,
185 CTLFLAG_RW | CTLFLAG_LOCKED, &pipe_expire, 0, "Expire queue if empty");
186 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, max_chain_len,
187 CTLFLAG_RW | CTLFLAG_LOCKED, &dn_max_ratio, 0,
188 "Max ratio between dynamic queues and buckets");
189 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth,
190 CTLFLAG_RD | CTLFLAG_LOCKED, &red_lookup_depth, 0, "Depth of RED lookup table");
191 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size,
192 CTLFLAG_RD | CTLFLAG_LOCKED, &red_avg_pkt_size, 0, "RED Medium packet size");
193 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size,
194 CTLFLAG_RD | CTLFLAG_LOCKED, &red_max_pkt_size, 0, "RED Max packet size");
195 #endif
196
197 #ifdef DUMMYNET_DEBUG
198 int dummynet_debug = 0;
199 #ifdef SYSCTL_NODE
200 SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED, &dummynet_debug,
201 0, "control debugging printfs");
202 #endif
203 #define DPRINTF(X) if (dummynet_debug) printf X
204 #else
205 #define DPRINTF(X)
206 #endif
207
208 /* dummynet lock */
209 static LCK_GRP_DECLARE(dn_mutex_grp, "dn");
210 static LCK_MTX_DECLARE(dn_mutex, &dn_mutex_grp);
211
212 static int config_pipe(struct dn_pipe *p);
213 static int ip_dn_ctl(struct sockopt *sopt);
214
215 static void dummynet(void *);
216 static void dummynet_flush(void);
217 void dummynet_drain(void);
218 static ip_dn_io_t dummynet_io;
219
220 static void cp_flow_set_to_64_user(struct dn_flow_set *set, struct dn_flow_set_64 *fs_bp);
221 static void cp_queue_to_64_user( struct dn_flow_queue *q, struct dn_flow_queue_64 *qp);
222 static char *cp_pipe_to_64_user(struct dn_pipe *p, struct dn_pipe_64 *pipe_bp);
223 static char* dn_copy_set_64(struct dn_flow_set *set, char *bp);
224 static int cp_pipe_from_user_64( struct sockopt *sopt, struct dn_pipe *p );
225
226 static void cp_flow_set_to_32_user(struct dn_flow_set *set, struct dn_flow_set_32 *fs_bp);
227 static void cp_queue_to_32_user( struct dn_flow_queue *q, struct dn_flow_queue_32 *qp);
228 static char *cp_pipe_to_32_user(struct dn_pipe *p, struct dn_pipe_32 *pipe_bp);
229 static char* dn_copy_set_32(struct dn_flow_set *set, char *bp);
230 static int cp_pipe_from_user_32( struct sockopt *sopt, struct dn_pipe *p );
231
232 struct eventhandler_lists_ctxt dummynet_evhdlr_ctxt;
233
234 uint32_t
my_random(void)235 my_random(void)
236 {
237 uint32_t val;
238 read_frandom(&val, sizeof(val));
239 val &= 0x7FFFFFFF;
240
241 return val;
242 }
243
244 /*
245 * Heap management functions.
246 *
247 * In the heap, first node is element 0. Children of i are 2i+1 and 2i+2.
248 * Some macros help finding parent/children so we can optimize them.
249 *
250 * heap_init() is called to expand the heap when needed.
251 * Increment size in blocks of 16 entries.
252 * XXX failure to allocate a new element is a pretty bad failure
253 * as we basically stall a whole queue forever!!
254 * Returns 1 on error, 0 on success
255 */
256 #define HEAP_FATHER(x) ( ( (x) - 1 ) / 2 )
257 #define HEAP_LEFT(x) ( 2*(x) + 1 )
258 #define HEAP_IS_LEFT(x) ( (x) & 1 )
259 #define HEAP_RIGHT(x) ( 2*(x) + 2 )
260 #define HEAP_SWAP(a, b, buffer) { buffer = a ; a = b ; b = buffer ; }
261 #define HEAP_INCREMENT 15
262
263
264 int
cp_pipe_from_user_32(struct sockopt * sopt,struct dn_pipe * p)265 cp_pipe_from_user_32( struct sockopt *sopt, struct dn_pipe *p )
266 {
267 struct dn_pipe_32 user_pipe_32;
268 int error = 0;
269
270 error = sooptcopyin(sopt, &user_pipe_32, sizeof(struct dn_pipe_32), sizeof(struct dn_pipe_32));
271 if (!error) {
272 p->pipe_nr = user_pipe_32.pipe_nr;
273 p->bandwidth = user_pipe_32.bandwidth;
274 p->delay = user_pipe_32.delay;
275 p->V = user_pipe_32.V;
276 p->sum = user_pipe_32.sum;
277 p->numbytes = user_pipe_32.numbytes;
278 p->sched_time = user_pipe_32.sched_time;
279 bcopy( user_pipe_32.if_name, p->if_name, IFNAMSIZ);
280 p->ready = user_pipe_32.ready;
281
282 p->fs.fs_nr = user_pipe_32.fs.fs_nr;
283 p->fs.flags_fs = user_pipe_32.fs.flags_fs;
284 p->fs.parent_nr = user_pipe_32.fs.parent_nr;
285 p->fs.weight = user_pipe_32.fs.weight;
286 p->fs.qsize = user_pipe_32.fs.qsize;
287 p->fs.plr = user_pipe_32.fs.plr;
288 p->fs.flow_mask = user_pipe_32.fs.flow_mask;
289 p->fs.rq_size = user_pipe_32.fs.rq_size;
290 p->fs.rq_elements = user_pipe_32.fs.rq_elements;
291 p->fs.last_expired = user_pipe_32.fs.last_expired;
292 p->fs.backlogged = user_pipe_32.fs.backlogged;
293 p->fs.w_q = user_pipe_32.fs.w_q;
294 p->fs.max_th = user_pipe_32.fs.max_th;
295 p->fs.min_th = user_pipe_32.fs.min_th;
296 p->fs.max_p = user_pipe_32.fs.max_p;
297 p->fs.c_1 = user_pipe_32.fs.c_1;
298 p->fs.c_2 = user_pipe_32.fs.c_2;
299 p->fs.c_3 = user_pipe_32.fs.c_3;
300 p->fs.c_4 = user_pipe_32.fs.c_4;
301 p->fs.lookup_depth = user_pipe_32.fs.lookup_depth;
302 p->fs.lookup_step = user_pipe_32.fs.lookup_step;
303 p->fs.lookup_weight = user_pipe_32.fs.lookup_weight;
304 p->fs.avg_pkt_size = user_pipe_32.fs.avg_pkt_size;
305 p->fs.max_pkt_size = user_pipe_32.fs.max_pkt_size;
306 }
307 return error;
308 }
309
310
311 int
cp_pipe_from_user_64(struct sockopt * sopt,struct dn_pipe * p)312 cp_pipe_from_user_64( struct sockopt *sopt, struct dn_pipe *p )
313 {
314 struct dn_pipe_64 user_pipe_64;
315 int error = 0;
316
317 error = sooptcopyin(sopt, &user_pipe_64, sizeof(struct dn_pipe_64), sizeof(struct dn_pipe_64));
318 if (!error) {
319 p->pipe_nr = user_pipe_64.pipe_nr;
320 p->bandwidth = user_pipe_64.bandwidth;
321 p->delay = user_pipe_64.delay;
322 p->V = user_pipe_64.V;
323 p->sum = user_pipe_64.sum;
324 p->numbytes = user_pipe_64.numbytes;
325 p->sched_time = user_pipe_64.sched_time;
326 bcopy( user_pipe_64.if_name, p->if_name, IFNAMSIZ);
327 p->ready = user_pipe_64.ready;
328
329 p->fs.fs_nr = user_pipe_64.fs.fs_nr;
330 p->fs.flags_fs = user_pipe_64.fs.flags_fs;
331 p->fs.parent_nr = user_pipe_64.fs.parent_nr;
332 p->fs.weight = user_pipe_64.fs.weight;
333 p->fs.qsize = user_pipe_64.fs.qsize;
334 p->fs.plr = user_pipe_64.fs.plr;
335 p->fs.flow_mask = user_pipe_64.fs.flow_mask;
336 p->fs.rq_size = user_pipe_64.fs.rq_size;
337 p->fs.rq_elements = user_pipe_64.fs.rq_elements;
338 p->fs.last_expired = user_pipe_64.fs.last_expired;
339 p->fs.backlogged = user_pipe_64.fs.backlogged;
340 p->fs.w_q = user_pipe_64.fs.w_q;
341 p->fs.max_th = user_pipe_64.fs.max_th;
342 p->fs.min_th = user_pipe_64.fs.min_th;
343 p->fs.max_p = user_pipe_64.fs.max_p;
344 p->fs.c_1 = user_pipe_64.fs.c_1;
345 p->fs.c_2 = user_pipe_64.fs.c_2;
346 p->fs.c_3 = user_pipe_64.fs.c_3;
347 p->fs.c_4 = user_pipe_64.fs.c_4;
348 p->fs.lookup_depth = user_pipe_64.fs.lookup_depth;
349 p->fs.lookup_step = user_pipe_64.fs.lookup_step;
350 p->fs.lookup_weight = user_pipe_64.fs.lookup_weight;
351 p->fs.avg_pkt_size = user_pipe_64.fs.avg_pkt_size;
352 p->fs.max_pkt_size = user_pipe_64.fs.max_pkt_size;
353 }
354 return error;
355 }
356
357 static void
cp_flow_set_to_32_user(struct dn_flow_set * set,struct dn_flow_set_32 * fs_bp)358 cp_flow_set_to_32_user(struct dn_flow_set *set, struct dn_flow_set_32 *fs_bp)
359 {
360 fs_bp->fs_nr = set->fs_nr;
361 fs_bp->flags_fs = set->flags_fs;
362 fs_bp->parent_nr = set->parent_nr;
363 fs_bp->weight = set->weight;
364 fs_bp->qsize = set->qsize;
365 fs_bp->plr = set->plr;
366 fs_bp->flow_mask = set->flow_mask;
367 fs_bp->rq_size = set->rq_size;
368 fs_bp->rq_elements = set->rq_elements;
369 fs_bp->last_expired = set->last_expired;
370 fs_bp->backlogged = set->backlogged;
371 fs_bp->w_q = set->w_q;
372 fs_bp->max_th = set->max_th;
373 fs_bp->min_th = set->min_th;
374 fs_bp->max_p = set->max_p;
375 fs_bp->c_1 = set->c_1;
376 fs_bp->c_2 = set->c_2;
377 fs_bp->c_3 = set->c_3;
378 fs_bp->c_4 = set->c_4;
379 fs_bp->w_q_lookup = CAST_DOWN_EXPLICIT(user32_addr_t, set->w_q_lookup);
380 fs_bp->lookup_depth = set->lookup_depth;
381 fs_bp->lookup_step = set->lookup_step;
382 fs_bp->lookup_weight = set->lookup_weight;
383 fs_bp->avg_pkt_size = set->avg_pkt_size;
384 fs_bp->max_pkt_size = set->max_pkt_size;
385 }
386
387 static void
cp_flow_set_to_64_user(struct dn_flow_set * set,struct dn_flow_set_64 * fs_bp)388 cp_flow_set_to_64_user(struct dn_flow_set *set, struct dn_flow_set_64 *fs_bp)
389 {
390 fs_bp->fs_nr = set->fs_nr;
391 fs_bp->flags_fs = set->flags_fs;
392 fs_bp->parent_nr = set->parent_nr;
393 fs_bp->weight = set->weight;
394 fs_bp->qsize = set->qsize;
395 fs_bp->plr = set->plr;
396 fs_bp->flow_mask = set->flow_mask;
397 fs_bp->rq_size = set->rq_size;
398 fs_bp->rq_elements = set->rq_elements;
399 fs_bp->last_expired = set->last_expired;
400 fs_bp->backlogged = set->backlogged;
401 fs_bp->w_q = set->w_q;
402 fs_bp->max_th = set->max_th;
403 fs_bp->min_th = set->min_th;
404 fs_bp->max_p = set->max_p;
405 fs_bp->c_1 = set->c_1;
406 fs_bp->c_2 = set->c_2;
407 fs_bp->c_3 = set->c_3;
408 fs_bp->c_4 = set->c_4;
409 fs_bp->w_q_lookup = CAST_DOWN(user64_addr_t, set->w_q_lookup);
410 fs_bp->lookup_depth = set->lookup_depth;
411 fs_bp->lookup_step = set->lookup_step;
412 fs_bp->lookup_weight = set->lookup_weight;
413 fs_bp->avg_pkt_size = set->avg_pkt_size;
414 fs_bp->max_pkt_size = set->max_pkt_size;
415 }
416
417 static
418 void
cp_queue_to_32_user(struct dn_flow_queue * q,struct dn_flow_queue_32 * qp)419 cp_queue_to_32_user( struct dn_flow_queue *q, struct dn_flow_queue_32 *qp)
420 {
421 qp->id = q->id;
422 qp->len = q->len;
423 qp->len_bytes = q->len_bytes;
424 qp->numbytes = q->numbytes;
425 qp->tot_pkts = q->tot_pkts;
426 qp->tot_bytes = q->tot_bytes;
427 qp->drops = q->drops;
428 qp->hash_slot = q->hash_slot;
429 qp->avg = q->avg;
430 qp->count = q->count;
431 qp->random = q->random;
432 qp->q_time = (u_int32_t)q->q_time;
433 qp->heap_pos = q->heap_pos;
434 qp->sched_time = q->sched_time;
435 qp->S = q->S;
436 qp->F = q->F;
437 }
438
439 static
440 void
cp_queue_to_64_user(struct dn_flow_queue * q,struct dn_flow_queue_64 * qp)441 cp_queue_to_64_user( struct dn_flow_queue *q, struct dn_flow_queue_64 *qp)
442 {
443 qp->id = q->id;
444 qp->len = q->len;
445 qp->len_bytes = q->len_bytes;
446 qp->numbytes = q->numbytes;
447 qp->tot_pkts = q->tot_pkts;
448 qp->tot_bytes = q->tot_bytes;
449 qp->drops = q->drops;
450 qp->hash_slot = q->hash_slot;
451 qp->avg = q->avg;
452 qp->count = q->count;
453 qp->random = q->random;
454 qp->q_time = (u_int32_t)q->q_time;
455 qp->heap_pos = q->heap_pos;
456 qp->sched_time = q->sched_time;
457 qp->S = q->S;
458 qp->F = q->F;
459 }
460
461 static
462 char *
cp_pipe_to_32_user(struct dn_pipe * p,struct dn_pipe_32 * pipe_bp)463 cp_pipe_to_32_user(struct dn_pipe *p, struct dn_pipe_32 *pipe_bp)
464 {
465 char *bp;
466
467 pipe_bp->pipe_nr = p->pipe_nr;
468 pipe_bp->bandwidth = p->bandwidth;
469 pipe_bp->delay = p->delay;
470 bcopy( &(p->scheduler_heap), &(pipe_bp->scheduler_heap), sizeof(struct dn_heap_32));
471 pipe_bp->scheduler_heap.p = CAST_DOWN_EXPLICIT(user32_addr_t, pipe_bp->scheduler_heap.p);
472 bcopy( &(p->not_eligible_heap), &(pipe_bp->not_eligible_heap), sizeof(struct dn_heap_32));
473 pipe_bp->not_eligible_heap.p = CAST_DOWN_EXPLICIT(user32_addr_t, pipe_bp->not_eligible_heap.p);
474 bcopy( &(p->idle_heap), &(pipe_bp->idle_heap), sizeof(struct dn_heap_32));
475 pipe_bp->idle_heap.p = CAST_DOWN_EXPLICIT(user32_addr_t, pipe_bp->idle_heap.p);
476 pipe_bp->V = p->V;
477 pipe_bp->sum = p->sum;
478 pipe_bp->numbytes = p->numbytes;
479 pipe_bp->sched_time = p->sched_time;
480 bcopy( p->if_name, pipe_bp->if_name, IFNAMSIZ);
481 pipe_bp->ifp = CAST_DOWN_EXPLICIT(user32_addr_t, p->ifp);
482 pipe_bp->ready = p->ready;
483
484 cp_flow_set_to_32_user( &(p->fs), &(pipe_bp->fs));
485
486 pipe_bp->delay = (pipe_bp->delay * 1000) / (hz * 10);
487 /*
488 * XXX the following is a hack based on ->next being the
489 * first field in dn_pipe and dn_flow_set. The correct
490 * solution would be to move the dn_flow_set to the beginning
491 * of struct dn_pipe.
492 */
493 pipe_bp->next = CAST_DOWN_EXPLICIT( user32_addr_t, DN_IS_PIPE );
494 /* clean pointers */
495 pipe_bp->head = pipe_bp->tail = (user32_addr_t) 0;
496 pipe_bp->fs.next = (user32_addr_t)0;
497 pipe_bp->fs.pipe = (user32_addr_t)0;
498 pipe_bp->fs.rq = (user32_addr_t)0;
499 bp = ((char *)pipe_bp) + sizeof(struct dn_pipe_32);
500 return dn_copy_set_32( &(p->fs), bp);
501 }
502
503 static
504 char *
cp_pipe_to_64_user(struct dn_pipe * p,struct dn_pipe_64 * pipe_bp)505 cp_pipe_to_64_user(struct dn_pipe *p, struct dn_pipe_64 *pipe_bp)
506 {
507 char *bp;
508
509 pipe_bp->pipe_nr = p->pipe_nr;
510 pipe_bp->bandwidth = p->bandwidth;
511 pipe_bp->delay = p->delay;
512 bcopy( &(p->scheduler_heap), &(pipe_bp->scheduler_heap), sizeof(struct dn_heap_64));
513 pipe_bp->scheduler_heap.p = CAST_DOWN(user64_addr_t, pipe_bp->scheduler_heap.p);
514 bcopy( &(p->not_eligible_heap), &(pipe_bp->not_eligible_heap), sizeof(struct dn_heap_64));
515 pipe_bp->not_eligible_heap.p = CAST_DOWN(user64_addr_t, pipe_bp->not_eligible_heap.p);
516 bcopy( &(p->idle_heap), &(pipe_bp->idle_heap), sizeof(struct dn_heap_64));
517 pipe_bp->idle_heap.p = CAST_DOWN(user64_addr_t, pipe_bp->idle_heap.p);
518 pipe_bp->V = p->V;
519 pipe_bp->sum = p->sum;
520 pipe_bp->numbytes = p->numbytes;
521 pipe_bp->sched_time = p->sched_time;
522 bcopy( p->if_name, pipe_bp->if_name, IFNAMSIZ);
523 pipe_bp->ifp = CAST_DOWN(user64_addr_t, p->ifp);
524 pipe_bp->ready = p->ready;
525
526 cp_flow_set_to_64_user( &(p->fs), &(pipe_bp->fs));
527
528 pipe_bp->delay = (pipe_bp->delay * 1000) / (hz * 10);
529 /*
530 * XXX the following is a hack based on ->next being the
531 * first field in dn_pipe and dn_flow_set. The correct
532 * solution would be to move the dn_flow_set to the beginning
533 * of struct dn_pipe.
534 */
535 pipe_bp->next = CAST_DOWN( user64_addr_t, DN_IS_PIPE );
536 /* clean pointers */
537 pipe_bp->head = pipe_bp->tail = USER_ADDR_NULL;
538 pipe_bp->fs.next = USER_ADDR_NULL;
539 pipe_bp->fs.pipe = USER_ADDR_NULL;
540 pipe_bp->fs.rq = USER_ADDR_NULL;
541 bp = ((char *)pipe_bp) + sizeof(struct dn_pipe_64);
542 return dn_copy_set_64( &(p->fs), bp);
543 }
544
545 static int
heap_init(struct dn_heap * h,int new_size)546 heap_init(struct dn_heap *h, int new_size)
547 {
548 struct dn_heap_entry *p;
549
550 if (h->size >= new_size) {
551 printf("dummynet: heap_init, Bogus call, have %d want %d\n",
552 h->size, new_size);
553 return 0;
554 }
555 new_size = (new_size + HEAP_INCREMENT) & ~HEAP_INCREMENT;
556 p = _MALLOC(new_size * sizeof(*p), M_DUMMYNET, M_DONTWAIT );
557 if (p == NULL) {
558 printf("dummynet: heap_init, resize %d failed\n", new_size );
559 return 1; /* error */
560 }
561 if (h->size > 0) {
562 bcopy(h->p, p, h->size * sizeof(*p));
563 FREE(h->p, M_DUMMYNET);
564 }
565 h->p = p;
566 h->size = new_size;
567 return 0;
568 }
569
570 /*
571 * Insert element in heap. Normally, p != NULL, we insert p in
572 * a new position and bubble up. If p == NULL, then the element is
573 * already in place, and key is the position where to start the
574 * bubble-up.
575 * Returns 1 on failure (cannot allocate new heap entry)
576 *
577 * If offset > 0 the position (index, int) of the element in the heap is
578 * also stored in the element itself at the given offset in bytes.
579 */
580 #define SET_OFFSET(heap, node) \
581 if (heap->offset > 0) \
582 *((int *)(void *)((char *)(heap->p[node].object) + heap->offset)) = node ;
583 /*
584 * RESET_OFFSET is used for sanity checks. It sets offset to an invalid value.
585 */
586 #define RESET_OFFSET(heap, node) \
587 if (heap->offset > 0) \
588 *((int *)(void *)((char *)(heap->p[node].object) + heap->offset)) = -1 ;
589 static int
heap_insert(struct dn_heap * h,dn_key key1,void * p)590 heap_insert(struct dn_heap *h, dn_key key1, void *p)
591 {
592 int son = h->elements;
593
594 if (p == NULL) { /* data already there, set starting point */
595 VERIFY(key1 < INT_MAX);
596 son = (int)key1;
597 } else { /* insert new element at the end, possibly resize */
598 son = h->elements;
599 if (son == h->size) { /* need resize... */
600 if (heap_init(h, h->elements + 1)) {
601 return 1; /* failure... */
602 }
603 }
604 h->p[son].object = p;
605 h->p[son].key = key1;
606 h->elements++;
607 }
608 while (son > 0) { /* bubble up */
609 int father = HEAP_FATHER(son);
610 struct dn_heap_entry tmp;
611
612 if (DN_KEY_LT( h->p[father].key, h->p[son].key )) {
613 break; /* found right position */
614 }
615 /* son smaller than father, swap and repeat */
616 HEAP_SWAP(h->p[son], h->p[father], tmp);
617 SET_OFFSET(h, son);
618 son = father;
619 }
620 SET_OFFSET(h, son);
621 return 0;
622 }
623
624 /*
625 * remove top element from heap, or obj if obj != NULL
626 */
627 static void
heap_extract(struct dn_heap * h,void * obj)628 heap_extract(struct dn_heap *h, void *obj)
629 {
630 int child, father, maxelt = h->elements - 1;
631
632 if (maxelt < 0) {
633 printf("dummynet: warning, extract from empty heap 0x%llx\n",
634 (uint64_t)VM_KERNEL_ADDRPERM(h));
635 return;
636 }
637 father = 0; /* default: move up smallest child */
638 if (obj != NULL) { /* extract specific element, index is at offset */
639 if (h->offset <= 0) {
640 panic("dummynet: heap_extract from middle not supported on this heap!!!");
641 }
642 father = *((int *)(void *)((char *)obj + h->offset));
643 if (father < 0 || father >= h->elements) {
644 printf("dummynet: heap_extract, father %d out of bound 0..%d\n",
645 father, h->elements);
646 panic("dummynet: heap_extract");
647 }
648 }
649 RESET_OFFSET(h, father);
650 child = HEAP_LEFT(father); /* left child */
651 while (child <= maxelt) { /* valid entry */
652 if (child != maxelt && DN_KEY_LT(h->p[child + 1].key, h->p[child].key)) {
653 child = child + 1; /* take right child, otherwise left */
654 }
655 h->p[father] = h->p[child];
656 SET_OFFSET(h, father);
657 father = child;
658 child = HEAP_LEFT(child); /* left child for next loop */
659 }
660 h->elements--;
661 if (father != maxelt) {
662 /*
663 * Fill hole with last entry and bubble up, reusing the insert code
664 */
665 h->p[father] = h->p[maxelt];
666 heap_insert(h, father, NULL); /* this one cannot fail */
667 }
668 }
669
670 /*
671 * heapify() will reorganize data inside an array to maintain the
672 * heap property. It is needed when we delete a bunch of entries.
673 */
674 static void
heapify(struct dn_heap * h)675 heapify(struct dn_heap *h)
676 {
677 int i;
678
679 for (i = 0; i < h->elements; i++) {
680 heap_insert(h, i, NULL);
681 }
682 }
683
684 /*
685 * cleanup the heap and free data structure
686 */
687 static void
heap_free(struct dn_heap * h)688 heap_free(struct dn_heap *h)
689 {
690 if (h->size > 0) {
691 FREE(h->p, M_DUMMYNET);
692 }
693 bzero(h, sizeof(*h));
694 }
695
696 /*
697 * --- end of heap management functions ---
698 */
699
700 /*
701 * Return the mbuf tag holding the dummynet state. As an optimization
702 * this is assumed to be the first tag on the list. If this turns out
703 * wrong we'll need to search the list.
704 */
705 static struct dn_pkt_tag *
dn_tag_get(struct mbuf * m)706 dn_tag_get(struct mbuf *m)
707 {
708 struct m_tag *mtag = m_tag_first(m);
709
710 if (!(mtag != NULL &&
711 mtag->m_tag_id == KERNEL_MODULE_TAG_ID &&
712 mtag->m_tag_type == KERNEL_TAG_TYPE_DUMMYNET)) {
713 panic("packet on dummynet queue w/o dummynet tag: 0x%llx",
714 (uint64_t)VM_KERNEL_ADDRPERM(m));
715 }
716
717 return (struct dn_pkt_tag *)(mtag + 1);
718 }
719
720 /*
721 * Scheduler functions:
722 *
723 * transmit_event() is called when the delay-line needs to enter
724 * the scheduler, either because of existing pkts getting ready,
725 * or new packets entering the queue. The event handled is the delivery
726 * time of the packet.
727 *
728 * ready_event() does something similar with fixed-rate queues, and the
729 * event handled is the finish time of the head pkt.
730 *
731 * wfq_ready_event() does something similar with WF2Q queues, and the
732 * event handled is the start time of the head pkt.
733 *
734 * In all cases, we make sure that the data structures are consistent
735 * before passing pkts out, because this might trigger recursive
736 * invocations of the procedures.
737 */
738 static void
transmit_event(struct dn_pipe * pipe,struct mbuf ** head,struct mbuf ** tail)739 transmit_event(struct dn_pipe *pipe, struct mbuf **head, struct mbuf **tail)
740 {
741 struct mbuf *m;
742 struct dn_pkt_tag *pkt = NULL;
743 u_int64_t schedule_time;
744
745 LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);
746 ASSERT(serialize >= 0);
747 if (serialize == 0) {
748 while ((m = pipe->head) != NULL) {
749 pkt = dn_tag_get(m);
750 if (!DN_KEY_LEQ(pkt->dn_output_time, curr_time)) {
751 break;
752 }
753
754 pipe->head = m->m_nextpkt;
755 if (*tail != NULL) {
756 (*tail)->m_nextpkt = m;
757 } else {
758 *head = m;
759 }
760 *tail = m;
761 }
762
763 if (*tail != NULL) {
764 (*tail)->m_nextpkt = NULL;
765 }
766 }
767
768 schedule_time = pkt == NULL || DN_KEY_LEQ(pkt->dn_output_time, curr_time) ?
769 curr_time + 1 : pkt->dn_output_time;
770
771 /* if there are leftover packets, put the pipe into the heap for next ready event */
772 if ((m = pipe->head) != NULL) {
773 pkt = dn_tag_get(m);
774 /* XXX should check errors on heap_insert, by draining the
775 * whole pipe p and hoping in the future we are more successful
776 */
777 heap_insert(&extract_heap, schedule_time, pipe);
778 }
779 }
780
781 /*
782 * the following macro computes how many ticks we have to wait
783 * before being able to transmit a packet. The credit is taken from
784 * either a pipe (WF2Q) or a flow_queue (per-flow queueing)
785 */
786
787 /* hz is 100, which gives a granularity of 10ms in the old timer.
788 * The timer has been changed to fire every 1ms, so the use of
789 * hz has been modified here. All instances of hz have been left
790 * in place but adjusted by a factor of 10 so that hz is functionally
791 * equal to 1000.
792 */
793 #define SET_TICKS(_m, q, p) \
794 ((_m)->m_pkthdr.len*8*(hz*10) - (q)->numbytes + p->bandwidth - 1 ) / \
795 p->bandwidth ;
796
797 /*
798 * extract pkt from queue, compute output time (could be now)
799 * and put into delay line (p_queue)
800 */
801 static void
move_pkt(struct mbuf * pkt,struct dn_flow_queue * q,struct dn_pipe * p,int len)802 move_pkt(struct mbuf *pkt, struct dn_flow_queue *q,
803 struct dn_pipe *p, int len)
804 {
805 struct dn_pkt_tag *dt = dn_tag_get(pkt);
806
807 q->head = pkt->m_nextpkt;
808 q->len--;
809 q->len_bytes -= len;
810
811 dt->dn_output_time = curr_time + p->delay;
812
813 if (p->head == NULL) {
814 p->head = pkt;
815 } else {
816 p->tail->m_nextpkt = pkt;
817 }
818 p->tail = pkt;
819 p->tail->m_nextpkt = NULL;
820 }
821
822 /*
823 * ready_event() is invoked every time the queue must enter the
824 * scheduler, either because the first packet arrives, or because
825 * a previously scheduled event fired.
826 * On invokation, drain as many pkts as possible (could be 0) and then
827 * if there are leftover packets reinsert the pkt in the scheduler.
828 */
829 static void
ready_event(struct dn_flow_queue * q,struct mbuf ** head,struct mbuf ** tail)830 ready_event(struct dn_flow_queue *q, struct mbuf **head, struct mbuf **tail)
831 {
832 struct mbuf *pkt;
833 struct dn_pipe *p = q->fs->pipe;
834 int p_was_empty;
835
836 LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);
837
838 if (p == NULL) {
839 printf("dummynet: ready_event pipe is gone\n");
840 return;
841 }
842 p_was_empty = (p->head == NULL);
843
844 /*
845 * schedule fixed-rate queues linked to this pipe:
846 * Account for the bw accumulated since last scheduling, then
847 * drain as many pkts as allowed by q->numbytes and move to
848 * the delay line (in p) computing output time.
849 * bandwidth==0 (no limit) means we can drain the whole queue,
850 * setting len_scaled = 0 does the job.
851 */
852 q->numbytes += (curr_time - q->sched_time) * p->bandwidth;
853 while ((pkt = q->head) != NULL) {
854 int len = pkt->m_pkthdr.len;
855 int len_scaled = p->bandwidth ? len * 8 * (hz * 10) : 0;
856 if (len_scaled > q->numbytes) {
857 break;
858 }
859 q->numbytes -= len_scaled;
860 move_pkt(pkt, q, p, len);
861 }
862 /*
863 * If we have more packets queued, schedule next ready event
864 * (can only occur when bandwidth != 0, otherwise we would have
865 * flushed the whole queue in the previous loop).
866 * To this purpose we record the current time and compute how many
867 * ticks to go for the finish time of the packet.
868 */
869 if ((pkt = q->head) != NULL) { /* this implies bandwidth != 0 */
870 dn_key t = SET_TICKS(pkt, q, p); /* ticks i have to wait */
871 q->sched_time = curr_time;
872 heap_insert(&ready_heap, curr_time + t, (void *)q );
873 /* XXX should check errors on heap_insert, and drain the whole
874 * queue on error hoping next time we are luckier.
875 */
876 } else { /* RED needs to know when the queue becomes empty */
877 q->q_time = curr_time;
878 q->numbytes = 0;
879 }
880 /*
881 * If the delay line was empty call transmit_event(p) now.
882 * Otherwise, the scheduler will take care of it.
883 */
884 if (p_was_empty) {
885 transmit_event(p, head, tail);
886 }
887 }
888
889 /*
890 * Called when we can transmit packets on WF2Q queues. Take pkts out of
891 * the queues at their start time, and enqueue into the delay line.
892 * Packets are drained until p->numbytes < 0. As long as
893 * len_scaled >= p->numbytes, the packet goes into the delay line
894 * with a deadline p->delay. For the last packet, if p->numbytes<0,
895 * there is an additional delay.
896 */
897 static void
ready_event_wfq(struct dn_pipe * p,struct mbuf ** head,struct mbuf ** tail)898 ready_event_wfq(struct dn_pipe *p, struct mbuf **head, struct mbuf **tail)
899 {
900 int p_was_empty = (p->head == NULL);
901 struct dn_heap *sch = &(p->scheduler_heap);
902 struct dn_heap *neh = &(p->not_eligible_heap);
903 int64_t p_numbytes = p->numbytes;
904
905 LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);
906
907 if (p->if_name[0] == 0) { /* tx clock is simulated */
908 p_numbytes += (curr_time - p->sched_time) * p->bandwidth;
909 } else { /* tx clock is for real, the ifq must be empty or this is a NOP */
910 if (p->ifp && !IFCQ_IS_EMPTY(p->ifp->if_snd)) {
911 return;
912 } else {
913 DPRINTF(("dummynet: pipe %d ready from %s --\n",
914 p->pipe_nr, p->if_name));
915 }
916 }
917
918 /*
919 * While we have backlogged traffic AND credit, we need to do
920 * something on the queue.
921 */
922 while (p_numbytes >= 0 && (sch->elements > 0 || neh->elements > 0)) {
923 if (sch->elements > 0) { /* have some eligible pkts to send out */
924 struct dn_flow_queue *q = sch->p[0].object;
925 struct mbuf *pkt = q->head;
926 struct dn_flow_set *fs = q->fs;
927 u_int32_t len = pkt->m_pkthdr.len;
928 u_int64_t len_scaled = p->bandwidth ? len * 8 * (hz * 10) : 0;
929
930 heap_extract(sch, NULL); /* remove queue from heap */
931 p_numbytes -= len_scaled;
932 move_pkt(pkt, q, p, len);
933
934 p->V += (len << MY_M) / p->sum; /* update V */
935 q->S = q->F; /* update start time */
936 if (q->len == 0) { /* Flow not backlogged any more */
937 fs->backlogged--;
938 heap_insert(&(p->idle_heap), q->F, q);
939 } else { /* still backlogged */
940 /*
941 * update F and position in backlogged queue, then
942 * put flow in not_eligible_heap (we will fix this later).
943 */
944 len = (q->head)->m_pkthdr.len;
945 q->F += (len << MY_M) / (u_int64_t) fs->weight;
946 if (DN_KEY_LEQ(q->S, p->V)) {
947 heap_insert(neh, q->S, q);
948 } else {
949 heap_insert(sch, q->F, q);
950 }
951 }
952 }
953 /*
954 * now compute V = max(V, min(S_i)). Remember that all elements in sch
955 * have by definition S_i <= V so if sch is not empty, V is surely
956 * the max and we must not update it. Conversely, if sch is empty
957 * we only need to look at neh.
958 */
959 if (sch->elements == 0 && neh->elements > 0) {
960 p->V = MAX64( p->V, neh->p[0].key );
961 }
962 /* move from neh to sch any packets that have become eligible */
963 while (neh->elements > 0 && DN_KEY_LEQ(neh->p[0].key, p->V)) {
964 struct dn_flow_queue *q = neh->p[0].object;
965 heap_extract(neh, NULL);
966 heap_insert(sch, q->F, q);
967 }
968
969 if (p->if_name[0] != '\0') {/* tx clock is from a real thing */
970 p_numbytes = -1; /* mark not ready for I/O */
971 break;
972 }
973 }
974 if (sch->elements == 0 && neh->elements == 0 && p_numbytes >= 0
975 && p->idle_heap.elements > 0) {
976 /*
977 * no traffic and no events scheduled. We can get rid of idle-heap.
978 */
979 int i;
980
981 for (i = 0; i < p->idle_heap.elements; i++) {
982 struct dn_flow_queue *q = p->idle_heap.p[i].object;
983
984 q->F = 0;
985 q->S = q->F + 1;
986 }
987 p->sum = 0;
988 p->V = 0;
989 p->idle_heap.elements = 0;
990 }
991 /*
992 * If we are getting clocks from dummynet (not a real interface) and
993 * If we are under credit, schedule the next ready event.
994 * Also fix the delivery time of the last packet.
995 */
996 if (p->if_name[0] == 0 && p_numbytes < 0) { /* this implies bandwidth >0 */
997 dn_key t = 0; /* number of ticks i have to wait */
998
999 if (p->bandwidth > 0) {
1000 t = (p->bandwidth - 1 - p_numbytes) / p->bandwidth;
1001 }
1002 dn_tag_get(p->tail)->dn_output_time += t;
1003 p->sched_time = curr_time;
1004 heap_insert(&wfq_ready_heap, curr_time + t, (void *)p);
1005 /* XXX should check errors on heap_insert, and drain the whole
1006 * queue on error hoping next time we are luckier.
1007 */
1008 }
1009
1010 /* Fit (adjust if necessary) 64bit result into 32bit variable. */
1011 if (p_numbytes > INT_MAX) {
1012 p->numbytes = INT_MAX;
1013 } else if (p_numbytes < INT_MIN) {
1014 p->numbytes = INT_MIN;
1015 } else {
1016 p->numbytes = (int)p_numbytes;
1017 }
1018
1019 /*
1020 * If the delay line was empty call transmit_event(p) now.
1021 * Otherwise, the scheduler will take care of it.
1022 */
1023 if (p_was_empty) {
1024 transmit_event(p, head, tail);
1025 }
1026 }
1027
1028 /*
1029 * This is called every 1ms. It is used to
1030 * increment the current tick counter and schedule expired events.
1031 */
1032 static void
dummynet(__unused void * unused)1033 dummynet(__unused void * unused)
1034 {
1035 void *p; /* generic parameter to handler */
1036 struct dn_heap *h;
1037 struct dn_heap *heaps[3];
1038 struct mbuf *head = NULL, *tail = NULL;
1039 int i;
1040 struct dn_pipe *pe;
1041 struct timespec ts;
1042 struct timeval tv;
1043
1044 heaps[0] = &ready_heap; /* fixed-rate queues */
1045 heaps[1] = &wfq_ready_heap; /* wfq queues */
1046 heaps[2] = &extract_heap; /* delay line */
1047
1048 lck_mtx_lock(&dn_mutex);
1049
1050 /* make all time measurements in milliseconds (ms) -
1051 * here we convert secs and usecs to msecs (just divide the
1052 * usecs and take the closest whole number).
1053 */
1054 microuptime(&tv);
1055 curr_time = (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
1056
1057 for (i = 0; i < 3; i++) {
1058 h = heaps[i];
1059 while (h->elements > 0 && DN_KEY_LEQ(h->p[0].key, curr_time)) {
1060 if (h->p[0].key > curr_time) {
1061 printf("dummynet: warning, heap %d is %d ticks late\n",
1062 i, (int)(curr_time - h->p[0].key));
1063 }
1064 p = h->p[0].object; /* store a copy before heap_extract */
1065 heap_extract(h, NULL); /* need to extract before processing */
1066 if (i == 0) {
1067 ready_event(p, &head, &tail);
1068 } else if (i == 1) {
1069 struct dn_pipe *pipe = p;
1070 if (pipe->if_name[0] != '\0') {
1071 printf("dummynet: bad ready_event_wfq for pipe %s\n",
1072 pipe->if_name);
1073 } else {
1074 ready_event_wfq(p, &head, &tail);
1075 }
1076 } else {
1077 transmit_event(p, &head, &tail);
1078 }
1079 }
1080 }
1081 /* sweep pipes trying to expire idle flow_queues */
1082 for (i = 0; i < HASHSIZE; i++) {
1083 SLIST_FOREACH(pe, &pipehash[i], next) {
1084 if (pe->idle_heap.elements > 0 &&
1085 DN_KEY_LT(pe->idle_heap.p[0].key, pe->V)) {
1086 struct dn_flow_queue *q = pe->idle_heap.p[0].object;
1087
1088 heap_extract(&(pe->idle_heap), NULL);
1089 q->S = q->F + 1; /* mark timestamp as invalid */
1090 pe->sum -= q->fs->weight;
1091 }
1092 }
1093 }
1094
1095 /* check the heaps to see if there's still stuff in there, and
1096 * only set the timer if there are packets to process
1097 */
1098 timer_enabled = 0;
1099 for (i = 0; i < 3; i++) {
1100 h = heaps[i];
1101 if (h->elements > 0) { // set the timer
1102 ts.tv_sec = 0;
1103 ts.tv_nsec = 1 * 1000000; // 1ms
1104 timer_enabled = 1;
1105 bsd_timeout(dummynet, NULL, &ts);
1106 break;
1107 }
1108 }
1109
1110 if (head != NULL) {
1111 serialize++;
1112 }
1113
1114 lck_mtx_unlock(&dn_mutex);
1115
1116 /* Send out the de-queued list of ready-to-send packets */
1117 if (head != NULL) {
1118 dummynet_send(head);
1119 lck_mtx_lock(&dn_mutex);
1120 serialize--;
1121 lck_mtx_unlock(&dn_mutex);
1122 }
1123 }
1124
1125
1126 static void
dummynet_send(struct mbuf * m)1127 dummynet_send(struct mbuf *m)
1128 {
1129 struct dn_pkt_tag *pkt;
1130 struct mbuf *n;
1131
1132 for (; m != NULL; m = n) {
1133 n = m->m_nextpkt;
1134 m->m_nextpkt = NULL;
1135 pkt = dn_tag_get(m);
1136
1137 DPRINTF(("dummynet_send m: 0x%llx dn_dir: %d dn_flags: 0x%x\n",
1138 (uint64_t)VM_KERNEL_ADDRPERM(m), pkt->dn_dir,
1139 pkt->dn_flags));
1140
1141 switch (pkt->dn_dir) {
1142 case DN_TO_IP_OUT: {
1143 struct route tmp_rt;
1144
1145 /* route is already in the packet's dn_ro */
1146 bzero(&tmp_rt, sizeof(tmp_rt));
1147
1148 /* Force IP_RAWOUTPUT as the IP header is fully formed */
1149 pkt->dn_flags |= IP_RAWOUTPUT | IP_FORWARDING;
1150 (void)ip_output(m, NULL, &tmp_rt, pkt->dn_flags, NULL, NULL);
1151 ROUTE_RELEASE(&tmp_rt);
1152 break;
1153 }
1154 case DN_TO_IP_IN:
1155 proto_inject(PF_INET, m);
1156 break;
1157 case DN_TO_IP6_OUT: {
1158 /* routes already in the packet's dn_{ro6,pmtu} */
1159 if (pkt->dn_origifp != NULL) {
1160 ip6_output_setsrcifscope(m, pkt->dn_origifp->if_index, NULL);
1161 ip6_output_setdstifscope(m, pkt->dn_origifp->if_index, NULL);
1162 } else {
1163 ip6_output_setsrcifscope(m, IFSCOPE_UNKNOWN, NULL);
1164 ip6_output_setdstifscope(m, IFSCOPE_UNKNOWN, NULL);
1165 }
1166
1167 ip6_output(m, NULL, NULL, IPV6_FORWARDING, NULL, NULL, NULL);
1168 break;
1169 }
1170 case DN_TO_IP6_IN:
1171 proto_inject(PF_INET6, m);
1172 break;
1173 default:
1174 printf("dummynet: bad switch %d!\n", pkt->dn_dir);
1175 m_freem(m);
1176 break;
1177 }
1178 }
1179 }
1180
1181 /*
1182 * Unconditionally expire empty queues in case of shortage.
1183 * Returns the number of queues freed.
1184 */
1185 static int
expire_queues(struct dn_flow_set * fs)1186 expire_queues(struct dn_flow_set *fs)
1187 {
1188 struct dn_flow_queue *q, *prev;
1189 int i, initial_elements = fs->rq_elements;
1190 struct timeval timenow;
1191
1192 /* reviewed for getmicrotime usage */
1193 getmicrotime(&timenow);
1194
1195 if (fs->last_expired == timenow.tv_sec) {
1196 return 0;
1197 }
1198 fs->last_expired = (int)timenow.tv_sec;
1199 for (i = 0; i <= fs->rq_size; i++) { /* last one is overflow */
1200 for (prev = NULL, q = fs->rq[i]; q != NULL;) {
1201 if (q->head != NULL || q->S != q->F + 1) {
1202 prev = q;
1203 q = q->next;
1204 } else { /* entry is idle, expire it */
1205 struct dn_flow_queue *old_q = q;
1206
1207 if (prev != NULL) {
1208 prev->next = q = q->next;
1209 } else {
1210 fs->rq[i] = q = q->next;
1211 }
1212 fs->rq_elements--;
1213 kfree_type(struct dn_flow_queue, old_q);
1214 }
1215 }
1216 }
1217 return initial_elements - fs->rq_elements;
1218 }
1219
1220 /*
1221 * If room, create a new queue and put at head of slot i;
1222 * otherwise, create or use the default queue.
1223 */
1224 static struct dn_flow_queue *
create_queue(struct dn_flow_set * fs,int i)1225 create_queue(struct dn_flow_set *fs, int i)
1226 {
1227 struct dn_flow_queue *q;
1228
1229 if (fs->rq_elements > fs->rq_size * dn_max_ratio &&
1230 expire_queues(fs) == 0) {
1231 /*
1232 * No way to get room, use or create overflow queue.
1233 */
1234 i = fs->rq_size;
1235 if (fs->rq[i] != NULL) {
1236 return fs->rq[i];
1237 }
1238 }
1239 q = kalloc_type(struct dn_flow_queue, Z_NOWAIT | Z_ZERO);
1240 if (q == NULL) {
1241 printf("dummynet: sorry, cannot allocate queue for new flow\n");
1242 return NULL;
1243 }
1244 q->fs = fs;
1245 q->hash_slot = i;
1246 q->next = fs->rq[i];
1247 q->S = q->F + 1; /* hack - mark timestamp as invalid */
1248 fs->rq[i] = q;
1249 fs->rq_elements++;
1250 return q;
1251 }
1252
1253 /*
1254 * Given a flow_set and a pkt in last_pkt, find a matching queue
1255 * after appropriate masking. The queue is moved to front
1256 * so that further searches take less time.
1257 */
1258 static struct dn_flow_queue *
find_queue(struct dn_flow_set * fs,struct ip_flow_id * id)1259 find_queue(struct dn_flow_set *fs, struct ip_flow_id *id)
1260 {
1261 int i = 0; /* we need i and q for new allocations */
1262 struct dn_flow_queue *q, *prev;
1263 int is_v6 = IS_IP6_FLOW_ID(id);
1264
1265 if (!(fs->flags_fs & DN_HAVE_FLOW_MASK)) {
1266 q = fs->rq[0];
1267 } else {
1268 /* first, do the masking, then hash */
1269 id->dst_port &= fs->flow_mask.dst_port;
1270 id->src_port &= fs->flow_mask.src_port;
1271 id->proto &= fs->flow_mask.proto;
1272 id->flags = 0; /* we don't care about this one */
1273 if (is_v6) {
1274 APPLY_MASK(&id->dst_ip6, &fs->flow_mask.dst_ip6);
1275 APPLY_MASK(&id->src_ip6, &fs->flow_mask.src_ip6);
1276 id->flow_id6 &= fs->flow_mask.flow_id6;
1277
1278 i = ((id->dst_ip6.__u6_addr.__u6_addr32[0]) & 0xffff) ^
1279 ((id->dst_ip6.__u6_addr.__u6_addr32[1]) & 0xffff) ^
1280 ((id->dst_ip6.__u6_addr.__u6_addr32[2]) & 0xffff) ^
1281 ((id->dst_ip6.__u6_addr.__u6_addr32[3]) & 0xffff) ^
1282
1283 ((id->dst_ip6.__u6_addr.__u6_addr32[0] >> 15) & 0xffff) ^
1284 ((id->dst_ip6.__u6_addr.__u6_addr32[1] >> 15) & 0xffff) ^
1285 ((id->dst_ip6.__u6_addr.__u6_addr32[2] >> 15) & 0xffff) ^
1286 ((id->dst_ip6.__u6_addr.__u6_addr32[3] >> 15) & 0xffff) ^
1287
1288 ((id->src_ip6.__u6_addr.__u6_addr32[0] << 1) & 0xfffff) ^
1289 ((id->src_ip6.__u6_addr.__u6_addr32[1] << 1) & 0xfffff) ^
1290 ((id->src_ip6.__u6_addr.__u6_addr32[2] << 1) & 0xfffff) ^
1291 ((id->src_ip6.__u6_addr.__u6_addr32[3] << 1) & 0xfffff) ^
1292
1293 ((id->src_ip6.__u6_addr.__u6_addr32[0] >> 16) & 0xffff) ^
1294 ((id->src_ip6.__u6_addr.__u6_addr32[1] >> 16) & 0xffff) ^
1295 ((id->src_ip6.__u6_addr.__u6_addr32[2] >> 16) & 0xffff) ^
1296 ((id->src_ip6.__u6_addr.__u6_addr32[3] >> 16) & 0xffff) ^
1297
1298 (id->dst_port << 1) ^ (id->src_port) ^
1299 (id->proto) ^
1300 (id->flow_id6);
1301 } else {
1302 id->dst_ip &= fs->flow_mask.dst_ip;
1303 id->src_ip &= fs->flow_mask.src_ip;
1304
1305 i = ((id->dst_ip) & 0xffff) ^
1306 ((id->dst_ip >> 15) & 0xffff) ^
1307 ((id->src_ip << 1) & 0xffff) ^
1308 ((id->src_ip >> 16) & 0xffff) ^
1309 (id->dst_port << 1) ^ (id->src_port) ^
1310 (id->proto);
1311 }
1312 i = i % fs->rq_size;
1313 /* finally, scan the current list for a match */
1314 searches++;
1315 for (prev = NULL, q = fs->rq[i]; q;) {
1316 search_steps++;
1317 if (is_v6 &&
1318 IN6_ARE_ADDR_EQUAL(&id->dst_ip6, &q->id.dst_ip6) &&
1319 IN6_ARE_ADDR_EQUAL(&id->src_ip6, &q->id.src_ip6) &&
1320 id->dst_port == q->id.dst_port &&
1321 id->src_port == q->id.src_port &&
1322 id->proto == q->id.proto &&
1323 id->flags == q->id.flags &&
1324 id->flow_id6 == q->id.flow_id6) {
1325 break; /* found */
1326 }
1327 if (!is_v6 && id->dst_ip == q->id.dst_ip &&
1328 id->src_ip == q->id.src_ip &&
1329 id->dst_port == q->id.dst_port &&
1330 id->src_port == q->id.src_port &&
1331 id->proto == q->id.proto &&
1332 id->flags == q->id.flags) {
1333 break; /* found */
1334 }
1335 /* No match. Check if we can expire the entry */
1336 if (pipe_expire && q->head == NULL && q->S == q->F + 1) {
1337 /* entry is idle and not in any heap, expire it */
1338 struct dn_flow_queue *old_q = q;
1339
1340 if (prev != NULL) {
1341 prev->next = q = q->next;
1342 } else {
1343 fs->rq[i] = q = q->next;
1344 }
1345 fs->rq_elements--;
1346 kfree_type(struct dn_flow_queue, old_q);
1347 continue;
1348 }
1349 prev = q;
1350 q = q->next;
1351 }
1352 if (q && prev != NULL) { /* found and not in front */
1353 prev->next = q->next;
1354 q->next = fs->rq[i];
1355 fs->rq[i] = q;
1356 }
1357 }
1358 if (q == NULL) { /* no match, need to allocate a new entry */
1359 q = create_queue(fs, i);
1360 if (q != NULL) {
1361 q->id = *id;
1362 }
1363 }
1364 return q;
1365 }
1366
1367 static int
red_drops(struct dn_flow_set * fs,struct dn_flow_queue * q,int len)1368 red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len)
1369 {
1370 /*
1371 * RED algorithm
1372 *
1373 * RED calculates the average queue size (avg) using a low-pass filter
1374 * with an exponential weighted (w_q) moving average:
1375 * avg <- (1-w_q) * avg + w_q * q_size
1376 * where q_size is the queue length (measured in bytes or * packets).
1377 *
1378 * If q_size == 0, we compute the idle time for the link, and set
1379 * avg = (1 - w_q)^(idle/s)
1380 * where s is the time needed for transmitting a medium-sized packet.
1381 *
1382 * Now, if avg < min_th the packet is enqueued.
1383 * If avg > max_th the packet is dropped. Otherwise, the packet is
1384 * dropped with probability P function of avg.
1385 *
1386 */
1387
1388 int64_t p_b = 0;
1389 /* queue in bytes or packets ? */
1390 u_int q_size = (fs->flags_fs & DN_QSIZE_IS_BYTES) ? q->len_bytes : q->len;
1391
1392 DPRINTF(("\ndummynet: %d q: %2u ", (int) curr_time, q_size));
1393
1394 /* average queue size estimation */
1395 if (q_size != 0) {
1396 /*
1397 * queue is not empty, avg <- avg + (q_size - avg) * w_q
1398 */
1399 int diff = SCALE(q_size) - q->avg;
1400 int64_t v = SCALE_MUL((int64_t) diff, (int64_t) fs->w_q);
1401
1402 q->avg += (int) v;
1403 } else {
1404 /*
1405 * queue is empty, find for how long the queue has been
1406 * empty and use a lookup table for computing
1407 * (1 - * w_q)^(idle_time/s) where s is the time to send a
1408 * (small) packet.
1409 * XXX check wraps...
1410 */
1411 if (q->avg) {
1412 u_int64_t t = (curr_time - q->q_time) / fs->lookup_step;
1413
1414 q->avg = (t < fs->lookup_depth) ?
1415 SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
1416 }
1417 }
1418 DPRINTF(("dummynet: avg: %u ", SCALE_VAL(q->avg)));
1419
1420 /* should i drop ? */
1421
1422 if (q->avg < fs->min_th) {
1423 q->count = -1;
1424 return 0; /* accept packet ; */
1425 }
1426 if (q->avg >= fs->max_th) { /* average queue >= max threshold */
1427 if (fs->flags_fs & DN_IS_GENTLE_RED) {
1428 /*
1429 * According to Gentle-RED, if avg is greater than max_th the
1430 * packet is dropped with a probability
1431 * p_b = c_3 * avg - c_4
1432 * where c_3 = (1 - max_p) / max_th, and c_4 = 1 - 2 * max_p
1433 */
1434 p_b = SCALE_MUL((int64_t) fs->c_3, (int64_t) q->avg) - fs->c_4;
1435 } else {
1436 q->count = -1;
1437 DPRINTF(("dummynet: - drop"));
1438 return 1;
1439 }
1440 } else if (q->avg > fs->min_th) {
1441 /*
1442 * we compute p_b using the linear dropping function p_b = c_1 *
1443 * avg - c_2, where c_1 = max_p / (max_th - min_th), and c_2 =
1444 * max_p * min_th / (max_th - min_th)
1445 */
1446 p_b = SCALE_MUL((int64_t) fs->c_1, (int64_t) q->avg) - fs->c_2;
1447 }
1448 if (fs->flags_fs & DN_QSIZE_IS_BYTES) {
1449 p_b = (p_b * len) / fs->max_pkt_size;
1450 }
1451 if (++q->count == 0) {
1452 q->random = (my_random() & 0xffff);
1453 } else {
1454 /*
1455 * q->count counts packets arrived since last drop, so a greater
1456 * value of q->count means a greater packet drop probability.
1457 */
1458 if (SCALE_MUL(p_b, SCALE((int64_t) q->count)) > q->random) {
1459 q->count = 0;
1460 DPRINTF(("dummynet: - red drop"));
1461 /* after a drop we calculate a new random value */
1462 q->random = (my_random() & 0xffff);
1463 return 1; /* drop */
1464 }
1465 }
1466 /* end of RED algorithm */
1467 return 0; /* accept */
1468 }
1469
1470 static __inline
1471 struct dn_flow_set *
locate_flowset(int fs_nr)1472 locate_flowset(int fs_nr)
1473 {
1474 struct dn_flow_set *fs;
1475 SLIST_FOREACH(fs, &flowsethash[HASH(fs_nr)], next) {
1476 if (fs->fs_nr == fs_nr) {
1477 return fs;
1478 }
1479 }
1480
1481 return NULL;
1482 }
1483
1484 static __inline struct dn_pipe *
locate_pipe(int pipe_nr)1485 locate_pipe(int pipe_nr)
1486 {
1487 struct dn_pipe *pipe;
1488
1489 SLIST_FOREACH(pipe, &pipehash[HASH(pipe_nr)], next) {
1490 if (pipe->pipe_nr == pipe_nr) {
1491 return pipe;
1492 }
1493 }
1494
1495 return NULL;
1496 }
1497
1498
1499
1500 /*
1501 * dummynet hook for packets. Below 'pipe' is a pipe or a queue
1502 * depending on whether WF2Q or fixed bw is used.
1503 *
1504 * pipe_nr pipe or queue the packet is destined for.
1505 * dir where shall we send the packet after dummynet.
1506 * m the mbuf with the packet
1507 * ifp the 'ifp' parameter from the caller.
1508 * NULL in ip_input, destination interface in ip_output,
1509 * real_dst in bdg_forward
1510 * ro route parameter (only used in ip_output, NULL otherwise)
1511 * dst destination address, only used by ip_output
1512 * rule matching rule, in case of multiple passes
1513 * flags flags from the caller, only used in ip_output
1514 *
1515 */
1516 static int
dummynet_io(struct mbuf * m,int pipe_nr,int dir,struct ip_fw_args * fwa)1517 dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa)
1518 {
1519 struct mbuf *head = NULL, *tail = NULL;
1520 struct dn_pkt_tag *pkt;
1521 struct m_tag *mtag;
1522 struct dn_flow_set *fs = NULL;
1523 struct dn_pipe *pipe;
1524 u_int32_t len = m->m_pkthdr.len;
1525 struct dn_flow_queue *q = NULL;
1526 int is_pipe = 0;
1527 struct timespec ts;
1528 struct timeval tv;
1529
1530 DPRINTF(("dummynet_io m: 0x%llx pipe: %d dir: %d\n",
1531 (uint64_t)VM_KERNEL_ADDRPERM(m), pipe_nr, dir));
1532
1533
1534 #if DUMMYNET
1535 is_pipe = fwa->fwa_flags == DN_IS_PIPE ? 1 : 0;
1536 #endif /* DUMMYNET */
1537
1538 pipe_nr &= 0xffff;
1539
1540 lck_mtx_lock(&dn_mutex);
1541
1542 /* make all time measurements in milliseconds (ms) -
1543 * here we convert secs and usecs to msecs (just divide the
1544 * usecs and take the closest whole number).
1545 */
1546 microuptime(&tv);
1547 curr_time = (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
1548
1549 /*
1550 * This is a dummynet rule, so we expect an O_PIPE or O_QUEUE rule.
1551 */
1552 if (is_pipe) {
1553 pipe = locate_pipe(pipe_nr);
1554 if (pipe != NULL) {
1555 fs = &(pipe->fs);
1556 }
1557 } else {
1558 fs = locate_flowset(pipe_nr);
1559 }
1560
1561
1562 if (fs == NULL) {
1563 goto dropit; /* this queue/pipe does not exist! */
1564 }
1565 pipe = fs->pipe;
1566 if (pipe == NULL) { /* must be a queue, try find a matching pipe */
1567 pipe = locate_pipe(fs->parent_nr);
1568
1569 if (pipe != NULL) {
1570 fs->pipe = pipe;
1571 } else {
1572 printf("dummynet: no pipe %d for queue %d, drop pkt\n",
1573 fs->parent_nr, fs->fs_nr);
1574 goto dropit;
1575 }
1576 }
1577 q = find_queue(fs, &(fwa->fwa_id));
1578 if (q == NULL) {
1579 goto dropit; /* cannot allocate queue */
1580 }
1581 /*
1582 * update statistics, then check reasons to drop pkt
1583 */
1584 q->tot_bytes += len;
1585 q->tot_pkts++;
1586 if (fs->plr && (my_random() < fs->plr)) {
1587 goto dropit; /* random pkt drop */
1588 }
1589 if (fs->flags_fs & DN_QSIZE_IS_BYTES) {
1590 if (q->len_bytes > fs->qsize) {
1591 goto dropit; /* queue size overflow */
1592 }
1593 } else {
1594 if (q->len >= fs->qsize) {
1595 goto dropit; /* queue count overflow */
1596 }
1597 }
1598 if (fs->flags_fs & DN_IS_RED && red_drops(fs, q, len)) {
1599 goto dropit;
1600 }
1601
1602 /* XXX expensive to zero, see if we can remove it*/
1603 mtag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DUMMYNET,
1604 sizeof(struct dn_pkt_tag), M_NOWAIT, m);
1605 if (mtag == NULL) {
1606 goto dropit; /* cannot allocate packet header */
1607 }
1608 m_tag_prepend(m, mtag); /* attach to mbuf chain */
1609
1610 pkt = (struct dn_pkt_tag *)(mtag + 1);
1611 bzero(pkt, sizeof(struct dn_pkt_tag));
1612 /* ok, i can handle the pkt now... */
1613 /* build and enqueue packet + parameters */
1614 pkt->dn_pf_rule = fwa->fwa_pf_rule;
1615 pkt->dn_dir = dir;
1616
1617 pkt->dn_ifp = fwa->fwa_oif;
1618 if (dir == DN_TO_IP_OUT) {
1619 /*
1620 * We need to copy *ro because for ICMP pkts (and maybe others)
1621 * the caller passed a pointer into the stack; dst might also be
1622 * a pointer into *ro so it needs to be updated.
1623 */
1624 if (fwa->fwa_ro) {
1625 route_copyout(&pkt->dn_ro, fwa->fwa_ro, sizeof(pkt->dn_ro));
1626 }
1627 if (fwa->fwa_dst) {
1628 if (fwa->fwa_dst == (struct sockaddr_in *)(void *)&fwa->fwa_ro->ro_dst) { /* dst points into ro */
1629 fwa->fwa_dst = (struct sockaddr_in *)(void *)&(pkt->dn_ro.ro_dst);
1630 }
1631
1632 bcopy(fwa->fwa_dst, &pkt->dn_dst, sizeof(pkt->dn_dst));
1633 }
1634 } else if (dir == DN_TO_IP6_OUT) {
1635 if (fwa->fwa_ro6) {
1636 route_copyout((struct route *)&pkt->dn_ro6,
1637 (struct route *)fwa->fwa_ro6, sizeof(pkt->dn_ro6));
1638 }
1639 if (fwa->fwa_ro6_pmtu) {
1640 route_copyout((struct route *)&pkt->dn_ro6_pmtu,
1641 (struct route *)fwa->fwa_ro6_pmtu, sizeof(pkt->dn_ro6_pmtu));
1642 }
1643 if (fwa->fwa_dst6) {
1644 if (fwa->fwa_dst6 == (struct sockaddr_in6 *)&fwa->fwa_ro6->ro_dst) { /* dst points into ro */
1645 fwa->fwa_dst6 = (struct sockaddr_in6 *)&(pkt->dn_ro6.ro_dst);
1646 }
1647
1648 bcopy(fwa->fwa_dst6, &pkt->dn_dst6, sizeof(pkt->dn_dst6));
1649 }
1650 pkt->dn_origifp = fwa->fwa_origifp;
1651 pkt->dn_mtu = fwa->fwa_mtu;
1652 pkt->dn_unfragpartlen = fwa->fwa_unfragpartlen;
1653 if (fwa->fwa_exthdrs) {
1654 bcopy(fwa->fwa_exthdrs, &pkt->dn_exthdrs, sizeof(pkt->dn_exthdrs));
1655 /*
1656 * Need to zero out the source structure so the mbufs
1657 * won't be freed by ip6_output()
1658 */
1659 bzero(fwa->fwa_exthdrs, sizeof(struct ip6_exthdrs));
1660 }
1661 }
1662 if (dir == DN_TO_IP_OUT || dir == DN_TO_IP6_OUT) {
1663 pkt->dn_flags = fwa->fwa_oflags;
1664 if (fwa->fwa_ipoa != NULL) {
1665 pkt->dn_ipoa = *(fwa->fwa_ipoa);
1666 }
1667 }
1668 if (q->head == NULL) {
1669 q->head = m;
1670 } else {
1671 q->tail->m_nextpkt = m;
1672 }
1673 q->tail = m;
1674 q->len++;
1675 q->len_bytes += len;
1676
1677 if (q->head != m) { /* flow was not idle, we are done */
1678 goto done;
1679 }
1680 /*
1681 * If we reach this point the flow was previously idle, so we need
1682 * to schedule it. This involves different actions for fixed-rate or
1683 * WF2Q queues.
1684 */
1685 if (is_pipe) {
1686 /*
1687 * Fixed-rate queue: just insert into the ready_heap.
1688 */
1689 dn_key t = 0;
1690 if (pipe->bandwidth) {
1691 t = SET_TICKS(m, q, pipe);
1692 }
1693 q->sched_time = curr_time;
1694 if (t == 0) { /* must process it now */
1695 ready_event( q, &head, &tail );
1696 } else {
1697 heap_insert(&ready_heap, curr_time + t, q );
1698 }
1699 } else {
1700 /*
1701 * WF2Q. First, compute start time S: if the flow was idle (S=F+1)
1702 * set S to the virtual time V for the controlling pipe, and update
1703 * the sum of weights for the pipe; otherwise, remove flow from
1704 * idle_heap and set S to max(F,V).
1705 * Second, compute finish time F = S + len/weight.
1706 * Third, if pipe was idle, update V=max(S, V).
1707 * Fourth, count one more backlogged flow.
1708 */
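		/*
		 * Worked example (illustrative numbers only): suppose
		 * pipe->V is 100 and a previously idle flow (S > F)
		 * enqueues a 1000-byte packet with weight 10.  Then
		 * S = V = 100 and F = 100 + (1000 << MY_M) / 10.  A flow
		 * with twice the weight sees half the increment of F, so
		 * it is served proportionally more often.
		 */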
1709 if (DN_KEY_GT(q->S, q->F)) { /* means timestamps are invalid */
1710 q->S = pipe->V;
1711 pipe->sum += fs->weight; /* add weight of new queue */
1712 } else {
1713 heap_extract(&(pipe->idle_heap), q);
1714 q->S = MAX64(q->F, pipe->V );
1715 }
1716 q->F = q->S + (len << MY_M) / (u_int64_t) fs->weight;
1717
1718 if (pipe->not_eligible_heap.elements == 0 &&
1719 pipe->scheduler_heap.elements == 0) {
1720 pipe->V = MAX64( q->S, pipe->V );
1721 }
1722 fs->backlogged++;
		/*
		 * Look at eligibility.  A flow is not eligible if S > V (when
		 * this happens, it means that there is some other flow already
		 * scheduled for the same pipe, so the scheduler_heap cannot be
		 * empty).  If the flow is not eligible we just store it in the
		 * not_eligible_heap.  Otherwise, we store it in the scheduler_heap
		 * and possibly invoke ready_event_wfq() right now if there is
		 * leftover credit.
		 * Note that for all flows in scheduler_heap (SCH), S_i <= V,
		 * and for all flows in not_eligible_heap (NEH), S_i > V.
		 * So when we need to compute max(V, min(S_i)) for all i in SCH+NEH,
		 * we only need to look into NEH.
		 */
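		/* E.g. (illustrative): q->S = 120 while pipe->V = 100 parks
		 * the flow in the not_eligible_heap until V reaches 120. */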
1736 if (DN_KEY_GT(q->S, pipe->V)) { /* not eligible */
1737 if (pipe->scheduler_heap.elements == 0) {
1738 printf("dummynet: ++ ouch! not eligible but empty scheduler!\n");
1739 }
1740 heap_insert(&(pipe->not_eligible_heap), q->S, q);
1741 } else {
1742 heap_insert(&(pipe->scheduler_heap), q->F, q);
1743 if (pipe->numbytes >= 0) { /* pipe is idle */
1744 if (pipe->scheduler_heap.elements != 1) {
1745 printf("dummynet: OUCH! pipe should have been idle!\n");
1746 }
1747 DPRINTF(("dummynet: waking up pipe %d at %d\n",
1748 pipe->pipe_nr, (int)(q->F >> MY_M)));
1749 pipe->sched_time = curr_time;
1750 ready_event_wfq(pipe, &head, &tail);
1751 }
1752 }
1753 }
1754 done:
1755 /* start the timer and set global if not already set */
1756 if (!timer_enabled) {
1757 ts.tv_sec = 0;
1758 ts.tv_nsec = 1 * 1000000; // 1ms
1759 timer_enabled = 1;
1760 bsd_timeout(dummynet, NULL, &ts);
1761 }
1762
1763 lck_mtx_unlock(&dn_mutex);
1764
1765 if (head != NULL) {
1766 dummynet_send(head);
1767 }
1768
1769 return 0;
1770
1771 dropit:
1772 if (q) {
1773 q->drops++;
1774 }
1775 lck_mtx_unlock(&dn_mutex);
1776 m_freem(m);
1777 return (fs && (fs->flags_fs & DN_NOERROR)) ? 0 : ENOBUFS;
1778 }
1779
/*
 * Below, the ROUTE_RELEASE is only needed when (pkt->dn_dir == DN_TO_IP_OUT);
 * doing it only in that case would probably let us skip the initial
 * bzero of the dn_pkt_tag.
 */
#define DN_FREE_PKT(_m) do {						\
	struct m_tag *tag = m_tag_locate(_m, KERNEL_MODULE_TAG_ID,	\
	    KERNEL_TAG_TYPE_DUMMYNET, NULL);				\
	if (tag) {							\
		struct dn_pkt_tag *n = (struct dn_pkt_tag *)(tag + 1);	\
		ROUTE_RELEASE(&n->dn_ro);				\
		m_tag_delete(_m, tag);					\
	}								\
	m_freem(_m);							\
} while (0)
1793
/*
 * Dispose of all packets and flow_queues on a flow_set.
 * If all=1, also remove the RED lookup table and other storage,
 * including the descriptor itself.
 * For the flow_set embedded in a dn_pipe, the caller MUST also
 * clean up the ready_heap...
 */
1800 static void
purge_flow_set(struct dn_flow_set *fs, int all)
1802 {
1803 struct dn_flow_queue *q, *qn;
1804 int i;
1805
1806 LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);
1807
1808 for (i = 0; i <= fs->rq_size; i++) {
1809 for (q = fs->rq[i]; q; q = qn) {
1810 struct mbuf *m, *mnext;
1811
1812 mnext = q->head;
1813 while ((m = mnext) != NULL) {
1814 mnext = m->m_nextpkt;
1815 DN_FREE_PKT(m);
1816 }
1817 qn = q->next;
1818 kfree_type(struct dn_flow_queue, q);
1819 }
1820 fs->rq[i] = NULL;
1821 }
1822 fs->rq_elements = 0;
1823 if (all) {
1824 /* RED - free lookup table */
1825 if (fs->w_q_lookup) {
1826 kfree_data(fs->w_q_lookup, fs->lookup_depth * sizeof(int));
1827 }
1828 if (fs->rq) {
1829 FREE(fs->rq, M_DUMMYNET);
1830 }
1831 /* if this fs is not part of a pipe, free it */
1832 if (fs->pipe && fs != &(fs->pipe->fs)) {
1833 kfree_type(struct dn_flow_set, fs);
1834 }
1835 }
1836 }
1837
/*
 * Dispose of all packets queued on a pipe (not a flow_set).
 * Also free all resources associated with a pipe, which is about
 * to be deleted.
 */
1843 static void
purge_pipe(struct dn_pipe *pipe)
1845 {
1846 struct mbuf *m, *mnext;
1847
1848 purge_flow_set( &(pipe->fs), 1 );
1849
1850 mnext = pipe->head;
1851 while ((m = mnext) != NULL) {
1852 mnext = m->m_nextpkt;
1853 DN_FREE_PKT(m);
1854 }
1855
1856 heap_free( &(pipe->scheduler_heap));
1857 heap_free( &(pipe->not_eligible_heap));
1858 heap_free( &(pipe->idle_heap));
1859 }
1860
/*
 * Delete all pipes and heaps, returning their memory.
 */
1864 static void
dummynet_flush(void)
1866 {
1867 struct dn_pipe *pipe, *pipe1;
1868 struct dn_flow_set *fs, *fs1;
1869 int i;
1870
1871 lck_mtx_lock(&dn_mutex);
1872
1873
1874 /* Free heaps so we don't have unwanted events. */
1875 heap_free(&ready_heap);
1876 heap_free(&wfq_ready_heap);
1877 heap_free(&extract_heap);
1878
1879 /*
1880 * Now purge all queued pkts and delete all pipes.
1881 *
1882 * XXXGL: can we merge the for(;;) cycles into one or not?
1883 */
1884 for (i = 0; i < HASHSIZE; i++) {
1885 SLIST_FOREACH_SAFE(fs, &flowsethash[i], next, fs1) {
1886 SLIST_REMOVE(&flowsethash[i], fs, dn_flow_set, next);
1887 purge_flow_set(fs, 1);
1888 }
1889 }
1890 for (i = 0; i < HASHSIZE; i++) {
1891 SLIST_FOREACH_SAFE(pipe, &pipehash[i], next, pipe1) {
1892 SLIST_REMOVE(&pipehash[i], pipe, dn_pipe, next);
1893 purge_pipe(pipe);
1894 kfree_type(struct dn_pipe, pipe);
1895 }
1896 }
1897 lck_mtx_unlock(&dn_mutex);
1898 }
1899
1900 /*
1901 * setup RED parameters
1902 */
1903 static int
config_red(struct dn_flow_set *p, struct dn_flow_set *x)
1905 {
1906 int i;
1907
1908 x->w_q = p->w_q;
1909 x->min_th = SCALE(p->min_th);
1910 x->max_th = SCALE(p->max_th);
1911 x->max_p = p->max_p;
1912
1913 x->c_1 = p->max_p / (p->max_th - p->min_th);
1914 x->c_2 = SCALE_MUL(x->c_1, SCALE(p->min_th));
1915 if (x->flags_fs & DN_IS_GENTLE_RED) {
1916 x->c_3 = (SCALE(1) - p->max_p) / p->max_th;
1917 x->c_4 = (SCALE(1) - 2 * p->max_p);
1918 }
1919
	/* if the lookup table already exists, free it and create it again */
1921 if (x->w_q_lookup) {
1922 kfree_data(x->w_q_lookup, x->lookup_depth * sizeof(int));
1923 x->w_q_lookup = NULL;
1924 }
1925 if (red_lookup_depth == 0) {
1926 printf("\ndummynet: net.inet.ip.dummynet.red_lookup_depth must be > 0\n");
1927 kfree_type(struct dn_flow_set, x);
1928 return EINVAL;
1929 }
1930 x->lookup_depth = red_lookup_depth;
1931 x->w_q_lookup = (u_int *) kalloc_data(x->lookup_depth * sizeof(int),
1932 Z_NOWAIT);
1933 if (x->w_q_lookup == NULL) {
1934 printf("dummynet: sorry, cannot allocate red lookup table\n");
1935 kfree_type(struct dn_flow_set, x);
1936 return ENOSPC;
1937 }
1938
1939 /* fill the lookup table with (1 - w_q)^x */
1940 x->lookup_step = p->lookup_step;
1941 x->lookup_weight = p->lookup_weight;
1942 x->w_q_lookup[0] = SCALE(1) - x->w_q;
1943 for (i = 1; i < x->lookup_depth; i++) {
1944 x->w_q_lookup[i] = SCALE_MUL(x->w_q_lookup[i - 1], x->lookup_weight);
1945 }
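	/*
	 * Entry i now holds, in fixed point, (1 - w_q) * lookup_weight^i;
	 * this lets the RED code decay the average queue length across an
	 * idle period with one table lookup instead of recomputing a
	 * power each time.
	 */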
1946 if (red_avg_pkt_size < 1) {
1947 red_avg_pkt_size = 512;
1948 }
1949 x->avg_pkt_size = red_avg_pkt_size;
1950 if (red_max_pkt_size < 1) {
1951 red_max_pkt_size = 1500;
1952 }
1953 x->max_pkt_size = red_max_pkt_size;
1954 return 0;
1955 }
1956
1957 static int
alloc_hash(struct dn_flow_set *x, struct dn_flow_set *pfs)
1959 {
1960 if (x->flags_fs & DN_HAVE_FLOW_MASK) { /* allocate some slots */
1961 int l = pfs->rq_size;
1962
1963 if (l == 0) {
1964 l = dn_hash_size;
1965 }
1966 if (l < 4) {
1967 l = 4;
1968 } else if (l > DN_MAX_HASH_SIZE) {
1969 l = DN_MAX_HASH_SIZE;
1970 }
1971 x->rq_size = l;
1972 } else { /* one is enough for null mask */
1973 x->rq_size = 1;
1974 }
1975 x->rq = _MALLOC((1 + x->rq_size) * sizeof(struct dn_flow_queue *),
1976 M_DUMMYNET, M_DONTWAIT | M_ZERO);
1977 if (x->rq == NULL) {
1978 printf("dummynet: sorry, cannot allocate queue\n");
1979 return ENOSPC;
1980 }
1981 x->rq_elements = 0;
1982 return 0;
1983 }
1984
1985 static void
set_fs_parms(struct dn_flow_set *x, struct dn_flow_set *src)
1987 {
1988 x->flags_fs = src->flags_fs;
1989 x->qsize = src->qsize;
1990 x->plr = src->plr;
1991 x->flow_mask = src->flow_mask;
1992 if (x->flags_fs & DN_QSIZE_IS_BYTES) {
1993 if (x->qsize > 1024 * 1024) {
1994 x->qsize = 1024 * 1024;
1995 }
1996 } else {
1997 if (x->qsize == 0) {
1998 x->qsize = 50;
1999 }
2000 if (x->qsize > 100) {
2001 x->qsize = 50;
2002 }
2003 }
2004 /* configuring RED */
2005 if (x->flags_fs & DN_IS_RED) {
2006 config_red(src, x); /* XXX should check errors */
2007 }
2008 }
2009
2010 /*
2011 * setup pipe or queue parameters.
2012 */
2013 static int
config_pipe(struct dn_pipe *p)
2015 {
2016 int i, r;
2017 struct dn_flow_set *pfs = &(p->fs);
2018 struct dn_flow_queue *q;
2019
2020 /*
2021 * The config program passes parameters as follows:
2022 * bw = bits/second (0 means no limits),
2023 * delay = ms, must be translated into ticks.
2024 * qsize = slots/bytes
2025 */
2026 p->delay = (p->delay * (hz * 10)) / 1000;
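	/*
	 * E.g. (illustrative, with the BSD default hz = 100): a 50 ms
	 * delay becomes 50 * (100 * 10) / 1000 = 50 ticks, matching the
	 * 1 ms granularity of the scheduler timer armed in dummynet_io().
	 */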
2027 /* We need either a pipe number or a flow_set number */
2028 if (p->pipe_nr == 0 && pfs->fs_nr == 0) {
2029 return EINVAL;
2030 }
2031 if (p->pipe_nr != 0 && pfs->fs_nr != 0) {
2032 return EINVAL;
2033 }
2034 if (p->pipe_nr != 0) { /* this is a pipe */
2035 struct dn_pipe *x, *b;
2036 struct dummynet_event dn_event;
2037 lck_mtx_lock(&dn_mutex);
2038
2039 /* locate pipe */
2040 b = locate_pipe(p->pipe_nr);
2041
2042 if (b == NULL || b->pipe_nr != p->pipe_nr) { /* new pipe */
2043 x = kalloc_type(struct dn_pipe, Z_NOWAIT | Z_ZERO);
2044 if (x == NULL) {
2045 lck_mtx_unlock(&dn_mutex);
2046 printf("dummynet: no memory for new pipe\n");
2047 return ENOSPC;
2048 }
2049 x->pipe_nr = p->pipe_nr;
2050 x->fs.pipe = x;
			/*
			 * idle_heap is the only heap from which we
			 * extract entries from the middle.
			 */
2053 x->idle_heap.size = x->idle_heap.elements = 0;
2054 x->idle_heap.offset = offsetof(struct dn_flow_queue, heap_pos);
2055 } else {
2056 x = b;
2057 /* Flush accumulated credit for all queues */
2058 for (i = 0; i <= x->fs.rq_size; i++) {
2059 for (q = x->fs.rq[i]; q; q = q->next) {
2060 q->numbytes = 0;
2061 }
2062 }
2063 }
2064
2065 x->bandwidth = p->bandwidth;
2066 x->numbytes = 0; /* just in case... */
2067 bcopy(p->if_name, x->if_name, sizeof(p->if_name));
2068 x->ifp = NULL; /* reset interface ptr */
2069 x->delay = p->delay;
2070 set_fs_parms(&(x->fs), pfs);
2071
2072
2073 if (x->fs.rq == NULL) { /* a new pipe */
2074 r = alloc_hash(&(x->fs), pfs);
2075 if (r) {
2076 lck_mtx_unlock(&dn_mutex);
2077 kfree_type(struct dn_pipe, x);
2078 return r;
2079 }
2080 SLIST_INSERT_HEAD(&pipehash[HASH(x->pipe_nr)],
2081 x, next);
2082 }
2083 lck_mtx_unlock(&dn_mutex);
2084
2085 bzero(&dn_event, sizeof(dn_event));
2086 dn_event.dn_event_code = DUMMYNET_PIPE_CONFIG;
2087 dn_event.dn_event_pipe_config.bandwidth = p->bandwidth;
2088 dn_event.dn_event_pipe_config.delay = p->delay;
2089 dn_event.dn_event_pipe_config.plr = pfs->plr;
2090
2091 dummynet_event_enqueue_nwk_wq_entry(&dn_event);
2092 } else { /* config queue */
2093 struct dn_flow_set *x, *b;
2094
2095 lck_mtx_lock(&dn_mutex);
2096 /* locate flow_set */
2097 b = locate_flowset(pfs->fs_nr);
2098
2099 if (b == NULL || b->fs_nr != pfs->fs_nr) { /* new */
2100 if (pfs->parent_nr == 0) { /* need link to a pipe */
2101 lck_mtx_unlock(&dn_mutex);
2102 return EINVAL;
2103 }
2104 x = kalloc_type(struct dn_flow_set, Z_NOWAIT | Z_ZERO);
2105 if (x == NULL) {
2106 lck_mtx_unlock(&dn_mutex);
2107 printf("dummynet: no memory for new flow_set\n");
2108 return ENOSPC;
2109 }
2110 x->fs_nr = pfs->fs_nr;
2111 x->parent_nr = pfs->parent_nr;
2112 x->weight = pfs->weight;
2113 if (x->weight == 0) {
2114 x->weight = 1;
2115 } else if (x->weight > 100) {
2116 x->weight = 100;
2117 }
2118 } else {
			/* Changing the parent pipe is not allowed; delete and recreate */
2120 if (pfs->parent_nr != 0 && b->parent_nr != pfs->parent_nr) {
2121 lck_mtx_unlock(&dn_mutex);
2122 return EINVAL;
2123 }
2124 x = b;
2125 }
2126 set_fs_parms(x, pfs);
2127
2128 if (x->rq == NULL) { /* a new flow_set */
2129 r = alloc_hash(x, pfs);
2130 if (r) {
2131 lck_mtx_unlock(&dn_mutex);
2132 kfree_type(struct dn_flow_set, x);
2133 return r;
2134 }
2135 SLIST_INSERT_HEAD(&flowsethash[HASH(x->fs_nr)],
2136 x, next);
2137 }
2138 lck_mtx_unlock(&dn_mutex);
2139 }
2140 return 0;
2141 }
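/*
 * A minimal userland sketch of driving config_pipe() (illustrative
 * only; field names follow ip_dummynet.h and error handling is
 * omitted):
 *
 *	struct dn_pipe p = { 0 };
 *	p.pipe_nr = 1;
 *	p.bandwidth = 1000000;	// 1 Mbit/s
 *	p.delay = 50;		// ms; converted to ticks above
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
 *	setsockopt(s, IPPROTO_IP, IP_DUMMYNET_CONFIGURE, &p, sizeof(p));
 */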
2142
/*
 * Helper function to remove from a heap the queues that are linked
 * to a flow_set about to be deleted.
 */
2147 static void
fs_remove_from_heap(struct dn_heap *h, struct dn_flow_set *fs)
2149 {
	int i = 0, found = 0;

	while (i < h->elements) {
2152 if (((struct dn_flow_queue *)h->p[i].object)->fs == fs) {
2153 h->elements--;
2154 h->p[i] = h->p[h->elements];
2155 found++;
2156 } else {
2157 i++;
2158 }
2159 }
2160 if (found) {
2161 heapify(h);
2162 }
2163 }
2164
2165 /*
2166 * helper function to remove a pipe from a heap (can be there at most once)
2167 */
2168 static void
pipe_remove_from_heap(struct dn_heap *h, struct dn_pipe *p)
2170 {
	int i;

	for (i = 0; i < h->elements; i++) {
		if (h->p[i].object == p) { /* found it */
			h->elements--;
			h->p[i] = h->p[h->elements];
			heapify(h);
			break;
		}
	}
2182 }
2183
2184 /*
2185 * drain all queues. Called in case of severe mbuf shortage.
2186 */
2187 void
dummynet_drain(void)
2189 {
2190 struct dn_flow_set *fs;
2191 struct dn_pipe *p;
2192 struct mbuf *m, *mnext;
2193 int i;
2194
2195 LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);
2196
2197 heap_free(&ready_heap);
2198 heap_free(&wfq_ready_heap);
2199 heap_free(&extract_heap);
	/* purge all packets queued on the flow_sets */
2201 for (i = 0; i < HASHSIZE; i++) {
2202 SLIST_FOREACH(fs, &flowsethash[i], next) {
2203 purge_flow_set(fs, 0);
2204 }
2205 }
2206
2207 for (i = 0; i < HASHSIZE; i++) {
2208 SLIST_FOREACH(p, &pipehash[i], next) {
2209 purge_flow_set(&(p->fs), 0);
2210
2211 mnext = p->head;
2212 while ((m = mnext) != NULL) {
2213 mnext = m->m_nextpkt;
2214 DN_FREE_PKT(m);
2215 }
2216 p->head = p->tail = NULL;
2217 }
2218 }
2219 }
2220
2221 /*
2222 * Fully delete a pipe or a queue, cleaning up associated info.
2223 */
2224 static int
delete_pipe(struct dn_pipe *p)
2226 {
2227 if (p->pipe_nr == 0 && p->fs.fs_nr == 0) {
2228 return EINVAL;
2229 }
2230 if (p->pipe_nr != 0 && p->fs.fs_nr != 0) {
2231 return EINVAL;
2232 }
2233 if (p->pipe_nr != 0) { /* this is an old-style pipe */
2234 struct dn_pipe *b;
2235 struct dn_flow_set *fs;
2236 int i;
2237
2238 lck_mtx_lock(&dn_mutex);
2239 /* locate pipe */
2240 b = locate_pipe(p->pipe_nr);
2241 if (b == NULL) {
2242 lck_mtx_unlock(&dn_mutex);
2243 return EINVAL; /* not found */
2244 }
2245
2246 /* Unlink from list of pipes. */
2247 SLIST_REMOVE(&pipehash[HASH(b->pipe_nr)], b, dn_pipe, next);
2248
2249
2250 /* Remove all references to this pipe from flow_sets. */
2251 for (i = 0; i < HASHSIZE; i++) {
2252 SLIST_FOREACH(fs, &flowsethash[i], next) {
2253 if (fs->pipe == b) {
2254 printf("dummynet: ++ ref to pipe %d from fs %d\n",
2255 p->pipe_nr, fs->fs_nr);
2256 fs->pipe = NULL;
2257 purge_flow_set(fs, 0);
2258 }
2259 }
2260 }
2261 fs_remove_from_heap(&ready_heap, &(b->fs));
2262
2263 purge_pipe(b); /* remove all data associated to this pipe */
		/* remove references to this pipe from extract_heap and wfq_ready_heap */
2265 pipe_remove_from_heap(&extract_heap, b);
2266 pipe_remove_from_heap(&wfq_ready_heap, b);
2267 lck_mtx_unlock(&dn_mutex);
2268
2269 kfree_type(struct dn_pipe, b);
2270 } else { /* this is a WF2Q queue (dn_flow_set) */
2271 struct dn_flow_set *b;
2272
2273 lck_mtx_lock(&dn_mutex);
2274 /* locate set */
2275 b = locate_flowset(p->fs.fs_nr);
2276 if (b == NULL) {
2277 lck_mtx_unlock(&dn_mutex);
2278 return EINVAL; /* not found */
2279 }
2280
2281
2282 /* Unlink from list of flowsets. */
2283 SLIST_REMOVE( &flowsethash[HASH(b->fs_nr)], b, dn_flow_set, next);
2284
2285 if (b->pipe != NULL) {
2286 /* Update total weight on parent pipe and cleanup parent heaps */
2287 b->pipe->sum -= b->weight * b->backlogged;
2288 fs_remove_from_heap(&(b->pipe->not_eligible_heap), b);
2289 fs_remove_from_heap(&(b->pipe->scheduler_heap), b);
#if 1 /* XXX should we remove it from the idle_heap as well? */
2291 fs_remove_from_heap(&(b->pipe->idle_heap), b);
2292 #endif
2293 }
2294 purge_flow_set(b, 1);
2295 lck_mtx_unlock(&dn_mutex);
2296 }
2297 return 0;
2298 }
2299
/*
 * Helper functions used to copy flow-queue data to userland for
 * IP_DUMMYNET_GET.
 */
2303 static
2304 char*
dn_copy_set_32(struct dn_flow_set *set, char *bp)
2306 {
2307 int i, copied = 0;
2308 struct dn_flow_queue *q;
2309 struct dn_flow_queue_32 *qp = (struct dn_flow_queue_32 *)(void *)bp;
2310
2311 LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);
2312
2313 for (i = 0; i <= set->rq_size; i++) {
2314 for (q = set->rq[i]; q; q = q->next, qp++) {
2315 if (q->hash_slot != i) {
2316 printf("dummynet: ++ at %d: wrong slot (have %d, "
2317 "should be %d)\n", copied, q->hash_slot, i);
2318 }
2319 if (q->fs != set) {
2320 printf("dummynet: ++ at %d: wrong fs ptr "
2321 "(have 0x%llx, should be 0x%llx)\n", i,
2322 (uint64_t)VM_KERNEL_ADDRPERM(q->fs),
2323 (uint64_t)VM_KERNEL_ADDRPERM(set));
2324 }
2325 copied++;
2326 cp_queue_to_32_user( q, qp );
2327 /* cleanup pointers */
2328 qp->next = (user32_addr_t)0;
2329 qp->head = qp->tail = (user32_addr_t)0;
2330 qp->fs = (user32_addr_t)0;
2331 }
2332 }
2333 if (copied != set->rq_elements) {
2334 printf("dummynet: ++ wrong count, have %d should be %d\n",
2335 copied, set->rq_elements);
2336 }
2337 return (char *)qp;
2338 }
2339
2340 static
2341 char*
dn_copy_set_64(struct dn_flow_set *set, char *bp)
2343 {
2344 int i, copied = 0;
2345 struct dn_flow_queue *q;
2346 struct dn_flow_queue_64 *qp = (struct dn_flow_queue_64 *)(void *)bp;
2347
2348 LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);
2349
2350 for (i = 0; i <= set->rq_size; i++) {
2351 for (q = set->rq[i]; q; q = q->next, qp++) {
2352 if (q->hash_slot != i) {
2353 printf("dummynet: ++ at %d: wrong slot (have %d, "
2354 "should be %d)\n", copied, q->hash_slot, i);
2355 }
2356 if (q->fs != set) {
2357 printf("dummynet: ++ at %d: wrong fs ptr "
2358 "(have 0x%llx, should be 0x%llx)\n", i,
2359 (uint64_t)VM_KERNEL_ADDRPERM(q->fs),
2360 (uint64_t)VM_KERNEL_ADDRPERM(set));
2361 }
2362 copied++;
2364 cp_queue_to_64_user( q, qp );
2365 /* cleanup pointers */
2366 qp->next = USER_ADDR_NULL;
2367 qp->head = qp->tail = USER_ADDR_NULL;
2368 qp->fs = USER_ADDR_NULL;
2369 }
2370 }
2371 if (copied != set->rq_elements) {
2372 printf("dummynet: ++ wrong count, have %d should be %d\n",
2373 copied, set->rq_elements);
2374 }
2375 return (char *)qp;
2376 }
2377
2378 static size_t
dn_calc_size(int is64user)
2380 {
2381 struct dn_flow_set *set;
2382 struct dn_pipe *p;
2383 size_t size = 0;
2384 size_t pipesize;
2385 size_t queuesize;
2386 size_t setsize;
2387 int i;
2388
2389 LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);
2390 if (is64user) {
2391 pipesize = sizeof(struct dn_pipe_64);
2392 queuesize = sizeof(struct dn_flow_queue_64);
2393 setsize = sizeof(struct dn_flow_set_64);
2394 } else {
2395 pipesize = sizeof(struct dn_pipe_32);
2396 queuesize = sizeof(struct dn_flow_queue_32);
2397 setsize = sizeof(struct dn_flow_set_32);
2398 }
2399 /*
2400 * compute size of data structures: list of pipes and flow_sets.
2401 */
2402 for (i = 0; i < HASHSIZE; i++) {
2403 SLIST_FOREACH(p, &pipehash[i], next) {
			size += pipesize +
			    p->fs.rq_elements * queuesize;
2406 }
2407 SLIST_FOREACH(set, &flowsethash[i], next) {
			size += setsize +
			    set->rq_elements * queuesize;
2410 }
2411 }
2412 return size;
2413 }
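/*
 * E.g. (illustrative): for a 64-bit caller with two pipes holding
 * three queues in total, this returns
 * 2 * sizeof(struct dn_pipe_64) + 3 * sizeof(struct dn_flow_queue_64).
 */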
2414
2415 static int
dummynet_get(struct sockopt *sopt)
2417 {
2418 char *buf = NULL, *bp = NULL; /* bp is the "copy-pointer" */
2419 size_t size = 0;
2420 struct dn_flow_set *set;
2421 struct dn_pipe *p;
2422 int error = 0, i;
2423 int is64user = 0;
2424
2425 /* XXX lock held too long */
2426 lck_mtx_lock(&dn_mutex);
	/*
	 * XXX: Ugly, but we need a blocking (Z_WAITOK) allocation, and we
	 * cannot block while holding the mutex: so compute the size, drop
	 * the lock, allocate, and retry if the size changed in between.
	 */
2431 if (proc_is64bit(sopt->sopt_p)) {
2432 is64user = 1;
2433 }
2434 for (i = 0; i < 10; i++) {
2435 size = dn_calc_size(is64user);
2436 lck_mtx_unlock(&dn_mutex);
2437 buf = kalloc_data(size, Z_WAITOK | Z_ZERO);
2438 if (buf == NULL) {
2439 return ENOBUFS;
2440 }
2441 lck_mtx_lock(&dn_mutex);
2442 if (size == dn_calc_size(is64user)) {
2443 break;
2444 }
2445 kfree_data(buf, size);
2446 buf = NULL;
2447 }
2448 if (buf == NULL) {
2449 lck_mtx_unlock(&dn_mutex);
2450 return ENOBUFS;
2451 }
2452
2453 bp = buf;
2454 for (i = 0; i < HASHSIZE; i++) {
2455 SLIST_FOREACH(p, &pipehash[i], next) {
2456 /*
2457 * copy pipe descriptor into *bp, convert delay
2458 * back to ms, then copy the flow_set descriptor(s)
2459 * one at a time. After each flow_set, copy the
2460 * queue descriptor it owns.
2461 */
2462 if (is64user) {
2463 bp = cp_pipe_to_64_user(p,
2464 (struct dn_pipe_64 *)(void *)bp);
2465 } else {
2466 bp = cp_pipe_to_32_user(p,
2467 (struct dn_pipe_32 *)(void *)bp);
2468 }
2469 }
2470 }
	for (i = 0; i < HASHSIZE; i++) {
		SLIST_FOREACH(set, &flowsethash[i], next) {
			if (is64user) {
				struct dn_flow_set_64 *fs_bp =
				    (struct dn_flow_set_64 *)(void *)bp;
				cp_flow_set_to_64_user(set, fs_bp);
				/* XXX same hack as above */
				fs_bp->next = CAST_DOWN(user64_addr_t,
				    DN_IS_QUEUE);
				fs_bp->pipe = USER_ADDR_NULL;
				fs_bp->rq = USER_ADDR_NULL;
				bp += sizeof(struct dn_flow_set_64);
				bp = dn_copy_set_64(set, bp);
			} else {
				struct dn_flow_set_32 *fs_bp =
				    (struct dn_flow_set_32 *)(void *)bp;
				cp_flow_set_to_32_user(set, fs_bp);
				/* XXX same hack as above */
				fs_bp->next = CAST_DOWN_EXPLICIT(user32_addr_t,
				    DN_IS_QUEUE);
				fs_bp->pipe = (user32_addr_t)0;
				fs_bp->rq = (user32_addr_t)0;
				bp += sizeof(struct dn_flow_set_32);
				bp = dn_copy_set_32(set, bp);
			}
		}
	}
2485 lck_mtx_unlock(&dn_mutex);
2486 error = sooptcopyout(sopt, buf, size);
2487 kfree_data(buf, size);
2488 return error;
2489 }
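/*
 * A matching userland read (illustrative sketch; buffer growth and
 * error handling omitted — sooptcopyout() above trims the copy to the
 * caller's length):
 *
 *	char buf[16384];
 *	socklen_t len = sizeof(buf);
 *	getsockopt(s, IPPROTO_IP, IP_DUMMYNET_GET, buf, &len);
 */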
2490
2491 /*
2492 * Handler for the various dummynet socket options (get, flush, config, del)
2493 */
2494 static int
ip_dn_ctl(struct sockopt *sopt)
2496 {
2497 int error = 0;
2498 struct dn_pipe *p, tmp_pipe;
2499
2500 /* Disallow sets in really-really secure mode. */
2501 if (sopt->sopt_dir == SOPT_SET && securelevel >= 3) {
2502 return EPERM;
2503 }
2504
2505 switch (sopt->sopt_name) {
2506 default:
2507 printf("dummynet: -- unknown option %d", sopt->sopt_name);
2508 return EINVAL;
2509
2510 case IP_DUMMYNET_GET:
2511 error = dummynet_get(sopt);
2512 break;
2513
2514 case IP_DUMMYNET_FLUSH:
2515 dummynet_flush();
2516 break;
2517
2518 case IP_DUMMYNET_CONFIGURE:
2519 p = &tmp_pipe;
2520 if (proc_is64bit(sopt->sopt_p)) {
2521 error = cp_pipe_from_user_64( sopt, p );
2522 } else {
2523 error = cp_pipe_from_user_32( sopt, p );
2524 }
2525
2526 if (error) {
2527 break;
2528 }
2529 error = config_pipe(p);
2530 break;
2531
2532 case IP_DUMMYNET_DEL: /* remove a pipe or queue */
2533 p = &tmp_pipe;
2534 if (proc_is64bit(sopt->sopt_p)) {
2535 error = cp_pipe_from_user_64( sopt, p );
2536 } else {
2537 error = cp_pipe_from_user_32( sopt, p );
2538 }
2539 if (error) {
2540 break;
2541 }
2542
2543 error = delete_pipe(p);
2544 break;
2545 }
2546 return error;
2547 }
2548
2549 void
dummynet_init(void)
2551 {
2552 eventhandler_lists_ctxt_init(&dummynet_evhdlr_ctxt);
2553 }
2554
2555 void
ip_dn_init(void)
2557 {
	/* initialize the heaps and install the sockopt/io handlers */
2559 ready_heap.size = ready_heap.elements = 0;
2560 ready_heap.offset = 0;
2561
2562 wfq_ready_heap.size = wfq_ready_heap.elements = 0;
2563 wfq_ready_heap.offset = 0;
2564
2565 extract_heap.size = extract_heap.elements = 0;
2566 extract_heap.offset = 0;
2567 ip_dn_ctl_ptr = ip_dn_ctl;
2568 ip_dn_io_ptr = dummynet_io;
2569 }
2570
2571 struct dn_event_nwk_wq_entry {
2572 struct nwk_wq_entry nwk_wqe;
2573 struct dummynet_event dn_ev_arg;
2574 };
2575
2576 static void
dummynet_event_callback(void *arg)
2578 {
2579 struct dummynet_event *p_dn_ev = (struct dummynet_event *)arg;
2580
2581 EVENTHANDLER_INVOKE(&dummynet_evhdlr_ctxt, dummynet_event, p_dn_ev);
2583 }
2584
2585 void
dummynet_event_enqueue_nwk_wq_entry(struct dummynet_event *p_dn_event)
2587 {
2588 struct dn_event_nwk_wq_entry *p_dn_ev = NULL;
2589
2590 MALLOC(p_dn_ev, struct dn_event_nwk_wq_entry *,
2591 sizeof(struct dn_event_nwk_wq_entry),
2592 M_NWKWQ, M_WAITOK | M_ZERO);
2593
2594 p_dn_ev->nwk_wqe.func = dummynet_event_callback;
2595 p_dn_ev->nwk_wqe.is_arg_managed = TRUE;
2596 p_dn_ev->nwk_wqe.arg = &p_dn_ev->dn_ev_arg;
2597
2598 bcopy(p_dn_event, &(p_dn_ev->dn_ev_arg),
2599 sizeof(struct dummynet_event));
2600 nwk_wq_enqueue((struct nwk_wq_entry*)p_dn_ev);
2601 }
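/*
 * Consumers subscribe to these events via the eventhandler list set up
 * in dummynet_init().  A hedged sketch of a subscriber (the handler
 * typedef and dummy-arg convention are assumed to follow the other
 * eventhandler users in xnu):
 *
 *	static void
 *	my_dn_event_handler(struct eventhandler_entry_arg arg0,
 *	    struct dummynet_event *dn_event)
 *	{
 *		if (dn_event->dn_event_code == DUMMYNET_PIPE_CONFIG) {
 *			// react to the new bandwidth/delay/plr
 *		}
 *	}
 *
 *	EVENTHANDLER_REGISTER(&dummynet_evhdlr_ctxt, dummynet_event,
 *	    my_dn_event_handler, eventhandler_entry_dummy_arg,
 *	    EVENTHANDLER_PRI_ANY);
 */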
2602