/*
 * Copyright (c) 2000-2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998-2002 Luigi Rizzo, Universita` di Pisa
 * Portions Copyright (c) 2000 Akamba Corp.
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/ip_dummynet.c,v 1.84 2004/08/25 09:31:30 pjd Exp $
 */

#define DUMMYNET_DEBUG

/*
 * This module implements IP dummynet, a bandwidth limiter/delay emulator.
 * Description of the data structures used is in ip_dummynet.h
 * Here you mainly find the following blocks of code:
 *  + variable declarations;
 *  + heap management functions;
 *  + scheduler and dummynet functions;
 *  + configuration and initialization.
 *
 * NOTA BENE: critical sections are protected by the "dummynet lock".
 *
 * Most important Changes:
 *
 * 010124: Fixed WF2Q behaviour
 * 010122: Fixed spl protection.
 * 000601: WF2Q support
 * 000106: large rewrite, use heaps to handle very many pipes.
 * 980513: initial release
 *
 * include files marked with XXX are probably not needed
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h> /* XXX */
#include <sys/kernel.h>
#include <sys/random.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/time.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/route.h>
#include <net/kpi_protocol.h>
#if DUMMYNET
#include <net/kpi_protocol.h>
#endif /* DUMMYNET */
#include <net/nwk_wq.h>
#include <net/pfvar.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_dummynet.h>
#include <netinet/ip_var.h>

#include <netinet/ip6.h> /* for ip6_input, ip6_output prototypes */
#include <netinet6/ip6_var.h>

#include <stdbool.h>
#include <net/sockaddr_utils.h>

/*
 * We keep a private variable for the simulation time, but we could
 * probably use an existing one ("softticks" in sys/kern/kern_timer.c)
 */
static dn_key curr_time = 0; /* current simulation time */

/*
 * This is for the timer that fires to call dummynet() - we only enable
 * the timer when there are packets to process, otherwise it's disabled.
 */
static int timer_enabled = 0;

static int dn_hash_size = 64; /* default hash size */

/* statistics on number of queue searches and search steps */
static int searches, search_steps;
static int pipe_expire = 1; /* expire queue if empty */
static int dn_max_ratio = 16; /* max queues/buckets ratio */

static int red_lookup_depth = 256; /* RED - default lookup table depth */
static int red_avg_pkt_size = 512; /* RED - default medium packet size */
static int red_max_pkt_size = 1500; /* RED - default max packet size */

static int serialize = 0;

/*
 * Three heaps contain queues and pipes that the scheduler handles:
 *
 * ready_heap contains all dn_flow_queue related to fixed-rate pipes.
 *
 * wfq_ready_heap contains the pipes associated with WF2Q flows.
 *
 * extract_heap contains pipes associated with delay lines.
 */
static struct dn_heap ready_heap, extract_heap, wfq_ready_heap;

static int heap_init(struct dn_heap *h, int size);
static int heap_insert(struct dn_heap *h, dn_key key1, void *p);
static void heap_extract(struct dn_heap *h, void *obj);

static void transmit_event(struct dn_pipe *pipe, struct mbuf **head,
    struct mbuf **tail);
static void ready_event(struct dn_flow_queue *q, struct mbuf **head,
    struct mbuf **tail);
static void ready_event_wfq(struct dn_pipe *p, struct mbuf **head,
    struct mbuf **tail);

/*
 * Packets are retrieved from queues in Dummynet in chains instead of
 * packet-by-packet. The entire list of packets is first dequeued and
 * then sent out by the following function.
 */
static void dummynet_send(struct mbuf *m);

#define HASHSIZE 16
#define HASH(num) ((((num) >> 8) ^ ((num) >> 4) ^ (num)) & 0x0f)
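/*
 * Example: pipe number 66 (0x42) hashes to (0x00 ^ 0x04 ^ 0x42) & 0x0f
 * == 6, so it lives on the pipehash[6] chain declared below.
 */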
static struct dn_pipe_head pipehash[HASHSIZE];         /* all pipes */
static struct dn_flow_set_head flowsethash[HASHSIZE];  /* all flowsets */

#ifdef SYSCTL_NODE
SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Dummynet");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, hash_size,
    CTLFLAG_RW | CTLFLAG_LOCKED, &dn_hash_size, 0, "Default hash table size");
SYSCTL_QUAD(_net_inet_ip_dummynet, OID_AUTO, curr_time,
    CTLFLAG_RD | CTLFLAG_LOCKED, &curr_time, "Current tick");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, ready_heap,
    CTLFLAG_RD | CTLFLAG_LOCKED, &ready_heap.size, 0, "Size of ready heap");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, extract_heap,
    CTLFLAG_RD | CTLFLAG_LOCKED, &extract_heap.size, 0, "Size of extract heap");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, searches,
    CTLFLAG_RD | CTLFLAG_LOCKED, &searches, 0, "Number of queue searches");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, search_steps,
    CTLFLAG_RD | CTLFLAG_LOCKED, &search_steps, 0, "Number of queue search steps");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire,
    CTLFLAG_RW | CTLFLAG_LOCKED, &pipe_expire, 0, "Expire queue if empty");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, max_chain_len,
    CTLFLAG_RW | CTLFLAG_LOCKED, &dn_max_ratio, 0,
    "Max ratio between dynamic queues and buckets");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth,
    CTLFLAG_RD | CTLFLAG_LOCKED, &red_lookup_depth, 0, "Depth of RED lookup table");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size,
    CTLFLAG_RD | CTLFLAG_LOCKED, &red_avg_pkt_size, 0, "RED Medium packet size");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size,
    CTLFLAG_RD | CTLFLAG_LOCKED, &red_max_pkt_size, 0, "RED Max packet size");
#endif

#ifdef DUMMYNET_DEBUG
int dummynet_debug = 0;
#ifdef SYSCTL_NODE
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED, &dummynet_debug,
    0, "control debugging printfs");
#endif
#define DPRINTF(X) if (dummynet_debug) printf X
#else
#define DPRINTF(X)
#endif

/* dummynet lock */
static LCK_GRP_DECLARE(dn_mutex_grp, "dn");
static LCK_MTX_DECLARE(dn_mutex, &dn_mutex_grp);

static int config_pipe(struct dn_pipe *p);
static int ip_dn_ctl(struct sockopt *sopt);

static void dummynet(void *);
static void dummynet_flush(void);
void dummynet_drain(void);
static ip_dn_io_t dummynet_io;

static void cp_flow_set_to_64_user(struct dn_flow_set *set, struct dn_flow_set_64 *fs_bp);
static void cp_queue_to_64_user(struct dn_flow_queue *q, struct dn_flow_queue_64 *qp);
static char *cp_pipe_to_64_user(struct dn_pipe *p, struct dn_pipe_64 *pipe_bp);
static char *dn_copy_set_64(struct dn_flow_set *set, char *bp);
static int cp_pipe_from_user_64(struct sockopt *sopt, struct dn_pipe *p);

static void cp_flow_set_to_32_user(struct dn_flow_set *set, struct dn_flow_set_32 *fs_bp);
static void cp_queue_to_32_user(struct dn_flow_queue *q, struct dn_flow_queue_32 *qp);
static char *cp_pipe_to_32_user(struct dn_pipe *p, struct dn_pipe_32 *pipe_bp);
static char *dn_copy_set_32(struct dn_flow_set *set, char *bp);
static int cp_pipe_from_user_32(struct sockopt *sopt, struct dn_pipe *p);

static struct m_tag *m_tag_kalloc_dummynet(u_int32_t id, u_int16_t type, uint16_t len, int wait);
static void m_tag_kfree_dummynet(struct m_tag *tag);

struct eventhandler_lists_ctxt dummynet_evhdlr_ctxt;

uint32_t
my_random(void)
{
    uint32_t val;
    read_frandom(&val, sizeof(val));
    val &= 0x7FFFFFFF;

    return val;
}

/*
 * Heap management functions.
 *
 * In the heap, first node is element 0. Children of i are 2i+1 and 2i+2.
 * Some macros help finding parent/children so we can optimize them.
 *
 * heap_init() is called to expand the heap when needed.
 * Increment size in blocks of 16 entries.
 * XXX failure to allocate a new element is a pretty bad failure
 * as we basically stall a whole queue forever!!
 * Returns 1 on error, 0 on success
 */
#define HEAP_FATHER(x)  ( ( (x) - 1 ) / 2 )
#define HEAP_LEFT(x)    ( 2*(x) + 1 )
#define HEAP_IS_LEFT(x) ( (x) & 1 )
#define HEAP_RIGHT(x)   ( 2*(x) + 2 )
#define HEAP_SWAP(a, b, buffer) { buffer = a ; a = b ; b = buffer ; }
#define HEAP_INCREMENT  15
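/*
 * Index arithmetic, worked out: node 2 has children HEAP_LEFT(2) = 5
 * and HEAP_RIGHT(2) = 6, and HEAP_FATHER(5) == HEAP_FATHER(6) == 2;
 * odd indices are left children, so HEAP_IS_LEFT(5) is true. With
 * HEAP_INCREMENT 15, heap_init() rounds requested sizes up via
 * (new_size + 15) & ~15, i.e. to the next multiple of 16 entries.
 */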


int
cp_pipe_from_user_32(struct sockopt *sopt, struct dn_pipe *p)
{
    struct dn_pipe_32 user_pipe_32;
    int error = 0;

    error = sooptcopyin(sopt, &user_pipe_32, sizeof(struct dn_pipe_32), sizeof(struct dn_pipe_32));
    if (!error) {
        p->pipe_nr = user_pipe_32.pipe_nr;
        p->bandwidth = user_pipe_32.bandwidth;
        p->delay = user_pipe_32.delay;
        p->V = user_pipe_32.V;
        p->sum = user_pipe_32.sum;
        p->numbytes = user_pipe_32.numbytes;
        p->sched_time = user_pipe_32.sched_time;
        bcopy(user_pipe_32.if_name, p->if_name, IFNAMSIZ);
        p->ready = user_pipe_32.ready;

        p->fs.fs_nr = user_pipe_32.fs.fs_nr;
        p->fs.flags_fs = user_pipe_32.fs.flags_fs;
        p->fs.parent_nr = user_pipe_32.fs.parent_nr;
        p->fs.weight = user_pipe_32.fs.weight;
        p->fs.qsize = user_pipe_32.fs.qsize;
        p->fs.plr = user_pipe_32.fs.plr;
        p->fs.flow_mask = user_pipe_32.fs.flow_mask;
        p->fs.rq_size = user_pipe_32.fs.rq_size;
        p->fs.rq_elements = user_pipe_32.fs.rq_elements;
        p->fs.last_expired = user_pipe_32.fs.last_expired;
        p->fs.backlogged = user_pipe_32.fs.backlogged;
        p->fs.w_q = user_pipe_32.fs.w_q;
        p->fs.max_th = user_pipe_32.fs.max_th;
        p->fs.min_th = user_pipe_32.fs.min_th;
        p->fs.max_p = user_pipe_32.fs.max_p;
        p->fs.c_1 = user_pipe_32.fs.c_1;
        p->fs.c_2 = user_pipe_32.fs.c_2;
        p->fs.c_3 = user_pipe_32.fs.c_3;
        p->fs.c_4 = user_pipe_32.fs.c_4;
        p->fs.lookup_depth = user_pipe_32.fs.lookup_depth;
        p->fs.lookup_step = user_pipe_32.fs.lookup_step;
        p->fs.lookup_weight = user_pipe_32.fs.lookup_weight;
        p->fs.avg_pkt_size = user_pipe_32.fs.avg_pkt_size;
        p->fs.max_pkt_size = user_pipe_32.fs.max_pkt_size;
    }
    return error;
}


int
cp_pipe_from_user_64(struct sockopt *sopt, struct dn_pipe *p)
{
    struct dn_pipe_64 user_pipe_64;
    int error = 0;

    error = sooptcopyin(sopt, &user_pipe_64, sizeof(struct dn_pipe_64), sizeof(struct dn_pipe_64));
    if (!error) {
        p->pipe_nr = user_pipe_64.pipe_nr;
        p->bandwidth = user_pipe_64.bandwidth;
        p->delay = user_pipe_64.delay;
        p->V = user_pipe_64.V;
        p->sum = user_pipe_64.sum;
        p->numbytes = user_pipe_64.numbytes;
        p->sched_time = user_pipe_64.sched_time;
        bcopy(user_pipe_64.if_name, p->if_name, IFNAMSIZ);
        p->ready = user_pipe_64.ready;

        p->fs.fs_nr = user_pipe_64.fs.fs_nr;
        p->fs.flags_fs = user_pipe_64.fs.flags_fs;
        p->fs.parent_nr = user_pipe_64.fs.parent_nr;
        p->fs.weight = user_pipe_64.fs.weight;
        p->fs.qsize = user_pipe_64.fs.qsize;
        p->fs.plr = user_pipe_64.fs.plr;
        p->fs.flow_mask = user_pipe_64.fs.flow_mask;
        p->fs.rq_size = user_pipe_64.fs.rq_size;
        p->fs.rq_elements = user_pipe_64.fs.rq_elements;
        p->fs.last_expired = user_pipe_64.fs.last_expired;
        p->fs.backlogged = user_pipe_64.fs.backlogged;
        p->fs.w_q = user_pipe_64.fs.w_q;
        p->fs.max_th = user_pipe_64.fs.max_th;
        p->fs.min_th = user_pipe_64.fs.min_th;
        p->fs.max_p = user_pipe_64.fs.max_p;
        p->fs.c_1 = user_pipe_64.fs.c_1;
        p->fs.c_2 = user_pipe_64.fs.c_2;
        p->fs.c_3 = user_pipe_64.fs.c_3;
        p->fs.c_4 = user_pipe_64.fs.c_4;
        p->fs.lookup_depth = user_pipe_64.fs.lookup_depth;
        p->fs.lookup_step = user_pipe_64.fs.lookup_step;
        p->fs.lookup_weight = user_pipe_64.fs.lookup_weight;
        p->fs.avg_pkt_size = user_pipe_64.fs.avg_pkt_size;
        p->fs.max_pkt_size = user_pipe_64.fs.max_pkt_size;
    }
    return error;
}

static void
cp_flow_set_to_32_user(struct dn_flow_set *set, struct dn_flow_set_32 *fs_bp)
{
    fs_bp->fs_nr = set->fs_nr;
    fs_bp->flags_fs = set->flags_fs;
    fs_bp->parent_nr = set->parent_nr;
    fs_bp->weight = set->weight;
    fs_bp->qsize = set->qsize;
    fs_bp->plr = set->plr;
    fs_bp->flow_mask = set->flow_mask;
    fs_bp->rq_size = set->rq_size;
    fs_bp->rq_elements = set->rq_elements;
    fs_bp->last_expired = set->last_expired;
    fs_bp->backlogged = set->backlogged;
    fs_bp->w_q = set->w_q;
    fs_bp->max_th = set->max_th;
    fs_bp->min_th = set->min_th;
    fs_bp->max_p = set->max_p;
    fs_bp->c_1 = set->c_1;
    fs_bp->c_2 = set->c_2;
    fs_bp->c_3 = set->c_3;
    fs_bp->c_4 = set->c_4;
    fs_bp->w_q_lookup = CAST_DOWN_EXPLICIT(user32_addr_t, set->w_q_lookup);
    fs_bp->lookup_depth = set->lookup_depth;
    fs_bp->lookup_step = set->lookup_step;
    fs_bp->lookup_weight = set->lookup_weight;
    fs_bp->avg_pkt_size = set->avg_pkt_size;
    fs_bp->max_pkt_size = set->max_pkt_size;
}

static void
cp_flow_set_to_64_user(struct dn_flow_set *set, struct dn_flow_set_64 *fs_bp)
{
    fs_bp->fs_nr = set->fs_nr;
    fs_bp->flags_fs = set->flags_fs;
    fs_bp->parent_nr = set->parent_nr;
    fs_bp->weight = set->weight;
    fs_bp->qsize = set->qsize;
    fs_bp->plr = set->plr;
    fs_bp->flow_mask = set->flow_mask;
    fs_bp->rq_size = set->rq_size;
    fs_bp->rq_elements = set->rq_elements;
    fs_bp->last_expired = set->last_expired;
    fs_bp->backlogged = set->backlogged;
    fs_bp->w_q = set->w_q;
    fs_bp->max_th = set->max_th;
    fs_bp->min_th = set->min_th;
    fs_bp->max_p = set->max_p;
    fs_bp->c_1 = set->c_1;
    fs_bp->c_2 = set->c_2;
    fs_bp->c_3 = set->c_3;
    fs_bp->c_4 = set->c_4;
    fs_bp->w_q_lookup = CAST_DOWN(user64_addr_t, set->w_q_lookup);
    fs_bp->lookup_depth = set->lookup_depth;
    fs_bp->lookup_step = set->lookup_step;
    fs_bp->lookup_weight = set->lookup_weight;
    fs_bp->avg_pkt_size = set->avg_pkt_size;
    fs_bp->max_pkt_size = set->max_pkt_size;
}

static void
cp_queue_to_32_user(struct dn_flow_queue *q, struct dn_flow_queue_32 *qp)
{
    qp->id = q->id;
    qp->len = q->len;
    qp->len_bytes = q->len_bytes;
    qp->numbytes = q->numbytes;
    qp->tot_pkts = q->tot_pkts;
    qp->tot_bytes = q->tot_bytes;
    qp->drops = q->drops;
    qp->hash_slot = q->hash_slot;
    qp->avg = q->avg;
    qp->count = q->count;
    qp->random = q->random;
    qp->q_time = (u_int32_t)q->q_time;
    qp->heap_pos = q->heap_pos;
    qp->sched_time = q->sched_time;
    qp->S = q->S;
    qp->F = q->F;
}

static void
cp_queue_to_64_user(struct dn_flow_queue *q, struct dn_flow_queue_64 *qp)
{
    qp->id = q->id;
    qp->len = q->len;
    qp->len_bytes = q->len_bytes;
    qp->numbytes = q->numbytes;
    qp->tot_pkts = q->tot_pkts;
    qp->tot_bytes = q->tot_bytes;
    qp->drops = q->drops;
    qp->hash_slot = q->hash_slot;
    qp->avg = q->avg;
    qp->count = q->count;
    qp->random = q->random;
    qp->q_time = (u_int32_t)q->q_time;
    qp->heap_pos = q->heap_pos;
    qp->sched_time = q->sched_time;
    qp->S = q->S;
    qp->F = q->F;
}

static char *
cp_pipe_to_32_user(struct dn_pipe *p, struct dn_pipe_32 *pipe_bp)
{
    char *bp;

    pipe_bp->pipe_nr = p->pipe_nr;
    pipe_bp->bandwidth = p->bandwidth;
    pipe_bp->delay = p->delay;
    bcopy(&(p->scheduler_heap), &(pipe_bp->scheduler_heap), sizeof(struct dn_heap_32));
    pipe_bp->scheduler_heap.p = CAST_DOWN_EXPLICIT(user32_addr_t, pipe_bp->scheduler_heap.p);
    bcopy(&(p->not_eligible_heap), &(pipe_bp->not_eligible_heap), sizeof(struct dn_heap_32));
    pipe_bp->not_eligible_heap.p = CAST_DOWN_EXPLICIT(user32_addr_t, pipe_bp->not_eligible_heap.p);
    bcopy(&(p->idle_heap), &(pipe_bp->idle_heap), sizeof(struct dn_heap_32));
    pipe_bp->idle_heap.p = CAST_DOWN_EXPLICIT(user32_addr_t, pipe_bp->idle_heap.p);
    pipe_bp->V = p->V;
    pipe_bp->sum = p->sum;
    pipe_bp->numbytes = p->numbytes;
    pipe_bp->sched_time = p->sched_time;
    bcopy(p->if_name, pipe_bp->if_name, IFNAMSIZ);
    pipe_bp->ifp = CAST_DOWN_EXPLICIT(user32_addr_t, p->ifp);
    pipe_bp->ready = p->ready;

    cp_flow_set_to_32_user(&(p->fs), &(pipe_bp->fs));

    pipe_bp->delay = (pipe_bp->delay * 1000) / (hz * 10);
    /*
     * XXX the following is a hack based on ->next being the
     * first field in dn_pipe and dn_flow_set. The correct
     * solution would be to move the dn_flow_set to the beginning
     * of struct dn_pipe.
     */
    pipe_bp->next = CAST_DOWN_EXPLICIT(user32_addr_t, DN_IS_PIPE);
    /* clean pointers */
    pipe_bp->head = pipe_bp->tail = (user32_addr_t)0;
    pipe_bp->fs.next = (user32_addr_t)0;
    pipe_bp->fs.pipe = (user32_addr_t)0;
    pipe_bp->fs.rq = (user32_addr_t)0;
    bp = ((char *)pipe_bp) + sizeof(struct dn_pipe_32);
    return dn_copy_set_32(&(p->fs), bp);
}

static char *
cp_pipe_to_64_user(struct dn_pipe *p, struct dn_pipe_64 *pipe_bp)
{
    char *bp;

    pipe_bp->pipe_nr = p->pipe_nr;
    pipe_bp->bandwidth = p->bandwidth;
    pipe_bp->delay = p->delay;
    bcopy(&(p->scheduler_heap), &(pipe_bp->scheduler_heap), sizeof(struct dn_heap_64));
    pipe_bp->scheduler_heap.p = CAST_DOWN(user64_addr_t, pipe_bp->scheduler_heap.p);
    bcopy(&(p->not_eligible_heap), &(pipe_bp->not_eligible_heap), sizeof(struct dn_heap_64));
    pipe_bp->not_eligible_heap.p = CAST_DOWN(user64_addr_t, pipe_bp->not_eligible_heap.p);
    bcopy(&(p->idle_heap), &(pipe_bp->idle_heap), sizeof(struct dn_heap_64));
    pipe_bp->idle_heap.p = CAST_DOWN(user64_addr_t, pipe_bp->idle_heap.p);
    pipe_bp->V = p->V;
    pipe_bp->sum = p->sum;
    pipe_bp->numbytes = p->numbytes;
    pipe_bp->sched_time = p->sched_time;
    bcopy(p->if_name, pipe_bp->if_name, IFNAMSIZ);
    pipe_bp->ifp = CAST_DOWN(user64_addr_t, p->ifp);
    pipe_bp->ready = p->ready;

    cp_flow_set_to_64_user(&(p->fs), &(pipe_bp->fs));

    pipe_bp->delay = (pipe_bp->delay * 1000) / (hz * 10);
    /*
     * XXX the following is a hack based on ->next being the
     * first field in dn_pipe and dn_flow_set. The correct
     * solution would be to move the dn_flow_set to the beginning
     * of struct dn_pipe.
     */
    pipe_bp->next = CAST_DOWN(user64_addr_t, DN_IS_PIPE);
    /* clean pointers */
    pipe_bp->head = pipe_bp->tail = USER_ADDR_NULL;
    pipe_bp->fs.next = USER_ADDR_NULL;
    pipe_bp->fs.pipe = USER_ADDR_NULL;
    pipe_bp->fs.rq = USER_ADDR_NULL;
    bp = ((char *)pipe_bp) + sizeof(struct dn_pipe_64);
    return dn_copy_set_64(&(p->fs), bp);
}

static int
heap_init(struct dn_heap *h, int new_size)
{
    struct dn_heap_entry *p;

    if (h->size >= new_size) {
        printf("dummynet: heap_init, Bogus call, have %d want %d\n",
            h->size, new_size);
        return 0;
    }
    new_size = (new_size + HEAP_INCREMENT) & ~HEAP_INCREMENT;
    p = krealloc_type(struct dn_heap_entry, h->size, new_size,
        h->p, Z_NOWAIT | Z_ZERO);
    if (p == NULL) {
        printf("dummynet: heap_init, resize %d failed\n", new_size);
        return 1; /* error */
    }
    h->p = p;
    h->size = new_size;
    return 0;
}

/*
 * Insert element in heap. Normally, p != NULL, we insert p in
 * a new position and bubble up. If p == NULL, then the element is
 * already in place, and key is the position where to start the
 * bubble-up.
 * Returns 1 on failure (cannot allocate new heap entry)
 *
 * If offset > 0 the position (index, int) of the element in the heap is
 * also stored in the element itself at the given offset in bytes.
 */
#define SET_OFFSET(heap, node) \
    if (heap->offset > 0) \
        *((int *)(void *)((char *)(heap->p[node].object) + heap->offset)) = node;
/*
 * RESET_OFFSET is used for sanity checks. It sets offset to an invalid value.
 */
#define RESET_OFFSET(heap, node) \
    if (heap->offset > 0) \
        *((int *)(void *)((char *)(heap->p[node].object) + heap->offset)) = -1;
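/*
 * The offset mechanism is what makes heap_extract() of an arbitrary
 * element O(log n): objects such as dn_flow_queue record their current
 * heap index (their heap_pos field) at byte offset heap->offset inside
 * themselves, so removal from the middle needs no linear scan.
 */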
static int
heap_insert(struct dn_heap *h, dn_key key1, void *p)
{
    int son = h->elements;

    if (p == NULL) { /* data already there, set starting point */
        VERIFY(key1 < INT_MAX);
        son = (int)key1;
    } else { /* insert new element at the end, possibly resize */
        son = h->elements;
        if (son == h->size) { /* need resize... */
            if (heap_init(h, h->elements + 1)) {
                return 1; /* failure... */
            }
        }
        h->p[son].object = p;
        h->p[son].key = key1;
        h->elements++;
    }
    while (son > 0) { /* bubble up */
        int father = HEAP_FATHER(son);
        struct dn_heap_entry tmp;

        if (DN_KEY_LT(h->p[father].key, h->p[son].key)) {
            break; /* found right position */
        }
        /* son smaller than father, swap and repeat */
        HEAP_SWAP(h->p[son], h->p[father], tmp);
        SET_OFFSET(h, son);
        son = father;
    }
    SET_OFFSET(h, son);
    return 0;
}

/*
 * remove top element from heap, or obj if obj != NULL
 */
static void
heap_extract(struct dn_heap *h, void *obj)
{
    int child, father, maxelt = h->elements - 1;

    if (maxelt < 0) {
        printf("dummynet: warning, extract from empty heap 0x%llx\n",
            (uint64_t)VM_KERNEL_ADDRPERM(h));
        return;
    }
    father = 0; /* default: move up smallest child */
    if (obj != NULL) { /* extract specific element, index is at offset */
        if (h->offset <= 0) {
            panic("dummynet: heap_extract from middle not supported on this heap!!!");
        }
        father = *((int *)(void *)((char *)obj + h->offset));
        if (father < 0 || father >= h->elements) {
            printf("dummynet: heap_extract, father %d out of bound 0..%d\n",
                father, h->elements);
            panic("dummynet: heap_extract");
        }
    }
    RESET_OFFSET(h, father);
    child = HEAP_LEFT(father); /* left child */
    while (child <= maxelt) { /* valid entry */
        if (child != maxelt && DN_KEY_LT(h->p[child + 1].key, h->p[child].key)) {
            child = child + 1; /* take right child, otherwise left */
        }
        h->p[father] = h->p[child];
        SET_OFFSET(h, father);
        father = child;
        child = HEAP_LEFT(child); /* left child for next loop */
    }
    h->elements--;
    if (father != maxelt) {
        /*
         * Fill hole with last entry and bubble up, reusing the insert code
         */
        h->p[father] = h->p[maxelt];
        heap_insert(h, father, NULL); /* this one cannot fail */
    }
}

/*
 * heapify() will reorganize data inside an array to maintain the
 * heap property. It is needed when we delete a bunch of entries.
 */
static void
heapify(struct dn_heap *h)
{
    int i;

    for (i = 0; i < h->elements; i++) {
        heap_insert(h, i, NULL);
    }
}

/*
 * cleanup the heap and free data structure
 */
static void
heap_free(struct dn_heap *h)
{
    kfree_type(struct dn_heap_entry, h->size, h->p);
    bzero(h, sizeof(*h));
}

/*
 * --- end of heap management functions ---
 */

/*
 * Return the mbuf tag holding the dummynet state. As an optimization
 * this is assumed to be the first tag on the list. If this turns out
 * wrong we'll need to search the list.
 */
static struct dn_pkt_tag *
dn_tag_get(struct mbuf *m)
{
    struct m_tag *mtag = m_tag_first(m);

    if (!(mtag != NULL &&
        mtag->m_tag_id == KERNEL_MODULE_TAG_ID &&
        mtag->m_tag_type == KERNEL_TAG_TYPE_DUMMYNET)) {
        panic("packet on dummynet queue w/o dummynet tag: 0x%llx",
            (uint64_t)VM_KERNEL_ADDRPERM(m));
    }

    return (struct dn_pkt_tag *)(mtag->m_tag_data);
}

/*
 * Scheduler functions:
 *
 * transmit_event() is called when the delay-line needs to enter
 * the scheduler, either because of existing pkts getting ready,
 * or new packets entering the queue. The event handled is the delivery
 * time of the packet.
 *
 * ready_event() does something similar with fixed-rate queues, and the
 * event handled is the finish time of the head pkt.
 *
 * ready_event_wfq() does something similar with WF2Q queues, and the
 * event handled is the start time of the head pkt.
 *
 * In all cases, we make sure that the data structures are consistent
 * before passing pkts out, because this might trigger recursive
 * invocations of the procedures.
 */
static void
transmit_event(struct dn_pipe *pipe, struct mbuf **head, struct mbuf **tail)
{
    struct mbuf *m;
    struct dn_pkt_tag *pkt = NULL;
    u_int64_t schedule_time;

    LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);
    ASSERT(serialize >= 0);
    if (serialize == 0) {
        while ((m = pipe->head) != NULL) {
            pkt = dn_tag_get(m);
            if (!DN_KEY_LEQ(pkt->dn_output_time, curr_time)) {
                break;
            }

            pipe->head = m->m_nextpkt;
            if (*tail != NULL) {
                (*tail)->m_nextpkt = m;
            } else {
                *head = m;
            }
            *tail = m;
        }

        if (*tail != NULL) {
            (*tail)->m_nextpkt = NULL;
        }
    }

    schedule_time = pkt == NULL || DN_KEY_LEQ(pkt->dn_output_time, curr_time) ?
        curr_time + 1 : pkt->dn_output_time;

    /* if there are leftover packets, put the pipe into the heap for next ready event */
    if ((m = pipe->head) != NULL) {
        pkt = dn_tag_get(m);
        /* XXX should check errors on heap_insert, by draining the
         * whole pipe p and hoping in the future we are more successful
         */
        heap_insert(&extract_heap, schedule_time, pipe);
    }
}

/*
 * the following macro computes how many ticks we have to wait
 * before being able to transmit a packet. The credit is taken from
 * either a pipe (WF2Q) or a flow_queue (per-flow queueing)
 */

/* hz is 100, which gives a granularity of 10ms in the old timer.
 * The timer has been changed to fire every 1ms, so the use of
 * hz has been modified here. All instances of hz have been left
 * in place but adjusted by a factor of 10 so that hz is functionally
 * equal to 1000.
 */
#define SET_TICKS(_m, q, p) \
    ((_m)->m_pkthdr.len * 8 * (hz * 10) - (q)->numbytes + p->bandwidth - 1) / \
    p->bandwidth;
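/*
 * Worked example: with hz == 100, hz*10 is 1000 ticks/sec. A 1500-byte
 * packet on a 1,000,000 bit/s pipe with no accumulated credit
 * (q->numbytes == 0) needs (1500*8*1000 + 999999) / 1000000 = 12 ticks,
 * i.e. the 12 ms required to clock 12,000 bits out at 1 Mbit/s.
 */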

/*
 * extract pkt from queue, compute output time (could be now)
 * and put into delay line (p_queue)
 */
static void
move_pkt(struct mbuf *pkt, struct dn_flow_queue *q,
    struct dn_pipe *p, int len)
{
    struct dn_pkt_tag *dt = dn_tag_get(pkt);

    q->head = pkt->m_nextpkt;
    q->len--;
    q->len_bytes -= len;

    dt->dn_output_time = curr_time + p->delay;

    if (p->head == NULL) {
        p->head = pkt;
    } else {
        p->tail->m_nextpkt = pkt;
    }
    p->tail = pkt;
    p->tail->m_nextpkt = NULL;
}

/*
 * ready_event() is invoked every time the queue must enter the
 * scheduler, either because the first packet arrives, or because
 * a previously scheduled event fired.
 * On invocation, drain as many pkts as possible (could be 0) and then
 * if there are leftover packets reinsert the pkt in the scheduler.
 */
static void
ready_event(struct dn_flow_queue *q, struct mbuf **head, struct mbuf **tail)
{
    struct mbuf *pkt;
    struct dn_pipe *p = q->fs->pipe;
    int p_was_empty;

    LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);

    if (p == NULL) {
        printf("dummynet: ready_event pipe is gone\n");
        return;
    }
    p_was_empty = (p->head == NULL);

    /*
     * schedule fixed-rate queues linked to this pipe:
     * Account for the bw accumulated since last scheduling, then
     * drain as many pkts as allowed by q->numbytes and move to
     * the delay line (in p) computing output time.
     * bandwidth==0 (no limit) means we can drain the whole queue,
     * setting len_scaled = 0 does the job.
     */
    q->numbytes += (curr_time - q->sched_time) * p->bandwidth;
    while ((pkt = q->head) != NULL) {
        int len = pkt->m_pkthdr.len;
        int len_scaled = p->bandwidth ? len * 8 * (hz * 10) : 0;
        if (len_scaled > q->numbytes) {
            break;
        }
        q->numbytes -= len_scaled;
        move_pkt(pkt, q, p, len);
    }
    /*
     * If we have more packets queued, schedule next ready event
     * (can only occur when bandwidth != 0, otherwise we would have
     * flushed the whole queue in the previous loop).
     * To this purpose we record the current time and compute how many
     * ticks to go for the finish time of the packet.
     */
    if ((pkt = q->head) != NULL) { /* this implies bandwidth != 0 */
        dn_key t = SET_TICKS(pkt, q, p); /* ticks I have to wait */
        q->sched_time = curr_time;
        heap_insert(&ready_heap, curr_time + t, (void *)q);
        /* XXX should check errors on heap_insert, and drain the whole
         * queue on error hoping next time we are luckier.
         */
    } else { /* RED needs to know when the queue becomes empty */
        q->q_time = curr_time;
        q->numbytes = 0;
    }
    /*
     * If the delay line was empty call transmit_event(p) now.
     * Otherwise, the scheduler will take care of it.
     */
    if (p_was_empty) {
        transmit_event(p, head, tail);
    }
}

/*
 * Called when we can transmit packets on WF2Q queues. Take pkts out of
 * the queues at their start time, and enqueue into the delay line.
 * Packets are drained until p->numbytes < 0. As long as
 * len_scaled >= p->numbytes, the packet goes into the delay line
 * with a deadline p->delay. For the last packet, if p->numbytes < 0,
 * there is an additional delay.
 */
static void
ready_event_wfq(struct dn_pipe *p, struct mbuf **head, struct mbuf **tail)
{
    int p_was_empty = (p->head == NULL);
    struct dn_heap *sch = &(p->scheduler_heap);
    struct dn_heap *neh = &(p->not_eligible_heap);
    int64_t p_numbytes = p->numbytes;

    LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);

    if (p->if_name[0] == 0) { /* tx clock is simulated */
        p_numbytes += (curr_time - p->sched_time) * p->bandwidth;
    } else { /* tx clock is for real, the ifq must be empty or this is a NOP */
        if (p->ifp && !IFCQ_IS_EMPTY(p->ifp->if_snd)) {
            return;
        } else {
            DPRINTF(("dummynet: pipe %d ready from %s --\n",
                p->pipe_nr, p->if_name));
        }
    }

    /*
     * While we have backlogged traffic AND credit, we need to do
     * something on the queue.
     */
    while (p_numbytes >= 0 && (sch->elements > 0 || neh->elements > 0)) {
        if (sch->elements > 0) { /* have some eligible pkts to send out */
            struct dn_flow_queue *q = sch->p[0].object;
            struct mbuf *pkt = q->head;
            struct dn_flow_set *fs = q->fs;
            u_int32_t len = pkt->m_pkthdr.len;
            u_int64_t len_scaled = p->bandwidth ? len * 8 * (hz * 10) : 0;

            heap_extract(sch, NULL); /* remove queue from heap */
            p_numbytes -= len_scaled;
            move_pkt(pkt, q, p, len);

            p->V += (len << MY_M) / p->sum; /* update V */
            q->S = q->F; /* update start time */
            if (q->len == 0) { /* Flow not backlogged any more */
                fs->backlogged--;
                heap_insert(&(p->idle_heap), q->F, q);
            } else { /* still backlogged */
                /*
                 * update F and position in backlogged queue, then
                 * put flow in not_eligible_heap (we will fix this later).
                 */
                len = (q->head)->m_pkthdr.len;
                q->F += (len << MY_M) / (u_int64_t)fs->weight;
                if (DN_KEY_LEQ(q->S, p->V)) {
                    heap_insert(neh, q->S, q);
                } else {
                    heap_insert(sch, q->F, q);
                }
            }
        }
        /*
         * now compute V = max(V, min(S_i)). Remember that all elements in sch
         * have by definition S_i <= V so if sch is not empty, V is surely
         * the max and we must not update it. Conversely, if sch is empty
         * we only need to look at neh.
         */
        if (sch->elements == 0 && neh->elements > 0) {
            p->V = MAX64(p->V, neh->p[0].key);
        }
        /* move from neh to sch any packets that have become eligible */
        while (neh->elements > 0 && DN_KEY_LEQ(neh->p[0].key, p->V)) {
            struct dn_flow_queue *q = neh->p[0].object;
            heap_extract(neh, NULL);
            heap_insert(sch, q->F, q);
        }

        if (p->if_name[0] != '\0') { /* tx clock is from a real thing */
            p_numbytes = -1; /* mark not ready for I/O */
            break;
        }
    }
    if (sch->elements == 0 && neh->elements == 0 && p_numbytes >= 0
        && p->idle_heap.elements > 0) {
        /*
         * no traffic and no events scheduled. We can get rid of idle-heap.
         */
        int i;

        for (i = 0; i < p->idle_heap.elements; i++) {
            struct dn_flow_queue *q = p->idle_heap.p[i].object;

            q->F = 0;
            q->S = q->F + 1;
        }
        p->sum = 0;
        p->V = 0;
        p->idle_heap.elements = 0;
    }
    /*
     * If we are getting clocks from dummynet (not a real interface) and
     * if we are under credit, schedule the next ready event.
     * Also fix the delivery time of the last packet.
     */
    if (p->if_name[0] == 0 && p_numbytes < 0) { /* this implies bandwidth > 0 */
        dn_key t = 0; /* number of ticks I have to wait */

        if (p->bandwidth > 0) {
            t = (p->bandwidth - 1 - p_numbytes) / p->bandwidth;
        }
        dn_tag_get(p->tail)->dn_output_time += t;
        p->sched_time = curr_time;
        heap_insert(&wfq_ready_heap, curr_time + t, (void *)p);
        /* XXX should check errors on heap_insert, and drain the whole
         * queue on error hoping next time we are luckier.
         */
    }

    /* Fit (adjust if necessary) 64bit result into 32bit variable. */
    if (p_numbytes > INT_MAX) {
        p->numbytes = INT_MAX;
    } else if (p_numbytes < INT_MIN) {
        p->numbytes = INT_MIN;
    } else {
        p->numbytes = (int)p_numbytes;
    }

    /*
     * If the delay line was empty call transmit_event(p) now.
     * Otherwise, the scheduler will take care of it.
     */
    if (p_was_empty) {
        transmit_event(p, head, tail);
    }
}

/*
 * This is called every 1ms. It is used to
 * increment the current tick counter and schedule expired events.
 */
static void
dummynet(__unused void *unused)
{
    void *p; /* generic parameter to handler */
    struct dn_heap *h;
    struct dn_heap *heaps[3];
    struct mbuf *head = NULL, *tail = NULL;
    int i;
    struct dn_pipe *pe;
    struct timespec ts;
    struct timeval tv;

    heaps[0] = &ready_heap;     /* fixed-rate queues */
    heaps[1] = &wfq_ready_heap; /* wfq queues */
    heaps[2] = &extract_heap;   /* delay line */

    lck_mtx_lock(&dn_mutex);

    /* make all time measurements in milliseconds (ms) -
     * here we convert secs and usecs to msecs (just divide the
     * usecs and take the closest whole number).
     */
    microuptime(&tv);
    curr_time = (tv.tv_sec * 1000) + (tv.tv_usec / 1000);

    for (i = 0; i < 3; i++) {
        h = heaps[i];
        while (h->elements > 0 && DN_KEY_LEQ(h->p[0].key, curr_time)) {
            if (h->p[0].key > curr_time) {
                printf("dummynet: warning, heap %d is %d ticks late\n",
                    i, (int)(curr_time - h->p[0].key));
            }
            p = h->p[0].object; /* store a copy before heap_extract */
            heap_extract(h, NULL); /* need to extract before processing */
            if (i == 0) {
                ready_event(p, &head, &tail);
            } else if (i == 1) {
                struct dn_pipe *pipe = p;
                if (pipe->if_name[0] != '\0') {
                    printf("dummynet: bad ready_event_wfq for pipe %s\n",
                        pipe->if_name);
                } else {
                    ready_event_wfq(p, &head, &tail);
                }
            } else {
                transmit_event(p, &head, &tail);
            }
        }
    }
    /* sweep pipes trying to expire idle flow_queues */
    for (i = 0; i < HASHSIZE; i++) {
        SLIST_FOREACH(pe, &pipehash[i], next) {
            if (pe->idle_heap.elements > 0 &&
                DN_KEY_LT(pe->idle_heap.p[0].key, pe->V)) {
                struct dn_flow_queue *q = pe->idle_heap.p[0].object;

                heap_extract(&(pe->idle_heap), NULL);
                q->S = q->F + 1; /* mark timestamp as invalid */
                pe->sum -= q->fs->weight;
            }
        }
    }

    /* check the heaps to see if there's still stuff in there, and
     * only set the timer if there are packets to process
     */
    timer_enabled = 0;
    for (i = 0; i < 3; i++) {
        h = heaps[i];
        if (h->elements > 0) { /* set the timer */
            ts.tv_sec = 0;
            ts.tv_nsec = 1 * 1000000; /* 1ms */
            timer_enabled = 1;
            bsd_timeout(dummynet, NULL, &ts);
            break;
        }
    }

    if (head != NULL) {
        serialize++;
    }

    lck_mtx_unlock(&dn_mutex);

    /* Send out the de-queued list of ready-to-send packets */
    if (head != NULL) {
        dummynet_send(head);
        lck_mtx_lock(&dn_mutex);
        serialize--;
        lck_mtx_unlock(&dn_mutex);
    }
}

static void
dummynet_send(struct mbuf *m)
{
    struct dn_pkt_tag *pkt;
    struct mbuf *n;

    for (; m != NULL; m = n) {
        n = m->m_nextpkt;
        m->m_nextpkt = NULL;
        pkt = dn_tag_get(m);

        DPRINTF(("dummynet_send m: 0x%llx dn_dir: %d dn_flags: 0x%x\n",
            (uint64_t)VM_KERNEL_ADDRPERM(m), pkt->dn_dir,
            pkt->dn_flags));

        switch (pkt->dn_dir) {
        case DN_TO_IP_OUT: {
            struct route tmp_rt;

            /* route is already in the packet's dn_ro */
            bzero(&tmp_rt, sizeof(tmp_rt));

            /* Force IP_RAWOUTPUT as the IP header is fully formed */
            pkt->dn_flags |= IP_RAWOUTPUT | IP_FORWARDING;
            (void) ip_output(m, NULL, &tmp_rt, pkt->dn_flags, NULL, NULL);
            ROUTE_RELEASE(&tmp_rt);
            break;
        }
        case DN_TO_IP_IN:
            proto_inject(PF_INET, m);
            break;
        case DN_TO_IP6_OUT: {
            /* routes already in the packet's dn_{ro6,pmtu} */
            if (pkt->dn_origifp != NULL) {
                ip6_output_setsrcifscope(m, pkt->dn_origifp->if_index, NULL);
                ip6_output_setdstifscope(m, pkt->dn_origifp->if_index, NULL);
            } else {
                ip6_output_setsrcifscope(m, IFSCOPE_UNKNOWN, NULL);
                ip6_output_setdstifscope(m, IFSCOPE_UNKNOWN, NULL);
            }

            ip6_output(m, NULL, NULL, IPV6_FORWARDING, NULL, NULL, NULL);
            break;
        }
        case DN_TO_IP6_IN:
            proto_inject(PF_INET6, m);
            break;
        default:
            printf("dummynet: bad switch %d!\n", pkt->dn_dir);
            m_freem(m);
            break;
        }
    }
}

/*
 * Unconditionally expire empty queues in case of shortage.
 * Returns the number of queues freed.
 */
static int
expire_queues(struct dn_flow_set *fs)
{
    struct dn_flow_queue *q, *prev;
    int i, initial_elements = fs->rq_elements;
    struct timeval timenow;

    /* reviewed for getmicrotime usage */
    getmicrotime(&timenow);

    if (fs->last_expired == timenow.tv_sec) {
        return 0;
    }
    fs->last_expired = (int)timenow.tv_sec;
    for (i = 0; i <= fs->rq_size; i++) { /* last one is overflow */
        for (prev = NULL, q = fs->rq[i]; q != NULL;) {
            if (q->head != NULL || q->S != q->F + 1) {
                prev = q;
                q = q->next;
            } else { /* entry is idle, expire it */
                struct dn_flow_queue *old_q = q;

                if (prev != NULL) {
                    prev->next = q = q->next;
                } else {
                    fs->rq[i] = q = q->next;
                }
                fs->rq_elements--;
                kfree_type(struct dn_flow_queue, old_q);
            }
        }
    }
    return initial_elements - fs->rq_elements;
}

/*
 * If room, create a new queue and put at head of slot i;
 * otherwise, create or use the default queue.
 */
static struct dn_flow_queue *
create_queue(struct dn_flow_set *fs, int i)
{
    struct dn_flow_queue *q;

    if (fs->rq_elements > fs->rq_size * dn_max_ratio &&
        expire_queues(fs) == 0) {
        /*
         * No way to get room, use or create overflow queue.
         */
        i = fs->rq_size;
        if (fs->rq[i] != NULL) {
            return fs->rq[i];
        }
    }
    q = kalloc_type(struct dn_flow_queue, Z_NOWAIT | Z_ZERO);
    if (q == NULL) {
        printf("dummynet: sorry, cannot allocate queue for new flow\n");
        return NULL;
    }
    q->fs = fs;
    q->hash_slot = i;
    q->next = fs->rq[i];
    q->S = q->F + 1; /* hack - mark timestamp as invalid */
    fs->rq[i] = q;
    fs->rq_elements++;
    return q;
}

/*
 * Given a flow_set and a pkt in last_pkt, find a matching queue
 * after appropriate masking. The queue is moved to front
 * so that further searches take less time.
 */
static struct dn_flow_queue *
find_queue(struct dn_flow_set *fs, struct ip_flow_id *id)
{
    int i = 0; /* we need i and q for new allocations */
    struct dn_flow_queue *q, *prev;
    int is_v6 = IS_IP6_FLOW_ID(id);

    if (!(fs->flags_fs & DN_HAVE_FLOW_MASK)) {
        q = fs->rq[0];
    } else {
        /* first, do the masking, then hash */
        id->dst_port &= fs->flow_mask.dst_port;
        id->src_port &= fs->flow_mask.src_port;
        id->proto &= fs->flow_mask.proto;
        id->flags = 0; /* we don't care about this one */
        if (is_v6) {
            APPLY_MASK(&id->dst_ip6, &fs->flow_mask.dst_ip6);
            APPLY_MASK(&id->src_ip6, &fs->flow_mask.src_ip6);
            id->flow_id6 &= fs->flow_mask.flow_id6;

            i = ((id->dst_ip6.__u6_addr.__u6_addr32[0]) & 0xffff) ^
                ((id->dst_ip6.__u6_addr.__u6_addr32[1]) & 0xffff) ^
                ((id->dst_ip6.__u6_addr.__u6_addr32[2]) & 0xffff) ^
                ((id->dst_ip6.__u6_addr.__u6_addr32[3]) & 0xffff) ^

                ((id->dst_ip6.__u6_addr.__u6_addr32[0] >> 15) & 0xffff) ^
                ((id->dst_ip6.__u6_addr.__u6_addr32[1] >> 15) & 0xffff) ^
                ((id->dst_ip6.__u6_addr.__u6_addr32[2] >> 15) & 0xffff) ^
                ((id->dst_ip6.__u6_addr.__u6_addr32[3] >> 15) & 0xffff) ^

                ((id->src_ip6.__u6_addr.__u6_addr32[0] << 1) & 0xfffff) ^
                ((id->src_ip6.__u6_addr.__u6_addr32[1] << 1) & 0xfffff) ^
                ((id->src_ip6.__u6_addr.__u6_addr32[2] << 1) & 0xfffff) ^
                ((id->src_ip6.__u6_addr.__u6_addr32[3] << 1) & 0xfffff) ^

                ((id->src_ip6.__u6_addr.__u6_addr32[0] >> 16) & 0xffff) ^
                ((id->src_ip6.__u6_addr.__u6_addr32[1] >> 16) & 0xffff) ^
                ((id->src_ip6.__u6_addr.__u6_addr32[2] >> 16) & 0xffff) ^
                ((id->src_ip6.__u6_addr.__u6_addr32[3] >> 16) & 0xffff) ^

                (id->dst_port << 1) ^ (id->src_port) ^
                (id->proto) ^
                (id->flow_id6);
        } else {
            id->dst_ip &= fs->flow_mask.dst_ip;
            id->src_ip &= fs->flow_mask.src_ip;

            i = ((id->dst_ip) & 0xffff) ^
                ((id->dst_ip >> 15) & 0xffff) ^
                ((id->src_ip << 1) & 0xffff) ^
                ((id->src_ip >> 16) & 0xffff) ^
                (id->dst_port << 1) ^ (id->src_port) ^
                (id->proto);
        }
        i = i % fs->rq_size;
        /* finally, scan the current list for a match */
        searches++;
        for (prev = NULL, q = fs->rq[i]; q;) {
            search_steps++;
            if (is_v6 &&
                IN6_ARE_ADDR_EQUAL(&id->dst_ip6, &q->id.dst_ip6) &&
                IN6_ARE_ADDR_EQUAL(&id->src_ip6, &q->id.src_ip6) &&
                id->dst_port == q->id.dst_port &&
                id->src_port == q->id.src_port &&
                id->proto == q->id.proto &&
                id->flags == q->id.flags &&
                id->flow_id6 == q->id.flow_id6) {
                break; /* found */
            }
            if (!is_v6 && id->dst_ip == q->id.dst_ip &&
                id->src_ip == q->id.src_ip &&
                id->dst_port == q->id.dst_port &&
                id->src_port == q->id.src_port &&
                id->proto == q->id.proto &&
                id->flags == q->id.flags) {
                break; /* found */
            }
            /* No match. Check if we can expire the entry */
            if (pipe_expire && q->head == NULL && q->S == q->F + 1) {
                /* entry is idle and not in any heap, expire it */
                struct dn_flow_queue *old_q = q;

                if (prev != NULL) {
                    prev->next = q = q->next;
                } else {
                    fs->rq[i] = q = q->next;
                }
                fs->rq_elements--;
                kfree_type(struct dn_flow_queue, old_q);
                continue;
            }
            prev = q;
            q = q->next;
        }
        if (q && prev != NULL) { /* found and not in front */
            prev->next = q->next;
            q->next = fs->rq[i];
            fs->rq[i] = q;
        }
    }
    if (q == NULL) { /* no match, need to allocate a new entry */
        q = create_queue(fs, i);
        if (q != NULL) {
            q->id = *id;
        }
    }
    return q;
}

static int
red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len)
{
    /*
     * RED algorithm
     *
     * RED calculates the average queue size (avg) using a low-pass filter
     * with an exponential weighted (w_q) moving average:
     *  avg <- (1-w_q) * avg + w_q * q_size
     * where q_size is the queue length (measured in bytes or packets).
     *
     * If q_size == 0, we compute the idle time for the link, and set
     *  avg = (1 - w_q)^(idle/s)
     * where s is the time needed for transmitting a medium-sized packet.
     *
     * Now, if avg < min_th the packet is enqueued.
     * If avg > max_th the packet is dropped. Otherwise, the packet is
     * dropped with a probability P that is a function of avg.
     */
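    /*
     * Numeric illustration with hypothetical values (not the defaults):
     * take min_th = 10, max_th = 30 and max_p = 0.1 (all suitably
     * SCALE'd). An average queue of 20 gives
     *  p_b = max_p * (avg - min_th) / (max_th - min_th) = 0.1 * 10/20 = 0.05.
     * Below, q->count packets accepted since the last drop scale this up:
     * the drop test compares p_b * count against a 16-bit random value,
     * so a drop becomes steadily more likely the longer we go without one.
     */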

    int64_t p_b = 0;
    /* queue in bytes or packets ? */
    u_int q_size = (fs->flags_fs & DN_QSIZE_IS_BYTES) ? q->len_bytes : q->len;

    DPRINTF(("\ndummynet: %d q: %2u ", (int)curr_time, q_size));

    /* average queue size estimation */
    if (q_size != 0) {
        /*
         * queue is not empty, avg <- avg + (q_size - avg) * w_q
         */
        int diff = SCALE(q_size) - q->avg;
        int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q);

        q->avg += (int)v;
    } else {
        /*
         * queue is empty, find for how long the queue has been
         * empty and use a lookup table for computing
         * (1 - w_q)^(idle_time/s) where s is the time to send a
         * (small) packet.
         * XXX check wraps...
         */
        if (q->avg) {
            u_int64_t t = (curr_time - q->q_time) / fs->lookup_step;

            q->avg = (t < fs->lookup_depth) ?
                SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
        }
    }
    DPRINTF(("dummynet: avg: %u ", SCALE_VAL(q->avg)));

    /* should I drop? */

    if (q->avg < fs->min_th) {
        q->count = -1;
        return 0; /* accept packet */
    }
    if (q->avg >= fs->max_th) { /* average queue >= max threshold */
        if (fs->flags_fs & DN_IS_GENTLE_RED) {
            /*
             * According to Gentle-RED, if avg is greater than max_th the
             * packet is dropped with a probability
             *  p_b = c_3 * avg - c_4
             * where c_3 = (1 - max_p) / max_th, and c_4 = 1 - 2 * max_p
             */
            p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) - fs->c_4;
        } else {
            q->count = -1;
            DPRINTF(("dummynet: - drop"));
            return 1;
        }
    } else if (q->avg > fs->min_th) {
        /*
         * we compute p_b using the linear dropping function
         *  p_b = c_1 * avg - c_2,
         * where c_1 = max_p / (max_th - min_th), and
         * c_2 = max_p * min_th / (max_th - min_th)
         */
        p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2;
    }
    if (fs->flags_fs & DN_QSIZE_IS_BYTES) {
        p_b = (p_b * len) / fs->max_pkt_size;
    }
    if (++q->count == 0) {
        q->random = (my_random() & 0xffff);
    } else {
        /*
         * q->count counts packets arrived since last drop, so a greater
         * value of q->count means a greater packet drop probability.
         */
        if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) {
            q->count = 0;
            DPRINTF(("dummynet: - red drop"));
            /* after a drop we calculate a new random value */
            q->random = (my_random() & 0xffff);
            return 1; /* drop */
        }
    }
    /* end of RED algorithm */
    return 0; /* accept */
}

static __inline struct dn_flow_set *
locate_flowset(int fs_nr)
{
    struct dn_flow_set *fs;

    SLIST_FOREACH(fs, &flowsethash[HASH(fs_nr)], next) {
        if (fs->fs_nr == fs_nr) {
            return fs;
        }
    }

    return NULL;
}

static __inline struct dn_pipe *
locate_pipe(int pipe_nr)
{
    struct dn_pipe *pipe;

    SLIST_FOREACH(pipe, &pipehash[HASH(pipe_nr)], next) {
        if (pipe->pipe_nr == pipe_nr) {
            return pipe;
        }
    }

    return NULL;
}


/*
 * dummynet hook for packets. Below 'pipe' is a pipe or a queue
 * depending on whether WF2Q or fixed bw is used.
 *
 * pipe_nr pipe or queue the packet is destined for.
 * dir     where shall we send the packet after dummynet.
 * m       the mbuf with the packet
 * ifp     the 'ifp' parameter from the caller.
 *         NULL in ip_input, destination interface in ip_output,
 *         real_dst in bdg_forward
 * ro      route parameter (only used in ip_output, NULL otherwise)
 * dst     destination address, only used by ip_output
 * rule    matching rule, in case of multiple passes
 * flags   flags from the caller, only used in ip_output
 */
static int
dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa)
{
    struct mbuf *head = NULL, *tail = NULL;
    struct dn_pkt_tag *pkt;
    struct m_tag *mtag;
    struct dn_flow_set *fs = NULL;
    struct dn_pipe *pipe;
    u_int32_t len = m->m_pkthdr.len;
    struct dn_flow_queue *q = NULL;
    int is_pipe = 0;
    struct timespec ts;
    struct timeval tv;

    DPRINTF(("dummynet_io m: 0x%llx pipe: %d dir: %d\n",
        (uint64_t)VM_KERNEL_ADDRPERM(m), pipe_nr, dir));

#if DUMMYNET
    is_pipe = fwa->fwa_flags == DN_IS_PIPE ? 1 : 0;
#endif /* DUMMYNET */

    pipe_nr &= 0xffff;

    lck_mtx_lock(&dn_mutex);

    /* make all time measurements in milliseconds (ms) -
     * here we convert secs and usecs to msecs (just divide the
     * usecs and take the closest whole number).
     */
    microuptime(&tv);
    curr_time = (tv.tv_sec * 1000) + (tv.tv_usec / 1000);

    /*
     * This is a dummynet rule, so we expect an O_PIPE or O_QUEUE rule.
     */
    if (is_pipe) {
        pipe = locate_pipe(pipe_nr);
        if (pipe != NULL) {
            fs = &(pipe->fs);
        }
    } else {
        fs = locate_flowset(pipe_nr);
    }

    if (fs == NULL) {
        goto dropit; /* this queue/pipe does not exist! */
    }
    pipe = fs->pipe;
    if (pipe == NULL) { /* must be a queue, try find a matching pipe */
        pipe = locate_pipe(fs->parent_nr);

        if (pipe != NULL) {
            fs->pipe = pipe;
        } else {
            printf("dummynet: no pipe %d for queue %d, drop pkt\n",
                fs->parent_nr, fs->fs_nr);
            goto dropit;
        }
    }
    q = find_queue(fs, &(fwa->fwa_id));
    if (q == NULL) {
        goto dropit; /* cannot allocate queue */
    }
    /*
     * update statistics, then check reasons to drop pkt
     */
    q->tot_bytes += len;
    q->tot_pkts++;
    if (fs->plr && (my_random() < fs->plr)) {
        goto dropit; /* random pkt drop */
    }
    if (fs->flags_fs & DN_QSIZE_IS_BYTES) {
        if (q->len_bytes > fs->qsize) {
            goto dropit; /* queue size overflow */
        }
    } else {
        if (q->len >= fs->qsize) {
            goto dropit; /* queue count overflow */
        }
    }
    if (fs->flags_fs & DN_IS_RED && red_drops(fs, q, len)) {
        goto dropit;
    }

    /* XXX expensive to zero, see if we can remove it */
    mtag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DUMMYNET,
        sizeof(struct dn_pkt_tag), M_NOWAIT, m);
    if (mtag == NULL) {
        goto dropit; /* cannot allocate packet header */
    }
    m_tag_prepend(m, mtag); /* attach to mbuf chain */

    pkt = (struct dn_pkt_tag *)(mtag->m_tag_data);
    bzero(pkt, sizeof(struct dn_pkt_tag));
    /* ok, i can handle the pkt now... */
    /* build and enqueue packet + parameters */
    pkt->dn_pf_rule = fwa->fwa_pf_rule;
    pkt->dn_dir = dir;

    pkt->dn_ifp = fwa->fwa_oif;
    if (dir == DN_TO_IP_OUT) {
        /*
         * We need to copy *ro because for ICMP pkts (and maybe others)
         * the caller passed a pointer into the stack; dst might also be
         * a pointer into *ro so it needs to be updated.
         */
        if (fwa->fwa_ro) {
            route_copyout(&pkt->dn_ro, fwa->fwa_ro, sizeof(pkt->dn_ro));
        }
        if (fwa->fwa_dst) {
            if (fwa->fwa_dst == SIN(&fwa->fwa_ro->ro_dst)) { /* dst points into ro */
                fwa->fwa_dst = SIN(&(pkt->dn_ro.ro_dst));
            }

            SOCKADDR_COPY(fwa->fwa_dst, &pkt->dn_dst, sizeof(pkt->dn_dst));
        }
    } else if (dir == DN_TO_IP6_OUT) {
        if (fwa->fwa_ro6) {
            route_copyout((struct route *)&pkt->dn_ro6,
                (struct route *)fwa->fwa_ro6, sizeof(pkt->dn_ro6));
        }
        if (fwa->fwa_ro6_pmtu) {
            route_copyout((struct route *)&pkt->dn_ro6_pmtu,
                (struct route *)fwa->fwa_ro6_pmtu, sizeof(pkt->dn_ro6_pmtu));
        }
        if (fwa->fwa_dst6) {
            if (fwa->fwa_dst6 == SIN6(&fwa->fwa_ro6->ro_dst)) { /* dst points into ro */
                fwa->fwa_dst6 = SIN6(&(pkt->dn_ro6.ro_dst));
            }

            SOCKADDR_COPY(fwa->fwa_dst6, &pkt->dn_dst6, sizeof(pkt->dn_dst6));
        }
        pkt->dn_origifp = fwa->fwa_origifp;
        pkt->dn_mtu = fwa->fwa_mtu;
        pkt->dn_unfragpartlen = fwa->fwa_unfragpartlen;
        if (fwa->fwa_exthdrs) {
            bcopy(fwa->fwa_exthdrs, &pkt->dn_exthdrs, sizeof(pkt->dn_exthdrs));
            /*
             * Need to zero out the source structure so the mbufs
             * won't be freed by ip6_output()
             */
            bzero(fwa->fwa_exthdrs, sizeof(struct ip6_exthdrs));
        }
    }
    if (dir == DN_TO_IP_OUT || dir == DN_TO_IP6_OUT) {
        pkt->dn_flags = fwa->fwa_oflags;
        if (fwa->fwa_ipoa != NULL) {
            pkt->dn_ipoa = *(fwa->fwa_ipoa);
        }
    }
1669 if (q->head == NULL) {
1670 q->head = m;
1671 } else {
1672 q->tail->m_nextpkt = m;
1673 }
1674 q->tail = m;
1675 q->len++;
1676 q->len_bytes += len;
1677
1678 if (q->head != m) { /* flow was not idle, we are done */
1679 goto done;
1680 }
1681 /*
1682 * If we reach this point the flow was previously idle, so we need
1683 * to schedule it. This involves different actions for fixed-rate or
1684 * WF2Q queues.
1685 */
1686 if (is_pipe) {
1687 /*
1688 * Fixed-rate queue: just insert into the ready_heap.
1689 */
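		/*
		 * Worked example (assuming SET_TICKS computes, to a first
		 * approximation, the milliseconds needed to drain the packet
		 * at pipe->bandwidth): a 1500-byte packet on a 1 Mbit/s pipe
		 * needs 1500 * 8 / 1000000 s = 12 ms, so the queue would be
		 * inserted into ready_heap keyed at curr_time + 12.
		 */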
		dn_key t = 0;
		if (pipe->bandwidth) {
			t = SET_TICKS(m, q, pipe);
		}
		q->sched_time = curr_time;
		if (t == 0) {   /* must process it now */
			ready_event(q, &head, &tail);
		} else {
			heap_insert(&ready_heap, curr_time + t, q);
		}
	} else {
		/*
		 * WF2Q. First, compute start time S: if the flow was idle (S=F+1)
		 * set S to the virtual time V for the controlling pipe, and update
		 * the sum of weights for the pipe; otherwise, remove flow from
		 * idle_heap and set S to max(F,V).
		 * Second, compute finish time F = S + len/weight.
		 * Third, if pipe was idle, update V=max(S, V).
		 * Fourth, count one more backlogged flow.
		 */
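		/*
		 * Worked example: with V = 100, an idle flow (S > F) gets
		 * S = 100; for len = 1000 bytes and weight = 10 the finish
		 * time becomes F = S + (1000 << MY_M) / 10. A heavier flow
		 * (weight = 50) enqueueing the same packet gets a finish
		 * time five times closer to S, so it is served five times
		 * as often - exactly the WF2Q weighting.
		 */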
		if (DN_KEY_GT(q->S, q->F)) { /* means timestamps are invalid */
			q->S = pipe->V;
			pipe->sum += fs->weight; /* add weight of new queue */
		} else {
			heap_extract(&(pipe->idle_heap), q);
			q->S = MAX64(q->F, pipe->V);
		}
		q->F = q->S + (len << MY_M) / (u_int64_t) fs->weight;

		if (pipe->not_eligible_heap.elements == 0 &&
		    pipe->scheduler_heap.elements == 0) {
			pipe->V = MAX64(q->S, pipe->V);
		}
		fs->backlogged++;
		/*
		 * Look at eligibility. A flow is not eligible if S>V (when
		 * this happens, it means that there is some other flow already
		 * scheduled for the same pipe, so the scheduler_heap cannot be
		 * empty). If the flow is not eligible we just store it in the
		 * not_eligible_heap. Otherwise, we store it in the scheduler_heap
		 * and possibly invoke ready_event_wfq() right now if there is
		 * leftover credit.
		 * Note that for all flows in scheduler_heap (SCH), S_i <= V,
		 * and for all flows in not_eligible_heap (NEH), S_i > V.
		 * So when we need to compute max(V, min(S_i)) for all i in
		 * SCH+NEH, we only need to look into NEH.
		 */
		if (DN_KEY_GT(q->S, pipe->V)) {         /* not eligible */
			if (pipe->scheduler_heap.elements == 0) {
				printf("dummynet: ++ ouch! not eligible but empty scheduler!\n");
			}
			heap_insert(&(pipe->not_eligible_heap), q->S, q);
		} else {
			heap_insert(&(pipe->scheduler_heap), q->F, q);
			if (pipe->numbytes >= 0) {      /* pipe is idle */
				if (pipe->scheduler_heap.elements != 1) {
					printf("dummynet: OUCH! pipe should have been idle!\n");
				}
				DPRINTF(("dummynet: waking up pipe %d at %d\n",
				    pipe->pipe_nr, (int)(q->F >> MY_M)));
				pipe->sched_time = curr_time;
				ready_event_wfq(pipe, &head, &tail);
			}
		}
	}
done:
	/* start the timer and set global if not already set */
	if (!timer_enabled) {
		ts.tv_sec = 0;
		ts.tv_nsec = 1 * 1000000;       /* 1 ms */
		timer_enabled = 1;
		bsd_timeout(dummynet, NULL, &ts);
	}

	lck_mtx_unlock(&dn_mutex);

	if (head != NULL) {
		dummynet_send(head);
	}

	return 0;

dropit:
	if (q) {
		q->drops++;
	}
	lck_mtx_unlock(&dn_mutex);
	m_freem(m);
	return (fs && (fs->flags_fs & DN_NOERROR)) ? 0 : ENOBUFS;
}

/*
 * Below, the ROUTE_RELEASE is only needed when (pkt->dn_dir == DN_TO_IP_OUT);
 * testing dn_dir here would probably also save us the initial bzero of dn_pkt.
 */
#define DN_FREE_PKT(_m) do {                                            \
	struct m_tag *tag = m_tag_locate(_m, KERNEL_MODULE_TAG_ID,      \
	    KERNEL_TAG_TYPE_DUMMYNET);                                  \
	if (tag) {                                                      \
	        struct dn_pkt_tag *n = (struct dn_pkt_tag *)(tag->m_tag_data); \
	        ROUTE_RELEASE(&n->dn_ro);                               \
	        m_tag_delete(_m, tag);                                  \
	}                                                               \
	m_freem(_m);                                                    \
} while (0)

/*
 * Dispose of all packets and flow_queues on a flow_set.
 * If all=1, also remove the RED lookup table and other storage,
 * including the descriptor itself.
 * For the flow_set embedded in a dn_pipe, the caller must also
 * clean up the ready_heap.
 */
static void
purge_flow_set(struct dn_flow_set *fs, int all)
{
	struct dn_flow_queue *q, *qn;
	int i;

	LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);

	for (i = 0; i <= fs->rq_size; i++) {
		for (q = fs->rq[i]; q; q = qn) {
			struct mbuf *m, *mnext;

			mnext = q->head;
			while ((m = mnext) != NULL) {
				mnext = m->m_nextpkt;
				DN_FREE_PKT(m);
			}
			qn = q->next;
			kfree_type(struct dn_flow_queue, q);
		}
		fs->rq[i] = NULL;
	}
	fs->rq_elements = 0;
	if (all) {
		/* RED - free lookup table */
		if (fs->w_q_lookup) {
			kfree_data(fs->w_q_lookup, fs->lookup_depth * sizeof(int));
		}
		kfree_type(struct dn_flow_queue *, fs->rq_size + 1, fs->rq);
		/* if this fs is not part of a pipe, free it */
		if (fs->pipe && fs != &(fs->pipe->fs)) {
			kfree_type(struct dn_flow_set, fs);
		}
	}
}

/*
 * Dispose of all packets queued on a pipe (not a flow_set).
 * Also free all resources associated with a pipe, which is about
 * to be deleted.
 */
static void
purge_pipe(struct dn_pipe *pipe)
{
	struct mbuf *m, *mnext;

	purge_flow_set(&(pipe->fs), 1);

	mnext = pipe->head;
	while ((m = mnext) != NULL) {
		mnext = m->m_nextpkt;
		DN_FREE_PKT(m);
	}

	heap_free(&(pipe->scheduler_heap));
	heap_free(&(pipe->not_eligible_heap));
	heap_free(&(pipe->idle_heap));
}

/*
 * Delete all pipes and heaps, returning memory.
 */
static void
dummynet_flush(void)
{
	struct dn_pipe *pipe, *pipe1;
	struct dn_flow_set *fs, *fs1;
	int i;

	lck_mtx_lock(&dn_mutex);

	/* Free heaps so we don't have unwanted events. */
	heap_free(&ready_heap);
	heap_free(&wfq_ready_heap);
	heap_free(&extract_heap);

	/*
	 * Now purge all queued pkts and delete all pipes.
	 *
	 * XXXGL: can we merge the for(;;) cycles into one or not?
	 */
	for (i = 0; i < HASHSIZE; i++) {
		SLIST_FOREACH_SAFE(fs, &flowsethash[i], next, fs1) {
			SLIST_REMOVE(&flowsethash[i], fs, dn_flow_set, next);
			purge_flow_set(fs, 1);
		}
	}
	for (i = 0; i < HASHSIZE; i++) {
		SLIST_FOREACH_SAFE(pipe, &pipehash[i], next, pipe1) {
			SLIST_REMOVE(&pipehash[i], pipe, dn_pipe, next);
			purge_pipe(pipe);
			kfree_type(struct dn_pipe, pipe);
		}
	}
	lck_mtx_unlock(&dn_mutex);
}

/*
 * Setup RED parameters.
 */
static int
config_red(struct dn_flow_set *p, struct dn_flow_set *x)
{
	int i;

	x->w_q = p->w_q;
	x->min_th = SCALE(p->min_th);
	x->max_th = SCALE(p->max_th);
	x->max_p = p->max_p;

	x->c_1 = p->max_p / (p->max_th - p->min_th);
	x->c_2 = SCALE_MUL(x->c_1, SCALE(p->min_th));
	if (x->flags_fs & DN_IS_GENTLE_RED) {
		x->c_3 = (SCALE(1) - p->max_p) / p->max_th;
		x->c_4 = (SCALE(1) - 2 * p->max_p);
	}

	/* if the lookup table already exists, free it and create it again */
	if (x->w_q_lookup) {
		kfree_data(x->w_q_lookup, x->lookup_depth * sizeof(int));
		x->w_q_lookup = NULL;
	}
	if (red_lookup_depth == 0) {
		printf("\ndummynet: net.inet.ip.dummynet.red_lookup_depth must be > 0\n");
		return EINVAL;
	}
	x->lookup_depth = red_lookup_depth;
	x->w_q_lookup = (u_int *) kalloc_data(x->lookup_depth * sizeof(int),
	    Z_NOWAIT);
	if (x->w_q_lookup == NULL) {
		printf("dummynet: sorry, cannot allocate red lookup table\n");
		return ENOSPC;
	}

	/* fill the lookup table with (1 - w_q)^x */
	x->lookup_step = p->lookup_step;
	x->lookup_weight = p->lookup_weight;
	x->w_q_lookup[0] = SCALE(1) - x->w_q;
	for (i = 1; i < x->lookup_depth; i++) {
		x->w_q_lookup[i] = SCALE_MUL(x->w_q_lookup[i - 1], x->lookup_weight);
	}
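	/*
	 * Worked example: with w_q = 0.002 (stored SCALE'd), w_q_lookup[0]
	 * = SCALE(0.998) and each following entry is multiplied by
	 * lookup_weight, so w_q_lookup[i] approximates SCALE((1-w_q)^(i+1)).
	 * The table lets the scheduler apply the EWMA decay for an idle
	 * period of t steps with one table lookup instead of t
	 * multiplications.
	 */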
	if (red_avg_pkt_size < 1) {
		red_avg_pkt_size = 512;
	}
	x->avg_pkt_size = red_avg_pkt_size;
	if (red_max_pkt_size < 1) {
		red_max_pkt_size = 1500;
	}
	x->max_pkt_size = red_max_pkt_size;
	return 0;
}

static int
alloc_hash(struct dn_flow_set *x, struct dn_flow_set *pfs)
{
	if (x->flags_fs & DN_HAVE_FLOW_MASK) {  /* allocate some slots */
		int l = pfs->rq_size;

		if (l == 0) {
			l = dn_hash_size;
		}
		if (l < 4) {
			l = 4;
		} else if (l > DN_MAX_HASH_SIZE) {
			l = DN_MAX_HASH_SIZE;
		}
		x->rq_size = l;
	} else {                /* one is enough for null mask */
		x->rq_size = 1;
	}
	x->rq = kalloc_type(struct dn_flow_queue *, x->rq_size + 1,
	    Z_NOWAIT | Z_ZERO);
	if (x->rq == NULL) {
		printf("dummynet: sorry, cannot allocate queue\n");
		return ENOSPC;
	}
	x->rq_elements = 0;
	return 0;
}

static int
set_fs_parms(struct dn_flow_set *x, struct dn_flow_set *src)
{
	x->flags_fs = src->flags_fs;
	x->qsize = src->qsize;
	x->plr = src->plr;
	x->flow_mask = src->flow_mask;
	if (x->flags_fs & DN_QSIZE_IS_BYTES) {
		if (x->qsize > 1024 * 1024) {
			x->qsize = 1024 * 1024;
		}
	} else {
		if (x->qsize == 0) {
			x->qsize = 50;
		}
		if (x->qsize > 100) {
			x->qsize = 50;
		}
	}
	/* configuring RED */
	if (x->flags_fs & DN_IS_RED) {
		return config_red(src, x);      /* XXX should check errors */
	}
	return 0;
}

/*
 * Setup pipe or queue parameters.
 */
static int
config_pipe(struct dn_pipe *p)
{
	int i, r;
	struct dn_flow_set *pfs = &(p->fs);
	struct dn_flow_queue *q;
	bool is_new = false;

	/*
	 * The config program passes parameters as follows:
	 * bw = bits/second (0 means no limits),
	 * delay = ms, must be translated into ticks.
	 * qsize = slots/bytes
	 */
	p->delay = (p->delay * (hz * 10)) / 1000;
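	/*
	 * Worked example (assuming the usual hz = 100 on this platform):
	 * (hz * 10) = 1000, so a configured delay of 30 ms becomes
	 * 30 * 1000 / 1000 = 30 internal ticks, matching the 1 ms
	 * granularity of curr_time above.
	 */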
	/* We need either a pipe number or a flow_set number */
	if (p->pipe_nr == 0 && pfs->fs_nr == 0) {
		return EINVAL;
	}
	if (p->pipe_nr != 0 && pfs->fs_nr != 0) {
		return EINVAL;
	}
	if (p->pipe_nr != 0) {  /* this is a pipe */
		struct dn_pipe *x, *b;
		struct dummynet_event dn_event;
		lck_mtx_lock(&dn_mutex);

		/* locate pipe */
		b = locate_pipe(p->pipe_nr);

		if (b == NULL || b->pipe_nr != p->pipe_nr) {    /* new pipe */
			is_new = true;
			x = kalloc_type(struct dn_pipe, Z_NOWAIT | Z_ZERO);
			if (x == NULL) {
				lck_mtx_unlock(&dn_mutex);
				printf("dummynet: no memory for new pipe\n");
				return ENOSPC;
			}
			x->pipe_nr = p->pipe_nr;
			x->fs.pipe = x;
			/*
			 * idle_heap is the only heap from which we extract
			 * entries from the middle.
			 */
			x->idle_heap.size = x->idle_heap.elements = 0;
			x->idle_heap.offset = offsetof(struct dn_flow_queue, heap_pos);
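			/*
			 * A non-zero heap "offset" makes heap_insert() store
			 * each node's heap index back into the object (here
			 * into dn_flow_queue.heap_pos), so heap_extract() can
			 * later remove an arbitrary queue in O(log n) without
			 * first scanning the heap for it.
			 */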
		} else {
			x = b;
			/* Flush accumulated credit for all queues */
			for (i = 0; i <= x->fs.rq_size; i++) {
				for (q = x->fs.rq[i]; q; q = q->next) {
					q->numbytes = 0;
				}
			}
		}

		x->bandwidth = p->bandwidth;
		x->numbytes = 0;        /* just in case... */
		bcopy(p->if_name, x->if_name, sizeof(p->if_name));
		x->ifp = NULL;  /* reset interface ptr */
		x->delay = p->delay;
		r = set_fs_parms(&(x->fs), pfs);
		if (r != 0) {
			lck_mtx_unlock(&dn_mutex);
			if (is_new) {   /* a new pipe */
				kfree_type(struct dn_pipe, x);
			}
			return r;
		}

		if (x->fs.rq == NULL) { /* a new pipe */
			r = alloc_hash(&(x->fs), pfs);
			if (r) {
				lck_mtx_unlock(&dn_mutex);
				if (is_new) {
					kfree_type(struct dn_pipe, x);
				}
				return r;
			}
			SLIST_INSERT_HEAD(&pipehash[HASH(x->pipe_nr)],
			    x, next);
		}
		lck_mtx_unlock(&dn_mutex);

		bzero(&dn_event, sizeof(dn_event));
		dn_event.dn_event_code = DUMMYNET_PIPE_CONFIG;
		dn_event.dn_event_pipe_config.bandwidth = p->bandwidth;
		dn_event.dn_event_pipe_config.delay = p->delay;
		dn_event.dn_event_pipe_config.plr = pfs->plr;

		dummynet_event_enqueue_nwk_wq_entry(&dn_event);
	} else {                /* config queue */
		struct dn_flow_set *x, *b;

		lck_mtx_lock(&dn_mutex);
		/* locate flow_set */
		b = locate_flowset(pfs->fs_nr);

		if (b == NULL || b->fs_nr != pfs->fs_nr) {      /* new */
			is_new = true;
			if (pfs->parent_nr == 0) {      /* need link to a pipe */
				lck_mtx_unlock(&dn_mutex);
				return EINVAL;
			}
			x = kalloc_type(struct dn_flow_set, Z_NOWAIT | Z_ZERO);
			if (x == NULL) {
				lck_mtx_unlock(&dn_mutex);
				printf("dummynet: no memory for new flow_set\n");
				return ENOSPC;
			}
			x->fs_nr = pfs->fs_nr;
			x->parent_nr = pfs->parent_nr;
			x->weight = pfs->weight;
			if (x->weight == 0) {
				x->weight = 1;
			} else if (x->weight > 100) {
				x->weight = 100;
			}
		} else {
			/* Changing the parent pipe is not allowed;
			 * the queue must be deleted and recreated. */
			if (pfs->parent_nr != 0 && b->parent_nr != pfs->parent_nr) {
				lck_mtx_unlock(&dn_mutex);
				return EINVAL;
			}
			x = b;
		}
		r = set_fs_parms(x, pfs);
		if (r != 0) {
			lck_mtx_unlock(&dn_mutex);
			printf("dummynet: cannot configure flow_set\n");
			if (is_new) {
				kfree_type(struct dn_flow_set, x);
			}
			return r;
		}

		if (x->rq == NULL) {    /* a new flow_set */
			r = alloc_hash(x, pfs);
			if (r) {
				lck_mtx_unlock(&dn_mutex);
				kfree_type(struct dn_flow_set, x);
				return r;
			}
			SLIST_INSERT_HEAD(&flowsethash[HASH(x->fs_nr)],
			    x, next);
		}
		lck_mtx_unlock(&dn_mutex);
	}
	return 0;
}

/*
 * Helper function to remove from a heap queues which are linked to
 * a flow_set about to be deleted.
 */
static void
fs_remove_from_heap(struct dn_heap *h, struct dn_flow_set *fs)
{
	int i = 0, found = 0;

	while (i < h->elements) {
		if (((struct dn_flow_queue *)h->p[i].object)->fs == fs) {
			h->elements--;
			h->p[i] = h->p[h->elements];
			found++;
		} else {
			i++;
		}
	}
	if (found) {
		heapify(h);
	}
}

/*
 * Helper function to remove a pipe from a heap (can be there at most once).
 */
static void
pipe_remove_from_heap(struct dn_heap *h, struct dn_pipe *p)
{
	if (h->elements > 0) {
		int i;

		for (i = 0; i < h->elements; i++) {
			if (h->p[i].object == p) {      /* found it */
				h->elements--;
				h->p[i] = h->p[h->elements];
				heapify(h);
				break;
			}
		}
	}
}

/*
 * Drain all queues. Called in case of severe mbuf shortage.
 */
void
dummynet_drain(void)
{
	struct dn_flow_set *fs;
	struct dn_pipe *p;
	struct mbuf *m, *mnext;
	int i;

	LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);

	heap_free(&ready_heap);
	heap_free(&wfq_ready_heap);
	heap_free(&extract_heap);
	/* purge the queued packets of every flow_set */
	for (i = 0; i < HASHSIZE; i++) {
		SLIST_FOREACH(fs, &flowsethash[i], next) {
			purge_flow_set(fs, 0);
		}
	}

	for (i = 0; i < HASHSIZE; i++) {
		SLIST_FOREACH(p, &pipehash[i], next) {
			purge_flow_set(&(p->fs), 0);

			mnext = p->head;
			while ((m = mnext) != NULL) {
				mnext = m->m_nextpkt;
				DN_FREE_PKT(m);
			}
			p->head = p->tail = NULL;
		}
	}
}

/*
 * Fully delete a pipe or a queue, cleaning up associated info.
 */
static int
delete_pipe(struct dn_pipe *p)
{
	if (p->pipe_nr == 0 && p->fs.fs_nr == 0) {
		return EINVAL;
	}
	if (p->pipe_nr != 0 && p->fs.fs_nr != 0) {
		return EINVAL;
	}
	if (p->pipe_nr != 0) {  /* this is an old-style pipe */
		struct dn_pipe *b;
		struct dn_flow_set *fs;
		int i;

		lck_mtx_lock(&dn_mutex);
		/* locate pipe */
		b = locate_pipe(p->pipe_nr);
		if (b == NULL) {
			lck_mtx_unlock(&dn_mutex);
			return EINVAL;  /* not found */
		}

		/* Unlink from list of pipes. */
		SLIST_REMOVE(&pipehash[HASH(b->pipe_nr)], b, dn_pipe, next);

		/* Remove all references to this pipe from flow_sets. */
		for (i = 0; i < HASHSIZE; i++) {
			SLIST_FOREACH(fs, &flowsethash[i], next) {
				if (fs->pipe == b) {
					printf("dummynet: ++ ref to pipe %d from fs %d\n",
					    p->pipe_nr, fs->fs_nr);
					fs->pipe = NULL;
					purge_flow_set(fs, 0);
				}
			}
		}
		fs_remove_from_heap(&ready_heap, &(b->fs));

		purge_pipe(b);  /* remove all data associated with this pipe */
		/* remove references to this pipe from extract_heap and wfq_ready_heap */
		pipe_remove_from_heap(&extract_heap, b);
		pipe_remove_from_heap(&wfq_ready_heap, b);
		lck_mtx_unlock(&dn_mutex);

		kfree_type(struct dn_pipe, b);
	} else {        /* this is a WF2Q queue (dn_flow_set) */
		struct dn_flow_set *b;

		lck_mtx_lock(&dn_mutex);
		/* locate set */
		b = locate_flowset(p->fs.fs_nr);
		if (b == NULL) {
			lck_mtx_unlock(&dn_mutex);
			return EINVAL;  /* not found */
		}

		/* Unlink from list of flowsets. */
		SLIST_REMOVE(&flowsethash[HASH(b->fs_nr)], b, dn_flow_set, next);

		if (b->pipe != NULL) {
			/* Update total weight on parent pipe and cleanup parent heaps */
			b->pipe->sum -= b->weight * b->backlogged;
			fs_remove_from_heap(&(b->pipe->not_eligible_heap), b);
			fs_remove_from_heap(&(b->pipe->scheduler_heap), b);
#if 1   /* XXX should i remove from idle_heap as well ? */
			fs_remove_from_heap(&(b->pipe->idle_heap), b);
#endif
		}
		purge_flow_set(b, 1);
		lck_mtx_unlock(&dn_mutex);
	}
	return 0;
}

/*
 * Helper function used to copy data from kernel in DUMMYNET_GET.
 */
static
char*
dn_copy_set_32(struct dn_flow_set *set, char *bp)
{
	int i, copied = 0;
	struct dn_flow_queue *q;
	struct dn_flow_queue_32 *qp = (struct dn_flow_queue_32 *)(void *)bp;

	LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);

	for (i = 0; i <= set->rq_size; i++) {
		for (q = set->rq[i]; q; q = q->next, qp++) {
			if (q->hash_slot != i) {
				printf("dummynet: ++ at %d: wrong slot (have %d, "
				    "should be %d)\n", copied, q->hash_slot, i);
			}
			if (q->fs != set) {
				printf("dummynet: ++ at %d: wrong fs ptr "
				    "(have 0x%llx, should be 0x%llx)\n", i,
				    (uint64_t)VM_KERNEL_ADDRPERM(q->fs),
				    (uint64_t)VM_KERNEL_ADDRPERM(set));
			}
			copied++;
			cp_queue_to_32_user(q, qp);
			/* cleanup pointers */
			qp->next = (user32_addr_t)0;
			qp->head = qp->tail = (user32_addr_t)0;
			qp->fs = (user32_addr_t)0;
		}
	}
	if (copied != set->rq_elements) {
		printf("dummynet: ++ wrong count, have %d should be %d\n",
		    copied, set->rq_elements);
	}
	return (char *)qp;
}

static
char*
dn_copy_set_64(struct dn_flow_set *set, char *bp)
{
	int i, copied = 0;
	struct dn_flow_queue *q;
	struct dn_flow_queue_64 *qp = (struct dn_flow_queue_64 *)(void *)bp;

	LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);

	for (i = 0; i <= set->rq_size; i++) {
		for (q = set->rq[i]; q; q = q->next, qp++) {
			if (q->hash_slot != i) {
				printf("dummynet: ++ at %d: wrong slot (have %d, "
				    "should be %d)\n", copied, q->hash_slot, i);
			}
			if (q->fs != set) {
				printf("dummynet: ++ at %d: wrong fs ptr "
				    "(have 0x%llx, should be 0x%llx)\n", i,
				    (uint64_t)VM_KERNEL_ADDRPERM(q->fs),
				    (uint64_t)VM_KERNEL_ADDRPERM(set));
			}
			copied++;
			cp_queue_to_64_user(q, qp);
			/* cleanup pointers */
			qp->next = USER_ADDR_NULL;
			qp->head = qp->tail = USER_ADDR_NULL;
			qp->fs = USER_ADDR_NULL;
		}
	}
	if (copied != set->rq_elements) {
		printf("dummynet: ++ wrong count, have %d should be %d\n",
		    copied, set->rq_elements);
	}
	return (char *)qp;
}

static size_t
dn_calc_size(int is64user)
{
	struct dn_flow_set *set;
	struct dn_pipe *p;
	size_t size = 0;
	size_t pipesize;
	size_t queuesize;
	size_t setsize;
	int i;

	LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);
	if (is64user) {
		pipesize = sizeof(struct dn_pipe_64);
		queuesize = sizeof(struct dn_flow_queue_64);
		setsize = sizeof(struct dn_flow_set_64);
	} else {
		pipesize = sizeof(struct dn_pipe_32);
		queuesize = sizeof(struct dn_flow_queue_32);
		setsize = sizeof(struct dn_flow_set_32);
	}
	/*
	 * Compute the size of the data structures: list of pipes and flow_sets.
	 */
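	/*
	 * Worked example: one pipe whose flow_set holds 2 queues plus one
	 * standalone flow_set with 3 queues, for a 64-bit caller, needs
	 * pipesize + 2 * queuesize + setsize + 3 * queuesize bytes in the
	 * sockopt buffer.
	 */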
	for (i = 0; i < HASHSIZE; i++) {
		SLIST_FOREACH(p, &pipehash[i], next) {
			size += pipesize +
			    p->fs.rq_elements * queuesize;
		}
		SLIST_FOREACH(set, &flowsethash[i], next) {
			size += setsize +
			    set->rq_elements * queuesize;
		}
	}
	return size;
}

static int
dummynet_get(struct sockopt *sopt)
{
	char *buf = NULL, *bp = NULL;   /* bp is the "copy-pointer" */
	size_t size = 0;
	struct dn_flow_set *set;
	struct dn_pipe *p;
	int error = 0, i;
	int is64user = 0;

	/* XXX lock held too long */
	lck_mtx_lock(&dn_mutex);
	/*
	 * XXX: Ugly, but we need to allocate memory with the Z_WAITOK flag
	 * and we cannot use that flag while holding a mutex.
	 */
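	/*
	 * The loop below therefore sizes the buffer optimistically: compute
	 * the size under the lock, drop the lock to do the blocking
	 * allocation, retake the lock, and retry (up to 10 times) if the
	 * pipe/flow_set population changed in between.
	 */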
	if (proc_is64bit(sopt->sopt_p)) {
		is64user = 1;
	}
	for (i = 0; i < 10; i++) {
		size = dn_calc_size(is64user);
		lck_mtx_unlock(&dn_mutex);
		buf = kalloc_data(size, Z_WAITOK | Z_ZERO);
		if (buf == NULL) {
			return ENOBUFS;
		}
		lck_mtx_lock(&dn_mutex);
		if (size == dn_calc_size(is64user)) {
			break;
		}
		kfree_data(buf, size);
		buf = NULL;
	}
	if (buf == NULL) {
		lck_mtx_unlock(&dn_mutex);
		return ENOBUFS;
	}

	bp = buf;
	for (i = 0; i < HASHSIZE; i++) {
		SLIST_FOREACH(p, &pipehash[i], next) {
			/*
			 * Copy the pipe descriptor into *bp, convert delay
			 * back to ms, then copy the flow_set descriptor(s)
			 * one at a time. After each flow_set, copy the
			 * queue descriptor it owns.
			 */
			if (is64user) {
				bp = cp_pipe_to_64_user(p,
				    (struct dn_pipe_64 *)(void *)bp);
			} else {
				bp = cp_pipe_to_32_user(p,
				    (struct dn_pipe_32 *)(void *)bp);
			}
		}
	}
	for (i = 0; i < HASHSIZE; i++) {
		SLIST_FOREACH(set, &flowsethash[i], next) {
			struct dn_flow_set_64 *fs_bp =
			    (struct dn_flow_set_64 *)(void *)bp;
			cp_flow_set_to_64_user(set, fs_bp);
			/* XXX same hack as above */
			fs_bp->next = CAST_DOWN(user64_addr_t,
			    DN_IS_QUEUE);
			fs_bp->pipe = USER_ADDR_NULL;
			fs_bp->rq = USER_ADDR_NULL;
			bp += sizeof(struct dn_flow_set_64);
			bp = dn_copy_set_64(set, bp);
		}
	}
	lck_mtx_unlock(&dn_mutex);
	error = sooptcopyout(sopt, buf, size);
	kfree_data(buf, size);
	return error;
}

/*
 * Handler for the various dummynet socket options (get, flush, config, del).
 */
static int
ip_dn_ctl(struct sockopt *sopt)
{
	int error = 0;
	struct dn_pipe *p, tmp_pipe;

	/* Disallow sets in really-really secure mode. */
	if (sopt->sopt_dir == SOPT_SET && securelevel >= 3) {
		return EPERM;
	}

	switch (sopt->sopt_name) {
	default:
		printf("dummynet: -- unknown option %d\n", sopt->sopt_name);
		return EINVAL;

	case IP_DUMMYNET_GET:
		error = dummynet_get(sopt);
		break;

	case IP_DUMMYNET_FLUSH:
		dummynet_flush();
		break;

	case IP_DUMMYNET_CONFIGURE:
		p = &tmp_pipe;
		if (proc_is64bit(sopt->sopt_p)) {
			error = cp_pipe_from_user_64(sopt, p);
		} else {
			error = cp_pipe_from_user_32(sopt, p);
		}

		if (error) {
			break;
		}
		error = config_pipe(p);
		break;

	case IP_DUMMYNET_DEL:   /* remove a pipe or queue */
		p = &tmp_pipe;
		if (proc_is64bit(sopt->sopt_p)) {
			error = cp_pipe_from_user_64(sopt, p);
		} else {
			error = cp_pipe_from_user_32(sopt, p);
		}
		if (error) {
			break;
		}

		error = delete_pipe(p);
		break;
	}
	return error;
}

void
dummynet_init(void)
{
	eventhandler_lists_ctxt_init(&dummynet_evhdlr_ctxt);
}

void
ip_dn_init(void)
{
	/* initialize the event heaps */
	ready_heap.size = ready_heap.elements = 0;
	ready_heap.offset = 0;

	wfq_ready_heap.size = wfq_ready_heap.elements = 0;
	wfq_ready_heap.offset = 0;

	extract_heap.size = extract_heap.elements = 0;
	extract_heap.offset = 0;
	ip_dn_ctl_ptr = ip_dn_ctl;
	ip_dn_io_ptr = dummynet_io;
}

struct dn_event_nwk_wq_entry {
	struct nwk_wq_entry nwk_wqe;
	struct dummynet_event dn_ev_arg;
};

static void
dummynet_event_callback(struct nwk_wq_entry *nwk_item)
{
	struct dn_event_nwk_wq_entry *p_ev;

	p_ev = __container_of(nwk_item, struct dn_event_nwk_wq_entry, nwk_wqe);

	EVENTHANDLER_INVOKE(&dummynet_evhdlr_ctxt, dummynet_event, &p_ev->dn_ev_arg);

	kfree_type(struct dn_event_nwk_wq_entry, p_ev);
}

void
dummynet_event_enqueue_nwk_wq_entry(struct dummynet_event *p_dn_event)
{
	struct dn_event_nwk_wq_entry *p_ev = NULL;

	p_ev = kalloc_type(struct dn_event_nwk_wq_entry,
	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
	p_ev->nwk_wqe.func = dummynet_event_callback;
	p_ev->dn_ev_arg = *p_dn_event;
	nwk_wq_enqueue(&p_ev->nwk_wqe);
}

struct dummynet_tag_container {
	struct m_tag dtc_m_tag;
	struct dn_pkt_tag dtc_dn_pkt_tag;
};
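
/*
 * Layout note: dtc_m_tag is the first member, so a pointer to the
 * container and a pointer to the embedded m_tag are numerically equal
 * (the assert3p() below relies on this), and the dn_pkt_tag payload
 * lives right next to the tag header in a single allocation.
 */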

struct m_tag *
m_tag_kalloc_dummynet(u_int32_t id, u_int16_t type, uint16_t len, int wait)
{
	struct dummynet_tag_container *tag_container;
	struct m_tag *tag = NULL;

	assert3u(id, ==, KERNEL_MODULE_TAG_ID);
	assert3u(type, ==, KERNEL_TAG_TYPE_DUMMYNET);
	assert3u(len, ==, sizeof(struct dn_pkt_tag));

	if (len != sizeof(struct dn_pkt_tag)) {
		return NULL;
	}

	tag_container = kalloc_type(struct dummynet_tag_container, wait | M_ZERO);
	if (tag_container != NULL) {
		tag = &tag_container->dtc_m_tag;

		assert3p(tag, ==, tag_container);

		M_TAG_INIT(tag, id, type, len, &tag_container->dtc_dn_pkt_tag, NULL);
	}

	return tag;
}

void
m_tag_kfree_dummynet(struct m_tag *tag)
{
	struct dummynet_tag_container *tag_container = (struct dummynet_tag_container *)tag;

	assert3u(tag->m_tag_len, ==, sizeof(struct dn_pkt_tag));

	kfree_type(struct dummynet_tag_container, tag_container);
}

void
dummynet_register_m_tag(void)
{
	int error;

	error = m_register_internal_tag_type(KERNEL_TAG_TYPE_DUMMYNET, sizeof(struct dn_pkt_tag),
	    m_tag_kalloc_dummynet, m_tag_kfree_dummynet);

	assert3u(error, ==, 0);
}
2668