/*
 * Copyright (c) 2000-2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998-2002 Luigi Rizzo, Universita` di Pisa
 * Portions Copyright (c) 2000 Akamba Corp.
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/ip_dummynet.c,v 1.84 2004/08/25 09:31:30 pjd Exp $
 */

#define DUMMYNET_DEBUG

/*
 * This module implements IP dummynet, a bandwidth limiter/delay emulator.
 * Description of the data structures used is in ip_dummynet.h.
 * Here you mainly find the following blocks of code:
 * + variable declarations;
 * + heap management functions;
 * + scheduler and dummynet functions;
 * + configuration and initialization.
 *
 * NOTA BENE: critical sections are protected by the "dummynet lock".
 *
 * Most important changes:
 *
 * 010124: Fixed WF2Q behaviour
 * 010122: Fixed spl protection.
 * 000601: WF2Q support
 * 000106: large rewrite, use heaps to handle very many pipes.
 * 980513: initial release
 *
 * include files marked with XXX are probably not needed
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h> /* XXX */
#include <sys/kernel.h>
#include <sys/random.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/time.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/route.h>
#include <net/kpi_protocol.h>
#include <net/nwk_wq.h>
#include <net/pfvar.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_dummynet.h>
#include <netinet/ip_var.h>

#include <netinet/ip6.h> /* for ip6_input, ip6_output prototypes */
#include <netinet6/ip6_var.h>

#include <stdbool.h>

/*
 * We keep a private variable for the simulation time, but we could
 * probably use an existing one ("softticks" in sys/kern/kern_timer.c)
 */
static dn_key curr_time = 0; /* current simulation time */

/* this is for the timer that fires to call dummynet() - we only enable the
 * timer when there are packets to process, otherwise it's disabled */
static int timer_enabled = 0;

static int dn_hash_size = 64; /* default hash size */

/* statistics on number of queue searches and search steps */
static int searches, search_steps;
static int pipe_expire = 1;   /* expire queue if empty */
static int dn_max_ratio = 16; /* max queues/buckets ratio */

static int red_lookup_depth = 256; /* RED - default lookup table depth */
static int red_avg_pkt_size = 512; /* RED - default medium packet size */
static int red_max_pkt_size = 1500; /* RED - default max packet size */

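/*
 * serialize is bumped in dummynet() while dummynet_send() is draining the
 * ready list; transmit_event() refrains from dequeueing the delay line
 * whenever it is nonzero, so packets re-entering dummynet from within a
 * send cannot recursively drain the same pipe.
 */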
static int serialize = 0;

/*
 * Three heaps contain queues and pipes that the scheduler handles:
 *
 * ready_heap contains all dn_flow_queue related to fixed-rate pipes.
 *
 * wfq_ready_heap contains the pipes associated with WF2Q flows
 *
 * extract_heap contains pipes associated with delay lines.
 *
 */
static struct dn_heap ready_heap, extract_heap, wfq_ready_heap;

static int heap_init(struct dn_heap *h, int size);
static int heap_insert(struct dn_heap *h, dn_key key1, void *p);
static void heap_extract(struct dn_heap *h, void *obj);


static void transmit_event(struct dn_pipe *pipe, struct mbuf **head,
    struct mbuf **tail);
static void ready_event(struct dn_flow_queue *q, struct mbuf **head,
    struct mbuf **tail);
static void ready_event_wfq(struct dn_pipe *p, struct mbuf **head,
    struct mbuf **tail);

/*
 * Packets are retrieved from queues in Dummynet in chains instead of
 * packet-by-packet. The entire list of packets is first dequeued and
 * sent out by the following function.
 */
static void dummynet_send(struct mbuf *m);

#define HASHSIZE  16
#define HASH(num) ((((num) >> 8) ^ ((num) >> 4) ^ (num)) & 0x0f)
static struct dn_pipe_head     pipehash[HASHSIZE];    /* all pipes */
static struct dn_flow_set_head flowsethash[HASHSIZE]; /* all flowsets */
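/*
 * Worked example (illustrative only): pipe number 50 is 0x32, so
 * HASH(50) == ((0x00 ^ 0x03 ^ 0x32) & 0x0f) == 0x31 & 0x0f == 1, i.e.
 * that pipe lives on pipehash[1]. Folding bits 8..11 and 4..7 into the
 * low nibble spreads both small and large numbers across the 16 buckets.
 */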

#ifdef SYSCTL_NODE
SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Dummynet");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, hash_size,
    CTLFLAG_RW | CTLFLAG_LOCKED, &dn_hash_size, 0, "Default hash table size");
SYSCTL_QUAD(_net_inet_ip_dummynet, OID_AUTO, curr_time,
    CTLFLAG_RD | CTLFLAG_LOCKED, &curr_time, "Current tick");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, ready_heap,
    CTLFLAG_RD | CTLFLAG_LOCKED, &ready_heap.size, 0, "Size of ready heap");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, extract_heap,
    CTLFLAG_RD | CTLFLAG_LOCKED, &extract_heap.size, 0, "Size of extract heap");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, searches,
    CTLFLAG_RD | CTLFLAG_LOCKED, &searches, 0, "Number of queue searches");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, search_steps,
    CTLFLAG_RD | CTLFLAG_LOCKED, &search_steps, 0, "Number of queue search steps");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire,
    CTLFLAG_RW | CTLFLAG_LOCKED, &pipe_expire, 0, "Expire queue if empty");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, max_chain_len,
    CTLFLAG_RW | CTLFLAG_LOCKED, &dn_max_ratio, 0,
    "Max ratio between dynamic queues and buckets");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth,
    CTLFLAG_RD | CTLFLAG_LOCKED, &red_lookup_depth, 0, "Depth of RED lookup table");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size,
    CTLFLAG_RD | CTLFLAG_LOCKED, &red_avg_pkt_size, 0, "RED Medium packet size");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size,
    CTLFLAG_RD | CTLFLAG_LOCKED, &red_max_pkt_size, 0, "RED Max packet size");
#endif

#ifdef DUMMYNET_DEBUG
int dummynet_debug = 0;
#ifdef SYSCTL_NODE
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED, &dummynet_debug,
    0, "control debugging printfs");
#endif
#define DPRINTF(X) if (dummynet_debug) printf X
#else
#define DPRINTF(X)
#endif

/* dummynet lock */
static LCK_GRP_DECLARE(dn_mutex_grp, "dn");
static LCK_MTX_DECLARE(dn_mutex, &dn_mutex_grp);

static int config_pipe(struct dn_pipe *p);
static int ip_dn_ctl(struct sockopt *sopt);

static void dummynet(void *);
static void dummynet_flush(void);
void dummynet_drain(void);
static ip_dn_io_t dummynet_io;

static void cp_flow_set_to_64_user(struct dn_flow_set *set, struct dn_flow_set_64 *fs_bp);
static void cp_queue_to_64_user(struct dn_flow_queue *q, struct dn_flow_queue_64 *qp);
static char *cp_pipe_to_64_user(struct dn_pipe *p, struct dn_pipe_64 *pipe_bp);
static char *dn_copy_set_64(struct dn_flow_set *set, char *bp);
static int cp_pipe_from_user_64(struct sockopt *sopt, struct dn_pipe *p);

static void cp_flow_set_to_32_user(struct dn_flow_set *set, struct dn_flow_set_32 *fs_bp);
static void cp_queue_to_32_user(struct dn_flow_queue *q, struct dn_flow_queue_32 *qp);
static char *cp_pipe_to_32_user(struct dn_pipe *p, struct dn_pipe_32 *pipe_bp);
static char *dn_copy_set_32(struct dn_flow_set *set, char *bp);
static int cp_pipe_from_user_32(struct sockopt *sopt, struct dn_pipe *p);

static struct m_tag *m_tag_kalloc_dummynet(u_int32_t id, u_int16_t type, uint16_t len, int wait);
static void m_tag_kfree_dummynet(struct m_tag *tag);

struct eventhandler_lists_ctxt dummynet_evhdlr_ctxt;

uint32_t
my_random(void)
{
	uint32_t val;
	read_frandom(&val, sizeof(val));
	val &= 0x7FFFFFFF;

	return val;
}
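/*
 * Note: the masking above makes my_random() uniform in [0, 2^31), the same
 * scale used for probabilistic parameters such as the flow-set plr, so a
 * test like "my_random() < fs->plr" drops with probability plr / 2^31.
 */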

/*
 * Heap management functions.
 *
 * In the heap, the first node is element 0. Children of i are 2i+1 and 2i+2.
 * Some macros help find parent/children so we can optimize them.
 *
 * heap_init() is called to expand the heap when needed.
 * Increment size in blocks of 16 entries.
 * XXX failure to allocate a new element is a pretty bad failure
 * as we basically stall a whole queue forever!!
 * Returns 1 on error, 0 on success
 */
#define HEAP_FATHER(x)          (((x) - 1) / 2)
#define HEAP_LEFT(x)            (2 * (x) + 1)
#define HEAP_IS_LEFT(x)         ((x) & 1)
#define HEAP_RIGHT(x)           (2 * (x) + 2)
#define HEAP_SWAP(a, b, buffer) { buffer = a; a = b; b = buffer; }
#define HEAP_INCREMENT          15
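/*
 * Index arithmetic, worked through: node 3 has children HEAP_LEFT(3) == 7
 * and HEAP_RIGHT(3) == 8, while HEAP_FATHER(7) == HEAP_FATHER(8) == 3; left
 * children always sit at odd indices, which is what HEAP_IS_LEFT() tests.
 * The rounding in heap_init() below, (new_size + 15) & ~15, grows the array
 * in multiples of 16 entries, e.g. a request for 17 entries allocates 32.
 */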

int
cp_pipe_from_user_32(struct sockopt *sopt, struct dn_pipe *p)
{
	struct dn_pipe_32 user_pipe_32;
	int error = 0;

	error = sooptcopyin(sopt, &user_pipe_32, sizeof(struct dn_pipe_32), sizeof(struct dn_pipe_32));
	if (!error) {
		p->pipe_nr = user_pipe_32.pipe_nr;
		p->bandwidth = user_pipe_32.bandwidth;
		p->delay = user_pipe_32.delay;
		p->V = user_pipe_32.V;
		p->sum = user_pipe_32.sum;
		p->numbytes = user_pipe_32.numbytes;
		p->sched_time = user_pipe_32.sched_time;
		bcopy(user_pipe_32.if_name, p->if_name, IFNAMSIZ);
		p->ready = user_pipe_32.ready;

		p->fs.fs_nr = user_pipe_32.fs.fs_nr;
		p->fs.flags_fs = user_pipe_32.fs.flags_fs;
		p->fs.parent_nr = user_pipe_32.fs.parent_nr;
		p->fs.weight = user_pipe_32.fs.weight;
		p->fs.qsize = user_pipe_32.fs.qsize;
		p->fs.plr = user_pipe_32.fs.plr;
		p->fs.flow_mask = user_pipe_32.fs.flow_mask;
		p->fs.rq_size = user_pipe_32.fs.rq_size;
		p->fs.rq_elements = user_pipe_32.fs.rq_elements;
		p->fs.last_expired = user_pipe_32.fs.last_expired;
		p->fs.backlogged = user_pipe_32.fs.backlogged;
		p->fs.w_q = user_pipe_32.fs.w_q;
		p->fs.max_th = user_pipe_32.fs.max_th;
		p->fs.min_th = user_pipe_32.fs.min_th;
		p->fs.max_p = user_pipe_32.fs.max_p;
		p->fs.c_1 = user_pipe_32.fs.c_1;
		p->fs.c_2 = user_pipe_32.fs.c_2;
		p->fs.c_3 = user_pipe_32.fs.c_3;
		p->fs.c_4 = user_pipe_32.fs.c_4;
		p->fs.lookup_depth = user_pipe_32.fs.lookup_depth;
		p->fs.lookup_step = user_pipe_32.fs.lookup_step;
		p->fs.lookup_weight = user_pipe_32.fs.lookup_weight;
		p->fs.avg_pkt_size = user_pipe_32.fs.avg_pkt_size;
		p->fs.max_pkt_size = user_pipe_32.fs.max_pkt_size;
	}
	return error;
}


int
cp_pipe_from_user_64(struct sockopt *sopt, struct dn_pipe *p)
{
	struct dn_pipe_64 user_pipe_64;
	int error = 0;

	error = sooptcopyin(sopt, &user_pipe_64, sizeof(struct dn_pipe_64), sizeof(struct dn_pipe_64));
	if (!error) {
		p->pipe_nr = user_pipe_64.pipe_nr;
		p->bandwidth = user_pipe_64.bandwidth;
		p->delay = user_pipe_64.delay;
		p->V = user_pipe_64.V;
		p->sum = user_pipe_64.sum;
		p->numbytes = user_pipe_64.numbytes;
		p->sched_time = user_pipe_64.sched_time;
		bcopy(user_pipe_64.if_name, p->if_name, IFNAMSIZ);
		p->ready = user_pipe_64.ready;

		p->fs.fs_nr = user_pipe_64.fs.fs_nr;
		p->fs.flags_fs = user_pipe_64.fs.flags_fs;
		p->fs.parent_nr = user_pipe_64.fs.parent_nr;
		p->fs.weight = user_pipe_64.fs.weight;
		p->fs.qsize = user_pipe_64.fs.qsize;
		p->fs.plr = user_pipe_64.fs.plr;
		p->fs.flow_mask = user_pipe_64.fs.flow_mask;
		p->fs.rq_size = user_pipe_64.fs.rq_size;
		p->fs.rq_elements = user_pipe_64.fs.rq_elements;
		p->fs.last_expired = user_pipe_64.fs.last_expired;
		p->fs.backlogged = user_pipe_64.fs.backlogged;
		p->fs.w_q = user_pipe_64.fs.w_q;
		p->fs.max_th = user_pipe_64.fs.max_th;
		p->fs.min_th = user_pipe_64.fs.min_th;
		p->fs.max_p = user_pipe_64.fs.max_p;
		p->fs.c_1 = user_pipe_64.fs.c_1;
		p->fs.c_2 = user_pipe_64.fs.c_2;
		p->fs.c_3 = user_pipe_64.fs.c_3;
		p->fs.c_4 = user_pipe_64.fs.c_4;
		p->fs.lookup_depth = user_pipe_64.fs.lookup_depth;
		p->fs.lookup_step = user_pipe_64.fs.lookup_step;
		p->fs.lookup_weight = user_pipe_64.fs.lookup_weight;
		p->fs.avg_pkt_size = user_pipe_64.fs.avg_pkt_size;
		p->fs.max_pkt_size = user_pipe_64.fs.max_pkt_size;
	}
	return error;
}

static void
cp_flow_set_to_32_user(struct dn_flow_set *set, struct dn_flow_set_32 *fs_bp)
{
	fs_bp->fs_nr = set->fs_nr;
	fs_bp->flags_fs = set->flags_fs;
	fs_bp->parent_nr = set->parent_nr;
	fs_bp->weight = set->weight;
	fs_bp->qsize = set->qsize;
	fs_bp->plr = set->plr;
	fs_bp->flow_mask = set->flow_mask;
	fs_bp->rq_size = set->rq_size;
	fs_bp->rq_elements = set->rq_elements;
	fs_bp->last_expired = set->last_expired;
	fs_bp->backlogged = set->backlogged;
	fs_bp->w_q = set->w_q;
	fs_bp->max_th = set->max_th;
	fs_bp->min_th = set->min_th;
	fs_bp->max_p = set->max_p;
	fs_bp->c_1 = set->c_1;
	fs_bp->c_2 = set->c_2;
	fs_bp->c_3 = set->c_3;
	fs_bp->c_4 = set->c_4;
	fs_bp->w_q_lookup = CAST_DOWN_EXPLICIT(user32_addr_t, set->w_q_lookup);
	fs_bp->lookup_depth = set->lookup_depth;
	fs_bp->lookup_step = set->lookup_step;
	fs_bp->lookup_weight = set->lookup_weight;
	fs_bp->avg_pkt_size = set->avg_pkt_size;
	fs_bp->max_pkt_size = set->max_pkt_size;
}

static void
cp_flow_set_to_64_user(struct dn_flow_set *set, struct dn_flow_set_64 *fs_bp)
{
	fs_bp->fs_nr = set->fs_nr;
	fs_bp->flags_fs = set->flags_fs;
	fs_bp->parent_nr = set->parent_nr;
	fs_bp->weight = set->weight;
	fs_bp->qsize = set->qsize;
	fs_bp->plr = set->plr;
	fs_bp->flow_mask = set->flow_mask;
	fs_bp->rq_size = set->rq_size;
	fs_bp->rq_elements = set->rq_elements;
	fs_bp->last_expired = set->last_expired;
	fs_bp->backlogged = set->backlogged;
	fs_bp->w_q = set->w_q;
	fs_bp->max_th = set->max_th;
	fs_bp->min_th = set->min_th;
	fs_bp->max_p = set->max_p;
	fs_bp->c_1 = set->c_1;
	fs_bp->c_2 = set->c_2;
	fs_bp->c_3 = set->c_3;
	fs_bp->c_4 = set->c_4;
	fs_bp->w_q_lookup = CAST_DOWN(user64_addr_t, set->w_q_lookup);
	fs_bp->lookup_depth = set->lookup_depth;
	fs_bp->lookup_step = set->lookup_step;
	fs_bp->lookup_weight = set->lookup_weight;
	fs_bp->avg_pkt_size = set->avg_pkt_size;
	fs_bp->max_pkt_size = set->max_pkt_size;
}

static void
cp_queue_to_32_user(struct dn_flow_queue *q, struct dn_flow_queue_32 *qp)
{
	qp->id = q->id;
	qp->len = q->len;
	qp->len_bytes = q->len_bytes;
	qp->numbytes = q->numbytes;
	qp->tot_pkts = q->tot_pkts;
	qp->tot_bytes = q->tot_bytes;
	qp->drops = q->drops;
	qp->hash_slot = q->hash_slot;
	qp->avg = q->avg;
	qp->count = q->count;
	qp->random = q->random;
	qp->q_time = (u_int32_t)q->q_time;
	qp->heap_pos = q->heap_pos;
	qp->sched_time = q->sched_time;
	qp->S = q->S;
	qp->F = q->F;
}

static void
cp_queue_to_64_user(struct dn_flow_queue *q, struct dn_flow_queue_64 *qp)
{
	qp->id = q->id;
	qp->len = q->len;
	qp->len_bytes = q->len_bytes;
	qp->numbytes = q->numbytes;
	qp->tot_pkts = q->tot_pkts;
	qp->tot_bytes = q->tot_bytes;
	qp->drops = q->drops;
	qp->hash_slot = q->hash_slot;
	qp->avg = q->avg;
	qp->count = q->count;
	qp->random = q->random;
	qp->q_time = (u_int32_t)q->q_time;
	qp->heap_pos = q->heap_pos;
	qp->sched_time = q->sched_time;
	qp->S = q->S;
	qp->F = q->F;
}

static char *
cp_pipe_to_32_user(struct dn_pipe *p, struct dn_pipe_32 *pipe_bp)
{
	char *bp;

	pipe_bp->pipe_nr = p->pipe_nr;
	pipe_bp->bandwidth = p->bandwidth;
	pipe_bp->delay = p->delay;
	bcopy(&(p->scheduler_heap), &(pipe_bp->scheduler_heap), sizeof(struct dn_heap_32));
	pipe_bp->scheduler_heap.p = CAST_DOWN_EXPLICIT(user32_addr_t, pipe_bp->scheduler_heap.p);
	bcopy(&(p->not_eligible_heap), &(pipe_bp->not_eligible_heap), sizeof(struct dn_heap_32));
	pipe_bp->not_eligible_heap.p = CAST_DOWN_EXPLICIT(user32_addr_t, pipe_bp->not_eligible_heap.p);
	bcopy(&(p->idle_heap), &(pipe_bp->idle_heap), sizeof(struct dn_heap_32));
	pipe_bp->idle_heap.p = CAST_DOWN_EXPLICIT(user32_addr_t, pipe_bp->idle_heap.p);
	pipe_bp->V = p->V;
	pipe_bp->sum = p->sum;
	pipe_bp->numbytes = p->numbytes;
	pipe_bp->sched_time = p->sched_time;
	bcopy(p->if_name, pipe_bp->if_name, IFNAMSIZ);
	pipe_bp->ifp = CAST_DOWN_EXPLICIT(user32_addr_t, p->ifp);
	pipe_bp->ready = p->ready;

	cp_flow_set_to_32_user(&(p->fs), &(pipe_bp->fs));

	pipe_bp->delay = (pipe_bp->delay * 1000) / (hz * 10);
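	/*
	 * The conversion above maps the internal 1 ms ticks back to the
	 * milliseconds userland expects: with hz == 100, (hz * 10) == 1000
	 * ticks/s (see the hz remark ahead of SET_TICKS below). The 64-bit
	 * copy routine performs the same conversion.
	 */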
	/*
	 * XXX the following is a hack based on ->next being the
	 * first field in dn_pipe and dn_flow_set. The correct
	 * solution would be to move the dn_flow_set to the beginning
	 * of struct dn_pipe.
	 */
	pipe_bp->next = CAST_DOWN_EXPLICIT(user32_addr_t, DN_IS_PIPE);
	/* clean pointers */
	pipe_bp->head = pipe_bp->tail = (user32_addr_t)0;
	pipe_bp->fs.next = (user32_addr_t)0;
	pipe_bp->fs.pipe = (user32_addr_t)0;
	pipe_bp->fs.rq = (user32_addr_t)0;
	bp = ((char *)pipe_bp) + sizeof(struct dn_pipe_32);
	return dn_copy_set_32(&(p->fs), bp);
}

static char *
cp_pipe_to_64_user(struct dn_pipe *p, struct dn_pipe_64 *pipe_bp)
{
	char *bp;

	pipe_bp->pipe_nr = p->pipe_nr;
	pipe_bp->bandwidth = p->bandwidth;
	pipe_bp->delay = p->delay;
	bcopy(&(p->scheduler_heap), &(pipe_bp->scheduler_heap), sizeof(struct dn_heap_64));
	pipe_bp->scheduler_heap.p = CAST_DOWN(user64_addr_t, pipe_bp->scheduler_heap.p);
	bcopy(&(p->not_eligible_heap), &(pipe_bp->not_eligible_heap), sizeof(struct dn_heap_64));
	pipe_bp->not_eligible_heap.p = CAST_DOWN(user64_addr_t, pipe_bp->not_eligible_heap.p);
	bcopy(&(p->idle_heap), &(pipe_bp->idle_heap), sizeof(struct dn_heap_64));
	pipe_bp->idle_heap.p = CAST_DOWN(user64_addr_t, pipe_bp->idle_heap.p);
	pipe_bp->V = p->V;
	pipe_bp->sum = p->sum;
	pipe_bp->numbytes = p->numbytes;
	pipe_bp->sched_time = p->sched_time;
	bcopy(p->if_name, pipe_bp->if_name, IFNAMSIZ);
	pipe_bp->ifp = CAST_DOWN(user64_addr_t, p->ifp);
	pipe_bp->ready = p->ready;

	cp_flow_set_to_64_user(&(p->fs), &(pipe_bp->fs));

	pipe_bp->delay = (pipe_bp->delay * 1000) / (hz * 10);
	/*
	 * XXX the following is a hack based on ->next being the
	 * first field in dn_pipe and dn_flow_set. The correct
	 * solution would be to move the dn_flow_set to the beginning
	 * of struct dn_pipe.
	 */
	pipe_bp->next = CAST_DOWN(user64_addr_t, DN_IS_PIPE);
	/* clean pointers */
	pipe_bp->head = pipe_bp->tail = USER_ADDR_NULL;
	pipe_bp->fs.next = USER_ADDR_NULL;
	pipe_bp->fs.pipe = USER_ADDR_NULL;
	pipe_bp->fs.rq = USER_ADDR_NULL;
	bp = ((char *)pipe_bp) + sizeof(struct dn_pipe_64);
	return dn_copy_set_64(&(p->fs), bp);
}

static int
heap_init(struct dn_heap *h, int new_size)
{
	struct dn_heap_entry *p;

	if (h->size >= new_size) {
		printf("dummynet: heap_init, Bogus call, have %d want %d\n",
		    h->size, new_size);
		return 0;
	}
	new_size = (new_size + HEAP_INCREMENT) & ~HEAP_INCREMENT;
	p = krealloc_type(struct dn_heap_entry, h->size, new_size,
	    h->p, Z_NOWAIT | Z_ZERO);
	if (p == NULL) {
		printf("dummynet: heap_init, resize %d failed\n", new_size);
		return 1; /* error */
	}
	h->p = p;
	h->size = new_size;
	return 0;
}

/*
 * Insert element in heap. Normally p != NULL: we insert p at
 * a new position and bubble up. If p == NULL, then the element is
 * already in place, and key is the position where to start the
 * bubble-up.
 * Returns 1 on failure (cannot allocate new heap entry).
 *
 * If offset > 0 the position (index, int) of the element in the heap is
 * also stored in the element itself at the given offset in bytes.
 */
#define SET_OFFSET(heap, node) \
	if (heap->offset > 0) \
		*((int *)(void *)((char *)(heap->p[node].object) + heap->offset)) = node;
/*
 * RESET_OFFSET is used for sanity checks. It sets the stored index to an
 * invalid value.
 */
#define RESET_OFFSET(heap, node) \
	if (heap->offset > 0) \
		*((int *)(void *)((char *)(heap->p[node].object) + heap->offset)) = -1;
static int
heap_insert(struct dn_heap *h, dn_key key1, void *p)
{
	int son = h->elements;

	if (p == NULL) { /* data already there, set starting point */
		VERIFY(key1 < INT_MAX);
		son = (int)key1;
	} else { /* insert new element at the end, possibly resize */
		son = h->elements;
		if (son == h->size) { /* need resize... */
			if (heap_init(h, h->elements + 1)) {
				return 1; /* failure... */
			}
		}
		h->p[son].object = p;
		h->p[son].key = key1;
		h->elements++;
	}
	while (son > 0) { /* bubble up */
		int father = HEAP_FATHER(son);
		struct dn_heap_entry tmp;

		if (DN_KEY_LT(h->p[father].key, h->p[son].key)) {
			break; /* found right position */
		}
		/* son smaller than father, swap and repeat */
		HEAP_SWAP(h->p[son], h->p[father], tmp);
		SET_OFFSET(h, son);
		son = father;
	}
	SET_OFFSET(h, son);
	return 0;
}

/*
 * remove top element from heap, or obj if obj != NULL
 */
static void
heap_extract(struct dn_heap *h, void *obj)
{
	int child, father, maxelt = h->elements - 1;

	if (maxelt < 0) {
		printf("dummynet: warning, extract from empty heap 0x%llx\n",
		    (uint64_t)VM_KERNEL_ADDRPERM(h));
		return;
	}
	father = 0; /* default: move up smallest child */
	if (obj != NULL) { /* extract specific element, index is at offset */
		if (h->offset <= 0) {
			panic("dummynet: heap_extract from middle not supported on this heap!!!");
		}
		father = *((int *)(void *)((char *)obj + h->offset));
		if (father < 0 || father >= h->elements) {
			printf("dummynet: heap_extract, father %d out of bound 0..%d\n",
			    father, h->elements);
			panic("dummynet: heap_extract");
		}
	}
	RESET_OFFSET(h, father);
	child = HEAP_LEFT(father); /* left child */
	while (child <= maxelt) { /* valid entry */
		if (child != maxelt && DN_KEY_LT(h->p[child + 1].key, h->p[child].key)) {
			child = child + 1; /* take right child, otherwise left */
		}
		h->p[father] = h->p[child];
		SET_OFFSET(h, father);
		father = child;
		child = HEAP_LEFT(child); /* left child for next loop */
	}
	h->elements--;
	if (father != maxelt) {
		/*
		 * Fill hole with last entry and bubble up, reusing the insert code
		 */
		h->p[father] = h->p[maxelt];
		heap_insert(h, father, NULL); /* this one cannot fail */
	}
}

/*
 * heapify() will reorganize data inside an array to maintain the
 * heap property. It is needed when we delete a bunch of entries.
 */
static void
heapify(struct dn_heap *h)
{
	int i;

	for (i = 0; i < h->elements; i++) {
		heap_insert(h, i, NULL);
	}
}
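/*
 * Note: heap_insert(h, i, NULL) only bubbles element i up, so the loop above
 * is the classic "build heap by repeated insertion" pass, O(n log n) in the
 * number of elements; acceptable here because it only runs after bulk
 * deletions, not on the fast path.
 */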

/*
 * cleanup the heap and free data structure
 */
static void
heap_free(struct dn_heap *h)
{
	kfree_type(struct dn_heap_entry, h->size, h->p);
	bzero(h, sizeof(*h));
}

/*
 * --- end of heap management functions ---
 */

/*
 * Return the mbuf tag holding the dummynet state. As an optimization
 * this is assumed to be the first tag on the list. If this turns out
 * wrong we'll need to search the list.
 */
static struct dn_pkt_tag *
dn_tag_get(struct mbuf *m)
{
	struct m_tag *mtag = m_tag_first(m);

	if (!(mtag != NULL &&
	    mtag->m_tag_id == KERNEL_MODULE_TAG_ID &&
	    mtag->m_tag_type == KERNEL_TAG_TYPE_DUMMYNET)) {
		panic("packet on dummynet queue w/o dummynet tag: 0x%llx",
		    (uint64_t)VM_KERNEL_ADDRPERM(m));
	}

	return (struct dn_pkt_tag *)(mtag->m_tag_data);
}

/*
 * Scheduler functions:
 *
 * transmit_event() is called when the delay-line needs to enter
 * the scheduler, either because of existing pkts getting ready,
 * or new packets entering the queue. The event handled is the delivery
 * time of the packet.
 *
 * ready_event() does something similar with fixed-rate queues, and the
 * event handled is the finish time of the head pkt.
 *
 * ready_event_wfq() does something similar with WF2Q queues, and the
 * event handled is the start time of the head pkt.
 *
 * In all cases, we make sure that the data structures are consistent
 * before passing pkts out, because this might trigger recursive
 * invocations of the procedures.
 */
static void
transmit_event(struct dn_pipe *pipe, struct mbuf **head, struct mbuf **tail)
{
	struct mbuf *m;
	struct dn_pkt_tag *pkt = NULL;
	u_int64_t schedule_time;

	LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);
	ASSERT(serialize >= 0);
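	/*
	 * If serialize is nonzero a dummynet_send() is already draining
	 * packets higher up the stack; leave the delay line alone here and
	 * only reschedule the pipe below instead of dequeueing reentrantly.
	 */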
	if (serialize == 0) {
		while ((m = pipe->head) != NULL) {
			pkt = dn_tag_get(m);
			if (!DN_KEY_LEQ(pkt->dn_output_time, curr_time)) {
				break;
			}

			pipe->head = m->m_nextpkt;
			if (*tail != NULL) {
				(*tail)->m_nextpkt = m;
			} else {
				*head = m;
			}
			*tail = m;
		}

		if (*tail != NULL) {
			(*tail)->m_nextpkt = NULL;
		}
	}

	schedule_time = pkt == NULL || DN_KEY_LEQ(pkt->dn_output_time, curr_time) ?
	    curr_time + 1 : pkt->dn_output_time;

	/* if there are leftover packets, put the pipe into the heap for next ready event */
	if ((m = pipe->head) != NULL) {
		pkt = dn_tag_get(m);
		/* XXX should check errors on heap_insert, by draining the
		 * whole pipe p and hoping in the future we are more successful
		 */
		heap_insert(&extract_heap, schedule_time, pipe);
	}
}

/*
 * the following macro computes how many ticks we have to wait
 * before being able to transmit a packet. The credit is taken from
 * either a pipe (WF2Q) or a flow_queue (per-flow queueing)
 */

/* hz is 100, which gives a granularity of 10ms in the old timer.
 * The timer has been changed to fire every 1ms, so the use of
 * hz has been modified here. All instances of hz have been left
 * in place but adjusted by a factor of 10 so that hz is functionally
 * equal to 1000.
 */
#define SET_TICKS(_m, q, p) \
	((_m)->m_pkthdr.len * 8 * (hz * 10) - (q)->numbytes + p->bandwidth - 1) / \
	p->bandwidth;
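/*
 * Worked example (illustrative): a 1500-byte packet on a 1 Mbit/s pipe with
 * no accumulated credit needs (1500*8*1000 + 10^6 - 1) / 10^6 = 12 ticks,
 * i.e. 12 ms at 1000 ticks/s, exactly the serialization time of 12000 bits
 * at 1 Mbit/s; the "+ bandwidth - 1" makes the integer division round up.
 */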

/*
 * extract pkt from queue, compute output time (could be now)
 * and put into delay line (p_queue)
 */
static void
move_pkt(struct mbuf *pkt, struct dn_flow_queue *q,
    struct dn_pipe *p, int len)
{
	struct dn_pkt_tag *dt = dn_tag_get(pkt);

	q->head = pkt->m_nextpkt;
	q->len--;
	q->len_bytes -= len;

	dt->dn_output_time = curr_time + p->delay;

	if (p->head == NULL) {
		p->head = pkt;
	} else {
		p->tail->m_nextpkt = pkt;
	}
	p->tail = pkt;
	p->tail->m_nextpkt = NULL;
}
/*
 * ready_event() is invoked every time the queue must enter the
 * scheduler, either because the first packet arrives, or because
 * a previously scheduled event fired.
 * On invocation, drain as many pkts as possible (could be 0) and then
 * if there are leftover packets reinsert the pkt in the scheduler.
 */
static void
ready_event(struct dn_flow_queue *q, struct mbuf **head, struct mbuf **tail)
{
	struct mbuf *pkt;
	struct dn_pipe *p = q->fs->pipe;
	int p_was_empty;

	LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);

	if (p == NULL) {
		printf("dummynet: ready_event pipe is gone\n");
		return;
	}
	p_was_empty = (p->head == NULL);

	/*
	 * schedule fixed-rate queues linked to this pipe:
	 * Account for the bw accumulated since last scheduling, then
	 * drain as many pkts as allowed by q->numbytes and move to
	 * the delay line (in p) computing output time.
	 * bandwidth==0 (no limit) means we can drain the whole queue,
	 * setting len_scaled = 0 does the job.
	 */
	q->numbytes += (curr_time - q->sched_time) * p->bandwidth;
	while ((pkt = q->head) != NULL) {
		int len = pkt->m_pkthdr.len;
		int len_scaled = p->bandwidth ? len * 8 * (hz * 10) : 0;
		if (len_scaled > q->numbytes) {
			break;
		}
		q->numbytes -= len_scaled;
		move_pkt(pkt, q, p, len);
	}
	/*
	 * If we have more packets queued, schedule next ready event
	 * (can only occur when bandwidth != 0, otherwise we would have
	 * flushed the whole queue in the previous loop).
	 * To this purpose we record the current time and compute how many
	 * ticks to go for the finish time of the packet.
	 */
	if ((pkt = q->head) != NULL) { /* this implies bandwidth != 0 */
		dn_key t = SET_TICKS(pkt, q, p); /* ticks i have to wait */
		q->sched_time = curr_time;
		heap_insert(&ready_heap, curr_time + t, (void *)q);
		/* XXX should check errors on heap_insert, and drain the whole
		 * queue on error hoping next time we are luckier.
		 */
	} else { /* RED needs to know when the queue becomes empty */
		q->q_time = curr_time;
		q->numbytes = 0;
	}
	/*
	 * If the delay line was empty call transmit_event(p) now.
	 * Otherwise, the scheduler will take care of it.
	 */
	if (p_was_empty) {
		transmit_event(p, head, tail);
	}
}

/*
 * Called when we can transmit packets on WF2Q queues. Take pkts out of
 * the queues at their start time, and enqueue into the delay line.
 * Packets are drained until p->numbytes < 0. As long as
 * len_scaled >= p->numbytes, the packet goes into the delay line
 * with a deadline p->delay. For the last packet, if p->numbytes < 0,
 * there is an additional delay.
 */
static void
ready_event_wfq(struct dn_pipe *p, struct mbuf **head, struct mbuf **tail)
{
	int p_was_empty = (p->head == NULL);
	struct dn_heap *sch = &(p->scheduler_heap);
	struct dn_heap *neh = &(p->not_eligible_heap);
	int64_t p_numbytes = p->numbytes;

	LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);

	if (p->if_name[0] == 0) { /* tx clock is simulated */
		p_numbytes += (curr_time - p->sched_time) * p->bandwidth;
	} else { /* tx clock is for real, the ifq must be empty or this is a NOP */
		if (p->ifp && !IFCQ_IS_EMPTY(p->ifp->if_snd)) {
			return;
		} else {
			DPRINTF(("dummynet: pipe %d ready from %s --\n",
			    p->pipe_nr, p->if_name));
		}
	}

	/*
	 * While we have backlogged traffic AND credit, we need to do
	 * something on the queue.
	 */
	while (p_numbytes >= 0 && (sch->elements > 0 || neh->elements > 0)) {
		if (sch->elements > 0) { /* have some eligible pkts to send out */
			struct dn_flow_queue *q = sch->p[0].object;
			struct mbuf *pkt = q->head;
			struct dn_flow_set *fs = q->fs;
			u_int32_t len = pkt->m_pkthdr.len;
			u_int64_t len_scaled = p->bandwidth ? len * 8 * (hz * 10) : 0;

			heap_extract(sch, NULL); /* remove queue from heap */
			p_numbytes -= len_scaled;
			move_pkt(pkt, q, p, len);

			p->V += (len << MY_M) / p->sum; /* update V */
			q->S = q->F; /* update start time */
			if (q->len == 0) { /* Flow not backlogged any more */
				fs->backlogged--;
				heap_insert(&(p->idle_heap), q->F, q);
			} else { /* still backlogged */
				/*
				 * update F and position in backlogged queue, then
				 * put flow in not_eligible_heap (we will fix this later).
				 */
				len = (q->head)->m_pkthdr.len;
				q->F += (len << MY_M) / (u_int64_t)fs->weight;
				if (DN_KEY_LEQ(q->S, p->V)) {
					heap_insert(neh, q->S, q);
				} else {
					heap_insert(sch, q->F, q);
				}
			}
		}
		/*
		 * now compute V = max(V, min(S_i)). Remember that all elements in sch
		 * have by definition S_i <= V so if sch is not empty, V is surely
		 * the max and we must not update it. Conversely, if sch is empty
		 * we only need to look at neh.
		 */
		if (sch->elements == 0 && neh->elements > 0) {
			p->V = MAX64(p->V, neh->p[0].key);
		}
		/* move from neh to sch any packets that have become eligible */
		while (neh->elements > 0 && DN_KEY_LEQ(neh->p[0].key, p->V)) {
			struct dn_flow_queue *q = neh->p[0].object;
			heap_extract(neh, NULL);
			heap_insert(sch, q->F, q);
		}

		if (p->if_name[0] != '\0') { /* tx clock is from a real thing */
			p_numbytes = -1; /* mark not ready for I/O */
			break;
		}
	}
	if (sch->elements == 0 && neh->elements == 0 && p_numbytes >= 0
	    && p->idle_heap.elements > 0) {
		/*
		 * no traffic and no events scheduled. We can get rid of idle-heap.
		 */
		int i;

		for (i = 0; i < p->idle_heap.elements; i++) {
			struct dn_flow_queue *q = p->idle_heap.p[i].object;

			q->F = 0;
			q->S = q->F + 1;
		}
		p->sum = 0;
		p->V = 0;
		p->idle_heap.elements = 0;
	}
	/*
	 * If we are getting clocks from dummynet (not a real interface) and
	 * if we are under credit, schedule the next ready event.
	 * Also fix the delivery time of the last packet.
	 */
	if (p->if_name[0] == 0 && p_numbytes < 0) { /* this implies bandwidth > 0 */
		dn_key t = 0; /* number of ticks i have to wait */

		if (p->bandwidth > 0) {
			t = (p->bandwidth - 1 - p_numbytes) / p->bandwidth;
		}
		dn_tag_get(p->tail)->dn_output_time += t;
		p->sched_time = curr_time;
		heap_insert(&wfq_ready_heap, curr_time + t, (void *)p);
		/* XXX should check errors on heap_insert, and drain the whole
		 * queue on error hoping next time we are luckier.
		 */
	}

	/* Fit (adjust if necessary) 64bit result into 32bit variable. */
	if (p_numbytes > INT_MAX) {
		p->numbytes = INT_MAX;
	} else if (p_numbytes < INT_MIN) {
		p->numbytes = INT_MIN;
	} else {
		p->numbytes = (int)p_numbytes;
	}

	/*
	 * If the delay line was empty call transmit_event(p) now.
	 * Otherwise, the scheduler will take care of it.
	 */
	if (p_was_empty) {
		transmit_event(p, head, tail);
	}
}

/*
 * This is called every 1ms. It is used to
 * increment the current tick counter and schedule expired events.
 */
static void
dummynet(__unused void *unused)
{
	void *p; /* generic parameter to handler */
	struct dn_heap *h;
	struct dn_heap *heaps[3];
	struct mbuf *head = NULL, *tail = NULL;
	int i;
	struct dn_pipe *pe;
	struct timespec ts;
	struct timeval tv;

	heaps[0] = &ready_heap;     /* fixed-rate queues */
	heaps[1] = &wfq_ready_heap; /* wfq queues */
	heaps[2] = &extract_heap;   /* delay line */

	lck_mtx_lock(&dn_mutex);

	/* make all time measurements in milliseconds (ms) -
	 * here we convert secs and usecs to msecs (divide the
	 * usecs by 1000, discarding the remainder).
	 */
	microuptime(&tv);
	curr_time = (tv.tv_sec * 1000) + (tv.tv_usec / 1000);

	for (i = 0; i < 3; i++) {
		h = heaps[i];
		while (h->elements > 0 && DN_KEY_LEQ(h->p[0].key, curr_time)) {
			if (h->p[0].key > curr_time) {
				printf("dummynet: warning, heap %d is %d ticks late\n",
				    i, (int)(curr_time - h->p[0].key));
			}
			p = h->p[0].object; /* store a copy before heap_extract */
			heap_extract(h, NULL); /* need to extract before processing */
			if (i == 0) {
				ready_event(p, &head, &tail);
			} else if (i == 1) {
				struct dn_pipe *pipe = p;
				if (pipe->if_name[0] != '\0') {
					printf("dummynet: bad ready_event_wfq for pipe %s\n",
					    pipe->if_name);
				} else {
					ready_event_wfq(p, &head, &tail);
				}
			} else {
				transmit_event(p, &head, &tail);
			}
		}
	}
	/* sweep pipes trying to expire idle flow_queues */
	for (i = 0; i < HASHSIZE; i++) {
		SLIST_FOREACH(pe, &pipehash[i], next) {
			if (pe->idle_heap.elements > 0 &&
			    DN_KEY_LT(pe->idle_heap.p[0].key, pe->V)) {
				struct dn_flow_queue *q = pe->idle_heap.p[0].object;

				heap_extract(&(pe->idle_heap), NULL);
				q->S = q->F + 1; /* mark timestamp as invalid */
				pe->sum -= q->fs->weight;
			}
		}
	}

	/* check the heaps to see if there's still stuff in there, and
	 * only set the timer if there are packets to process
	 */
	timer_enabled = 0;
	for (i = 0; i < 3; i++) {
		h = heaps[i];
		if (h->elements > 0) { // set the timer
			ts.tv_sec = 0;
			ts.tv_nsec = 1 * 1000000; // 1ms
			timer_enabled = 1;
			bsd_timeout(dummynet, NULL, &ts);
			break;
		}
	}

	if (head != NULL) {
		serialize++;
	}

	lck_mtx_unlock(&dn_mutex);

	/* Send out the de-queued list of ready-to-send packets */
	if (head != NULL) {
		dummynet_send(head);
		lck_mtx_lock(&dn_mutex);
		serialize--;
		lck_mtx_unlock(&dn_mutex);
	}
}


static void
dummynet_send(struct mbuf *m)
{
	struct dn_pkt_tag *pkt;
	struct mbuf *n;

	for (; m != NULL; m = n) {
		n = m->m_nextpkt;
		m->m_nextpkt = NULL;
		pkt = dn_tag_get(m);

		DPRINTF(("dummynet_send m: 0x%llx dn_dir: %d dn_flags: 0x%x\n",
		    (uint64_t)VM_KERNEL_ADDRPERM(m), pkt->dn_dir,
		    pkt->dn_flags));

		switch (pkt->dn_dir) {
		case DN_TO_IP_OUT: {
			struct route tmp_rt;

			/* route is already in the packet's dn_ro */
			bzero(&tmp_rt, sizeof(tmp_rt));

			/* Force IP_RAWOUTPUT as the IP header is fully formed */
			pkt->dn_flags |= IP_RAWOUTPUT | IP_FORWARDING;
			(void)ip_output(m, NULL, &tmp_rt, pkt->dn_flags, NULL, NULL);
			ROUTE_RELEASE(&tmp_rt);
			break;
		}
		case DN_TO_IP_IN:
			proto_inject(PF_INET, m);
			break;
		case DN_TO_IP6_OUT: {
			/* routes already in the packet's dn_{ro6,pmtu} */
			if (pkt->dn_origifp != NULL) {
				ip6_output_setsrcifscope(m, pkt->dn_origifp->if_index, NULL);
				ip6_output_setdstifscope(m, pkt->dn_origifp->if_index, NULL);
			} else {
				ip6_output_setsrcifscope(m, IFSCOPE_UNKNOWN, NULL);
				ip6_output_setdstifscope(m, IFSCOPE_UNKNOWN, NULL);
			}

			ip6_output(m, NULL, NULL, IPV6_FORWARDING, NULL, NULL, NULL);
			break;
		}
		case DN_TO_IP6_IN:
			proto_inject(PF_INET6, m);
			break;
		default:
			printf("dummynet: bad switch %d!\n", pkt->dn_dir);
			m_freem(m);
			break;
		}
	}
}

/*
 * Unconditionally expire empty queues in case of shortage.
 * Returns the number of queues freed.
 */
static int
expire_queues(struct dn_flow_set *fs)
{
	struct dn_flow_queue *q, *prev;
	int i, initial_elements = fs->rq_elements;
	struct timeval timenow;

	/* reviewed for getmicrotime usage */
	getmicrotime(&timenow);

	if (fs->last_expired == timenow.tv_sec) {
		return 0;
	}
	fs->last_expired = (int)timenow.tv_sec;
	for (i = 0; i <= fs->rq_size; i++) { /* last one is overflow */
		for (prev = NULL, q = fs->rq[i]; q != NULL;) {
			if (q->head != NULL || q->S != q->F + 1) {
				prev = q;
				q = q->next;
			} else { /* entry is idle, expire it */
				struct dn_flow_queue *old_q = q;

				if (prev != NULL) {
					prev->next = q = q->next;
				} else {
					fs->rq[i] = q = q->next;
				}
				fs->rq_elements--;
				kfree_type(struct dn_flow_queue, old_q);
			}
		}
	}
	return initial_elements - fs->rq_elements;
}

/*
 * If room, create a new queue and put at head of slot i;
 * otherwise, create or use the default queue.
 */
static struct dn_flow_queue *
create_queue(struct dn_flow_set *fs, int i)
{
	struct dn_flow_queue *q;

	if (fs->rq_elements > fs->rq_size * dn_max_ratio &&
	    expire_queues(fs) == 0) {
		/*
		 * No way to get room, use or create overflow queue.
		 */
		i = fs->rq_size;
		if (fs->rq[i] != NULL) {
			return fs->rq[i];
		}
	}
	q = kalloc_type(struct dn_flow_queue, Z_NOWAIT | Z_ZERO);
	if (q == NULL) {
		printf("dummynet: sorry, cannot allocate queue for new flow\n");
		return NULL;
	}
	q->fs = fs;
	q->hash_slot = i;
	q->next = fs->rq[i];
	q->S = q->F + 1; /* hack - mark timestamp as invalid */
	fs->rq[i] = q;
	fs->rq_elements++;
	return q;
}

/*
 * Given a flow_set and a pkt in last_pkt, find a matching queue
 * after appropriate masking. The queue is moved to front
 * so that further searches take less time.
 */
static struct dn_flow_queue *
find_queue(struct dn_flow_set *fs, struct ip_flow_id *id)
{
	int i = 0; /* we need i and q for new allocations */
	struct dn_flow_queue *q, *prev;
	int is_v6 = IS_IP6_FLOW_ID(id);

	if (!(fs->flags_fs & DN_HAVE_FLOW_MASK)) {
		q = fs->rq[0];
	} else {
		/* first, do the masking, then hash */
		id->dst_port &= fs->flow_mask.dst_port;
		id->src_port &= fs->flow_mask.src_port;
		id->proto &= fs->flow_mask.proto;
		id->flags = 0; /* we don't care about this one */
		if (is_v6) {
			APPLY_MASK(&id->dst_ip6, &fs->flow_mask.dst_ip6);
			APPLY_MASK(&id->src_ip6, &fs->flow_mask.src_ip6);
			id->flow_id6 &= fs->flow_mask.flow_id6;

			i = ((id->dst_ip6.__u6_addr.__u6_addr32[0]) & 0xffff) ^
			    ((id->dst_ip6.__u6_addr.__u6_addr32[1]) & 0xffff) ^
			    ((id->dst_ip6.__u6_addr.__u6_addr32[2]) & 0xffff) ^
			    ((id->dst_ip6.__u6_addr.__u6_addr32[3]) & 0xffff) ^

			    ((id->dst_ip6.__u6_addr.__u6_addr32[0] >> 15) & 0xffff) ^
			    ((id->dst_ip6.__u6_addr.__u6_addr32[1] >> 15) & 0xffff) ^
			    ((id->dst_ip6.__u6_addr.__u6_addr32[2] >> 15) & 0xffff) ^
			    ((id->dst_ip6.__u6_addr.__u6_addr32[3] >> 15) & 0xffff) ^

			    ((id->src_ip6.__u6_addr.__u6_addr32[0] << 1) & 0xfffff) ^
			    ((id->src_ip6.__u6_addr.__u6_addr32[1] << 1) & 0xfffff) ^
			    ((id->src_ip6.__u6_addr.__u6_addr32[2] << 1) & 0xfffff) ^
			    ((id->src_ip6.__u6_addr.__u6_addr32[3] << 1) & 0xfffff) ^

			    ((id->src_ip6.__u6_addr.__u6_addr32[0] >> 16) & 0xffff) ^
			    ((id->src_ip6.__u6_addr.__u6_addr32[1] >> 16) & 0xffff) ^
			    ((id->src_ip6.__u6_addr.__u6_addr32[2] >> 16) & 0xffff) ^
			    ((id->src_ip6.__u6_addr.__u6_addr32[3] >> 16) & 0xffff) ^

			    (id->dst_port << 1) ^ (id->src_port) ^
			    (id->proto) ^
			    (id->flow_id6);
		} else {
			id->dst_ip &= fs->flow_mask.dst_ip;
			id->src_ip &= fs->flow_mask.src_ip;

			i = ((id->dst_ip) & 0xffff) ^
			    ((id->dst_ip >> 15) & 0xffff) ^
			    ((id->src_ip << 1) & 0xffff) ^
			    ((id->src_ip >> 16) & 0xffff) ^
			    (id->dst_port << 1) ^ (id->src_port) ^
			    (id->proto);
		}
		i = i % fs->rq_size;
		/* finally, scan the current list for a match */
		searches++;
		for (prev = NULL, q = fs->rq[i]; q;) {
			search_steps++;
			if (is_v6 &&
			    IN6_ARE_ADDR_EQUAL(&id->dst_ip6, &q->id.dst_ip6) &&
			    IN6_ARE_ADDR_EQUAL(&id->src_ip6, &q->id.src_ip6) &&
			    id->dst_port == q->id.dst_port &&
			    id->src_port == q->id.src_port &&
			    id->proto == q->id.proto &&
			    id->flags == q->id.flags &&
			    id->flow_id6 == q->id.flow_id6) {
				break; /* found */
			}
			if (!is_v6 && id->dst_ip == q->id.dst_ip &&
			    id->src_ip == q->id.src_ip &&
			    id->dst_port == q->id.dst_port &&
			    id->src_port == q->id.src_port &&
			    id->proto == q->id.proto &&
			    id->flags == q->id.flags) {
				break; /* found */
			}
			/* No match. Check if we can expire the entry */
			if (pipe_expire && q->head == NULL && q->S == q->F + 1) {
				/* entry is idle and not in any heap, expire it */
				struct dn_flow_queue *old_q = q;

				if (prev != NULL) {
					prev->next = q = q->next;
				} else {
					fs->rq[i] = q = q->next;
				}
				fs->rq_elements--;
				kfree_type(struct dn_flow_queue, old_q);
				continue;
			}
			prev = q;
			q = q->next;
		}
		if (q && prev != NULL) { /* found and not in front */
			prev->next = q->next;
			q->next = fs->rq[i];
			fs->rq[i] = q;
		}
	}
	if (q == NULL) { /* no match, need to allocate a new entry */
		q = create_queue(fs, i);
		if (q != NULL) {
			q->id = *id;
		}
	}
	return q;
}

static int
red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len)
{
	/*
	 * RED algorithm
	 *
	 * RED calculates the average queue size (avg) using a low-pass filter
	 * with an exponential weighted (w_q) moving average:
	 *      avg <- (1-w_q) * avg + w_q * q_size
	 * where q_size is the queue length (measured in bytes or packets).
	 *
	 * If q_size == 0, we compute the idle time for the link, and set
	 *      avg <- avg * (1 - w_q)^(idle/s)
	 * where s is the time needed for transmitting a medium-sized packet.
	 *
	 * Now, if avg < min_th the packet is enqueued.
	 * If avg > max_th the packet is dropped. Otherwise, the packet is
	 * dropped with a probability P that is a function of avg.
	 */
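	/*
	 * Worked example for the linear region (illustrative): with
	 * min_th = 5 and max_th = 15 (in scaled units), the constants are
	 * c_1 = max_p/10 and c_2 = max_p*5/10, so a packet arriving with
	 * avg == 10 gets p_b = c_1*10 - c_2 = max_p/2, i.e. halfway up the
	 * ramp between the two thresholds, as expected.
	 */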

	int64_t p_b = 0;
	/* queue in bytes or packets ? */
	u_int q_size = (fs->flags_fs & DN_QSIZE_IS_BYTES) ? q->len_bytes : q->len;

	DPRINTF(("\ndummynet: %d q: %2u ", (int)curr_time, q_size));

	/* average queue size estimation */
	if (q_size != 0) {
		/*
		 * queue is not empty, avg <- avg + (q_size - avg) * w_q
		 */
		int diff = SCALE(q_size) - q->avg;
		int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q);

		q->avg += (int)v;
	} else {
		/*
		 * queue is empty, compute for how long the queue has been
		 * empty and use a lookup table for computing
		 * (1 - w_q)^(idle_time/s) where s is the time to send a
		 * (small) packet.
		 * XXX check wraps...
		 */
		if (q->avg) {
			u_int64_t t = (curr_time - q->q_time) / fs->lookup_step;

			q->avg = (t < fs->lookup_depth) ?
			    SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
		}
	}
	DPRINTF(("dummynet: avg: %u ", SCALE_VAL(q->avg)));

	/* should i drop ? */

	if (q->avg < fs->min_th) {
		q->count = -1;
		return 0; /* accept packet ; */
	}
	if (q->avg >= fs->max_th) { /* average queue >= max threshold */
		if (fs->flags_fs & DN_IS_GENTLE_RED) {
			/*
			 * According to Gentle-RED, if avg is greater than max_th the
			 * packet is dropped with a probability
			 *      p_b = c_3 * avg - c_4
			 * where c_3 = (1 - max_p) / max_th, and c_4 = 1 - 2 * max_p
			 */
			p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) - fs->c_4;
		} else {
			q->count = -1;
			DPRINTF(("dummynet: - drop"));
			return 1;
		}
	} else if (q->avg > fs->min_th) {
		/*
		 * we compute p_b using the linear dropping function
		 *      p_b = c_1 * avg - c_2
		 * where c_1 = max_p / (max_th - min_th), and
		 * c_2 = max_p * min_th / (max_th - min_th)
		 */
		p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2;
	}
	if (fs->flags_fs & DN_QSIZE_IS_BYTES) {
		p_b = (p_b * len) / fs->max_pkt_size;
	}
	if (++q->count == 0) {
		q->random = (my_random() & 0xffff);
	} else {
		/*
		 * q->count counts packets arrived since last drop, so a greater
		 * value of q->count means a greater packet drop probability.
		 */
		if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) {
			q->count = 0;
			DPRINTF(("dummynet: - red drop"));
			/* after a drop we calculate a new random value */
			q->random = (my_random() & 0xffff);
			return 1; /* drop */
		}
	}
	/* end of RED algorithm */
	return 0; /* accept */
}

static __inline struct dn_flow_set *
locate_flowset(int fs_nr)
{
	struct dn_flow_set *fs;
	SLIST_FOREACH(fs, &flowsethash[HASH(fs_nr)], next) {
		if (fs->fs_nr == fs_nr) {
			return fs;
		}
	}

	return NULL;
}

static __inline struct dn_pipe *
locate_pipe(int pipe_nr)
{
	struct dn_pipe *pipe;

	SLIST_FOREACH(pipe, &pipehash[HASH(pipe_nr)], next) {
		if (pipe->pipe_nr == pipe_nr) {
			return pipe;
		}
	}

	return NULL;
}

/*
 * dummynet hook for packets. Below 'pipe' is a pipe or a queue
 * depending on whether WF2Q or fixed bw is used.
 *
 * pipe_nr  pipe or queue the packet is destined for.
 * dir      where shall we send the packet after dummynet.
 * m        the mbuf with the packet
 * ifp      the 'ifp' parameter from the caller.
 *          NULL in ip_input, destination interface in ip_output,
 *          real_dst in bdg_forward
 * ro       route parameter (only used in ip_output, NULL otherwise)
 * dst      destination address, only used by ip_output
 * rule     matching rule, in case of multiple passes
 * flags    flags from the caller, only used in ip_output
 *
 */
static int
dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa)
{
	struct mbuf *head = NULL, *tail = NULL;
	struct dn_pkt_tag *pkt;
	struct m_tag *mtag;
	struct dn_flow_set *fs = NULL;
	struct dn_pipe *pipe;
	u_int32_t len = m->m_pkthdr.len;
	struct dn_flow_queue *q = NULL;
	int is_pipe = 0;
	struct timespec ts;
	struct timeval tv;

	DPRINTF(("dummynet_io m: 0x%llx pipe: %d dir: %d\n",
	    (uint64_t)VM_KERNEL_ADDRPERM(m), pipe_nr, dir));

#if DUMMYNET
	is_pipe = fwa->fwa_flags == DN_IS_PIPE ? 1 : 0;
#endif /* DUMMYNET */

	pipe_nr &= 0xffff;

	lck_mtx_lock(&dn_mutex);

	/* make all time measurements in milliseconds (ms) -
	 * here we convert secs and usecs to msecs (divide the
	 * usecs by 1000, discarding the remainder).
	 */
	microuptime(&tv);
	curr_time = (tv.tv_sec * 1000) + (tv.tv_usec / 1000);

	/*
	 * This is a dummynet rule, so we expect an O_PIPE or O_QUEUE rule.
	 */
	if (is_pipe) {
		pipe = locate_pipe(pipe_nr);
		if (pipe != NULL) {
			fs = &(pipe->fs);
		}
	} else {
		fs = locate_flowset(pipe_nr);
	}

1562 if (fs == NULL) {
1563 goto dropit; /* this queue/pipe does not exist! */
1564 }
1565 pipe = fs->pipe;
1566 if (pipe == NULL) { /* must be a queue, try find a matching pipe */
1567 pipe = locate_pipe(fs->parent_nr);
1568
1569 if (pipe != NULL) {
1570 fs->pipe = pipe;
1571 } else {
1572 printf("dummynet: no pipe %d for queue %d, drop pkt\n",
1573 fs->parent_nr, fs->fs_nr);
1574 goto dropit;
1575 }
1576 }
1577 q = find_queue(fs, &(fwa->fwa_id));
1578 if (q == NULL) {
1579 goto dropit; /* cannot allocate queue */
1580 }
1581 /*
1582 * update statistics, then check reasons to drop pkt
1583 */
1584 q->tot_bytes += len;
1585 q->tot_pkts++;
1586 if (fs->plr && (my_random() < fs->plr)) {
1587 goto dropit; /* random pkt drop */
1588 }
1589 if (fs->flags_fs & DN_QSIZE_IS_BYTES) {
1590 if (q->len_bytes > fs->qsize) {
1591 goto dropit; /* queue size overflow */
1592 }
1593 } else {
1594 if (q->len >= fs->qsize) {
1595 goto dropit; /* queue count overflow */
1596 }
1597 }
1598 if (fs->flags_fs & DN_IS_RED && red_drops(fs, q, len)) {
1599 goto dropit;
1600 }
1601
1602 /* XXX expensive to zero, see if we can remove it*/
1603 mtag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DUMMYNET,
1604 sizeof(struct dn_pkt_tag), M_NOWAIT, m);
1605 if (mtag == NULL) {
1606 goto dropit; /* cannot allocate packet header */
1607 }
1608 m_tag_prepend(m, mtag); /* attach to mbuf chain */
1609
1610 pkt = (struct dn_pkt_tag *)(mtag->m_tag_data);
1611 bzero(pkt, sizeof(struct dn_pkt_tag));
1612 /* ok, i can handle the pkt now... */
1613 /* build and enqueue packet + parameters */
1614 pkt->dn_pf_rule = fwa->fwa_pf_rule;
1615 pkt->dn_dir = dir;
1616
1617 pkt->dn_ifp = fwa->fwa_oif;
1618 if (dir == DN_TO_IP_OUT) {
1619 /*
1620 * We need to copy *ro because for ICMP pkts (and maybe others)
1621 * the caller passed a pointer into the stack; dst might also be
1622 * a pointer into *ro so it needs to be updated.
1623 */
1624 if (fwa->fwa_ro) {
1625 route_copyout(&pkt->dn_ro, fwa->fwa_ro, sizeof(pkt->dn_ro));
1626 }
1627 if (fwa->fwa_dst) {
1628 if (fwa->fwa_dst == (struct sockaddr_in *)(void *)&fwa->fwa_ro->ro_dst) { /* dst points into ro */
1629 fwa->fwa_dst = (struct sockaddr_in *)(void *)&(pkt->dn_ro.ro_dst);
1630 }
1631
1632 bcopy(fwa->fwa_dst, &pkt->dn_dst, sizeof(pkt->dn_dst));
1633 }
1634 } else if (dir == DN_TO_IP6_OUT) {
1635 if (fwa->fwa_ro6) {
1636 route_copyout((struct route *)&pkt->dn_ro6,
1637 (struct route *)fwa->fwa_ro6, sizeof(pkt->dn_ro6));
1638 }
1639 if (fwa->fwa_ro6_pmtu) {
1640 route_copyout((struct route *)&pkt->dn_ro6_pmtu,
1641 (struct route *)fwa->fwa_ro6_pmtu, sizeof(pkt->dn_ro6_pmtu));
1642 }
1643 if (fwa->fwa_dst6) {
1644 if (fwa->fwa_dst6 == (struct sockaddr_in6 *)&fwa->fwa_ro6->ro_dst) { /* dst points into ro */
1645 fwa->fwa_dst6 = (struct sockaddr_in6 *)&(pkt->dn_ro6.ro_dst);
1646 }
1647
1648 bcopy(fwa->fwa_dst6, &pkt->dn_dst6, sizeof(pkt->dn_dst6));
1649 }
1650 pkt->dn_origifp = fwa->fwa_origifp;
1651 pkt->dn_mtu = fwa->fwa_mtu;
1652 pkt->dn_unfragpartlen = fwa->fwa_unfragpartlen;
1653 if (fwa->fwa_exthdrs) {
1654 bcopy(fwa->fwa_exthdrs, &pkt->dn_exthdrs, sizeof(pkt->dn_exthdrs));
1655 /*
1656 * Need to zero out the source structure so the mbufs
1657 * won't be freed by ip6_output()
1658 */
1659 bzero(fwa->fwa_exthdrs, sizeof(struct ip6_exthdrs));
1660 }
1661 }
1662 if (dir == DN_TO_IP_OUT || dir == DN_TO_IP6_OUT) {
1663 pkt->dn_flags = fwa->fwa_oflags;
1664 if (fwa->fwa_ipoa != NULL) {
1665 pkt->dn_ipoa = *(fwa->fwa_ipoa);
1666 }
1667 }
1668 if (q->head == NULL) {
1669 q->head = m;
1670 } else {
1671 q->tail->m_nextpkt = m;
1672 }
1673 q->tail = m;
1674 q->len++;
1675 q->len_bytes += len;
1676
1677 if (q->head != m) { /* flow was not idle, we are done */
1678 goto done;
1679 }
1680 /*
1681 * If we reach this point the flow was previously idle, so we need
1682 * to schedule it. This involves different actions for fixed-rate or
1683 * WF2Q queues.
1684 */
1685 if (is_pipe) {
1686 /*
1687 * Fixed-rate queue: just insert into the ready_heap.
1688 */
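/*
 * Worked example (illustrative only; the exact SET_TICKS definition
 * appears earlier in this file): with curr_time kept in ms, the
 * service time of a packet is roughly len * 8 * 1000 / bandwidth,
 * so a 1500-byte packet through a 1 Mbit/s pipe needs about
 * 1500 * 8 * 1000 / 1000000 = 12 ms before it becomes ready.
 */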
1689 dn_key t = 0;
1690 if (pipe->bandwidth) {
1691 t = SET_TICKS(m, q, pipe);
1692 }
1693 q->sched_time = curr_time;
1694 if (t == 0) { /* must process it now */
1695 ready_event( q, &head, &tail );
1696 } else {
1697 heap_insert(&ready_heap, curr_time + t, q );
1698 }
1699 } else {
1700 /*
1701 * WF2Q. First, compute start time S: if the flow was idle (S=F+1)
1702 * set S to the virtual time V for the controlling pipe, and update
1703 * the sum of weights for the pipe; otherwise, remove flow from
1704 * idle_heap and set S to max(F,V).
1705 * Second, compute finish time F = S + len/weight.
1706 * Third, if pipe was idle, update V=max(S, V).
1707 * Fourth, count one more backlogged flow.
1708 */
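/*
 * Worked example (illustrative, not from the original source): suppose
 * the pipe's virtual time is V=100 and an idle flow (S>F) with weight 10
 * receives a 1000-byte packet. Then S becomes 100 and
 * F = S + (1000 << MY_M)/10, so a flow with twice the weight would see
 * its finish time advance half as far and thus be served twice as often.
 */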
1709 if (DN_KEY_GT(q->S, q->F)) { /* means timestamps are invalid */
1710 q->S = pipe->V;
1711 pipe->sum += fs->weight; /* add weight of new queue */
1712 } else {
1713 heap_extract(&(pipe->idle_heap), q);
1714 q->S = MAX64(q->F, pipe->V );
1715 }
1716 q->F = q->S + (len << MY_M) / (u_int64_t) fs->weight;
1717
1718 if (pipe->not_eligible_heap.elements == 0 &&
1719 pipe->scheduler_heap.elements == 0) {
1720 pipe->V = MAX64( q->S, pipe->V );
1721 }
1722 fs->backlogged++;
1723 /*
1724 * Look at eligibility. A flow is not eligible if S>V (when
1725 * this happens, it means that there is some other flow already
1726 * scheduled for the same pipe, so the scheduler_heap cannot be
1727 * empty). If the flow is not eligible we just store it in the
1728 * not_eligible_heap. Otherwise, we store it in the scheduler_heap
1729 * and possibly invoke ready_event_wfq() right now if there is
1730 * leftover credit.
1731 * Note that for all flows in scheduler_heap (SCH), S_i <= V,
1732 * and for all flows in not_eligible_heap (NEH), S_i > V.
1733 * So when we need to compute max(V, min(S_i)) for all i in SCH+NEH,
1734 * we only need to look into NEH.
1735 */
1736 if (DN_KEY_GT(q->S, pipe->V)) { /* not eligible */
1737 if (pipe->scheduler_heap.elements == 0) {
1738 printf("dummynet: ++ ouch! not eligible but empty scheduler!\n");
1739 }
1740 heap_insert(&(pipe->not_eligible_heap), q->S, q);
1741 } else {
1742 heap_insert(&(pipe->scheduler_heap), q->F, q);
1743 if (pipe->numbytes >= 0) { /* pipe is idle */
1744 if (pipe->scheduler_heap.elements != 1) {
1745 printf("dummynet: OUCH! pipe should have been idle!\n");
1746 }
1747 DPRINTF(("dummynet: waking up pipe %d at %d\n",
1748 pipe->pipe_nr, (int)(q->F >> MY_M)));
1749 pipe->sched_time = curr_time;
1750 ready_event_wfq(pipe, &head, &tail);
1751 }
1752 }
1753 }
1754 done:
1755 /* start the timer and set global if not already set */
1756 if (!timer_enabled) {
1757 ts.tv_sec = 0;
1758 ts.tv_nsec = 1 * 1000000; // 1ms
1759 timer_enabled = 1;
1760 bsd_timeout(dummynet, NULL, &ts);
1761 }
1762
1763 lck_mtx_unlock(&dn_mutex);
1764
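/*
 * Packets made ready above are transmitted only after dn_mutex has been
 * dropped, so the lock is never held across dummynet_send() and the
 * output path underneath it.
 */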
1765 if (head != NULL) {
1766 dummynet_send(head);
1767 }
1768
1769 return 0;
1770
1771 dropit:
1772 if (q) {
1773 q->drops++;
1774 }
1775 lck_mtx_unlock(&dn_mutex);
1776 m_freem(m);
1777 return (fs && (fs->flags_fs & DN_NOERROR)) ? 0 : ENOBUFS;
1778 }
1779
1780 /*
1781 * Below, the ROUTE_RELEASE is only needed when (pkt->dn_dir == DN_TO_IP_OUT);
1782 * making it conditional on dn_dir would probably save us the initial bzero of dn_pkt.
1783 */
1784 #define DN_FREE_PKT(_m) do { \
1785 struct m_tag *tag = m_tag_locate(_m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DUMMYNET); \
1786 if (tag) { \
1787 struct dn_pkt_tag *n = (struct dn_pkt_tag *)(tag->m_tag_data); \
1788 ROUTE_RELEASE(&n->dn_ro); \
1789 m_tag_delete(_m, tag); \
1790 } \
1791 m_freem(_m); \
1792 } while (0)
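/*
 * Typical use, as a minimal sketch mirroring purge_flow_set() below:
 * walk an mbuf packet chain, saving m_nextpkt before each mbuf is freed:
 *
 *	struct mbuf *m, *mnext = q->head;
 *	while ((m = mnext) != NULL) {
 *		mnext = m->m_nextpkt;
 *		DN_FREE_PKT(m);
 *	}
 */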
1793
1794 /*
1795 * Dispose of all packets and flow_queues on a flow_set.
1796 * If all=1, also remove the RED lookup table and other storage,
1797 * including the descriptor itself.
1798 * For the flow_set embedded in a dn_pipe, the caller MUST also clean up the ready_heap.
1799 */
1800 static void
1801 purge_flow_set(struct dn_flow_set *fs, int all)
1802 {
1803 struct dn_flow_queue *q, *qn;
1804 int i;
1805
1806 LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);
1807
1808 for (i = 0; i <= fs->rq_size; i++) {
1809 for (q = fs->rq[i]; q; q = qn) {
1810 struct mbuf *m, *mnext;
1811
1812 mnext = q->head;
1813 while ((m = mnext) != NULL) {
1814 mnext = m->m_nextpkt;
1815 DN_FREE_PKT(m);
1816 }
1817 qn = q->next;
1818 kfree_type(struct dn_flow_queue, q);
1819 }
1820 fs->rq[i] = NULL;
1821 }
1822 fs->rq_elements = 0;
1823 if (all) {
1824 /* RED - free lookup table */
1825 if (fs->w_q_lookup) {
1826 kfree_data(fs->w_q_lookup, fs->lookup_depth * sizeof(int));
1827 }
1828 kfree_type(struct dn_flow_queue *, fs->rq_size + 1, fs->rq);
1829 /* if this fs is not part of a pipe, free it */
1830 if (fs->pipe && fs != &(fs->pipe->fs)) {
1831 kfree_type(struct dn_flow_set, fs);
1832 }
1833 }
1834 }
1835
1836 /*
1837 * Dispose of all packets queued on a pipe (not a flow_set).
1838 * Also free all resources associated with the pipe, which is
1839 * about to be deleted.
1840 */
1841 static void
1842 purge_pipe(struct dn_pipe *pipe)
1843 {
1844 struct mbuf *m, *mnext;
1845
1846 purge_flow_set( &(pipe->fs), 1 );
1847
1848 mnext = pipe->head;
1849 while ((m = mnext) != NULL) {
1850 mnext = m->m_nextpkt;
1851 DN_FREE_PKT(m);
1852 }
1853
1854 heap_free( &(pipe->scheduler_heap));
1855 heap_free( &(pipe->not_eligible_heap));
1856 heap_free( &(pipe->idle_heap));
1857 }
1858
1859 /*
1860 * Delete all pipes and heaps, returning their memory.
1861 */
1862 static void
1863 dummynet_flush(void)
1864 {
1865 struct dn_pipe *pipe, *pipe1;
1866 struct dn_flow_set *fs, *fs1;
1867 int i;
1868
1869 lck_mtx_lock(&dn_mutex);
1870
1871
1872 /* Free heaps so we don't have unwanted events. */
1873 heap_free(&ready_heap);
1874 heap_free(&wfq_ready_heap);
1875 heap_free(&extract_heap);
1876
1877 /*
1878 * Now purge all queued pkts and delete all pipes.
1879 *
1880 * XXXGL: can we merge the for(;;) cycles into one or not?
1881 */
1882 for (i = 0; i < HASHSIZE; i++) {
1883 SLIST_FOREACH_SAFE(fs, &flowsethash[i], next, fs1) {
1884 SLIST_REMOVE(&flowsethash[i], fs, dn_flow_set, next);
1885 purge_flow_set(fs, 1);
1886 }
1887 }
1888 for (i = 0; i < HASHSIZE; i++) {
1889 SLIST_FOREACH_SAFE(pipe, &pipehash[i], next, pipe1) {
1890 SLIST_REMOVE(&pipehash[i], pipe, dn_pipe, next);
1891 purge_pipe(pipe);
1892 kfree_type(struct dn_pipe, pipe);
1893 }
1894 }
1895 lck_mtx_unlock(&dn_mutex);
1896 }
1897
1898 /*
1899 * setup RED parameters
1900 */
1901 static int
1902 config_red(struct dn_flow_set *p, struct dn_flow_set *x)
1903 {
1904 int i;
1905
1906 x->w_q = p->w_q;
1907 x->min_th = SCALE(p->min_th);
1908 x->max_th = SCALE(p->max_th);
1909 x->max_p = p->max_p;
1910
1911 x->c_1 = p->max_p / (p->max_th - p->min_th);
1912 x->c_2 = SCALE_MUL(x->c_1, SCALE(p->min_th));
1913 if (x->flags_fs & DN_IS_GENTLE_RED) {
1914 x->c_3 = (SCALE(1) - p->max_p) / p->max_th;
1915 x->c_4 = (SCALE(1) - 2 * p->max_p);
1916 }
1917
1918 /* if the lookup table already exists, free it and create it again */
1919 if (x->w_q_lookup) {
1920 kfree_data(x->w_q_lookup, x->lookup_depth * sizeof(int));
1921 x->w_q_lookup = NULL;
1922 }
1923 if (red_lookup_depth == 0) {
1924 printf("\ndummynet: net.inet.ip.dummynet.red_lookup_depth must be > 0\n");
1925 return EINVAL;
1926 }
1927 x->lookup_depth = red_lookup_depth;
1928 x->w_q_lookup = (u_int *) kalloc_data(x->lookup_depth * sizeof(int),
1929 Z_NOWAIT);
1930 if (x->w_q_lookup == NULL) {
1931 printf("dummynet: sorry, cannot allocate red lookup table\n");
1932 return ENOSPC;
1933 }
1934
1935 /* fill the lookup table with (1 - w_q)^x */
1936 x->lookup_step = p->lookup_step;
1937 x->lookup_weight = p->lookup_weight;
1938 x->w_q_lookup[0] = SCALE(1) - x->w_q;
1939 for (i = 1; i < x->lookup_depth; i++) {
1940 x->w_q_lookup[i] = SCALE_MUL(x->w_q_lookup[i - 1], x->lookup_weight);
1941 }
1942 if (red_avg_pkt_size < 1) {
1943 red_avg_pkt_size = 512;
1944 }
1945 x->avg_pkt_size = red_avg_pkt_size;
1946 if (red_max_pkt_size < 1) {
1947 red_max_pkt_size = 1500;
1948 }
1949 x->max_pkt_size = red_max_pkt_size;
1950 return 0;
1951 }
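/*
 * Worked example (illustrative only): the table holds scaled powers of
 * (1 - w_q). With w_q = 0.002 (in scaled form), w_q_lookup[0] = SCALE(0.998)
 * and, assuming userland passes lookup_weight = (1 - w_q)^lookup_step in
 * scaled form, entry i approximates (1 - w_q)^(1 + i*lookup_step). The
 * average-queue estimator can then decay its estimate over an idle period
 * with a single table lookup instead of one multiply per tick.
 */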
1952
1953 static int
1954 alloc_hash(struct dn_flow_set *x, struct dn_flow_set *pfs)
1955 {
1956 if (x->flags_fs & DN_HAVE_FLOW_MASK) { /* allocate some slots */
1957 int l = pfs->rq_size;
1958
1959 if (l == 0) {
1960 l = dn_hash_size;
1961 }
1962 if (l < 4) {
1963 l = 4;
1964 } else if (l > DN_MAX_HASH_SIZE) {
1965 l = DN_MAX_HASH_SIZE;
1966 }
1967 x->rq_size = l;
1968 } else { /* one is enough for null mask */
1969 x->rq_size = 1;
1970 }
1971 x->rq = kalloc_type(struct dn_flow_queue *, x->rq_size + 1,
1972 Z_NOWAIT | Z_ZERO);
1973 if (x->rq == NULL) {
1974 printf("dummynet: sorry, cannot allocate queue\n");
1975 return ENOSPC;
1976 }
1977 x->rq_elements = 0;
1978 return 0;
1979 }
1980
1981 static int
1982 set_fs_parms(struct dn_flow_set *x, struct dn_flow_set *src)
1983 {
1984 x->flags_fs = src->flags_fs;
1985 x->qsize = src->qsize;
1986 x->plr = src->plr;
1987 x->flow_mask = src->flow_mask;
1988 if (x->flags_fs & DN_QSIZE_IS_BYTES) {
1989 if (x->qsize > 1024 * 1024) {
1990 x->qsize = 1024 * 1024;
1991 }
1992 } else {
1993 if (x->qsize == 0) {
1994 x->qsize = 50;
1995 }
1996 if (x->qsize > 100) {
1997 x->qsize = 50;
1998 }
1999 }
2000 /* configuring RED */
2001 if (x->flags_fs & DN_IS_RED) {
2002 return config_red(src, x); /* XXX should check errors */
2003 }
2004 return 0;
2005 }
2006
2007 /*
2008 * setup pipe or queue parameters.
2009 */
2010 static int
2011 config_pipe(struct dn_pipe *p)
2012 {
2013 int i, r;
2014 struct dn_flow_set *pfs = &(p->fs);
2015 struct dn_flow_queue *q;
2016 bool is_new = false;
2017
2018 /*
2019 * The config program passes parameters as follows:
2020 * bw = bits/second (0 means no limits),
2021 * delay = ms, must be translated into ticks.
2022 * qsize = slots/bytes
2023 */
2024 p->delay = (p->delay * (hz * 10)) / 1000;
2025 /* We need either a pipe number or a flow_set number */
2026 if (p->pipe_nr == 0 && pfs->fs_nr == 0) {
2027 return EINVAL;
2028 }
2029 if (p->pipe_nr != 0 && pfs->fs_nr != 0) {
2030 return EINVAL;
2031 }
2032 if (p->pipe_nr != 0) { /* this is a pipe */
2033 struct dn_pipe *x, *b;
2034 struct dummynet_event dn_event;
2035 lck_mtx_lock(&dn_mutex);
2036
2037 /* locate pipe */
2038 b = locate_pipe(p->pipe_nr);
2039
2040 if (b == NULL || b->pipe_nr != p->pipe_nr) { /* new pipe */
2041 is_new = true;
2042 x = kalloc_type(struct dn_pipe, Z_NOWAIT | Z_ZERO);
2043 if (x == NULL) {
2044 lck_mtx_unlock(&dn_mutex);
2045 printf("dummynet: no memory for new pipe\n");
2046 return ENOSPC;
2047 }
2048 x->pipe_nr = p->pipe_nr;
2049 x->fs.pipe = x;
2050 /* idle_heap is the only heap from which we extract from the middle. */
2052 x->idle_heap.size = x->idle_heap.elements = 0;
2053 x->idle_heap.offset = offsetof(struct dn_flow_queue, heap_pos);
2054 } else {
2055 x = b;
2056 /* Flush accumulated credit for all queues */
2057 for (i = 0; i <= x->fs.rq_size; i++) {
2058 for (q = x->fs.rq[i]; q; q = q->next) {
2059 q->numbytes = 0;
2060 }
2061 }
2062 }
2063
2064 x->bandwidth = p->bandwidth;
2065 x->numbytes = 0; /* just in case... */
2066 bcopy(p->if_name, x->if_name, sizeof(p->if_name));
2067 x->ifp = NULL; /* reset interface ptr */
2068 x->delay = p->delay;
2069 r = set_fs_parms(&(x->fs), pfs);
2070 if (r != 0) {
2071 lck_mtx_unlock(&dn_mutex);
2072 if (is_new) { /* a new pipe */
2073 kfree_type(struct dn_pipe, x);
2074 }
2075 return r;
2076 }
2077
2078 if (x->fs.rq == NULL) { /* a new pipe */
2079 r = alloc_hash(&(x->fs), pfs);
2080 if (r) {
2081 lck_mtx_unlock(&dn_mutex);
2082 if (is_new) {
2083 kfree_type(struct dn_pipe, x);
2084 }
2085 return r;
2086 }
2087 SLIST_INSERT_HEAD(&pipehash[HASH(x->pipe_nr)],
2088 x, next);
2089 }
2090 lck_mtx_unlock(&dn_mutex);
2091
2092 bzero(&dn_event, sizeof(dn_event));
2093 dn_event.dn_event_code = DUMMYNET_PIPE_CONFIG;
2094 dn_event.dn_event_pipe_config.bandwidth = p->bandwidth;
2095 dn_event.dn_event_pipe_config.delay = p->delay;
2096 dn_event.dn_event_pipe_config.plr = pfs->plr;
2097
2098 dummynet_event_enqueue_nwk_wq_entry(&dn_event);
2099 } else { /* config queue */
2100 struct dn_flow_set *x, *b;
2101
2102 lck_mtx_lock(&dn_mutex);
2103 /* locate flow_set */
2104 b = locate_flowset(pfs->fs_nr);
2105
2106 if (b == NULL || b->fs_nr != pfs->fs_nr) { /* new */
2107 is_new = true;
2108 if (pfs->parent_nr == 0) { /* need link to a pipe */
2109 lck_mtx_unlock(&dn_mutex);
2110 return EINVAL;
2111 }
2112 x = kalloc_type(struct dn_flow_set, Z_NOWAIT | Z_ZERO);
2113 if (x == NULL) {
2114 lck_mtx_unlock(&dn_mutex);
2115 printf("dummynet: no memory for new flow_set\n");
2116 return ENOSPC;
2117 }
2118 x->fs_nr = pfs->fs_nr;
2119 x->parent_nr = pfs->parent_nr;
2120 x->weight = pfs->weight;
2121 if (x->weight == 0) {
2122 x->weight = 1;
2123 } else if (x->weight > 100) {
2124 x->weight = 100;
2125 }
2126 } else {
2127 /* Changing the parent pipe is not allowed; delete and recreate the queue */
2128 if (pfs->parent_nr != 0 && b->parent_nr != pfs->parent_nr) {
2129 lck_mtx_unlock(&dn_mutex);
2130 return EINVAL;
2131 }
2132 x = b;
2133 }
2134 r = set_fs_parms(x, pfs);
2135 if (r != 0) {
2136 lck_mtx_unlock(&dn_mutex);
2137 printf("dummynet: cannot set flow_set parameters\n");
2138 if (is_new) {
2139 kfree_type(struct dn_flow_set, x);
2140 }
2141 return r;
2142 }
2143
2144 if (x->rq == NULL) { /* a new flow_set */
2145 r = alloc_hash(x, pfs);
2146 if (r) {
2147 lck_mtx_unlock(&dn_mutex);
2148 kfree_type(struct dn_flow_set, x);
2149 return r;
2150 }
2151 SLIST_INSERT_HEAD(&flowsethash[HASH(x->fs_nr)],
2152 x, next);
2153 }
2154 lck_mtx_unlock(&dn_mutex);
2155 }
2156 return 0;
2157 }
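#if 0
/*
 * Userland sketch (illustrative only; not compiled here). Configuration
 * requests reach config_pipe() via setsockopt() on a raw IP socket,
 * dispatched by ip_dn_ctl() below. A hypothetical caller, with names and
 * error handling purely illustrative, could look like:
 */
#include <strings.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip_dummynet.h>

static int
example_config_pipe(int pipe_nr, int bw_bps, int delay_ms)
{
	struct dn_pipe p;
	int s, error;

	s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
	if (s < 0) {
		return -1;
	}
	bzero(&p, sizeof(p));
	p.pipe_nr = pipe_nr;    /* non-zero selects the pipe branch above */
	p.bandwidth = bw_bps;   /* bits/second, 0 means no limit */
	p.delay = delay_ms;     /* ms; config_pipe() converts to ticks */
	error = setsockopt(s, IPPROTO_IP, IP_DUMMYNET_CONFIGURE,
	    &p, sizeof(p));
	close(s);
	return error;
}
#endif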
2158
2159 /*
2160 * Helper function to remove from a heap queues which are linked to
2161 * a flow_set about to be deleted.
2162 */
2163 static void
2164 fs_remove_from_heap(struct dn_heap *h, struct dn_flow_set *fs)
2165 {
2166 int i = 0, found = 0;
2167 for (; i < h->elements;) {
2168 if (((struct dn_flow_queue *)h->p[i].object)->fs == fs) {
2169 h->elements--;
2170 h->p[i] = h->p[h->elements];
2171 found++;
2172 } else {
2173 i++;
2174 }
2175 }
2176 if (found) {
2177 heapify(h);
2178 }
2179 }
2180
2181 /*
2182 * helper function to remove a pipe from a heap (can be there at most once)
2183 */
2184 static void
2185 pipe_remove_from_heap(struct dn_heap *h, struct dn_pipe *p)
2186 {
2187 if (h->elements > 0) {
2188 int i = 0;
2189 for (i = 0; i < h->elements; i++) {
2190 if (h->p[i].object == p) { /* found it */
2191 h->elements--;
2192 h->p[i] = h->p[h->elements];
2193 heapify(h);
2194 break;
2195 }
2196 }
2197 }
2198 }
2199
2200 /*
2201 * drain all queues. Called in case of severe mbuf shortage.
2202 */
2203 void
2204 dummynet_drain(void)
2205 {
2206 struct dn_flow_set *fs;
2207 struct dn_pipe *p;
2208 struct mbuf *m, *mnext;
2209 int i;
2210
2211 LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);
2212
2213 heap_free(&ready_heap);
2214 heap_free(&wfq_ready_heap);
2215 heap_free(&extract_heap);
2216 /* purge all queued packets from the flow_sets */
2217 for (i = 0; i < HASHSIZE; i++) {
2218 SLIST_FOREACH(fs, &flowsethash[i], next) {
2219 purge_flow_set(fs, 0);
2220 }
2221 }
2222
2223 for (i = 0; i < HASHSIZE; i++) {
2224 SLIST_FOREACH(p, &pipehash[i], next) {
2225 purge_flow_set(&(p->fs), 0);
2226
2227 mnext = p->head;
2228 while ((m = mnext) != NULL) {
2229 mnext = m->m_nextpkt;
2230 DN_FREE_PKT(m);
2231 }
2232 p->head = p->tail = NULL;
2233 }
2234 }
2235 }
2236
2237 /*
2238 * Fully delete a pipe or a queue, cleaning up associated info.
2239 */
2240 static int
2241 delete_pipe(struct dn_pipe *p)
2242 {
2243 if (p->pipe_nr == 0 && p->fs.fs_nr == 0) {
2244 return EINVAL;
2245 }
2246 if (p->pipe_nr != 0 && p->fs.fs_nr != 0) {
2247 return EINVAL;
2248 }
2249 if (p->pipe_nr != 0) { /* this is an old-style pipe */
2250 struct dn_pipe *b;
2251 struct dn_flow_set *fs;
2252 int i;
2253
2254 lck_mtx_lock(&dn_mutex);
2255 /* locate pipe */
2256 b = locate_pipe(p->pipe_nr);
2257 if (b == NULL) {
2258 lck_mtx_unlock(&dn_mutex);
2259 return EINVAL; /* not found */
2260 }
2261
2262 /* Unlink from list of pipes. */
2263 SLIST_REMOVE(&pipehash[HASH(b->pipe_nr)], b, dn_pipe, next);
2264
2265
2266 /* Remove all references to this pipe from flow_sets. */
2267 for (i = 0; i < HASHSIZE; i++) {
2268 SLIST_FOREACH(fs, &flowsethash[i], next) {
2269 if (fs->pipe == b) {
2270 printf("dummynet: ++ ref to pipe %d from fs %d\n",
2271 p->pipe_nr, fs->fs_nr);
2272 fs->pipe = NULL;
2273 purge_flow_set(fs, 0);
2274 }
2275 }
2276 }
2277 fs_remove_from_heap(&ready_heap, &(b->fs));
2278
2279 purge_pipe(b); /* remove all data associated to this pipe */
2280 /* remove reference to here from extract_heap and wfq_ready_heap */
2281 pipe_remove_from_heap(&extract_heap, b);
2282 pipe_remove_from_heap(&wfq_ready_heap, b);
2283 lck_mtx_unlock(&dn_mutex);
2284
2285 kfree_type(struct dn_pipe, b);
2286 } else { /* this is a WF2Q queue (dn_flow_set) */
2287 struct dn_flow_set *b;
2288
2289 lck_mtx_lock(&dn_mutex);
2290 /* locate set */
2291 b = locate_flowset(p->fs.fs_nr);
2292 if (b == NULL) {
2293 lck_mtx_unlock(&dn_mutex);
2294 return EINVAL; /* not found */
2295 }
2296
2297
2298 /* Unlink from list of flowsets. */
2299 SLIST_REMOVE( &flowsethash[HASH(b->fs_nr)], b, dn_flow_set, next);
2300
2301 if (b->pipe != NULL) {
2302 /* Update total weight on parent pipe and cleanup parent heaps */
2303 b->pipe->sum -= b->weight * b->backlogged;
2304 fs_remove_from_heap(&(b->pipe->not_eligible_heap), b);
2305 fs_remove_from_heap(&(b->pipe->scheduler_heap), b);
2306 #if 1 /* XXX should I remove from idle_heap as well? */
2307 fs_remove_from_heap(&(b->pipe->idle_heap), b);
2308 #endif
2309 }
2310 purge_flow_set(b, 1);
2311 lck_mtx_unlock(&dn_mutex);
2312 }
2313 return 0;
2314 }
2315
2316 /*
2317 * helper functions used to copy data from the kernel to userland in DUMMYNET_GET
2318 */
2319 static
2320 char*
2321 dn_copy_set_32(struct dn_flow_set *set, char *bp)
2322 {
2323 int i, copied = 0;
2324 struct dn_flow_queue *q;
2325 struct dn_flow_queue_32 *qp = (struct dn_flow_queue_32 *)(void *)bp;
2326
2327 LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);
2328
2329 for (i = 0; i <= set->rq_size; i++) {
2330 for (q = set->rq[i]; q; q = q->next, qp++) {
2331 if (q->hash_slot != i) {
2332 printf("dummynet: ++ at %d: wrong slot (have %d, "
2333 "should be %d)\n", copied, q->hash_slot, i);
2334 }
2335 if (q->fs != set) {
2336 printf("dummynet: ++ at %d: wrong fs ptr "
2337 "(have 0x%llx, should be 0x%llx)\n", i,
2338 (uint64_t)VM_KERNEL_ADDRPERM(q->fs),
2339 (uint64_t)VM_KERNEL_ADDRPERM(set));
2340 }
2341 copied++;
2342 cp_queue_to_32_user( q, qp );
2343 /* cleanup pointers */
2344 qp->next = (user32_addr_t)0;
2345 qp->head = qp->tail = (user32_addr_t)0;
2346 qp->fs = (user32_addr_t)0;
2347 }
2348 }
2349 if (copied != set->rq_elements) {
2350 printf("dummynet: ++ wrong count, have %d should be %d\n",
2351 copied, set->rq_elements);
2352 }
2353 return (char *)qp;
2354 }
2355
2356 static
2357 char*
2358 dn_copy_set_64(struct dn_flow_set *set, char *bp)
2359 {
2360 int i, copied = 0;
2361 struct dn_flow_queue *q;
2362 struct dn_flow_queue_64 *qp = (struct dn_flow_queue_64 *)(void *)bp;
2363
2364 LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);
2365
2366 for (i = 0; i <= set->rq_size; i++) {
2367 for (q = set->rq[i]; q; q = q->next, qp++) {
2368 if (q->hash_slot != i) {
2369 printf("dummynet: ++ at %d: wrong slot (have %d, "
2370 "should be %d)\n", copied, q->hash_slot, i);
2371 }
2372 if (q->fs != set) {
2373 printf("dummynet: ++ at %d: wrong fs ptr "
2374 "(have 0x%llx, should be 0x%llx)\n", i,
2375 (uint64_t)VM_KERNEL_ADDRPERM(q->fs),
2376 (uint64_t)VM_KERNEL_ADDRPERM(set));
2377 }
2378 copied++;
2380 cp_queue_to_64_user( q, qp );
2381 /* cleanup pointers */
2382 qp->next = USER_ADDR_NULL;
2383 qp->head = qp->tail = USER_ADDR_NULL;
2384 qp->fs = USER_ADDR_NULL;
2385 }
2386 }
2387 if (copied != set->rq_elements) {
2388 printf("dummynet: ++ wrong count, have %d should be %d\n",
2389 copied, set->rq_elements);
2390 }
2391 return (char *)qp;
2392 }
2393
2394 static size_t
2395 dn_calc_size(int is64user)
2396 {
2397 struct dn_flow_set *set;
2398 struct dn_pipe *p;
2399 size_t size = 0;
2400 size_t pipesize;
2401 size_t queuesize;
2402 size_t setsize;
2403 int i;
2404
2405 LCK_MTX_ASSERT(&dn_mutex, LCK_MTX_ASSERT_OWNED);
2406 if (is64user) {
2407 pipesize = sizeof(struct dn_pipe_64);
2408 queuesize = sizeof(struct dn_flow_queue_64);
2409 setsize = sizeof(struct dn_flow_set_64);
2410 } else {
2411 pipesize = sizeof(struct dn_pipe_32);
2412 queuesize = sizeof(struct dn_flow_queue_32);
2413 setsize = sizeof(struct dn_flow_set_32);
2414 }
2415 /*
2416 * compute size of data structures: list of pipes and flow_sets.
2417 */
2418 for (i = 0; i < HASHSIZE; i++) {
2419 SLIST_FOREACH(p, &pipehash[i], next) {
2420 size += pipesize +
2421 p->fs.rq_elements * queuesize;
2422 }
2423 SLIST_FOREACH(set, &flowsethash[i], next) {
2424 size += setsize +
2425 set->rq_elements * queuesize;
2426 }
2427 }
2428 return size;
2429 }
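/*
 * Note: the size computed here is only a snapshot; dummynet_get() below
 * drops dn_mutex to allocate the buffer, then calls dn_calc_size() again
 * and retries (up to 10 times) if pipes or flow_sets changed in between.
 */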
2430
2431 static int
2432 dummynet_get(struct sockopt *sopt)
2433 {
2434 char *buf = NULL, *bp = NULL; /* bp is the "copy-pointer" */
2435 size_t size = 0;
2436 struct dn_flow_set *set;
2437 struct dn_pipe *p;
2438 int error = 0, i;
2439 int is64user = 0;
2440
2441 /* XXX lock held too long */
2442 lck_mtx_lock(&dn_mutex);
2443 /*
2444 * XXX: Ugly, but we need to allocate memory with the Z_WAITOK flag
2445 * and we cannot block while holding the dummynet mutex.
2446 */
2447 if (proc_is64bit(sopt->sopt_p)) {
2448 is64user = 1;
2449 }
2450 for (i = 0; i < 10; i++) {
2451 size = dn_calc_size(is64user);
2452 lck_mtx_unlock(&dn_mutex);
2453 buf = kalloc_data(size, Z_WAITOK | Z_ZERO);
2454 if (buf == NULL) {
2455 return ENOBUFS;
2456 }
2457 lck_mtx_lock(&dn_mutex);
2458 if (size == dn_calc_size(is64user)) {
2459 break;
2460 }
2461 kfree_data(buf, size);
2462 buf = NULL;
2463 }
2464 if (buf == NULL) {
2465 lck_mtx_unlock(&dn_mutex);
2466 return ENOBUFS;
2467 }
2468
2469 bp = buf;
2470 for (i = 0; i < HASHSIZE; i++) {
2471 SLIST_FOREACH(p, &pipehash[i], next) {
2472 /*
2473 * copy pipe descriptor into *bp, convert delay
2474 * back to ms, then copy the flow_set descriptor(s)
2475 * one at a time. After each flow_set, copy the
2476 * queue descriptor it owns.
2477 */
2478 if (is64user) {
2479 bp = cp_pipe_to_64_user(p,
2480 (struct dn_pipe_64 *)(void *)bp);
2481 } else {
2482 bp = cp_pipe_to_32_user(p,
2483 (struct dn_pipe_32 *)(void *)bp);
2484 }
2485 }
2486 }
2487 for (i = 0; i < HASHSIZE; i++) {
2488 SLIST_FOREACH(set, &flowsethash[i], next) {
2489 /* XXX same hack as above */
2490 if (is64user) {
2491 struct dn_flow_set_64 *fs_bp =
2492 (struct dn_flow_set_64 *)(void *)bp;
2493 cp_flow_set_to_64_user(set, fs_bp);
2494 fs_bp->next = CAST_DOWN(user64_addr_t, DN_IS_QUEUE);
2495 fs_bp->pipe = USER_ADDR_NULL;
2496 fs_bp->rq = USER_ADDR_NULL;
2497 bp += sizeof(struct dn_flow_set_64);
2498 bp = dn_copy_set_64(set, bp);
} else {
struct dn_flow_set_32 *fs_bp =
(struct dn_flow_set_32 *)(void *)bp;
cp_flow_set_to_32_user(set, fs_bp);
fs_bp->next = CAST_DOWN(user32_addr_t, DN_IS_QUEUE);
fs_bp->pipe = (user32_addr_t)0;
fs_bp->rq = (user32_addr_t)0;
bp += sizeof(struct dn_flow_set_32);
bp = dn_copy_set_32(set, bp);
}
2499 }
2500 }
2501 lck_mtx_unlock(&dn_mutex);
2502 error = sooptcopyout(sopt, buf, size);
2503 kfree_data(buf, size);
2504 return error;
2505 }
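#if 0
/*
 * Userland sketch (illustrative only; not compiled here): the dump built
 * above is fetched with getsockopt(). The exact buffer need is not known
 * in advance, so a hypothetical caller supplies a generous buffer and the
 * kernel reports the number of bytes actually copied out.
 */
#include <stdlib.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip_dummynet.h>

static void *
example_get_pipes(int s, socklen_t *lenp)
{
	socklen_t len = 64 * 1024;      /* generous guess */
	void *buf = malloc(len);

	if (buf != NULL &&
	    getsockopt(s, IPPROTO_IP, IP_DUMMYNET_GET, buf, &len) == 0) {
		*lenp = len;            /* bytes actually returned */
		return buf;
	}
	free(buf);
	return NULL;
}
#endif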
2506
2507 /*
2508 * Handler for the various dummynet socket options (get, flush, config, del)
2509 */
2510 static int
2511 ip_dn_ctl(struct sockopt *sopt)
2512 {
2513 int error = 0;
2514 struct dn_pipe *p, tmp_pipe;
2515
2516 /* Disallow sets in really-really secure mode. */
2517 if (sopt->sopt_dir == SOPT_SET && securelevel >= 3) {
2518 return EPERM;
2519 }
2520
2521 switch (sopt->sopt_name) {
2522 default:
2523 printf("dummynet: -- unknown option %d\n", sopt->sopt_name);
2524 return EINVAL;
2525
2526 case IP_DUMMYNET_GET:
2527 error = dummynet_get(sopt);
2528 break;
2529
2530 case IP_DUMMYNET_FLUSH:
2531 dummynet_flush();
2532 break;
2533
2534 case IP_DUMMYNET_CONFIGURE:
2535 p = &tmp_pipe;
2536 if (proc_is64bit(sopt->sopt_p)) {
2537 error = cp_pipe_from_user_64( sopt, p );
2538 } else {
2539 error = cp_pipe_from_user_32( sopt, p );
2540 }
2541
2542 if (error) {
2543 break;
2544 }
2545 error = config_pipe(p);
2546 break;
2547
2548 case IP_DUMMYNET_DEL: /* remove a pipe or queue */
2549 p = &tmp_pipe;
2550 if (proc_is64bit(sopt->sopt_p)) {
2551 error = cp_pipe_from_user_64( sopt, p );
2552 } else {
2553 error = cp_pipe_from_user_32( sopt, p );
2554 }
2555 if (error) {
2556 break;
2557 }
2558
2559 error = delete_pipe(p);
2560 break;
2561 }
2562 return error;
2563 }
2564
2565 void
2566 dummynet_init(void)
2567 {
2568 eventhandler_lists_ctxt_init(&dummynet_evhdlr_ctxt);
2569 }
2570
2571 void
2572 ip_dn_init(void)
2573 {
2574 /* initialize the heaps and install the dummynet hooks */
2575 ready_heap.size = ready_heap.elements = 0;
2576 ready_heap.offset = 0;
2577
2578 wfq_ready_heap.size = wfq_ready_heap.elements = 0;
2579 wfq_ready_heap.offset = 0;
2580
2581 extract_heap.size = extract_heap.elements = 0;
2582 extract_heap.offset = 0;
2583 ip_dn_ctl_ptr = ip_dn_ctl;
2584 ip_dn_io_ptr = dummynet_io;
2585 }
2586
2587 struct dn_event_nwk_wq_entry {
2588 struct nwk_wq_entry nwk_wqe;
2589 struct dummynet_event dn_ev_arg;
2590 };
2591
2592 static void
2593 dummynet_event_callback(struct nwk_wq_entry *nwk_item)
2594 {
2595 struct dn_event_nwk_wq_entry *p_ev;
2596
2597 p_ev = __container_of(nwk_item, struct dn_event_nwk_wq_entry, nwk_wqe);
2598
2599 EVENTHANDLER_INVOKE(&dummynet_evhdlr_ctxt, dummynet_event, &p_ev->dn_ev_arg);
2600
2601 kfree_type(struct dn_event_nwk_wq_entry, p_ev);
2602 }
2603
2604 void
2605 dummynet_event_enqueue_nwk_wq_entry(struct dummynet_event *p_dn_event)
2606 {
2607 struct dn_event_nwk_wq_entry *p_ev = NULL;
2608
2609 p_ev = kalloc_type(struct dn_event_nwk_wq_entry,
2610 Z_WAITOK | Z_ZERO | Z_NOFAIL);
2611 p_ev->nwk_wqe.func = dummynet_event_callback;
2612 p_ev->dn_ev_arg = *p_dn_event;
2613 nwk_wq_enqueue(&p_ev->nwk_wqe);
2614 }
2615
2616 struct dummynet_tag_container {
2617 struct m_tag dtc_m_tag;
2618 struct dn_pkt_tag dtc_dn_pkt_tag;
2619 };
2620
2621 struct m_tag *
2622 m_tag_kalloc_dummynet(u_int32_t id, u_int16_t type, uint16_t len, int wait)
2623 {
2624 struct dummynet_tag_container *tag_container;
2625 struct m_tag *tag = NULL;
2626
2627 assert3u(id, ==, KERNEL_MODULE_TAG_ID);
2628 assert3u(type, ==, KERNEL_TAG_TYPE_DUMMYNET);
2629 assert3u(len, ==, sizeof(struct dn_pkt_tag));
2630
2631 if (len != sizeof(struct dn_pkt_tag)) {
2632 return NULL;
2633 }
2634
2635 tag_container = kalloc_type(struct dummynet_tag_container, wait | M_ZERO);
2636 if (tag_container != NULL) {
2637 tag = &tag_container->dtc_m_tag;
2638
2639 assert3p(tag, ==, tag_container);
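/*
 * The tag pointer can double as the container pointer only because
 * dtc_m_tag is the first member of struct dummynet_tag_container;
 * the assert3p() above documents that layout assumption.
 */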
2640
2641 M_TAG_INIT(tag, id, type, len, &tag_container->dtc_dn_pkt_tag, NULL);
2642 }
2643
2644 return tag;
2645 }
2646
2647 void
2648 m_tag_kfree_dummynet(struct m_tag *tag)
2649 {
2650 struct dummynet_tag_container *tag_container = (struct dummynet_tag_container *)tag;
2651
2652 assert3u(tag->m_tag_len, ==, sizeof(struct dn_pkt_tag));
2653
2654 kfree_type(struct dummynet_tag_container, tag_container);
2655 }
2656
2657 void
2658 dummynet_register_m_tag(void)
2659 {
2660 int error;
2661
2662 error = m_register_internal_tag_type(KERNEL_TAG_TYPE_DUMMYNET, sizeof(struct dn_pkt_tag),
2663 m_tag_kalloc_dummynet, m_tag_kfree_dummynet);
2664
2665 assert3u(error, ==, 0);
2666 }
2667