1 /*
2 * Copyright (c) 1998-2022 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1988, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
62 */
63 /*
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
67 * Version 2.0.
68 */
69
70 #include <ptrauth.h>
71
72 #include <stdint.h>
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/malloc.h>
76 #include <sys/mbuf.h>
77 #include <sys/kernel.h>
78 #include <sys/sysctl.h>
79 #include <sys/syslog.h>
80 #include <sys/protosw.h>
81 #include <sys/domain.h>
82 #include <sys/queue.h>
83 #include <sys/proc.h>
84 #include <sys/filedesc.h>
85 #include <sys/file_internal.h>
86
87 #include <vm/vm_kern_xnu.h>
88
89 #include <dev/random/randomdev.h>
90
91 #include <kern/kern_types.h>
92 #include <kern/simple_lock.h>
93 #include <kern/queue.h>
94 #include <kern/sched_prim.h>
95 #include <kern/backtrace.h>
96 #include <kern/percpu.h>
97 #include <kern/zalloc.h>
98
99 #include <libkern/OSDebug.h>
100 #include <libkern/libkern.h>
101
102 #include <os/log.h>
103 #include <os/ptrtools.h>
104
105 #include <machine/limits.h>
106 #include <machine/machine_routines.h>
107
108 #if CONFIG_MBUF_MCACHE
109 #include <sys/mcache.h>
110 #include <IOKit/IOMapper.h>
111 #endif /* CONFIG_MBUF_MCACHE */
112
113 #include <net/ntstat.h>
114 #include <net/droptap.h>
115
116 #if INET
117 extern int dump_tcp_reass_qlen(char *, int);
118 extern int tcp_reass_qlen_space(struct socket *);
119 #endif /* INET */
120
121 #if MPTCP
122 extern int dump_mptcp_reass_qlen(char *, int);
123 #endif /* MPTCP */
124
125 #if NETWORKING
126 extern int dlil_dump_top_if_qlen(char *__counted_by(str_len), int str_len);
127 #endif /* NETWORKING */
128
129 #if CONFIG_MBUF_MCACHE
130 /*
131 * MBUF IMPLEMENTATION NOTES.
132 *
133 * There are a total of 5 per-CPU caches:
134 *
135 * MC_MBUF:
136 * This is a cache of rudimentary objects of _MSIZE in size; each
137 * object represents an mbuf structure. This cache preserves only
138 * the m_type field of the mbuf during its transactions.
139 *
140 * MC_CL:
141 * This is a cache of rudimentary objects of MCLBYTES in size; each
142 * object represents a mcluster structure. This cache does not
143 * preserve the contents of the objects during its transactions.
144 *
145 * MC_BIGCL:
146 * This is a cache of rudimentary objects of MBIGCLBYTES in size; each
147 * object represents a mbigcluster structure. This cache does not
148 * preserve the contents of the objects during its transaction.
149 *
150 * MC_MBUF_CL:
151 * This is a cache of mbufs each having a cluster attached to it.
152 * It is backed by MC_MBUF and MC_CL rudimentary caches. Several
153 * fields of the mbuf related to the external cluster are preserved
154 * during transactions.
155 *
156 * MC_MBUF_BIGCL:
157 * This is a cache of mbufs each having a big cluster attached to it.
158 * It is backed by MC_MBUF and MC_BIGCL rudimentary caches. Several
159 * fields of the mbuf related to the external cluster are preserved
160 * during transactions.
161 *
162 * OBJECT ALLOCATION:
163 *
164 * Allocation requests are handled first at the per-CPU (mcache) layer
165 * before falling back to the slab layer. Performance is optimal when
166 * the request is satisfied at the CPU layer because global data/lock
167 * never gets accessed. When the slab layer is entered for allocation,
168 * the slab freelist will be checked first for available objects before
169 * the VM backing store is invoked. Slab layer operations are serialized
170 * for all of the caches as the mbuf global lock is held most of the time.
171 * Allocation paths are different depending on the class of objects:
172 *
173 * a. Rudimentary object:
174 *
175 * { m_get_common(), m_clattach(), m_mclget(),
176 * m_mclalloc(), m_bigalloc(), m_copym_with_hdrs(),
177 * composite object allocation }
178 * | ^
179 * | |
180 * | +-----------------------+
181 * v |
182 * mcache_alloc/mcache_alloc_ext() mbuf_slab_audit()
183 * | ^
184 * v |
185 * [CPU cache] -------> (found?) -------+
186 * | |
187 * v |
188 * mbuf_slab_alloc() |
189 * | |
190 * v |
191 * +---------> [freelist] -------> (found?) -------+
192 * | |
193 * | v
194 * | m_clalloc()
195 * | |
196 * | v
197 * +---<<---- kmem_mb_alloc()
198 *
199 * b. Composite object:
200 *
201 * { m_getpackets_internal(), m_allocpacket_internal() }
202 * | ^
203 * | |
204 * | +------ (done) ---------+
205 * v |
206 * mcache_alloc/mcache_alloc_ext() mbuf_cslab_audit()
207 * | ^
208 * v |
209 * [CPU cache] -------> (found?) -------+
210 * | |
211 * v |
212 * mbuf_cslab_alloc() |
213 * | |
214 * v |
215 * [freelist] -------> (found?) -------+
216 * | |
217 * v |
218 * (rudimentary object) |
219 * mcache_alloc/mcache_alloc_ext() ------>>-----+
220 *
221 * Auditing notes: If auditing is enabled, buffers will be subjected to
222 * integrity checks by the audit routine. This is done by verifying their
223 * contents against DEADBEEF (free) pattern before returning them to caller.
224 * As part of this step, the routine will also record the transaction and
225 * pattern-fill the buffers with BADDCAFE (uninitialized) pattern. It will
226 * also restore any constructed data structure fields if necessary.
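 *
 * As a rough, illustrative sketch only (not a complete code path), an
 * internal caller obtains and releases a composite mbuf + cluster object
 * through these caches along the lines of:
 *
 *	struct mbuf *m;
 *
 *	m = mcache_alloc(m_cache(MC_MBUF_CL), MCR_SLEEP);
 *	if (m != NULL) {
 *		... initialize and use the mbuf ...
 *		mcache_free(m_cache(MC_MBUF_CL), m);
 *	}
 *
 * Real callers such as m_getpackets_internal() also initialize the mbuf
 * header (MBUF_INIT and friends) before handing the object out.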
227 *
228 * OBJECT DEALLOCATION:
229 *
230 * Freeing an object simply involves placing it into the CPU cache; this
231 * pollutes the cache to benefit subsequent allocations. The slab layer
232 * will only be entered if the object is to be purged out of the cache.
233 * During normal operations, this happens only when the CPU layer resizes
234 * its bucket while it's adjusting to the allocation load. Deallocation
235 * paths are different depending on the class of objects:
236 *
237 * a. Rudimentary object:
238 *
239 * { m_free(), m_freem_list(), composite object deallocation }
240 * | ^
241 * | |
242 * | +------ (done) ---------+
243 * v |
244 * mcache_free/mcache_free_ext() |
245 * | |
246 * v |
247 * mbuf_slab_audit() |
248 * | |
249 * v |
250 * [CPU cache] ---> (not purging?) -----+
251 * | |
252 * v |
253 * mbuf_slab_free() |
254 * | |
255 * v |
256 * [freelist] ----------->>------------+
257 * (objects get purged to VM only on demand)
258 *
259 * b. Composite object:
260 *
261 * { m_free(), m_freem_list() }
262 * | ^
263 * | |
264 * | +------ (done) ---------+
265 * v |
266 * mcache_free/mcache_free_ext() |
267 * | |
268 * v |
269 * mbuf_cslab_audit() |
270 * | |
271 * v |
272 * [CPU cache] ---> (not purging?) -----+
273 * | |
274 * v |
275 * mbuf_cslab_free() |
276 * | |
277 * v |
278 * [freelist] ---> (not purging?) -----+
279 * | |
280 * v |
281 * (rudimentary object) |
282 * mcache_free/mcache_free_ext() ------->>------+
283 *
284 * Auditing notes: If auditing is enabled, the audit routine will save
285 * any constructed data structure fields (if necessary) before filling the
286 * contents of the buffers with DEADBEEF (free) pattern and recording the
287 * transaction. Buffers that are freed (whether at CPU or slab layer) are
288 * expected to contain the free pattern.
289 *
290 * DEBUGGING:
291 *
292 * Debugging can be enabled by adding "mbuf_debug=0x3" to boot-args; this
293 * translates to the mcache flags (MCF_VERIFY | MCF_AUDIT). Additionally,
294 * the CPU layer cache can be disabled by setting the MCF_NOCPUCACHE flag,
295 * i.e. modify the boot argument parameter to "mbuf_debug=0x13". Leak
296 * detection may also be disabled by setting the MCF_NOLEAKLOG flag, e.g.
297 * "mbuf_debug=0x113". Note that debugging consumes more CPU and memory.
298 *
299 * Each object is associated with exactly one mcache_audit_t structure that
300 * contains the information related to its last buffer transaction. Given
301 * an address of an object, the audit structure can be retrieved by finding
302 * the position of the object relative to the base address of the cluster:
303 *
304 * +------------+ +=============+
305 * | mbuf addr | | mclaudit[i] |
306 * +------------+ +=============+
307 * | | cl_audit[0] |
308 * i = MTOBG(addr) +-------------+
309 * | +-----> | cl_audit[1] | -----> mcache_audit_t
310 * b = BGTOM(i) | +-------------+
311 * | | | ... |
312 * x = MCLIDX(b, addr) | +-------------+
313 * | | | cl_audit[7] |
314 * +-----------------+ +-------------+
315 * (e.g. x == 1)
316 *
317 * The mclaudit[] array is allocated at initialization time, but its contents
318 * get populated when the corresponding cluster is created. Because a page
319 * can be turned into NMBPG number of mbufs, we preserve enough space for the
320 * mbufs so that there is a 1-to-1 mapping between them. A page that never
321 * gets (or has not yet) turned into mbufs will use only cl_audit[0] with the
322 * remaining entries unused. For a 16KB cluster, only one entry from the first
323 * page is allocated and used for the entire object.
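 *
 * As an illustrative sketch only, the lookup pictured above corresponds
 * roughly to the following for a plain mbuf address (the helper name is
 * hypothetical; the real implementation is mcl_audit_buf2mca(), using the
 * MTOPG()/PGTOM()/MBPAGEIDX() macros defined later in this file):
 *
 *	mcache_audit_t *
 *	example_mbuf_to_audit(void *addr)
 *	{
 *		unsigned int i = MTOPG(addr);
 *		unsigned char *page = PGTOM(i);
 *		unsigned int x = MBPAGEIDX(page, addr);
 *
 *		return mclaudit[i].cl_audit[x];
 *	}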
324 */
325 #else
326 /*
327 * MBUF IMPLEMENTATION NOTES (using zalloc).
328 *
329 * There are a total of 4 zones and 3 zcaches.
330 *
331 * MC_MBUF:
332 * This is a zone of rudimentary objects of _MSIZE in size; each
333 * object represents an mbuf structure. This cache preserves only
334 * the m_type field of the mbuf during its transactions.
335 *
336 * MC_CL:
337 * This is a zone of rudimentary objects of MCLBYTES in size; each
338 * object represents a mcluster structure. This cache does not
339 * preserve the contents of the objects during its transactions.
340 *
341 * MC_BIGCL:
342 * This is a zone of rudimentary objects of MBIGCLBYTES in size; each
343 * object represents a mbigcluster structure. This cache does not
344 * preserve the contents of the objects during its transaction.
345 *
346 * MC_16KCL:
347 * This is a zone of rudimentary objects of M16KCLBYTES in size; each
348 * object represents a m16kcluster structure. This cache does not
349 * preserve the contents of the objects during its transaction.
350 *
351 * MC_MBUF_CL:
352 * This is a cache of mbufs each having a cluster attached to it.
353 * It is backed by MC_MBUF and MC_CL rudimentary caches. Several
354 * fields of the mbuf related to the external cluster are preserved
355 * during transactions.
356 *
357 * MC_MBUF_BIGCL:
358 * This is a cache of mbufs each having a big cluster attached to it.
359 * It is backed by MC_MBUF and MC_BIGCL rudimentary caches. Several
360 * fields of the mbuf related to the external cluster are preserved
361 * during transactions.
362 *
363 * MC_MBUF_16KCL:
364 * This is a cache of mbufs each having a 16KB cluster attached to it.
365 * It is backed by MC_MBUF and MC_16KCL rudimentary caches. Several
366 * fields of the mbuf related to the external cluster are preserved
367 * during transactions.
368 *
369 * OBJECT ALLOCATION:
370 *
371 * Allocation requests are handled first at the zalloc per-CPU layer
372 * before falling back to the zalloc depot. Performance is optimal when
373 * the request is satisfied at the CPU layer. zalloc has an additional
374 * overflow layer called the depot, not pictured in the diagram below.
375 *
376 * Allocation paths are different depending on the class of objects:
377 *
378 * a. Rudimentary object:
379 *
380 * { m_get_common(), m_clattach(), m_mclget(),
381 * m_mclalloc(), m_bigalloc(), m_copym_with_hdrs(),
382 * composite object allocation }
383 * | ^
384 * | |
385 * | +------- (done) --------+
386 * v |
387 * zalloc_flags/zalloc_n() KASAN
388 * | ^
389 * v |
390 * +----> [zalloc per-CPU cache] -----> (found?) --+
391 * | | |
392 * | v |
393 * | [zalloc recirculation layer] --> (found?) ---+
394 * | |
395 * | v
396 * +--<<-- [zone backing store]
397 *
398 * b. Composite object:
399 *
400 * { m_getpackets_internal(), m_allocpacket_internal() }
401 * | ^
402 * | |
403 * | +------ (done) ---------+
404 * v |
405 * mz_composite_alloc() KASAN
406 * | ^
407 * v |
408 * zcache_alloc_n() |
409 * | |
410 * v |
411 * [zalloc per-CPU cache] --> mark_valid() ---+
412 * | |
413 * v |
414 * [zalloc recirculation layer] -> mark_valid() -+
415 * | |
416 * v |
417 * mz_composite_build() |
418 * | |
419 * v |
420 * (rudimentary objects) |
421 * zalloc_id() ---------------->>-----+
422 *
423 * Auditing notes: If KASAN is enabled, buffers will be subjected to
424 * integrity checks by the AddressSanitizer.
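 *
 * As an illustrative sketch only (not a complete code path), an internal
 * caller obtains and releases a composite mbuf + 2KB cluster through the
 * zcache along the lines of:
 *
 *	struct mbuf *m;
 *
 *	m = mz_composite_alloc(MC_MBUF_CL, Z_WAITOK);
 *	if (m != NULL) {
 *		... initialize and use the mbuf ...
 *		mz_composite_free(MC_MBUF_CL, m);
 *	}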
425 *
426 * OBJECT DEALLOCATION:
427 *
428 * Freeing an object simply involves placing it into the CPU cache; this
429 * pollutes the cache to benefit subsequent allocations. The depot
430 * will only be entered if the object is to be purged out of the cache.
431 * Objects may be purged based on the overall memory pressure or
432 * during zone garbage collection.
433 * To improve performance, objects are not zero-filled when freed
434 * as is customary for other zalloc zones.
435 *
436 * Deallocation paths are different depending on the class of objects:
437 *
438 * a. Rudimentary object:
439 *
440 * { m_free(), m_freem_list(), composite object deallocation }
441 * | ^
442 * | |
443 * | +------ (done) ---------+
444 * v |
445 * zfree_nozero() |
446 * | |
447 * v |
448 * KASAN |
449 * | |
450 * v |
451 * [zalloc per-CPU cache] -> (not purging?) --+
452 * | |
453 * v |
454 * [zalloc recirculation layer] --->>----------+
455 *
456 *
457 * b. Composite object:
458 *
459 * { m_free(), m_freem_list() }
460 * | ^
461 * | |
462 * | +------ (done) ---------+
463 * v |
464 * mz_composite_free() |
465 * | |
466 * v |
467 * zcache_free_n() |
468 * | |
469 * v |
470 * KASAN |
471 * | |
472 * v |
473 * [zalloc per-CPU cache] -> mark_invalid() --+
474 * | |
475 * v |
476 * mz_composite_destroy() |
477 * | |
478 * v |
479 * (rudimentary object) |
480 * zfree_nozero() -------------->>------+
481 *
482 * Auditing notes: If KASAN is enabled, buffers will be subjected to
483 * integrity checks by the AddressSanitizer.
484 *
485 * DEBUGGING:
486 *
487 * Debugging mbufs can be done by booting a KASAN-enabled kernel.
488 */
489
490 #endif /* CONFIG_MBUF_MCACHE */
491
492 /* TODO: should be in header file */
493 /* kernel translator */
494 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
495 extern vm_map_t mb_map; /* special map */
496
497 #if CONFIG_MBUF_MCACHE
498 static uint32_t mb_kmem_contig_failed;
499 static uint32_t mb_kmem_failed;
500 static uint32_t mb_kmem_one_failed;
501 /* Timestamp of allocation failures. */
502 static uint64_t mb_kmem_contig_failed_ts;
503 static uint64_t mb_kmem_failed_ts;
504 static uint64_t mb_kmem_one_failed_ts;
505 static uint64_t mb_kmem_contig_failed_size;
506 static uint64_t mb_kmem_failed_size;
507 static uint32_t mb_kmem_stats[6];
508 #endif /* CONFIG_MBUF_MCACHE */
509
510 /* Global lock */
511 static LCK_GRP_DECLARE(mbuf_mlock_grp, "mbuf");
512 static LCK_MTX_DECLARE(mbuf_mlock_data, &mbuf_mlock_grp);
513 static lck_mtx_t *const mbuf_mlock = &mbuf_mlock_data;
514
515 #if CONFIG_MBUF_MCACHE
516 /* Back-end (common) layer */
517 static uint64_t mb_expand_cnt;
518 static uint64_t mb_expand_cl_cnt;
519 static uint64_t mb_expand_cl_total;
520 static uint64_t mb_expand_bigcl_cnt;
521 static uint64_t mb_expand_bigcl_total;
522 static uint64_t mb_expand_16kcl_cnt;
523 static uint64_t mb_expand_16kcl_total;
524 static boolean_t mbuf_worker_needs_wakeup; /* wait channel for mbuf worker */
525 static uint32_t mbuf_worker_run_cnt;
526 static uint64_t mbuf_worker_last_runtime;
527 static uint64_t mbuf_drain_last_runtime;
528 static int mbuf_worker_ready; /* worker thread is runnable */
529 static unsigned int ncpu; /* number of CPUs */
530 static ppnum_t *mcl_paddr; /* Array of cluster physical addresses */
531 static ppnum_t mcl_pages; /* Size of array (# physical pages) */
532 static ppnum_t mcl_paddr_base; /* Handle returned by IOMapper::iovmAlloc() */
533 static mcache_t *ref_cache; /* Cache of cluster reference & flags */
534 static mcache_t *mcl_audit_con_cache; /* Audit contents cache */
535 unsigned int mbuf_debug; /* patchable mbuf mcache flags */
536 #endif /* CONFIG_MBUF_MCACHE */
537 static unsigned int mb_normalized; /* number of packets "normalized" */
538
539 /*
540 * Convention typedefs for local __single pointers.
541 */
542 typedef typeof(*((zone_t)0)) *__single zone_ref_t;
543 typedef void * __single any_ref_t;
544
545 #define MB_GROWTH_AGGRESSIVE 1 /* Threshold: 1/2 of total */
546 #define MB_GROWTH_NORMAL 2 /* Threshold: 3/4 of total */
547
548 typedef enum {
549 MC_MBUF = 0, /* Regular mbuf */
550 MC_CL, /* Cluster */
551 MC_BIGCL, /* Large (4KB) cluster */
552 MC_16KCL, /* Jumbo (16KB) cluster */
553 MC_MBUF_CL, /* mbuf + cluster */
554 MC_MBUF_BIGCL, /* mbuf + large (4KB) cluster */
555 MC_MBUF_16KCL /* mbuf + jumbo (16KB) cluster */
556 } mbuf_class_t;
557
558 #define MBUF_CLASS_MIN MC_MBUF
559 #define MBUF_CLASS_MAX MC_MBUF_16KCL
560 #define MBUF_CLASS_LAST MC_16KCL
561 #define MBUF_CLASS_VALID(c) \
562 ((int)(c) >= MBUF_CLASS_MIN && (int)(c) <= MBUF_CLASS_MAX)
563 #define MBUF_CLASS_COMPOSITE(c) \
564 ((int)(c) > MBUF_CLASS_LAST)
565
566
567 /*
568 * mbuf specific mcache allocation request flags.
569 */
570 #define MCR_COMP MCR_USR1 /* for MC_MBUF_{CL,BIGCL,16KCL} caches */
571
572 /*
573 * Per-cluster slab structure.
574 *
575 * A slab is a cluster control structure that contains one or more object
576 * chunks; the available chunks are chained in the slab's freelist (sl_head).
577 * Each time a chunk is taken out of the slab, the slab's reference count
578 * gets incremented. When all chunks have been taken out, the empty slab
579 * gets removed (SLF_DETACHED) from the class's slab list. A chunk that is
580 * returned to a slab causes the slab's reference count to be decremented;
581 * it also causes the slab to be reinserted back to class's slab list, if
582 * it's not already done.
583 *
584 * Compartmentalizing of the object chunks into slabs allows us to easily
585 * merge one or more slabs together when the adjacent slabs are idle, as
586 * well as to convert or move a slab from one class to another; e.g. the
587 * mbuf cluster slab can be converted to a regular cluster slab when all
588 * mbufs in the slab have been freed.
589 *
590 * A slab may also span across multiple clusters for chunks larger than
591 * a cluster's size. In this case, only the slab of the first cluster is
592 * used. The rest of the slabs are marked with SLF_PARTIAL to indicate
593 * that they are part of the larger slab.
594 *
595 * Each slab controls a page of memory.
596 */
597 typedef struct mcl_slab {
598 struct mcl_slab *sl_next; /* neighboring slab */
599 u_int8_t sl_class; /* controlling mbuf class */
600 int8_t sl_refcnt; /* outstanding allocations */
601 int8_t sl_chunks; /* chunks (bufs) in this slab */
602 u_int16_t sl_flags; /* slab flags (see below) */
603 u_int16_t sl_len; /* slab length */
604 void *sl_base; /* base of allocated memory */
605 void *sl_head; /* first free buffer */
606 TAILQ_ENTRY(mcl_slab) sl_link; /* next/prev slab on freelist */
607 } mcl_slab_t;
608
609 #define SLF_MAPPED 0x0001 /* backed by a mapped page */
610 #define SLF_PARTIAL 0x0002 /* part of another slab */
611 #define SLF_DETACHED 0x0004 /* not in slab freelist */
612
613 /*
614 * The array of slabs is broken into groups of arrays per 1MB of kernel
615 * memory to reduce the footprint. Each group is allocated on demand
616 * whenever a new piece of memory mapped in from the VM crosses the 1MB
617 * boundary.
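 *
 * Since each slab controls a single page, NSLABSPMB works out to
 * (1MB / PAGE_SIZE) slabs per group, e.g. 256 with 4KB pages or 64 with
 * 16KB pages.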
618 */
619 #define NSLABSPMB ((1 << MBSHIFT) >> PAGE_SHIFT)
620
621 typedef struct mcl_slabg {
622 mcl_slab_t *slg_slab; /* group of slabs */
623 } mcl_slabg_t;
624
625 /*
626 * Number of slabs needed to control a 16KB cluster object.
627 */
628 #define NSLABSP16KB (M16KCLBYTES >> PAGE_SHIFT)
629
630 #if CONFIG_MBUF_MCACHE
631 /*
632 * Per-cluster audit structure.
633 */
634 typedef struct {
635 mcache_audit_t **cl_audit; /* array of audits */
636 } mcl_audit_t;
637
638 typedef struct {
639 struct thread *msa_thread; /* thread doing transaction */
640 struct thread *msa_pthread; /* previous transaction thread */
641 uint32_t msa_tstamp; /* transaction timestamp (ms) */
642 uint32_t msa_ptstamp; /* prev transaction timestamp (ms) */
643 uint16_t msa_depth; /* pc stack depth */
644 uint16_t msa_pdepth; /* previous transaction pc stack */
645 void *msa_stack[MCACHE_STACK_DEPTH];
646 void *msa_pstack[MCACHE_STACK_DEPTH];
647 } mcl_scratch_audit_t;
648
649 typedef struct {
650 /*
651 * Size of data from the beginning of an mbuf that covers m_hdr,
652 * pkthdr and m_ext structures. If auditing is enabled, we allocate
653 * a shadow mbuf structure of this size inside each audit structure,
654 * and the contents of the real mbuf gets copied into it when the mbuf
655 * is freed. This allows us to pattern-fill the mbuf for integrity
656 * check, and to preserve any constructed mbuf fields (e.g. mbuf +
657 * cluster cache case). Note that we don't save the contents of
658 * clusters when they are freed; we simply pattern-fill them.
659 */
660 u_int8_t sc_mbuf[(_MSIZE - _MHLEN) + sizeof(_m_ext_t)];
661 mcl_scratch_audit_t sc_scratch __attribute__((aligned(8)));
662 } mcl_saved_contents_t;
663
664 #define AUDIT_CONTENTS_SIZE (sizeof (mcl_saved_contents_t))
665
666 #define MCA_SAVED_MBUF_PTR(_mca) \
667 ((struct mbuf *)(void *)((mcl_saved_contents_t *) \
668 (_mca)->mca_contents)->sc_mbuf)
669 #define MCA_SAVED_MBUF_SIZE \
670 (sizeof (((mcl_saved_contents_t *)0)->sc_mbuf))
671 #define MCA_SAVED_SCRATCH_PTR(_mca) \
672 (&((mcl_saved_contents_t *)(_mca)->mca_contents)->sc_scratch)
673
674 /*
675 * mbuf specific mcache audit flags
676 */
677 #define MB_INUSE 0x01 /* object has not been returned to slab */
678 #define MB_COMP_INUSE 0x02 /* object has not been returned to cslab */
679 #define MB_SCVALID 0x04 /* object has valid saved contents */
680
681 /*
682 * Each of the following two arrays hold up to nmbclusters elements.
683 */
684 static mcl_audit_t *mclaudit; /* array of cluster audit information */
685 static unsigned int maxclaudit; /* max # of entries in audit table */
686 static mcl_slabg_t **slabstbl; /* cluster slabs table */
687 static unsigned int maxslabgrp; /* max # of entries in slabs table */
688 static unsigned int slabgrp; /* # of entries in slabs table */
689 #endif /* CONFIG_MBUF_MCACHE */
690
691 /* Globals */
692 int nclusters; /* # of clusters for non-jumbo (legacy) sizes */
693 int njcl; /* # of clusters for jumbo sizes */
694 int njclbytes; /* size of a jumbo cluster */
695 unsigned char *mbutl; /* first mapped cluster address */
696 unsigned char *embutl; /* ending virtual address of mclusters */
697 int max_linkhdr; /* largest link-level header */
698 int max_protohdr; /* largest protocol header */
699 int max_hdr; /* largest link+protocol header */
700 int max_datalen; /* MHLEN - max_hdr */
701
702 #if CONFIG_MBUF_MCACHE
703 static boolean_t mclverify; /* debug: pattern-checking */
704 static boolean_t mcltrace; /* debug: stack tracing */
705 static boolean_t mclfindleak; /* debug: leak detection */
706 static boolean_t mclexpleak; /* debug: expose leak info to user space */
707
708 static struct timeval mb_start; /* beginning of time */
709
710 /* mbuf leak detection variables */
711 static struct mleak_table mleak_table;
712 static mleak_stat_t *mleak_stat;
713
714 #define MLEAK_STAT_SIZE(n) \
715 __builtin_offsetof(mleak_stat_t, ml_trace[n])
716
717 struct mallocation {
718 mcache_obj_t *element; /* the alloc'ed element, NULL if unused */
719 u_int32_t trace_index; /* mtrace index for corresponding backtrace */
720 u_int32_t count; /* How many objects were requested */
721 u_int64_t hitcount; /* for determining hash effectiveness */
722 };
723
724 struct mtrace {
725 u_int64_t collisions;
726 u_int64_t hitcount;
727 u_int64_t allocs;
728 u_int64_t depth;
729 uintptr_t addr[MLEAK_STACK_DEPTH];
730 };
731
732 /* Size must be a power of two for the zhash to be able to just mask off bits */
733 #define MLEAK_ALLOCATION_MAP_NUM 512
734 #define MLEAK_TRACE_MAP_NUM 256
735
736 /*
737 * Sample factor for how often to record a trace. This is overwritable
738 * by the boot-arg mleak_sample_factor.
739 */
740 #define MLEAK_SAMPLE_FACTOR 500
741
742 /*
743 * Number of top leakers recorded.
744 */
745 #define MLEAK_NUM_TRACES 5
746
747 #define MB_LEAK_SPACING_64 " "
748 #define MB_LEAK_SPACING_32 " "
749
750
751 #define MB_LEAK_HDR_32 "\n\
752 trace [1] trace [2] trace [3] trace [4] trace [5] \n\
753 ---------- ---------- ---------- ---------- ---------- \n\
754 "
755
756 #define MB_LEAK_HDR_64 "\n\
757 trace [1] trace [2] trace [3] \
758 trace [4] trace [5] \n\
759 ------------------ ------------------ ------------------ \
760 ------------------ ------------------ \n\
761 "
762
763 static uint32_t mleak_alloc_buckets = MLEAK_ALLOCATION_MAP_NUM;
764 static uint32_t mleak_trace_buckets = MLEAK_TRACE_MAP_NUM;
765
766 /* Hashmaps of allocations and their corresponding traces */
767 static struct mallocation *mleak_allocations;
768 static struct mtrace *mleak_traces;
769 static struct mtrace *mleak_top_trace[MLEAK_NUM_TRACES];
770
771 /* Lock to protect mleak tables from concurrent modification */
772 static LCK_GRP_DECLARE(mleak_lock_grp, "mleak_lock");
773 static LCK_MTX_DECLARE(mleak_lock_data, &mleak_lock_grp);
774 static lck_mtx_t *const mleak_lock = &mleak_lock_data;
775
776 /* *Failed* large allocations. */
777 struct mtracelarge {
778 uint64_t size;
779 uint64_t depth;
780 uintptr_t addr[MLEAK_STACK_DEPTH];
781 };
782
783 #define MTRACELARGE_NUM_TRACES 5
784 static struct mtracelarge mtracelarge_table[MTRACELARGE_NUM_TRACES];
785
786 static void mtracelarge_register(size_t size);
787 #endif /* CONFIG_MBUF_MCACHE */
788
789 /* Lock to protect the completion callback table */
790 static LCK_GRP_DECLARE(mbuf_tx_compl_tbl_lck_grp, "mbuf_tx_compl_tbl");
791 LCK_RW_DECLARE(mbuf_tx_compl_tbl_lock, &mbuf_tx_compl_tbl_lck_grp);
792
793 extern u_int32_t high_sb_max;
794
795 /* The minimum number of objects that are allocated, to start. */
796 #define MINCL 32
797 #define MINBIGCL (MINCL >> 1)
798 #define MIN16KCL (MINCL >> 2)
799
800 /* Low watermarks (only map in pages once free counts go below) */
801 #define MBIGCL_LOWAT MINBIGCL
802 #define M16KCL_LOWAT MIN16KCL
803
804 typedef struct {
805 mbuf_class_t mtbl_class; /* class type */
806 #if CONFIG_MBUF_MCACHE
807 mcache_t *mtbl_cache; /* mcache for this buffer class */
808 TAILQ_HEAD(mcl_slhead, mcl_slab) mtbl_slablist; /* slab list */
809 mcache_obj_t *mtbl_cobjlist; /* composite objects freelist */
810 #endif /* CONFIG_MBUF_MCACHE */
811 mb_class_stat_t *mtbl_stats; /* statistics fetchable via sysctl */
812 u_int32_t mtbl_maxsize; /* maximum buffer size */
813 int mtbl_minlimit; /* minimum allowed */
814 int mtbl_maxlimit; /* maximum allowed */
815 u_int32_t mtbl_wantpurge; /* purge during next reclaim */
816 uint32_t mtbl_avgtotal; /* average total on iOS */
817 u_int32_t mtbl_expand; /* worker should expand the class */
818 } mbuf_table_t;
819
820 #define m_class(c) mbuf_table[c].mtbl_class
821 #if CONFIG_MBUF_MCACHE
822 #define m_cache(c) mbuf_table[c].mtbl_cache
823 #define m_slablist(c) mbuf_table[c].mtbl_slablist
824 #define m_cobjlist(c) mbuf_table[c].mtbl_cobjlist
825 #else
826 #define m_stats(c) mbuf_table[c].mtbl_stats
827 #endif /* CONFIG_MBUF_MCACHE */
828 #define m_maxsize(c) mbuf_table[c].mtbl_maxsize
829 #define m_minlimit(c) mbuf_table[c].mtbl_minlimit
830 #define m_maxlimit(c) mbuf_table[c].mtbl_maxlimit
831 #define m_wantpurge(c) mbuf_table[c].mtbl_wantpurge
832 #define m_cname(c) mbuf_table[c].mtbl_stats->mbcl_cname
833 #define m_size(c) mbuf_table[c].mtbl_stats->mbcl_size
834 #define m_total(c) mbuf_table[c].mtbl_stats->mbcl_total
835 #define m_active(c) mbuf_table[c].mtbl_stats->mbcl_active
836 #define m_infree(c) mbuf_table[c].mtbl_stats->mbcl_infree
837 #define m_slab_cnt(c) mbuf_table[c].mtbl_stats->mbcl_slab_cnt
838 #define m_alloc_cnt(c) mbuf_table[c].mtbl_stats->mbcl_alloc_cnt
839 #define m_free_cnt(c) mbuf_table[c].mtbl_stats->mbcl_free_cnt
840 #define m_notified(c) mbuf_table[c].mtbl_stats->mbcl_notified
841 #define m_purge_cnt(c) mbuf_table[c].mtbl_stats->mbcl_purge_cnt
842 #define m_fail_cnt(c) mbuf_table[c].mtbl_stats->mbcl_fail_cnt
843 #define m_ctotal(c) mbuf_table[c].mtbl_stats->mbcl_ctotal
844 #define m_release_cnt(c) mbuf_table[c].mtbl_stats->mbcl_release_cnt
845 #define m_region_expand(c) mbuf_table[c].mtbl_expand
846
847 /*
848 * Note: number of entries in mbuf_table must not exceed
849 * MB_STAT_MAX_MB_CLASSES
850 */
851 static mbuf_table_t mbuf_table[] = {
852 #if CONFIG_MBUF_MCACHE
853 /*
854 * The caches for mbufs, regular clusters and big clusters.
855 * The average total values were based on data gathered by actual
856 * usage patterns on iOS.
857 */
858 { MC_MBUF, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_MBUF)),
859 NULL, NULL, 0, 0, 0, 0, 3000, 0 },
860 { MC_CL, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_CL)),
861 NULL, NULL, 0, 0, 0, 0, 2000, 0 },
862 { MC_BIGCL, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_BIGCL)),
863 NULL, NULL, 0, 0, 0, 0, 1000, 0 },
864 { MC_16KCL, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_16KCL)),
865 NULL, NULL, 0, 0, 0, 0, 200, 0 },
866 /*
867 * The following are special caches; they serve as intermediate
868 * caches backed by the above rudimentary caches. Each object
869 * in the cache is an mbuf with a cluster attached to it. Unlike
870 * the above caches, these intermediate caches do not directly
871 * deal with the slab structures; instead, the constructed
872 * cached elements are simply stored in the freelists.
873 */
874 { MC_MBUF_CL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 2000, 0 },
875 { MC_MBUF_BIGCL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 1000, 0 },
876 { MC_MBUF_16KCL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 200, 0 },
877 #else
878 { .mtbl_class = MC_MBUF },
879 { .mtbl_class = MC_CL },
880 { .mtbl_class = MC_BIGCL },
881 { .mtbl_class = MC_16KCL },
882 { .mtbl_class = MC_MBUF_CL },
883 { .mtbl_class = MC_MBUF_BIGCL },
884 { .mtbl_class = MC_MBUF_16KCL },
885 #endif /* CONFIG_MBUF_MCACHE */
886 };
887
888 #define NELEM(a) (sizeof (a) / sizeof ((a)[0]))
889
890 #if SKYWALK && CONFIG_MBUF_MCACHE
891 #define MC_THRESHOLD_SCALE_DOWN_FACTOR 2
892 static unsigned int mc_threshold_scale_down_factor =
893 MC_THRESHOLD_SCALE_DOWN_FACTOR;
894 #endif /* SKYWALK && CONFIG_MBUF_MCACHE */
895
896 #if CONFIG_MBUF_MCACHE
897 static uint32_t
898 m_avgtotal(mbuf_class_t c)
899 {
900 #if SKYWALK
901 return if_is_fsw_transport_netagent_enabled() ?
902 (mbuf_table[c].mtbl_avgtotal / mc_threshold_scale_down_factor) :
903 mbuf_table[c].mtbl_avgtotal;
904 #else /* !SKYWALK */
905 return mbuf_table[c].mtbl_avgtotal;
906 #endif /* SKYWALK */
907 }
908 #endif /* CONFIG_MBUF_MCACHE */
909
910 #if CONFIG_MBUF_MCACHE
911 static void *mb_waitchan = &mbuf_table; /* wait channel for all caches */
912 static int mb_waiters; /* number of waiters */
913 #endif /* CONFIG_MBUF_MCACHE */
914
915 #define MB_WDT_MAXTIME 10 /* # of secs before watchdog panic */
916 #if CONFIG_MBUF_MCACHE
917 static struct timeval mb_wdtstart; /* watchdog start timestamp */
918 static char *mbuf_dump_buf;
919
920 #define MBUF_DUMP_BUF_SIZE 4096
921
922 /*
923 * mbuf watchdog is enabled by default. It is also toggleable via the
924 * kern.ipc.mb_watchdog sysctl.
925 * Garbage collection is enabled by default on embedded platforms.
926 * mb_drain_maxint controls the amount of time to wait (in seconds) before
927 * consecutive calls to mbuf_drain().
928 */
929 static unsigned int mb_watchdog = 1;
930 #if !XNU_TARGET_OS_OSX
931 static unsigned int mb_drain_maxint = 60;
932 #else /* XNU_TARGET_OS_OSX */
933 static unsigned int mb_drain_maxint = 0;
934 #endif /* XNU_TARGET_OS_OSX */
935 #endif /* CONFIG_MBUF_MCACHE */
936 static unsigned int mb_memory_pressure_percentage = 80;
937
938 static void m_set_rfa(struct mbuf *, struct ext_ref *);
939
940 #if CONFIG_MBUF_MCACHE
941 /* The following are used to serialize m_clalloc() */
942 static boolean_t mb_clalloc_busy;
943 static void *mb_clalloc_waitchan = &mb_clalloc_busy;
944 static int mb_clalloc_waiters;
945 #endif /* CONFIG_MBUF_MCACHE */
946
947 static void mbuf_mtypes_sync(boolean_t);
948 static int mbstat_sysctl SYSCTL_HANDLER_ARGS;
949 static void mbuf_stat_sync(void);
950 static int mb_stat_sysctl SYSCTL_HANDLER_ARGS;
951 #if CONFIG_MBUF_MCACHE
952 static int mleak_top_trace_sysctl SYSCTL_HANDLER_ARGS;
953 static int mleak_table_sysctl SYSCTL_HANDLER_ARGS;
954 static char *mbuf_dump(void);
955 #endif /* CONFIG_MBUF_MCACHE */
956 static void mbuf_table_init(void);
957 static inline void m_incref(struct mbuf *);
958 static inline u_int16_t m_decref(struct mbuf *);
959 static void mbuf_watchdog_defunct(thread_call_param_t, thread_call_param_t);
960 #if CONFIG_MBUF_MCACHE
961 static int m_clalloc(const u_int32_t, const int, const u_int32_t);
962 static void mbuf_worker_thread_init(void);
963 static mcache_obj_t *slab_alloc(mbuf_class_t, int);
964 static void slab_free(mbuf_class_t, mcache_obj_t *);
965 static unsigned int mbuf_slab_alloc(void *, mcache_obj_t ***,
966 unsigned int, int);
967 static void mbuf_slab_free(void *, mcache_obj_t *, int);
968 static void mbuf_slab_audit(void *, mcache_obj_t *, boolean_t);
969 static void mbuf_slab_notify(void *, u_int32_t);
970 static unsigned int cslab_alloc(mbuf_class_t, mcache_obj_t ***,
971 unsigned int);
972 static unsigned int cslab_free(mbuf_class_t, mcache_obj_t *, int);
973 static unsigned int mbuf_cslab_alloc(void *, mcache_obj_t ***,
974 unsigned int, int);
975 static void mbuf_cslab_free(void *, mcache_obj_t *, int);
976 static void mbuf_cslab_audit(void *, mcache_obj_t *, boolean_t);
977 static int freelist_populate(mbuf_class_t, unsigned int, int);
978 static void freelist_init(mbuf_class_t);
979 static boolean_t mbuf_cached_above(mbuf_class_t, int);
980 static boolean_t mbuf_steal(mbuf_class_t, unsigned int);
981 static void m_reclaim(mbuf_class_t, unsigned int, boolean_t);
982 static int m_howmany(int, size_t);
983 static void mbuf_worker_thread(void);
984 static void mbuf_watchdog(void);
985 static boolean_t mbuf_sleep(mbuf_class_t, unsigned int, int);
986
987 static void mcl_audit_init(void *, mcache_audit_t **, mcache_obj_t **,
988 size_t, unsigned int);
989 static void mcl_audit_free(void *, unsigned int);
990 static mcache_audit_t *mcl_audit_buf2mca(mbuf_class_t, mcache_obj_t *);
991 static void mcl_audit_mbuf(mcache_audit_t *, void *, boolean_t, boolean_t);
992 static void mcl_audit_cluster(mcache_audit_t *, void *, size_t, boolean_t,
993 boolean_t);
994 static void mcl_audit_restore_mbuf(struct mbuf *, mcache_audit_t *, boolean_t);
995 static void mcl_audit_save_mbuf(struct mbuf *, mcache_audit_t *);
996 static void mcl_audit_scratch(mcache_audit_t *);
997 static void mcl_audit_mcheck_panic(struct mbuf *);
998 static void mcl_audit_verify_nextptr(void *, mcache_audit_t *);
999
1000 static void mleak_activate(void);
1001 static void mleak_logger(u_int32_t, mcache_obj_t *, boolean_t);
1002 static boolean_t mleak_log(uintptr_t *, mcache_obj_t *, uint32_t, int);
1003 static void mleak_free(mcache_obj_t *);
1004 static void mleak_sort_traces(void);
1005 static void mleak_update_stats(void);
1006
1007 static mcl_slab_t *slab_get(void *);
1008 static void slab_init(mcl_slab_t *, mbuf_class_t, u_int32_t,
1009 void *, void *, unsigned int, int, int);
1010 static void slab_insert(mcl_slab_t *, mbuf_class_t);
1011 static void slab_remove(mcl_slab_t *, mbuf_class_t);
1012 static boolean_t slab_inrange(mcl_slab_t *, void *);
1013 static void slab_nextptr_panic(mcl_slab_t *, void *);
1014 static void slab_detach(mcl_slab_t *);
1015 static boolean_t slab_is_detached(mcl_slab_t *);
1016 #else /* !CONFIG_MBUF_MCACHE */
1017 static void mbuf_watchdog_drain_composite(thread_call_param_t, thread_call_param_t);
1018 static struct mbuf *mz_alloc(zalloc_flags_t);
1019 static void mz_free(struct mbuf *);
1020 static struct ext_ref *mz_ref_alloc(zalloc_flags_t);
1021 static void mz_ref_free(struct ext_ref *);
1022 static void * __bidi_indexable mz_cl_alloc(zone_id_t, zalloc_flags_t);
1023 static void mz_cl_free(zone_id_t, void *);
1024 static struct mbuf *mz_composite_alloc(mbuf_class_t, zalloc_flags_t);
1025 static zstack_t mz_composite_alloc_n(mbuf_class_t, unsigned int, zalloc_flags_t);
1026 static void mz_composite_free(mbuf_class_t, struct mbuf *);
1027 static void mz_composite_free_n(mbuf_class_t, zstack_t);
1028 static void *mz_composite_build(zone_id_t, zalloc_flags_t);
1029 static void *mz_composite_mark_valid(zone_id_t, void *);
1030 static void *mz_composite_mark_invalid(zone_id_t, void *);
1031 static void mz_composite_destroy(zone_id_t, void *);
1032
1033 ZONE_DEFINE_ID(ZONE_ID_MBUF_REF, "mbuf.ref", struct ext_ref,
1034 ZC_CACHING | ZC_NOPGZ | ZC_KASAN_NOQUARANTINE);
1035 ZONE_DEFINE_ID(ZONE_ID_MBUF, "mbuf", struct mbuf,
1036 ZC_CACHING | ZC_NOPGZ | ZC_KASAN_NOQUARANTINE);
1037 ZONE_DEFINE_ID(ZONE_ID_CLUSTER_2K, "mbuf.cluster.2k", union mcluster,
1038 ZC_CACHING | ZC_NOPGZ | ZC_KASAN_NOQUARANTINE | ZC_DATA);
1039 ZONE_DEFINE_ID(ZONE_ID_CLUSTER_4K, "mbuf.cluster.4k", union mbigcluster,
1040 ZC_CACHING | ZC_NOPGZ | ZC_KASAN_NOQUARANTINE | ZC_DATA);
1041 ZONE_DEFINE_ID(ZONE_ID_CLUSTER_16K, "mbuf.cluster.16k", union m16kcluster,
1042 ZC_CACHING | ZC_NOPGZ | ZC_KASAN_NOQUARANTINE | ZC_DATA);
1043 static_assert(sizeof(union mcluster) == MCLBYTES);
1044 static_assert(sizeof(union mbigcluster) == MBIGCLBYTES);
1045 static_assert(sizeof(union m16kcluster) == M16KCLBYTES);
1046
1047 static const struct zone_cache_ops mz_composite_ops = {
1048 .zc_op_alloc = mz_composite_build,
1049 .zc_op_mark_valid = mz_composite_mark_valid,
1050 .zc_op_mark_invalid = mz_composite_mark_invalid,
1051 .zc_op_free = mz_composite_destroy,
1052 };
1053 ZCACHE_DEFINE(ZONE_ID_MBUF_CLUSTER_2K, "mbuf.composite.2k", struct mbuf,
1054 sizeof(struct mbuf) + sizeof(struct ext_ref) + MCLBYTES,
1055 &mz_composite_ops);
1056 ZCACHE_DEFINE(ZONE_ID_MBUF_CLUSTER_4K, "mbuf.composite.4k", struct mbuf,
1057 sizeof(struct mbuf) + sizeof(struct ext_ref) + MBIGCLBYTES,
1058 &mz_composite_ops);
1059 ZCACHE_DEFINE(ZONE_ID_MBUF_CLUSTER_16K, "mbuf.composite.16k", struct mbuf,
1060 sizeof(struct mbuf) + sizeof(struct ext_ref) + M16KCLBYTES,
1061 &mz_composite_ops);
1062 static_assert(ZONE_ID_MBUF + MC_MBUF == ZONE_ID_MBUF);
1063 static_assert(ZONE_ID_MBUF + MC_CL == ZONE_ID_CLUSTER_2K);
1064 static_assert(ZONE_ID_MBUF + MC_BIGCL == ZONE_ID_CLUSTER_4K);
1065 static_assert(ZONE_ID_MBUF + MC_16KCL == ZONE_ID_CLUSTER_16K);
1066 static_assert(ZONE_ID_MBUF + MC_MBUF_CL == ZONE_ID_MBUF_CLUSTER_2K);
1067 static_assert(ZONE_ID_MBUF + MC_MBUF_BIGCL == ZONE_ID_MBUF_CLUSTER_4K);
1068 static_assert(ZONE_ID_MBUF + MC_MBUF_16KCL == ZONE_ID_MBUF_CLUSTER_16K);
1069
1070 /* Converts an mbuf class to a zalloc zone ID. */
1071 __attribute__((always_inline))
1072 static inline zone_id_t
1073 m_class_to_zid(mbuf_class_t class)
1074 {
1075 return ZONE_ID_MBUF + class - MC_MBUF;
1076 }
1077
1078 __attribute__((always_inline))
1079 static inline mbuf_class_t
1080 m_class_from_zid(zone_id_t zid)
1081 {
1082 return MC_MBUF + zid - ZONE_ID_MBUF;
1083 }
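
/*
 * Example round trips, guaranteed by the static asserts above:
 *
 *	m_class_to_zid(MC_BIGCL) == ZONE_ID_CLUSTER_4K
 *	m_class_from_zid(ZONE_ID_CLUSTER_4K) == MC_BIGCL
 */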
1084
1085 static thread_call_t mbuf_defunct_tcall;
1086 static thread_call_t mbuf_drain_tcall;
1087 #endif /* CONFIG_MBUF_MCACHE */
1088
1089 static int m_copyback0(struct mbuf **, int, int len, const void * __sized_by_or_null(len), int, int);
1090 static struct mbuf *m_split0(struct mbuf *, int, int, int);
1091 #if CONFIG_MBUF_MCACHE && (DEBUG || DEVELOPMENT)
1092 #define mbwdog_logger(fmt, ...) _mbwdog_logger(__func__, __LINE__, fmt, ## __VA_ARGS__)
1093 static void _mbwdog_logger(const char *func, const int line, const char *fmt, ...);
1094 static char *mbwdog_logging;
1095 const unsigned mbwdog_logging_size = 4096;
1096 static size_t mbwdog_logging_used;
1097 #else
1098 #define mbwdog_logger(fmt, ...) do { } while (0)
1099 #endif /* CONFIG_MBUF_MCACHE && (DEBUG || DEVELOPMENT) */
1100 #if CONFIG_MBUF_MCACHE
1101 static void mbuf_drain_locked(boolean_t);
1102 #endif /* CONFIG_MBUF_MCACHE */
1103
1104 /* flags for m_copyback0 */
1105 #define M_COPYBACK0_COPYBACK 0x0001 /* copyback from cp */
1106 #define M_COPYBACK0_PRESERVE 0x0002 /* preserve original data */
1107 #define M_COPYBACK0_COW 0x0004 /* do copy-on-write */
1108 #define M_COPYBACK0_EXTEND 0x0008 /* extend chain */
1109
1110 /*
1111 * This flag is set for all mbufs that come out of and into the composite
1112 * mbuf + cluster caches, i.e. MC_MBUF_CL and MC_MBUF_BIGCL. mbufs that
1113 * are marked with such a flag have clusters attached to them, and will be
1114 * treated differently when they are freed; instead of being placed back
1115 * into the mbuf and cluster freelists, the composite mbuf + cluster objects
1116 * are placed back into the appropriate composite cache's freelist, and the
1117 * actual freeing is deferred until the composite objects are purged. At
1118 * such a time, this flag will be cleared from the mbufs and the objects
1119 * will be freed into their own separate freelists.
1120 */
1121 #define EXTF_COMPOSITE 0x1
1122
1123 /*
1124 * This flag indicates that the external cluster is read-only, i.e. it is
1125 * or was referred to by more than one mbufs. Once set, this flag is never
1126 * cleared.
1127 */
1128 #define EXTF_READONLY 0x2
1129 /*
1130 * This flag indicates that the external cluster is paired with the mbuf.
1131 * Pairing implies an external free routine defined which will be invoked
1132 * when the reference count drops to the minimum at m_free time. This
1133 * flag is never cleared.
1134 */
1135 #define EXTF_PAIRED 0x4
1136
1137 #define EXTF_MASK \
1138 (EXTF_COMPOSITE | EXTF_READONLY | EXTF_PAIRED)
1139
1140 #define MEXT_MINREF(m) ((m_get_rfa(m))->minref)
1141 #define MEXT_REF(m) ((m_get_rfa(m))->refcnt)
1142 #define MEXT_PREF(m) ((m_get_rfa(m))->prefcnt)
1143 #define MEXT_FLAGS(m) ((m_get_rfa(m))->flags)
1144 #define MEXT_PRIV(m) ((m_get_rfa(m))->priv)
1145 #define MEXT_PMBUF(m) ((m_get_rfa(m))->paired)
1146 #define MBUF_IS_COMPOSITE(m) \
1147 (MEXT_REF(m) == MEXT_MINREF(m) && \
1148 (MEXT_FLAGS(m) & EXTF_MASK) == EXTF_COMPOSITE)
1149 /*
1150 * This macro can be used to test if the mbuf is paired to an external
1151 * cluster. The test for MEXT_PMBUF being equal to the mbuf in subject
1152 * is important, as EXTF_PAIRED alone is insufficient since it is immutable,
1153 * and thus survives calls to m_free_paired.
1154 */
1155 #define MBUF_IS_PAIRED(m) \
1156 (((m)->m_flags & M_EXT) && \
1157 (MEXT_FLAGS(m) & EXTF_MASK) == EXTF_PAIRED && \
1158 MEXT_PMBUF(m) == (m))
1159
1160 /*
1161 * Macros used to verify the integrity of the mbuf.
1162 */
1163 #if CONFIG_MBUF_MCACHE
1164 #define _MCHECK(m) { \
1165 if ((m)->m_type != MT_FREE && !MBUF_IS_PAIRED(m)) { \
1166 if (mclaudit == NULL) \
1167 panic("MCHECK: m_type=%d m=%p", \
1168 (u_int16_t)(m)->m_type, m); \
1169 else \
1170 mcl_audit_mcheck_panic(m); \
1171 } \
1172 }
1173 #else
1174 #define _MCHECK(m) \
1175 if ((m)->m_type != MT_FREE && !MBUF_IS_PAIRED(m)) { \
1176 panic("MCHECK: m_type=%d m=%p", \
1177 (u_int16_t)(m)->m_type, m); \
1178 }
1179 #endif /* CONFIG_MBUF_MCACHE */
1180
1181 #if CONFIG_MBUF_MCACHE
1182 #define MBUF_IN_MAP(addr) \
1183 ((unsigned char *)(addr) >= mbutl && \
1184 (unsigned char *)(addr) < embutl)
1185
1186 #define MRANGE(addr) { \
1187 if (!MBUF_IN_MAP(addr)) \
1188 panic("MRANGE: address out of range 0x%p", addr); \
1189 }
1190
1191 /*
1192 * Macros to obtain page index given a base cluster address
1193 */
1194 #define MTOPG(x) (((unsigned char *)x - mbutl) >> PAGE_SHIFT)
1195 #define PGTOM(x) (mbutl + (x << PAGE_SHIFT))
1196
1197 /*
1198 * Macro to find the mbuf index relative to a base.
1199 */
1200 #define MBPAGEIDX(c, m) \
1201 (((unsigned char *)(m) - (unsigned char *)(c)) >> _MSIZESHIFT)
1202
1203 /*
1204 * Same thing for 2KB cluster index.
1205 */
1206 #define CLPAGEIDX(c, m) \
1207 (((unsigned char *)(m) - (unsigned char *)(c)) >> MCLSHIFT)
1208
1209 /*
1210 * Macro to find 4KB cluster index relative to a base
1211 */
1212 #define BCLPAGEIDX(c, m) \
1213 (((unsigned char *)(m) - (unsigned char *)(c)) >> MBIGCLSHIFT)
1214 #endif /* CONFIG_MBUF_MCACHE */
1215
1216 /*
1217 * Macros used during mbuf and cluster initialization.
1218 */
1219 #define MBUF_INIT_PKTHDR(m) { \
1220 (m)->m_pkthdr.rcvif = NULL; \
1221 (m)->m_pkthdr.pkt_hdr = NULL; \
1222 (m)->m_pkthdr.len = 0; \
1223 (m)->m_pkthdr.csum_flags = 0; \
1224 (m)->m_pkthdr.csum_data = 0; \
1225 (m)->m_pkthdr.vlan_tag = 0; \
1226 (m)->m_pkthdr.comp_gencnt = 0; \
1227 (m)->m_pkthdr.pkt_crumbs = 0; \
1228 m_classifier_init(m, 0); \
1229 m_tag_init(m, 1); \
1230 m_scratch_init(m); \
1231 }
1232
1233 #define MBUF_INIT(m, pkthdr, type) { \
1234 _MCHECK(m); \
1235 (m)->m_next = (m)->m_nextpkt = NULL; \
1236 (m)->m_len = 0; \
1237 (m)->m_type = type; \
1238 if ((pkthdr) == 0) { \
1239 (m)->m_data = (uintptr_t)(m)->m_dat; \
1240 (m)->m_flags = 0; \
1241 } else { \
1242 (m)->m_data = (uintptr_t)(m)->m_pktdat; \
1243 (m)->m_flags = M_PKTHDR; \
1244 MBUF_INIT_PKTHDR(m); \
1245 } \
1246 }
1247
1248 #define MEXT_INIT mext_init
1249
1250 #define MBUF_CL_INIT(m, buf, rfa, ref, flag) \
1251 MEXT_INIT(m, buf, m_maxsize(MC_CL), NULL, NULL, rfa, 0, \
1252 ref, 0, flag, 0, NULL)
1253
1254 #define MBUF_BIGCL_INIT(m, buf, rfa, ref, flag) \
1255 MEXT_INIT(m, buf, m_maxsize(MC_BIGCL), m_bigfree, NULL, rfa, 0, \
1256 ref, 0, flag, 0, NULL)
1257
1258 #define MBUF_16KCL_INIT(m, buf, rfa, ref, flag) \
1259 MEXT_INIT(m, buf, m_maxsize(MC_16KCL), m_16kfree, NULL, rfa, 0, \
1260 ref, 0, flag, 0, NULL)
1261
1262 /*
1263 * Macro to convert BSD malloc sleep flag to mcache's
1264 */
1265 #define MSLEEPF(f) ((!((f) & M_DONTWAIT)) ? MCR_SLEEP : MCR_NOSLEEP)
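/*
 * For example, MSLEEPF(M_DONTWAIT) evaluates to MCR_NOSLEEP, while a
 * caller that may block (M_DONTWAIT not set) gets MCR_SLEEP.
 */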
1266
1267 /*
1268 * The structure that holds all mbuf class statistics exportable via sysctl.
1269 * Similar to mbstat structure, the mb_stat structure is protected by the
1270 * global mbuf lock. It contains additional information about the classes
1271 * that allows for a more accurate view of the state of the allocator.
1272 */
1273 struct mb_stat *mb_stat;
1274 struct omb_stat *omb_stat; /* For backwards compatibility */
1275
1276 #define MB_STAT_SIZE(n) \
1277 __builtin_offsetof(mb_stat_t, mbs_class[n])
1278 #define OMB_STAT_SIZE(n) \
1279 __builtin_offsetof(struct omb_stat, mbs_class[n])
1280
1281 /*
1282 * The legacy structure holding all of the mbuf allocation statistics.
1283 * The actual statistics used by the kernel are stored in the mbuf_table
1284 * instead, and are updated atomically while the global mbuf lock is held.
1285 * They are mirrored in mbstat to support legacy applications (e.g. netstat).
1286 * Unlike before, the kernel no longer relies on the contents of mbstat for
1287 * its operations (e.g. cluster expansion) because the structure is exposed
1288 * to outside and could possibly be modified, therefore making it unsafe.
1289 * With the exception of the mbstat.m_mtypes array (see below), all of the
1290 * statistics are updated as they change.
1291 */
1292 struct mbstat mbstat;
1293
1294 #define MBSTAT_MTYPES_MAX \
1295 (sizeof (mbstat.m_mtypes) / sizeof (mbstat.m_mtypes[0]))
1296
1297 /*
1298 * Allocation statistics related to mbuf types (up to MT_MAX-1) are updated
1299 * atomically and stored in a per-CPU structure which is lock-free; this is
1300 * done in order to avoid writing to the global mbstat data structure which
1301 * would cause false sharing. During sysctl request for kern.ipc.mbstat,
1302 * the statistics across all CPUs will be converged into the mbstat.m_mtypes
1303 * array and returned to the application. Any updates for types greater or
1304 * equal than MT_MAX would be done atomically to the mbstat; this slows down
1305 * performance but is okay since the kernel uses only up to MT_MAX-1 while
1306 * anything beyond that (up to type 255) is considered a corner case.
1307 */
1308 typedef struct {
1309 unsigned int cpu_mtypes[MT_MAX];
1310 } mbuf_mtypes_t;
1311
1312 static mbuf_mtypes_t PERCPU_DATA(mbuf_mtypes);
1313
1314 #define mtype_stat_add(type, n) { \
1315 if ((unsigned)(type) < MT_MAX) { \
1316 mbuf_mtypes_t *mbs = PERCPU_GET(mbuf_mtypes); \
1317 os_atomic_add(&mbs->cpu_mtypes[type], n, relaxed); \
1318 } else if ((unsigned)(type) < (unsigned)MBSTAT_MTYPES_MAX) { \
1319 os_atomic_add((int16_t *)&mbstat.m_mtypes[type], n, relaxed); \
1320 } \
1321 }
1322
1323 #define mtype_stat_sub(t, n) mtype_stat_add(t, -(n))
1324 #define mtype_stat_inc(t) mtype_stat_add(t, 1)
1325 #define mtype_stat_dec(t) mtype_stat_sub(t, 1)
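
/*
 * Illustrative usage; the per-CPU counters accumulated here are later
 * converged into mbstat.m_mtypes by mbuf_mtypes_sync() below:
 *
 *	mtype_stat_inc(MT_DATA);	(one more MT_DATA mbuf in use)
 *	...
 *	mtype_stat_dec(MT_DATA);	(and released again)
 */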
1326
1327 static inline void
1328 mext_init(struct mbuf *m, void *__sized_by(size)buf, u_int size,
1329 m_ext_free_func_t free, caddr_t free_arg, struct ext_ref *rfa,
1330 u_int16_t min, u_int16_t ref, u_int16_t pref, u_int16_t flag,
1331 u_int32_t priv, struct mbuf *pm)
1332 {
1333 m->m_ext.ext_buf = buf;
1334 m->m_ext.ext_size = size;
1335 m->m_data = (uintptr_t)m->m_ext.ext_buf;
1336 m->m_len = 0;
1337 m->m_flags |= M_EXT;
1338 m_set_ext(m, rfa, free, free_arg);
1339 MEXT_MINREF(m) = min;
1340 MEXT_REF(m) = ref;
1341 MEXT_PREF(m) = pref;
1342 MEXT_FLAGS(m) = flag;
1343 MEXT_PRIV(m) = priv;
1344 MEXT_PMBUF(m) = pm;
1345 }
1346
1347 static void
1348 mbuf_mtypes_sync(boolean_t locked)
1349 {
1350 mbuf_mtypes_t mtc;
1351
1352 if (locked) {
1353 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
1354 }
1355
1356 mtc = *PERCPU_GET_MASTER(mbuf_mtypes);
1357 percpu_foreach_secondary(mtype, mbuf_mtypes) {
1358 for (int n = 0; n < MT_MAX; n++) {
1359 mtc.cpu_mtypes[n] += mtype->cpu_mtypes[n];
1360 }
1361 }
1362
1363 if (!locked) {
1364 lck_mtx_lock(mbuf_mlock);
1365 }
1366 for (int n = 0; n < MT_MAX; n++) {
1367 mbstat.m_mtypes[n] = mtc.cpu_mtypes[n];
1368 }
1369 if (!locked) {
1370 lck_mtx_unlock(mbuf_mlock);
1371 }
1372 }
1373
1374 static int
1375 mbstat_sysctl SYSCTL_HANDLER_ARGS
1376 {
1377 #pragma unused(oidp, arg1, arg2)
1378
1379 #if CONFIG_MBUF_MCACHE
1380 mbuf_mtypes_sync(FALSE);
1381 #else
1382 lck_mtx_lock(mbuf_mlock);
1383 mbuf_stat_sync();
1384 mbuf_mtypes_sync(TRUE);
1385 lck_mtx_unlock(mbuf_mlock);
1386 #endif
1387
1388 return SYSCTL_OUT(req, &mbstat, sizeof(mbstat));
1389 }
1390
1391 static void
1392 mbuf_stat_sync(void)
1393 {
1394 mb_class_stat_t *sp;
1395 #if CONFIG_MBUF_MCACHE
1396 mcache_cpu_t *ccp;
1397 mcache_t *cp;
1398 int k, m, bktsize;
1399 #else
1400 int k;
1401 uint64_t drops = 0;
1402 #endif /* CONFIG_MBUF_MCACHE */
1403
1404
1405 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
1406
1407 #if CONFIG_MBUF_MCACHE
1408 for (k = 0; k < NELEM(mbuf_table); k++) {
1409 cp = m_cache(k);
1410 ccp = &cp->mc_cpu[0];
1411 bktsize = ccp->cc_bktsize;
1412 sp = mbuf_table[k].mtbl_stats;
1413
1414 if (cp->mc_flags & MCF_NOCPUCACHE) {
1415 sp->mbcl_mc_state = MCS_DISABLED;
1416 } else if (cp->mc_purge_cnt > 0) {
1417 sp->mbcl_mc_state = MCS_PURGING;
1418 } else if (bktsize == 0) {
1419 sp->mbcl_mc_state = MCS_OFFLINE;
1420 } else {
1421 sp->mbcl_mc_state = MCS_ONLINE;
1422 }
1423
1424 sp->mbcl_mc_cached = 0;
1425 for (m = 0; m < ncpu; m++) {
1426 ccp = &cp->mc_cpu[m];
1427 if (ccp->cc_objs > 0) {
1428 sp->mbcl_mc_cached += ccp->cc_objs;
1429 }
1430 if (ccp->cc_pobjs > 0) {
1431 sp->mbcl_mc_cached += ccp->cc_pobjs;
1432 }
1433 }
1434 sp->mbcl_mc_cached += (cp->mc_full.bl_total * bktsize);
1435 sp->mbcl_active = sp->mbcl_total - sp->mbcl_mc_cached -
1436 sp->mbcl_infree;
1437
1438 sp->mbcl_mc_waiter_cnt = cp->mc_waiter_cnt;
1439 sp->mbcl_mc_wretry_cnt = cp->mc_wretry_cnt;
1440 sp->mbcl_mc_nwretry_cnt = cp->mc_nwretry_cnt;
1441
1442 /* Calculate total count specific to each class */
1443 sp->mbcl_ctotal = sp->mbcl_total;
1444 switch (m_class(k)) {
1445 case MC_MBUF:
1446 /* Deduct mbufs used in composite caches */
1447 sp->mbcl_ctotal -= (m_total(MC_MBUF_CL) +
1448 m_total(MC_MBUF_BIGCL) + m_total(MC_MBUF_16KCL));
1449 break;
1450
1451 case MC_CL:
1452 /* Deduct clusters used in composite cache */
1453 sp->mbcl_ctotal -= m_total(MC_MBUF_CL);
1454 break;
1455
1456 case MC_BIGCL:
1457 /* Deduct clusters used in composite cache */
1458 sp->mbcl_ctotal -= m_total(MC_MBUF_BIGCL);
1459 break;
1460
1461 case MC_16KCL:
1462 /* Deduct clusters used in composite cache */
1463 sp->mbcl_ctotal -= m_total(MC_MBUF_16KCL);
1464 break;
1465
1466 default:
1467 break;
1468 }
1469 }
1470 #else
1471 for (k = 0; k < NELEM(mbuf_table); k++) {
1472 const zone_id_t zid = m_class_to_zid(m_class(k));
1473 const zone_ref_t zone = zone_by_id(zid);
1474 struct zone_basic_stats stats = {};
1475
1476 sp = m_stats(k);
1477 zone_get_stats(zone, &stats);
1478 drops += stats.zbs_alloc_fail;
1479 sp->mbcl_total = stats.zbs_avail;
1480 sp->mbcl_active = stats.zbs_alloc;
1481 /*
1482 * infree is what mcache considers the freelist (uncached)
1483 * free_cnt contains all the cached/uncached elements
1484 * in a zone.
1485 */
1486 sp->mbcl_infree = stats.zbs_free - stats.zbs_cached;
1487 sp->mbcl_fail_cnt = stats.zbs_alloc_fail;
1488 sp->mbcl_ctotal = sp->mbcl_total;
1489
1490 /* These stats are not available in zalloc. */
1491 sp->mbcl_alloc_cnt = 0;
1492 sp->mbcl_free_cnt = 0;
1493 sp->mbcl_notified = 0;
1494 sp->mbcl_purge_cnt = 0;
1495 sp->mbcl_slab_cnt = 0;
1496 sp->mbcl_release_cnt = 0;
1497
1498 /* zalloc caches are always on. */
1499 sp->mbcl_mc_state = MCS_ONLINE;
1500 sp->mbcl_mc_cached = stats.zbs_cached;
1501 /* These stats are not collected by zalloc. */
1502 sp->mbcl_mc_waiter_cnt = 0;
1503 sp->mbcl_mc_wretry_cnt = 0;
1504 sp->mbcl_mc_nwretry_cnt = 0;
1505 }
1506 /* Deduct clusters used in composite cache */
1507 m_ctotal(MC_MBUF) -= (m_total(MC_MBUF_CL) +
1508 m_total(MC_MBUF_BIGCL) +
1509 m_total(MC_MBUF_16KCL));
1510 m_ctotal(MC_CL) -= m_total(MC_MBUF_CL);
1511 m_ctotal(MC_BIGCL) -= m_total(MC_MBUF_BIGCL);
1512 m_ctotal(MC_16KCL) -= m_total(MC_MBUF_16KCL);
1513
1514 /* Update mbstat. */
1515 mbstat.m_mbufs = m_total(MC_MBUF);
1516 mbstat.m_clusters = m_total(MC_CL);
1517 mbstat.m_clfree = m_infree(MC_CL) + m_infree(MC_MBUF_CL);
1518 mbstat.m_drops = drops;
1519 mbstat.m_bigclusters = m_total(MC_BIGCL);
1520 mbstat.m_bigclfree = m_infree(MC_BIGCL) + m_infree(MC_MBUF_BIGCL);
1521 #endif /* CONFIG_MBUF_MCACHE */
1522 }
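/*
 * Worked example of the zalloc-backed path above (hypothetical numbers,
 * not from the original source): if a zone reports zbs_avail = 1000,
 * zbs_alloc = 700, zbs_free = 300 and zbs_cached = 120, the loop publishes
 * mbcl_total = 1000, mbcl_active = 700, mbcl_mc_cached = 120 and
 * mbcl_infree = 300 - 120 = 180, mirroring what mcache used to report as
 * its uncached freelist.
 */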
1523
1524 static int
1525 mb_stat_sysctl SYSCTL_HANDLER_ARGS
1526 {
1527 #pragma unused(oidp, arg1, arg2)
1528 any_ref_t statp;
1529 int k, statsz, proc64 = proc_is64bit(req->p);
1530
1531 lck_mtx_lock(mbuf_mlock);
1532 mbuf_stat_sync();
1533
1534 if (!proc64) {
1535 struct omb_class_stat *oc;
1536 struct mb_class_stat *c;
1537
1538 omb_stat->mbs_cnt = mb_stat->mbs_cnt;
1539 oc = &omb_stat->mbs_class[0];
1540 c = &mb_stat->mbs_class[0];
1541 for (k = 0; k < omb_stat->mbs_cnt; k++, oc++, c++) {
1542 (void) snprintf(oc->mbcl_cname, sizeof(oc->mbcl_cname),
1543 "%s", c->mbcl_cname);
1544 oc->mbcl_size = c->mbcl_size;
1545 oc->mbcl_total = c->mbcl_total;
1546 oc->mbcl_active = c->mbcl_active;
1547 oc->mbcl_infree = c->mbcl_infree;
1548 oc->mbcl_slab_cnt = c->mbcl_slab_cnt;
1549 oc->mbcl_alloc_cnt = c->mbcl_alloc_cnt;
1550 oc->mbcl_free_cnt = c->mbcl_free_cnt;
1551 oc->mbcl_notified = c->mbcl_notified;
1552 oc->mbcl_purge_cnt = c->mbcl_purge_cnt;
1553 oc->mbcl_fail_cnt = c->mbcl_fail_cnt;
1554 oc->mbcl_ctotal = c->mbcl_ctotal;
1555 oc->mbcl_release_cnt = c->mbcl_release_cnt;
1556 oc->mbcl_mc_state = c->mbcl_mc_state;
1557 oc->mbcl_mc_cached = c->mbcl_mc_cached;
1558 oc->mbcl_mc_waiter_cnt = c->mbcl_mc_waiter_cnt;
1559 oc->mbcl_mc_wretry_cnt = c->mbcl_mc_wretry_cnt;
1560 oc->mbcl_mc_nwretry_cnt = c->mbcl_mc_nwretry_cnt;
1561 }
1562 statp = omb_stat;
1563 statsz = OMB_STAT_SIZE(NELEM(mbuf_table));
1564 } else {
1565 statp = mb_stat;
1566 statsz = MB_STAT_SIZE(NELEM(mbuf_table));
1567 }
1568
1569 lck_mtx_unlock(mbuf_mlock);
1570
1571 return SYSCTL_OUT(req, statp, statsz);
1572 }
1573
1574 #if !CONFIG_MBUF_MCACHE
1575 /*
1576 * The following functions are wrappers around mbuf
1577 * allocation for zalloc. They all have the prefix "mz"
1578 * which was chosen to avoid conflicts with the mbuf KPIs.
1579 *
1580 * Z_NOPAGEWAIT is used in place of Z_NOWAIT because
1581 * Z_NOPAGEWAIT maps closer to MCR_TRYHARD. Z_NOWAIT will
1582 * fail immediately if it has to take a mutex and that
1583 * may cause packets to be dropped more frequently.
1584 * In general, the mbuf subsystem can sustain grabbing a mutex
1585 * during "non-blocking" allocation and that's the reason
1586 * why Z_NOPAGEWAIT was chosen.
1587 *
1588 * mbufs are elided (all of their pointers are cleared) before they are
1589 * returned to the cache. The exception is composite mbufs, which
1590 * are re-initialized on allocation.
1591 */
1592 __attribute__((always_inline))
1593 static inline void
1594 m_elide(struct mbuf *m)
1595 {
1596 m->m_next = m->m_nextpkt = NULL;
1597 m->m_data = 0;
1598 memset(&m->m_ext, 0, sizeof(m->m_ext));
1599 m->m_pkthdr.rcvif = NULL;
1600 m->m_pkthdr.pkt_hdr = NULL;
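/*
 * M_PKTHDR is set only temporarily here so that m_tag_init() and
 * m_scratch_init() operate on the packet-header fields; it is cleared
 * again once they return.
 */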
1601 m->m_flags |= M_PKTHDR;
1602 m_tag_init(m, 1);
1603 m->m_pkthdr.pkt_flags = 0;
1604 m_scratch_init(m);
1605 m->m_flags &= ~M_PKTHDR;
1606 }
1607
1608 __attribute__((always_inline))
1609 static inline struct mbuf *
1610 mz_alloc(zalloc_flags_t flags)
1611 {
1612 if (flags & Z_NOWAIT) {
1613 flags ^= Z_NOWAIT | Z_NOPAGEWAIT;
1614 } else if (!(flags & Z_NOPAGEWAIT)) {
1615 flags |= Z_NOFAIL;
1616 }
1617 return zalloc_id(ZONE_ID_MBUF, flags | Z_NOZZC);
1618 }
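/*
 * Illustrative usage sketch (not part of the original source): a caller
 * that must not block passes Z_NOWAIT, which mz_alloc() above quietly
 * turns into Z_NOPAGEWAIT; a caller that can block ends up with Z_NOFAIL
 * and is guaranteed a non-NULL mbuf. The helper name is hypothetical.
 */
#if 0
static struct mbuf *
mz_alloc_nonblocking_example(void)
{
	/* May return NULL under memory pressure. */
	return mz_alloc(Z_NOWAIT);
}
#endif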
1619
1620 __attribute__((always_inline))
1621 static inline zstack_t
1622 mz_alloc_n(uint32_t count, zalloc_flags_t flags)
1623 {
1624 if (flags & Z_NOWAIT) {
1625 flags ^= Z_NOWAIT | Z_NOPAGEWAIT;
1626 } else if (!(flags & Z_NOPAGEWAIT)) {
1627 flags |= Z_NOFAIL;
1628 }
1629 return zalloc_n(ZONE_ID_MBUF, count, flags | Z_NOZZC);
1630 }
1631
1632 __attribute__((always_inline))
1633 static inline void
1634 mz_free(struct mbuf *m)
1635 {
1636 #if KASAN
1637 zone_require(zone_by_id(ZONE_ID_MBUF), m);
1638 #endif
1639 m_elide(m);
1640 zfree_nozero(ZONE_ID_MBUF, m);
1641 }
1642
1643 __attribute__((always_inline))
1644 static inline void
1645 mz_free_n(zstack_t list)
1646 {
1647 /* Callers of this function have already elided the mbuf. */
1648 zfree_nozero_n(ZONE_ID_MBUF, list);
1649 }
1650
1651 __attribute__((always_inline))
1652 static inline struct ext_ref *
1653 mz_ref_alloc(zalloc_flags_t flags)
1654 {
1655 if (flags & Z_NOWAIT) {
1656 flags ^= Z_NOWAIT | Z_NOPAGEWAIT;
1657 }
1658 return zalloc_id(ZONE_ID_MBUF_REF, flags | Z_NOZZC);
1659 }
1660
1661 __attribute__((always_inline))
1662 static inline void
1663 mz_ref_free(struct ext_ref *rfa)
1664 {
1665 VERIFY(rfa->minref == rfa->refcnt);
1666 #if KASAN
1667 zone_require(zone_by_id(ZONE_ID_MBUF_REF), rfa);
1668 #endif
1669 zfree_nozero(ZONE_ID_MBUF_REF, rfa);
1670 }
1671
1672 __attribute__((always_inline))
1673 static inline void * __bidi_indexable
1674 mz_cl_alloc(zone_id_t zid, zalloc_flags_t flags)
1675 {
1676 void * p __unsafe_indexable;
1677 if (flags & Z_NOWAIT) {
1678 flags ^= Z_NOWAIT | Z_NOPAGEWAIT;
1679 } else if (!(flags & Z_NOPAGEWAIT)) {
1680 flags |= Z_NOFAIL;
1681 }
1682 flags |= Z_NOZZC;
1683
1684 /*
1685 * N.B. Invoking `(zalloc_id)' directly, vs. via `zalloc_id' macro.
1686 */
1687 p = (zalloc_id)(zid, flags);
1688 return __unsafe_forge_bidi_indexable(void *, p, zone_get_elem_size(zone_by_id(zid)));
1689 }
1690
1691 __attribute__((always_inline))
1692 static inline void
1693 mz_cl_free(zone_id_t zid, void *cl)
1694 {
1695 #if KASAN
1696 zone_require(zone_by_id(zid), cl);
1697 #endif
1698 zfree_nozero(zid, cl);
1699 }
1700
1701 __attribute__((always_inline))
1702 static inline zstack_t
1703 mz_composite_alloc_n(mbuf_class_t class, unsigned int n, zalloc_flags_t flags)
1704 {
1705 if (flags & Z_NOWAIT) {
1706 flags ^= Z_NOWAIT | Z_NOPAGEWAIT;
1707 }
1708 return (zcache_alloc_n)(m_class_to_zid(class), n, flags,
1709 &mz_composite_ops);
1710 }
1711
1712 __attribute__((always_inline))
1713 static inline struct mbuf *
1714 mz_composite_alloc(mbuf_class_t class, zalloc_flags_t flags)
1715 {
1716 zstack_t list = {};
1717 list = mz_composite_alloc_n(class, 1, flags);
1718 if (!zstack_empty(list)) {
1719 return zstack_pop(&list);
1720 } else {
1721 return NULL;
1722 }
1723 }
1724
1725 __attribute__((always_inline))
1726 static inline void
1727 mz_composite_free_n(mbuf_class_t class, zstack_t list)
1728 {
1729 (zcache_free_n)(m_class_to_zid(class), list, &mz_composite_ops);
1730 }
1731
1732 __attribute__((always_inline))
1733 static inline void
1734 mz_composite_free(mbuf_class_t class, struct mbuf *m)
1735 {
1736 zstack_t list = {};
1737 zstack_push(&list, m);
1738 (zcache_free_n)(m_class_to_zid(class), list, &mz_composite_ops);
1739 }
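/*
 * Illustrative sketch (not part of the original source) of how the
 * composite wrappers pair up: an allocation from the MC_MBUF_CL cache
 * returns an mbuf with a 2KB cluster and ext_ref already attached, and it
 * must go back through the matching free routine. The function name below
 * is hypothetical.
 */
#if 0
static void
mz_composite_example(void)
{
	struct mbuf *m = mz_composite_alloc(MC_MBUF_CL, Z_WAITOK);

	if (m != NULL) {
		/* ... use m and its attached 2KB cluster ... */
		mz_composite_free(MC_MBUF_CL, m);
	}
}
#endif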
1740
1741 /* Converts composite zone ID to the cluster zone ID. */
1742 __attribute__((always_inline))
1743 static inline zone_id_t
1744 mz_cl_zid(zone_id_t zid)
1745 {
1746 return ZONE_ID_CLUSTER_2K + zid - ZONE_ID_MBUF_CLUSTER_2K;
1747 }
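/*
 * Worked example of the arithmetic above (it relies on the composite and
 * cluster zone IDs being declared in the same order): for
 * zid == ZONE_ID_MBUF_CLUSTER_4K the result is ZONE_ID_CLUSTER_2K +
 * (ZONE_ID_MBUF_CLUSTER_4K - ZONE_ID_MBUF_CLUSTER_2K), i.e. the zone that
 * backs the bare 4KB clusters.
 */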
1748
1749 static void *
1750 mz_composite_build(zone_id_t zid, zalloc_flags_t flags)
1751 {
1752 const zone_id_t cl_zid = mz_cl_zid(zid);
1753 struct mbuf *m = NULL;
1754 struct ext_ref *rfa = NULL;
1755 void *cl = NULL;
1756
1757 cl = mz_cl_alloc(cl_zid, flags);
1758 if (__improbable(cl == NULL)) {
1759 goto out;
1760 }
1761 rfa = mz_ref_alloc(flags);
1762 if (__improbable(rfa == NULL)) {
1763 goto out_free_cl;
1764 }
1765 m = mz_alloc(flags);
1766 if (__improbable(m == NULL)) {
1767 goto out_free_rfa;
1768 }
1769 MBUF_INIT(m, 0, MT_FREE);
1770 if (zid == ZONE_ID_MBUF_CLUSTER_2K) {
1771 MBUF_CL_INIT(m, cl, rfa, 0, EXTF_COMPOSITE);
1772 } else if (zid == ZONE_ID_MBUF_CLUSTER_4K) {
1773 MBUF_BIGCL_INIT(m, cl, rfa, 0, EXTF_COMPOSITE);
1774 } else {
1775 MBUF_16KCL_INIT(m, cl, rfa, 0, EXTF_COMPOSITE);
1776 }
1777 VERIFY(m->m_flags == M_EXT);
1778 VERIFY(m_get_rfa(m) != NULL && MBUF_IS_COMPOSITE(m));
1779
1780 return m;
1781 out_free_rfa:
1782 mz_ref_free(rfa);
1783 out_free_cl:
1784 mz_cl_free(cl_zid, cl);
1785 out:
1786 return NULL;
1787 }
1788
1789 static void *
1790 mz_composite_mark_valid(zone_id_t zid, void *p)
1791 {
1792 mbuf_ref_t m = p;
1793
1794 m = zcache_mark_valid_single(zone_by_id(ZONE_ID_MBUF), m);
1795 #if KASAN
1796 struct ext_ref *rfa __single = m_get_rfa(m);
1797 const zone_id_t cl_zid = mz_cl_zid(zid);
1798 void *cl = m->m_ext.ext_buf;
1799
1800 cl = __unsafe_forge_bidi_indexable(void *,
1801 zcache_mark_valid(zone_by_id(cl_zid), cl),
1802 zone_get_elem_size(zone_by_id(cl_zid)));
1803 rfa = __unsafe_forge_single(struct ext_ref *,
1804 zcache_mark_valid(zone_by_id(ZONE_ID_MBUF_REF), rfa));
1805 m->m_data = (uintptr_t)cl;
1806 m->m_ext.ext_buf = cl;
1807 m->m_ext.ext_size = m->m_ext.ext_size;
1808 m_set_rfa(m, rfa);
1809 #else
1810 #pragma unused(zid)
1811 #endif
1812 VERIFY(MBUF_IS_COMPOSITE(m));
1813
1814 return m;
1815 }
1816
1817 static void *
1818 mz_composite_mark_invalid(zone_id_t zid, void *p)
1819 {
1820 mbuf_ref_t m = p;
1821
1822 VERIFY(MBUF_IS_COMPOSITE(m));
1823 VERIFY(MEXT_REF(m) == MEXT_MINREF(m));
1824 #if KASAN
1825 struct ext_ref *rfa __single = m_get_rfa(m);
1826 const zone_id_t cl_zid = mz_cl_zid(zid);
1827 void *cl = m->m_ext.ext_buf;
1828
1829 cl = __unsafe_forge_bidi_indexable(void *,
1830 zcache_mark_invalid(zone_by_id(cl_zid), cl),
1831 zone_get_elem_size(zone_by_id(cl_zid)));
1832 rfa = __unsafe_forge_single(struct ext_ref *,
1833 zcache_mark_invalid(zone_by_id(ZONE_ID_MBUF_REF), rfa));
1834 m->m_data = (uintptr_t)cl;
1835 m->m_ext.ext_buf = cl;
1836 m->m_ext.ext_size = m->m_ext.ext_size;
1837 m_set_rfa(m, rfa);
1838 #else
1839 #pragma unused(zid)
1840 #endif
1841
1842 return zcache_mark_invalid_single(zone_by_id(ZONE_ID_MBUF), m);
1843 }
1844
1845 static void
1846 mz_composite_destroy(zone_id_t zid, void *p)
1847 {
1848 const zone_id_t cl_zid = mz_cl_zid(zid);
1849 struct ext_ref *rfa = NULL;
1850 mbuf_ref_t m = p;
1851
1852 VERIFY(MBUF_IS_COMPOSITE(m));
1853
1854 MEXT_MINREF(m) = 0;
1855 MEXT_REF(m) = 0;
1856 MEXT_PREF(m) = 0;
1857 MEXT_FLAGS(m) = 0;
1858 MEXT_PRIV(m) = 0;
1859 MEXT_PMBUF(m) = NULL;
1860
1861 rfa = m_get_rfa(m);
1862 m_set_ext(m, NULL, NULL, NULL);
1863
1864 m->m_type = MT_FREE;
1865 m->m_flags = m->m_len = 0;
1866 m->m_next = m->m_nextpkt = NULL;
1867
1868 mz_cl_free(cl_zid, m->m_ext.ext_buf);
1869 m->m_ext.ext_size = 0;
1870 m->m_ext.ext_buf = NULL;
1871 mz_ref_free(rfa);
1872 mz_free(m);
1873 }
1874 #endif /* !CONFIG_MBUF_MCACHE */
1875
1876 #if CONFIG_MBUF_MCACHE
1877 static int
1878 mleak_top_trace_sysctl SYSCTL_HANDLER_ARGS
1879 {
1880 #pragma unused(oidp, arg1, arg2)
1881 int i;
1882
1883 /* Ensure leak tracing turned on */
1884 if (!mclfindleak || !mclexpleak) {
1885 return ENXIO;
1886 }
1887
1888 lck_mtx_lock(mleak_lock);
1889 mleak_update_stats();
1890 i = SYSCTL_OUT(req, mleak_stat, MLEAK_STAT_SIZE(MLEAK_NUM_TRACES));
1891 lck_mtx_unlock(mleak_lock);
1892
1893 return i;
1894 }
1895
1896 static int
1897 mleak_table_sysctl SYSCTL_HANDLER_ARGS
1898 {
1899 #pragma unused(oidp, arg1, arg2)
1900 int i = 0;
1901
1902 /* Ensure leak tracing turned on */
1903 if (!mclfindleak || !mclexpleak) {
1904 return ENXIO;
1905 }
1906
1907 lck_mtx_lock(mleak_lock);
1908 i = SYSCTL_OUT(req, &mleak_table, sizeof(mleak_table));
1909 lck_mtx_unlock(mleak_lock);
1910
1911 return i;
1912 }
1913 #endif /* CONFIG_MBUF_MCACHE */
1914
1915 static inline void
1916 m_incref(struct mbuf *m)
1917 {
1918 uint16_t new = os_atomic_inc(&MEXT_REF(m), relaxed);
1919
1920 VERIFY(new != 0);
1921 /*
1922 * If cluster is shared, mark it with (sticky) EXTF_READONLY;
1923 * we don't clear the flag when the refcount goes back to the
1924 * minimum, to simplify code calling m_mclhasreference().
1925 */
1926 if (new > (MEXT_MINREF(m) + 1) && !(MEXT_FLAGS(m) & EXTF_READONLY)) {
1927 os_atomic_or(&MEXT_FLAGS(m), EXTF_READONLY, relaxed);
1928 }
1929 }
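/*
 * Example of the check above: for a cluster with MEXT_MINREF == 0, the
 * first m_incref() takes the count to 1 (sole owner, still writable); a
 * second reference takes it to 2, which exceeds MEXT_MINREF + 1, so
 * EXTF_READONLY is set and, being sticky, stays set even after the extra
 * reference is dropped.
 */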
1930
1931 static inline uint16_t
1932 m_decref(struct mbuf *m)
1933 {
1934 VERIFY(MEXT_REF(m) != 0);
1935
1936 return os_atomic_dec(&MEXT_REF(m), acq_rel);
1937 }
1938
1939 static void
1940 mbuf_table_init(void)
1941 {
1942 unsigned int b, c, s;
1943 int m, config_mbuf_jumbo = 0;
1944
1945 VERIFY(NELEM(mbuf_table) <= MB_STAT_MAX_MB_CLASSES);
1946 /*
1947 * The kernel copies of mb_stat / omb_stat must be large enough
1948 * to hold NELEM(mbuf_table) classes.
1949 */
1950 VERIFY(OMB_STAT_SIZE(NELEM(mbuf_table)) <= sizeof(*omb_stat));
1951 omb_stat = zalloc_permanent(sizeof(*omb_stat),
1952 ZALIGN(struct omb_stat));
1953
1954 VERIFY(MB_STAT_SIZE(NELEM(mbuf_table)) <= sizeof(*mb_stat));
1955 mb_stat = zalloc_permanent(sizeof(*mb_stat),
1956 ZALIGN(mb_stat_t));
1957
1958 mb_stat->mbs_cnt = NELEM(mbuf_table);
1959 for (m = 0; m < NELEM(mbuf_table); m++) {
1960 mbuf_table[m].mtbl_stats = &mb_stat->mbs_class[m];
1961 }
1962
1963 #if CONFIG_MBUF_JUMBO
1964 config_mbuf_jumbo = 1;
1965 #endif /* CONFIG_MBUF_JUMBO */
1966
1967 if (config_mbuf_jumbo == 1 || PAGE_SIZE == M16KCLBYTES) {
1968 /*
1969 * Set aside 1/3 of the mbuf cluster map for jumbo
1970 * clusters; we do this only on platforms where the jumbo
1971 * cluster pool is enabled.
1972 */
1973 njcl = nmbclusters / 3;
1974 njclbytes = M16KCLBYTES;
1975 }
1976
1977 /*
1978 * nclusters holds both the 2KB and 4KB pools, so ensure it's
1979 * a multiple of 4KB clusters.
1980 */
1981 nclusters = P2ROUNDDOWN(nmbclusters - njcl, NCLPG);
1982 if (njcl > 0) {
1983 /*
1984 * Each jumbo cluster takes 8 2KB clusters, so make
1985 * sure that the pool size is evenly divisible by 8;
1986 * njcl is in 2KB unit, hence treated as such.
1987 */
1988 njcl = P2ROUNDDOWN(nmbclusters - nclusters, NCLPJCL);
1989
1990 /* Update nclusters with rounded down value of njcl */
1991 nclusters = P2ROUNDDOWN(nmbclusters - njcl, NCLPG);
1992 }
1993
1994 /*
1995 * njcl is valid only on platforms with 16KB jumbo clusters or
1996 * with 16KB pages, where it is configured to 1/3 of the pool
1997 * size. On these platforms, the remaining is used for 2KB
1998 * and 4KB clusters. On platforms without 16KB jumbo clusters,
1999 * the entire pool is used for both 2KB and 4KB clusters. A 4KB
2000 * cluster can either be splitted into 16 mbufs, or into 2 2KB
2001 * clusters.
2002 *
2003 * +---+---+------------ ... -----------+------- ... -------+
2004 * | c | b | s | njcl |
2005 * +---+---+------------ ... -----------+------- ... -------+
2006 *
2007 * 1/32nd of the shared region is reserved for pure 2KB and 4KB
2008 * clusters (1/64th each.)
2009 */
2010 c = P2ROUNDDOWN((nclusters >> 6), NCLPG); /* in 2KB unit */
2011 b = P2ROUNDDOWN((nclusters >> (6 + NCLPBGSHIFT)), NBCLPG); /* in 4KB unit */
2012 s = nclusters - (c + (b << NCLPBGSHIFT)); /* in 2KB unit */
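/*
 * Worked example (hypothetical pool size, P2ROUNDDOWN rounding ignored):
 * with nclusters = 32768 (64MB worth of 2KB clusters), c = 32768 / 64 =
 * 512 2KB clusters, b = 32768 / 128 = 256 4KB clusters (512 2KB
 * equivalents, since a 4KB cluster holds two 2KB clusters), and
 * s = 32768 - (512 + 512) = 31744 2KB units left for general use.
 */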
2013
2014 /*
2015 * 1/64th (c) is reserved for 2KB clusters.
2016 */
2017 m_minlimit(MC_CL) = c;
2018 m_maxlimit(MC_CL) = s + c; /* in 2KB unit */
2019 m_maxsize(MC_CL) = m_size(MC_CL) = MCLBYTES;
2020 snprintf(m_cname(MC_CL), MAX_MBUF_CNAME, "cl");
2021
2022 /*
2023 * Another 1/64th (b) of the map is reserved for 4KB clusters.
2024 * It cannot be turned into 2KB clusters or mbufs.
2025 */
2026 m_minlimit(MC_BIGCL) = b;
2027 m_maxlimit(MC_BIGCL) = (s >> NCLPBGSHIFT) + b; /* in 4KB unit */
2028 m_maxsize(MC_BIGCL) = m_size(MC_BIGCL) = MBIGCLBYTES;
2029 snprintf(m_cname(MC_BIGCL), MAX_MBUF_CNAME, "bigcl");
2030
2031 /*
2032 * The remaining 31/32nds (s) are all-purpose (mbufs, 2KB, or 4KB)
2033 */
2034 m_minlimit(MC_MBUF) = 0;
2035 m_maxlimit(MC_MBUF) = s * NMBPCL; /* in mbuf unit */
2036 m_maxsize(MC_MBUF) = m_size(MC_MBUF) = _MSIZE;
2037 snprintf(m_cname(MC_MBUF), MAX_MBUF_CNAME, "mbuf");
2038
2039 /*
2040 * Set limits for the composite classes.
2041 */
2042 m_minlimit(MC_MBUF_CL) = 0;
2043 m_maxlimit(MC_MBUF_CL) = m_maxlimit(MC_CL);
2044 m_maxsize(MC_MBUF_CL) = MCLBYTES;
2045 m_size(MC_MBUF_CL) = m_size(MC_MBUF) + m_size(MC_CL);
2046 snprintf(m_cname(MC_MBUF_CL), MAX_MBUF_CNAME, "mbuf_cl");
2047
2048 m_minlimit(MC_MBUF_BIGCL) = 0;
2049 m_maxlimit(MC_MBUF_BIGCL) = m_maxlimit(MC_BIGCL);
2050 m_maxsize(MC_MBUF_BIGCL) = MBIGCLBYTES;
2051 m_size(MC_MBUF_BIGCL) = m_size(MC_MBUF) + m_size(MC_BIGCL);
2052 snprintf(m_cname(MC_MBUF_BIGCL), MAX_MBUF_CNAME, "mbuf_bigcl");
2053
2054 /*
2055 * And for jumbo classes.
2056 */
2057 m_minlimit(MC_16KCL) = 0;
2058 m_maxlimit(MC_16KCL) = (njcl >> NCLPJCLSHIFT); /* in 16KB unit */
2059 m_maxsize(MC_16KCL) = m_size(MC_16KCL) = M16KCLBYTES;
2060 snprintf(m_cname(MC_16KCL), MAX_MBUF_CNAME, "16kcl");
2061
2062 m_minlimit(MC_MBUF_16KCL) = 0;
2063 m_maxlimit(MC_MBUF_16KCL) = m_maxlimit(MC_16KCL);
2064 m_maxsize(MC_MBUF_16KCL) = M16KCLBYTES;
2065 m_size(MC_MBUF_16KCL) = m_size(MC_MBUF) + m_size(MC_16KCL);
2066 snprintf(m_cname(MC_MBUF_16KCL), MAX_MBUF_CNAME, "mbuf_16kcl");
2067
2068 /*
2069 * Initialize the legacy mbstat structure.
2070 */
2071 bzero(&mbstat, sizeof(mbstat));
2072 mbstat.m_msize = m_maxsize(MC_MBUF);
2073 mbstat.m_mclbytes = m_maxsize(MC_CL);
2074 mbstat.m_minclsize = MINCLSIZE;
2075 mbstat.m_mlen = MLEN;
2076 mbstat.m_mhlen = MHLEN;
2077 mbstat.m_bigmclbytes = m_maxsize(MC_BIGCL);
2078 }
2079
2080 static int
2081 mbuf_get_class(struct mbuf *m)
2082 {
2083 if (m->m_flags & M_EXT) {
2084 uint32_t composite = (MEXT_FLAGS(m) & EXTF_COMPOSITE);
2085 m_ext_free_func_t m_free_func = m_get_ext_free(m);
2086
2087 if (m_free_func == NULL) {
2088 if (composite) {
2089 return MC_MBUF_CL;
2090 } else {
2091 return MC_CL;
2092 }
2093 } else if (m_free_func == m_bigfree) {
2094 if (composite) {
2095 return MC_MBUF_BIGCL;
2096 } else {
2097 return MC_BIGCL;
2098 }
2099 } else if (m_free_func == m_16kfree) {
2100 if (composite) {
2101 return MC_MBUF_16KCL;
2102 } else {
2103 return MC_16KCL;
2104 }
2105 }
2106 }
2107
2108 return MC_MBUF;
2109 }
2110
2111 bool
2112 mbuf_class_under_pressure(struct mbuf *m)
2113 {
2114 int mclass = mbuf_get_class(m);
2115
2116 #if CONFIG_MBUF_MCACHE
2117 if (m_total(mclass) - m_infree(mclass) >= (m_maxlimit(mclass) * mb_memory_pressure_percentage) / 100) {
2118 /*
2119 * The above computation does not include the per-CPU cached objects.
2120 * As a fast-path check this is good enough. But now we do
2121 * the "slower" count of the cached objects to know exactly the
2122 * number of active mbufs in use.
2123 *
2124 * We do not take the mbuf_lock here to avoid lock contention. Numbers
2125 * might be slightly off but we don't try to be 100% accurate.
2126 * At worst, we drop a packet that we shouldn't have dropped or
2127 * we might go slightly above our memory-pressure threshold.
2128 */
2129 mcache_t *cp = m_cache(mclass);
2130 mcache_cpu_t *ccp = &cp->mc_cpu[0];
2131
2132 int bktsize = os_access_once(ccp->cc_bktsize);
2133 uint32_t bl_total = os_access_once(cp->mc_full.bl_total);
2134 uint32_t cached = 0;
2135 int i;
2136
2137 for (i = 0; i < ncpu; i++) {
2138 ccp = &cp->mc_cpu[i];
2139
2140 int cc_objs = os_access_once(ccp->cc_objs);
2141 if (cc_objs > 0) {
2142 cached += cc_objs;
2143 }
2144
2145 int cc_pobjs = os_access_once(ccp->cc_pobjs);
2146 if (cc_pobjs > 0) {
2147 cached += cc_pobjs;
2148 }
2149 }
2150 cached += (bl_total * bktsize);
2151 if (m_total(mclass) - m_infree(mclass) - cached >= (m_maxlimit(mclass) * mb_memory_pressure_percentage) / 100) {
2152 os_log(OS_LOG_DEFAULT,
2153 "%s memory-pressure on mbuf due to class %u, total %u free %u cached %u max %u",
2154 __func__, mclass, m_total(mclass), m_infree(mclass), cached, m_maxlimit(mclass));
2155 return true;
2156 }
2157 }
2158 #else
2159 /*
2160 * Grab the statistics from zalloc.
2161 * We can't call mbuf_stat_sync() since that requires a lock.
2162 */
2163 const zone_id_t zid = m_class_to_zid(m_class(mclass));
2164 const zone_ref_t zone = zone_by_id(zid);
2165 struct zone_basic_stats stats = {};
2166
2167 zone_get_stats(zone, &stats);
2168 if (stats.zbs_avail - stats.zbs_free >= (m_maxlimit(mclass) * mb_memory_pressure_percentage) / 100) {
2169 os_log(OS_LOG_DEFAULT,
2170 "%s memory-pressure on mbuf due to class %u, total %llu free %llu max %u",
2171 __func__, mclass, stats.zbs_avail, stats.zbs_free, m_maxlimit(mclass));
2172 return true;
2173 }
2174 #endif /* CONFIG_MBUF_MCACHE */
2175
2176 return false;
2177 }
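/*
 * Worked example of the threshold above (hypothetical numbers): with
 * m_maxlimit(mclass) = 10000 and mb_memory_pressure_percentage = 80, the
 * class is reported as under pressure once the number of objects in use
 * (total minus free, and minus the per-CPU cached count on the mcache
 * path) reaches 8000.
 */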
2178
2179 #if defined(__LP64__)
2180 typedef struct ncl_tbl {
2181 uint64_t nt_maxmem; /* memory (sane) size */
2182 uint32_t nt_mbpool; /* mbuf pool size */
2183 } ncl_tbl_t;
2184
2185 static const ncl_tbl_t ncl_table[] = {
2186 { (1ULL << GBSHIFT) /* 1 GB */, (64 << MBSHIFT) /* 64 MB */ },
2187 { (1ULL << (GBSHIFT + 2)) /* 4 GB */, (96 << MBSHIFT) /* 96 MB */ },
2188 { (1ULL << (GBSHIFT + 3)) /* 8 GB */, (128 << MBSHIFT) /* 128 MB */ },
2189 { (1ULL << (GBSHIFT + 4)) /* 16 GB */, (256 << MBSHIFT) /* 256 MB */ },
2190 { (1ULL << (GBSHIFT + 5)) /* 32 GB */, (512 << MBSHIFT) /* 512 MB */ },
2191 { 0, 0 }
2192 };
2193 #endif /* __LP64__ */
2194
2195 __private_extern__ unsigned int
2196 mbuf_default_ncl(uint64_t mem)
2197 {
2198 #if !defined(__LP64__)
2199 unsigned int n;
2200 /*
2201 * 32-bit kernel (default to 64MB of mbuf pool for >= 1GB RAM).
2202 */
2203 if ((n = ((mem / 16) / MCLBYTES)) > 32768) {
2204 n = 32768;
2205 }
2206 #else
2207 unsigned int n, i;
2208 /*
2209 * 64-bit kernel (mbuf pool size based on table).
2210 */
2211 n = ncl_table[0].nt_mbpool;
2212 for (i = 0; ncl_table[i].nt_mbpool != 0; i++) {
2213 if (mem < ncl_table[i].nt_maxmem) {
2214 break;
2215 }
2216 n = ncl_table[i].nt_mbpool;
2217 }
2218 n >>= MCLSHIFT;
2219 #endif /* !__LP64__ */
2220 return n;
2221 }
2222
2223 __private_extern__ void
2224 mbinit(void)
2225 {
2226 unsigned int m;
2227 #if CONFIG_MBUF_MCACHE
2228 unsigned int initmcl = 0;
2229 thread_t thread = THREAD_NULL;
2230 #endif /* CONFIG_MBUF_MCACHE */
2231
2232 #if CONFIG_MBUF_MCACHE
2233 microuptime(&mb_start);
2234 #endif /* CONFIG_MBUF_MCACHE */
2235
2236 /*
2237 * These MBUF_ values must be equal to their private counterparts.
2238 */
2239 _CASSERT(MBUF_EXT == M_EXT);
2240 _CASSERT(MBUF_PKTHDR == M_PKTHDR);
2241 _CASSERT(MBUF_EOR == M_EOR);
2242 _CASSERT(MBUF_LOOP == M_LOOP);
2243 _CASSERT(MBUF_BCAST == M_BCAST);
2244 _CASSERT(MBUF_MCAST == M_MCAST);
2245 _CASSERT(MBUF_FRAG == M_FRAG);
2246 _CASSERT(MBUF_FIRSTFRAG == M_FIRSTFRAG);
2247 _CASSERT(MBUF_LASTFRAG == M_LASTFRAG);
2248 _CASSERT(MBUF_PROMISC == M_PROMISC);
2249 _CASSERT(MBUF_HASFCS == M_HASFCS);
2250
2251 _CASSERT(MBUF_TYPE_FREE == MT_FREE);
2252 _CASSERT(MBUF_TYPE_DATA == MT_DATA);
2253 _CASSERT(MBUF_TYPE_HEADER == MT_HEADER);
2254 _CASSERT(MBUF_TYPE_SOCKET == MT_SOCKET);
2255 _CASSERT(MBUF_TYPE_PCB == MT_PCB);
2256 _CASSERT(MBUF_TYPE_RTABLE == MT_RTABLE);
2257 _CASSERT(MBUF_TYPE_HTABLE == MT_HTABLE);
2258 _CASSERT(MBUF_TYPE_ATABLE == MT_ATABLE);
2259 _CASSERT(MBUF_TYPE_SONAME == MT_SONAME);
2260 _CASSERT(MBUF_TYPE_SOOPTS == MT_SOOPTS);
2261 _CASSERT(MBUF_TYPE_FTABLE == MT_FTABLE);
2262 _CASSERT(MBUF_TYPE_RIGHTS == MT_RIGHTS);
2263 _CASSERT(MBUF_TYPE_IFADDR == MT_IFADDR);
2264 _CASSERT(MBUF_TYPE_CONTROL == MT_CONTROL);
2265 _CASSERT(MBUF_TYPE_OOBDATA == MT_OOBDATA);
2266
2267 _CASSERT(MBUF_TSO_IPV4 == CSUM_TSO_IPV4);
2268 _CASSERT(MBUF_TSO_IPV6 == CSUM_TSO_IPV6);
2269 _CASSERT(MBUF_CSUM_REQ_SUM16 == CSUM_PARTIAL);
2270 _CASSERT(MBUF_CSUM_TCP_SUM16 == MBUF_CSUM_REQ_SUM16);
2271 _CASSERT(MBUF_CSUM_REQ_ZERO_INVERT == CSUM_ZERO_INVERT);
2272 _CASSERT(MBUF_CSUM_REQ_IP == CSUM_IP);
2273 _CASSERT(MBUF_CSUM_REQ_TCP == CSUM_TCP);
2274 _CASSERT(MBUF_CSUM_REQ_UDP == CSUM_UDP);
2275 _CASSERT(MBUF_CSUM_REQ_TCPIPV6 == CSUM_TCPIPV6);
2276 _CASSERT(MBUF_CSUM_REQ_UDPIPV6 == CSUM_UDPIPV6);
2277 _CASSERT(MBUF_CSUM_DID_IP == CSUM_IP_CHECKED);
2278 _CASSERT(MBUF_CSUM_IP_GOOD == CSUM_IP_VALID);
2279 _CASSERT(MBUF_CSUM_DID_DATA == CSUM_DATA_VALID);
2280 _CASSERT(MBUF_CSUM_PSEUDO_HDR == CSUM_PSEUDO_HDR);
2281
2282 _CASSERT(MBUF_WAITOK == M_WAIT);
2283 _CASSERT(MBUF_DONTWAIT == M_DONTWAIT);
2284 _CASSERT(MBUF_COPYALL == M_COPYALL);
2285
2286 _CASSERT(MBUF_SC2TC(MBUF_SC_BK_SYS) == MBUF_TC_BK);
2287 _CASSERT(MBUF_SC2TC(MBUF_SC_BK) == MBUF_TC_BK);
2288 _CASSERT(MBUF_SC2TC(MBUF_SC_BE) == MBUF_TC_BE);
2289 _CASSERT(MBUF_SC2TC(MBUF_SC_RD) == MBUF_TC_BE);
2290 _CASSERT(MBUF_SC2TC(MBUF_SC_OAM) == MBUF_TC_BE);
2291 _CASSERT(MBUF_SC2TC(MBUF_SC_AV) == MBUF_TC_VI);
2292 _CASSERT(MBUF_SC2TC(MBUF_SC_RV) == MBUF_TC_VI);
2293 _CASSERT(MBUF_SC2TC(MBUF_SC_VI) == MBUF_TC_VI);
2294 _CASSERT(MBUF_SC2TC(MBUF_SC_SIG) == MBUF_TC_VI);
2295 _CASSERT(MBUF_SC2TC(MBUF_SC_VO) == MBUF_TC_VO);
2296 _CASSERT(MBUF_SC2TC(MBUF_SC_CTL) == MBUF_TC_VO);
2297
2298 _CASSERT(MBUF_TC2SCVAL(MBUF_TC_BK) == SCVAL_BK);
2299 _CASSERT(MBUF_TC2SCVAL(MBUF_TC_BE) == SCVAL_BE);
2300 _CASSERT(MBUF_TC2SCVAL(MBUF_TC_VI) == SCVAL_VI);
2301 _CASSERT(MBUF_TC2SCVAL(MBUF_TC_VO) == SCVAL_VO);
2302
2303 /* Module specific scratch space (32-bit alignment requirement) */
2304 _CASSERT(!(offsetof(struct mbuf, m_pkthdr.pkt_mpriv) %
2305 sizeof(uint32_t)));
2306
2307 #if CONFIG_MBUF_MCACHE
2308 /* Make sure we don't save more than we should */
2309 _CASSERT(MCA_SAVED_MBUF_SIZE <= sizeof(struct mbuf));
2310 #endif /* CONFIG_MBUF_MCACHE */
2311
2312 if (nmbclusters == 0) {
2313 nmbclusters = NMBCLUSTERS;
2314 }
2315
2316 /* This should be a sane (at least even) value by now */
2317 VERIFY(nmbclusters != 0 && !(nmbclusters & 0x1));
2318
2319 /* Setup the mbuf table */
2320 mbuf_table_init();
2321
2322 _CASSERT(sizeof(struct mbuf) == _MSIZE);
2323
2324 #if CONFIG_MBUF_MCACHE
2325 /*
2326 * Allocate cluster slabs table:
2327 *
2328 * maxslabgrp = (N * 2048) / (1024 * 1024)
2329 *
2330 * Where N is nmbclusters rounded up to the nearest 512. This yields
2331 * mcl_slab_g_t units, each one representing a MB of memory.
2332 */
2333 maxslabgrp =
2334 (P2ROUNDUP(nmbclusters, (MBSIZE >> MCLSHIFT)) << MCLSHIFT) >> MBSHIFT;
2335 slabstbl = zalloc_permanent(maxslabgrp * sizeof(mcl_slabg_t *),
2336 ZALIGN(mcl_slabg_t));
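/*
 * Worked example of the formula above (hypothetical configuration, 4KB
 * pages assumed): with nmbclusters = 32768, P2ROUNDUP to the nearest 512
 * leaves 32768, so maxslabgrp = (32768 * 2048) / (1024 * 1024) = 64 slab
 * groups, each covering 1MB; the audit table below would then cover
 * (64 * 1MB) / 4KB = 16384 pages.
 */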
2337
2338 /*
2339 * Allocate audit structures, if needed:
2340 *
2341 * maxclaudit = (maxslabgrp * 1024 * 1024) / PAGE_SIZE
2342 *
2343 * This yields mcl_audit_t units, each one representing a page.
2344 */
2345 PE_parse_boot_argn("mbuf_debug", &mbuf_debug, sizeof(mbuf_debug));
2346 mbuf_debug |= mcache_getflags();
2347 if (mbuf_debug & MCF_DEBUG) {
2348 int l;
2349 mcl_audit_t *mclad;
2350 maxclaudit = ((maxslabgrp << MBSHIFT) >> PAGE_SHIFT);
2351 mclaudit = zalloc_permanent(maxclaudit * sizeof(*mclaudit),
2352 ZALIGN(mcl_audit_t));
2353 for (l = 0, mclad = mclaudit; l < maxclaudit; l++) {
2354 mclad[l].cl_audit = zalloc_permanent(NMBPG * sizeof(mcache_audit_t *),
2355 ZALIGN_PTR);
2356 }
2357
2358 mcl_audit_con_cache = mcache_create("mcl_audit_contents",
2359 AUDIT_CONTENTS_SIZE, sizeof(u_int64_t), 0, MCR_SLEEP);
2360 VERIFY(mcl_audit_con_cache != NULL);
2361 }
2362 mclverify = (mbuf_debug & MCF_VERIFY);
2363 mcltrace = (mbuf_debug & MCF_TRACE);
2364 mclfindleak = !(mbuf_debug & MCF_NOLEAKLOG);
2365 mclexpleak = mclfindleak && (mbuf_debug & MCF_EXPLEAKLOG);
2366
2367 /* Enable mbuf leak logging, with a lock to protect the tables */
2368
2369 mleak_activate();
2370
2371 /*
2372 * Allocate structure for per-CPU statistics that's aligned
2373 * on the CPU cache boundary; this code assumes that we never
2374 * uninitialize this framework, since the original address
2375 * before alignment is not saved.
2376 */
2377 ncpu = ml_wait_max_cpus();
2378
2379 /* Calculate the number of pages assigned to the cluster pool */
2380 mcl_pages = (nmbclusters << MCLSHIFT) / PAGE_SIZE;
2381 mcl_paddr = zalloc_permanent(mcl_pages * sizeof(ppnum_t),
2382 ZALIGN(ppnum_t));
2383
2384 /* Register with the I/O Bus mapper */
2385 mcl_paddr_base = IOMapperIOVMAlloc(mcl_pages);
2386
2387 embutl = (mbutl + (nmbclusters * MCLBYTES));
2388 VERIFY(((embutl - mbutl) % MBIGCLBYTES) == 0);
2389
2390 /* Prime up the freelist */
2391 PE_parse_boot_argn("initmcl", &initmcl, sizeof(initmcl));
2392 if (initmcl != 0) {
2393 initmcl >>= NCLPBGSHIFT; /* become a 4K unit */
2394 if (initmcl > m_maxlimit(MC_BIGCL)) {
2395 initmcl = m_maxlimit(MC_BIGCL);
2396 }
2397 }
2398 if (initmcl < m_minlimit(MC_BIGCL)) {
2399 initmcl = m_minlimit(MC_BIGCL);
2400 }
2401
2402 lck_mtx_lock(mbuf_mlock);
2403
2404 /*
2405 * For classes with non-zero minimum limits, populate their freelists
2406 * so that m_total(class) is at least m_minlimit(class).
2407 */
2408 VERIFY(m_total(MC_BIGCL) == 0 && m_minlimit(MC_BIGCL) != 0);
2409 freelist_populate(m_class(MC_BIGCL), initmcl, M_WAIT);
2410 VERIFY(m_total(MC_BIGCL) >= m_minlimit(MC_BIGCL));
2411 freelist_init(m_class(MC_CL));
2412 #else
2413 /*
2414 * We have yet to create the non-composite zones
2415 * and thus we haven't asked zalloc to allocate
2416 * anything yet, which means that at this point
2417 * m_total() is zero. Once we create the zones and
2418 * raise the reserve, m_total() will be calculated,
2419 * but until then just assume that we will have
2420 * at least the minimum limit allocated.
2421 */
2422 m_total(MC_BIGCL) = m_minlimit(MC_BIGCL);
2423 m_total(MC_CL) = m_minlimit(MC_CL);
2424 #endif /* CONFIG_MBUF_MCACHE */
2425
2426 for (m = 0; m < NELEM(mbuf_table); m++) {
2427 /* Make sure we didn't miss any */
2428 VERIFY(m_minlimit(m_class(m)) == 0 ||
2429 m_total(m_class(m)) >= m_minlimit(m_class(m)));
2430 }
2431
2432 #if CONFIG_MBUF_MCACHE
2433 lck_mtx_unlock(mbuf_mlock);
2434
2435 (void) kernel_thread_start((thread_continue_t)mbuf_worker_thread_init,
2436 NULL, &thread);
2437 thread_deallocate(thread);
2438
2439 ref_cache = mcache_create("mext_ref", sizeof(struct ext_ref),
2440 0, 0, MCR_SLEEP);
2441 #endif /* CONFIG_MBUF_MCACHE */
2442
2443 /* Create the cache for each class */
2444 for (m = 0; m < NELEM(mbuf_table); m++) {
2445 #if CONFIG_MBUF_MCACHE
2446 void *allocfunc, *freefunc, *auditfunc, *logfunc;
2447 u_int32_t flags;
2448
2449 flags = mbuf_debug;
2450 if (m_class(m) == MC_MBUF_CL || m_class(m) == MC_MBUF_BIGCL ||
2451 m_class(m) == MC_MBUF_16KCL) {
2452 allocfunc = mbuf_cslab_alloc;
2453 freefunc = mbuf_cslab_free;
2454 auditfunc = mbuf_cslab_audit;
2455 logfunc = mleak_logger;
2456 } else {
2457 allocfunc = mbuf_slab_alloc;
2458 freefunc = mbuf_slab_free;
2459 auditfunc = mbuf_slab_audit;
2460 logfunc = mleak_logger;
2461 }
2462
2463 /*
2464 * Disable per-CPU caches for jumbo classes if there
2465 * is no jumbo cluster pool available in the system.
2466 * The cache itself is still created (but will never
2467 * be populated) since it simplifies the code.
2468 */
2469 if ((m_class(m) == MC_MBUF_16KCL || m_class(m) == MC_16KCL) &&
2470 njcl == 0) {
2471 flags |= MCF_NOCPUCACHE;
2472 }
2473
2474 if (!mclfindleak) {
2475 flags |= MCF_NOLEAKLOG;
2476 }
2477
2478 m_cache(m) = mcache_create_ext(m_cname(m), m_maxsize(m),
2479 allocfunc, freefunc, auditfunc, logfunc, mbuf_slab_notify,
2480 (void *)(uintptr_t)m, flags, MCR_SLEEP);
2481 #else
2482 if (!MBUF_CLASS_COMPOSITE(m)) {
2483 zone_ref_t zone = zone_by_id(m_class_to_zid(m));
2484
2485 zone_set_exhaustible(zone, m_maxlimit(m), false);
2486 zone_raise_reserve(zone, m_minlimit(m));
2487 /*
2488 * Pretend that we have allocated m_total() items
2489 * at this point. zalloc will eventually do that
2490 * but it's an async operation.
2491 */
2492 m_total(m) = m_minlimit(m);
2493 }
2494 #endif /* CONFIG_MBUF_MCACHE */
2495 }
2496
2497 /*
2498 * Set the max limit on sb_max to be 1/16 th of the size of
2499 * memory allocated for mbuf clusters.
2500 */
2501 high_sb_max = (nmbclusters << (MCLSHIFT - 4));
2502 if (high_sb_max < sb_max) {
2503 /* sb_max is too large for this configuration, scale it down */
2504 if (high_sb_max > (1 << MBSHIFT)) {
2505 /* We have at least 16 MB of mbuf pool */
2506 sb_max = high_sb_max;
2507 } else if ((nmbclusters << MCLSHIFT) > (1 << MBSHIFT)) {
2508 /*
2509 * If we have more than 1MB of mbuf pool, cap the size of
2510 * the max sock buf at 1MB.
2511 */
2512 sb_max = high_sb_max = (1 << MBSHIFT);
2513 } else {
2514 sb_max = high_sb_max;
2515 }
2516 }
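/*
 * Worked example of the clamp above (hypothetical pool size, MCLSHIFT ==
 * 11 assumed): with nmbclusters = 32768 (a 64MB pool), high_sb_max =
 * 32768 << (11 - 4) = 4MB, i.e. 1/16th of the pool; a larger default
 * sb_max would be scaled down to that value.
 */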
2517
2518 #if CONFIG_MBUF_MCACHE
2519 /* allocate space for mbuf_dump_buf */
2520 mbuf_dump_buf = zalloc_permanent(MBUF_DUMP_BUF_SIZE, ZALIGN_NONE);
2521
2522 if (mbuf_debug & MCF_DEBUG) {
2523 printf("%s: MLEN %d, MHLEN %d\n", __func__,
2524 (int)_MLEN, (int)_MHLEN);
2525 }
2526 #else
2527 mbuf_defunct_tcall =
2528 thread_call_allocate_with_options(mbuf_watchdog_defunct,
2529 NULL,
2530 THREAD_CALL_PRIORITY_KERNEL,
2531 THREAD_CALL_OPTIONS_ONCE);
2532 mbuf_drain_tcall =
2533 thread_call_allocate_with_options(mbuf_watchdog_drain_composite,
2534 NULL,
2535 THREAD_CALL_PRIORITY_KERNEL,
2536 THREAD_CALL_OPTIONS_ONCE);
2537 #endif /* CONFIG_MBUF_MCACHE */
2538 printf("%s: done [%d MB total pool size, (%d/%d) split]\n", __func__,
2539 (nmbclusters << MCLSHIFT) >> MBSHIFT,
2540 (nclusters << MCLSHIFT) >> MBSHIFT,
2541 (njcl << MCLSHIFT) >> MBSHIFT);
2542 }
2543
2544 #if CONFIG_MBUF_MCACHE
2545 /*
2546 * Obtain a slab of object(s) from the class's freelist.
2547 */
2548 static mcache_obj_t *
2549 slab_alloc(mbuf_class_t class, int wait)
2550 {
2551 mcl_slab_t *sp;
2552 mcache_obj_t *buf;
2553
2554 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
2555
2556 /* This should always be NULL for us */
2557 VERIFY(m_cobjlist(class) == NULL);
2558
2559 /*
2560 * Treat composite objects as having a longer lifespan by using
2561 * a slab from the reverse direction, in the hope that this could
2562 * reduce the probability of fragmentation for slabs that hold
2563 * more than one buffer chunk (e.g. mbuf slabs). For other
2564 * slabs, this probably doesn't make much of a difference.
2565 */
2566 if ((class == MC_MBUF || class == MC_CL || class == MC_BIGCL)
2567 && (wait & MCR_COMP)) {
2568 sp = (mcl_slab_t *)TAILQ_LAST(&m_slablist(class), mcl_slhead);
2569 } else {
2570 sp = (mcl_slab_t *)TAILQ_FIRST(&m_slablist(class));
2571 }
2572
2573 if (sp == NULL) {
2574 VERIFY(m_infree(class) == 0 && m_slab_cnt(class) == 0);
2575 /* The slab list for this class is empty */
2576 return NULL;
2577 }
2578
2579 VERIFY(m_infree(class) > 0);
2580 VERIFY(!slab_is_detached(sp));
2581 VERIFY(sp->sl_class == class &&
2582 (sp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) == SLF_MAPPED);
2583 buf = sp->sl_head;
2584 VERIFY(slab_inrange(sp, buf) && sp == slab_get(buf));
2585 sp->sl_head = buf->obj_next;
2586 /* Increment slab reference */
2587 sp->sl_refcnt++;
2588
2589 VERIFY(sp->sl_head != NULL || sp->sl_refcnt == sp->sl_chunks);
2590
2591 if (sp->sl_head != NULL && !slab_inrange(sp, sp->sl_head)) {
2592 slab_nextptr_panic(sp, sp->sl_head);
2593 /* In case sl_head is in the map but not in the slab */
2594 VERIFY(slab_inrange(sp, sp->sl_head));
2595 /* NOTREACHED */
2596 }
2597
2598 if (mclaudit != NULL) {
2599 mcache_audit_t *mca = mcl_audit_buf2mca(class, buf);
2600 mca->mca_uflags = 0;
2601 /* Save contents on mbuf objects only */
2602 if (class == MC_MBUF) {
2603 mca->mca_uflags |= MB_SCVALID;
2604 }
2605 }
2606
2607 if (class == MC_CL) {
2608 mbstat.m_clfree = (--m_infree(MC_CL)) + m_infree(MC_MBUF_CL);
2609 /*
2610 * A 2K cluster slab can have at most NCLPG references.
2611 */
2612 VERIFY(sp->sl_refcnt >= 1 && sp->sl_refcnt <= NCLPG &&
2613 sp->sl_chunks == NCLPG && sp->sl_len == PAGE_SIZE);
2614 VERIFY(sp->sl_refcnt < NCLPG || sp->sl_head == NULL);
2615 } else if (class == MC_BIGCL) {
2616 mbstat.m_bigclfree = (--m_infree(MC_BIGCL)) +
2617 m_infree(MC_MBUF_BIGCL);
2618 /*
2619 * A 4K cluster slab can have NBCLPG references.
2620 */
2621 VERIFY(sp->sl_refcnt >= 1 && sp->sl_chunks == NBCLPG &&
2622 sp->sl_len == PAGE_SIZE &&
2623 (sp->sl_refcnt < NBCLPG || sp->sl_head == NULL));
2624 } else if (class == MC_16KCL) {
2625 mcl_slab_t *nsp;
2626 int k;
2627
2628 --m_infree(MC_16KCL);
2629 VERIFY(sp->sl_refcnt == 1 && sp->sl_chunks == 1 &&
2630 sp->sl_len == m_maxsize(class) && sp->sl_head == NULL);
2631 /*
2632 * Increment 2nd-Nth slab reference, where N is NSLABSP16KB.
2633 * A 16KB big cluster takes NSLABSP16KB slabs, each having at
2634 * most 1 reference.
2635 */
2636 for (nsp = sp, k = 1; k < NSLABSP16KB; k++) {
2637 nsp = nsp->sl_next;
2638 /* Next slab must already be present */
2639 VERIFY(nsp != NULL);
2640 nsp->sl_refcnt++;
2641 VERIFY(!slab_is_detached(nsp));
2642 VERIFY(nsp->sl_class == MC_16KCL &&
2643 nsp->sl_flags == (SLF_MAPPED | SLF_PARTIAL) &&
2644 nsp->sl_refcnt == 1 && nsp->sl_chunks == 0 &&
2645 nsp->sl_len == 0 && nsp->sl_base == sp->sl_base &&
2646 nsp->sl_head == NULL);
2647 }
2648 } else {
2649 VERIFY(class == MC_MBUF);
2650 --m_infree(MC_MBUF);
2651 /*
2652 * If auditing is turned on, this check is
2653 * deferred until later in mbuf_slab_audit().
2654 */
2655 if (mclaudit == NULL) {
2656 _MCHECK((struct mbuf *)buf);
2657 }
2658 /*
2659 * Since we have incremented the reference count above,
2660 * an mbuf slab (formerly a 4KB cluster slab that was cut
2661 * up into mbufs) must have a reference count between 1
2662 * and NMBPG at this point.
2663 */
2664 VERIFY(sp->sl_refcnt >= 1 && sp->sl_refcnt <= NMBPG &&
2665 sp->sl_chunks == NMBPG &&
2666 sp->sl_len == PAGE_SIZE);
2667 VERIFY(sp->sl_refcnt < NMBPG || sp->sl_head == NULL);
2668 }
2669
2670 /* If empty, remove this slab from the class's freelist */
2671 if (sp->sl_head == NULL) {
2672 VERIFY(class != MC_MBUF || sp->sl_refcnt == NMBPG);
2673 VERIFY(class != MC_CL || sp->sl_refcnt == NCLPG);
2674 VERIFY(class != MC_BIGCL || sp->sl_refcnt == NBCLPG);
2675 slab_remove(sp, class);
2676 }
2677
2678 return buf;
2679 }
2680
2681 /*
2682 * Place a slab of object(s) back into a class's slab list.
2683 */
2684 static void
2685 slab_free(mbuf_class_t class, mcache_obj_t *buf)
2686 {
2687 mcl_slab_t *sp;
2688 boolean_t reinit_supercl = false;
2689 mbuf_class_t super_class;
2690
2691 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
2692
2693 VERIFY(class != MC_16KCL || njcl > 0);
2694 VERIFY(buf->obj_next == NULL);
2695
2696 /*
2697 * Synchronize with m_clalloc, which reads m_total while we are
2698 * modifying it here.
2699 */
2700 while (mb_clalloc_busy) {
2701 mb_clalloc_waiters++;
2702 (void) msleep(mb_clalloc_waitchan, mbuf_mlock,
2703 (PZERO - 1), "m_clalloc", NULL);
2704 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
2705 }
2706
2707 /* We are busy now; tell everyone else to go away */
2708 mb_clalloc_busy = TRUE;
2709
2710 sp = slab_get(buf);
2711 VERIFY(sp->sl_class == class && slab_inrange(sp, buf) &&
2712 (sp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) == SLF_MAPPED);
2713
2714 /* Decrement slab reference */
2715 sp->sl_refcnt--;
2716
2717 if (class == MC_CL) {
2718 VERIFY(IS_P2ALIGNED(buf, MCLBYTES));
2719 /*
2720 * A slab that has been split for 2KB clusters can have
2721 * at most 1 outstanding reference at this point.
2722 */
2723 VERIFY(sp->sl_refcnt >= 0 && sp->sl_refcnt <= (NCLPG - 1) &&
2724 sp->sl_chunks == NCLPG && sp->sl_len == PAGE_SIZE);
2725 VERIFY(sp->sl_refcnt < (NCLPG - 1) ||
2726 (slab_is_detached(sp) && sp->sl_head == NULL));
2727 } else if (class == MC_BIGCL) {
2728 VERIFY(IS_P2ALIGNED(buf, MBIGCLBYTES));
2729
2730 /* A 4KB cluster slab can have NBCLPG references at most */
2731 VERIFY(sp->sl_refcnt >= 0 && sp->sl_chunks == NBCLPG);
2732 VERIFY(sp->sl_refcnt < (NBCLPG - 1) ||
2733 (slab_is_detached(sp) && sp->sl_head == NULL));
2734 } else if (class == MC_16KCL) {
2735 mcl_slab_t *nsp;
2736 int k;
2737 /*
2738 * A 16KB cluster takes NSLABSP16KB slabs, all of which must
2739 * now have a reference count of 0.
2740 */
2741 VERIFY(IS_P2ALIGNED(buf, PAGE_SIZE));
2742 VERIFY(sp->sl_refcnt == 0 && sp->sl_chunks == 1 &&
2743 sp->sl_len == m_maxsize(class) && sp->sl_head == NULL);
2744 VERIFY(slab_is_detached(sp));
2745 for (nsp = sp, k = 1; k < NSLABSP16KB; k++) {
2746 nsp = nsp->sl_next;
2747 /* Next slab must already be present */
2748 VERIFY(nsp != NULL);
2749 nsp->sl_refcnt--;
2750 VERIFY(slab_is_detached(nsp));
2751 VERIFY(nsp->sl_class == MC_16KCL &&
2752 (nsp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) &&
2753 nsp->sl_refcnt == 0 && nsp->sl_chunks == 0 &&
2754 nsp->sl_len == 0 && nsp->sl_base == sp->sl_base &&
2755 nsp->sl_head == NULL);
2756 }
2757 } else {
2758 /*
2759 * A slab that has been split for mbufs has at most
2760 * NMBPG reference counts. Since we have decremented
2761 * one reference above, it must now be between 0 and
2762 * NMBPG-1.
2763 */
2764 VERIFY(class == MC_MBUF);
2765 VERIFY(sp->sl_refcnt >= 0 &&
2766 sp->sl_refcnt <= (NMBPG - 1) &&
2767 sp->sl_chunks == NMBPG &&
2768 sp->sl_len == PAGE_SIZE);
2769 VERIFY(sp->sl_refcnt < (NMBPG - 1) ||
2770 (slab_is_detached(sp) && sp->sl_head == NULL));
2771 }
2772
2773 /*
2774 * When auditing is enabled, ensure that the buffer still
2775 * contains the free pattern. Otherwise it got corrupted
2776 * while at the CPU cache layer.
2777 */
2778 if (mclaudit != NULL) {
2779 mcache_audit_t *mca = mcl_audit_buf2mca(class, buf);
2780 if (mclverify) {
2781 mcache_audit_free_verify(mca, buf, 0,
2782 m_maxsize(class));
2783 }
2784 mca->mca_uflags &= ~MB_SCVALID;
2785 }
2786
2787 if (class == MC_CL) {
2788 mbstat.m_clfree = (++m_infree(MC_CL)) + m_infree(MC_MBUF_CL);
2789 buf->obj_next = sp->sl_head;
2790 } else if (class == MC_BIGCL) {
2791 mbstat.m_bigclfree = (++m_infree(MC_BIGCL)) +
2792 m_infree(MC_MBUF_BIGCL);
2793 buf->obj_next = sp->sl_head;
2794 } else if (class == MC_16KCL) {
2795 ++m_infree(MC_16KCL);
2796 } else {
2797 ++m_infree(MC_MBUF);
2798 buf->obj_next = sp->sl_head;
2799 }
2800 sp->sl_head = buf;
2801
2802 /*
2803 * If a slab has been split into one that holds 2KB clusters
2804 * or one that holds mbufs, turn it back into one that holds a
2805 * 4KB or 16KB cluster, depending on the page size.
2806 */
2807 if (m_maxsize(MC_BIGCL) == PAGE_SIZE) {
2808 super_class = MC_BIGCL;
2809 } else {
2810 VERIFY(PAGE_SIZE == m_maxsize(MC_16KCL));
2811 super_class = MC_16KCL;
2812 }
2813 if (class == MC_MBUF && sp->sl_refcnt == 0 &&
2814 m_total(class) >= (m_minlimit(class) + NMBPG) &&
2815 m_total(super_class) < m_maxlimit(super_class)) {
2816 int i = NMBPG;
2817
2818 m_total(MC_MBUF) -= NMBPG;
2819 mbstat.m_mbufs = m_total(MC_MBUF);
2820 m_infree(MC_MBUF) -= NMBPG;
2821 mtype_stat_add(MT_FREE, -((unsigned)NMBPG));
2822
2823 while (i--) {
2824 struct mbuf *m = sp->sl_head;
2825 VERIFY(m != NULL);
2826 sp->sl_head = m->m_next;
2827 m->m_next = NULL;
2828 }
2829 reinit_supercl = true;
2830 } else if (class == MC_CL && sp->sl_refcnt == 0 &&
2831 m_total(class) >= (m_minlimit(class) + NCLPG) &&
2832 m_total(super_class) < m_maxlimit(super_class)) {
2833 int i = NCLPG;
2834
2835 m_total(MC_CL) -= NCLPG;
2836 mbstat.m_clusters = m_total(MC_CL);
2837 m_infree(MC_CL) -= NCLPG;
2838
2839 while (i--) {
2840 union mcluster *c = sp->sl_head;
2841 VERIFY(c != NULL);
2842 sp->sl_head = c->mcl_next;
2843 c->mcl_next = NULL;
2844 }
2845 reinit_supercl = true;
2846 } else if (class == MC_BIGCL && super_class != MC_BIGCL &&
2847 sp->sl_refcnt == 0 &&
2848 m_total(class) >= (m_minlimit(class) + NBCLPG) &&
2849 m_total(super_class) < m_maxlimit(super_class)) {
2850 int i = NBCLPG;
2851
2852 VERIFY(super_class == MC_16KCL);
2853 m_total(MC_BIGCL) -= NBCLPG;
2854 mbstat.m_bigclusters = m_total(MC_BIGCL);
2855 m_infree(MC_BIGCL) -= NBCLPG;
2856
2857 while (i--) {
2858 union mbigcluster *bc = sp->sl_head;
2859 VERIFY(bc != NULL);
2860 sp->sl_head = bc->mbc_next;
2861 bc->mbc_next = NULL;
2862 }
2863 reinit_supercl = true;
2864 }
2865
2866 if (reinit_supercl) {
2867 VERIFY(sp->sl_head == NULL);
2868 VERIFY(m_total(class) >= m_minlimit(class));
2869 slab_remove(sp, class);
2870
2871 /* Reinitialize it as a cluster for the super class */
2872 m_total(super_class)++;
2873 m_infree(super_class)++;
2874 VERIFY(sp->sl_flags == (SLF_MAPPED | SLF_DETACHED) &&
2875 sp->sl_len == PAGE_SIZE && sp->sl_refcnt == 0);
2876
2877 slab_init(sp, super_class, SLF_MAPPED, sp->sl_base,
2878 sp->sl_base, PAGE_SIZE, 0, 1);
2879 if (mclverify) {
2880 mcache_set_pattern(MCACHE_FREE_PATTERN,
2881 (caddr_t)sp->sl_base, sp->sl_len);
2882 }
2883 ((mcache_obj_t *)(sp->sl_base))->obj_next = NULL;
2884
2885 if (super_class == MC_BIGCL) {
2886 mbstat.m_bigclusters = m_total(MC_BIGCL);
2887 mbstat.m_bigclfree = m_infree(MC_BIGCL) +
2888 m_infree(MC_MBUF_BIGCL);
2889 }
2890
2891 VERIFY(slab_is_detached(sp));
2892 VERIFY(m_total(super_class) <= m_maxlimit(super_class));
2893
2894 /* And finally switch class */
2895 class = super_class;
2896 }
2897
2898 /* Reinsert the slab to the class's slab list */
2899 if (slab_is_detached(sp)) {
2900 slab_insert(sp, class);
2901 }
2902
2903 /* We're done; let others enter */
2904 mb_clalloc_busy = FALSE;
2905 if (mb_clalloc_waiters > 0) {
2906 mb_clalloc_waiters = 0;
2907 wakeup(mb_clalloc_waitchan);
2908 }
2909 }
2910
2911 /*
2912 * Common allocator for rudimentary objects called by the CPU cache layer
2913 * during an allocation request whenever there is no available element in the
2914 * bucket layer. It returns one or more elements from the appropriate global
2915 * freelist. If the freelist is empty, it will attempt to populate it and
2916 * retry the allocation.
2917 */
2918 static unsigned int
2919 mbuf_slab_alloc(void *arg, mcache_obj_t ***plist, unsigned int num, int wait)
2920 {
2921 mbuf_class_t class = (mbuf_class_t)arg;
2922 unsigned int need = num;
2923 mcache_obj_t **list = *plist;
2924
2925 ASSERT(MBUF_CLASS_VALID(class) && !MBUF_CLASS_COMPOSITE(class));
2926 ASSERT(need > 0);
2927
2928 lck_mtx_lock(mbuf_mlock);
2929
2930 for (;;) {
2931 if ((*list = slab_alloc(class, wait)) != NULL) {
2932 (*list)->obj_next = NULL;
2933 list = *plist = &(*list)->obj_next;
2934
2935 if (--need == 0) {
2936 /*
2937 * If the number of elements in freelist has
2938 * dropped below low watermark, asynchronously
2939 * populate the freelist now rather than doing
2940 * it later when we run out of elements.
2941 */
2942 if (!mbuf_cached_above(class, wait) &&
2943 m_infree(class) < (m_total(class) >> 5)) {
2944 (void) freelist_populate(class, 1,
2945 M_DONTWAIT);
2946 }
2947 break;
2948 }
2949 } else {
2950 VERIFY(m_infree(class) == 0 || class == MC_CL);
2951
2952 (void) freelist_populate(class, 1,
2953 (wait & MCR_NOSLEEP) ? M_DONTWAIT : M_WAIT);
2954
2955 if (m_infree(class) > 0) {
2956 continue;
2957 }
2958
2959 /* Check if there's anything at the cache layer */
2960 if (mbuf_cached_above(class, wait)) {
2961 break;
2962 }
2963
2964 /* watchdog checkpoint */
2965 mbuf_watchdog();
2966
2967 /* We have nothing and cannot block; give up */
2968 if (wait & MCR_NOSLEEP) {
2969 if (!(wait & MCR_TRYHARD)) {
2970 m_fail_cnt(class)++;
2971 mbstat.m_drops++;
2972 break;
2973 }
2974 }
2975
2976 /*
2977 * If the freelist is still empty and the caller is
2978 * willing to be blocked, sleep on the wait channel
2979 * until an element is available. Otherwise, if
2980 * MCR_TRYHARD is set, do our best to satisfy the
2981 * request without having to go to sleep.
2982 */
2983 if (mbuf_worker_ready &&
2984 mbuf_sleep(class, need, wait)) {
2985 break;
2986 }
2987
2988 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
2989 }
2990 }
2991
2992 m_alloc_cnt(class) += num - need;
2993 lck_mtx_unlock(mbuf_mlock);
2994
2995 return num - need;
2996 }
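/*
 * Example of the low-watermark test above (hypothetical numbers): the
 * asynchronous freelist refill triggers when m_infree(class) drops below
 * m_total(class) >> 5, i.e. below 1/32nd of the class total; with
 * m_total = 6400 that means fewer than 200 free objects remaining.
 */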
2997
2998 /*
2999 * Common de-allocator for rudimentary objects called by the CPU cache
3000 * layer when one or more elements need to be returned to the appropriate
3001 * global freelist.
3002 */
3003 static void
3004 mbuf_slab_free(void *arg, mcache_obj_t *list, __unused int purged)
3005 {
3006 mbuf_class_t class = (mbuf_class_t)arg;
3007 mcache_obj_t *nlist;
3008 unsigned int num = 0;
3009 int w;
3010
3011 ASSERT(MBUF_CLASS_VALID(class) && !MBUF_CLASS_COMPOSITE(class));
3012
3013 lck_mtx_lock(mbuf_mlock);
3014
3015 for (;;) {
3016 nlist = list->obj_next;
3017 list->obj_next = NULL;
3018 slab_free(class, list);
3019 ++num;
3020 if ((list = nlist) == NULL) {
3021 break;
3022 }
3023 }
3024 m_free_cnt(class) += num;
3025
3026 if ((w = mb_waiters) > 0) {
3027 mb_waiters = 0;
3028 }
3029 if (w) {
3030 mbwdog_logger("waking up all threads");
3031 }
3032 lck_mtx_unlock(mbuf_mlock);
3033
3034 if (w != 0) {
3035 wakeup(mb_waitchan);
3036 }
3037 }
3038
3039 /*
3040 * Common auditor for rudimentary objects called by the CPU cache layer
3041 * during an allocation or free request. For the former, this is called
3042 * after the objects are obtained from either the bucket or slab layer
3043 * and before they are returned to the caller. For the latter, this is
3044 * called immediately during free and before placing the objects into
3045 * the bucket or slab layer.
3046 */
3047 static void
3048 mbuf_slab_audit(void *arg, mcache_obj_t *list, boolean_t alloc)
3049 {
3050 mbuf_class_t class = (mbuf_class_t)arg;
3051 mcache_audit_t *mca;
3052
3053 ASSERT(MBUF_CLASS_VALID(class) && !MBUF_CLASS_COMPOSITE(class));
3054
3055 while (list != NULL) {
3056 lck_mtx_lock(mbuf_mlock);
3057 mca = mcl_audit_buf2mca(class, list);
3058
3059 /* Do the sanity checks */
3060 if (class == MC_MBUF) {
3061 mcl_audit_mbuf(mca, list, FALSE, alloc);
3062 ASSERT(mca->mca_uflags & MB_SCVALID);
3063 } else {
3064 mcl_audit_cluster(mca, list, m_maxsize(class),
3065 alloc, TRUE);
3066 ASSERT(!(mca->mca_uflags & MB_SCVALID));
3067 }
3068 /* Record this transaction */
3069 if (mcltrace) {
3070 mcache_buffer_log(mca, list, m_cache(class), &mb_start);
3071 }
3072
3073 if (alloc) {
3074 mca->mca_uflags |= MB_INUSE;
3075 } else {
3076 mca->mca_uflags &= ~MB_INUSE;
3077 }
3078 /* Unpair the object (unconditionally) */
3079 mca->mca_uptr = NULL;
3080 lck_mtx_unlock(mbuf_mlock);
3081
3082 list = list->obj_next;
3083 }
3084 }
3085
3086 /*
3087 * Common notify routine for all caches. It is called by mcache when
3088 * one or more objects get freed. We use this indication to trigger
3089 * the wakeup of any sleeping threads so that they can retry their
3090 * allocation requests.
3091 */
3092 static void
3093 mbuf_slab_notify(void *arg, u_int32_t reason)
3094 {
3095 mbuf_class_t class = (mbuf_class_t)arg;
3096 int w;
3097
3098 ASSERT(MBUF_CLASS_VALID(class));
3099
3100 if (reason != MCN_RETRYALLOC) {
3101 return;
3102 }
3103
3104 lck_mtx_lock(mbuf_mlock);
3105 if ((w = mb_waiters) > 0) {
3106 m_notified(class)++;
3107 mb_waiters = 0;
3108 }
3109 if (w) {
3110 mbwdog_logger("waking up all threads");
3111 }
3112 lck_mtx_unlock(mbuf_mlock);
3113
3114 if (w != 0) {
3115 wakeup(mb_waitchan);
3116 }
3117 }
3118
3119 /*
3120 * Obtain object(s) from the composite class's freelist.
3121 */
3122 static unsigned int
3123 cslab_alloc(mbuf_class_t class, mcache_obj_t ***plist, unsigned int num)
3124 {
3125 unsigned int need = num;
3126 mcl_slab_t *sp, *clsp, *nsp;
3127 struct mbuf *m;
3128 mcache_obj_t **list = *plist;
3129 void *cl;
3130
3131 VERIFY(need > 0);
3132 VERIFY(class != MC_MBUF_16KCL || njcl > 0);
3133 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
3134
3135 /* Get what we can from the freelist */
3136 while ((*list = m_cobjlist(class)) != NULL) {
3137 MRANGE(*list);
3138
3139 m = (struct mbuf *)*list;
3140 sp = slab_get(m);
3141 cl = m->m_ext.ext_buf;
3142 clsp = slab_get(cl);
3143 VERIFY(m->m_flags == M_EXT && cl != NULL);
3144 VERIFY(m_get_rfa(m) != NULL && MBUF_IS_COMPOSITE(m));
3145
3146 if (class == MC_MBUF_CL) {
3147 VERIFY(clsp->sl_refcnt >= 1 &&
3148 clsp->sl_refcnt <= NCLPG);
3149 } else {
3150 VERIFY(clsp->sl_refcnt >= 1 &&
3151 clsp->sl_refcnt <= NBCLPG);
3152 }
3153
3154 if (class == MC_MBUF_16KCL) {
3155 int k;
3156 for (nsp = clsp, k = 1; k < NSLABSP16KB; k++) {
3157 nsp = nsp->sl_next;
3158 /* Next slab must already be present */
3159 VERIFY(nsp != NULL);
3160 VERIFY(nsp->sl_refcnt == 1);
3161 }
3162 }
3163
3164 if ((m_cobjlist(class) = (*list)->obj_next) != NULL &&
3165 !MBUF_IN_MAP(m_cobjlist(class))) {
3166 slab_nextptr_panic(sp, m_cobjlist(class));
3167 /* NOTREACHED */
3168 }
3169 (*list)->obj_next = NULL;
3170 list = *plist = &(*list)->obj_next;
3171
3172 if (--need == 0) {
3173 break;
3174 }
3175 }
3176 m_infree(class) -= (num - need);
3177
3178 return num - need;
3179 }
3180
3181 /*
3182 * Place object(s) back into a composite class's freelist.
3183 */
3184 static unsigned int
3185 cslab_free(mbuf_class_t class, mcache_obj_t *list, int purged)
3186 {
3187 mcache_obj_t *o, *tail;
3188 unsigned int num = 0;
3189 struct mbuf *m, *ms;
3190 mcache_audit_t *mca = NULL;
3191 mcache_obj_t *ref_list = NULL;
3192 mcl_slab_t *clsp, *nsp;
3193 void *cl;
3194 mbuf_class_t cl_class;
3195
3196 ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class));
3197 VERIFY(class != MC_MBUF_16KCL || njcl > 0);
3198 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
3199
3200 if (class == MC_MBUF_CL) {
3201 cl_class = MC_CL;
3202 } else if (class == MC_MBUF_BIGCL) {
3203 cl_class = MC_BIGCL;
3204 } else {
3205 VERIFY(class == MC_MBUF_16KCL);
3206 cl_class = MC_16KCL;
3207 }
3208
3209 o = tail = list;
3210
3211 while ((m = ms = (struct mbuf *)o) != NULL) {
3212 mcache_obj_t *rfa, *nexto = o->obj_next;
3213
3214 /* Do the mbuf sanity checks */
3215 if (mclaudit != NULL) {
3216 mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m);
3217 if (mclverify) {
3218 mcache_audit_free_verify(mca, m, 0,
3219 m_maxsize(MC_MBUF));
3220 }
3221 ms = MCA_SAVED_MBUF_PTR(mca);
3222 }
3223
3224 /* Do the cluster sanity checks */
3225 cl = ms->m_ext.ext_buf;
3226 clsp = slab_get(cl);
3227 if (mclverify) {
3228 size_t size = m_maxsize(cl_class);
3229 mcache_audit_free_verify(mcl_audit_buf2mca(cl_class,
3230 (mcache_obj_t *)cl), cl, 0, size);
3231 }
3232 VERIFY(ms->m_type == MT_FREE);
3233 VERIFY(ms->m_flags == M_EXT);
3234 VERIFY(m_get_rfa(ms) != NULL && MBUF_IS_COMPOSITE(ms));
3235 if (cl_class == MC_CL) {
3236 VERIFY(clsp->sl_refcnt >= 1 &&
3237 clsp->sl_refcnt <= NCLPG);
3238 } else {
3239 VERIFY(clsp->sl_refcnt >= 1 &&
3240 clsp->sl_refcnt <= NBCLPG);
3241 }
3242 if (cl_class == MC_16KCL) {
3243 int k;
3244 for (nsp = clsp, k = 1; k < NSLABSP16KB; k++) {
3245 nsp = nsp->sl_next;
3246 /* Next slab must already be present */
3247 VERIFY(nsp != NULL);
3248 VERIFY(nsp->sl_refcnt == 1);
3249 }
3250 }
3251
3252 /*
3253 * If we're asked to purge, restore the actual mbuf using
3254 * contents of the shadow structure (if auditing is enabled)
3255 * and clear EXTF_COMPOSITE flag from the mbuf, as we are
3256 * about to free it and the attached cluster into their caches.
3257 */
3258 if (purged) {
3259 /* Restore constructed mbuf fields */
3260 if (mclaudit != NULL) {
3261 mcl_audit_restore_mbuf(m, mca, TRUE);
3262 }
3263
3264 MEXT_MINREF(m) = 0;
3265 MEXT_REF(m) = 0;
3266 MEXT_PREF(m) = 0;
3267 MEXT_FLAGS(m) = 0;
3268 MEXT_PRIV(m) = 0;
3269 MEXT_PMBUF(m) = NULL;
3270
3271 rfa = (mcache_obj_t *)(void *)m_get_rfa(m);
3272 m_set_ext(m, NULL, NULL, NULL);
3273 rfa->obj_next = ref_list;
3274 ref_list = rfa;
3275
3276 m->m_type = MT_FREE;
3277 m->m_flags = m->m_len = 0;
3278 m->m_next = m->m_nextpkt = NULL;
3279
3280 /* Save mbuf fields and make auditing happy */
3281 if (mclaudit != NULL) {
3282 mcl_audit_mbuf(mca, o, FALSE, FALSE);
3283 }
3284
3285 VERIFY(m_total(class) > 0);
3286 m_total(class)--;
3287
3288 /* Free the mbuf */
3289 o->obj_next = NULL;
3290 slab_free(MC_MBUF, o);
3291
3292 /* And free the cluster */
3293 ((mcache_obj_t *)cl)->obj_next = NULL;
3294 if (class == MC_MBUF_CL) {
3295 slab_free(MC_CL, cl);
3296 } else if (class == MC_MBUF_BIGCL) {
3297 slab_free(MC_BIGCL, cl);
3298 } else {
3299 slab_free(MC_16KCL, cl);
3300 }
3301 }
3302
3303 ++num;
3304 tail = o;
3305 o = nexto;
3306 }
3307
3308 if (!purged) {
3309 tail->obj_next = m_cobjlist(class);
3310 m_cobjlist(class) = list;
3311 m_infree(class) += num;
3312 } else if (ref_list != NULL) {
3313 mcache_free_ext(ref_cache, ref_list);
3314 }
3315
3316 return num;
3317 }
3318
3319 /*
3320 * Common allocator for composite objects called by the CPU cache layer
3321 * during an allocation request whenever there is no available element in
3322 * the bucket layer. It returns one or more composite elements from the
3323 * appropriate global freelist. If the freelist is empty, it will attempt
3324 * to obtain the rudimentary objects from their caches and construct them
3325 * into composite mbuf + cluster objects.
3326 */
3327 static unsigned int
3328 mbuf_cslab_alloc(void *arg, mcache_obj_t ***plist, unsigned int needed,
3329 int wait)
3330 {
3331 mbuf_class_t class = (mbuf_class_t)arg;
3332 mbuf_class_t cl_class = 0;
3333 unsigned int num = 0, cnum = 0, want = needed;
3334 mcache_obj_t *ref_list = NULL;
3335 mcache_obj_t *mp_list = NULL;
3336 mcache_obj_t *clp_list = NULL;
3337 mcache_obj_t **list;
3338 struct ext_ref *rfa;
3339 struct mbuf *m;
3340 void *cl;
3341
3342 ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class));
3343 ASSERT(needed > 0);
3344
3345 VERIFY(class != MC_MBUF_16KCL || njcl > 0);
3346
3347 /* There should not be any slab for this class */
3348 VERIFY(m_slab_cnt(class) == 0 &&
3349 m_slablist(class).tqh_first == NULL &&
3350 m_slablist(class).tqh_last == NULL);
3351
3352 lck_mtx_lock(mbuf_mlock);
3353
3354 /* Try using the freelist first */
3355 num = cslab_alloc(class, plist, needed);
3356 list = *plist;
3357 if (num == needed) {
3358 m_alloc_cnt(class) += num;
3359 lck_mtx_unlock(mbuf_mlock);
3360 return needed;
3361 }
3362
3363 lck_mtx_unlock(mbuf_mlock);
3364
3365 /*
3366 * We could not satisfy the request using the freelist alone;
3367 * allocate from the appropriate rudimentary caches and use
3368 * whatever we can get to construct the composite objects.
3369 */
3370 needed -= num;
3371
3372 /*
3373 * Mark these allocation requests as coming from a composite cache.
3374 * Also, if the caller is willing to be blocked, mark the request
3375 * with MCR_FAILOK such that we don't end up sleeping at the mbuf
3376 * slab layer waiting for the individual object when one or more
3377 * of the already-constructed composite objects are available.
3378 */
3379 wait |= MCR_COMP;
3380 if (!(wait & MCR_NOSLEEP)) {
3381 wait |= MCR_FAILOK;
3382 }
3383
3384 /* allocate mbufs */
3385 needed = mcache_alloc_ext(m_cache(MC_MBUF), &mp_list, needed, wait);
3386 if (needed == 0) {
3387 ASSERT(mp_list == NULL);
3388 goto fail;
3389 }
3390
3391 /* allocate clusters */
3392 if (class == MC_MBUF_CL) {
3393 cl_class = MC_CL;
3394 } else if (class == MC_MBUF_BIGCL) {
3395 cl_class = MC_BIGCL;
3396 } else {
3397 VERIFY(class == MC_MBUF_16KCL);
3398 cl_class = MC_16KCL;
3399 }
3400 needed = mcache_alloc_ext(m_cache(cl_class), &clp_list, needed, wait);
3401 if (needed == 0) {
3402 ASSERT(clp_list == NULL);
3403 goto fail;
3404 }
3405
3406 needed = mcache_alloc_ext(ref_cache, &ref_list, needed, wait);
3407 if (needed == 0) {
3408 ASSERT(ref_list == NULL);
3409 goto fail;
3410 }
3411
3412 /*
3413 * By this time "needed" is MIN(mbuf, cluster, ref). Any left
3414 * overs will get freed accordingly before we return to caller.
3415 */
3416 for (cnum = 0; cnum < needed; cnum++) {
3417 struct mbuf *ms;
3418
3419 m = ms = (struct mbuf *)mp_list;
3420 mp_list = mp_list->obj_next;
3421
3422 cl = clp_list;
3423 clp_list = clp_list->obj_next;
3424 ((mcache_obj_t *)cl)->obj_next = NULL;
3425
3426 rfa = (struct ext_ref *)ref_list;
3427 ref_list = ref_list->obj_next;
3428 ((mcache_obj_t *)(void *)rfa)->obj_next = NULL;
3429
3430 /*
3431 * If auditing is enabled, construct the shadow mbuf
3432 * in the audit structure instead of in the actual one.
3433 * mbuf_cslab_audit() will take care of restoring the
3434 * contents after the integrity check.
3435 */
3436 if (mclaudit != NULL) {
3437 mcache_audit_t *mca, *cl_mca;
3438
3439 lck_mtx_lock(mbuf_mlock);
3440 mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m);
3441 ms = MCA_SAVED_MBUF_PTR(mca);
3442 cl_mca = mcl_audit_buf2mca(cl_class,
3443 (mcache_obj_t *)cl);
3444
3445 /*
3446 * Pair them up. Note that this is done at the time
3447 * the mbuf+cluster objects are constructed. This
3448 * information should be treated as a "best effort"
3449 * debugging hint, since more than one mbuf can refer
3450 * to a cluster. In that case, the cluster might not
3451 * be freed along with the mbuf it was paired with.
3452 */
3453 mca->mca_uptr = cl_mca;
3454 cl_mca->mca_uptr = mca;
3455
3456 ASSERT(mca->mca_uflags & MB_SCVALID);
3457 ASSERT(!(cl_mca->mca_uflags & MB_SCVALID));
3458 lck_mtx_unlock(mbuf_mlock);
3459
3460 /* Technically, they are in the freelist */
3461 if (mclverify) {
3462 size_t size;
3463
3464 mcache_set_pattern(MCACHE_FREE_PATTERN, m,
3465 m_maxsize(MC_MBUF));
3466
3467 if (class == MC_MBUF_CL) {
3468 size = m_maxsize(MC_CL);
3469 } else if (class == MC_MBUF_BIGCL) {
3470 size = m_maxsize(MC_BIGCL);
3471 } else {
3472 size = m_maxsize(MC_16KCL);
3473 }
3474
3475 mcache_set_pattern(MCACHE_FREE_PATTERN, cl,
3476 size);
3477 }
3478 }
3479
3480 MBUF_INIT(ms, 0, MT_FREE);
3481 if (class == MC_MBUF_16KCL) {
3482 MBUF_16KCL_INIT(ms, cl, rfa, 0, EXTF_COMPOSITE);
3483 } else if (class == MC_MBUF_BIGCL) {
3484 MBUF_BIGCL_INIT(ms, cl, rfa, 0, EXTF_COMPOSITE);
3485 } else {
3486 MBUF_CL_INIT(ms, cl, rfa, 0, EXTF_COMPOSITE);
3487 }
3488 VERIFY(ms->m_flags == M_EXT);
3489 VERIFY(m_get_rfa(ms) != NULL && MBUF_IS_COMPOSITE(ms));
3490
3491 *list = (mcache_obj_t *)m;
3492 (*list)->obj_next = NULL;
3493 list = *plist = &(*list)->obj_next;
3494 }
3495
3496 fail:
3497 /*
3498 * Free up what's left of the above.
3499 */
3500 if (mp_list != NULL) {
3501 mcache_free_ext(m_cache(MC_MBUF), mp_list);
3502 }
3503 if (clp_list != NULL) {
3504 mcache_free_ext(m_cache(cl_class), clp_list);
3505 }
3506 if (ref_list != NULL) {
3507 mcache_free_ext(ref_cache, ref_list);
3508 }
3509
3510 lck_mtx_lock(mbuf_mlock);
3511 if (num > 0 || cnum > 0) {
3512 m_total(class) += cnum;
3513 VERIFY(m_total(class) <= m_maxlimit(class));
3514 m_alloc_cnt(class) += num + cnum;
3515 }
3516 if ((num + cnum) < want) {
3517 m_fail_cnt(class) += (want - (num + cnum));
3518 }
3519 lck_mtx_unlock(mbuf_mlock);
3520
3521 return num + cnum;
3522 }
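
/*
 * Illustrative sketch (not part of the build): callers never invoke
 * mbuf_cslab_alloc() directly.  They allocate from the composite
 * caches, and the cache layer falls back to this routine only when
 * its per-CPU buckets run dry.  A batch allocation of mbuf + 2KB
 * cluster composites looks roughly like this, using calls that appear
 * elsewhere in this file:
 *
 *	mcache_obj_t *list = NULL;
 *	unsigned int got;
 *
 *	got = mcache_alloc_ext(m_cache(MC_MBUF_CL), &list, 32, MCR_NOSLEEP);
 *	// "got" may be less than 32; walk "list" via obj_next and
 *	// re-initialize each element with MBUF_INIT()/MBUF_CL_INIT().
 */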
3523
3524 /*
3525 * Common de-allocator for composite objects called by the CPU cache
3526 * layer when one or more elements need to be returned to the appropriate
3527 * global freelist.
3528 */
3529 static void
3530 mbuf_cslab_free(void *arg, mcache_obj_t *list, int purged)
3531 {
3532 mbuf_class_t class = (mbuf_class_t)arg;
3533 unsigned int num;
3534 int w;
3535
3536 ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class));
3537
3538 lck_mtx_lock(mbuf_mlock);
3539
3540 num = cslab_free(class, list, purged);
3541 m_free_cnt(class) += num;
3542
3543 if ((w = mb_waiters) > 0) {
3544 mb_waiters = 0;
3545 }
3546 if (w) {
3547 mbwdog_logger("waking up all threads");
3548 }
3549
3550 lck_mtx_unlock(mbuf_mlock);
3551
3552 if (w != 0) {
3553 wakeup(mb_waitchan);
3554 }
3555 }
3556
3557 /*
3558 * Common auditor for composite objects called by the CPU cache layer
3559 * during an allocation or free request. For the former, this is called
3560 * after the objects are obtained from either the bucket or slab layer
3561 * and before they are returned to the caller. For the latter, this is
3562 * called immediately during free and before placing the objects into
3563 * the bucket or slab layer.
3564 */
3565 static void
3566 mbuf_cslab_audit(void *arg, mcache_obj_t *list, boolean_t alloc)
3567 {
3568 mbuf_class_t class = (mbuf_class_t)arg, cl_class;
3569 mcache_audit_t *mca;
3570 struct mbuf *m, *ms;
3571 mcl_slab_t *clsp, *nsp;
3572 size_t cl_size;
3573 void *cl;
3574
3575 ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class));
3576 if (class == MC_MBUF_CL) {
3577 cl_class = MC_CL;
3578 } else if (class == MC_MBUF_BIGCL) {
3579 cl_class = MC_BIGCL;
3580 } else {
3581 cl_class = MC_16KCL;
3582 }
3583 cl_size = m_maxsize(cl_class);
3584
3585 while ((m = ms = (struct mbuf *)list) != NULL) {
3586 lck_mtx_lock(mbuf_mlock);
3587 /* Do the mbuf sanity checks and record its transaction */
3588 mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m);
3589 mcl_audit_mbuf(mca, m, TRUE, alloc);
3590 if (mcltrace) {
3591 mcache_buffer_log(mca, m, m_cache(class), &mb_start);
3592 }
3593
3594 if (alloc) {
3595 mca->mca_uflags |= MB_COMP_INUSE;
3596 } else {
3597 mca->mca_uflags &= ~MB_COMP_INUSE;
3598 }
3599
3600 /*
3601 * Use the shadow mbuf in the audit structure if we are
3602 * freeing, since the contents of the actual mbuf have been
3603 * pattern-filled by the above call to mcl_audit_mbuf().
3604 */
3605 if (!alloc && mclverify) {
3606 ms = MCA_SAVED_MBUF_PTR(mca);
3607 }
3608
3609 /* Do the cluster sanity checks and record its transaction */
3610 cl = ms->m_ext.ext_buf;
3611 clsp = slab_get(cl);
3612 VERIFY(ms->m_flags == M_EXT && cl != NULL);
3613 VERIFY(m_get_rfa(ms) != NULL && MBUF_IS_COMPOSITE(ms));
3614 if (class == MC_MBUF_CL) {
3615 VERIFY(clsp->sl_refcnt >= 1 &&
3616 clsp->sl_refcnt <= NCLPG);
3617 } else {
3618 VERIFY(clsp->sl_refcnt >= 1 &&
3619 clsp->sl_refcnt <= NBCLPG);
3620 }
3621
3622 if (class == MC_MBUF_16KCL) {
3623 int k;
3624 for (nsp = clsp, k = 1; k < NSLABSP16KB; k++) {
3625 nsp = nsp->sl_next;
3626 /* Next slab must already be present */
3627 VERIFY(nsp != NULL);
3628 VERIFY(nsp->sl_refcnt == 1);
3629 }
3630 }
3631
3632
3633 mca = mcl_audit_buf2mca(cl_class, cl);
3634 mcl_audit_cluster(mca, cl, cl_size, alloc, FALSE);
3635 if (mcltrace) {
3636 mcache_buffer_log(mca, cl, m_cache(class), &mb_start);
3637 }
3638
3639 if (alloc) {
3640 mca->mca_uflags |= MB_COMP_INUSE;
3641 } else {
3642 mca->mca_uflags &= ~MB_COMP_INUSE;
3643 }
3644 lck_mtx_unlock(mbuf_mlock);
3645
3646 list = list->obj_next;
3647 }
3648 }
3649
3650 static void
3651 m_vm_error_stats(uint32_t *cnt, uint64_t *ts, uint64_t *size,
3652 uint64_t alloc_size, kern_return_t error)
3653 {
3654 *cnt = *cnt + 1;
3655 *ts = net_uptime();
3656 if (size) {
3657 *size = alloc_size;
3658 }
3659 switch (error) {
3660 case KERN_SUCCESS:
3661 break;
3662 case KERN_INVALID_ARGUMENT:
3663 mb_kmem_stats[0]++;
3664 break;
3665 case KERN_INVALID_ADDRESS:
3666 mb_kmem_stats[1]++;
3667 break;
3668 case KERN_RESOURCE_SHORTAGE:
3669 mb_kmem_stats[2]++;
3670 break;
3671 case KERN_NO_SPACE:
3672 mb_kmem_stats[3]++;
3673 break;
3674 case KERN_FAILURE:
3675 mb_kmem_stats[4]++;
3676 break;
3677 default:
3678 mb_kmem_stats[5]++;
3679 break;
3680 }
3681 }
3682
3683 static vm_offset_t
3684 kmem_mb_alloc(vm_map_t mbmap, int size, int physContig, kern_return_t *err)
3685 {
3686 vm_offset_t addr = 0;
3687 kern_return_t kr = KERN_SUCCESS;
3688
3689 if (!physContig) {
3690 kr = kmem_alloc(mbmap, &addr, size,
3691 KMA_KOBJECT | KMA_LOMEM, VM_KERN_MEMORY_MBUF);
3692 } else {
3693 kr = kmem_alloc_contig(mbmap, &addr, size, PAGE_MASK, 0xfffff,
3694 0, KMA_KOBJECT | KMA_LOMEM, VM_KERN_MEMORY_MBUF);
3695 }
3696
3697 if (kr != KERN_SUCCESS) {
3698 addr = 0;
3699 }
3700 if (err) {
3701 *err = kr;
3702 }
3703
3704 return addr;
3705 }
3706
3707 /*
3708 * Allocate some number of mbuf clusters and place on cluster freelist.
3709 */
3710 static int
3711 m_clalloc(const u_int32_t num, const int wait, const u_int32_t bufsize)
3712 {
3713 int i, count = 0;
3714 vm_size_t size = 0;
3715 int numpages = 0, large_buffer;
3716 vm_offset_t page = 0;
3717 mcache_audit_t *mca_list = NULL;
3718 mcache_obj_t *con_list = NULL;
3719 mcl_slab_t *sp;
3720 mbuf_class_t class;
3721 kern_return_t error;
3722
3723 /* Set if a buffer allocation needs allocation of multiple pages */
3724 large_buffer = ((bufsize == m_maxsize(MC_16KCL)) &&
3725 PAGE_SIZE < M16KCLBYTES);
3726 VERIFY(bufsize == m_maxsize(MC_BIGCL) ||
3727 bufsize == m_maxsize(MC_16KCL));
3728
3729 VERIFY((bufsize == PAGE_SIZE) ||
3730 (bufsize > PAGE_SIZE && bufsize == m_maxsize(MC_16KCL)));
3731
3732 if (bufsize == m_size(MC_BIGCL)) {
3733 class = MC_BIGCL;
3734 } else {
3735 class = MC_16KCL;
3736 }
3737
3738 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
3739
3740 /*
3741 * Multiple threads may attempt to populate the cluster map one
3742 * after another. Since we drop the lock below prior to acquiring
3743 * the physical page(s), our view of the cluster map may no longer
3744 * be accurate, and we could end up over-committing the pages beyond
3745 * the maximum allowed for each class. To prevent it, this entire
3746 * operation (including the page mapping) is serialized.
3747 */
3748 while (mb_clalloc_busy) {
3749 mb_clalloc_waiters++;
3750 (void) msleep(mb_clalloc_waitchan, mbuf_mlock,
3751 (PZERO - 1), "m_clalloc", NULL);
3752 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
3753 }
3754
3755 /* We are busy now; tell everyone else to go away */
3756 mb_clalloc_busy = TRUE;
3757
3758 /*
3759 * Honor the caller's wish to block or not block. We have a way
3760 * to grow the pool asynchronously using the mbuf worker thread.
3761 */
3762 i = m_howmany(num, bufsize);
3763 if (i <= 0 || (wait & M_DONTWAIT)) {
3764 goto out;
3765 }
3766
3767 lck_mtx_unlock(mbuf_mlock);
3768
3769 size = round_page(i * bufsize);
3770 page = kmem_mb_alloc(mb_map, size, large_buffer, &error);
3771
3772 /*
3773 * If we did ask for "n" 16KB physically contiguous chunks
3774 * and didn't get them, then please try again without this
3775 * restriction.
3776 */
3777 net_update_uptime();
3778 if (large_buffer && page == 0) {
3779 m_vm_error_stats(&mb_kmem_contig_failed,
3780 &mb_kmem_contig_failed_ts,
3781 &mb_kmem_contig_failed_size,
3782 size, error);
3783 page = kmem_mb_alloc(mb_map, size, 0, &error);
3784 }
3785
3786 if (page == 0) {
3787 m_vm_error_stats(&mb_kmem_failed,
3788 &mb_kmem_failed_ts,
3789 &mb_kmem_failed_size,
3790 size, error);
3791 #if PAGE_SIZE == 4096
3792 if (bufsize == m_maxsize(MC_BIGCL)) {
3793 #else
3794 if (bufsize >= m_maxsize(MC_BIGCL)) {
3795 #endif
3796 /* Try for 1 page if failed */
3797 size = PAGE_SIZE;
3798 page = kmem_mb_alloc(mb_map, size, 0, &error);
3799 if (page == 0) {
3800 m_vm_error_stats(&mb_kmem_one_failed,
3801 &mb_kmem_one_failed_ts,
3802 NULL, size, error);
3803 }
3804 }
3805
3806 if (page == 0) {
3807 lck_mtx_lock(mbuf_mlock);
3808 goto out;
3809 }
3810 }
3811
3812 VERIFY(IS_P2ALIGNED(page, PAGE_SIZE));
3813 numpages = size / PAGE_SIZE;
3814
3815 /* If auditing is enabled, allocate the audit structures now */
3816 if (mclaudit != NULL) {
3817 int needed;
3818
3819 /*
3820 * Yes, I realize this is a waste of memory for clusters
3821 * that never get transformed into mbufs, as we may end
3822 * up with NMBPG-1 unused audit structures per cluster.
3823 * But doing so tremendously simplifies the allocation
3824 * strategy, since at this point we are not holding the
3825 * mbuf lock and the caller is okay to be blocked.
3826 */
3827 if (bufsize == PAGE_SIZE) {
3828 needed = numpages * NMBPG;
3829
3830 i = mcache_alloc_ext(mcl_audit_con_cache,
3831 &con_list, needed, MCR_SLEEP);
3832
3833 VERIFY(con_list != NULL && i == needed);
3834 } else {
3835 /*
3836 * Multiple 4K pages are being used for a
3837 * 16K cluster; one audit structure per cluster.
3838 */
3839 needed = numpages / NSLABSP16KB;
3840 }
3841
3842 i = mcache_alloc_ext(mcache_audit_cache,
3843 (mcache_obj_t **)&mca_list, needed, MCR_SLEEP);
3844
3845 VERIFY(mca_list != NULL && i == needed);
3846 }
3847
3848 lck_mtx_lock(mbuf_mlock);
3849
3850 for (i = 0; i < numpages; i++, page += PAGE_SIZE) {
3851 ppnum_t offset =
3852 ((unsigned char *)page - mbutl) >> PAGE_SHIFT;
3853 ppnum_t new_page = pmap_find_phys(kernel_pmap, page);
3854
3855 /*
3856 * If there is a mapper, the appropriate I/O page is
3857 * returned; zero out the page to discard its past
3858 * contents to prevent exposing leftover kernel memory.
3859 */
3860 VERIFY(offset < mcl_pages);
3861 if (mcl_paddr_base != 0) {
3862 bzero((void *)(uintptr_t) page, PAGE_SIZE);
3863 new_page = IOMapperInsertPage(mcl_paddr_base,
3864 offset, new_page);
3865 }
3866 mcl_paddr[offset] = new_page;
3867
3868 /* Pattern-fill this fresh page */
3869 if (mclverify) {
3870 mcache_set_pattern(MCACHE_FREE_PATTERN,
3871 (caddr_t)page, PAGE_SIZE);
3872 }
3873 if (bufsize == PAGE_SIZE) {
3874 mcache_obj_t *buf;
3875 /* One for the entire page */
3876 sp = slab_get((void *)page);
3877 if (mclaudit != NULL) {
3878 mcl_audit_init((void *)page,
3879 &mca_list, &con_list,
3880 AUDIT_CONTENTS_SIZE, NMBPG);
3881 }
3882 VERIFY(sp->sl_refcnt == 0 && sp->sl_flags == 0);
3883 slab_init(sp, class, SLF_MAPPED, (void *)page,
3884 (void *)page, PAGE_SIZE, 0, 1);
3885 buf = (mcache_obj_t *)page;
3886 buf->obj_next = NULL;
3887
3888 /* Insert this slab */
3889 slab_insert(sp, class);
3890
3891 /* Update stats now since slab_get drops the lock */
3892 ++m_infree(class);
3893 ++m_total(class);
3894 VERIFY(m_total(class) <= m_maxlimit(class));
3895 if (class == MC_BIGCL) {
3896 mbstat.m_bigclfree = m_infree(MC_BIGCL) +
3897 m_infree(MC_MBUF_BIGCL);
3898 mbstat.m_bigclusters = m_total(MC_BIGCL);
3899 }
3900 ++count;
3901 } else if ((bufsize > PAGE_SIZE) &&
3902 (i % NSLABSP16KB) == 0) {
3903 union m16kcluster *m16kcl = (union m16kcluster *)page;
3904 mcl_slab_t *nsp;
3905 int k;
3906
3907 /* One for the entire 16KB */
3908 sp = slab_get(m16kcl);
3909 if (mclaudit != NULL) {
3910 mcl_audit_init(m16kcl, &mca_list, NULL, 0, 1);
3911 }
3912
3913 VERIFY(sp->sl_refcnt == 0 && sp->sl_flags == 0);
3914 slab_init(sp, MC_16KCL, SLF_MAPPED,
3915 m16kcl, m16kcl, bufsize, 0, 1);
3916 m16kcl->m16kcl_next = NULL;
3917
3918 /*
3919 * 2nd-Nth page's slab is part of the first one,
3920 * where N is NSLABSP16KB.
3921 */
3922 for (k = 1; k < NSLABSP16KB; k++) {
3923 nsp = slab_get(((union mbigcluster *)page) + k);
3924 VERIFY(nsp->sl_refcnt == 0 &&
3925 nsp->sl_flags == 0);
3926 slab_init(nsp, MC_16KCL,
3927 SLF_MAPPED | SLF_PARTIAL,
3928 m16kcl, NULL, 0, 0, 0);
3929 }
3930 /* Insert this slab */
3931 slab_insert(sp, MC_16KCL);
3932
3933 /* Update stats now since slab_get drops the lock */
3934 ++m_infree(MC_16KCL);
3935 ++m_total(MC_16KCL);
3936 VERIFY(m_total(MC_16KCL) <= m_maxlimit(MC_16KCL));
3937 ++count;
3938 }
3939 }
3940 VERIFY(mca_list == NULL && con_list == NULL);
3941
3942 /* We're done; let others enter */
3943 mb_clalloc_busy = FALSE;
3944 if (mb_clalloc_waiters > 0) {
3945 mb_clalloc_waiters = 0;
3946 wakeup(mb_clalloc_waitchan);
3947 }
3948
3949 return count;
3950 out:
3951 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
3952
3953 mtracelarge_register(size);
3954
3955 /* We're done; let others enter */
3956 mb_clalloc_busy = FALSE;
3957 if (mb_clalloc_waiters > 0) {
3958 mb_clalloc_waiters = 0;
3959 wakeup(mb_clalloc_waitchan);
3960 }
3961
3962 /*
3963 * When non-blocking, we kick the worker thread if we have to grow the
3964 * pool or if the number of free clusters is less than requested.
3965 */
3966 if (i > 0 && mbuf_worker_ready && mbuf_worker_needs_wakeup) {
3967 mbwdog_logger("waking up the worker thread to to grow %s by %d",
3968 m_cname(class), i);
3969 wakeup((caddr_t)&mbuf_worker_needs_wakeup);
3970 mbuf_worker_needs_wakeup = FALSE;
3971 }
3972 if (class == MC_BIGCL) {
3973 if (i > 0) {
3974 /*
3975 * Remember total number of 4KB clusters needed
3976 * at this time.
3977 */
3978 i += m_total(MC_BIGCL);
3979 if (i > m_region_expand(MC_BIGCL)) {
3980 m_region_expand(MC_BIGCL) = i;
3981 }
3982 }
3983 if (m_infree(MC_BIGCL) >= num) {
3984 return 1;
3985 }
3986 } else {
3987 if (i > 0) {
3988 /*
3989 * Remember total number of 16KB clusters needed
3990 * at this time.
3991 */
3992 i += m_total(MC_16KCL);
3993 if (i > m_region_expand(MC_16KCL)) {
3994 m_region_expand(MC_16KCL) = i;
3995 }
3996 }
3997 if (m_infree(MC_16KCL) >= num) {
3998 return 1;
3999 }
4000 }
4001 return 0;
4002 }
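
/*
 * Worked example (illustrative, assuming 4KB pages): if m_howmany()
 * asks for i = 8 buffers of m_maxsize(MC_BIGCL) = 4096 bytes, then
 * size = round_page(8 * 4096) = 32KB, i.e. 8 pages with one slab and
 * one 4KB cluster per page.  If it asks for i = 2 buffers of 16KB,
 * size = round_page(2 * 16384) = 32KB as well, but each 16KB cluster
 * then spans NSLABSP16KB (= 16384 / 4096 = 4) page slabs, only the
 * first of which carries the buffer; the rest are SLF_PARTIAL.
 */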
4003
4004 /*
4005 * Populate the global freelist of the corresponding buffer class.
4006 */
4007 static int
4008 freelist_populate(mbuf_class_t class, unsigned int num, int wait)
4009 {
4010 mcache_obj_t *o = NULL;
4011 int i, numpages = 0, count;
4012 mbuf_class_t super_class;
4013
4014 VERIFY(class == MC_MBUF || class == MC_CL || class == MC_BIGCL ||
4015 class == MC_16KCL);
4016
4017 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
4018
4019 VERIFY(PAGE_SIZE == m_maxsize(MC_BIGCL) ||
4020 PAGE_SIZE == m_maxsize(MC_16KCL));
4021
4022 if (m_maxsize(class) >= PAGE_SIZE) {
4023 return m_clalloc(num, wait, m_maxsize(class)) != 0;
4024 }
4025
4026 /*
4027 * The rest of the function allocates pages and slices
4028 * them up into objects of the right size.
4029 */
4030
4031 numpages = (num * m_size(class) + PAGE_SIZE - 1) / PAGE_SIZE;
4032
4033 /* Currently assume that pages are 4K or 16K */
4034 if (PAGE_SIZE == m_maxsize(MC_BIGCL)) {
4035 super_class = MC_BIGCL;
4036 } else {
4037 super_class = MC_16KCL;
4038 }
4039
4040 i = m_clalloc(numpages, wait, m_maxsize(super_class));
4041
4042 /* how many objects will we cut the page into? */
4043 int numobj = PAGE_SIZE / m_maxsize(class);
4044
4045 for (count = 0; count < numpages; count++) {
4046 /* respect totals, minlimit, maxlimit */
4047 if (m_total(super_class) <= m_minlimit(super_class) ||
4048 m_total(class) >= m_maxlimit(class)) {
4049 break;
4050 }
4051
4052 if ((o = slab_alloc(super_class, wait)) == NULL) {
4053 break;
4054 }
4055
4056 struct mbuf *m = (struct mbuf *)o;
4057 union mcluster *c = (union mcluster *)o;
4058 union mbigcluster *mbc = (union mbigcluster *)o;
4059 mcl_slab_t *sp = slab_get(o);
4060 mcache_audit_t *mca = NULL;
4061
4062 /*
4063 * since one full page will be converted to MC_MBUF or
4064 * MC_CL, verify that the reference count will match that
4065 * assumption
4066 */
4067 VERIFY(sp->sl_refcnt == 1 && slab_is_detached(sp));
4068 VERIFY((sp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) == SLF_MAPPED);
4069 /*
4070 * Make sure that the cluster is unmolested
4071 * while in freelist
4072 */
4073 if (mclverify) {
4074 mca = mcl_audit_buf2mca(super_class,
4075 (mcache_obj_t *)o);
4076 mcache_audit_free_verify(mca,
4077 (mcache_obj_t *)o, 0, m_maxsize(super_class));
4078 }
4079
4080 /* Reinitialize it as an mbuf or 2K or 4K slab */
4081 slab_init(sp, class, sp->sl_flags,
4082 sp->sl_base, NULL, PAGE_SIZE, 0, numobj);
4083
4084 VERIFY(sp->sl_head == NULL);
4085
4086 VERIFY(m_total(super_class) >= 1);
4087 m_total(super_class)--;
4088
4089 if (super_class == MC_BIGCL) {
4090 mbstat.m_bigclusters = m_total(MC_BIGCL);
4091 }
4092
4093 m_total(class) += numobj;
4094 VERIFY(m_total(class) <= m_maxlimit(class));
4095 m_infree(class) += numobj;
4096
4097 i = numobj;
4098 if (class == MC_MBUF) {
4099 mbstat.m_mbufs = m_total(MC_MBUF);
4100 mtype_stat_add(MT_FREE, NMBPG);
4101 while (i--) {
4102 /*
4103 * If auditing is enabled, construct the
4104 * shadow mbuf in the audit structure
4105 * instead of the actual one.
4106 * mbuf_slab_audit() will take care of
4107 * restoring the contents after the
4108 * integrity check.
4109 */
4110 if (mclaudit != NULL) {
4111 struct mbuf *ms;
4112 mca = mcl_audit_buf2mca(MC_MBUF,
4113 (mcache_obj_t *)m);
4114 ms = MCA_SAVED_MBUF_PTR(mca);
4115 ms->m_type = MT_FREE;
4116 } else {
4117 m->m_type = MT_FREE;
4118 }
4119 m->m_next = sp->sl_head;
4120 sp->sl_head = (void *)m++;
4121 }
4122 } else if (class == MC_CL) { /* MC_CL */
4123 mbstat.m_clfree =
4124 m_infree(MC_CL) + m_infree(MC_MBUF_CL);
4125 mbstat.m_clusters = m_total(MC_CL);
4126 while (i--) {
4127 c->mcl_next = sp->sl_head;
4128 sp->sl_head = (void *)c++;
4129 }
4130 } else {
4131 VERIFY(class == MC_BIGCL);
4132 mbstat.m_bigclusters = m_total(MC_BIGCL);
4133 mbstat.m_bigclfree = m_infree(MC_BIGCL) +
4134 m_infree(MC_MBUF_BIGCL);
4135 while (i--) {
4136 mbc->mbc_next = sp->sl_head;
4137 sp->sl_head = (void *)mbc++;
4138 }
4139 }
4140
4141 /* Insert into the mbuf or 2k or 4k slab list */
4142 slab_insert(sp, class);
4143
4144 if ((i = mb_waiters) > 0) {
4145 mb_waiters = 0;
4146 }
4147 if (i != 0) {
4148 mbwdog_logger("waking up all threads");
4149 wakeup(mb_waitchan);
4150 }
4151 }
4152 return count != 0;
4153 }
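
/*
 * Worked example (illustrative, assuming 4KB pages and the usual
 * 256-byte MSIZE / 2KB MCLBYTES): populating MC_MBUF converts one
 * MC_BIGCL page into PAGE_SIZE / MSIZE = 4096 / 256 = 16 mbufs
 * (NMBPG), while populating MC_CL yields 4096 / 2048 = 2 clusters
 * (NCLPG).  m_total(MC_BIGCL) drops by one and m_total(class) grows
 * by numobj, which is exactly the accounting performed above.
 */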
4154
4155 /*
4156 * For each class, initialize the freelist to hold m_minlimit() objects.
4157 */
4158 static void
4159 freelist_init(mbuf_class_t class)
4160 {
4161 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
4162
4163 VERIFY(class == MC_CL || class == MC_BIGCL);
4164 VERIFY(m_total(class) == 0);
4165 VERIFY(m_minlimit(class) > 0);
4166
4167 while (m_total(class) < m_minlimit(class)) {
4168 (void) freelist_populate(class, m_minlimit(class), M_WAIT);
4169 }
4170
4171 VERIFY(m_total(class) >= m_minlimit(class));
4172 }
4173
4174 /*
4175 * (Inaccurately) check if it might be worth a trip back to the
4176 * mcache layer due to the availability of objects there. We'll
4177 * end up back here if there's nothing up there.
4178 */
4179 static boolean_t
4180 mbuf_cached_above(mbuf_class_t class, int wait)
4181 {
4182 switch (class) {
4183 case MC_MBUF:
4184 if (wait & MCR_COMP) {
4185 return !mcache_bkt_isempty(m_cache(MC_MBUF_CL)) ||
4186 !mcache_bkt_isempty(m_cache(MC_MBUF_BIGCL));
4187 }
4188 break;
4189
4190 case MC_CL:
4191 if (wait & MCR_COMP) {
4192 return !mcache_bkt_isempty(m_cache(MC_MBUF_CL));
4193 }
4194 break;
4195
4196 case MC_BIGCL:
4197 if (wait & MCR_COMP) {
4198 return !mcache_bkt_isempty(m_cache(MC_MBUF_BIGCL));
4199 }
4200 break;
4201
4202 case MC_16KCL:
4203 if (wait & MCR_COMP) {
4204 return !mcache_bkt_isempty(m_cache(MC_MBUF_16KCL));
4205 }
4206 break;
4207
4208 case MC_MBUF_CL:
4209 case MC_MBUF_BIGCL:
4210 case MC_MBUF_16KCL:
4211 break;
4212
4213 default:
4214 VERIFY(0);
4215 /* NOTREACHED */
4216 }
4217
4218 return !mcache_bkt_isempty(m_cache(class));
4219 }
4220
4221 /*
4222 * If possible, convert constructed objects to raw ones.
4223 */
4224 static boolean_t
4225 mbuf_steal(mbuf_class_t class, unsigned int num)
4226 {
4227 mcache_obj_t *top = NULL;
4228 mcache_obj_t **list = ⊤
4229 unsigned int tot = 0;
4230
4231 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
4232
4233 switch (class) {
4234 case MC_MBUF:
4235 case MC_CL:
4236 case MC_BIGCL:
4237 case MC_16KCL:
4238 return FALSE;
4239
4240 case MC_MBUF_CL:
4241 case MC_MBUF_BIGCL:
4242 case MC_MBUF_16KCL:
4243 /* Get the required number of constructed objects if possible */
4244 if (m_infree(class) > m_minlimit(class)) {
4245 tot = cslab_alloc(class, &list,
4246 MIN(num, m_infree(class)));
4247 }
4248
4249 /* And destroy them to get back the raw objects */
4250 if (top != NULL) {
4251 (void) cslab_free(class, top, 1);
4252 }
4253 break;
4254
4255 default:
4256 VERIFY(0);
4257 /* NOTREACHED */
4258 }
4259
4260 return tot == num;
4261 }
4262
4263 static void
4264 m_reclaim(mbuf_class_t class, unsigned int num, boolean_t comp)
4265 {
4266 int m, bmap = 0;
4267
4268 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
4269
4270 VERIFY(m_total(MC_CL) <= m_maxlimit(MC_CL));
4271 VERIFY(m_total(MC_BIGCL) <= m_maxlimit(MC_BIGCL));
4272 VERIFY(m_total(MC_16KCL) <= m_maxlimit(MC_16KCL));
4273
4274 /*
4275 * This logic can be made smarter; for now, simply mark
4276 * all other related classes as potential victims.
4277 */
4278 switch (class) {
4279 case MC_MBUF:
4280 m_wantpurge(MC_CL)++;
4281 m_wantpurge(MC_BIGCL)++;
4282 m_wantpurge(MC_MBUF_CL)++;
4283 m_wantpurge(MC_MBUF_BIGCL)++;
4284 break;
4285
4286 case MC_CL:
4287 m_wantpurge(MC_MBUF)++;
4288 m_wantpurge(MC_BIGCL)++;
4289 m_wantpurge(MC_MBUF_BIGCL)++;
4290 if (!comp) {
4291 m_wantpurge(MC_MBUF_CL)++;
4292 }
4293 break;
4294
4295 case MC_BIGCL:
4296 m_wantpurge(MC_MBUF)++;
4297 m_wantpurge(MC_CL)++;
4298 m_wantpurge(MC_MBUF_CL)++;
4299 if (!comp) {
4300 m_wantpurge(MC_MBUF_BIGCL)++;
4301 }
4302 break;
4303
4304 case MC_16KCL:
4305 if (!comp) {
4306 m_wantpurge(MC_MBUF_16KCL)++;
4307 }
4308 break;
4309
4310 default:
4311 VERIFY(0);
4312 /* NOTREACHED */
4313 }
4314
4315 /*
4316 * Run through each marked class and check if we really need to
4317 * purge (and therefore temporarily disable) the per-CPU caches
4318 * layer used by the class. If so, remember the classes since
4319 * we are going to drop the lock below prior to purging.
4320 */
4321 for (m = 0; m < NELEM(mbuf_table); m++) {
4322 if (m_wantpurge(m) > 0) {
4323 m_wantpurge(m) = 0;
4324 /*
4325 * Try hard to steal the required number of objects
4326 * from the freelist of other mbuf classes. Only
4327 * purge and disable the per-CPU caches layer when
4328 * we don't have enough; it's the last resort.
4329 */
4330 if (!mbuf_steal(m, num)) {
4331 bmap |= (1 << m);
4332 }
4333 }
4334 }
4335
4336 lck_mtx_unlock(mbuf_mlock);
4337
4338 if (bmap != 0) {
4339 /* signal the domains to drain */
4340 net_drain_domains();
4341
4342 /* Sigh; we have no other choices but to ask mcache to purge */
4343 for (m = 0; m < NELEM(mbuf_table); m++) {
4344 if ((bmap & (1 << m)) &&
4345 mcache_purge_cache(m_cache(m), TRUE)) {
4346 lck_mtx_lock(mbuf_mlock);
4347 m_purge_cnt(m)++;
4348 mbstat.m_drain++;
4349 lck_mtx_unlock(mbuf_mlock);
4350 }
4351 }
4352 } else {
4353 /*
4354 * Request mcache to reap extra elements from all of its caches;
4355 * note that all reaps are serialized and happen only at a fixed
4356 * interval.
4357 */
4358 mcache_reap();
4359 }
4360 lck_mtx_lock(mbuf_mlock);
4361 }
4362 #endif /* CONFIG_MBUF_MCACHE */
4363
4364 static inline struct mbuf *
4365 m_get_common(int wait, short type, int hdr)
4366 {
4367 struct mbuf *m;
4368
4369 #if CONFIG_MBUF_MCACHE
4370 int mcflags = MSLEEPF(wait);
4371
4372 /* Is this due to a non-blocking retry? If so, then try harder */
4373 if (mcflags & MCR_NOSLEEP) {
4374 mcflags |= MCR_TRYHARD;
4375 }
4376
4377 m = mcache_alloc(m_cache(MC_MBUF), mcflags);
4378 #else
4379 m = mz_alloc(wait);
4380 #endif /* CONFIG_MBUF_MCACHE */
4381 if (m != NULL) {
4382 MBUF_INIT(m, hdr, type);
4383 mtype_stat_inc(type);
4384 mtype_stat_dec(MT_FREE);
4385 }
4386 return m;
4387 }
4388
4389 /*
4390 * Space allocation routines; these are also available as macros
4391 * for critical paths.
4392 */
4393 #define _M_GET(wait, type) m_get_common(wait, type, 0)
4394 #define _M_GETHDR(wait, type) m_get_common(wait, type, 1)
4395 #define _M_RETRY(wait, type) _M_GET(wait, type)
4396 #define _M_RETRYHDR(wait, type) _M_GETHDR(wait, type)
4397 #define _MGET(m, how, type) ((m) = _M_GET(how, type))
4398 #define _MGETHDR(m, how, type) ((m) = _M_GETHDR(how, type))
4399
4400 struct mbuf *
4401 m_get(int wait, int type)
4402 {
4403 return _M_GET(wait, type);
4404 }
4405
4406 struct mbuf *
4407 m_gethdr(int wait, int type)
4408 {
4409 return _M_GETHDR(wait, type);
4410 }
4411
4412 struct mbuf *
4413 m_retry(int wait, int type)
4414 {
4415 return _M_RETRY(wait, type);
4416 }
4417
4418 struct mbuf *
4419 m_retryhdr(int wait, int type)
4420 {
4421 return _M_RETRYHDR(wait, type);
4422 }
4423
4424 struct mbuf *
4425 m_getclr(int wait, int type)
4426 {
4427 struct mbuf *m;
4428
4429 _MGET(m, wait, type);
4430 if (m != NULL) {
4431 bzero(mtod(m, caddr_t), MLEN);
4432 }
4433 return m;
4434 }
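
/*
 * Usage sketch (illustrative, not part of this file's logic): the
 * routines above are the basic allocation entry points.  A typical
 * non-blocking caller does something like:
 *
 *	struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
 *
 *	if (m == NULL)
 *		return ENOBUFS;
 *	m->m_len = 0;
 *	...fill in up to MHLEN bytes at mtod(m, caddr_t)...
 *	m_free(m);
 *
 * ENOBUFS here is just the conventional errno for allocation failure.
 */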
4435
4436 static int
4437 m_free_paired(struct mbuf *m)
4438 {
4439 VERIFY((m->m_flags & M_EXT) && (MEXT_FLAGS(m) & EXTF_PAIRED));
4440
4441 os_atomic_thread_fence(seq_cst);
4442 if (MEXT_PMBUF(m) == m) {
4443 /*
4444 * The paired ref count might be negative if we lose a race
4445 * against another thread clearing MEXT_PMBUF after the
4446 * memory barrier sync above. In that case just ignore it,
4447 * as the pair has already been broken.
4448 */
4449 int16_t prefcnt = os_atomic_dec(&MEXT_PREF(m), acq_rel);
4450 if (prefcnt > 1) {
4451 return 1;
4452 } else if (prefcnt == 1) {
4453 m_ext_free_func_t m_free_func = m_get_ext_free(m);
4454 VERIFY(m_free_func != NULL);
4455 (*m_free_func)(m->m_ext.ext_buf,
4456 m->m_ext.ext_size, m_get_ext_arg(m));
4457 return 1;
4458 } else if (prefcnt == 0) {
4459 VERIFY(MBUF_IS_PAIRED(m));
4460
4461 /*
4462 * Restore minref to its natural value, so that
4463 * the caller will be able to free the cluster
4464 * as appropriate.
4465 */
4466 MEXT_MINREF(m) = 0;
4467
4468 /*
4469 * Clear MEXT_PMBUF, but leave EXTF_PAIRED intact
4470 * as it is immutable. The release store below also
4471 * acts as a memory barrier.
4472 */
4473 os_atomic_store(&MEXT_PMBUF(m), (mbuf_ref_t)0, release);
4474
4475 switch (m->m_ext.ext_size) {
4476 case MCLBYTES:
4477 m_set_ext(m, m_get_rfa(m), NULL, NULL);
4478 break;
4479
4480 case MBIGCLBYTES:
4481 m_set_ext(m, m_get_rfa(m), m_bigfree, NULL);
4482 break;
4483
4484 case M16KCLBYTES:
4485 m_set_ext(m, m_get_rfa(m), m_16kfree, NULL);
4486 break;
4487
4488 default:
4489 VERIFY(0);
4490 /* NOTREACHED */
4491 }
4492 }
4493 }
4494
4495 /*
4496 * Tell caller the unpair has occurred, and that the reference
4497 * count on the external cluster held for the paired mbuf should
4498 * now be dropped.
4499 */
4500 return 0;
4501 }
4502
4503 struct mbuf *
4504 m_free(struct mbuf *m)
4505 {
4506 struct mbuf *n = m->m_next;
4507
4508 if (m->m_type == MT_FREE) {
4509 panic("m_free: freeing an already freed mbuf");
4510 }
4511
4512 if (m->m_flags & M_PKTHDR) {
4513 /* Free the aux data and tags if there is any */
4514 m_tag_delete_chain(m);
4515
4516 m_do_tx_compl_callback(m, NULL);
4517 }
4518
4519 if (m->m_flags & M_EXT) {
4520 if (MBUF_IS_PAIRED(m) && m_free_paired(m)) {
4521 return n;
4522 }
4523 /*
4524 * Make sure that we don't touch any ext_ref
4525 * member after we decrement the reference count
4526 * since that may lead to use-after-free
4527 * when we do not hold the last reference.
4528 */
4529 const bool composite = !!(MEXT_FLAGS(m) & EXTF_COMPOSITE);
4530 const m_ext_free_func_t m_free_func = m_get_ext_free(m);
4531 const uint16_t minref = MEXT_MINREF(m);
4532 const uint16_t refcnt = m_decref(m);
4533
4534 if (refcnt == minref && !composite) {
4535 #if CONFIG_MBUF_MCACHE
4536 if (m_free_func == NULL) {
4537 mcache_free(m_cache(MC_CL), m->m_ext.ext_buf);
4538 } else if (m_free_func == m_bigfree) {
4539 mcache_free(m_cache(MC_BIGCL),
4540 m->m_ext.ext_buf);
4541 } else if (m_free_func == m_16kfree) {
4542 mcache_free(m_cache(MC_16KCL),
4543 m->m_ext.ext_buf);
4544 } else {
4545 (*m_free_func)(m->m_ext.ext_buf,
4546 m->m_ext.ext_size, m_get_ext_arg(m));
4547 }
4548 mcache_free(ref_cache, m_get_rfa(m));
4549 #else
4550 if (m_free_func == NULL) {
4551 mz_cl_free(ZONE_ID_CLUSTER_2K, m->m_ext.ext_buf);
4552 } else if (m_free_func == m_bigfree) {
4553 mz_cl_free(ZONE_ID_CLUSTER_4K, m->m_ext.ext_buf);
4554 } else if (m_free_func == m_16kfree) {
4555 mz_cl_free(ZONE_ID_CLUSTER_16K, m->m_ext.ext_buf);
4556 } else {
4557 (*m_free_func)(m->m_ext.ext_buf,
4558 m->m_ext.ext_size, m_get_ext_arg(m));
4559 }
4560 mz_ref_free(m_get_rfa(m));
4561 #endif /* CONFIG_MBUF_MCACHE */
4562 m_set_ext(m, NULL, NULL, NULL);
4563 } else if (refcnt == minref && composite) {
4564 VERIFY(!(MEXT_FLAGS(m) & EXTF_PAIRED));
4565
4566 mtype_stat_dec(m->m_type);
4567 mtype_stat_inc(MT_FREE);
4568
4569 m->m_type = MT_FREE;
4570 m->m_flags = M_EXT;
4571 m->m_len = 0;
4572 m->m_next = m->m_nextpkt = NULL;
4573 /*
4574 * MEXT_FLAGS is safe to access here
4575 * since we are now sure that we held
4576 * the last reference to ext_ref.
4577 */
4578 MEXT_FLAGS(m) &= ~EXTF_READONLY;
4579
4580 #if CONFIG_MBUF_MCACHE
4581 /* "Free" into the intermediate cache */
4582 if (m_free_func == NULL) {
4583 mcache_free(m_cache(MC_MBUF_CL), m);
4584 } else if (m_free_func == m_bigfree) {
4585 mcache_free(m_cache(MC_MBUF_BIGCL), m);
4586 } else {
4587 VERIFY(m_free_func == m_16kfree);
4588 mcache_free(m_cache(MC_MBUF_16KCL), m);
4589 }
4590 #else
4591 /* "Free" into the intermediate cache */
4592 if (m_free_func == NULL) {
4593 mz_composite_free(MC_MBUF_CL, m);
4594 } else if (m_free_func == m_bigfree) {
4595 mz_composite_free(MC_MBUF_BIGCL, m);
4596 } else {
4597 VERIFY(m_free_func == m_16kfree);
4598 mz_composite_free(MC_MBUF_16KCL, m);
4599 }
4600 #endif /* CONFIG_MBUF_MCACHE */
4601 return n;
4602 }
4603 }
4604
4605 mtype_stat_dec(m->m_type);
4606 mtype_stat_inc(MT_FREE);
4607
4608 m->m_type = MT_FREE;
4609 m->m_flags = m->m_len = 0;
4610 m->m_next = m->m_nextpkt = NULL;
4611
4612 #if CONFIG_MBUF_MCACHE
4613 mcache_free(m_cache(MC_MBUF), m);
4614 #else
4615 mz_free(m);
4616 #endif /* CONFIG_MBUF_MCACHE */
4617
4618 return n;
4619 }
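
/*
 * Illustrative sketch: m_free() returns the next mbuf in the chain,
 * which lets a whole chain be released with a simple loop (this is
 * essentially what m_freem() does):
 *
 *	while (m != NULL)
 *		m = m_free(m);
 */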
4620
4621 __private_extern__ struct mbuf *
4622 m_clattach(struct mbuf *m, int type, caddr_t extbuf __sized_by(extsize),
4623 void (*extfree)(caddr_t, u_int, caddr_t), size_t extsize, caddr_t extarg,
4624 int wait, int pair)
4625 {
4626 struct ext_ref *rfa = NULL;
4627
4628 /*
4629 * If pairing is requested and an existing mbuf is provided, reject
4630 * it if it's already been paired to another cluster. Otherwise,
4631 * allocate a new one or free any existing below.
4632 */
4633 if ((m != NULL && MBUF_IS_PAIRED(m)) ||
4634 (m == NULL && (m = _M_GETHDR(wait, type)) == NULL)) {
4635 return NULL;
4636 }
4637
4638 if (m->m_flags & M_EXT) {
4639 /*
4640 * Make sure that we don't touch any ext_ref
4641 * member after we decrement the reference count
4642 * since that may lead to use-after-free
4643 * when we do not hold the last reference.
4644 */
4645 const bool composite = !!(MEXT_FLAGS(m) & EXTF_COMPOSITE);
4646 VERIFY(!(MEXT_FLAGS(m) & EXTF_PAIRED) && MEXT_PMBUF(m) == NULL);
4647 const m_ext_free_func_t m_free_func = m_get_ext_free(m);
4648 const uint16_t minref = MEXT_MINREF(m);
4649 const uint16_t refcnt = m_decref(m);
4650
4651 if (refcnt == minref && !composite) {
4652 #if CONFIG_MBUF_MCACHE
4653 if (m_free_func == NULL) {
4654 mcache_free(m_cache(MC_CL), m->m_ext.ext_buf);
4655 } else if (m_free_func == m_bigfree) {
4656 mcache_free(m_cache(MC_BIGCL),
4657 m->m_ext.ext_buf);
4658 } else if (m_free_func == m_16kfree) {
4659 mcache_free(m_cache(MC_16KCL),
4660 m->m_ext.ext_buf);
4661 } else {
4662 (*m_free_func)(m->m_ext.ext_buf,
4663 m->m_ext.ext_size, m_get_ext_arg(m));
4664 }
4665 #else
4666 if (m_free_func == NULL) {
4667 mz_cl_free(ZONE_ID_CLUSTER_2K, m->m_ext.ext_buf);
4668 } else if (m_free_func == m_bigfree) {
4669 mz_cl_free(ZONE_ID_CLUSTER_4K, m->m_ext.ext_buf);
4670 } else if (m_free_func == m_16kfree) {
4671 mz_cl_free(ZONE_ID_CLUSTER_16K, m->m_ext.ext_buf);
4672 } else {
4673 (*m_free_func)(m->m_ext.ext_buf,
4674 m->m_ext.ext_size, m_get_ext_arg(m));
4675 }
4676 #endif /* CONFIG_MBUF_MCACHE */
4677 /* Re-use the reference structure */
4678 rfa = m_get_rfa(m);
4679 } else if (refcnt == minref && composite) {
4680 VERIFY(m->m_type != MT_FREE);
4681
4682 mtype_stat_dec(m->m_type);
4683 mtype_stat_inc(MT_FREE);
4684
4685 m->m_type = MT_FREE;
4686 m->m_flags = M_EXT;
4687 m->m_len = 0;
4688 m->m_next = m->m_nextpkt = NULL;
4689
4690 /*
4691 * MEXT_FLAGS is safe to access here
4692 * since we are now sure that we held
4693 * the last reference to ext_ref.
4694 */
4695 MEXT_FLAGS(m) &= ~EXTF_READONLY;
4696
4697 /* "Free" into the intermediate cache */
4698 #if CONFIG_MBUF_MCACHE
4699 if (m_free_func == NULL) {
4700 mcache_free(m_cache(MC_MBUF_CL), m);
4701 } else if (m_free_func == m_bigfree) {
4702 mcache_free(m_cache(MC_MBUF_BIGCL), m);
4703 } else {
4704 VERIFY(m_free_func == m_16kfree);
4705 mcache_free(m_cache(MC_MBUF_16KCL), m);
4706 }
4707 #else
4708 if (m_free_func == NULL) {
4709 mz_composite_free(MC_MBUF_CL, m);
4710 } else if (m_free_func == m_bigfree) {
4711 mz_composite_free(MC_MBUF_BIGCL, m);
4712 } else {
4713 VERIFY(m_free_func == m_16kfree);
4714 mz_composite_free(MC_MBUF_16KCL, m);
4715 }
4716 #endif /* CONFIG_MBUF_MCACHE */
4717 /*
4718 * Allocate a new mbuf, since we didn't divorce
4719 * the composite mbuf + cluster pair above.
4720 */
4721 if ((m = _M_GETHDR(wait, type)) == NULL) {
4722 return NULL;
4723 }
4724 }
4725 }
4726
4727 #if CONFIG_MBUF_MCACHE
4728 if (rfa == NULL &&
4729 (rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) {
4730 m_free(m);
4731 return NULL;
4732 }
4733 #else
4734 if (rfa == NULL &&
4735 (rfa = mz_ref_alloc(wait)) == NULL) {
4736 m_free(m);
4737 return NULL;
4738 }
4739 #endif /* CONFIG_MBUF_MCACHE */
4740
4741 if (!pair) {
4742 MEXT_INIT(m, extbuf, extsize, extfree, extarg, rfa,
4743 0, 1, 0, 0, 0, NULL);
4744 } else {
4745 MEXT_INIT(m, extbuf, extsize, extfree, (caddr_t)m, rfa,
4746 1, 1, 1, EXTF_PAIRED, 0, m);
4747 }
4748
4749 return m;
4750 }
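
/*
 * Usage sketch (illustrative; "my_buf", "my_buf_size", "my_buf_free"
 * and "my_arg" are hypothetical names, not part of this file): a
 * driver that owns an external buffer can wrap it in an mbuf without
 * copying:
 *
 *	static void
 *	my_buf_free(caddr_t buf, u_int size, caddr_t arg)
 *	{
 *		// return "buf" to the driver's own pool
 *	}
 *
 *	struct mbuf *m = m_clattach(NULL, MT_DATA, my_buf, my_buf_free,
 *	    my_buf_size, my_arg, M_DONTWAIT, 0);
 *
 * Passing NULL for the mbuf asks m_clattach() to allocate a fresh
 * packet header; pair = 0 requests a normal (unpaired) attachment.
 */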
4751
4752 /*
4753 * Perform `fast' allocation of mbuf clusters from a cache of recently-freed
4754 * clusters. (If the cache is empty, new clusters are allocated en masse.)
4755 */
4756 struct mbuf *
4757 m_getcl(int wait, int type, int flags)
4758 {
4759 struct mbuf *m = NULL;
4760 int hdr = (flags & M_PKTHDR);
4761
4762 #if CONFIG_MBUF_MCACHE
4763 int mcflags = MSLEEPF(wait);
4764
4765 /* Is this due to a non-blocking retry? If so, then try harder */
4766 if (mcflags & MCR_NOSLEEP) {
4767 mcflags |= MCR_TRYHARD;
4768 }
4769
4770 m = mcache_alloc(m_cache(MC_MBUF_CL), mcflags);
4771 #else
4772 m = mz_composite_alloc(MC_MBUF_CL, wait);
4773 #endif /* CONFIG_MBUF_MCACHE */
4774 if (m != NULL) {
4775 u_int16_t flag;
4776 struct ext_ref *rfa;
4777 void *cl;
4778
4779 VERIFY(m->m_type == MT_FREE && m->m_flags == M_EXT);
4780 cl = m->m_ext.ext_buf;
4781 rfa = m_get_rfa(m);
4782
4783 ASSERT(cl != NULL && rfa != NULL);
4784 VERIFY(MBUF_IS_COMPOSITE(m) && m_get_ext_free(m) == NULL);
4785
4786 flag = MEXT_FLAGS(m);
4787
4788 MBUF_INIT(m, hdr, type);
4789 MBUF_CL_INIT(m, cl, rfa, 1, flag);
4790
4791 mtype_stat_inc(type);
4792 mtype_stat_dec(MT_FREE);
4793 }
4794 return m;
4795 }
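
/*
 * Usage sketch (illustrative): m_getcl() is the preferred way to get
 * an mbuf with a 2KB cluster already attached in a single call:
 *
 *	struct mbuf *m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
 *
 *	if (m != NULL) {
 *		// up to MCLBYTES of payload can be written at mtod(m, caddr_t)
 *		m_freem(m);
 *	}
 */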
4796
4797 /* m_mclget() adds an mbuf cluster to a normal mbuf */
4798 struct mbuf *
4799 m_mclget(struct mbuf *m, int wait)
4800 {
4801 struct ext_ref *rfa = NULL;
4802 char *bytes = NULL;
4803
4804 #if CONFIG_MBUF_MCACHE
4805 if ((rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) {
4806 return m;
4807 }
4808 #else
4809 if ((rfa = mz_ref_alloc(wait)) == NULL) {
4810 return m;
4811 }
4812 #endif /* CONFIG_MBUF_MCACHE */
4813 if ((bytes = m_mclalloc(wait)) != NULL) {
4814 m->m_ext.ext_size = MCLBYTES;
4815 m->m_ext.ext_buf = bytes;
4816 MBUF_CL_INIT(m, m->m_ext.ext_buf, rfa, 1, 0);
4817 } else {
4818 m->m_ext.ext_size = 0;
4819 m->m_ext.ext_buf = NULL;
4820 #if CONFIG_MBUF_MCACHE
4821 mcache_free(ref_cache, rfa);
4822 #else
4823 mz_ref_free(rfa);
4824 #endif /* CONFIG_MBUF_MCACHE */
4825 }
4826
4827 return m;
4828 }
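
/*
 * Usage sketch (illustrative): m_mclget() returns the mbuf whether or
 * not the cluster attach succeeded, so callers must check M_EXT:
 *
 *	struct mbuf *m = m_get(M_DONTWAIT, MT_DATA);
 *
 *	if (m != NULL) {
 *		m = m_mclget(m, M_DONTWAIT);
 *		if (!(m->m_flags & M_EXT)) {
 *			m_free(m);	// no cluster; fall back or bail
 *			m = NULL;
 *		}
 *	}
 */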
4829
4830 /* Allocate an mbuf cluster */
4831 char *
4832 __sized_by_or_null(MCLBYTES)
4833 m_mclalloc(int wait)
4834 {
4835 #if CONFIG_MBUF_MCACHE
4836 int mcflags = MSLEEPF(wait);
4837
4838 /* Is this due to a non-blocking retry? If so, then try harder */
4839 if (mcflags & MCR_NOSLEEP) {
4840 mcflags |= MCR_TRYHARD;
4841 }
4842
4843 return mcache_alloc(m_cache(MC_CL), mcflags);
4844 #else
4845 return mz_cl_alloc(ZONE_ID_CLUSTER_2K, wait);
4846 #endif /* CONFIG_MBUF_MCACHE */
4847 }
4848
4849 /* Free an mbuf cluster */
4850 void
4851 m_mclfree(caddr_t p)
4852 {
4853 #if CONFIG_MBUF_MCACHE
4854 mcache_free(m_cache(MC_CL), p);
4855 #else
4856 mz_cl_free(ZONE_ID_CLUSTER_2K, p);
4857 #endif /* CONFIG_MBUF_MCACHE */
4858 }
4859
4860 /*
4861 * m_mclhasreference() checks if a cluster of an mbuf is referenced by
4862 * another mbuf; see comments in m_incref() regarding EXTF_READONLY.
4863 */
4864 int
4865 m_mclhasreference(struct mbuf *m)
4866 {
4867 if (!(m->m_flags & M_EXT)) {
4868 return 0;
4869 }
4870
4871 ASSERT(m_get_rfa(m) != NULL);
4872
4873 return (MEXT_FLAGS(m) & EXTF_READONLY) ? 1 : 0;
4874 }
4875
4876 __private_extern__ char *
4877 __sized_by_or_null(MBIGCLBYTES)
4878 m_bigalloc(int wait)
4879 {
4880 #if CONFIG_MBUF_MCACHE
4881 int mcflags = MSLEEPF(wait);
4882
4883 /* Is this due to a non-blocking retry? If so, then try harder */
4884 if (mcflags & MCR_NOSLEEP) {
4885 mcflags |= MCR_TRYHARD;
4886 }
4887
4888 return mcache_alloc(m_cache(MC_BIGCL), mcflags);
4889 #else
4890 return mz_cl_alloc(ZONE_ID_CLUSTER_4K, wait);
4891 #endif /* CONFIG_MBUF_MCACHE */
4892 }
4893
4894 __private_extern__ void
4895 m_bigfree(caddr_t p, __unused u_int size, __unused caddr_t arg)
4896 {
4897 #if CONFIG_MBUF_MCACHE
4898 mcache_free(m_cache(MC_BIGCL), p);
4899 #else
4900 mz_cl_free(ZONE_ID_CLUSTER_4K, p);
4901 #endif /* CONFIG_MBUF_MCACHE */
4902 }
4903
4904 /* m_mbigget() adds a 4KB mbuf cluster to a normal mbuf */
4905 __private_extern__ struct mbuf *
4906 m_mbigget(struct mbuf *m, int wait)
4907 {
4908 struct ext_ref *rfa = NULL;
4909 void * bytes = NULL;
4910
4911 #if CONFIG_MBUF_MCACHE
4912 if ((rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) {
4913 return m;
4914 }
4915 #else
4916 if ((rfa = mz_ref_alloc(wait)) == NULL) {
4917 return m;
4918 }
4919 #endif /* CONFIG_MBUF_MCACHE */
4920 if ((bytes = m_bigalloc(wait)) != NULL) {
4921 m->m_ext.ext_size = MBIGCLBYTES;
4922 m->m_ext.ext_buf = bytes;
4923 MBUF_BIGCL_INIT(m, m->m_ext.ext_buf, rfa, 1, 0);
4924 } else {
4925 m->m_ext.ext_size = 0;
4926 m->m_ext.ext_buf = NULL;
4927 #if CONFIG_MBUF_MCACHE
4928 mcache_free(ref_cache, rfa);
4929 #else
4930 mz_ref_free(rfa);
4931 #endif /* CONFIG_MBUF_MCACHE */
4932 }
4933
4934 return m;
4935 }
4936
4937 __private_extern__ char *
4938 __sized_by_or_null(M16KCLBYTES)
4939 m_16kalloc(int wait)
4940 {
4941 #if CONFIG_MBUF_MCACHE
4942 int mcflags = MSLEEPF(wait);
4943
4944 /* Is this due to a non-blocking retry? If so, then try harder */
4945 if (mcflags & MCR_NOSLEEP) {
4946 mcflags |= MCR_TRYHARD;
4947 }
4948
4949 return mcache_alloc(m_cache(MC_16KCL), mcflags);
4950 #else
4951 return mz_cl_alloc(ZONE_ID_CLUSTER_16K, wait);
4952 #endif /* CONFIG_MBUF_MCACHE */
4953 }
4954
4955 __private_extern__ void
4956 m_16kfree(caddr_t p, __unused u_int size, __unused caddr_t arg)
4957 {
4958 #if CONFIG_MBUF_MCACHE
4959 mcache_free(m_cache(MC_16KCL), p);
4960 #else
4961 mz_cl_free(ZONE_ID_CLUSTER_16K, p);
4962 #endif /* CONFIG_MBUF_MCACHE */
4963 }
4964
4965 /* m_m16kget() adds a 16KB mbuf cluster to a normal mbuf */
4966 __private_extern__ struct mbuf *
4967 m_m16kget(struct mbuf *m, int wait)
4968 {
4969 struct ext_ref *rfa = NULL;
4970 void *bytes = NULL;
4971
4972 #if CONFIG_MBUF_MCACHE
4973 if ((rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) {
4974 return m;
4975 }
4976 #else
4977 if ((rfa = mz_ref_alloc(wait)) == NULL) {
4978 return m;
4979 }
4980 #endif /* CONFIG_MBUF_MCACHE */
4981 if ((bytes = m_16kalloc(wait)) != NULL) {
4982 m->m_ext.ext_size = M16KCLBYTES;
4983 m->m_ext.ext_buf = bytes;
4984 MBUF_16KCL_INIT(m, m->m_ext.ext_buf, rfa, 1, 0);
4985 } else {
4986 m->m_ext.ext_size = 0;
4987 m->m_ext.ext_buf = NULL;
4988 #if CONFIG_MBUF_MCACHE
4989 mcache_free(ref_cache, rfa);
4990 #else
4991 mz_ref_free(rfa);
4992 #endif /* CONFIG_MBUF_MCACHE */
4993 }
4994
4995 return m;
4996 }
4997
4998 /*
4999 * "Move" mbuf pkthdr from "from" to "to".
5000 * "from" must have M_PKTHDR set, and "to" must be empty.
5001 */
5002 void
5003 m_copy_pkthdr(struct mbuf *to, struct mbuf *from)
5004 {
5005 VERIFY(from->m_flags & M_PKTHDR);
5006
5007 if (to->m_flags & M_PKTHDR) {
5008 /* We will be taking over the tags of 'to' */
5009 m_tag_delete_chain(to);
5010 }
5011 to->m_pkthdr = from->m_pkthdr; /* especially tags */
5012 m_classifier_init(from, 0); /* purge classifier info */
5013 m_tag_init(from, 1); /* purge all tags from src */
5014 m_scratch_init(from); /* clear src scratch area */
5015 to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
5016 if ((to->m_flags & M_EXT) == 0) {
5017 to->m_data = (uintptr_t)to->m_pktdat;
5018 }
5019 }
5020
5021 /*
5022 * Duplicate "from"'s mbuf pkthdr in "to".
5023 * "from" must have M_PKTHDR set, and "to" must be empty.
5024 * In particular, this does a deep copy of the packet tags.
5025 */
5026 int
5027 m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
5028 {
5029 VERIFY(from->m_flags & M_PKTHDR);
5030
5031 if (to->m_flags & M_PKTHDR) {
5032 /* We will be taking over the tags of 'to' */
5033 m_tag_delete_chain(to);
5034 }
5035 to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
5036 if ((to->m_flags & M_EXT) == 0) {
5037 to->m_data = (uintptr_t)to->m_pktdat;
5038 }
5039 to->m_pkthdr = from->m_pkthdr;
5040 /* clear TX completion flag so the callback is not called in the copy */
5041 to->m_pkthdr.pkt_flags &= ~PKTF_TX_COMPL_TS_REQ;
5042 m_tag_init(to, 0); /* preserve dst static tags */
5043 return m_tag_copy_chain(to, from, how);
5044 }
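
/*
 * Usage sketch (illustrative): m_copy_pkthdr() moves the header, so
 * "from" keeps no tags or classifier state afterwards; m_dup_pkthdr()
 * leaves "from" intact and deep-copies the tag chain instead, passing
 * through whatever m_tag_copy_chain() returns:
 *
 *	m_copy_pkthdr(to, from);		// move: "from" loses its tags
 *
 *	ret = m_dup_pkthdr(to, from, M_DONTWAIT);	// deep copy of tags
 */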
5045
5046 void
5047 m_copy_pftag(struct mbuf *to, struct mbuf *from)
5048 {
5049 memcpy(m_pftag(to), m_pftag(from), sizeof(struct pf_mtag));
5050 #if PF_ECN
5051 m_pftag(to)->pftag_hdr = NULL;
5052 m_pftag(to)->pftag_flags &= ~(PF_TAG_HDR_INET | PF_TAG_HDR_INET6);
5053 #endif /* PF_ECN */
5054 }
5055
5056 void
5057 m_copy_necptag(struct mbuf *to, struct mbuf *from)
5058 {
5059 memcpy(m_necptag(to), m_necptag(from), sizeof(struct necp_mtag_));
5060 }
5061
5062 void
5063 m_classifier_init(struct mbuf *m, uint32_t pktf_mask)
5064 {
5065 VERIFY(m->m_flags & M_PKTHDR);
5066
5067 m->m_pkthdr.pkt_proto = 0;
5068 m->m_pkthdr.pkt_flowsrc = 0;
5069 m->m_pkthdr.pkt_flowid = 0;
5070 m->m_pkthdr.pkt_ext_flags = 0;
5071 m->m_pkthdr.pkt_flags &= pktf_mask; /* caller-defined mask */
5072 /* preserve service class and interface info for loopback packets */
5073 if (!(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
5074 (void) m_set_service_class(m, MBUF_SC_BE);
5075 }
5076 if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO)) {
5077 m->m_pkthdr.pkt_ifainfo = 0;
5078 }
5079 /*
5080 * Preserve timestamp if requested
5081 */
5082 if (!(m->m_pkthdr.pkt_flags & PKTF_TS_VALID)) {
5083 m->m_pkthdr.pkt_timestamp = 0;
5084 }
5085 }
5086
5087 void
5088 m_copy_classifier(struct mbuf *to, struct mbuf *from)
5089 {
5090 VERIFY(to->m_flags & M_PKTHDR);
5091 VERIFY(from->m_flags & M_PKTHDR);
5092
5093 to->m_pkthdr.pkt_proto = from->m_pkthdr.pkt_proto;
5094 to->m_pkthdr.pkt_flowsrc = from->m_pkthdr.pkt_flowsrc;
5095 to->m_pkthdr.pkt_flowid = from->m_pkthdr.pkt_flowid;
5096 to->m_pkthdr.pkt_mpriv_srcid = from->m_pkthdr.pkt_mpriv_srcid;
5097 to->m_pkthdr.pkt_flags = from->m_pkthdr.pkt_flags;
5098 to->m_pkthdr.pkt_ext_flags = from->m_pkthdr.pkt_ext_flags;
5099 (void) m_set_service_class(to, from->m_pkthdr.pkt_svc);
5100 to->m_pkthdr.pkt_ifainfo = from->m_pkthdr.pkt_ifainfo;
5101 }
5102
5103 /*
5104 * Return a list of mbuf hdrs that point to clusters. Try for num_needed;
5105 * if wantall is not set, return whatever number were available. Set up the
5106 * first num_with_pkthdrs with mbuf hdrs configured as packet headers; these
5107 * are chained on the m_nextpkt field. Any packets requested beyond this
5108 * are chained onto the last packet header's m_next field. The size of
5109 * the cluster is controlled by the parameter bufsize.
5110 */
5111 __private_extern__ struct mbuf *
5112 m_getpackets_internal(unsigned int *num_needed, int num_with_pkthdrs,
5113 int wait, int wantall, size_t bufsize)
5114 {
5115 mbuf_ref_t m = NULL;
5116 mbuf_ref_t *np, top;
5117 unsigned int pnum, needed = *num_needed;
5118 #if CONFIG_MBUF_MCACHE
5119 mcache_obj_t *mp_list = NULL;
5120 int mcflags = MSLEEPF(wait);
5121 mcache_t *cp;
5122 #else
5123 zstack_t mp_list = {};
5124 mbuf_class_t class = MC_MBUF_CL;
5125 #endif /* CONFIG_MBUF_MCACHE */
5126 u_int16_t flag;
5127 struct ext_ref *rfa;
5128 void *cl;
5129
5130 ASSERT(bufsize == m_maxsize(MC_CL) ||
5131 bufsize == m_maxsize(MC_BIGCL) ||
5132 bufsize == m_maxsize(MC_16KCL));
5133
5134 /*
5135 * Caller must first check for njcl because this
5136 * routine is internal and not exposed/used via KPI.
5137 */
5138 VERIFY(bufsize != m_maxsize(MC_16KCL) || njcl > 0);
5139
5140 top = NULL;
5141 np = ⊤
5142 pnum = 0;
5143
5144 /*
5145 * The caller doesn't want all the requested buffers; only some.
5146 * Try hard to get what we can, but don't block. This effectively
5147 * overrides MCR_SLEEP, since this thread will not go to sleep
5148 * if we can't get all the buffers.
5149 */
5150 #if CONFIG_MBUF_MCACHE
5151 if (!wantall || (mcflags & MCR_NOSLEEP)) {
5152 mcflags |= MCR_TRYHARD;
5153 }
5154
5155 /* Allocate the composite mbuf + cluster elements from the cache */
5156 if (bufsize == m_maxsize(MC_CL)) {
5157 cp = m_cache(MC_MBUF_CL);
5158 } else if (bufsize == m_maxsize(MC_BIGCL)) {
5159 cp = m_cache(MC_MBUF_BIGCL);
5160 } else {
5161 cp = m_cache(MC_MBUF_16KCL);
5162 }
5163 needed = mcache_alloc_ext(cp, &mp_list, needed, mcflags);
5164 #else
5165 if (!wantall || (wait & Z_NOWAIT)) {
5166 wait &= ~Z_NOWAIT;
5167 wait |= Z_NOPAGEWAIT;
5168 }
5169
5170 /* Allocate the composite mbuf + cluster elements from the cache */
5171 if (bufsize == m_maxsize(MC_CL)) {
5172 class = MC_MBUF_CL;
5173 } else if (bufsize == m_maxsize(MC_BIGCL)) {
5174 class = MC_MBUF_BIGCL;
5175 } else {
5176 class = MC_MBUF_16KCL;
5177 }
5178 mp_list = mz_composite_alloc_n(class, needed, wait);
5179 needed = zstack_count(mp_list);
5180 #endif /* CONFIG_MBUF_MCACHE */
5181
5182 for (pnum = 0; pnum < needed; pnum++) {
5183 #if CONFIG_MBUF_MCACHE
5184 m = (struct mbuf *)mp_list;
5185 mp_list = mp_list->obj_next;
5186 #else
5187 m = zstack_pop(&mp_list);
5188 #endif /* CONFIG_MBUF_MCACHE */
5189
5190 VERIFY(m->m_type == MT_FREE && m->m_flags == M_EXT);
5191 cl = m->m_ext.ext_buf;
5192 rfa = m_get_rfa(m);
5193
5194 ASSERT(cl != NULL && rfa != NULL);
5195 VERIFY(MBUF_IS_COMPOSITE(m));
5196
5197 flag = MEXT_FLAGS(m);
5198
5199 MBUF_INIT(m, num_with_pkthdrs, MT_DATA);
5200 if (bufsize == m_maxsize(MC_16KCL)) {
5201 MBUF_16KCL_INIT(m, cl, rfa, 1, flag);
5202 } else if (bufsize == m_maxsize(MC_BIGCL)) {
5203 MBUF_BIGCL_INIT(m, cl, rfa, 1, flag);
5204 } else {
5205 MBUF_CL_INIT(m, cl, rfa, 1, flag);
5206 }
5207
5208 if (num_with_pkthdrs > 0) {
5209 --num_with_pkthdrs;
5210 }
5211
5212 *np = m;
5213 if (num_with_pkthdrs > 0) {
5214 np = &m->m_nextpkt;
5215 } else {
5216 np = &m->m_next;
5217 }
5218 }
5219 #if CONFIG_MBUF_MCACHE
5220 ASSERT(pnum != *num_needed || mp_list == NULL);
5221 if (mp_list != NULL) {
5222 mcache_free_ext(cp, mp_list);
5223 }
5224 #else
5225 ASSERT(pnum != *num_needed || zstack_empty(mp_list));
5226 if (!zstack_empty(mp_list)) {
5227 mz_composite_free_n(class, mp_list);
5228 }
5229 #endif /* CONFIG_MBUF_MCACHE */
5230 if (pnum > 0) {
5231 mtype_stat_add(MT_DATA, pnum);
5232 mtype_stat_sub(MT_FREE, pnum);
5233 }
5234
5235 if (wantall && (pnum != *num_needed)) {
5236 if (top != NULL) {
5237 m_freem_list(top);
5238 }
5239 return NULL;
5240 }
5241
5242 if (pnum > *num_needed) {
5243 printf("%s: File a radar related to <rdar://10146739>. \
5244 needed = %u, pnum = %u, num_needed = %u \n",
5245 __func__, needed, pnum, *num_needed);
5246 }
5247 *num_needed = pnum;
5248
5249 return top;
5250 }
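
/*
 * Usage sketch (illustrative): a caller that needs a batch of
 * cluster-backed packets goes through the KPI wrappers that feed this
 * routine; conceptually the call looks like the following.  Note that
 * the meaning of the wait argument differs between the mcache and
 * zalloc configurations, so M_DONTWAIT is shown only as a placeholder:
 *
 *	unsigned int n = 16;
 *	struct mbuf *top;
 *
 *	top = m_getpackets_internal(&n, 16, M_DONTWAIT, 0,
 *	    m_maxsize(MC_CL));
 *	// on return "n" holds how many packets were actually built;
 *	// they are linked through m_nextpkt starting at "top".
 */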
5251
5252 /*
5253 * Return a list of mbufs linked by m_nextpkt. Try for numlist, and if
5254 * wantall is not set, return whatever number were available. The size of
5255 * each mbuf in the list is controlled by the parameter packetlen. Each
5256 * mbuf of the list may have a chain of mbufs linked by m_next; each mbuf
5257 * in the chain is called a segment. If maxsegments is not NULL and the
5258 * value pointed to is not zero, it specifies the maximum number of segments
5259 * for a chain of mbufs. If maxsegments is NULL or the value pointed to
5260 * is zero, the caller has no restriction on the number of segments.
5261 * The actual number of segments of an mbuf chain is returned in the value
5262 * pointed to by maxsegments.
5263 */
5264 __private_extern__ struct mbuf *
5265 m_allocpacket_internal(unsigned int *numlist, size_t packetlen,
5266 unsigned int *maxsegments, int wait, int wantall, size_t wantsize)
5267 {
5268 mbuf_ref_t *np, top, first = NULL;
5269 size_t bufsize, r_bufsize;
5270 unsigned int num = 0;
5271 unsigned int nsegs = 0;
5272 unsigned int needed = 0, resid;
5273 #if CONFIG_MBUF_MCACHE
5274 int mcflags = MSLEEPF(wait);
5275 mcache_obj_t *mp_list = NULL, *rmp_list = NULL;
5276 mcache_t *cp = NULL, *rcp = NULL;
5277 #else
5278 zstack_t mp_list = {}, rmp_list = {};
5279 mbuf_class_t class = MC_MBUF, rclass = MC_MBUF_CL;
5280 #endif /* CONFIG_MBUF_MCACHE */
5281
5282 if (*numlist == 0) {
5283 os_log(OS_LOG_DEFAULT, "m_allocpacket_internal *numlist is 0");
5284 return NULL;
5285 }
5286
5287 top = NULL;
5288 np = ⊤
5289
5290 if (wantsize == 0) {
5291 if (packetlen <= MINCLSIZE) {
5292 bufsize = packetlen;
5293 } else if (packetlen > m_maxsize(MC_CL)) {
5294 /* Use 4KB if jumbo cluster pool isn't available */
5295 if (packetlen <= m_maxsize(MC_BIGCL) || njcl == 0) {
5296 bufsize = m_maxsize(MC_BIGCL);
5297 } else {
5298 bufsize = m_maxsize(MC_16KCL);
5299 }
5300 } else {
5301 bufsize = m_maxsize(MC_CL);
5302 }
5303 } else if (wantsize == m_maxsize(MC_CL) ||
5304 wantsize == m_maxsize(MC_BIGCL) ||
5305 (wantsize == m_maxsize(MC_16KCL) && njcl > 0)) {
5306 bufsize = wantsize;
5307 } else {
5308 *numlist = 0;
5309 os_log(OS_LOG_DEFAULT, "m_allocpacket_internal wantsize unsupported");
5310 return NULL;
5311 }
5312
5313 if (bufsize <= MHLEN) {
5314 nsegs = 1;
5315 } else if (bufsize <= MINCLSIZE) {
5316 if (maxsegments != NULL && *maxsegments == 1) {
5317 bufsize = m_maxsize(MC_CL);
5318 nsegs = 1;
5319 } else {
5320 nsegs = 2;
5321 }
5322 } else if (bufsize == m_maxsize(MC_16KCL)) {
5323 VERIFY(njcl > 0);
5324 nsegs = ((packetlen - 1) >> M16KCLSHIFT) + 1;
5325 } else if (bufsize == m_maxsize(MC_BIGCL)) {
5326 nsegs = ((packetlen - 1) >> MBIGCLSHIFT) + 1;
5327 } else {
5328 nsegs = ((packetlen - 1) >> MCLSHIFT) + 1;
5329 }
5330 if (maxsegments != NULL) {
5331 if (*maxsegments && nsegs > *maxsegments) {
5332 *maxsegments = nsegs;
5333 *numlist = 0;
5334 os_log(OS_LOG_DEFAULT, "m_allocpacket_internal nsegs > *maxsegments");
5335 return NULL;
5336 }
5337 *maxsegments = nsegs;
5338 }
5339
5340 /*
5341 * The caller doesn't want all the requested buffers; only some.
5342 * Try hard to get what we can, but don't block. This effectively
5343 * overrides MCR_SLEEP, since this thread will not go to sleep
5344 * if we can't get all the buffers.
5345 */
5346 #if CONFIG_MBUF_MCACHE
5347 if (!wantall || (mcflags & MCR_NOSLEEP)) {
5348 mcflags |= MCR_TRYHARD;
5349 }
5350 #else
5351 if (!wantall || (wait & Z_NOWAIT)) {
5352 wait &= ~Z_NOWAIT;
5353 wait |= Z_NOPAGEWAIT;
5354 }
5355 #endif /* CONFIG_MBUF_MCACHE */
5356
5357 /*
5358 * Simple case where all elements in the lists/chains are mbufs.
5359 * Unless bufsize is greater than MHLEN, each segment chain is made
5360 * up of exactly 1 mbuf. Otherwise, each segment chain is made up
5361 * of 2 mbufs; the second one is used for the residual data, i.e.
5362 * the remaining data that cannot fit into the first mbuf.
5363 */
5364 if (bufsize <= MINCLSIZE) {
5365 /* Allocate the elements in one shot from the mbuf cache */
5366 ASSERT(bufsize <= MHLEN || nsegs == 2);
5367 #if CONFIG_MBUF_MCACHE
5368 cp = m_cache(MC_MBUF);
5369 needed = mcache_alloc_ext(cp, &mp_list,
5370 (*numlist) * nsegs, mcflags);
5371 #else
5372 class = MC_MBUF;
5373 mp_list = mz_alloc_n((*numlist) * nsegs, wait);
5374 needed = zstack_count(mp_list);
5375 #endif /* CONFIG_MBUF_MCACHE */
5376
5377 /*
5378 * The number of elements must be even if we are to use an
5379 * mbuf (instead of a cluster) to store the residual data.
5380 * If we couldn't allocate the requested number of mbufs,
5381 * trim the number down (if it's odd) in order to avoid
5382 * creating a partial segment chain.
5383 */
5384 if (bufsize > MHLEN && (needed & 0x1)) {
5385 needed--;
5386 }
5387
5388 while (num < needed) {
5389 mbuf_ref_t m = NULL;
5390
5391 #if CONFIG_MBUF_MCACHE
5392 m = (struct mbuf *)mp_list;
5393 mp_list = mp_list->obj_next;
5394 #else
5395 m = zstack_pop(&mp_list);
5396 #endif /* CONFIG_MBUF_MCACHE */
5397 ASSERT(m != NULL);
5398
5399 MBUF_INIT(m, 1, MT_DATA);
5400 num++;
5401 if (bufsize > MHLEN) {
5402 /* A second mbuf for this segment chain */
5403 #if CONFIG_MBUF_MCACHE
5404 m->m_next = (struct mbuf *)mp_list;
5405 mp_list = mp_list->obj_next;
5406 #else
5407 m->m_next = zstack_pop(&mp_list);
5408 #endif /* CONFIG_MBUF_MCACHE */
5409
5410 ASSERT(m->m_next != NULL);
5411
5412 MBUF_INIT(m->m_next, 0, MT_DATA);
5413 num++;
5414 }
5415 *np = m;
5416 np = &m->m_nextpkt;
5417 }
5418 #if CONFIG_MBUF_MCACHE
5419 ASSERT(num != *numlist || mp_list == NULL);
5420 #else
5421 ASSERT(num != *numlist || zstack_empty(mp_list));
5422 #endif /* CONFIG_MBUF_MCACHE */
5423
5424 if (num > 0) {
5425 mtype_stat_add(MT_DATA, num);
5426 mtype_stat_sub(MT_FREE, num);
5427 }
5428 num /= nsegs;
5429
5430 /* We've got them all; return to caller */
5431 if (num == *numlist) {
5432 return top;
5433 }
5434
5435 goto fail;
5436 }
5437
5438 /*
5439 * Complex cases where elements are made up of one or more composite
5440 * mbufs + cluster, depending on packetlen. Each N-segment chain can
5441 * be illustrated as follows:
5442 *
5443 * [mbuf + cluster 1] [mbuf + cluster 2] ... [mbuf + cluster N]
5444 *
5445 * Every composite mbuf + cluster element comes from the intermediate
5446 * cache (either MC_MBUF_CL or MC_MBUF_BIGCL). For space efficiency,
5447 * the last composite element will come from the MC_MBUF_CL cache,
5448 * unless the residual data is larger than 2KB, in which case the
5449 * big cluster composite cache (MC_MBUF_BIGCL) is used instead. Residual
5450 * data is defined as extra data beyond the first element that cannot
5451 * fit into the previous element, i.e. there is no residual data if
5452 * the chain only has 1 segment.
5453 */
5454 r_bufsize = bufsize;
5455 resid = packetlen > bufsize ? packetlen % bufsize : 0;
5456 if (resid > 0) {
5457 /* There is residual data; figure out the cluster size */
5458 if (wantsize == 0 && packetlen > MINCLSIZE) {
5459 /*
5460 * Caller didn't request that all of the segments
5461 * in the chain use the same cluster size; use the
5462 * smaller of the cluster sizes.
5463 */
5464 if (njcl > 0 && resid > m_maxsize(MC_BIGCL)) {
5465 r_bufsize = m_maxsize(MC_16KCL);
5466 } else if (resid > m_maxsize(MC_CL)) {
5467 r_bufsize = m_maxsize(MC_BIGCL);
5468 } else {
5469 r_bufsize = m_maxsize(MC_CL);
5470 }
5471 } else {
5472 /* Use the same cluster size as the other segments */
5473 resid = 0;
5474 }
5475 }
5476
5477 needed = *numlist;
5478 if (resid > 0) {
5479 /*
5480 * Attempt to allocate composite mbuf + cluster elements for
5481 * the residual data in each chain; record the number of such
5482 * elements that can be allocated so that we know how many
5483 * segment chains we can afford to create.
5484 */
5485 #if CONFIG_MBUF_MCACHE
5486 if (r_bufsize <= m_maxsize(MC_CL)) {
5487 rcp = m_cache(MC_MBUF_CL);
5488 } else if (r_bufsize <= m_maxsize(MC_BIGCL)) {
5489 rcp = m_cache(MC_MBUF_BIGCL);
5490 } else {
5491 rcp = m_cache(MC_MBUF_16KCL);
5492 }
5493 needed = mcache_alloc_ext(rcp, &rmp_list, *numlist, mcflags);
5494 #else
5495 if (r_bufsize <= m_maxsize(MC_CL)) {
5496 rclass = MC_MBUF_CL;
5497 } else if (r_bufsize <= m_maxsize(MC_BIGCL)) {
5498 rclass = MC_MBUF_BIGCL;
5499 } else {
5500 rclass = MC_MBUF_16KCL;
5501 }
5502 rmp_list = mz_composite_alloc_n(rclass, *numlist, wait);
5503 needed = zstack_count(rmp_list);
5504 #endif /* CONFIG_MBUF_MCACHE */
5505 if (needed == 0) {
5506 goto fail;
5507 }
5508
5509 /* This is temporarily reduced for calculation */
5510 ASSERT(nsegs > 1);
5511 nsegs--;
5512 }
5513
5514 /*
5515 * Attempt to allocate the rest of the composite mbuf + cluster
5516 * elements for the number of segment chains that we need.
5517 */
5518 #if CONFIG_MBUF_MCACHE
5519 if (bufsize <= m_maxsize(MC_CL)) {
5520 cp = m_cache(MC_MBUF_CL);
5521 } else if (bufsize <= m_maxsize(MC_BIGCL)) {
5522 cp = m_cache(MC_MBUF_BIGCL);
5523 } else {
5524 cp = m_cache(MC_MBUF_16KCL);
5525 }
5526 needed = mcache_alloc_ext(cp, &mp_list, needed * nsegs, mcflags);
5527 #else
5528 if (bufsize <= m_maxsize(MC_CL)) {
5529 class = MC_MBUF_CL;
5530 } else if (bufsize <= m_maxsize(MC_BIGCL)) {
5531 class = MC_MBUF_BIGCL;
5532 } else {
5533 class = MC_MBUF_16KCL;
5534 }
5535 mp_list = mz_composite_alloc_n(class, needed * nsegs, wait);
5536 needed = zstack_count(mp_list);
5537 #endif /* CONFIG_MBUF_MCACHE */
5538
5539 /* Round it down to avoid creating a partial segment chain */
5540 needed = (needed / nsegs) * nsegs;
5541 if (needed == 0) {
5542 goto fail;
5543 }
5544
5545 if (resid > 0) {
5546 /*
5547 * We're about to construct the chain(s); take into account
5548 * the number of segments we have created above to hold the
5549 * residual data for each chain, as well as restore the
5550 * original count of segments per chain.
5551 */
5552 ASSERT(nsegs > 0);
5553 needed += needed / nsegs;
5554 nsegs++;
5555 }
5556
5557 for (;;) {
5558 mbuf_ref_t m = NULL;
5559 u_int16_t flag;
5560 struct ext_ref *rfa;
5561 void *cl;
5562 int pkthdr;
5563 m_ext_free_func_t m_free_func;
5564
5565 ++num;
5566
5567 if (nsegs == 1 || (num % nsegs) != 0 || resid == 0) {
5568 #if CONFIG_MBUF_MCACHE
5569 m = (struct mbuf *)mp_list;
5570 mp_list = mp_list->obj_next;
5571 #else
5572 m = zstack_pop(&mp_list);
5573 #endif /* CONFIG_MBUF_MCACHE */
5574 } else {
5575 #if CONFIG_MBUF_MCACHE
5576 m = (struct mbuf *)rmp_list;
5577 rmp_list = rmp_list->obj_next;
5578 #else
5579 m = zstack_pop(&rmp_list);
5580 #endif /* CONFIG_MBUF_MCACHE */
5581 }
5582 ASSERT(m != NULL);
5583 m_free_func = m_get_ext_free(m);
5584 VERIFY(m->m_type == MT_FREE && m->m_flags == M_EXT);
5585 VERIFY(m_free_func == NULL || m_free_func == m_bigfree ||
5586 m_free_func == m_16kfree);
5587
5588 cl = m->m_ext.ext_buf;
5589 rfa = m_get_rfa(m);
5590
5591 ASSERT(cl != NULL && rfa != NULL);
5592 VERIFY(MBUF_IS_COMPOSITE(m));
5593
5594 flag = MEXT_FLAGS(m);
5595
5596 pkthdr = (nsegs == 1 || (num % nsegs) == 1);
5597 if (pkthdr) {
5598 first = m;
5599 }
5600 MBUF_INIT(m, pkthdr, MT_DATA);
5601 if (m_free_func == m_16kfree) {
5602 MBUF_16KCL_INIT(m, cl, rfa, 1, flag);
5603 } else if (m_free_func == m_bigfree) {
5604 MBUF_BIGCL_INIT(m, cl, rfa, 1, flag);
5605 } else {
5606 MBUF_CL_INIT(m, cl, rfa, 1, flag);
5607 }
5608
5609 *np = m;
5610 if ((num % nsegs) == 0) {
5611 np = &first->m_nextpkt;
5612 } else {
5613 np = &m->m_next;
5614 }
5615
5616 if (num == needed) {
5617 break;
5618 }
5619 }
5620
5621 if (num > 0) {
5622 mtype_stat_add(MT_DATA, num);
5623 mtype_stat_sub(MT_FREE, num);
5624 }
5625
5626 num /= nsegs;
5627
5628 /* We've got them all; return to caller */
5629 if (num == *numlist) {
5630 #if CONFIG_MBUF_MCACHE
5631 ASSERT(mp_list == NULL && rmp_list == NULL);
5632 #else
5633 ASSERT(zstack_empty(mp_list) && zstack_empty(rmp_list));
5634 #endif /* CONFIG_MBUF_MCACHE */
5635 return top;
5636 }
5637
5638 fail:
5639 /* Free up what's left of the above */
5640 #if CONFIG_MBUF_MCACHE
5641 if (mp_list != NULL) {
5642 mcache_free_ext(cp, mp_list);
5643 }
5644 if (rmp_list != NULL) {
5645 mcache_free_ext(rcp, rmp_list);
5646 }
5647 #else
5648 if (!zstack_empty(mp_list)) {
5649 if (class == MC_MBUF) {
5650 /* No need to elide, these mbufs came from the cache. */
5651 mz_free_n(mp_list);
5652 } else {
5653 mz_composite_free_n(class, mp_list);
5654 }
5655 }
5656 if (!zstack_empty(rmp_list)) {
5657 mz_composite_free_n(rclass, rmp_list);
5658 }
5659 #endif /* CONFIG_MBUF_MCACHE */
5660 if (wantall && top != NULL) {
5661 m_freem_list(top);
5662 *numlist = 0;
5663 return NULL;
5664 }
5665 *numlist = num;
5666 return top;
5667 }
5668
5669 /*
5670 * Best effort to get an mbuf cluster + pkthdr. Used by drivers to allocate
5671 * packets on the receive ring.
5672 */
5673 __private_extern__ struct mbuf *
5674 m_getpacket_how(int wait)
5675 {
5676 unsigned int num_needed = 1;
5677
5678 return m_getpackets_internal(&num_needed, 1, wait, 1,
5679 m_maxsize(MC_CL));
5680 }
5681
5682 /*
5683 * Best effort to get an mbuf cluster + pkthdr. Used by drivers to allocate
5684 * packets on the receive ring.
5685 */
5686 struct mbuf *
5687 m_getpacket(void)
5688 {
5689 unsigned int num_needed = 1;
5690
5691 return m_getpackets_internal(&num_needed, 1, M_WAIT, 1,
5692 m_maxsize(MC_CL));
5693 }
5694
5695 /*
5696 * Return a list of mbuf hdrs that point to clusters. Try for num_needed;
5697 * if this can't be met, return whatever number is available. Set up the
5698 * first num_with_pkthdrs mbuf hdrs as packet headers; these
5699 * are chained on the m_nextpkt field. Any packets requested beyond this are
5700 * chained onto the last packet header's m_next field.
5701 */
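/*
 * Typical usage, sketched with an invented ring size: a driver refills
 * its receive ring with cluster-backed packets, all carrying packet
 * headers, and walks whatever subset it was actually given.
 *
 *	struct mbuf *list, *m;
 *
 *	list = m_getpackets(32, 32, M_DONTWAIT);
 *	for (m = list; m != NULL; m = m->m_nextpkt) {
 *		...
 *	}
 */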
5702 struct mbuf *
5703 m_getpackets(int num_needed, int num_with_pkthdrs, int how)
5704 {
5705 unsigned int n = num_needed;
5706
5707 return m_getpackets_internal(&n, num_with_pkthdrs, how, 0,
5708 m_maxsize(MC_CL));
5709 }
5710
5711 /*
5712 * Return a list of mbuf hdrs set up as packet hdrs chained together
5713 * on the m_nextpkt field
5714 */
5715 struct mbuf *
5716 m_getpackethdrs(int num_needed, int how)
5717 {
5718 mbuf_ref_t m, *np, top;
5719
5720 top = NULL;
5721 np = ⊤
5722
5723 while (num_needed--) {
5724 m = _M_RETRYHDR(how, MT_DATA);
5725 if (m == NULL) {
5726 break;
5727 }
5728
5729 *np = m;
5730 np = &m->m_nextpkt;
5731 }
5732
5733 return top;
5734 }
5735
5736 /*
5737 * Free an mbuf list (m_nextpkt) while following m_next. Returns the count
5738 * of packets freed. Used by the drivers.
5739 */
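/*
 * A small usage sketch: a completion path returns an entire
 * m_nextpkt-linked batch in one call; the returned value is the number
 * of packets released (done_list and tx_completed are hypothetical
 * caller-side names).
 *
 *	tx_completed += m_freem_list(done_list);
 */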
5740 int
5741 m_freem_list(struct mbuf *m)
5742 {
5743 struct mbuf *nextpkt;
5744 #if CONFIG_MBUF_MCACHE
5745 mcache_obj_t *mp_list = NULL;
5746 mcache_obj_t *mcl_list = NULL;
5747 mcache_obj_t *mbc_list = NULL;
5748 mcache_obj_t *m16k_list = NULL;
5749 mcache_obj_t *m_mcl_list = NULL;
5750 mcache_obj_t *m_mbc_list = NULL;
5751 mcache_obj_t *m_m16k_list = NULL;
5752 mcache_obj_t *ref_list = NULL;
5753 #else
5754 zstack_t mp_list = {}, mcl_list = {}, mbc_list = {},
5755 m16k_list = {}, m_mcl_list = {},
5756 m_mbc_list = {}, m_m16k_list = {}, ref_list = {};
5757 #endif /* CONFIG_MBUF_MCACHE */
5758 int pktcount = 0;
5759 int mt_free = 0, mt_data = 0, mt_header = 0, mt_soname = 0, mt_tag = 0;
5760
5761 while (m != NULL) {
5762 pktcount++;
5763
5764 nextpkt = m->m_nextpkt;
5765 m->m_nextpkt = NULL;
5766
5767 while (m != NULL) {
5768 struct mbuf *next = m->m_next;
5769 #if CONFIG_MBUF_MCACHE
5770 mcache_obj_t *o, *rfa;
5771 #else
5772 void *cl = NULL;
5773 #endif /* CONFIG_MBUF_MCACHE */
5774 if (m->m_type == MT_FREE) {
5775 panic("m_free: freeing an already freed mbuf");
5776 }
5777
5778 if (m->m_flags & M_PKTHDR) {
5779 /* Free the aux data and tags if there is any */
5780 m_tag_delete_chain(m);
5781 m_do_tx_compl_callback(m, NULL);
5782 }
5783
5784 if (!(m->m_flags & M_EXT)) {
5785 mt_free++;
5786 goto simple_free;
5787 }
5788
5789 if (MBUF_IS_PAIRED(m) && m_free_paired(m)) {
5790 m = next;
5791 continue;
5792 }
5793
5794 mt_free++;
5795
5796 #if CONFIG_MBUF_MCACHE
5797 o = (mcache_obj_t *)(void *)m->m_ext.ext_buf;
5798 #else
5799 cl = m->m_ext.ext_buf;
5800 #endif /* CONFIG_MBUF_MCACHE */
5801 /*
5802 * Make sure that we don't touch any ext_ref
5803 * member after we decrement the reference count
5804 * since that may lead to use-after-free
5805 * when we do not hold the last reference.
5806 */
5807 const bool composite = !!(MEXT_FLAGS(m) & EXTF_COMPOSITE);
5808 const m_ext_free_func_t m_free_func = m_get_ext_free(m);
5809 const uint16_t minref = MEXT_MINREF(m);
5810 const uint16_t refcnt = m_decref(m);
5811 if (refcnt == minref && !composite) {
5812 #if CONFIG_MBUF_MCACHE
5813 if (m_free_func == NULL) {
5814 o->obj_next = mcl_list;
5815 mcl_list = o;
5816 } else if (m_free_func == m_bigfree) {
5817 o->obj_next = mbc_list;
5818 mbc_list = o;
5819 } else if (m_free_func == m_16kfree) {
5820 o->obj_next = m16k_list;
5821 m16k_list = o;
5822 } else {
5823 (*(m_free_func))((caddr_t)o,
5824 m->m_ext.ext_size,
5825 m_get_ext_arg(m));
5826 }
5827 rfa = (mcache_obj_t *)(void *)m_get_rfa(m);
5828 rfa->obj_next = ref_list;
5829 ref_list = rfa;
5830 #else
5831 if (m_free_func == NULL) {
5832 zstack_push(&mcl_list, cl);
5833 } else if (m_free_func == m_bigfree) {
5834 zstack_push(&mbc_list, cl);
5835 } else if (m_free_func == m_16kfree) {
5836 zstack_push(&m16k_list, cl);
5837 } else {
5838 (*(m_free_func))((caddr_t)cl,
5839 m->m_ext.ext_size,
5840 m_get_ext_arg(m));
5841 }
5842 zstack_push(&ref_list, m_get_rfa(m));
5843 #endif /* CONFIG_MBUF_MCACHE */
5844 m_set_ext(m, NULL, NULL, NULL);
5845 } else if (refcnt == minref && composite) {
5846 VERIFY(!(MEXT_FLAGS(m) & EXTF_PAIRED));
5847 /*
5848 * Amortize the costs of atomic operations
5849 * by doing them at the end, if possible.
5850 */
5851 if (m->m_type == MT_DATA) {
5852 mt_data++;
5853 } else if (m->m_type == MT_HEADER) {
5854 mt_header++;
5855 } else if (m->m_type == MT_SONAME) {
5856 mt_soname++;
5857 } else if (m->m_type == MT_TAG) {
5858 mt_tag++;
5859 } else {
5860 mtype_stat_dec(m->m_type);
5861 }
5862
5863 m->m_type = MT_FREE;
5864 m->m_flags = M_EXT;
5865 m->m_len = 0;
5866 m->m_next = m->m_nextpkt = NULL;
5867
5868 /*
5869 * MEXT_FLAGS is safe to access here
5870 * since we are now sure that we held
5871 * the last reference to ext_ref.
5872 */
5873 MEXT_FLAGS(m) &= ~EXTF_READONLY;
5874
5875 /* "Free" into the intermediate cache */
5876 #if CONFIG_MBUF_MCACHE
5877 o = (mcache_obj_t *)m;
5878 if (m_free_func == NULL) {
5879 o->obj_next = m_mcl_list;
5880 m_mcl_list = o;
5881 } else if (m_free_func == m_bigfree) {
5882 o->obj_next = m_mbc_list;
5883 m_mbc_list = o;
5884 } else {
5885 VERIFY(m_free_func == m_16kfree);
5886 o->obj_next = m_m16k_list;
5887 m_m16k_list = o;
5888 }
5889 #else
5890 if (m_free_func == NULL) {
5891 zstack_push(&m_mcl_list, m);
5892 } else if (m_free_func == m_bigfree) {
5893 zstack_push(&m_mbc_list, m);
5894 } else {
5895 VERIFY(m_free_func == m_16kfree);
5896 zstack_push(&m_m16k_list, m);
5897 }
5898 #endif /* CONFIG_MBUF_MCACHE */
5899 m = next;
5900 continue;
5901 }
5902 simple_free:
5903 /*
5904 * Amortize the costs of atomic operations
5905 * by doing them at the end, if possible.
5906 */
5907 if (m->m_type == MT_DATA) {
5908 mt_data++;
5909 } else if (m->m_type == MT_HEADER) {
5910 mt_header++;
5911 } else if (m->m_type == MT_SONAME) {
5912 mt_soname++;
5913 } else if (m->m_type == MT_TAG) {
5914 mt_tag++;
5915 } else if (m->m_type != MT_FREE) {
5916 mtype_stat_dec(m->m_type);
5917 }
5918
5919 m->m_type = MT_FREE;
5920 m->m_flags = m->m_len = 0;
5921 m->m_next = m->m_nextpkt = NULL;
5922
5923 #if CONFIG_MBUF_MCACHE
5924 ((mcache_obj_t *)m)->obj_next = mp_list;
5925 mp_list = (mcache_obj_t *)m;
5926 #else
5927 m_elide(m);
5928 zstack_push(&mp_list, m);
5929 #endif /* CONFIG_MBUF_MCACHE */
5930
5931 m = next;
5932 }
5933
5934 m = nextpkt;
5935 }
5936
5937 if (mt_free > 0) {
5938 mtype_stat_add(MT_FREE, mt_free);
5939 }
5940 if (mt_data > 0) {
5941 mtype_stat_sub(MT_DATA, mt_data);
5942 }
5943 if (mt_header > 0) {
5944 mtype_stat_sub(MT_HEADER, mt_header);
5945 }
5946 if (mt_soname > 0) {
5947 mtype_stat_sub(MT_SONAME, mt_soname);
5948 }
5949 if (mt_tag > 0) {
5950 mtype_stat_sub(MT_TAG, mt_tag);
5951 }
5952 #if CONFIG_MBUF_MCACHE
5953 if (mp_list != NULL) {
5954 mcache_free_ext(m_cache(MC_MBUF), mp_list);
5955 }
5956 if (mcl_list != NULL) {
5957 mcache_free_ext(m_cache(MC_CL), mcl_list);
5958 }
5959 if (mbc_list != NULL) {
5960 mcache_free_ext(m_cache(MC_BIGCL), mbc_list);
5961 }
5962 if (m16k_list != NULL) {
5963 mcache_free_ext(m_cache(MC_16KCL), m16k_list);
5964 }
5965 if (m_mcl_list != NULL) {
5966 mcache_free_ext(m_cache(MC_MBUF_CL), m_mcl_list);
5967 }
5968 if (m_mbc_list != NULL) {
5969 mcache_free_ext(m_cache(MC_MBUF_BIGCL), m_mbc_list);
5970 }
5971 if (m_m16k_list != NULL) {
5972 mcache_free_ext(m_cache(MC_MBUF_16KCL), m_m16k_list);
5973 }
5974 if (ref_list != NULL) {
5975 mcache_free_ext(ref_cache, ref_list);
5976 }
5977 #else
5978 if (!zstack_empty(mp_list)) {
5979 /* mbufs elided above. */
5980 mz_free_n(mp_list);
5981 }
5982 if (!zstack_empty(mcl_list)) {
5983 zfree_nozero_n(ZONE_ID_CLUSTER_2K, mcl_list);
5984 }
5985 if (!zstack_empty(mbc_list)) {
5986 zfree_nozero_n(ZONE_ID_CLUSTER_4K, mbc_list);
5987 }
5988 if (!zstack_empty(m16k_list)) {
5989 zfree_nozero_n(ZONE_ID_CLUSTER_16K, m16k_list);
5990 }
5991 if (!zstack_empty(m_mcl_list)) {
5992 mz_composite_free_n(MC_MBUF_CL, m_mcl_list);
5993 }
5994 if (!zstack_empty(m_mbc_list)) {
5995 mz_composite_free_n(MC_MBUF_BIGCL, m_mbc_list);
5996 }
5997 if (!zstack_empty(m_m16k_list)) {
5998 mz_composite_free_n(MC_MBUF_16KCL, m_m16k_list);
5999 }
6000 if (!zstack_empty(ref_list)) {
6001 zfree_nozero_n(ZONE_ID_MBUF_REF, ref_list);
6002 }
6003 #endif /* CONFIG_MBUF_MCACHE */
6004
6005 return pktcount;
6006 }
6007
6008 /*
6009 * Wrapper around m_freem_list which captures the packets that are about to
6010 * be dropped. If funcname is NULL, neither the function name nor the line
6011 * number is stored; only the drop reason is saved.
6012 * Make sure to pass a direction flag (DROPTAP_FLAG_DIR_OUT or
6013 * DROPTAP_FLAG_DIR_IN), or the packets will not be captured.
6014 */
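/*
 * A minimal usage sketch: drop a batch of outbound packets and record
 * why; "reason" stands for whatever drop-reason code the caller uses.
 *
 *	m_drop_list(m, ifp, DROPTAP_FLAG_DIR_OUT, reason,
 *	    __func__, __LINE__);
 */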
6015 void
6016 m_drop_list(mbuf_t m, struct ifnet *ifp, uint16_t flags, uint32_t reason, const char *funcname,
6017 uint16_t linenum)
6018 {
6019 struct mbuf *nextpkt;
mbuf_t top = m;	/* keep the head so the whole list can be freed below */
6020
6021 if (m == NULL) {
6022 return;
6023 }
6024
6025 if (__probable(droptap_total_tap_count == 0)) {
6026 m_freem_list(m);
6027 return;
6028 }
6029
6030 if (flags & DROPTAP_FLAG_DIR_OUT) {
6031 while (m != NULL) {
6032 uint16_t tmp_flags = flags;
6033
6034 nextpkt = m->m_nextpkt;
6035 if (m->m_pkthdr.pkt_hdr == NULL) {
6036 tmp_flags |= DROPTAP_FLAG_L2_MISSING;
6037 }
6038 droptap_output_mbuf(m, reason, funcname, linenum, tmp_flags,
6039 ifp);
6040 m = nextpkt;
6041 }
6042 } else if (flags & DROPTAP_FLAG_DIR_IN) {
6043 while (m != NULL) {
6044 char *frame_header __single;
6045 uint16_t tmp_flags = flags;
6046
6047 nextpkt = m->m_nextpkt;
6048
6049 if ((flags & DROPTAP_FLAG_L2_MISSING) == 0 &&
6050 m->m_pkthdr.pkt_hdr != NULL) {
6051 frame_header = m->m_pkthdr.pkt_hdr;
6052 } else {
6053 frame_header = NULL;
6054 tmp_flags |= DROPTAP_FLAG_L2_MISSING;
6055 }
6056
6057 droptap_input_mbuf(m, reason, funcname, linenum, tmp_flags,
6058 m->m_pkthdr.rcvif, frame_header);
6059 m = nextpkt;
6060 }
6061 }
6062 m_freem_list(top);
6063 }
6064
6065 void
6066 m_freem(struct mbuf *m)
6067 {
6068 while (m != NULL) {
6069 m = m_free(m);
6070 }
6071 }
6072
6073 /*
6074 * Wrapper around m_freem which captures the packet that's about to be dropped.
6075 * If funcname is NULL, neither the function name nor the line number is
6076 * stored; only the drop reason is saved. Make sure to pass a
6077 * direction flag (DROPTAP_FLAG_DIR_OUT or DROPTAP_FLAG_DIR_IN), or the packet
6078 * will not be captured.
6079 */
6080 static void
6081 m_drop_common(mbuf_t m, struct ifnet *ifp, uint16_t flags, uint32_t reason, const char *funcname,
6082 uint16_t linenum)
6083 {
6084 if (flags & DROPTAP_FLAG_DIR_OUT) {
6085 droptap_output_mbuf(m, reason, funcname, linenum, flags, ifp);
6086 } else if (flags & DROPTAP_FLAG_DIR_IN) {
6087 char *frame_header __single;
6088
6089 if ((flags & DROPTAP_FLAG_L2_MISSING) == 0 &&
6090 m->m_pkthdr.pkt_hdr != NULL) {
6091 frame_header = m->m_pkthdr.pkt_hdr;
6092 } else {
6093 frame_header = NULL;
6094 flags |= DROPTAP_FLAG_L2_MISSING;
6095 }
6096
6097 droptap_input_mbuf(m, reason, funcname, linenum, flags, ifp,
6098 frame_header);
6099 }
6100 m_freem(m);
6101 }
6102
6103 void
6104 m_drop(mbuf_t m, uint16_t flags, uint32_t reason, const char *funcname,
6105 uint16_t linenum)
6106 {
6107 if (m == NULL) {
6108 return;
6109 }
6110
6111 if (__probable(droptap_total_tap_count == 0)) {
6112 m_freem(m);
6113 return;
6114 }
6115
6116 if (flags & DROPTAP_FLAG_DIR_OUT) {
6117 m_drop_common(m, NULL, flags, reason, funcname, linenum);
6118 } else if (flags & DROPTAP_FLAG_DIR_IN) {
6119 m_drop_common(m, m->m_pkthdr.rcvif, flags, reason, funcname, linenum);
6120 }
6121 }
6122
6123 void
6124 m_drop_if(mbuf_t m, struct ifnet *ifp, uint16_t flags, uint32_t reason, const char *funcname,
6125 uint16_t linenum)
6126 {
6127 if (m == NULL) {
6128 return;
6129 }
6130
6131 if (__probable(droptap_total_tap_count == 0)) {
6132 m_freem(m);
6133 return;
6134 }
6135
6136 m_drop_common(m, ifp, flags, reason, funcname, linenum);
6137 }
6138
6139 /*
6140 * Mbuffer utility routines.
6141 */
6142 /*
6143 * Set the m_data pointer of a newly allocated mbuf to place an object of the
6144 * specified size at the end of the mbuf, longword aligned.
6145 *
6146 * NB: Historically, we had M_ALIGN(), MH_ALIGN(), and MEXT_ALIGN() as
6147 * separate macros, each asserting that it was called at the proper moment.
6148 * This required callers to themselves test the storage type and call the
6149 * right one. Rather than require callers to be aware of those layout
6150 * decisions, we centralize here.
6151 */
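/*
 * A short usage sketch: place a 64-byte payload at the end of a fresh
 * mbuf so that headers prepended later still fit in front of it (the
 * payload size is arbitrary).
 *
 *	struct mbuf *m;
 *
 *	m = m_gethdr(M_DONTWAIT, MT_DATA);
 *	if (m != NULL) {
 *		m_align(m, 64);
 *		m->m_len = 64;
 *	}
 */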
6152 void
6153 m_align(struct mbuf *m, int len)
6154 {
6155 int adjust = 0;
6156
6157 /* At this point data must point to start */
6158 VERIFY(m->m_data == (uintptr_t)M_START(m));
6159 VERIFY(len >= 0);
6160 VERIFY(len <= M_SIZE(m));
6161 adjust = M_SIZE(m) - len;
6162 m->m_data += adjust & ~(sizeof(long) - 1);
6163 }
6164
6165 /*
6166 * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain,
6167 * copy junk along. Does not adjust packet header length.
6168 */
6169 struct mbuf *
6170 m_prepend(struct mbuf *m, int len, int how)
6171 {
6172 struct mbuf *mn;
6173
6174 _MGET(mn, how, m->m_type);
6175 if (mn == NULL) {
6176 m_freem(m);
6177 return NULL;
6178 }
6179 if (m->m_flags & M_PKTHDR) {
6180 M_COPY_PKTHDR(mn, m);
6181 m->m_flags &= ~M_PKTHDR;
6182 }
6183 mn->m_next = m;
6184 m = mn;
6185 if (m->m_flags & M_PKTHDR) {
6186 VERIFY(len <= MHLEN);
6187 MH_ALIGN(m, len);
6188 } else {
6189 VERIFY(len <= MLEN);
6190 M_ALIGN(m, len);
6191 }
6192 m->m_len = len;
6193 return m;
6194 }
6195
6196 /*
6197 * Replacement for old M_PREPEND macro: allocate new mbuf to prepend to
6198 * chain, copy junk along, and adjust length.
6199 */
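/*
 * A brief usage sketch: make room for a 14-byte link-layer header in
 * front of an existing packet; the leading space is reused when
 * available, otherwise a new mbuf is prepended (and a NULL return
 * means the chain has already been freed).
 *
 *	m = m_prepend_2(m, 14, M_DONTWAIT, 0);
 *	if (m == NULL) {
 *		return;
 *	}
 */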
6200 struct mbuf *
6201 m_prepend_2(struct mbuf *m, int len, int how, int align)
6202 {
6203 if (M_LEADINGSPACE(m) >= len &&
6204 (!align || IS_P2ALIGNED((m->m_data - len), sizeof(u_int32_t)))) {
6205 m->m_data -= len;
6206 m->m_len += len;
6207 } else {
6208 m = m_prepend(m, len, how);
6209 }
6210 if ((m) && (m->m_flags & M_PKTHDR)) {
6211 m->m_pkthdr.len += len;
6212 }
6213 return m;
6214 }
6215
6216 /*
6217 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
6218 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
6219 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
6220 *
6221 * The last mbuf and offset accessed are passed in and adjusted on return to
6222 * avoid having to iterate over the entire mbuf chain each time.
6223 */
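/*
 * A usage sketch with invented record sizes: copy two consecutive
 * 1460-byte pieces out of one chain, letting the cached position avoid
 * a rescan from the head on the second call.
 *
 *	struct mbuf *last = NULL, *seg1, *seg2;
 *	int last_off = 0;
 *
 *	seg1 = m_copym_mode(m, 0, 1460, M_DONTWAIT, &last, &last_off,
 *	    M_COPYM_MOVE_HDR);
 *	seg2 = m_copym_mode(m, 1460, 1460, M_DONTWAIT, &last, &last_off,
 *	    M_COPYM_MOVE_HDR);
 */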
6224 struct mbuf *
6225 m_copym_mode(struct mbuf *m, int off0, int len0, int wait,
6226 struct mbuf **m_lastm, int *m_off, uint32_t mode)
6227 {
6228 mbuf_ref_t n, mhdr = NULL, *np, top;
6229 int off = off0, len = len0;
6230 int copyhdr = 0;
6231
6232 if (off < 0 || len < 0) {
6233 panic("m_copym: invalid offset %d or len %d", off, len);
6234 }
6235
6236 VERIFY((mode != M_COPYM_MUST_COPY_HDR &&
6237 mode != M_COPYM_MUST_MOVE_HDR) || (m->m_flags & M_PKTHDR));
6238
6239 if ((off == 0 && (m->m_flags & M_PKTHDR)) ||
6240 mode == M_COPYM_MUST_COPY_HDR || mode == M_COPYM_MUST_MOVE_HDR) {
6241 mhdr = m;
6242 copyhdr = 1;
6243 }
6244
6245 if (m_lastm != NULL && *m_lastm != NULL) {
6246 if (off0 >= *m_off) {
6247 m = *m_lastm;
6248 off = off0 - *m_off;
6249 }
6250 }
6251
6252 while (off >= m->m_len) {
6253 off -= m->m_len;
6254 m = m->m_next;
6255 }
6256 np = ⊤
6257 top = NULL;
6258
6259 while (len > 0) {
6260 if (m == NULL) {
6261 if (len != M_COPYALL) {
6262 panic("m_copym: len != M_COPYALL");
6263 }
6264 break;
6265 }
6266
6267 if (copyhdr) {
6268 n = _M_RETRYHDR(wait, m->m_type);
6269 } else {
6270 n = _M_RETRY(wait, m->m_type);
6271 }
6272 *np = n;
6273
6274 if (n == NULL) {
6275 goto nospace;
6276 }
6277
6278 if (copyhdr != 0) {
6279 if ((mode == M_COPYM_MOVE_HDR) ||
6280 (mode == M_COPYM_MUST_MOVE_HDR)) {
6281 M_COPY_PKTHDR(n, mhdr);
6282 } else if ((mode == M_COPYM_COPY_HDR) ||
6283 (mode == M_COPYM_MUST_COPY_HDR)) {
6284 if (m_dup_pkthdr(n, mhdr, wait) == 0) {
6285 goto nospace;
6286 }
6287 }
6288 if (len == M_COPYALL) {
6289 n->m_pkthdr.len -= off0;
6290 } else {
6291 n->m_pkthdr.len = len;
6292 }
6293 copyhdr = 0;
6294 /*
6295 * There is no data to copy from the packet header mbuf
6296 * when it is empty or lies before the starting offset.
6297 */
6298 if (mhdr != m) {
6299 np = &n->m_next;
6300 continue;
6301 }
6302 }
6303 n->m_len = MIN(len, (m->m_len - off));
6304 if (m->m_flags & M_EXT) {
6305 n->m_ext = m->m_ext;
6306 m_incref(m);
6307 n->m_data = m->m_data + off;
6308 n->m_flags |= M_EXT;
6309 } else {
6310 /*
6311 * Limit to the capacity of the destination
6312 */
6313 n->m_len = MIN(n->m_len, M_SIZE(n));
6314
6315 if (m_mtod_end(n) > m_mtod_upper_bound(n)) {
6316 panic("%s n %p copy overflow",
6317 __func__, n);
6318 }
6319
6320 bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t),
6321 (unsigned)n->m_len);
6322 }
6323 if (len != M_COPYALL) {
6324 len -= n->m_len;
6325 }
6326
6327 if (len == 0) {
6328 if (m_lastm != NULL) {
6329 *m_lastm = m;
6330 *m_off = off0 + len0 - (off + n->m_len);
6331 }
6332 }
6333 off = 0;
6334 m = m->m_next;
6335 np = &n->m_next;
6336 }
6337
6338 return top;
6339 nospace:
6340 m_freem(top);
6341
6342 return NULL;
6343 }
6344
6345
6346 struct mbuf *
6347 m_copym(struct mbuf *m, int off0, int len, int wait)
6348 {
6349 return m_copym_mode(m, off0, len, wait, NULL, NULL, M_COPYM_MOVE_HDR);
6350 }
6351
6352 /*
6353 * Equivalent to m_copym except that all necessary mbuf hdrs are allocated
6354 * within this routine also.
6355 *
6356 * The last mbuf and offset accessed are passed in and adjusted on return to
6357 * avoid having to iterate over the entire mbuf chain each time.
6358 */
6359 struct mbuf *
6360 m_copym_with_hdrs(struct mbuf *m0, int off0, int len0, int wait,
6361 struct mbuf **m_lastm, int *m_off, uint32_t mode)
6362 {
6363 mbuf_ref_t m = m0, n, *np = NULL, top = NULL;
6364 int off = off0, len = len0;
6365 #if CONFIG_MBUF_MCACHE
6366 int mcflags = MSLEEPF(wait);
6367 mcache_obj_t *list = NULL;
6368 #else
6369 zstack_t list = {};
6370 #endif /* CONFIG_MBUF_MCACHE */
6371 int copyhdr = 0;
6372 int type = 0;
6373 int needed = 0;
6374
6375 if (off == 0 && (m->m_flags & M_PKTHDR)) {
6376 copyhdr = 1;
6377 }
6378
6379 if (m_lastm != NULL && *m_lastm != NULL) {
6380 if (off0 >= *m_off) {
6381 m = *m_lastm;
6382 off = off0 - *m_off;
6383 }
6384 }
6385
6386 while (off >= m->m_len) {
6387 off -= m->m_len;
6388 m = m->m_next;
6389 }
6390
6391 n = m;
6392 while (len > 0) {
6393 needed++;
6394 len -= MIN(len, (n->m_len - ((needed == 1) ? off : 0)));
6395 n = n->m_next;
6396 }
6397 needed++;
6398 len = len0;
6399
6400 #if CONFIG_MBUF_MCACHE
6401 /*
6402 * If the caller doesn't want to be put to sleep, mark it with
6403 * MCR_TRYHARD so that we may reclaim buffers from other places
6404 * before giving up.
6405 */
6406 if (mcflags & MCR_NOSLEEP) {
6407 mcflags |= MCR_TRYHARD;
6408 }
6409
6410 if (mcache_alloc_ext(m_cache(MC_MBUF), &list, needed,
6411 mcflags) != needed) {
6412 goto nospace;
6413 }
6414 #else
6415 list = mz_alloc_n(needed, wait);
6416 if (zstack_count(list) != needed) {
6417 goto nospace;
6418 }
6419 #endif /* CONFIG_MBUF_MCACHE */
6420
6421 needed = 0;
6422 while (len > 0) {
6423 #if CONFIG_MBUF_MCACHE
6424 n = (struct mbuf *)list;
6425 list = list->obj_next;
6426 #else
6427 n = zstack_pop(&list);
6428 #endif /* CONFIG_MBUF_MCACHE */
6429 ASSERT(n != NULL && m != NULL);
6430
6431 type = (top == NULL) ? MT_HEADER : m->m_type;
6432 MBUF_INIT(n, (top == NULL), type);
6433
6434 if (top == NULL) {
6435 top = n;
6436 np = &top->m_next;
6437 continue;
6438 } else {
6439 needed++;
6440 *np = n;
6441 }
6442
6443 if (copyhdr) {
6444 if ((mode == M_COPYM_MOVE_HDR) ||
6445 (mode == M_COPYM_MUST_MOVE_HDR)) {
6446 M_COPY_PKTHDR(n, m);
6447 } else if ((mode == M_COPYM_COPY_HDR) ||
6448 (mode == M_COPYM_MUST_COPY_HDR)) {
6449 if (m_dup_pkthdr(n, m, wait) == 0) {
6450 #if !CONFIG_MBUF_MCACHE
6451 m_elide(n);
6452 #endif
6453 goto nospace;
6454 }
6455 }
6456 n->m_pkthdr.len = len;
6457 copyhdr = 0;
6458 }
6459 n->m_len = MIN(len, (m->m_len - off));
6460
6461 if (m->m_flags & M_EXT) {
6462 n->m_ext = m->m_ext;
6463 m_incref(m);
6464 n->m_data = m->m_data + off;
6465 n->m_flags |= M_EXT;
6466 } else {
6467 if (m_mtod_end(n) > m_mtod_upper_bound(n)) {
6468 panic("%s n %p copy overflow",
6469 __func__, n);
6470 }
6471
6472 bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t),
6473 (unsigned)n->m_len);
6474 }
6475 len -= n->m_len;
6476
6477 if (len == 0) {
6478 if (m_lastm != NULL) {
6479 *m_lastm = m;
6480 *m_off = off0 + len0 - (off + n->m_len);
6481 }
6482 break;
6483 }
6484 off = 0;
6485 m = m->m_next;
6486 np = &n->m_next;
6487 }
6488
6489 mtype_stat_inc(MT_HEADER);
6490 mtype_stat_add(type, needed);
6491 mtype_stat_sub(MT_FREE, needed + 1);
6492
6493 #if CONFIG_MBUF_MCACHE
6494 ASSERT(list == NULL);
6495 #else
6496 ASSERT(zstack_empty(list));
6497 #endif /* CONFIG_MBUF_MCACHE */
6498
6499 return top;
6500
6501 nospace:
6502 #if CONFIG_MBUF_MCACHE
6503 if (list != NULL) {
6504 mcache_free_ext(m_cache(MC_MBUF), list);
6505 }
6506 #else
6507 if (!zstack_empty(list)) {
6508 /* No need to elide, these mbufs came from the cache. */
6509 mz_free_n(list);
6510 }
6511 #endif /* CONFIG_MBUF_MCACHE */
6512 if (top != NULL) {
6513 m_freem(top);
6514 }
6515 return NULL;
6516 }
6517
6518 /*
6519 * Copy data from an mbuf chain starting "off" bytes from the beginning,
6520 * continuing for "len" bytes, into the indicated buffer.
6521 */
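/*
 * A minimal usage sketch: copy the first 20 bytes of a chain into a
 * local buffer, e.g. to inspect a protocol header that may be split
 * across mbufs (the length is only an example).
 *
 *	char hdr[20];
 *
 *	if (m->m_pkthdr.len >= (int)sizeof (hdr)) {
 *		m_copydata(m, 0, sizeof (hdr), hdr);
 *	}
 */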
6522 void
6523 m_copydata(struct mbuf *m, int off, int len0, void *vp __sized_by(len0))
6524 {
6525 int off0 = off, len = len0;
6526 struct mbuf *m0 = m;
6527 unsigned count;
6528 char *cp = vp;
6529
6530 if (__improbable(off < 0 || len < 0)) {
6531 panic("%s: invalid offset %d or len %d", __func__, off, len);
6532 /* NOTREACHED */
6533 }
6534
6535 while (off > 0) {
6536 if (__improbable(m == NULL)) {
6537 panic("%s: invalid mbuf chain %p [off %d, len %d]",
6538 __func__, m0, off0, len0);
6539 /* NOTREACHED */
6540 }
6541 if (off < m->m_len) {
6542 break;
6543 }
6544 off -= m->m_len;
6545 m = m->m_next;
6546 }
6547 while (len > 0) {
6548 if (__improbable(m == NULL)) {
6549 panic("%s: invalid mbuf chain %p [off %d, len %d]",
6550 __func__, m0, off0, len0);
6551 /* NOTREACHED */
6552 }
6553 count = MIN(m->m_len - off, len);
6554 bcopy(mtod(m, caddr_t) + off, cp, count);
6555 len -= count;
6556 cp += count;
6557 off = 0;
6558 m = m->m_next;
6559 }
6560 }
6561
6562 /*
6563 * Concatenate mbuf chain n to m. Both chains must be of the same type
6564 * (e.g. MT_DATA). Any m_pkthdr is not updated.
6565 */
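/*
 * A tiny usage sketch: glue a continuation chain onto an existing one;
 * since m_cat does not touch m_pkthdr, a caller that keeps a packet
 * header fixes the length itself (m and n are chains the caller holds).
 *
 *	int extra = m_length(n);
 *
 *	m_cat(m, n);
 *	if (m->m_flags & M_PKTHDR) {
 *		m->m_pkthdr.len += extra;
 *	}
 */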
6566 void
6567 m_cat(struct mbuf *m, struct mbuf *n)
6568 {
6569 while (m->m_next) {
6570 m = m->m_next;
6571 }
6572 while (n) {
6573 if ((m->m_flags & M_EXT) ||
6574 m->m_data + m->m_len + n->m_len >= (uintptr_t)&m->m_dat[MLEN]) {
6575 /* just join the two chains */
6576 m->m_next = n;
6577 return;
6578 }
6579 /* splat the data from one into the other */
6580 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
6581 (u_int)n->m_len);
6582 m->m_len += n->m_len;
6583 n = m_free(n);
6584 }
6585 }
6586
6587 void
6588 m_adj(struct mbuf *mp, int req_len)
6589 {
6590 int len = req_len;
6591 struct mbuf *m;
6592 int count;
6593
6594 if ((m = mp) == NULL) {
6595 return;
6596 }
6597 if (len >= 0) {
6598 /*
6599 * Trim from head.
6600 */
6601 while (m != NULL && len > 0) {
6602 if (m->m_len <= len) {
6603 len -= m->m_len;
6604 m->m_len = 0;
6605 m = m->m_next;
6606 } else {
6607 m->m_len -= len;
6608 m->m_data += len;
6609 len = 0;
6610 }
6611 }
6612 m = mp;
6613 if (m->m_flags & M_PKTHDR) {
6614 m->m_pkthdr.len -= (req_len - len);
6615 }
6616 } else {
6617 /*
6618 * Trim from tail. Scan the mbuf chain,
6619 * calculating its length and finding the last mbuf.
6620 * If the adjustment only affects this mbuf, then just
6621 * adjust and return. Otherwise, rescan and truncate
6622 * after the remaining size.
6623 */
6624 len = -len;
6625 count = 0;
6626 for (;;) {
6627 count += m->m_len;
6628 if (m->m_next == NULL) {
6629 break;
6630 }
6631 m = m->m_next;
6632 }
6633 if (m->m_len >= len) {
6634 m->m_len -= len;
6635 m = mp;
6636 if (m->m_flags & M_PKTHDR) {
6637 m->m_pkthdr.len -= len;
6638 }
6639 return;
6640 }
6641 count -= len;
6642 if (count < 0) {
6643 count = 0;
6644 }
6645 /*
6646 * Correct length for chain is "count".
6647 * Find the mbuf with last data, adjust its length,
6648 * and toss data from remaining mbufs on chain.
6649 */
6650 m = mp;
6651 if (m->m_flags & M_PKTHDR) {
6652 m->m_pkthdr.len = count;
6653 }
6654 for (; m; m = m->m_next) {
6655 if (m->m_len >= count) {
6656 m->m_len = count;
6657 break;
6658 }
6659 count -= m->m_len;
6660 }
6661 while ((m = m->m_next)) {
6662 m->m_len = 0;
6663 }
6664 }
6665 }
6666
6667 /*
6668 * Rearrange an mbuf chain so that len bytes are contiguous
6669 * and in the data area of an mbuf (so that mtod
6670 * will work for a structure of size len). Returns the resulting
6671 * mbuf chain on success; frees it and returns NULL on failure.
6672 * If there is room, it will add up to max_protohdr-len extra bytes to the
6673 * contiguous region in an attempt to avoid being called next time.
6674 */
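/*
 * The classic input-path pattern, sketched with a 20-byte header: make
 * sure the header is contiguous before touching it; on failure the
 * chain has already been freed.
 *
 *	if (m->m_len < 20) {
 *		m = m_pullup(m, 20);
 *		if (m == NULL) {
 *			return;
 *		}
 *	}
 */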
6675 struct mbuf *
6676 m_pullup(struct mbuf *n, int len)
6677 {
6678 struct mbuf *m;
6679 int count;
6680 int space;
6681
6682 /* check invalid arguments */
6683 if (n == NULL) {
6684 panic("%s: n == NULL", __func__);
6685 }
6686 if (len < 0) {
6687 os_log_info(OS_LOG_DEFAULT, "%s: failed negative len %d",
6688 __func__, len);
6689 goto bad;
6690 }
6691 if (len > MLEN) {
6692 os_log_info(OS_LOG_DEFAULT, "%s: failed len %d too big",
6693 __func__, len);
6694 goto bad;
6695 }
6696 if ((n->m_flags & M_EXT) == 0 &&
6697 m_mtod_current(n) >= m_mtod_upper_bound(n)) {
6698 os_log_info(OS_LOG_DEFAULT, "%s: m_data out of bounds",
6699 __func__);
6700 goto bad;
6701 }
6702
6703 /*
6704 * If first mbuf has no cluster, and has room for len bytes
6705 * without shifting current data, pullup into it,
6706 * otherwise allocate a new mbuf to prepend to the chain.
6707 */
6708 if ((n->m_flags & M_EXT) == 0 &&
6709 len < m_mtod_upper_bound(n) - m_mtod_current(n) && n->m_next != NULL) {
6710 if (n->m_len >= len) {
6711 return n;
6712 }
6713 m = n;
6714 n = n->m_next;
6715 len -= m->m_len;
6716 } else {
6717 if (len > MHLEN) {
6718 goto bad;
6719 }
6720 _MGET(m, M_DONTWAIT, n->m_type);
6721 if (m == 0) {
6722 goto bad;
6723 }
6724 m->m_len = 0;
6725 if (n->m_flags & M_PKTHDR) {
6726 M_COPY_PKTHDR(m, n);
6727 n->m_flags &= ~M_PKTHDR;
6728 }
6729 }
6730 space = m_mtod_upper_bound(m) - m_mtod_end(m);
6731 do {
6732 count = MIN(MIN(MAX(len, max_protohdr), space), n->m_len);
6733 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
6734 (unsigned)count);
6735 len -= count;
6736 m->m_len += count;
6737 n->m_len -= count;
6738 space -= count;
6739 if (n->m_len != 0) {
6740 n->m_data += count;
6741 } else {
6742 n = m_free(n);
6743 }
6744 } while (len > 0 && n != NULL);
6745 if (len > 0) {
6746 (void) m_free(m);
6747 goto bad;
6748 }
6749 m->m_next = n;
6750 return m;
6751 bad:
6752 m_freem(n);
6753 return 0;
6754 }
6755
6756 /*
6757 * Like m_pullup(), except a new mbuf is always allocated, and we allow
6758 * the amount of empty space before the data in the new mbuf to be specified
6759 * (in the event that the caller expects to prepend later).
6760 */
6761 __private_extern__ struct mbuf *
6762 m_copyup(struct mbuf *n, int len, int dstoff)
6763 {
6764 struct mbuf *m;
6765 int count, space;
6766
6767 VERIFY(len >= 0 && dstoff >= 0);
6768
6769 if (len > (MHLEN - dstoff)) {
6770 goto bad;
6771 }
6772 MGET(m, M_DONTWAIT, n->m_type);
6773 if (m == NULL) {
6774 goto bad;
6775 }
6776 m->m_len = 0;
6777 if (n->m_flags & M_PKTHDR) {
6778 m_copy_pkthdr(m, n);
6779 n->m_flags &= ~M_PKTHDR;
6780 }
6781 m->m_data += dstoff;
6782 space = m_mtod_upper_bound(m) - m_mtod_end(m);
6783 do {
6784 count = min(min(max(len, max_protohdr), space), n->m_len);
6785 memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
6786 (unsigned)count);
6787 len -= count;
6788 m->m_len += count;
6789 n->m_len -= count;
6790 space -= count;
6791 if (n->m_len) {
6792 n->m_data += count;
6793 } else {
6794 n = m_free(n);
6795 }
6796 } while (len > 0 && n);
6797 if (len > 0) {
6798 (void) m_free(m);
6799 goto bad;
6800 }
6801 m->m_next = n;
6802 return m;
6803 bad:
6804 m_freem(n);
6805
6806 return NULL;
6807 }
6808
6809 /*
6810 * Partition an mbuf chain in two pieces, returning the tail --
6811 * all but the first len0 bytes. In case of failure, it returns NULL and
6812 * attempts to restore the chain to its original state.
6813 */
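/*
 * A small usage sketch: split a packet after its first 100 bytes
 * (an arbitrary split point); m keeps the head, tail holds the rest,
 * and a NULL return means the split failed and the chain was left as
 * it was.
 *
 *	struct mbuf *tail;
 *
 *	tail = m_split(m, 100, M_DONTWAIT);
 *	if (tail == NULL) {
 *		return;
 *	}
 */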
6814 struct mbuf *
6815 m_split(struct mbuf *m0, int len0, int wait)
6816 {
6817 return m_split0(m0, len0, wait, 1);
6818 }
6819
6820 static struct mbuf *
6821 m_split0(struct mbuf *m0, int len0, int wait, int copyhdr)
6822 {
6823 struct mbuf *m, *n;
6824 unsigned len = len0, remain;
6825
6826 /*
6827 * First iterate to the mbuf which contains the first byte of
6828 * data at offset len0
6829 */
6830 for (m = m0; m && len > m->m_len; m = m->m_next) {
6831 len -= m->m_len;
6832 }
6833 if (m == NULL) {
6834 return NULL;
6835 }
6836 /*
6837 * len effectively is now the offset in the current
6838 * mbuf where we have to perform split.
6839 *
6840 * remain becomes the tail length.
6841 * Note that len can also be == m->m_len
6842 */
6843 remain = m->m_len - len;
6844
6845 /*
6846 * If the current mbuf contains the entire remaining offset len,
6847 * just point the second chain at the next mbuf onwards
6848 * and return after making the necessary adjustments.
6849 */
6850 if (copyhdr && (m0->m_flags & M_PKTHDR) && remain == 0) {
6851 _MGETHDR(n, wait, m0->m_type);
6852 if (n == NULL) {
6853 return NULL;
6854 }
6855 n->m_next = m->m_next;
6856 m->m_next = NULL;
6857 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
6858 n->m_pkthdr.len = m0->m_pkthdr.len - len0;
6859 m0->m_pkthdr.len = len0;
6860 return n;
6861 }
6862 if (copyhdr && (m0->m_flags & M_PKTHDR)) {
6863 _MGETHDR(n, wait, m0->m_type);
6864 if (n == NULL) {
6865 return NULL;
6866 }
6867 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
6868 n->m_pkthdr.len = m0->m_pkthdr.len - len0;
6869 m0->m_pkthdr.len = len0;
6870
6871 /*
6872 * If the current mbuf points to external storage,
6873 * the storage can be shared by having the last mbuf
6874 * of the head chain and the first mbuf of the tail chain
6875 * point at different data offsets.
6876 */
6877 if (m->m_flags & M_EXT) {
6878 goto extpacket;
6879 }
6880 if (remain > MHLEN) {
6881 /* m can't be the lead packet */
6882 MH_ALIGN(n, 0);
6883 n->m_next = m_split(m, len, wait);
6884 if (n->m_next == NULL) {
6885 (void) m_free(n);
6886 return NULL;
6887 } else {
6888 return n;
6889 }
6890 } else {
6891 MH_ALIGN(n, remain);
6892 }
6893 } else if (remain == 0) {
6894 n = m->m_next;
6895 m->m_next = NULL;
6896 return n;
6897 } else {
6898 _MGET(n, wait, m->m_type);
6899 if (n == NULL) {
6900 return NULL;
6901 }
6902
6903 if ((m->m_flags & M_EXT) == 0) {
6904 VERIFY(remain <= MLEN);
6905 M_ALIGN(n, remain);
6906 }
6907 }
6908 extpacket:
6909 if (m->m_flags & M_EXT) {
6910 n->m_flags |= M_EXT;
6911 n->m_ext = m->m_ext;
6912 m_incref(m);
6913 n->m_data = m->m_data + len;
6914 } else {
6915 bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
6916 }
6917 n->m_len = remain;
6918 m->m_len = len;
6919 n->m_next = m->m_next;
6920 m->m_next = NULL;
6921 return n;
6922 }
6923
6924
6925 #if CONFIG_MBUF_MCACHE
6926 #ifndef MBUF_GROWTH_NORMAL_THRESH
6927 #define MBUF_GROWTH_NORMAL_THRESH 25
6928 #endif
6929
6930 /*
6931 * Cluster freelist allocation check.
6932 */
6933 static int
6934 m_howmany(int num, size_t bufsize)
6935 {
6936 int i = 0, j = 0;
6937 u_int32_t m_mbclusters, m_clusters, m_bigclusters, m_16kclusters;
6938 u_int32_t m_mbfree, m_clfree, m_bigclfree, m_16kclfree;
6939 u_int32_t sumclusters, freeclusters;
6940 u_int32_t percent_pool, percent_kmem;
6941 u_int32_t mb_growth, mb_growth_thresh;
6942
6943 VERIFY(bufsize == m_maxsize(MC_BIGCL) ||
6944 bufsize == m_maxsize(MC_16KCL));
6945
6946 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
6947
6948 /* Numbers in 2K cluster units */
6949 m_mbclusters = m_total(MC_MBUF) >> NMBPCLSHIFT;
6950 m_clusters = m_total(MC_CL);
6951 m_bigclusters = m_total(MC_BIGCL) << NCLPBGSHIFT;
6952 m_16kclusters = m_total(MC_16KCL);
6953 sumclusters = m_mbclusters + m_clusters + m_bigclusters;
6954
6955 m_mbfree = m_infree(MC_MBUF) >> NMBPCLSHIFT;
6956 m_clfree = m_infree(MC_CL);
6957 m_bigclfree = m_infree(MC_BIGCL) << NCLPBGSHIFT;
6958 m_16kclfree = m_infree(MC_16KCL);
6959 freeclusters = m_mbfree + m_clfree + m_bigclfree;
6960
6961 /* Bail if we've maxed out the mbuf memory map */
6962 if ((bufsize == m_maxsize(MC_BIGCL) && sumclusters >= nclusters) ||
6963 (njcl > 0 && bufsize == m_maxsize(MC_16KCL) &&
6964 (m_16kclusters << NCLPJCLSHIFT) >= njcl)) {
6965 mbwdog_logger("maxed out nclusters (%u >= %u) or njcl (%u >= %u)",
6966 sumclusters, nclusters,
6967 (m_16kclusters << NCLPJCLSHIFT), njcl);
6968 return 0;
6969 }
6970
6971 if (bufsize == m_maxsize(MC_BIGCL)) {
6972 /* Under minimum */
6973 if (m_bigclusters < m_minlimit(MC_BIGCL)) {
6974 return m_minlimit(MC_BIGCL) - m_bigclusters;
6975 }
6976
6977 percent_pool =
6978 ((sumclusters - freeclusters) * 100) / sumclusters;
6979 percent_kmem = (sumclusters * 100) / nclusters;
6980
6981 /*
6982 * If a light/normal user, grow conservatively (75%).
6983 * If a heavy user, grow aggressively (50%).
6984 */
6985 if (percent_kmem < MBUF_GROWTH_NORMAL_THRESH) {
6986 mb_growth = MB_GROWTH_NORMAL;
6987 } else {
6988 mb_growth = MB_GROWTH_AGGRESSIVE;
6989 }
6990
6991 if (percent_kmem < 5) {
6992 /* For initial allocations */
6993 i = num;
6994 } else {
6995 /* Return if >= MBIGCL_LOWAT clusters available */
6996 if (m_infree(MC_BIGCL) >= MBIGCL_LOWAT &&
6997 m_total(MC_BIGCL) >=
6998 MBIGCL_LOWAT + m_minlimit(MC_BIGCL)) {
6999 return 0;
7000 }
7001
7002 /* Ensure at least num clusters are accessible */
7003 if (num >= m_infree(MC_BIGCL)) {
7004 i = num - m_infree(MC_BIGCL);
7005 }
7006 if (num > m_total(MC_BIGCL) - m_minlimit(MC_BIGCL)) {
7007 j = num - (m_total(MC_BIGCL) -
7008 m_minlimit(MC_BIGCL));
7009 }
7010
7011 i = MAX(i, j);
7012
7013 /*
7014 * Grow pool if percent_pool > 75 (normal growth)
7015 * or percent_pool > 50 (aggressive growth).
7016 */
7017 mb_growth_thresh = 100 - (100 / (1 << mb_growth));
7018 if (percent_pool > mb_growth_thresh) {
7019 j = ((sumclusters + num) >> mb_growth) -
7020 freeclusters;
7021 }
7022 i = MAX(i, j);
7023 }
7024
7025 /* Check to ensure we didn't go over limits */
7026 if (i + m_bigclusters >= m_maxlimit(MC_BIGCL)) {
7027 i = m_maxlimit(MC_BIGCL) - m_bigclusters;
7028 }
7029 if ((i << 1) + sumclusters >= nclusters) {
7030 i = (nclusters - sumclusters) >> 1;
7031 }
7032 VERIFY((m_total(MC_BIGCL) + i) <= m_maxlimit(MC_BIGCL));
7033 VERIFY(sumclusters + (i << 1) <= nclusters);
7034 } else { /* 16K CL */
7035 VERIFY(njcl > 0);
7036 /* Ensure at least num clusters are available */
7037 if (num >= m_16kclfree) {
7038 i = num - m_16kclfree;
7039 }
7040
7041 /* Always grow 16KCL pool aggressively */
7042 if (((m_16kclusters + num) >> 1) > m_16kclfree) {
7043 j = ((m_16kclusters + num) >> 1) - m_16kclfree;
7044 }
7045 i = MAX(i, j);
7046
7047 /* Check to ensure we don't go over limit */
7048 if ((i + m_total(MC_16KCL)) >= m_maxlimit(MC_16KCL)) {
7049 i = m_maxlimit(MC_16KCL) - m_total(MC_16KCL);
7050 }
7051 }
7052 return i;
7053 }
7054 #endif /* CONFIG_MBUF_MCACHE */
7055 /*
7056 * Return the number of bytes in the mbuf chain, m.
7057 */
7058 unsigned int
7059 m_length(struct mbuf *m)
7060 {
7061 struct mbuf *m0;
7062 unsigned int pktlen;
7063
7064 if (m->m_flags & M_PKTHDR) {
7065 return m->m_pkthdr.len;
7066 }
7067
7068 pktlen = 0;
7069 for (m0 = m; m0 != NULL; m0 = m0->m_next) {
7070 pktlen += m0->m_len;
7071 }
7072 return pktlen;
7073 }
7074
7075 /*
7076 * Copy data from a buffer back into the indicated mbuf chain,
7077 * starting "off" bytes from the beginning, extending the mbuf
7078 * chain if necessary.
7079 */
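/*
 * A tiny usage sketch: overwrite two bytes at offset 16 of a chain,
 * e.g. to patch a 16-bit field in place (the offset and value are
 * invented).
 *
 *	uint16_t v = 0;
 *
 *	m_copyback(m, 16, sizeof (v), &v);
 */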
7080 void
7081 m_copyback(struct mbuf *m0, int off, int len, const void *cp __sized_by(len))
7082 {
7083 #if DEBUG
7084 struct mbuf *origm = m0;
7085 int error;
7086 #endif /* DEBUG */
7087
7088 if (m0 == NULL) {
7089 return;
7090 }
7091
7092 #if DEBUG
7093 error =
7094 #endif /* DEBUG */
7095 m_copyback0(&m0, off, len, cp,
7096 M_COPYBACK0_COPYBACK | M_COPYBACK0_EXTEND, M_DONTWAIT);
7097
7098 #if DEBUG
7099 if (error != 0 || (m0 != NULL && origm != m0)) {
7100 panic("m_copyback");
7101 }
7102 #endif /* DEBUG */
7103 }
7104
7105 struct mbuf *
7106 m_copyback_cow(struct mbuf *m0, int off, int len, const void *cp __sized_by(len), int how)
7107 {
7108 int error;
7109
7110 /* don't support chain expansion */
7111 VERIFY(off + len <= m_length(m0));
7112
7113 error = m_copyback0(&m0, off, len, cp,
7114 M_COPYBACK0_COPYBACK | M_COPYBACK0_COW, how);
7115 if (error) {
7116 /*
7117 * no way to recover from partial success.
7118 * just free the chain.
7119 */
7120 m_freem(m0);
7121 return NULL;
7122 }
7123 return m0;
7124 }
7125
7126 /*
7127 * m_makewritable: ensure the specified range is writable.
7128 */
7129 int
7130 m_makewritable(struct mbuf **mp, int off, int len, int how)
7131 {
7132 int error;
7133 #if DEBUG
7134 struct mbuf *n;
7135 int origlen, reslen;
7136
7137 origlen = m_length(*mp);
7138 #endif /* DEBUG */
7139
7140 #if 0 /* M_COPYALL is large enough */
7141 if (len == M_COPYALL) {
7142 len = m_length(*mp) - off; /* XXX */
7143 }
7144 #endif
7145
7146 error = m_copyback0(mp, off, len, NULL,
7147 M_COPYBACK0_PRESERVE | M_COPYBACK0_COW, how);
7148
7149 #if DEBUG
7150 reslen = 0;
7151 for (n = *mp; n; n = n->m_next) {
7152 reslen += n->m_len;
7153 }
7154 if (origlen != reslen) {
7155 panic("m_makewritable: length changed");
7156 }
7157 if (((*mp)->m_flags & M_PKTHDR) && reslen != (*mp)->m_pkthdr.len) {
7158 panic("m_makewritable: inconsist");
7159 }
7160 #endif /* DEBUG */
7161
7162 return error;
7163 }
7164
7165 static int
7166 m_copyback0(struct mbuf **mp0, int off, int len0, const void *vp __sized_by_or_null(len0), int flags,
7167 int how)
7168 {
7169 int mlen, len = len0, totlen = 0;
7170 mbuf_ref_t m, n, *mp;
7171 const char *cp = vp;
7172
7173 VERIFY(mp0 != NULL);
7174 VERIFY(*mp0 != NULL);
7175 VERIFY((flags & M_COPYBACK0_PRESERVE) == 0 || cp == NULL);
7176 VERIFY((flags & M_COPYBACK0_COPYBACK) == 0 || cp != NULL);
7177
7178 /*
7179 * we don't bother to update "totlen" in the case of M_COPYBACK0_COW,
7180 * assuming that M_COPYBACK0_EXTEND and M_COPYBACK0_COW are exclusive.
7181 */
7182
7183 VERIFY((~flags & (M_COPYBACK0_EXTEND | M_COPYBACK0_COW)) != 0);
7184
7185 mp = mp0;
7186 m = *mp;
7187 while (off > (mlen = m->m_len)) {
7188 off -= mlen;
7189 totlen += mlen;
7190 if (m->m_next == NULL) {
7191 int tspace;
7192 extend:
7193 if (!(flags & M_COPYBACK0_EXTEND)) {
7194 goto out;
7195 }
7196
7197 /*
7198 * try to make some space at the end of "m".
7199 */
7200
7201 mlen = m->m_len;
7202 if (off + len >= MINCLSIZE &&
7203 !(m->m_flags & M_EXT) && m->m_len == 0) {
7204 MCLGET(m, how);
7205 }
7206 tspace = M_TRAILINGSPACE(m);
7207 if (tspace > 0) {
7208 tspace = MIN(tspace, off + len);
7209 VERIFY(tspace > 0);
7210 bzero(mtod(m, char *) + m->m_len,
7211 MIN(off, tspace));
7212 m->m_len += tspace;
7213 off += mlen;
7214 totlen -= mlen;
7215 continue;
7216 }
7217
7218 /*
7219 * need to allocate an mbuf.
7220 */
7221
7222 if (off + len >= MINCLSIZE) {
7223 n = m_getcl(how, m->m_type, 0);
7224 } else {
7225 n = _M_GET(how, m->m_type);
7226 }
7227 if (n == NULL) {
7228 goto out;
7229 }
7230 n->m_len = 0;
7231 n->m_len = MIN(M_TRAILINGSPACE(n), off + len);
7232 bzero(mtod(n, char *), MIN(n->m_len, off));
7233 m->m_next = n;
7234 }
7235 mp = &m->m_next;
7236 m = m->m_next;
7237 }
7238 while (len > 0) {
7239 mlen = m->m_len - off;
7240 if (mlen != 0 && m_mclhasreference(m)) {
7241 char *datap;
7242 int eatlen;
7243
7244 /*
7245 * this mbuf is read-only.
7246 * allocate a new writable mbuf and try again.
7247 */
7248
7249 #if DIAGNOSTIC
7250 if (!(flags & M_COPYBACK0_COW)) {
7251 panic("m_copyback0: read-only");
7252 }
7253 #endif /* DIAGNOSTIC */
7254
7255 /*
7256 * if we're going to write into the middle of
7257 * a mbuf, split it first.
7258 */
7259 if (off > 0 && len < mlen) {
7260 n = m_split0(m, off, how, 0);
7261 if (n == NULL) {
7262 goto enobufs;
7263 }
7264 m->m_next = n;
7265 mp = &m->m_next;
7266 m = n;
7267 off = 0;
7268 continue;
7269 }
7270
7271 /*
7272 * XXX TODO coalesce into the trailingspace of
7273 * the previous mbuf when possible.
7274 */
7275
7276 /*
7277 * allocate a new mbuf. copy packet header if needed.
7278 */
7279 n = _M_GET(how, m->m_type);
7280 if (n == NULL) {
7281 goto enobufs;
7282 }
7283 if (off == 0 && (m->m_flags & M_PKTHDR)) {
7284 M_COPY_PKTHDR(n, m);
7285 n->m_len = MHLEN;
7286 } else {
7287 if (len >= MINCLSIZE) {
7288 MCLGET(n, M_DONTWAIT);
7289 }
7290 n->m_len =
7291 (n->m_flags & M_EXT) ? MCLBYTES : MLEN;
7292 }
7293 if (n->m_len > len) {
7294 n->m_len = len;
7295 }
7296
7297 /*
7298 * free the region which has been overwritten,
7299 * copying data from the old mbufs if requested.
7300 */
7301 if (flags & M_COPYBACK0_PRESERVE) {
7302 datap = mtod(n, char *);
7303 } else {
7304 datap = NULL;
7305 }
7306 eatlen = n->m_len;
7307 VERIFY(off == 0 || eatlen >= mlen);
7308 if (off > 0) {
7309 VERIFY(len >= mlen);
7310 m->m_len = off;
7311 m->m_next = n;
7312 if (datap) {
7313 m_copydata(m, off, mlen, datap);
7314 datap += mlen;
7315 }
7316 eatlen -= mlen;
7317 mp = &m->m_next;
7318 m = m->m_next;
7319 }
7320 while (m != NULL && m_mclhasreference(m) &&
7321 n->m_type == m->m_type && eatlen > 0) {
7322 mlen = MIN(eatlen, m->m_len);
7323 if (datap) {
7324 m_copydata(m, 0, mlen, datap);
7325 datap += mlen;
7326 }
7327 m->m_data += mlen;
7328 m->m_len -= mlen;
7329 eatlen -= mlen;
7330 if (m->m_len == 0) {
7331 *mp = m = m_free(m);
7332 }
7333 }
7334 if (eatlen > 0) {
7335 n->m_len -= eatlen;
7336 }
7337 n->m_next = m;
7338 *mp = m = n;
7339 continue;
7340 }
7341 mlen = MIN(mlen, len);
7342 if (flags & M_COPYBACK0_COPYBACK) {
7343 bcopy(cp, mtod(m, caddr_t) + off, (unsigned)mlen);
7344 cp += mlen;
7345 }
7346 len -= mlen;
7347 mlen += off;
7348 off = 0;
7349 totlen += mlen;
7350 if (len == 0) {
7351 break;
7352 }
7353 if (m->m_next == NULL) {
7354 goto extend;
7355 }
7356 mp = &m->m_next;
7357 m = m->m_next;
7358 }
7359 out:
7360 if (((m = *mp0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen)) {
7361 VERIFY(flags & M_COPYBACK0_EXTEND);
7362 m->m_pkthdr.len = totlen;
7363 }
7364
7365 return 0;
7366
7367 enobufs:
7368 return ENOBUFS;
7369 }
7370
7371 uint64_t
7372 mcl_to_paddr(char *addr)
7373 {
7374 #if CONFIG_MBUF_MCACHE
7375 vm_offset_t base_phys;
7376
7377 if (!MBUF_IN_MAP(addr)) {
7378 return 0;
7379 }
7380 base_phys = mcl_paddr[atop_64(addr - (char *)mbutl)];
7381
7382 if (base_phys == 0) {
7383 return 0;
7384 }
7385 return (uint64_t)(ptoa_64(base_phys) | ((uint64_t)addr & PAGE_MASK));
7386 #else
7387 extern addr64_t kvtophys(vm_offset_t va);
7388
7389 return kvtophys((vm_offset_t)addr);
7390 #endif /* CONFIG_MBUF_MCACHE */
7391 }
7392
7393 /*
7394 * Dup the mbuf chain passed in. The whole thing. No cute additional cruft.
7395 * And really copy the thing. That way, we don't "precompute" checksums
7396 * for unsuspecting consumers. Assumption: m->m_nextpkt == 0. Trick: for
7397 * small packets, don't dup into a cluster. That way received packets
7398 * don't take up too much room in the sockbuf (cf. sbspace()).
7399 */
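/*
 * A short usage sketch: take a deep, independent copy of a packet so
 * the original can be modified or freed without affecting the copy.
 *
 *	struct mbuf *copy;
 *
 *	copy = m_dup(m, M_DONTWAIT);
 *	if (copy == NULL) {
 *		return;
 *	}
 */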
7400 struct mbuf *
7401 m_dup(struct mbuf *m, int how)
7402 {
7403 mbuf_ref_t n, top, *np;
7404 int copyhdr = 0;
7405
7406 np = ⊤
7407 top = NULL;
7408 if (m->m_flags & M_PKTHDR) {
7409 copyhdr = 1;
7410 }
7411
7412 /*
7413 * Quick check: if we have one mbuf and its data fits in an
7414 * mbuf with packet header, just copy and go.
7415 */
7416 if (m->m_next == NULL) {
7417 /* Then just move the data into an mbuf and be done... */
7418 if (copyhdr) {
7419 if (m->m_pkthdr.len <= MHLEN && m->m_len <= MHLEN) {
7420 if ((n = _M_GETHDR(how, m->m_type)) == NULL) {
7421 return NULL;
7422 }
7423 n->m_len = m->m_len;
7424 m_dup_pkthdr(n, m, how);
7425 bcopy(mtod(m, caddr_t), mtod(n, caddr_t), m->m_len);
7426 return n;
7427 }
7428 } else if (m->m_len <= MLEN) {
7429 if ((n = _M_GET(how, m->m_type)) == NULL) {
7430 return NULL;
7431 }
7432 bcopy(mtod(m, caddr_t), mtod(n, caddr_t), m->m_len);
7433 n->m_len = m->m_len;
7434 return n;
7435 }
7436 }
7437 while (m != NULL) {
7438 #if BLUE_DEBUG
7439 printf("<%x: %x, %x, %x\n", m, m->m_flags, m->m_len,
7440 m->m_data);
7441 #endif
7442 if (copyhdr) {
7443 n = _M_GETHDR(how, m->m_type);
7444 } else {
7445 n = _M_GET(how, m->m_type);
7446 }
7447 if (n == NULL) {
7448 goto nospace;
7449 }
7450 if (m->m_flags & M_EXT) {
7451 if (m->m_len <= m_maxsize(MC_CL)) {
7452 MCLGET(n, how);
7453 } else if (m->m_len <= m_maxsize(MC_BIGCL)) {
7454 n = m_mbigget(n, how);
7455 } else if (m->m_len <= m_maxsize(MC_16KCL) && njcl > 0) {
7456 n = m_m16kget(n, how);
7457 }
7458 if (!(n->m_flags & M_EXT)) {
7459 (void) m_free(n);
7460 goto nospace;
7461 }
7462 } else {
7463 VERIFY((copyhdr == 1 && m->m_len <= MHLEN) ||
7464 (copyhdr == 0 && m->m_len <= MLEN));
7465 }
7466 *np = n;
7467 if (copyhdr) {
7468 /* Don't use M_COPY_PKTHDR: preserve m_data */
7469 m_dup_pkthdr(n, m, how);
7470 copyhdr = 0;
7471 if (!(n->m_flags & M_EXT)) {
7472 n->m_data = (uintptr_t)n->m_pktdat;
7473 }
7474 }
7475 n->m_len = m->m_len;
7476 /*
7477 * Get the dup on the same boundary as the original.
7478 * Assume that the two mbufs have the same offset to the data area
7479 * (up to word boundaries).
7480 */
7481 bcopy(mtod(m, caddr_t), mtod(n, caddr_t), (unsigned)n->m_len);
7482 m = m->m_next;
7483 np = &n->m_next;
7484 #if BLUE_DEBUG
7485 printf(">%x: %x, %x, %x\n", n, n->m_flags, n->m_len,
7486 n->m_data);
7487 #endif
7488 }
7489
7490 return top;
7491
7492 nospace:
7493 m_freem(top);
7494 return NULL;
7495 }
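
/*
 * Illustrative usage sketch (not part of the original source): because
 * m_dup() performs a deep copy, the duplicate can be modified freely
 * without touching the original chain, which is left intact even when
 * the copy fails.  A hypothetical caller:
 *
 *	struct mbuf *copy = m_dup(m, M_DONTWAIT);
 *	if (copy == NULL) {
 *		return ENOBUFS;
 *	}
 */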
7496
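/*
 * MBUF_MULTIPAGES(m) is true when an external buffer's data spans more
 * than one page: either it starts on a page boundary and is longer than
 * a page, or it starts mid-page and runs past the next page boundary.
 * Such mbufs are split up by m_expand() so that no single mbuf's data
 * crosses a page.
 */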
7497 #define MBUF_MULTIPAGES(m) \
7498 (((m)->m_flags & M_EXT) && \
7499 ((IS_P2ALIGNED((m)->m_data, PAGE_SIZE) \
7500 && (m)->m_len > PAGE_SIZE) || \
7501 (!IS_P2ALIGNED((m)->m_data, PAGE_SIZE) && \
7502 P2ROUNDUP((m)->m_data, PAGE_SIZE) < ((uintptr_t)(m)->m_data + (m)->m_len))))
7503
7504 static struct mbuf *
7505 m_expand(struct mbuf *m, struct mbuf **last)
7506 {
7507 mbuf_ref_t top = NULL, *nm = &top;
7508 uintptr_t data0, data;
7509 unsigned int len0, len;
7510
7511 VERIFY(MBUF_MULTIPAGES(m));
7512 VERIFY(m->m_next == NULL);
7513 data0 = (uintptr_t)m->m_data;
7514 len0 = m->m_len;
7515 *last = top;
7516
7517 for (;;) {
7518 struct mbuf *n;
7519
7520 data = data0;
7521 if (IS_P2ALIGNED(data, PAGE_SIZE) && len0 > PAGE_SIZE) {
7522 len = PAGE_SIZE;
7523 } else if (!IS_P2ALIGNED(data, PAGE_SIZE) &&
7524 P2ROUNDUP(data, PAGE_SIZE) < (data + len0)) {
7525 len = P2ROUNDUP(data, PAGE_SIZE) - data;
7526 } else {
7527 len = len0;
7528 }
7529
7530 VERIFY(len > 0);
7531 VERIFY(m->m_flags & M_EXT);
7532 m->m_data = data;
7533 m->m_len = len;
7534
7535 *nm = *last = m;
7536 nm = &m->m_next;
7537 m->m_next = NULL;
7538
7539 data0 += len;
7540 len0 -= len;
7541 if (len0 == 0) {
7542 break;
7543 }
7544
7545 n = _M_RETRY(M_DONTWAIT, MT_DATA);
7546 if (n == NULL) {
7547 m_freem(top);
7548 top = *last = NULL;
7549 break;
7550 }
7551
7552 n->m_ext = m->m_ext;
7553 m_incref(m);
7554 n->m_flags |= M_EXT;
7555 m = n;
7556 }
7557 return top;
7558 }
7559
7560 struct mbuf *
7561 m_normalize(struct mbuf *m)
7562 {
7563 mbuf_ref_t top = NULL, *nm = &top;
7564 boolean_t expanded = FALSE;
7565
7566 while (m != NULL) {
7567 mbuf_ref_t n;
7568
7569 n = m->m_next;
7570 m->m_next = NULL;
7571
7572 /* Does the data cross one or more page boundaries? */
7573 if (MBUF_MULTIPAGES(m)) {
7574 mbuf_ref_t last;
7575 if ((m = m_expand(m, &last)) == NULL) {
7576 m_freem(n);
7577 m_freem(top);
7578 top = NULL;
7579 break;
7580 }
7581 *nm = m;
7582 nm = &last->m_next;
7583 expanded = TRUE;
7584 } else {
7585 *nm = m;
7586 nm = &m->m_next;
7587 }
7588 m = n;
7589 }
7590 if (expanded) {
7591 os_atomic_inc(&mb_normalized, relaxed);
7592 }
7593 return top;
7594 }
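
/*
 * Illustrative sketch (an assumption, not from the original source):
 * a caller that requires page-bounded segments can normalize a chain
 * before walking it; on allocation failure the chain has already been
 * freed and NULL is returned.
 *
 *	m = m_normalize(m);
 *	if (m == NULL) {
 *		return ENOBUFS;
 *	}
 */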
7595
7596 /*
7597 * Append the specified data to the indicated mbuf chain.
7598 * Extend the mbuf chain if the new data does not fit in
7599 * existing space.
7600 *
7601 * Return 1 if able to complete the job; otherwise 0.
7602 */
7603 int
7604 m_append(struct mbuf *m0, int len0, caddr_t cp0 __sized_by(len0))
7605 {
7606 struct mbuf *m, *n;
7607 int remainder, space, len = len0;
7608 caddr_t cp = cp0;
7609
7610 for (m = m0; m->m_next != NULL; m = m->m_next) {
7611 ;
7612 }
7613 remainder = len;
7614 space = M_TRAILINGSPACE(m);
7615 if (space > 0) {
7616 /*
7617 * Copy into available space.
7618 */
7619 if (space > remainder) {
7620 space = remainder;
7621 }
7622 bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
7623 m->m_len += space;
7624 cp += space;
7625 remainder -= space;
7626 }
7627 while (remainder > 0) {
7628 /*
7629 * Allocate a new mbuf; could check space
7630 * and allocate a cluster instead.
7631 */
7632 n = m_get(M_WAITOK, m->m_type);
7633 if (n == NULL) {
7634 break;
7635 }
7636 n->m_len = min(MLEN, remainder);
7637 bcopy(cp, mtod(n, caddr_t), n->m_len);
7638 cp += n->m_len;
7639 remainder -= n->m_len;
7640 m->m_next = n;
7641 m = n;
7642 }
7643 if (m0->m_flags & M_PKTHDR) {
7644 m0->m_pkthdr.len += len - remainder;
7645 }
7646 return remainder == 0;
7647 }
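
/*
 * Illustrative sketch (not part of the original source): append a small,
 * hypothetical trailer to a chain, treating a 0 return as an allocation
 * failure.
 *
 *	uint8_t trailer[4] = { 0x01, 0x02, 0x03, 0x04 };
 *
 *	if (m_append(m, sizeof(trailer), (caddr_t)trailer) == 0) {
 *		m_freem(m);
 *		return ENOBUFS;
 *	}
 */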
7648
7649 struct mbuf *
7650 m_last(struct mbuf *m)
7651 {
7652 while (m->m_next != NULL) {
7653 m = m->m_next;
7654 }
7655 return m;
7656 }
7657
7658 unsigned int
7659 m_fixhdr(struct mbuf *m0)
7660 {
7661 u_int len;
7662
7663 VERIFY(m0->m_flags & M_PKTHDR);
7664
7665 len = m_length2(m0, NULL);
7666 m0->m_pkthdr.len = len;
7667 return len;
7668 }
7669
7670 unsigned int
7671 m_length2(struct mbuf *m0, struct mbuf **last)
7672 {
7673 struct mbuf *m;
7674 u_int len;
7675
7676 len = 0;
7677 for (m = m0; m != NULL; m = m->m_next) {
7678 len += m->m_len;
7679 if (m->m_next == NULL) {
7680 break;
7681 }
7682 }
7683 if (last != NULL) {
7684 *last = m;
7685 }
7686 return len;
7687 }
7688
7689 /*
7690 * Defragment a mbuf chain, returning the shortest possible chain of mbufs
7691 * and clusters. If allocation fails and this cannot be completed, NULL will
7692 * be returned, but the passed in chain will be unchanged. Upon success,
7693 * the original chain will be freed, and the new chain will be returned.
7694 *
7695 * If an mbuf without a packet header is passed in, the original mbuf
7696 * chain will be returned unharmed.
7697 *
7698 * If an offset is specified, the first mbuf in the chain will have a
7699 * leading space of the amount stated by the "off" parameter.
7700 *
7701 * This routine requires that the m_pkthdr.header field of the original
7702 * mbuf chain is cleared by the caller.
7703 */
7704 struct mbuf *
7705 m_defrag_offset(struct mbuf *m0, u_int32_t off, int how)
7706 {
7707 struct mbuf *m_new = NULL, *m_final = NULL;
7708 int progress = 0, length, pktlen;
7709
7710 if (!(m0->m_flags & M_PKTHDR)) {
7711 return m0;
7712 }
7713
7714 VERIFY(off < MHLEN);
7715 m_fixhdr(m0); /* Needed sanity check */
7716
7717 pktlen = m0->m_pkthdr.len + off;
7718 if (pktlen > MHLEN) {
7719 m_final = m_getcl(how, MT_DATA, M_PKTHDR);
7720 } else {
7721 m_final = m_gethdr(how, MT_DATA);
7722 }
7723
7724 if (m_final == NULL) {
7725 goto nospace;
7726 }
7727
7728 if (off > 0) {
7729 pktlen -= off;
7730 m_final->m_data += off;
7731 }
7732
7733 /*
7734 * Caller must have handled the contents pointed to by this
7735 * pointer before coming here, as otherwise it will point to
7736 * the original mbuf which will get freed upon success.
7737 */
7738 VERIFY(m0->m_pkthdr.pkt_hdr == NULL);
7739
7740 if (m_dup_pkthdr(m_final, m0, how) == 0) {
7741 goto nospace;
7742 }
7743
7744 m_new = m_final;
7745
7746 while (progress < pktlen) {
7747 length = pktlen - progress;
7748 if (length > MCLBYTES) {
7749 length = MCLBYTES;
7750 }
7751 length -= ((m_new == m_final) ? off : 0);
7752 if (length < 0) {
7753 goto nospace;
7754 }
7755
7756 if (m_new == NULL) {
7757 if (length > MLEN) {
7758 m_new = m_getcl(how, MT_DATA, 0);
7759 } else {
7760 m_new = m_get(how, MT_DATA);
7761 }
7762 if (m_new == NULL) {
7763 goto nospace;
7764 }
7765 }
7766
7767 m_copydata(m0, progress, length, mtod(m_new, caddr_t));
7768 progress += length;
7769 m_new->m_len = length;
7770 if (m_new != m_final) {
7771 m_cat(m_final, m_new);
7772 }
7773 m_new = NULL;
7774 }
7775 m_freem(m0);
7776 m0 = m_final;
7777 return m0;
7778 nospace:
7779 if (m_final) {
7780 m_freem(m_final);
7781 }
7782 return NULL;
7783 }
7784
7785 struct mbuf *
7786 m_defrag(struct mbuf *m0, int how)
7787 {
7788 return m_defrag_offset(m0, 0, how);
7789 }
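
/*
 * Illustrative sketch (an assumption): defragment a packet into the
 * fewest possible mbufs/clusters.  On success the original chain has
 * been freed; on failure NULL is returned and the original chain is
 * unchanged.  As noted above, m_pkthdr.pkt_hdr must have been cleared
 * by the caller beforehand.
 *
 *	struct mbuf *n = m_defrag(m, M_DONTWAIT);
 *	if (n != NULL) {
 *		m = n;
 *	}
 */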
7790
7791 void
7792 m_mchtype(struct mbuf *m, int t)
7793 {
7794 mtype_stat_inc(t);
7795 mtype_stat_dec(m->m_type);
7796 (m)->m_type = t;
7797 }
7798
7799 void *__unsafe_indexable
7800 m_mtod(struct mbuf *m)
7801 {
7802 return m_mtod_current(m);
7803 }
7804
7805 void
7806 m_mcheck(struct mbuf *m)
7807 {
7808 _MCHECK(m);
7809 }
7810
7811 /*
7812 * Return a pointer to mbuf/offset of location in mbuf chain.
7813 */
7814 struct mbuf *
7815 m_getptr(struct mbuf *m, int loc, int *off)
7816 {
7817 while (loc >= 0) {
7818 /* Normal end of search. */
7819 if (m->m_len > loc) {
7820 *off = loc;
7821 return m;
7822 } else {
7823 loc -= m->m_len;
7824 if (m->m_next == NULL) {
7825 if (loc == 0) {
7826 /* Point at the end of valid data. */
7827 *off = m->m_len;
7828 return m;
7829 }
7830 return NULL;
7831 }
7832 m = m->m_next;
7833 }
7834 }
7835 return NULL;
7836 }
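
/*
 * Illustrative sketch (hypothetical caller): locate the mbuf and the
 * offset within it that hold byte "loc" of a chain, then address that
 * byte directly.
 *
 *	int off;
 *	struct mbuf *n = m_getptr(m, loc, &off);
 *
 *	if (n != NULL) {
 *		uint8_t *p = mtod(n, uint8_t *) + off;
 *	}
 */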
7837
7838 #if CONFIG_MBUF_MCACHE
7839 /*
7840 * Inform the corresponding mcache(s) that there's a waiter below.
7841 */
7842 static void
7843 mbuf_waiter_inc(mbuf_class_t class, boolean_t comp)
7844 {
7845 mcache_waiter_inc(m_cache(class));
7846 if (comp) {
7847 if (class == MC_CL) {
7848 mcache_waiter_inc(m_cache(MC_MBUF_CL));
7849 } else if (class == MC_BIGCL) {
7850 mcache_waiter_inc(m_cache(MC_MBUF_BIGCL));
7851 } else if (class == MC_16KCL) {
7852 mcache_waiter_inc(m_cache(MC_MBUF_16KCL));
7853 } else {
7854 mcache_waiter_inc(m_cache(MC_MBUF_CL));
7855 mcache_waiter_inc(m_cache(MC_MBUF_BIGCL));
7856 }
7857 }
7858 }
7859
7860 /*
7861 * Inform the corresponding mcache(s) that there's no more waiter below.
7862 */
7863 static void
7864 mbuf_waiter_dec(mbuf_class_t class, boolean_t comp)
7865 {
7866 mcache_waiter_dec(m_cache(class));
7867 if (comp) {
7868 if (class == MC_CL) {
7869 mcache_waiter_dec(m_cache(MC_MBUF_CL));
7870 } else if (class == MC_BIGCL) {
7871 mcache_waiter_dec(m_cache(MC_MBUF_BIGCL));
7872 } else if (class == MC_16KCL) {
7873 mcache_waiter_dec(m_cache(MC_MBUF_16KCL));
7874 } else {
7875 mcache_waiter_dec(m_cache(MC_MBUF_CL));
7876 mcache_waiter_dec(m_cache(MC_MBUF_BIGCL));
7877 }
7878 }
7879 }
7880
7881 static bool mbuf_watchdog_defunct_active = false;
7882
7883 #endif /* CONFIG_MBUF_MCACHE */
7884
7885 static uint32_t
7886 mbuf_watchdog_socket_space(struct socket *so)
7887 {
7888 uint32_t space = 0;
7889
7890 if (so == NULL) {
7891 return 0;
7892 }
7893
7894 space = so->so_snd.sb_mbcnt + so->so_rcv.sb_mbcnt;
7895
7896 #if INET
7897 if ((SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) &&
7898 SOCK_PROTO(so) == IPPROTO_TCP) {
7899 space += tcp_reass_qlen_space(so);
7900 }
7901 #endif /* INET */
7902
7903 return space;
7904 }
7905
7906 struct mbuf_watchdog_defunct_args {
7907 struct proc *top_app;
7908 uint32_t top_app_space_used;
7909 bool non_blocking;
7910 };
7911
7912 static bool
7913 proc_fd_trylock(proc_t p)
7914 {
7915 return lck_mtx_try_lock(&p->p_fd.fd_lock);
7916 }
7917
7918 static int
7919 mbuf_watchdog_defunct_iterate(proc_t p, void *arg)
7920 {
7921 struct fileproc *fp = NULL;
7922 struct mbuf_watchdog_defunct_args *args =
7923 (struct mbuf_watchdog_defunct_args *)arg;
7924 uint32_t space_used = 0;
7925
7926 /*
7927 * Non-blocking is only used when dumping the mbuf usage from the watchdog
7928 */
7929 if (args->non_blocking) {
7930 if (!proc_fd_trylock(p)) {
7931 return PROC_RETURNED;
7932 }
7933 } else {
7934 proc_fdlock(p);
7935 }
7936 fdt_foreach(fp, p) {
7937 struct fileglob *fg = fp->fp_glob;
7938 socket_ref_t so = NULL;
7939
7940 if (FILEGLOB_DTYPE(fg) != DTYPE_SOCKET) {
7941 continue;
7942 }
7943 so = fg_get_data(fg);
7944 /*
7945 * We calculate the space without the socket
7946 * lock because we don't want to be blocked
7947 * by another process that called send() and
7948 * is stuck waiting for mbufs.
7949 *
7950 * These variables are 32-bit so we don't have
7951 * to worry about incomplete reads.
7952 */
7953 space_used += mbuf_watchdog_socket_space(so);
7954 }
7955 proc_fdunlock(p);
7956 if (space_used > args->top_app_space_used) {
7957 if (args->top_app != NULL) {
7958 proc_rele(args->top_app);
7959 }
7960 args->top_app = p;
7961 args->top_app_space_used = space_used;
7962
7963 return PROC_CLAIMED;
7964 } else {
7965 return PROC_RETURNED;
7966 }
7967 }
7968
7969 extern char *proc_name_address(void *p);
7970
7971 static void
7972 mbuf_watchdog_defunct(thread_call_param_t arg0, thread_call_param_t arg1)
7973 {
7974 #pragma unused(arg0, arg1)
7975 struct mbuf_watchdog_defunct_args args = {};
7976 struct fileproc *fp = NULL;
7977
7978 args.non_blocking = false;
7979 proc_iterate(PROC_ALLPROCLIST,
7980 mbuf_watchdog_defunct_iterate, &args, NULL, NULL);
7981
7982 /*
7983 * Defunct all sockets from this app.
7984 */
7985 if (args.top_app != NULL) {
7986 #if CONFIG_MBUF_MCACHE
7987 /* Restart the watchdog count. */
7988 lck_mtx_lock(mbuf_mlock);
7989 microuptime(&mb_wdtstart);
7990 lck_mtx_unlock(mbuf_mlock);
7991 #endif
7992 os_log(OS_LOG_DEFAULT, "%s: defuncting all sockets from %s.%d",
7993 __func__,
7994 proc_name_address(args.top_app),
7995 proc_pid(args.top_app));
7996 proc_fdlock(args.top_app);
7997 fdt_foreach(fp, args.top_app) {
7998 struct fileglob *fg = fp->fp_glob;
7999 struct socket *so = NULL;
8000
8001 if (FILEGLOB_DTYPE(fg) != DTYPE_SOCKET) {
8002 continue;
8003 }
8004 so = (struct socket *)fp_get_data(fp);
8005 if (!socket_try_lock(so)) {
8006 continue;
8007 }
8008 if (sosetdefunct(args.top_app, so,
8009 SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL,
8010 TRUE) == 0) {
8011 sodefunct(args.top_app, so,
8012 SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL);
8013 }
8014 socket_unlock(so, 0);
8015 }
8016 proc_fdunlock(args.top_app);
8017 proc_rele(args.top_app);
8018 mbstat.m_forcedefunct++;
8019 #if !CONFIG_MBUF_MCACHE
8020 zcache_drain(ZONE_ID_MBUF_CLUSTER_2K);
8021 zcache_drain(ZONE_ID_MBUF_CLUSTER_4K);
8022 zcache_drain(ZONE_ID_MBUF_CLUSTER_16K);
8023 zone_drain(zone_by_id(ZONE_ID_MBUF));
8024 zone_drain(zone_by_id(ZONE_ID_CLUSTER_2K));
8025 zone_drain(zone_by_id(ZONE_ID_CLUSTER_4K));
8026 zone_drain(zone_by_id(ZONE_ID_CLUSTER_16K));
8027 zone_drain(zone_by_id(ZONE_ID_MBUF_REF));
8028 #endif
8029 }
8030 #if CONFIG_MBUF_MCACHE
8031 mbuf_watchdog_defunct_active = false;
8032 #endif
8033 }
8034
8035 #if !CONFIG_MBUF_MCACHE
8036 static LCK_GRP_DECLARE(mbuf_exhausted_grp, "mbuf-exhausted");
8037 static LCK_TICKET_DECLARE(mbuf_exhausted_lock, &mbuf_exhausted_grp);
8038 static uint32_t mbuf_exhausted_mask;
8039
8040 #define MBUF_EXHAUSTED_DRAIN_MASK (\
8041 (1u << MC_MBUF) | \
8042 (1u << MC_CL) | \
8043 (1u << MC_BIGCL) | \
8044 (1u << MC_16KCL))
8045
8046 #define MBUF_EXHAUSTED_DEFUNCT_MASK (\
8047 (1u << MC_MBUF) | \
8048 (1u << MC_MBUF_CL) | \
8049 (1u << MC_MBUF_BIGCL) | \
8050 (1u << MC_MBUF_16KCL))
8051
8052 static void
8053 mbuf_watchdog_drain_composite(thread_call_param_t arg0, thread_call_param_t arg1)
8054 {
8055 #pragma unused(arg0, arg1)
8056 zcache_drain(ZONE_ID_MBUF_CLUSTER_2K);
8057 zcache_drain(ZONE_ID_MBUF_CLUSTER_4K);
8058 zcache_drain(ZONE_ID_MBUF_CLUSTER_16K);
8059 }
8060
8061 static void
8062 mbuf_zone_exhausted_start(uint32_t bit)
8063 {
8064 uint64_t deadline;
8065 uint32_t mask;
8066
8067 mask = mbuf_exhausted_mask;
8068 mbuf_exhausted_mask = mask | bit;
8069
8070 if ((mask & MBUF_EXHAUSTED_DRAIN_MASK) == 0 &&
8071 (bit & MBUF_EXHAUSTED_DRAIN_MASK)) {
8072 clock_interval_to_deadline(MB_WDT_MAXTIME * 1000 / 10,
8073 NSEC_PER_MSEC, &deadline);
8074 thread_call_enter_delayed(mbuf_drain_tcall, deadline);
8075 }
8076
8077 if ((mask & MBUF_EXHAUSTED_DEFUNCT_MASK) == 0 &&
8078 (bit & MBUF_EXHAUSTED_DEFUNCT_MASK)) {
8079 clock_interval_to_deadline(MB_WDT_MAXTIME * 1000 / 2,
8080 NSEC_PER_MSEC, &deadline);
8081 thread_call_enter_delayed(mbuf_defunct_tcall, deadline);
8082 }
8083 }
8084
8085 static void
8086 mbuf_zone_exhausted_end(uint32_t bit)
8087 {
8088 uint32_t mask;
8089
8090 mask = (mbuf_exhausted_mask &= ~bit);
8091
8092 if ((mask & MBUF_EXHAUSTED_DRAIN_MASK) == 0 &&
8093 (bit & MBUF_EXHAUSTED_DRAIN_MASK)) {
8094 thread_call_cancel(mbuf_drain_tcall);
8095 }
8096
8097 if ((mask & MBUF_EXHAUSTED_DEFUNCT_MASK) == 0 &&
8098 (bit & MBUF_EXHAUSTED_DEFUNCT_MASK)) {
8099 thread_call_cancel(mbuf_defunct_tcall);
8100 }
8101 }
8102
8103 static void
8104 mbuf_zone_exhausted(zone_id_t zid, zone_t zone __unused, bool exhausted)
8105 {
8106 uint32_t bit;
8107
8108 if (zid < m_class_to_zid(MBUF_CLASS_MIN) ||
8109 zid > m_class_to_zid(MBUF_CLASS_MAX)) {
8110 return;
8111 }
8112
8113 bit = 1u << m_class_from_zid(zid);
8114
8115 lck_ticket_lock_nopreempt(&mbuf_exhausted_lock, &mbuf_exhausted_grp);
8116
8117 if (exhausted) {
8118 mbuf_zone_exhausted_start(bit);
8119 } else {
8120 mbuf_zone_exhausted_end(bit);
8121 }
8122
8123 lck_ticket_unlock_nopreempt(&mbuf_exhausted_lock);
8124 }
8125 EVENT_REGISTER_HANDLER(ZONE_EXHAUSTED, mbuf_zone_exhausted);
8126 #endif /* !CONFIG_MBUF_MCACHE */
8127
8128 #if CONFIG_MBUF_MCACHE
8129 /*
8130 * Called during slab (blocking and non-blocking) allocation. If there
8131 * is at least one waiter, and the time since the first waiter was blocked
8132 * is greater than the watchdog timeout, panic the system.
8133 */
8134 static void
8135 mbuf_watchdog(void)
8136 {
8137 struct timeval now;
8138 unsigned int since;
8139 static thread_call_t defunct_tcall = NULL;
8140
8141 if (mb_waiters == 0 || !mb_watchdog) {
8142 return;
8143 }
8144
8145 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
8146
8147 microuptime(&now);
8148 since = now.tv_sec - mb_wdtstart.tv_sec;
8149
8150 if (mbuf_watchdog_defunct_active) {
8151 /*
8152 * Don't panic the system while we are trying
8153 * to find sockets to defunct.
8154 */
8155 return;
8156 }
8157 if (since >= MB_WDT_MAXTIME) {
8158 panic_plain("%s: %d waiters stuck for %u secs\n%s", __func__,
8159 mb_waiters, since, mbuf_dump());
8160 /* NOTREACHED */
8161 }
8162 /*
8163 * Check if we are about to panic the system due
8164 * to lack of mbufs and start defuncting sockets
8165 * from processes that use too many sockets.
8166 *
8167 * We're always called with the mbuf_mlock held,
8168 * so that also protects mbuf_watchdog_defunct_active.
8169 */
8170 if (since >= MB_WDT_MAXTIME / 2) {
8171 /*
8172 * Start a thread to defunct sockets
8173 * from apps that are over-using their socket
8174 * buffers.
8175 */
8176 if (defunct_tcall == NULL) {
8177 defunct_tcall =
8178 thread_call_allocate_with_options(mbuf_watchdog_defunct,
8179 NULL,
8180 THREAD_CALL_PRIORITY_KERNEL,
8181 THREAD_CALL_OPTIONS_ONCE);
8182 }
8183 if (defunct_tcall != NULL) {
8184 mbuf_watchdog_defunct_active = true;
8185 thread_call_enter(defunct_tcall);
8186 }
8187 }
8188 }
8189
8190 /*
8191 * Called during blocking allocation. Returns TRUE if one or more objects
8192 * are available at the per-CPU caches layer and that allocation should be
8193 * retried at that level.
8194 */
8195 static boolean_t
8196 mbuf_sleep(mbuf_class_t class, unsigned int num, int wait)
8197 {
8198 boolean_t mcache_retry = FALSE;
8199
8200 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
8201
8202 /* Check if there's anything at the cache layer */
8203 if (mbuf_cached_above(class, wait)) {
8204 mcache_retry = TRUE;
8205 goto done;
8206 }
8207
8208 /* Nothing? Then try hard to get it from somewhere */
8209 m_reclaim(class, num, (wait & MCR_COMP));
8210
8211 /* We tried hard and got something? */
8212 if (m_infree(class) > 0) {
8213 mbstat.m_wait++;
8214 goto done;
8215 } else if (mbuf_cached_above(class, wait)) {
8216 mbstat.m_wait++;
8217 mcache_retry = TRUE;
8218 goto done;
8219 } else if (wait & MCR_TRYHARD) {
8220 mcache_retry = TRUE;
8221 goto done;
8222 }
8223
8224 /*
8225 * There's really nothing for us right now; inform the
8226 * cache(s) that there is a waiter below and go to sleep.
8227 */
8228 mbuf_waiter_inc(class, (wait & MCR_COMP));
8229
8230 VERIFY(!(wait & MCR_NOSLEEP));
8231
8232 /*
8233 * If this is the first waiter, arm the watchdog timer. Otherwise
8234 * check if we need to panic the system due to watchdog timeout.
8235 */
8236 if (mb_waiters == 0) {
8237 microuptime(&mb_wdtstart);
8238 } else {
8239 mbuf_watchdog();
8240 }
8241
8242 mb_waiters++;
8243 m_region_expand(class) += m_total(class) + num;
8244 /* wake up the worker thread */
8245 if (mbuf_worker_ready &&
8246 mbuf_worker_needs_wakeup) {
8247 wakeup((caddr_t)&mbuf_worker_needs_wakeup);
8248 mbuf_worker_needs_wakeup = FALSE;
8249 }
8250 mbwdog_logger("waiting (%d mbufs in class %s)", num, m_cname(class));
8251 (void) msleep(mb_waitchan, mbuf_mlock, (PZERO - 1), m_cname(class), NULL);
8252 mbwdog_logger("woke up (%d mbufs in class %s) ", num, m_cname(class));
8253
8254 /* We are now up; stop getting notified until next round */
8255 mbuf_waiter_dec(class, (wait & MCR_COMP));
8256
8257 /* We waited and got something */
8258 if (m_infree(class) > 0) {
8259 mbstat.m_wait++;
8260 goto done;
8261 } else if (mbuf_cached_above(class, wait)) {
8262 mbstat.m_wait++;
8263 mcache_retry = TRUE;
8264 }
8265 done:
8266 return mcache_retry;
8267 }
8268
8269 __attribute__((noreturn))
8270 static void
8271 mbuf_worker_thread(void)
8272 {
8273 int mbuf_expand;
8274
8275 while (1) {
8276 lck_mtx_lock(mbuf_mlock);
8277 mbwdog_logger("worker thread running");
8278 mbuf_worker_run_cnt++;
8279 mbuf_expand = 0;
8280 /*
8281 * Allocations are based on page size, so if we have depleted
8282 * the reserved spaces, try to free mbufs from the major classes.
8283 */
8284 #if PAGE_SIZE == 4096
8285 uint32_t m_mbclusters = m_total(MC_MBUF) >> NMBPCLSHIFT;
8286 uint32_t m_clusters = m_total(MC_CL);
8287 uint32_t m_bigclusters = m_total(MC_BIGCL) << NCLPBGSHIFT;
8288 uint32_t sumclusters = m_mbclusters + m_clusters + m_bigclusters;
8289 if (sumclusters >= nclusters) {
8290 mbwdog_logger("reclaiming bigcl");
8291 mbuf_drain_locked(TRUE);
8292 m_reclaim(MC_BIGCL, 4, FALSE);
8293 }
8294 #else
8295 uint32_t m_16kclusters = m_total(MC_16KCL);
8296 if (njcl > 0 && (m_16kclusters << NCLPJCLSHIFT) >= njcl) {
8297 mbwdog_logger("reclaiming 16kcl");
8298 mbuf_drain_locked(TRUE);
8299 m_reclaim(MC_16KCL, 4, FALSE);
8300 }
8301 #endif
8302 if (m_region_expand(MC_CL) > 0) {
8303 int n;
8304 mb_expand_cl_cnt++;
8305 /* Adjust to the current number of clusters in use */
8306 n = m_region_expand(MC_CL) -
8307 (m_total(MC_CL) - m_infree(MC_CL));
8308 if ((n + m_total(MC_CL)) > m_maxlimit(MC_CL)) {
8309 n = m_maxlimit(MC_CL) - m_total(MC_CL);
8310 }
8311 if (n > 0) {
8312 mb_expand_cl_total += n;
8313 }
8314 m_region_expand(MC_CL) = 0;
8315
8316 if (n > 0) {
8317 mbwdog_logger("expanding MC_CL by %d", n);
8318 freelist_populate(MC_CL, n, M_WAIT);
8319 }
8320 }
8321 if (m_region_expand(MC_BIGCL) > 0) {
8322 int n;
8323 mb_expand_bigcl_cnt++;
8324 /* Adjust to the current number of 4 KB clusters in use */
8325 n = m_region_expand(MC_BIGCL) -
8326 (m_total(MC_BIGCL) - m_infree(MC_BIGCL));
8327 if ((n + m_total(MC_BIGCL)) > m_maxlimit(MC_BIGCL)) {
8328 n = m_maxlimit(MC_BIGCL) - m_total(MC_BIGCL);
8329 }
8330 if (n > 0) {
8331 mb_expand_bigcl_total += n;
8332 }
8333 m_region_expand(MC_BIGCL) = 0;
8334
8335 if (n > 0) {
8336 mbwdog_logger("expanding MC_BIGCL by %d", n);
8337 freelist_populate(MC_BIGCL, n, M_WAIT);
8338 }
8339 }
8340 if (m_region_expand(MC_16KCL) > 0) {
8341 int n;
8342 mb_expand_16kcl_cnt++;
8343 /* Adjust to the current number of 16 KB clusters in use */
8344 n = m_region_expand(MC_16KCL) -
8345 (m_total(MC_16KCL) - m_infree(MC_16KCL));
8346 if ((n + m_total(MC_16KCL)) > m_maxlimit(MC_16KCL)) {
8347 n = m_maxlimit(MC_16KCL) - m_total(MC_16KCL);
8348 }
8349 if (n > 0) {
8350 mb_expand_16kcl_total += n;
8351 }
8352 m_region_expand(MC_16KCL) = 0;
8353
8354 if (n > 0) {
8355 mbwdog_logger("expanding MC_16KCL by %d", n);
8356 (void) freelist_populate(MC_16KCL, n, M_WAIT);
8357 }
8358 }
8359
8360 /*
8361 * Because we can run out of memory before filling the mbuf
8362 * map, we should not allocate more clusters than there are
8363 * mbufs -- otherwise we could have a large number of useless
8364 * clusters allocated.
8365 */
8366 mbwdog_logger("totals: MC_MBUF %d MC_BIGCL %d MC_CL %d MC_16KCL %d",
8367 m_total(MC_MBUF), m_total(MC_BIGCL), m_total(MC_CL),
8368 m_total(MC_16KCL));
8369 uint32_t total_mbufs = m_total(MC_MBUF);
8370 uint32_t total_clusters = m_total(MC_BIGCL) + m_total(MC_CL) +
8371 m_total(MC_16KCL);
8372 if (total_mbufs < total_clusters) {
8373 mbwdog_logger("expanding MC_MBUF by %d",
8374 total_clusters - total_mbufs);
8375 }
8376 while (total_mbufs < total_clusters) {
8377 mb_expand_cnt++;
8378 if (freelist_populate(MC_MBUF, 1, M_WAIT) == 0) {
8379 break;
8380 }
8381 total_mbufs = m_total(MC_MBUF);
8382 total_clusters = m_total(MC_BIGCL) + m_total(MC_CL) +
8383 m_total(MC_16KCL);
8384 }
8385
8386 mbuf_worker_needs_wakeup = TRUE;
8387 /*
8388 * If there's a deadlock and we're not sending / receiving
8389 * packets, net_uptime() won't be updated. Update it here
8390 * so we are sure it's correct.
8391 */
8392 net_update_uptime();
8393 mbuf_worker_last_runtime = net_uptime();
8394 assert_wait((caddr_t)&mbuf_worker_needs_wakeup,
8395 THREAD_UNINT);
8396 mbwdog_logger("worker thread sleeping");
8397 lck_mtx_unlock(mbuf_mlock);
8398 (void) thread_block((thread_continue_t)mbuf_worker_thread);
8399 }
8400 }
8401
8402 __attribute__((noreturn))
8403 static void
8404 mbuf_worker_thread_init(void)
8405 {
8406 mbuf_worker_ready++;
8407 mbuf_worker_thread();
8408 }
8409
8410 static mcl_slab_t *
8411 slab_get(void *buf)
8412 {
8413 mcl_slabg_t *slg;
8414 unsigned int ix, k;
8415
8416 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
8417
8418 VERIFY(MBUF_IN_MAP(buf));
8419 ix = ((unsigned char *)buf - mbutl) >> MBSHIFT;
8420 VERIFY(ix < maxslabgrp);
8421
8422 if ((slg = slabstbl[ix]) == NULL) {
8423 /*
8424 * In the current implementation, we never shrink the slabs
8425 * table; if we attempt to reallocate a cluster group when
8426 * it's already allocated, panic since this is a sign of
8427 * memory corruption (slabstbl[ix] got nullified).
8428 */
8429 ++slabgrp;
8430 VERIFY(ix < slabgrp);
8431 /*
8432 * Slabs expansion can only be done single threaded; when
8433 * we get here, it must be as a result of m_clalloc() which
8434 * is serialized and therefore mb_clalloc_busy must be set.
8435 */
8436 VERIFY(mb_clalloc_busy);
8437 lck_mtx_unlock(mbuf_mlock);
8438
8439 /* This is a new buffer; create the slabs group for it */
8440 slg = zalloc_permanent_type(mcl_slabg_t);
8441 slg->slg_slab = zalloc_permanent(sizeof(mcl_slab_t) * NSLABSPMB,
8442 ZALIGN(mcl_slab_t));
8443
8444 lck_mtx_lock(mbuf_mlock);
8445 /*
8446 * No other thread could have gone into m_clalloc() after
8447 * we dropped the lock above, so verify that it's true.
8448 */
8449 VERIFY(mb_clalloc_busy);
8450
8451 slabstbl[ix] = slg;
8452
8453 /* Chain each slab in the group to its forward neighbor */
8454 for (k = 1; k < NSLABSPMB; k++) {
8455 slg->slg_slab[k - 1].sl_next = &slg->slg_slab[k];
8456 }
8457 VERIFY(slg->slg_slab[NSLABSPMB - 1].sl_next == NULL);
8458
8459 /* And chain the last slab in the previous group to this */
8460 if (ix > 0) {
8461 VERIFY(slabstbl[ix - 1]->
8462 slg_slab[NSLABSPMB - 1].sl_next == NULL);
8463 slabstbl[ix - 1]->slg_slab[NSLABSPMB - 1].sl_next =
8464 &slg->slg_slab[0];
8465 }
8466 }
8467
8468 ix = MTOPG(buf) % NSLABSPMB;
8469 VERIFY(ix < NSLABSPMB);
8470
8471 return &slg->slg_slab[ix];
8472 }
8473
8474 static void
8475 slab_init(mcl_slab_t *sp, mbuf_class_t class, u_int32_t flags,
8476 void *base, void *head, unsigned int len, int refcnt, int chunks)
8477 {
8478 sp->sl_class = class;
8479 sp->sl_flags = flags;
8480 sp->sl_base = base;
8481 sp->sl_head = head;
8482 sp->sl_len = len;
8483 sp->sl_refcnt = refcnt;
8484 sp->sl_chunks = chunks;
8485 slab_detach(sp);
8486 }
8487
8488 static void
8489 slab_insert(mcl_slab_t *sp, mbuf_class_t class)
8490 {
8491 VERIFY(slab_is_detached(sp));
8492 m_slab_cnt(class)++;
8493 TAILQ_INSERT_TAIL(&m_slablist(class), sp, sl_link);
8494 sp->sl_flags &= ~SLF_DETACHED;
8495
8496 /*
8497 * If a buffer spans multiple contiguous pages, clear the detached
8498 * flag on the remaining slabs as well
8499 */
8500 if (class == MC_16KCL) {
8501 int k;
8502 for (k = 1; k < NSLABSP16KB; k++) {
8503 sp = sp->sl_next;
8504 /* Next slab must already be present */
8505 VERIFY(sp != NULL && slab_is_detached(sp));
8506 sp->sl_flags &= ~SLF_DETACHED;
8507 }
8508 }
8509 }
8510
8511 static void
8512 slab_remove(mcl_slab_t *sp, mbuf_class_t class)
8513 {
8514 int k;
8515 VERIFY(!slab_is_detached(sp));
8516 VERIFY(m_slab_cnt(class) > 0);
8517 m_slab_cnt(class)--;
8518 TAILQ_REMOVE(&m_slablist(class), sp, sl_link);
8519 slab_detach(sp);
8520 if (class == MC_16KCL) {
8521 for (k = 1; k < NSLABSP16KB; k++) {
8522 sp = sp->sl_next;
8523 /* Next slab must already be present */
8524 VERIFY(sp != NULL);
8525 VERIFY(!slab_is_detached(sp));
8526 slab_detach(sp);
8527 }
8528 }
8529 }
8530
8531 static boolean_t
8532 slab_inrange(mcl_slab_t *sp, void *buf)
8533 {
8534 return (uintptr_t)buf >= (uintptr_t)sp->sl_base &&
8535 (uintptr_t)buf < ((uintptr_t)sp->sl_base + sp->sl_len);
8536 }
8537
8538 #undef panic
8539
8540 static void
8541 slab_nextptr_panic(mcl_slab_t *sp, void *addr)
8542 {
8543 int i;
8544 unsigned int chunk_len = sp->sl_len / sp->sl_chunks;
8545 uintptr_t buf = (uintptr_t)sp->sl_base;
8546
8547 for (i = 0; i < sp->sl_chunks; i++, buf += chunk_len) {
8548 void *next = ((mcache_obj_t *)buf)->obj_next;
8549 if (next != addr) {
8550 continue;
8551 }
8552 if (!mclverify) {
8553 if (next != NULL && !MBUF_IN_MAP(next)) {
8554 mcache_t *cp = m_cache(sp->sl_class);
8555 panic("%s: %s buffer %p in slab %p modified "
8556 "after free at offset 0: %p out of range "
8557 "[%p-%p)\n", __func__, cp->mc_name,
8558 (void *)buf, sp, next, mbutl, embutl);
8559 /* NOTREACHED */
8560 }
8561 } else {
8562 mcache_audit_t *mca = mcl_audit_buf2mca(sp->sl_class,
8563 (mcache_obj_t *)buf);
8564 mcl_audit_verify_nextptr(next, mca);
8565 }
8566 }
8567 }
8568
8569 static void
8570 slab_detach(mcl_slab_t *sp)
8571 {
8572 sp->sl_link.tqe_next = (mcl_slab_t *)-1;
8573 sp->sl_link.tqe_prev = (mcl_slab_t **)-1;
8574 sp->sl_flags |= SLF_DETACHED;
8575 }
8576
8577 static boolean_t
8578 slab_is_detached(mcl_slab_t *sp)
8579 {
8580 return (intptr_t)sp->sl_link.tqe_next == -1 &&
8581 (intptr_t)sp->sl_link.tqe_prev == -1 &&
8582 (sp->sl_flags & SLF_DETACHED);
8583 }
8584
8585 static void
8586 mcl_audit_init(void *buf, mcache_audit_t **mca_list,
8587 mcache_obj_t **con_list, size_t con_size, unsigned int num)
8588 {
8589 mcache_audit_t *mca, *mca_tail;
8590 mcache_obj_t *con = NULL;
8591 boolean_t save_contents = (con_list != NULL);
8592 unsigned int i, ix;
8593
8594 ASSERT(num <= NMBPG);
8595 ASSERT(con_list == NULL || con_size != 0);
8596
8597 ix = MTOPG(buf);
8598 VERIFY(ix < maxclaudit);
8599
8600 /* Make sure we haven't been here before */
8601 for (i = 0; i < num; i++) {
8602 VERIFY(mclaudit[ix].cl_audit[i] == NULL);
8603 }
8604
8605 mca = mca_tail = *mca_list;
8606 if (save_contents) {
8607 con = *con_list;
8608 }
8609
8610 for (i = 0; i < num; i++) {
8611 mcache_audit_t *next;
8612
8613 next = mca->mca_next;
8614 bzero(mca, sizeof(*mca));
8615 mca->mca_next = next;
8616 mclaudit[ix].cl_audit[i] = mca;
8617
8618 /* Attach the contents buffer if requested */
8619 if (save_contents) {
8620 mcl_saved_contents_t *msc =
8621 (mcl_saved_contents_t *)(void *)con;
8622
8623 VERIFY(msc != NULL);
8624 VERIFY(IS_P2ALIGNED(msc, sizeof(u_int64_t)));
8625 VERIFY(con_size == sizeof(*msc));
8626 mca->mca_contents_size = con_size;
8627 mca->mca_contents = msc;
8628 con = con->obj_next;
8629 bzero(mca->mca_contents, mca->mca_contents_size);
8630 }
8631
8632 mca_tail = mca;
8633 mca = mca->mca_next;
8634 }
8635
8636 if (save_contents) {
8637 *con_list = con;
8638 }
8639
8640 *mca_list = mca_tail->mca_next;
8641 mca_tail->mca_next = NULL;
8642 }
8643
8644 static void
8645 mcl_audit_free(void *buf, unsigned int num)
8646 {
8647 unsigned int i, ix;
8648 mcache_audit_t *mca, *mca_list;
8649
8650 ix = MTOPG(buf);
8651 VERIFY(ix < maxclaudit);
8652
8653 if (mclaudit[ix].cl_audit[0] != NULL) {
8654 mca_list = mclaudit[ix].cl_audit[0];
8655 for (i = 0; i < num; i++) {
8656 mca = mclaudit[ix].cl_audit[i];
8657 mclaudit[ix].cl_audit[i] = NULL;
8658 if (mca->mca_contents) {
8659 mcache_free(mcl_audit_con_cache,
8660 mca->mca_contents);
8661 }
8662 }
8663 mcache_free_ext(mcache_audit_cache,
8664 (mcache_obj_t *)mca_list);
8665 }
8666 }
8667
8668 /*
8669 * Given an address of a buffer (mbuf/2KB/4KB/16KB), return
8670 * the corresponding audit structure for that buffer.
8671 */
8672 static mcache_audit_t *
8673 mcl_audit_buf2mca(mbuf_class_t class, mcache_obj_t *mobj)
8674 {
8675 mcache_audit_t *mca = NULL;
8676 int ix = MTOPG(mobj), m_idx = 0;
8677 unsigned char *page_addr;
8678
8679 VERIFY(ix < maxclaudit);
8680 VERIFY(IS_P2ALIGNED(mobj, MIN(m_maxsize(class), PAGE_SIZE)));
8681
8682 page_addr = PGTOM(ix);
8683
8684 switch (class) {
8685 case MC_MBUF:
8686 /*
8687 * For the mbuf case, find the index of the page
8688 * used by the mbuf and use that index to locate the
8689 * base address of the page. Then find out the
8690 * mbuf index relative to the page base and use
8691 * it to locate the audit structure.
8692 */
8693 m_idx = MBPAGEIDX(page_addr, mobj);
8694 VERIFY(m_idx < (int)NMBPG);
8695 mca = mclaudit[ix].cl_audit[m_idx];
8696 break;
8697
8698 case MC_CL:
8699 /*
8700 * Same thing as above, but for 2KB clusters in a page.
8701 */
8702 m_idx = CLPAGEIDX(page_addr, mobj);
8703 VERIFY(m_idx < (int)NCLPG);
8704 mca = mclaudit[ix].cl_audit[m_idx];
8705 break;
8706
8707 case MC_BIGCL:
8708 m_idx = BCLPAGEIDX(page_addr, mobj);
8709 VERIFY(m_idx < (int)NBCLPG);
8710 mca = mclaudit[ix].cl_audit[m_idx];
8711 break;
8712 case MC_16KCL:
8713 /*
8714 * Same as above, but only return the first element.
8715 */
8716 mca = mclaudit[ix].cl_audit[0];
8717 break;
8718
8719 default:
8720 VERIFY(0);
8721 /* NOTREACHED */
8722 }
8723
8724 return mca;
8725 }
8726
8727 static void
8728 mcl_audit_mbuf(mcache_audit_t *mca, void *addr, boolean_t composite,
8729 boolean_t alloc)
8730 {
8731 struct mbuf *m = addr;
8732 mcache_obj_t *next = ((mcache_obj_t *)m)->obj_next;
8733
8734 VERIFY(mca->mca_contents != NULL &&
8735 mca->mca_contents_size == AUDIT_CONTENTS_SIZE);
8736
8737 if (mclverify) {
8738 mcl_audit_verify_nextptr(next, mca);
8739 }
8740
8741 if (!alloc) {
8742 /* Save constructed mbuf fields */
8743 mcl_audit_save_mbuf(m, mca);
8744 if (mclverify) {
8745 mcache_set_pattern(MCACHE_FREE_PATTERN, m,
8746 m_maxsize(MC_MBUF));
8747 }
8748 ((mcache_obj_t *)m)->obj_next = next;
8749 return;
8750 }
8751
8752 /* Check if the buffer has been corrupted while in freelist */
8753 if (mclverify) {
8754 mcache_audit_free_verify_set(mca, addr, 0, m_maxsize(MC_MBUF));
8755 }
8756 /* Restore constructed mbuf fields */
8757 mcl_audit_restore_mbuf(m, mca, composite);
8758 }
8759
8760 static void
8761 mcl_audit_restore_mbuf(struct mbuf *m, mcache_audit_t *mca, boolean_t composite)
8762 {
8763 struct mbuf *ms = MCA_SAVED_MBUF_PTR(mca);
8764
8765 if (composite) {
8766 struct mbuf *next = m->m_next;
8767 VERIFY(ms->m_flags == M_EXT && m_get_rfa(ms) != NULL &&
8768 MBUF_IS_COMPOSITE(ms));
8769 VERIFY(mca->mca_contents_size == AUDIT_CONTENTS_SIZE);
8770 /*
8771 * We could have hand-picked the mbuf fields and restore
8772 * them individually, but that will be a maintenance
8773 * headache. Instead, restore everything that was saved;
8774 * the mbuf layer will recheck and reinitialize anyway.
8775 */
8776 bcopy(ms, m, MCA_SAVED_MBUF_SIZE);
8777 m->m_next = next;
8778 } else {
8779 /*
8780 * For a regular mbuf (no cluster attached) there's nothing
8781 * to restore other than the type field, which is expected
8782 * to be MT_FREE.
8783 */
8784 m->m_type = ms->m_type;
8785 }
8786 _MCHECK(m);
8787 }
8788
8789 static void
8790 mcl_audit_save_mbuf(struct mbuf *m, mcache_audit_t *mca)
8791 {
8792 VERIFY(mca->mca_contents_size == AUDIT_CONTENTS_SIZE);
8793 _MCHECK(m);
8794 bcopy(m, MCA_SAVED_MBUF_PTR(mca), MCA_SAVED_MBUF_SIZE);
8795 }
8796
8797 static void
8798 mcl_audit_cluster(mcache_audit_t *mca, void *addr, size_t size, boolean_t alloc,
8799 boolean_t save_next)
8800 {
8801 mcache_obj_t *next = ((mcache_obj_t *)addr)->obj_next;
8802
8803 if (!alloc) {
8804 if (mclverify) {
8805 mcache_set_pattern(MCACHE_FREE_PATTERN, addr, size);
8806 }
8807 if (save_next) {
8808 mcl_audit_verify_nextptr(next, mca);
8809 ((mcache_obj_t *)addr)->obj_next = next;
8810 }
8811 } else if (mclverify) {
8812 /* Check if the buffer has been corrupted while in freelist */
8813 mcl_audit_verify_nextptr(next, mca);
8814 mcache_audit_free_verify_set(mca, addr, 0, size);
8815 }
8816 }
8817
8818 static void
8819 mcl_audit_scratch(mcache_audit_t *mca)
8820 {
8821 void *stack[MCACHE_STACK_DEPTH + 1];
8822 mcl_scratch_audit_t *msa;
8823 struct timeval now;
8824
8825 VERIFY(mca->mca_contents != NULL);
8826 msa = MCA_SAVED_SCRATCH_PTR(mca);
8827
8828 msa->msa_pthread = msa->msa_thread;
8829 msa->msa_thread = current_thread();
8830 bcopy(msa->msa_stack, msa->msa_pstack, sizeof(msa->msa_pstack));
8831 msa->msa_pdepth = msa->msa_depth;
8832 bzero(stack, sizeof(stack));
8833 msa->msa_depth = OSBacktrace(stack, MCACHE_STACK_DEPTH + 1) - 1;
8834 bcopy(&stack[1], msa->msa_stack, sizeof(msa->msa_stack));
8835
8836 msa->msa_ptstamp = msa->msa_tstamp;
8837 microuptime(&now);
8838 /* tstamp is in ms relative to base_ts */
8839 msa->msa_tstamp = ((now.tv_usec - mb_start.tv_usec) / 1000);
8840 if ((now.tv_sec - mb_start.tv_sec) > 0) {
8841 msa->msa_tstamp += ((now.tv_sec - mb_start.tv_sec) * 1000);
8842 }
8843 }
8844
8845 __abortlike
8846 static void
8847 mcl_audit_mcheck_panic(struct mbuf *m)
8848 {
8849 char buf[DUMP_MCA_BUF_SIZE];
8850 mcache_audit_t *mca;
8851
8852 MRANGE(m);
8853 mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m);
8854
8855 panic("mcl_audit: freed mbuf %p with type 0x%x (instead of 0x%x)\n%s",
8856 m, (u_int16_t)m->m_type, MT_FREE, mcache_dump_mca(buf, mca));
8857 /* NOTREACHED */
8858 }
8859
8860 __abortlike
8861 static void
8862 mcl_audit_verify_nextptr_panic(void *next, mcache_audit_t *mca)
8863 {
8864 char buf[DUMP_MCA_BUF_SIZE];
8865 panic("mcl_audit: buffer %p modified after free at offset 0: "
8866 "%p out of range [%p-%p)\n%s\n",
8867 mca->mca_addr, next, mbutl, embutl, mcache_dump_mca(buf, mca));
8868 /* NOTREACHED */
8869 }
8870
8871 static void
8872 mcl_audit_verify_nextptr(void *next, mcache_audit_t *mca)
8873 {
8874 if (next != NULL && !MBUF_IN_MAP(next) &&
8875 (next != (void *)MCACHE_FREE_PATTERN || !mclverify)) {
8876 mcl_audit_verify_nextptr_panic(next, mca);
8877 }
8878 }
8879
8880 static uintptr_t
8881 hash_mix(uintptr_t x)
8882 {
8883 #ifndef __LP64__
8884 x += ~(x << 15);
8885 x ^= (x >> 10);
8886 x += (x << 3);
8887 x ^= (x >> 6);
8888 x += ~(x << 11);
8889 x ^= (x >> 16);
8890 #else
8891 x += ~(x << 32);
8892 x ^= (x >> 22);
8893 x += ~(x << 13);
8894 x ^= (x >> 8);
8895 x += (x << 3);
8896 x ^= (x >> 15);
8897 x += ~(x << 27);
8898 x ^= (x >> 31);
8899 #endif
8900 return x;
8901 }
8902
8903 static uint32_t
8904 hashbacktrace(uintptr_t* bt, uint32_t depth, uint32_t max_size)
8905 {
8906 uintptr_t hash = 0;
8907 uintptr_t mask = max_size - 1;
8908
8909 while (depth) {
8910 hash += bt[--depth];
8911 }
8912
8913 hash = hash_mix(hash) & mask;
8914
8915 assert(hash < max_size);
8916
8917 return (uint32_t) hash;
8918 }
8919
8920 static uint32_t
8921 hashaddr(uintptr_t pt, uint32_t max_size)
8922 {
8923 uintptr_t hash = 0;
8924 uintptr_t mask = max_size - 1;
8925
8926 hash = hash_mix(pt) & mask;
8927
8928 assert(hash < max_size);
8929
8930 return (uint32_t) hash;
8931 }
8932
8933 /* This function turns on mbuf leak detection */
8934 static void
8935 mleak_activate(void)
8936 {
8937 mleak_table.mleak_sample_factor = MLEAK_SAMPLE_FACTOR;
8938 PE_parse_boot_argn("mleak_sample_factor",
8939 &mleak_table.mleak_sample_factor,
8940 sizeof(mleak_table.mleak_sample_factor));
8941
8942 if (mleak_table.mleak_sample_factor == 0) {
8943 mclfindleak = 0;
8944 }
8945
8946 if (mclfindleak == 0) {
8947 return;
8948 }
8949
8950 vm_size_t alloc_size =
8951 mleak_alloc_buckets * sizeof(struct mallocation);
8952 vm_size_t trace_size = mleak_trace_buckets * sizeof(struct mtrace);
8953
8954 mleak_allocations = zalloc_permanent(alloc_size, ZALIGN(struct mallocation));
8955 mleak_traces = zalloc_permanent(trace_size, ZALIGN(struct mtrace));
8956 mleak_stat = zalloc_permanent(MLEAK_STAT_SIZE(MLEAK_NUM_TRACES),
8957 ZALIGN(mleak_stat_t));
8958
8959 mleak_stat->ml_cnt = MLEAK_NUM_TRACES;
8960 #ifdef __LP64__
8961 mleak_stat->ml_isaddr64 = 1;
8962 #endif /* __LP64__ */
8963 }
8964
8965 static void
8966 mleak_logger(u_int32_t num, mcache_obj_t *addr, boolean_t alloc)
8967 {
8968 int temp;
8969
8970 if (mclfindleak == 0) {
8971 return;
8972 }
8973
8974 if (!alloc) {
8975 return mleak_free(addr);
8976 }
8977
8978 temp = os_atomic_inc_orig(&mleak_table.mleak_capture, relaxed);
8979
8980 if ((temp % mleak_table.mleak_sample_factor) == 0 && addr != NULL) {
8981 uintptr_t bt[MLEAK_STACK_DEPTH];
8982 unsigned int logged = backtrace(bt, MLEAK_STACK_DEPTH, NULL, NULL);
8983 mleak_log(bt, addr, logged, num);
8984 }
8985 }
8986
8987 /*
8988 * This function records the allocation in the mleak_allocations table
8989 * and the backtrace in the mleak_traces table. If the allocation slot is
8990 * already in use, the old allocation is replaced with the new one; if the
8991 * trace slot is already in use, return (or increment the refcount if it is the same trace).
8992 */
8993 static boolean_t
8994 mleak_log(uintptr_t *bt, mcache_obj_t *addr, uint32_t depth, int num)
8995 {
8996 struct mallocation *allocation;
8997 struct mtrace *trace;
8998 uint32_t trace_index;
8999
9000 /* Quit if someone else is modifying the tables */
9001 if (!lck_mtx_try_lock_spin(mleak_lock)) {
9002 mleak_table.total_conflicts++;
9003 return FALSE;
9004 }
9005
9006 allocation = &mleak_allocations[hashaddr((uintptr_t)addr,
9007 mleak_alloc_buckets)];
9008 trace_index = hashbacktrace(bt, depth, mleak_trace_buckets);
9009 trace = &mleak_traces[trace_index];
9010
9011 VERIFY(allocation <= &mleak_allocations[mleak_alloc_buckets - 1]);
9012 VERIFY(trace <= &mleak_traces[mleak_trace_buckets - 1]);
9013
9014 allocation->hitcount++;
9015 trace->hitcount++;
9016
9017 /*
9018 * If the allocation bucket we want is occupied
9019 * and the occupier has the same trace, just bail.
9020 */
9021 if (allocation->element != NULL &&
9022 trace_index == allocation->trace_index) {
9023 mleak_table.alloc_collisions++;
9024 lck_mtx_unlock(mleak_lock);
9025 return TRUE;
9026 }
9027
9028 /*
9029 * Store the backtrace in the traces array;
9030 * Size of zero = trace bucket is free.
9031 */
9032 if (trace->allocs > 0 &&
9033 bcmp(trace->addr, bt, (depth * sizeof(uintptr_t))) != 0) {
9034 /* Different, unique trace, but the same hash! Bail out. */
9035 trace->collisions++;
9036 mleak_table.trace_collisions++;
9037 lck_mtx_unlock(mleak_lock);
9038 return TRUE;
9039 } else if (trace->allocs > 0) {
9040 /* Same trace, already added, so increment refcount */
9041 trace->allocs++;
9042 } else {
9043 /* Found an unused trace bucket, so record the trace here */
9044 if (trace->depth != 0) {
9045 /* this slot previously used but not currently in use */
9046 mleak_table.trace_overwrites++;
9047 }
9048 mleak_table.trace_recorded++;
9049 trace->allocs = 1;
9050 memcpy(trace->addr, bt, (depth * sizeof(uintptr_t)));
9051 trace->depth = depth;
9052 trace->collisions = 0;
9053 }
9054
9055 /* Step 2: Store the allocation record in the allocations array */
9056 if (allocation->element != NULL) {
9057 /*
9058 * Replace an existing allocation. No need to preserve
9059 * because only a subset of the allocations are being
9060 * recorded anyway.
9061 */
9062 mleak_table.alloc_collisions++;
9063 } else if (allocation->trace_index != 0) {
9064 mleak_table.alloc_overwrites++;
9065 }
9066 allocation->element = addr;
9067 allocation->trace_index = trace_index;
9068 allocation->count = num;
9069 mleak_table.alloc_recorded++;
9070 mleak_table.outstanding_allocs++;
9071
9072 lck_mtx_unlock(mleak_lock);
9073 return TRUE;
9074 }
9075
9076 static void
9077 mleak_free(mcache_obj_t *addr)
9078 {
9079 while (addr != NULL) {
9080 struct mallocation *allocation = &mleak_allocations
9081 [hashaddr((uintptr_t)addr, mleak_alloc_buckets)];
9082
9083 if (allocation->element == addr &&
9084 allocation->trace_index < mleak_trace_buckets) {
9085 lck_mtx_lock_spin(mleak_lock);
9086 if (allocation->element == addr &&
9087 allocation->trace_index < mleak_trace_buckets) {
9088 struct mtrace *trace;
9089 trace = &mleak_traces[allocation->trace_index];
9090 /* allocs = 0 means trace bucket is unused */
9091 if (trace->allocs > 0) {
9092 trace->allocs--;
9093 }
9094 if (trace->allocs == 0) {
9095 trace->depth = 0;
9096 }
9097 /* NULL element means alloc bucket is unused */
9098 allocation->element = NULL;
9099 mleak_table.outstanding_allocs--;
9100 }
9101 lck_mtx_unlock(mleak_lock);
9102 }
9103 addr = addr->obj_next;
9104 }
9105 }
9106
9107 static void
9108 mleak_sort_traces()
9109 {
9110 int i, j, k;
9111 struct mtrace *swap;
9112
9113 for (i = 0; i < MLEAK_NUM_TRACES; i++) {
9114 mleak_top_trace[i] = NULL;
9115 }
9116
9117 for (i = 0, j = 0; j < MLEAK_NUM_TRACES && i < mleak_trace_buckets; i++) {
9118 if (mleak_traces[i].allocs <= 0) {
9119 continue;
9120 }
9121
9122 mleak_top_trace[j] = &mleak_traces[i];
9123 for (k = j; k > 0; k--) {
9124 if (mleak_top_trace[k]->allocs <=
9125 mleak_top_trace[k - 1]->allocs) {
9126 break;
9127 }
9128
9129 swap = mleak_top_trace[k - 1];
9130 mleak_top_trace[k - 1] = mleak_top_trace[k];
9131 mleak_top_trace[k] = swap;
9132 }
9133 j++;
9134 }
9135
9136 j--;
9137 for (; i < mleak_trace_buckets; i++) {
9138 if (mleak_traces[i].allocs <= mleak_top_trace[j]->allocs) {
9139 continue;
9140 }
9141
9142 mleak_top_trace[j] = &mleak_traces[i];
9143
9144 for (k = j; k > 0; k--) {
9145 if (mleak_top_trace[k]->allocs <=
9146 mleak_top_trace[k - 1]->allocs) {
9147 break;
9148 }
9149
9150 swap = mleak_top_trace[k - 1];
9151 mleak_top_trace[k - 1] = mleak_top_trace[k];
9152 mleak_top_trace[k] = swap;
9153 }
9154 }
9155 }
9156
9157 static void
9158 mleak_update_stats()
9159 {
9160 mleak_trace_stat_t *mltr;
9161 int i;
9162
9163 VERIFY(mleak_stat != NULL);
9164 #ifdef __LP64__
9165 VERIFY(mleak_stat->ml_isaddr64);
9166 #else
9167 VERIFY(!mleak_stat->ml_isaddr64);
9168 #endif /* !__LP64__ */
9169 VERIFY(mleak_stat->ml_cnt == MLEAK_NUM_TRACES);
9170
9171 mleak_sort_traces();
9172
9173 mltr = &mleak_stat->ml_trace[0];
9174 bzero(mltr, sizeof(*mltr) * MLEAK_NUM_TRACES);
9175 for (i = 0; i < MLEAK_NUM_TRACES; i++) {
9176 int j;
9177
9178 if (mleak_top_trace[i] == NULL ||
9179 mleak_top_trace[i]->allocs == 0) {
9180 continue;
9181 }
9182
9183 mltr->mltr_collisions = mleak_top_trace[i]->collisions;
9184 mltr->mltr_hitcount = mleak_top_trace[i]->hitcount;
9185 mltr->mltr_allocs = mleak_top_trace[i]->allocs;
9186 mltr->mltr_depth = mleak_top_trace[i]->depth;
9187
9188 VERIFY(mltr->mltr_depth <= MLEAK_STACK_DEPTH);
9189 for (j = 0; j < mltr->mltr_depth; j++) {
9190 mltr->mltr_addr[j] = mleak_top_trace[i]->addr[j];
9191 }
9192
9193 mltr++;
9194 }
9195 }
9196
9197 static struct mbtypes {
9198 int mt_type;
9199 const char *mt_name;
9200 } mbtypes[] = {
9201 { MT_DATA, "data" },
9202 { MT_OOBDATA, "oob data" },
9203 { MT_CONTROL, "ancillary data" },
9204 { MT_HEADER, "packet headers" },
9205 { MT_SOCKET, "socket structures" },
9206 { MT_PCB, "protocol control blocks" },
9207 { MT_RTABLE, "routing table entries" },
9208 { MT_HTABLE, "IMP host table entries" },
9209 { MT_ATABLE, "address resolution tables" },
9210 { MT_FTABLE, "fragment reassembly queue headers" },
9211 { MT_SONAME, "socket names and addresses" },
9212 { MT_SOOPTS, "socket options" },
9213 { MT_RIGHTS, "access rights" },
9214 { MT_IFADDR, "interface addresses" },
9215 { MT_TAG, "packet tags" },
9216 { 0, NULL }
9217 };
9218
9219 #define MBUF_DUMP_BUF_CHK() { \
9220 clen -= k; \
9221 if (clen < 1) \
9222 goto done; \
9223 c += k; \
9224 }
9225
9226 static char *
9227 mbuf_dump(void)
9228 {
9229 unsigned long totmem = 0, totfree = 0, totmbufs, totused, totpct,
9230 totreturned = 0;
9231 u_int32_t m_mbufs = 0, m_clfree = 0, m_bigclfree = 0;
9232 u_int32_t m_mbufclfree = 0, m_mbufbigclfree = 0;
9233 u_int32_t m_16kclusters = 0, m_16kclfree = 0, m_mbuf16kclfree = 0;
9234 int nmbtypes = sizeof(mbstat.m_mtypes) / sizeof(short);
9235 uint8_t seen[256];
9236 struct mbtypes *mp;
9237 mb_class_stat_t *sp;
9238 mleak_trace_stat_t *mltr;
9239 char *c = mbuf_dump_buf;
9240 int i, j, k, clen = MBUF_DUMP_BUF_SIZE;
9241 struct mbuf_watchdog_defunct_args args = {};
9242
9243 mbuf_dump_buf[0] = '\0';
9244
9245 /* synchronize all statistics in the mbuf table */
9246 mbuf_stat_sync();
9247 mbuf_mtypes_sync(TRUE);
9248
9249 sp = &mb_stat->mbs_class[0];
9250 for (i = 0; i < mb_stat->mbs_cnt; i++, sp++) {
9251 u_int32_t mem;
9252
9253 if (m_class(i) == MC_MBUF) {
9254 m_mbufs = sp->mbcl_active;
9255 } else if (m_class(i) == MC_CL) {
9256 m_clfree = sp->mbcl_total - sp->mbcl_active;
9257 } else if (m_class(i) == MC_BIGCL) {
9258 m_bigclfree = sp->mbcl_total - sp->mbcl_active;
9259 } else if (njcl > 0 && m_class(i) == MC_16KCL) {
9260 m_16kclfree = sp->mbcl_total - sp->mbcl_active;
9261 m_16kclusters = sp->mbcl_total;
9262 } else if (m_class(i) == MC_MBUF_CL) {
9263 m_mbufclfree = sp->mbcl_total - sp->mbcl_active;
9264 } else if (m_class(i) == MC_MBUF_BIGCL) {
9265 m_mbufbigclfree = sp->mbcl_total - sp->mbcl_active;
9266 } else if (njcl > 0 && m_class(i) == MC_MBUF_16KCL) {
9267 m_mbuf16kclfree = sp->mbcl_total - sp->mbcl_active;
9268 }
9269
9270 mem = sp->mbcl_ctotal * sp->mbcl_size;
9271 totmem += mem;
9272 totfree += (sp->mbcl_mc_cached + sp->mbcl_infree) *
9273 sp->mbcl_size;
9274 totreturned += sp->mbcl_release_cnt;
9275 }
9276
9277 /* adjust free counts to include composite caches */
9278 m_clfree += m_mbufclfree;
9279 m_bigclfree += m_mbufbigclfree;
9280 m_16kclfree += m_mbuf16kclfree;
9281
9282 totmbufs = 0;
9283 for (mp = mbtypes; mp->mt_name != NULL; mp++) {
9284 totmbufs += mbstat.m_mtypes[mp->mt_type];
9285 }
9286 if (totmbufs > m_mbufs) {
9287 totmbufs = m_mbufs;
9288 }
9289 k = scnprintf(c, clen, "%lu/%u mbufs in use:\n", totmbufs, m_mbufs);
9290 MBUF_DUMP_BUF_CHK();
9291
9292 bzero(&seen, sizeof(seen));
9293 for (mp = mbtypes; mp->mt_name != NULL; mp++) {
9294 if (mbstat.m_mtypes[mp->mt_type] != 0) {
9295 seen[mp->mt_type] = 1;
9296 k = scnprintf(c, clen, "\t%u mbufs allocated to %s\n",
9297 mbstat.m_mtypes[mp->mt_type], mp->mt_name);
9298 MBUF_DUMP_BUF_CHK();
9299 }
9300 }
9301 seen[MT_FREE] = 1;
9302 for (i = 0; i < nmbtypes; i++) {
9303 if (!seen[i] && mbstat.m_mtypes[i] != 0) {
9304 k = scnprintf(c, clen, "\t%u mbufs allocated to "
9305 "<mbuf type %d>\n", mbstat.m_mtypes[i], i);
9306 MBUF_DUMP_BUF_CHK();
9307 }
9308 }
9309 if ((m_mbufs - totmbufs) > 0) {
9310 k = scnprintf(c, clen, "\t%lu mbufs allocated to caches\n",
9311 m_mbufs - totmbufs);
9312 MBUF_DUMP_BUF_CHK();
9313 }
9314 k = scnprintf(c, clen, "%u/%u mbuf 2KB clusters in use\n"
9315 "%u/%u mbuf 4KB clusters in use\n",
9316 (unsigned int)(mbstat.m_clusters - m_clfree),
9317 (unsigned int)mbstat.m_clusters,
9318 (unsigned int)(mbstat.m_bigclusters - m_bigclfree),
9319 (unsigned int)mbstat.m_bigclusters);
9320 MBUF_DUMP_BUF_CHK();
9321
9322 if (njcl > 0) {
9323 k = scnprintf(c, clen, "%u/%u mbuf %uKB clusters in use\n",
9324 m_16kclusters - m_16kclfree, m_16kclusters,
9325 njclbytes / 1024);
9326 MBUF_DUMP_BUF_CHK();
9327 }
9328 totused = totmem - totfree;
9329 if (totmem == 0) {
9330 totpct = 0;
9331 } else if (totused < (ULONG_MAX / 100)) {
9332 totpct = (totused * 100) / totmem;
9333 } else {
9334 u_long totmem1 = totmem / 100;
9335 u_long totused1 = totused / 100;
9336 totpct = (totused1 * 100) / totmem1;
9337 }
9338 k = scnprintf(c, clen, "%lu KB allocated to network (approx. %lu%% "
9339 "in use)\n", totmem / 1024, totpct);
9340 MBUF_DUMP_BUF_CHK();
9341 k = scnprintf(c, clen, "%lu KB returned to the system\n",
9342 totreturned / 1024);
9343 MBUF_DUMP_BUF_CHK();
9344
9345 net_update_uptime();
9346
9347 k = scnprintf(c, clen,
9348 "worker thread runs: %u, expansions: %llu, cl %llu/%llu, "
9349 "bigcl %llu/%llu, 16k %llu/%llu\n", mbuf_worker_run_cnt,
9350 mb_expand_cnt, mb_expand_cl_cnt, mb_expand_cl_total,
9351 mb_expand_bigcl_cnt, mb_expand_bigcl_total, mb_expand_16kcl_cnt,
9352 mb_expand_16kcl_total);
9353 MBUF_DUMP_BUF_CHK();
9354 if (mbuf_worker_last_runtime != 0) {
9355 k = scnprintf(c, clen, "worker thread last run time: "
9356 "%llu (%llu seconds ago)\n",
9357 mbuf_worker_last_runtime,
9358 net_uptime() - mbuf_worker_last_runtime);
9359 MBUF_DUMP_BUF_CHK();
9360 }
9361 if (mbuf_drain_last_runtime != 0) {
9362 k = scnprintf(c, clen, "drain routine last run time: "
9363 "%llu (%llu seconds ago)\n",
9364 mbuf_drain_last_runtime,
9365 net_uptime() - mbuf_drain_last_runtime);
9366 MBUF_DUMP_BUF_CHK();
9367 }
9368
9369 /*
9370 * Log where the most mbufs have accumulated:
9371 * - Process socket buffers
9372 * - TCP reassembly queue
9373 * - Interface AQM queue (output) and DLIL input queue
9374 */
9375 args.non_blocking = true;
9376 proc_iterate(PROC_ALLPROCLIST,
9377 mbuf_watchdog_defunct_iterate, &args, NULL, NULL);
9378 if (args.top_app != NULL) {
9379 k = scnprintf(c, clen, "\ntop proc mbuf space %u bytes by %s:%d\n",
9380 args.top_app_space_used,
9381 proc_name_address(args.top_app),
9382 proc_pid(args.top_app));
9383 proc_rele(args.top_app);
9384 }
9385 MBUF_DUMP_BUF_CHK();
9386
9387 #if INET
9388 k = dump_tcp_reass_qlen(c, clen);
9389 MBUF_DUMP_BUF_CHK();
9390 #endif /* INET */
9391
9392 #if MPTCP
9393 k = dump_mptcp_reass_qlen(c, clen);
9394 MBUF_DUMP_BUF_CHK();
9395 #endif /* MPTCP */
9396
9397 #if NETWORKING
9398 k = dlil_dump_top_if_qlen(c, clen);
9399 MBUF_DUMP_BUF_CHK();
9400 #endif /* NETWORKING */
9401
9402 /* mbuf leak detection statistics */
9403 mleak_update_stats();
9404
9405 k = scnprintf(c, clen, "\nmbuf leak detection table:\n");
9406 MBUF_DUMP_BUF_CHK();
9407 k = scnprintf(c, clen, "\ttotal captured: %u (one per %u)\n",
9408 mleak_table.mleak_capture / mleak_table.mleak_sample_factor,
9409 mleak_table.mleak_sample_factor);
9410 MBUF_DUMP_BUF_CHK();
9411 k = scnprintf(c, clen, "\ttotal allocs outstanding: %llu\n",
9412 mleak_table.outstanding_allocs);
9413 MBUF_DUMP_BUF_CHK();
9414 k = scnprintf(c, clen, "\tnew hash recorded: %llu allocs, %llu traces\n",
9415 mleak_table.alloc_recorded, mleak_table.trace_recorded);
9416 MBUF_DUMP_BUF_CHK();
9417 k = scnprintf(c, clen, "\thash collisions: %llu allocs, %llu traces\n",
9418 mleak_table.alloc_collisions, mleak_table.trace_collisions);
9419 MBUF_DUMP_BUF_CHK();
9420 k = scnprintf(c, clen, "\toverwrites: %llu allocs, %llu traces\n",
9421 mleak_table.alloc_overwrites, mleak_table.trace_overwrites);
9422 MBUF_DUMP_BUF_CHK();
9423 k = scnprintf(c, clen, "\tlock conflicts: %llu\n\n",
9424 mleak_table.total_conflicts);
9425 MBUF_DUMP_BUF_CHK();
9426
9427 k = scnprintf(c, clen, "top %d outstanding traces:\n",
9428 mleak_stat->ml_cnt);
9429 MBUF_DUMP_BUF_CHK();
9430 for (i = 0; i < mleak_stat->ml_cnt; i++) {
9431 mltr = &mleak_stat->ml_trace[i];
9432 k = scnprintf(c, clen, "[%d] %llu outstanding alloc(s), "
9433 "%llu hit(s), %llu collision(s)\n", (i + 1),
9434 mltr->mltr_allocs, mltr->mltr_hitcount,
9435 mltr->mltr_collisions);
9436 MBUF_DUMP_BUF_CHK();
9437 }
9438
9439 if (mleak_stat->ml_isaddr64) {
9440 k = scnprintf(c, clen, MB_LEAK_HDR_64);
9441 } else {
9442 k = scnprintf(c, clen, MB_LEAK_HDR_32);
9443 }
9444 MBUF_DUMP_BUF_CHK();
9445
9446 for (i = 0; i < MLEAK_STACK_DEPTH; i++) {
9447 k = scnprintf(c, clen, "%2d: ", (i + 1));
9448 MBUF_DUMP_BUF_CHK();
9449 for (j = 0; j < mleak_stat->ml_cnt; j++) {
9450 mltr = &mleak_stat->ml_trace[j];
9451 if (i < mltr->mltr_depth) {
9452 if (mleak_stat->ml_isaddr64) {
9453 k = scnprintf(c, clen, "0x%0llx ",
9454 (uint64_t)VM_KERNEL_UNSLIDE(
9455 mltr->mltr_addr[i]));
9456 } else {
9457 k = scnprintf(c, clen,
9458 "0x%08x ",
9459 (uint32_t)VM_KERNEL_UNSLIDE(
9460 mltr->mltr_addr[i]));
9461 }
9462 } else {
9463 if (mleak_stat->ml_isaddr64) {
9464 k = scnprintf(c, clen,
9465 MB_LEAK_SPACING_64);
9466 } else {
9467 k = scnprintf(c, clen,
9468 MB_LEAK_SPACING_32);
9469 }
9470 }
9471 MBUF_DUMP_BUF_CHK();
9472 }
9473 k = scnprintf(c, clen, "\n");
9474 MBUF_DUMP_BUF_CHK();
9475 }
9476
9477 done:
9478 return mbuf_dump_buf;
9479 }
9480
9481 #undef MBUF_DUMP_BUF_CHK
9482 #endif /* CONFIG_MBUF_MCACHE */
9483
9484 /*
9485 * Convert between a regular and a packet header mbuf. Caller is responsible
9486 * for setting or clearing M_PKTHDR; this routine does the rest of the work.
9487 */
9488 int
9489 m_reinit(struct mbuf *m, int hdr)
9490 {
9491 int ret = 0;
9492
9493 if (hdr) {
9494 VERIFY(!(m->m_flags & M_PKTHDR));
9495 if (!(m->m_flags & M_EXT) &&
9496 (m->m_data != (uintptr_t)m->m_dat || m->m_len > 0)) {
9497 /*
9498 * If there's no external cluster attached and the
9499 * mbuf appears to contain user data, we cannot
9500 * safely convert this to a packet header mbuf,
9501 * as the packet header structure might overlap
9502 * with the data.
9503 */
9504 printf("%s: cannot set M_PKTHDR on altered mbuf %llx, "
9505 "m_data %llx (expected %llx), "
9506 "m_len %d (expected 0)\n",
9507 __func__,
9508 (uint64_t)VM_KERNEL_ADDRPERM((uintptr_t)m),
9509 (uint64_t)VM_KERNEL_ADDRPERM((uintptr_t)m->m_data),
9510 (uint64_t)VM_KERNEL_ADDRPERM((uintptr_t)(m->m_dat)), m->m_len);
9511 ret = EBUSY;
9512 } else {
9513 VERIFY((m->m_flags & M_EXT) || m->m_data == (uintptr_t)m->m_dat);
9514 m->m_flags |= M_PKTHDR;
9515 MBUF_INIT_PKTHDR(m);
9516 }
9517 } else {
9518 /* Free the aux data and tags if there is any */
9519 m_tag_delete_chain(m);
9520 m_do_tx_compl_callback(m, NULL);
9521 m->m_flags &= ~M_PKTHDR;
9522 }
9523
9524 return ret;
9525 }
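
/*
 * Illustrative sketch (an assumption, not from the original source):
 * promote a plain, empty mbuf to a packet header mbuf; a non-zero return
 * (EBUSY) means the mbuf already carries data and cannot be converted
 * safely.  Calling m_reinit(m, 0) later strips the header again and
 * deletes any tags and transmit completion callbacks.
 *
 *	if (m_reinit(m, 1) != 0) {
 *		m_freem(m);
 *		return EBUSY;
 *	}
 */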
9526
9527 int
9528 m_ext_set_prop(struct mbuf *m, uint32_t o, uint32_t n)
9529 {
9530 ASSERT(m->m_flags & M_EXT);
9531 return os_atomic_cmpxchg(&MEXT_PRIV(m), o, n, acq_rel);
9532 }
9533
9534 uint32_t
9535 m_ext_get_prop(struct mbuf *m)
9536 {
9537 ASSERT(m->m_flags & M_EXT);
9538 return MEXT_PRIV(m);
9539 }
9540
9541 int
9542 m_ext_paired_is_active(struct mbuf *m)
9543 {
9544 return MBUF_IS_PAIRED(m) ? (MEXT_PREF(m) > MEXT_MINREF(m)) : 1;
9545 }
9546
9547 void
9548 m_ext_paired_activate(struct mbuf *m)
9549 {
9550 struct ext_ref *rfa;
9551 int hdr, type;
9552 caddr_t extbuf;
9553 m_ext_free_func_t extfree;
9554 u_int extsize;
9555
9556 VERIFY(MBUF_IS_PAIRED(m));
9557 VERIFY(MEXT_REF(m) == MEXT_MINREF(m));
9558 VERIFY(MEXT_PREF(m) == MEXT_MINREF(m));
9559
9560 hdr = (m->m_flags & M_PKTHDR);
9561 type = m->m_type;
9562 extbuf = m->m_ext.ext_buf;
9563 extfree = m_get_ext_free(m);
9564 extsize = m->m_ext.ext_size;
9565 rfa = m_get_rfa(m);
9566
9567 VERIFY(extbuf != NULL && rfa != NULL);
9568
	/*
	 * It is safe to reinitialize the packet header tags here, since
	 * they have already been cleaned up at m_free() time, similar to
	 * what m_clattach() does for the cluster.  Bump MEXT_PREF up to
	 * indicate activation.
	 */
9575 MBUF_INIT(m, hdr, type);
9576 MEXT_INIT(m, extbuf, extsize, extfree, (caddr_t)m, rfa,
9577 1, 1, 2, EXTF_PAIRED, MEXT_PRIV(m), m);
9578 }
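
/*
 * Example (illustrative sketch, not an API contract): a provider that
 * recycles paired mbufs might re-arm one only once it has gone
 * quiescent, e.g.:
 *
 *	if (!m_ext_paired_is_active(m)) {
 *		m_ext_paired_activate(m);
 *		... hand the mbuf back to the stack ...
 *	}
 */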
9579
9580 void
9581 m_scratch_init(struct mbuf *m)
9582 {
9583 struct pkthdr *pkt = &m->m_pkthdr;
9584
9585 VERIFY(m->m_flags & M_PKTHDR);
9586
9587 /* See comments in <rdar://problem/14040693> */
9588 if (pkt->pkt_flags & PKTF_PRIV_GUARDED) {
9589 panic_plain("Invalid attempt to modify guarded module-private "
9590 "area: mbuf %p, pkt_flags 0x%x\n", m, pkt->pkt_flags);
9591 /* NOTREACHED */
9592 }
9593
9594 bzero(&pkt->pkt_mpriv, sizeof(pkt->pkt_mpriv));
9595 }
9596
/*
 * This routine is reserved for mbuf_get_driver_scratch(); clients inside
 * xnu that intend to use the module-private area should refer directly
 * to the pkt_mpriv structure in the pkthdr.  They are also expected to
 * set PKTF_PRIV_GUARDED while they own the packet, and to clear it before
 * handing the packet off to another module.
 */
9604 u_int32_t
9605 m_scratch_get(struct mbuf *m, u_int8_t **p)
9606 {
9607 struct pkthdr *pkt = &m->m_pkthdr;
9608
9609 VERIFY(m->m_flags & M_PKTHDR);
9610
9611 /* See comments in <rdar://problem/14040693> */
9612 if (pkt->pkt_flags & PKTF_PRIV_GUARDED) {
9613 panic_plain("Invalid attempt to access guarded module-private "
9614 "area: mbuf %p, pkt_flags 0x%x\n", m, pkt->pkt_flags);
9615 /* NOTREACHED */
9616 }
9617
9618 #if CONFIG_MBUF_MCACHE
9619 if (mcltrace) {
9620 mcache_audit_t *mca;
9621
9622 lck_mtx_lock(mbuf_mlock);
9623 mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m);
9624 if (mca->mca_uflags & MB_SCVALID) {
9625 mcl_audit_scratch(mca);
9626 }
9627 lck_mtx_unlock(mbuf_mlock);
9628 }
9629 #endif /* CONFIG_MBUF_MCACHE */
9630
9631 *p = (u_int8_t *)&pkt->pkt_mpriv;
9632 return sizeof(pkt->pkt_mpriv);
9633 }
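
/*
 * Example (illustrative sketch): an in-xnu client following the comment
 * above would use pkt_mpriv directly and keep it guarded for as long as
 * it owns the packet, e.g.:
 *
 *	m->m_pkthdr.pkt_flags |= PKTF_PRIV_GUARDED;
 *	bzero(&m->m_pkthdr.pkt_mpriv, sizeof(m->m_pkthdr.pkt_mpriv));
 *	... use pkt_mpriv while owning the packet ...
 *	m->m_pkthdr.pkt_flags &= ~PKTF_PRIV_GUARDED;
 */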
9634
9635 void
9636 m_add_crumb(struct mbuf *m, uint16_t crumb)
9637 {
9638 VERIFY(m->m_flags & M_PKTHDR);
9639
9640 m->m_pkthdr.pkt_crumbs |= crumb;
9641 }
9642
9643 void
9644 m_add_hdr_crumb(struct mbuf *m, uint64_t crumb, uint64_t flag)
9645 {
9646 #if defined(__arm64__)
9647 while (m != NULL) {
9648 m->m_mhdrcommon_crumbs &= ~flag;
9649 m->m_mhdrcommon_crumbs |= (crumb & flag);
9650 m = m->m_next;
9651 }
9652 #else
9653 #pragma unused(m, crumb, flag)
9654 #endif /*__arm64__*/
9655 }
9656
9657 void
9658 m_add_hdr_crumb_chain(struct mbuf *head, uint64_t crumb, uint64_t flag)
9659 {
9660 #if defined(__arm64__)
9661 while (head) {
		/* The list may hold several packets, each itself an mbuf chain */
9663 m_add_hdr_crumb(head, crumb, flag);
9664 head = head->m_nextpkt;
9665 }
9666 #else
9667 #pragma unused(head, crumb, flag)
9668 #endif /*__arm64__*/
9669 }
9670
9671 __private_extern__ inline void
9672 m_set_ext(struct mbuf *m, struct ext_ref *rfa, m_ext_free_func_t ext_free,
9673 caddr_t ext_arg)
9674 {
9675 VERIFY(m->m_flags & M_EXT);
9676
9677 m_set_rfa(m, rfa);
9678 m->m_ext.ext_free = ext_free;
9679 m->m_ext.ext_arg = ext_free == NULL ? NULL : ext_arg;
9680 }
9681
9682 __private_extern__ inline struct ext_ref * __stateful_pure
9683 m_get_rfa(struct mbuf *m)
9684 {
9685 return __unsafe_forge_single(struct ext_ref *, m->m_ext.ext_refflags);
9686 }
9687
9688 static inline void
9689 m_set_rfa(struct mbuf *m, struct ext_ref *rfa)
9690 {
9691 m->m_ext.ext_refflags = rfa;
9692 }
9693
9694 __private_extern__ inline m_ext_free_func_t __stateful_pure
9695 m_get_ext_free(struct mbuf *m)
9696 {
9697 return m->m_ext.ext_free;
9698 }
9699
9700 __private_extern__ inline caddr_t
9701 m_get_ext_arg(struct mbuf *m)
9702 {
9703 return __unsafe_forge_single(caddr_t, m->m_ext.ext_arg);
9704 }
9705
9706 #if CONFIG_MBUF_MCACHE
/*
 * Simple check that lets callers avoid taking the lock when the mbuf
 * drain cannot run anyway.
 */
9711 static int
9712 mbuf_drain_checks(boolean_t ignore_waiters)
9713 {
9714 if (mb_drain_maxint == 0) {
9715 return 0;
9716 }
9717 if (!ignore_waiters && mb_waiters != 0) {
9718 return 0;
9719 }
9720
9721 return 1;
9722 }
9723
/*
 * Called by the VM when there is memory pressure or when we have
 * exhausted the 4k/16k reserved space.
 */
9728 static void
9729 mbuf_drain_locked(boolean_t ignore_waiters)
9730 {
9731 mbuf_class_t mc;
9732 mcl_slab_t *sp, *sp_tmp, *nsp;
9733 unsigned int num, k, interval, released = 0;
9734 unsigned long total_mem = 0, use_mem = 0;
9735 boolean_t ret, purge_caches = FALSE;
9736 ppnum_t offset;
9737 mcache_obj_t *obj;
9738 unsigned long per;
9739 static unsigned char scratch[32];
9740 static ppnum_t scratch_pa = 0;
9741
9742 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
9743 if (!mbuf_drain_checks(ignore_waiters)) {
9744 return;
9745 }
9746 if (scratch_pa == 0) {
9747 bzero(scratch, sizeof(scratch));
9748 scratch_pa = pmap_find_phys(kernel_pmap, (addr64_t)scratch);
9749 VERIFY(scratch_pa);
9750 } else if (mclverify) {
9751 /*
9752 * Panic if a driver wrote to our scratch memory.
9753 */
9754 for (k = 0; k < sizeof(scratch); k++) {
9755 if (scratch[k]) {
9756 panic("suspect DMA to freed address");
9757 }
9758 }
9759 }
	/*
	 * Don't free memory too often, as that could cause excessive
	 * waiting times for mbufs.  Only purge the caches if the previous
	 * drain ran within the last five drain intervals.
	 */
9765 if (mbuf_drain_last_runtime != 0) {
9766 interval = net_uptime() - mbuf_drain_last_runtime;
9767 if (interval <= mb_drain_maxint) {
9768 return;
9769 }
9770 if (interval <= mb_drain_maxint * 5) {
9771 purge_caches = TRUE;
9772 }
9773 }
9774 mbuf_drain_last_runtime = net_uptime();
9775 /*
9776 * Don't free any memory if we're using 60% or more.
9777 */
9778 for (mc = 0; mc < NELEM(mbuf_table); mc++) {
9779 total_mem += m_total(mc) * m_maxsize(mc);
9780 use_mem += m_active(mc) * m_maxsize(mc);
9781 }
9782 per = (use_mem * 100) / total_mem;
9783 if (per >= 60) {
9784 return;
9785 }
	/*
	 * Purge all the caches.  This effectively disables
	 * caching for a few seconds, but the mbuf worker thread will
	 * re-enable it later.
	 */
9791 if (purge_caches == TRUE) {
9792 for (mc = 0; mc < NELEM(mbuf_table); mc++) {
9793 if (m_total(mc) < m_avgtotal(mc)) {
9794 continue;
9795 }
9796 lck_mtx_unlock(mbuf_mlock);
9797 ret = mcache_purge_cache(m_cache(mc), FALSE);
9798 lck_mtx_lock(mbuf_mlock);
9799 if (ret == TRUE) {
9800 m_purge_cnt(mc)++;
9801 }
9802 }
9803 }
9804 /*
9805 * Move the objects from the composite class freelist to
9806 * the rudimentary slabs list, but keep at least 10% of the average
9807 * total in the freelist.
9808 */
9809 for (mc = 0; mc < NELEM(mbuf_table); mc++) {
9810 while (m_cobjlist(mc) &&
9811 m_total(mc) < m_avgtotal(mc) &&
9812 m_infree(mc) > 0.1 * m_avgtotal(mc) + m_minlimit(mc)) {
9813 obj = m_cobjlist(mc);
9814 m_cobjlist(mc) = obj->obj_next;
9815 obj->obj_next = NULL;
9816 num = cslab_free(mc, obj, 1);
9817 VERIFY(num == 1);
9818 m_free_cnt(mc)++;
9819 m_infree(mc)--;
9820 /* cslab_free() handles m_total */
9821 }
9822 }
	/*
	 * Free the buffers sitting on the slab list, but keep at least
	 * 10% of the average total (plus the class minimum) on the
	 * freelist of each class.
	 *
	 * We walk the list backwards in an attempt to reduce fragmentation.
	 */
9829 for (mc = NELEM(mbuf_table) - 1; (int)mc >= 0; mc--) {
9830 TAILQ_FOREACH_SAFE(sp, &m_slablist(mc), sl_link, sp_tmp) {
9831 /*
9832 * Process only unused slabs occupying memory.
9833 */
9834 if (sp->sl_refcnt != 0 || sp->sl_len == 0 ||
9835 sp->sl_base == NULL) {
9836 continue;
9837 }
9838 if (m_total(mc) < m_avgtotal(mc) ||
9839 m_infree(mc) < 0.1 * m_avgtotal(mc) + m_minlimit(mc)) {
9840 break;
9841 }
9842 slab_remove(sp, mc);
9843 switch (mc) {
9844 case MC_MBUF:
9845 m_infree(mc) -= NMBPG;
9846 m_total(mc) -= NMBPG;
9847 if (mclaudit != NULL) {
9848 mcl_audit_free(sp->sl_base, NMBPG);
9849 }
9850 break;
9851 case MC_CL:
9852 m_infree(mc) -= NCLPG;
9853 m_total(mc) -= NCLPG;
9854 if (mclaudit != NULL) {
9855 mcl_audit_free(sp->sl_base, NMBPG);
9856 }
9857 break;
9858 case MC_BIGCL:
9859 {
9860 m_infree(mc) -= NBCLPG;
9861 m_total(mc) -= NBCLPG;
9862 if (mclaudit != NULL) {
9863 mcl_audit_free(sp->sl_base, NMBPG);
9864 }
9865 break;
9866 }
9867 case MC_16KCL:
9868 m_infree(mc)--;
9869 m_total(mc)--;
9870 for (nsp = sp, k = 1; k < NSLABSP16KB; k++) {
9871 nsp = nsp->sl_next;
9872 VERIFY(nsp->sl_refcnt == 0 &&
9873 nsp->sl_base != NULL &&
9874 nsp->sl_len == 0);
9875 slab_init(nsp, 0, 0, NULL, NULL, 0, 0,
9876 0);
9877 nsp->sl_flags = 0;
9878 }
9879 if (mclaudit != NULL) {
9880 if (sp->sl_len == PAGE_SIZE) {
9881 mcl_audit_free(sp->sl_base,
9882 NMBPG);
9883 } else {
9884 mcl_audit_free(sp->sl_base, 1);
9885 }
9886 }
9887 break;
9888 default:
9889 /*
9890 * The composite classes have their own
9891 * freelist (m_cobjlist), so we only
9892 * process rudimentary classes here.
9893 */
9894 VERIFY(0);
9895 }
9896 m_release_cnt(mc) += m_size(mc);
9897 released += m_size(mc);
9898 VERIFY(sp->sl_base != NULL &&
9899 sp->sl_len >= PAGE_SIZE);
9900 offset = MTOPG(sp->sl_base);
9901 /*
9902 * Make sure the IOMapper points to a valid, but
9903 * bogus, address. This should prevent further DMA
9904 * accesses to freed memory.
9905 */
9906 IOMapperInsertPage(mcl_paddr_base, offset, scratch_pa);
9907 mcl_paddr[offset] = 0;
9908 kmem_free(mb_map, (vm_offset_t)sp->sl_base,
9909 sp->sl_len);
9910 slab_init(sp, 0, 0, NULL, NULL, 0, 0, 0);
9911 sp->sl_flags = 0;
9912 }
9913 }
9914 mbstat.m_drain++;
9915 mbstat.m_bigclusters = m_total(MC_BIGCL);
9916 mbstat.m_clusters = m_total(MC_CL);
9917 mbstat.m_mbufs = m_total(MC_MBUF);
9918 mbuf_stat_sync();
9919 mbuf_mtypes_sync(TRUE);
9920 }
9921
9922 __private_extern__ void
9923 mbuf_drain(boolean_t ignore_waiters)
9924 {
9925 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_NOTOWNED);
9926 if (!mbuf_drain_checks(ignore_waiters)) {
9927 return;
9928 }
9929 lck_mtx_lock(mbuf_mlock);
9930 mbuf_drain_locked(ignore_waiters);
9931 lck_mtx_unlock(mbuf_mlock);
9932 }
9933
9935 static int
9936 m_drain_force_sysctl SYSCTL_HANDLER_ARGS
9937 {
9938 #pragma unused(arg1, arg2)
9939 int val = 0, err;
9940
9941 err = sysctl_handle_int(oidp, &val, 0, req);
9942 if (err != 0 || req->newptr == USER_ADDR_NULL) {
9943 return err;
9944 }
9945 if (val) {
9946 mbuf_drain(TRUE);
9947 }
9948
9949 return err;
9950 }
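
/*
 * Example (illustrative sketch of the userland side): a drain is forced
 * by writing a non-zero value to the kern.ipc.mb_drain_force sysctl
 * registered near the end of this file, e.g.:
 *
 *	#include <sys/sysctl.h>
 *
 *	int one = 1;
 *	(void) sysctlbyname("kern.ipc.mb_drain_force",
 *	    NULL, NULL, &one, sizeof(one));
 */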
9951
9952 #if DEBUG || DEVELOPMENT
9953 __printflike(3, 4)
9954 static void
9955 _mbwdog_logger(const char *func, const int line, const char *fmt, ...)
9956 {
9957 va_list ap;
9958 struct timeval now;
9959 char str[384], p[256];
9960 int len;
9961
9962 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
9963 if (mbwdog_logging == NULL) {
9964 /*
9965 * This might block under a mutex, which isn't really great,
9966 * but this happens once, so we'll live.
9967 */
9968 mbwdog_logging = zalloc_permanent(mbwdog_logging_size,
9969 ZALIGN_NONE);
9970 }
9971 va_start(ap, fmt);
9972 vsnprintf(p, sizeof(p), fmt, ap);
9973 va_end(ap);
9974 microuptime(&now);
9975 len = scnprintf(str, sizeof(str),
9976 "\n%ld.%d (%d/%llx) %s:%d %s",
9977 now.tv_sec, now.tv_usec,
9978 proc_getpid(current_proc()),
9979 (uint64_t)VM_KERNEL_ADDRPERM(current_thread()),
9980 func, line, p);
9981 if (len < 0) {
9982 return;
9983 }
9984 if (mbwdog_logging_used + len > mbwdog_logging_size) {
9985 mbwdog_logging_used = mbwdog_logging_used / 2;
9986 memmove(mbwdog_logging, mbwdog_logging + mbwdog_logging_used,
9987 mbwdog_logging_size - mbwdog_logging_used);
9988 mbwdog_logging[mbwdog_logging_used] = 0;
9989 }
9990 strlcat(mbwdog_logging, str, mbwdog_logging_size);
9991 mbwdog_logging_used += len;
9992 }
9993
9994 #endif // DEBUG || DEVELOPMENT
9995
9996 static void
9997 mtracelarge_register(size_t size)
9998 {
9999 int i;
10000 struct mtracelarge *trace;
10001 uintptr_t bt[MLEAK_STACK_DEPTH];
10002 unsigned int depth;
10003
10004 depth = backtrace(bt, MLEAK_STACK_DEPTH, NULL, NULL);
10005 /* Check if this entry is already on the list. */
10006 for (i = 0; i < MTRACELARGE_NUM_TRACES; i++) {
10007 trace = &mtracelarge_table[i];
10008 if (trace->size == size && trace->depth == depth &&
10009 memcmp(bt, trace->addr, depth * sizeof(uintptr_t)) == 0) {
10010 return;
10011 }
10012 }
10013 for (i = 0; i < MTRACELARGE_NUM_TRACES; i++) {
10014 trace = &mtracelarge_table[i];
10015 if (size > trace->size) {
10016 trace->depth = depth;
10017 memcpy(trace->addr, bt, depth * sizeof(uintptr_t));
10018 trace->size = size;
10019 break;
10020 }
10021 }
10022 }
10023
10024 #if DEBUG || DEVELOPMENT
10025
10026 static int
10027 mbuf_wd_dump_sysctl SYSCTL_HANDLER_ARGS
10028 {
10029 char *str;
10030
10031 ifnet_head_lock_shared();
10032 lck_mtx_lock(mbuf_mlock);
10033
10034 str = mbuf_dump();
10035
10036 lck_mtx_unlock(mbuf_mlock);
10037 ifnet_head_done();
10038
10039 return sysctl_io_string(req, str, 0, 0, NULL);
10040 }
10041
10042 #endif /* DEBUG || DEVELOPMENT */
10043 #endif /* CONFIG_MBUF_MCACHE */
10044
10045 SYSCTL_DECL(_kern_ipc);
10046 #if DEBUG || DEVELOPMENT
10047 #if SKYWALK && CONFIG_MBUF_MCACHE
10048 SYSCTL_UINT(_kern_ipc, OID_AUTO, mc_threshold_scale_factor,
10049 CTLFLAG_RW | CTLFLAG_LOCKED, &mc_threshold_scale_down_factor,
10050 MC_THRESHOLD_SCALE_DOWN_FACTOR,
10051 "scale down factor for mbuf cache thresholds");
10052 #endif /* SKYWALK && CONFIG_MBUF_MCACHE */
10053 #if CONFIG_MBUF_MCACHE
10054 SYSCTL_PROC(_kern_ipc, OID_AUTO, mb_wd_dump,
10055 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
10056 0, 0, mbuf_wd_dump_sysctl, "A", "mbuf watchdog dump");
10057 #endif /* CONFIG_MBUF_MCACHE */
10058 #endif /* DEBUG || DEVELOPMENT */
10059 SYSCTL_PROC(_kern_ipc, KIPC_MBSTAT, mbstat,
10060 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
10061 0, 0, mbstat_sysctl, "S,mbstat", "");
10062 SYSCTL_PROC(_kern_ipc, OID_AUTO, mb_stat,
10063 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
10064 0, 0, mb_stat_sysctl, "S,mb_stat", "");
10065 #if CONFIG_MBUF_MCACHE
10066 SYSCTL_PROC(_kern_ipc, OID_AUTO, mleak_top_trace,
10067 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
10068 0, 0, mleak_top_trace_sysctl, "S,mb_top_trace", "");
10069 SYSCTL_PROC(_kern_ipc, OID_AUTO, mleak_table,
10070 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
10071 0, 0, mleak_table_sysctl, "S,mleak_table", "");
10072 SYSCTL_INT(_kern_ipc, OID_AUTO, mleak_sample_factor,
10073 CTLFLAG_RW | CTLFLAG_LOCKED, &mleak_table.mleak_sample_factor, 0, "");
10074 SYSCTL_INT(_kern_ipc, OID_AUTO, mb_normalized,
10075 CTLFLAG_RD | CTLFLAG_LOCKED, &mb_normalized, 0, "");
10076 SYSCTL_INT(_kern_ipc, OID_AUTO, mb_watchdog,
10077 CTLFLAG_RW | CTLFLAG_LOCKED, &mb_watchdog, 0, "");
10078 SYSCTL_PROC(_kern_ipc, OID_AUTO, mb_drain_force,
10079 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0,
10080 m_drain_force_sysctl, "I",
10081 "Forces the mbuf garbage collection to run");
10082 SYSCTL_INT(_kern_ipc, OID_AUTO, mb_drain_maxint,
10083 CTLFLAG_RW | CTLFLAG_LOCKED, &mb_drain_maxint, 0,
    "Minimum time interval between garbage collection runs");
10085 #endif /* CONFIG_MBUF_MCACHE */
10086 SYSCTL_INT(_kern_ipc, OID_AUTO, mb_memory_pressure_percentage,
10087 CTLFLAG_RW | CTLFLAG_LOCKED, &mb_memory_pressure_percentage, 0,
    "Usage percentage at which memory pressure is triggered for an mbuf class");
10089 #if CONFIG_MBUF_MCACHE
10090 static int mb_uses_mcache = 1;
10091 #else
10092 static int mb_uses_mcache = 0;
10093 #endif /* CONFIG_MBUF_MCACHE */
10094 SYSCTL_INT(_kern_ipc, OID_AUTO, mb_uses_mcache,
10095 CTLFLAG_LOCKED, &mb_uses_mcache, 0,
10096 "Whether mbufs use mcache");
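
/*
 * Example (illustrative sketch of the userland side, assuming the node
 * is readable): a tool can query whether the running kernel uses the
 * mcache-based mbuf allocator by reading the sysctl declared above, e.g.:
 *
 *	#include <sys/sysctl.h>
 *
 *	int uses_mcache = 0;
 *	size_t len = sizeof(uses_mcache);
 *	(void) sysctlbyname("kern.ipc.mb_uses_mcache",
 *	    &uses_mcache, &len, NULL, 0);
 */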
10097