1 /*
2 * Copyright (c) 1998-2022 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1988, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
62 */
63 /*
64 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
67 * Version 2.0.
68 */
69
70 #include <ptrauth.h>
71
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/malloc.h>
75 #include <sys/mbuf.h>
76 #include <sys/kernel.h>
77 #include <sys/sysctl.h>
78 #include <sys/syslog.h>
79 #include <sys/protosw.h>
80 #include <sys/domain.h>
81 #include <sys/queue.h>
82 #include <sys/proc.h>
83 #include <sys/filedesc.h>
84 #include <sys/file_internal.h>
85
86 #include <dev/random/randomdev.h>
87
88 #include <kern/kern_types.h>
89 #include <kern/simple_lock.h>
90 #include <kern/queue.h>
91 #include <kern/sched_prim.h>
92 #include <kern/backtrace.h>
93 #include <kern/percpu.h>
94 #include <kern/zalloc.h>
95
96 #include <libkern/OSDebug.h>
97 #include <libkern/libkern.h>
98
99 #include <os/log.h>
100 #include <os/ptrtools.h>
101
102 #include <IOKit/IOMapper.h>
103
104 #include <machine/limits.h>
105 #include <machine/machine_routines.h>
106
107 #if CONFIG_MBUF_MCACHE
108 #include <sys/mcache.h>
109 #endif /* CONFIG_MBUF_MCACHE */
110 #include <net/ntstat.h>
111
112 #if INET
113 extern int dump_tcp_reass_qlen(char *, int);
114 extern int tcp_reass_qlen_space(struct socket *);
115 #endif /* INET */
116
117 #if MPTCP
118 extern int dump_mptcp_reass_qlen(char *, int);
119 #endif /* MPTCP */
120
121
122 #if NETWORKING
123 extern int dlil_dump_top_if_qlen(char *, int);
124 #endif /* NETWORKING */
125
126 #if CONFIG_MBUF_MCACHE
127 /*
128 * MBUF IMPLEMENTATION NOTES.
129 *
130  * There are a total of 5 per-CPU caches:
131 *
132 * MC_MBUF:
133 * This is a cache of rudimentary objects of _MSIZE in size; each
134 * object represents an mbuf structure. This cache preserves only
135 * the m_type field of the mbuf during its transactions.
136 *
137 * MC_CL:
138 * This is a cache of rudimentary objects of MCLBYTES in size; each
139  *	object represents an mcluster structure.  This cache does not
140 * preserve the contents of the objects during its transactions.
141 *
142 * MC_BIGCL:
143 * This is a cache of rudimentary objects of MBIGCLBYTES in size; each
144  *	object represents an mbigcluster structure.  This cache does not
145  *	preserve the contents of the objects during its transactions.
146 *
147 * MC_MBUF_CL:
148 * This is a cache of mbufs each having a cluster attached to it.
149 * It is backed by MC_MBUF and MC_CL rudimentary caches. Several
150 * fields of the mbuf related to the external cluster are preserved
151 * during transactions.
152 *
153 * MC_MBUF_BIGCL:
154 * This is a cache of mbufs each having a big cluster attached to it.
155 * It is backed by MC_MBUF and MC_BIGCL rudimentary caches. Several
156 * fields of the mbuf related to the external cluster are preserved
157 * during transactions.
158 *
159 * OBJECT ALLOCATION:
160 *
161 * Allocation requests are handled first at the per-CPU (mcache) layer
162 * before falling back to the slab layer. Performance is optimal when
163 * the request is satisfied at the CPU layer because global data/lock
164 * never gets accessed. When the slab layer is entered for allocation,
165 * the slab freelist will be checked first for available objects before
166 * the VM backing store is invoked. Slab layer operations are serialized
167 * for all of the caches as the mbuf global lock is held most of the time.
168 * Allocation paths are different depending on the class of objects:
169 *
170 * a. Rudimentary object:
171 *
172 * { m_get_common(), m_clattach(), m_mclget(),
173 * m_mclalloc(), m_bigalloc(), m_copym_with_hdrs(),
174 * composite object allocation }
175 * | ^
176 * | |
177 * | +-----------------------+
178 * v |
179 * mcache_alloc/mcache_alloc_ext() mbuf_slab_audit()
180 * | ^
181 * v |
182 * [CPU cache] -------> (found?) -------+
183 * | |
184 * v |
185 * mbuf_slab_alloc() |
186 * | |
187 * v |
188 * +---------> [freelist] -------> (found?) -------+
189 * | |
190 * | v
191 * | m_clalloc()
192 * | |
193 * | v
194 * +---<<---- kmem_mb_alloc()
195 *
196 * b. Composite object:
197 *
198 * { m_getpackets_internal(), m_allocpacket_internal() }
199 * | ^
200 * | |
201 * | +------ (done) ---------+
202 * v |
203 * mcache_alloc/mcache_alloc_ext() mbuf_cslab_audit()
204 * | ^
205 * v |
206 * [CPU cache] -------> (found?) -------+
207 * | |
208 * v |
209 * mbuf_cslab_alloc() |
210 * | |
211 * v |
212 * [freelist] -------> (found?) -------+
213 * | |
214 * v |
215 * (rudimentary object) |
216 * mcache_alloc/mcache_alloc_ext() ------>>-----+
217 *
218 * Auditing notes: If auditing is enabled, buffers will be subjected to
219 * integrity checks by the audit routine. This is done by verifying their
220  * contents against the DEADBEEF (free) pattern before returning them to the caller.
221 * As part of this step, the routine will also record the transaction and
222 * pattern-fill the buffers with BADDCAFE (uninitialized) pattern. It will
223 * also restore any constructed data structure fields if necessary.
224 *
225 * OBJECT DEALLOCATION:
226 *
227 * Freeing an object simply involves placing it into the CPU cache; this
228 * pollutes the cache to benefit subsequent allocations. The slab layer
229 * will only be entered if the object is to be purged out of the cache.
230 * During normal operations, this happens only when the CPU layer resizes
231 * its bucket while it's adjusting to the allocation load. Deallocation
232 * paths are different depending on the class of objects:
233 *
234 * a. Rudimentary object:
235 *
236 * { m_free(), m_freem_list(), composite object deallocation }
237 * | ^
238 * | |
239 * | +------ (done) ---------+
240 * v |
241 * mcache_free/mcache_free_ext() |
242 * | |
243 * v |
244 * mbuf_slab_audit() |
245 * | |
246 * v |
247 * [CPU cache] ---> (not purging?) -----+
248 * | |
249 * v |
250 * mbuf_slab_free() |
251 * | |
252 * v |
253 * [freelist] ----------->>------------+
254 * (objects get purged to VM only on demand)
255 *
256 * b. Composite object:
257 *
258 * { m_free(), m_freem_list() }
259 * | ^
260 * | |
261 * | +------ (done) ---------+
262 * v |
263 * mcache_free/mcache_free_ext() |
264 * | |
265 * v |
266 * mbuf_cslab_audit() |
267 * | |
268 * v |
269 * [CPU cache] ---> (not purging?) -----+
270 * | |
271 * v |
272 * mbuf_cslab_free() |
273 * | |
274 * v |
275 * [freelist] ---> (not purging?) -----+
276 * | |
277 * v |
278 * (rudimentary object) |
279 * mcache_free/mcache_free_ext() ------->>------+
280 *
281 * Auditing notes: If auditing is enabled, the audit routine will save
282 * any constructed data structure fields (if necessary) before filling the
283 * contents of the buffers with DEADBEEF (free) pattern and recording the
284 * transaction. Buffers that are freed (whether at CPU or slab layer) are
285 * expected to contain the free pattern.
286 *
287 * DEBUGGING:
288 *
289 * Debugging can be enabled by adding "mbuf_debug=0x3" to boot-args; this
290 * translates to the mcache flags (MCF_VERIFY | MCF_AUDIT). Additionally,
291 * the CPU layer cache can be disabled by setting the MCF_NOCPUCACHE flag,
292 * i.e. modify the boot argument parameter to "mbuf_debug=0x13". Leak
293 * detection may also be disabled by setting the MCF_NOLEAKLOG flag, e.g.
294 * "mbuf_debug=0x113". Note that debugging consumes more CPU and memory.
295 *
296 * Each object is associated with exactly one mcache_audit_t structure that
297 * contains the information related to its last buffer transaction. Given
298  * the address of an object, the audit structure can be retrieved by finding
299  * the position of the object relative to the base address of the cluster:
300 *
301 * +------------+ +=============+
302 * | mbuf addr | | mclaudit[i] |
303 * +------------+ +=============+
304 * | | cl_audit[0] |
305 * i = MTOBG(addr) +-------------+
306 * | +-----> | cl_audit[1] | -----> mcache_audit_t
307 * b = BGTOM(i) | +-------------+
308 * | | | ... |
309 * x = MCLIDX(b, addr) | +-------------+
310 * | | | cl_audit[7] |
311 * +-----------------+ +-------------+
312 * (e.g. x == 1)
313 *
314 * The mclaudit[] array is allocated at initialization time, but its contents
315  * get populated when the corresponding cluster is created.  Because a page
316  * can be turned into NMBPG mbufs, we reserve enough space for the
317  * mbufs so that there is a 1-to-1 mapping between them.  A page that never
318  * gets (or has not yet been) turned into mbufs will use only cl_audit[0], with
319  * the remaining entries unused.  For a 16KB cluster, only one entry from the
320  * first page is allocated and used for the entire object.
321 */
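/*
 * A minimal sketch of the lookup described above (schematic only; the
 * actual implementation lives in mcl_audit_buf2mca()).  For an object of
 * the plain mbuf class, roughly:
 *
 *	i   = MTOPG(addr);               // page index of the object
 *	x   = MBPAGEIDX(PGTOM(i), addr); // mbuf slot within that page
 *	mca = mclaudit[i].cl_audit[x];   // audit record for the object
 *
 * MTOPG, PGTOM and MBPAGEIDX are the page-index macros defined further
 * below in this file.
 */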
322 #else
323 /*
324 * MBUF IMPLEMENTATION NOTES (using zalloc).
325 *
326 * There are a total of 4 zones and 3 zcaches.
327 *
328 * MC_MBUF:
329 * This is a zone of rudimentary objects of _MSIZE in size; each
330 * object represents an mbuf structure. This cache preserves only
331 * the m_type field of the mbuf during its transactions.
332 *
333 * MC_CL:
334 * This is a zone of rudimentary objects of MCLBYTES in size; each
335  *	object represents an mcluster structure.  This cache does not
336 * preserve the contents of the objects during its transactions.
337 *
338 * MC_BIGCL:
339 * This is a zone of rudimentary objects of MBIGCLBYTES in size; each
340  *	object represents an mbigcluster structure.  This cache does not
341  *	preserve the contents of the objects during its transactions.
342 *
343 * MC_16KCL:
344 * This is a zone of rudimentary objects of M16KCLBYTES in size; each
345  *	object represents an m16kcluster structure.  This cache does not
346  *	preserve the contents of the objects during its transactions.
347 *
348 * MC_MBUF_CL:
349 * This is a cache of mbufs each having a cluster attached to it.
350 * It is backed by MC_MBUF and MC_CL rudimentary caches. Several
351 * fields of the mbuf related to the external cluster are preserved
352 * during transactions.
353 *
354 * MC_MBUF_BIGCL:
355 * This is a cache of mbufs each having a big cluster attached to it.
356 * It is backed by MC_MBUF and MC_BIGCL rudimentary caches. Several
357 * fields of the mbuf related to the external cluster are preserved
358 * during transactions.
359 *
360 * MC_MBUF_16KCL:
361  *	This is a cache of mbufs each having a 16KB (jumbo) cluster attached to it.
362 * It is backed by MC_MBUF and MC_16KCL rudimentary caches. Several
363 * fields of the mbuf related to the external cluster are preserved
364 * during transactions.
365 *
366 * OBJECT ALLOCATION:
367 *
368 * Allocation requests are handled first at the zalloc per-CPU layer
369 * before falling back to the zalloc depot. Performance is optimal when
370 * the request is satisfied at the CPU layer. zalloc has an additional
371 * overflow layer called the depot, not pictured in the diagram below.
372 *
373 * Allocation paths are different depending on the class of objects:
374 *
375 * a. Rudimentary object:
376 *
377 * { m_get_common(), m_clattach(), m_mclget(),
378 * m_mclalloc(), m_bigalloc(), m_copym_with_hdrs(),
379 * composite object allocation }
380 * | ^
381 * | |
382 * | +------- (done) --------+
383 * v |
384 * zalloc_flags/zalloc_n() KASAN
385 * | ^
386 * v |
387 * +----> [zalloc per-CPU cache] -----> (found?) --+
388 * | | |
389 * | v |
390 * | [zalloc recirculation layer] --> (found?) ---+
391 * | |
392 * | v
393 * +--<<-- [zone backing store]
394 *
395 * b. Composite object:
396 *
397 * { m_getpackets_internal(), m_allocpacket_internal() }
398 * | ^
399 * | |
400 * | +------ (done) ---------+
401 * v |
402 * mz_composite_alloc() KASAN
403 * | ^
404 * v |
405 * zcache_alloc_n() |
406 * | |
407 * v |
408 * [zalloc per-CPU cache] --> mark_valid() ---+
409 * | |
410 * v |
411 * [zalloc recirculation layer] -> mark_valid() -+
412 * | |
413 * v |
414 * mz_composite_build() |
415 * | |
416 * v |
417 * (rudimentary objects) |
418 * zalloc_id() ---------------->>-----+
419 *
420  * Auditing notes: If KASAN is enabled, buffers will be subjected to
421 * integrity checks by the AddressSanitizer.
422 *
423 * OBJECT DEALLOCATION:
424 *
425 * Freeing an object simply involves placing it into the CPU cache; this
426 * pollutes the cache to benefit subsequent allocations. The depot
427 * will only be entered if the object is to be purged out of the cache.
428 * Objects may be purged based on the overall memory pressure or
429 * during zone garbage collection.
430  * To improve performance, objects are not zero-filled when they are
431  * freed, unlike objects in other zalloc zones.
432 *
433 * Deallocation paths are different depending on the class of objects:
434 *
435 * a. Rudimentary object:
436 *
437 * { m_free(), m_freem_list(), composite object deallocation }
438 * | ^
439 * | |
440 * | +------ (done) ---------+
441 * v |
442 * zfree_nozero() |
443 * | |
444 * v |
445 * KASAN |
446 * | |
447 * v |
448 * [zalloc per-CPU cache] -> (not purging?) --+
449 * | |
450 * v |
451 * [zalloc recirculation layer] --->>----------+
452 *
453 *
454 * b. Composite object:
455 *
456 * { m_free(), m_freem_list() }
457 * | ^
458 * | |
459 * | +------ (done) ---------+
460 * v |
461 * mz_composite_free() |
462 * | |
463 * v |
464 * zcache_free_n() |
465 * | |
466 * v |
467 * KASAN |
468 * | |
469 * v |
470 * [zalloc per-CPU cache] -> mark_invalid() --+
471 * | |
472 * v |
473 * mz_composite_destroy() |
474 * | |
475 * v |
476 * (rudimentary object) |
477 * zfree_nozero() -------------->>------+
478 *
479  * Auditing notes: If KASAN is enabled, buffers will be subjected to
480 * integrity checks by the AddressSanitizer.
481 *
482 * DEBUGGING:
483 *
484  * Debugging mbufs can be done by booting a KASAN-enabled kernel.
485 */
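/*
 * A hypothetical sketch of the composite path described above: a caller
 * such as m_getpackets_internal() obtains an mbuf with a 2KB cluster
 * already attached via something like
 *
 *	m = mz_composite_alloc(MC_MBUF_CL, Z_WAITOK);
 *
 * and hands it back with mz_composite_free(MC_MBUF_CL, m); the zcache
 * layer then recycles the constructed mbuf + cluster pair instead of
 * tearing it apart.
 */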
486
487 #endif /* CONFIG_MBUF_MCACHE */
488
489 /* TODO: should be in header file */
490 /* kernel translator */
491 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
492 extern vm_map_t mb_map; /* special map */
493
494 #if CONFIG_MBUF_MCACHE
495 static uint32_t mb_kmem_contig_failed;
496 static uint32_t mb_kmem_failed;
497 static uint32_t mb_kmem_one_failed;
498 /* Timestamp of allocation failures. */
499 static uint64_t mb_kmem_contig_failed_ts;
500 static uint64_t mb_kmem_failed_ts;
501 static uint64_t mb_kmem_one_failed_ts;
502 static uint64_t mb_kmem_contig_failed_size;
503 static uint64_t mb_kmem_failed_size;
504 static uint32_t mb_kmem_stats[6];
505 #endif /* CONFIG_MBUF_MCACHE */
506
507 /* Global lock */
508 static LCK_GRP_DECLARE(mbuf_mlock_grp, "mbuf");
509 static LCK_MTX_DECLARE(mbuf_mlock_data, &mbuf_mlock_grp);
510 static lck_mtx_t *const mbuf_mlock = &mbuf_mlock_data;
511
512 #if CONFIG_MBUF_MCACHE
513 /* Back-end (common) layer */
514 static uint64_t mb_expand_cnt;
515 static uint64_t mb_expand_cl_cnt;
516 static uint64_t mb_expand_cl_total;
517 static uint64_t mb_expand_bigcl_cnt;
518 static uint64_t mb_expand_bigcl_total;
519 static uint64_t mb_expand_16kcl_cnt;
520 static uint64_t mb_expand_16kcl_total;
521 static boolean_t mbuf_worker_needs_wakeup; /* wait channel for mbuf worker */
522 static uint32_t mbuf_worker_run_cnt;
523 static uint64_t mbuf_worker_last_runtime;
524 static uint64_t mbuf_drain_last_runtime;
525 static int mbuf_worker_ready; /* worker thread is runnable */
526 static unsigned int ncpu; /* number of CPUs */
527 static ppnum_t *mcl_paddr; /* Array of cluster physical addresses */
528 static ppnum_t mcl_pages; /* Size of array (# physical pages) */
529 static ppnum_t mcl_paddr_base; /* Handle returned by IOMapper::iovmAlloc() */
530 static mcache_t *ref_cache; /* Cache of cluster reference & flags */
531 static mcache_t *mcl_audit_con_cache; /* Audit contents cache */
532 unsigned int mbuf_debug; /* patchable mbuf mcache flags */
533 #endif /* CONFIG_MBUF_MCACHE */
534 static unsigned int mb_normalized; /* number of packets "normalized" */
535
536 extern unsigned int mb_tag_mbuf;
537
538 #define MB_GROWTH_AGGRESSIVE 1 /* Threshold: 1/2 of total */
539 #define MB_GROWTH_NORMAL 2 /* Threshold: 3/4 of total */
540
541 typedef enum {
542 MC_MBUF = 0, /* Regular mbuf */
543 MC_CL, /* Cluster */
544 MC_BIGCL, /* Large (4KB) cluster */
545 MC_16KCL, /* Jumbo (16KB) cluster */
546 MC_MBUF_CL, /* mbuf + cluster */
547 MC_MBUF_BIGCL, /* mbuf + large (4KB) cluster */
548 MC_MBUF_16KCL /* mbuf + jumbo (16KB) cluster */
549 } mbuf_class_t;
550
551 #define MBUF_CLASS_MIN MC_MBUF
552 #define MBUF_CLASS_MAX MC_MBUF_16KCL
553 #define MBUF_CLASS_LAST MC_16KCL
554 #define MBUF_CLASS_VALID(c) \
555 ((int)(c) >= MBUF_CLASS_MIN && (int)(c) <= MBUF_CLASS_MAX)
556 #define MBUF_CLASS_COMPOSITE(c) \
557 ((int)(c) > MBUF_CLASS_LAST)
558
559
560 /*
561 * mbuf specific mcache allocation request flags.
562 */
563 #define MCR_COMP MCR_USR1 /* for MC_MBUF_{CL,BIGCL,16KCL} caches */
564
565 /*
566 * Per-cluster slab structure.
567 *
568 * A slab is a cluster control structure that contains one or more object
569 * chunks; the available chunks are chained in the slab's freelist (sl_head).
570 * Each time a chunk is taken out of the slab, the slab's reference count
571 * gets incremented. When all chunks have been taken out, the empty slab
572 * gets removed (SLF_DETACHED) from the class's slab list. A chunk that is
573 * returned to a slab causes the slab's reference count to be decremented;
574  * it also causes the slab to be reinserted back into the class's slab list, if
575 * it's not already done.
576 *
577 * Compartmentalizing of the object chunks into slabs allows us to easily
578 * merge one or more slabs together when the adjacent slabs are idle, as
579 * well as to convert or move a slab from one class to another; e.g. the
580 * mbuf cluster slab can be converted to a regular cluster slab when all
581 * mbufs in the slab have been freed.
582 *
583 * A slab may also span across multiple clusters for chunks larger than
584 * a cluster's size. In this case, only the slab of the first cluster is
585 * used. The rest of the slabs are marked with SLF_PARTIAL to indicate
586 * that they are part of the larger slab.
587 *
588 * Each slab controls a page of memory.
589 */
590 typedef struct mcl_slab {
591 struct mcl_slab *sl_next; /* neighboring slab */
592 u_int8_t sl_class; /* controlling mbuf class */
593 int8_t sl_refcnt; /* outstanding allocations */
594 int8_t sl_chunks; /* chunks (bufs) in this slab */
595 u_int16_t sl_flags; /* slab flags (see below) */
596 u_int16_t sl_len; /* slab length */
597 void *sl_base; /* base of allocated memory */
598 void *sl_head; /* first free buffer */
599 TAILQ_ENTRY(mcl_slab) sl_link; /* next/prev slab on freelist */
600 } mcl_slab_t;
601
602 #define SLF_MAPPED 0x0001 /* backed by a mapped page */
603 #define SLF_PARTIAL 0x0002 /* part of another slab */
604 #define SLF_DETACHED 0x0004 /* not in slab freelist */
605
606 /*
607  * The array of slabs is broken into groups of arrays, one group per 1MB of
608  * kernel memory, to reduce the footprint.  Each group is allocated on demand
609 * whenever a new piece of memory mapped in from the VM crosses the 1MB
610 * boundary.
611 */
612 #define NSLABSPMB ((1 << MBSHIFT) >> PAGE_SHIFT)
613
614 typedef struct mcl_slabg {
615 mcl_slab_t *slg_slab; /* group of slabs */
616 } mcl_slabg_t;
617
618 /*
619 * Number of slabs needed to control a 16KB cluster object.
620 */
621 #define NSLABSP16KB (M16KCLBYTES >> PAGE_SHIFT)
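/*
 * Worked example, assuming the usual constants (MBSHIFT == 20 for 1MB and
 * M16KCLBYTES == 16KB): with 4KB pages, NSLABSPMB is 1MB / 4KB = 256 slabs
 * per group and NSLABSP16KB is 16KB / 4KB = 4 slabs per 16KB cluster; with
 * 16KB pages the values are 64 and 1, respectively.
 */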
622
623 #if CONFIG_MBUF_MCACHE
624 /*
625 * Per-cluster audit structure.
626 */
627 typedef struct {
628 mcache_audit_t **cl_audit; /* array of audits */
629 } mcl_audit_t;
630
631 typedef struct {
632 struct thread *msa_thread; /* thread doing transaction */
633 struct thread *msa_pthread; /* previous transaction thread */
634 uint32_t msa_tstamp; /* transaction timestamp (ms) */
635 uint32_t msa_ptstamp; /* prev transaction timestamp (ms) */
636 uint16_t msa_depth; /* pc stack depth */
637 uint16_t msa_pdepth; /* previous transaction pc stack */
638 void *msa_stack[MCACHE_STACK_DEPTH];
639 void *msa_pstack[MCACHE_STACK_DEPTH];
640 } mcl_scratch_audit_t;
641
642 typedef struct {
643 /*
644 * Size of data from the beginning of an mbuf that covers m_hdr,
645 * pkthdr and m_ext structures. If auditing is enabled, we allocate
646 * a shadow mbuf structure of this size inside each audit structure,
647 * and the contents of the real mbuf gets copied into it when the mbuf
648 * is freed. This allows us to pattern-fill the mbuf for integrity
649 * check, and to preserve any constructed mbuf fields (e.g. mbuf +
650 * cluster cache case). Note that we don't save the contents of
651 * clusters when they are freed; we simply pattern-fill them.
652 */
653 u_int8_t sc_mbuf[(_MSIZE - _MHLEN) + sizeof(_m_ext_t)];
654 mcl_scratch_audit_t sc_scratch __attribute__((aligned(8)));
655 } mcl_saved_contents_t;
656
657 #define AUDIT_CONTENTS_SIZE (sizeof (mcl_saved_contents_t))
658
659 #define MCA_SAVED_MBUF_PTR(_mca) \
660 ((struct mbuf *)(void *)((mcl_saved_contents_t *) \
661 (_mca)->mca_contents)->sc_mbuf)
662 #define MCA_SAVED_MBUF_SIZE \
663 (sizeof (((mcl_saved_contents_t *)0)->sc_mbuf))
664 #define MCA_SAVED_SCRATCH_PTR(_mca) \
665 (&((mcl_saved_contents_t *)(_mca)->mca_contents)->sc_scratch)
666
667 /*
668 * mbuf specific mcache audit flags
669 */
670 #define MB_INUSE 0x01 /* object has not been returned to slab */
671 #define MB_COMP_INUSE 0x02 /* object has not been returned to cslab */
672 #define MB_SCVALID 0x04 /* object has valid saved contents */
673
674 /*
675  * Each of the following two arrays holds up to nmbclusters elements.
676 */
677 static mcl_audit_t *mclaudit; /* array of cluster audit information */
678 static unsigned int maxclaudit; /* max # of entries in audit table */
679 static mcl_slabg_t **slabstbl; /* cluster slabs table */
680 static unsigned int maxslabgrp; /* max # of entries in slabs table */
681 static unsigned int slabgrp; /* # of entries in slabs table */
682 #endif /* CONFIG_MBUF_MCACHE */
683
684 /* Globals */
685 int nclusters; /* # of clusters for non-jumbo (legacy) sizes */
686 int njcl; /* # of clusters for jumbo sizes */
687 int njclbytes; /* size of a jumbo cluster */
688 unsigned char *mbutl; /* first mapped cluster address */
689 unsigned char *embutl; /* ending virtual address of mclusters */
690 int max_linkhdr; /* largest link-level header */
691 int max_protohdr; /* largest protocol header */
692 int max_hdr; /* largest link+protocol header */
693 int max_datalen; /* MHLEN - max_hdr */
694
695 #if CONFIG_MBUF_MCACHE
696 static boolean_t mclverify; /* debug: pattern-checking */
697 static boolean_t mcltrace; /* debug: stack tracing */
698 static boolean_t mclfindleak; /* debug: leak detection */
699 static boolean_t mclexpleak; /* debug: expose leak info to user space */
700
701 static struct timeval mb_start; /* beginning of time */
702
703 /* mbuf leak detection variables */
704 static struct mleak_table mleak_table;
705 static mleak_stat_t *mleak_stat;
706
707 #define MLEAK_STAT_SIZE(n) \
708 __builtin_offsetof(mleak_stat_t, ml_trace[n])
709
710 struct mallocation {
711 mcache_obj_t *element; /* the alloc'ed element, NULL if unused */
712 u_int32_t trace_index; /* mtrace index for corresponding backtrace */
713 u_int32_t count; /* How many objects were requested */
714 u_int64_t hitcount; /* for determining hash effectiveness */
715 };
716
717 struct mtrace {
718 u_int64_t collisions;
719 u_int64_t hitcount;
720 u_int64_t allocs;
721 u_int64_t depth;
722 uintptr_t addr[MLEAK_STACK_DEPTH];
723 };
724
725 /* Size must be a power of two for the zhash to be able to just mask off bits */
726 #define MLEAK_ALLOCATION_MAP_NUM 512
727 #define MLEAK_TRACE_MAP_NUM 256
728
729 /*
730  * Sample factor for how often to record a trace.  This can be overridden
731  * via the boot-arg mleak_sample_factor.
732 */
733 #define MLEAK_SAMPLE_FACTOR 500
734
735 /*
736 * Number of top leakers recorded.
737 */
738 #define MLEAK_NUM_TRACES 5
739
740 #define MB_LEAK_SPACING_64 " "
741 #define MB_LEAK_SPACING_32 " "
742
743
744 #define MB_LEAK_HDR_32 "\n\
745 trace [1] trace [2] trace [3] trace [4] trace [5] \n\
746 ---------- ---------- ---------- ---------- ---------- \n\
747 "
748
749 #define MB_LEAK_HDR_64 "\n\
750 trace [1] trace [2] trace [3] \
751 trace [4] trace [5] \n\
752 ------------------ ------------------ ------------------ \
753 ------------------ ------------------ \n\
754 "
755
756 static uint32_t mleak_alloc_buckets = MLEAK_ALLOCATION_MAP_NUM;
757 static uint32_t mleak_trace_buckets = MLEAK_TRACE_MAP_NUM;
758
759 /* Hashmaps of allocations and their corresponding traces */
760 static struct mallocation *mleak_allocations;
761 static struct mtrace *mleak_traces;
762 static struct mtrace *mleak_top_trace[MLEAK_NUM_TRACES];
763
764 /* Lock to protect mleak tables from concurrent modification */
765 static LCK_GRP_DECLARE(mleak_lock_grp, "mleak_lock");
766 static LCK_MTX_DECLARE(mleak_lock_data, &mleak_lock_grp);
767 static lck_mtx_t *const mleak_lock = &mleak_lock_data;
768
769 /* *Failed* large allocations. */
770 struct mtracelarge {
771 uint64_t size;
772 uint64_t depth;
773 uintptr_t addr[MLEAK_STACK_DEPTH];
774 };
775
776 #define MTRACELARGE_NUM_TRACES 5
777 static struct mtracelarge mtracelarge_table[MTRACELARGE_NUM_TRACES];
778
779 static void mtracelarge_register(size_t size);
780 #endif /* CONFIG_MBUF_MCACHE */
781
782 /* Lock to protect the completion callback table */
783 static LCK_GRP_DECLARE(mbuf_tx_compl_tbl_lck_grp, "mbuf_tx_compl_tbl");
784 LCK_RW_DECLARE(mbuf_tx_compl_tbl_lock, &mbuf_tx_compl_tbl_lck_grp);
785
786 extern u_int32_t high_sb_max;
787
788 /* The minimum number of objects that are allocated, to start. */
789 #define MINCL 32
790 #define MINBIGCL (MINCL >> 1)
791 #define MIN16KCL (MINCL >> 2)
792
793 /* Low watermarks (only map in pages once free counts go below) */
794 #define MBIGCL_LOWAT MINBIGCL
795 #define M16KCL_LOWAT MIN16KCL
796
797 typedef struct {
798 mbuf_class_t mtbl_class; /* class type */
799 #if CONFIG_MBUF_MCACHE
800 mcache_t *mtbl_cache; /* mcache for this buffer class */
801 TAILQ_HEAD(mcl_slhead, mcl_slab) mtbl_slablist; /* slab list */
802 mcache_obj_t *mtbl_cobjlist; /* composite objects freelist */
803 #endif /* CONFIG_MBUF_MCACHE */
804 mb_class_stat_t *mtbl_stats; /* statistics fetchable via sysctl */
805 u_int32_t mtbl_maxsize; /* maximum buffer size */
806 int mtbl_minlimit; /* minimum allowed */
807 int mtbl_maxlimit; /* maximum allowed */
808 u_int32_t mtbl_wantpurge; /* purge during next reclaim */
809 uint32_t mtbl_avgtotal; /* average total on iOS */
810 u_int32_t mtbl_expand; /* worker should expand the class */
811 } mbuf_table_t;
812
813 #define m_class(c) mbuf_table[c].mtbl_class
814 #if CONFIG_MBUF_MCACHE
815 #define m_cache(c) mbuf_table[c].mtbl_cache
816 #define m_slablist(c) mbuf_table[c].mtbl_slablist
817 #define m_cobjlist(c) mbuf_table[c].mtbl_cobjlist
818 #else
819 #define m_stats(c) mbuf_table[c].mtbl_stats
820 #endif /* CONFIG_MBUF_MCACHE */
821 #define m_maxsize(c) mbuf_table[c].mtbl_maxsize
822 #define m_minlimit(c) mbuf_table[c].mtbl_minlimit
823 #define m_maxlimit(c) mbuf_table[c].mtbl_maxlimit
824 #define m_wantpurge(c) mbuf_table[c].mtbl_wantpurge
825 #define m_cname(c) mbuf_table[c].mtbl_stats->mbcl_cname
826 #define m_size(c) mbuf_table[c].mtbl_stats->mbcl_size
827 #define m_total(c) mbuf_table[c].mtbl_stats->mbcl_total
828 #define m_active(c) mbuf_table[c].mtbl_stats->mbcl_active
829 #define m_infree(c) mbuf_table[c].mtbl_stats->mbcl_infree
830 #define m_slab_cnt(c) mbuf_table[c].mtbl_stats->mbcl_slab_cnt
831 #define m_alloc_cnt(c) mbuf_table[c].mtbl_stats->mbcl_alloc_cnt
832 #define m_free_cnt(c) mbuf_table[c].mtbl_stats->mbcl_free_cnt
833 #define m_notified(c) mbuf_table[c].mtbl_stats->mbcl_notified
834 #define m_purge_cnt(c) mbuf_table[c].mtbl_stats->mbcl_purge_cnt
835 #define m_fail_cnt(c) mbuf_table[c].mtbl_stats->mbcl_fail_cnt
836 #define m_ctotal(c) mbuf_table[c].mtbl_stats->mbcl_ctotal
837 #define m_peak(c) mbuf_table[c].mtbl_stats->mbcl_peak_reported
838 #define m_release_cnt(c) mbuf_table[c].mtbl_stats->mbcl_release_cnt
839 #define m_region_expand(c) mbuf_table[c].mtbl_expand
840
841 static mbuf_table_t mbuf_table[] = {
842 #if CONFIG_MBUF_MCACHE
843 /*
844 * The caches for mbufs, regular clusters and big clusters.
845 * The average total values were based on data gathered by actual
846 * usage patterns on iOS.
847 */
848 { MC_MBUF, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_MBUF)),
849 NULL, NULL, 0, 0, 0, 0, 3000, 0 },
850 { MC_CL, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_CL)),
851 NULL, NULL, 0, 0, 0, 0, 2000, 0 },
852 { MC_BIGCL, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_BIGCL)),
853 NULL, NULL, 0, 0, 0, 0, 1000, 0 },
854 { MC_16KCL, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_16KCL)),
855 NULL, NULL, 0, 0, 0, 0, 200, 0 },
856 /*
857 * The following are special caches; they serve as intermediate
858 * caches backed by the above rudimentary caches. Each object
859 * in the cache is an mbuf with a cluster attached to it. Unlike
860 * the above caches, these intermediate caches do not directly
861 * deal with the slab structures; instead, the constructed
862 * cached elements are simply stored in the freelists.
863 */
864 { MC_MBUF_CL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 2000, 0 },
865 { MC_MBUF_BIGCL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 1000, 0 },
866 { MC_MBUF_16KCL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 200, 0 },
867 #else
868 { .mtbl_class = MC_MBUF },
869 { .mtbl_class = MC_CL },
870 { .mtbl_class = MC_BIGCL },
871 { .mtbl_class = MC_16KCL },
872 { .mtbl_class = MC_MBUF_CL },
873 { .mtbl_class = MC_MBUF_BIGCL },
874 { .mtbl_class = MC_MBUF_16KCL },
875 #endif /* CONFIG_MBUF_MCACHE */
876 };
877
878 #define NELEM(a) (sizeof (a) / sizeof ((a)[0]))
879
880 #if SKYWALK && CONFIG_MBUF_MCACHE
881 #define MC_THRESHOLD_SCALE_DOWN_FACTOR 2
882 static unsigned int mc_threshold_scale_down_factor =
883 MC_THRESHOLD_SCALE_DOWN_FACTOR;
884 #endif /* SKYWALK && CONFIG_MBUF_MCACHE */
885
886 #if CONFIG_MBUF_MCACHE
887 static uint32_t
888 m_avgtotal(mbuf_class_t c)
889 {
890 #if SKYWALK
891 return if_is_fsw_transport_netagent_enabled() ?
892 (mbuf_table[c].mtbl_avgtotal / mc_threshold_scale_down_factor) :
893 mbuf_table[c].mtbl_avgtotal;
894 #else /* !SKYWALK */
895 return mbuf_table[c].mtbl_avgtotal;
896 #endif /* SKYWALK */
897 }
898 #endif /* CONFIG_MBUF_MCACHE */
899
900 #if CONFIG_MBUF_MCACHE
901 static void *mb_waitchan = &mbuf_table; /* wait channel for all caches */
902 static int mb_waiters; /* number of waiters */
903 #endif /* CONFIG_MBUF_MCACHE */
904
905 boolean_t mb_peak_newreport = FALSE;
906 boolean_t mb_peak_firstreport = FALSE;
907
908 /* generate a report by default after 1 week of uptime */
909 #define MBUF_PEAK_FIRST_REPORT_THRESHOLD 604800
910
911 #if CONFIG_MBUF_MCACHE
912 #define MB_WDT_MAXTIME 10 /* # of secs before watchdog panic */
913 static struct timeval mb_wdtstart; /* watchdog start timestamp */
914 static char *mbuf_dump_buf;
915
916 #define MBUF_DUMP_BUF_SIZE 4096
917
918 /*
919  * The mbuf watchdog is enabled by default.  It can also be toggled via the
920  * kern.ipc.mb_watchdog sysctl.
921 * Garbage collection is enabled by default on embedded platforms.
922 * mb_drain_maxint controls the amount of time to wait (in seconds) before
923 * consecutive calls to mbuf_drain().
924 */
925 static unsigned int mb_watchdog = 1;
926 #if !XNU_TARGET_OS_OSX
927 static unsigned int mb_drain_maxint = 60;
928 #else /* XNU_TARGET_OS_OSX */
929 static unsigned int mb_drain_maxint = 0;
930 #endif /* XNU_TARGET_OS_OSX */
931 #endif /* CONFIG_MBUF_MCACHE */
932 static unsigned int mb_memory_pressure_percentage = 80;
933
934 uintptr_t mb_obscure_extfree __attribute__((visibility("hidden")));
935 uintptr_t mb_obscure_extref __attribute__((visibility("hidden")));
936
937 /* Red zone */
938 static u_int32_t mb_redzone_cookie;
939 static void m_redzone_init(struct mbuf *);
940 static void m_redzone_verify(struct mbuf *m);
941
942 static void m_set_rfa(struct mbuf *, struct ext_ref *);
943
944 #if CONFIG_MBUF_MCACHE
945 /* The following are used to serialize m_clalloc() */
946 static boolean_t mb_clalloc_busy;
947 static void *mb_clalloc_waitchan = &mb_clalloc_busy;
948 static int mb_clalloc_waiters;
949 #endif /* CONFIG_MBUF_MCACHE */
950
951 static void mbuf_mtypes_sync(boolean_t);
952 static int mbstat_sysctl SYSCTL_HANDLER_ARGS;
953 static void mbuf_stat_sync(void);
954 static int mb_stat_sysctl SYSCTL_HANDLER_ARGS;
955 #if CONFIG_MBUF_MCACHE
956 static int mleak_top_trace_sysctl SYSCTL_HANDLER_ARGS;
957 static int mleak_table_sysctl SYSCTL_HANDLER_ARGS;
958 static char *mbuf_dump(void);
959 #endif /* CONFIG_MBUF_MCACHE */
960 static void mbuf_table_init(void);
961 static inline void m_incref(struct mbuf *);
962 static inline u_int16_t m_decref(struct mbuf *);
963 static void mbuf_watchdog_defunct(thread_call_param_t, thread_call_param_t);
964 #if CONFIG_MBUF_MCACHE
965 static int m_clalloc(const u_int32_t, const int, const u_int32_t);
966 static void mbuf_worker_thread_init(void);
967 static mcache_obj_t *slab_alloc(mbuf_class_t, int);
968 static void slab_free(mbuf_class_t, mcache_obj_t *);
969 static unsigned int mbuf_slab_alloc(void *, mcache_obj_t ***,
970 unsigned int, int);
971 static void mbuf_slab_free(void *, mcache_obj_t *, int);
972 static void mbuf_slab_audit(void *, mcache_obj_t *, boolean_t);
973 static void mbuf_slab_notify(void *, u_int32_t);
974 static unsigned int cslab_alloc(mbuf_class_t, mcache_obj_t ***,
975 unsigned int);
976 static unsigned int cslab_free(mbuf_class_t, mcache_obj_t *, int);
977 static unsigned int mbuf_cslab_alloc(void *, mcache_obj_t ***,
978 unsigned int, int);
979 static void mbuf_cslab_free(void *, mcache_obj_t *, int);
980 static void mbuf_cslab_audit(void *, mcache_obj_t *, boolean_t);
981 static int freelist_populate(mbuf_class_t, unsigned int, int);
982 static void freelist_init(mbuf_class_t);
983 static boolean_t mbuf_cached_above(mbuf_class_t, int);
984 static boolean_t mbuf_steal(mbuf_class_t, unsigned int);
985 static void m_reclaim(mbuf_class_t, unsigned int, boolean_t);
986 static int m_howmany(int, size_t);
987 static void mbuf_worker_thread(void);
988 static void mbuf_watchdog(void);
989 static boolean_t mbuf_sleep(mbuf_class_t, unsigned int, int);
990
991 static void mcl_audit_init(void *, mcache_audit_t **, mcache_obj_t **,
992 size_t, unsigned int);
993 static void mcl_audit_free(void *, unsigned int);
994 static mcache_audit_t *mcl_audit_buf2mca(mbuf_class_t, mcache_obj_t *);
995 static void mcl_audit_mbuf(mcache_audit_t *, void *, boolean_t, boolean_t);
996 static void mcl_audit_cluster(mcache_audit_t *, void *, size_t, boolean_t,
997 boolean_t);
998 static void mcl_audit_restore_mbuf(struct mbuf *, mcache_audit_t *, boolean_t);
999 static void mcl_audit_save_mbuf(struct mbuf *, mcache_audit_t *);
1000 static void mcl_audit_scratch(mcache_audit_t *);
1001 static void mcl_audit_mcheck_panic(struct mbuf *);
1002 static void mcl_audit_verify_nextptr(void *, mcache_audit_t *);
1003
1004 static void mleak_activate(void);
1005 static void mleak_logger(u_int32_t, mcache_obj_t *, boolean_t);
1006 static boolean_t mleak_log(uintptr_t *, mcache_obj_t *, uint32_t, int);
1007 static void mleak_free(mcache_obj_t *);
1008 static void mleak_sort_traces(void);
1009 static void mleak_update_stats(void);
1010
1011 static mcl_slab_t *slab_get(void *);
1012 static void slab_init(mcl_slab_t *, mbuf_class_t, u_int32_t,
1013 void *, void *, unsigned int, int, int);
1014 static void slab_insert(mcl_slab_t *, mbuf_class_t);
1015 static void slab_remove(mcl_slab_t *, mbuf_class_t);
1016 static boolean_t slab_inrange(mcl_slab_t *, void *);
1017 static void slab_nextptr_panic(mcl_slab_t *, void *);
1018 static void slab_detach(mcl_slab_t *);
1019 static boolean_t slab_is_detached(mcl_slab_t *);
1020 #else /* !CONFIG_MBUF_MCACHE */
1021 static struct mbuf *mz_alloc(zalloc_flags_t);
1022 static void mz_free(struct mbuf *);
1023 static struct ext_ref *mz_ref_alloc(zalloc_flags_t);
1024 static void mz_ref_free(struct ext_ref *);
1025 static void *mz_cl_alloc(zone_id_t, zalloc_flags_t);
1026 static void mz_cl_free(zone_id_t, void *);
1027 static struct mbuf *mz_composite_alloc(mbuf_class_t, zalloc_flags_t);
1028 static zstack_t mz_composite_alloc_n(mbuf_class_t, unsigned int, zalloc_flags_t);
1029 static void mz_composite_free(mbuf_class_t, struct mbuf *);
1030 static void mz_composite_free_n(mbuf_class_t, zstack_t);
1031 static void *mz_composite_build(zone_id_t, zalloc_flags_t);
1032 static void *mz_composite_mark_valid(zone_id_t, void *);
1033 static void *mz_composite_mark_invalid(zone_id_t, void *);
1034 static void mz_composite_destroy(zone_id_t, void *);
1035
1036 ZONE_DEFINE_ID(ZONE_ID_MBUF_REF, "mbuf.ref", struct ext_ref,
1037 ZC_CACHING | ZC_NOPGZ | ZC_KASAN_NOQUARANTINE);
1038 ZONE_DEFINE_ID(ZONE_ID_MBUF, "mbuf", struct mbuf,
1039 ZC_CACHING | ZC_NOPGZ | ZC_KASAN_NOQUARANTINE);
1040 ZONE_DEFINE_ID(ZONE_ID_CLUSTER_2K, "mbuf.cluster.2k", union mcluster,
1041 ZC_CACHING | ZC_NOPGZ | ZC_KASAN_NOQUARANTINE | ZC_DATA);
1042 ZONE_DEFINE_ID(ZONE_ID_CLUSTER_4K, "mbuf.cluster.4k", union mbigcluster,
1043 ZC_CACHING | ZC_NOPGZ | ZC_KASAN_NOQUARANTINE | ZC_DATA);
1044 ZONE_DEFINE_ID(ZONE_ID_CLUSTER_16K, "mbuf.cluster.16k", union m16kcluster,
1045 ZC_CACHING | ZC_NOPGZ | ZC_KASAN_NOQUARANTINE | ZC_DATA);
1046 static_assert(sizeof(union mcluster) == MCLBYTES);
1047 static_assert(sizeof(union mbigcluster) == MBIGCLBYTES);
1048 static_assert(sizeof(union m16kcluster) == M16KCLBYTES);
1049
1050 static const struct zone_cache_ops mz_composite_ops = {
1051 .zc_op_alloc = mz_composite_build,
1052 .zc_op_mark_valid = mz_composite_mark_valid,
1053 .zc_op_mark_invalid = mz_composite_mark_invalid,
1054 .zc_op_free = mz_composite_destroy,
1055 };
1056 ZCACHE_DEFINE(ZONE_ID_MBUF_CLUSTER_2K, "mbuf.composite.2k", struct mbuf,
1057 sizeof(struct mbuf) + sizeof(struct ext_ref) + MCLBYTES,
1058 &mz_composite_ops);
1059 ZCACHE_DEFINE(ZONE_ID_MBUF_CLUSTER_4K, "mbuf.composite.4k", struct mbuf,
1060 sizeof(struct mbuf) + sizeof(struct ext_ref) + MBIGCLBYTES,
1061 &mz_composite_ops);
1062 ZCACHE_DEFINE(ZONE_ID_MBUF_CLUSTER_16K, "mbuf.composite.16k", struct mbuf,
1063 sizeof(struct mbuf) + sizeof(struct ext_ref) + M16KCLBYTES,
1064 &mz_composite_ops);
1065 static_assert(ZONE_ID_MBUF + MC_MBUF == ZONE_ID_MBUF);
1066 static_assert(ZONE_ID_MBUF + MC_CL == ZONE_ID_CLUSTER_2K);
1067 static_assert(ZONE_ID_MBUF + MC_BIGCL == ZONE_ID_CLUSTER_4K);
1068 static_assert(ZONE_ID_MBUF + MC_16KCL == ZONE_ID_CLUSTER_16K);
1069 static_assert(ZONE_ID_MBUF + MC_MBUF_CL == ZONE_ID_MBUF_CLUSTER_2K);
1070 static_assert(ZONE_ID_MBUF + MC_MBUF_BIGCL == ZONE_ID_MBUF_CLUSTER_4K);
1071 static_assert(ZONE_ID_MBUF + MC_MBUF_16KCL == ZONE_ID_MBUF_CLUSTER_16K);
1072
1073 /* Converts an mbuf class to a zalloc zone ID. */
1074 __attribute__((always_inline))
1075 static inline zone_id_t
1076 m_class_to_zid(mbuf_class_t class)
1077 {
1078 return ZONE_ID_MBUF + class - MC_MBUF;
1079 }
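/*
 * For instance, m_class_to_zid(MC_CL) yields ZONE_ID_CLUSTER_2K and
 * m_class_to_zid(MC_MBUF_16KCL) yields ZONE_ID_MBUF_CLUSTER_16K.  The
 * static_asserts above guarantee that the mbuf_class_t ordering matches
 * the zone ID layout, so the conversion is a constant offset.
 */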
1080
1081 static thread_call_t mbuf_defunct_tcall;
1082 #endif /* CONFIG_MBUF_MCACHE */
1083
1084 static int m_copyback0(struct mbuf **, int, int, const void *, int, int);
1085 static struct mbuf *m_split0(struct mbuf *, int, int, int);
1086 __private_extern__ void mbuf_report_peak_usage(void);
1087 #if CONFIG_MBUF_MCACHE
1088 static boolean_t mbuf_report_usage(mbuf_class_t);
1089 #endif /* CONFIG_MBUF_MCACHE */
1090 #if CONFIG_MBUF_MCACHE && (DEBUG || DEVELOPMENT)
1091 #define mbwdog_logger(fmt, ...) _mbwdog_logger(__func__, __LINE__, fmt, ## __VA_ARGS__)
1092 static void _mbwdog_logger(const char *func, const int line, const char *fmt, ...);
1093 static char *mbwdog_logging;
1094 const unsigned mbwdog_logging_size = 4096;
1095 static size_t mbwdog_logging_used;
1096 #else
1097 #define mbwdog_logger(fmt, ...) do { } while (0)
1098 #endif /* CONFIG_MBUF_MCACHE && (DEBUG || DEVELOPMENT) */
1099 #if CONFIG_MBUF_MCACHE
1100 static void mbuf_drain_locked(boolean_t);
1101 #endif /* CONFIG_MBUF_MCACHE */
1102
1103 /* flags for m_copyback0 */
1104 #define M_COPYBACK0_COPYBACK 0x0001 /* copyback from cp */
1105 #define M_COPYBACK0_PRESERVE 0x0002 /* preserve original data */
1106 #define M_COPYBACK0_COW 0x0004 /* do copy-on-write */
1107 #define M_COPYBACK0_EXTEND 0x0008 /* extend chain */
1108
1109 /*
1110 * This flag is set for all mbufs that come out of and into the composite
1111 * mbuf + cluster caches, i.e. MC_MBUF_CL and MC_MBUF_BIGCL. mbufs that
1112 * are marked with such a flag have clusters attached to them, and will be
1113 * treated differently when they are freed; instead of being placed back
1114 * into the mbuf and cluster freelists, the composite mbuf + cluster objects
1115 * are placed back into the appropriate composite cache's freelist, and the
1116 * actual freeing is deferred until the composite objects are purged. At
1117 * such a time, this flag will be cleared from the mbufs and the objects
1118 * will be freed into their own separate freelists.
1119 */
1120 #define EXTF_COMPOSITE 0x1
1121
1122 /*
1123 * This flag indicates that the external cluster is read-only, i.e. it is
1124  * or was referred to by more than one mbuf.  Once set, this flag is never
1125 * cleared.
1126 */
1127 #define EXTF_READONLY 0x2
1128 /*
1129 * This flag indicates that the external cluster is paired with the mbuf.
1130  * Pairing implies that an external free routine is defined; it will be invoked
1131 * when the reference count drops to the minimum at m_free time. This
1132 * flag is never cleared.
1133 */
1134 #define EXTF_PAIRED 0x4
1135
1136 #define EXTF_MASK \
1137 (EXTF_COMPOSITE | EXTF_READONLY | EXTF_PAIRED)
1138
1139 #define MEXT_MINREF(m) ((m_get_rfa(m))->minref)
1140 #define MEXT_REF(m) ((m_get_rfa(m))->refcnt)
1141 #define MEXT_PREF(m) ((m_get_rfa(m))->prefcnt)
1142 #define MEXT_FLAGS(m) ((m_get_rfa(m))->flags)
1143 #define MEXT_PRIV(m) ((m_get_rfa(m))->priv)
1144 #define MEXT_PMBUF(m) ((m_get_rfa(m))->paired)
1145 #define MEXT_TOKEN(m) ((m_get_rfa(m))->ext_token)
1146 #define MBUF_IS_COMPOSITE(m) \
1147 (MEXT_REF(m) == MEXT_MINREF(m) && \
1148 (MEXT_FLAGS(m) & EXTF_MASK) == EXTF_COMPOSITE)
1149 /*
1150 * This macro can be used to test if the mbuf is paired to an external
1151  * cluster.  The test for MEXT_PMBUF being equal to the mbuf in question
1152 * is important, as EXTF_PAIRED alone is insufficient since it is immutable,
1153 * and thus survives calls to m_free_paired.
1154 */
1155 #define MBUF_IS_PAIRED(m) \
1156 (((m)->m_flags & M_EXT) && \
1157 (MEXT_FLAGS(m) & EXTF_MASK) == EXTF_PAIRED && \
1158 MEXT_PMBUF(m) == (m))
1159
1160 /*
1161 * Macros used to verify the integrity of the mbuf.
1162 */
1163 #if CONFIG_MBUF_MCACHE
1164 #define _MCHECK(m) { \
1165 if ((m)->m_type != MT_FREE && !MBUF_IS_PAIRED(m)) { \
1166 if (mclaudit == NULL) \
1167 panic("MCHECK: m_type=%d m=%p", \
1168 (u_int16_t)(m)->m_type, m); \
1169 else \
1170 mcl_audit_mcheck_panic(m); \
1171 } \
1172 }
1173 #else
1174 #define _MCHECK(m) \
1175 if ((m)->m_type != MT_FREE && !MBUF_IS_PAIRED(m)) { \
1176 panic("MCHECK: m_type=%d m=%p", \
1177 (u_int16_t)(m)->m_type, m); \
1178 }
1179 #endif /* CONFIG_MBUF_MCACHE */
1180
1181 /*
1182 * Macro version of mtod.
1183 */
1184 #define MTOD(m, t) ((t)((m)->m_data))
1185
1186 #if CONFIG_MBUF_MCACHE
1187 #define MBUF_IN_MAP(addr) \
1188 ((unsigned char *)(addr) >= mbutl && \
1189 (unsigned char *)(addr) < embutl)
1190
1191 #define MRANGE(addr) { \
1192 if (!MBUF_IN_MAP(addr)) \
1193 panic("MRANGE: address out of range 0x%p", addr); \
1194 }
1195
1196 /*
1197 * Macros to obtain page index given a base cluster address
1198 */
1199 #define MTOPG(x) (((unsigned char *)x - mbutl) >> PAGE_SHIFT)
1200 #define PGTOM(x) (mbutl + (x << PAGE_SHIFT))
1201
1202 /*
1203 * Macro to find the mbuf index relative to a base.
1204 */
1205 #define MBPAGEIDX(c, m) \
1206 (((unsigned char *)(m) - (unsigned char *)(c)) >> _MSIZESHIFT)
1207
1208 /*
1209 * Same thing for 2KB cluster index.
1210 */
1211 #define CLPAGEIDX(c, m) \
1212 (((unsigned char *)(m) - (unsigned char *)(c)) >> MCLSHIFT)
1213
1214 /*
1215 * Macro to find 4KB cluster index relative to a base
1216 */
1217 #define BCLPAGEIDX(c, m) \
1218 (((unsigned char *)(m) - (unsigned char *)(c)) >> MBIGCLSHIFT)
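/*
 * Worked example (illustrative, assuming 4KB pages and 256-byte mbufs):
 * an mbuf located 0x900 bytes into its page has MBPAGEIDX() == 0x900 >>
 * _MSIZESHIFT == 9, i.e. it is the 10th mbuf in that page.  CLPAGEIDX()
 * and BCLPAGEIDX() do the same arithmetic at 2KB and 4KB granularity.
 */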
1219 #endif /* CONFIG_MBUF_MCACHE */
1220
1221 /*
1222 * Macros used during mbuf and cluster initialization.
1223 */
1224 #define MBUF_INIT_PKTHDR(m) { \
1225 (m)->m_pkthdr.rcvif = NULL; \
1226 (m)->m_pkthdr.pkt_hdr = NULL; \
1227 (m)->m_pkthdr.len = 0; \
1228 (m)->m_pkthdr.csum_flags = 0; \
1229 (m)->m_pkthdr.csum_data = 0; \
1230 (m)->m_pkthdr.vlan_tag = 0; \
1231 (m)->m_pkthdr.comp_gencnt = 0; \
1232 (m)->m_pkthdr.pkt_crumbs = 0; \
1233 m_classifier_init(m, 0); \
1234 m_tag_init(m, 1); \
1235 m_scratch_init(m); \
1236 m_redzone_init(m); \
1237 }
1238
1239 #define MBUF_INIT(m, pkthdr, type) { \
1240 _MCHECK(m); \
1241 (m)->m_next = (m)->m_nextpkt = NULL; \
1242 (m)->m_len = 0; \
1243 (m)->m_type = type; \
1244 if ((pkthdr) == 0) { \
1245 (m)->m_data = (m)->m_dat; \
1246 (m)->m_flags = 0; \
1247 } else { \
1248 (m)->m_data = (m)->m_pktdat; \
1249 (m)->m_flags = M_PKTHDR; \
1250 MBUF_INIT_PKTHDR(m); \
1251 } \
1252 }
1253
1254 #define MEXT_INIT(m, buf, size, free, arg, rfa, min, ref, pref, flag, \
1255 priv, pm) { \
1256 (m)->m_data = (m)->m_ext.ext_buf = (buf); \
1257 (m)->m_flags |= M_EXT; \
1258 m_set_ext((m), (rfa), (free), (arg)); \
1259 (m)->m_ext.ext_size = (u_int)(size); \
1260 MEXT_MINREF(m) = (min); \
1261 MEXT_REF(m) = (ref); \
1262 MEXT_PREF(m) = (pref); \
1263 MEXT_FLAGS(m) = (flag); \
1264 MEXT_PRIV(m) = (priv); \
1265 MEXT_PMBUF(m) = (pm); \
1266 }
1267
1268 #define MBUF_CL_INIT(m, buf, rfa, ref, flag) \
1269 MEXT_INIT(m, buf, m_maxsize(MC_CL), NULL, NULL, rfa, 0, \
1270 ref, 0, flag, 0, NULL)
1271
1272 #define MBUF_BIGCL_INIT(m, buf, rfa, ref, flag) \
1273 MEXT_INIT(m, buf, m_maxsize(MC_BIGCL), m_bigfree, NULL, rfa, 0, \
1274 ref, 0, flag, 0, NULL)
1275
1276 #define MBUF_16KCL_INIT(m, buf, rfa, ref, flag) \
1277 MEXT_INIT(m, buf, m_maxsize(MC_16KCL), m_16kfree, NULL, rfa, 0, \
1278 ref, 0, flag, 0, NULL)
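/*
 * Illustrative use of the initializers above (schematic; real call sites
 * pass the reference count and flags appropriate to their cache): given an
 * mbuf m, a 2KB cluster buffer buf and an ext_ref rfa, a caller would wire
 * them together with
 *
 *	MBUF_INIT(m, 1, MT_DATA);
 *	MBUF_CL_INIT(m, buf, rfa, 1, 0);
 *
 * leaving m_data pointing at the attached cluster.
 */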
1279
1280 /*
1281 * Macro to convert BSD malloc sleep flag to mcache's
1282 */
1283 #define MSLEEPF(f) ((!((f) & M_DONTWAIT)) ? MCR_SLEEP : MCR_NOSLEEP)
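/*
 * E.g. MSLEEPF(M_DONTWAIT) evaluates to MCR_NOSLEEP, while a caller that
 * does not pass M_DONTWAIT gets MCR_SLEEP and may block in the cache layer.
 */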
1284
1285 /*
1286 * The structure that holds all mbuf class statistics exportable via sysctl.
1287 * Similar to mbstat structure, the mb_stat structure is protected by the
1288 * global mbuf lock. It contains additional information about the classes
1289 * that allows for a more accurate view of the state of the allocator.
1290 */
1291 struct mb_stat *mb_stat;
1292 struct omb_stat *omb_stat; /* For backwards compatibility */
1293
1294 #define MB_STAT_SIZE(n) \
1295 __builtin_offsetof(mb_stat_t, mbs_class[n])
1296 #define OMB_STAT_SIZE(n) \
1297 __builtin_offsetof(struct omb_stat, mbs_class[n])
1298
1299 /*
1300 * The legacy structure holding all of the mbuf allocation statistics.
1301 * The actual statistics used by the kernel are stored in the mbuf_table
1302 * instead, and are updated atomically while the global mbuf lock is held.
1303 * They are mirrored in mbstat to support legacy applications (e.g. netstat).
1304 * Unlike before, the kernel no longer relies on the contents of mbstat for
1305 * its operations (e.g. cluster expansion) because the structure is exposed
1306  * to the outside and could possibly be modified, therefore making it unsafe.
1307 * With the exception of the mbstat.m_mtypes array (see below), all of the
1308 * statistics are updated as they change.
1309 */
1310 struct mbstat mbstat;
1311
1312 #define MBSTAT_MTYPES_MAX \
1313 (sizeof (mbstat.m_mtypes) / sizeof (mbstat.m_mtypes[0]))
1314
1315 /*
1316 * Allocation statistics related to mbuf types (up to MT_MAX-1) are updated
1317 * atomically and stored in a per-CPU structure which is lock-free; this is
1318 * done in order to avoid writing to the global mbstat data structure which
1319 * would cause false sharing. During sysctl request for kern.ipc.mbstat,
1320 * the statistics across all CPUs will be converged into the mbstat.m_mtypes
1321  * array and returned to the application.  Any updates for types greater than
1322  * or equal to MT_MAX are done atomically on mbstat; this slows down
1323 * performance but is okay since the kernel uses only up to MT_MAX-1 while
1324 * anything beyond that (up to type 255) is considered a corner case.
1325 */
1326 typedef struct {
1327 unsigned int cpu_mtypes[MT_MAX];
1328 } mbuf_mtypes_t;
1329
1330 static mbuf_mtypes_t PERCPU_DATA(mbuf_mtypes);
1331
1332 #define mtype_stat_add(type, n) { \
1333 if ((unsigned)(type) < MT_MAX) { \
1334 mbuf_mtypes_t *mbs = PERCPU_GET(mbuf_mtypes); \
1335 os_atomic_add(&mbs->cpu_mtypes[type], n, relaxed); \
1336 } else if ((unsigned)(type) < (unsigned)MBSTAT_MTYPES_MAX) { \
1337 os_atomic_add((int16_t *)&mbstat.m_mtypes[type], n, relaxed); \
1338 } \
1339 }
1340
1341 #define mtype_stat_sub(t, n) mtype_stat_add(t, -(n))
1342 #define mtype_stat_inc(t) mtype_stat_add(t, 1)
1343 #define mtype_stat_dec(t) mtype_stat_sub(t, 1)
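/*
 * Typical (illustrative) usage: when an mbuf of type MT_DATA is handed
 * out, the allocator does mtype_stat_inc(MT_DATA); the per-CPU counters
 * are later folded into mbstat.m_mtypes[] by mbuf_mtypes_sync().
 */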
1344
1345 static void
1346 mbuf_mtypes_sync(boolean_t locked)
1347 {
1348 mbuf_mtypes_t mtc;
1349
1350 if (locked) {
1351 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
1352 }
1353
1354 mtc = *PERCPU_GET_MASTER(mbuf_mtypes);
1355 percpu_foreach_secondary(mtype, mbuf_mtypes) {
1356 for (int n = 0; n < MT_MAX; n++) {
1357 mtc.cpu_mtypes[n] += mtype->cpu_mtypes[n];
1358 }
1359 }
1360
1361 if (!locked) {
1362 lck_mtx_lock(mbuf_mlock);
1363 }
1364 for (int n = 0; n < MT_MAX; n++) {
1365 mbstat.m_mtypes[n] = mtc.cpu_mtypes[n];
1366 }
1367 if (!locked) {
1368 lck_mtx_unlock(mbuf_mlock);
1369 }
1370 }
1371
1372 static int
1373 mbstat_sysctl SYSCTL_HANDLER_ARGS
1374 {
1375 #pragma unused(oidp, arg1, arg2)
1376
1377 #if CONFIG_MBUF_MCACHE
1378 mbuf_mtypes_sync(FALSE);
1379 #else
1380 lck_mtx_lock(mbuf_mlock);
1381 mbuf_stat_sync();
1382 mbuf_mtypes_sync(TRUE);
1383 lck_mtx_unlock(mbuf_mlock);
1384 #endif
1385
1386 return SYSCTL_OUT(req, &mbstat, sizeof(mbstat));
1387 }
1388
1389 static void
1390 mbuf_stat_sync(void)
1391 {
1392 mb_class_stat_t *sp;
1393 #if CONFIG_MBUF_MCACHE
1394 mcache_cpu_t *ccp;
1395 mcache_t *cp;
1396 int k, m, bktsize;
1397 #else
1398 int k;
1399 uint64_t drops = 0;
1400 #endif /* CONFIG_MBUF_MCACHE */
1401
1402
1403 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
1404
1405 #if CONFIG_MBUF_MCACHE
1406 for (k = 0; k < NELEM(mbuf_table); k++) {
1407 cp = m_cache(k);
1408 ccp = &cp->mc_cpu[0];
1409 bktsize = ccp->cc_bktsize;
1410 sp = mbuf_table[k].mtbl_stats;
1411
1412 if (cp->mc_flags & MCF_NOCPUCACHE) {
1413 sp->mbcl_mc_state = MCS_DISABLED;
1414 } else if (cp->mc_purge_cnt > 0) {
1415 sp->mbcl_mc_state = MCS_PURGING;
1416 } else if (bktsize == 0) {
1417 sp->mbcl_mc_state = MCS_OFFLINE;
1418 } else {
1419 sp->mbcl_mc_state = MCS_ONLINE;
1420 }
1421
1422 sp->mbcl_mc_cached = 0;
1423 for (m = 0; m < ncpu; m++) {
1424 ccp = &cp->mc_cpu[m];
1425 if (ccp->cc_objs > 0) {
1426 sp->mbcl_mc_cached += ccp->cc_objs;
1427 }
1428 if (ccp->cc_pobjs > 0) {
1429 sp->mbcl_mc_cached += ccp->cc_pobjs;
1430 }
1431 }
1432 sp->mbcl_mc_cached += (cp->mc_full.bl_total * bktsize);
1433 sp->mbcl_active = sp->mbcl_total - sp->mbcl_mc_cached -
1434 sp->mbcl_infree;
1435
1436 sp->mbcl_mc_waiter_cnt = cp->mc_waiter_cnt;
1437 sp->mbcl_mc_wretry_cnt = cp->mc_wretry_cnt;
1438 sp->mbcl_mc_nwretry_cnt = cp->mc_nwretry_cnt;
1439
1440 /* Calculate total count specific to each class */
1441 sp->mbcl_ctotal = sp->mbcl_total;
1442 switch (m_class(k)) {
1443 case MC_MBUF:
1444 /* Deduct mbufs used in composite caches */
1445 sp->mbcl_ctotal -= (m_total(MC_MBUF_CL) +
1446 			    m_total(MC_MBUF_BIGCL) + m_total(MC_MBUF_16KCL));
1447 break;
1448
1449 case MC_CL:
1450 /* Deduct clusters used in composite cache */
1451 sp->mbcl_ctotal -= m_total(MC_MBUF_CL);
1452 break;
1453
1454 case MC_BIGCL:
1455 /* Deduct clusters used in composite cache */
1456 sp->mbcl_ctotal -= m_total(MC_MBUF_BIGCL);
1457 break;
1458
1459 case MC_16KCL:
1460 /* Deduct clusters used in composite cache */
1461 sp->mbcl_ctotal -= m_total(MC_MBUF_16KCL);
1462 break;
1463
1464 default:
1465 break;
1466 }
1467 }
1468 #else
1469 for (k = 0; k < NELEM(mbuf_table); k++) {
1470 const zone_id_t zid = m_class_to_zid(m_class(k));
1471 const zone_t zone = zone_by_id(zid);
1472 struct zone_basic_stats stats = {};
1473
1474 sp = m_stats(k);
1475 zone_get_stats(zone, &stats);
1476 drops += stats.zbs_alloc_fail;
1477 sp->mbcl_total = stats.zbs_avail;
1478 sp->mbcl_active = stats.zbs_alloc;
1479 /*
1480 * infree is what mcache considers the freelist (uncached)
1481 * free_cnt contains all the cached/uncached elements
1482 * in a zone.
1483 */
1484 sp->mbcl_infree = stats.zbs_free - stats.zbs_cached;
1485 sp->mbcl_fail_cnt = stats.zbs_alloc_fail;
1486 sp->mbcl_ctotal = sp->mbcl_total;
1487
1488 /* These stats are not available in zalloc. */
1489 sp->mbcl_alloc_cnt = 0;
1490 sp->mbcl_free_cnt = 0;
1491 sp->mbcl_notified = 0;
1492 sp->mbcl_purge_cnt = 0;
1493 sp->mbcl_slab_cnt = 0;
1494 sp->mbcl_release_cnt = 0;
1495
1496 /* zalloc caches are always on. */
1497 sp->mbcl_mc_state = MCS_ONLINE;
1498 sp->mbcl_mc_cached = stats.zbs_cached;
1499 /* These stats are not collected by zalloc. */
1500 sp->mbcl_mc_waiter_cnt = 0;
1501 sp->mbcl_mc_wretry_cnt = 0;
1502 sp->mbcl_mc_nwretry_cnt = 0;
1503 }
1504 /* Deduct clusters used in composite cache */
1505 m_ctotal(MC_MBUF) -= (m_total(MC_MBUF_CL) +
1506 	    m_total(MC_MBUF_BIGCL) +
1507 m_total(MC_MBUF_16KCL));
1508 m_ctotal(MC_CL) -= m_total(MC_MBUF_CL);
1509 m_ctotal(MC_BIGCL) -= m_total(MC_MBUF_BIGCL);
1510 m_ctotal(MC_16KCL) -= m_total(MC_MBUF_16KCL);
1511
1512 /* Update mbstat. */
1513 mbstat.m_mbufs = m_total(MC_MBUF);
1514 mbstat.m_clusters = m_total(MC_CL);
1515 mbstat.m_clfree = m_infree(MC_CL) + m_infree(MC_MBUF_CL);
1516 mbstat.m_drops = drops;
1517 mbstat.m_bigclusters = m_total(MC_BIGCL);
1518 mbstat.m_bigclfree = m_infree(MC_BIGCL) + m_infree(MC_MBUF_BIGCL);
1519 #endif /* CONFIG_MBUF_MCACHE */
1520 }
1521
1522 static int
1523 mb_stat_sysctl SYSCTL_HANDLER_ARGS
1524 {
1525 #pragma unused(oidp, arg1, arg2)
1526 void *statp;
1527 int k, statsz, proc64 = proc_is64bit(req->p);
1528
1529 lck_mtx_lock(mbuf_mlock);
1530 mbuf_stat_sync();
1531
1532 if (!proc64) {
1533 struct omb_class_stat *oc;
1534 struct mb_class_stat *c;
1535
1536 omb_stat->mbs_cnt = mb_stat->mbs_cnt;
1537 oc = &omb_stat->mbs_class[0];
1538 c = &mb_stat->mbs_class[0];
1539 for (k = 0; k < omb_stat->mbs_cnt; k++, oc++, c++) {
1540 (void) snprintf(oc->mbcl_cname, sizeof(oc->mbcl_cname),
1541 "%s", c->mbcl_cname);
1542 oc->mbcl_size = c->mbcl_size;
1543 oc->mbcl_total = c->mbcl_total;
1544 oc->mbcl_active = c->mbcl_active;
1545 oc->mbcl_infree = c->mbcl_infree;
1546 oc->mbcl_slab_cnt = c->mbcl_slab_cnt;
1547 oc->mbcl_alloc_cnt = c->mbcl_alloc_cnt;
1548 oc->mbcl_free_cnt = c->mbcl_free_cnt;
1549 oc->mbcl_notified = c->mbcl_notified;
1550 oc->mbcl_purge_cnt = c->mbcl_purge_cnt;
1551 oc->mbcl_fail_cnt = c->mbcl_fail_cnt;
1552 oc->mbcl_ctotal = c->mbcl_ctotal;
1553 oc->mbcl_release_cnt = c->mbcl_release_cnt;
1554 oc->mbcl_mc_state = c->mbcl_mc_state;
1555 oc->mbcl_mc_cached = c->mbcl_mc_cached;
1556 oc->mbcl_mc_waiter_cnt = c->mbcl_mc_waiter_cnt;
1557 oc->mbcl_mc_wretry_cnt = c->mbcl_mc_wretry_cnt;
1558 oc->mbcl_mc_nwretry_cnt = c->mbcl_mc_nwretry_cnt;
1559 oc->mbcl_peak_reported = c->mbcl_peak_reported;
1560 }
1561 statp = omb_stat;
1562 statsz = OMB_STAT_SIZE(NELEM(mbuf_table));
1563 } else {
1564 statp = mb_stat;
1565 statsz = MB_STAT_SIZE(NELEM(mbuf_table));
1566 }
1567
1568 lck_mtx_unlock(mbuf_mlock);
1569
1570 return SYSCTL_OUT(req, statp, statsz);
1571 }
1572
1573 #if !CONFIG_MBUF_MCACHE
1574 /*
1575 * The following functions are wrappers around mbuf
1576 * allocation for zalloc. They all have the prefix "mz"
1577 * which was chosen to avoid conflicts with the mbuf KPIs.
1578 *
1579 * Z_NOPAGEWAIT is used in place of Z_NOWAIT because
1580 * Z_NOPAGEWAIT maps closer to MCR_TRYHARD. Z_NOWAIT will
1581 * fail immediately if it has to take a mutex and that
1582 * may cause packets to be dropped more frequently.
1583 * In general, the mbuf subsystem can sustain grabbing a mutex
1584 * during "non-blocking" allocation and that's the reason
1585 * why Z_NOPAGEWAIT was chosen.
1586 *
1587 * mbufs are elided (all pointers removed) before they are
1588 * returned to the cache. The exception is composite mbufs, which
1589 * are re-initialized on allocation.
1590 */
1591 __attribute__((always_inline))
1592 static inline void
1593 m_elide(struct mbuf *m)
1594 {
1595 m->m_next = m->m_nextpkt = NULL;
1596 m->m_data = NULL;
1597 memset(&m->m_ext, 0, sizeof(m->m_ext));
1598 m->m_pkthdr.rcvif = NULL;
1599 m->m_pkthdr.pkt_hdr = NULL;
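/*
 * M_PKTHDR is set temporarily so that m_tag_init() and
 * m_scratch_init() below may legitimately touch the packet-header
 * fields being scrubbed; the flag is cleared again once done.
 */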
1600 m->m_flags |= M_PKTHDR;
1601 m_tag_init(m, 1);
1602 m->m_pkthdr.pkt_flags = 0;
1603 m_scratch_init(m);
1604 m->m_pkthdr.redzone = 0;
1605 m->m_flags &= ~M_PKTHDR;
1606 }
1607
1608 __attribute__((always_inline))
1609 static inline struct mbuf *
1610 mz_alloc(zalloc_flags_t flags)
1611 {
1612 if (flags & Z_NOWAIT) {
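/*
 * Z_NOWAIT is known to be set here, so XOR-ing with
 * (Z_NOWAIT | Z_NOPAGEWAIT) clears Z_NOWAIT and sets
 * Z_NOPAGEWAIT in a single step.
 */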
1613 flags ^= Z_NOWAIT | Z_NOPAGEWAIT;
1614 } else if (!(flags & Z_NOPAGEWAIT)) {
1615 flags |= Z_NOFAIL;
1616 }
1617 return zalloc_id(ZONE_ID_MBUF, flags | Z_NOZZC);
1618 }
1619
1620 __attribute__((always_inline))
1621 static inline zstack_t
1622 mz_alloc_n(uint32_t count, zalloc_flags_t flags)
1623 {
1624 if (flags & Z_NOWAIT) {
1625 flags ^= Z_NOWAIT | Z_NOPAGEWAIT;
1626 } else if (!(flags & Z_NOPAGEWAIT)) {
1627 flags |= Z_NOFAIL;
1628 }
1629 return zalloc_n(ZONE_ID_MBUF, count, flags | Z_NOZZC);
1630 }
1631
1632 __attribute__((always_inline))
1633 static inline void
1634 mz_free(struct mbuf *m)
1635 {
1636 #if KASAN
1637 zone_require(zone_by_id(ZONE_ID_MBUF), m);
1638 #endif
1639 m_elide(m);
1640 zfree_nozero(ZONE_ID_MBUF, m);
1641 }
1642
1643 __attribute__((always_inline))
1644 static inline void
1645 mz_free_n(zstack_t list)
1646 {
1647 /* Callers of this function have already elided the mbuf. */
1648 zfree_nozero_n(ZONE_ID_MBUF, list);
1649 }
1650
1651 __attribute__((always_inline))
1652 static inline struct ext_ref *
1653 mz_ref_alloc(zalloc_flags_t flags)
1654 {
1655 if (flags & Z_NOWAIT) {
1656 flags ^= Z_NOWAIT | Z_NOPAGEWAIT;
1657 }
1658 return zalloc_id(ZONE_ID_MBUF_REF, flags | Z_NOZZC);
1659 }
1660
1661 __attribute__((always_inline))
1662 static inline void
1663 mz_ref_free(struct ext_ref *rfa)
1664 {
1665 VERIFY(rfa->minref == rfa->refcnt);
1666 #if KASAN
1667 zone_require(zone_by_id(ZONE_ID_MBUF_REF), rfa);
1668 #endif
1669 zfree_nozero(ZONE_ID_MBUF_REF, rfa);
1670 }
1671
1672 __attribute__((always_inline))
1673 static inline void *
1674 mz_cl_alloc(zone_id_t zid, zalloc_flags_t flags)
1675 {
1676 if (flags & Z_NOWAIT) {
1677 flags ^= Z_NOWAIT | Z_NOPAGEWAIT;
1678 } else if (!(flags & Z_NOPAGEWAIT)) {
1679 flags |= Z_NOFAIL;
1680 }
1681 return (zalloc_id)(zid, flags | Z_NOZZC);
1682 }
1683
1684 __attribute__((always_inline))
1685 static inline void
1686 mz_cl_free(zone_id_t zid, void *cl)
1687 {
1688 #if KASAN
1689 zone_require(zone_by_id(zid), cl);
1690 #endif
1691 zfree_nozero(zid, cl);
1692 }
1693
1694 __attribute__((always_inline))
1695 static inline zstack_t
1696 mz_composite_alloc_n(mbuf_class_t class, unsigned int n, zalloc_flags_t flags)
1697 {
1698 if (flags & Z_NOWAIT) {
1699 flags ^= Z_NOWAIT | Z_NOPAGEWAIT;
1700 }
1701 return (zcache_alloc_n)(m_class_to_zid(class), n, flags,
1702 &mz_composite_ops);
1703 }
1704
1705 __attribute__((always_inline))
1706 static inline struct mbuf *
1707 mz_composite_alloc(mbuf_class_t class, zalloc_flags_t flags)
1708 {
1709 zstack_t list = {};
1710 list = mz_composite_alloc_n(class, 1, flags);
1711 if (!zstack_empty(list)) {
1712 return zstack_pop(&list);
1713 } else {
1714 return NULL;
1715 }
1716 }
1717
1718 __attribute__((always_inline))
1719 static inline void
1720 mz_composite_free_n(mbuf_class_t class, zstack_t list)
1721 {
1722 (zcache_free_n)(m_class_to_zid(class), list, &mz_composite_ops);
1723 }
1724
1725 __attribute__((always_inline))
1726 static inline void
1727 mz_composite_free(mbuf_class_t class, struct mbuf *m)
1728 {
1729 zstack_t list = {};
1730 zstack_push(&list, m);
1731 (zcache_free_n)(m_class_to_zid(class), list, &mz_composite_ops);
1732 }
1733
1734 /* Converts composite zone ID to the cluster zone ID. */
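/*
 * This relies on the ZONE_ID_MBUF_CLUSTER_* and ZONE_ID_CLUSTER_*
 * identifiers being declared in the same relative order, so a fixed
 * offset maps e.g. ZONE_ID_MBUF_CLUSTER_2K to ZONE_ID_CLUSTER_2K.
 */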
1735 __attribute__((always_inline))
1736 static inline zone_id_t
1737 mz_cl_zid(zone_id_t zid)
1738 {
1739 return ZONE_ID_CLUSTER_2K + zid - ZONE_ID_MBUF_CLUSTER_2K;
1740 }
1741
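/*
 * zcache "build" callback for the composite zones: allocate a raw
 * cluster, an ext_ref and an mbuf, then assemble them into a single
 * composite mbuf marked EXTF_COMPOSITE. On partial failure, whatever
 * was already obtained is released before returning NULL.
 */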
1742 static void *
1743 mz_composite_build(zone_id_t zid, zalloc_flags_t flags)
1744 {
1745 const zone_id_t cl_zid = mz_cl_zid(zid);
1746 struct mbuf *m = NULL;
1747 struct ext_ref *rfa = NULL;
1748 void *cl = NULL;
1749
1750 cl = mz_cl_alloc(cl_zid, flags);
1751 if (__improbable(cl == NULL)) {
1752 goto out;
1753 }
1754 rfa = mz_ref_alloc(flags);
1755 if (__improbable(rfa == NULL)) {
1756 goto out_free_cl;
1757 }
1758 m = mz_alloc(flags);
1759 if (__improbable(m == NULL)) {
1760 goto out_free_rfa;
1761 }
1762 MBUF_INIT(m, 0, MT_FREE);
1763 if (zid == ZONE_ID_MBUF_CLUSTER_2K) {
1764 MBUF_CL_INIT(m, cl, rfa, 0, EXTF_COMPOSITE);
1765 } else if (zid == ZONE_ID_MBUF_CLUSTER_4K) {
1766 MBUF_BIGCL_INIT(m, cl, rfa, 0, EXTF_COMPOSITE);
1767 } else {
1768 MBUF_16KCL_INIT(m, cl, rfa, 0, EXTF_COMPOSITE);
1769 }
1770 VERIFY(m->m_flags == M_EXT);
1771 VERIFY(m_get_rfa(m) != NULL && MBUF_IS_COMPOSITE(m));
1772
1773 return m;
1774 out_free_rfa:
1775 mz_ref_free(rfa);
1776 out_free_cl:
1777 mz_cl_free(cl_zid, cl);
1778 out:
1779 return NULL;
1780 }
1781
1782 static void *
1783 mz_composite_mark_valid(zone_id_t zid, void *p)
1784 {
1785 struct mbuf *m = p;
1786
1787 m = zcache_mark_valid(zone_by_id(ZONE_ID_MBUF), m);
1788 #if KASAN
1789 struct ext_ref *rfa = m_get_rfa(m);
1790 const zone_id_t cl_zid = mz_cl_zid(zid);
1791 void *cl = m->m_ext.ext_buf;
1792
1793 cl = zcache_mark_valid(zone_by_id(cl_zid), cl);
1794 rfa = zcache_mark_valid(zone_by_id(ZONE_ID_MBUF_REF), rfa);
1795 m->m_data = m->m_ext.ext_buf = cl;
1796 m_set_rfa(m, rfa);
1797 #else
1798 #pragma unused(zid)
1799 #endif
1800 VERIFY(MBUF_IS_COMPOSITE(m));
1801
1802 return m;
1803 }
1804
1805 static void *
1806 mz_composite_mark_invalid(zone_id_t zid, void *p)
1807 {
1808 struct mbuf *m = p;
1809
1810 VERIFY(MBUF_IS_COMPOSITE(m));
1811 VERIFY(MEXT_REF(m) == MEXT_MINREF(m));
1812 #if KASAN
1813 struct ext_ref *rfa = m_get_rfa(m);
1814 const zone_id_t cl_zid = mz_cl_zid(zid);
1815 void *cl = m->m_ext.ext_buf;
1816
1817 cl = zcache_mark_invalid(zone_by_id(cl_zid), cl);
1818 rfa = zcache_mark_invalid(zone_by_id(ZONE_ID_MBUF_REF), rfa);
1819 m->m_data = m->m_ext.ext_buf = cl;
1820 m_set_rfa(m, rfa);
1821 #else
1822 #pragma unused(zid)
1823 #endif
1824
1825 return zcache_mark_invalid(zone_by_id(ZONE_ID_MBUF), m);
1826 }
1827
1828 static void
1829 mz_composite_destroy(zone_id_t zid, void *p)
1830 {
1831 const zone_id_t cl_zid = mz_cl_zid(zid);
1832 struct ext_ref *rfa = NULL;
1833 struct mbuf *m = p;
1834
1835 VERIFY(MBUF_IS_COMPOSITE(m));
1836
1837 MEXT_MINREF(m) = 0;
1838 MEXT_REF(m) = 0;
1839 MEXT_PREF(m) = 0;
1840 MEXT_FLAGS(m) = 0;
1841 MEXT_PRIV(m) = 0;
1842 MEXT_PMBUF(m) = NULL;
1843 MEXT_TOKEN(m) = 0;
1844
1845 rfa = m_get_rfa(m);
1846 m_set_ext(m, NULL, NULL, NULL);
1847
1848 m->m_type = MT_FREE;
1849 m->m_flags = m->m_len = 0;
1850 m->m_next = m->m_nextpkt = NULL;
1851
1852 mz_cl_free(cl_zid, m->m_ext.ext_buf);
1853 m->m_ext.ext_buf = NULL;
1854 mz_ref_free(rfa);
1855 mz_free(m);
1856 }
1857 #endif /* !CONFIG_MBUF_MCACHE */
1858
1859 #if CONFIG_MBUF_MCACHE
1860 static int
1861 mleak_top_trace_sysctl SYSCTL_HANDLER_ARGS
1862 {
1863 #pragma unused(oidp, arg1, arg2)
1864 int i;
1865
1866 /* Ensure leak tracing turned on */
1867 if (!mclfindleak || !mclexpleak) {
1868 return ENXIO;
1869 }
1870
1871 lck_mtx_lock(mleak_lock);
1872 mleak_update_stats();
1873 i = SYSCTL_OUT(req, mleak_stat, MLEAK_STAT_SIZE(MLEAK_NUM_TRACES));
1874 lck_mtx_unlock(mleak_lock);
1875
1876 return i;
1877 }
1878
1879 static int
1880 mleak_table_sysctl SYSCTL_HANDLER_ARGS
1881 {
1882 #pragma unused(oidp, arg1, arg2)
1883 int i = 0;
1884
1885 /* Ensure leak tracing turned on */
1886 if (!mclfindleak || !mclexpleak) {
1887 return ENXIO;
1888 }
1889
1890 lck_mtx_lock(mleak_lock);
1891 i = SYSCTL_OUT(req, &mleak_table, sizeof(mleak_table));
1892 lck_mtx_unlock(mleak_lock);
1893
1894 return i;
1895 }
1896 #endif /* CONFIG_MBUF_MCACHE */
1897
1898 static inline void
1899 m_incref(struct mbuf *m)
1900 {
1901 uint16_t new = os_atomic_inc(&MEXT_REF(m), relaxed);
1902
1903 VERIFY(new != 0);
1904 /*
1905 * If cluster is shared, mark it with (sticky) EXTF_READONLY;
1906 * we don't clear the flag when the refcount goes back to the
1907 * minimum, to simplify code calling m_mclhasreference().
1908 */
1909 if (new > (MEXT_MINREF(m) + 1) && !(MEXT_FLAGS(m) & EXTF_READONLY)) {
1910 os_atomic_or(&MEXT_FLAGS(m), EXTF_READONLY, relaxed);
1911 }
1912 }
1913
1914 static inline uint16_t
1915 m_decref(struct mbuf *m)
1916 {
1917 VERIFY(MEXT_REF(m) != 0);
1918
1919 return os_atomic_dec(&MEXT_REF(m), acq_rel);
1920 }
1921
1922 static void
1923 mbuf_table_init(void)
1924 {
1925 unsigned int b, c, s;
1926 int m, config_mbuf_jumbo = 0;
1927
1928 omb_stat = zalloc_permanent(OMB_STAT_SIZE(NELEM(mbuf_table)),
1929 ZALIGN(struct omb_stat));
1930
1931 mb_stat = zalloc_permanent(MB_STAT_SIZE(NELEM(mbuf_table)),
1932 ZALIGN(mb_stat_t));
1933
1934 mb_stat->mbs_cnt = NELEM(mbuf_table);
1935 for (m = 0; m < NELEM(mbuf_table); m++) {
1936 mbuf_table[m].mtbl_stats = &mb_stat->mbs_class[m];
1937 }
1938
1939 #if CONFIG_MBUF_JUMBO
1940 config_mbuf_jumbo = 1;
1941 #endif /* CONFIG_MBUF_JUMBO */
1942
1943 if (config_mbuf_jumbo == 1 || PAGE_SIZE == M16KCLBYTES) {
1944 /*
1945 * Set aside 1/3 of the mbuf cluster map for jumbo
1946 * clusters; we do this only on platforms where jumbo
1947 * cluster pool is enabled.
1948 */
1949 njcl = nmbclusters / 3;
1950 njclbytes = M16KCLBYTES;
1951 }
1952
1953 /*
1954 * nclusters holds both the 2KB and 4KB pools, so ensure it's
1955 * a multiple of 4KB clusters.
1956 */
1957 nclusters = P2ROUNDDOWN(nmbclusters - njcl, NCLPG);
1958 if (njcl > 0) {
1959 /*
1960 * Each jumbo cluster takes 8 2KB clusters, so make
1961 * sure that the pool size is evenly divisible by 8;
1962 * njcl is in 2KB unit, hence treated as such.
1963 */
1964 njcl = P2ROUNDDOWN(nmbclusters - nclusters, NCLPJCL);
1965
1966 /* Update nclusters with rounded down value of njcl */
1967 nclusters = P2ROUNDDOWN(nmbclusters - njcl, NCLPG);
1968 }
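/*
 * Worked example (illustrative values only, assuming 4KB pages so
 * NCLPG == 2, and NCLPJCL == 8): with nmbclusters == 32768 (64 MB),
 * the first pass gives njcl = 32768 / 3 = 10922 and nclusters =
 * P2ROUNDDOWN(32768 - 10922, 2) = 21846; njcl is then rounded down
 * to P2ROUNDDOWN(32768 - 21846, 8) = 10920, and nclusters is
 * recomputed as P2ROUNDDOWN(32768 - 10920, 2) = 21848.
 */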
1969
1970 /*
1971 * njcl is valid only on platforms with 16KB jumbo clusters or
1972 * with 16KB pages, where it is configured to 1/3 of the pool
1973 * size. On these platforms, the remainder is used for 2KB
1974 * and 4KB clusters. On platforms without 16KB jumbo clusters,
1975 * the entire pool is used for both 2KB and 4KB clusters. A 4KB
1976 * cluster can be split either into 16 mbufs or into 2 2KB
1977 * clusters.
1978 *
1979 * +---+---+------------ ... -----------+------- ... -------+
1980 * | c | b | s | njcl |
1981 * +---+---+------------ ... -----------+------- ... -------+
1982 *
1983 * 1/32nd of the shared region is reserved for pure 2KB and 4KB
1984 * clusters (1/64th each).
1985 */
1986 c = P2ROUNDDOWN((nclusters >> 6), NCLPG); /* in 2KB unit */
1987 b = P2ROUNDDOWN((nclusters >> (6 + NCLPBGSHIFT)), NBCLPG); /* in 4KB unit */
1988 s = nclusters - (c + (b << NCLPBGSHIFT)); /* in 2KB unit */
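/*
 * Worked example (illustrative, assuming 4KB pages so NCLPG == 2,
 * NBCLPG == 1 and NCLPBGSHIFT == 1) with nclusters == 32768:
 * c = P2ROUNDDOWN(32768 >> 6, 2) = 512 (2KB clusters),
 * b = P2ROUNDDOWN(32768 >> 7, 1) = 256 (4KB clusters), and
 * s = 32768 - (512 + (256 << 1)) = 31744 2KB units of
 * general-purpose space.
 */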
1989
1990 /*
1991 * 1/64th (c) is reserved for 2KB clusters.
1992 */
1993 m_minlimit(MC_CL) = c;
1994 m_maxlimit(MC_CL) = s + c; /* in 2KB unit */
1995 m_maxsize(MC_CL) = m_size(MC_CL) = MCLBYTES;
1996 snprintf(m_cname(MC_CL), MAX_MBUF_CNAME, "cl");
1997
1998 /*
1999 * Another 1/64th (b) of the map is reserved for 4KB clusters.
2000 * It cannot be turned into 2KB clusters or mbufs.
2001 */
2002 m_minlimit(MC_BIGCL) = b;
2003 m_maxlimit(MC_BIGCL) = (s >> NCLPBGSHIFT) + b; /* in 4KB unit */
2004 m_maxsize(MC_BIGCL) = m_size(MC_BIGCL) = MBIGCLBYTES;
2005 snprintf(m_cname(MC_BIGCL), MAX_MBUF_CNAME, "bigcl");
2006
2007 /*
2008 * The remaining 31/32nds (s) are all-purpose (mbufs, 2KB, or 4KB)
2009 */
2010 m_minlimit(MC_MBUF) = 0;
2011 m_maxlimit(MC_MBUF) = s * NMBPCL; /* in mbuf unit */
2012 m_maxsize(MC_MBUF) = m_size(MC_MBUF) = _MSIZE;
2013 snprintf(m_cname(MC_MBUF), MAX_MBUF_CNAME, "mbuf");
2014
2015 /*
2016 * Set limits for the composite classes.
2017 */
2018 m_minlimit(MC_MBUF_CL) = 0;
2019 m_maxlimit(MC_MBUF_CL) = m_maxlimit(MC_CL);
2020 m_maxsize(MC_MBUF_CL) = MCLBYTES;
2021 m_size(MC_MBUF_CL) = m_size(MC_MBUF) + m_size(MC_CL);
2022 snprintf(m_cname(MC_MBUF_CL), MAX_MBUF_CNAME, "mbuf_cl");
2023
2024 m_minlimit(MC_MBUF_BIGCL) = 0;
2025 m_maxlimit(MC_MBUF_BIGCL) = m_maxlimit(MC_BIGCL);
2026 m_maxsize(MC_MBUF_BIGCL) = MBIGCLBYTES;
2027 m_size(MC_MBUF_BIGCL) = m_size(MC_MBUF) + m_size(MC_BIGCL);
2028 snprintf(m_cname(MC_MBUF_BIGCL), MAX_MBUF_CNAME, "mbuf_bigcl");
2029
2030 /*
2031 * And for jumbo classes.
2032 */
2033 m_minlimit(MC_16KCL) = 0;
2034 m_maxlimit(MC_16KCL) = (njcl >> NCLPJCLSHIFT); /* in 16KB unit */
2035 m_maxsize(MC_16KCL) = m_size(MC_16KCL) = M16KCLBYTES;
2036 snprintf(m_cname(MC_16KCL), MAX_MBUF_CNAME, "16kcl");
2037
2038 m_minlimit(MC_MBUF_16KCL) = 0;
2039 m_maxlimit(MC_MBUF_16KCL) = m_maxlimit(MC_16KCL);
2040 m_maxsize(MC_MBUF_16KCL) = M16KCLBYTES;
2041 m_size(MC_MBUF_16KCL) = m_size(MC_MBUF) + m_size(MC_16KCL);
2042 snprintf(m_cname(MC_MBUF_16KCL), MAX_MBUF_CNAME, "mbuf_16kcl");
2043
2044 /*
2045 * Initialize the legacy mbstat structure.
2046 */
2047 bzero(&mbstat, sizeof(mbstat));
2048 mbstat.m_msize = m_maxsize(MC_MBUF);
2049 mbstat.m_mclbytes = m_maxsize(MC_CL);
2050 mbstat.m_minclsize = MINCLSIZE;
2051 mbstat.m_mlen = MLEN;
2052 mbstat.m_mhlen = MHLEN;
2053 mbstat.m_bigmclbytes = m_maxsize(MC_BIGCL);
2054 }
2055
2056 int
2057 mbuf_get_class(struct mbuf *m)
2058 {
2059 if (m->m_flags & M_EXT) {
2060 uint32_t composite = (MEXT_FLAGS(m) & EXTF_COMPOSITE);
2061 m_ext_free_func_t m_free_func = m_get_ext_free(m);
2062
2063 if (m_free_func == NULL) {
2064 if (composite) {
2065 return MC_MBUF_CL;
2066 } else {
2067 return MC_CL;
2068 }
2069 } else if (m_free_func == m_bigfree) {
2070 if (composite) {
2071 return MC_MBUF_BIGCL;
2072 } else {
2073 return MC_BIGCL;
2074 }
2075 } else if (m_free_func == m_16kfree) {
2076 if (composite) {
2077 return MC_MBUF_16KCL;
2078 } else {
2079 return MC_16KCL;
2080 }
2081 }
2082 }
2083
2084 return MC_MBUF;
2085 }
2086
2087 bool
2088 mbuf_class_under_pressure(struct mbuf *m)
2089 {
2090 int mclass = mbuf_get_class(m);
2091
2092 #if CONFIG_MBUF_MCACHE
2093 if (m_total(mclass) - m_infree(mclass) >= (m_maxlimit(mclass) * mb_memory_pressure_percentage) / 100) {
2094 /*
2095 * The above computation does not include the per-CPU cached objects.
2096 * As a fast-path check this is good enough. But now we do
2097 * the "slower" count of the cached objects to know exactly the
2098 * number of active mbufs in use.
2099 *
2100 * We do not take mbuf_mlock here to avoid lock contention. Numbers
2101 * might be slightly off but we don't try to be 100% accurate.
2102 * At worst, we drop a packet that we shouldn't have dropped or
2103 * we might go slightly above our memory-pressure threshold.
2104 */
2105 mcache_t *cp = m_cache(mclass);
2106 mcache_cpu_t *ccp = &cp->mc_cpu[0];
2107
2108 int bktsize = os_access_once(ccp->cc_bktsize);
2109 uint32_t bl_total = os_access_once(cp->mc_full.bl_total);
2110 uint32_t cached = 0;
2111 int i;
2112
2113 for (i = 0; i < ncpu; i++) {
2114 ccp = &cp->mc_cpu[i];
2115
2116 int cc_objs = os_access_once(ccp->cc_objs);
2117 if (cc_objs > 0) {
2118 cached += cc_objs;
2119 }
2120
2121 int cc_pobjs = os_access_once(ccp->cc_pobjs);
2122 if (cc_pobjs > 0) {
2123 cached += cc_pobjs;
2124 }
2125 }
2126 cached += (bl_total * bktsize);
2127 if (m_total(mclass) - m_infree(mclass) - cached >= (m_maxlimit(mclass) * mb_memory_pressure_percentage) / 100) {
2128 os_log(OS_LOG_DEFAULT,
2129 "%s memory-pressure on mbuf due to class %u, total %u free %u cached %u max %u",
2130 __func__, mclass, m_total(mclass), m_infree(mclass), cached, m_maxlimit(mclass));
2131 return true;
2132 }
2133 }
2134 #else
2135 /*
2136 * Grab the statistics from zalloc.
2137 * We can't call mbuf_stat_sync() since that requires a lock.
2138 */
2139 const zone_id_t zid = m_class_to_zid(m_class(mclass));
2140 const zone_t zone = zone_by_id(zid);
2141 struct zone_basic_stats stats = {};
2142
2143 zone_get_stats(zone, &stats);
2144 if (stats.zbs_avail - stats.zbs_free >= (m_maxlimit(mclass) * mb_memory_pressure_percentage) / 100) {
2145 os_log(OS_LOG_DEFAULT,
2146 "%s memory-pressure on mbuf due to class %u, total %llu free %llu max %u",
2147 __func__, mclass, stats.zbs_avail, stats.zbs_free, m_maxlimit(mclass));
2148 return true;
2149 }
2150 #endif /* CONFIG_MBUF_MCACHE */
2151
2152 return false;
2153 }
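/*
 * Example of the threshold arithmetic above (illustrative numbers):
 * with mb_memory_pressure_percentage == 80 and m_maxlimit(class) ==
 * 32768, the class is reported as being under pressure once more than
 * (32768 * 80) / 100 == 26214 of its objects are in active use.
 */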
2154
2155 #if defined(__LP64__)
2156 typedef struct ncl_tbl {
2157 uint64_t nt_maxmem; /* memory (sane) size */
2158 uint32_t nt_mbpool; /* mbuf pool size */
2159 } ncl_tbl_t;
2160
2161 static const ncl_tbl_t ncl_table[] = {
2162 { (1ULL << GBSHIFT) /* 1 GB */, (64 << MBSHIFT) /* 64 MB */ },
2163 { (1ULL << (GBSHIFT + 2)) /* 4 GB */, (96 << MBSHIFT) /* 96 MB */ },
2164 { (1ULL << (GBSHIFT + 3)) /* 8 GB */, (128 << MBSHIFT) /* 128 MB */ },
2165 { (1ULL << (GBSHIFT + 4)) /* 16 GB */, (256 << MBSHIFT) /* 256 MB */ },
2166 { (1ULL << (GBSHIFT + 5)) /* 32 GB */, (512 << MBSHIFT) /* 512 MB */ },
2167 { 0, 0 }
2168 };
2169 #endif /* __LP64__ */
2170
2171 __private_extern__ unsigned int
2172 mbuf_default_ncl(uint64_t mem)
2173 {
2174 #if !defined(__LP64__)
2175 unsigned int n;
2176 /*
2177 * 32-bit kernel (default to 64MB of mbuf pool for >= 1GB RAM).
2178 */
2179 if ((n = ((mem / 16) / MCLBYTES)) > 32768) {
2180 n = 32768;
2181 }
2182 #else
2183 unsigned int n, i;
2184 /*
2185 * 64-bit kernel (mbuf pool size based on table).
2186 */
2187 n = ncl_table[0].nt_mbpool;
2188 for (i = 0; ncl_table[i].nt_mbpool != 0; i++) {
2189 if (mem < ncl_table[i].nt_maxmem) {
2190 break;
2191 }
2192 n = ncl_table[i].nt_mbpool;
2193 }
2194 n >>= MCLSHIFT;
2195 #endif /* !__LP64__ */
2196 return n;
2197 }
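/*
 * Worked example (illustrative): on a 64-bit system with 8 GB of
 * memory, the table walk above stops at the 16 GB row, leaving
 * n == 128 MB; shifting right by MCLSHIFT (2KB units) then yields
 * a default of 65536 clusters.
 */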
2198
2199 __private_extern__ void
2200 mbinit(void)
2201 {
2202 unsigned int m;
2203 #if CONFIG_MBUF_MCACHE
2204 unsigned int initmcl = 0;
2205 thread_t thread = THREAD_NULL;
2206 #endif /* CONFIG_MBUF_MCACHE */
2207
2208 #if CONFIG_MBUF_MCACHE
2209 microuptime(&mb_start);
2210 #endif /* CONFIG_MBUF_MCACHE */
2211
2212 /*
2213 * These MBUF_ values must be equal to their private counterparts.
2214 */
2215 _CASSERT(MBUF_EXT == M_EXT);
2216 _CASSERT(MBUF_PKTHDR == M_PKTHDR);
2217 _CASSERT(MBUF_EOR == M_EOR);
2218 _CASSERT(MBUF_LOOP == M_LOOP);
2219 _CASSERT(MBUF_BCAST == M_BCAST);
2220 _CASSERT(MBUF_MCAST == M_MCAST);
2221 _CASSERT(MBUF_FRAG == M_FRAG);
2222 _CASSERT(MBUF_FIRSTFRAG == M_FIRSTFRAG);
2223 _CASSERT(MBUF_LASTFRAG == M_LASTFRAG);
2224 _CASSERT(MBUF_PROMISC == M_PROMISC);
2225 _CASSERT(MBUF_HASFCS == M_HASFCS);
2226
2227 _CASSERT(MBUF_TYPE_FREE == MT_FREE);
2228 _CASSERT(MBUF_TYPE_DATA == MT_DATA);
2229 _CASSERT(MBUF_TYPE_HEADER == MT_HEADER);
2230 _CASSERT(MBUF_TYPE_SOCKET == MT_SOCKET);
2231 _CASSERT(MBUF_TYPE_PCB == MT_PCB);
2232 _CASSERT(MBUF_TYPE_RTABLE == MT_RTABLE);
2233 _CASSERT(MBUF_TYPE_HTABLE == MT_HTABLE);
2234 _CASSERT(MBUF_TYPE_ATABLE == MT_ATABLE);
2235 _CASSERT(MBUF_TYPE_SONAME == MT_SONAME);
2236 _CASSERT(MBUF_TYPE_SOOPTS == MT_SOOPTS);
2237 _CASSERT(MBUF_TYPE_FTABLE == MT_FTABLE);
2238 _CASSERT(MBUF_TYPE_RIGHTS == MT_RIGHTS);
2239 _CASSERT(MBUF_TYPE_IFADDR == MT_IFADDR);
2240 _CASSERT(MBUF_TYPE_CONTROL == MT_CONTROL);
2241 _CASSERT(MBUF_TYPE_OOBDATA == MT_OOBDATA);
2242
2243 _CASSERT(MBUF_TSO_IPV4 == CSUM_TSO_IPV4);
2244 _CASSERT(MBUF_TSO_IPV6 == CSUM_TSO_IPV6);
2245 _CASSERT(MBUF_CSUM_REQ_SUM16 == CSUM_PARTIAL);
2246 _CASSERT(MBUF_CSUM_TCP_SUM16 == MBUF_CSUM_REQ_SUM16);
2247 _CASSERT(MBUF_CSUM_REQ_ZERO_INVERT == CSUM_ZERO_INVERT);
2248 _CASSERT(MBUF_CSUM_REQ_IP == CSUM_IP);
2249 _CASSERT(MBUF_CSUM_REQ_TCP == CSUM_TCP);
2250 _CASSERT(MBUF_CSUM_REQ_UDP == CSUM_UDP);
2251 _CASSERT(MBUF_CSUM_REQ_TCPIPV6 == CSUM_TCPIPV6);
2252 _CASSERT(MBUF_CSUM_REQ_UDPIPV6 == CSUM_UDPIPV6);
2253 _CASSERT(MBUF_CSUM_DID_IP == CSUM_IP_CHECKED);
2254 _CASSERT(MBUF_CSUM_IP_GOOD == CSUM_IP_VALID);
2255 _CASSERT(MBUF_CSUM_DID_DATA == CSUM_DATA_VALID);
2256 _CASSERT(MBUF_CSUM_PSEUDO_HDR == CSUM_PSEUDO_HDR);
2257
2258 _CASSERT(MBUF_WAITOK == M_WAIT);
2259 _CASSERT(MBUF_DONTWAIT == M_DONTWAIT);
2260 _CASSERT(MBUF_COPYALL == M_COPYALL);
2261
2262 _CASSERT(MBUF_SC2TC(MBUF_SC_BK_SYS) == MBUF_TC_BK);
2263 _CASSERT(MBUF_SC2TC(MBUF_SC_BK) == MBUF_TC_BK);
2264 _CASSERT(MBUF_SC2TC(MBUF_SC_BE) == MBUF_TC_BE);
2265 _CASSERT(MBUF_SC2TC(MBUF_SC_RD) == MBUF_TC_BE);
2266 _CASSERT(MBUF_SC2TC(MBUF_SC_OAM) == MBUF_TC_BE);
2267 _CASSERT(MBUF_SC2TC(MBUF_SC_AV) == MBUF_TC_VI);
2268 _CASSERT(MBUF_SC2TC(MBUF_SC_RV) == MBUF_TC_VI);
2269 _CASSERT(MBUF_SC2TC(MBUF_SC_VI) == MBUF_TC_VI);
2270 _CASSERT(MBUF_SC2TC(MBUF_SC_SIG) == MBUF_TC_VI);
2271 _CASSERT(MBUF_SC2TC(MBUF_SC_VO) == MBUF_TC_VO);
2272 _CASSERT(MBUF_SC2TC(MBUF_SC_CTL) == MBUF_TC_VO);
2273
2274 _CASSERT(MBUF_TC2SCVAL(MBUF_TC_BK) == SCVAL_BK);
2275 _CASSERT(MBUF_TC2SCVAL(MBUF_TC_BE) == SCVAL_BE);
2276 _CASSERT(MBUF_TC2SCVAL(MBUF_TC_VI) == SCVAL_VI);
2277 _CASSERT(MBUF_TC2SCVAL(MBUF_TC_VO) == SCVAL_VO);
2278
2279 /* Module specific scratch space (32-bit alignment requirement) */
2280 _CASSERT(!(offsetof(struct mbuf, m_pkthdr.pkt_mpriv) %
2281 sizeof(uint32_t)));
2282
2283 /* pktdata needs to start at 128-bit offset! */
2284 _CASSERT((offsetof(struct mbuf, m_pktdat) % 16) == 0);
2285
2286 /* Initialize random red zone cookie value */
2287 _CASSERT(sizeof(mb_redzone_cookie) ==
2288 sizeof(((struct pkthdr *)0)->redzone));
2289 read_random(&mb_redzone_cookie, sizeof(mb_redzone_cookie));
2290 read_random(&mb_obscure_extref, sizeof(mb_obscure_extref));
2291 read_random(&mb_obscure_extfree, sizeof(mb_obscure_extfree));
2292 mb_obscure_extref |= 0x3;
2293 mb_obscure_extref = 0;
2294 mb_obscure_extfree |= 0x3;
2295
2296 #if CONFIG_MBUF_MCACHE
2297 /* Make sure we don't save more than we should */
2298 _CASSERT(MCA_SAVED_MBUF_SIZE <= sizeof(struct mbuf));
2299 #endif /* CONFIG_MBUF_MCACHE */
2300
2301 if (nmbclusters == 0) {
2302 nmbclusters = NMBCLUSTERS;
2303 }
2304
2305 /* This should be a sane (at least even) value by now */
2306 VERIFY(nmbclusters != 0 && !(nmbclusters & 0x1));
2307
2308 /* Setup the mbuf table */
2309 mbuf_table_init();
2310
2311 _CASSERT(sizeof(struct mbuf) == _MSIZE);
2312
2313 #if CONFIG_MBUF_MCACHE
2314 /*
2315 * Allocate cluster slabs table:
2316 *
2317 * maxslabgrp = (N * 2048) / (1024 * 1024)
2318 *
2319 * Where N is nmbclusters rounded up to the nearest 512. This yields
2320 * mcl_slab_g_t units, each one representing 1 MB of memory.
2321 */
2322 maxslabgrp =
2323 (P2ROUNDUP(nmbclusters, (MBSIZE >> MCLSHIFT)) << MCLSHIFT) >> MBSHIFT;
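/*
 * Worked example (illustrative): with nmbclusters == 32768,
 * P2ROUNDUP(32768, 512) == 32768, which shifted left by MCLSHIFT is
 * 64 MB and shifted right by MBSHIFT gives maxslabgrp == 64, i.e.
 * 64 slab-group entries, each covering 1 MB of cluster memory.
 */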
2324 slabstbl = zalloc_permanent(maxslabgrp * sizeof(mcl_slabg_t *),
2325 ZALIGN(mcl_slabg_t));
2326
2327 /*
2328 * Allocate audit structures, if needed:
2329 *
2330 * maxclaudit = (maxslabgrp * 1024 * 1024) / PAGE_SIZE
2331 *
2332 * This yields mcl_audit_t units, each one representing a page.
2333 */
2334 PE_parse_boot_argn("mbuf_debug", &mbuf_debug, sizeof(mbuf_debug));
2335 mbuf_debug |= mcache_getflags();
2336 if (mbuf_debug & MCF_DEBUG) {
2337 int l;
2338 mcl_audit_t *mclad;
2339 maxclaudit = ((maxslabgrp << MBSHIFT) >> PAGE_SHIFT);
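/*
 * Continuing the illustrative example above, and assuming a 4KB
 * page size: maxslabgrp == 64 covers 64 MB, so maxclaudit ==
 * (64 << 20) >> 12 == 16384 audit entries, one per page.
 */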
2340 mclaudit = zalloc_permanent(maxclaudit * sizeof(*mclaudit),
2341 ZALIGN(mcl_audit_t));
2342 for (l = 0, mclad = mclaudit; l < maxclaudit; l++) {
2343 mclad[l].cl_audit = zalloc_permanent(NMBPG * sizeof(mcache_audit_t *),
2344 ZALIGN_PTR);
2345 }
2346
2347 mcl_audit_con_cache = mcache_create("mcl_audit_contents",
2348 AUDIT_CONTENTS_SIZE, sizeof(u_int64_t), 0, MCR_SLEEP);
2349 VERIFY(mcl_audit_con_cache != NULL);
2350 }
2351 mclverify = (mbuf_debug & MCF_VERIFY);
2352 mcltrace = (mbuf_debug & MCF_TRACE);
2353 mclfindleak = !(mbuf_debug & MCF_NOLEAKLOG);
2354 mclexpleak = mclfindleak && (mbuf_debug & MCF_EXPLEAKLOG);
2355
2356 /* Enable mbuf leak logging, with a lock to protect the tables */
2357
2358 mleak_activate();
2359
2360 /*
2361 * Allocate structure for per-CPU statistics that's aligned
2362 * on the CPU cache boundary; this code assumes that we never
2363 * uninitialize this framework, since the original address
2364 * before alignment is not saved.
2365 */
2366 ncpu = ml_wait_max_cpus();
2367
2368 /* Calculate the number of pages assigned to the cluster pool */
2369 mcl_pages = (nmbclusters << MCLSHIFT) / PAGE_SIZE;
2370 mcl_paddr = zalloc_permanent(mcl_pages * sizeof(ppnum_t),
2371 ZALIGN(ppnum_t));
2372
2373 /* Register with the I/O Bus mapper */
2374 mcl_paddr_base = IOMapperIOVMAlloc(mcl_pages);
2375
2376 embutl = (mbutl + (nmbclusters * MCLBYTES));
2377 VERIFY(((embutl - mbutl) % MBIGCLBYTES) == 0);
2378
2379 /* Prime up the freelist */
2380 PE_parse_boot_argn("initmcl", &initmcl, sizeof(initmcl));
2381 if (initmcl != 0) {
2382 initmcl >>= NCLPBGSHIFT; /* become a 4K unit */
2383 if (initmcl > m_maxlimit(MC_BIGCL)) {
2384 initmcl = m_maxlimit(MC_BIGCL);
2385 }
2386 }
2387 if (initmcl < m_minlimit(MC_BIGCL)) {
2388 initmcl = m_minlimit(MC_BIGCL);
2389 }
2390
2391 lck_mtx_lock(mbuf_mlock);
2392
2393 /*
2394 * For classes with non-zero minimum limits, populate their freelists
2395 * so that m_total(class) is at least m_minlimit(class).
2396 */
2397 VERIFY(m_total(MC_BIGCL) == 0 && m_minlimit(MC_BIGCL) != 0);
2398 freelist_populate(m_class(MC_BIGCL), initmcl, M_WAIT);
2399 VERIFY(m_total(MC_BIGCL) >= m_minlimit(MC_BIGCL));
2400 freelist_init(m_class(MC_CL));
2401 #else
2402 /*
2403 * We have yet to create the non-composite zones
2404 * and thus we haven't asked zalloc to allocate
2405 * anything yet, which means that at this point
2406 * m_total() is zero. Once we create the zones and
2407 * raise the reserve, m_total() will be calculated,
2408 * but until then just assume that we will have
2409 * at least the minimum limit allocated.
2410 */
2411 m_total(MC_BIGCL) = m_minlimit(MC_BIGCL);
2412 m_total(MC_CL) = m_minlimit(MC_CL);
2413 #endif /* CONFIG_MBUF_MCACHE */
2414
2415 for (m = 0; m < NELEM(mbuf_table); m++) {
2416 /* Make sure we didn't miss any */
2417 VERIFY(m_minlimit(m_class(m)) == 0 ||
2418 m_total(m_class(m)) >= m_minlimit(m_class(m)));
2419
2420 /* populate the initial sizes and report from there on */
2421 m_peak(m_class(m)) = m_total(m_class(m));
2422 }
2423 mb_peak_newreport = FALSE;
2424
2425 #if CONFIG_MBUF_MCACHE
2426 lck_mtx_unlock(mbuf_mlock);
2427
2428 (void) kernel_thread_start((thread_continue_t)mbuf_worker_thread_init,
2429 NULL, &thread);
2430 thread_deallocate(thread);
2431
2432 ref_cache = mcache_create("mext_ref", sizeof(struct ext_ref),
2433 0, 0, MCR_SLEEP);
2434 #endif /* CONFIG_MBUF_MCACHE */
2435
2436 /* Create the cache for each class */
2437 for (m = 0; m < NELEM(mbuf_table); m++) {
2438 #if CONFIG_MBUF_MCACHE
2439 void *allocfunc, *freefunc, *auditfunc, *logfunc;
2440 u_int32_t flags;
2441
2442 flags = mbuf_debug;
2443 if (m_class(m) == MC_MBUF_CL || m_class(m) == MC_MBUF_BIGCL ||
2444 m_class(m) == MC_MBUF_16KCL) {
2445 allocfunc = mbuf_cslab_alloc;
2446 freefunc = mbuf_cslab_free;
2447 auditfunc = mbuf_cslab_audit;
2448 logfunc = mleak_logger;
2449 } else {
2450 allocfunc = mbuf_slab_alloc;
2451 freefunc = mbuf_slab_free;
2452 auditfunc = mbuf_slab_audit;
2453 logfunc = mleak_logger;
2454 }
2455
2456 /*
2457 * Disable per-CPU caches for jumbo classes if there
2458 * is no jumbo cluster pool available in the system.
2459 * The cache itself is still created (but will never
2460 * be populated) since it simplifies the code.
2461 */
2462 if ((m_class(m) == MC_MBUF_16KCL || m_class(m) == MC_16KCL) &&
2463 njcl == 0) {
2464 flags |= MCF_NOCPUCACHE;
2465 }
2466
2467 if (!mclfindleak) {
2468 flags |= MCF_NOLEAKLOG;
2469 }
2470
2471 m_cache(m) = mcache_create_ext(m_cname(m), m_maxsize(m),
2472 allocfunc, freefunc, auditfunc, logfunc, mbuf_slab_notify,
2473 (void *)(uintptr_t)m, flags, MCR_SLEEP);
2474 #else
2475 if (!MBUF_CLASS_COMPOSITE(m)) {
2476 zone_t zone = zone_by_id(m_class_to_zid(m));
2477
2478 zone_set_exhaustible(zone, m_maxlimit(m));
2479 zone_raise_reserve(zone, m_minlimit(m));
2480 /*
2481 * Pretend that we have allocated m_total() items
2482 * at this point. zalloc will eventually do that
2483 * but it's an async operation.
2484 */
2485 m_total(m) = m_minlimit(m);
2486 }
2487 #endif /* CONFIG_MBUF_MCACHE */
2488 }
2489
2490 /*
2491 * Set the max limit on sb_max to be 1/16 th of the size of
2492 * memory allocated for mbuf clusters.
2493 */
2494 high_sb_max = (nmbclusters << (MCLSHIFT - 4));
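/*
 * Illustrative arithmetic: nmbclusters << (MCLSHIFT - 4) is
 * (nmbclusters * 2048) / 16, i.e. one sixteenth of the cluster pool;
 * a 64 MB pool (nmbclusters == 32768) gives high_sb_max == 4 MB.
 */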
2495 if (high_sb_max < sb_max) {
2496 /* sb_max is too large for this configuration, scale it down */
2497 if (high_sb_max > (1 << MBSHIFT)) {
2498 /* We have at least 16 MB of mbuf pool */
2499 sb_max = high_sb_max;
2500 } else if ((nmbclusters << MCLSHIFT) > (1 << MBSHIFT)) {
2501 /*
2502 * If we have more than 1 MB of mbuf pool, cap the size of
2503 * the max socket buffer at 1 MB.
2504 */
2505 sb_max = high_sb_max = (1 << MBSHIFT);
2506 } else {
2507 sb_max = high_sb_max;
2508 }
2509 sb_max_adj = SB_MAX_ADJUST(sb_max);
2510 assert(sb_max_adj < UINT32_MAX);
2511 }
2512
2513 #if CONFIG_MBUF_MCACHE
2514 /* allocate space for mbuf_dump_buf */
2515 mbuf_dump_buf = zalloc_permanent(MBUF_DUMP_BUF_SIZE, ZALIGN_NONE);
2516
2517 if (mbuf_debug & MCF_DEBUG) {
2518 printf("%s: MLEN %d, MHLEN %d\n", __func__,
2519 (int)_MLEN, (int)_MHLEN);
2520 }
2521 #else
2522 mbuf_defunct_tcall =
2523 thread_call_allocate_with_options(mbuf_watchdog_defunct,
2524 NULL,
2525 THREAD_CALL_PRIORITY_KERNEL,
2526 THREAD_CALL_OPTIONS_ONCE);
2527 #endif /* CONFIG_MBUF_MCACHE */
2528 printf("%s: done [%d MB total pool size, (%d/%d) split]\n", __func__,
2529 (nmbclusters << MCLSHIFT) >> MBSHIFT,
2530 (nclusters << MCLSHIFT) >> MBSHIFT,
2531 (njcl << MCLSHIFT) >> MBSHIFT);
2532
2533 PE_parse_boot_argn("mb_tag_mbuf", &mb_tag_mbuf, sizeof(mb_tag_mbuf));
2534 }
2535
2536 #if CONFIG_MBUF_MCACHE
2537 /*
2538 * Obtain a slab of object(s) from the class's freelist.
2539 */
2540 static mcache_obj_t *
2541 slab_alloc(mbuf_class_t class, int wait)
2542 {
2543 mcl_slab_t *sp;
2544 mcache_obj_t *buf;
2545
2546 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
2547
2548 /* This should always be NULL for us */
2549 VERIFY(m_cobjlist(class) == NULL);
2550
2551 /*
2552 * Treat composite objects as having a longer lifespan by using
2553 * a slab from the reverse direction, in the hope that this
2554 * reduces the probability of fragmentation for slabs that hold
2555 * more than one buffer chunk (e.g. mbuf slabs). For other
2556 * slabs, this probably doesn't make much of a difference.
2557 */
2558 if ((class == MC_MBUF || class == MC_CL || class == MC_BIGCL)
2559 && (wait & MCR_COMP)) {
2560 sp = (mcl_slab_t *)TAILQ_LAST(&m_slablist(class), mcl_slhead);
2561 } else {
2562 sp = (mcl_slab_t *)TAILQ_FIRST(&m_slablist(class));
2563 }
2564
2565 if (sp == NULL) {
2566 VERIFY(m_infree(class) == 0 && m_slab_cnt(class) == 0);
2567 /* The slab list for this class is empty */
2568 return NULL;
2569 }
2570
2571 VERIFY(m_infree(class) > 0);
2572 VERIFY(!slab_is_detached(sp));
2573 VERIFY(sp->sl_class == class &&
2574 (sp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) == SLF_MAPPED);
2575 buf = sp->sl_head;
2576 VERIFY(slab_inrange(sp, buf) && sp == slab_get(buf));
2577 sp->sl_head = buf->obj_next;
2578 /* Increment slab reference */
2579 sp->sl_refcnt++;
2580
2581 VERIFY(sp->sl_head != NULL || sp->sl_refcnt == sp->sl_chunks);
2582
2583 if (sp->sl_head != NULL && !slab_inrange(sp, sp->sl_head)) {
2584 slab_nextptr_panic(sp, sp->sl_head);
2585 /* In case sl_head is in the map but not in the slab */
2586 VERIFY(slab_inrange(sp, sp->sl_head));
2587 /* NOTREACHED */
2588 }
2589
2590 if (mclaudit != NULL) {
2591 mcache_audit_t *mca = mcl_audit_buf2mca(class, buf);
2592 mca->mca_uflags = 0;
2593 /* Save contents on mbuf objects only */
2594 if (class == MC_MBUF) {
2595 mca->mca_uflags |= MB_SCVALID;
2596 }
2597 }
2598
2599 if (class == MC_CL) {
2600 mbstat.m_clfree = (--m_infree(MC_CL)) + m_infree(MC_MBUF_CL);
2601 /*
2602 * A 2K cluster slab can have at most NCLPG references.
2603 */
2604 VERIFY(sp->sl_refcnt >= 1 && sp->sl_refcnt <= NCLPG &&
2605 sp->sl_chunks == NCLPG && sp->sl_len == PAGE_SIZE);
2606 VERIFY(sp->sl_refcnt < NCLPG || sp->sl_head == NULL);
2607 } else if (class == MC_BIGCL) {
2608 mbstat.m_bigclfree = (--m_infree(MC_BIGCL)) +
2609 m_infree(MC_MBUF_BIGCL);
2610 /*
2611 * A 4K cluster slab can have NBCLPG references.
2612 */
2613 VERIFY(sp->sl_refcnt >= 1 && sp->sl_chunks == NBCLPG &&
2614 sp->sl_len == PAGE_SIZE &&
2615 (sp->sl_refcnt < NBCLPG || sp->sl_head == NULL));
2616 } else if (class == MC_16KCL) {
2617 mcl_slab_t *nsp;
2618 int k;
2619
2620 --m_infree(MC_16KCL);
2621 VERIFY(sp->sl_refcnt == 1 && sp->sl_chunks == 1 &&
2622 sp->sl_len == m_maxsize(class) && sp->sl_head == NULL);
2623 /*
2624 * Increment 2nd-Nth slab reference, where N is NSLABSP16KB.
2625 * A 16KB big cluster takes NSLABSP16KB slabs, each having at
2626 * most 1 reference.
2627 */
2628 for (nsp = sp, k = 1; k < NSLABSP16KB; k++) {
2629 nsp = nsp->sl_next;
2630 /* Next slab must already be present */
2631 VERIFY(nsp != NULL);
2632 nsp->sl_refcnt++;
2633 VERIFY(!slab_is_detached(nsp));
2634 VERIFY(nsp->sl_class == MC_16KCL &&
2635 nsp->sl_flags == (SLF_MAPPED | SLF_PARTIAL) &&
2636 nsp->sl_refcnt == 1 && nsp->sl_chunks == 0 &&
2637 nsp->sl_len == 0 && nsp->sl_base == sp->sl_base &&
2638 nsp->sl_head == NULL);
2639 }
2640 } else {
2641 VERIFY(class == MC_MBUF);
2642 --m_infree(MC_MBUF);
2643 /*
2644 * If auditing is turned on, this check is
2645 * deferred until later in mbuf_slab_audit().
2646 */
2647 if (mclaudit == NULL) {
2648 _MCHECK((struct mbuf *)buf);
2649 }
2650 /*
2651 * Since we have incremented the reference count above,
2652 * an mbuf slab (formerly a 4KB cluster slab that was cut
2653 * up into mbufs) must have a reference count between 1
2654 * and NMBPG at this point.
2655 */
2656 VERIFY(sp->sl_refcnt >= 1 && sp->sl_refcnt <= NMBPG &&
2657 sp->sl_chunks == NMBPG &&
2658 sp->sl_len == PAGE_SIZE);
2659 VERIFY(sp->sl_refcnt < NMBPG || sp->sl_head == NULL);
2660 }
2661
2662 /* If empty, remove this slab from the class's freelist */
2663 if (sp->sl_head == NULL) {
2664 VERIFY(class != MC_MBUF || sp->sl_refcnt == NMBPG);
2665 VERIFY(class != MC_CL || sp->sl_refcnt == NCLPG);
2666 VERIFY(class != MC_BIGCL || sp->sl_refcnt == NBCLPG);
2667 slab_remove(sp, class);
2668 }
2669
2670 return buf;
2671 }
2672
2673 /*
2674 * Place a slab of object(s) back into a class's slab list.
2675 */
2676 static void
2677 slab_free(mbuf_class_t class, mcache_obj_t *buf)
2678 {
2679 mcl_slab_t *sp;
2680 boolean_t reinit_supercl = false;
2681 mbuf_class_t super_class;
2682
2683 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
2684
2685 VERIFY(class != MC_16KCL || njcl > 0);
2686 VERIFY(buf->obj_next == NULL);
2687
2688 /*
2689 * Synchronizing with m_clalloc, as it reads m_total, while we here
2690 * are modifying m_total.
2691 */
2692 while (mb_clalloc_busy) {
2693 mb_clalloc_waiters++;
2694 (void) msleep(mb_clalloc_waitchan, mbuf_mlock,
2695 (PZERO - 1), "m_clalloc", NULL);
2696 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
2697 }
2698
2699 /* We are busy now; tell everyone else to go away */
2700 mb_clalloc_busy = TRUE;
2701
2702 sp = slab_get(buf);
2703 VERIFY(sp->sl_class == class && slab_inrange(sp, buf) &&
2704 (sp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) == SLF_MAPPED);
2705
2706 /* Decrement slab reference */
2707 sp->sl_refcnt--;
2708
2709 if (class == MC_CL) {
2710 VERIFY(IS_P2ALIGNED(buf, MCLBYTES));
2711 /*
2712 * A slab that has been split for 2KB clusters can have
2713 * at most NCLPG - 1 outstanding references at this point.
2714 */
2715 VERIFY(sp->sl_refcnt >= 0 && sp->sl_refcnt <= (NCLPG - 1) &&
2716 sp->sl_chunks == NCLPG && sp->sl_len == PAGE_SIZE);
2717 VERIFY(sp->sl_refcnt < (NCLPG - 1) ||
2718 (slab_is_detached(sp) && sp->sl_head == NULL));
2719 } else if (class == MC_BIGCL) {
2720 VERIFY(IS_P2ALIGNED(buf, MBIGCLBYTES));
2721
2722 /* A 4KB cluster slab can have NBCLPG references at most */
2723 VERIFY(sp->sl_refcnt >= 0 && sp->sl_chunks == NBCLPG);
2724 VERIFY(sp->sl_refcnt < (NBCLPG - 1) ||
2725 (slab_is_detached(sp) && sp->sl_head == NULL));
2726 } else if (class == MC_16KCL) {
2727 mcl_slab_t *nsp;
2728 int k;
2729 /*
2730 * A 16KB cluster takes NSLABSP16KB slabs, all of which must
2731 * now have a reference count of 0.
2732 */
2733 VERIFY(IS_P2ALIGNED(buf, PAGE_SIZE));
2734 VERIFY(sp->sl_refcnt == 0 && sp->sl_chunks == 1 &&
2735 sp->sl_len == m_maxsize(class) && sp->sl_head == NULL);
2736 VERIFY(slab_is_detached(sp));
2737 for (nsp = sp, k = 1; k < NSLABSP16KB; k++) {
2738 nsp = nsp->sl_next;
2739 /* Next slab must already be present */
2740 VERIFY(nsp != NULL);
2741 nsp->sl_refcnt--;
2742 VERIFY(slab_is_detached(nsp));
2743 VERIFY(nsp->sl_class == MC_16KCL &&
2744 (nsp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) &&
2745 nsp->sl_refcnt == 0 && nsp->sl_chunks == 0 &&
2746 nsp->sl_len == 0 && nsp->sl_base == sp->sl_base &&
2747 nsp->sl_head == NULL);
2748 }
2749 } else {
2750 /*
2751 * A slab that has been split for mbufs has at most
2752 * NMBPG references. Since we have decremented
2753 * one reference above, it must now be between 0 and
2754 * NMBPG-1.
2755 */
2756 VERIFY(class == MC_MBUF);
2757 VERIFY(sp->sl_refcnt >= 0 &&
2758 sp->sl_refcnt <= (NMBPG - 1) &&
2759 sp->sl_chunks == NMBPG &&
2760 sp->sl_len == PAGE_SIZE);
2761 VERIFY(sp->sl_refcnt < (NMBPG - 1) ||
2762 (slab_is_detached(sp) && sp->sl_head == NULL));
2763 }
2764
2765 /*
2766 * When auditing is enabled, ensure that the buffer still
2767 * contains the free pattern. Otherwise it got corrupted
2768 * while at the CPU cache layer.
2769 */
2770 if (mclaudit != NULL) {
2771 mcache_audit_t *mca = mcl_audit_buf2mca(class, buf);
2772 if (mclverify) {
2773 mcache_audit_free_verify(mca, buf, 0,
2774 m_maxsize(class));
2775 }
2776 mca->mca_uflags &= ~MB_SCVALID;
2777 }
2778
2779 if (class == MC_CL) {
2780 mbstat.m_clfree = (++m_infree(MC_CL)) + m_infree(MC_MBUF_CL);
2781 buf->obj_next = sp->sl_head;
2782 } else if (class == MC_BIGCL) {
2783 mbstat.m_bigclfree = (++m_infree(MC_BIGCL)) +
2784 m_infree(MC_MBUF_BIGCL);
2785 buf->obj_next = sp->sl_head;
2786 } else if (class == MC_16KCL) {
2787 ++m_infree(MC_16KCL);
2788 } else {
2789 ++m_infree(MC_MBUF);
2790 buf->obj_next = sp->sl_head;
2791 }
2792 sp->sl_head = buf;
2793
2794 /*
2795 * If a slab has been split to either one which holds 2KB clusters,
2796 * or one which holds mbufs, turn it back to one which holds a
2797 * 4 or 16 KB cluster depending on the page size.
2798 */
2799 if (m_maxsize(MC_BIGCL) == PAGE_SIZE) {
2800 super_class = MC_BIGCL;
2801 } else {
2802 VERIFY(PAGE_SIZE == m_maxsize(MC_16KCL));
2803 super_class = MC_16KCL;
2804 }
2805 if (class == MC_MBUF && sp->sl_refcnt == 0 &&
2806 m_total(class) >= (m_minlimit(class) + NMBPG) &&
2807 m_total(super_class) < m_maxlimit(super_class)) {
2808 int i = NMBPG;
2809
2810 m_total(MC_MBUF) -= NMBPG;
2811 mbstat.m_mbufs = m_total(MC_MBUF);
2812 m_infree(MC_MBUF) -= NMBPG;
2813 mtype_stat_add(MT_FREE, -((unsigned)NMBPG));
2814
2815 while (i--) {
2816 struct mbuf *m = sp->sl_head;
2817 VERIFY(m != NULL);
2818 sp->sl_head = m->m_next;
2819 m->m_next = NULL;
2820 }
2821 reinit_supercl = true;
2822 } else if (class == MC_CL && sp->sl_refcnt == 0 &&
2823 m_total(class) >= (m_minlimit(class) + NCLPG) &&
2824 m_total(super_class) < m_maxlimit(super_class)) {
2825 int i = NCLPG;
2826
2827 m_total(MC_CL) -= NCLPG;
2828 mbstat.m_clusters = m_total(MC_CL);
2829 m_infree(MC_CL) -= NCLPG;
2830
2831 while (i--) {
2832 union mcluster *c = sp->sl_head;
2833 VERIFY(c != NULL);
2834 sp->sl_head = c->mcl_next;
2835 c->mcl_next = NULL;
2836 }
2837 reinit_supercl = true;
2838 } else if (class == MC_BIGCL && super_class != MC_BIGCL &&
2839 sp->sl_refcnt == 0 &&
2840 m_total(class) >= (m_minlimit(class) + NBCLPG) &&
2841 m_total(super_class) < m_maxlimit(super_class)) {
2842 int i = NBCLPG;
2843
2844 VERIFY(super_class == MC_16KCL);
2845 m_total(MC_BIGCL) -= NBCLPG;
2846 mbstat.m_bigclusters = m_total(MC_BIGCL);
2847 m_infree(MC_BIGCL) -= NBCLPG;
2848
2849 while (i--) {
2850 union mbigcluster *bc = sp->sl_head;
2851 VERIFY(bc != NULL);
2852 sp->sl_head = bc->mbc_next;
2853 bc->mbc_next = NULL;
2854 }
2855 reinit_supercl = true;
2856 }
2857
2858 if (reinit_supercl) {
2859 VERIFY(sp->sl_head == NULL);
2860 VERIFY(m_total(class) >= m_minlimit(class));
2861 slab_remove(sp, class);
2862
2863 /* Reinitialize it as a cluster for the super class */
2864 m_total(super_class)++;
2865 m_infree(super_class)++;
2866 VERIFY(sp->sl_flags == (SLF_MAPPED | SLF_DETACHED) &&
2867 sp->sl_len == PAGE_SIZE && sp->sl_refcnt == 0);
2868
2869 slab_init(sp, super_class, SLF_MAPPED, sp->sl_base,
2870 sp->sl_base, PAGE_SIZE, 0, 1);
2871 if (mclverify) {
2872 mcache_set_pattern(MCACHE_FREE_PATTERN,
2873 (caddr_t)sp->sl_base, sp->sl_len);
2874 }
2875 ((mcache_obj_t *)(sp->sl_base))->obj_next = NULL;
2876
2877 if (super_class == MC_BIGCL) {
2878 mbstat.m_bigclusters = m_total(MC_BIGCL);
2879 mbstat.m_bigclfree = m_infree(MC_BIGCL) +
2880 m_infree(MC_MBUF_BIGCL);
2881 }
2882
2883 VERIFY(slab_is_detached(sp));
2884 VERIFY(m_total(super_class) <= m_maxlimit(super_class));
2885
2886 /* And finally switch class */
2887 class = super_class;
2888 }
2889
2890 /* Reinsert the slab to the class's slab list */
2891 if (slab_is_detached(sp)) {
2892 slab_insert(sp, class);
2893 }
2894
2895 /* We're done; let others enter */
2896 mb_clalloc_busy = FALSE;
2897 if (mb_clalloc_waiters > 0) {
2898 mb_clalloc_waiters = 0;
2899 wakeup(mb_clalloc_waitchan);
2900 }
2901 }
2902
2903 /*
2904 * Common allocator for rudimentary objects called by the CPU cache layer
2905 * during an allocation request whenever there is no available element in the
2906 * bucket layer. It returns one or more elements from the appropriate global
2907 * freelist. If the freelist is empty, it will attempt to populate it and
2908 * retry the allocation.
2909 */
2910 static unsigned int
2911 mbuf_slab_alloc(void *arg, mcache_obj_t ***plist, unsigned int num, int wait)
2912 {
2913 mbuf_class_t class = (mbuf_class_t)arg;
2914 unsigned int need = num;
2915 mcache_obj_t **list = *plist;
2916
2917 ASSERT(MBUF_CLASS_VALID(class) && !MBUF_CLASS_COMPOSITE(class));
2918 ASSERT(need > 0);
2919
2920 lck_mtx_lock(mbuf_mlock);
2921
2922 for (;;) {
2923 if ((*list = slab_alloc(class, wait)) != NULL) {
2924 (*list)->obj_next = NULL;
2925 list = *plist = &(*list)->obj_next;
2926
2927 if (--need == 0) {
2928 /*
2929 * If the number of elements in the freelist has
2930 * dropped below the low watermark, asynchronously
2931 * populate the freelist now rather than doing
2932 * it later when we run out of elements.
2933 */
2934 if (!mbuf_cached_above(class, wait) &&
2935 m_infree(class) < (m_total(class) >> 5)) {
2936 (void) freelist_populate(class, 1,
2937 M_DONTWAIT);
2938 }
2939 break;
2940 }
2941 } else {
2942 VERIFY(m_infree(class) == 0 || class == MC_CL);
2943
2944 (void) freelist_populate(class, 1,
2945 (wait & MCR_NOSLEEP) ? M_DONTWAIT : M_WAIT);
2946
2947 if (m_infree(class) > 0) {
2948 continue;
2949 }
2950
2951 /* Check if there's anything at the cache layer */
2952 if (mbuf_cached_above(class, wait)) {
2953 break;
2954 }
2955
2956 /* watchdog checkpoint */
2957 mbuf_watchdog();
2958
2959 /* We have nothing and cannot block; give up */
2960 if (wait & MCR_NOSLEEP) {
2961 if (!(wait & MCR_TRYHARD)) {
2962 m_fail_cnt(class)++;
2963 mbstat.m_drops++;
2964 break;
2965 }
2966 }
2967
2968 /*
2969 * If the freelist is still empty and the caller is
2970 * willing to be blocked, sleep on the wait channel
2971 * until an element is available. Otherwise, if
2972 * MCR_TRYHARD is set, do our best to satisfy the
2973 * request without having to go to sleep.
2974 */
2975 if (mbuf_worker_ready &&
2976 mbuf_sleep(class, need, wait)) {
2977 break;
2978 }
2979
2980 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
2981 }
2982 }
2983
2984 m_alloc_cnt(class) += num - need;
2985 lck_mtx_unlock(mbuf_mlock);
2986
2987 return num - need;
2988 }
2989
2990 /*
2991 * Common de-allocator for rudimentary objects called by the CPU cache
2992 * layer when one or more elements need to be returned to the appropriate
2993 * global freelist.
2994 */
2995 static void
2996 mbuf_slab_free(void *arg, mcache_obj_t *list, __unused int purged)
2997 {
2998 mbuf_class_t class = (mbuf_class_t)arg;
2999 mcache_obj_t *nlist;
3000 unsigned int num = 0;
3001 int w;
3002
3003 ASSERT(MBUF_CLASS_VALID(class) && !MBUF_CLASS_COMPOSITE(class));
3004
3005 lck_mtx_lock(mbuf_mlock);
3006
3007 for (;;) {
3008 nlist = list->obj_next;
3009 list->obj_next = NULL;
3010 slab_free(class, list);
3011 ++num;
3012 if ((list = nlist) == NULL) {
3013 break;
3014 }
3015 }
3016 m_free_cnt(class) += num;
3017
3018 if ((w = mb_waiters) > 0) {
3019 mb_waiters = 0;
3020 }
3021 if (w) {
3022 mbwdog_logger("waking up all threads");
3023 }
3024 lck_mtx_unlock(mbuf_mlock);
3025
3026 if (w != 0) {
3027 wakeup(mb_waitchan);
3028 }
3029 }
3030
3031 /*
3032 * Common auditor for rudimentary objects called by the CPU cache layer
3033 * during an allocation or free request. For the former, this is called
3034 * after the objects are obtained from either the bucket or slab layer
3035 * and before they are returned to the caller. For the latter, this is
3036 * called immediately during free and before placing the objects into
3037 * the bucket or slab layer.
3038 */
3039 static void
3040 mbuf_slab_audit(void *arg, mcache_obj_t *list, boolean_t alloc)
3041 {
3042 mbuf_class_t class = (mbuf_class_t)arg;
3043 mcache_audit_t *mca;
3044
3045 ASSERT(MBUF_CLASS_VALID(class) && !MBUF_CLASS_COMPOSITE(class));
3046
3047 while (list != NULL) {
3048 lck_mtx_lock(mbuf_mlock);
3049 mca = mcl_audit_buf2mca(class, list);
3050
3051 /* Do the sanity checks */
3052 if (class == MC_MBUF) {
3053 mcl_audit_mbuf(mca, list, FALSE, alloc);
3054 ASSERT(mca->mca_uflags & MB_SCVALID);
3055 } else {
3056 mcl_audit_cluster(mca, list, m_maxsize(class),
3057 alloc, TRUE);
3058 ASSERT(!(mca->mca_uflags & MB_SCVALID));
3059 }
3060 /* Record this transaction */
3061 if (mcltrace) {
3062 mcache_buffer_log(mca, list, m_cache(class), &mb_start);
3063 }
3064
3065 if (alloc) {
3066 mca->mca_uflags |= MB_INUSE;
3067 } else {
3068 mca->mca_uflags &= ~MB_INUSE;
3069 }
3070 /* Unpair the object (unconditionally) */
3071 mca->mca_uptr = NULL;
3072 lck_mtx_unlock(mbuf_mlock);
3073
3074 list = list->obj_next;
3075 }
3076 }
3077
3078 /*
3079 * Common notify routine for all caches. It is called by mcache when
3080 * one or more objects get freed. We use this indication to trigger
3081 * the wakeup of any sleeping threads so that they can retry their
3082 * allocation requests.
3083 */
3084 static void
3085 mbuf_slab_notify(void *arg, u_int32_t reason)
3086 {
3087 mbuf_class_t class = (mbuf_class_t)arg;
3088 int w;
3089
3090 ASSERT(MBUF_CLASS_VALID(class));
3091
3092 if (reason != MCN_RETRYALLOC) {
3093 return;
3094 }
3095
3096 lck_mtx_lock(mbuf_mlock);
3097 if ((w = mb_waiters) > 0) {
3098 m_notified(class)++;
3099 mb_waiters = 0;
3100 }
3101 if (w) {
3102 mbwdog_logger("waking up all threads");
3103 }
3104 lck_mtx_unlock(mbuf_mlock);
3105
3106 if (w != 0) {
3107 wakeup(mb_waitchan);
3108 }
3109 }
3110
3111 /*
3112 * Obtain object(s) from the composite class's freelist.
3113 */
3114 static unsigned int
3115 cslab_alloc(mbuf_class_t class, mcache_obj_t ***plist, unsigned int num)
3116 {
3117 unsigned int need = num;
3118 mcl_slab_t *sp, *clsp, *nsp;
3119 struct mbuf *m;
3120 mcache_obj_t **list = *plist;
3121 void *cl;
3122
3123 VERIFY(need > 0);
3124 VERIFY(class != MC_MBUF_16KCL || njcl > 0);
3125 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
3126
3127 /* Get what we can from the freelist */
3128 while ((*list = m_cobjlist(class)) != NULL) {
3129 MRANGE(*list);
3130
3131 m = (struct mbuf *)*list;
3132 sp = slab_get(m);
3133 cl = m->m_ext.ext_buf;
3134 clsp = slab_get(cl);
3135 VERIFY(m->m_flags == M_EXT && cl != NULL);
3136 VERIFY(m_get_rfa(m) != NULL && MBUF_IS_COMPOSITE(m));
3137
3138 if (class == MC_MBUF_CL) {
3139 VERIFY(clsp->sl_refcnt >= 1 &&
3140 clsp->sl_refcnt <= NCLPG);
3141 } else {
3142 VERIFY(clsp->sl_refcnt >= 1 &&
3143 clsp->sl_refcnt <= NBCLPG);
3144 }
3145
3146 if (class == MC_MBUF_16KCL) {
3147 int k;
3148 for (nsp = clsp, k = 1; k < NSLABSP16KB; k++) {
3149 nsp = nsp->sl_next;
3150 /* Next slab must already be present */
3151 VERIFY(nsp != NULL);
3152 VERIFY(nsp->sl_refcnt == 1);
3153 }
3154 }
3155
3156 if ((m_cobjlist(class) = (*list)->obj_next) != NULL &&
3157 !MBUF_IN_MAP(m_cobjlist(class))) {
3158 slab_nextptr_panic(sp, m_cobjlist(class));
3159 /* NOTREACHED */
3160 }
3161 (*list)->obj_next = NULL;
3162 list = *plist = &(*list)->obj_next;
3163
3164 if (--need == 0) {
3165 break;
3166 }
3167 }
3168 m_infree(class) -= (num - need);
3169
3170 return num - need;
3171 }
3172
3173 /*
3174 * Place object(s) back into a composite class's freelist.
3175 */
3176 static unsigned int
3177 cslab_free(mbuf_class_t class, mcache_obj_t *list, int purged)
3178 {
3179 mcache_obj_t *o, *tail;
3180 unsigned int num = 0;
3181 struct mbuf *m, *ms;
3182 mcache_audit_t *mca = NULL;
3183 mcache_obj_t *ref_list = NULL;
3184 mcl_slab_t *clsp, *nsp;
3185 void *cl;
3186 mbuf_class_t cl_class;
3187
3188 ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class));
3189 VERIFY(class != MC_MBUF_16KCL || njcl > 0);
3190 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
3191
3192 if (class == MC_MBUF_CL) {
3193 cl_class = MC_CL;
3194 } else if (class == MC_MBUF_BIGCL) {
3195 cl_class = MC_BIGCL;
3196 } else {
3197 VERIFY(class == MC_MBUF_16KCL);
3198 cl_class = MC_16KCL;
3199 }
3200
3201 o = tail = list;
3202
3203 while ((m = ms = (struct mbuf *)o) != NULL) {
3204 mcache_obj_t *rfa, *nexto = o->obj_next;
3205
3206 /* Do the mbuf sanity checks */
3207 if (mclaudit != NULL) {
3208 mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m);
3209 if (mclverify) {
3210 mcache_audit_free_verify(mca, m, 0,
3211 m_maxsize(MC_MBUF));
3212 }
3213 ms = MCA_SAVED_MBUF_PTR(mca);
3214 }
3215
3216 /* Do the cluster sanity checks */
3217 cl = ms->m_ext.ext_buf;
3218 clsp = slab_get(cl);
3219 if (mclverify) {
3220 size_t size = m_maxsize(cl_class);
3221 mcache_audit_free_verify(mcl_audit_buf2mca(cl_class,
3222 (mcache_obj_t *)cl), cl, 0, size);
3223 }
3224 VERIFY(ms->m_type == MT_FREE);
3225 VERIFY(ms->m_flags == M_EXT);
3226 VERIFY(m_get_rfa(ms) != NULL && MBUF_IS_COMPOSITE(ms));
3227 if (cl_class == MC_CL) {
3228 VERIFY(clsp->sl_refcnt >= 1 &&
3229 clsp->sl_refcnt <= NCLPG);
3230 } else {
3231 VERIFY(clsp->sl_refcnt >= 1 &&
3232 clsp->sl_refcnt <= NBCLPG);
3233 }
3234 if (cl_class == MC_16KCL) {
3235 int k;
3236 for (nsp = clsp, k = 1; k < NSLABSP16KB; k++) {
3237 nsp = nsp->sl_next;
3238 /* Next slab must already be present */
3239 VERIFY(nsp != NULL);
3240 VERIFY(nsp->sl_refcnt == 1);
3241 }
3242 }
3243
3244 /*
3245 * If we're asked to purge, restore the actual mbuf using
3246 * contents of the shadow structure (if auditing is enabled)
3247 * and clear EXTF_COMPOSITE flag from the mbuf, as we are
3248 * about to free it and the attached cluster into their caches.
3249 */
3250 if (purged) {
3251 /* Restore constructed mbuf fields */
3252 if (mclaudit != NULL) {
3253 mcl_audit_restore_mbuf(m, mca, TRUE);
3254 }
3255
3256 MEXT_MINREF(m) = 0;
3257 MEXT_REF(m) = 0;
3258 MEXT_PREF(m) = 0;
3259 MEXT_FLAGS(m) = 0;
3260 MEXT_PRIV(m) = 0;
3261 MEXT_PMBUF(m) = NULL;
3262 MEXT_TOKEN(m) = 0;
3263
3264 rfa = (mcache_obj_t *)(void *)m_get_rfa(m);
3265 m_set_ext(m, NULL, NULL, NULL);
3266 rfa->obj_next = ref_list;
3267 ref_list = rfa;
3268
3269 m->m_type = MT_FREE;
3270 m->m_flags = m->m_len = 0;
3271 m->m_next = m->m_nextpkt = NULL;
3272
3273 /* Save mbuf fields and make auditing happy */
3274 if (mclaudit != NULL) {
3275 mcl_audit_mbuf(mca, o, FALSE, FALSE);
3276 }
3277
3278 VERIFY(m_total(class) > 0);
3279 m_total(class)--;
3280
3281 /* Free the mbuf */
3282 o->obj_next = NULL;
3283 slab_free(MC_MBUF, o);
3284
3285 /* And free the cluster */
3286 ((mcache_obj_t *)cl)->obj_next = NULL;
3287 if (class == MC_MBUF_CL) {
3288 slab_free(MC_CL, cl);
3289 } else if (class == MC_MBUF_BIGCL) {
3290 slab_free(MC_BIGCL, cl);
3291 } else {
3292 slab_free(MC_16KCL, cl);
3293 }
3294 }
3295
3296 ++num;
3297 tail = o;
3298 o = nexto;
3299 }
3300
3301 if (!purged) {
3302 tail->obj_next = m_cobjlist(class);
3303 m_cobjlist(class) = list;
3304 m_infree(class) += num;
3305 } else if (ref_list != NULL) {
3306 mcache_free_ext(ref_cache, ref_list);
3307 }
3308
3309 return num;
3310 }
3311
3312 /*
3313 * Common allocator for composite objects called by the CPU cache layer
3314 * during an allocation request whenever there is no available element in
3315 * the bucket layer. It returns one or more composite elements from the
3316 * appropriate global freelist. If the freelist is empty, it will attempt
3317 * to obtain the rudimentary objects from their caches and construct them
3318 * into composite mbuf + cluster objects.
3319 */
3320 static unsigned int
3321 mbuf_cslab_alloc(void *arg, mcache_obj_t ***plist, unsigned int needed,
3322 int wait)
3323 {
3324 mbuf_class_t class = (mbuf_class_t)arg;
3325 mbuf_class_t cl_class = 0;
3326 unsigned int num = 0, cnum = 0, want = needed;
3327 mcache_obj_t *ref_list = NULL;
3328 mcache_obj_t *mp_list = NULL;
3329 mcache_obj_t *clp_list = NULL;
3330 mcache_obj_t **list;
3331 struct ext_ref *rfa;
3332 struct mbuf *m;
3333 void *cl;
3334
3335 ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class));
3336 ASSERT(needed > 0);
3337
3338 VERIFY(class != MC_MBUF_16KCL || njcl > 0);
3339
3340 /* There should not be any slab for this class */
3341 VERIFY(m_slab_cnt(class) == 0 &&
3342 m_slablist(class).tqh_first == NULL &&
3343 m_slablist(class).tqh_last == NULL);
3344
3345 lck_mtx_lock(mbuf_mlock);
3346
3347 /* Try using the freelist first */
3348 num = cslab_alloc(class, plist, needed);
3349 list = *plist;
3350 if (num == needed) {
3351 m_alloc_cnt(class) += num;
3352 lck_mtx_unlock(mbuf_mlock);
3353 return needed;
3354 }
3355
3356 lck_mtx_unlock(mbuf_mlock);
3357
3358 /*
3359 * We could not satisfy the request using the freelist alone;
3360 * allocate from the appropriate rudimentary caches and use
3361 * whatever we can get to construct the composite objects.
3362 */
3363 needed -= num;
3364
3365 /*
3366 * Mark these allocation requests as coming from a composite cache.
3367 * Also, if the caller is willing to be blocked, mark the request
3368 * with MCR_FAILOK such that we don't end up sleeping at the mbuf
3369 * slab layer waiting for the individual object when one or more
3370 * of the already-constructed composite objects are available.
3371 */
3372 wait |= MCR_COMP;
3373 if (!(wait & MCR_NOSLEEP)) {
3374 wait |= MCR_FAILOK;
3375 }
3376
3377 /* allocate mbufs */
3378 needed = mcache_alloc_ext(m_cache(MC_MBUF), &mp_list, needed, wait);
3379 if (needed == 0) {
3380 ASSERT(mp_list == NULL);
3381 goto fail;
3382 }
3383
3384 /* allocate clusters */
3385 if (class == MC_MBUF_CL) {
3386 cl_class = MC_CL;
3387 } else if (class == MC_MBUF_BIGCL) {
3388 cl_class = MC_BIGCL;
3389 } else {
3390 VERIFY(class == MC_MBUF_16KCL);
3391 cl_class = MC_16KCL;
3392 }
3393 needed = mcache_alloc_ext(m_cache(cl_class), &clp_list, needed, wait);
3394 if (needed == 0) {
3395 ASSERT(clp_list == NULL);
3396 goto fail;
3397 }
3398
3399 needed = mcache_alloc_ext(ref_cache, &ref_list, needed, wait);
3400 if (needed == 0) {
3401 ASSERT(ref_list == NULL);
3402 goto fail;
3403 }
3404
3405 /*
3406 * By this time "needed" is MIN(mbuf, cluster, ref). Any
3407 * leftovers will be freed accordingly before we return to the caller.
3408 */
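/*
 * Illustrative example (not part of the original code): if the caller
 * asked for 8 composites and the mbuf allocation above returned 8, the
 * cluster allocation (asked for 8) returned 6, and the ref allocation
 * (asked for 6) returned 6, then "needed" is 6 here and the 2 surplus
 * mbufs still on mp_list are released at the "fail" label below.
 */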
3409 for (cnum = 0; cnum < needed; cnum++) {
3410 struct mbuf *ms;
3411
3412 m = ms = (struct mbuf *)mp_list;
3413 mp_list = mp_list->obj_next;
3414
3415 cl = clp_list;
3416 clp_list = clp_list->obj_next;
3417 ((mcache_obj_t *)cl)->obj_next = NULL;
3418
3419 rfa = (struct ext_ref *)ref_list;
3420 ref_list = ref_list->obj_next;
3421 ((mcache_obj_t *)(void *)rfa)->obj_next = NULL;
3422
3423 /*
3424 * If auditing is enabled, construct the shadow mbuf
3425 * in the audit structure instead of in the actual one.
3426 * mbuf_cslab_audit() will take care of restoring the
3427 * contents after the integrity check.
3428 */
3429 if (mclaudit != NULL) {
3430 mcache_audit_t *mca, *cl_mca;
3431
3432 lck_mtx_lock(mbuf_mlock);
3433 mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m);
3434 ms = MCA_SAVED_MBUF_PTR(mca);
3435 cl_mca = mcl_audit_buf2mca(cl_class,
3436 (mcache_obj_t *)cl);
3437
3438 /*
3439 * Pair them up. Note that this is done at the time
3440 * the mbuf+cluster objects are constructed. This
3441 * information should be treated as a "best effort"
3442 * debugging hint since more than one mbuf can refer
3443 * to a cluster. In that case, the cluster might not
3444 * be freed along with the mbuf it was paired with.
3445 */
3446 mca->mca_uptr = cl_mca;
3447 cl_mca->mca_uptr = mca;
3448
3449 ASSERT(mca->mca_uflags & MB_SCVALID);
3450 ASSERT(!(cl_mca->mca_uflags & MB_SCVALID));
3451 lck_mtx_unlock(mbuf_mlock);
3452
3453 /* Technically, they are in the freelist */
3454 if (mclverify) {
3455 size_t size;
3456
3457 mcache_set_pattern(MCACHE_FREE_PATTERN, m,
3458 m_maxsize(MC_MBUF));
3459
3460 if (class == MC_MBUF_CL) {
3461 size = m_maxsize(MC_CL);
3462 } else if (class == MC_MBUF_BIGCL) {
3463 size = m_maxsize(MC_BIGCL);
3464 } else {
3465 size = m_maxsize(MC_16KCL);
3466 }
3467
3468 mcache_set_pattern(MCACHE_FREE_PATTERN, cl,
3469 size);
3470 }
3471 }
3472
3473 MBUF_INIT(ms, 0, MT_FREE);
3474 if (class == MC_MBUF_16KCL) {
3475 MBUF_16KCL_INIT(ms, cl, rfa, 0, EXTF_COMPOSITE);
3476 } else if (class == MC_MBUF_BIGCL) {
3477 MBUF_BIGCL_INIT(ms, cl, rfa, 0, EXTF_COMPOSITE);
3478 } else {
3479 MBUF_CL_INIT(ms, cl, rfa, 0, EXTF_COMPOSITE);
3480 }
3481 VERIFY(ms->m_flags == M_EXT);
3482 VERIFY(m_get_rfa(ms) != NULL && MBUF_IS_COMPOSITE(ms));
3483
3484 *list = (mcache_obj_t *)m;
3485 (*list)->obj_next = NULL;
3486 list = *plist = &(*list)->obj_next;
3487 }
3488
3489 fail:
3490 /*
3491 * Free up what's left of the above.
3492 */
3493 if (mp_list != NULL) {
3494 mcache_free_ext(m_cache(MC_MBUF), mp_list);
3495 }
3496 if (clp_list != NULL) {
3497 mcache_free_ext(m_cache(cl_class), clp_list);
3498 }
3499 if (ref_list != NULL) {
3500 mcache_free_ext(ref_cache, ref_list);
3501 }
3502
3503 lck_mtx_lock(mbuf_mlock);
3504 if (num > 0 || cnum > 0) {
3505 m_total(class) += cnum;
3506 VERIFY(m_total(class) <= m_maxlimit(class));
3507 m_alloc_cnt(class) += num + cnum;
3508 }
3509 if ((num + cnum) < want) {
3510 m_fail_cnt(class) += (want - (num + cnum));
3511 }
3512 lck_mtx_unlock(mbuf_mlock);
3513
3514 return num + cnum;
3515 }
3516
3517 /*
3518 * Common de-allocator for composite objects called by the CPU cache
3519 * layer when one or more elements need to be returned to the appropriate
3520 * global freelist.
3521 */
3522 static void
3523 mbuf_cslab_free(void *arg, mcache_obj_t *list, int purged)
3524 {
3525 mbuf_class_t class = (mbuf_class_t)arg;
3526 unsigned int num;
3527 int w;
3528
3529 ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class));
3530
3531 lck_mtx_lock(mbuf_mlock);
3532
3533 num = cslab_free(class, list, purged);
3534 m_free_cnt(class) += num;
3535
3536 if ((w = mb_waiters) > 0) {
3537 mb_waiters = 0;
3538 }
3539 if (w) {
3540 mbwdog_logger("waking up all threads");
3541 }
3542
3543 lck_mtx_unlock(mbuf_mlock);
3544
3545 if (w != 0) {
3546 wakeup(mb_waitchan);
3547 }
3548 }
3549
3550 /*
3551 * Common auditor for composite objects called by the CPU cache layer
3552 * during an allocation or free request. For the former, this is called
3553 * after the objects are obtained from either the bucket or slab layer
3554 * and before they are returned to the caller. For the latter, this is
3555 * called immediately during free and before placing the objects into
3556 * the bucket or slab layer.
3557 */
3558 static void
3559 mbuf_cslab_audit(void *arg, mcache_obj_t *list, boolean_t alloc)
3560 {
3561 mbuf_class_t class = (mbuf_class_t)arg, cl_class;
3562 mcache_audit_t *mca;
3563 struct mbuf *m, *ms;
3564 mcl_slab_t *clsp, *nsp;
3565 size_t cl_size;
3566 void *cl;
3567
3568 ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class));
3569 if (class == MC_MBUF_CL) {
3570 cl_class = MC_CL;
3571 } else if (class == MC_MBUF_BIGCL) {
3572 cl_class = MC_BIGCL;
3573 } else {
3574 cl_class = MC_16KCL;
3575 }
3576 cl_size = m_maxsize(cl_class);
3577
3578 while ((m = ms = (struct mbuf *)list) != NULL) {
3579 lck_mtx_lock(mbuf_mlock);
3580 /* Do the mbuf sanity checks and record its transaction */
3581 mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m);
3582 mcl_audit_mbuf(mca, m, TRUE, alloc);
3583 if (mcltrace) {
3584 mcache_buffer_log(mca, m, m_cache(class), &mb_start);
3585 }
3586
3587 if (alloc) {
3588 mca->mca_uflags |= MB_COMP_INUSE;
3589 } else {
3590 mca->mca_uflags &= ~MB_COMP_INUSE;
3591 }
3592
3593 /*
3594 * Use the shadow mbuf in the audit structure if we are
3595 * freeing, since the contents of the actual mbuf has been
3596 * pattern-filled by the above call to mcl_audit_mbuf().
3597 */
3598 if (!alloc && mclverify) {
3599 ms = MCA_SAVED_MBUF_PTR(mca);
3600 }
3601
3602 /* Do the cluster sanity checks and record its transaction */
3603 cl = ms->m_ext.ext_buf;
3604 clsp = slab_get(cl);
3605 VERIFY(ms->m_flags == M_EXT && cl != NULL);
3606 VERIFY(m_get_rfa(ms) != NULL && MBUF_IS_COMPOSITE(ms));
3607 if (class == MC_MBUF_CL) {
3608 VERIFY(clsp->sl_refcnt >= 1 &&
3609 clsp->sl_refcnt <= NCLPG);
3610 } else {
3611 VERIFY(clsp->sl_refcnt >= 1 &&
3612 clsp->sl_refcnt <= NBCLPG);
3613 }
3614
3615 if (class == MC_MBUF_16KCL) {
3616 int k;
3617 for (nsp = clsp, k = 1; k < NSLABSP16KB; k++) {
3618 nsp = nsp->sl_next;
3619 /* Next slab must already be present */
3620 VERIFY(nsp != NULL);
3621 VERIFY(nsp->sl_refcnt == 1);
3622 }
3623 }
3624
3625
3626 mca = mcl_audit_buf2mca(cl_class, cl);
3627 mcl_audit_cluster(mca, cl, cl_size, alloc, FALSE);
3628 if (mcltrace) {
3629 mcache_buffer_log(mca, cl, m_cache(class), &mb_start);
3630 }
3631
3632 if (alloc) {
3633 mca->mca_uflags |= MB_COMP_INUSE;
3634 } else {
3635 mca->mca_uflags &= ~MB_COMP_INUSE;
3636 }
3637 lck_mtx_unlock(mbuf_mlock);
3638
3639 list = list->obj_next;
3640 }
3641 }
3642
3643 static void
3644 m_vm_error_stats(uint32_t *cnt, uint64_t *ts, uint64_t *size,
3645 uint64_t alloc_size, kern_return_t error)
3646 {
3647 *cnt = *cnt + 1;
3648 *ts = net_uptime();
3649 if (size) {
3650 *size = alloc_size;
3651 }
3652 switch (error) {
3653 case KERN_SUCCESS:
3654 break;
3655 case KERN_INVALID_ARGUMENT:
3656 mb_kmem_stats[0]++;
3657 break;
3658 case KERN_INVALID_ADDRESS:
3659 mb_kmem_stats[1]++;
3660 break;
3661 case KERN_RESOURCE_SHORTAGE:
3662 mb_kmem_stats[2]++;
3663 break;
3664 case KERN_NO_SPACE:
3665 mb_kmem_stats[3]++;
3666 break;
3667 case KERN_FAILURE:
3668 mb_kmem_stats[4]++;
3669 break;
3670 default:
3671 mb_kmem_stats[5]++;
3672 break;
3673 }
3674 }
3675
3676 static vm_offset_t
3677 kmem_mb_alloc(vm_map_t mbmap, int size, int physContig, kern_return_t *err)
3678 {
3679 vm_offset_t addr = 0;
3680 kern_return_t kr = KERN_SUCCESS;
3681
3682 if (!physContig) {
3683 kr = kmem_alloc(mbmap, &addr, size,
3684 KMA_KOBJECT | KMA_LOMEM, VM_KERN_MEMORY_MBUF);
3685 } else {
3686 kr = kmem_alloc_contig(mbmap, &addr, size, PAGE_MASK, 0xfffff,
3687 0, KMA_KOBJECT | KMA_LOMEM, VM_KERN_MEMORY_MBUF);
3688 }
3689
3690 if (kr != KERN_SUCCESS) {
3691 addr = 0;
3692 }
3693 if (err) {
3694 *err = kr;
3695 }
3696
3697 return addr;
3698 }
3699
3700 /*
3701 * Allocate some number of mbuf clusters and place on cluster freelist.
3702 */
3703 static int
3704 m_clalloc(const u_int32_t num, const int wait, const u_int32_t bufsize)
3705 {
3706 int i, count = 0;
3707 vm_size_t size = 0;
3708 int numpages = 0, large_buffer;
3709 vm_offset_t page = 0;
3710 mcache_audit_t *mca_list = NULL;
3711 mcache_obj_t *con_list = NULL;
3712 mcl_slab_t *sp;
3713 mbuf_class_t class;
3714 kern_return_t error;
3715
3716 /* Set if a buffer allocation needs allocation of multiple pages */
3717 large_buffer = ((bufsize == m_maxsize(MC_16KCL)) &&
3718 PAGE_SIZE < M16KCLBYTES);
3719 VERIFY(bufsize == m_maxsize(MC_BIGCL) ||
3720 bufsize == m_maxsize(MC_16KCL));
3721
3722 VERIFY((bufsize == PAGE_SIZE) ||
3723 (bufsize > PAGE_SIZE && bufsize == m_maxsize(MC_16KCL)));
3724
3725 if (bufsize == m_size(MC_BIGCL)) {
3726 class = MC_BIGCL;
3727 } else {
3728 class = MC_16KCL;
3729 }
3730
3731 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
3732
3733 /*
3734 * Multiple threads may attempt to populate the cluster map one
3735 * after another. Since we drop the lock below prior to acquiring
3736 * the physical page(s), our view of the cluster map may no longer
3737 * be accurate, and we could end up over-committing the pages beyond
3738 * the maximum allowed for each class. To prevent it, this entire
3739 * operation (including the page mapping) is serialized.
3740 */
3741 while (mb_clalloc_busy) {
3742 mb_clalloc_waiters++;
3743 (void) msleep(mb_clalloc_waitchan, mbuf_mlock,
3744 (PZERO - 1), "m_clalloc", NULL);
3745 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
3746 }
3747
3748 /* We are busy now; tell everyone else to go away */
3749 mb_clalloc_busy = TRUE;
3750
3751 /*
3752 * Honor the caller's wish to block or not block. We have a way
3753 * to grow the pool asynchronously using the mbuf worker thread.
3754 */
3755 i = m_howmany(num, bufsize);
3756 if (i <= 0 || (wait & M_DONTWAIT)) {
3757 goto out;
3758 }
3759
3760 lck_mtx_unlock(mbuf_mlock);
3761
3762 size = round_page(i * bufsize);
3763 page = kmem_mb_alloc(mb_map, size, large_buffer, &error);
3764
3765 /*
3766 * If we did ask for "n" 16KB physically contiguous chunks
3767 * and didn't get them, try again without the contiguity
3768 * restriction.
3769 */
3770 net_update_uptime();
3771 if (large_buffer && page == 0) {
3772 m_vm_error_stats(&mb_kmem_contig_failed,
3773 &mb_kmem_contig_failed_ts,
3774 &mb_kmem_contig_failed_size,
3775 size, error);
3776 page = kmem_mb_alloc(mb_map, size, 0, &error);
3777 }
3778
3779 if (page == 0) {
3780 m_vm_error_stats(&mb_kmem_failed,
3781 &mb_kmem_failed_ts,
3782 &mb_kmem_failed_size,
3783 size, error);
3784 #if PAGE_SIZE == 4096
3785 if (bufsize == m_maxsize(MC_BIGCL)) {
3786 #else
3787 if (bufsize >= m_maxsize(MC_BIGCL)) {
3788 #endif
3789 /* Try for 1 page if failed */
3790 size = PAGE_SIZE;
3791 page = kmem_mb_alloc(mb_map, size, 0, &error);
3792 if (page == 0) {
3793 m_vm_error_stats(&mb_kmem_one_failed,
3794 &mb_kmem_one_failed_ts,
3795 NULL, size, error);
3796 }
3797 }
3798
3799 if (page == 0) {
3800 lck_mtx_lock(mbuf_mlock);
3801 goto out;
3802 }
3803 }
3804
3805 VERIFY(IS_P2ALIGNED(page, PAGE_SIZE));
3806 numpages = size / PAGE_SIZE;
3807
3808 /* If auditing is enabled, allocate the audit structures now */
3809 if (mclaudit != NULL) {
3810 int needed;
3811
3812 /*
3813 * Yes, I realize this is a waste of memory for clusters
3814 * that never get transformed into mbufs, as we may end
3815 * up with NMBPG-1 unused audit structures per cluster.
3816 * But doing so tremendously simplifies the allocation
3817 * strategy, since at this point we are not holding the
3818 * mbuf lock and the caller is okay to be blocked.
3819 */
3820 if (bufsize == PAGE_SIZE) {
3821 needed = numpages * NMBPG;
3822
3823 i = mcache_alloc_ext(mcl_audit_con_cache,
3824 &con_list, needed, MCR_SLEEP);
3825
3826 VERIFY(con_list != NULL && i == needed);
3827 } else {
3828 /*
3829 * if multiple 4K pages are being used for a
3830 * 16K cluster
3831 */
3832 needed = numpages / NSLABSP16KB;
3833 }
3834
3835 i = mcache_alloc_ext(mcache_audit_cache,
3836 (mcache_obj_t **)&mca_list, needed, MCR_SLEEP);
3837
3838 VERIFY(mca_list != NULL && i == needed);
3839 }
3840
3841 lck_mtx_lock(mbuf_mlock);
3842
3843 for (i = 0; i < numpages; i++, page += PAGE_SIZE) {
3844 ppnum_t offset =
3845 ((unsigned char *)page - mbutl) >> PAGE_SHIFT;
3846 ppnum_t new_page = pmap_find_phys(kernel_pmap, page);
3847
3848 /*
3849 * If there is a mapper, the appropriate I/O page is
3850 * returned; zero out the page to discard its past
3851 * contents to prevent exposing leftover kernel memory.
3852 */
3853 VERIFY(offset < mcl_pages);
3854 if (mcl_paddr_base != 0) {
3855 bzero((void *)(uintptr_t) page, PAGE_SIZE);
3856 new_page = IOMapperInsertPage(mcl_paddr_base,
3857 offset, new_page);
3858 }
3859 mcl_paddr[offset] = new_page;
3860
3861 /* Pattern-fill this fresh page */
3862 if (mclverify) {
3863 mcache_set_pattern(MCACHE_FREE_PATTERN,
3864 (caddr_t)page, PAGE_SIZE);
3865 }
3866 if (bufsize == PAGE_SIZE) {
3867 mcache_obj_t *buf;
3868 /* One for the entire page */
3869 sp = slab_get((void *)page);
3870 if (mclaudit != NULL) {
3871 mcl_audit_init((void *)page,
3872 &mca_list, &con_list,
3873 AUDIT_CONTENTS_SIZE, NMBPG);
3874 }
3875 VERIFY(sp->sl_refcnt == 0 && sp->sl_flags == 0);
3876 slab_init(sp, class, SLF_MAPPED, (void *)page,
3877 (void *)page, PAGE_SIZE, 0, 1);
3878 buf = (mcache_obj_t *)page;
3879 buf->obj_next = NULL;
3880
3881 /* Insert this slab */
3882 slab_insert(sp, class);
3883
3884 /* Update stats now since slab_get drops the lock */
3885 ++m_infree(class);
3886 ++m_total(class);
3887 VERIFY(m_total(class) <= m_maxlimit(class));
3888 if (class == MC_BIGCL) {
3889 mbstat.m_bigclfree = m_infree(MC_BIGCL) +
3890 m_infree(MC_MBUF_BIGCL);
3891 mbstat.m_bigclusters = m_total(MC_BIGCL);
3892 }
3893 ++count;
3894 } else if ((bufsize > PAGE_SIZE) &&
3895 (i % NSLABSP16KB) == 0) {
3896 union m16kcluster *m16kcl = (union m16kcluster *)page;
3897 mcl_slab_t *nsp;
3898 int k;
3899
3900 /* One for the entire 16KB */
3901 sp = slab_get(m16kcl);
3902 if (mclaudit != NULL) {
3903 mcl_audit_init(m16kcl, &mca_list, NULL, 0, 1);
3904 }
3905
3906 VERIFY(sp->sl_refcnt == 0 && sp->sl_flags == 0);
3907 slab_init(sp, MC_16KCL, SLF_MAPPED,
3908 m16kcl, m16kcl, bufsize, 0, 1);
3909 m16kcl->m16kcl_next = NULL;
3910
3911 /*
3912 * 2nd-Nth page's slab is part of the first one,
3913 * where N is NSLABSP16KB.
3914 */
3915 for (k = 1; k < NSLABSP16KB; k++) {
3916 nsp = slab_get(((union mbigcluster *)page) + k);
3917 VERIFY(nsp->sl_refcnt == 0 &&
3918 nsp->sl_flags == 0);
3919 slab_init(nsp, MC_16KCL,
3920 SLF_MAPPED | SLF_PARTIAL,
3921 m16kcl, NULL, 0, 0, 0);
3922 }
3923 /* Insert this slab */
3924 slab_insert(sp, MC_16KCL);
3925
3926 /* Update stats now since slab_get drops the lock */
3927 ++m_infree(MC_16KCL);
3928 ++m_total(MC_16KCL);
3929 VERIFY(m_total(MC_16KCL) <= m_maxlimit(MC_16KCL));
3930 ++count;
3931 }
3932 }
3933 VERIFY(mca_list == NULL && con_list == NULL);
3934
3935 if (!mb_peak_newreport && mbuf_report_usage(class)) {
3936 mb_peak_newreport = TRUE;
3937 }
3938
3939 /* We're done; let others enter */
3940 mb_clalloc_busy = FALSE;
3941 if (mb_clalloc_waiters > 0) {
3942 mb_clalloc_waiters = 0;
3943 wakeup(mb_clalloc_waitchan);
3944 }
3945
3946 return count;
3947 out:
3948 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
3949
3950 mtracelarge_register(size);
3951
3952 /* We're done; let others enter */
3953 mb_clalloc_busy = FALSE;
3954 if (mb_clalloc_waiters > 0) {
3955 mb_clalloc_waiters = 0;
3956 wakeup(mb_clalloc_waitchan);
3957 }
3958
3959 /*
3960 * When non-blocking we kick a thread if we have to grow the
3961 * pool or if the number of free clusters is less than requested.
3962 */
3963 if (i > 0 && mbuf_worker_ready && mbuf_worker_needs_wakeup) {
3964 mbwdog_logger("waking up the worker thread to grow %s by %d",
3965 m_cname(class), i);
3966 wakeup((caddr_t)&mbuf_worker_needs_wakeup);
3967 mbuf_worker_needs_wakeup = FALSE;
3968 }
3969 if (class == MC_BIGCL) {
3970 if (i > 0) {
3971 /*
3972 * Remember total number of 4KB clusters needed
3973 * at this time.
3974 */
3975 i += m_total(MC_BIGCL);
3976 if (i > m_region_expand(MC_BIGCL)) {
3977 m_region_expand(MC_BIGCL) = i;
3978 }
3979 }
3980 if (m_infree(MC_BIGCL) >= num) {
3981 return 1;
3982 }
3983 } else {
3984 if (i > 0) {
3985 /*
3986 * Remember total number of 16KB clusters needed
3987 * at this time.
3988 */
3989 i += m_total(MC_16KCL);
3990 if (i > m_region_expand(MC_16KCL)) {
3991 m_region_expand(MC_16KCL) = i;
3992 }
3993 }
3994 if (m_infree(MC_16KCL) >= num) {
3995 return 1;
3996 }
3997 }
3998 return 0;
3999 }
4000
4001 /*
4002 * Populate the global freelist of the corresponding buffer class.
4003 */
4004 static int
4005 freelist_populate(mbuf_class_t class, unsigned int num, int wait)
4006 {
4007 mcache_obj_t *o = NULL;
4008 int i, numpages = 0, count;
4009 mbuf_class_t super_class;
4010
4011 VERIFY(class == MC_MBUF || class == MC_CL || class == MC_BIGCL ||
4012 class == MC_16KCL);
4013
4014 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
4015
4016 VERIFY(PAGE_SIZE == m_maxsize(MC_BIGCL) ||
4017 PAGE_SIZE == m_maxsize(MC_16KCL));
4018
4019 if (m_maxsize(class) >= PAGE_SIZE) {
4020 return m_clalloc(num, wait, m_maxsize(class)) != 0;
4021 }
4022
4023 /*
4024 * The rest of the function will allocate pages and will slice
4025 * them up into the right size
4026 */
4027
4028 numpages = (num * m_size(class) + PAGE_SIZE - 1) / PAGE_SIZE;
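/*
 * Worked example (illustrative; assumes MSIZE is 256 bytes and
 * PAGE_SIZE is 4KB): a request for 100 MC_MBUF objects needs
 * (100 * 256 + 4095) / 4096 = 7 pages.
 */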
4029
4030 /* Currently assume that pages are 4K or 16K */
4031 if (PAGE_SIZE == m_maxsize(MC_BIGCL)) {
4032 super_class = MC_BIGCL;
4033 } else {
4034 super_class = MC_16KCL;
4035 }
4036
4037 i = m_clalloc(numpages, wait, m_maxsize(super_class));
4038
4039 /* how many objects will we cut the page into? */
4040 int numobj = PAGE_SIZE / m_maxsize(class);
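/*
 * E.g. (illustrative, assuming the standard sizes): a 4KB page is cut
 * into 16 mbufs for MC_MBUF or two 2KB clusters for MC_CL; on 16KB-page
 * systems it may also be cut into four 4KB clusters for MC_BIGCL.
 */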
4041
4042 for (count = 0; count < numpages; count++) {
4043 /* respect totals, minlimit, maxlimit */
4044 if (m_total(super_class) <= m_minlimit(super_class) ||
4045 m_total(class) >= m_maxlimit(class)) {
4046 break;
4047 }
4048
4049 if ((o = slab_alloc(super_class, wait)) == NULL) {
4050 break;
4051 }
4052
4053 struct mbuf *m = (struct mbuf *)o;
4054 union mcluster *c = (union mcluster *)o;
4055 union mbigcluster *mbc = (union mbigcluster *)o;
4056 mcl_slab_t *sp = slab_get(o);
4057 mcache_audit_t *mca = NULL;
4058
4059 /*
4060 * since one full page will be converted to MC_MBUF or
4061 * MC_CL, verify that the reference count will match that
4062 * assumption
4063 */
4064 VERIFY(sp->sl_refcnt == 1 && slab_is_detached(sp));
4065 VERIFY((sp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) == SLF_MAPPED);
4066 /*
4067 * Make sure that the cluster is unmolested
4068 * while in the freelist
4069 */
4070 if (mclverify) {
4071 mca = mcl_audit_buf2mca(super_class,
4072 (mcache_obj_t *)o);
4073 mcache_audit_free_verify(mca,
4074 (mcache_obj_t *)o, 0, m_maxsize(super_class));
4075 }
4076
4077 /* Reinitialize it as an mbuf or 2K or 4K slab */
4078 slab_init(sp, class, sp->sl_flags,
4079 sp->sl_base, NULL, PAGE_SIZE, 0, numobj);
4080
4081 VERIFY(sp->sl_head == NULL);
4082
4083 VERIFY(m_total(super_class) >= 1);
4084 m_total(super_class)--;
4085
4086 if (super_class == MC_BIGCL) {
4087 mbstat.m_bigclusters = m_total(MC_BIGCL);
4088 }
4089
4090 m_total(class) += numobj;
4091 VERIFY(m_total(class) <= m_maxlimit(class));
4092 m_infree(class) += numobj;
4093
4094 if (!mb_peak_newreport && mbuf_report_usage(class)) {
4095 mb_peak_newreport = TRUE;
4096 }
4097
4098 i = numobj;
4099 if (class == MC_MBUF) {
4100 mbstat.m_mbufs = m_total(MC_MBUF);
4101 mtype_stat_add(MT_FREE, NMBPG);
4102 while (i--) {
4103 /*
4104 * If auditing is enabled, construct the
4105 * shadow mbuf in the audit structure
4106 * instead of the actual one.
4107 * mbuf_slab_audit() will take care of
4108 * restoring the contents after the
4109 * integrity check.
4110 */
4111 if (mclaudit != NULL) {
4112 struct mbuf *ms;
4113 mca = mcl_audit_buf2mca(MC_MBUF,
4114 (mcache_obj_t *)m);
4115 ms = MCA_SAVED_MBUF_PTR(mca);
4116 ms->m_type = MT_FREE;
4117 } else {
4118 m->m_type = MT_FREE;
4119 }
4120 m->m_next = sp->sl_head;
4121 sp->sl_head = (void *)m++;
4122 }
4123 } else if (class == MC_CL) { /* MC_CL */
4124 mbstat.m_clfree =
4125 m_infree(MC_CL) + m_infree(MC_MBUF_CL);
4126 mbstat.m_clusters = m_total(MC_CL);
4127 while (i--) {
4128 c->mcl_next = sp->sl_head;
4129 sp->sl_head = (void *)c++;
4130 }
4131 } else {
4132 VERIFY(class == MC_BIGCL);
4133 mbstat.m_bigclusters = m_total(MC_BIGCL);
4134 mbstat.m_bigclfree = m_infree(MC_BIGCL) +
4135 m_infree(MC_MBUF_BIGCL);
4136 while (i--) {
4137 mbc->mbc_next = sp->sl_head;
4138 sp->sl_head = (void *)mbc++;
4139 }
4140 }
4141
4142 /* Insert into the mbuf or 2k or 4k slab list */
4143 slab_insert(sp, class);
4144
4145 if ((i = mb_waiters) > 0) {
4146 mb_waiters = 0;
4147 }
4148 if (i != 0) {
4149 mbwdog_logger("waking up all threads");
4150 wakeup(mb_waitchan);
4151 }
4152 }
4153 return count != 0;
4154 }
4155
4156 /*
4157 * For each class, initialize the freelist to hold m_minlimit() objects.
4158 */
4159 static void
4160 freelist_init(mbuf_class_t class)
4161 {
4162 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
4163
4164 VERIFY(class == MC_CL || class == MC_BIGCL);
4165 VERIFY(m_total(class) == 0);
4166 VERIFY(m_minlimit(class) > 0);
4167
4168 while (m_total(class) < m_minlimit(class)) {
4169 (void) freelist_populate(class, m_minlimit(class), M_WAIT);
4170 }
4171
4172 VERIFY(m_total(class) >= m_minlimit(class));
4173 }
4174
4175 /*
4176 * (Inaccurately) check if it might be worth a trip back to the
4177 * mcache layer due the availability of objects there. We'll
4178 * end up back here if there's nothing up there.
4179 */
4180 static boolean_t
4181 mbuf_cached_above(mbuf_class_t class, int wait)
4182 {
4183 switch (class) {
4184 case MC_MBUF:
4185 if (wait & MCR_COMP) {
4186 return !mcache_bkt_isempty(m_cache(MC_MBUF_CL)) ||
4187 !mcache_bkt_isempty(m_cache(MC_MBUF_BIGCL));
4188 }
4189 break;
4190
4191 case MC_CL:
4192 if (wait & MCR_COMP) {
4193 return !mcache_bkt_isempty(m_cache(MC_MBUF_CL));
4194 }
4195 break;
4196
4197 case MC_BIGCL:
4198 if (wait & MCR_COMP) {
4199 return !mcache_bkt_isempty(m_cache(MC_MBUF_BIGCL));
4200 }
4201 break;
4202
4203 case MC_16KCL:
4204 if (wait & MCR_COMP) {
4205 return !mcache_bkt_isempty(m_cache(MC_MBUF_16KCL));
4206 }
4207 break;
4208
4209 case MC_MBUF_CL:
4210 case MC_MBUF_BIGCL:
4211 case MC_MBUF_16KCL:
4212 break;
4213
4214 default:
4215 VERIFY(0);
4216 /* NOTREACHED */
4217 }
4218
4219 return !mcache_bkt_isempty(m_cache(class));
4220 }
4221
4222 /*
4223 * If possible, convert constructed objects to raw ones.
4224 */
4225 static boolean_t
4226 mbuf_steal(mbuf_class_t class, unsigned int num)
4227 {
4228 mcache_obj_t *top = NULL;
4229 mcache_obj_t **list = ⊤
4230 unsigned int tot = 0;
4231
4232 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
4233
4234 switch (class) {
4235 case MC_MBUF:
4236 case MC_CL:
4237 case MC_BIGCL:
4238 case MC_16KCL:
4239 return FALSE;
4240
4241 case MC_MBUF_CL:
4242 case MC_MBUF_BIGCL:
4243 case MC_MBUF_16KCL:
4244 /* Get the required number of constructed objects if possible */
4245 if (m_infree(class) > m_minlimit(class)) {
4246 tot = cslab_alloc(class, &list,
4247 MIN(num, m_infree(class)));
4248 }
4249
4250 /* And destroy them to get back the raw objects */
4251 if (top != NULL) {
4252 (void) cslab_free(class, top, 1);
4253 }
4254 break;
4255
4256 default:
4257 VERIFY(0);
4258 /* NOTREACHED */
4259 }
4260
4261 return tot == num;
4262 }
4263
4264 static void
4265 m_reclaim(mbuf_class_t class, unsigned int num, boolean_t comp)
4266 {
4267 int m, bmap = 0;
4268
4269 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
4270
4271 VERIFY(m_total(MC_CL) <= m_maxlimit(MC_CL));
4272 VERIFY(m_total(MC_BIGCL) <= m_maxlimit(MC_BIGCL));
4273 VERIFY(m_total(MC_16KCL) <= m_maxlimit(MC_16KCL));
4274
4275 /*
4276 * This logic can be made smarter; for now, simply mark
4277 * all other related classes as potential victims.
4278 */
4279 switch (class) {
4280 case MC_MBUF:
4281 m_wantpurge(MC_CL)++;
4282 m_wantpurge(MC_BIGCL)++;
4283 m_wantpurge(MC_MBUF_CL)++;
4284 m_wantpurge(MC_MBUF_BIGCL)++;
4285 break;
4286
4287 case MC_CL:
4288 m_wantpurge(MC_MBUF)++;
4289 m_wantpurge(MC_BIGCL)++;
4290 m_wantpurge(MC_MBUF_BIGCL)++;
4291 if (!comp) {
4292 m_wantpurge(MC_MBUF_CL)++;
4293 }
4294 break;
4295
4296 case MC_BIGCL:
4297 m_wantpurge(MC_MBUF)++;
4298 m_wantpurge(MC_CL)++;
4299 m_wantpurge(MC_MBUF_CL)++;
4300 if (!comp) {
4301 m_wantpurge(MC_MBUF_BIGCL)++;
4302 }
4303 break;
4304
4305 case MC_16KCL:
4306 if (!comp) {
4307 m_wantpurge(MC_MBUF_16KCL)++;
4308 }
4309 break;
4310
4311 default:
4312 VERIFY(0);
4313 /* NOTREACHED */
4314 }
4315
4316 /*
4317 * Run through each marked class and check if we really need to
4318 * purge (and therefore temporarily disable) the per-CPU cache
4319 * layer used by the class. If so, remember the classes since
4320 * we are going to drop the lock below prior to purging.
4321 */
4322 for (m = 0; m < NELEM(mbuf_table); m++) {
4323 if (m_wantpurge(m) > 0) {
4324 m_wantpurge(m) = 0;
4325 /*
4326 * Try hard to steal the required number of objects
4327 * from the freelist of other mbuf classes. Only
4328 * purge and disable the per-CPU cache layer when
4329 * we don't have enough; it's the last resort.
4330 */
4331 if (!mbuf_steal(m, num)) {
4332 bmap |= (1 << m);
4333 }
4334 }
4335 }
4336
4337 lck_mtx_unlock(mbuf_mlock);
4338
4339 if (bmap != 0) {
4340 /* signal the domains to drain */
4341 net_drain_domains();
4342
4343 /* Sigh; we have no other choices but to ask mcache to purge */
4344 for (m = 0; m < NELEM(mbuf_table); m++) {
4345 if ((bmap & (1 << m)) &&
4346 mcache_purge_cache(m_cache(m), TRUE)) {
4347 lck_mtx_lock(mbuf_mlock);
4348 m_purge_cnt(m)++;
4349 mbstat.m_drain++;
4350 lck_mtx_unlock(mbuf_mlock);
4351 }
4352 }
4353 } else {
4354 /*
4355 * Request mcache to reap extra elements from all of its caches;
4356 * note that all reaps are serialized and happen only at a fixed
4357 * interval.
4358 */
4359 mcache_reap();
4360 }
4361 lck_mtx_lock(mbuf_mlock);
4362 }
4363 #endif /* CONFIG_MBUF_MCACHE */
4364
4365 static inline struct mbuf *
4366 m_get_common(int wait, short type, int hdr)
4367 {
4368 struct mbuf *m;
4369
4370 #if CONFIG_MBUF_MCACHE
4371 int mcflags = MSLEEPF(wait);
4372
4373 /* Is this due to a non-blocking retry? If so, then try harder */
4374 if (mcflags & MCR_NOSLEEP) {
4375 mcflags |= MCR_TRYHARD;
4376 }
4377
4378 m = mcache_alloc(m_cache(MC_MBUF), mcflags);
4379 #else
4380 m = mz_alloc(wait);
4381 #endif /* CONFIG_MBUF_MCACHE */
4382 if (m != NULL) {
4383 MBUF_INIT(m, hdr, type);
4384 mtype_stat_inc(type);
4385 mtype_stat_dec(MT_FREE);
4386 }
4387 return m;
4388 }
4389
4390 /*
4391 * Space allocation routines; these are also available as macros
4392 * for critical paths.
4393 */
4394 #define _M_GET(wait, type) m_get_common(wait, type, 0)
4395 #define _M_GETHDR(wait, type) m_get_common(wait, type, 1)
4396 #define _M_RETRY(wait, type) _M_GET(wait, type)
4397 #define _M_RETRYHDR(wait, type) _M_GETHDR(wait, type)
4398 #define _MGET(m, how, type) ((m) = _M_GET(how, type))
4399 #define _MGETHDR(m, how, type) ((m) = _M_GETHDR(how, type))
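/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 *
 *	struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return ENOBUFS;
 *	...
 *	(void) m_free(m);
 */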
4400
4401 struct mbuf *
4402 m_get(int wait, int type)
4403 {
4404 return _M_GET(wait, type);
4405 }
4406
4407 struct mbuf *
4408 m_gethdr(int wait, int type)
4409 {
4410 return _M_GETHDR(wait, type);
4411 }
4412
4413 struct mbuf *
4414 m_retry(int wait, int type)
4415 {
4416 return _M_RETRY(wait, type);
4417 }
4418
4419 struct mbuf *
4420 m_retryhdr(int wait, int type)
4421 {
4422 return _M_RETRYHDR(wait, type);
4423 }
4424
4425 struct mbuf *
4426 m_getclr(int wait, int type)
4427 {
4428 struct mbuf *m;
4429
4430 _MGET(m, wait, type);
4431 if (m != NULL) {
4432 bzero(MTOD(m, caddr_t), MLEN);
4433 }
4434 return m;
4435 }
4436
4437 static int
4438 m_free_paired(struct mbuf *m)
4439 {
4440 VERIFY((m->m_flags & M_EXT) && (MEXT_FLAGS(m) & EXTF_PAIRED));
4441
4442 os_atomic_thread_fence(seq_cst);
4443 if (MEXT_PMBUF(m) == m) {
4444 /*
4445 * Paired ref count might be negative in case we lose
4446 * against another thread clearing MEXT_PMBUF, in the
4447 * event it occurs after the above memory barrier sync.
4448 * In that case just ignore as things have been unpaired.
4449 */
4450 int16_t prefcnt = os_atomic_dec(&MEXT_PREF(m), acq_rel);
4451 if (prefcnt > 1) {
4452 return 1;
4453 } else if (prefcnt == 1) {
4454 m_ext_free_func_t m_free_func = m_get_ext_free(m);
4455 VERIFY(m_free_func != NULL);
4456 (*m_free_func)(m->m_ext.ext_buf,
4457 m->m_ext.ext_size, m_get_ext_arg(m));
4458 return 1;
4459 } else if (prefcnt == 0) {
4460 VERIFY(MBUF_IS_PAIRED(m));
4461
4462 /*
4463 * Restore minref to its natural value, so that
4464 * the caller will be able to free the cluster
4465 * as appropriate.
4466 */
4467 MEXT_MINREF(m) = 0;
4468
4469 /*
4470 * Clear MEXT_PMBUF, but leave EXTF_PAIRED intact
4471 * as it is immutable. The release store below also
4472 * provides the required memory barrier.
4473 */
4474 os_atomic_store(&MEXT_PMBUF(m), NULL, release);
4475
4476 switch (m->m_ext.ext_size) {
4477 case MCLBYTES:
4478 m_set_ext(m, m_get_rfa(m), NULL, NULL);
4479 break;
4480
4481 case MBIGCLBYTES:
4482 m_set_ext(m, m_get_rfa(m), m_bigfree, NULL);
4483 break;
4484
4485 case M16KCLBYTES:
4486 m_set_ext(m, m_get_rfa(m), m_16kfree, NULL);
4487 break;
4488
4489 default:
4490 VERIFY(0);
4491 /* NOTREACHED */
4492 }
4493 }
4494 }
4495
4496 /*
4497 * Tell caller the unpair has occurred, and that the reference
4498 * count on the external cluster held for the paired mbuf should
4499 * now be dropped.
4500 */
4501 return 0;
4502 }
4503
4504 struct mbuf *
4505 m_free(struct mbuf *m)
4506 {
4507 struct mbuf *n = m->m_next;
4508
4509 if (m->m_type == MT_FREE) {
4510 panic("m_free: freeing an already freed mbuf");
4511 }
4512
4513 if (m->m_flags & M_PKTHDR) {
4514 /* Check for scratch area overflow */
4515 m_redzone_verify(m);
4516 /* Free the aux data and tags if there is any */
4517 m_tag_delete_chain(m);
4518
4519 m_do_tx_compl_callback(m, NULL);
4520 }
4521
4522 if (m->m_flags & M_EXT) {
4523 if (MBUF_IS_PAIRED(m) && m_free_paired(m)) {
4524 return n;
4525 }
4526 /*
4527 * Make sure that we don't touch any ext_ref
4528 * member after we decrement the reference count
4529 * since that may lead to use-after-free
4530 * when we do not hold the last reference.
4531 */
4532 const bool composite = !!(MEXT_FLAGS(m) & EXTF_COMPOSITE);
4533 const m_ext_free_func_t m_free_func = m_get_ext_free(m);
4534 const uint16_t minref = MEXT_MINREF(m);
4535 const uint16_t refcnt = m_decref(m);
4536
4537 if (refcnt == minref && !composite) {
4538 #if CONFIG_MBUF_MCACHE
4539 if (m_free_func == NULL) {
4540 mcache_free(m_cache(MC_CL), m->m_ext.ext_buf);
4541 } else if (m_free_func == m_bigfree) {
4542 mcache_free(m_cache(MC_BIGCL),
4543 m->m_ext.ext_buf);
4544 } else if (m_free_func == m_16kfree) {
4545 mcache_free(m_cache(MC_16KCL),
4546 m->m_ext.ext_buf);
4547 } else {
4548 (*m_free_func)(m->m_ext.ext_buf,
4549 m->m_ext.ext_size, m_get_ext_arg(m));
4550 }
4551 mcache_free(ref_cache, m_get_rfa(m));
4552 #else
4553 if (m_free_func == NULL) {
4554 mz_cl_free(ZONE_ID_CLUSTER_2K, m->m_ext.ext_buf);
4555 } else if (m_free_func == m_bigfree) {
4556 mz_cl_free(ZONE_ID_CLUSTER_4K, m->m_ext.ext_buf);
4557 } else if (m_free_func == m_16kfree) {
4558 mz_cl_free(ZONE_ID_CLUSTER_16K, m->m_ext.ext_buf);
4559 } else {
4560 (*m_free_func)(m->m_ext.ext_buf,
4561 m->m_ext.ext_size, m_get_ext_arg(m));
4562 }
4563 mz_ref_free(m_get_rfa(m));
4564 #endif /* CONFIG_MBUF_MCACHE */
4565 m_set_ext(m, NULL, NULL, NULL);
4566 } else if (refcnt == minref && composite) {
4567 VERIFY(!(MEXT_FLAGS(m) & EXTF_PAIRED));
4568 VERIFY(m->m_type != MT_FREE);
4569
4570 mtype_stat_dec(m->m_type);
4571 mtype_stat_inc(MT_FREE);
4572
4573 m->m_type = MT_FREE;
4574 m->m_flags = M_EXT;
4575 m->m_len = 0;
4576 m->m_next = m->m_nextpkt = NULL;
4577 /*
4578 * MEXT_FLAGS is safe to access here
4579 * since we are now sure that we held
4580 * the last reference to ext_ref.
4581 */
4582 MEXT_FLAGS(m) &= ~EXTF_READONLY;
4583
4584 #if CONFIG_MBUF_MCACHE
4585 /* "Free" into the intermediate cache */
4586 if (m_free_func == NULL) {
4587 mcache_free(m_cache(MC_MBUF_CL), m);
4588 } else if (m_free_func == m_bigfree) {
4589 mcache_free(m_cache(MC_MBUF_BIGCL), m);
4590 } else {
4591 VERIFY(m_free_func == m_16kfree);
4592 mcache_free(m_cache(MC_MBUF_16KCL), m);
4593 }
4594 #else
4595 /* "Free" into the intermediate cache */
4596 if (m_free_func == NULL) {
4597 mz_composite_free(MC_MBUF_CL, m);
4598 } else if (m_free_func == m_bigfree) {
4599 mz_composite_free(MC_MBUF_BIGCL, m);
4600 } else {
4601 VERIFY(m_free_func == m_16kfree);
4602 mz_composite_free(MC_MBUF_16KCL, m);
4603 }
4604 #endif /* CONFIG_MBUF_MCACHE */
4605 return n;
4606 }
4607 }
4608
4609 if (m->m_type != MT_FREE) {
4610 mtype_stat_dec(m->m_type);
4611 mtype_stat_inc(MT_FREE);
4612 }
4613
4614 m->m_type = MT_FREE;
4615 m->m_flags = m->m_len = 0;
4616 m->m_next = m->m_nextpkt = NULL;
4617
4618 #if CONFIG_MBUF_MCACHE
4619 mcache_free(m_cache(MC_MBUF), m);
4620 #else
4621 mz_free(m);
4622 #endif /* CONFIG_MBUF_MCACHE */
4623
4624 return n;
4625 }
4626
4627 __private_extern__ struct mbuf *
4628 m_clattach(struct mbuf *m, int type, caddr_t extbuf,
4629 void (*extfree)(caddr_t, u_int, caddr_t), size_t extsize, caddr_t extarg,
4630 int wait, int pair)
4631 {
4632 struct ext_ref *rfa = NULL;
4633
4634 /*
4635 * If pairing is requested and an existing mbuf is provided, reject
4636 * it if it's already been paired to another cluster. Otherwise,
4637 * allocate a new one or free any existing below.
4638 */
4639 if ((m != NULL && MBUF_IS_PAIRED(m)) ||
4640 (m == NULL && (m = _M_GETHDR(wait, type)) == NULL)) {
4641 return NULL;
4642 }
4643
4644 if (m->m_flags & M_EXT) {
4645 /*
4646 * Make sure that we don't touch any ext_ref
4647 * member after we decrement the reference count
4648 * since that may lead to use-after-free
4649 * when we do not hold the last reference.
4650 */
4651 const bool composite = !!(MEXT_FLAGS(m) & EXTF_COMPOSITE);
4652 VERIFY(!(MEXT_FLAGS(m) & EXTF_PAIRED) && MEXT_PMBUF(m) == NULL);
4653 const m_ext_free_func_t m_free_func = m_get_ext_free(m);
4654 const uint16_t minref = MEXT_MINREF(m);
4655 const uint16_t refcnt = m_decref(m);
4656
4657 if (refcnt == minref && !composite) {
4658 #if CONFIG_MBUF_MCACHE
4659 if (m_free_func == NULL) {
4660 mcache_free(m_cache(MC_CL), m->m_ext.ext_buf);
4661 } else if (m_free_func == m_bigfree) {
4662 mcache_free(m_cache(MC_BIGCL),
4663 m->m_ext.ext_buf);
4664 } else if (m_free_func == m_16kfree) {
4665 mcache_free(m_cache(MC_16KCL),
4666 m->m_ext.ext_buf);
4667 } else {
4668 (*m_free_func)(m->m_ext.ext_buf,
4669 m->m_ext.ext_size, m_get_ext_arg(m));
4670 }
4671 #else
4672 if (m_free_func == NULL) {
4673 mz_cl_free(ZONE_ID_CLUSTER_2K, m->m_ext.ext_buf);
4674 } else if (m_free_func == m_bigfree) {
4675 mz_cl_free(ZONE_ID_CLUSTER_4K, m->m_ext.ext_buf);
4676 } else if (m_free_func == m_16kfree) {
4677 mz_cl_free(ZONE_ID_CLUSTER_16K, m->m_ext.ext_buf);
4678 } else {
4679 (*m_free_func)(m->m_ext.ext_buf,
4680 m->m_ext.ext_size, m_get_ext_arg(m));
4681 }
4682 #endif /* CONFIG_MBUF_MCACHE */
4683 /* Re-use the reference structure */
4684 rfa = m_get_rfa(m);
4685 } else if (refcnt == minref && composite) {
4686 VERIFY(m->m_type != MT_FREE);
4687
4688 mtype_stat_dec(m->m_type);
4689 mtype_stat_inc(MT_FREE);
4690
4691 m->m_type = MT_FREE;
4692 m->m_flags = M_EXT;
4693 m->m_len = 0;
4694 m->m_next = m->m_nextpkt = NULL;
4695
4696 /*
4697 * MEXT_FLAGS is safe to access here
4698 * since we are now sure that we held
4699 * the last reference to ext_ref.
4700 */
4701 MEXT_FLAGS(m) &= ~EXTF_READONLY;
4702
4703 /* "Free" into the intermediate cache */
4704 #if CONFIG_MBUF_MCACHE
4705 if (m_free_func == NULL) {
4706 mcache_free(m_cache(MC_MBUF_CL), m);
4707 } else if (m_free_func == m_bigfree) {
4708 mcache_free(m_cache(MC_MBUF_BIGCL), m);
4709 } else {
4710 VERIFY(m_free_func == m_16kfree);
4711 mcache_free(m_cache(MC_MBUF_16KCL), m);
4712 }
4713 #else
4714 if (m_free_func == NULL) {
4715 mz_composite_free(MC_MBUF_CL, m);
4716 } else if (m_free_func == m_bigfree) {
4717 mz_composite_free(MC_MBUF_BIGCL, m);
4718 } else {
4719 VERIFY(m_free_func == m_16kfree);
4720 mz_composite_free(MC_MBUF_16KCL, m);
4721 }
4722 #endif /* CONFIG_MBUF_MCACHE */
4723 /*
4724 * Allocate a new mbuf, since we didn't divorce
4725 * the composite mbuf + cluster pair above.
4726 */
4727 if ((m = _M_GETHDR(wait, type)) == NULL) {
4728 return NULL;
4729 }
4730 }
4731 }
4732
4733 #if CONFIG_MBUF_MCACHE
4734 if (rfa == NULL &&
4735 (rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) {
4736 m_free(m);
4737 return NULL;
4738 }
4739 #else
4740 if (rfa == NULL &&
4741 (rfa = mz_ref_alloc(wait)) == NULL) {
4742 m_free(m);
4743 return NULL;
4744 }
4745 #endif /* CONFIG_MBUF_MCACHE */
4746
4747 if (!pair) {
4748 MEXT_INIT(m, extbuf, extsize, extfree, extarg, rfa,
4749 0, 1, 0, 0, 0, NULL);
4750 } else {
4751 MEXT_INIT(m, extbuf, extsize, extfree, (caddr_t)m, rfa,
4752 1, 1, 1, EXTF_PAIRED, 0, m);
4753 }
4754
4755 return m;
4756 }
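/*
 * Usage sketch for m_clattach() (hypothetical driver names, shown only
 * for illustration):
 *
 *	static void drv_extfree(caddr_t buf, u_int size, caddr_t arg);
 *
 *	m = m_clattach(NULL, MT_DATA, (caddr_t)drv_buf, drv_extfree,
 *	    drv_buflen, (caddr_t)drv_softc, M_DONTWAIT, 0);
 *	if (m == NULL)
 *		drv_extfree((caddr_t)drv_buf, drv_buflen, (caddr_t)drv_softc);
 */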
4757
4758 /*
4759 * Perform `fast' allocation of mbuf clusters from a cache of recently-freed
4760 * clusters. (If the cache is empty, new clusters are allocated en masse.)
4761 */
4762 struct mbuf *
4763 m_getcl(int wait, int type, int flags)
4764 {
4765 struct mbuf *m = NULL;
4766 int hdr = (flags & M_PKTHDR);
4767
4768 #if CONFIG_MBUF_MCACHE
4769 int mcflags = MSLEEPF(wait);
4770
4771 /* Is this due to a non-blocking retry? If so, then try harder */
4772 if (mcflags & MCR_NOSLEEP) {
4773 mcflags |= MCR_TRYHARD;
4774 }
4775
4776 m = mcache_alloc(m_cache(MC_MBUF_CL), mcflags);
4777 #else
4778 m = mz_composite_alloc(MC_MBUF_CL, wait);
4779 #endif /* CONFIG_MBUF_MCACHE */
4780 if (m != NULL) {
4781 u_int16_t flag;
4782 struct ext_ref *rfa;
4783 void *cl;
4784
4785 VERIFY(m->m_type == MT_FREE && m->m_flags == M_EXT);
4786 cl = m->m_ext.ext_buf;
4787 rfa = m_get_rfa(m);
4788
4789 ASSERT(cl != NULL && rfa != NULL);
4790 VERIFY(MBUF_IS_COMPOSITE(m) && m_get_ext_free(m) == NULL);
4791
4792 flag = MEXT_FLAGS(m);
4793
4794 MBUF_INIT(m, hdr, type);
4795 MBUF_CL_INIT(m, cl, rfa, 1, flag);
4796
4797 mtype_stat_inc(type);
4798 mtype_stat_dec(MT_FREE);
4799 }
4800 return m;
4801 }
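/*
 * Typical call (illustrative): m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)
 * returns an mbuf with a 2KB cluster already attached, or NULL when the
 * composite cache and the underlying pools are exhausted.
 */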
4802
4803 /* m_mclget() adds an mbuf cluster to a normal mbuf */
4804 struct mbuf *
4805 m_mclget(struct mbuf *m, int wait)
4806 {
4807 struct ext_ref *rfa = NULL;
4808
4809 #if CONFIG_MBUF_MCACHE
4810 if ((rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) {
4811 return m;
4812 }
4813 #else
4814 if ((rfa = mz_ref_alloc(wait)) == NULL) {
4815 return m;
4816 }
4817 #endif /* CONFIG_MBUF_MCACHE */
4818 m->m_ext.ext_buf = m_mclalloc(wait);
4819 if (m->m_ext.ext_buf != NULL) {
4820 MBUF_CL_INIT(m, m->m_ext.ext_buf, rfa, 1, 0);
4821 } else {
4822 #if CONFIG_MBUF_MCACHE
4823 mcache_free(ref_cache, rfa);
4824 #else
4825 mz_ref_free(rfa);
4826 #endif /* CONFIG_MBUF_MCACHE */
4827 }
4828
4829 return m;
4830 }
4831
4832 /* Allocate an mbuf cluster */
4833 caddr_t
4834 m_mclalloc(int wait)
4835 {
4836 #if CONFIG_MBUF_MCACHE
4837 int mcflags = MSLEEPF(wait);
4838
4839 /* Is this due to a non-blocking retry? If so, then try harder */
4840 if (mcflags & MCR_NOSLEEP) {
4841 mcflags |= MCR_TRYHARD;
4842 }
4843
4844 return mcache_alloc(m_cache(MC_CL), mcflags);
4845 #else
4846 return mz_cl_alloc(ZONE_ID_CLUSTER_2K, wait);
4847 #endif /* CONFIG_MBUF_MCACHE */
4848 }
4849
4850 /* Free an mbuf cluster */
4851 void
4852 m_mclfree(caddr_t p)
4853 {
4854 #if CONFIG_MBUF_MCACHE
4855 mcache_free(m_cache(MC_CL), p);
4856 #else
4857 mz_cl_free(ZONE_ID_CLUSTER_2K, p);
4858 #endif /* CONFIG_MBUF_MCACHE */
4859 }
4860
4861 /*
4862 * m_mclhasreference() checks if a cluster of an mbuf is referenced by
4863 * another mbuf; see comments in m_incref() regarding EXTF_READONLY.
4864 */
4865 int
4866 m_mclhasreference(struct mbuf *m)
4867 {
4868 if (!(m->m_flags & M_EXT)) {
4869 return 0;
4870 }
4871
4872 ASSERT(m_get_rfa(m) != NULL);
4873
4874 return (MEXT_FLAGS(m) & EXTF_READONLY) ? 1 : 0;
4875 }
4876
4877 __private_extern__ caddr_t
4878 m_bigalloc(int wait)
4879 {
4880 #if CONFIG_MBUF_MCACHE
4881 int mcflags = MSLEEPF(wait);
4882
4883 /* Is this due to a non-blocking retry? If so, then try harder */
4884 if (mcflags & MCR_NOSLEEP) {
4885 mcflags |= MCR_TRYHARD;
4886 }
4887
4888 return mcache_alloc(m_cache(MC_BIGCL), mcflags);
4889 #else
4890 return mz_cl_alloc(ZONE_ID_CLUSTER_4K, wait);
4891 #endif /* CONFIG_MBUF_MCACHE */
4892 }
4893
4894 __private_extern__ void
4895 m_bigfree(caddr_t p, __unused u_int size, __unused caddr_t arg)
4896 {
4897 #if CONFIG_MBUF_MCACHE
4898 mcache_free(m_cache(MC_BIGCL), p);
4899 #else
4900 mz_cl_free(ZONE_ID_CLUSTER_4K, p);
4901 #endif /* CONFIG_MBUF_MCACHE */
4902 }
4903
4904 /* m_mbigget() adds a 4KB mbuf cluster to a normal mbuf */
4905 __private_extern__ struct mbuf *
4906 m_mbigget(struct mbuf *m, int wait)
4907 {
4908 struct ext_ref *rfa = NULL;
4909
4910 #if CONFIG_MBUF_MCACHE
4911 if ((rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) {
4912 return m;
4913 }
4914 #else
4915 if ((rfa = mz_ref_alloc(wait)) == NULL) {
4916 return m;
4917 }
4918 #endif /* CONFIG_MBUF_MCACHE */
4919 m->m_ext.ext_buf = m_bigalloc(wait);
4920 if (m->m_ext.ext_buf != NULL) {
4921 MBUF_BIGCL_INIT(m, m->m_ext.ext_buf, rfa, 1, 0);
4922 } else {
4923 #if CONFIG_MBUF_MCACHE
4924 mcache_free(ref_cache, rfa);
4925 #else
4926 mz_ref_free(rfa);
4927 #endif /* CONFIG_MBUF_MCACHE */
4928 }
4929 return m;
4930 }
4931
4932 __private_extern__ caddr_t
4933 m_16kalloc(int wait)
4934 {
4935 #if CONFIG_MBUF_MCACHE
4936 int mcflags = MSLEEPF(wait);
4937
4938 /* Is this due to a non-blocking retry? If so, then try harder */
4939 if (mcflags & MCR_NOSLEEP) {
4940 mcflags |= MCR_TRYHARD;
4941 }
4942
4943 return mcache_alloc(m_cache(MC_16KCL), mcflags);
4944 #else
4945 return mz_cl_alloc(ZONE_ID_CLUSTER_16K, wait);
4946 #endif /* CONFIG_MBUF_MCACHE */
4947 }
4948
4949 __private_extern__ void
4950 m_16kfree(caddr_t p, __unused u_int size, __unused caddr_t arg)
4951 {
4952 #if CONFIG_MBUF_MCACHE
4953 mcache_free(m_cache(MC_16KCL), p);
4954 #else
4955 mz_cl_free(ZONE_ID_CLUSTER_16K, p);
4956 #endif /* CONFIG_MBUF_MCACHE */
4957 }
4958
4959 /* m_m16kget() adds a 16KB mbuf cluster to a normal mbuf */
4960 __private_extern__ struct mbuf *
4961 m_m16kget(struct mbuf *m, int wait)
4962 {
4963 struct ext_ref *rfa = NULL;
4964
4965 #if CONFIG_MBUF_MCACHE
4966 if ((rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) {
4967 return m;
4968 }
4969 #else
4970 if ((rfa = mz_ref_alloc(wait)) == NULL) {
4971 return m;
4972 }
4973 #endif /* CONFIG_MBUF_MCACHE */
4974 m->m_ext.ext_buf = m_16kalloc(wait);
4975 if (m->m_ext.ext_buf != NULL) {
4976 MBUF_16KCL_INIT(m, m->m_ext.ext_buf, rfa, 1, 0);
4977 } else {
4978 #if CONFIG_MBUF_MCACHE
4979 mcache_free(ref_cache, rfa);
4980 #else
4981 mz_ref_free(rfa);
4982 #endif /* CONFIG_MBUF_MCACHE */
4983 }
4984
4985 return m;
4986 }
4987
4988 /*
4989 * "Move" mbuf pkthdr from "from" to "to".
4990 * "from" must have M_PKTHDR set, and "to" must be empty.
4991 */
4992 void
4993 m_copy_pkthdr(struct mbuf *to, struct mbuf *from)
4994 {
4995 VERIFY(from->m_flags & M_PKTHDR);
4996
4997 /* Check for scratch area overflow */
4998 m_redzone_verify(from);
4999
5000 if (to->m_flags & M_PKTHDR) {
5001 /* Check for scratch area overflow */
5002 m_redzone_verify(to);
5003 /* We will be taking over the tags of 'to' */
5004 m_tag_delete_chain(to);
5005 }
5006 to->m_pkthdr = from->m_pkthdr; /* especially tags */
5007 m_classifier_init(from, 0); /* purge classifier info */
5008 m_tag_init(from, 1); /* purge all tags from src */
5009 m_scratch_init(from); /* clear src scratch area */
5010 to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
5011 if ((to->m_flags & M_EXT) == 0) {
5012 to->m_data = to->m_pktdat;
5013 }
5014 m_redzone_init(to); /* setup red zone on dst */
5015 }
5016
5017 /*
5018 * Duplicate "from"'s mbuf pkthdr in "to".
5019 * "from" must have M_PKTHDR set, and "to" must be empty.
5020 * In particular, this does a deep copy of the packet tags.
5021 */
5022 int
5023 m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
5024 {
5025 VERIFY(from->m_flags & M_PKTHDR);
5026
5027 /* Check for scratch area overflow */
5028 m_redzone_verify(from);
5029
5030 if (to->m_flags & M_PKTHDR) {
5031 /* Check for scratch area overflow */
5032 m_redzone_verify(to);
5033 /* We will be taking over the tags of 'to' */
5034 m_tag_delete_chain(to);
5035 }
5036 to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
5037 if ((to->m_flags & M_EXT) == 0) {
5038 to->m_data = to->m_pktdat;
5039 }
5040 to->m_pkthdr = from->m_pkthdr;
5041 /* clear TX completion flag so the callback is not called in the copy */
5042 to->m_pkthdr.pkt_flags &= ~PKTF_TX_COMPL_TS_REQ;
5043 m_redzone_init(to); /* setup red zone on dst */
5044 m_tag_init(to, 0); /* preserve dst static tags */
5045 return m_tag_copy_chain(to, from, how);
5046 }
5047
5048 void
5049 m_copy_pftag(struct mbuf *to, struct mbuf *from)
5050 {
5051 memcpy(m_pftag(to), m_pftag(from), sizeof(struct pf_mtag));
5052 #if PF_ECN
5053 m_pftag(to)->pftag_hdr = NULL;
5054 m_pftag(to)->pftag_flags &= ~(PF_TAG_HDR_INET | PF_TAG_HDR_INET6);
5055 #endif /* PF_ECN */
5056 }
5057
5058 void
5059 m_copy_necptag(struct mbuf *to, struct mbuf *from)
5060 {
5061 memcpy(m_necptag(to), m_necptag(from), sizeof(struct necp_mtag_));
5062 }
5063
5064 void
5065 m_classifier_init(struct mbuf *m, uint32_t pktf_mask)
5066 {
5067 VERIFY(m->m_flags & M_PKTHDR);
5068
5069 m->m_pkthdr.pkt_proto = 0;
5070 m->m_pkthdr.pkt_flowsrc = 0;
5071 m->m_pkthdr.pkt_flowid = 0;
5072 m->m_pkthdr.pkt_ext_flags = 0;
5073 m->m_pkthdr.pkt_flags &= pktf_mask; /* caller-defined mask */
5074 /* preserve service class and interface info for loopback packets */
5075 if (!(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
5076 (void) m_set_service_class(m, MBUF_SC_BE);
5077 }
5078 if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO)) {
5079 m->m_pkthdr.pkt_ifainfo = 0;
5080 }
5081 /*
5082 * Preserve timestamp if requested
5083 */
5084 if (!(m->m_pkthdr.pkt_flags & PKTF_TS_VALID)) {
5085 m->m_pkthdr.pkt_timestamp = 0;
5086 }
5087 }
5088
5089 void
5090 m_copy_classifier(struct mbuf *to, struct mbuf *from)
5091 {
5092 VERIFY(to->m_flags & M_PKTHDR);
5093 VERIFY(from->m_flags & M_PKTHDR);
5094
5095 to->m_pkthdr.pkt_proto = from->m_pkthdr.pkt_proto;
5096 to->m_pkthdr.pkt_flowsrc = from->m_pkthdr.pkt_flowsrc;
5097 to->m_pkthdr.pkt_flowid = from->m_pkthdr.pkt_flowid;
5098 to->m_pkthdr.pkt_flags = from->m_pkthdr.pkt_flags;
5099 to->m_pkthdr.pkt_ext_flags = from->m_pkthdr.pkt_ext_flags;
5100 (void) m_set_service_class(to, from->m_pkthdr.pkt_svc);
5101 to->m_pkthdr.pkt_ifainfo = from->m_pkthdr.pkt_ifainfo;
5102 }
5103
5104 /*
5105 * Return a list of mbuf hdrs that point to clusters. Try for num_needed;
5106 * if wantall is not set, return whatever number was available. Set up the
5107 * first num_with_pkthdrs with mbuf hdrs configured as packet headers; these
5108 * are chained on the m_nextpkt field. Any packets requested beyond this
5109 * are chained onto the last packet header's m_next field. The size of
5110 * the cluster is controlled by the parameter bufsize.
5111 */
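/*
 * For example (illustrative): with *num_needed = 4 and
 * num_with_pkthdrs = 2, the returned list is shaped as
 *
 *	pkt1 --m_nextpkt--> pkt2 --m_next--> m3 --m_next--> m4
 *
 * where only pkt1 and pkt2 are initialized as packet headers.
 */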
5112 __private_extern__ struct mbuf *
5113 m_getpackets_internal(unsigned int *num_needed, int num_with_pkthdrs,
5114 int wait, int wantall, size_t bufsize)
5115 {
5116 struct mbuf *m = NULL;
5117 struct mbuf **np, *top;
5118 unsigned int pnum, needed = *num_needed;
5119 #if CONFIG_MBUF_MCACHE
5120 mcache_obj_t *mp_list = NULL;
5121 int mcflags = MSLEEPF(wait);
5122 mcache_t *cp;
5123 #else
5124 zstack_t mp_list = {};
5125 mbuf_class_t class = MC_MBUF_CL;
5126 #endif /* CONFIG_MBUF_MCACHE */
5127 u_int16_t flag;
5128 struct ext_ref *rfa;
5129 void *cl;
5130
5131 ASSERT(bufsize == m_maxsize(MC_CL) ||
5132 bufsize == m_maxsize(MC_BIGCL) ||
5133 bufsize == m_maxsize(MC_16KCL));
5134
5135 /*
5136 * Caller must first check for njcl because this
5137 * routine is internal and not exposed/used via KPI.
5138 */
5139 VERIFY(bufsize != m_maxsize(MC_16KCL) || njcl > 0);
5140
5141 top = NULL;
5142 np = ⊤
5143 pnum = 0;
5144
5145 #if CONFIG_MBUF_MCACHE
5146 /*
5147 * The caller doesn't want all the requested buffers; only some.
5148 * Try hard to get what we can, but don't block. This effectively
5149 * overrides MCR_SLEEP, since this thread will not go to sleep
5150 * if we can't get all the buffers.
5151 */
5152 if (!wantall || (mcflags & MCR_NOSLEEP)) {
5153 mcflags |= MCR_TRYHARD;
5154 }
5155
5156 /* Allocate the composite mbuf + cluster elements from the cache */
5157 if (bufsize == m_maxsize(MC_CL)) {
5158 cp = m_cache(MC_MBUF_CL);
5159 } else if (bufsize == m_maxsize(MC_BIGCL)) {
5160 cp = m_cache(MC_MBUF_BIGCL);
5161 } else {
5162 cp = m_cache(MC_MBUF_16KCL);
5163 }
5164 needed = mcache_alloc_ext(cp, &mp_list, needed, mcflags);
5165 #else
5166 /* Allocate the composite mbuf + cluster elements from the cache */
5167 if (bufsize == m_maxsize(MC_CL)) {
5168 class = MC_MBUF_CL;
5169 } else if (bufsize == m_maxsize(MC_BIGCL)) {
5170 class = MC_MBUF_BIGCL;
5171 } else {
5172 class = MC_MBUF_16KCL;
5173 }
5174 mp_list = mz_composite_alloc_n(class, needed, wait);
5175 needed = zstack_count(mp_list);
5176 #endif /* CONFIG_MBUF_MCACHE */
5177
5178 for (pnum = 0; pnum < needed; pnum++) {
5179 #if CONFIG_MBUF_MCACHE
5180 m = (struct mbuf *)mp_list;
5181 mp_list = mp_list->obj_next;
5182 #else
5183 m = zstack_pop(&mp_list);
5184 #endif /* CONFIG_MBUF_MCACHE */
5185
5186 VERIFY(m->m_type == MT_FREE && m->m_flags == M_EXT);
5187 cl = m->m_ext.ext_buf;
5188 rfa = m_get_rfa(m);
5189
5190 ASSERT(cl != NULL && rfa != NULL);
5191 VERIFY(MBUF_IS_COMPOSITE(m));
5192
5193 flag = MEXT_FLAGS(m);
5194
5195 MBUF_INIT(m, num_with_pkthdrs, MT_DATA);
5196 if (bufsize == m_maxsize(MC_16KCL)) {
5197 MBUF_16KCL_INIT(m, cl, rfa, 1, flag);
5198 } else if (bufsize == m_maxsize(MC_BIGCL)) {
5199 MBUF_BIGCL_INIT(m, cl, rfa, 1, flag);
5200 } else {
5201 MBUF_CL_INIT(m, cl, rfa, 1, flag);
5202 }
5203
5204 if (num_with_pkthdrs > 0) {
5205 --num_with_pkthdrs;
5206 }
5207
5208 *np = m;
5209 if (num_with_pkthdrs > 0) {
5210 np = &m->m_nextpkt;
5211 } else {
5212 np = &m->m_next;
5213 }
5214 }
5215 #if CONFIG_MBUF_MCACHE
5216 ASSERT(pnum != *num_needed || mp_list == NULL);
5217 if (mp_list != NULL) {
5218 mcache_free_ext(cp, mp_list);
5219 }
5220 #else
5221 ASSERT(pnum != *num_needed || zstack_empty(mp_list));
5222 if (!zstack_empty(mp_list)) {
5223 mz_composite_free_n(class, mp_list);
5224 }
5225 #endif /* CONFIG_MBUF_MCACHE */
5226 if (pnum > 0) {
5227 mtype_stat_add(MT_DATA, pnum);
5228 mtype_stat_sub(MT_FREE, pnum);
5229 }
5230
5231 if (wantall && (pnum != *num_needed)) {
5232 if (top != NULL) {
5233 m_freem_list(top);
5234 }
5235 return NULL;
5236 }
5237
5238 if (pnum > *num_needed) {
5239 printf("%s: File a radar related to <rdar://10146739>. \
5240 needed = %u, pnum = %u, num_needed = %u \n",
5241 __func__, needed, pnum, *num_needed);
5242 }
5243 *num_needed = pnum;
5244
5245 return top;
5246 }
5247
5248 /*
5249 * Return a list of mbufs linked by m_nextpkt. Try for numlist, and if
5250 * wantall is not set, return whatever number was available. The size of
5251 * each mbuf in the list is controlled by the parameter packetlen. Each
5252 * mbuf of the list may have a chain of mbufs linked by m_next. Each mbuf
5253 * in the chain is called a segment. If maxsegments is not NULL and the
5254 * value pointed to is not zero, it specifies the maximum number of segments
5255 * allowed for a chain of mbufs. If maxsegments is NULL or the value pointed
5256 * to is zero, the caller places no restriction on the number of segments.
5257 * The actual number of segments of an mbuf chain is returned in the value
5258 * pointed to by maxsegments.
5259 */
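/*
 * For example (illustrative, assuming the standard 2KB/4KB cluster
 * sizes): packetlen = 3000 with wantsize = 0 selects a 4KB cluster
 * (bufsize = m_maxsize(MC_BIGCL)) and results in nsegs = 1.
 */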
5260 __private_extern__ struct mbuf *
5261 m_allocpacket_internal(unsigned int *numlist, size_t packetlen,
5262 unsigned int *maxsegments, int wait, int wantall, size_t wantsize)
5263 {
5264 struct mbuf **np, *top, *first = NULL;
5265 size_t bufsize, r_bufsize;
5266 unsigned int num = 0;
5267 unsigned int nsegs = 0;
5268 unsigned int needed = 0, resid;
5269 #if CONFIG_MBUF_MCACHE
5270 int mcflags = MSLEEPF(wait);
5271 mcache_obj_t *mp_list = NULL, *rmp_list = NULL;
5272 mcache_t *cp = NULL, *rcp = NULL;
5273 #else
5274 zstack_t mp_list = {}, rmp_list = {};
5275 mbuf_class_t class = MC_MBUF, rclass = MC_MBUF_CL;
5276 #endif /* CONFIG_MBUF_MCACHE */
5277
5278 if (*numlist == 0) {
5279 os_log(OS_LOG_DEFAULT, "m_allocpacket_internal *numlist is 0");
5280 return NULL;
5281 }
5282
5283 top = NULL;
5284 np = ⊤
5285
5286 if (wantsize == 0) {
5287 if (packetlen <= MINCLSIZE) {
5288 bufsize = packetlen;
5289 } else if (packetlen > m_maxsize(MC_CL)) {
5290 /* Use 4KB if jumbo cluster pool isn't available */
5291 if (packetlen <= m_maxsize(MC_BIGCL) || njcl == 0) {
5292 bufsize = m_maxsize(MC_BIGCL);
5293 } else {
5294 bufsize = m_maxsize(MC_16KCL);
5295 }
5296 } else {
5297 bufsize = m_maxsize(MC_CL);
5298 }
5299 } else if (wantsize == m_maxsize(MC_CL) ||
5300 wantsize == m_maxsize(MC_BIGCL) ||
5301 (wantsize == m_maxsize(MC_16KCL) && njcl > 0)) {
5302 bufsize = wantsize;
5303 } else {
5304 *numlist = 0;
5305 os_log(OS_LOG_DEFAULT, "m_allocpacket_internal wantsize unsupported");
5306 return NULL;
5307 }
5308
5309 if (bufsize <= MHLEN) {
5310 nsegs = 1;
5311 } else if (bufsize <= MINCLSIZE) {
5312 if (maxsegments != NULL && *maxsegments == 1) {
5313 bufsize = m_maxsize(MC_CL);
5314 nsegs = 1;
5315 } else {
5316 nsegs = 2;
5317 }
5318 } else if (bufsize == m_maxsize(MC_16KCL)) {
5319 VERIFY(njcl > 0);
5320 nsegs = ((packetlen - 1) >> M16KCLSHIFT) + 1;
5321 } else if (bufsize == m_maxsize(MC_BIGCL)) {
5322 nsegs = ((packetlen - 1) >> MBIGCLSHIFT) + 1;
5323 } else {
5324 nsegs = ((packetlen - 1) >> MCLSHIFT) + 1;
5325 }
5326 if (maxsegments != NULL) {
5327 if (*maxsegments && nsegs > *maxsegments) {
5328 *maxsegments = nsegs;
5329 *numlist = 0;
5330 os_log(OS_LOG_DEFAULT, "m_allocpacket_internal nsegs > *maxsegments");
5331 return NULL;
5332 }
5333 *maxsegments = nsegs;
5334 }
5335
5336 #if CONFIG_MBUF_MCACHE
5337 /*
5338 * The caller doesn't want all the requested buffers; only some.
5339 * Try hard to get what we can, but don't block. This effectively
5340 * overrides MCR_SLEEP, since this thread will not go to sleep
5341 * if we can't get all the buffers.
5342 */
5343 if (!wantall || (mcflags & MCR_NOSLEEP)) {
5344 mcflags |= MCR_TRYHARD;
5345 }
5346 #endif /* CONFIG_MBUF_MCACHE */
5347
5348 /*
5349 * Simple case where all elements in the lists/chains are mbufs.
5350 * Unless bufsize is greater than MHLEN, each segment chain is made
5351 * up of exactly 1 mbuf. Otherwise, each segment chain is made up
5352 * of 2 mbufs; the second one is used for the residual data, i.e.
5353 * the remaining data that cannot fit into the first mbuf.
5354 */
5355 if (bufsize <= MINCLSIZE) {
5356 /* Allocate the elements in one shot from the mbuf cache */
5357 ASSERT(bufsize <= MHLEN || nsegs == 2);
5358 #if CONFIG_MBUF_MCACHE
5359 cp = m_cache(MC_MBUF);
5360 needed = mcache_alloc_ext(cp, &mp_list,
5361 (*numlist) * nsegs, mcflags);
5362 #else
5363 class = MC_MBUF;
5364 mp_list = mz_alloc_n((*numlist) * nsegs, wait);
5365 needed = zstack_count(mp_list);
5366 #endif /* CONFIG_MBUF_MCACHE */
5367
5368 /*
5369 * The number of elements must be even if we are to use an
5370 * mbuf (instead of a cluster) to store the residual data.
5371 * If we couldn't allocate the requested number of mbufs,
5372 * trim the number down (if it's odd) in order to avoid
5373 * creating a partial segment chain.
5374 */
5375 if (bufsize > MHLEN && (needed & 0x1)) {
5376 needed--;
5377 }
5378
5379 while (num < needed) {
5380 struct mbuf *m = NULL;
5381
5382 #if CONFIG_MBUF_MCACHE
5383 m = (struct mbuf *)mp_list;
5384 mp_list = mp_list->obj_next;
5385 #else
5386 m = zstack_pop(&mp_list);
5387 #endif /* CONFIG_MBUF_MCACHE */
5388 ASSERT(m != NULL);
5389
5390 MBUF_INIT(m, 1, MT_DATA);
5391 num++;
5392 if (bufsize > MHLEN) {
5393 /* A second mbuf for this segment chain */
5394 #if CONFIG_MBUF_MCACHE
5395 m->m_next = (struct mbuf *)mp_list;
5396 mp_list = mp_list->obj_next;
5397 #else
5398 m->m_next = zstack_pop(&mp_list);
5399 #endif /* CONFIG_MBUF_MCACHE */
5400
5401 ASSERT(m->m_next != NULL);
5402
5403 MBUF_INIT(m->m_next, 0, MT_DATA);
5404 num++;
5405 }
5406 *np = m;
5407 np = &m->m_nextpkt;
5408 }
5409 #if CONFIG_MBUF_MCACHE
5410 ASSERT(num != *numlist || mp_list == NULL);
5411 #else
5412 ASSERT(num != *numlist || zstack_empty(mp_list));
5413 #endif /* CONFIG_MBUF_MCACHE */
5414
5415 if (num > 0) {
5416 mtype_stat_add(MT_DATA, num);
5417 mtype_stat_sub(MT_FREE, num);
5418 }
5419 num /= nsegs;
5420
5421 /* We've got them all; return to caller */
5422 if (num == *numlist) {
5423 return top;
5424 }
5425
5426 goto fail;
5427 }
5428
5429 /*
5430 * Complex cases where elements are made up of one or more composite
5431 * mbufs + cluster, depending on packetlen. Each N-segment chain can
5432 * be illustrated as follows:
5433 *
5434 * [mbuf + cluster 1] [mbuf + cluster 2] ... [mbuf + cluster N]
5435 *
5436 * Every composite mbuf + cluster element comes from the intermediate
5437 * cache (either MC_MBUF_CL or MC_MBUF_BIGCL). For space efficiency,
5438 * the last composite element will come from the MC_MBUF_CL cache,
5439 * unless the residual data is larger than 2KB, in which case we use
5440 * the big cluster composite cache (MC_MBUF_BIGCL) instead. Residual
5441 * data is defined as extra data beyond the first element that cannot
5442 * fit into the previous element, i.e. there is no residual data if
5443 * the chain only has 1 segment.
5444 */
5445 r_bufsize = bufsize;
5446 resid = packetlen > bufsize ? packetlen % bufsize : 0;
5447 if (resid > 0) {
5448 /* There is residual data; figure out the cluster size */
5449 if (wantsize == 0 && packetlen > MINCLSIZE) {
5450 /*
5451 * Caller didn't request that all of the segments
5452 * in the chain use the same cluster size; pick the
5453 * smallest cluster size that fits the residual data.
5454 */
5455 if (njcl > 0 && resid > m_maxsize(MC_BIGCL)) {
5456 r_bufsize = m_maxsize(MC_16KCL);
5457 } else if (resid > m_maxsize(MC_CL)) {
5458 r_bufsize = m_maxsize(MC_BIGCL);
5459 } else {
5460 r_bufsize = m_maxsize(MC_CL);
5461 }
5462 } else {
5463 /* Use the same cluster size as the other segments */
5464 resid = 0;
5465 }
5466 }
5467
5468 needed = *numlist;
5469 if (resid > 0) {
5470 /*
5471 * Attempt to allocate composite mbuf + cluster elements for
5472 * the residual data in each chain; record the number of such
5473 * elements that can be allocated so that we know how many
5474 * segment chains we can afford to create.
5475 */
5476 #if CONFIG_MBUF_MCACHE
5477 if (r_bufsize <= m_maxsize(MC_CL)) {
5478 rcp = m_cache(MC_MBUF_CL);
5479 } else if (r_bufsize <= m_maxsize(MC_BIGCL)) {
5480 rcp = m_cache(MC_MBUF_BIGCL);
5481 } else {
5482 rcp = m_cache(MC_MBUF_16KCL);
5483 }
5484 needed = mcache_alloc_ext(rcp, &rmp_list, *numlist, mcflags);
5485 #else
5486 if (r_bufsize <= m_maxsize(MC_CL)) {
5487 rclass = MC_MBUF_CL;
5488 } else if (r_bufsize <= m_maxsize(MC_BIGCL)) {
5489 rclass = MC_MBUF_BIGCL;
5490 } else {
5491 rclass = MC_MBUF_16KCL;
5492 }
5493 rmp_list = mz_composite_alloc_n(rclass, *numlist, wait);
5494 needed = zstack_count(rmp_list);
5495 #endif /* CONFIG_MBUF_MCACHE */
5496 if (needed == 0) {
5497 goto fail;
5498 }
5499
5500 /* nsegs is temporarily reduced for the calculation below */
5501 ASSERT(nsegs > 1);
5502 nsegs--;
5503 }
5504
5505 /*
5506 * Attempt to allocate the rest of the composite mbuf + cluster
5507 * elements for the number of segment chains that we need.
5508 */
5509 #if CONFIG_MBUF_MCACHE
5510 if (bufsize <= m_maxsize(MC_CL)) {
5511 cp = m_cache(MC_MBUF_CL);
5512 } else if (bufsize <= m_maxsize(MC_BIGCL)) {
5513 cp = m_cache(MC_MBUF_BIGCL);
5514 } else {
5515 cp = m_cache(MC_MBUF_16KCL);
5516 }
5517 needed = mcache_alloc_ext(cp, &mp_list, needed * nsegs, mcflags);
5518 #else
5519 if (bufsize <= m_maxsize(MC_CL)) {
5520 class = MC_MBUF_CL;
5521 } else if (bufsize <= m_maxsize(MC_BIGCL)) {
5522 class = MC_MBUF_BIGCL;
5523 } else {
5524 class = MC_MBUF_16KCL;
5525 }
5526 mp_list = mz_composite_alloc_n(class, needed * nsegs, wait);
5527 needed = zstack_count(mp_list);
5528 #endif /* CONFIG_MBUF_MCACHE */
5529
5530 /* Round it down to avoid creating a partial segment chain */
5531 needed = (needed / nsegs) * nsegs;
5532 if (needed == 0) {
5533 goto fail;
5534 }
5535
5536 if (resid > 0) {
5537 /*
5538 * We're about to construct the chain(s); take into account
5539 * the number of segments we have created above to hold the
5540 * residual data for each chain, as well as restore the
5541 * original count of segments per chain.
5542 */
5543 ASSERT(nsegs > 0);
5544 needed += needed / nsegs;
5545 nsegs++;
5546 }
5547
5548 for (;;) {
5549 struct mbuf *m = NULL;
5550 u_int16_t flag;
5551 struct ext_ref *rfa;
5552 void *cl;
5553 int pkthdr;
5554 m_ext_free_func_t m_free_func;
5555
5556 ++num;
5557
5558 if (nsegs == 1 || (num % nsegs) != 0 || resid == 0) {
5559 #if CONFIG_MBUF_MCACHE
5560 m = (struct mbuf *)mp_list;
5561 mp_list = mp_list->obj_next;
5562 #else
5563 m = zstack_pop(&mp_list);
5564 #endif /* CONFIG_MBUF_MCACHE */
5565 } else {
5566 #if CONFIG_MBUF_MCACHE
5567 m = (struct mbuf *)rmp_list;
5568 rmp_list = rmp_list->obj_next;
5569 #else
5570 m = zstack_pop(&rmp_list);
5571 #endif /* CONFIG_MBUF_MCACHE */
5572 }
5573 m_free_func = m_get_ext_free(m);
5574 ASSERT(m != NULL);
5575 VERIFY(m->m_type == MT_FREE && m->m_flags == M_EXT);
5576 VERIFY(m_free_func == NULL || m_free_func == m_bigfree ||
5577 m_free_func == m_16kfree);
5578
5579 cl = m->m_ext.ext_buf;
5580 rfa = m_get_rfa(m);
5581
5582 ASSERT(cl != NULL && rfa != NULL);
5583 VERIFY(MBUF_IS_COMPOSITE(m));
5584
5585 flag = MEXT_FLAGS(m);
5586
5587 pkthdr = (nsegs == 1 || (num % nsegs) == 1);
5588 if (pkthdr) {
5589 first = m;
5590 }
5591 MBUF_INIT(m, pkthdr, MT_DATA);
5592 if (m_free_func == m_16kfree) {
5593 MBUF_16KCL_INIT(m, cl, rfa, 1, flag);
5594 } else if (m_free_func == m_bigfree) {
5595 MBUF_BIGCL_INIT(m, cl, rfa, 1, flag);
5596 } else {
5597 MBUF_CL_INIT(m, cl, rfa, 1, flag);
5598 }
5599
5600 *np = m;
5601 if ((num % nsegs) == 0) {
5602 np = &first->m_nextpkt;
5603 } else {
5604 np = &m->m_next;
5605 }
5606
5607 if (num == needed) {
5608 break;
5609 }
5610 }
5611
5612 if (num > 0) {
5613 mtype_stat_add(MT_DATA, num);
5614 mtype_stat_sub(MT_FREE, num);
5615 }
5616
5617 num /= nsegs;
5618
5619 /* We've got them all; return to caller */
5620 if (num == *numlist) {
5621 #if CONFIG_MBUF_MCACHE
5622 ASSERT(mp_list == NULL && rmp_list == NULL);
5623 #else
5624 ASSERT(zstack_empty(mp_list) && zstack_empty(rmp_list));
5625 #endif /* CONFIG_MBUF_MCACHE */
5626 return top;
5627 }
5628
5629 fail:
5630 /* Free up what's left of the above */
5631 #if CONFIG_MBUF_MCACHE
5632 if (mp_list != NULL) {
5633 mcache_free_ext(cp, mp_list);
5634 }
5635 if (rmp_list != NULL) {
5636 mcache_free_ext(rcp, rmp_list);
5637 }
5638 #else
5639 if (!zstack_empty(mp_list)) {
5640 if (class == MC_MBUF) {
5641 /* No need to elide, these mbufs came from the cache. */
5642 mz_free_n(mp_list);
5643 } else {
5644 mz_composite_free_n(class, mp_list);
5645 }
5646 }
5647 if (!zstack_empty(rmp_list)) {
5648 mz_composite_free_n(rclass, rmp_list);
5649 }
5650 #endif /* CONFIG_MBUF_MCACHE */
5651 if (wantall && top != NULL) {
5652 m_freem_list(top);
5653 *numlist = 0;
5654 return NULL;
5655 }
5656 *numlist = num;
5657 return top;
5658 }
5659
5660 /*
5661 * Best effort to get a mbuf cluster + pkthdr. Used by drivers to allocate
5662 * packets on the receive ring.
5663 */
5664 __private_extern__ struct mbuf *
5665 m_getpacket_how(int wait)
5666 {
5667 unsigned int num_needed = 1;
5668
5669 return m_getpackets_internal(&num_needed, 1, wait, 1,
5670 m_maxsize(MC_CL));
5671 }
5672
5673 /*
5674 * Best effort to get a mbuf cluster + pkthdr. Used by drivers to allocate
5675 * packets on the receive ring.
5676 */
5677 struct mbuf *
5678 m_getpacket(void)
5679 {
5680 unsigned int num_needed = 1;
5681
5682 return m_getpackets_internal(&num_needed, 1, M_WAIT, 1,
5683 m_maxsize(MC_CL));
5684 }
5685
5686 /*
5687 * Return a list of mbuf hdrs that point to clusters. Try for num_needed;
5688 * if this can't be met, return whatever number was available. Set up the
5689 * first num_with_pkthdrs with mbuf hdrs configured as packet headers. These
5690 * are chained on the m_nextpkt field. Any packets requested beyond this are
5691 * chained onto the last packet header's m_next field.
5692 */
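/*
 * Illustrative sketch (hypothetical caller): ask for up to 16
 * cluster-backed mbufs, the first 4 set up as packet headers.
 *
 *	struct mbuf *m = m_getpackets(16, 4, M_DONTWAIT);
 *
 * The first 4 mbufs are linked through m_nextpkt; any remaining mbufs
 * hang off the last packet header's m_next field, as described above.
 * Fewer than 16 may be returned if the pools are depleted.
 */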
5693 struct mbuf *
5694 m_getpackets(int num_needed, int num_with_pkthdrs, int how)
5695 {
5696 unsigned int n = num_needed;
5697
5698 return m_getpackets_internal(&n, num_with_pkthdrs, how, 0,
5699 m_maxsize(MC_CL));
5700 }
5701
5702 /*
5703 * Return a list of mbuf hdrs set up as packet hdrs, chained together
5704 * on the m_nextpkt field.
5705 */
5706 struct mbuf *
5707 m_getpackethdrs(int num_needed, int how)
5708 {
5709 struct mbuf *m;
5710 struct mbuf **np, *top;
5711
5712 top = NULL;
5713 np = ⊤
5714
5715 while (num_needed--) {
5716 m = _M_RETRYHDR(how, MT_DATA);
5717 if (m == NULL) {
5718 break;
5719 }
5720
5721 *np = m;
5722 np = &m->m_nextpkt;
5723 }
5724
5725 return top;
5726 }
5727
5728 /*
5729 * Free an mbuf list (m_nextpkt) while following m_next. Returns the count
5730 * of packets freed. Used by the drivers.
5731 */
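/*
 * Illustrative sketch (hypothetical driver usage): free a batch of
 * completed packets in one call and account for how many went away.
 *
 *	int freed = m_freem_list(tx_done_head);
 *
 * tx_done_head is assumed to be a list of packets linked by m_nextpkt;
 * every mbuf reachable through m_next in each packet is freed as well.
 */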
5732 int
5733 m_freem_list(struct mbuf *m)
5734 {
5735 struct mbuf *nextpkt;
5736 #if CONFIG_MBUF_MCACHE
5737 mcache_obj_t *mp_list = NULL;
5738 mcache_obj_t *mcl_list = NULL;
5739 mcache_obj_t *mbc_list = NULL;
5740 mcache_obj_t *m16k_list = NULL;
5741 mcache_obj_t *m_mcl_list = NULL;
5742 mcache_obj_t *m_mbc_list = NULL;
5743 mcache_obj_t *m_m16k_list = NULL;
5744 mcache_obj_t *ref_list = NULL;
5745 #else
5746 zstack_t mp_list = {}, mcl_list = {}, mbc_list = {},
5747 m16k_list = {}, m_mcl_list = {},
5748 m_mbc_list = {}, m_m16k_list = {}, ref_list = {};
5749 #endif /* CONFIG_MBUF_MCACHE */
5750 int pktcount = 0;
5751 int mt_free = 0, mt_data = 0, mt_header = 0, mt_soname = 0, mt_tag = 0;
5752
5753 while (m != NULL) {
5754 pktcount++;
5755
5756 nextpkt = m->m_nextpkt;
5757 m->m_nextpkt = NULL;
5758
5759 while (m != NULL) {
5760 struct mbuf *next = m->m_next;
5761 #if CONFIG_MBUF_MCACHE
5762 mcache_obj_t *o, *rfa;
5763 #else
5764 void *cl = NULL;
5765 #endif /* CONFIG_MBUF_MCACHE */
5766 if (m->m_type == MT_FREE) {
5767 panic("m_free: freeing an already freed mbuf");
5768 }
5769
5770 if (m->m_flags & M_PKTHDR) {
5771 /* Check for scratch area overflow */
5772 m_redzone_verify(m);
5773 /* Free the aux data and tags if there is any */
5774 m_tag_delete_chain(m);
5775 m_do_tx_compl_callback(m, NULL);
5776 }
5777
5778 if (!(m->m_flags & M_EXT)) {
5779 mt_free++;
5780 goto simple_free;
5781 }
5782
5783 if (MBUF_IS_PAIRED(m) && m_free_paired(m)) {
5784 m = next;
5785 continue;
5786 }
5787
5788 mt_free++;
5789
5790 #if CONFIG_MBUF_MCACHE
5791 o = (mcache_obj_t *)(void *)m->m_ext.ext_buf;
5792 #else
5793 cl = m->m_ext.ext_buf;
5794 #endif /* CONFIG_MBUF_MCACHE */
5795 /*
5796 * Make sure that we don't touch any ext_ref
5797 * member after we decrement the reference count
5798 * since that may lead to use-after-free
5799 * when we do not hold the last reference.
5800 */
5801 const bool composite = !!(MEXT_FLAGS(m) & EXTF_COMPOSITE);
5802 const m_ext_free_func_t m_free_func = m_get_ext_free(m);
5803 const uint16_t minref = MEXT_MINREF(m);
5804 const uint16_t refcnt = m_decref(m);
5805 if (refcnt == minref && !composite) {
5806 #if CONFIG_MBUF_MCACHE
5807 if (m_free_func == NULL) {
5808 o->obj_next = mcl_list;
5809 mcl_list = o;
5810 } else if (m_free_func == m_bigfree) {
5811 o->obj_next = mbc_list;
5812 mbc_list = o;
5813 } else if (m_free_func == m_16kfree) {
5814 o->obj_next = m16k_list;
5815 m16k_list = o;
5816 } else {
5817 (*(m_free_func))((caddr_t)o,
5818 m->m_ext.ext_size,
5819 m_get_ext_arg(m));
5820 }
5821 rfa = (mcache_obj_t *)(void *)m_get_rfa(m);
5822 rfa->obj_next = ref_list;
5823 ref_list = rfa;
5824 #else
5825 if (m_free_func == NULL) {
5826 zstack_push(&mcl_list, cl);
5827 } else if (m_free_func == m_bigfree) {
5828 zstack_push(&mbc_list, cl);
5829 } else if (m_free_func == m_16kfree) {
5830 zstack_push(&m16k_list, cl);
5831 } else {
5832 (*(m_free_func))((caddr_t)cl,
5833 m->m_ext.ext_size,
5834 m_get_ext_arg(m));
5835 }
5836 zstack_push(&ref_list, m_get_rfa(m));
5837 #endif /* CONFIG_MBUF_MCACHE */
5838 m_set_ext(m, NULL, NULL, NULL);
5839 } else if (refcnt == minref && composite) {
5840 VERIFY(!(MEXT_FLAGS(m) & EXTF_PAIRED));
5841 VERIFY(m->m_type != MT_FREE);
5842 /*
5843 * Amortize the costs of atomic operations
5844 * by doing them at the end, if possible.
5845 */
5846 if (m->m_type == MT_DATA) {
5847 mt_data++;
5848 } else if (m->m_type == MT_HEADER) {
5849 mt_header++;
5850 } else if (m->m_type == MT_SONAME) {
5851 mt_soname++;
5852 } else if (m->m_type == MT_TAG) {
5853 mt_tag++;
5854 } else {
5855 mtype_stat_dec(m->m_type);
5856 }
5857
5858 m->m_type = MT_FREE;
5859 m->m_flags = M_EXT;
5860 m->m_len = 0;
5861 m->m_next = m->m_nextpkt = NULL;
5862
5863 /*
5864 * MEXT_FLAGS is safe to access here
5865 * since we are now sure that we held
5866 * the last reference to ext_ref.
5867 */
5868 MEXT_FLAGS(m) &= ~EXTF_READONLY;
5869
5870 /* "Free" into the intermediate cache */
5871 #if CONFIG_MBUF_MCACHE
5872 o = (mcache_obj_t *)m;
5873 if (m_free_func == NULL) {
5874 o->obj_next = m_mcl_list;
5875 m_mcl_list = o;
5876 } else if (m_free_func == m_bigfree) {
5877 o->obj_next = m_mbc_list;
5878 m_mbc_list = o;
5879 } else {
5880 VERIFY(m_free_func == m_16kfree);
5881 o->obj_next = m_m16k_list;
5882 m_m16k_list = o;
5883 }
5884 #else
5885 if (m_free_func == NULL) {
5886 zstack_push(&m_mcl_list, m);
5887 } else if (m_free_func == m_bigfree) {
5888 zstack_push(&m_mbc_list, m);
5889 } else {
5890 VERIFY(m_free_func == m_16kfree);
5891 zstack_push(&m_m16k_list, m);
5892 }
5893 #endif /* CONFIG_MBUF_MCACHE */
5894 m = next;
5895 continue;
5896 }
5897 simple_free:
5898 /*
5899 * Amortize the costs of atomic operations
5900 * by doing them at the end, if possible.
5901 */
5902 if (m->m_type == MT_DATA) {
5903 mt_data++;
5904 } else if (m->m_type == MT_HEADER) {
5905 mt_header++;
5906 } else if (m->m_type == MT_SONAME) {
5907 mt_soname++;
5908 } else if (m->m_type == MT_TAG) {
5909 mt_tag++;
5910 } else if (m->m_type != MT_FREE) {
5911 mtype_stat_dec(m->m_type);
5912 }
5913
5914 m->m_type = MT_FREE;
5915 m->m_flags = m->m_len = 0;
5916 m->m_next = m->m_nextpkt = NULL;
5917
5918 #if CONFIG_MBUF_MCACHE
5919 ((mcache_obj_t *)m)->obj_next = mp_list;
5920 mp_list = (mcache_obj_t *)m;
5921 #else
5922 m_elide(m);
5923 zstack_push(&mp_list, m);
5924 #endif /* CONFIG_MBUF_MCACHE */
5925
5926 m = next;
5927 }
5928
5929 m = nextpkt;
5930 }
5931
5932 if (mt_free > 0) {
5933 mtype_stat_add(MT_FREE, mt_free);
5934 }
5935 if (mt_data > 0) {
5936 mtype_stat_sub(MT_DATA, mt_data);
5937 }
5938 if (mt_header > 0) {
5939 mtype_stat_sub(MT_HEADER, mt_header);
5940 }
5941 if (mt_soname > 0) {
5942 mtype_stat_sub(MT_SONAME, mt_soname);
5943 }
5944 if (mt_tag > 0) {
5945 mtype_stat_sub(MT_TAG, mt_tag);
5946 }
5947 #if CONFIG_MBUF_MCACHE
5948 if (mp_list != NULL) {
5949 mcache_free_ext(m_cache(MC_MBUF), mp_list);
5950 }
5951 if (mcl_list != NULL) {
5952 mcache_free_ext(m_cache(MC_CL), mcl_list);
5953 }
5954 if (mbc_list != NULL) {
5955 mcache_free_ext(m_cache(MC_BIGCL), mbc_list);
5956 }
5957 if (m16k_list != NULL) {
5958 mcache_free_ext(m_cache(MC_16KCL), m16k_list);
5959 }
5960 if (m_mcl_list != NULL) {
5961 mcache_free_ext(m_cache(MC_MBUF_CL), m_mcl_list);
5962 }
5963 if (m_mbc_list != NULL) {
5964 mcache_free_ext(m_cache(MC_MBUF_BIGCL), m_mbc_list);
5965 }
5966 if (m_m16k_list != NULL) {
5967 mcache_free_ext(m_cache(MC_MBUF_16KCL), m_m16k_list);
5968 }
5969 if (ref_list != NULL) {
5970 mcache_free_ext(ref_cache, ref_list);
5971 }
5972 #else
5973 if (!zstack_empty(mp_list)) {
5974 /* mbufs elided above. */
5975 mz_free_n(mp_list);
5976 }
5977 if (!zstack_empty(mcl_list)) {
5978 zfree_nozero_n(ZONE_ID_CLUSTER_2K, mcl_list);
5979 }
5980 if (!zstack_empty(mbc_list)) {
5981 zfree_nozero_n(ZONE_ID_CLUSTER_4K, mbc_list);
5982 }
5983 if (!zstack_empty(m16k_list)) {
5984 zfree_nozero_n(ZONE_ID_CLUSTER_16K, m16k_list);
5985 }
5986 if (!zstack_empty(m_mcl_list)) {
5987 mz_composite_free_n(MC_MBUF_CL, m_mcl_list);
5988 }
5989 if (!zstack_empty(m_mbc_list)) {
5990 mz_composite_free_n(MC_MBUF_BIGCL, m_mbc_list);
5991 }
5992 if (!zstack_empty(m_m16k_list)) {
5993 mz_composite_free_n(MC_MBUF_16KCL, m_m16k_list);
5994 }
5995 if (!zstack_empty(ref_list)) {
5996 zfree_nozero_n(ZONE_ID_MBUF_REF, ref_list);
5997 }
5998 #endif /* CONFIG_MBUF_MCACHE */
5999
6000 return pktcount;
6001 }
6002
6003 void
6004 m_freem(struct mbuf *m)
6005 {
6006 while (m != NULL) {
6007 m = m_free(m);
6008 }
6009 }
6010
6011 /*
6012 * Mbuffer utility routines.
6013 */
6014 /*
6015 * Set the m_data pointer of a newly allocated mbuf to place an object of the
6016 * specified size at the end of the mbuf, longword aligned.
6017 *
6018 * NB: Historically, we had M_ALIGN(), MH_ALIGN(), and MEXT_ALIGN() as
6019 * separate macros, each asserting that it was called at the proper moment.
6020 * This required callers to themselves test the storage type and call the
6021 * right one. Rather than require callers to be aware of those layout
6022 * decisions, we centralize here.
6023 */
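/*
 * Worked example (illustrative only): on a 64-bit system, placing a
 * 10-byte object in an mbuf whose data area holds M_SIZE(m) bytes gives
 * adjust = M_SIZE(m) - 10; masking with ~(sizeof(long) - 1) rounds that
 * down to a multiple of 8, so the object ends at -- or just short of --
 * the end of the buffer while m_data stays longword aligned.
 */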
6024 void
6025 m_align(struct mbuf *m, int len)
6026 {
6027 int adjust = 0;
6028
6029 /* At this point data must point to start */
6030 VERIFY(m->m_data == M_START(m));
6031 VERIFY(len >= 0);
6032 VERIFY(len <= M_SIZE(m));
6033 adjust = M_SIZE(m) - len;
6034 m->m_data += adjust & ~(sizeof(long) - 1);
6035 }
6036
6037 /*
6038 * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain,
6039 * copy junk along. Does not adjust packet header length.
6040 */
6041 struct mbuf *
6042 m_prepend(struct mbuf *m, int len, int how)
6043 {
6044 struct mbuf *mn;
6045
6046 _MGET(mn, how, m->m_type);
6047 if (mn == NULL) {
6048 m_freem(m);
6049 return NULL;
6050 }
6051 if (m->m_flags & M_PKTHDR) {
6052 M_COPY_PKTHDR(mn, m);
6053 m->m_flags &= ~M_PKTHDR;
6054 }
6055 mn->m_next = m;
6056 m = mn;
6057 if (m->m_flags & M_PKTHDR) {
6058 VERIFY(len <= MHLEN);
6059 MH_ALIGN(m, len);
6060 } else {
6061 VERIFY(len <= MLEN);
6062 M_ALIGN(m, len);
6063 }
6064 m->m_len = len;
6065 return m;
6066 }
6067
6068 /*
6069 * Replacement for old M_PREPEND macro: allocate new mbuf to prepend to
6070 * chain, copy junk along, and adjust length.
6071 */
6072 struct mbuf *
6073 m_prepend_2(struct mbuf *m, int len, int how, int align)
6074 {
6075 if (M_LEADINGSPACE(m) >= len &&
6076 (!align || IS_P2ALIGNED((m->m_data - len), sizeof(u_int32_t)))) {
6077 m->m_data -= len;
6078 m->m_len += len;
6079 } else {
6080 m = m_prepend(m, len, how);
6081 }
6082 if ((m) && (m->m_flags & M_PKTHDR)) {
6083 m->m_pkthdr.len += len;
6084 }
6085 return m;
6086 }
6087
6088 /*
6089 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
6090 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
6091 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
6092 */
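/*
 * Illustrative sketch (hypothetical caller): take a read-only copy of an
 * entire packet without duplicating cluster data; external storage is
 * shared by bumping its reference count rather than copied.
 *
 *	struct mbuf *copy = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
 *	if (copy == NULL) {
 *		// allocation failed; the original chain m is untouched
 *	}
 */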
6093 struct mbuf *
6094 m_copym_mode(struct mbuf *m, int off0, int len, int wait, uint32_t mode)
6095 {
6096 struct mbuf *n, *mhdr = NULL, **np;
6097 int off = off0;
6098 struct mbuf *top;
6099 int copyhdr = 0;
6100
6101 if (off < 0 || len < 0) {
6102 panic("m_copym: invalid offset %d or len %d", off, len);
6103 }
6104
6105 VERIFY((mode != M_COPYM_MUST_COPY_HDR &&
6106 mode != M_COPYM_MUST_MOVE_HDR) || (m->m_flags & M_PKTHDR));
6107
6108 if ((off == 0 && (m->m_flags & M_PKTHDR)) ||
6109 mode == M_COPYM_MUST_COPY_HDR || mode == M_COPYM_MUST_MOVE_HDR) {
6110 mhdr = m;
6111 copyhdr = 1;
6112 }
6113
6114 while (off >= m->m_len) {
6115 if (m->m_next == NULL) {
6116 panic("m_copym: invalid mbuf chain");
6117 }
6118 off -= m->m_len;
6119 m = m->m_next;
6120 }
6121 np = ⊤
6122 top = NULL;
6123
6124 while (len > 0) {
6125 if (m == NULL) {
6126 if (len != M_COPYALL) {
6127 panic("m_copym: len != M_COPYALL");
6128 }
6129 break;
6130 }
6131
6132 if (copyhdr) {
6133 n = _M_RETRYHDR(wait, m->m_type);
6134 } else {
6135 n = _M_RETRY(wait, m->m_type);
6136 }
6137 *np = n;
6138
6139 if (n == NULL) {
6140 goto nospace;
6141 }
6142
6143 if (copyhdr != 0) {
6144 if ((mode == M_COPYM_MOVE_HDR) ||
6145 (mode == M_COPYM_MUST_MOVE_HDR)) {
6146 M_COPY_PKTHDR(n, mhdr);
6147 } else if ((mode == M_COPYM_COPY_HDR) ||
6148 (mode == M_COPYM_MUST_COPY_HDR)) {
6149 if (m_dup_pkthdr(n, mhdr, wait) == 0) {
6150 goto nospace;
6151 }
6152 }
6153 if (len == M_COPYALL) {
6154 n->m_pkthdr.len -= off0;
6155 } else {
6156 n->m_pkthdr.len = len;
6157 }
6158 copyhdr = 0;
6159 /*
6160 * There is no data to copy from the packet header mbuf
6161 * if it is empty or it lies before the starting offset
6162 */
6163 if (mhdr != m) {
6164 np = &n->m_next;
6165 continue;
6166 }
6167 }
6168 n->m_len = MIN(len, (m->m_len - off));
6169 if (m->m_flags & M_EXT) {
6170 n->m_ext = m->m_ext;
6171 m_incref(m);
6172 n->m_data = m->m_data + off;
6173 n->m_flags |= M_EXT;
6174 } else {
6175 /*
6176 * Limit to the capacity of the destination
6177 */
6178 if (n->m_flags & M_PKTHDR) {
6179 n->m_len = MIN(n->m_len, MHLEN);
6180 } else {
6181 n->m_len = MIN(n->m_len, MLEN);
6182 }
6183
6184 if (MTOD(n, char *) + n->m_len > ((char *)n) + _MSIZE) {
6185 panic("%s n %p copy overflow",
6186 __func__, n);
6187 }
6188
6189 bcopy(MTOD(m, caddr_t) + off, MTOD(n, caddr_t),
6190 (unsigned)n->m_len);
6191 }
6192 if (len != M_COPYALL) {
6193 len -= n->m_len;
6194 }
6195 off = 0;
6196 m = m->m_next;
6197 np = &n->m_next;
6198 }
6199
6200 return top;
6201 nospace:
6202 m_freem(top);
6203
6204 return NULL;
6205 }
6206
6207
6208 struct mbuf *
6209 m_copym(struct mbuf *m, int off0, int len, int wait)
6210 {
6211 return m_copym_mode(m, off0, len, wait, M_COPYM_MOVE_HDR);
6212 }
6213
6214 /*
6215 * Equivalent to m_copym except that all necessary mbuf hdrs are allocated
6216 * within this routine. Also, the last mbuf and offset accessed are passed
6217 * out and can be passed back in to avoid having to rescan the entire mbuf
6218 * list (normally hung off of the socket).
6219 */
6220 struct mbuf *
6221 m_copym_with_hdrs(struct mbuf *m0, int off0, int len0, int wait,
6222 struct mbuf **m_lastm, int *m_off, uint32_t mode)
6223 {
6224 struct mbuf *m = m0, *n, **np = NULL;
6225 int off = off0, len = len0;
6226 struct mbuf *top = NULL;
6227 #if CONFIG_MBUF_MCACHE
6228 int mcflags = MSLEEPF(wait);
6229 mcache_obj_t *list = NULL;
6230 #else
6231 zstack_t list = {};
6232 #endif /* CONFIG_MBUF_MCACHE */
6233 int copyhdr = 0;
6234 int type = 0;
6235 int needed = 0;
6236
6237 if (off == 0 && (m->m_flags & M_PKTHDR)) {
6238 copyhdr = 1;
6239 }
6240
6241 if (m_lastm != NULL && *m_lastm != NULL) {
6242 m = *m_lastm;
6243 off = *m_off;
6244 } else {
6245 while (off >= m->m_len) {
6246 off -= m->m_len;
6247 m = m->m_next;
6248 }
6249 }
6250
6251 n = m;
6252 while (len > 0) {
6253 needed++;
6254 ASSERT(n != NULL);
6255 len -= MIN(len, (n->m_len - ((needed == 1) ? off : 0)));
6256 n = n->m_next;
6257 }
6258 needed++;
6259 len = len0;
6260
6261 #if CONFIG_MBUF_MCACHE
6262 /*
6263 * If the caller doesn't want to be put to sleep, mark it with
6264 * MCR_TRYHARD so that we may reclaim buffers from other places
6265 * before giving up.
6266 */
6267 if (mcflags & MCR_NOSLEEP) {
6268 mcflags |= MCR_TRYHARD;
6269 }
6270
6271 if (mcache_alloc_ext(m_cache(MC_MBUF), &list, needed,
6272 mcflags) != needed) {
6273 goto nospace;
6274 }
6275 #else
6276 list = mz_alloc_n(needed, wait);
6277 if (zstack_count(list) != needed) {
6278 goto nospace;
6279 }
6280 #endif /* CONFIG_MBUF_MCACHE */
6281
6282 needed = 0;
6283 while (len > 0) {
6284 #if CONFIG_MBUF_MCACHE
6285 n = (struct mbuf *)list;
6286 list = list->obj_next;
6287 #else
6288 n = zstack_pop(&list);
6289 #endif /* CONFIG_MBUF_MCACHE */
6290 ASSERT(n != NULL && m != NULL);
6291
6292 type = (top == NULL) ? MT_HEADER : m->m_type;
6293 MBUF_INIT(n, (top == NULL), type);
6294
6295 if (top == NULL) {
6296 top = n;
6297 np = &top->m_next;
6298 continue;
6299 } else {
6300 needed++;
6301 *np = n;
6302 }
6303
6304 if (copyhdr) {
6305 if ((mode == M_COPYM_MOVE_HDR) ||
6306 (mode == M_COPYM_MUST_MOVE_HDR)) {
6307 M_COPY_PKTHDR(n, m);
6308 } else if ((mode == M_COPYM_COPY_HDR) ||
6309 (mode == M_COPYM_MUST_COPY_HDR)) {
6310 if (m_dup_pkthdr(n, m, wait) == 0) {
6311 #if !CONFIG_MBUF_MCACHE
6312 m_elide(n);
6313 #endif
6314 goto nospace;
6315 }
6316 }
6317 n->m_pkthdr.len = len;
6318 copyhdr = 0;
6319 }
6320 n->m_len = MIN(len, (m->m_len - off));
6321
6322 if (m->m_flags & M_EXT) {
6323 n->m_ext = m->m_ext;
6324 m_incref(m);
6325 n->m_data = m->m_data + off;
6326 n->m_flags |= M_EXT;
6327 } else {
6328 if (MTOD(n, char *) + n->m_len > ((char *)n) + _MSIZE) {
6329 panic("%s n %p copy overflow",
6330 __func__, n);
6331 }
6332
6333 bcopy(MTOD(m, caddr_t) + off, MTOD(n, caddr_t),
6334 (unsigned)n->m_len);
6335 }
6336 len -= n->m_len;
6337
6338 if (len == 0) {
6339 if (m_lastm != NULL && m_off != NULL) {
6340 if ((off + n->m_len) == m->m_len) {
6341 *m_lastm = m->m_next;
6342 *m_off = 0;
6343 } else {
6344 *m_lastm = m;
6345 *m_off = off + n->m_len;
6346 }
6347 }
6348 break;
6349 }
6350 off = 0;
6351 m = m->m_next;
6352 np = &n->m_next;
6353 }
6354
6355 mtype_stat_inc(MT_HEADER);
6356 mtype_stat_add(type, needed);
6357 mtype_stat_sub(MT_FREE, needed + 1);
6358
6359 #if CONFIG_MBUF_MCACHE
6360 ASSERT(list == NULL);
6361 #else
6362 ASSERT(zstack_empty(list));
6363 #endif /* CONFIG_MBUF_MCACHE */
6364
6365 return top;
6366
6367 nospace:
6368 #if CONFIG_MBUF_MCACHE
6369 if (list != NULL) {
6370 mcache_free_ext(m_cache(MC_MBUF), list);
6371 }
6372 #else
6373 if (!zstack_empty(list)) {
6374 /* No need to elide, these mbufs came from the cache. */
6375 mz_free_n(list);
6376 }
6377 #endif /* CONFIG_MBUF_MCACHE */
6378 if (top != NULL) {
6379 m_freem(top);
6380 }
6381 return NULL;
6382 }
6383
6384 /*
6385 * Copy data from an mbuf chain starting "off" bytes from the beginning,
6386 * continuing for "len" bytes, into the indicated buffer.
6387 */
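/*
 * Illustrative sketch (hypothetical names): copy 20 bytes starting at
 * offset hdr_off out of the chain into a local, properly aligned buffer,
 * regardless of how the chain is fragmented.
 *
 *	uint8_t hdr[20];
 *	m_copydata(m, hdr_off, sizeof(hdr), hdr);
 *
 * The caller must ensure the chain actually contains that many bytes,
 * since the routine panics on an over-long request.
 */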
6388 void
6389 m_copydata(struct mbuf *m, int off, int len, void *vp)
6390 {
6391 int off0 = off, len0 = len;
6392 struct mbuf *m0 = m;
6393 unsigned count;
6394 char *cp = vp;
6395
6396 if (__improbable(off < 0 || len < 0)) {
6397 panic("%s: invalid offset %d or len %d", __func__, off, len);
6398 /* NOTREACHED */
6399 }
6400
6401 while (off > 0) {
6402 if (__improbable(m == NULL)) {
6403 panic("%s: invalid mbuf chain %p [off %d, len %d]",
6404 __func__, m0, off0, len0);
6405 /* NOTREACHED */
6406 }
6407 if (off < m->m_len) {
6408 break;
6409 }
6410 off -= m->m_len;
6411 m = m->m_next;
6412 }
6413 while (len > 0) {
6414 if (__improbable(m == NULL)) {
6415 panic("%s: invalid mbuf chain %p [off %d, len %d]",
6416 __func__, m0, off0, len0);
6417 /* NOTREACHED */
6418 }
6419 count = MIN(m->m_len - off, len);
6420 bcopy(MTOD(m, caddr_t) + off, cp, count);
6421 len -= count;
6422 cp += count;
6423 off = 0;
6424 m = m->m_next;
6425 }
6426 }
6427
6428 /*
6429 * Concatenate mbuf chain n to m. Both chains must be of the same type
6430 * (e.g. MT_DATA). Any m_pkthdr is not updated.
6431 */
6432 void
6433 m_cat(struct mbuf *m, struct mbuf *n)
6434 {
6435 while (m->m_next) {
6436 m = m->m_next;
6437 }
6438 while (n) {
6439 if ((m->m_flags & M_EXT) ||
6440 m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
6441 /* just join the two chains */
6442 m->m_next = n;
6443 return;
6444 }
6445 /* splat the data from one into the other */
6446 bcopy(MTOD(n, caddr_t), MTOD(m, caddr_t) + m->m_len,
6447 (u_int)n->m_len);
6448 m->m_len += n->m_len;
6449 n = m_free(n);
6450 }
6451 }
6452
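/*
 * Trim req_len bytes from the mbuf chain: a positive req_len trims from
 * the head of the chain, a negative value trims from the tail. When the
 * first mbuf carries a packet header, m_pkthdr.len is adjusted to match.
 */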
6453 void
6454 m_adj(struct mbuf *mp, int req_len)
6455 {
6456 int len = req_len;
6457 struct mbuf *m;
6458 int count;
6459
6460 if ((m = mp) == NULL) {
6461 return;
6462 }
6463 if (len >= 0) {
6464 /*
6465 * Trim from head.
6466 */
6467 while (m != NULL && len > 0) {
6468 if (m->m_len <= len) {
6469 len -= m->m_len;
6470 m->m_len = 0;
6471 m = m->m_next;
6472 } else {
6473 m->m_len -= len;
6474 m->m_data += len;
6475 len = 0;
6476 }
6477 }
6478 m = mp;
6479 if (m->m_flags & M_PKTHDR) {
6480 m->m_pkthdr.len -= (req_len - len);
6481 }
6482 } else {
6483 /*
6484 * Trim from tail. Scan the mbuf chain,
6485 * calculating its length and finding the last mbuf.
6486 * If the adjustment only affects this mbuf, then just
6487 * adjust and return. Otherwise, rescan and truncate
6488 * after the remaining size.
6489 */
6490 len = -len;
6491 count = 0;
6492 for (;;) {
6493 count += m->m_len;
6494 if (m->m_next == (struct mbuf *)0) {
6495 break;
6496 }
6497 m = m->m_next;
6498 }
6499 if (m->m_len >= len) {
6500 m->m_len -= len;
6501 m = mp;
6502 if (m->m_flags & M_PKTHDR) {
6503 m->m_pkthdr.len -= len;
6504 }
6505 return;
6506 }
6507 count -= len;
6508 if (count < 0) {
6509 count = 0;
6510 }
6511 /*
6512 * Correct length for chain is "count".
6513 * Find the mbuf with last data, adjust its length,
6514 * and toss data from remaining mbufs on chain.
6515 */
6516 m = mp;
6517 if (m->m_flags & M_PKTHDR) {
6518 m->m_pkthdr.len = count;
6519 }
6520 for (; m; m = m->m_next) {
6521 if (m->m_len >= count) {
6522 m->m_len = count;
6523 break;
6524 }
6525 count -= m->m_len;
6526 }
6527 while ((m = m->m_next)) {
6528 m->m_len = 0;
6529 }
6530 }
6531 }
6532
6533 /*
6534 * Rearrange an mbuf chain so that len bytes are contiguous
6535 * and in the data area of an mbuf (so that mtod
6536 * will work for a structure of size len). Returns the resulting
6537 * mbuf chain on success, frees it and returns NULL on failure.
6538 * If there is room, it will add up to max_protohdr-len extra bytes to the
6539 * contiguous region in an attempt to avoid being called next time.
6540 */
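/*
 * Illustrative sketch (classic protocol-input pattern, hypothetical
 * context): make sure the IP header is contiguous before dereferencing it.
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL) {
 *		return;		// chain was freed by m_pullup
 *	}
 *	struct ip *ip = mtod(m, struct ip *);
 */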
6541 struct mbuf *
6542 m_pullup(struct mbuf *n, int len)
6543 {
6544 struct mbuf *m;
6545 int count;
6546 int space;
6547
6548 /* check invalid arguments */
6549 if (n == NULL) {
6550 panic("%s: n == NULL", __func__);
6551 }
6552 if (len < 0) {
6553 os_log_info(OS_LOG_DEFAULT, "%s: failed negative len %d",
6554 __func__, len);
6555 goto bad;
6556 }
6557 if (len > MLEN) {
6558 os_log_info(OS_LOG_DEFAULT, "%s: failed len %d too big",
6559 __func__, len);
6560 goto bad;
6561 }
6562 if ((n->m_flags & M_EXT) == 0 &&
6563 n->m_data >= &n->m_dat[MLEN]) {
6564 os_log_info(OS_LOG_DEFAULT, "%s: m_data out of bounds",
6565 __func__);
6566 goto bad;
6567 }
6568
6569 /*
6570 * If first mbuf has no cluster, and has room for len bytes
6571 * without shifting current data, pullup into it,
6572 * otherwise allocate a new mbuf to prepend to the chain.
6573 */
6574 if ((n->m_flags & M_EXT) == 0 &&
6575 len < &n->m_dat[MLEN] - n->m_data && n->m_next != NULL) {
6576 if (n->m_len >= len) {
6577 return n;
6578 }
6579 m = n;
6580 n = n->m_next;
6581 len -= m->m_len;
6582 } else {
6583 if (len > MHLEN) {
6584 goto bad;
6585 }
6586 _MGET(m, M_DONTWAIT, n->m_type);
6587 if (m == 0) {
6588 goto bad;
6589 }
6590 m->m_len = 0;
6591 if (n->m_flags & M_PKTHDR) {
6592 M_COPY_PKTHDR(m, n);
6593 n->m_flags &= ~M_PKTHDR;
6594 }
6595 }
6596 space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
6597 do {
6598 count = MIN(MIN(MAX(len, max_protohdr), space), n->m_len);
6599 bcopy(MTOD(n, caddr_t), MTOD(m, caddr_t) + m->m_len,
6600 (unsigned)count);
6601 len -= count;
6602 m->m_len += count;
6603 n->m_len -= count;
6604 space -= count;
6605 if (n->m_len != 0) {
6606 n->m_data += count;
6607 } else {
6608 n = m_free(n);
6609 }
6610 } while (len > 0 && n != NULL);
6611 if (len > 0) {
6612 (void) m_free(m);
6613 goto bad;
6614 }
6615 m->m_next = n;
6616 return m;
6617 bad:
6618 m_freem(n);
6619 return 0;
6620 }
6621
6622 /*
6623 * Like m_pullup(), except a new mbuf is always allocated, and we allow
6624 * the amount of empty space before the data in the new mbuf to be specified
6625 * (in the event that the caller expects to prepend later).
6626 */
6627 __private_extern__ struct mbuf *
6628 m_copyup(struct mbuf *n, int len, int dstoff)
6629 {
6630 struct mbuf *m;
6631 int count, space;
6632
6633 VERIFY(len >= 0 && dstoff >= 0);
6634
6635 if (len > (MHLEN - dstoff)) {
6636 goto bad;
6637 }
6638 MGET(m, M_DONTWAIT, n->m_type);
6639 if (m == NULL) {
6640 goto bad;
6641 }
6642 m->m_len = 0;
6643 if (n->m_flags & M_PKTHDR) {
6644 m_copy_pkthdr(m, n);
6645 n->m_flags &= ~M_PKTHDR;
6646 }
6647 m->m_data += dstoff;
6648 space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
6649 do {
6650 count = min(min(max(len, max_protohdr), space), n->m_len);
6651 memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
6652 (unsigned)count);
6653 len -= count;
6654 m->m_len += count;
6655 n->m_len -= count;
6656 space -= count;
6657 if (n->m_len) {
6658 n->m_data += count;
6659 } else {
6660 n = m_free(n);
6661 }
6662 } while (len > 0 && n);
6663 if (len > 0) {
6664 (void) m_free(m);
6665 goto bad;
6666 }
6667 m->m_next = n;
6668 return m;
6669 bad:
6670 m_freem(n);
6671
6672 return NULL;
6673 }
6674
6675 /*
6676 * Partition an mbuf chain in two pieces, returning the tail --
6677 * all but the first len0 bytes. In case of failure, it returns NULL and
6678 * attempts to restore the chain to its original state.
6679 */
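/*
 * Illustrative sketch (hypothetical caller): split a packet so that the
 * first mss bytes stay in m and the remainder continues as a new chain.
 *
 *	struct mbuf *rest = m_split(m, mss, M_DONTWAIT);
 *	if (rest == NULL) {
 *		// could not allocate; per the description above, the
 *		// original chain is left in its original state
 *	}
 *
 * When the original has M_PKTHDR set, both resulting chains carry packet
 * headers with m_pkthdr.len adjusted accordingly.
 */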
6680 struct mbuf *
6681 m_split(struct mbuf *m0, int len0, int wait)
6682 {
6683 return m_split0(m0, len0, wait, 1);
6684 }
6685
6686 static struct mbuf *
6687 m_split0(struct mbuf *m0, int len0, int wait, int copyhdr)
6688 {
6689 struct mbuf *m, *n;
6690 unsigned len = len0, remain;
6691
6692 /*
6693 * First iterate to the mbuf which contains the first byte of
6694 * data at offset len0
6695 */
6696 for (m = m0; m && len > m->m_len; m = m->m_next) {
6697 len -= m->m_len;
6698 }
6699 if (m == NULL) {
6700 return NULL;
6701 }
6702 /*
6703 * len effectively is now the offset within the current
6704 * mbuf at which we have to perform the split.
6705 *
6706 * remain becomes the tail length.
6707 * Note that len can also be == m->m_len
6708 */
6709 remain = m->m_len - len;
6710
6711 /*
6712 * If the split point falls exactly at the end of the current mbuf
6713 * (i.e. remain == 0), just make the second chain start at the next
6714 * mbuf and return after making the necessary adjustments.
6715 */
6716 if (copyhdr && (m0->m_flags & M_PKTHDR) && remain == 0) {
6717 _MGETHDR(n, wait, m0->m_type);
6718 if (n == NULL) {
6719 return NULL;
6720 }
6721 n->m_next = m->m_next;
6722 m->m_next = NULL;
6723 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
6724 n->m_pkthdr.len = m0->m_pkthdr.len - len0;
6725 m0->m_pkthdr.len = len0;
6726 return n;
6727 }
6728 if (copyhdr && (m0->m_flags & M_PKTHDR)) {
6729 _MGETHDR(n, wait, m0->m_type);
6730 if (n == NULL) {
6731 return NULL;
6732 }
6733 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
6734 n->m_pkthdr.len = m0->m_pkthdr.len - len0;
6735 m0->m_pkthdr.len = len0;
6736
6737 /*
6738 * If the current mbuf points to external storage,
6739 * the storage can be shared by having the last mbuf
6740 * of the head chain and the first mbuf of the tail chain
6741 * point at different data offsets within it.
6742 */
6743 if (m->m_flags & M_EXT) {
6744 goto extpacket;
6745 }
6746 if (remain > MHLEN) {
6747 /* m can't be the lead packet */
6748 MH_ALIGN(n, 0);
6749 n->m_next = m_split(m, len, wait);
6750 if (n->m_next == NULL) {
6751 (void) m_free(n);
6752 return NULL;
6753 } else {
6754 return n;
6755 }
6756 } else {
6757 MH_ALIGN(n, remain);
6758 }
6759 } else if (remain == 0) {
6760 n = m->m_next;
6761 m->m_next = NULL;
6762 return n;
6763 } else {
6764 _MGET(n, wait, m->m_type);
6765 if (n == NULL) {
6766 return NULL;
6767 }
6768
6769 if ((m->m_flags & M_EXT) == 0) {
6770 VERIFY(remain <= MLEN);
6771 M_ALIGN(n, remain);
6772 }
6773 }
6774 extpacket:
6775 if (m->m_flags & M_EXT) {
6776 n->m_flags |= M_EXT;
6777 n->m_ext = m->m_ext;
6778 m_incref(m);
6779 n->m_data = m->m_data + len;
6780 } else {
6781 bcopy(MTOD(m, caddr_t) + len, MTOD(n, caddr_t), remain);
6782 }
6783 n->m_len = remain;
6784 m->m_len = len;
6785 n->m_next = m->m_next;
6786 m->m_next = NULL;
6787 return n;
6788 }
6789
6790 /*
6791 * Routine to copy from device local memory into mbufs.
6792 */
6793 struct mbuf *
6794 m_devget(char *buf, int totlen, int off0, struct ifnet *ifp,
6795 void (*copy)(const void *, void *, size_t))
6796 {
6797 struct mbuf *m;
6798 struct mbuf *top = NULL, **mp = ⊤
6799 int off = off0, len;
6800 char *cp;
6801 char *epkt;
6802
6803 cp = buf;
6804 epkt = cp + totlen;
6805 if (off) {
6806 /*
6807 * If 'off' is non-zero, packet is trailer-encapsulated,
6808 * so we have to skip the type and length fields.
6809 */
6810 cp += off + 2 * sizeof(u_int16_t);
6811 totlen -= 2 * sizeof(u_int16_t);
6812 }
6813 _MGETHDR(m, M_DONTWAIT, MT_DATA);
6814 if (m == NULL) {
6815 return NULL;
6816 }
6817 m->m_pkthdr.rcvif = ifp;
6818 m->m_pkthdr.len = totlen;
6819 m->m_len = MHLEN;
6820
6821 while (totlen > 0) {
6822 if (top != NULL) {
6823 _MGET(m, M_DONTWAIT, MT_DATA);
6824 if (m == NULL) {
6825 m_freem(top);
6826 return NULL;
6827 }
6828 m->m_len = MLEN;
6829 }
6830 len = MIN(totlen, epkt - cp);
6831 if (len >= MINCLSIZE) {
6832 MCLGET(m, M_DONTWAIT);
6833 if (m->m_flags & M_EXT) {
6834 m->m_len = len = MIN(len, m_maxsize(MC_CL));
6835 } else {
6836 /* give up when it's out of cluster mbufs */
6837 if (top != NULL) {
6838 m_freem(top);
6839 }
6840 m_freem(m);
6841 return NULL;
6842 }
6843 } else {
6844 /*
6845 * Place initial small packet/header at end of mbuf.
6846 */
6847 if (len < m->m_len) {
6848 if (top == NULL &&
6849 len + max_linkhdr <= m->m_len) {
6850 m->m_data += max_linkhdr;
6851 }
6852 m->m_len = len;
6853 } else {
6854 len = m->m_len;
6855 }
6856 }
6857 if (copy) {
6858 copy(cp, MTOD(m, caddr_t), (unsigned)len);
6859 } else {
6860 bcopy(cp, MTOD(m, caddr_t), (unsigned)len);
6861 }
6862 cp += len;
6863 *mp = m;
6864 mp = &m->m_next;
6865 totlen -= len;
6866 if (cp == epkt) {
6867 cp = buf;
6868 }
6869 }
6870 return top;
6871 }
6872
6873 #if CONFIG_MBUF_MCACHE
6874 #ifndef MBUF_GROWTH_NORMAL_THRESH
6875 #define MBUF_GROWTH_NORMAL_THRESH 25
6876 #endif
6877
6878 /*
6879 * Cluster freelist allocation check.
6880 */
6881 static int
6882 m_howmany(int num, size_t bufsize)
6883 {
6884 int i = 0, j = 0;
6885 u_int32_t m_mbclusters, m_clusters, m_bigclusters, m_16kclusters;
6886 u_int32_t m_mbfree, m_clfree, m_bigclfree, m_16kclfree;
6887 u_int32_t sumclusters, freeclusters;
6888 u_int32_t percent_pool, percent_kmem;
6889 u_int32_t mb_growth, mb_growth_thresh;
6890
6891 VERIFY(bufsize == m_maxsize(MC_BIGCL) ||
6892 bufsize == m_maxsize(MC_16KCL));
6893
6894 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
6895
6896 /* Numbers in 2K cluster units */
6897 m_mbclusters = m_total(MC_MBUF) >> NMBPCLSHIFT;
6898 m_clusters = m_total(MC_CL);
6899 m_bigclusters = m_total(MC_BIGCL) << NCLPBGSHIFT;
6900 m_16kclusters = m_total(MC_16KCL);
6901 sumclusters = m_mbclusters + m_clusters + m_bigclusters;
6902
6903 m_mbfree = m_infree(MC_MBUF) >> NMBPCLSHIFT;
6904 m_clfree = m_infree(MC_CL);
6905 m_bigclfree = m_infree(MC_BIGCL) << NCLPBGSHIFT;
6906 m_16kclfree = m_infree(MC_16KCL);
6907 freeclusters = m_mbfree + m_clfree + m_bigclfree;
6908
6909 /* Bail if we've maxed out the mbuf memory map */
6910 if ((bufsize == m_maxsize(MC_BIGCL) && sumclusters >= nclusters) ||
6911 (njcl > 0 && bufsize == m_maxsize(MC_16KCL) &&
6912 (m_16kclusters << NCLPJCLSHIFT) >= njcl)) {
6913 mbwdog_logger("maxed out nclusters (%u >= %u) or njcl (%u >= %u)",
6914 sumclusters, nclusters,
6915 (m_16kclusters << NCLPJCLSHIFT), njcl);
6916 return 0;
6917 }
6918
6919 if (bufsize == m_maxsize(MC_BIGCL)) {
6920 /* Under minimum */
6921 if (m_bigclusters < m_minlimit(MC_BIGCL)) {
6922 return m_minlimit(MC_BIGCL) - m_bigclusters;
6923 }
6924
6925 percent_pool =
6926 ((sumclusters - freeclusters) * 100) / sumclusters;
6927 percent_kmem = (sumclusters * 100) / nclusters;
6928
6929 /*
6930 * If a light/normal user, grow conservatively (75%);
6931 * if a heavy user, grow aggressively (50%).
6932 */
6933 if (percent_kmem < MBUF_GROWTH_NORMAL_THRESH) {
6934 mb_growth = MB_GROWTH_NORMAL;
6935 } else {
6936 mb_growth = MB_GROWTH_AGGRESSIVE;
6937 }
6938
6939 if (percent_kmem < 5) {
6940 /* For initial allocations */
6941 i = num;
6942 } else {
6943 /* Return if >= MBIGCL_LOWAT clusters available */
6944 if (m_infree(MC_BIGCL) >= MBIGCL_LOWAT &&
6945 m_total(MC_BIGCL) >=
6946 MBIGCL_LOWAT + m_minlimit(MC_BIGCL)) {
6947 return 0;
6948 }
6949
6950 /* Ensure at least num clusters are accessible */
6951 if (num >= m_infree(MC_BIGCL)) {
6952 i = num - m_infree(MC_BIGCL);
6953 }
6954 if (num > m_total(MC_BIGCL) - m_minlimit(MC_BIGCL)) {
6955 j = num - (m_total(MC_BIGCL) -
6956 m_minlimit(MC_BIGCL));
6957 }
6958
6959 i = MAX(i, j);
6960
6961 /*
6962 * Grow pool if percent_pool > 75 (normal growth)
6963 * or percent_pool > 50 (aggressive growth).
6964 */
6965 mb_growth_thresh = 100 - (100 / (1 << mb_growth));
6966 if (percent_pool > mb_growth_thresh) {
6967 j = ((sumclusters + num) >> mb_growth) -
6968 freeclusters;
6969 }
6970 i = MAX(i, j);
6971 }
6972
6973 /* Check to ensure we didn't go over limits */
6974 if (i + m_bigclusters >= m_maxlimit(MC_BIGCL)) {
6975 i = m_maxlimit(MC_BIGCL) - m_bigclusters;
6976 }
6977 if ((i << 1) + sumclusters >= nclusters) {
6978 i = (nclusters - sumclusters) >> 1;
6979 }
6980 VERIFY((m_total(MC_BIGCL) + i) <= m_maxlimit(MC_BIGCL));
6981 VERIFY(sumclusters + (i << 1) <= nclusters);
6982 } else { /* 16K CL */
6983 VERIFY(njcl > 0);
6984 /* Ensure at least num clusters are available */
6985 if (num >= m_16kclfree) {
6986 i = num - m_16kclfree;
6987 }
6988
6989 /* Always grow 16KCL pool aggressively */
6990 if (((m_16kclusters + num) >> 1) > m_16kclfree) {
6991 j = ((m_16kclusters + num) >> 1) - m_16kclfree;
6992 }
6993 i = MAX(i, j);
6994
6995 /* Check to ensure we don't go over limit */
6996 if ((i + m_total(MC_16KCL)) >= m_maxlimit(MC_16KCL)) {
6997 i = m_maxlimit(MC_16KCL) - m_total(MC_16KCL);
6998 }
6999 }
7000 return i;
7001 }
7002 #endif /* CONFIG_MBUF_MCACHE */
7003 /*
7004 * Return the number of bytes in the mbuf chain, m.
7005 */
7006 unsigned int
7007 m_length(struct mbuf *m)
7008 {
7009 struct mbuf *m0;
7010 unsigned int pktlen;
7011
7012 if (m->m_flags & M_PKTHDR) {
7013 return m->m_pkthdr.len;
7014 }
7015
7016 pktlen = 0;
7017 for (m0 = m; m0 != NULL; m0 = m0->m_next) {
7018 pktlen += m0->m_len;
7019 }
7020 return pktlen;
7021 }
7022
7023 /*
7024 * Copy data from a buffer back into the indicated mbuf chain,
7025 * starting "off" bytes from the beginning, extending the mbuf
7026 * chain if necessary.
7027 */
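/*
 * Illustrative sketch (hypothetical names): overwrite a 2-byte checksum
 * field at offset csum_off within the chain.
 *
 *	uint16_t csum = 0;
 *	m_copyback(m, csum_off, sizeof(csum), &csum);
 *
 * If the chain is too short it is extended, with any gap before the
 * target offset zero-filled; on allocation failure the copy may be
 * silently incomplete, since this wrapper discards the error returned
 * by m_copyback0().
 */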
7028 void
7029 m_copyback(struct mbuf *m0, int off, int len, const void *cp)
7030 {
7031 #if DEBUG
7032 struct mbuf *origm = m0;
7033 int error;
7034 #endif /* DEBUG */
7035
7036 if (m0 == NULL) {
7037 return;
7038 }
7039
7040 #if DEBUG
7041 error =
7042 #endif /* DEBUG */
7043 m_copyback0(&m0, off, len, cp,
7044 M_COPYBACK0_COPYBACK | M_COPYBACK0_EXTEND, M_DONTWAIT);
7045
7046 #if DEBUG
7047 if (error != 0 || (m0 != NULL && origm != m0)) {
7048 panic("m_copyback");
7049 }
7050 #endif /* DEBUG */
7051 }
7052
7053 struct mbuf *
7054 m_copyback_cow(struct mbuf *m0, int off, int len, const void *cp, int how)
7055 {
7056 int error;
7057
7058 /* don't support chain expansion */
7059 VERIFY(off + len <= m_length(m0));
7060
7061 error = m_copyback0(&m0, off, len, cp,
7062 M_COPYBACK0_COPYBACK | M_COPYBACK0_COW, how);
7063 if (error) {
7064 /*
7065 * no way to recover from partial success.
7066 * just free the chain.
7067 */
7068 m_freem(m0);
7069 return NULL;
7070 }
7071 return m0;
7072 }
7073
7074 /*
7075 * m_makewritable: ensure the specified range is writable.
7076 */
7077 int
7078 m_makewritable(struct mbuf **mp, int off, int len, int how)
7079 {
7080 int error;
7081 #if DEBUG
7082 struct mbuf *n;
7083 int origlen, reslen;
7084
7085 origlen = m_length(*mp);
7086 #endif /* DEBUG */
7087
7088 #if 0 /* M_COPYALL is large enough */
7089 if (len == M_COPYALL) {
7090 len = m_length(*mp) - off; /* XXX */
7091 }
7092 #endif
7093
7094 error = m_copyback0(mp, off, len, NULL,
7095 M_COPYBACK0_PRESERVE | M_COPYBACK0_COW, how);
7096
7097 #if DEBUG
7098 reslen = 0;
7099 for (n = *mp; n; n = n->m_next) {
7100 reslen += n->m_len;
7101 }
7102 if (origlen != reslen) {
7103 panic("m_makewritable: length changed");
7104 }
7105 if (((*mp)->m_flags & M_PKTHDR) && reslen != (*mp)->m_pkthdr.len) {
7106 panic("m_makewritable: inconsist");
7107 }
7108 #endif /* DEBUG */
7109
7110 return error;
7111 }
7112
7113 static int
7114 m_copyback0(struct mbuf **mp0, int off, int len, const void *vp, int flags,
7115 int how)
7116 {
7117 int mlen;
7118 struct mbuf *m, *n;
7119 struct mbuf **mp;
7120 int totlen = 0;
7121 const char *cp = vp;
7122
7123 VERIFY(mp0 != NULL);
7124 VERIFY(*mp0 != NULL);
7125 VERIFY((flags & M_COPYBACK0_PRESERVE) == 0 || cp == NULL);
7126 VERIFY((flags & M_COPYBACK0_COPYBACK) == 0 || cp != NULL);
7127
7128 /*
7129 * we don't bother to update "totlen" in the case of M_COPYBACK0_COW,
7130 * assuming that M_COPYBACK0_EXTEND and M_COPYBACK0_COW are exclusive.
7131 */
7132
7133 VERIFY((~flags & (M_COPYBACK0_EXTEND | M_COPYBACK0_COW)) != 0);
7134
7135 mp = mp0;
7136 m = *mp;
7137 while (off > (mlen = m->m_len)) {
7138 off -= mlen;
7139 totlen += mlen;
7140 if (m->m_next == NULL) {
7141 int tspace;
7142 extend:
7143 if (!(flags & M_COPYBACK0_EXTEND)) {
7144 goto out;
7145 }
7146
7147 /*
7148 * try to make some space at the end of "m".
7149 */
7150
7151 mlen = m->m_len;
7152 if (off + len >= MINCLSIZE &&
7153 !(m->m_flags & M_EXT) && m->m_len == 0) {
7154 MCLGET(m, how);
7155 }
7156 tspace = M_TRAILINGSPACE(m);
7157 if (tspace > 0) {
7158 tspace = MIN(tspace, off + len);
7159 VERIFY(tspace > 0);
7160 bzero(mtod(m, char *) + m->m_len,
7161 MIN(off, tspace));
7162 m->m_len += tspace;
7163 off += mlen;
7164 totlen -= mlen;
7165 continue;
7166 }
7167
7168 /*
7169 * need to allocate an mbuf.
7170 */
7171
7172 if (off + len >= MINCLSIZE) {
7173 n = m_getcl(how, m->m_type, 0);
7174 } else {
7175 n = _M_GET(how, m->m_type);
7176 }
7177 if (n == NULL) {
7178 goto out;
7179 }
7180 n->m_len = 0;
7181 n->m_len = MIN(M_TRAILINGSPACE(n), off + len);
7182 bzero(mtod(n, char *), MIN(n->m_len, off));
7183 m->m_next = n;
7184 }
7185 mp = &m->m_next;
7186 m = m->m_next;
7187 }
7188 while (len > 0) {
7189 mlen = m->m_len - off;
7190 if (mlen != 0 && m_mclhasreference(m)) {
7191 char *datap;
7192 int eatlen;
7193
7194 /*
7195 * this mbuf is read-only.
7196 * allocate a new writable mbuf and try again.
7197 */
7198
7199 #if DIAGNOSTIC
7200 if (!(flags & M_COPYBACK0_COW)) {
7201 panic("m_copyback0: read-only");
7202 }
7203 #endif /* DIAGNOSTIC */
7204
7205 /*
7206 * if we're going to write into the middle of
7207 * a mbuf, split it first.
7208 */
7209 if (off > 0 && len < mlen) {
7210 n = m_split0(m, off, how, 0);
7211 if (n == NULL) {
7212 goto enobufs;
7213 }
7214 m->m_next = n;
7215 mp = &m->m_next;
7216 m = n;
7217 off = 0;
7218 continue;
7219 }
7220
7221 /*
7222 * XXX TODO coalesce into the trailingspace of
7223 * the previous mbuf when possible.
7224 */
7225
7226 /*
7227 * allocate a new mbuf. copy packet header if needed.
7228 */
7229 n = _M_GET(how, m->m_type);
7230 if (n == NULL) {
7231 goto enobufs;
7232 }
7233 if (off == 0 && (m->m_flags & M_PKTHDR)) {
7234 M_COPY_PKTHDR(n, m);
7235 n->m_len = MHLEN;
7236 } else {
7237 if (len >= MINCLSIZE) {
7238 MCLGET(n, M_DONTWAIT);
7239 }
7240 n->m_len =
7241 (n->m_flags & M_EXT) ? MCLBYTES : MLEN;
7242 }
7243 if (n->m_len > len) {
7244 n->m_len = len;
7245 }
7246
7247 /*
7248 * free the region which has been overwritten,
7249 * copying data from the old mbufs if requested.
7250 */
7251 if (flags & M_COPYBACK0_PRESERVE) {
7252 datap = mtod(n, char *);
7253 } else {
7254 datap = NULL;
7255 }
7256 eatlen = n->m_len;
7257 VERIFY(off == 0 || eatlen >= mlen);
7258 if (off > 0) {
7259 VERIFY(len >= mlen);
7260 m->m_len = off;
7261 m->m_next = n;
7262 if (datap) {
7263 m_copydata(m, off, mlen, datap);
7264 datap += mlen;
7265 }
7266 eatlen -= mlen;
7267 mp = &m->m_next;
7268 m = m->m_next;
7269 }
7270 while (m != NULL && m_mclhasreference(m) &&
7271 n->m_type == m->m_type && eatlen > 0) {
7272 mlen = MIN(eatlen, m->m_len);
7273 if (datap) {
7274 m_copydata(m, 0, mlen, datap);
7275 datap += mlen;
7276 }
7277 m->m_data += mlen;
7278 m->m_len -= mlen;
7279 eatlen -= mlen;
7280 if (m->m_len == 0) {
7281 *mp = m = m_free(m);
7282 }
7283 }
7284 if (eatlen > 0) {
7285 n->m_len -= eatlen;
7286 }
7287 n->m_next = m;
7288 *mp = m = n;
7289 continue;
7290 }
7291 mlen = MIN(mlen, len);
7292 if (flags & M_COPYBACK0_COPYBACK) {
7293 bcopy(cp, mtod(m, caddr_t) + off, (unsigned)mlen);
7294 cp += mlen;
7295 }
7296 len -= mlen;
7297 mlen += off;
7298 off = 0;
7299 totlen += mlen;
7300 if (len == 0) {
7301 break;
7302 }
7303 if (m->m_next == NULL) {
7304 goto extend;
7305 }
7306 mp = &m->m_next;
7307 m = m->m_next;
7308 }
7309 out:
7310 if (((m = *mp0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen)) {
7311 VERIFY(flags & M_COPYBACK0_EXTEND);
7312 m->m_pkthdr.len = totlen;
7313 }
7314
7315 return 0;
7316
7317 enobufs:
7318 return ENOBUFS;
7319 }
7320
7321 uint64_t
7322 mcl_to_paddr(char *addr)
7323 {
7324 #if CONFIG_MBUF_MCACHE
7325 vm_offset_t base_phys;
7326
7327 if (!MBUF_IN_MAP(addr)) {
7328 return 0;
7329 }
7330 base_phys = mcl_paddr[atop_64(addr - (char *)mbutl)];
7331
7332 if (base_phys == 0) {
7333 return 0;
7334 }
7335 return (uint64_t)(ptoa_64(base_phys) | ((uint64_t)addr & PAGE_MASK));
7336 #else
7337 extern addr64_t kvtophys(vm_offset_t va);
7338
7339 return kvtophys((vm_offset_t)addr);
7340 #endif /* CONFIG_MBUF_MCACHE */
7341 }
7342
7343 /*
7344 * Dup the mbuf chain passed in. The whole thing. No cute additional cruft.
7345 * And really copy the thing. That way, we don't "precompute" checksums
7346 * for unsuspecting consumers. Assumption: m->m_nextpkt == 0. Trick: for
7347 * small packets, don't dup into a cluster. That way received packets
7348 * don't take up too much room in the sockbuf (cf. sbspace()).
7349 */
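/*
 * Illustrative note: unlike m_copym(), which shares cluster storage by
 * reference, m_dup() deep-copies the data, so the result can be modified
 * without affecting the original chain. A hypothetical caller:
 *
 *	struct mbuf *private_copy = m_dup(m, M_DONTWAIT);
 */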
7350 struct mbuf *
7351 m_dup(struct mbuf *m, int how)
7352 {
7353 struct mbuf *n, **np;
7354 struct mbuf *top;
7355 int copyhdr = 0;
7356
7357 np = ⊤
7358 top = NULL;
7359 if (m->m_flags & M_PKTHDR) {
7360 copyhdr = 1;
7361 }
7362
7363 /*
7364 * Quick check: if we have one mbuf and its data fits in an
7365 * mbuf with packet header, just copy and go.
7366 */
7367 if (m->m_next == NULL) {
7368 /* Then just move the data into an mbuf and be done... */
7369 if (copyhdr) {
7370 if (m->m_pkthdr.len <= MHLEN && m->m_len <= MHLEN) {
7371 if ((n = _M_GETHDR(how, m->m_type)) == NULL) {
7372 return NULL;
7373 }
7374 n->m_len = m->m_len;
7375 m_dup_pkthdr(n, m, how);
7376 bcopy(m->m_data, n->m_data, m->m_len);
7377 return n;
7378 }
7379 } else if (m->m_len <= MLEN) {
7380 if ((n = _M_GET(how, m->m_type)) == NULL) {
7381 return NULL;
7382 }
7383 bcopy(m->m_data, n->m_data, m->m_len);
7384 n->m_len = m->m_len;
7385 return n;
7386 }
7387 }
7388 while (m != NULL) {
7389 #if BLUE_DEBUG
7390 printf("<%x: %x, %x, %x\n", m, m->m_flags, m->m_len,
7391 m->m_data);
7392 #endif
7393 if (copyhdr) {
7394 n = _M_GETHDR(how, m->m_type);
7395 } else {
7396 n = _M_GET(how, m->m_type);
7397 }
7398 if (n == NULL) {
7399 goto nospace;
7400 }
7401 if (m->m_flags & M_EXT) {
7402 if (m->m_len <= m_maxsize(MC_CL)) {
7403 MCLGET(n, how);
7404 } else if (m->m_len <= m_maxsize(MC_BIGCL)) {
7405 n = m_mbigget(n, how);
7406 } else if (m->m_len <= m_maxsize(MC_16KCL) && njcl > 0) {
7407 n = m_m16kget(n, how);
7408 }
7409 if (!(n->m_flags & M_EXT)) {
7410 (void) m_free(n);
7411 goto nospace;
7412 }
7413 } else {
7414 VERIFY((copyhdr == 1 && m->m_len <= MHLEN) ||
7415 (copyhdr == 0 && m->m_len <= MLEN));
7416 }
7417 *np = n;
7418 if (copyhdr) {
7419 /* Don't use M_COPY_PKTHDR: preserve m_data */
7420 m_dup_pkthdr(n, m, how);
7421 copyhdr = 0;
7422 if (!(n->m_flags & M_EXT)) {
7423 n->m_data = n->m_pktdat;
7424 }
7425 }
7426 n->m_len = m->m_len;
7427 /*
7428 * Get the dup on the same boundary as the original.
7429 * Assume that the two mbufs have the same offset to the data area
7430 * (up to word boundaries).
7431 */
7432 bcopy(MTOD(m, caddr_t), MTOD(n, caddr_t), (unsigned)n->m_len);
7433 m = m->m_next;
7434 np = &n->m_next;
7435 #if BLUE_DEBUG
7436 printf(">%x: %x, %x, %x\n", n, n->m_flags, n->m_len,
7437 n->m_data);
7438 #endif
7439 }
7440
7441 return top;
7442
7443 nospace:
7444 m_freem(top);
7445 return NULL;
7446 }
7447
7448 #define MBUF_MULTIPAGES(m) \
7449 (((m)->m_flags & M_EXT) && \
7450 ((IS_P2ALIGNED((m)->m_data, PAGE_SIZE) \
7451 && (m)->m_len > PAGE_SIZE) || \
7452 (!IS_P2ALIGNED((m)->m_data, PAGE_SIZE) && \
7453 P2ROUNDUP((m)->m_data, PAGE_SIZE) < ((uintptr_t)(m)->m_data + (m)->m_len))))
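/*
 * MBUF_MULTIPAGES(m) is true when the mbuf carries external data (M_EXT)
 * that either starts page-aligned and runs longer than a page, or starts
 * unaligned and crosses the next page boundary -- in other words, whenever
 * the data spans more than one page and m_expand()/m_normalize() may need
 * to break it up into page-sized pieces.
 */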
7454
7455 static struct mbuf *
7456 m_expand(struct mbuf *m, struct mbuf **last)
7457 {
7458 struct mbuf *top = NULL;
7459 struct mbuf **nm = ⊤
7460 uintptr_t data0, data;
7461 unsigned int len0, len;
7462
7463 VERIFY(MBUF_MULTIPAGES(m));
7464 VERIFY(m->m_next == NULL);
7465 data0 = (uintptr_t)m->m_data;
7466 len0 = m->m_len;
7467 *last = top;
7468
7469 for (;;) {
7470 struct mbuf *n;
7471
7472 data = data0;
7473 if (IS_P2ALIGNED(data, PAGE_SIZE) && len0 > PAGE_SIZE) {
7474 len = PAGE_SIZE;
7475 } else if (!IS_P2ALIGNED(data, PAGE_SIZE) &&
7476 P2ROUNDUP(data, PAGE_SIZE) < (data + len0)) {
7477 len = P2ROUNDUP(data, PAGE_SIZE) - data;
7478 } else {
7479 len = len0;
7480 }
7481
7482 VERIFY(len > 0);
7483 VERIFY(m->m_flags & M_EXT);
7484 m->m_data = (void *)data;
7485 m->m_len = len;
7486
7487 *nm = *last = m;
7488 nm = &m->m_next;
7489 m->m_next = NULL;
7490
7491 data0 += len;
7492 len0 -= len;
7493 if (len0 == 0) {
7494 break;
7495 }
7496
7497 n = _M_RETRY(M_DONTWAIT, MT_DATA);
7498 if (n == NULL) {
7499 m_freem(top);
7500 top = *last = NULL;
7501 break;
7502 }
7503
7504 n->m_ext = m->m_ext;
7505 m_incref(m);
7506 n->m_flags |= M_EXT;
7507 m = n;
7508 }
7509 return top;
7510 }
7511
7512 struct mbuf *
7513 m_normalize(struct mbuf *m)
7514 {
7515 struct mbuf *top = NULL;
7516 struct mbuf **nm = ⊤
7517 boolean_t expanded = FALSE;
7518
7519 while (m != NULL) {
7520 struct mbuf *n;
7521
7522 n = m->m_next;
7523 m->m_next = NULL;
7524
7525 /* Does the data cross one or more page boundaries? */
7526 if (MBUF_MULTIPAGES(m)) {
7527 struct mbuf *last;
7528 if ((m = m_expand(m, &last)) == NULL) {
7529 m_freem(n);
7530 m_freem(top);
7531 top = NULL;
7532 break;
7533 }
7534 *nm = m;
7535 nm = &last->m_next;
7536 expanded = TRUE;
7537 } else {
7538 *nm = m;
7539 nm = &m->m_next;
7540 }
7541 m = n;
7542 }
7543 if (expanded) {
7544 os_atomic_inc(&mb_normalized, relaxed);
7545 }
7546 return top;
7547 }
7548
/*
 * Append the specified data to the indicated mbuf chain.
 * Extend the mbuf chain if the new data does not fit in
 * existing space.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
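/*
 * Illustrative sketch only (hypothetical caller, not part of this file):
 * append a small header to a packet and drop the chain on partial failure.
 *
 *	struct mbuf *m = m_gethdr(M_WAITOK, MT_DATA);
 *	if (m != NULL && m_append(m, (int)sizeof(hdr), (caddr_t)&hdr) == 0) {
 *		m_freem(m);	// 0 means the append was incomplete
 *		m = NULL;
 *	}
 */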
7556 int
7557 m_append(struct mbuf *m0, int len, caddr_t cp)
7558 {
7559 struct mbuf *m, *n;
7560 int remainder, space;
7561
7562 for (m = m0; m->m_next != NULL; m = m->m_next) {
7563 ;
7564 }
7565 remainder = len;
7566 space = M_TRAILINGSPACE(m);
7567 if (space > 0) {
7568 /*
7569 * Copy into available space.
7570 */
7571 if (space > remainder) {
7572 space = remainder;
7573 }
7574 bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
7575 m->m_len += space;
7576 cp += space;
7577 remainder -= space;
7578 }
7579 while (remainder > 0) {
7580 /*
7581 * Allocate a new mbuf; could check space
7582 * and allocate a cluster instead.
7583 */
7584 n = m_get(M_WAITOK, m->m_type);
7585 if (n == NULL) {
7586 break;
7587 }
7588 n->m_len = min(MLEN, remainder);
7589 bcopy(cp, mtod(n, caddr_t), n->m_len);
7590 cp += n->m_len;
7591 remainder -= n->m_len;
7592 m->m_next = n;
7593 m = n;
7594 }
7595 if (m0->m_flags & M_PKTHDR) {
7596 m0->m_pkthdr.len += len - remainder;
7597 }
7598 return remainder == 0;
7599 }
7600
7601 struct mbuf *
7602 m_last(struct mbuf *m)
7603 {
7604 while (m->m_next != NULL) {
7605 m = m->m_next;
7606 }
7607 return m;
7608 }
7609
7610 unsigned int
7611 m_fixhdr(struct mbuf *m0)
7612 {
7613 u_int len;
7614
7615 VERIFY(m0->m_flags & M_PKTHDR);
7616
7617 len = m_length2(m0, NULL);
7618 m0->m_pkthdr.len = len;
7619 return len;
7620 }
7621
7622 unsigned int
7623 m_length2(struct mbuf *m0, struct mbuf **last)
7624 {
7625 struct mbuf *m;
7626 u_int len;
7627
7628 len = 0;
7629 for (m = m0; m != NULL; m = m->m_next) {
7630 len += m->m_len;
7631 if (m->m_next == NULL) {
7632 break;
7633 }
7634 }
7635 if (last != NULL) {
7636 *last = m;
7637 }
7638 return len;
7639 }
7640
/*
 * Defragment an mbuf chain, returning the shortest possible chain of mbufs
 * and clusters. If allocation fails and the operation cannot be completed,
 * NULL will be returned, but the passed-in chain will be unchanged. Upon
 * success, the original chain will be freed and the new chain returned.
 *
 * If a non-packet-header mbuf is passed in, the original chain will be
 * returned unharmed.
 *
 * If an offset is specified, the first mbuf in the new chain will have a
 * leading space of the amount stated by the "off" parameter.
 *
 * This routine requires that the m_pkthdr.pkt_hdr field of the original
 * mbuf chain be cleared by the caller.
 */
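/*
 * Illustrative sketch only (hypothetical caller, not part of this file):
 * coalesce a chain with m_defrag(), defined below, which wraps this routine
 * with off == 0.
 *
 *	struct mbuf *d = m_defrag(m0, M_DONTWAIT);
 *	if (d == NULL) {
 *		// allocation failed; m0 is unchanged and still owned by us
 *	} else {
 *		m0 = d;		// the original chain was freed on success
 *	}
 */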
7656 struct mbuf *
7657 m_defrag_offset(struct mbuf *m0, u_int32_t off, int how)
7658 {
7659 struct mbuf *m_new = NULL, *m_final = NULL;
7660 int progress = 0, length, pktlen;
7661
7662 if (!(m0->m_flags & M_PKTHDR)) {
7663 return m0;
7664 }
7665
7666 VERIFY(off < MHLEN);
7667 m_fixhdr(m0); /* Needed sanity check */
7668
7669 pktlen = m0->m_pkthdr.len + off;
7670 if (pktlen > MHLEN) {
7671 m_final = m_getcl(how, MT_DATA, M_PKTHDR);
7672 } else {
7673 m_final = m_gethdr(how, MT_DATA);
7674 }
7675
7676 if (m_final == NULL) {
7677 goto nospace;
7678 }
7679
7680 if (off > 0) {
7681 pktlen -= off;
7682 m_final->m_data += off;
7683 }
7684
7685 /*
7686 * Caller must have handled the contents pointed to by this
7687 * pointer before coming here, as otherwise it will point to
7688 * the original mbuf which will get freed upon success.
7689 */
7690 VERIFY(m0->m_pkthdr.pkt_hdr == NULL);
7691
7692 if (m_dup_pkthdr(m_final, m0, how) == 0) {
7693 goto nospace;
7694 }
7695
7696 m_new = m_final;
7697
7698 while (progress < pktlen) {
7699 length = pktlen - progress;
7700 if (length > MCLBYTES) {
7701 length = MCLBYTES;
7702 }
7703 length -= ((m_new == m_final) ? off : 0);
7704 if (length < 0) {
7705 goto nospace;
7706 }
7707
7708 if (m_new == NULL) {
7709 if (length > MLEN) {
7710 m_new = m_getcl(how, MT_DATA, 0);
7711 } else {
7712 m_new = m_get(how, MT_DATA);
7713 }
7714 if (m_new == NULL) {
7715 goto nospace;
7716 }
7717 }
7718
7719 m_copydata(m0, progress, length, mtod(m_new, caddr_t));
7720 progress += length;
7721 m_new->m_len = length;
7722 if (m_new != m_final) {
7723 m_cat(m_final, m_new);
7724 }
7725 m_new = NULL;
7726 }
7727 m_freem(m0);
7728 m0 = m_final;
7729 return m0;
7730 nospace:
7731 if (m_final) {
7732 m_freem(m_final);
7733 }
7734 return NULL;
7735 }
7736
7737 struct mbuf *
7738 m_defrag(struct mbuf *m0, int how)
7739 {
7740 return m_defrag_offset(m0, 0, how);
7741 }
7742
7743 void
7744 m_mchtype(struct mbuf *m, int t)
7745 {
7746 mtype_stat_inc(t);
7747 mtype_stat_dec(m->m_type);
7748 (m)->m_type = t;
7749 }
7750
7751 void *
7752 m_mtod(struct mbuf *m)
7753 {
7754 return MTOD(m, void *);
7755 }
7756
7757 void
7758 m_mcheck(struct mbuf *m)
7759 {
7760 _MCHECK(m);
7761 }
7762
/*
 * Return the mbuf, and the offset within it, that correspond to a given
 * location in an mbuf chain.
 */
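/*
 * For example, with a two-mbuf chain of lengths 100 and 60: loc 120 returns
 * the second mbuf with *off == 20; loc 160 returns the second mbuf with
 * *off == 60 (the end of valid data); any larger loc returns NULL.
 */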
7766 struct mbuf *
7767 m_getptr(struct mbuf *m, int loc, int *off)
7768 {
7769 while (loc >= 0) {
7770 /* Normal end of search. */
7771 if (m->m_len > loc) {
7772 *off = loc;
7773 return m;
7774 } else {
7775 loc -= m->m_len;
7776 if (m->m_next == NULL) {
7777 if (loc == 0) {
7778 /* Point at the end of valid data. */
7779 *off = m->m_len;
7780 return m;
7781 }
7782 return NULL;
7783 }
7784 m = m->m_next;
7785 }
7786 }
7787 return NULL;
7788 }
7789
7790 #if CONFIG_MBUF_MCACHE
7791 /*
7792 * Inform the corresponding mcache(s) that there's a waiter below.
7793 */
7794 static void
7795 mbuf_waiter_inc(mbuf_class_t class, boolean_t comp)
7796 {
7797 mcache_waiter_inc(m_cache(class));
7798 if (comp) {
7799 if (class == MC_CL) {
7800 mcache_waiter_inc(m_cache(MC_MBUF_CL));
7801 } else if (class == MC_BIGCL) {
7802 mcache_waiter_inc(m_cache(MC_MBUF_BIGCL));
7803 } else if (class == MC_16KCL) {
7804 mcache_waiter_inc(m_cache(MC_MBUF_16KCL));
7805 } else {
7806 mcache_waiter_inc(m_cache(MC_MBUF_CL));
7807 mcache_waiter_inc(m_cache(MC_MBUF_BIGCL));
7808 }
7809 }
7810 }
7811
7812 /*
7813 * Inform the corresponding mcache(s) that there's no more waiter below.
7814 */
7815 static void
7816 mbuf_waiter_dec(mbuf_class_t class, boolean_t comp)
7817 {
7818 mcache_waiter_dec(m_cache(class));
7819 if (comp) {
7820 if (class == MC_CL) {
7821 mcache_waiter_dec(m_cache(MC_MBUF_CL));
7822 } else if (class == MC_BIGCL) {
7823 mcache_waiter_dec(m_cache(MC_MBUF_BIGCL));
7824 } else if (class == MC_16KCL) {
7825 mcache_waiter_dec(m_cache(MC_MBUF_16KCL));
7826 } else {
7827 mcache_waiter_dec(m_cache(MC_MBUF_CL));
7828 mcache_waiter_dec(m_cache(MC_MBUF_BIGCL));
7829 }
7830 }
7831 }
7832 #endif /* CONFIG_MBUF_MCACHE */
7833
7834 static bool mbuf_watchdog_defunct_active = false;
7835
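/*
 * Return an estimate of the mbuf space attributed to a socket: the mbuf
 * byte counts of its send and receive buffers, plus (for TCP) the space
 * held on the reassembly queue.
 */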
7836 static uint32_t
7837 mbuf_watchdog_socket_space(struct socket *so)
7838 {
7839 uint32_t space = 0;
7840
7841 if (so == NULL) {
7842 return 0;
7843 }
7844
7845 space = so->so_snd.sb_mbcnt + so->so_rcv.sb_mbcnt;
7846
7847 #if INET
7848 if ((SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) &&
7849 SOCK_PROTO(so) == IPPROTO_TCP) {
7850 space += tcp_reass_qlen_space(so);
7851 }
7852 #endif /* INET */
7853
7854 return space;
7855 }
7856
7857 struct mbuf_watchdog_defunct_args {
7858 struct proc *top_app;
7859 uint32_t top_app_space_used;
7860 bool non_blocking;
7861 };
7862
7863 static bool
7864 proc_fd_trylock(proc_t p)
7865 {
7866 return lck_mtx_try_lock(&p->p_fd.fd_lock);
7867 }
7868
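/*
 * proc_iterate() callback: sum the socket mbuf space used by a process and,
 * if it is the largest seen so far, remember that process in the args
 * structure (releasing the previously remembered one). The caller is
 * responsible for dropping the reference on args->top_app when done.
 */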
7869 static int
7870 mbuf_watchdog_defunct_iterate(proc_t p, void *arg)
7871 {
7872 struct fileproc *fp = NULL;
7873 struct mbuf_watchdog_defunct_args *args =
7874 (struct mbuf_watchdog_defunct_args *)arg;
7875 uint32_t space_used = 0;
7876
/*
 * Non-blocking mode is only used when dumping the mbuf usage from the
 * watchdog.
 */
7880 if (args->non_blocking) {
7881 if (!proc_fd_trylock(p)) {
7882 return PROC_RETURNED;
7883 }
7884 } else {
7885 proc_fdlock(p);
7886 }
7887 fdt_foreach(fp, p) {
7888 struct fileglob *fg = fp->fp_glob;
7889 struct socket *so = NULL;
7890
7891 if (FILEGLOB_DTYPE(fg) != DTYPE_SOCKET) {
7892 continue;
7893 }
7894 so = fg_get_data(fg);
7895 /*
7896 * We calculate the space without the socket
7897 * lock because we don't want to be blocked
7898 * by another process that called send() and
7899 * is stuck waiting for mbufs.
7900 *
7901 * These variables are 32-bit so we don't have
7902 * to worry about incomplete reads.
7903 */
7904 space_used += mbuf_watchdog_socket_space(so);
7905 }
7906 proc_fdunlock(p);
7907 if (space_used > args->top_app_space_used) {
7908 if (args->top_app != NULL) {
7909 proc_rele(args->top_app);
7910 }
7911 args->top_app = p;
7912 args->top_app_space_used = space_used;
7913
7914 return PROC_CLAIMED;
7915 } else {
7916 return PROC_RETURNED;
7917 }
7918 }
7919
7920 extern char *proc_name_address(void *p);
7921
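/*
 * Thread call invoked when the system is about to run out of mbufs: find the
 * process using the most socket mbuf space and defunct all of its sockets,
 * then (in the zalloc configuration) drain the mbuf and cluster zones.
 */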
7922 static void
7923 mbuf_watchdog_defunct(thread_call_param_t arg0, thread_call_param_t arg1)
7924 {
7925 #pragma unused(arg0, arg1)
7926 struct mbuf_watchdog_defunct_args args = {};
7927 struct fileproc *fp = NULL;
7928
7929 args.non_blocking = false;
7930 proc_iterate(PROC_ALLPROCLIST,
7931 mbuf_watchdog_defunct_iterate, &args, NULL, NULL);
7932
7933 /*
7934 * Defunct all sockets from this app.
7935 */
7936 if (args.top_app != NULL) {
7937 #if CONFIG_MBUF_MCACHE
7938 /* Restart the watchdog count. */
7939 lck_mtx_lock(mbuf_mlock);
7940 microuptime(&mb_wdtstart);
7941 lck_mtx_unlock(mbuf_mlock);
7942 #endif
7943 os_log(OS_LOG_DEFAULT, "%s: defuncting all sockets from %s.%d",
7944 __func__,
7945 proc_name_address(args.top_app),
7946 proc_pid(args.top_app));
7947 proc_fdlock(args.top_app);
7948 fdt_foreach(fp, args.top_app) {
7949 struct fileglob *fg = fp->fp_glob;
7950 struct socket *so = NULL;
7951
7952 if (FILEGLOB_DTYPE(fg) != DTYPE_SOCKET) {
7953 continue;
7954 }
7955 so = (struct socket *)fp_get_data(fp);
7956 if (!socket_try_lock(so)) {
7957 continue;
7958 }
7959 if (sosetdefunct(args.top_app, so,
7960 SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL,
7961 TRUE) == 0) {
7962 sodefunct(args.top_app, so,
7963 SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL);
7964 }
7965 socket_unlock(so, 0);
7966 }
7967 proc_fdunlock(args.top_app);
7968 proc_rele(args.top_app);
7969 mbstat.m_forcedefunct++;
7970 #if !CONFIG_MBUF_MCACHE
7971 zcache_drain(ZONE_ID_MBUF_CLUSTER_2K);
7972 zcache_drain(ZONE_ID_MBUF_CLUSTER_4K);
7973 zcache_drain(ZONE_ID_MBUF_CLUSTER_16K);
7974 zone_drain(zone_by_id(ZONE_ID_MBUF));
7975 zone_drain(zone_by_id(ZONE_ID_CLUSTER_2K));
7976 zone_drain(zone_by_id(ZONE_ID_CLUSTER_4K));
7977 zone_drain(zone_by_id(ZONE_ID_CLUSTER_16K));
7978 zone_drain(zone_by_id(ZONE_ID_MBUF_REF));
7979 #endif
7980 }
7981 mbuf_watchdog_defunct_active = false;
7982 }
7983
7984 #if !CONFIG_MBUF_MCACHE
7985 static void
7986 mbuf_zone_exhausted(zone_id_t zid, zone_t zone __unused)
7987 {
7988 if (mbuf_defunct_tcall == NULL) {
7989 return;
7990 }
7991
7992 if (zid == ZONE_ID_MBUF ||
7993 zid == ZONE_ID_CLUSTER_2K ||
7994 zid == ZONE_ID_CLUSTER_4K ||
7995 zid == ZONE_ID_CLUSTER_16K) {
7996 if (os_atomic_cmpxchg(&mbuf_watchdog_defunct_active,
7997 false, true, relaxed)) {
7998 thread_call_enter(mbuf_defunct_tcall);
7999 }
8000 }
8001 }
8002 EVENT_REGISTER_HANDLER(ZONE_EXHAUSTED, mbuf_zone_exhausted);
8003 #endif /* !CONFIG_MBUF_MCACHE */
8004
8005 #if CONFIG_MBUF_MCACHE
/*
 * Called during slab (blocking and non-blocking) allocation. If there
 * is at least one waiter, and the time since the first waiter became
 * blocked is greater than the watchdog timeout, panic the system.
 */
8011 static void
8012 mbuf_watchdog(void)
8013 {
8014 struct timeval now;
8015 unsigned int since;
8016 static thread_call_t defunct_tcall = NULL;
8017
8018 if (mb_waiters == 0 || !mb_watchdog) {
8019 return;
8020 }
8021
8022 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
8023
8024 microuptime(&now);
8025 since = now.tv_sec - mb_wdtstart.tv_sec;
8026
8027 if (mbuf_watchdog_defunct_active) {
8028 /*
8029 * Don't panic the system while we are trying
8030 * to find sockets to defunct.
8031 */
8032 return;
8033 }
8034 if (since >= MB_WDT_MAXTIME) {
8035 panic_plain("%s: %d waiters stuck for %u secs\n%s", __func__,
8036 mb_waiters, since, mbuf_dump());
8037 /* NOTREACHED */
8038 }
8039 /*
8040 * Check if we are about to panic the system due
8041 * to lack of mbufs and start defuncting sockets
8042 * from processes that use too many sockets.
8043 *
8044 * We're always called with the mbuf_mlock held,
8045 * so that also protects mbuf_watchdog_defunct_active.
8046 */
8047 if (since >= MB_WDT_MAXTIME / 2) {
8048 /*
8049 * Start a thread to defunct sockets
8050 * from apps that are over-using their socket
8051 * buffers.
8052 */
8053 if (defunct_tcall == NULL) {
8054 defunct_tcall =
8055 thread_call_allocate_with_options(mbuf_watchdog_defunct,
8056 NULL,
8057 THREAD_CALL_PRIORITY_KERNEL,
8058 THREAD_CALL_OPTIONS_ONCE);
8059 }
8060 if (defunct_tcall != NULL) {
8061 mbuf_watchdog_defunct_active = true;
8062 thread_call_enter(defunct_tcall);
8063 }
8064 }
8065 }
8066
/*
 * Called during blocking allocation. Returns TRUE if one or more objects
 * are available at the per-CPU cache layer and the allocation should be
 * retried at that level.
 */
8072 static boolean_t
8073 mbuf_sleep(mbuf_class_t class, unsigned int num, int wait)
8074 {
8075 boolean_t mcache_retry = FALSE;
8076
8077 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
8078
8079 /* Check if there's anything at the cache layer */
8080 if (mbuf_cached_above(class, wait)) {
8081 mcache_retry = TRUE;
8082 goto done;
8083 }
8084
8085 /* Nothing? Then try hard to get it from somewhere */
8086 m_reclaim(class, num, (wait & MCR_COMP));
8087
8088 /* We tried hard and got something? */
8089 if (m_infree(class) > 0) {
8090 mbstat.m_wait++;
8091 goto done;
8092 } else if (mbuf_cached_above(class, wait)) {
8093 mbstat.m_wait++;
8094 mcache_retry = TRUE;
8095 goto done;
8096 } else if (wait & MCR_TRYHARD) {
8097 mcache_retry = TRUE;
8098 goto done;
8099 }
8100
8101 /*
8102 * There's really nothing for us right now; inform the
8103 * cache(s) that there is a waiter below and go to sleep.
8104 */
8105 mbuf_waiter_inc(class, (wait & MCR_COMP));
8106
8107 VERIFY(!(wait & MCR_NOSLEEP));
8108
8109 /*
8110 * If this is the first waiter, arm the watchdog timer. Otherwise
8111 * check if we need to panic the system due to watchdog timeout.
8112 */
8113 if (mb_waiters == 0) {
8114 microuptime(&mb_wdtstart);
8115 } else {
8116 mbuf_watchdog();
8117 }
8118
8119 mb_waiters++;
8120 m_region_expand(class) += m_total(class) + num;
8121 /* wake up the worker thread */
8122 if (mbuf_worker_ready &&
8123 mbuf_worker_needs_wakeup) {
8124 wakeup((caddr_t)&mbuf_worker_needs_wakeup);
8125 mbuf_worker_needs_wakeup = FALSE;
8126 }
8127 mbwdog_logger("waiting (%d mbufs in class %s)", num, m_cname(class));
8128 (void) msleep(mb_waitchan, mbuf_mlock, (PZERO - 1), m_cname(class), NULL);
8129 mbwdog_logger("woke up (%d mbufs in class %s) ", num, m_cname(class));
8130
8131 /* We are now up; stop getting notified until next round */
8132 mbuf_waiter_dec(class, (wait & MCR_COMP));
8133
8134 /* We waited and got something */
8135 if (m_infree(class) > 0) {
8136 mbstat.m_wait++;
8137 goto done;
8138 } else if (mbuf_cached_above(class, wait)) {
8139 mbstat.m_wait++;
8140 mcache_retry = TRUE;
8141 }
8142 done:
8143 return mcache_retry;
8144 }
8145
8146 __attribute__((noreturn))
8147 static void
8148 mbuf_worker_thread(void)
8149 {
8150 int mbuf_expand;
8151
8152 while (1) {
8153 lck_mtx_lock(mbuf_mlock);
8154 mbwdog_logger("worker thread running");
8155 mbuf_worker_run_cnt++;
8156 mbuf_expand = 0;
8157 /*
8158 * Allocations are based on page size, so if we have depleted
8159 * the reserved spaces, try to free mbufs from the major classes.
8160 */
8161 #if PAGE_SIZE == 4096
8162 uint32_t m_mbclusters = m_total(MC_MBUF) >> NMBPCLSHIFT;
8163 uint32_t m_clusters = m_total(MC_CL);
8164 uint32_t m_bigclusters = m_total(MC_BIGCL) << NCLPBGSHIFT;
8165 uint32_t sumclusters = m_mbclusters + m_clusters + m_bigclusters;
8166 if (sumclusters >= nclusters) {
8167 mbwdog_logger("reclaiming bigcl");
8168 mbuf_drain_locked(TRUE);
8169 m_reclaim(MC_BIGCL, 4, FALSE);
8170 }
8171 #else
8172 uint32_t m_16kclusters = m_total(MC_16KCL);
8173 if (njcl > 0 && (m_16kclusters << NCLPJCLSHIFT) >= njcl) {
8174 mbwdog_logger("reclaiming 16kcl");
8175 mbuf_drain_locked(TRUE);
8176 m_reclaim(MC_16KCL, 4, FALSE);
8177 }
8178 #endif
8179 if (m_region_expand(MC_CL) > 0) {
8180 int n;
8181 mb_expand_cl_cnt++;
/* Adjust to the current number of clusters in use */
8183 n = m_region_expand(MC_CL) -
8184 (m_total(MC_CL) - m_infree(MC_CL));
8185 if ((n + m_total(MC_CL)) > m_maxlimit(MC_CL)) {
8186 n = m_maxlimit(MC_CL) - m_total(MC_CL);
8187 }
8188 if (n > 0) {
8189 mb_expand_cl_total += n;
8190 }
8191 m_region_expand(MC_CL) = 0;
8192
8193 if (n > 0) {
8194 mbwdog_logger("expanding MC_CL by %d", n);
8195 freelist_populate(MC_CL, n, M_WAIT);
8196 }
8197 }
8198 if (m_region_expand(MC_BIGCL) > 0) {
8199 int n;
8200 mb_expand_bigcl_cnt++;
/* Adjust to the current number of 4 KB clusters in use */
8202 n = m_region_expand(MC_BIGCL) -
8203 (m_total(MC_BIGCL) - m_infree(MC_BIGCL));
8204 if ((n + m_total(MC_BIGCL)) > m_maxlimit(MC_BIGCL)) {
8205 n = m_maxlimit(MC_BIGCL) - m_total(MC_BIGCL);
8206 }
8207 if (n > 0) {
8208 mb_expand_bigcl_total += n;
8209 }
8210 m_region_expand(MC_BIGCL) = 0;
8211
8212 if (n > 0) {
8213 mbwdog_logger("expanding MC_BIGCL by %d", n);
8214 freelist_populate(MC_BIGCL, n, M_WAIT);
8215 }
8216 }
8217 if (m_region_expand(MC_16KCL) > 0) {
8218 int n;
8219 mb_expand_16kcl_cnt++;
/* Adjust to the current number of 16 KB clusters in use */
8221 n = m_region_expand(MC_16KCL) -
8222 (m_total(MC_16KCL) - m_infree(MC_16KCL));
8223 if ((n + m_total(MC_16KCL)) > m_maxlimit(MC_16KCL)) {
8224 n = m_maxlimit(MC_16KCL) - m_total(MC_16KCL);
8225 }
8226 if (n > 0) {
8227 mb_expand_16kcl_total += n;
8228 }
8229 m_region_expand(MC_16KCL) = 0;
8230
8231 if (n > 0) {
8232 mbwdog_logger("expanding MC_16KCL by %d", n);
8233 (void) freelist_populate(MC_16KCL, n, M_WAIT);
8234 }
8235 }
8236
/*
 * Because we can run out of memory before filling the mbuf
 * map, we should not allocate more clusters than there are
 * mbufs -- otherwise we could have a large number of useless
 * clusters allocated.
 */
8243 mbwdog_logger("totals: MC_MBUF %d MC_BIGCL %d MC_CL %d MC_16KCL %d",
8244 m_total(MC_MBUF), m_total(MC_BIGCL), m_total(MC_CL),
8245 m_total(MC_16KCL));
8246 uint32_t total_mbufs = m_total(MC_MBUF);
8247 uint32_t total_clusters = m_total(MC_BIGCL) + m_total(MC_CL) +
8248 m_total(MC_16KCL);
8249 if (total_mbufs < total_clusters) {
8250 mbwdog_logger("expanding MC_MBUF by %d",
8251 total_clusters - total_mbufs);
8252 }
8253 while (total_mbufs < total_clusters) {
8254 mb_expand_cnt++;
8255 if (freelist_populate(MC_MBUF, 1, M_WAIT) == 0) {
8256 break;
8257 }
8258 total_mbufs = m_total(MC_MBUF);
8259 total_clusters = m_total(MC_BIGCL) + m_total(MC_CL) +
8260 m_total(MC_16KCL);
8261 }
8262
8263 mbuf_worker_needs_wakeup = TRUE;
8264 /*
8265 * If there's a deadlock and we're not sending / receiving
8266 * packets, net_uptime() won't be updated. Update it here
8267 * so we are sure it's correct.
8268 */
8269 net_update_uptime();
8270 mbuf_worker_last_runtime = net_uptime();
8271 assert_wait((caddr_t)&mbuf_worker_needs_wakeup,
8272 THREAD_UNINT);
8273 mbwdog_logger("worker thread sleeping");
8274 lck_mtx_unlock(mbuf_mlock);
8275 (void) thread_block((thread_continue_t)mbuf_worker_thread);
8276 }
8277 }
8278
8279 __attribute__((noreturn))
8280 static void
8281 mbuf_worker_thread_init(void)
8282 {
8283 mbuf_worker_ready++;
8284 mbuf_worker_thread();
8285 }
8286
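/*
 * Return the slab control structure for the given buffer, allocating the
 * slab group that covers the buffer's region of the mbuf map on first use.
 */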
8287 static mcl_slab_t *
8288 slab_get(void *buf)
8289 {
8290 mcl_slabg_t *slg;
8291 unsigned int ix, k;
8292
8293 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
8294
8295 VERIFY(MBUF_IN_MAP(buf));
8296 ix = ((unsigned char *)buf - mbutl) >> MBSHIFT;
8297 VERIFY(ix < maxslabgrp);
8298
8299 if ((slg = slabstbl[ix]) == NULL) {
8300 /*
8301 * In the current implementation, we never shrink the slabs
8302 * table; if we attempt to reallocate a cluster group when
8303 * it's already allocated, panic since this is a sign of a
8304 * memory corruption (slabstbl[ix] got nullified).
8305 */
8306 ++slabgrp;
8307 VERIFY(ix < slabgrp);
8308 /*
8309 * Slabs expansion can only be done single threaded; when
8310 * we get here, it must be as a result of m_clalloc() which
8311 * is serialized and therefore mb_clalloc_busy must be set.
8312 */
8313 VERIFY(mb_clalloc_busy);
8314 lck_mtx_unlock(mbuf_mlock);
8315
8316 /* This is a new buffer; create the slabs group for it */
8317 slg = zalloc_permanent_type(mcl_slabg_t);
8318 slg->slg_slab = zalloc_permanent(sizeof(mcl_slab_t) * NSLABSPMB,
8319 ZALIGN(mcl_slab_t));
8320
8321 lck_mtx_lock(mbuf_mlock);
8322 /*
8323 * No other thread could have gone into m_clalloc() after
8324 * we dropped the lock above, so verify that it's true.
8325 */
8326 VERIFY(mb_clalloc_busy);
8327
8328 slabstbl[ix] = slg;
8329
8330 /* Chain each slab in the group to its forward neighbor */
8331 for (k = 1; k < NSLABSPMB; k++) {
8332 slg->slg_slab[k - 1].sl_next = &slg->slg_slab[k];
8333 }
8334 VERIFY(slg->slg_slab[NSLABSPMB - 1].sl_next == NULL);
8335
8336 /* And chain the last slab in the previous group to this */
8337 if (ix > 0) {
8338 VERIFY(slabstbl[ix - 1]->
8339 slg_slab[NSLABSPMB - 1].sl_next == NULL);
8340 slabstbl[ix - 1]->slg_slab[NSLABSPMB - 1].sl_next =
8341 &slg->slg_slab[0];
8342 }
8343 }
8344
8345 ix = MTOPG(buf) % NSLABSPMB;
8346 VERIFY(ix < NSLABSPMB);
8347
8348 return &slg->slg_slab[ix];
8349 }
8350
8351 static void
8352 slab_init(mcl_slab_t *sp, mbuf_class_t class, u_int32_t flags,
8353 void *base, void *head, unsigned int len, int refcnt, int chunks)
8354 {
8355 sp->sl_class = class;
8356 sp->sl_flags = flags;
8357 sp->sl_base = base;
8358 sp->sl_head = head;
8359 sp->sl_len = len;
8360 sp->sl_refcnt = refcnt;
8361 sp->sl_chunks = chunks;
8362 slab_detach(sp);
8363 }
8364
8365 static void
8366 slab_insert(mcl_slab_t *sp, mbuf_class_t class)
8367 {
8368 VERIFY(slab_is_detached(sp));
8369 m_slab_cnt(class)++;
8370 TAILQ_INSERT_TAIL(&m_slablist(class), sp, sl_link);
8371 sp->sl_flags &= ~SLF_DETACHED;
8372
/*
 * If a buffer spans multiple contiguous pages, clear the detached
 * flag on the additional slabs as well.
 */
8377 if (class == MC_16KCL) {
8378 int k;
8379 for (k = 1; k < NSLABSP16KB; k++) {
8380 sp = sp->sl_next;
8381 /* Next slab must already be present */
8382 VERIFY(sp != NULL && slab_is_detached(sp));
8383 sp->sl_flags &= ~SLF_DETACHED;
8384 }
8385 }
8386 }
8387
8388 static void
8389 slab_remove(mcl_slab_t *sp, mbuf_class_t class)
8390 {
8391 int k;
8392 VERIFY(!slab_is_detached(sp));
8393 VERIFY(m_slab_cnt(class) > 0);
8394 m_slab_cnt(class)--;
8395 TAILQ_REMOVE(&m_slablist(class), sp, sl_link);
8396 slab_detach(sp);
8397 if (class == MC_16KCL) {
8398 for (k = 1; k < NSLABSP16KB; k++) {
8399 sp = sp->sl_next;
8400 /* Next slab must already be present */
8401 VERIFY(sp != NULL);
8402 VERIFY(!slab_is_detached(sp));
8403 slab_detach(sp);
8404 }
8405 }
8406 }
8407
8408 static boolean_t
8409 slab_inrange(mcl_slab_t *sp, void *buf)
8410 {
8411 return (uintptr_t)buf >= (uintptr_t)sp->sl_base &&
8412 (uintptr_t)buf < ((uintptr_t)sp->sl_base + sp->sl_len);
8413 }
8414
8415 #undef panic
8416
8417 static void
8418 slab_nextptr_panic(mcl_slab_t *sp, void *addr)
8419 {
8420 int i;
8421 unsigned int chunk_len = sp->sl_len / sp->sl_chunks;
8422 uintptr_t buf = (uintptr_t)sp->sl_base;
8423
8424 for (i = 0; i < sp->sl_chunks; i++, buf += chunk_len) {
8425 void *next = ((mcache_obj_t *)buf)->obj_next;
8426 if (next != addr) {
8427 continue;
8428 }
8429 if (!mclverify) {
8430 if (next != NULL && !MBUF_IN_MAP(next)) {
8431 mcache_t *cp = m_cache(sp->sl_class);
8432 panic("%s: %s buffer %p in slab %p modified "
8433 "after free at offset 0: %p out of range "
8434 "[%p-%p)\n", __func__, cp->mc_name,
8435 (void *)buf, sp, next, mbutl, embutl);
8436 /* NOTREACHED */
8437 }
8438 } else {
8439 mcache_audit_t *mca = mcl_audit_buf2mca(sp->sl_class,
8440 (mcache_obj_t *)buf);
8441 mcl_audit_verify_nextptr(next, mca);
8442 }
8443 }
8444 }
8445
8446 static void
8447 slab_detach(mcl_slab_t *sp)
8448 {
8449 sp->sl_link.tqe_next = (mcl_slab_t *)-1;
8450 sp->sl_link.tqe_prev = (mcl_slab_t **)-1;
8451 sp->sl_flags |= SLF_DETACHED;
8452 }
8453
8454 static boolean_t
8455 slab_is_detached(mcl_slab_t *sp)
8456 {
8457 return (intptr_t)sp->sl_link.tqe_next == -1 &&
8458 (intptr_t)sp->sl_link.tqe_prev == -1 &&
8459 (sp->sl_flags & SLF_DETACHED);
8460 }
8461
8462 static void
8463 mcl_audit_init(void *buf, mcache_audit_t **mca_list,
8464 mcache_obj_t **con_list, size_t con_size, unsigned int num)
8465 {
8466 mcache_audit_t *mca, *mca_tail;
8467 mcache_obj_t *con = NULL;
8468 boolean_t save_contents = (con_list != NULL);
8469 unsigned int i, ix;
8470
8471 ASSERT(num <= NMBPG);
8472 ASSERT(con_list == NULL || con_size != 0);
8473
8474 ix = MTOPG(buf);
8475 VERIFY(ix < maxclaudit);
8476
8477 /* Make sure we haven't been here before */
8478 for (i = 0; i < num; i++) {
8479 VERIFY(mclaudit[ix].cl_audit[i] == NULL);
8480 }
8481
8482 mca = mca_tail = *mca_list;
8483 if (save_contents) {
8484 con = *con_list;
8485 }
8486
8487 for (i = 0; i < num; i++) {
8488 mcache_audit_t *next;
8489
8490 next = mca->mca_next;
8491 bzero(mca, sizeof(*mca));
8492 mca->mca_next = next;
8493 mclaudit[ix].cl_audit[i] = mca;
8494
8495 /* Attach the contents buffer if requested */
8496 if (save_contents) {
8497 mcl_saved_contents_t *msc =
8498 (mcl_saved_contents_t *)(void *)con;
8499
8500 VERIFY(msc != NULL);
8501 VERIFY(IS_P2ALIGNED(msc, sizeof(u_int64_t)));
8502 VERIFY(con_size == sizeof(*msc));
8503 mca->mca_contents_size = con_size;
8504 mca->mca_contents = msc;
8505 con = con->obj_next;
8506 bzero(mca->mca_contents, mca->mca_contents_size);
8507 }
8508
8509 mca_tail = mca;
8510 mca = mca->mca_next;
8511 }
8512
8513 if (save_contents) {
8514 *con_list = con;
8515 }
8516
8517 *mca_list = mca_tail->mca_next;
8518 mca_tail->mca_next = NULL;
8519 }
8520
8521 static void
8522 mcl_audit_free(void *buf, unsigned int num)
8523 {
8524 unsigned int i, ix;
8525 mcache_audit_t *mca, *mca_list;
8526
8527 ix = MTOPG(buf);
8528 VERIFY(ix < maxclaudit);
8529
8530 if (mclaudit[ix].cl_audit[0] != NULL) {
8531 mca_list = mclaudit[ix].cl_audit[0];
8532 for (i = 0; i < num; i++) {
8533 mca = mclaudit[ix].cl_audit[i];
8534 mclaudit[ix].cl_audit[i] = NULL;
8535 if (mca->mca_contents) {
8536 mcache_free(mcl_audit_con_cache,
8537 mca->mca_contents);
8538 }
8539 }
8540 mcache_free_ext(mcache_audit_cache,
8541 (mcache_obj_t *)mca_list);
8542 }
8543 }
8544
8545 /*
8546 * Given an address of a buffer (mbuf/2KB/4KB/16KB), return
8547 * the corresponding audit structure for that buffer.
8548 */
8549 static mcache_audit_t *
8550 mcl_audit_buf2mca(mbuf_class_t class, mcache_obj_t *mobj)
8551 {
8552 mcache_audit_t *mca = NULL;
8553 int ix = MTOPG(mobj), m_idx = 0;
8554 unsigned char *page_addr;
8555
8556 VERIFY(ix < maxclaudit);
8557 VERIFY(IS_P2ALIGNED(mobj, MIN(m_maxsize(class), PAGE_SIZE)));
8558
8559 page_addr = PGTOM(ix);
8560
8561 switch (class) {
8562 case MC_MBUF:
8563 /*
8564 * For the mbuf case, find the index of the page
8565 * used by the mbuf and use that index to locate the
8566 * base address of the page. Then find out the
8567 * mbuf index relative to the page base and use
8568 * it to locate the audit structure.
8569 */
8570 m_idx = MBPAGEIDX(page_addr, mobj);
8571 VERIFY(m_idx < (int)NMBPG);
8572 mca = mclaudit[ix].cl_audit[m_idx];
8573 break;
8574
8575 case MC_CL:
8576 /*
8577 * Same thing as above, but for 2KB clusters in a page.
8578 */
8579 m_idx = CLPAGEIDX(page_addr, mobj);
8580 VERIFY(m_idx < (int)NCLPG);
8581 mca = mclaudit[ix].cl_audit[m_idx];
8582 break;
8583
8584 case MC_BIGCL:
8585 m_idx = BCLPAGEIDX(page_addr, mobj);
8586 VERIFY(m_idx < (int)NBCLPG);
8587 mca = mclaudit[ix].cl_audit[m_idx];
8588 break;
8589 case MC_16KCL:
8590 /*
8591 * Same as above, but only return the first element.
8592 */
8593 mca = mclaudit[ix].cl_audit[0];
8594 break;
8595
8596 default:
8597 VERIFY(0);
8598 /* NOTREACHED */
8599 }
8600
8601 return mca;
8602 }
8603
8604 static void
8605 mcl_audit_mbuf(mcache_audit_t *mca, void *addr, boolean_t composite,
8606 boolean_t alloc)
8607 {
8608 struct mbuf *m = addr;
8609 mcache_obj_t *next = ((mcache_obj_t *)m)->obj_next;
8610
8611 VERIFY(mca->mca_contents != NULL &&
8612 mca->mca_contents_size == AUDIT_CONTENTS_SIZE);
8613
8614 if (mclverify) {
8615 mcl_audit_verify_nextptr(next, mca);
8616 }
8617
8618 if (!alloc) {
8619 /* Save constructed mbuf fields */
8620 mcl_audit_save_mbuf(m, mca);
8621 if (mclverify) {
8622 mcache_set_pattern(MCACHE_FREE_PATTERN, m,
8623 m_maxsize(MC_MBUF));
8624 }
8625 ((mcache_obj_t *)m)->obj_next = next;
8626 return;
8627 }
8628
8629 /* Check if the buffer has been corrupted while in freelist */
8630 if (mclverify) {
8631 mcache_audit_free_verify_set(mca, addr, 0, m_maxsize(MC_MBUF));
8632 }
8633 /* Restore constructed mbuf fields */
8634 mcl_audit_restore_mbuf(m, mca, composite);
8635 }
8636
8637 static void
8638 mcl_audit_restore_mbuf(struct mbuf *m, mcache_audit_t *mca, boolean_t composite)
8639 {
8640 struct mbuf *ms = MCA_SAVED_MBUF_PTR(mca);
8641
8642 if (composite) {
8643 struct mbuf *next = m->m_next;
8644 VERIFY(ms->m_flags == M_EXT && m_get_rfa(ms) != NULL &&
8645 MBUF_IS_COMPOSITE(ms));
8646 VERIFY(mca->mca_contents_size == AUDIT_CONTENTS_SIZE);
/*
 * We could have hand-picked the mbuf fields and restored
 * them individually, but that would be a maintenance
 * headache. Instead, restore everything that was saved;
 * the mbuf layer will recheck and reinitialize anyway.
 */
8653 bcopy(ms, m, MCA_SAVED_MBUF_SIZE);
8654 m->m_next = next;
8655 } else {
8656 /*
8657 * For a regular mbuf (no cluster attached) there's nothing
8658 * to restore other than the type field, which is expected
8659 * to be MT_FREE.
8660 */
8661 m->m_type = ms->m_type;
8662 }
8663 _MCHECK(m);
8664 }
8665
8666 static void
8667 mcl_audit_save_mbuf(struct mbuf *m, mcache_audit_t *mca)
8668 {
8669 VERIFY(mca->mca_contents_size == AUDIT_CONTENTS_SIZE);
8670 _MCHECK(m);
8671 bcopy(m, MCA_SAVED_MBUF_PTR(mca), MCA_SAVED_MBUF_SIZE);
8672 }
8673
8674 static void
8675 mcl_audit_cluster(mcache_audit_t *mca, void *addr, size_t size, boolean_t alloc,
8676 boolean_t save_next)
8677 {
8678 mcache_obj_t *next = ((mcache_obj_t *)addr)->obj_next;
8679
8680 if (!alloc) {
8681 if (mclverify) {
8682 mcache_set_pattern(MCACHE_FREE_PATTERN, addr, size);
8683 }
8684 if (save_next) {
8685 mcl_audit_verify_nextptr(next, mca);
8686 ((mcache_obj_t *)addr)->obj_next = next;
8687 }
8688 } else if (mclverify) {
8689 /* Check if the buffer has been corrupted while in freelist */
8690 mcl_audit_verify_nextptr(next, mca);
8691 mcache_audit_free_verify_set(mca, addr, 0, size);
8692 }
8693 }
8694
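/*
 * Record the current thread, backtrace and timestamp in the scratch audit
 * area, rotating the previous values into the "previous" (msa_p*) fields.
 */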
8695 static void
8696 mcl_audit_scratch(mcache_audit_t *mca)
8697 {
8698 void *stack[MCACHE_STACK_DEPTH + 1];
8699 mcl_scratch_audit_t *msa;
8700 struct timeval now;
8701
8702 VERIFY(mca->mca_contents != NULL);
8703 msa = MCA_SAVED_SCRATCH_PTR(mca);
8704
8705 msa->msa_pthread = msa->msa_thread;
8706 msa->msa_thread = current_thread();
8707 bcopy(msa->msa_stack, msa->msa_pstack, sizeof(msa->msa_pstack));
8708 msa->msa_pdepth = msa->msa_depth;
8709 bzero(stack, sizeof(stack));
8710 msa->msa_depth = OSBacktrace(stack, MCACHE_STACK_DEPTH + 1) - 1;
8711 bcopy(&stack[1], msa->msa_stack, sizeof(msa->msa_stack));
8712
8713 msa->msa_ptstamp = msa->msa_tstamp;
8714 microuptime(&now);
/* tstamp is in ms relative to mb_start */
8716 msa->msa_tstamp = ((now.tv_usec - mb_start.tv_usec) / 1000);
8717 if ((now.tv_sec - mb_start.tv_sec) > 0) {
8718 msa->msa_tstamp += ((now.tv_sec - mb_start.tv_sec) * 1000);
8719 }
8720 }
8721
8722 __abortlike
8723 static void
8724 mcl_audit_mcheck_panic(struct mbuf *m)
8725 {
8726 char buf[DUMP_MCA_BUF_SIZE];
8727 mcache_audit_t *mca;
8728
8729 MRANGE(m);
8730 mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m);
8731
8732 panic("mcl_audit: freed mbuf %p with type 0x%x (instead of 0x%x)\n%s",
8733 m, (u_int16_t)m->m_type, MT_FREE, mcache_dump_mca(buf, mca));
8734 /* NOTREACHED */
8735 }
8736
8737 __abortlike
8738 static void
8739 mcl_audit_verify_nextptr_panic(void *next, mcache_audit_t *mca)
8740 {
8741 char buf[DUMP_MCA_BUF_SIZE];
8742 panic("mcl_audit: buffer %p modified after free at offset 0: "
8743 "%p out of range [%p-%p)\n%s\n",
8744 mca->mca_addr, next, mbutl, embutl, mcache_dump_mca(buf, mca));
8745 /* NOTREACHED */
8746 }
8747
8748 static void
8749 mcl_audit_verify_nextptr(void *next, mcache_audit_t *mca)
8750 {
8751 if (next != NULL && !MBUF_IN_MAP(next) &&
8752 (next != (void *)MCACHE_FREE_PATTERN || !mclverify)) {
8753 mcl_audit_verify_nextptr_panic(next, mca);
8754 }
8755 }
8756
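/*
 * Mix the bits of a pointer-sized value so that nearby inputs produce
 * well-spread hash values; used by the backtrace and address hashes below.
 */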
8757 static uintptr_t
8758 hash_mix(uintptr_t x)
8759 {
8760 #ifndef __LP64__
8761 x += ~(x << 15);
8762 x ^= (x >> 10);
8763 x += (x << 3);
8764 x ^= (x >> 6);
8765 x += ~(x << 11);
8766 x ^= (x >> 16);
8767 #else
8768 x += ~(x << 32);
8769 x ^= (x >> 22);
8770 x += ~(x << 13);
8771 x ^= (x >> 8);
8772 x += (x << 3);
8773 x ^= (x >> 15);
8774 x += ~(x << 27);
8775 x ^= (x >> 31);
8776 #endif
8777 return x;
8778 }
8779
8780 static uint32_t
8781 hashbacktrace(uintptr_t* bt, uint32_t depth, uint32_t max_size)
8782 {
8783 uintptr_t hash = 0;
8784 uintptr_t mask = max_size - 1;
8785
8786 while (depth) {
8787 hash += bt[--depth];
8788 }
8789
8790 hash = hash_mix(hash) & mask;
8791
8792 assert(hash < max_size);
8793
8794 return (uint32_t) hash;
8795 }
8796
8797 static uint32_t
8798 hashaddr(uintptr_t pt, uint32_t max_size)
8799 {
8800 uintptr_t hash = 0;
8801 uintptr_t mask = max_size - 1;
8802
8803 hash = hash_mix(pt) & mask;
8804
8805 assert(hash < max_size);
8806
8807 return (uint32_t) hash;
8808 }
8809
8810 /* This function turns on mbuf leak detection */
8811 static void
8812 mleak_activate(void)
8813 {
8814 mleak_table.mleak_sample_factor = MLEAK_SAMPLE_FACTOR;
8815 PE_parse_boot_argn("mleak_sample_factor",
8816 &mleak_table.mleak_sample_factor,
8817 sizeof(mleak_table.mleak_sample_factor));
8818
8819 if (mleak_table.mleak_sample_factor == 0) {
8820 mclfindleak = 0;
8821 }
8822
8823 if (mclfindleak == 0) {
8824 return;
8825 }
8826
8827 vm_size_t alloc_size =
8828 mleak_alloc_buckets * sizeof(struct mallocation);
8829 vm_size_t trace_size = mleak_trace_buckets * sizeof(struct mtrace);
8830
8831 mleak_allocations = zalloc_permanent(alloc_size, ZALIGN(struct mallocation));
8832 mleak_traces = zalloc_permanent(trace_size, ZALIGN(struct mtrace));
8833 mleak_stat = zalloc_permanent(MLEAK_STAT_SIZE(MLEAK_NUM_TRACES),
8834 ZALIGN(mleak_stat_t));
8835
8836 mleak_stat->ml_cnt = MLEAK_NUM_TRACES;
8837 #ifdef __LP64__
8838 mleak_stat->ml_isaddr64 = 1;
8839 #endif /* __LP64__ */
8840 }
8841
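/*
 * Allocation/free hook for leak detection: frees are always processed, while
 * allocations are sampled, capturing a backtrace for roughly one out of
 * every mleak_sample_factor allocations.
 */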
8842 static void
8843 mleak_logger(u_int32_t num, mcache_obj_t *addr, boolean_t alloc)
8844 {
8845 int temp;
8846
8847 if (mclfindleak == 0) {
8848 return;
8849 }
8850
8851 if (!alloc) {
8852 return mleak_free(addr);
8853 }
8854
8855 temp = os_atomic_inc_orig(&mleak_table.mleak_capture, relaxed);
8856
8857 if ((temp % mleak_table.mleak_sample_factor) == 0 && addr != NULL) {
8858 uintptr_t bt[MLEAK_STACK_DEPTH];
8859 unsigned int logged = backtrace(bt, MLEAK_STACK_DEPTH, NULL, NULL);
8860 mleak_log(bt, addr, logged, num);
8861 }
8862 }
8863
/*
 * This function records the allocation in the mleak_allocations table
 * and the backtrace in the mleak_traces table. If the allocation slot
 * is already in use, the old allocation is replaced with the new one.
 * If the trace slot is already in use with a different trace, return
 * without recording (or increment the refcount if it is the same trace).
 */
8870 static boolean_t
8871 mleak_log(uintptr_t *bt, mcache_obj_t *addr, uint32_t depth, int num)
8872 {
8873 struct mallocation *allocation;
8874 struct mtrace *trace;
8875 uint32_t trace_index;
8876
8877 /* Quit if someone else modifying the tables */
8878 if (!lck_mtx_try_lock_spin(mleak_lock)) {
8879 mleak_table.total_conflicts++;
8880 return FALSE;
8881 }
8882
8883 allocation = &mleak_allocations[hashaddr((uintptr_t)addr,
8884 mleak_alloc_buckets)];
8885 trace_index = hashbacktrace(bt, depth, mleak_trace_buckets);
8886 trace = &mleak_traces[trace_index];
8887
8888 VERIFY(allocation <= &mleak_allocations[mleak_alloc_buckets - 1]);
8889 VERIFY(trace <= &mleak_traces[mleak_trace_buckets - 1]);
8890
8891 allocation->hitcount++;
8892 trace->hitcount++;
8893
8894 /*
8895 * If the allocation bucket we want is occupied
8896 * and the occupier has the same trace, just bail.
8897 */
8898 if (allocation->element != NULL &&
8899 trace_index == allocation->trace_index) {
8900 mleak_table.alloc_collisions++;
8901 lck_mtx_unlock(mleak_lock);
8902 return TRUE;
8903 }
8904
/*
 * Store the backtrace in the traces array;
 * an allocs count of zero means the trace bucket is free.
 */
8909 if (trace->allocs > 0 &&
8910 bcmp(trace->addr, bt, (depth * sizeof(uintptr_t))) != 0) {
8911 /* Different, unique trace, but the same hash! Bail out. */
8912 trace->collisions++;
8913 mleak_table.trace_collisions++;
8914 lck_mtx_unlock(mleak_lock);
8915 return TRUE;
8916 } else if (trace->allocs > 0) {
8917 /* Same trace, already added, so increment refcount */
8918 trace->allocs++;
8919 } else {
8920 /* Found an unused trace bucket, so record the trace here */
8921 if (trace->depth != 0) {
8922 /* this slot previously used but not currently in use */
8923 mleak_table.trace_overwrites++;
8924 }
8925 mleak_table.trace_recorded++;
8926 trace->allocs = 1;
8927 memcpy(trace->addr, bt, (depth * sizeof(uintptr_t)));
8928 trace->depth = depth;
8929 trace->collisions = 0;
8930 }
8931
8932 /* Step 2: Store the allocation record in the allocations array */
8933 if (allocation->element != NULL) {
/*
 * Replace an existing allocation. No need to preserve it,
 * because only a subset of the allocations is being
 * recorded anyway.
 */
8939 mleak_table.alloc_collisions++;
8940 } else if (allocation->trace_index != 0) {
8941 mleak_table.alloc_overwrites++;
8942 }
8943 allocation->element = addr;
8944 allocation->trace_index = trace_index;
8945 allocation->count = num;
8946 mleak_table.alloc_recorded++;
8947 mleak_table.outstanding_allocs++;
8948
8949 lck_mtx_unlock(mleak_lock);
8950 return TRUE;
8951 }
8952
8953 static void
8954 mleak_free(mcache_obj_t *addr)
8955 {
8956 while (addr != NULL) {
8957 struct mallocation *allocation = &mleak_allocations
8958 [hashaddr((uintptr_t)addr, mleak_alloc_buckets)];
8959
8960 if (allocation->element == addr &&
8961 allocation->trace_index < mleak_trace_buckets) {
8962 lck_mtx_lock_spin(mleak_lock);
8963 if (allocation->element == addr &&
8964 allocation->trace_index < mleak_trace_buckets) {
8965 struct mtrace *trace;
8966 trace = &mleak_traces[allocation->trace_index];
8967 /* allocs = 0 means trace bucket is unused */
8968 if (trace->allocs > 0) {
8969 trace->allocs--;
8970 }
8971 if (trace->allocs == 0) {
8972 trace->depth = 0;
8973 }
8974 /* NULL element means alloc bucket is unused */
8975 allocation->element = NULL;
8976 mleak_table.outstanding_allocs--;
8977 }
8978 lck_mtx_unlock(mleak_lock);
8979 }
8980 addr = addr->obj_next;
8981 }
8982 }
8983
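/*
 * Build mleak_top_trace[] by insertion-sorting the trace buckets with the
 * most outstanding allocations, in descending order of allocation count.
 */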
8984 static void
8985 mleak_sort_traces()
8986 {
8987 int i, j, k;
8988 struct mtrace *swap;
8989
8990 for (i = 0; i < MLEAK_NUM_TRACES; i++) {
8991 mleak_top_trace[i] = NULL;
8992 }
8993
8994 for (i = 0, j = 0; j < MLEAK_NUM_TRACES && i < mleak_trace_buckets; i++) {
8995 if (mleak_traces[i].allocs <= 0) {
8996 continue;
8997 }
8998
8999 mleak_top_trace[j] = &mleak_traces[i];
9000 for (k = j; k > 0; k--) {
9001 if (mleak_top_trace[k]->allocs <=
9002 mleak_top_trace[k - 1]->allocs) {
9003 break;
9004 }
9005
9006 swap = mleak_top_trace[k - 1];
9007 mleak_top_trace[k - 1] = mleak_top_trace[k];
9008 mleak_top_trace[k] = swap;
9009 }
9010 j++;
9011 }
9012
9013 j--;
9014 for (; i < mleak_trace_buckets; i++) {
9015 if (mleak_traces[i].allocs <= mleak_top_trace[j]->allocs) {
9016 continue;
9017 }
9018
9019 mleak_top_trace[j] = &mleak_traces[i];
9020
9021 for (k = j; k > 0; k--) {
9022 if (mleak_top_trace[k]->allocs <=
9023 mleak_top_trace[k - 1]->allocs) {
9024 break;
9025 }
9026
9027 swap = mleak_top_trace[k - 1];
9028 mleak_top_trace[k - 1] = mleak_top_trace[k];
9029 mleak_top_trace[k] = swap;
9030 }
9031 }
9032 }
9033
9034 static void
9035 mleak_update_stats()
9036 {
9037 mleak_trace_stat_t *mltr;
9038 int i;
9039
9040 VERIFY(mleak_stat != NULL);
9041 #ifdef __LP64__
9042 VERIFY(mleak_stat->ml_isaddr64);
9043 #else
9044 VERIFY(!mleak_stat->ml_isaddr64);
9045 #endif /* !__LP64__ */
9046 VERIFY(mleak_stat->ml_cnt == MLEAK_NUM_TRACES);
9047
9048 mleak_sort_traces();
9049
9050 mltr = &mleak_stat->ml_trace[0];
9051 bzero(mltr, sizeof(*mltr) * MLEAK_NUM_TRACES);
9052 for (i = 0; i < MLEAK_NUM_TRACES; i++) {
9053 int j;
9054
9055 if (mleak_top_trace[i] == NULL ||
9056 mleak_top_trace[i]->allocs == 0) {
9057 continue;
9058 }
9059
9060 mltr->mltr_collisions = mleak_top_trace[i]->collisions;
9061 mltr->mltr_hitcount = mleak_top_trace[i]->hitcount;
9062 mltr->mltr_allocs = mleak_top_trace[i]->allocs;
9063 mltr->mltr_depth = mleak_top_trace[i]->depth;
9064
9065 VERIFY(mltr->mltr_depth <= MLEAK_STACK_DEPTH);
9066 for (j = 0; j < mltr->mltr_depth; j++) {
9067 mltr->mltr_addr[j] = mleak_top_trace[i]->addr[j];
9068 }
9069
9070 mltr++;
9071 }
9072 }
9073
9074 static struct mbtypes {
9075 int mt_type;
9076 const char *mt_name;
9077 } mbtypes[] = {
9078 { MT_DATA, "data" },
9079 { MT_OOBDATA, "oob data" },
9080 { MT_CONTROL, "ancillary data" },
9081 { MT_HEADER, "packet headers" },
9082 { MT_SOCKET, "socket structures" },
9083 { MT_PCB, "protocol control blocks" },
9084 { MT_RTABLE, "routing table entries" },
9085 { MT_HTABLE, "IMP host table entries" },
9086 { MT_ATABLE, "address resolution tables" },
9087 { MT_FTABLE, "fragment reassembly queue headers" },
9088 { MT_SONAME, "socket names and addresses" },
9089 { MT_SOOPTS, "socket options" },
9090 { MT_RIGHTS, "access rights" },
9091 { MT_IFADDR, "interface addresses" },
9092 { MT_TAG, "packet tags" },
9093 { 0, NULL }
9094 };
9095
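/*
 * Advance the output cursor past the bytes just written and bail out of
 * mbuf_dump() once the buffer is exhausted.
 */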
9096 #define MBUF_DUMP_BUF_CHK() { \
9097 clen -= k; \
9098 if (clen < 1) \
9099 goto done; \
9100 c += k; \
9101 }
9102
9103 static char *
9104 mbuf_dump(void)
9105 {
9106 unsigned long totmem = 0, totfree = 0, totmbufs, totused, totpct,
9107 totreturned = 0;
9108 u_int32_t m_mbufs = 0, m_clfree = 0, m_bigclfree = 0;
9109 u_int32_t m_mbufclfree = 0, m_mbufbigclfree = 0;
9110 u_int32_t m_16kclusters = 0, m_16kclfree = 0, m_mbuf16kclfree = 0;
9111 int nmbtypes = sizeof(mbstat.m_mtypes) / sizeof(short);
9112 uint8_t seen[256];
9113 struct mbtypes *mp;
9114 mb_class_stat_t *sp;
9115 mleak_trace_stat_t *mltr;
9116 char *c = mbuf_dump_buf;
9117 int i, j, k, clen = MBUF_DUMP_BUF_SIZE;
9118 struct mbuf_watchdog_defunct_args args = {};
9119
9120 mbuf_dump_buf[0] = '\0';
9121
9122 /* synchronize all statistics in the mbuf table */
9123 mbuf_stat_sync();
9124 mbuf_mtypes_sync(TRUE);
9125
9126 sp = &mb_stat->mbs_class[0];
9127 for (i = 0; i < mb_stat->mbs_cnt; i++, sp++) {
9128 u_int32_t mem;
9129
9130 if (m_class(i) == MC_MBUF) {
9131 m_mbufs = sp->mbcl_active;
9132 } else if (m_class(i) == MC_CL) {
9133 m_clfree = sp->mbcl_total - sp->mbcl_active;
9134 } else if (m_class(i) == MC_BIGCL) {
9135 m_bigclfree = sp->mbcl_total - sp->mbcl_active;
9136 } else if (njcl > 0 && m_class(i) == MC_16KCL) {
9137 m_16kclfree = sp->mbcl_total - sp->mbcl_active;
9138 m_16kclusters = sp->mbcl_total;
9139 } else if (m_class(i) == MC_MBUF_CL) {
9140 m_mbufclfree = sp->mbcl_total - sp->mbcl_active;
9141 } else if (m_class(i) == MC_MBUF_BIGCL) {
9142 m_mbufbigclfree = sp->mbcl_total - sp->mbcl_active;
9143 } else if (njcl > 0 && m_class(i) == MC_MBUF_16KCL) {
9144 m_mbuf16kclfree = sp->mbcl_total - sp->mbcl_active;
9145 }
9146
9147 mem = sp->mbcl_ctotal * sp->mbcl_size;
9148 totmem += mem;
9149 totfree += (sp->mbcl_mc_cached + sp->mbcl_infree) *
9150 sp->mbcl_size;
9151 totreturned += sp->mbcl_release_cnt;
9152 }
9153
9154 /* adjust free counts to include composite caches */
9155 m_clfree += m_mbufclfree;
9156 m_bigclfree += m_mbufbigclfree;
9157 m_16kclfree += m_mbuf16kclfree;
9158
9159 totmbufs = 0;
9160 for (mp = mbtypes; mp->mt_name != NULL; mp++) {
9161 totmbufs += mbstat.m_mtypes[mp->mt_type];
9162 }
9163 if (totmbufs > m_mbufs) {
9164 totmbufs = m_mbufs;
9165 }
9166 k = scnprintf(c, clen, "%lu/%u mbufs in use:\n", totmbufs, m_mbufs);
9167 MBUF_DUMP_BUF_CHK();
9168
9169 bzero(&seen, sizeof(seen));
9170 for (mp = mbtypes; mp->mt_name != NULL; mp++) {
9171 if (mbstat.m_mtypes[mp->mt_type] != 0) {
9172 seen[mp->mt_type] = 1;
9173 k = scnprintf(c, clen, "\t%u mbufs allocated to %s\n",
9174 mbstat.m_mtypes[mp->mt_type], mp->mt_name);
9175 MBUF_DUMP_BUF_CHK();
9176 }
9177 }
9178 seen[MT_FREE] = 1;
9179 for (i = 0; i < nmbtypes; i++) {
9180 if (!seen[i] && mbstat.m_mtypes[i] != 0) {
9181 k = scnprintf(c, clen, "\t%u mbufs allocated to "
9182 "<mbuf type %d>\n", mbstat.m_mtypes[i], i);
9183 MBUF_DUMP_BUF_CHK();
9184 }
9185 }
9186 if ((m_mbufs - totmbufs) > 0) {
9187 k = scnprintf(c, clen, "\t%lu mbufs allocated to caches\n",
9188 m_mbufs - totmbufs);
9189 MBUF_DUMP_BUF_CHK();
9190 }
9191 k = scnprintf(c, clen, "%u/%u mbuf 2KB clusters in use\n"
9192 "%u/%u mbuf 4KB clusters in use\n",
9193 (unsigned int)(mbstat.m_clusters - m_clfree),
9194 (unsigned int)mbstat.m_clusters,
9195 (unsigned int)(mbstat.m_bigclusters - m_bigclfree),
9196 (unsigned int)mbstat.m_bigclusters);
9197 MBUF_DUMP_BUF_CHK();
9198
9199 if (njcl > 0) {
9200 k = scnprintf(c, clen, "%u/%u mbuf %uKB clusters in use\n",
9201 m_16kclusters - m_16kclfree, m_16kclusters,
9202 njclbytes / 1024);
9203 MBUF_DUMP_BUF_CHK();
9204 }
9205 totused = totmem - totfree;
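/* Compute the in-use percentage, scaling first to avoid overflow */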
9206 if (totmem == 0) {
9207 totpct = 0;
9208 } else if (totused < (ULONG_MAX / 100)) {
9209 totpct = (totused * 100) / totmem;
9210 } else {
9211 u_long totmem1 = totmem / 100;
9212 u_long totused1 = totused / 100;
9213 totpct = (totused1 * 100) / totmem1;
9214 }
9215 k = scnprintf(c, clen, "%lu KB allocated to network (approx. %lu%% "
9216 "in use)\n", totmem / 1024, totpct);
9217 MBUF_DUMP_BUF_CHK();
9218 k = scnprintf(c, clen, "%lu KB returned to the system\n",
9219 totreturned / 1024);
9220 MBUF_DUMP_BUF_CHK();
9221
9222 net_update_uptime();
9223
9224 k = scnprintf(c, clen,
9225 "worker thread runs: %u, expansions: %llu, cl %llu/%llu, "
9226 "bigcl %llu/%llu, 16k %llu/%llu\n", mbuf_worker_run_cnt,
9227 mb_expand_cnt, mb_expand_cl_cnt, mb_expand_cl_total,
9228 mb_expand_bigcl_cnt, mb_expand_bigcl_total, mb_expand_16kcl_cnt,
9229 mb_expand_16kcl_total);
9230 MBUF_DUMP_BUF_CHK();
9231 if (mbuf_worker_last_runtime != 0) {
9232 k = scnprintf(c, clen, "worker thread last run time: "
9233 "%llu (%llu seconds ago)\n",
9234 mbuf_worker_last_runtime,
9235 net_uptime() - mbuf_worker_last_runtime);
9236 MBUF_DUMP_BUF_CHK();
9237 }
9238 if (mbuf_drain_last_runtime != 0) {
9239 k = scnprintf(c, clen, "drain routine last run time: "
9240 "%llu (%llu seconds ago)\n",
9241 mbuf_drain_last_runtime,
9242 net_uptime() - mbuf_drain_last_runtime);
9243 MBUF_DUMP_BUF_CHK();
9244 }
9245
9246 /*
9247 * Log where the most mbufs have accumulated:
9248 * - Process socket buffers
9249 * - TCP reassembly queue
9250 * - Interface AQM queue (output) and DLIL input queue
9251 */
9252 args.non_blocking = true;
9253 proc_iterate(PROC_ALLPROCLIST,
9254 mbuf_watchdog_defunct_iterate, &args, NULL, NULL);
9255 if (args.top_app != NULL) {
9256 k = scnprintf(c, clen, "\ntop proc mbuf space %u bytes by %s:%d\n",
9257 args.top_app_space_used,
9258 proc_name_address(args.top_app),
9259 proc_pid(args.top_app));
9260 proc_rele(args.top_app);
9261 }
9262 MBUF_DUMP_BUF_CHK();
9263
9264 #if INET
9265 k = dump_tcp_reass_qlen(c, clen);
9266 MBUF_DUMP_BUF_CHK();
9267 #endif /* INET */
9268
9269 #if MPTCP
9270 k = dump_mptcp_reass_qlen(c, clen);
9271 MBUF_DUMP_BUF_CHK();
9272 #endif /* MPTCP */
9273
9274 #if NETWORKING
9275 k = dlil_dump_top_if_qlen(c, clen);
9276 MBUF_DUMP_BUF_CHK();
9277 #endif /* NETWORKING */
9278
9279 /* mbuf leak detection statistics */
9280 mleak_update_stats();
9281
9282 k = scnprintf(c, clen, "\nmbuf leak detection table:\n");
9283 MBUF_DUMP_BUF_CHK();
9284 k = scnprintf(c, clen, "\ttotal captured: %u (one per %u)\n",
9285 mleak_table.mleak_capture / mleak_table.mleak_sample_factor,
9286 mleak_table.mleak_sample_factor);
9287 MBUF_DUMP_BUF_CHK();
9288 k = scnprintf(c, clen, "\ttotal allocs outstanding: %llu\n",
9289 mleak_table.outstanding_allocs);
9290 MBUF_DUMP_BUF_CHK();
9291 k = scnprintf(c, clen, "\tnew hash recorded: %llu allocs, %llu traces\n",
9292 mleak_table.alloc_recorded, mleak_table.trace_recorded);
9293 MBUF_DUMP_BUF_CHK();
9294 k = scnprintf(c, clen, "\thash collisions: %llu allocs, %llu traces\n",
9295 mleak_table.alloc_collisions, mleak_table.trace_collisions);
9296 MBUF_DUMP_BUF_CHK();
9297 k = scnprintf(c, clen, "\toverwrites: %llu allocs, %llu traces\n",
9298 mleak_table.alloc_overwrites, mleak_table.trace_overwrites);
9299 MBUF_DUMP_BUF_CHK();
9300 k = scnprintf(c, clen, "\tlock conflicts: %llu\n\n",
9301 mleak_table.total_conflicts);
9302 MBUF_DUMP_BUF_CHK();
9303
9304 k = scnprintf(c, clen, "top %d outstanding traces:\n",
9305 mleak_stat->ml_cnt);
9306 MBUF_DUMP_BUF_CHK();
9307 for (i = 0; i < mleak_stat->ml_cnt; i++) {
9308 mltr = &mleak_stat->ml_trace[i];
9309 k = scnprintf(c, clen, "[%d] %llu outstanding alloc(s), "
9310 "%llu hit(s), %llu collision(s)\n", (i + 1),
9311 mltr->mltr_allocs, mltr->mltr_hitcount,
9312 mltr->mltr_collisions);
9313 MBUF_DUMP_BUF_CHK();
9314 }
9315
9316 if (mleak_stat->ml_isaddr64) {
9317 k = scnprintf(c, clen, MB_LEAK_HDR_64);
9318 } else {
9319 k = scnprintf(c, clen, MB_LEAK_HDR_32);
9320 }
9321 MBUF_DUMP_BUF_CHK();
9322
9323 for (i = 0; i < MLEAK_STACK_DEPTH; i++) {
9324 k = scnprintf(c, clen, "%2d: ", (i + 1));
9325 MBUF_DUMP_BUF_CHK();
9326 for (j = 0; j < mleak_stat->ml_cnt; j++) {
9327 mltr = &mleak_stat->ml_trace[j];
9328 if (i < mltr->mltr_depth) {
9329 if (mleak_stat->ml_isaddr64) {
9330 k = scnprintf(c, clen, "0x%0llx ",
9331 (uint64_t)VM_KERNEL_UNSLIDE(
9332 mltr->mltr_addr[i]));
9333 } else {
9334 k = scnprintf(c, clen,
9335 "0x%08x ",
9336 (uint32_t)VM_KERNEL_UNSLIDE(
9337 mltr->mltr_addr[i]));
9338 }
9339 } else {
9340 if (mleak_stat->ml_isaddr64) {
9341 k = scnprintf(c, clen,
9342 MB_LEAK_SPACING_64);
9343 } else {
9344 k = scnprintf(c, clen,
9345 MB_LEAK_SPACING_32);
9346 }
9347 }
9348 MBUF_DUMP_BUF_CHK();
9349 }
9350 k = scnprintf(c, clen, "\n");
9351 MBUF_DUMP_BUF_CHK();
9352 }
9353
9354 done:
9355 return mbuf_dump_buf;
9356 }
9357
9358 #undef MBUF_DUMP_BUF_CHK
9359 #endif /* CONFIG_MBUF_MCACHE */
9360
/*
 * Convert between a regular and a packet header mbuf. The caller chooses the
 * direction via 'hdr'; this routine does the rest of the work, including
 * setting or clearing M_PKTHDR.
 */
9365 int
9366 m_reinit(struct mbuf *m, int hdr)
9367 {
9368 int ret = 0;
9369
9370 if (hdr) {
9371 VERIFY(!(m->m_flags & M_PKTHDR));
9372 if (!(m->m_flags & M_EXT) &&
9373 (m->m_data != m->m_dat || m->m_len > 0)) {
9374 /*
9375 * If there's no external cluster attached and the
9376 * mbuf appears to contain user data, we cannot
9377 * safely convert this to a packet header mbuf,
9378 * as the packet header structure might overlap
9379 * with the data.
9380 */
9381 printf("%s: cannot set M_PKTHDR on altered mbuf %llx, "
9382 "m_data %llx (expected %llx), "
9383 "m_len %d (expected 0)\n",
9384 __func__,
9385 (uint64_t)VM_KERNEL_ADDRPERM((uintptr_t)m),
9386 (uint64_t)VM_KERNEL_ADDRPERM((uintptr_t)m->m_data),
9387 (uint64_t)VM_KERNEL_ADDRPERM((uintptr_t)(m->m_dat)), m->m_len);
9388 ret = EBUSY;
9389 } else {
9390 VERIFY((m->m_flags & M_EXT) || m->m_data == m->m_dat);
9391 m->m_flags |= M_PKTHDR;
9392 MBUF_INIT_PKTHDR(m);
9393 }
9394 } else {
9395 /* Check for scratch area overflow */
9396 m_redzone_verify(m);
9397 /* Free the aux data and tags if there is any */
9398 m_tag_delete_chain(m);
9399 m_do_tx_compl_callback(m, NULL);
9400 m->m_flags &= ~M_PKTHDR;
9401 }
9402
9403 return ret;
9404 }
9405
9406 int
9407 m_ext_set_prop(struct mbuf *m, uint32_t o, uint32_t n)
9408 {
9409 ASSERT(m->m_flags & M_EXT);
9410 return os_atomic_cmpxchg(&MEXT_PRIV(m), o, n, acq_rel);
9411 }
9412
9413 uint32_t
9414 m_ext_get_prop(struct mbuf *m)
9415 {
9416 ASSERT(m->m_flags & M_EXT);
9417 return MEXT_PRIV(m);
9418 }
9419
9420 int
9421 m_ext_paired_is_active(struct mbuf *m)
9422 {
9423 return MBUF_IS_PAIRED(m) ? (MEXT_PREF(m) > MEXT_MINREF(m)) : 1;
9424 }
9425
9426 void
9427 m_ext_paired_activate(struct mbuf *m)
9428 {
9429 struct ext_ref *rfa;
9430 int hdr, type;
9431 caddr_t extbuf;
9432 m_ext_free_func_t extfree;
9433 u_int extsize;
9434
9435 VERIFY(MBUF_IS_PAIRED(m));
9436 VERIFY(MEXT_REF(m) == MEXT_MINREF(m));
9437 VERIFY(MEXT_PREF(m) == MEXT_MINREF(m));
9438
9439 hdr = (m->m_flags & M_PKTHDR);
9440 type = m->m_type;
9441 extbuf = m->m_ext.ext_buf;
9442 extfree = m_get_ext_free(m);
9443 extsize = m->m_ext.ext_size;
9444 rfa = m_get_rfa(m);
9445
9446 VERIFY(extbuf != NULL && rfa != NULL);
9447
9448 /*
9449 * Safe to reinitialize packet header tags, since it's
9450 * already taken care of at m_free() time. Similar to
9451 * what's done in m_clattach() for the cluster. Bump
9452 * up MEXT_PREF to indicate activation.
9453 */
9454 MBUF_INIT(m, hdr, type);
9455 MEXT_INIT(m, extbuf, extsize, extfree, (caddr_t)m, rfa,
9456 1, 1, 2, EXTF_PAIRED, MEXT_PRIV(m), m);
9457 }
9458
9459 void
9460 m_scratch_init(struct mbuf *m)
9461 {
9462 struct pkthdr *pkt = &m->m_pkthdr;
9463
9464 VERIFY(m->m_flags & M_PKTHDR);
9465
9466 /* See comments in <rdar://problem/14040693> */
9467 if (pkt->pkt_flags & PKTF_PRIV_GUARDED) {
9468 panic_plain("Invalid attempt to modify guarded module-private "
9469 "area: mbuf %p, pkt_flags 0x%x\n", m, pkt->pkt_flags);
9470 /* NOTREACHED */
9471 }
9472
9473 bzero(&pkt->pkt_mpriv, sizeof(pkt->pkt_mpriv));
9474 }
9475
/*
 * This routine is reserved for mbuf_get_driver_scratch(); clients inside
 * xnu that intend to use the module-private area should refer directly
 * to the pkt_mpriv structure in the pkthdr. They are also expected to
 * set PKTF_PRIV_GUARDED while owning the packet and to clear it prior
 * to handing the packet off to another module.
 */
9483 u_int32_t
9484 m_scratch_get(struct mbuf *m, u_int8_t **p)
9485 {
9486 struct pkthdr *pkt = &m->m_pkthdr;
9487
9488 VERIFY(m->m_flags & M_PKTHDR);
9489
9490 /* See comments in <rdar://problem/14040693> */
9491 if (pkt->pkt_flags & PKTF_PRIV_GUARDED) {
9492 panic_plain("Invalid attempt to access guarded module-private "
9493 "area: mbuf %p, pkt_flags 0x%x\n", m, pkt->pkt_flags);
9494 /* NOTREACHED */
9495 }
9496
9497 #if CONFIG_MBUF_MCACHE
9498 if (mcltrace) {
9499 mcache_audit_t *mca;
9500
9501 lck_mtx_lock(mbuf_mlock);
9502 mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m);
9503 if (mca->mca_uflags & MB_SCVALID) {
9504 mcl_audit_scratch(mca);
9505 }
9506 lck_mtx_unlock(mbuf_mlock);
9507 }
9508 #endif /* CONFIG_MBUF_MCACHE */
9509
9510 *p = (u_int8_t *)&pkt->pkt_mpriv;
9511 return sizeof(pkt->pkt_mpriv);
9512 }
9513
9514 void
9515 m_add_crumb(struct mbuf *m, uint16_t crumb)
9516 {
9517 VERIFY(m->m_flags & M_PKTHDR);
9518
9519 m->m_pkthdr.pkt_crumbs |= crumb;
9520 }
9521
9522 static void
9523 m_redzone_init(struct mbuf *m)
9524 {
9525 VERIFY(m->m_flags & M_PKTHDR);
/*
 * Each mbuf has a unique red zone pattern, which is an XOR
 * of the red zone cookie and the address of the mbuf.
 */
9530 m->m_pkthdr.redzone = ((u_int32_t)(uintptr_t)m) ^ mb_redzone_cookie;
9531 }
9532
9533 static void
9534 m_redzone_verify(struct mbuf *m)
9535 {
9536 u_int32_t mb_redzone;
9537
9538 VERIFY(m->m_flags & M_PKTHDR);
9539
9540 mb_redzone = ((u_int32_t)(uintptr_t)m) ^ mb_redzone_cookie;
9541 if (m->m_pkthdr.redzone != mb_redzone) {
9542 panic("mbuf %p redzone violation with value 0x%x "
9543 "(instead of 0x%x, using cookie 0x%x)\n",
9544 m, m->m_pkthdr.redzone, mb_redzone, mb_redzone_cookie);
9545 /* NOTREACHED */
9546 }
9547 }
9548
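/*
 * Store the external-buffer free routine and argument, obscured by XORing
 * them with a per-ext_ref token (or the global cookie when no ext_ref is
 * supplied) so that a stale or corrupted pointer cannot be used directly.
 */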
9549 __private_extern__ inline void
9550 m_set_ext(struct mbuf *m, struct ext_ref *rfa, m_ext_free_func_t ext_free,
9551 caddr_t ext_arg)
9552 {
9553 VERIFY(m->m_flags & M_EXT);
9554 if (rfa != NULL) {
9555 m_set_rfa(m, rfa);
9556 if (ext_free != NULL) {
9557 rfa->ext_token = ((uintptr_t)&rfa->ext_token) ^
9558 mb_obscure_extfree;
9559 uintptr_t ext_free_val = ptrauth_nop_cast(uintptr_t, ext_free) ^ rfa->ext_token;
9560 m->m_ext.ext_free = ptrauth_nop_cast(m_ext_free_func_t, ext_free_val);
9561 if (ext_arg != NULL) {
9562 m->m_ext.ext_arg =
9563 (caddr_t)(((uintptr_t)ext_arg) ^ rfa->ext_token);
9564 } else {
9565 m->m_ext.ext_arg = NULL;
9566 }
9567 } else {
9568 rfa->ext_token = 0;
9569 m->m_ext.ext_free = NULL;
9570 m->m_ext.ext_arg = NULL;
9571 }
9572 } else {
9573 /*
9574  * If we are going to lose the cookie in ext_token by
9575 * resetting the rfa, we should use the global cookie
9576 * to obscure the ext_free and ext_arg pointers.
9577 */
9578 if (ext_free != NULL) {
9579 uintptr_t ext_free_val = ptrauth_nop_cast(uintptr_t, ext_free) ^ mb_obscure_extfree;
9580 m->m_ext.ext_free = ptrauth_nop_cast(m_ext_free_func_t, ext_free_val);
9581 if (ext_arg != NULL) {
9582 m->m_ext.ext_arg =
9583 (caddr_t)((uintptr_t)ext_arg ^
9584 mb_obscure_extfree);
9585 } else {
9586 m->m_ext.ext_arg = NULL;
9587 }
9588 } else {
9589 m->m_ext.ext_free = NULL;
9590 m->m_ext.ext_arg = NULL;
9591 }
9592 m->m_ext.ext_refflags = NULL;
9593 }
9594 }
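/*
 * Illustrative round trip (values made up): with
 * mb_obscure_extfree == 0x1234 and &rfa->ext_token == 0xc0de,
 * m_set_ext() stores:
 *	rfa->ext_token  = 0xc0de ^ 0x1234
 *	m_ext.ext_free  = real_free ^ rfa->ext_token
 *	m_ext.ext_arg   = real_arg  ^ rfa->ext_token
 * m_get_ext_free()/m_get_ext_arg() XOR with the same token to recover
 * the original pointers, and m_set_rfa() obscures the ext_ref pointer
 * itself with the separate mb_obscure_extref cookie.  A corrupted
 * ext_free therefore decodes to an unpredictable value rather than
 * being called as-is.
 */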
9595
9596 __private_extern__ inline struct ext_ref *
9597 m_get_rfa(struct mbuf *m)
9598 {
9599 if (m->m_ext.ext_refflags == NULL) {
9600 return NULL;
9601 } else {
9602 return (struct ext_ref *)(((uintptr_t)m->m_ext.ext_refflags) ^ mb_obscure_extref);
9603 }
9604 }
9605
9606 static inline void
9607 m_set_rfa(struct mbuf *m, struct ext_ref *rfa)
9608 {
9609 if (rfa != NULL) {
9610 m->m_ext.ext_refflags =
9611 (struct ext_ref *)(((uintptr_t)rfa) ^ mb_obscure_extref);
9612 } else {
9613 m->m_ext.ext_refflags = NULL;
9614 }
9615 }
9616
9617 __private_extern__ inline m_ext_free_func_t
9618 m_get_ext_free(struct mbuf *m)
9619 {
9620 struct ext_ref *rfa;
9621 if (m->m_ext.ext_free == NULL) {
9622 return NULL;
9623 }
9624
9625 rfa = m_get_rfa(m);
9626 if (rfa == NULL) {
9627 uintptr_t ext_free_val = ptrauth_nop_cast(uintptr_t, m->m_ext.ext_free) ^ mb_obscure_extfree;
9628 return ptrauth_nop_cast(m_ext_free_func_t, ext_free_val);
9629 } else {
9630 uintptr_t ext_free_val = ptrauth_nop_cast(uintptr_t, m->m_ext.ext_free) ^ rfa->ext_token;
9631 return ptrauth_nop_cast(m_ext_free_func_t, ext_free_val);
9632 }
9633 }
9634
9635 __private_extern__ inline caddr_t
9636 m_get_ext_arg(struct mbuf *m)
9637 {
9638 struct ext_ref *rfa;
9639 if (m->m_ext.ext_arg == NULL) {
9640 return NULL;
9641 }
9642
9643 rfa = m_get_rfa(m);
9644 if (rfa == NULL) {
9645 return (caddr_t)((uintptr_t)m->m_ext.ext_arg ^ mb_obscure_extfree);
9646 } else {
9647 return (caddr_t)(((uintptr_t)m->m_ext.ext_arg) ^
9648 rfa->ext_token);
9649 }
9650 }
9651
9652 #if CONFIG_MBUF_MCACHE
9653 /*
9654  * Send a report of mbuf usage if the usage is at least 6.25% of the
9655  * max limit and it has grown by at least 3.125% since the last report.
9656  *
9657  * These thresholds are chosen so that the checks reduce to simple
9658  * shift operations.
9659 */
9660 static boolean_t
9661 mbuf_report_usage(mbuf_class_t cl)
9662 {
9663 /* if a report is already in progress, nothing to do */
9664 if (mb_peak_newreport) {
9665 return TRUE;
9666 }
9667
9668 if (m_total(cl) > m_peak(cl) &&
9669 m_total(cl) >= (m_maxlimit(cl) >> 4) &&
9670 (m_total(cl) - m_peak(cl)) >= (m_peak(cl) >> 5)) {
9671 return TRUE;
9672 }
9673 return FALSE;
9674 }
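/*
 * Worked example (limits chosen for illustration): with
 * m_maxlimit(cl) == 32768, usage must reach 32768 >> 4 == 2048 objects
 * (6.25%) before a report is considered; once the recorded peak is
 * 2048, a new report additionally requires growth of at least
 * 2048 >> 5 == 64 objects (3.125%) beyond that peak.
 */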
9675 #endif /* CONFIG_MBUF_MCACHE */
9676
9677 __private_extern__ void
9678 mbuf_report_peak_usage(void)
9679 {
9680 int i = 0;
9681 u_int64_t uptime;
9682 struct nstat_sysinfo_data ns_data;
9683 uint32_t memreleased = 0;
9684 static uint32_t prevmemreleased;
9685
9686 uptime = net_uptime();
9687 lck_mtx_lock(mbuf_mlock);
9688 mbuf_stat_sync();
9689 mbuf_mtypes_sync(TRUE);
9690
9691 /* Generate an initial report after 1 week of uptime */
9692 if (!mb_peak_firstreport &&
9693 uptime > MBUF_PEAK_FIRST_REPORT_THRESHOLD) {
9694 mb_peak_newreport = TRUE;
9695 mb_peak_firstreport = TRUE;
9696 }
9697
9698 if (!mb_peak_newreport) {
9699 lck_mtx_unlock(mbuf_mlock);
9700 return;
9701 }
9702
9703 /*
9704 * Since a report is being generated before 1 week,
9705 * we do not need to force another one later
9706 */
9707 if (uptime < MBUF_PEAK_FIRST_REPORT_THRESHOLD) {
9708 mb_peak_firstreport = TRUE;
9709 }
9710
9711 for (i = 0; i < NELEM(mbuf_table); i++) {
9712 m_peak(m_class(i)) = m_total(m_class(i));
9713 memreleased += m_release_cnt(i);
9714 }
9715 memreleased = memreleased - prevmemreleased;
9716 prevmemreleased = memreleased;
9717 mb_peak_newreport = FALSE;
9718 lck_mtx_unlock(mbuf_mlock);
9719
9720 bzero(&ns_data, sizeof(ns_data));
9721 ns_data.flags = NSTAT_SYSINFO_MBUF_STATS;
9722 ns_data.u.mb_stats.total_256b = m_peak(MC_MBUF);
9723 ns_data.u.mb_stats.total_2kb = m_peak(MC_CL);
9724 ns_data.u.mb_stats.total_4kb = m_peak(MC_BIGCL);
9725 ns_data.u.mb_stats.total_16kb = m_peak(MC_16KCL);
9726 ns_data.u.mb_stats.sbmb_total = total_sbmb_cnt_peak;
9727 ns_data.u.mb_stats.sb_atmbuflimit = sbmb_limreached;
9728 ns_data.u.mb_stats.draincnt = mbstat.m_drain;
9729 ns_data.u.mb_stats.memreleased = memreleased;
9730 ns_data.u.mb_stats.sbmb_floor = total_sbmb_cnt_floor;
9731
9732 nstat_sysinfo_send_data(&ns_data);
9733
9734 /*
9735 * Reset the floor whenever we report a new
9736  * peak to track the trend (increased peak usage
9737 * is not a leak if mbufs get released
9738 * between reports and the floor stays low)
9739 */
9740 total_sbmb_cnt_floor = total_sbmb_cnt_peak;
9741 }
9742
9743 #if CONFIG_MBUF_MCACHE
9744 /*
9745 * Simple routine to avoid taking the lock when we can't run the
9746 * mbuf drain.
9747 */
9748 static int
9749 mbuf_drain_checks(boolean_t ignore_waiters)
9750 {
9751 if (mb_drain_maxint == 0) {
9752 return 0;
9753 }
9754 if (!ignore_waiters && mb_waiters != 0) {
9755 return 0;
9756 }
9757
9758 return 1;
9759 }
9760
9761 /*
9762 * Called by the VM when there's memory pressure or when we exhausted
9763 * the 4k/16k reserved space.
9764 */
9765 static void
9766 mbuf_drain_locked(boolean_t ignore_waiters)
9767 {
9768 mbuf_class_t mc;
9769 mcl_slab_t *sp, *sp_tmp, *nsp;
9770 unsigned int num, k, interval, released = 0;
9771 unsigned long total_mem = 0, use_mem = 0;
9772 boolean_t ret, purge_caches = FALSE;
9773 ppnum_t offset;
9774 mcache_obj_t *obj;
9775 unsigned long per;
9776 static unsigned char scratch[32];
9777 static ppnum_t scratch_pa = 0;
9778
9779 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
9780 if (!mbuf_drain_checks(ignore_waiters)) {
9781 return;
9782 }
9783 if (scratch_pa == 0) {
9784 bzero(scratch, sizeof(scratch));
9785 scratch_pa = pmap_find_phys(kernel_pmap, (addr64_t)scratch);
9786 VERIFY(scratch_pa);
9787 } else if (mclverify) {
9788 /*
9789 * Panic if a driver wrote to our scratch memory.
9790 */
9791 for (k = 0; k < sizeof(scratch); k++) {
9792 if (scratch[k]) {
9793 panic("suspect DMA to freed address");
9794 }
9795 }
9796 }
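/*
 * The scratch page set up above is what IOMapperInsertPage() points
 * freed pages at further down; any late DMA lands in scratch[] and is
 * caught by the mclverify check on the next drain.
 */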
9797 /*
9798 * Don't free memory too often as that could cause excessive
9799  * waiting times for mbufs. Purge the caches only if we are asked
9800  * to drain again within five times the minimum drain interval.
9801 */
9802 if (mbuf_drain_last_runtime != 0) {
9803 interval = net_uptime() - mbuf_drain_last_runtime;
9804 if (interval <= mb_drain_maxint) {
9805 return;
9806 }
9807 if (interval <= mb_drain_maxint * 5) {
9808 purge_caches = TRUE;
9809 }
9810 }
9811 mbuf_drain_last_runtime = net_uptime();
9812 /*
9813 * Don't free any memory if we're using 60% or more.
9814 */
9815 for (mc = 0; mc < NELEM(mbuf_table); mc++) {
9816 total_mem += m_total(mc) * m_maxsize(mc);
9817 use_mem += m_active(mc) * m_maxsize(mc);
9818 }
9819 per = (use_mem * 100) / total_mem;
9820 if (per >= 60) {
9821 return;
9822 }
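/*
 * Illustrative numbers: with 64MB of backing memory across all
 * classes, 40MB active gives per == 62 and the drain returns without
 * freeing anything; 32MB active gives per == 50 and it proceeds to
 * the purge and free steps below.
 */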
9823 /*
9824 * Purge all the caches. This effectively disables
9825 * caching for a few seconds, but the mbuf worker thread will
9826 * re-enable them again.
9827 */
9828 if (purge_caches == TRUE) {
9829 for (mc = 0; mc < NELEM(mbuf_table); mc++) {
9830 if (m_total(mc) < m_avgtotal(mc)) {
9831 continue;
9832 }
9833 lck_mtx_unlock(mbuf_mlock);
9834 ret = mcache_purge_cache(m_cache(mc), FALSE);
9835 lck_mtx_lock(mbuf_mlock);
9836 if (ret == TRUE) {
9837 m_purge_cnt(mc)++;
9838 }
9839 }
9840 }
9841 /*
9842 * Move the objects from the composite class freelist to
9843 * the rudimentary slabs list, but keep at least 10% of the average
9844 * total in the freelist.
9845 */
9846 for (mc = 0; mc < NELEM(mbuf_table); mc++) {
9847 while (m_cobjlist(mc) &&
9848 m_total(mc) < m_avgtotal(mc) &&
9849 m_infree(mc) > 0.1 * m_avgtotal(mc) + m_minlimit(mc)) {
9850 obj = m_cobjlist(mc);
9851 m_cobjlist(mc) = obj->obj_next;
9852 obj->obj_next = NULL;
9853 num = cslab_free(mc, obj, 1);
9854 VERIFY(num == 1);
9855 m_free_cnt(mc)++;
9856 m_infree(mc)--;
9857 /* cslab_free() handles m_total */
9858 }
9859 }
9860 /*
9861  * Free unused slabs back to the VM, but keep at least 10% of the
9862  * average total (plus the minimum limit) free in each class.
9863 *
9864 * We walk the list backwards in an attempt to reduce fragmentation.
9865 */
9866 for (mc = NELEM(mbuf_table) - 1; (int)mc >= 0; mc--) {
9867 TAILQ_FOREACH_SAFE(sp, &m_slablist(mc), sl_link, sp_tmp) {
9868 /*
9869 * Process only unused slabs occupying memory.
9870 */
9871 if (sp->sl_refcnt != 0 || sp->sl_len == 0 ||
9872 sp->sl_base == NULL) {
9873 continue;
9874 }
9875 if (m_total(mc) < m_avgtotal(mc) ||
9876 m_infree(mc) < 0.1 * m_avgtotal(mc) + m_minlimit(mc)) {
9877 break;
9878 }
9879 slab_remove(sp, mc);
9880 switch (mc) {
9881 case MC_MBUF:
9882 m_infree(mc) -= NMBPG;
9883 m_total(mc) -= NMBPG;
9884 if (mclaudit != NULL) {
9885 mcl_audit_free(sp->sl_base, NMBPG);
9886 }
9887 break;
9888 case MC_CL:
9889 m_infree(mc) -= NCLPG;
9890 m_total(mc) -= NCLPG;
9891 if (mclaudit != NULL) {
9892 mcl_audit_free(sp->sl_base, NMBPG);
9893 }
9894 break;
9895 case MC_BIGCL:
9896 {
9897 m_infree(mc) -= NBCLPG;
9898 m_total(mc) -= NBCLPG;
9899 if (mclaudit != NULL) {
9900 mcl_audit_free(sp->sl_base, NMBPG);
9901 }
9902 break;
9903 }
9904 case MC_16KCL:
9905 m_infree(mc)--;
9906 m_total(mc)--;
9907 for (nsp = sp, k = 1; k < NSLABSP16KB; k++) {
9908 nsp = nsp->sl_next;
9909 VERIFY(nsp->sl_refcnt == 0 &&
9910 nsp->sl_base != NULL &&
9911 nsp->sl_len == 0);
9912 slab_init(nsp, 0, 0, NULL, NULL, 0, 0,
9913 0);
9914 nsp->sl_flags = 0;
9915 }
9916 if (mclaudit != NULL) {
9917 if (sp->sl_len == PAGE_SIZE) {
9918 mcl_audit_free(sp->sl_base,
9919 NMBPG);
9920 } else {
9921 mcl_audit_free(sp->sl_base, 1);
9922 }
9923 }
9924 break;
9925 default:
9926 /*
9927 * The composite classes have their own
9928 * freelist (m_cobjlist), so we only
9929 * process rudimentary classes here.
9930 */
9931 VERIFY(0);
9932 }
9933 m_release_cnt(mc) += m_size(mc);
9934 released += m_size(mc);
9935 VERIFY(sp->sl_base != NULL &&
9936 sp->sl_len >= PAGE_SIZE);
9937 offset = MTOPG(sp->sl_base);
9938 /*
9939 * Make sure the IOMapper points to a valid, but
9940 * bogus, address. This should prevent further DMA
9941 * accesses to freed memory.
9942 */
9943 IOMapperInsertPage(mcl_paddr_base, offset, scratch_pa);
9944 mcl_paddr[offset] = 0;
9945 kmem_free(mb_map, (vm_offset_t)sp->sl_base,
9946 sp->sl_len);
9947 slab_init(sp, 0, 0, NULL, NULL, 0, 0, 0);
9948 sp->sl_flags = 0;
9949 }
9950 }
9951 mbstat.m_drain++;
9952 mbstat.m_bigclusters = m_total(MC_BIGCL);
9953 mbstat.m_clusters = m_total(MC_CL);
9954 mbstat.m_mbufs = m_total(MC_MBUF);
9955 mbuf_stat_sync();
9956 mbuf_mtypes_sync(TRUE);
9957 }
9958
9959 __private_extern__ void
9960 mbuf_drain(boolean_t ignore_waiters)
9961 {
9962 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_NOTOWNED);
9963 if (!mbuf_drain_checks(ignore_waiters)) {
9964 return;
9965 }
9966 lck_mtx_lock(mbuf_mlock);
9967 mbuf_drain_locked(ignore_waiters);
9968 lck_mtx_unlock(mbuf_mlock);
9969 }
9970
9971
9972 static int
9973 m_drain_force_sysctl SYSCTL_HANDLER_ARGS
9974 {
9975 #pragma unused(arg1, arg2)
9976 int val = 0, err;
9977
9978 err = sysctl_handle_int(oidp, &val, 0, req);
9979 if (err != 0 || req->newptr == USER_ADDR_NULL) {
9980 return err;
9981 }
9982 if (val) {
9983 mbuf_drain(TRUE);
9984 }
9985
9986 return err;
9987 }
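/*
 * Usage note: on CONFIG_MBUF_MCACHE kernels this handler backs
 * kern.ipc.mb_drain_force; writing a non-zero value (e.g.
 * "sysctl kern.ipc.mb_drain_force=1") forces an immediate drain that
 * ignores waiters, and reads always return 0.
 */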
9988
9989 #if DEBUG || DEVELOPMENT
9990 __printflike(3, 4)
9991 static void
9992 _mbwdog_logger(const char *func, const int line, const char *fmt, ...)
9993 {
9994 va_list ap;
9995 struct timeval now;
9996 char str[384], p[256];
9997 int len;
9998
9999 LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
10000 if (mbwdog_logging == NULL) {
10001 /*
10002 * This might block under a mutex, which isn't really great,
10003 * but this happens once, so we'll live.
10004 */
10005 mbwdog_logging = zalloc_permanent(mbwdog_logging_size,
10006 ZALIGN_NONE);
10007 }
10008 va_start(ap, fmt);
10009 vsnprintf(p, sizeof(p), fmt, ap);
10010 va_end(ap);
10011 microuptime(&now);
10012 len = scnprintf(str, sizeof(str),
10013 "\n%ld.%d (%d/%llx) %s:%d %s",
10014 now.tv_sec, now.tv_usec,
10015 proc_getpid(current_proc()),
10016 (uint64_t)VM_KERNEL_ADDRPERM(current_thread()),
10017 func, line, p);
10018 if (len < 0) {
10019 return;
10020 }
10021 if (mbwdog_logging_used + len > mbwdog_logging_size) {
10022 mbwdog_logging_used = mbwdog_logging_used / 2;
10023 memmove(mbwdog_logging, mbwdog_logging + mbwdog_logging_used,
10024 mbwdog_logging_size - mbwdog_logging_used);
10025 mbwdog_logging[mbwdog_logging_used] = 0;
10026 }
10027 strlcat(mbwdog_logging, str, mbwdog_logging_size);
10028 mbwdog_logging_used += len;
10029 }
10030
10031 #endif // DEBUG || DEVELOPMENT
10032
10033 static void
10034 mtracelarge_register(size_t size)
10035 {
10036 int i;
10037 struct mtracelarge *trace;
10038 uintptr_t bt[MLEAK_STACK_DEPTH];
10039 unsigned int depth;
10040
10041 depth = backtrace(bt, MLEAK_STACK_DEPTH, NULL, NULL);
10042 /* Check if this entry is already on the list. */
10043 for (i = 0; i < MTRACELARGE_NUM_TRACES; i++) {
10044 trace = &mtracelarge_table[i];
10045 if (trace->size == size && trace->depth == depth &&
10046 memcmp(bt, trace->addr, depth * sizeof(uintptr_t)) == 0) {
10047 return;
10048 }
10049 }
10050 for (i = 0; i < MTRACELARGE_NUM_TRACES; i++) {
10051 trace = &mtracelarge_table[i];
10052 if (size > trace->size) {
10053 trace->depth = depth;
10054 memcpy(trace->addr, bt, depth * sizeof(uintptr_t));
10055 trace->size = size;
10056 break;
10057 }
10058 }
10059 }
10060
10061 #if DEBUG || DEVELOPMENT
10062
10063 static int
10064 mbuf_wd_dump_sysctl SYSCTL_HANDLER_ARGS
10065 {
10066 char *str;
10067
10068 ifnet_head_lock_shared();
10069 lck_mtx_lock(mbuf_mlock);
10070
10071 str = mbuf_dump();
10072
10073 lck_mtx_unlock(mbuf_mlock);
10074 ifnet_head_done();
10075
10076 return sysctl_io_string(req, str, 0, 0, NULL);
10077 }
10078
10079 #endif /* DEBUG || DEVELOPMENT */
10080 #endif /* CONFIG_MBUF_MCACHE */
10081
10082 SYSCTL_DECL(_kern_ipc);
10083 #if DEBUG || DEVELOPMENT
10084 #if SKYWALK && CONFIG_MBUF_MCACHE
10085 SYSCTL_UINT(_kern_ipc, OID_AUTO, mc_threshold_scale_factor,
10086 CTLFLAG_RW | CTLFLAG_LOCKED, &mc_threshold_scale_down_factor,
10087 MC_THRESHOLD_SCALE_DOWN_FACTOR,
10088 "scale down factor for mbuf cache thresholds");
10089 #endif /* SKYWALK && CONFIG_MBUF_MCACHE */
10090 #if CONFIG_MBUF_MCACHE
10091 SYSCTL_PROC(_kern_ipc, OID_AUTO, mb_wd_dump,
10092 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
10093 0, 0, mbuf_wd_dump_sysctl, "A", "mbuf watchdog dump");
10094 #endif /* CONFIG_MBUF_MCACHE */
10095 #endif /* DEBUG || DEVELOPMENT */
10096 SYSCTL_PROC(_kern_ipc, KIPC_MBSTAT, mbstat,
10097 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
10098 0, 0, mbstat_sysctl, "S,mbstat", "");
10099 SYSCTL_PROC(_kern_ipc, OID_AUTO, mb_stat,
10100 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
10101 0, 0, mb_stat_sysctl, "S,mb_stat", "");
10102 #if CONFIG_MBUF_MCACHE
10103 SYSCTL_PROC(_kern_ipc, OID_AUTO, mleak_top_trace,
10104 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
10105 0, 0, mleak_top_trace_sysctl, "S,mb_top_trace", "");
10106 SYSCTL_PROC(_kern_ipc, OID_AUTO, mleak_table,
10107 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
10108 0, 0, mleak_table_sysctl, "S,mleak_table", "");
10109 SYSCTL_INT(_kern_ipc, OID_AUTO, mleak_sample_factor,
10110 CTLFLAG_RW | CTLFLAG_LOCKED, &mleak_table.mleak_sample_factor, 0, "");
10111 SYSCTL_INT(_kern_ipc, OID_AUTO, mb_normalized,
10112 CTLFLAG_RD | CTLFLAG_LOCKED, &mb_normalized, 0, "");
10113 SYSCTL_INT(_kern_ipc, OID_AUTO, mb_watchdog,
10114 CTLFLAG_RW | CTLFLAG_LOCKED, &mb_watchdog, 0, "");
10115 SYSCTL_PROC(_kern_ipc, OID_AUTO, mb_drain_force,
10116 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0,
10117 m_drain_force_sysctl, "I",
10118 "Forces the mbuf garbage collection to run");
10119 SYSCTL_INT(_kern_ipc, OID_AUTO, mb_drain_maxint,
10120 CTLFLAG_RW | CTLFLAG_LOCKED, &mb_drain_maxint, 0,
10121 "Minimum time interval between garbage collection");
10122 #endif /* CONFIG_MBUF_MCACHE */
10123 SYSCTL_INT(_kern_ipc, OID_AUTO, mb_memory_pressure_percentage,
10124 CTLFLAG_RW | CTLFLAG_LOCKED, &mb_memory_pressure_percentage, 0,
10125 "Percentage of when we trigger memory-pressure for an mbuf-class");
10126 #if CONFIG_MBUF_MCACHE
10127 static int mb_uses_mcache = 1;
10128 #else
10129 static int mb_uses_mcache = 0;
10130 #endif /* CONFIG_MBUF_MCACHE */
10131 SYSCTL_INT(_kern_ipc, OID_AUTO, mb_uses_mcache,
10132 CTLFLAG_LOCKED, &mb_uses_mcache, 0,
10133 "Whether mbufs use mcache");
10134