xref: /xnu-8020.101.4/bsd/kern/uipc_mbuf.c (revision e7776783b89a353188416a9a346c6cdb4928faad)
/*
 * Copyright (c) 1998-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <ptrauth.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/filedesc.h>
#include <sys/file_internal.h>

#include <dev/random/randomdev.h>

#include <kern/kern_types.h>
#include <kern/simple_lock.h>
#include <kern/queue.h>
#include <kern/sched_prim.h>
#include <kern/backtrace.h>
#include <kern/percpu.h>
#include <kern/zalloc.h>

#include <libkern/OSAtomic.h>
#include <libkern/OSDebug.h>
#include <libkern/libkern.h>

#include <os/log.h>
#include <os/ptrtools.h>

#include <IOKit/IOMapper.h>

#include <machine/limits.h>
#include <machine/machine_routines.h>

#include <sys/mcache.h>
#include <net/ntstat.h>
/*
 * MBUF IMPLEMENTATION NOTES.
 *
 * There are a total of five per-CPU caches:
 *
 * MC_MBUF:
 *	This is a cache of rudimentary objects of MSIZE in size; each
 *	object represents an mbuf structure.  This cache preserves only
 *	the m_type field of the mbuf during its transactions.
 *
 * MC_CL:
 *	This is a cache of rudimentary objects of MCLBYTES in size; each
 *	object represents an mcluster structure.  This cache does not
 *	preserve the contents of the objects during its transactions.
 *
 * MC_BIGCL:
 *	This is a cache of rudimentary objects of MBIGCLBYTES in size; each
 *	object represents an mbigcluster structure.  This cache does not
 *	preserve the contents of the objects during its transactions.
 *
 * MC_MBUF_CL:
 *	This is a cache of mbufs each having a cluster attached to it.
 *	It is backed by the MC_MBUF and MC_CL rudimentary caches.  Several
 *	fields of the mbuf related to the external cluster are preserved
 *	during transactions.
 *
 * MC_MBUF_BIGCL:
 *	This is a cache of mbufs each having a big cluster attached to it.
 *	It is backed by the MC_MBUF and MC_BIGCL rudimentary caches.  Several
 *	fields of the mbuf related to the external cluster are preserved
 *	during transactions.
 *
 * OBJECT ALLOCATION:
 *
 * Allocation requests are handled first at the per-CPU (mcache) layer
 * before falling back to the slab layer.  Performance is optimal when
 * the request is satisfied at the CPU layer because the global data/lock
 * is never accessed.  When the slab layer is entered for allocation,
 * the slab freelist will be checked first for available objects before
 * the VM backing store is invoked.  Slab layer operations are serialized
 * for all of the caches as the mbuf global lock is held most of the time.
 * Allocation paths are different depending on the class of objects:
 *
 * a. Rudimentary object:
 *
 *	{ m_get_common(), m_clattach(), m_mclget(),
 *	  m_mclalloc(), m_bigalloc(), m_copym_with_hdrs(),
 *	  composite object allocation }
 *			|	^
 *			|	|
 *			|	+-----------------------+
 *			v				|
 *	   mcache_alloc/mcache_alloc_ext()	mbuf_slab_audit()
 *			|				^
 *			v				|
 *		   [CPU cache] ------->	(found?) -------+
 *			|				|
 *			v				|
 *		 mbuf_slab_alloc()			|
 *			|				|
 *			v				|
 *	+---------> [freelist] ------->	(found?) -------+
 *	|		|
 *	|		v
 *	|	    m_clalloc()
 *	|		|
 *	|		v
 *	+---<<---- kmem_mb_alloc()
 *
 * b. Composite object:
 *
 *	{ m_getpackets_internal(), m_allocpacket_internal() }
 *			|	^
 *			|	|
 *			|	+------	(done) ---------+
 *			v				|
 *	   mcache_alloc/mcache_alloc_ext()	mbuf_cslab_audit()
 *			|				^
 *			v				|
 *		   [CPU cache] ------->	(found?) -------+
 *			|				|
 *			v				|
 *		 mbuf_cslab_alloc()			|
 *			|				|
 *			v				|
 *		    [freelist] ------->	(found?) -------+
 *			|				|
 *			v				|
 *		(rudimentary object)			|
 *	   mcache_alloc/mcache_alloc_ext() ------>>-----+
 *
 * Auditing notes: If auditing is enabled, buffers will be subjected to
 * integrity checks by the audit routine.  This is done by verifying their
 * contents against the DEADBEEF (free) pattern before returning them to
 * the caller.  As part of this step, the routine will also record the
 * transaction and pattern-fill the buffers with the BADDCAFE
 * (uninitialized) pattern.  It will also restore any constructed data
 * structure fields if necessary.
 *
 * OBJECT DEALLOCATION:
 *
 * Freeing an object simply involves placing it into the CPU cache; this
 * pollutes the cache to benefit subsequent allocations.  The slab layer
 * will only be entered if the object is to be purged out of the cache.
 * During normal operations, this happens only when the CPU layer resizes
 * its bucket while it's adjusting to the allocation load.  Deallocation
 * paths are different depending on the class of objects:
 *
 * a. Rudimentary object:
 *
 *	{ m_free(), m_freem_list(), composite object deallocation }
 *			|	^
 *			|	|
 *			|	+------	(done) ---------+
 *			v				|
 *	   mcache_free/mcache_free_ext()		|
 *			|				|
 *			v				|
 *		mbuf_slab_audit()			|
 *			|				|
 *			v				|
 *		   [CPU cache] ---> (not purging?) -----+
 *			|				|
 *			v				|
 *		 mbuf_slab_free()			|
 *			|				|
 *			v				|
 *		    [freelist] ----------->>------------+
 *	 (objects get purged to VM only on demand)
 *
 * b. Composite object:
 *
 *	{ m_free(), m_freem_list() }
 *			|	^
 *			|	|
 *			|	+------	(done) ---------+
 *			v				|
 *	   mcache_free/mcache_free_ext()		|
 *			|				|
 *			v				|
 *		mbuf_cslab_audit()			|
 *			|				|
 *			v				|
 *		   [CPU cache] ---> (not purging?) -----+
 *			|				|
 *			v				|
 *		 mbuf_cslab_free()			|
 *			|				|
 *			v				|
 *		    [freelist] ---> (not purging?) -----+
 *			|				|
 *			v				|
 *		(rudimentary object)			|
 *	   mcache_free/mcache_free_ext() ------->>------+
 *
 * Auditing notes: If auditing is enabled, the audit routine will save
 * any constructed data structure fields (if necessary) before filling the
 * contents of the buffers with the DEADBEEF (free) pattern and recording
 * the transaction.  Buffers that are freed (whether at the CPU or slab
 * layer) are expected to contain the free pattern.
 *
 * DEBUGGING:
 *
 * Debugging can be enabled by adding "mbuf_debug=0x3" to boot-args; this
 * translates to the mcache flags (MCF_VERIFY | MCF_AUDIT).  Additionally,
 * the CPU layer cache can be disabled by setting the MCF_NOCPUCACHE flag,
 * i.e. modify the boot argument to "mbuf_debug=0x13".  Leak detection may
 * also be disabled by setting the MCF_NOLEAKLOG flag, e.g.
 * "mbuf_debug=0x113".  Note that debugging consumes more CPU and memory.
 *
 * Each object is associated with exactly one mcache_audit_t structure that
 * contains the information related to its last buffer transaction.  Given
 * an address of an object, the audit structure can be retrieved by finding
 * the position of the object relative to the base address of the cluster:
 *
 *	+------------+			+=============+
 *	| mbuf addr  |			| mclaudit[i] |
 *	+------------+			+=============+
 *	      |				| cl_audit[0] |
 *	i = MTOBG(addr)			+-------------+
 *	      |			+----->	| cl_audit[1] | -----> mcache_audit_t
 *	b = BGTOM(i)		|	+-------------+
 *	      |			|	|     ...     |
 *	x = MCLIDX(b, addr)	|	+-------------+
 *	      |			|	| cl_audit[7] |
 *	      +-----------------+	+-------------+
 *		 (e.g. x == 1)
 *
 * The mclaudit[] array is allocated at initialization time, but its contents
 * get populated when the corresponding cluster is created.  Because a page
 * can be turned into NMBPG mbufs, we preserve enough space for the mbufs so
 * that there is a 1-to-1 mapping between them.  A page that never gets (or
 * has not yet been) turned into mbufs will use only cl_audit[0], with the
 * remaining entries unused.  For a 16KB cluster, only one entry from the
 * first page is allocated and used for the entire object.
 */
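
/*
 * Example (illustrative sketch, assuming the usual m_get()/m_getpacket()/
 * m_freem() KPI entry points): the cache taxonomy above is what ultimately
 * backs the caller-visible allocation calls.  A bare mbuf comes from
 * MC_MBUF, while an mbuf with a 2KB cluster comes from the composite
 * MC_MBUF_CL cache:
 *
 *	struct mbuf *m = m_get(M_WAIT, MT_DATA);	// MC_MBUF
 *	struct mbuf *pkt = m_getpacket();		// MC_MBUF_CL
 *
 *	m_freem(m);	// back into the per-CPU layer
 *	m_freem(pkt);	// slab layer touched only on purge/resize
 */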

/* TODO: should be in header file */
/* kernel translator */
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern vm_map_t mb_map;         /* special map */

static uint32_t mb_kmem_contig_failed;
static uint32_t mb_kmem_failed;
static uint32_t mb_kmem_one_failed;
/* Timestamp of allocation failures. */
static uint64_t mb_kmem_contig_failed_ts;
static uint64_t mb_kmem_failed_ts;
static uint64_t mb_kmem_one_failed_ts;
static uint64_t mb_kmem_contig_failed_size;
static uint64_t mb_kmem_failed_size;
static uint32_t mb_kmem_stats[6];
static const char *mb_kmem_stats_labels[] = { "INVALID_ARGUMENT",
	                                      "INVALID_ADDRESS",
	                                      "RESOURCE_SHORTAGE",
	                                      "NO_SPACE",
	                                      "KERN_FAILURE",
	                                      "OTHERS" };

/* Global lock */
static LCK_GRP_DECLARE(mbuf_mlock_grp, "mbuf");
static LCK_MTX_DECLARE(mbuf_mlock_data, &mbuf_mlock_grp);
static lck_mtx_t *const mbuf_mlock = &mbuf_mlock_data;

/* Back-end (common) layer */
static uint64_t mb_expand_cnt;
static uint64_t mb_expand_cl_cnt;
static uint64_t mb_expand_cl_total;
static uint64_t mb_expand_bigcl_cnt;
static uint64_t mb_expand_bigcl_total;
static uint64_t mb_expand_16kcl_cnt;
static uint64_t mb_expand_16kcl_total;
static boolean_t mbuf_worker_needs_wakeup; /* wait channel for mbuf worker */
static uint32_t mbuf_worker_run_cnt;
static uint64_t mbuf_worker_last_runtime;
static uint64_t mbuf_drain_last_runtime;
static int mbuf_worker_ready;   /* worker thread is runnable */
static unsigned int ncpu;                /* number of CPUs */
static ppnum_t *mcl_paddr;      /* Array of cluster physical addresses */
static ppnum_t mcl_pages;       /* Size of array (# physical pages) */
static ppnum_t mcl_paddr_base;  /* Handle returned by IOMapper::iovmAlloc() */
static mcache_t *ref_cache;     /* Cache of cluster reference & flags */
static mcache_t *mcl_audit_con_cache; /* Audit contents cache */
unsigned int mbuf_debug; /* patchable mbuf mcache flags */
static unsigned int mb_normalized; /* number of packets "normalized" */

#define MB_GROWTH_AGGRESSIVE    1       /* Threshold: 1/2 of total */
#define MB_GROWTH_NORMAL        2       /* Threshold: 3/4 of total */

typedef enum {
	MC_MBUF = 0,    /* Regular mbuf */
	MC_CL,          /* Cluster */
	MC_BIGCL,       /* Large (4KB) cluster */
	MC_16KCL,       /* Jumbo (16KB) cluster */
	MC_MBUF_CL,     /* mbuf + cluster */
	MC_MBUF_BIGCL,  /* mbuf + large (4KB) cluster */
	MC_MBUF_16KCL   /* mbuf + jumbo (16KB) cluster */
} mbuf_class_t;

#define MBUF_CLASS_MIN          MC_MBUF
#define MBUF_CLASS_MAX          MC_MBUF_16KCL
#define MBUF_CLASS_LAST         MC_16KCL
#define MBUF_CLASS_VALID(c) \
	((int)(c) >= MBUF_CLASS_MIN && (int)(c) <= MBUF_CLASS_MAX)
#define MBUF_CLASS_COMPOSITE(c) \
	((int)(c) > MBUF_CLASS_LAST)


/*
 * mbuf specific mcache allocation request flags.
 */
#define MCR_COMP        MCR_USR1 /* for MC_MBUF_{CL,BIGCL,16KCL} caches */

/*
 * Per-cluster slab structure.
 *
 * A slab is a cluster control structure that contains one or more object
 * chunks; the available chunks are chained in the slab's freelist (sl_head).
 * Each time a chunk is taken out of the slab, the slab's reference count
 * gets incremented.  When all chunks have been taken out, the empty slab
 * gets removed (SLF_DETACHED) from the class's slab list.  A chunk that is
 * returned to a slab causes the slab's reference count to be decremented;
 * it also causes the slab to be reinserted into the class's slab list, if
 * that has not already been done.
 *
 * Compartmentalizing the object chunks into slabs allows us to easily
 * merge one or more slabs together when the adjacent slabs are idle, as
 * well as to convert or move a slab from one class to another; e.g. the
 * mbuf cluster slab can be converted to a regular cluster slab when all
 * mbufs in the slab have been freed.
 *
 * A slab may also span multiple clusters for chunks larger than
 * a cluster's size.  In this case, only the slab of the first cluster is
 * used.  The rest of the slabs are marked with SLF_PARTIAL to indicate
 * that they are part of the larger slab.
 *
 * Each slab controls a page of memory.
 */
typedef struct mcl_slab {
	struct mcl_slab *sl_next;       /* neighboring slab */
	u_int8_t        sl_class;       /* controlling mbuf class */
	int8_t          sl_refcnt;      /* outstanding allocations */
	int8_t          sl_chunks;      /* chunks (bufs) in this slab */
	u_int16_t       sl_flags;       /* slab flags (see below) */
	u_int16_t       sl_len;         /* slab length */
	void            *sl_base;       /* base of allocated memory */
	void            *sl_head;       /* first free buffer */
	TAILQ_ENTRY(mcl_slab) sl_link;  /* next/prev slab on freelist */
} mcl_slab_t;

#define SLF_MAPPED      0x0001          /* backed by a mapped page */
#define SLF_PARTIAL     0x0002          /* part of another slab */
#define SLF_DETACHED    0x0004          /* not in slab freelist */

/*
 * The array of slabs is broken into groups of arrays per 1MB of kernel
 * memory to reduce the footprint.  Each group is allocated on demand
 * whenever a new piece of memory mapped in from the VM crosses the 1MB
 * boundary.
 */
#define NSLABSPMB       ((1 << MBSHIFT) >> PAGE_SHIFT)
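
/*
 * Worked example (illustrative): with MBSHIFT == 20 (1MB) and 4KB pages
 * (PAGE_SHIFT == 12), NSLABSPMB is (1 << 20) >> 12 == 256 slabs per group;
 * with 16KB pages it is 64.  Since each slab controls one page, a group
 * covers exactly 1MB of mapped cluster memory.
 */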

typedef struct mcl_slabg {
	mcl_slab_t      *slg_slab;      /* group of slabs */
} mcl_slabg_t;

/*
 * Number of slabs needed to control a 16KB cluster object.
 */
#define NSLABSP16KB     (M16KCLBYTES >> PAGE_SHIFT)

/*
 * Per-cluster audit structure.
 */
typedef struct {
	mcache_audit_t  **cl_audit;     /* array of audits */
} mcl_audit_t;

typedef struct {
	struct thread   *msa_thread;    /* thread doing transaction */
	struct thread   *msa_pthread;   /* previous transaction thread */
	uint32_t        msa_tstamp;     /* transaction timestamp (ms) */
	uint32_t        msa_ptstamp;    /* prev transaction timestamp (ms) */
	uint16_t        msa_depth;      /* pc stack depth */
	uint16_t        msa_pdepth;     /* previous transaction pc stack */
	void            *msa_stack[MCACHE_STACK_DEPTH];
	void            *msa_pstack[MCACHE_STACK_DEPTH];
} mcl_scratch_audit_t;

typedef struct {
	/*
	 * Size of data from the beginning of an mbuf that covers the m_hdr,
	 * pkthdr and m_ext structures.  If auditing is enabled, we allocate
	 * a shadow mbuf structure of this size inside each audit structure,
	 * and the contents of the real mbuf get copied into it when the mbuf
	 * is freed.  This allows us to pattern-fill the mbuf for integrity
	 * check, and to preserve any constructed mbuf fields (e.g. the mbuf +
	 * cluster cache case).  Note that we don't save the contents of
	 * clusters when they are freed; we simply pattern-fill them.
	 */
	u_int8_t                sc_mbuf[(MSIZE - _MHLEN) + sizeof(_m_ext_t)];
	mcl_scratch_audit_t     sc_scratch __attribute__((aligned(8)));
} mcl_saved_contents_t;

#define AUDIT_CONTENTS_SIZE     (sizeof (mcl_saved_contents_t))

#define MCA_SAVED_MBUF_PTR(_mca)                                        \
	((struct mbuf *)(void *)((mcl_saved_contents_t *)               \
	(_mca)->mca_contents)->sc_mbuf)
#define MCA_SAVED_MBUF_SIZE                                             \
	(sizeof (((mcl_saved_contents_t *)0)->sc_mbuf))
#define MCA_SAVED_SCRATCH_PTR(_mca)                                     \
	(&((mcl_saved_contents_t *)(_mca)->mca_contents)->sc_scratch)

/*
 * mbuf specific mcache audit flags
 */
#define MB_INUSE        0x01    /* object has not been returned to slab */
#define MB_COMP_INUSE   0x02    /* object has not been returned to cslab */
#define MB_SCVALID      0x04    /* object has valid saved contents */

/*
 * Each of the following two arrays holds up to nmbclusters elements.
 */
static mcl_audit_t *mclaudit;   /* array of cluster audit information */
static unsigned int maxclaudit; /* max # of entries in audit table */
static mcl_slabg_t **slabstbl;  /* cluster slabs table */
static unsigned int maxslabgrp; /* max # of entries in slabs table */
static unsigned int slabgrp;    /* # of entries in slabs table */

/* Globals */
int nclusters;                  /* # of clusters for non-jumbo (legacy) sizes */
int njcl;                       /* # of clusters for jumbo sizes */
int njclbytes;                  /* size of a jumbo cluster */
unsigned char *mbutl;           /* first mapped cluster address */
unsigned char *embutl;          /* ending virtual address of mclusters */
int _max_linkhdr;               /* largest link-level header */
int _max_protohdr;              /* largest protocol header */
int max_hdr;                    /* largest link+protocol header */
int max_datalen;                /* MHLEN - max_hdr */

static boolean_t mclverify;     /* debug: pattern-checking */
static boolean_t mcltrace;      /* debug: stack tracing */
static boolean_t mclfindleak;   /* debug: leak detection */
static boolean_t mclexpleak;    /* debug: expose leak info to user space */

static struct timeval mb_start; /* beginning of time */

/* mbuf leak detection variables */
static struct mleak_table mleak_table;
static mleak_stat_t *mleak_stat;

#define MLEAK_STAT_SIZE(n) \
	__builtin_offsetof(mleak_stat_t, ml_trace[n])

struct mallocation {
	mcache_obj_t *element;  /* the alloc'ed element, NULL if unused */
	u_int32_t trace_index;  /* mtrace index for corresponding backtrace */
	u_int32_t count;        /* How many objects were requested */
	u_int64_t hitcount;     /* for determining hash effectiveness */
};

struct mtrace {
	u_int64_t       collisions;
	u_int64_t       hitcount;
	u_int64_t       allocs;
	u_int64_t       depth;
	uintptr_t       addr[MLEAK_STACK_DEPTH];
};

/* Size must be a power of two for the zhash to be able to just mask off bits */
#define MLEAK_ALLOCATION_MAP_NUM        512
#define MLEAK_TRACE_MAP_NUM             256

/*
 * Sample factor for how often to record a trace.  This is overridable
 * via the boot-arg mleak_sample_factor.
 */
#define MLEAK_SAMPLE_FACTOR             500

/*
 * Number of top leakers recorded.
 */
#define MLEAK_NUM_TRACES                5

#define MB_LEAK_SPACING_64 "                    "
#define MB_LEAK_SPACING_32 "            "


#define MB_LEAK_HDR_32  "\n\
    trace [1]   trace [2]   trace [3]   trace [4]   trace [5]  \n\
    ----------  ----------  ----------  ----------  ---------- \n\
"

#define MB_LEAK_HDR_64  "\n\
    trace [1]           trace [2]           trace [3]       \
	trace [4]           trace [5]      \n\
    ------------------  ------------------  ------------------  \
    ------------------  ------------------ \n\
"

static uint32_t mleak_alloc_buckets = MLEAK_ALLOCATION_MAP_NUM;
static uint32_t mleak_trace_buckets = MLEAK_TRACE_MAP_NUM;

/* Hashmaps of allocations and their corresponding traces */
static struct mallocation *mleak_allocations;
static struct mtrace *mleak_traces;
static struct mtrace *mleak_top_trace[MLEAK_NUM_TRACES];

/* Lock to protect mleak tables from concurrent modification */
static LCK_GRP_DECLARE(mleak_lock_grp, "mleak_lock");
static LCK_MTX_DECLARE(mleak_lock_data, &mleak_lock_grp);
static lck_mtx_t *const mleak_lock = &mleak_lock_data;

/* *Failed* large allocations. */
struct mtracelarge {
	uint64_t        size;
	uint64_t        depth;
	uintptr_t       addr[MLEAK_STACK_DEPTH];
};

#define MTRACELARGE_NUM_TRACES          5
static struct mtracelarge mtracelarge_table[MTRACELARGE_NUM_TRACES];

static void mtracelarge_register(size_t size);

/* Lock to protect the completion callback table */
static LCK_GRP_DECLARE(mbuf_tx_compl_tbl_lck_grp, "mbuf_tx_compl_tbl");
LCK_RW_DECLARE(mbuf_tx_compl_tbl_lock, &mbuf_tx_compl_tbl_lck_grp);

extern u_int32_t high_sb_max;

/* The minimum number of objects that are allocated, to start. */
#define MINCL           32
#define MINBIGCL        (MINCL >> 1)
#define MIN16KCL        (MINCL >> 2)

/* Low watermarks (only map in pages once free counts go below) */
#define MBIGCL_LOWAT    MINBIGCL
#define M16KCL_LOWAT    MIN16KCL

typedef struct {
	mbuf_class_t    mtbl_class;     /* class type */
	mcache_t        *mtbl_cache;    /* mcache for this buffer class */
	TAILQ_HEAD(mcl_slhead, mcl_slab) mtbl_slablist; /* slab list */
	mcache_obj_t    *mtbl_cobjlist; /* composite objects freelist */
	mb_class_stat_t *mtbl_stats;    /* statistics fetchable via sysctl */
	u_int32_t       mtbl_maxsize;   /* maximum buffer size */
	int             mtbl_minlimit;  /* minimum allowed */
	int             mtbl_maxlimit;  /* maximum allowed */
	u_int32_t       mtbl_wantpurge; /* purge during next reclaim */
	uint32_t        mtbl_avgtotal;  /* average total on iOS */
	u_int32_t       mtbl_expand;    /* worker should expand the class */
} mbuf_table_t;

#define m_class(c)      mbuf_table[c].mtbl_class
#define m_cache(c)      mbuf_table[c].mtbl_cache
#define m_slablist(c)   mbuf_table[c].mtbl_slablist
#define m_cobjlist(c)   mbuf_table[c].mtbl_cobjlist
#define m_maxsize(c)    mbuf_table[c].mtbl_maxsize
#define m_minlimit(c)   mbuf_table[c].mtbl_minlimit
#define m_maxlimit(c)   mbuf_table[c].mtbl_maxlimit
#define m_wantpurge(c)  mbuf_table[c].mtbl_wantpurge
#define m_cname(c)      mbuf_table[c].mtbl_stats->mbcl_cname
#define m_size(c)       mbuf_table[c].mtbl_stats->mbcl_size
#define m_total(c)      mbuf_table[c].mtbl_stats->mbcl_total
#define m_active(c)     mbuf_table[c].mtbl_stats->mbcl_active
#define m_infree(c)     mbuf_table[c].mtbl_stats->mbcl_infree
#define m_slab_cnt(c)   mbuf_table[c].mtbl_stats->mbcl_slab_cnt
#define m_alloc_cnt(c)  mbuf_table[c].mtbl_stats->mbcl_alloc_cnt
#define m_free_cnt(c)   mbuf_table[c].mtbl_stats->mbcl_free_cnt
#define m_notified(c)   mbuf_table[c].mtbl_stats->mbcl_notified
#define m_purge_cnt(c)  mbuf_table[c].mtbl_stats->mbcl_purge_cnt
#define m_fail_cnt(c)   mbuf_table[c].mtbl_stats->mbcl_fail_cnt
#define m_ctotal(c)     mbuf_table[c].mtbl_stats->mbcl_ctotal
#define m_peak(c)       mbuf_table[c].mtbl_stats->mbcl_peak_reported
#define m_release_cnt(c) mbuf_table[c].mtbl_stats->mbcl_release_cnt
#define m_region_expand(c)      mbuf_table[c].mtbl_expand

static mbuf_table_t mbuf_table[] = {
	/*
	 * The caches for mbufs, regular clusters and big clusters.
	 * The average total values were based on data gathered from actual
	 * usage patterns on iOS.
	 */
	{ MC_MBUF, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_MBUF)),
	  NULL, NULL, 0, 0, 0, 0, 3000, 0 },
	{ MC_CL, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_CL)),
	  NULL, NULL, 0, 0, 0, 0, 2000, 0 },
	{ MC_BIGCL, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_BIGCL)),
	  NULL, NULL, 0, 0, 0, 0, 1000, 0 },
	{ MC_16KCL, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_16KCL)),
	  NULL, NULL, 0, 0, 0, 0, 200, 0 },
	/*
	 * The following are special caches; they serve as intermediate
	 * caches backed by the above rudimentary caches.  Each object
	 * in the cache is an mbuf with a cluster attached to it.  Unlike
	 * the above caches, these intermediate caches do not directly
	 * deal with the slab structures; instead, the constructed
	 * cached elements are simply stored in the freelists.
	 */
	{ MC_MBUF_CL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 2000, 0 },
	{ MC_MBUF_BIGCL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 1000, 0 },
	{ MC_MBUF_16KCL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 200, 0 },
};

#define NELEM(a)        (sizeof (a) / sizeof ((a)[0]))

#if SKYWALK
#define MC_THRESHOLD_SCALE_DOWN_FACTOR  2
static unsigned int mc_threshold_scale_down_factor =
    MC_THRESHOLD_SCALE_DOWN_FACTOR;
#endif /* SKYWALK */

static uint32_t
m_avgtotal(mbuf_class_t c)
{
#if SKYWALK
	return if_is_fsw_transport_netagent_enabled() ?
	       (mbuf_table[c].mtbl_avgtotal / mc_threshold_scale_down_factor) :
	       mbuf_table[c].mtbl_avgtotal;
#else /* !SKYWALK */
	return mbuf_table[c].mtbl_avgtotal;
#endif /* SKYWALK */
}

static void *mb_waitchan = &mbuf_table; /* wait channel for all caches */
static int mb_waiters;                  /* number of waiters */

boolean_t mb_peak_newreport = FALSE;
boolean_t mb_peak_firstreport = FALSE;

/* generate a report by default after 1 week of uptime */
#define MBUF_PEAK_FIRST_REPORT_THRESHOLD        604800

#define MB_WDT_MAXTIME  10              /* # of secs before watchdog panic */
static struct timeval mb_wdtstart;      /* watchdog start timestamp */
static char *mbuf_dump_buf;

#define MBUF_DUMP_BUF_SIZE      4096

/*
 * The mbuf watchdog is enabled by default.  It is also toggleable via the
 * kern.ipc.mb_watchdog sysctl.
 * Garbage collection is enabled by default on embedded platforms.
 * mb_drain_maxint controls the amount of time to wait (in seconds) between
 * consecutive calls to mbuf_drain().
 */
static unsigned int mb_watchdog = 1;
#if !XNU_TARGET_OS_OSX
static unsigned int mb_drain_maxint = 60;
#else /* XNU_TARGET_OS_OSX */
static unsigned int mb_drain_maxint = 0;
#endif /* XNU_TARGET_OS_OSX */
static unsigned int mb_memory_pressure_percentage = 80;

uintptr_t mb_obscure_extfree __attribute__((visibility("hidden")));
uintptr_t mb_obscure_extref __attribute__((visibility("hidden")));

/* Red zone */
static u_int32_t mb_redzone_cookie;
static void m_redzone_init(struct mbuf *);
static void m_redzone_verify(struct mbuf *m);

/* The following are used to serialize m_clalloc() */
static boolean_t mb_clalloc_busy;
static void *mb_clalloc_waitchan = &mb_clalloc_busy;
static int mb_clalloc_waiters;

static void mbuf_mtypes_sync(boolean_t);
static int mbstat_sysctl SYSCTL_HANDLER_ARGS;
static void mbuf_stat_sync(void);
static int mb_stat_sysctl SYSCTL_HANDLER_ARGS;
static int mleak_top_trace_sysctl SYSCTL_HANDLER_ARGS;
static int mleak_table_sysctl SYSCTL_HANDLER_ARGS;
static char *mbuf_dump(void);
static void mbuf_table_init(void);
static inline void m_incref(struct mbuf *);
static inline u_int16_t m_decref(struct mbuf *);
static int m_clalloc(const u_int32_t, const int, const u_int32_t);
static void mbuf_worker_thread_init(void);
static mcache_obj_t *slab_alloc(mbuf_class_t, int);
static void slab_free(mbuf_class_t, mcache_obj_t *);
static unsigned int mbuf_slab_alloc(void *, mcache_obj_t ***,
    unsigned int, int);
static void mbuf_slab_free(void *, mcache_obj_t *, int);
static void mbuf_slab_audit(void *, mcache_obj_t *, boolean_t);
static void mbuf_slab_notify(void *, u_int32_t);
static unsigned int cslab_alloc(mbuf_class_t, mcache_obj_t ***,
    unsigned int);
static unsigned int cslab_free(mbuf_class_t, mcache_obj_t *, int);
static unsigned int mbuf_cslab_alloc(void *, mcache_obj_t ***,
    unsigned int, int);
static void mbuf_cslab_free(void *, mcache_obj_t *, int);
static void mbuf_cslab_audit(void *, mcache_obj_t *, boolean_t);
static int freelist_populate(mbuf_class_t, unsigned int, int);
static void freelist_init(mbuf_class_t);
static boolean_t mbuf_cached_above(mbuf_class_t, int);
static boolean_t mbuf_steal(mbuf_class_t, unsigned int);
static void m_reclaim(mbuf_class_t, unsigned int, boolean_t);
static int m_howmany(int, size_t);
static void mbuf_worker_thread(void);
static void mbuf_watchdog(void);
static boolean_t mbuf_sleep(mbuf_class_t, unsigned int, int);

static void mcl_audit_init(void *, mcache_audit_t **, mcache_obj_t **,
    size_t, unsigned int);
static void mcl_audit_free(void *, unsigned int);
static mcache_audit_t *mcl_audit_buf2mca(mbuf_class_t, mcache_obj_t *);
static void mcl_audit_mbuf(mcache_audit_t *, void *, boolean_t, boolean_t);
static void mcl_audit_cluster(mcache_audit_t *, void *, size_t, boolean_t,
    boolean_t);
static void mcl_audit_restore_mbuf(struct mbuf *, mcache_audit_t *, boolean_t);
static void mcl_audit_save_mbuf(struct mbuf *, mcache_audit_t *);
static void mcl_audit_scratch(mcache_audit_t *);
static void mcl_audit_mcheck_panic(struct mbuf *);
static void mcl_audit_verify_nextptr(void *, mcache_audit_t *);

static void mleak_activate(void);
static void mleak_logger(u_int32_t, mcache_obj_t *, boolean_t);
static boolean_t mleak_log(uintptr_t *, mcache_obj_t *, uint32_t, int);
static void mleak_free(mcache_obj_t *);
static void mleak_sort_traces(void);
static void mleak_update_stats(void);

static mcl_slab_t *slab_get(void *);
static void slab_init(mcl_slab_t *, mbuf_class_t, u_int32_t,
    void *, void *, unsigned int, int, int);
static void slab_insert(mcl_slab_t *, mbuf_class_t);
static void slab_remove(mcl_slab_t *, mbuf_class_t);
static boolean_t slab_inrange(mcl_slab_t *, void *);
static void slab_nextptr_panic(mcl_slab_t *, void *);
static void slab_detach(mcl_slab_t *);
static boolean_t slab_is_detached(mcl_slab_t *);

static int m_copyback0(struct mbuf **, int, int, const void *, int, int);
static struct mbuf *m_split0(struct mbuf *, int, int, int);
__private_extern__ void mbuf_report_peak_usage(void);
static boolean_t mbuf_report_usage(mbuf_class_t);
#if DEBUG || DEVELOPMENT
#define mbwdog_logger(fmt, ...)  _mbwdog_logger(__func__, __LINE__, fmt, ## __VA_ARGS__)
static void _mbwdog_logger(const char *func, const int line, const char *fmt, ...);
static char *mbwdog_logging;
const unsigned mbwdog_logging_size = 4096;
static size_t mbwdog_logging_used;
#else
#define mbwdog_logger(fmt, ...)  do { } while (0)
#endif
static void mbuf_drain_locked(boolean_t);

/* flags for m_copyback0 */
#define M_COPYBACK0_COPYBACK    0x0001  /* copyback from cp */
#define M_COPYBACK0_PRESERVE    0x0002  /* preserve original data */
#define M_COPYBACK0_COW         0x0004  /* do copy-on-write */
#define M_COPYBACK0_EXTEND      0x0008  /* extend chain */

/*
 * This flag is set for all mbufs that come out of and into the composite
 * mbuf + cluster caches, i.e. MC_MBUF_CL and MC_MBUF_BIGCL.  mbufs that
 * are marked with such a flag have clusters attached to them, and will be
 * treated differently when they are freed; instead of being placed back
 * into the mbuf and cluster freelists, the composite mbuf + cluster objects
 * are placed back into the appropriate composite cache's freelist, and the
 * actual freeing is deferred until the composite objects are purged.  At
 * such a time, this flag will be cleared from the mbufs and the objects
 * will be freed into their own separate freelists.
 */
#define EXTF_COMPOSITE  0x1

/*
 * This flag indicates that the external cluster is read-only, i.e. it is
 * or was referred to by more than one mbuf.  Once set, this flag is never
 * cleared.
 */
#define EXTF_READONLY   0x2
/*
 * This flag indicates that the external cluster is paired with the mbuf.
 * Pairing implies that an external free routine is defined, which will be
 * invoked when the reference count drops to the minimum at m_free time.
 * This flag is never cleared.
 */
#define EXTF_PAIRED     0x4

#define EXTF_MASK       \
	(EXTF_COMPOSITE | EXTF_READONLY | EXTF_PAIRED)

#define MEXT_MINREF(m)          ((m_get_rfa(m))->minref)
#define MEXT_REF(m)             ((m_get_rfa(m))->refcnt)
#define MEXT_PREF(m)            ((m_get_rfa(m))->prefcnt)
#define MEXT_FLAGS(m)           ((m_get_rfa(m))->flags)
#define MEXT_PRIV(m)            ((m_get_rfa(m))->priv)
#define MEXT_PMBUF(m)           ((m_get_rfa(m))->paired)
#define MEXT_TOKEN(m)           ((m_get_rfa(m))->ext_token)
#define MBUF_IS_COMPOSITE(m)                                            \
	(MEXT_REF(m) == MEXT_MINREF(m) &&                               \
	(MEXT_FLAGS(m) & EXTF_MASK) == EXTF_COMPOSITE)
/*
 * This macro can be used to test if the mbuf is paired to an external
 * cluster.  The test that MEXT_PMBUF equals the mbuf in question is
 * important, as EXTF_PAIRED alone is insufficient: that flag is immutable,
 * and thus survives calls to m_free_paired.
 */
#define MBUF_IS_PAIRED(m)                                               \
	(((m)->m_flags & M_EXT) &&                                      \
	(MEXT_FLAGS(m) & EXTF_MASK) == EXTF_PAIRED &&                   \
	MEXT_PMBUF(m) == (m))

/*
 * Macros used to verify the integrity of the mbuf.
 */
#define _MCHECK(m) {                                                    \
	if ((m)->m_type != MT_FREE && !MBUF_IS_PAIRED(m)) {             \
	        if (mclaudit == NULL)                                   \
	                panic("MCHECK: m_type=%d m=%p",                 \
	                    (u_int16_t)(m)->m_type, m);                 \
	        else                                                    \
	                mcl_audit_mcheck_panic(m);                      \
	}                                                               \
}

#define MBUF_IN_MAP(addr)                                               \
	((unsigned char *)(addr) >= mbutl &&                            \
	(unsigned char *)(addr) < embutl)

#define MRANGE(addr) {                                                  \
	if (!MBUF_IN_MAP(addr))                                         \
	        panic("MRANGE: address out of range 0x%p", addr);       \
}

/*
 * Macro version of mtod.
 */
#define MTOD(m, t)      ((t)((m)->m_data))

/*
 * Macros to obtain the page index given a base cluster address.
 */
#define MTOPG(x)        (((unsigned char *)x - mbutl) >> PAGE_SHIFT)
#define PGTOM(x)        (mbutl + (x << PAGE_SHIFT))

/*
 * Macro to find the mbuf index relative to a base.
 */
#define MBPAGEIDX(c, m) \
	(((unsigned char *)(m) - (unsigned char *)(c)) >> MSIZESHIFT)

/*
 * Same thing for the 2KB cluster index.
 */
#define CLPAGEIDX(c, m) \
	(((unsigned char *)(m) - (unsigned char *)(c)) >> MCLSHIFT)

/*
 * Macro to find the 4KB cluster index relative to a base.
 */
#define BCLPAGEIDX(c, m) \
	(((unsigned char *)(m) - (unsigned char *)(c)) >> MBIGCLSHIFT)
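
/*
 * Worked example (assuming MSIZE == 256, MCLBYTES == 2048 and
 * MBIGCLBYTES == 4096): an object at byte offset 512 from the base of its
 * page yields MBPAGEIDX == 2, CLPAGEIDX == 0 and BCLPAGEIDX == 0; one at
 * offset 2048 yields MBPAGEIDX == 8 and CLPAGEIDX == 1.  The same address
 * therefore maps to a different index depending on the class the page is
 * currently serving.
 */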

/*
 * Macros used during mbuf and cluster initialization.
 */
#define MBUF_INIT_PKTHDR(m) {                                           \
	(m)->m_pkthdr.rcvif = NULL;                                     \
	(m)->m_pkthdr.pkt_hdr = NULL;                                   \
	(m)->m_pkthdr.len = 0;                                          \
	(m)->m_pkthdr.csum_flags = 0;                                   \
	(m)->m_pkthdr.csum_data = 0;                                    \
	(m)->m_pkthdr.vlan_tag = 0;                                     \
	(m)->m_pkthdr.comp_gencnt = 0;                                  \
	(m)->m_pkthdr.pkt_crumbs = 0;                                   \
	m_classifier_init(m, 0);                                        \
	m_tag_init(m, 1);                                               \
	m_scratch_init(m);                                              \
	m_redzone_init(m);                                              \
}

#define MBUF_INIT(m, pkthdr, type) {                                    \
	_MCHECK(m);                                                     \
	(m)->m_next = (m)->m_nextpkt = NULL;                            \
	(m)->m_len = 0;                                                 \
	(m)->m_type = type;                                             \
	if ((pkthdr) == 0) {                                            \
	        (m)->m_data = (m)->m_dat;                               \
	        (m)->m_flags = 0;                                       \
	} else {                                                        \
	        (m)->m_data = (m)->m_pktdat;                            \
	        (m)->m_flags = M_PKTHDR;                                \
	        MBUF_INIT_PKTHDR(m);                                    \
	}                                                               \
}

#define MEXT_INIT(m, buf, size, free, arg, rfa, min, ref, pref, flag,   \
	    priv, pm) {                                                 \
	(m)->m_data = (m)->m_ext.ext_buf = (buf);                       \
	(m)->m_flags |= M_EXT;                                          \
	m_set_ext((m), (rfa), (free), (arg));                           \
	(m)->m_ext.ext_size = (u_int)(size);                            \
	MEXT_MINREF(m) = (min);                                         \
	MEXT_REF(m) = (ref);                                            \
	MEXT_PREF(m) = (pref);                                          \
	MEXT_FLAGS(m) = (flag);                                         \
	MEXT_PRIV(m) = (priv);                                          \
	MEXT_PMBUF(m) = (pm);                                           \
}

#define MBUF_CL_INIT(m, buf, rfa, ref, flag)    \
	MEXT_INIT(m, buf, m_maxsize(MC_CL), NULL, NULL, rfa, 0,         \
	    ref, 0, flag, 0, NULL)

#define MBUF_BIGCL_INIT(m, buf, rfa, ref, flag) \
	MEXT_INIT(m, buf, m_maxsize(MC_BIGCL), m_bigfree, NULL, rfa, 0, \
	    ref, 0, flag, 0, NULL)

#define MBUF_16KCL_INIT(m, buf, rfa, ref, flag) \
	MEXT_INIT(m, buf, m_maxsize(MC_16KCL), m_16kfree, NULL, rfa, 0, \
	    ref, 0, flag, 0, NULL)

/*
 * Macro to convert the BSD malloc sleep flag to mcache's.
 */
#define MSLEEPF(f)      ((!((f) & M_DONTWAIT)) ? MCR_SLEEP : MCR_NOSLEEP)
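
/*
 * For example, MSLEEPF(M_DONTWAIT) evaluates to MCR_NOSLEEP, while
 * MSLEEPF(M_WAIT) evaluates to MCR_SLEEP, so BSD-style wait semantics
 * carry through to the mcache layer unchanged.
 */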

/*
 * The structure that holds all mbuf class statistics exportable via sysctl.
 * Similar to the mbstat structure, the mb_stat structure is protected by
 * the global mbuf lock.  It contains additional information about the
 * classes that allows for a more accurate view of the state of the
 * allocator.
 */
struct mb_stat *mb_stat;
struct omb_stat *omb_stat;      /* For backwards compatibility */

#define MB_STAT_SIZE(n) \
	__builtin_offsetof(mb_stat_t, mbs_class[n])
#define OMB_STAT_SIZE(n) \
	__builtin_offsetof(struct omb_stat, mbs_class[n])

/*
 * The legacy structure holding all of the mbuf allocation statistics.
 * The actual statistics used by the kernel are stored in the mbuf_table
 * instead, and are updated atomically while the global mbuf lock is held.
 * They are mirrored in mbstat to support legacy applications (e.g. netstat).
 * Unlike before, the kernel no longer relies on the contents of mbstat for
 * its operations (e.g. cluster expansion) because the structure is exposed
 * to the outside and could possibly be modified, therefore making it unsafe.
 * With the exception of the mbstat.m_mtypes array (see below), all of the
 * statistics are updated as they change.
 */
struct mbstat mbstat;

#define MBSTAT_MTYPES_MAX \
	(sizeof (mbstat.m_mtypes) / sizeof (mbstat.m_mtypes[0]))

/*
 * Allocation statistics related to mbuf types (up to MT_MAX-1) are updated
 * atomically and stored in a per-CPU structure which is lock-free; this is
 * done in order to avoid writing to the global mbstat data structure, which
 * would cause false sharing.  During a sysctl request for kern.ipc.mbstat,
 * the statistics across all CPUs will be converged into the mbstat.m_mtypes
 * array and returned to the application.  Any updates for types greater
 * than or equal to MT_MAX are done atomically to the mbstat; this slows
 * down performance but is okay since the kernel uses only up to MT_MAX-1
 * while anything beyond that (up to type 255) is considered a corner case.
 */
typedef struct {
	unsigned int cpu_mtypes[MT_MAX];
} mbuf_mtypes_t;

static mbuf_mtypes_t PERCPU_DATA(mbuf_mtypes);

#define mtype_stat_add(type, n) {                                       \
	if ((unsigned)(type) < MT_MAX) {                                \
	        mbuf_mtypes_t *mbs = PERCPU_GET(mbuf_mtypes);           \
	        atomic_add_32(&mbs->cpu_mtypes[type], n);               \
	} else if ((unsigned)(type) < (unsigned)MBSTAT_MTYPES_MAX) {    \
	        atomic_add_16((int16_t *)&mbstat.m_mtypes[type], n);    \
	}                                                               \
}

#define mtype_stat_sub(t, n)    mtype_stat_add(t, -(n))
#define mtype_stat_inc(t)       mtype_stat_add(t, 1)
#define mtype_stat_dec(t)       mtype_stat_sub(t, 1)
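
/*
 * Usage sketch: code that changes an mbuf's type keeps the per-type
 * counters balanced with a paired dec/inc, e.g.:
 *
 *	mtype_stat_dec(MT_FREE);
 *	mtype_stat_inc(MT_DATA);
 *
 * Each call lands in the per-CPU cpu_mtypes[] array; the totals are folded
 * into mbstat.m_mtypes[] only by mbuf_mtypes_sync() below.
 */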

static void
mbuf_mtypes_sync(boolean_t locked)
{
	mbuf_mtypes_t mtc;

	if (locked) {
		LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
	}

	mtc = *PERCPU_GET_MASTER(mbuf_mtypes);
	percpu_foreach_secondary(mtype, mbuf_mtypes) {
		for (int n = 0; n < MT_MAX; n++) {
			mtc.cpu_mtypes[n] += mtype->cpu_mtypes[n];
		}
	}

	if (!locked) {
		lck_mtx_lock(mbuf_mlock);
	}
	for (int n = 0; n < MT_MAX; n++) {
		mbstat.m_mtypes[n] = mtc.cpu_mtypes[n];
	}
	if (!locked) {
		lck_mtx_unlock(mbuf_mlock);
	}
}

static int
mbstat_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	mbuf_mtypes_sync(FALSE);

	return SYSCTL_OUT(req, &mbstat, sizeof(mbstat));
}

static void
mbuf_stat_sync(void)
{
	mb_class_stat_t *sp;
	mcache_cpu_t *ccp;
	mcache_t *cp;
	int k, m, bktsize;

	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);

	for (k = 0; k < NELEM(mbuf_table); k++) {
		cp = m_cache(k);
		ccp = &cp->mc_cpu[0];
		bktsize = ccp->cc_bktsize;
		sp = mbuf_table[k].mtbl_stats;

		if (cp->mc_flags & MCF_NOCPUCACHE) {
			sp->mbcl_mc_state = MCS_DISABLED;
		} else if (cp->mc_purge_cnt > 0) {
			sp->mbcl_mc_state = MCS_PURGING;
		} else if (bktsize == 0) {
			sp->mbcl_mc_state = MCS_OFFLINE;
		} else {
			sp->mbcl_mc_state = MCS_ONLINE;
		}

		sp->mbcl_mc_cached = 0;
		for (m = 0; m < ncpu; m++) {
			ccp = &cp->mc_cpu[m];
			if (ccp->cc_objs > 0) {
				sp->mbcl_mc_cached += ccp->cc_objs;
			}
			if (ccp->cc_pobjs > 0) {
				sp->mbcl_mc_cached += ccp->cc_pobjs;
			}
		}
		sp->mbcl_mc_cached += (cp->mc_full.bl_total * bktsize);
		sp->mbcl_active = sp->mbcl_total - sp->mbcl_mc_cached -
		    sp->mbcl_infree;

		sp->mbcl_mc_waiter_cnt = cp->mc_waiter_cnt;
		sp->mbcl_mc_wretry_cnt = cp->mc_wretry_cnt;
		sp->mbcl_mc_nwretry_cnt = cp->mc_nwretry_cnt;

		/* Calculate the total count specific to each class */
		sp->mbcl_ctotal = sp->mbcl_total;
		switch (m_class(k)) {
		case MC_MBUF:
			/* Deduct mbufs used in composite caches */
			sp->mbcl_ctotal -= (m_total(MC_MBUF_CL) +
			    m_total(MC_MBUF_BIGCL));
			break;

		case MC_CL:
			/* Deduct clusters used in composite cache */
			sp->mbcl_ctotal -= m_total(MC_MBUF_CL);
			break;

		case MC_BIGCL:
			/* Deduct clusters used in composite cache */
			sp->mbcl_ctotal -= m_total(MC_MBUF_BIGCL);
			break;

		case MC_16KCL:
			/* Deduct clusters used in composite cache */
			sp->mbcl_ctotal -= m_total(MC_MBUF_16KCL);
			break;

		default:
			break;
		}
	}
}

static int
mb_stat_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	void *statp;
	int k, statsz, proc64 = proc_is64bit(req->p);

	lck_mtx_lock(mbuf_mlock);
	mbuf_stat_sync();

	if (!proc64) {
		struct omb_class_stat *oc;
		struct mb_class_stat *c;

		omb_stat->mbs_cnt = mb_stat->mbs_cnt;
		oc = &omb_stat->mbs_class[0];
		c = &mb_stat->mbs_class[0];
		for (k = 0; k < omb_stat->mbs_cnt; k++, oc++, c++) {
			(void) snprintf(oc->mbcl_cname, sizeof(oc->mbcl_cname),
			    "%s", c->mbcl_cname);
			oc->mbcl_size = c->mbcl_size;
			oc->mbcl_total = c->mbcl_total;
			oc->mbcl_active = c->mbcl_active;
			oc->mbcl_infree = c->mbcl_infree;
			oc->mbcl_slab_cnt = c->mbcl_slab_cnt;
			oc->mbcl_alloc_cnt = c->mbcl_alloc_cnt;
			oc->mbcl_free_cnt = c->mbcl_free_cnt;
			oc->mbcl_notified = c->mbcl_notified;
			oc->mbcl_purge_cnt = c->mbcl_purge_cnt;
			oc->mbcl_fail_cnt = c->mbcl_fail_cnt;
			oc->mbcl_ctotal = c->mbcl_ctotal;
			oc->mbcl_release_cnt = c->mbcl_release_cnt;
			oc->mbcl_mc_state = c->mbcl_mc_state;
			oc->mbcl_mc_cached = c->mbcl_mc_cached;
			oc->mbcl_mc_waiter_cnt = c->mbcl_mc_waiter_cnt;
			oc->mbcl_mc_wretry_cnt = c->mbcl_mc_wretry_cnt;
			oc->mbcl_mc_nwretry_cnt = c->mbcl_mc_nwretry_cnt;
			oc->mbcl_peak_reported = c->mbcl_peak_reported;
		}
		statp = omb_stat;
		statsz = OMB_STAT_SIZE(NELEM(mbuf_table));
	} else {
		statp = mb_stat;
		statsz = MB_STAT_SIZE(NELEM(mbuf_table));
	}

	lck_mtx_unlock(mbuf_mlock);

	return SYSCTL_OUT(req, statp, statsz);
}

static int
mleak_top_trace_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int i;

	/* Ensure leak tracing is turned on */
	if (!mclfindleak || !mclexpleak) {
		return ENXIO;
	}

	lck_mtx_lock(mleak_lock);
	mleak_update_stats();
	i = SYSCTL_OUT(req, mleak_stat, MLEAK_STAT_SIZE(MLEAK_NUM_TRACES));
	lck_mtx_unlock(mleak_lock);

	return i;
}

static int
mleak_table_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int i = 0;

	/* Ensure leak tracing is turned on */
	if (!mclfindleak || !mclexpleak) {
		return ENXIO;
	}

	lck_mtx_lock(mleak_lock);
	i = SYSCTL_OUT(req, &mleak_table, sizeof(mleak_table));
	lck_mtx_unlock(mleak_lock);

	return i;
}

static inline void
m_incref(struct mbuf *m)
{
	UInt16 old, new;
	volatile UInt16 *addr = (volatile UInt16 *)&MEXT_REF(m);

	do {
		old = *addr;
		new = old + 1;
		VERIFY(new != 0);
	} while (!OSCompareAndSwap16(old, new, addr));

	/*
	 * If the cluster is shared, mark it with (sticky) EXTF_READONLY;
	 * we don't clear the flag when the refcount goes back to the
	 * minimum, to simplify code calling m_mclhasreference().
	 */
	if (new > (MEXT_MINREF(m) + 1) && !(MEXT_FLAGS(m) & EXTF_READONLY)) {
		(void) OSBitOrAtomic16(EXTF_READONLY, &MEXT_FLAGS(m));
	}
}

static inline u_int16_t
m_decref(struct mbuf *m)
{
	UInt16 old, new;
	volatile UInt16 *addr = (volatile UInt16 *)&MEXT_REF(m);

	do {
		old = *addr;
		new = old - 1;
		VERIFY(old != 0);
	} while (!OSCompareAndSwap16(old, new, addr));

	return new;
}

static void
mbuf_table_init(void)
{
	unsigned int b, c, s;
	int m, config_mbuf_jumbo = 0;

	omb_stat = zalloc_permanent(OMB_STAT_SIZE(NELEM(mbuf_table)),
	    ZALIGN(struct omb_stat));

	mb_stat = zalloc_permanent(MB_STAT_SIZE(NELEM(mbuf_table)),
	    ZALIGN(mb_stat_t));

	mb_stat->mbs_cnt = NELEM(mbuf_table);
	for (m = 0; m < NELEM(mbuf_table); m++) {
		mbuf_table[m].mtbl_stats = &mb_stat->mbs_class[m];
	}

#if CONFIG_MBUF_JUMBO
	config_mbuf_jumbo = 1;
#endif /* CONFIG_MBUF_JUMBO */

	if (config_mbuf_jumbo == 1 || PAGE_SIZE == M16KCLBYTES) {
		/*
		 * Set aside 1/3 of the mbuf cluster map for jumbo
		 * clusters; we do this only on platforms where the
		 * jumbo cluster pool is enabled.
		 */
		njcl = nmbclusters / 3;
		njclbytes = M16KCLBYTES;
	}

	/*
	 * nclusters holds both the 2KB and 4KB pools, so ensure it's
	 * a multiple of 4KB clusters.
	 */
	nclusters = P2ROUNDDOWN(nmbclusters - njcl, NCLPG);
	if (njcl > 0) {
		/*
		 * Each jumbo cluster takes 8 2KB clusters, so make
		 * sure that the pool size is evenly divisible by 8;
		 * njcl is in 2KB units, hence treated as such.
		 */
		njcl = P2ROUNDDOWN(nmbclusters - nclusters, NCLPJCL);

		/* Update nclusters with the rounded down value of njcl */
		nclusters = P2ROUNDDOWN(nmbclusters - njcl, NCLPG);
	}

	/*
	 * njcl is valid only on platforms with 16KB jumbo clusters or
	 * with 16KB pages, where it is configured to 1/3 of the pool
	 * size.  On these platforms, the remainder is used for 2KB
	 * and 4KB clusters.  On platforms without 16KB jumbo clusters,
	 * the entire pool is used for both 2KB and 4KB clusters.  A 4KB
	 * cluster can either be split into 16 mbufs, or into 2 2KB
	 * clusters.
	 *
	 *  +---+---+------------ ... -----------+------- ... -------+
	 *  | c | b |              s             |        njcl       |
	 *  +---+---+------------ ... -----------+------- ... -------+
	 *
	 * 1/32nd of the shared region is reserved for pure 2KB and 4KB
	 * clusters (1/64th each.)
	 */
	c = P2ROUNDDOWN((nclusters >> 6), NCLPG);       /* in 2KB unit */
	b = P2ROUNDDOWN((nclusters >> (6 + NCLPBGSHIFT)), NBCLPG); /* in 4KB unit */
	s = nclusters - (c + (b << NCLPBGSHIFT));       /* in 2KB unit */
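
	/*
	 * Worked example (assuming 4KB pages, i.e. NCLPG == 2, NBCLPG == 1
	 * and NCLPBGSHIFT == 1): with nclusters == 65536 (in 2KB units),
	 * c == 65536 >> 6 == 1024 2KB clusters, b == 65536 >> 7 == 512 4KB
	 * clusters, and s == 65536 - (1024 + 1024) == 63488, leaving
	 * roughly 31/32nds of the pool general purpose.
	 */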
1354 
1355 	/*
1356 	 * 1/64th (c) is reserved for 2KB clusters.
1357 	 */
1358 	m_minlimit(MC_CL) = c;
1359 	m_maxlimit(MC_CL) = s + c;                      /* in 2KB unit */
1360 	m_maxsize(MC_CL) = m_size(MC_CL) = MCLBYTES;
1361 	(void) snprintf(m_cname(MC_CL), MAX_MBUF_CNAME, "cl");
1362 
1363 	/*
1364 	 * Another 1/64th (b) of the map is reserved for 4KB clusters.
1365 	 * It cannot be turned into 2KB clusters or mbufs.
1366 	 */
1367 	m_minlimit(MC_BIGCL) = b;
1368 	m_maxlimit(MC_BIGCL) = (s >> NCLPBGSHIFT) + b;  /* in 4KB unit */
1369 	m_maxsize(MC_BIGCL) = m_size(MC_BIGCL) = MBIGCLBYTES;
1370 	(void) snprintf(m_cname(MC_BIGCL), MAX_MBUF_CNAME, "bigcl");
1371 
1372 	/*
1373 	 * The remaining 31/32ths (s) are all-purpose (mbufs, 2KB, or 4KB)
1374 	 */
1375 	m_minlimit(MC_MBUF) = 0;
1376 	m_maxlimit(MC_MBUF) = (s << NMBPCLSHIFT);       /* in mbuf unit */
1377 	m_maxsize(MC_MBUF) = m_size(MC_MBUF) = MSIZE;
1378 	(void) snprintf(m_cname(MC_MBUF), MAX_MBUF_CNAME, "mbuf");
1379 
1380 	/*
1381 	 * Set limits for the composite classes.
1382 	 */
1383 	m_minlimit(MC_MBUF_CL) = 0;
1384 	m_maxlimit(MC_MBUF_CL) = m_maxlimit(MC_CL);
1385 	m_maxsize(MC_MBUF_CL) = MCLBYTES;
1386 	m_size(MC_MBUF_CL) = m_size(MC_MBUF) + m_size(MC_CL);
1387 	(void) snprintf(m_cname(MC_MBUF_CL), MAX_MBUF_CNAME, "mbuf_cl");
1388 
1389 	m_minlimit(MC_MBUF_BIGCL) = 0;
1390 	m_maxlimit(MC_MBUF_BIGCL) = m_maxlimit(MC_BIGCL);
1391 	m_maxsize(MC_MBUF_BIGCL) = MBIGCLBYTES;
1392 	m_size(MC_MBUF_BIGCL) = m_size(MC_MBUF) + m_size(MC_BIGCL);
1393 	(void) snprintf(m_cname(MC_MBUF_BIGCL), MAX_MBUF_CNAME, "mbuf_bigcl");
1394 
1395 	/*
1396 	 * And for jumbo classes.
1397 	 */
1398 	m_minlimit(MC_16KCL) = 0;
1399 	m_maxlimit(MC_16KCL) = (njcl >> NCLPJCLSHIFT);  /* in 16KB unit */
1400 	m_maxsize(MC_16KCL) = m_size(MC_16KCL) = M16KCLBYTES;
1401 	(void) snprintf(m_cname(MC_16KCL), MAX_MBUF_CNAME, "16kcl");
1402 
1403 	m_minlimit(MC_MBUF_16KCL) = 0;
1404 	m_maxlimit(MC_MBUF_16KCL) = m_maxlimit(MC_16KCL);
1405 	m_maxsize(MC_MBUF_16KCL) = M16KCLBYTES;
1406 	m_size(MC_MBUF_16KCL) = m_size(MC_MBUF) + m_size(MC_16KCL);
1407 	(void) snprintf(m_cname(MC_MBUF_16KCL), MAX_MBUF_CNAME, "mbuf_16kcl");
1408 
1409 	/*
1410 	 * Initialize the legacy mbstat structure.
1411 	 */
1412 	bzero(&mbstat, sizeof(mbstat));
1413 	mbstat.m_msize = m_maxsize(MC_MBUF);
1414 	mbstat.m_mclbytes = m_maxsize(MC_CL);
1415 	mbstat.m_minclsize = MINCLSIZE;
1416 	mbstat.m_mlen = MLEN;
1417 	mbstat.m_mhlen = MHLEN;
1418 	mbstat.m_bigmclbytes = m_maxsize(MC_BIGCL);
1419 }
1420 
1421 int
1422 mbuf_get_class(struct mbuf *m)
1423 {
1424 	if (m->m_flags & M_EXT) {
1425 		uint32_t composite = (MEXT_FLAGS(m) & EXTF_COMPOSITE);
1426 		m_ext_free_func_t m_free_func = m_get_ext_free(m);
1427 
1428 		if (m_free_func == NULL) {
1429 			if (composite) {
1430 				return MC_MBUF_CL;
1431 			} else {
1432 				return MC_CL;
1433 			}
1434 		} else if (m_free_func == m_bigfree) {
1435 			if (composite) {
1436 				return MC_MBUF_BIGCL;
1437 			} else {
1438 				return MC_BIGCL;
1439 			}
1440 		} else if (m_free_func == m_16kfree) {
1441 			if (composite) {
1442 				return MC_MBUF_16KCL;
1443 			} else {
1444 				return MC_16KCL;
1445 			}
1446 		}
1447 	}
1448 
1449 	return MC_MBUF;
1450 }
1451 
1452 bool
1453 mbuf_class_under_pressure(struct mbuf *m)
1454 {
1455 	int mclass = mbuf_get_class(m);
1456 
1457 	if (m_total(mclass) - m_infree(mclass) >= (m_maxlimit(mclass) * mb_memory_pressure_percentage) / 100) {
1458 		/*
1459 		 * The above computation does not include the per-CPU cached objects.
1460 		 * As a fast-path check this is good enough. But now we do
1461 		 * the "slower" count of the cached objects to know exactly the
1462 		 * number of active mbufs in use.
1463 		 *
1464 		 * We do not take the mbuf_lock here to avoid lock contention. Numbers
1465 		 * might be slightly off but we don't try to be 100% accurate.
1466 		 * At worst, we drop a packet that we shouldn't have dropped or
1467 		 * we might go slightly above our memory-pressure threshold.
1468 		 */
1469 		mcache_t *cp = m_cache(mclass);
1470 		mcache_cpu_t *ccp = &cp->mc_cpu[0];
1471 
1472 		int bktsize = os_access_once(ccp->cc_bktsize);
1473 		uint32_t bl_total = os_access_once(cp->mc_full.bl_total);
1474 		uint32_t cached = 0;
1475 		int i;
1476 
1477 		for (i = 0; i < ncpu; i++) {
1478 			ccp = &cp->mc_cpu[i];
1479 
1480 			int cc_objs = os_access_once(ccp->cc_objs);
1481 			if (cc_objs > 0) {
1482 				cached += cc_objs;
1483 			}
1484 
1485 			int cc_pobjs = os_access_once(ccp->cc_pobjs);
1486 			if (cc_pobjs > 0) {
1487 				cached += cc_pobjs;
1488 			}
1489 		}
1490 		cached += (bl_total * bktsize);
1491 
1492 		if (m_total(mclass) - m_infree(mclass) - cached >= (m_maxlimit(mclass) * mb_memory_pressure_percentage) / 100) {
1493 			os_log(OS_LOG_DEFAULT,
1494 			    "%s memory-pressure on mbuf due to class %u, total %u free %u cached %u max %u",
1495 			    __func__, mclass, m_total(mclass), m_infree(mclass), cached, m_maxlimit(mclass));
1496 			return true;
1497 		}
1498 	}
1499 
1500 	return false;
1501 }
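/*
 * Illustrative only: a hypothetical caller would typically consult this
 * before enqueueing a packet, preferring to drop under memory pressure:
 *
 *	if (mbuf_class_under_pressure(m)) {
 *		m_freem(m);
 *		return ENOBUFS;
 *	}
 */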
1502 
1503 #if defined(__LP64__)
1504 typedef struct ncl_tbl {
1505 	uint64_t nt_maxmem;     /* memory (sane) size */
1506 	uint32_t nt_mbpool;     /* mbuf pool size */
1507 } ncl_tbl_t;
1508 
1509 static const ncl_tbl_t ncl_table[] = {
1510 	{ (1ULL << GBSHIFT) /*  1 GB */, (64 << MBSHIFT) /*  64 MB */ },
1511 	{ (1ULL << (GBSHIFT + 2)) /*  4 GB */, (96 << MBSHIFT) /*  96 MB */ },
1512 	{ (1ULL << (GBSHIFT + 3)) /* 8 GB */, (128 << MBSHIFT) /* 128 MB */ },
1513 	{ (1ULL << (GBSHIFT + 4)) /* 16 GB */, (256 << MBSHIFT) /* 256 MB */ },
1514 	{ (1ULL << (GBSHIFT + 5)) /* 32 GB */, (512 << MBSHIFT) /* 512 MB */ },
1515 	{ 0, 0 }
1516 };
1517 #endif /* __LP64__ */
1518 
1519 __private_extern__ unsigned int
1520 mbuf_default_ncl(uint64_t mem)
1521 {
1522 #if !defined(__LP64__)
1523 	unsigned int n;
1524 	/*
1525 	 * 32-bit kernel (default to 64MB of mbuf pool for >= 1GB RAM).
1526 	 */
1527 	if ((n = ((mem / 16) / MCLBYTES)) > 32768) {
1528 		n = 32768;
1529 	}
1530 #else
1531 	unsigned int n, i;
1532 	/*
1533 	 * 64-bit kernel (mbuf pool size based on table).
1534 	 */
1535 	n = ncl_table[0].nt_mbpool;
1536 	for (i = 0; ncl_table[i].nt_mbpool != 0; i++) {
1537 		if (mem < ncl_table[i].nt_maxmem) {
1538 			break;
1539 		}
1540 		n = ncl_table[i].nt_mbpool;
1541 	}
1542 	n >>= MCLSHIFT;
1543 #endif /* !__LP64__ */
1544 	return n;
1545 }
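/*
 * Worked example (illustrative only, assuming MCLBYTES == 2048, i.e.
 * MCLSHIFT == 11): on a 64-bit kernel with mem == 8GB, the table walk
 * stops at the 16GB row with n == 128MB (taken from the 8GB row), and
 * the function returns (128 << MBSHIFT) >> 11 == 65536 clusters.
 */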
1546 
1547 __private_extern__ void
1548 mbinit(void)
1549 {
1550 	unsigned int m;
1551 	unsigned int initmcl = 0;
1552 	thread_t thread = THREAD_NULL;
1553 
1554 	microuptime(&mb_start);
1555 
1556 	/*
1557 	 * These MBUF_ values must be equal to their private counterparts.
1558 	 */
1559 	_CASSERT(MBUF_EXT == M_EXT);
1560 	_CASSERT(MBUF_PKTHDR == M_PKTHDR);
1561 	_CASSERT(MBUF_EOR == M_EOR);
1562 	_CASSERT(MBUF_LOOP == M_LOOP);
1563 	_CASSERT(MBUF_BCAST == M_BCAST);
1564 	_CASSERT(MBUF_MCAST == M_MCAST);
1565 	_CASSERT(MBUF_FRAG == M_FRAG);
1566 	_CASSERT(MBUF_FIRSTFRAG == M_FIRSTFRAG);
1567 	_CASSERT(MBUF_LASTFRAG == M_LASTFRAG);
1568 	_CASSERT(MBUF_PROMISC == M_PROMISC);
1569 	_CASSERT(MBUF_HASFCS == M_HASFCS);
1570 
1571 	_CASSERT(MBUF_TYPE_FREE == MT_FREE);
1572 	_CASSERT(MBUF_TYPE_DATA == MT_DATA);
1573 	_CASSERT(MBUF_TYPE_HEADER == MT_HEADER);
1574 	_CASSERT(MBUF_TYPE_SOCKET == MT_SOCKET);
1575 	_CASSERT(MBUF_TYPE_PCB == MT_PCB);
1576 	_CASSERT(MBUF_TYPE_RTABLE == MT_RTABLE);
1577 	_CASSERT(MBUF_TYPE_HTABLE == MT_HTABLE);
1578 	_CASSERT(MBUF_TYPE_ATABLE == MT_ATABLE);
1579 	_CASSERT(MBUF_TYPE_SONAME == MT_SONAME);
1580 	_CASSERT(MBUF_TYPE_SOOPTS == MT_SOOPTS);
1581 	_CASSERT(MBUF_TYPE_FTABLE == MT_FTABLE);
1582 	_CASSERT(MBUF_TYPE_RIGHTS == MT_RIGHTS);
1583 	_CASSERT(MBUF_TYPE_IFADDR == MT_IFADDR);
1584 	_CASSERT(MBUF_TYPE_CONTROL == MT_CONTROL);
1585 	_CASSERT(MBUF_TYPE_OOBDATA == MT_OOBDATA);
1586 
1587 	_CASSERT(MBUF_TSO_IPV4 == CSUM_TSO_IPV4);
1588 	_CASSERT(MBUF_TSO_IPV6 == CSUM_TSO_IPV6);
1589 	_CASSERT(MBUF_CSUM_REQ_SUM16 == CSUM_PARTIAL);
1590 	_CASSERT(MBUF_CSUM_TCP_SUM16 == MBUF_CSUM_REQ_SUM16);
1591 	_CASSERT(MBUF_CSUM_REQ_ZERO_INVERT == CSUM_ZERO_INVERT);
1592 	_CASSERT(MBUF_CSUM_REQ_IP == CSUM_IP);
1593 	_CASSERT(MBUF_CSUM_REQ_TCP == CSUM_TCP);
1594 	_CASSERT(MBUF_CSUM_REQ_UDP == CSUM_UDP);
1595 	_CASSERT(MBUF_CSUM_REQ_TCPIPV6 == CSUM_TCPIPV6);
1596 	_CASSERT(MBUF_CSUM_REQ_UDPIPV6 == CSUM_UDPIPV6);
1597 	_CASSERT(MBUF_CSUM_DID_IP == CSUM_IP_CHECKED);
1598 	_CASSERT(MBUF_CSUM_IP_GOOD == CSUM_IP_VALID);
1599 	_CASSERT(MBUF_CSUM_DID_DATA == CSUM_DATA_VALID);
1600 	_CASSERT(MBUF_CSUM_PSEUDO_HDR == CSUM_PSEUDO_HDR);
1601 
1602 	_CASSERT(MBUF_WAITOK == M_WAIT);
1603 	_CASSERT(MBUF_DONTWAIT == M_DONTWAIT);
1604 	_CASSERT(MBUF_COPYALL == M_COPYALL);
1605 
1606 	_CASSERT(MBUF_SC2TC(MBUF_SC_BK_SYS) == MBUF_TC_BK);
1607 	_CASSERT(MBUF_SC2TC(MBUF_SC_BK) == MBUF_TC_BK);
1608 	_CASSERT(MBUF_SC2TC(MBUF_SC_BE) == MBUF_TC_BE);
1609 	_CASSERT(MBUF_SC2TC(MBUF_SC_RD) == MBUF_TC_BE);
1610 	_CASSERT(MBUF_SC2TC(MBUF_SC_OAM) == MBUF_TC_BE);
1611 	_CASSERT(MBUF_SC2TC(MBUF_SC_AV) == MBUF_TC_VI);
1612 	_CASSERT(MBUF_SC2TC(MBUF_SC_RV) == MBUF_TC_VI);
1613 	_CASSERT(MBUF_SC2TC(MBUF_SC_VI) == MBUF_TC_VI);
1614 	_CASSERT(MBUF_SC2TC(MBUF_SC_SIG) == MBUF_TC_VI);
1615 	_CASSERT(MBUF_SC2TC(MBUF_SC_VO) == MBUF_TC_VO);
1616 	_CASSERT(MBUF_SC2TC(MBUF_SC_CTL) == MBUF_TC_VO);
1617 
1618 	_CASSERT(MBUF_TC2SCVAL(MBUF_TC_BK) == SCVAL_BK);
1619 	_CASSERT(MBUF_TC2SCVAL(MBUF_TC_BE) == SCVAL_BE);
1620 	_CASSERT(MBUF_TC2SCVAL(MBUF_TC_VI) == SCVAL_VI);
1621 	_CASSERT(MBUF_TC2SCVAL(MBUF_TC_VO) == SCVAL_VO);
1622 
1623 	/* Module specific scratch space (32-bit alignment requirement) */
1624 	_CASSERT(!(offsetof(struct mbuf, m_pkthdr.pkt_mpriv) %
1625 	    sizeof(uint32_t)));
1626 
1627 	/* m_pktdat needs to start at a 128-bit aligned offset! */
1628 	_CASSERT((offsetof(struct mbuf, m_pktdat) % 16) == 0);
1629 
1630 	/* Initialize random red zone cookie value */
1631 	_CASSERT(sizeof(mb_redzone_cookie) ==
1632 	    sizeof(((struct pkthdr *)0)->redzone));
1633 	read_random(&mb_redzone_cookie, sizeof(mb_redzone_cookie));
1634 	read_random(&mb_obscure_extref, sizeof(mb_obscure_extref));
1635 	read_random(&mb_obscure_extfree, sizeof(mb_obscure_extfree));
1636 	mb_obscure_extref |= 0x3;
1637 	mb_obscure_extfree |= 0x3;
1638 
1639 	/* Make sure we don't save more than we should */
1640 	_CASSERT(MCA_SAVED_MBUF_SIZE <= sizeof(struct mbuf));
1641 
1642 	if (nmbclusters == 0) {
1643 		nmbclusters = NMBCLUSTERS;
1644 	}
1645 
1646 	/* This should be a sane (at least even) value by now */
1647 	VERIFY(nmbclusters != 0 && !(nmbclusters & 0x1));
1648 
1649 	/* Setup the mbuf table */
1650 	mbuf_table_init();
1651 
1652 	/*
1653 	 * Allocate cluster slabs table:
1654 	 *
1655 	 *	maxslabgrp = (N * 2048) / (1024 * 1024)
1656 	 *
1657 	 * Where N is nmbclusters rounded up to the nearest 512.  This yields
1658 	 * mcl_slabg_t units, each one representing 1 MB of memory.
1659 	 */
1660 	maxslabgrp =
1661 	    (P2ROUNDUP(nmbclusters, (MBSIZE >> MCLSHIFT)) << MCLSHIFT) >> MBSHIFT;
1662 	slabstbl = zalloc_permanent(maxslabgrp * sizeof(mcl_slabg_t *),
1663 	    ZALIGN(mcl_slabg_t));
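	/*
	 * Worked example (illustrative only, assuming MCLSHIFT == 11):
	 * with nmbclusters == 65536 (a 128MB cluster map),
	 * P2ROUNDUP(65536, 512) == 65536, so maxslabgrp ==
	 * (65536 << 11) >> 20 == 128 slab groups, i.e. one mcl_slabg_t
	 * per MB of the map.
	 */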
1664 
1665 	/*
1666 	 * Allocate audit structures, if needed:
1667 	 *
1668 	 *	maxclaudit = (maxslabgrp * 1024 * 1024) / PAGE_SIZE
1669 	 *
1670 	 * This yields mcl_audit_t units, each one representing a page.
1671 	 */
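	/*
	 * Worked example (illustrative only, assuming 4KB pages): with
	 * maxslabgrp == 128 as above, maxclaudit == (128 << 20) >> 12 ==
	 * 32768 mcl_audit_t entries, one per page of the cluster map.
	 */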
1672 	PE_parse_boot_argn("mbuf_debug", &mbuf_debug, sizeof(mbuf_debug));
1673 	mbuf_debug |= mcache_getflags();
1674 	if (mbuf_debug & MCF_DEBUG) {
1675 		int l;
1676 		mcl_audit_t *mclad;
1677 		maxclaudit = ((maxslabgrp << MBSHIFT) >> PAGE_SHIFT);
1678 		mclaudit = zalloc_permanent(maxclaudit * sizeof(*mclaudit),
1679 		    ZALIGN(mcl_audit_t));
1680 		for (l = 0, mclad = mclaudit; l < maxclaudit; l++) {
1681 			mclad[l].cl_audit = zalloc_permanent(NMBPG * sizeof(mcache_audit_t *),
1682 			    ZALIGN_PTR);
1683 		}
1684 
1685 		mcl_audit_con_cache = mcache_create("mcl_audit_contents",
1686 		    AUDIT_CONTENTS_SIZE, sizeof(u_int64_t), 0, MCR_SLEEP);
1687 		VERIFY(mcl_audit_con_cache != NULL);
1688 	}
1689 	mclverify = (mbuf_debug & MCF_VERIFY);
1690 	mcltrace = (mbuf_debug & MCF_TRACE);
1691 	mclfindleak = !(mbuf_debug & MCF_NOLEAKLOG);
1692 	mclexpleak = mclfindleak && (mbuf_debug & MCF_EXPLEAKLOG);
1693 
1694 	/* Enable mbuf leak logging, with a lock to protect the tables */
1695 
1696 	mleak_activate();
1697 
1698 	/*
1699 	 * Allocate structure for per-CPU statistics that's aligned
1700 	 * on the CPU cache boundary; this code assumes that we never
1701 	 * uninitialize this framework, since the original address
1702 	 * before alignment is not saved.
1703 	 */
1704 	ncpu = ml_wait_max_cpus();
1705 
1706 	/* Calculate the number of pages assigned to the cluster pool */
1707 	mcl_pages = (nmbclusters << MCLSHIFT) / PAGE_SIZE;
1708 	mcl_paddr = zalloc_permanent(mcl_pages * sizeof(ppnum_t),
1709 	    ZALIGN(ppnum_t));
1710 
1711 	/* Register with the I/O Bus mapper */
1712 	mcl_paddr_base = IOMapperIOVMAlloc(mcl_pages);
1713 
1714 	embutl = (mbutl + (nmbclusters * MCLBYTES));
1715 	VERIFY(((embutl - mbutl) % MBIGCLBYTES) == 0);
1716 
1717 	/* Prime up the freelist */
1718 	PE_parse_boot_argn("initmcl", &initmcl, sizeof(initmcl));
1719 	if (initmcl != 0) {
1720 		initmcl >>= NCLPBGSHIFT;        /* become a 4K unit */
1721 		if (initmcl > m_maxlimit(MC_BIGCL)) {
1722 			initmcl = m_maxlimit(MC_BIGCL);
1723 		}
1724 	}
1725 	if (initmcl < m_minlimit(MC_BIGCL)) {
1726 		initmcl = m_minlimit(MC_BIGCL);
1727 	}
1728 
1729 	lck_mtx_lock(mbuf_mlock);
1730 
1731 	/*
1732 	 * For classes with non-zero minimum limits, populate their freelists
1733 	 * so that m_total(class) is at least m_minlimit(class).
1734 	 */
1735 	VERIFY(m_total(MC_BIGCL) == 0 && m_minlimit(MC_BIGCL) != 0);
1736 	freelist_populate(m_class(MC_BIGCL), initmcl, M_WAIT);
1737 	VERIFY(m_total(MC_BIGCL) >= m_minlimit(MC_BIGCL));
1738 	freelist_init(m_class(MC_CL));
1739 
1740 	for (m = 0; m < NELEM(mbuf_table); m++) {
1741 		/* Make sure we didn't miss any */
1742 		VERIFY(m_minlimit(m_class(m)) == 0 ||
1743 		    m_total(m_class(m)) >= m_minlimit(m_class(m)));
1744 
1745 		/* populate the initial sizes and report from there on */
1746 		m_peak(m_class(m)) = m_total(m_class(m));
1747 	}
1748 	mb_peak_newreport = FALSE;
1749 
1750 	lck_mtx_unlock(mbuf_mlock);
1751 
1752 	(void) kernel_thread_start((thread_continue_t)mbuf_worker_thread_init,
1753 	    NULL, &thread);
1754 	thread_deallocate(thread);
1755 
1756 	ref_cache = mcache_create("mext_ref", sizeof(struct ext_ref),
1757 	    0, 0, MCR_SLEEP);
1758 
1759 	/* Create the cache for each class */
1760 	for (m = 0; m < NELEM(mbuf_table); m++) {
1761 		void *allocfunc, *freefunc, *auditfunc, *logfunc;
1762 		u_int32_t flags;
1763 
1764 		flags = mbuf_debug;
1765 		if (m_class(m) == MC_MBUF_CL || m_class(m) == MC_MBUF_BIGCL ||
1766 		    m_class(m) == MC_MBUF_16KCL) {
1767 			allocfunc = mbuf_cslab_alloc;
1768 			freefunc = mbuf_cslab_free;
1769 			auditfunc = mbuf_cslab_audit;
1770 			logfunc = mleak_logger;
1771 		} else {
1772 			allocfunc = mbuf_slab_alloc;
1773 			freefunc = mbuf_slab_free;
1774 			auditfunc = mbuf_slab_audit;
1775 			logfunc = mleak_logger;
1776 		}
1777 
1778 		/*
1779 		 * Disable per-CPU caches for jumbo classes if there
1780 		 * is no jumbo cluster pool available in the system.
1781 		 * The cache itself is still created (but will never
1782 		 * be populated) since it simplifies the code.
1783 		 */
1784 		if ((m_class(m) == MC_MBUF_16KCL || m_class(m) == MC_16KCL) &&
1785 		    njcl == 0) {
1786 			flags |= MCF_NOCPUCACHE;
1787 		}
1788 
1789 		if (!mclfindleak) {
1790 			flags |= MCF_NOLEAKLOG;
1791 		}
1792 
1793 		m_cache(m) = mcache_create_ext(m_cname(m), m_maxsize(m),
1794 		    allocfunc, freefunc, auditfunc, logfunc, mbuf_slab_notify,
1795 		    (void *)(uintptr_t)m, flags, MCR_SLEEP);
1796 	}
1797 
1798 	/*
1799 	 * Set the max limit on sb_max to be 1/16th of the size of
1800 	 * memory allocated for mbuf clusters.
1801 	 */
1802 	high_sb_max = (nmbclusters << (MCLSHIFT - 4));
1803 	if (high_sb_max < sb_max) {
1804 		/* sb_max is too large for this configuration, scale it down */
1805 		if (high_sb_max > (1 << MBSHIFT)) {
1806 			/* We have at least 16MB of mbuf pool */
1807 			sb_max = high_sb_max;
1808 		} else if ((nmbclusters << MCLSHIFT) > (1 << MBSHIFT)) {
1809 			/*
1810 			 * If we have more than 1MB of mbuf pool, cap the size
1811 			 * of the max sock buf at 1MB
1812 			 */
1813 			sb_max = high_sb_max = (1 << MBSHIFT);
1814 		} else {
1815 			sb_max = high_sb_max;
1816 		}
1817 	}
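	/*
	 * Worked example (illustrative only, assuming MCLSHIFT == 11):
	 * with nmbclusters == 8192 (a 16MB pool), high_sb_max ==
	 * 8192 << (11 - 4) == 1MB.  If the configured sb_max exceeds
	 * that, the pool is above 1MB but high_sb_max is not, so sb_max
	 * is capped at exactly 1MB by the middle branch above.
	 */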
1818 
1819 	/* allocate space for mbuf_dump_buf */
1820 	mbuf_dump_buf = zalloc_permanent(MBUF_DUMP_BUF_SIZE, ZALIGN_NONE);
1821 
1822 	if (mbuf_debug & MCF_DEBUG) {
1823 		printf("%s: MLEN %d, MHLEN %d\n", __func__,
1824 		    (int)_MLEN, (int)_MHLEN);
1825 	}
1826 
1827 	printf("%s: done [%d MB total pool size, (%d/%d) split]\n", __func__,
1828 	    (nmbclusters << MCLSHIFT) >> MBSHIFT,
1829 	    (nclusters << MCLSHIFT) >> MBSHIFT,
1830 	    (njcl << MCLSHIFT) >> MBSHIFT);
1831 }
1832 
1833 /*
1834  * Obtain a slab of object(s) from the class's freelist.
1835  */
1836 static mcache_obj_t *
1837 slab_alloc(mbuf_class_t class, int wait)
1838 {
1839 	mcl_slab_t *sp;
1840 	mcache_obj_t *buf;
1841 
1842 	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
1843 
1844 	/* This should always be NULL for us */
1845 	VERIFY(m_cobjlist(class) == NULL);
1846 
1847 	/*
1848 	 * Treat composite objects as having longer lifespan by using
1849 	 * a slab from the reverse direction, in the hope that this could
1850 	 * reduce the probability of fragmentation for slabs that hold
1851 	 * more than one buffer chunk (e.g. mbuf slabs).  For other
1852 	 * slabs, this probably doesn't make much of a difference.
1853 	 */
1854 	if ((class == MC_MBUF || class == MC_CL || class == MC_BIGCL)
1855 	    && (wait & MCR_COMP)) {
1856 		sp = (mcl_slab_t *)TAILQ_LAST(&m_slablist(class), mcl_slhead);
1857 	} else {
1858 		sp = (mcl_slab_t *)TAILQ_FIRST(&m_slablist(class));
1859 	}
1860 
1861 	if (sp == NULL) {
1862 		VERIFY(m_infree(class) == 0 && m_slab_cnt(class) == 0);
1863 		/* The slab list for this class is empty */
1864 		return NULL;
1865 	}
1866 
1867 	VERIFY(m_infree(class) > 0);
1868 	VERIFY(!slab_is_detached(sp));
1869 	VERIFY(sp->sl_class == class &&
1870 	    (sp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) == SLF_MAPPED);
1871 	buf = sp->sl_head;
1872 	VERIFY(slab_inrange(sp, buf) && sp == slab_get(buf));
1873 	sp->sl_head = buf->obj_next;
1874 	/* Increment slab reference */
1875 	sp->sl_refcnt++;
1876 
1877 	VERIFY(sp->sl_head != NULL || sp->sl_refcnt == sp->sl_chunks);
1878 
1879 	if (sp->sl_head != NULL && !slab_inrange(sp, sp->sl_head)) {
1880 		slab_nextptr_panic(sp, sp->sl_head);
1881 		/* In case sl_head is in the map but not in the slab */
1882 		VERIFY(slab_inrange(sp, sp->sl_head));
1883 		/* NOTREACHED */
1884 	}
1885 
1886 	if (mclaudit != NULL) {
1887 		mcache_audit_t *mca = mcl_audit_buf2mca(class, buf);
1888 		mca->mca_uflags = 0;
1889 		/* Save contents on mbuf objects only */
1890 		if (class == MC_MBUF) {
1891 			mca->mca_uflags |= MB_SCVALID;
1892 		}
1893 	}
1894 
1895 	if (class == MC_CL) {
1896 		mbstat.m_clfree = (--m_infree(MC_CL)) + m_infree(MC_MBUF_CL);
1897 		/*
1898 		 * A 2K cluster slab can have at most NCLPG references.
1899 		 */
1900 		VERIFY(sp->sl_refcnt >= 1 && sp->sl_refcnt <= NCLPG &&
1901 		    sp->sl_chunks == NCLPG && sp->sl_len == PAGE_SIZE);
1902 		VERIFY(sp->sl_refcnt < NCLPG || sp->sl_head == NULL);
1903 	} else if (class == MC_BIGCL) {
1904 		mbstat.m_bigclfree = (--m_infree(MC_BIGCL)) +
1905 		    m_infree(MC_MBUF_BIGCL);
1906 		/*
1907 		 * A 4K cluster slab can have NBCLPG references.
1908 		 */
1909 		VERIFY(sp->sl_refcnt >= 1 && sp->sl_chunks == NBCLPG &&
1910 		    sp->sl_len == PAGE_SIZE &&
1911 		    (sp->sl_refcnt < NBCLPG || sp->sl_head == NULL));
1912 	} else if (class == MC_16KCL) {
1913 		mcl_slab_t *nsp;
1914 		int k;
1915 
1916 		--m_infree(MC_16KCL);
1917 		VERIFY(sp->sl_refcnt == 1 && sp->sl_chunks == 1 &&
1918 		    sp->sl_len == m_maxsize(class) && sp->sl_head == NULL);
1919 		/*
1920 		 * Increment 2nd-Nth slab reference, where N is NSLABSP16KB.
1921 		 * A 16KB big cluster takes NSLABSP16KB slabs, each having at
1922 		 * most 1 reference.
1923 		 */
1924 		for (nsp = sp, k = 1; k < NSLABSP16KB; k++) {
1925 			nsp = nsp->sl_next;
1926 			/* Next slab must already be present */
1927 			VERIFY(nsp != NULL);
1928 			nsp->sl_refcnt++;
1929 			VERIFY(!slab_is_detached(nsp));
1930 			VERIFY(nsp->sl_class == MC_16KCL &&
1931 			    nsp->sl_flags == (SLF_MAPPED | SLF_PARTIAL) &&
1932 			    nsp->sl_refcnt == 1 && nsp->sl_chunks == 0 &&
1933 			    nsp->sl_len == 0 && nsp->sl_base == sp->sl_base &&
1934 			    nsp->sl_head == NULL);
1935 		}
1936 	} else {
1937 		VERIFY(class == MC_MBUF);
1938 		--m_infree(MC_MBUF);
1939 		/*
1940 		 * If auditing is turned on, this check is
1941 		 * deferred until later in mbuf_slab_audit().
1942 		 */
1943 		if (mclaudit == NULL) {
1944 			_MCHECK((struct mbuf *)buf);
1945 		}
1946 		/*
1947 		 * Since we have incremented the reference count above,
1948 		 * an mbuf slab (formerly a 4KB cluster slab that was cut
1949 		 * up into mbufs) must have a reference count between 1
1950 		 * and NMBPG at this point.
1951 		 */
1952 		VERIFY(sp->sl_refcnt >= 1 && sp->sl_refcnt <= NMBPG &&
1953 		    sp->sl_chunks == NMBPG &&
1954 		    sp->sl_len == PAGE_SIZE);
1955 		VERIFY(sp->sl_refcnt < NMBPG || sp->sl_head == NULL);
1956 	}
1957 
1958 	/* If empty, remove this slab from the class's freelist */
1959 	if (sp->sl_head == NULL) {
1960 		VERIFY(class != MC_MBUF || sp->sl_refcnt == NMBPG);
1961 		VERIFY(class != MC_CL || sp->sl_refcnt == NCLPG);
1962 		VERIFY(class != MC_BIGCL || sp->sl_refcnt == NBCLPG);
1963 		slab_remove(sp, class);
1964 	}
1965 
1966 	return buf;
1967 }
1968 
1969 /*
1970  * Place a slab of object(s) back into a class's slab list.
1971  */
1972 static void
1973 slab_free(mbuf_class_t class, mcache_obj_t *buf)
1974 {
1975 	mcl_slab_t *sp;
1976 	boolean_t reinit_supercl = false;
1977 	mbuf_class_t super_class;
1978 
1979 	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
1980 
1981 	VERIFY(class != MC_16KCL || njcl > 0);
1982 	VERIFY(buf->obj_next == NULL);
1983 
1984 	/*
1985 	 * Synchronizing with m_clalloc, as it reads m_total, while we here
1986 	 * are modifying m_total.
1987 	 */
1988 	while (mb_clalloc_busy) {
1989 		mb_clalloc_waiters++;
1990 		(void) msleep(mb_clalloc_waitchan, mbuf_mlock,
1991 		    (PZERO - 1), "m_clalloc", NULL);
1992 		LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
1993 	}
1994 
1995 	/* We are busy now; tell everyone else to go away */
1996 	mb_clalloc_busy = TRUE;
1997 
1998 	sp = slab_get(buf);
1999 	VERIFY(sp->sl_class == class && slab_inrange(sp, buf) &&
2000 	    (sp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) == SLF_MAPPED);
2001 
2002 	/* Decrement slab reference */
2003 	sp->sl_refcnt--;
2004 
2005 	if (class == MC_CL) {
2006 		VERIFY(IS_P2ALIGNED(buf, MCLBYTES));
2007 		/*
2008 		 * A slab that has been split for 2KB clusters can have
2009 		 * at most 1 outstanding reference at this point.
2010 		 */
2011 		VERIFY(sp->sl_refcnt >= 0 && sp->sl_refcnt <= (NCLPG - 1) &&
2012 		    sp->sl_chunks == NCLPG && sp->sl_len == PAGE_SIZE);
2013 		VERIFY(sp->sl_refcnt < (NCLPG - 1) ||
2014 		    (slab_is_detached(sp) && sp->sl_head == NULL));
2015 	} else if (class == MC_BIGCL) {
2016 		VERIFY(IS_P2ALIGNED(buf, MBIGCLBYTES));
2017 
2018 		/* A 4KB cluster slab can have NBCLPG references at most */
2019 		VERIFY(sp->sl_refcnt >= 0 && sp->sl_chunks == NBCLPG);
2020 		VERIFY(sp->sl_refcnt < (NBCLPG - 1) ||
2021 		    (slab_is_detached(sp) && sp->sl_head == NULL));
2022 	} else if (class == MC_16KCL) {
2023 		mcl_slab_t *nsp;
2024 		int k;
2025 		/*
2026 		 * A 16KB cluster takes NSLABSP16KB slabs, all of which
2027 		 * must now have a reference count of 0.
2028 		 */
2029 		VERIFY(IS_P2ALIGNED(buf, PAGE_SIZE));
2030 		VERIFY(sp->sl_refcnt == 0 && sp->sl_chunks == 1 &&
2031 		    sp->sl_len == m_maxsize(class) && sp->sl_head == NULL);
2032 		VERIFY(slab_is_detached(sp));
2033 		for (nsp = sp, k = 1; k < NSLABSP16KB; k++) {
2034 			nsp = nsp->sl_next;
2035 			/* Next slab must already be present */
2036 			VERIFY(nsp != NULL);
2037 			nsp->sl_refcnt--;
2038 			VERIFY(slab_is_detached(nsp));
2039 			VERIFY(nsp->sl_class == MC_16KCL &&
2040 			    (nsp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) &&
2041 			    nsp->sl_refcnt == 0 && nsp->sl_chunks == 0 &&
2042 			    nsp->sl_len == 0 && nsp->sl_base == sp->sl_base &&
2043 			    nsp->sl_head == NULL);
2044 		}
2045 	} else {
2046 		/*
2047 		 * A slab that has been split for mbufs has at most
2048 		 * NMBPG reference counts.  Since we have decremented
2049 		 * one reference above, it must now be between 0 and
2050 		 * NMBPG-1.
2051 		 */
2052 		VERIFY(class == MC_MBUF);
2053 		VERIFY(sp->sl_refcnt >= 0 &&
2054 		    sp->sl_refcnt <= (NMBPG - 1) &&
2055 		    sp->sl_chunks == NMBPG &&
2056 		    sp->sl_len == PAGE_SIZE);
2057 		VERIFY(sp->sl_refcnt < (NMBPG - 1) ||
2058 		    (slab_is_detached(sp) && sp->sl_head == NULL));
2059 	}
2060 
2061 	/*
2062 	 * When auditing is enabled, ensure that the buffer still
2063 	 * contains the free pattern.  Otherwise it got corrupted
2064 	 * while at the CPU cache layer.
2065 	 */
2066 	if (mclaudit != NULL) {
2067 		mcache_audit_t *mca = mcl_audit_buf2mca(class, buf);
2068 		if (mclverify) {
2069 			mcache_audit_free_verify(mca, buf, 0,
2070 			    m_maxsize(class));
2071 		}
2072 		mca->mca_uflags &= ~MB_SCVALID;
2073 	}
2074 
2075 	if (class == MC_CL) {
2076 		mbstat.m_clfree = (++m_infree(MC_CL)) + m_infree(MC_MBUF_CL);
2077 		buf->obj_next = sp->sl_head;
2078 	} else if (class == MC_BIGCL) {
2079 		mbstat.m_bigclfree = (++m_infree(MC_BIGCL)) +
2080 		    m_infree(MC_MBUF_BIGCL);
2081 		buf->obj_next = sp->sl_head;
2082 	} else if (class == MC_16KCL) {
2083 		++m_infree(MC_16KCL);
2084 	} else {
2085 		++m_infree(MC_MBUF);
2086 		buf->obj_next = sp->sl_head;
2087 	}
2088 	sp->sl_head = buf;
2089 
2090 	/*
2091 	 * If a slab has been split into either 2KB clusters or mbufs,
2092 	 * turn it back into one which holds a single 4KB or 16KB
2093 	 * cluster, depending on the page size.
2094 	 */
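	/*
	 * E.g. (illustrative only), with 4KB pages and 256-byte mbufs:
	 * once the last of the NMBPG == 16 mbufs (or NCLPG == 2 2KB
	 * clusters) carved from a page is freed, the page is reclaimed
	 * below as a single cluster for the super class.
	 */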
2095 	if (m_maxsize(MC_BIGCL) == PAGE_SIZE) {
2096 		super_class = MC_BIGCL;
2097 	} else {
2098 		VERIFY(PAGE_SIZE == m_maxsize(MC_16KCL));
2099 		super_class = MC_16KCL;
2100 	}
2101 	if (class == MC_MBUF && sp->sl_refcnt == 0 &&
2102 	    m_total(class) >= (m_minlimit(class) + NMBPG) &&
2103 	    m_total(super_class) < m_maxlimit(super_class)) {
2104 		int i = NMBPG;
2105 
2106 		m_total(MC_MBUF) -= NMBPG;
2107 		mbstat.m_mbufs = m_total(MC_MBUF);
2108 		m_infree(MC_MBUF) -= NMBPG;
2109 		mtype_stat_add(MT_FREE, -((unsigned)NMBPG));
2110 
2111 		while (i--) {
2112 			struct mbuf *m = sp->sl_head;
2113 			VERIFY(m != NULL);
2114 			sp->sl_head = m->m_next;
2115 			m->m_next = NULL;
2116 		}
2117 		reinit_supercl = true;
2118 	} else if (class == MC_CL && sp->sl_refcnt == 0 &&
2119 	    m_total(class) >= (m_minlimit(class) + NCLPG) &&
2120 	    m_total(super_class) < m_maxlimit(super_class)) {
2121 		int i = NCLPG;
2122 
2123 		m_total(MC_CL) -= NCLPG;
2124 		mbstat.m_clusters = m_total(MC_CL);
2125 		m_infree(MC_CL) -= NCLPG;
2126 
2127 		while (i--) {
2128 			union mcluster *c = sp->sl_head;
2129 			VERIFY(c != NULL);
2130 			sp->sl_head = c->mcl_next;
2131 			c->mcl_next = NULL;
2132 		}
2133 		reinit_supercl = true;
2134 	} else if (class == MC_BIGCL && super_class != MC_BIGCL &&
2135 	    sp->sl_refcnt == 0 &&
2136 	    m_total(class) >= (m_minlimit(class) + NBCLPG) &&
2137 	    m_total(super_class) < m_maxlimit(super_class)) {
2138 		int i = NBCLPG;
2139 
2140 		VERIFY(super_class == MC_16KCL);
2141 		m_total(MC_BIGCL) -= NBCLPG;
2142 		mbstat.m_bigclusters = m_total(MC_BIGCL);
2143 		m_infree(MC_BIGCL) -= NBCLPG;
2144 
2145 		while (i--) {
2146 			union mbigcluster *bc = sp->sl_head;
2147 			VERIFY(bc != NULL);
2148 			sp->sl_head = bc->mbc_next;
2149 			bc->mbc_next = NULL;
2150 		}
2151 		reinit_supercl = true;
2152 	}
2153 
2154 	if (reinit_supercl) {
2155 		VERIFY(sp->sl_head == NULL);
2156 		VERIFY(m_total(class) >= m_minlimit(class));
2157 		slab_remove(sp, class);
2158 
2159 		/* Reinitialize it as a cluster for the super class */
2160 		m_total(super_class)++;
2161 		m_infree(super_class)++;
2162 		VERIFY(sp->sl_flags == (SLF_MAPPED | SLF_DETACHED) &&
2163 		    sp->sl_len == PAGE_SIZE && sp->sl_refcnt == 0);
2164 
2165 		slab_init(sp, super_class, SLF_MAPPED, sp->sl_base,
2166 		    sp->sl_base, PAGE_SIZE, 0, 1);
2167 		if (mclverify) {
2168 			mcache_set_pattern(MCACHE_FREE_PATTERN,
2169 			    (caddr_t)sp->sl_base, sp->sl_len);
2170 		}
2171 		((mcache_obj_t *)(sp->sl_base))->obj_next = NULL;
2172 
2173 		if (super_class == MC_BIGCL) {
2174 			mbstat.m_bigclusters = m_total(MC_BIGCL);
2175 			mbstat.m_bigclfree = m_infree(MC_BIGCL) +
2176 			    m_infree(MC_MBUF_BIGCL);
2177 		}
2178 
2179 		VERIFY(slab_is_detached(sp));
2180 		VERIFY(m_total(super_class) <= m_maxlimit(super_class));
2181 
2182 		/* And finally switch class */
2183 		class = super_class;
2184 	}
2185 
2186 	/* Reinsert the slab to the class's slab list */
2187 	if (slab_is_detached(sp)) {
2188 		slab_insert(sp, class);
2189 	}
2190 
2191 	/* We're done; let others enter */
2192 	mb_clalloc_busy = FALSE;
2193 	if (mb_clalloc_waiters > 0) {
2194 		mb_clalloc_waiters = 0;
2195 		wakeup(mb_clalloc_waitchan);
2196 	}
2197 }
2198 
2199 /*
2200  * Common allocator for rudimentary objects called by the CPU cache layer
2201  * during an allocation request whenever there is no available element in the
2202  * bucket layer.  It returns one or more elements from the appropriate global
2203  * freelist.  If the freelist is empty, it will attempt to populate it and
2204  * retry the allocation.
2205  */
2206 static unsigned int
2207 mbuf_slab_alloc(void *arg, mcache_obj_t ***plist, unsigned int num, int wait)
2208 {
2209 	mbuf_class_t class = (mbuf_class_t)arg;
2210 	unsigned int need = num;
2211 	mcache_obj_t **list = *plist;
2212 
2213 	ASSERT(MBUF_CLASS_VALID(class) && !MBUF_CLASS_COMPOSITE(class));
2214 	ASSERT(need > 0);
2215 
2216 	lck_mtx_lock(mbuf_mlock);
2217 
2218 	for (;;) {
2219 		if ((*list = slab_alloc(class, wait)) != NULL) {
2220 			(*list)->obj_next = NULL;
2221 			list = *plist = &(*list)->obj_next;
2222 
2223 			if (--need == 0) {
2224 				/*
2225 				 * If the number of elements in the freelist has
2226 				 * dropped below the low watermark, asynchronously
2227 				 * populate the freelist now rather than doing
2228 				 * it later when we run out of elements.
2229 				 */
2230 				if (!mbuf_cached_above(class, wait) &&
2231 				    m_infree(class) < (m_total(class) >> 5)) {
2232 					(void) freelist_populate(class, 1,
2233 					    M_DONTWAIT);
2234 				}
2235 				break;
2236 			}
2237 		} else {
2238 			VERIFY(m_infree(class) == 0 || class == MC_CL);
2239 
2240 			(void) freelist_populate(class, 1,
2241 			    (wait & MCR_NOSLEEP) ? M_DONTWAIT : M_WAIT);
2242 
2243 			if (m_infree(class) > 0) {
2244 				continue;
2245 			}
2246 
2247 			/* Check if there's anything at the cache layer */
2248 			if (mbuf_cached_above(class, wait)) {
2249 				break;
2250 			}
2251 
2252 			/* watchdog checkpoint */
2253 			mbuf_watchdog();
2254 
2255 			/* We have nothing and cannot block; give up */
2256 			if (wait & MCR_NOSLEEP) {
2257 				if (!(wait & MCR_TRYHARD)) {
2258 					m_fail_cnt(class)++;
2259 					mbstat.m_drops++;
2260 					break;
2261 				}
2262 			}
2263 
2264 			/*
2265 			 * If the freelist is still empty and the caller is
2266 			 * willing to be blocked, sleep on the wait channel
2267 			 * until an element is available.  Otherwise, if
2268 			 * MCR_TRYHARD is set, do our best to satisfy the
2269 			 * request without having to go to sleep.
2270 			 */
2271 			if (mbuf_worker_ready &&
2272 			    mbuf_sleep(class, need, wait)) {
2273 				break;
2274 			}
2275 
2276 			LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
2277 		}
2278 	}
2279 
2280 	m_alloc_cnt(class) += num - need;
2281 	lck_mtx_unlock(mbuf_mlock);
2282 
2283 	return num - need;
2284 }
2285 
2286 /*
2287  * Common de-allocator for rudimentary objects called by the CPU cache
2288  * layer when one or more elements need to be returned to the appropriate
2289  * global freelist.
2290  */
2291 static void
2292 mbuf_slab_free(void *arg, mcache_obj_t *list, __unused int purged)
2293 {
2294 	mbuf_class_t class = (mbuf_class_t)arg;
2295 	mcache_obj_t *nlist;
2296 	unsigned int num = 0;
2297 	int w;
2298 
2299 	ASSERT(MBUF_CLASS_VALID(class) && !MBUF_CLASS_COMPOSITE(class));
2300 
2301 	lck_mtx_lock(mbuf_mlock);
2302 
2303 	for (;;) {
2304 		nlist = list->obj_next;
2305 		list->obj_next = NULL;
2306 		slab_free(class, list);
2307 		++num;
2308 		if ((list = nlist) == NULL) {
2309 			break;
2310 		}
2311 	}
2312 	m_free_cnt(class) += num;
2313 
2314 	if ((w = mb_waiters) > 0) {
2315 		mb_waiters = 0;
2316 	}
2317 	if (w) {
2318 		mbwdog_logger("waking up all threads");
2319 	}
2320 	lck_mtx_unlock(mbuf_mlock);
2321 
2322 	if (w != 0) {
2323 		wakeup(mb_waitchan);
2324 	}
2325 }
2326 
2327 /*
2328  * Common auditor for rudimentary objects called by the CPU cache layer
2329  * during an allocation or free request.  For the former, this is called
2330  * after the objects are obtained from either the bucket or slab layer
2331  * and before they are returned to the caller.  For the latter, this is
2332  * called immediately during free and before placing the objects into
2333  * the bucket or slab layer.
2334  */
2335 static void
2336 mbuf_slab_audit(void *arg, mcache_obj_t *list, boolean_t alloc)
2337 {
2338 	mbuf_class_t class = (mbuf_class_t)arg;
2339 	mcache_audit_t *mca;
2340 
2341 	ASSERT(MBUF_CLASS_VALID(class) && !MBUF_CLASS_COMPOSITE(class));
2342 
2343 	while (list != NULL) {
2344 		lck_mtx_lock(mbuf_mlock);
2345 		mca = mcl_audit_buf2mca(class, list);
2346 
2347 		/* Do the sanity checks */
2348 		if (class == MC_MBUF) {
2349 			mcl_audit_mbuf(mca, list, FALSE, alloc);
2350 			ASSERT(mca->mca_uflags & MB_SCVALID);
2351 		} else {
2352 			mcl_audit_cluster(mca, list, m_maxsize(class),
2353 			    alloc, TRUE);
2354 			ASSERT(!(mca->mca_uflags & MB_SCVALID));
2355 		}
2356 		/* Record this transaction */
2357 		if (mcltrace) {
2358 			mcache_buffer_log(mca, list, m_cache(class), &mb_start);
2359 		}
2360 
2361 		if (alloc) {
2362 			mca->mca_uflags |= MB_INUSE;
2363 		} else {
2364 			mca->mca_uflags &= ~MB_INUSE;
2365 		}
2366 		/* Unpair the object (unconditionally) */
2367 		mca->mca_uptr = NULL;
2368 		lck_mtx_unlock(mbuf_mlock);
2369 
2370 		list = list->obj_next;
2371 	}
2372 }
2373 
2374 /*
2375  * Common notify routine for all caches.  It is called by mcache when
2376  * one or more objects get freed.  We use this indication to trigger
2377  * the wakeup of any sleeping threads so that they can retry their
2378  * allocation requests.
2379  */
2380 static void
2381 mbuf_slab_notify(void *arg, u_int32_t reason)
2382 {
2383 	mbuf_class_t class = (mbuf_class_t)arg;
2384 	int w;
2385 
2386 	ASSERT(MBUF_CLASS_VALID(class));
2387 
2388 	if (reason != MCN_RETRYALLOC) {
2389 		return;
2390 	}
2391 
2392 	lck_mtx_lock(mbuf_mlock);
2393 	if ((w = mb_waiters) > 0) {
2394 		m_notified(class)++;
2395 		mb_waiters = 0;
2396 	}
2397 	if (w) {
2398 		mbwdog_logger("waking up all threads");
2399 	}
2400 	lck_mtx_unlock(mbuf_mlock);
2401 
2402 	if (w != 0) {
2403 		wakeup(mb_waitchan);
2404 	}
2405 }
2406 
2407 /*
2408  * Obtain object(s) from the composite class's freelist.
2409  */
2410 static unsigned int
2411 cslab_alloc(mbuf_class_t class, mcache_obj_t ***plist, unsigned int num)
2412 {
2413 	unsigned int need = num;
2414 	mcl_slab_t *sp, *clsp, *nsp;
2415 	struct mbuf *m;
2416 	mcache_obj_t **list = *plist;
2417 	void *cl;
2418 
2419 	VERIFY(need > 0);
2420 	VERIFY(class != MC_MBUF_16KCL || njcl > 0);
2421 	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
2422 
2423 	/* Get what we can from the freelist */
2424 	while ((*list = m_cobjlist(class)) != NULL) {
2425 		MRANGE(*list);
2426 
2427 		m = (struct mbuf *)*list;
2428 		sp = slab_get(m);
2429 		cl = m->m_ext.ext_buf;
2430 		clsp = slab_get(cl);
2431 		VERIFY(m->m_flags == M_EXT && cl != NULL);
2432 		VERIFY(m_get_rfa(m) != NULL && MBUF_IS_COMPOSITE(m));
2433 
2434 		if (class == MC_MBUF_CL) {
2435 			VERIFY(clsp->sl_refcnt >= 1 &&
2436 			    clsp->sl_refcnt <= NCLPG);
2437 		} else {
2438 			VERIFY(clsp->sl_refcnt >= 1 &&
2439 			    clsp->sl_refcnt <= NBCLPG);
2440 		}
2441 
2442 		if (class == MC_MBUF_16KCL) {
2443 			int k;
2444 			for (nsp = clsp, k = 1; k < NSLABSP16KB; k++) {
2445 				nsp = nsp->sl_next;
2446 				/* Next slab must already be present */
2447 				VERIFY(nsp != NULL);
2448 				VERIFY(nsp->sl_refcnt == 1);
2449 			}
2450 		}
2451 
2452 		if ((m_cobjlist(class) = (*list)->obj_next) != NULL &&
2453 		    !MBUF_IN_MAP(m_cobjlist(class))) {
2454 			slab_nextptr_panic(sp, m_cobjlist(class));
2455 			/* NOTREACHED */
2456 		}
2457 		(*list)->obj_next = NULL;
2458 		list = *plist = &(*list)->obj_next;
2459 
2460 		if (--need == 0) {
2461 			break;
2462 		}
2463 	}
2464 	m_infree(class) -= (num - need);
2465 
2466 	return num - need;
2467 }
2468 
2469 /*
2470  * Place object(s) back into a composite class's freelist.
2471  */
2472 static unsigned int
2473 cslab_free(mbuf_class_t class, mcache_obj_t *list, int purged)
2474 {
2475 	mcache_obj_t *o, *tail;
2476 	unsigned int num = 0;
2477 	struct mbuf *m, *ms;
2478 	mcache_audit_t *mca = NULL;
2479 	mcache_obj_t *ref_list = NULL;
2480 	mcl_slab_t *clsp, *nsp;
2481 	void *cl;
2482 	mbuf_class_t cl_class;
2483 
2484 	ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class));
2485 	VERIFY(class != MC_MBUF_16KCL || njcl > 0);
2486 	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
2487 
2488 	if (class == MC_MBUF_CL) {
2489 		cl_class = MC_CL;
2490 	} else if (class == MC_MBUF_BIGCL) {
2491 		cl_class = MC_BIGCL;
2492 	} else {
2493 		VERIFY(class == MC_MBUF_16KCL);
2494 		cl_class = MC_16KCL;
2495 	}
2496 
2497 	o = tail = list;
2498 
2499 	while ((m = ms = (struct mbuf *)o) != NULL) {
2500 		mcache_obj_t *rfa, *nexto = o->obj_next;
2501 
2502 		/* Do the mbuf sanity checks */
2503 		if (mclaudit != NULL) {
2504 			mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m);
2505 			if (mclverify) {
2506 				mcache_audit_free_verify(mca, m, 0,
2507 				    m_maxsize(MC_MBUF));
2508 			}
2509 			ms = MCA_SAVED_MBUF_PTR(mca);
2510 		}
2511 
2512 		/* Do the cluster sanity checks */
2513 		cl = ms->m_ext.ext_buf;
2514 		clsp = slab_get(cl);
2515 		if (mclverify) {
2516 			size_t size = m_maxsize(cl_class);
2517 			mcache_audit_free_verify(mcl_audit_buf2mca(cl_class,
2518 			    (mcache_obj_t *)cl), cl, 0, size);
2519 		}
2520 		VERIFY(ms->m_type == MT_FREE);
2521 		VERIFY(ms->m_flags == M_EXT);
2522 		VERIFY(m_get_rfa(ms) != NULL && MBUF_IS_COMPOSITE(ms));
2523 		if (cl_class == MC_CL) {
2524 			VERIFY(clsp->sl_refcnt >= 1 &&
2525 			    clsp->sl_refcnt <= NCLPG);
2526 		} else {
2527 			VERIFY(clsp->sl_refcnt >= 1 &&
2528 			    clsp->sl_refcnt <= NBCLPG);
2529 		}
2530 		if (cl_class == MC_16KCL) {
2531 			int k;
2532 			for (nsp = clsp, k = 1; k < NSLABSP16KB; k++) {
2533 				nsp = nsp->sl_next;
2534 				/* Next slab must already be present */
2535 				VERIFY(nsp != NULL);
2536 				VERIFY(nsp->sl_refcnt == 1);
2537 			}
2538 		}
2539 
2540 		/*
2541 		 * If we're asked to purge, restore the actual mbuf using
2542 		 * contents of the shadow structure (if auditing is enabled)
2543 		 * and clear EXTF_COMPOSITE flag from the mbuf, as we are
2544 		 * about to free it and the attached cluster into their caches.
2545 		 */
2546 		if (purged) {
2547 			/* Restore constructed mbuf fields */
2548 			if (mclaudit != NULL) {
2549 				mcl_audit_restore_mbuf(m, mca, TRUE);
2550 			}
2551 
2552 			MEXT_MINREF(m) = 0;
2553 			MEXT_REF(m) = 0;
2554 			MEXT_PREF(m) = 0;
2555 			MEXT_FLAGS(m) = 0;
2556 			MEXT_PRIV(m) = 0;
2557 			MEXT_PMBUF(m) = NULL;
2558 			MEXT_TOKEN(m) = 0;
2559 
2560 			rfa = (mcache_obj_t *)(void *)m_get_rfa(m);
2561 			m_set_ext(m, NULL, NULL, NULL);
2562 			rfa->obj_next = ref_list;
2563 			ref_list = rfa;
2564 
2565 			m->m_type = MT_FREE;
2566 			m->m_flags = m->m_len = 0;
2567 			m->m_next = m->m_nextpkt = NULL;
2568 
2569 			/* Save mbuf fields and make auditing happy */
2570 			if (mclaudit != NULL) {
2571 				mcl_audit_mbuf(mca, o, FALSE, FALSE);
2572 			}
2573 
2574 			VERIFY(m_total(class) > 0);
2575 			m_total(class)--;
2576 
2577 			/* Free the mbuf */
2578 			o->obj_next = NULL;
2579 			slab_free(MC_MBUF, o);
2580 
2581 			/* And free the cluster */
2582 			((mcache_obj_t *)cl)->obj_next = NULL;
2583 			if (class == MC_MBUF_CL) {
2584 				slab_free(MC_CL, cl);
2585 			} else if (class == MC_MBUF_BIGCL) {
2586 				slab_free(MC_BIGCL, cl);
2587 			} else {
2588 				slab_free(MC_16KCL, cl);
2589 			}
2590 		}
2591 
2592 		++num;
2593 		tail = o;
2594 		o = nexto;
2595 	}
2596 
2597 	if (!purged) {
2598 		tail->obj_next = m_cobjlist(class);
2599 		m_cobjlist(class) = list;
2600 		m_infree(class) += num;
2601 	} else if (ref_list != NULL) {
2602 		mcache_free_ext(ref_cache, ref_list);
2603 	}
2604 
2605 	return num;
2606 }
2607 
2608 /*
2609  * Common allocator for composite objects called by the CPU cache layer
2610  * during an allocation request whenever there is no available element in
2611  * the bucket layer.  It returns one or more composite elements from the
2612  * appropriate global freelist.  If the freelist is empty, it will attempt
2613  * to obtain the rudimentary objects from their caches and construct them
2614  * into composite mbuf + cluster objects.
2615  */
2616 static unsigned int
2617 mbuf_cslab_alloc(void *arg, mcache_obj_t ***plist, unsigned int needed,
2618     int wait)
2619 {
2620 	mbuf_class_t class = (mbuf_class_t)arg;
2621 	mbuf_class_t cl_class = 0;
2622 	unsigned int num = 0, cnum = 0, want = needed;
2623 	mcache_obj_t *ref_list = NULL;
2624 	mcache_obj_t *mp_list = NULL;
2625 	mcache_obj_t *clp_list = NULL;
2626 	mcache_obj_t **list;
2627 	struct ext_ref *rfa;
2628 	struct mbuf *m;
2629 	void *cl;
2630 
2631 	ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class));
2632 	ASSERT(needed > 0);
2633 
2634 	VERIFY(class != MC_MBUF_16KCL || njcl > 0);
2635 
2636 	/* There should not be any slab for this class */
2637 	VERIFY(m_slab_cnt(class) == 0 &&
2638 	    m_slablist(class).tqh_first == NULL &&
2639 	    m_slablist(class).tqh_last == NULL);
2640 
2641 	lck_mtx_lock(mbuf_mlock);
2642 
2643 	/* Try using the freelist first */
2644 	num = cslab_alloc(class, plist, needed);
2645 	list = *plist;
2646 	if (num == needed) {
2647 		m_alloc_cnt(class) += num;
2648 		lck_mtx_unlock(mbuf_mlock);
2649 		return needed;
2650 	}
2651 
2652 	lck_mtx_unlock(mbuf_mlock);
2653 
2654 	/*
2655 	 * We could not satisfy the request using the freelist alone;
2656 	 * allocate from the appropriate rudimentary caches and use
2657 	 * whatever we can get to construct the composite objects.
2658 	 */
2659 	needed -= num;
2660 
2661 	/*
2662 	 * Mark these allocation requests as coming from a composite cache.
2663 	 * Also, if the caller is willing to be blocked, mark the request
2664 	 * with MCR_FAILOK such that we don't end up sleeping at the mbuf
2665 	 * slab layer waiting for the individual object when one or more
2666 	 * of the already-constructed composite objects are available.
2667 	 */
2668 	wait |= MCR_COMP;
2669 	if (!(wait & MCR_NOSLEEP)) {
2670 		wait |= MCR_FAILOK;
2671 	}
2672 
2673 	/* allocate mbufs */
2674 	needed = mcache_alloc_ext(m_cache(MC_MBUF), &mp_list, needed, wait);
2675 	if (needed == 0) {
2676 		ASSERT(mp_list == NULL);
2677 		goto fail;
2678 	}
2679 
2680 	/* allocate clusters */
2681 	if (class == MC_MBUF_CL) {
2682 		cl_class = MC_CL;
2683 	} else if (class == MC_MBUF_BIGCL) {
2684 		cl_class = MC_BIGCL;
2685 	} else {
2686 		VERIFY(class == MC_MBUF_16KCL);
2687 		cl_class = MC_16KCL;
2688 	}
2689 	needed = mcache_alloc_ext(m_cache(cl_class), &clp_list, needed, wait);
2690 	if (needed == 0) {
2691 		ASSERT(clp_list == NULL);
2692 		goto fail;
2693 	}
2694 
2695 	needed = mcache_alloc_ext(ref_cache, &ref_list, needed, wait);
2696 	if (needed == 0) {
2697 		ASSERT(ref_list == NULL);
2698 		goto fail;
2699 	}
2700 
2701 	/*
2702 	 * By this time "needed" is MIN(mbuf, cluster, ref).  Any
2703 	 * leftovers will get freed accordingly before we return to the caller.
2704 	 */
2705 	for (cnum = 0; cnum < needed; cnum++) {
2706 		struct mbuf *ms;
2707 
2708 		m = ms = (struct mbuf *)mp_list;
2709 		mp_list = mp_list->obj_next;
2710 
2711 		cl = clp_list;
2712 		clp_list = clp_list->obj_next;
2713 		((mcache_obj_t *)cl)->obj_next = NULL;
2714 
2715 		rfa = (struct ext_ref *)ref_list;
2716 		ref_list = ref_list->obj_next;
2717 		((mcache_obj_t *)(void *)rfa)->obj_next = NULL;
2718 
2719 		/*
2720 		 * If auditing is enabled, construct the shadow mbuf
2721 		 * in the audit structure instead of in the actual one.
2722 		 * mbuf_cslab_audit() will take care of restoring the
2723 		 * contents after the integrity check.
2724 		 */
2725 		if (mclaudit != NULL) {
2726 			mcache_audit_t *mca, *cl_mca;
2727 
2728 			lck_mtx_lock(mbuf_mlock);
2729 			mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m);
2730 			ms = MCA_SAVED_MBUF_PTR(mca);
2731 			cl_mca = mcl_audit_buf2mca(cl_class,
2732 			    (mcache_obj_t *)cl);
2733 
2734 			/*
2735 			 * Pair them up.  Note that this is done at the time
2736 			 * the mbuf+cluster objects are constructed.  This
2737 			 * information should be treated as a "best effort"
2738 			 * debugging hint, since more than one mbuf can refer
2739 			 * to a cluster.  In that case, the cluster might not
2740 			 * be freed along with the mbuf it was paired with.
2741 			 */
2742 			mca->mca_uptr = cl_mca;
2743 			cl_mca->mca_uptr = mca;
2744 
2745 			ASSERT(mca->mca_uflags & MB_SCVALID);
2746 			ASSERT(!(cl_mca->mca_uflags & MB_SCVALID));
2747 			lck_mtx_unlock(mbuf_mlock);
2748 
2749 			/* Technically, they are in the freelist */
2750 			if (mclverify) {
2751 				size_t size;
2752 
2753 				mcache_set_pattern(MCACHE_FREE_PATTERN, m,
2754 				    m_maxsize(MC_MBUF));
2755 
2756 				if (class == MC_MBUF_CL) {
2757 					size = m_maxsize(MC_CL);
2758 				} else if (class == MC_MBUF_BIGCL) {
2759 					size = m_maxsize(MC_BIGCL);
2760 				} else {
2761 					size = m_maxsize(MC_16KCL);
2762 				}
2763 
2764 				mcache_set_pattern(MCACHE_FREE_PATTERN, cl,
2765 				    size);
2766 			}
2767 		}
2768 
2769 		MBUF_INIT(ms, 0, MT_FREE);
2770 		if (class == MC_MBUF_16KCL) {
2771 			MBUF_16KCL_INIT(ms, cl, rfa, 0, EXTF_COMPOSITE);
2772 		} else if (class == MC_MBUF_BIGCL) {
2773 			MBUF_BIGCL_INIT(ms, cl, rfa, 0, EXTF_COMPOSITE);
2774 		} else {
2775 			MBUF_CL_INIT(ms, cl, rfa, 0, EXTF_COMPOSITE);
2776 		}
2777 		VERIFY(ms->m_flags == M_EXT);
2778 		VERIFY(m_get_rfa(ms) != NULL && MBUF_IS_COMPOSITE(ms));
2779 
2780 		*list = (mcache_obj_t *)m;
2781 		(*list)->obj_next = NULL;
2782 		list = *plist = &(*list)->obj_next;
2783 	}
2784 
2785 fail:
2786 	/*
2787 	 * Free up what's left of the above.
2788 	 */
2789 	if (mp_list != NULL) {
2790 		mcache_free_ext(m_cache(MC_MBUF), mp_list);
2791 	}
2792 	if (clp_list != NULL) {
2793 		mcache_free_ext(m_cache(cl_class), clp_list);
2794 	}
2795 	if (ref_list != NULL) {
2796 		mcache_free_ext(ref_cache, ref_list);
2797 	}
2798 
2799 	lck_mtx_lock(mbuf_mlock);
2800 	if (num > 0 || cnum > 0) {
2801 		m_total(class) += cnum;
2802 		VERIFY(m_total(class) <= m_maxlimit(class));
2803 		m_alloc_cnt(class) += num + cnum;
2804 	}
2805 	if ((num + cnum) < want) {
2806 		m_fail_cnt(class) += (want - (num + cnum));
2807 	}
2808 	lck_mtx_unlock(mbuf_mlock);
2809 
2810 	return num + cnum;
2811 }
2812 
2813 /*
2814  * Common de-allocator for composite objects called by the CPU cache
2815  * layer when one or more elements need to be returned to the appropriate
2816  * global freelist.
2817  */
2818 static void
2819 mbuf_cslab_free(void *arg, mcache_obj_t *list, int purged)
2820 {
2821 	mbuf_class_t class = (mbuf_class_t)arg;
2822 	unsigned int num;
2823 	int w;
2824 
2825 	ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class));
2826 
2827 	lck_mtx_lock(mbuf_mlock);
2828 
2829 	num = cslab_free(class, list, purged);
2830 	m_free_cnt(class) += num;
2831 
2832 	if ((w = mb_waiters) > 0) {
2833 		mb_waiters = 0;
2834 	}
2835 	if (w) {
2836 		mbwdog_logger("waking up all threads");
2837 	}
2838 
2839 	lck_mtx_unlock(mbuf_mlock);
2840 
2841 	if (w != 0) {
2842 		wakeup(mb_waitchan);
2843 	}
2844 }
2845 
2846 /*
2847  * Common auditor for composite objects called by the CPU cache layer
2848  * during an allocation or free request.  For the former, this is called
2849  * after the objects are obtained from either the bucket or slab layer
2850  * and before they are returned to the caller.  For the latter, this is
2851  * called immediately during free and before placing the objects into
2852  * the bucket or slab layer.
2853  */
2854 static void
2855 mbuf_cslab_audit(void *arg, mcache_obj_t *list, boolean_t alloc)
2856 {
2857 	mbuf_class_t class = (mbuf_class_t)arg, cl_class;
2858 	mcache_audit_t *mca;
2859 	struct mbuf *m, *ms;
2860 	mcl_slab_t *clsp, *nsp;
2861 	size_t cl_size;
2862 	void *cl;
2863 
2864 	ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class));
2865 	if (class == MC_MBUF_CL) {
2866 		cl_class = MC_CL;
2867 	} else if (class == MC_MBUF_BIGCL) {
2868 		cl_class = MC_BIGCL;
2869 	} else {
2870 		cl_class = MC_16KCL;
2871 	}
2872 	cl_size = m_maxsize(cl_class);
2873 
2874 	while ((m = ms = (struct mbuf *)list) != NULL) {
2875 		lck_mtx_lock(mbuf_mlock);
2876 		/* Do the mbuf sanity checks and record its transaction */
2877 		mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m);
2878 		mcl_audit_mbuf(mca, m, TRUE, alloc);
2879 		if (mcltrace) {
2880 			mcache_buffer_log(mca, m, m_cache(class), &mb_start);
2881 		}
2882 
2883 		if (alloc) {
2884 			mca->mca_uflags |= MB_COMP_INUSE;
2885 		} else {
2886 			mca->mca_uflags &= ~MB_COMP_INUSE;
2887 		}
2888 
2889 		/*
2890 		 * Use the shadow mbuf in the audit structure if we are
2891 		 * freeing, since the contents of the actual mbuf has been
2892 		 * pattern-filled by the above call to mcl_audit_mbuf().
2893 		 */
2894 		if (!alloc && mclverify) {
2895 			ms = MCA_SAVED_MBUF_PTR(mca);
2896 		}
2897 
2898 		/* Do the cluster sanity checks and record its transaction */
2899 		cl = ms->m_ext.ext_buf;
2900 		clsp = slab_get(cl);
2901 		VERIFY(ms->m_flags == M_EXT && cl != NULL);
2902 		VERIFY(m_get_rfa(ms) != NULL && MBUF_IS_COMPOSITE(ms));
2903 		if (class == MC_MBUF_CL) {
2904 			VERIFY(clsp->sl_refcnt >= 1 &&
2905 			    clsp->sl_refcnt <= NCLPG);
2906 		} else {
2907 			VERIFY(clsp->sl_refcnt >= 1 &&
2908 			    clsp->sl_refcnt <= NBCLPG);
2909 		}
2910 
2911 		if (class == MC_MBUF_16KCL) {
2912 			int k;
2913 			for (nsp = clsp, k = 1; k < NSLABSP16KB; k++) {
2914 				nsp = nsp->sl_next;
2915 				/* Next slab must already be present */
2916 				VERIFY(nsp != NULL);
2917 				VERIFY(nsp->sl_refcnt == 1);
2918 			}
2919 		}
2920 
2921 
2922 		mca = mcl_audit_buf2mca(cl_class, cl);
2923 		mcl_audit_cluster(mca, cl, cl_size, alloc, FALSE);
2924 		if (mcltrace) {
2925 			mcache_buffer_log(mca, cl, m_cache(class), &mb_start);
2926 		}
2927 
2928 		if (alloc) {
2929 			mca->mca_uflags |= MB_COMP_INUSE;
2930 		} else {
2931 			mca->mca_uflags &= ~MB_COMP_INUSE;
2932 		}
2933 		lck_mtx_unlock(mbuf_mlock);
2934 
2935 		list = list->obj_next;
2936 	}
2937 }
2938 
2939 static void
2940 m_vm_error_stats(uint32_t *cnt, uint64_t *ts, uint64_t *size,
2941     uint64_t alloc_size, kern_return_t error)
2942 {
2943 	*cnt = *cnt + 1;
2944 	*ts = net_uptime();
2945 	if (size) {
2946 		*size = alloc_size;
2947 	}
2948 	_CASSERT(sizeof(mb_kmem_stats) / sizeof(mb_kmem_stats[0]) ==
2949 	    sizeof(mb_kmem_stats_labels) / sizeof(mb_kmem_stats_labels[0]));
2950 	switch (error) {
2951 	case KERN_SUCCESS:
2952 		break;
2953 	case KERN_INVALID_ARGUMENT:
2954 		mb_kmem_stats[0]++;
2955 		break;
2956 	case KERN_INVALID_ADDRESS:
2957 		mb_kmem_stats[1]++;
2958 		break;
2959 	case KERN_RESOURCE_SHORTAGE:
2960 		mb_kmem_stats[2]++;
2961 		break;
2962 	case KERN_NO_SPACE:
2963 		mb_kmem_stats[3]++;
2964 		break;
2965 	case KERN_FAILURE:
2966 		mb_kmem_stats[4]++;
2967 		break;
2968 	default:
2969 		mb_kmem_stats[5]++;
2970 		break;
2971 	}
2972 }
2973 
2974 static vm_offset_t
2975 kmem_mb_alloc(vm_map_t mbmap, int size, int physContig, kern_return_t *err)
2976 {
2977 	vm_offset_t addr = 0;
2978 	kern_return_t kr = KERN_SUCCESS;
2979 
2980 	if (!physContig) {
2981 		kr = kernel_memory_allocate(mbmap, &addr, size, 0,
2982 		    KMA_KOBJECT | KMA_LOMEM, VM_KERN_MEMORY_MBUF);
2983 	} else {
2984 		kr = kmem_alloc_contig(mbmap, &addr, size, PAGE_MASK, 0xfffff,
2985 		    0, KMA_KOBJECT | KMA_LOMEM, VM_KERN_MEMORY_MBUF);
2986 	}
2987 
2988 	if (kr != KERN_SUCCESS) {
2989 		addr = 0;
2990 	}
2991 	if (err) {
2992 		*err = kr;
2993 	}
2994 
2995 	return addr;
2996 }
2997 
2998 /*
2999  * Allocate some number of mbuf clusters and place on cluster freelist.
3000  */
3001 static int
3002 m_clalloc(const u_int32_t num, const int wait, const u_int32_t bufsize)
3003 {
3004 	int i, count = 0;
3005 	vm_size_t size = 0;
3006 	int numpages = 0, large_buffer;
3007 	vm_offset_t page = 0;
3008 	mcache_audit_t *mca_list = NULL;
3009 	mcache_obj_t *con_list = NULL;
3010 	mcl_slab_t *sp;
3011 	mbuf_class_t class;
3012 	kern_return_t error;
3013 
3014 	/* Set if a buffer allocation needs allocation of multiple pages */
3015 	large_buffer = ((bufsize == m_maxsize(MC_16KCL)) &&
3016 	    PAGE_SIZE < M16KCLBYTES);
3017 	VERIFY(bufsize == m_maxsize(MC_BIGCL) ||
3018 	    bufsize == m_maxsize(MC_16KCL));
3019 
3020 	VERIFY((bufsize == PAGE_SIZE) ||
3021 	    (bufsize > PAGE_SIZE && bufsize == m_maxsize(MC_16KCL)));
3022 
3023 	if (bufsize == m_size(MC_BIGCL)) {
3024 		class = MC_BIGCL;
3025 	} else {
3026 		class = MC_16KCL;
3027 	}
3028 
3029 	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
3030 
3031 	/*
3032 	 * Multiple threads may attempt to populate the cluster map one
3033 	 * after another.  Since we drop the lock below prior to acquiring
3034 	 * the physical page(s), our view of the cluster map may no longer
3035 	 * be accurate, and we could end up over-committing the pages beyond
3036 	 * the maximum allowed for each class.  To prevent it, this entire
3037 	 * operation (including the page mapping) is serialized.
3038 	 */
3039 	while (mb_clalloc_busy) {
3040 		mb_clalloc_waiters++;
3041 		(void) msleep(mb_clalloc_waitchan, mbuf_mlock,
3042 		    (PZERO - 1), "m_clalloc", NULL);
3043 		LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
3044 	}
3045 
3046 	/* We are busy now; tell everyone else to go away */
3047 	mb_clalloc_busy = TRUE;
3048 
3049 	/*
3050 	 * Honor the caller's wish to block or not block.  We have a way
3051 	 * to grow the pool asynchronously using the mbuf worker thread.
3052 	 */
3053 	i = m_howmany(num, bufsize);
3054 	if (i <= 0 || (wait & M_DONTWAIT)) {
3055 		goto out;
3056 	}
3057 
3058 	lck_mtx_unlock(mbuf_mlock);
3059 
3060 	size = round_page(i * bufsize);
3061 	page = kmem_mb_alloc(mb_map, size, large_buffer, &error);
3062 
3063 	/*
3064 	 * If we did ask for "n" 16KB physically contiguous chunks
3065 	 * and didn't get them, then try again without this
3066 	 * restriction.
3067 	 */
3068 	net_update_uptime();
3069 	if (large_buffer && page == 0) {
3070 		m_vm_error_stats(&mb_kmem_contig_failed,
3071 		    &mb_kmem_contig_failed_ts,
3072 		    &mb_kmem_contig_failed_size,
3073 		    size, error);
3074 		page = kmem_mb_alloc(mb_map, size, 0, &error);
3075 	}
3076 
3077 	if (page == 0) {
3078 		m_vm_error_stats(&mb_kmem_failed,
3079 		    &mb_kmem_failed_ts,
3080 		    &mb_kmem_failed_size,
3081 		    size, error);
3082 #if PAGE_SIZE == 4096
3083 		if (bufsize == m_maxsize(MC_BIGCL)) {
3084 #else
3085 		if (bufsize >= m_maxsize(MC_BIGCL)) {
3086 #endif
3087 			/* Try for 1 page if failed */
3088 			size = PAGE_SIZE;
3089 			page = kmem_mb_alloc(mb_map, size, 0, &error);
3090 			if (page == 0) {
3091 				m_vm_error_stats(&mb_kmem_one_failed,
3092 				    &mb_kmem_one_failed_ts,
3093 				    NULL, size, error);
3094 			}
3095 		}
3096 
3097 		if (page == 0) {
3098 			lck_mtx_lock(mbuf_mlock);
3099 			goto out;
3100 		}
3101 	}
3102 
3103 	VERIFY(IS_P2ALIGNED(page, PAGE_SIZE));
3104 	numpages = size / PAGE_SIZE;
3105 
3106 	/* If auditing is enabled, allocate the audit structures now */
3107 	if (mclaudit != NULL) {
3108 		int needed;
3109 
3110 		/*
3111 		 * Yes, I realize this is a waste of memory for clusters
3112 		 * that never get transformed into mbufs, as we may end
3113 		 * up with NMBPG-1 unused audit structures per cluster.
3114 		 * But doing so tremendously simplifies the allocation
3115 		 * strategy, since at this point we are not holding the
3116 		 * mbuf lock and the caller is okay to be blocked.
3117 		 */
3118 		if (bufsize == PAGE_SIZE) {
3119 			needed = numpages * NMBPG;
3120 
3121 			i = mcache_alloc_ext(mcl_audit_con_cache,
3122 			    &con_list, needed, MCR_SLEEP);
3123 
3124 			VERIFY(con_list != NULL && i == needed);
3125 		} else {
3126 			/*
3127 			 * If multiple 4K pages back a single 16K cluster,
3128 			 * one audit structure per cluster suffices.
3129 			 */
3130 			needed = numpages / NSLABSP16KB;
3131 		}
3132 
3133 		i = mcache_alloc_ext(mcache_audit_cache,
3134 		    (mcache_obj_t **)&mca_list, needed, MCR_SLEEP);
3135 
3136 		VERIFY(mca_list != NULL && i == needed);
3137 	}
3138 
3139 	lck_mtx_lock(mbuf_mlock);
3140 
3141 	for (i = 0; i < numpages; i++, page += PAGE_SIZE) {
3142 		ppnum_t offset =
3143 		    ((unsigned char *)page - mbutl) >> PAGE_SHIFT;
3144 		ppnum_t new_page = pmap_find_phys(kernel_pmap, page);
3145 
3146 		/*
3147 		 * If there is a mapper, the appropriate I/O page is
3148 		 * returned; zero out the page to discard its past
3149 		 * contents to prevent exposing leftover kernel memory.
3150 		 */
3151 		VERIFY(offset < mcl_pages);
3152 		if (mcl_paddr_base != 0) {
3153 			bzero((void *)(uintptr_t) page, PAGE_SIZE);
3154 			new_page = IOMapperInsertPage(mcl_paddr_base,
3155 			    offset, new_page);
3156 		}
3157 		mcl_paddr[offset] = new_page;
3158 
3159 		/* Pattern-fill this fresh page */
3160 		if (mclverify) {
3161 			mcache_set_pattern(MCACHE_FREE_PATTERN,
3162 			    (caddr_t)page, PAGE_SIZE);
3163 		}
3164 		if (bufsize == PAGE_SIZE) {
3165 			mcache_obj_t *buf;
3166 			/* One for the entire page */
3167 			sp = slab_get((void *)page);
3168 			if (mclaudit != NULL) {
3169 				mcl_audit_init((void *)page,
3170 				    &mca_list, &con_list,
3171 				    AUDIT_CONTENTS_SIZE, NMBPG);
3172 			}
3173 			VERIFY(sp->sl_refcnt == 0 && sp->sl_flags == 0);
3174 			slab_init(sp, class, SLF_MAPPED, (void *)page,
3175 			    (void *)page, PAGE_SIZE, 0, 1);
3176 			buf = (mcache_obj_t *)page;
3177 			buf->obj_next = NULL;
3178 
3179 			/* Insert this slab */
3180 			slab_insert(sp, class);
3181 
3182 			/* Update stats now since slab_get drops the lock */
3183 			++m_infree(class);
3184 			++m_total(class);
3185 			VERIFY(m_total(class) <= m_maxlimit(class));
3186 			if (class == MC_BIGCL) {
3187 				mbstat.m_bigclfree = m_infree(MC_BIGCL) +
3188 				    m_infree(MC_MBUF_BIGCL);
3189 				mbstat.m_bigclusters = m_total(MC_BIGCL);
3190 			}
3191 			++count;
3192 		} else if ((bufsize > PAGE_SIZE) &&
3193 		    (i % NSLABSP16KB) == 0) {
3194 			union m16kcluster *m16kcl = (union m16kcluster *)page;
3195 			mcl_slab_t *nsp;
3196 			int k;
3197 
3198 			/* One for the entire 16KB */
3199 			sp = slab_get(m16kcl);
3200 			if (mclaudit != NULL) {
3201 				mcl_audit_init(m16kcl, &mca_list, NULL, 0, 1);
3202 			}
3203 
3204 			VERIFY(sp->sl_refcnt == 0 && sp->sl_flags == 0);
3205 			slab_init(sp, MC_16KCL, SLF_MAPPED,
3206 			    m16kcl, m16kcl, bufsize, 0, 1);
3207 			m16kcl->m16kcl_next = NULL;
3208 
3209 			/*
3210 			 * 2nd-Nth page's slab is part of the first one,
3211 			 * where N is NSLABSP16KB.
3212 			 */
3213 			for (k = 1; k < NSLABSP16KB; k++) {
3214 				nsp = slab_get(((union mbigcluster *)page) + k);
3215 				VERIFY(nsp->sl_refcnt == 0 &&
3216 				    nsp->sl_flags == 0);
3217 				slab_init(nsp, MC_16KCL,
3218 				    SLF_MAPPED | SLF_PARTIAL,
3219 				    m16kcl, NULL, 0, 0, 0);
3220 			}
3221 			/* Insert this slab */
3222 			slab_insert(sp, MC_16KCL);
3223 
3224 			/* Update stats now since slab_get drops the lock */
3225 			++m_infree(MC_16KCL);
3226 			++m_total(MC_16KCL);
3227 			VERIFY(m_total(MC_16KCL) <= m_maxlimit(MC_16KCL));
3228 			++count;
3229 		}
3230 	}
3231 	VERIFY(mca_list == NULL && con_list == NULL);
3232 
3233 	if (!mb_peak_newreport && mbuf_report_usage(class)) {
3234 		mb_peak_newreport = TRUE;
3235 	}
3236 
3237 	/* We're done; let others enter */
3238 	mb_clalloc_busy = FALSE;
3239 	if (mb_clalloc_waiters > 0) {
3240 		mb_clalloc_waiters = 0;
3241 		wakeup(mb_clalloc_waitchan);
3242 	}
3243 
3244 	return count;
3245 out:
3246 	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
3247 
3248 	mtracelarge_register(size);
3249 
3250 	/* We're done; let others enter */
3251 	mb_clalloc_busy = FALSE;
3252 	if (mb_clalloc_waiters > 0) {
3253 		mb_clalloc_waiters = 0;
3254 		wakeup(mb_clalloc_waitchan);
3255 	}
3256 
3257 	/*
3258 	 * When non-blocking, wake the worker thread if we have to grow
3259 	 * the pool or if the number of free clusters is less than requested.
3260 	 */
3261 	if (i > 0 && mbuf_worker_ready && mbuf_worker_needs_wakeup) {
3262 		mbwdog_logger("waking up the worker thread to grow %s by %d",
3263 		    m_cname(class), i);
3264 		wakeup((caddr_t)&mbuf_worker_needs_wakeup);
3265 		mbuf_worker_needs_wakeup = FALSE;
3266 	}
3267 	if (class == MC_BIGCL) {
3268 		if (i > 0) {
3269 			/*
3270 			 * Remember total number of 4KB clusters needed
3271 			 * at this time.
3272 			 */
3273 			i += m_total(MC_BIGCL);
3274 			if (i > m_region_expand(MC_BIGCL)) {
3275 				m_region_expand(MC_BIGCL) = i;
3276 			}
3277 		}
3278 		if (m_infree(MC_BIGCL) >= num) {
3279 			return 1;
3280 		}
3281 	} else {
3282 		if (i > 0) {
3283 			/*
3284 			 * Remember total number of 16KB clusters needed
3285 			 * at this time.
3286 			 */
3287 			i += m_total(MC_16KCL);
3288 			if (i > m_region_expand(MC_16KCL)) {
3289 				m_region_expand(MC_16KCL) = i;
3290 			}
3291 		}
3292 		if (m_infree(MC_16KCL) >= num) {
3293 			return 1;
3294 		}
3295 	}
3296 	return 0;
3297 }
3298 
3299 /*
3300  * Populate the global freelist of the corresponding buffer class.
3301  */
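/*
 * Classes of at least a page are handed straight to m_clalloc().
 * For sub-page classes, whole pages are taken from the super class
 * (MC_BIGCL or MC_16KCL, whichever matches PAGE_SIZE) and each page
 * is carved into PAGE_SIZE / m_maxsize(class) objects.
 */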
3302 static int
3303 freelist_populate(mbuf_class_t class, unsigned int num, int wait)
3304 {
3305 	mcache_obj_t *o = NULL;
3306 	int i, numpages = 0, count;
3307 	mbuf_class_t super_class;
3308 
3309 	VERIFY(class == MC_MBUF || class == MC_CL || class == MC_BIGCL ||
3310 	    class == MC_16KCL);
3311 
3312 	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
3313 
3314 	VERIFY(PAGE_SIZE == m_maxsize(MC_BIGCL) ||
3315 	    PAGE_SIZE == m_maxsize(MC_16KCL));
3316 
3317 	if (m_maxsize(class) >= PAGE_SIZE) {
3318 		return m_clalloc(num, wait, m_maxsize(class)) != 0;
3319 	}
3320 
3321 	/*
3322 	 * The rest of the function allocates whole pages and slices
3323 	 * them up into objects of the requested size.
3324 	 */
3325 
3326 	numpages = (num * m_size(class) + PAGE_SIZE - 1) / PAGE_SIZE;
3327 
3328 	/* Currently assume that pages are 4K or 16K */
3329 	if (PAGE_SIZE == m_maxsize(MC_BIGCL)) {
3330 		super_class = MC_BIGCL;
3331 	} else {
3332 		super_class = MC_16KCL;
3333 	}
3334 
3335 	i = m_clalloc(numpages, wait, m_maxsize(super_class));
3336 
3337 	/* how many objects will we cut the page into? */
3338 	int numobj = PAGE_SIZE / m_maxsize(class);
3339 
3340 	for (count = 0; count < numpages; count++) {
3341 		/* respect totals, minlimit, maxlimit */
3342 		if (m_total(super_class) <= m_minlimit(super_class) ||
3343 		    m_total(class) >= m_maxlimit(class)) {
3344 			break;
3345 		}
3346 
3347 		if ((o = slab_alloc(super_class, wait)) == NULL) {
3348 			break;
3349 		}
3350 
3351 		struct mbuf *m = (struct mbuf *)o;
3352 		union mcluster *c = (union mcluster *)o;
3353 		union mbigcluster *mbc = (union mbigcluster *)o;
3354 		mcl_slab_t *sp = slab_get(o);
3355 		mcache_audit_t *mca = NULL;
3356 
3357 		/*
3358 		 * since one full page will be converted to MC_MBUF or
3359 		 * MC_CL, verify that the reference count will match that
3360 		 * assumption
3361 		 */
3362 		VERIFY(sp->sl_refcnt == 1 && slab_is_detached(sp));
3363 		VERIFY((sp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) == SLF_MAPPED);
3364 		/*
3365 		 * Make sure that the cluster is unmolested
3366 		 * while in freelist
3367 		 */
3368 		if (mclverify) {
3369 			mca = mcl_audit_buf2mca(super_class,
3370 			    (mcache_obj_t *)o);
3371 			mcache_audit_free_verify(mca,
3372 			    (mcache_obj_t *)o, 0, m_maxsize(super_class));
3373 		}
3374 
3375 		/* Reinitialize it as an mbuf or 2K or 4K slab */
3376 		slab_init(sp, class, sp->sl_flags,
3377 		    sp->sl_base, NULL, PAGE_SIZE, 0, numobj);
3378 
3379 		VERIFY(sp->sl_head == NULL);
3380 
3381 		VERIFY(m_total(super_class) >= 1);
3382 		m_total(super_class)--;
3383 
3384 		if (super_class == MC_BIGCL) {
3385 			mbstat.m_bigclusters = m_total(MC_BIGCL);
3386 		}
3387 
3388 		m_total(class) += numobj;
3389 		VERIFY(m_total(class) <= m_maxlimit(class));
3390 		m_infree(class) += numobj;
3391 
3392 		if (!mb_peak_newreport && mbuf_report_usage(class)) {
3393 			mb_peak_newreport = TRUE;
3394 		}
3395 
3396 		i = numobj;
3397 		if (class == MC_MBUF) {
3398 			mbstat.m_mbufs = m_total(MC_MBUF);
3399 			mtype_stat_add(MT_FREE, NMBPG);
3400 			while (i--) {
3401 				/*
3402 				 * If auditing is enabled, construct the
3403 				 * shadow mbuf in the audit structure
3404 				 * instead of the actual one.
3405 				 * mbuf_slab_audit() will take care of
3406 				 * restoring the contents after the
3407 				 * integrity check.
3408 				 */
3409 				if (mclaudit != NULL) {
3410 					struct mbuf *ms;
3411 					mca = mcl_audit_buf2mca(MC_MBUF,
3412 					    (mcache_obj_t *)m);
3413 					ms = MCA_SAVED_MBUF_PTR(mca);
3414 					ms->m_type = MT_FREE;
3415 				} else {
3416 					m->m_type = MT_FREE;
3417 				}
3418 				m->m_next = sp->sl_head;
3419 				sp->sl_head = (void *)m++;
3420 			}
3421 		} else if (class == MC_CL) { /* MC_CL */
3422 			mbstat.m_clfree =
3423 			    m_infree(MC_CL) + m_infree(MC_MBUF_CL);
3424 			mbstat.m_clusters = m_total(MC_CL);
3425 			while (i--) {
3426 				c->mcl_next = sp->sl_head;
3427 				sp->sl_head = (void *)c++;
3428 			}
3429 		} else {
3430 			VERIFY(class == MC_BIGCL);
3431 			mbstat.m_bigclusters = m_total(MC_BIGCL);
3432 			mbstat.m_bigclfree = m_infree(MC_BIGCL) +
3433 			    m_infree(MC_MBUF_BIGCL);
3434 			while (i--) {
3435 				mbc->mbc_next = sp->sl_head;
3436 				sp->sl_head = (void *)mbc++;
3437 			}
3438 		}
3439 
3440 		/* Insert into the mbuf or 2k or 4k slab list */
3441 		slab_insert(sp, class);
3442 
3443 		if ((i = mb_waiters) > 0) {
3444 			mb_waiters = 0;
3445 		}
3446 		if (i != 0) {
3447 			mbwdog_logger("waking up all threads");
3448 			wakeup(mb_waitchan);
3449 		}
3450 	}
3451 	return count != 0;
3452 }
3453 
3454 /*
3455  * For each class, initialize the freelist to hold m_minlimit() objects.
3456  */
3457 static void
3458 freelist_init(mbuf_class_t class)
3459 {
3460 	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
3461 
3462 	VERIFY(class == MC_CL || class == MC_BIGCL);
3463 	VERIFY(m_total(class) == 0);
3464 	VERIFY(m_minlimit(class) > 0);
3465 
3466 	while (m_total(class) < m_minlimit(class)) {
3467 		(void) freelist_populate(class, m_minlimit(class), M_WAIT);
3468 	}
3469 
3470 	VERIFY(m_total(class) >= m_minlimit(class));
3471 }
3472 
3473 /*
3474  * (Inaccurately) check if it might be worth a trip back to the
3475  * mcache layer due to the availability of objects there.  We'll
3476  * end up back here if there's nothing up there.
3477  */
3478 static boolean_t
3479 mbuf_cached_above(mbuf_class_t class, int wait)
3480 {
3481 	switch (class) {
3482 	case MC_MBUF:
3483 		if (wait & MCR_COMP) {
3484 			return !mcache_bkt_isempty(m_cache(MC_MBUF_CL)) ||
3485 			       !mcache_bkt_isempty(m_cache(MC_MBUF_BIGCL));
3486 		}
3487 		break;
3488 
3489 	case MC_CL:
3490 		if (wait & MCR_COMP) {
3491 			return !mcache_bkt_isempty(m_cache(MC_MBUF_CL));
3492 		}
3493 		break;
3494 
3495 	case MC_BIGCL:
3496 		if (wait & MCR_COMP) {
3497 			return !mcache_bkt_isempty(m_cache(MC_MBUF_BIGCL));
3498 		}
3499 		break;
3500 
3501 	case MC_16KCL:
3502 		if (wait & MCR_COMP) {
3503 			return !mcache_bkt_isempty(m_cache(MC_MBUF_16KCL));
3504 		}
3505 		break;
3506 
3507 	case MC_MBUF_CL:
3508 	case MC_MBUF_BIGCL:
3509 	case MC_MBUF_16KCL:
3510 		break;
3511 
3512 	default:
3513 		VERIFY(0);
3514 		/* NOTREACHED */
3515 	}
3516 
3517 	return !mcache_bkt_isempty(m_cache(class));
3518 }
3519 
3520 /*
3521  * If possible, convert constructed objects to raw ones.
3522  */
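/*
 * "Constructed" objects are the composite mbuf + cluster pairs held
 * in the MC_MBUF_* caches; destroying them via cslab_free() tears
 * each pair back down into a raw mbuf and a raw cluster on their
 * respective freelists.
 */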
3523 static boolean_t
3524 mbuf_steal(mbuf_class_t class, unsigned int num)
3525 {
3526 	mcache_obj_t *top = NULL;
3527 	mcache_obj_t **list = &top;
3528 	unsigned int tot = 0;
3529 
3530 	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
3531 
3532 	switch (class) {
3533 	case MC_MBUF:
3534 	case MC_CL:
3535 	case MC_BIGCL:
3536 	case MC_16KCL:
3537 		return FALSE;
3538 
3539 	case MC_MBUF_CL:
3540 	case MC_MBUF_BIGCL:
3541 	case MC_MBUF_16KCL:
3542 		/* Get the required number of constructed objects if possible */
3543 		if (m_infree(class) > m_minlimit(class)) {
3544 			tot = cslab_alloc(class, &list,
3545 			    MIN(num, m_infree(class)));
3546 		}
3547 
3548 		/* And destroy them to get back the raw objects */
3549 		if (top != NULL) {
3550 			(void) cslab_free(class, top, 1);
3551 		}
3552 		break;
3553 
3554 	default:
3555 		VERIFY(0);
3556 		/* NOTREACHED */
3557 	}
3558 
3559 	return tot == num;
3560 }
3561 
3562 static void
3563 m_reclaim(mbuf_class_t class, unsigned int num, boolean_t comp)
3564 {
3565 	int m, bmap = 0;
3566 
3567 	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
3568 
3569 	VERIFY(m_total(MC_CL) <= m_maxlimit(MC_CL));
3570 	VERIFY(m_total(MC_BIGCL) <= m_maxlimit(MC_BIGCL));
3571 	VERIFY(m_total(MC_16KCL) <= m_maxlimit(MC_16KCL));
3572 
3573 	/*
3574 	 * This logic can be made smarter; for now, simply mark
3575 	 * all other related classes as potential victims.
3576 	 */
3577 	switch (class) {
3578 	case MC_MBUF:
3579 		m_wantpurge(MC_CL)++;
3580 		m_wantpurge(MC_BIGCL)++;
3581 		m_wantpurge(MC_MBUF_CL)++;
3582 		m_wantpurge(MC_MBUF_BIGCL)++;
3583 		break;
3584 
3585 	case MC_CL:
3586 		m_wantpurge(MC_MBUF)++;
3587 		m_wantpurge(MC_BIGCL)++;
3588 		m_wantpurge(MC_MBUF_BIGCL)++;
3589 		if (!comp) {
3590 			m_wantpurge(MC_MBUF_CL)++;
3591 		}
3592 		break;
3593 
3594 	case MC_BIGCL:
3595 		m_wantpurge(MC_MBUF)++;
3596 		m_wantpurge(MC_CL)++;
3597 		m_wantpurge(MC_MBUF_CL)++;
3598 		if (!comp) {
3599 			m_wantpurge(MC_MBUF_BIGCL)++;
3600 		}
3601 		break;
3602 
3603 	case MC_16KCL:
3604 		if (!comp) {
3605 			m_wantpurge(MC_MBUF_16KCL)++;
3606 		}
3607 		break;
3608 
3609 	default:
3610 		VERIFY(0);
3611 		/* NOTREACHED */
3612 	}
3613 
3614 	/*
3615 	 * Run through each marked class and check if we really need to
3616 	 * purge (and therefore temporarily disable) the per-CPU caches
3617 	 * layer used by the class.  If so, remember the classes since
3618 	 * we are going to drop the lock below prior to purging.
3619 	 */
3620 	for (m = 0; m < NELEM(mbuf_table); m++) {
3621 		if (m_wantpurge(m) > 0) {
3622 			m_wantpurge(m) = 0;
3623 			/*
3624 			 * Try hard to steal the required number of objects
3625 			 * from the freelist of other mbuf classes.  Only
3626 			 * purge and disable the per-CPU caches layer when
3627 			 * we don't have enough; it's the last resort.
3628 			 */
3629 			if (!mbuf_steal(m, num)) {
3630 				bmap |= (1 << m);
3631 			}
3632 		}
3633 	}
3634 
3635 	lck_mtx_unlock(mbuf_mlock);
3636 
3637 	if (bmap != 0) {
3638 		/* signal the domains to drain */
3639 		net_drain_domains();
3640 
3641 		/* Sigh; we have no other choices but to ask mcache to purge */
3642 		for (m = 0; m < NELEM(mbuf_table); m++) {
3643 			if ((bmap & (1 << m)) &&
3644 			    mcache_purge_cache(m_cache(m), TRUE)) {
3645 				lck_mtx_lock(mbuf_mlock);
3646 				m_purge_cnt(m)++;
3647 				mbstat.m_drain++;
3648 				lck_mtx_unlock(mbuf_mlock);
3649 			}
3650 		}
3651 	} else {
3652 		/*
3653 		 * Request mcache to reap extra elements from all of its caches;
3654 		 * note that all reaps are serialized and happen only at a fixed
3655 		 * interval.
3656 		 */
3657 		mcache_reap();
3658 	}
3659 	lck_mtx_lock(mbuf_mlock);
3660 }
3661 
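/*
 * Common allocator behind m_get() and m_gethdr(): take an mbuf from
 * the MC_MBUF cache, initialize it, and keep the MT_FREE type
 * accounting in step.
 */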
3662 static inline struct mbuf *
3663 m_get_common(int wait, short type, int hdr)
3664 {
3665 	struct mbuf *m;
3666 	int mcflags = MSLEEPF(wait);
3667 
3668 	/* Is this due to a non-blocking retry?  If so, then try harder */
3669 	if (mcflags & MCR_NOSLEEP) {
3670 		mcflags |= MCR_TRYHARD;
3671 	}
3672 
3673 	m = mcache_alloc(m_cache(MC_MBUF), mcflags);
3674 	if (m != NULL) {
3675 		MBUF_INIT(m, hdr, type);
3676 		mtype_stat_inc(type);
3677 		mtype_stat_dec(MT_FREE);
3678 	}
3679 	return m;
3680 }
3681 
3682 /*
3683  * Space allocation routines; these are also available as macros
3684  * for critical paths.
3685  */
3686 #define _M_GET(wait, type)      m_get_common(wait, type, 0)
3687 #define _M_GETHDR(wait, type)   m_get_common(wait, type, 1)
3688 #define _M_RETRY(wait, type)    _M_GET(wait, type)
3689 #define _M_RETRYHDR(wait, type) _M_GETHDR(wait, type)
3690 #define _MGET(m, how, type)     ((m) = _M_GET(how, type))
3691 #define _MGETHDR(m, how, type)  ((m) = _M_GETHDR(how, type))
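
/*
 * Illustrative use of the wrappers above (hypothetical caller):
 *
 *	struct mbuf *m;
 *
 *	_MGET(m, M_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return ENOBUFS;
 */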
3692 
3693 struct mbuf *
3694 m_get(int wait, int type)
3695 {
3696 	return _M_GET(wait, type);
3697 }
3698 
3699 struct mbuf *
3700 m_gethdr(int wait, int type)
3701 {
3702 	return _M_GETHDR(wait, type);
3703 }
3704 
3705 struct mbuf *
3706 m_retry(int wait, int type)
3707 {
3708 	return _M_RETRY(wait, type);
3709 }
3710 
3711 struct mbuf *
3712 m_retryhdr(int wait, int type)
3713 {
3714 	return _M_RETRYHDR(wait, type);
3715 }
3716 
3717 struct mbuf *
3718 m_getclr(int wait, int type)
3719 {
3720 	struct mbuf *m;
3721 
3722 	_MGET(m, wait, type);
3723 	if (m != NULL) {
3724 		bzero(MTOD(m, caddr_t), MLEN);
3725 	}
3726 	return m;
3727 }
3728 
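/*
 * Drop one reference on a paired mbuf.  Returns nonzero when the
 * caller must leave the external cluster alone (other references
 * remain, or the pair's free routine has already consumed it);
 * returns 0 once the unpair has occurred and the cluster reference
 * may be dropped.
 */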
3729 static int
3730 m_free_paired(struct mbuf *m)
3731 {
3732 	VERIFY((m->m_flags & M_EXT) && (MEXT_FLAGS(m) & EXTF_PAIRED));
3733 
3734 	membar_sync();
3735 	if (MEXT_PMBUF(m) == m) {
3736 		volatile UInt16 *addr = (volatile UInt16 *)&MEXT_PREF(m);
3737 		int16_t oprefcnt, prefcnt;
3738 
3739 		/*
3740 		 * Paired ref count might be negative in case we lose
3741 		 * against another thread clearing MEXT_PMBUF, in the
3742 		 * event it occurs after the above memory barrier sync.
3743 		 * In that case, just ignore it, as things have been unpaired.
3744 		 */
3745 		do {
3746 			oprefcnt = *addr;
3747 			prefcnt = oprefcnt - 1;
3748 		} while (!OSCompareAndSwap16(oprefcnt, prefcnt, addr));
3749 
3750 		if (prefcnt > 1) {
3751 			return 1;
3752 		} else if (prefcnt == 1) {
3753 			m_ext_free_func_t m_free_func = m_get_ext_free(m);
3754 			VERIFY(m_free_func != NULL);
3755 			(*m_free_func)(m->m_ext.ext_buf,
3756 			    m->m_ext.ext_size, m_get_ext_arg(m));
3757 			return 1;
3758 		} else if (prefcnt == 0) {
3759 			VERIFY(MBUF_IS_PAIRED(m));
3760 
3761 			/*
3762 			 * Restore minref to its natural value, so that
3763 			 * the caller will be able to free the cluster
3764 			 * as appropriate.
3765 			 */
3766 			MEXT_MINREF(m) = 0;
3767 
3768 			/*
3769 			 * Clear MEXT_PMBUF, but leave EXTF_PAIRED intact
3770 			 * as it is immutable.  atomic_set_ptr also acts
3771 			 * as a memory barrier.
3772 			 */
3773 			atomic_set_ptr(&MEXT_PMBUF(m), NULL);
3774 
3775 			switch (m->m_ext.ext_size) {
3776 			case MCLBYTES:
3777 				m_set_ext(m, m_get_rfa(m), NULL, NULL);
3778 				break;
3779 
3780 			case MBIGCLBYTES:
3781 				m_set_ext(m, m_get_rfa(m), m_bigfree, NULL);
3782 				break;
3783 
3784 			case M16KCLBYTES:
3785 				m_set_ext(m, m_get_rfa(m), m_16kfree, NULL);
3786 				break;
3787 
3788 			default:
3789 				VERIFY(0);
3790 				/* NOTREACHED */
3791 			}
3792 		}
3793 	}
3794 
3795 	/*
3796 	 * Tell caller the unpair has occurred, and that the reference
3797 	 * count on the external cluster held for the paired mbuf should
3798 	 * now be dropped.
3799 	 */
3800 	return 0;
3801 }
3802 
3803 struct mbuf *
3804 m_free(struct mbuf *m)
3805 {
3806 	struct mbuf *n = m->m_next;
3807 
3808 	if (m->m_type == MT_FREE) {
3809 		panic("m_free: freeing an already freed mbuf");
3810 	}
3811 
3812 	if (m->m_flags & M_PKTHDR) {
3813 		/* Check for scratch area overflow */
3814 		m_redzone_verify(m);
3815 		/* Free the aux data and tags if there is any */
3816 		m_tag_delete_chain(m, NULL);
3817 
3818 		m_do_tx_compl_callback(m, NULL);
3819 	}
3820 
3821 	if (m->m_flags & M_EXT) {
3822 		uint16_t refcnt;
3823 		uint32_t composite;
3824 		m_ext_free_func_t m_free_func;
3825 
3826 		if (MBUF_IS_PAIRED(m) && m_free_paired(m)) {
3827 			return n;
3828 		}
3829 
3830 		refcnt = m_decref(m);
3831 		composite = (MEXT_FLAGS(m) & EXTF_COMPOSITE);
3832 		m_free_func = m_get_ext_free(m);
3833 
3834 		if (refcnt == MEXT_MINREF(m) && !composite) {
3835 			if (m_free_func == NULL) {
3836 				mcache_free(m_cache(MC_CL), m->m_ext.ext_buf);
3837 			} else if (m_free_func == m_bigfree) {
3838 				mcache_free(m_cache(MC_BIGCL),
3839 				    m->m_ext.ext_buf);
3840 			} else if (m_free_func == m_16kfree) {
3841 				mcache_free(m_cache(MC_16KCL),
3842 				    m->m_ext.ext_buf);
3843 			} else {
3844 				(*m_free_func)(m->m_ext.ext_buf,
3845 				    m->m_ext.ext_size, m_get_ext_arg(m));
3846 			}
3847 			mcache_free(ref_cache, m_get_rfa(m));
3848 			m_set_ext(m, NULL, NULL, NULL);
3849 		} else if (refcnt == MEXT_MINREF(m) && composite) {
3850 			VERIFY(!(MEXT_FLAGS(m) & EXTF_PAIRED));
3851 			VERIFY(m->m_type != MT_FREE);
3852 
3853 			mtype_stat_dec(m->m_type);
3854 			mtype_stat_inc(MT_FREE);
3855 
3856 			m->m_type = MT_FREE;
3857 			m->m_flags = M_EXT;
3858 			m->m_len = 0;
3859 			m->m_next = m->m_nextpkt = NULL;
3860 
3861 			MEXT_FLAGS(m) &= ~EXTF_READONLY;
3862 
3863 			/* "Free" into the intermediate cache */
3864 			if (m_free_func == NULL) {
3865 				mcache_free(m_cache(MC_MBUF_CL), m);
3866 			} else if (m_free_func == m_bigfree) {
3867 				mcache_free(m_cache(MC_MBUF_BIGCL), m);
3868 			} else {
3869 				VERIFY(m_free_func == m_16kfree);
3870 				mcache_free(m_cache(MC_MBUF_16KCL), m);
3871 			}
3872 			return n;
3873 		}
3874 	}
3875 
3876 	if (m->m_type != MT_FREE) {
3877 		mtype_stat_dec(m->m_type);
3878 		mtype_stat_inc(MT_FREE);
3879 	}
3880 
3881 	m->m_type = MT_FREE;
3882 	m->m_flags = m->m_len = 0;
3883 	m->m_next = m->m_nextpkt = NULL;
3884 
3885 	mcache_free(m_cache(MC_MBUF), m);
3886 
3887 	return n;
3888 }
3889 
3890 __private_extern__ struct mbuf *
3891 m_clattach(struct mbuf *m, int type, caddr_t extbuf,
3892     void (*extfree)(caddr_t, u_int, caddr_t), size_t extsize, caddr_t extarg,
3893     int wait, int pair)
3894 {
3895 	struct ext_ref *rfa = NULL;
3896 
3897 	/*
3898 	 * If pairing is requested and an existing mbuf is provided, reject
3899 	 * it if it's already been paired to another cluster.  Otherwise,
3900 	 * allocate a new one or free any existing below.
3901 	 */
3902 	if ((m != NULL && MBUF_IS_PAIRED(m)) ||
3903 	    (m == NULL && (m = _M_GETHDR(wait, type)) == NULL)) {
3904 		return NULL;
3905 	}
3906 
3907 	if (m->m_flags & M_EXT) {
3908 		u_int16_t refcnt;
3909 		u_int32_t composite;
3910 		m_ext_free_func_t m_free_func;
3911 
3912 		refcnt = m_decref(m);
3913 		composite = (MEXT_FLAGS(m) & EXTF_COMPOSITE);
3914 		VERIFY(!(MEXT_FLAGS(m) & EXTF_PAIRED) && MEXT_PMBUF(m) == NULL);
3915 		m_free_func = m_get_ext_free(m);
3916 		if (refcnt == MEXT_MINREF(m) && !composite) {
3917 			if (m_free_func == NULL) {
3918 				mcache_free(m_cache(MC_CL), m->m_ext.ext_buf);
3919 			} else if (m_free_func == m_bigfree) {
3920 				mcache_free(m_cache(MC_BIGCL),
3921 				    m->m_ext.ext_buf);
3922 			} else if (m_free_func == m_16kfree) {
3923 				mcache_free(m_cache(MC_16KCL),
3924 				    m->m_ext.ext_buf);
3925 			} else {
3926 				(*m_free_func)(m->m_ext.ext_buf,
3927 				    m->m_ext.ext_size, m_get_ext_arg(m));
3928 			}
3929 			/* Re-use the reference structure */
3930 			rfa = m_get_rfa(m);
3931 		} else if (refcnt == MEXT_MINREF(m) && composite) {
3932 			VERIFY(m->m_type != MT_FREE);
3933 
3934 			mtype_stat_dec(m->m_type);
3935 			mtype_stat_inc(MT_FREE);
3936 
3937 			m->m_type = MT_FREE;
3938 			m->m_flags = M_EXT;
3939 			m->m_len = 0;
3940 			m->m_next = m->m_nextpkt = NULL;
3941 
3942 			MEXT_FLAGS(m) &= ~EXTF_READONLY;
3943 
3944 			/* "Free" into the intermediate cache */
3945 			if (m_free_func == NULL) {
3946 				mcache_free(m_cache(MC_MBUF_CL), m);
3947 			} else if (m_free_func == m_bigfree) {
3948 				mcache_free(m_cache(MC_MBUF_BIGCL), m);
3949 			} else {
3950 				VERIFY(m_free_func == m_16kfree);
3951 				mcache_free(m_cache(MC_MBUF_16KCL), m);
3952 			}
3953 			/*
3954 			 * Allocate a new mbuf, since we didn't divorce
3955 			 * the composite mbuf + cluster pair above.
3956 			 */
3957 			if ((m = _M_GETHDR(wait, type)) == NULL) {
3958 				return NULL;
3959 			}
3960 		}
3961 	}
3962 
3963 	if (rfa == NULL &&
3964 	    (rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) {
3965 		m_free(m);
3966 		return NULL;
3967 	}
3968 
3969 	if (!pair) {
3970 		MEXT_INIT(m, extbuf, extsize, extfree, extarg, rfa,
3971 		    0, 1, 0, 0, 0, NULL);
3972 	} else {
3973 		MEXT_INIT(m, extbuf, extsize, extfree, (caddr_t)m, rfa,
3974 		    1, 1, 1, EXTF_PAIRED, 0, m);
3975 	}
3976 
3977 	return m;
3978 }
3979 
3980 /*
3981  * Perform `fast' allocation of mbuf clusters from a cache of recently-freed
3982  * clusters. (If the cache is empty, new clusters are allocated en masse.)
3983  */
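/*
 * Illustrative call (hypothetical): a driver wanting a 2KB-cluster
 * packet header mbuf without blocking might use
 *
 *	struct mbuf *m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
 */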
3984 struct mbuf *
3985 m_getcl(int wait, int type, int flags)
3986 {
3987 	struct mbuf *m;
3988 	int mcflags = MSLEEPF(wait);
3989 	int hdr = (flags & M_PKTHDR);
3990 
3991 	/* Is this due to a non-blocking retry?  If so, then try harder */
3992 	if (mcflags & MCR_NOSLEEP) {
3993 		mcflags |= MCR_TRYHARD;
3994 	}
3995 
3996 	m = mcache_alloc(m_cache(MC_MBUF_CL), mcflags);
3997 	if (m != NULL) {
3998 		u_int16_t flag;
3999 		struct ext_ref *rfa;
4000 		void *cl;
4001 
4002 		VERIFY(m->m_type == MT_FREE && m->m_flags == M_EXT);
4003 		cl = m->m_ext.ext_buf;
4004 		rfa = m_get_rfa(m);
4005 
4006 		ASSERT(cl != NULL && rfa != NULL);
4007 		VERIFY(MBUF_IS_COMPOSITE(m) && m_get_ext_free(m) == NULL);
4008 
4009 		flag = MEXT_FLAGS(m);
4010 
4011 		MBUF_INIT(m, hdr, type);
4012 		MBUF_CL_INIT(m, cl, rfa, 1, flag);
4013 
4014 		mtype_stat_inc(type);
4015 		mtype_stat_dec(MT_FREE);
4016 	}
4017 	return m;
4018 }
4019 
4020 /* m_mclget() adds an mbuf cluster to a normal mbuf */
4021 struct mbuf *
4022 m_mclget(struct mbuf *m, int wait)
4023 {
4024 	struct ext_ref *rfa;
4025 
4026 	if ((rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) {
4027 		return m;
4028 	}
4029 
4030 	m->m_ext.ext_buf = m_mclalloc(wait);
4031 	if (m->m_ext.ext_buf != NULL) {
4032 		MBUF_CL_INIT(m, m->m_ext.ext_buf, rfa, 1, 0);
4033 	} else {
4034 		mcache_free(ref_cache, rfa);
4035 	}
4036 	return m;
4037 }
4038 
4039 /* Allocate an mbuf cluster */
4040 caddr_t
4041 m_mclalloc(int wait)
4042 {
4043 	int mcflags = MSLEEPF(wait);
4044 
4045 	/* Is this due to a non-blocking retry?  If so, then try harder */
4046 	if (mcflags & MCR_NOSLEEP) {
4047 		mcflags |= MCR_TRYHARD;
4048 	}
4049 
4050 	return mcache_alloc(m_cache(MC_CL), mcflags);
4051 }
4052 
4053 /* Free an mbuf cluster */
4054 void
4055 m_mclfree(caddr_t p)
4056 {
4057 	mcache_free(m_cache(MC_CL), p);
4058 }
4059 
4060 /*
4061  * mcl_hasreference() checks if a cluster of an mbuf is referenced by
4062  * another mbuf; see comments in m_incref() regarding EXTF_READONLY.
4063  */
4064 int
4065 m_mclhasreference(struct mbuf *m)
4066 {
4067 	if (!(m->m_flags & M_EXT)) {
4068 		return 0;
4069 	}
4070 
4071 	ASSERT(m_get_rfa(m) != NULL);
4072 
4073 	return (MEXT_FLAGS(m) & EXTF_READONLY) ? 1 : 0;
4074 }
4075 
4076 __private_extern__ caddr_t
4077 m_bigalloc(int wait)
4078 {
4079 	int mcflags = MSLEEPF(wait);
4080 
4081 	/* Is this due to a non-blocking retry?  If so, then try harder */
4082 	if (mcflags & MCR_NOSLEEP) {
4083 		mcflags |= MCR_TRYHARD;
4084 	}
4085 
4086 	return mcache_alloc(m_cache(MC_BIGCL), mcflags);
4087 }
4088 
4089 __private_extern__ void
4090 m_bigfree(caddr_t p, __unused u_int size, __unused caddr_t arg)
4091 {
4092 	mcache_free(m_cache(MC_BIGCL), p);
4093 }
4094 
4095 /* m_mbigget() adds a 4KB mbuf cluster to a normal mbuf */
4096 __private_extern__ struct mbuf *
4097 m_mbigget(struct mbuf *m, int wait)
4098 {
4099 	struct ext_ref *rfa;
4100 
4101 	if ((rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) {
4102 		return m;
4103 	}
4104 
4105 	m->m_ext.ext_buf = m_bigalloc(wait);
4106 	if (m->m_ext.ext_buf != NULL) {
4107 		MBUF_BIGCL_INIT(m, m->m_ext.ext_buf, rfa, 1, 0);
4108 	} else {
4109 		mcache_free(ref_cache, rfa);
4110 	}
4111 	return m;
4112 }
4113 
4114 __private_extern__ caddr_t
4115 m_16kalloc(int wait)
4116 {
4117 	int mcflags = MSLEEPF(wait);
4118 
4119 	/* Is this due to a non-blocking retry?  If so, then try harder */
4120 	if (mcflags & MCR_NOSLEEP) {
4121 		mcflags |= MCR_TRYHARD;
4122 	}
4123 
4124 	return mcache_alloc(m_cache(MC_16KCL), mcflags);
4125 }
4126 
4127 __private_extern__ void
4128 m_16kfree(caddr_t p, __unused u_int size, __unused caddr_t arg)
4129 {
4130 	mcache_free(m_cache(MC_16KCL), p);
4131 }
4132 
4133 /* m_m16kget() adds a 16KB mbuf cluster to a normal mbuf */
4134 __private_extern__ struct mbuf *
4135 m_m16kget(struct mbuf *m, int wait)
4136 {
4137 	struct ext_ref *rfa;
4138 
4139 	if ((rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) {
4140 		return m;
4141 	}
4142 
4143 	m->m_ext.ext_buf = m_16kalloc(wait);
4144 	if (m->m_ext.ext_buf != NULL) {
4145 		MBUF_16KCL_INIT(m, m->m_ext.ext_buf, rfa, 1, 0);
4146 	} else {
4147 		mcache_free(ref_cache, rfa);
4148 	}
4149 	return m;
4150 }
4151 
4152 /*
4153  * "Move" mbuf pkthdr from "from" to "to".
4154  * "from" must have M_PKTHDR set, and "to" must be empty.
4155  */
4156 void
4157 m_copy_pkthdr(struct mbuf *to, struct mbuf *from)
4158 {
4159 	VERIFY(from->m_flags & M_PKTHDR);
4160 
4161 	/* Check for scratch area overflow */
4162 	m_redzone_verify(from);
4163 
4164 	if (to->m_flags & M_PKTHDR) {
4165 		/* Check for scratch area overflow */
4166 		m_redzone_verify(to);
4167 		/* We will be taking over the tags of 'to' */
4168 		m_tag_delete_chain(to, NULL);
4169 	}
4170 	to->m_pkthdr = from->m_pkthdr;          /* especially tags */
4171 	m_classifier_init(from, 0);             /* purge classifier info */
4172 	m_tag_init(from, 1);                    /* purge all tags from src */
4173 	m_scratch_init(from);                   /* clear src scratch area */
4174 	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
4175 	if ((to->m_flags & M_EXT) == 0) {
4176 		to->m_data = to->m_pktdat;
4177 	}
4178 	m_redzone_init(to);                     /* setup red zone on dst */
4179 }
4180 
4181 /*
4182  * Duplicate "from"'s mbuf pkthdr in "to".
4183  * "from" must have M_PKTHDR set, and "to" must be empty.
4184  * In particular, this does a deep copy of the packet tags.
4185  */
4186 int
4187 m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
4188 {
4189 	VERIFY(from->m_flags & M_PKTHDR);
4190 
4191 	/* Check for scratch area overflow */
4192 	m_redzone_verify(from);
4193 
4194 	if (to->m_flags & M_PKTHDR) {
4195 		/* Check for scratch area overflow */
4196 		m_redzone_verify(to);
4197 		/* We will be taking over the tags of 'to' */
4198 		m_tag_delete_chain(to, NULL);
4199 	}
4200 	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
4201 	if ((to->m_flags & M_EXT) == 0) {
4202 		to->m_data = to->m_pktdat;
4203 	}
4204 	to->m_pkthdr = from->m_pkthdr;
4205 	m_redzone_init(to);                     /* setup red zone on dst */
4206 	m_tag_init(to, 0);                      /* preserve dst static tags */
4207 	return m_tag_copy_chain(to, from, how);
4208 }
4209 
4210 void
4211 m_copy_pftag(struct mbuf *to, struct mbuf *from)
4212 {
4213 	memcpy(m_pftag(to), m_pftag(from), sizeof(struct pf_mtag));
4214 #if PF_ECN
4215 	m_pftag(to)->pftag_hdr = NULL;
4216 	m_pftag(to)->pftag_flags &= ~(PF_TAG_HDR_INET | PF_TAG_HDR_INET6);
4217 #endif /* PF_ECN */
4218 }
4219 
4220 void
4221 m_copy_necptag(struct mbuf *to, struct mbuf *from)
4222 {
4223 	memcpy(m_necptag(to), m_necptag(from), sizeof(struct necp_mtag_));
4224 }
4225 
4226 void
4227 m_classifier_init(struct mbuf *m, uint32_t pktf_mask)
4228 {
4229 	VERIFY(m->m_flags & M_PKTHDR);
4230 
4231 	m->m_pkthdr.pkt_proto = 0;
4232 	m->m_pkthdr.pkt_flowsrc = 0;
4233 	m->m_pkthdr.pkt_flowid = 0;
4234 	m->m_pkthdr.pkt_flags &= pktf_mask;     /* caller-defined mask */
4235 	/* preserve service class and interface info for loopback packets */
4236 	if (!(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
4237 		(void) m_set_service_class(m, MBUF_SC_BE);
4238 	}
4239 	if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO)) {
4240 		m->m_pkthdr.pkt_ifainfo = 0;
4241 	}
4242 	/*
4243 	 * Preserve timestamp if requested
4244 	 */
4245 	if (!(m->m_pkthdr.pkt_flags & PKTF_TS_VALID)) {
4246 		m->m_pkthdr.pkt_timestamp = 0;
4247 	}
4248 }
4249 
4250 void
4251 m_copy_classifier(struct mbuf *to, struct mbuf *from)
4252 {
4253 	VERIFY(to->m_flags & M_PKTHDR);
4254 	VERIFY(from->m_flags & M_PKTHDR);
4255 
4256 	to->m_pkthdr.pkt_proto = from->m_pkthdr.pkt_proto;
4257 	to->m_pkthdr.pkt_flowsrc = from->m_pkthdr.pkt_flowsrc;
4258 	to->m_pkthdr.pkt_flowid = from->m_pkthdr.pkt_flowid;
4259 	to->m_pkthdr.pkt_flags = from->m_pkthdr.pkt_flags;
4260 	to->m_pkthdr.pkt_ext_flags = from->m_pkthdr.pkt_ext_flags;
4261 	(void) m_set_service_class(to, from->m_pkthdr.pkt_svc);
4262 	to->m_pkthdr.pkt_ifainfo  = from->m_pkthdr.pkt_ifainfo;
4263 }
4264 
4265 /*
4266  * Return a list of mbuf hdrs that point to clusters.  Try for num_needed;
4267  * if wantall is not set, return however many are available.  Set up the
4268  * first num_with_pkthdrs with mbuf hdrs configured as packet headers; these
4269  * are chained on the m_nextpkt field.  Any packets requested beyond this
4270  * are chained onto the last packet header's m_next field.  The size of
4271  * the cluster is controlled by the parameter bufsize.
4272  */
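/*
 * Illustrative call (hypothetical): ask for up to 32 2KB-cluster
 * packets, taking however many are available without blocking:
 *
 *	unsigned int n = 32;
 *	struct mbuf *top = m_getpackets_internal(&n, 32, M_DONTWAIT,
 *	    0, m_maxsize(MC_CL));
 *
 * On return, n holds the number actually allocated.
 */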
4273 __private_extern__ struct mbuf *
4274 m_getpackets_internal(unsigned int *num_needed, int num_with_pkthdrs,
4275     int wait, int wantall, size_t bufsize)
4276 {
4277 	struct mbuf *m;
4278 	struct mbuf **np, *top;
4279 	unsigned int pnum, needed = *num_needed;
4280 	mcache_obj_t *mp_list = NULL;
4281 	int mcflags = MSLEEPF(wait);
4282 	u_int16_t flag;
4283 	struct ext_ref *rfa;
4284 	mcache_t *cp;
4285 	void *cl;
4286 
4287 	ASSERT(bufsize == m_maxsize(MC_CL) ||
4288 	    bufsize == m_maxsize(MC_BIGCL) ||
4289 	    bufsize == m_maxsize(MC_16KCL));
4290 
4291 	/*
4292 	 * Caller must first check for njcl because this
4293 	 * routine is internal and not exposed/used via KPI.
4294 	 */
4295 	VERIFY(bufsize != m_maxsize(MC_16KCL) || njcl > 0);
4296 
4297 	top = NULL;
4298 	np = &top;
4299 	pnum = 0;
4300 
4301 	/*
4302 	 * The caller doesn't want all the requested buffers; only some.
4303 	 * Try hard to get what we can, but don't block.  This effectively
4304 	 * overrides MCR_SLEEP, since this thread will not go to sleep
4305 	 * if we can't get all the buffers.
4306 	 */
4307 	if (!wantall || (mcflags & MCR_NOSLEEP)) {
4308 		mcflags |= MCR_TRYHARD;
4309 	}
4310 
4311 	/* Allocate the composite mbuf + cluster elements from the cache */
4312 	if (bufsize == m_maxsize(MC_CL)) {
4313 		cp = m_cache(MC_MBUF_CL);
4314 	} else if (bufsize == m_maxsize(MC_BIGCL)) {
4315 		cp = m_cache(MC_MBUF_BIGCL);
4316 	} else {
4317 		cp = m_cache(MC_MBUF_16KCL);
4318 	}
4319 	needed = mcache_alloc_ext(cp, &mp_list, needed, mcflags);
4320 
4321 	for (pnum = 0; pnum < needed; pnum++) {
4322 		m = (struct mbuf *)mp_list;
4323 		mp_list = mp_list->obj_next;
4324 
4325 		VERIFY(m->m_type == MT_FREE && m->m_flags == M_EXT);
4326 		cl = m->m_ext.ext_buf;
4327 		rfa = m_get_rfa(m);
4328 
4329 		ASSERT(cl != NULL && rfa != NULL);
4330 		VERIFY(MBUF_IS_COMPOSITE(m));
4331 
4332 		flag = MEXT_FLAGS(m);
4333 
4334 		MBUF_INIT(m, num_with_pkthdrs, MT_DATA);
4335 		if (bufsize == m_maxsize(MC_16KCL)) {
4336 			MBUF_16KCL_INIT(m, cl, rfa, 1, flag);
4337 		} else if (bufsize == m_maxsize(MC_BIGCL)) {
4338 			MBUF_BIGCL_INIT(m, cl, rfa, 1, flag);
4339 		} else {
4340 			MBUF_CL_INIT(m, cl, rfa, 1, flag);
4341 		}
4342 
4343 		if (num_with_pkthdrs > 0) {
4344 			--num_with_pkthdrs;
4345 		}
4346 
4347 		*np = m;
4348 		if (num_with_pkthdrs > 0) {
4349 			np = &m->m_nextpkt;
4350 		} else {
4351 			np = &m->m_next;
4352 		}
4353 	}
4354 	ASSERT(pnum != *num_needed || mp_list == NULL);
4355 	if (mp_list != NULL) {
4356 		mcache_free_ext(cp, mp_list);
4357 	}
4358 
4359 	if (pnum > 0) {
4360 		mtype_stat_add(MT_DATA, pnum);
4361 		mtype_stat_sub(MT_FREE, pnum);
4362 	}
4363 
4364 	if (wantall && (pnum != *num_needed)) {
4365 		if (top != NULL) {
4366 			m_freem_list(top);
4367 		}
4368 		return NULL;
4369 	}
4370 
4371 	if (pnum > *num_needed) {
4372 		printf("%s: File a radar related to <rdar://10146739>. "
4373 		    "needed = %u, pnum = %u, num_needed = %u\n",
4374 		    __func__, needed, pnum, *num_needed);
4375 	}
4376 
4377 	*num_needed = pnum;
4378 	return top;
4379 }
4380 
4381 /*
4382  * Return a list of mbufs linked by m_nextpkt.  Try for numlist, and if
4383  * wantall is not set, return however many are available.  The size of
4384  * each mbuf in the list is controlled by the parameter packetlen.  Each
4385  * mbuf of the list may have a chain of mbufs linked by m_next; each mbuf
4386  * in the chain is called a segment.  If maxsegments is not null and the
4387  * value pointed to is not zero, it specifies the maximum number of segments
4388  * allowed per chain of mbufs.  If maxsegments is null, or the value pointed
4389  * to is zero, the caller has no restriction on the number of segments.
4390  * The actual number of segments of an mbuf chain is returned in the value
4391  * pointed to by maxsegments.
4392  */
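/*
 * Worked example (hypothetical numbers): packetlen = 20KB with
 * wantsize == 0 and njcl == 0 selects bufsize = m_maxsize(MC_BIGCL),
 * so nsegs = ((20480 - 1) >> MBIGCLSHIFT) + 1 = 5 segments per chain.
 */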
4393 __private_extern__ struct mbuf *
4394 m_allocpacket_internal(unsigned int *numlist, size_t packetlen,
4395     unsigned int *maxsegments, int wait, int wantall, size_t wantsize)
4396 {
4397 	struct mbuf **np, *top, *first = NULL;
4398 	size_t bufsize, r_bufsize;
4399 	unsigned int num = 0;
4400 	unsigned int nsegs = 0;
4401 	unsigned int needed, resid;
4402 	int mcflags = MSLEEPF(wait);
4403 	mcache_obj_t *mp_list = NULL, *rmp_list = NULL;
4404 	mcache_t *cp = NULL, *rcp = NULL;
4405 
4406 	if (*numlist == 0) {
4407 		return NULL;
4408 	}
4409 
4410 	top = NULL;
4411 	np = &top;
4412 
4413 	if (wantsize == 0) {
4414 		if (packetlen <= MINCLSIZE) {
4415 			bufsize = packetlen;
4416 		} else if (packetlen > m_maxsize(MC_CL)) {
4417 			/* Use 4KB if jumbo cluster pool isn't available */
4418 			if (packetlen <= m_maxsize(MC_BIGCL) || njcl == 0) {
4419 				bufsize = m_maxsize(MC_BIGCL);
4420 			} else {
4421 				bufsize = m_maxsize(MC_16KCL);
4422 			}
4423 		} else {
4424 			bufsize = m_maxsize(MC_CL);
4425 		}
4426 	} else if (wantsize == m_maxsize(MC_CL) ||
4427 	    wantsize == m_maxsize(MC_BIGCL) ||
4428 	    (wantsize == m_maxsize(MC_16KCL) && njcl > 0)) {
4429 		bufsize = wantsize;
4430 	} else {
4431 		*numlist = 0;
4432 		return NULL;
4433 	}
4434 
4435 	if (bufsize <= MHLEN) {
4436 		nsegs = 1;
4437 	} else if (bufsize <= MINCLSIZE) {
4438 		if (maxsegments != NULL && *maxsegments == 1) {
4439 			bufsize = m_maxsize(MC_CL);
4440 			nsegs = 1;
4441 		} else {
4442 			nsegs = 2;
4443 		}
4444 	} else if (bufsize == m_maxsize(MC_16KCL)) {
4445 		VERIFY(njcl > 0);
4446 		nsegs = ((packetlen - 1) >> M16KCLSHIFT) + 1;
4447 	} else if (bufsize == m_maxsize(MC_BIGCL)) {
4448 		nsegs = ((packetlen - 1) >> MBIGCLSHIFT) + 1;
4449 	} else {
4450 		nsegs = ((packetlen - 1) >> MCLSHIFT) + 1;
4451 	}
4452 	if (maxsegments != NULL) {
4453 		if (*maxsegments && nsegs > *maxsegments) {
4454 			*maxsegments = nsegs;
4455 			*numlist = 0;
4456 			return NULL;
4457 		}
4458 		*maxsegments = nsegs;
4459 	}
4460 
4461 	/*
4462 	 * The caller doesn't want all the requested buffers; only some.
4463 	 * Try hard to get what we can, but don't block.  This effectively
4464 	 * overrides MCR_SLEEP, since this thread will not go to sleep
4465 	 * if we can't get all the buffers.
4466 	 */
4467 	if (!wantall || (mcflags & MCR_NOSLEEP)) {
4468 		mcflags |= MCR_TRYHARD;
4469 	}
4470 
4471 	/*
4472 	 * Simple case where all elements in the lists/chains are mbufs.
4473 	 * Unless bufsize is greater than MHLEN, each segment chain is made
4474 	 * up of exactly 1 mbuf.  Otherwise, each segment chain is made up
4475 	 * of 2 mbufs; the second one is used for the residual data, i.e.
4476 	 * the remaining data that cannot fit into the first mbuf.
4477 	 */
4478 	if (bufsize <= MINCLSIZE) {
4479 		/* Allocate the elements in one shot from the mbuf cache */
4480 		ASSERT(bufsize <= MHLEN || nsegs == 2);
4481 		cp = m_cache(MC_MBUF);
4482 		needed = mcache_alloc_ext(cp, &mp_list,
4483 		    (*numlist) * nsegs, mcflags);
4484 
4485 		/*
4486 		 * The number of elements must be even if we are to use an
4487 		 * mbuf (instead of a cluster) to store the residual data.
4488 		 * If we couldn't allocate the requested number of mbufs,
4489 		 * trim the number down (if it's odd) in order to avoid
4490 		 * creating a partial segment chain.
4491 		 */
4492 		if (bufsize > MHLEN && (needed & 0x1)) {
4493 			needed--;
4494 		}
4495 
4496 		while (num < needed) {
4497 			struct mbuf *m;
4498 
4499 			m = (struct mbuf *)mp_list;
4500 			mp_list = mp_list->obj_next;
4501 			ASSERT(m != NULL);
4502 
4503 			MBUF_INIT(m, 1, MT_DATA);
4504 			num++;
4505 			if (bufsize > MHLEN) {
4506 				/* A second mbuf for this segment chain */
4507 				m->m_next = (struct mbuf *)mp_list;
4508 				mp_list = mp_list->obj_next;
4509 				ASSERT(m->m_next != NULL);
4510 
4511 				MBUF_INIT(m->m_next, 0, MT_DATA);
4512 				num++;
4513 			}
4514 			*np = m;
4515 			np = &m->m_nextpkt;
4516 		}
4517 		ASSERT(num != *numlist || mp_list == NULL);
4518 
4519 		if (num > 0) {
4520 			mtype_stat_add(MT_DATA, num);
4521 			mtype_stat_sub(MT_FREE, num);
4522 		}
4523 		num /= nsegs;
4524 
4525 		/* We've got them all; return to caller */
4526 		if (num == *numlist) {
4527 			return top;
4528 		}
4529 
4530 		goto fail;
4531 	}
4532 
4533 	/*
4534 	 * Complex cases where elements are made up of one or more composite
4535 	 * mbufs + cluster, depending on packetlen.  Each N-segment chain can
4536 	 * be illustrated as follows:
4537 	 *
4538 	 * [mbuf + cluster 1] [mbuf + cluster 2] ... [mbuf + cluster N]
4539 	 *
4540 	 * Every composite mbuf + cluster element comes from the intermediate
4541 	 * cache (either MC_MBUF_CL or MC_MBUF_BIGCL).  For space efficiency,
4542 	 * the last composite element will come from the MC_MBUF_CL cache,
4543 	 * unless the residual data is larger than 2KB where we use the
4544 	 * big cluster composite cache (MC_MBUF_BIGCL) instead.  Residual
4545 	 * data is defined as extra data beyond the first element that cannot
4546 	 * fit into the previous element, i.e. there is no residual data if
4547 	 * the chain only has 1 segment.
4548 	 */
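	/*
	 * Worked example of the above (hypothetical numbers, assuming
	 * wantsize == 0 and njcl == 0): a 9KB packet over 4KB clusters
	 * leaves resid = 9216 % 4096 = 1024 bytes, which fits a 2KB
	 * cluster, so each chain becomes two MC_MBUF_BIGCL elements
	 * plus one MC_MBUF_CL element.
	 */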
4549 	r_bufsize = bufsize;
4550 	resid = packetlen > bufsize ? packetlen % bufsize : 0;
4551 	if (resid > 0) {
4552 		/* There is residual data; figure out the cluster size */
4553 		if (wantsize == 0 && packetlen > MINCLSIZE) {
4554 			/*
4555 			 * Caller didn't request that all of the segments
4556 			 * in the chain use the same cluster size; use the
4557 			 * smaller of the cluster sizes.
4558 			 */
4559 			if (njcl > 0 && resid > m_maxsize(MC_BIGCL)) {
4560 				r_bufsize = m_maxsize(MC_16KCL);
4561 			} else if (resid > m_maxsize(MC_CL)) {
4562 				r_bufsize = m_maxsize(MC_BIGCL);
4563 			} else {
4564 				r_bufsize = m_maxsize(MC_CL);
4565 			}
4566 		} else {
4567 			/* Use the same cluster size as the other segments */
4568 			resid = 0;
4569 		}
4570 	}
4571 
4572 	needed = *numlist;
4573 	if (resid > 0) {
4574 		/*
4575 		 * Attempt to allocate composite mbuf + cluster elements for
4576 		 * the residual data in each chain; record the number of such
4577 		 * elements that can be allocated so that we know how many
4578 		 * segment chains we can afford to create.
4579 		 */
4580 		if (r_bufsize <= m_maxsize(MC_CL)) {
4581 			rcp = m_cache(MC_MBUF_CL);
4582 		} else if (r_bufsize <= m_maxsize(MC_BIGCL)) {
4583 			rcp = m_cache(MC_MBUF_BIGCL);
4584 		} else {
4585 			rcp = m_cache(MC_MBUF_16KCL);
4586 		}
4587 		needed = mcache_alloc_ext(rcp, &rmp_list, *numlist, mcflags);
4588 
4589 		if (needed == 0) {
4590 			goto fail;
4591 		}
4592 
4593 		/* This is temporarily reduced for calculation */
4594 		ASSERT(nsegs > 1);
4595 		nsegs--;
4596 	}
4597 
4598 	/*
4599 	 * Attempt to allocate the rest of the composite mbuf + cluster
4600 	 * elements for the number of segment chains that we need.
4601 	 */
4602 	if (bufsize <= m_maxsize(MC_CL)) {
4603 		cp = m_cache(MC_MBUF_CL);
4604 	} else if (bufsize <= m_maxsize(MC_BIGCL)) {
4605 		cp = m_cache(MC_MBUF_BIGCL);
4606 	} else {
4607 		cp = m_cache(MC_MBUF_16KCL);
4608 	}
4609 	needed = mcache_alloc_ext(cp, &mp_list, needed * nsegs, mcflags);
4610 
4611 	/* Round it down to avoid creating a partial segment chain */
4612 	needed = (needed / nsegs) * nsegs;
4613 	if (needed == 0) {
4614 		goto fail;
4615 	}
4616 
4617 	if (resid > 0) {
4618 		/*
4619 		 * We're about to construct the chain(s); take into account
4620 		 * the number of segments we have created above to hold the
4621 		 * residual data for each chain, as well as restore the
4622 		 * original count of segments per chain.
4623 		 */
4624 		ASSERT(nsegs > 0);
4625 		needed += needed / nsegs;
4626 		nsegs++;
4627 	}
4628 
4629 	for (;;) {
4630 		struct mbuf *m;
4631 		u_int16_t flag;
4632 		struct ext_ref *rfa;
4633 		void *cl;
4634 		int pkthdr;
4635 		m_ext_free_func_t m_free_func;
4636 
4637 		++num;
4638 		if (nsegs == 1 || (num % nsegs) != 0 || resid == 0) {
4639 			m = (struct mbuf *)mp_list;
4640 			mp_list = mp_list->obj_next;
4641 		} else {
4642 			m = (struct mbuf *)rmp_list;
4643 			rmp_list = rmp_list->obj_next;
4644 		}
4645 		m_free_func = m_get_ext_free(m);
4646 		ASSERT(m != NULL);
4647 		VERIFY(m->m_type == MT_FREE && m->m_flags == M_EXT);
4648 		VERIFY(m_free_func == NULL || m_free_func == m_bigfree ||
4649 		    m_free_func == m_16kfree);
4650 
4651 		cl = m->m_ext.ext_buf;
4652 		rfa = m_get_rfa(m);
4653 
4654 		ASSERT(cl != NULL && rfa != NULL);
4655 		VERIFY(MBUF_IS_COMPOSITE(m));
4656 
4657 		flag = MEXT_FLAGS(m);
4658 
4659 		pkthdr = (nsegs == 1 || (num % nsegs) == 1);
4660 		if (pkthdr) {
4661 			first = m;
4662 		}
4663 		MBUF_INIT(m, pkthdr, MT_DATA);
4664 		if (m_free_func == m_16kfree) {
4665 			MBUF_16KCL_INIT(m, cl, rfa, 1, flag);
4666 		} else if (m_free_func == m_bigfree) {
4667 			MBUF_BIGCL_INIT(m, cl, rfa, 1, flag);
4668 		} else {
4669 			MBUF_CL_INIT(m, cl, rfa, 1, flag);
4670 		}
4671 
4672 		*np = m;
4673 		if ((num % nsegs) == 0) {
4674 			np = &first->m_nextpkt;
4675 		} else {
4676 			np = &m->m_next;
4677 		}
4678 
4679 		if (num == needed) {
4680 			break;
4681 		}
4682 	}
4683 
4684 	if (num > 0) {
4685 		mtype_stat_add(MT_DATA, num);
4686 		mtype_stat_sub(MT_FREE, num);
4687 	}
4688 
4689 	num /= nsegs;
4690 
4691 	/* We've got them all; return to caller */
4692 	if (num == *numlist) {
4693 		ASSERT(mp_list == NULL && rmp_list == NULL);
4694 		return top;
4695 	}
4696 
4697 fail:
4698 	/* Free up what's left of the above */
4699 	if (mp_list != NULL) {
4700 		mcache_free_ext(cp, mp_list);
4701 	}
4702 	if (rmp_list != NULL) {
4703 		mcache_free_ext(rcp, rmp_list);
4704 	}
4705 	if (wantall && top != NULL) {
4706 		m_freem_list(top);
4707 		*numlist = 0;
4708 		return NULL;
4709 	}
4710 	*numlist = num;
4711 	return top;
4712 }
4713 
4714 /*
4715  * Best effort to get an mbuf cluster + pkthdr.  Used by drivers to allocate
4716  * packets on the receive ring.
4717  */
4718 __private_extern__ struct mbuf *
4719 m_getpacket_how(int wait)
4720 {
4721 	unsigned int num_needed = 1;
4722 
4723 	return m_getpackets_internal(&num_needed, 1, wait, 1,
4724 	           m_maxsize(MC_CL));
4725 }
4726 
4727 /*
4728  * Best effort to get an mbuf cluster + pkthdr.  Used by drivers to allocate
4729  * packets on the receive ring.
4730  */
4731 struct mbuf *
4732 m_getpacket(void)
4733 {
4734 	unsigned int num_needed = 1;
4735 
4736 	return m_getpackets_internal(&num_needed, 1, M_WAIT, 1,
4737 	           m_maxsize(MC_CL));
4738 }
4739 
4740 /*
4741  * Return a list of mbuf hdrs that point to clusters.  Try for num_needed;
4742  * if this can't be met, return however many are available.  Set up the
4743  * first num_with_pkthdrs with mbuf hdrs configured as packet headers.  These
4744  * are chained on the m_nextpkt field.  Any packets requested beyond this are
4745  * chained onto the last packet header's m_next field.
4746  */
4747 struct mbuf *
4748 m_getpackets(int num_needed, int num_with_pkthdrs, int how)
4749 {
4750 	unsigned int n = num_needed;
4751 
4752 	return m_getpackets_internal(&n, num_with_pkthdrs, how, 0,
4753 	           m_maxsize(MC_CL));
4754 }
4755 
4756 /*
4757  * Return a list of mbuf hdrs set up as packet hdrs chained together
4758  * on the m_nextpkt field
4759  */
4760 struct mbuf *
4761 m_getpackethdrs(int num_needed, int how)
4762 {
4763 	struct mbuf *m;
4764 	struct mbuf **np, *top;
4765 
4766 	top = NULL;
4767 	np = &top;
4768 
4769 	while (num_needed--) {
4770 		m = _M_RETRYHDR(how, MT_DATA);
4771 		if (m == NULL) {
4772 			break;
4773 		}
4774 
4775 		*np = m;
4776 		np = &m->m_nextpkt;
4777 	}
4778 
4779 	return top;
4780 }
4781 
4782 /*
4783  * Free an mbuf list (m_nextpkt) while following m_next.  Returns the count
4784  * of packets freed.  Used by the drivers.
4785  */
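/*
 * The loop below defers the actual cache frees: mbufs, clusters and
 * ref structures are strung onto local lists and handed to
 * mcache_free_ext() in batches at the end, touching each cache once
 * rather than once per object.
 */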
4786 int
4787 m_freem_list(struct mbuf *m)
4788 {
4789 	struct mbuf *nextpkt;
4790 	mcache_obj_t *mp_list = NULL;
4791 	mcache_obj_t *mcl_list = NULL;
4792 	mcache_obj_t *mbc_list = NULL;
4793 	mcache_obj_t *m16k_list = NULL;
4794 	mcache_obj_t *m_mcl_list = NULL;
4795 	mcache_obj_t *m_mbc_list = NULL;
4796 	mcache_obj_t *m_m16k_list = NULL;
4797 	mcache_obj_t *ref_list = NULL;
4798 	int pktcount = 0;
4799 	int mt_free = 0, mt_data = 0, mt_header = 0, mt_soname = 0, mt_tag = 0;
4800 
4801 	while (m != NULL) {
4802 		pktcount++;
4803 
4804 		nextpkt = m->m_nextpkt;
4805 		m->m_nextpkt = NULL;
4806 
4807 		while (m != NULL) {
4808 			struct mbuf *next = m->m_next;
4809 			mcache_obj_t *o, *rfa;
4810 			u_int32_t composite;
4811 			u_int16_t refcnt;
4812 			m_ext_free_func_t m_free_func;
4813 
4814 			if (m->m_type == MT_FREE) {
4815 				panic("m_free: freeing an already freed mbuf");
4816 			}
4817 
4818 			if (m->m_flags & M_PKTHDR) {
4819 				/* Check for scratch area overflow */
4820 				m_redzone_verify(m);
4821 				/* Free the aux data and tags if there is any */
4822 				m_tag_delete_chain(m, NULL);
4823 			}
4824 
4825 			if (!(m->m_flags & M_EXT)) {
4826 				mt_free++;
4827 				goto simple_free;
4828 			}
4829 
4830 			if (MBUF_IS_PAIRED(m) && m_free_paired(m)) {
4831 				m = next;
4832 				continue;
4833 			}
4834 
4835 			mt_free++;
4836 
4837 			o = (mcache_obj_t *)(void *)m->m_ext.ext_buf;
4838 			refcnt = m_decref(m);
4839 			composite = (MEXT_FLAGS(m) & EXTF_COMPOSITE);
4840 			m_free_func = m_get_ext_free(m);
4841 			if (refcnt == MEXT_MINREF(m) && !composite) {
4842 				if (m_free_func == NULL) {
4843 					o->obj_next = mcl_list;
4844 					mcl_list = o;
4845 				} else if (m_free_func == m_bigfree) {
4846 					o->obj_next = mbc_list;
4847 					mbc_list = o;
4848 				} else if (m_free_func == m_16kfree) {
4849 					o->obj_next = m16k_list;
4850 					m16k_list = o;
4851 				} else {
4852 					(*(m_free_func))((caddr_t)o,
4853 					    m->m_ext.ext_size,
4854 					    m_get_ext_arg(m));
4855 				}
4856 				rfa = (mcache_obj_t *)(void *)m_get_rfa(m);
4857 				rfa->obj_next = ref_list;
4858 				ref_list = rfa;
4859 				m_set_ext(m, NULL, NULL, NULL);
4860 			} else if (refcnt == MEXT_MINREF(m) && composite) {
4861 				VERIFY(!(MEXT_FLAGS(m) & EXTF_PAIRED));
4862 				VERIFY(m->m_type != MT_FREE);
4863 				/*
4864 				 * Amortize the costs of atomic operations
4865 				 * by doing them at the end, if possible.
4866 				 */
4867 				if (m->m_type == MT_DATA) {
4868 					mt_data++;
4869 				} else if (m->m_type == MT_HEADER) {
4870 					mt_header++;
4871 				} else if (m->m_type == MT_SONAME) {
4872 					mt_soname++;
4873 				} else if (m->m_type == MT_TAG) {
4874 					mt_tag++;
4875 				} else {
4876 					mtype_stat_dec(m->m_type);
4877 				}
4878 
4879 				m->m_type = MT_FREE;
4880 				m->m_flags = M_EXT;
4881 				m->m_len = 0;
4882 				m->m_next = m->m_nextpkt = NULL;
4883 
4884 				MEXT_FLAGS(m) &= ~EXTF_READONLY;
4885 
4886 				/* "Free" into the intermediate cache */
4887 				o = (mcache_obj_t *)m;
4888 				if (m_free_func == NULL) {
4889 					o->obj_next = m_mcl_list;
4890 					m_mcl_list = o;
4891 				} else if (m_free_func == m_bigfree) {
4892 					o->obj_next = m_mbc_list;
4893 					m_mbc_list = o;
4894 				} else {
4895 					VERIFY(m_free_func == m_16kfree);
4896 					o->obj_next = m_m16k_list;
4897 					m_m16k_list = o;
4898 				}
4899 				m = next;
4900 				continue;
4901 			}
4902 simple_free:
4903 			/*
4904 			 * Amortize the costs of atomic operations
4905 			 * by doing them at the end, if possible.
4906 			 */
4907 			if (m->m_type == MT_DATA) {
4908 				mt_data++;
4909 			} else if (m->m_type == MT_HEADER) {
4910 				mt_header++;
4911 			} else if (m->m_type == MT_SONAME) {
4912 				mt_soname++;
4913 			} else if (m->m_type == MT_TAG) {
4914 				mt_tag++;
4915 			} else if (m->m_type != MT_FREE) {
4916 				mtype_stat_dec(m->m_type);
4917 			}
4918 
4919 			m->m_type = MT_FREE;
4920 			m->m_flags = m->m_len = 0;
4921 			m->m_next = m->m_nextpkt = NULL;
4922 
4923 			((mcache_obj_t *)m)->obj_next = mp_list;
4924 			mp_list = (mcache_obj_t *)m;
4925 
4926 			m = next;
4927 		}
4928 
4929 		m = nextpkt;
4930 	}
4931 
4932 	if (mt_free > 0) {
4933 		mtype_stat_add(MT_FREE, mt_free);
4934 	}
4935 	if (mt_data > 0) {
4936 		mtype_stat_sub(MT_DATA, mt_data);
4937 	}
4938 	if (mt_header > 0) {
4939 		mtype_stat_sub(MT_HEADER, mt_header);
4940 	}
4941 	if (mt_soname > 0) {
4942 		mtype_stat_sub(MT_SONAME, mt_soname);
4943 	}
4944 	if (mt_tag > 0) {
4945 		mtype_stat_sub(MT_TAG, mt_tag);
4946 	}
4947 
4948 	if (mp_list != NULL) {
4949 		mcache_free_ext(m_cache(MC_MBUF), mp_list);
4950 	}
4951 	if (mcl_list != NULL) {
4952 		mcache_free_ext(m_cache(MC_CL), mcl_list);
4953 	}
4954 	if (mbc_list != NULL) {
4955 		mcache_free_ext(m_cache(MC_BIGCL), mbc_list);
4956 	}
4957 	if (m16k_list != NULL) {
4958 		mcache_free_ext(m_cache(MC_16KCL), m16k_list);
4959 	}
4960 	if (m_mcl_list != NULL) {
4961 		mcache_free_ext(m_cache(MC_MBUF_CL), m_mcl_list);
4962 	}
4963 	if (m_mbc_list != NULL) {
4964 		mcache_free_ext(m_cache(MC_MBUF_BIGCL), m_mbc_list);
4965 	}
4966 	if (m_m16k_list != NULL) {
4967 		mcache_free_ext(m_cache(MC_MBUF_16KCL), m_m16k_list);
4968 	}
4969 	if (ref_list != NULL) {
4970 		mcache_free_ext(ref_cache, ref_list);
4971 	}
4972 
4973 	return pktcount;
4974 }
4975 
4976 void
4977 m_freem(struct mbuf *m)
4978 {
4979 	while (m != NULL) {
4980 		m = m_free(m);
4981 	}
4982 }
4983 
4984 /*
4985  * Mbuffer utility routines.
4986  */
4987 /*
4988  * Set the m_data pointer of a newly allocated mbuf to place an object of the
4989  * specified size at the end of the mbuf, longword aligned.
4990  *
4991  * NB: Historically, we had M_ALIGN(), MH_ALIGN(), and MEXT_ALIGN() as
4992  * separate macros, each asserting that it was called at the proper moment.
4993  * This required callers to themselves test the storage type and call the
4994  * right one.  Rather than require callers to be aware of those layout
4995  * decisions, we centralize here.
4996  */
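/*
 * Arithmetic sketch (hypothetical sizes): placing a 14-byte object in
 * a plain mbuf with M_SIZE(m) == 224 gives adjust = 210; masking with
 * ~(sizeof(long) - 1) on LP64 advances m_data by 208, keeping it
 * longword aligned.
 */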
4997 void
4998 m_align(struct mbuf *m, int len)
4999 {
5000 	int adjust = 0;
5001 
5002 	/* At this point data must point to start */
5003 	VERIFY(m->m_data == M_START(m));
5004 	VERIFY(len >= 0);
5005 	VERIFY(len <= M_SIZE(m));
5006 	adjust = M_SIZE(m) - len;
5007 	m->m_data += adjust & ~(sizeof(long) - 1);
5008 }
5009 
5010 /*
5011  * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain,
5012  * copy junk along.  Does not adjust packet header length.
5013  */
5014 struct mbuf *
5015 m_prepend(struct mbuf *m, int len, int how)
5016 {
5017 	struct mbuf *mn;
5018 
5019 	_MGET(mn, how, m->m_type);
5020 	if (mn == NULL) {
5021 		m_freem(m);
5022 		return NULL;
5023 	}
5024 	if (m->m_flags & M_PKTHDR) {
5025 		M_COPY_PKTHDR(mn, m);
5026 		m->m_flags &= ~M_PKTHDR;
5027 	}
5028 	mn->m_next = m;
5029 	m = mn;
5030 	if (m->m_flags & M_PKTHDR) {
5031 		VERIFY(len <= MHLEN);
5032 		MH_ALIGN(m, len);
5033 	} else {
5034 		VERIFY(len <= MLEN);
5035 		M_ALIGN(m, len);
5036 	}
5037 	m->m_len = len;
5038 	return m;
5039 }
5040 
5041 /*
5042  * Replacement for old M_PREPEND macro: allocate new mbuf to prepend to
5043  * chain, moving the packet header along, and adjust the length.
5044  */
5045 struct mbuf *
5046 m_prepend_2(struct mbuf *m, int len, int how, int align)
5047 {
5048 	if (M_LEADINGSPACE(m) >= len &&
5049 	    (!align || IS_P2ALIGNED((m->m_data - len), sizeof(u_int32_t)))) {
5050 		m->m_data -= len;
5051 		m->m_len += len;
5052 	} else {
5053 		m = m_prepend(m, len, how);
5054 	}
5055 	if ((m) && (m->m_flags & M_PKTHDR)) {
5056 		m->m_pkthdr.len += len;
5057 	}
5058 	return m;
5059 }
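
/*
 * Usage sketch (illustrative only; the header layout and values are made
 * up, and m is assumed to be a packet header mbuf): prepend an 8-byte
 * protocol header, falling back to a fresh mbuf when the leading space is
 * insufficient or misaligned.
 */
#if 0
struct example_hdr {
	uint32_t eh_type;
	uint32_t eh_len;
};

static struct mbuf *
example_prepend_hdr(struct mbuf *m)
{
	struct example_hdr *eh;

	m = m_prepend_2(m, sizeof(*eh), M_DONTWAIT, 1);
	if (m == NULL) {
		return NULL;	/* the original chain has been freed */
	}
	eh = mtod(m, struct example_hdr *);
	eh->eh_type = htonl(1);
	eh->eh_len = htonl((uint32_t)m->m_pkthdr.len);
	return m;
}
#endif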
5060 
5061 /*
5062  * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
5063  * continuing for "len" bytes.  If len is M_COPYALL, copy to the end of
5064  * the mbuf chain.  The wait parameter is M_WAIT or M_DONTWAIT from the caller.
5065  */
5066 int MCFail;
5067 
5068 struct mbuf *
5069 m_copym_mode(struct mbuf *m, int off0, int len, int wait, uint32_t mode)
5070 {
5071 	struct mbuf *n, *mhdr = NULL, **np;
5072 	int off = off0;
5073 	struct mbuf *top;
5074 	int copyhdr = 0;
5075 
5076 	if (off < 0 || len < 0) {
5077 		panic("m_copym: invalid offset %d or len %d", off, len);
5078 	}
5079 
5080 	VERIFY((mode != M_COPYM_MUST_COPY_HDR &&
5081 	    mode != M_COPYM_MUST_MOVE_HDR) || (m->m_flags & M_PKTHDR));
5082 
5083 	if ((off == 0 && (m->m_flags & M_PKTHDR)) ||
5084 	    mode == M_COPYM_MUST_COPY_HDR || mode == M_COPYM_MUST_MOVE_HDR) {
5085 		mhdr = m;
5086 		copyhdr = 1;
5087 	}
5088 
5089 	while (off >= m->m_len) {
5090 		if (m->m_next == NULL) {
5091 			panic("m_copym: invalid mbuf chain");
5092 		}
5093 		off -= m->m_len;
5094 		m = m->m_next;
5095 	}
5096 	np = &top;
5097 	top = NULL;
5098 
5099 	while (len > 0) {
5100 		if (m == NULL) {
5101 			if (len != M_COPYALL) {
5102 				panic("m_copym: len != M_COPYALL");
5103 			}
5104 			break;
5105 		}
5106 
5107 		if (copyhdr) {
5108 			n = _M_RETRYHDR(wait, m->m_type);
5109 		} else {
5110 			n = _M_RETRY(wait, m->m_type);
5111 		}
5112 		*np = n;
5113 
5114 		if (n == NULL) {
5115 			goto nospace;
5116 		}
5117 
5118 		if (copyhdr != 0) {
5119 			if ((mode == M_COPYM_MOVE_HDR) ||
5120 			    (mode == M_COPYM_MUST_MOVE_HDR)) {
5121 				M_COPY_PKTHDR(n, mhdr);
5122 			} else if ((mode == M_COPYM_COPY_HDR) ||
5123 			    (mode == M_COPYM_MUST_COPY_HDR)) {
5124 				if (m_dup_pkthdr(n, mhdr, wait) == 0) {
5125 					goto nospace;
5126 				}
5127 			}
5128 			if (len == M_COPYALL) {
5129 				n->m_pkthdr.len -= off0;
5130 			} else {
5131 				n->m_pkthdr.len = len;
5132 			}
5133 			copyhdr = 0;
5134 			/*
5135 			 * There is no data to copy from the packet header mbuf
5136 			 * if it is empty or lies entirely before the starting offset.
5137 			 */
5138 			if (mhdr != m) {
5139 				np = &n->m_next;
5140 				continue;
5141 			}
5142 		}
5143 		n->m_len = MIN(len, (m->m_len - off));
5144 		if (m->m_flags & M_EXT) {
5145 			n->m_ext = m->m_ext;
5146 			m_incref(m);
5147 			n->m_data = m->m_data + off;
5148 			n->m_flags |= M_EXT;
5149 		} else {
5150 			/*
5151 			 * Limit to the capacity of the destination
5152 			 */
5153 			if (n->m_flags & M_PKTHDR) {
5154 				n->m_len = MIN(n->m_len, MHLEN);
5155 			} else {
5156 				n->m_len = MIN(n->m_len, MLEN);
5157 			}
5158 
5159 			if (MTOD(n, char *) + n->m_len > ((char *)n) + MSIZE) {
5160 				panic("%s n %p copy overflow",
5161 				    __func__, n);
5162 			}
5163 
5164 			bcopy(MTOD(m, caddr_t) + off, MTOD(n, caddr_t),
5165 			    (unsigned)n->m_len);
5166 		}
5167 		if (len != M_COPYALL) {
5168 			len -= n->m_len;
5169 		}
5170 		off = 0;
5171 		m = m->m_next;
5172 		np = &n->m_next;
5173 	}
5174 
5175 	if (top == NULL) {
5176 		MCFail++;
5177 	}
5178 
5179 	return top;
5180 nospace:
5181 
5182 	m_freem(top);
5183 	MCFail++;
5184 	return NULL;
5185 }
5186 
5187 
5188 struct mbuf *
5189 m_copym(struct mbuf *m, int off0, int len, int wait)
5190 {
5191 	return m_copym_mode(m, off0, len, wait, M_COPYM_MOVE_HDR);
5192 }
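
/*
 * Usage sketch (illustrative only): take a reference-shared copy of a whole
 * packet, e.g. before handing the original down a path that consumes it.
 * Cluster-backed data is shared via m_incref(), not duplicated, so the copy
 * must be treated as read-only; contrast with m_dup() later in this file.
 */
#if 0
static struct mbuf *
example_copy_whole_packet(struct mbuf *m)
{
	return m_copym(m, 0, M_COPYALL, M_DONTWAIT);
}
#endif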
5193 
5194 /*
5195  * Equivalent to m_copym() except that all necessary mbuf headers are
5196  * allocated within this routine as well.  The last mbuf and offset
5197  * accessed are passed out and can be passed back in, to avoid having to
5198  * rescan the entire mbuf list (normally hung off of the socket).
5199  */
5200 struct mbuf *
5201 m_copym_with_hdrs(struct mbuf *m0, int off0, int len0, int wait,
5202     struct mbuf **m_lastm, int *m_off, uint32_t mode)
5203 {
5204 	struct mbuf *m = m0, *n, **np = NULL;
5205 	int off = off0, len = len0;
5206 	struct mbuf *top = NULL;
5207 	int mcflags = MSLEEPF(wait);
5208 	int copyhdr = 0;
5209 	int type = 0;
5210 	mcache_obj_t *list = NULL;
5211 	int needed = 0;
5212 
5213 	if (off == 0 && (m->m_flags & M_PKTHDR)) {
5214 		copyhdr = 1;
5215 	}
5216 
5217 	if (m_lastm != NULL && *m_lastm != NULL) {
5218 		m = *m_lastm;
5219 		off = *m_off;
5220 	} else {
5221 		while (off >= m->m_len) {
5222 			off -= m->m_len;
5223 			m = m->m_next;
5224 		}
5225 	}
5226 
5227 	n = m;
5228 	while (len > 0) {
5229 		needed++;
5230 		ASSERT(n != NULL);
5231 		len -= MIN(len, (n->m_len - ((needed == 1) ? off : 0)));
5232 		n = n->m_next;
5233 	}
5234 	needed++;
5235 	len = len0;
5236 
5237 	/*
5238 	 * If the caller doesn't want to be put to sleep, mark it with
5239 	 * MCR_TRYHARD so that we may reclaim buffers from other places
5240 	 * before giving up.
5241 	 */
5242 	if (mcflags & MCR_NOSLEEP) {
5243 		mcflags |= MCR_TRYHARD;
5244 	}
5245 
5246 	if (mcache_alloc_ext(m_cache(MC_MBUF), &list, needed,
5247 	    mcflags) != needed) {
5248 		goto nospace;
5249 	}
5250 
5251 	needed = 0;
5252 	while (len > 0) {
5253 		n = (struct mbuf *)list;
5254 		list = list->obj_next;
5255 		ASSERT(n != NULL && m != NULL);
5256 
5257 		type = (top == NULL) ? MT_HEADER : m->m_type;
5258 		MBUF_INIT(n, (top == NULL), type);
5259 
5260 		if (top == NULL) {
5261 			top = n;
5262 			np = &top->m_next;
5263 			continue;
5264 		} else {
5265 			needed++;
5266 			*np = n;
5267 		}
5268 
5269 		if (copyhdr) {
5270 			if ((mode == M_COPYM_MOVE_HDR) ||
5271 			    (mode == M_COPYM_MUST_MOVE_HDR)) {
5272 				M_COPY_PKTHDR(n, m);
5273 			} else if ((mode == M_COPYM_COPY_HDR) ||
5274 			    (mode == M_COPYM_MUST_COPY_HDR)) {
5275 				if (m_dup_pkthdr(n, m, wait) == 0) {
5276 					goto nospace;
5277 				}
5278 			}
5279 			n->m_pkthdr.len = len;
5280 			copyhdr = 0;
5281 		}
5282 		n->m_len = MIN(len, (m->m_len - off));
5283 
5284 		if (m->m_flags & M_EXT) {
5285 			n->m_ext = m->m_ext;
5286 			m_incref(m);
5287 			n->m_data = m->m_data + off;
5288 			n->m_flags |= M_EXT;
5289 		} else {
5290 			if (MTOD(n, char *) + n->m_len > ((char *)n) + MSIZE) {
5291 				panic("%s n %p copy overflow",
5292 				    __func__, n);
5293 			}
5294 
5295 			bcopy(MTOD(m, caddr_t) + off, MTOD(n, caddr_t),
5296 			    (unsigned)n->m_len);
5297 		}
5298 		len -= n->m_len;
5299 
5300 		if (len == 0) {
5301 			if (m_lastm != NULL && m_off != NULL) {
5302 				if ((off + n->m_len) == m->m_len) {
5303 					*m_lastm = m->m_next;
5304 					*m_off  = 0;
5305 				} else {
5306 					*m_lastm = m;
5307 					*m_off  = off + n->m_len;
5308 				}
5309 			}
5310 			break;
5311 		}
5312 		off = 0;
5313 		m = m->m_next;
5314 		np = &n->m_next;
5315 	}
5316 
5317 	mtype_stat_inc(MT_HEADER);
5318 	mtype_stat_add(type, needed);
5319 	mtype_stat_sub(MT_FREE, needed + 1);
5320 
5321 	ASSERT(list == NULL);
5322 	return top;
5323 
5324 nospace:
5325 	if (list != NULL) {
5326 		mcache_free_ext(m_cache(MC_MBUF), list);
5327 	}
5328 	if (top != NULL) {
5329 		m_freem(top);
5330 	}
5331 	MCFail++;
5332 	return NULL;
5333 }
5334 
5335 /*
5336  * Copy data from an mbuf chain starting "off" bytes from the beginning,
5337  * continuing for "len" bytes, into the indicated buffer.
5338  */
5339 void
5340 m_copydata(struct mbuf *m, int off, int len, void *vp)
5341 {
5342 	int off0 = off, len0 = len;
5343 	struct mbuf *m0 = m;
5344 	unsigned count;
5345 	char *cp = vp;
5346 
5347 	if (__improbable(off < 0 || len < 0)) {
5348 		panic("%s: invalid offset %d or len %d", __func__, off, len);
5349 		/* NOTREACHED */
5350 	}
5351 
5352 	while (off > 0) {
5353 		if (__improbable(m == NULL)) {
5354 			panic("%s: invalid mbuf chain %p [off %d, len %d]",
5355 			    __func__, m0, off0, len0);
5356 			/* NOTREACHED */
5357 		}
5358 		if (off < m->m_len) {
5359 			break;
5360 		}
5361 		off -= m->m_len;
5362 		m = m->m_next;
5363 	}
5364 	while (len > 0) {
5365 		if (__improbable(m == NULL)) {
5366 			panic("%s: invalid mbuf chain %p [off %d, len %d]",
5367 			    __func__, m0, off0, len0);
5368 			/* NOTREACHED */
5369 		}
5370 		count = MIN(m->m_len - off, len);
5371 		bcopy(MTOD(m, caddr_t) + off, cp, count);
5372 		len -= count;
5373 		cp += count;
5374 		off = 0;
5375 		m = m->m_next;
5376 	}
5377 }
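
/*
 * Usage sketch (illustrative only; assumes a packet header mbuf): extract a
 * 32-bit field that may straddle mbuf boundaries.  The caller must bounds-
 * check first, since m_copydata() panics on a short chain.
 */
#if 0
static int
example_peek_u32(struct mbuf *m, int off, uint32_t *val)
{
	uint32_t v;

	if (off < 0 || off + (int)sizeof(v) > m->m_pkthdr.len) {
		return ERANGE;
	}
	m_copydata(m, off, sizeof(v), &v);
	*val = ntohl(v);
	return 0;
}
#endif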
5378 
5379 /*
5380  * Concatenate mbuf chain n to m.  Both chains must be of the same type
5381  * (e.g. MT_DATA).  Neither chain's m_pkthdr is updated.
5382  */
5383 void
5384 m_cat(struct mbuf *m, struct mbuf *n)
5385 {
5386 	while (m->m_next) {
5387 		m = m->m_next;
5388 	}
5389 	while (n) {
5390 		if ((m->m_flags & M_EXT) ||
5391 		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
5392 			/* just join the two chains */
5393 			m->m_next = n;
5394 			return;
5395 		}
5396 		/* splat the data from one into the other */
5397 		bcopy(MTOD(n, caddr_t), MTOD(m, caddr_t) + m->m_len,
5398 		    (u_int)n->m_len);
5399 		m->m_len += n->m_len;
5400 		n = m_free(n);
5401 	}
5402 }
5403 
5404 void
5405 m_adj(struct mbuf *mp, int req_len)
5406 {
5407 	int len = req_len;
5408 	struct mbuf *m;
5409 	int count;
5410 
5411 	if ((m = mp) == NULL) {
5412 		return;
5413 	}
5414 	if (len >= 0) {
5415 		/*
5416 		 * Trim from head.
5417 		 */
5418 		while (m != NULL && len > 0) {
5419 			if (m->m_len <= len) {
5420 				len -= m->m_len;
5421 				m->m_len = 0;
5422 				m = m->m_next;
5423 			} else {
5424 				m->m_len -= len;
5425 				m->m_data += len;
5426 				len = 0;
5427 			}
5428 		}
5429 		m = mp;
5430 		if (m->m_flags & M_PKTHDR) {
5431 			m->m_pkthdr.len -= (req_len - len);
5432 		}
5433 	} else {
5434 		/*
5435 		 * Trim from tail.  Scan the mbuf chain,
5436 		 * calculating its length and finding the last mbuf.
5437 		 * If the adjustment only affects this mbuf, then just
5438 		 * adjust and return.  Otherwise, rescan and truncate
5439 		 * after the remaining size.
5440 		 */
5441 		len = -len;
5442 		count = 0;
5443 		for (;;) {
5444 			count += m->m_len;
5445 			if (m->m_next == NULL) {
5446 				break;
5447 			}
5448 			m = m->m_next;
5449 		}
5450 		if (m->m_len >= len) {
5451 			m->m_len -= len;
5452 			m = mp;
5453 			if (m->m_flags & M_PKTHDR) {
5454 				m->m_pkthdr.len -= len;
5455 			}
5456 			return;
5457 		}
5458 		count -= len;
5459 		if (count < 0) {
5460 			count = 0;
5461 		}
5462 		/*
5463 		 * Correct length for chain is "count".
5464 		 * Find the mbuf with last data, adjust its length,
5465 		 * and toss data from remaining mbufs on chain.
5466 		 */
5467 		m = mp;
5468 		if (m->m_flags & M_PKTHDR) {
5469 			m->m_pkthdr.len = count;
5470 		}
5471 		for (; m; m = m->m_next) {
5472 			if (m->m_len >= count) {
5473 				m->m_len = count;
5474 				break;
5475 			}
5476 			count -= m->m_len;
5477 		}
5478 		while ((m = m->m_next)) {
5479 			m->m_len = 0;
5480 		}
5481 	}
5482 }
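
/*
 * Usage sketch (illustrative only; the lengths are hypothetical): a positive
 * request trims from the head, a negative one from the tail, and
 * m_pkthdr.len is kept in sync when the leading mbuf has M_PKTHDR set.
 */
#if 0
static void
example_strip_framing(struct mbuf *m, int hdrlen, int trailerlen)
{
	m_adj(m, hdrlen);	/* drop link-level header from the front */
	m_adj(m, -trailerlen);	/* drop trailer/CRC from the back */
}
#endif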
5483 
5484 /*
5485  * Rearrange an mbuf chain so that len bytes are contiguous
5486  * and in the data area of an mbuf (so that mtod and dtom
5487  * will work for a structure of size len).  Returns the resulting
5488  * mbuf chain on success, frees it and returns null on failure.
5489  * If there is room, it will add up to max_protohdr-len extra bytes to the
5490  * contiguous region in an attempt to avoid being called next time.
5491  */
5492 int MPFail;
5493 
5494 struct mbuf *
5495 m_pullup(struct mbuf *n, int len)
5496 {
5497 	struct mbuf *m;
5498 	int count;
5499 	int space;
5500 
5501 	/* check invalid arguments */
5502 	if (n == NULL) {
5503 		panic("%s: n == NULL", __func__);
5504 	}
5505 	if (len < 0) {
5506 		os_log_info(OS_LOG_DEFAULT, "%s: failed negative len %d",
5507 		    __func__, len);
5508 		goto bad;
5509 	}
5510 	if (len > MLEN) {
5511 		os_log_info(OS_LOG_DEFAULT, "%s: failed len %d too big",
5512 		    __func__, len);
5513 		goto bad;
5514 	}
5515 	if ((n->m_flags & M_EXT) == 0 &&
5516 	    n->m_data >= &n->m_dat[MLEN]) {
5517 		os_log_info(OS_LOG_DEFAULT, "%s: m_data out of bounds",
5518 		    __func__);
5519 		goto bad;
5520 	}
5521 
5522 	/*
5523 	 * If first mbuf has no cluster, and has room for len bytes
5524 	 * without shifting current data, pullup into it,
5525 	 * otherwise allocate a new mbuf to prepend to the chain.
5526 	 */
5527 	if ((n->m_flags & M_EXT) == 0 &&
5528 	    len < &n->m_dat[MLEN] - n->m_data && n->m_next != NULL) {
5529 		if (n->m_len >= len) {
5530 			return n;
5531 		}
5532 		m = n;
5533 		n = n->m_next;
5534 		len -= m->m_len;
5535 	} else {
5536 		if (len > MHLEN) {
5537 			goto bad;
5538 		}
5539 		_MGET(m, M_DONTWAIT, n->m_type);
5540 		if (m == NULL) {
5541 			goto bad;
5542 		}
5543 		m->m_len = 0;
5544 		if (n->m_flags & M_PKTHDR) {
5545 			M_COPY_PKTHDR(m, n);
5546 			n->m_flags &= ~M_PKTHDR;
5547 		}
5548 	}
5549 	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
5550 	do {
5551 		count = MIN(MIN(MAX(len, max_protohdr), space), n->m_len);
5552 		bcopy(MTOD(n, caddr_t), MTOD(m, caddr_t) + m->m_len,
5553 		    (unsigned)count);
5554 		len -= count;
5555 		m->m_len += count;
5556 		n->m_len -= count;
5557 		space -= count;
5558 		if (n->m_len != 0) {
5559 			n->m_data += count;
5560 		} else {
5561 			n = m_free(n);
5562 		}
5563 	} while (len > 0 && n != NULL);
5564 	if (len > 0) {
5565 		(void) m_free(m);
5566 		goto bad;
5567 	}
5568 	m->m_next = n;
5569 	return m;
5570 bad:
5571 	m_freem(n);
5572 	MPFail++;
5573 	return NULL;
5574 }
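
/*
 * Usage sketch (illustrative only; struct ip comes from <netinet/ip.h>):
 * the classic input-path pattern.  On failure m_pullup() has already freed
 * the chain, so the caller must not touch the old pointer; on success the
 * first sizeof(struct ip) bytes are contiguous and safe to access via mtod().
 */
#if 0
static void
example_ip_input(struct mbuf *m)
{
	struct ip *ip;

	if (m->m_len < (int)sizeof(struct ip) &&
	    (m = m_pullup(m, sizeof(struct ip))) == NULL) {
		return;
	}
	ip = mtod(m, struct ip *);
	/* ... parse the header ... */
	m_freem(m);
}
#endif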
5575 
5576 /*
5577  * Like m_pullup(), except a new mbuf is always allocated, and we allow
5578  * the amount of empty space before the data in the new mbuf to be specified
5579  * (in the event that the caller expects to prepend later).
5580  */
5581 __private_extern__ int MSFail = 0;
5582 
5583 __private_extern__ struct mbuf *
5584 m_copyup(struct mbuf *n, int len, int dstoff)
5585 {
5586 	struct mbuf *m;
5587 	int count, space;
5588 
5589 	VERIFY(len >= 0 && dstoff >= 0);
5590 
5591 	if (len > (MHLEN - dstoff)) {
5592 		goto bad;
5593 	}
5594 	MGET(m, M_DONTWAIT, n->m_type);
5595 	if (m == NULL) {
5596 		goto bad;
5597 	}
5598 	m->m_len = 0;
5599 	if (n->m_flags & M_PKTHDR) {
5600 		m_copy_pkthdr(m, n);
5601 		n->m_flags &= ~M_PKTHDR;
5602 	}
5603 	m->m_data += dstoff;
5604 	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
5605 	do {
5606 		count = min(min(max(len, max_protohdr), space), n->m_len);
5607 		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
5608 		    (unsigned)count);
5609 		len -= count;
5610 		m->m_len += count;
5611 		n->m_len -= count;
5612 		space -= count;
5613 		if (n->m_len) {
5614 			n->m_data += count;
5615 		} else {
5616 			n = m_free(n);
5617 		}
5618 	} while (len > 0 && n);
5619 	if (len > 0) {
5620 		(void) m_free(m);
5621 		goto bad;
5622 	}
5623 	m->m_next = n;
5624 	return m;
5625 bad:
5626 	m_freem(n);
5627 	MSFail++;
5628 	return NULL;
5629 }
5630 
5631 /*
5632  * Partition an mbuf chain in two pieces, returning the tail --
5633  * all but the first len0 bytes.  In case of failure, it returns NULL and
5634  * attempts to restore the chain to its original state.
5635  */
5636 struct mbuf *
5637 m_split(struct mbuf *m0, int len0, int wait)
5638 {
5639 	return m_split0(m0, len0, wait, 1);
5640 }
5641 
5642 static struct mbuf *
5643 m_split0(struct mbuf *m0, int len0, int wait, int copyhdr)
5644 {
5645 	struct mbuf *m, *n;
5646 	unsigned len = len0, remain;
5647 
5648 	/*
5649 	 * First iterate to the mbuf which contains the first byte of
5650 	 * data at offset len0
5651 	 */
5652 	for (m = m0; m && len > m->m_len; m = m->m_next) {
5653 		len -= m->m_len;
5654 	}
5655 	if (m == NULL) {
5656 		return NULL;
5657 	}
5658 	/*
5659 	 * len effectively is now the offset in the current
5660 	 * mbuf where we have to perform split.
5661 	 *
5662 	 * remain becomes the tail length.
5663 	 * Note that len can also be == m->m_len
5664 	 */
5665 	remain = m->m_len - len;
5666 
5667 	/*
5668 	 * If current mbuf len contains the entire remaining offset len,
5669 	 * just make the second mbuf chain pointing to next mbuf onwards
5670 	 * and return after making necessary adjustments
5671 	 */
5672 	if (copyhdr && (m0->m_flags & M_PKTHDR) && remain == 0) {
5673 		_MGETHDR(n, wait, m0->m_type);
5674 		if (n == NULL) {
5675 			return NULL;
5676 		}
5677 		n->m_next = m->m_next;
5678 		m->m_next = NULL;
5679 		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
5680 		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
5681 		m0->m_pkthdr.len = len0;
5682 		return n;
5683 	}
5684 	if (copyhdr && (m0->m_flags & M_PKTHDR)) {
5685 		_MGETHDR(n, wait, m0->m_type);
5686 		if (n == NULL) {
5687 			return NULL;
5688 		}
5689 		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
5690 		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
5691 		m0->m_pkthdr.len = len0;
5692 
5693 		/*
5694 		 * If the current mbuf points to external storage,
5695 		 * the storage can be shared by making the last mbuf
5696 		 * of the head chain and the first mbuf of the tail
5697 		 * chain point at different data offsets.
5698 		 */
5699 		if (m->m_flags & M_EXT) {
5700 			goto extpacket;
5701 		}
5702 		if (remain > MHLEN) {
5703 			/* m can't be the lead packet */
5704 			MH_ALIGN(n, 0);
5705 			n->m_next = m_split(m, len, wait);
5706 			if (n->m_next == NULL) {
5707 				(void) m_free(n);
5708 				return NULL;
5709 			} else {
5710 				return n;
5711 			}
5712 		} else {
5713 			MH_ALIGN(n, remain);
5714 		}
5715 	} else if (remain == 0) {
5716 		n = m->m_next;
5717 		m->m_next = NULL;
5718 		return n;
5719 	} else {
5720 		_MGET(n, wait, m->m_type);
5721 		if (n == NULL) {
5722 			return NULL;
5723 		}
5724 
5725 		if ((m->m_flags & M_EXT) == 0) {
5726 			VERIFY(remain <= MLEN);
5727 			M_ALIGN(n, remain);
5728 		}
5729 	}
5730 extpacket:
5731 	if (m->m_flags & M_EXT) {
5732 		n->m_flags |= M_EXT;
5733 		n->m_ext = m->m_ext;
5734 		m_incref(m);
5735 		n->m_data = m->m_data + len;
5736 	} else {
5737 		bcopy(MTOD(m, caddr_t) + len, MTOD(n, caddr_t), remain);
5738 	}
5739 	n->m_len = remain;
5740 	m->m_len = len;
5741 	n->m_next = m->m_next;
5742 	m->m_next = NULL;
5743 	return n;
5744 }
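
/*
 * Usage sketch (illustrative only): carve a fixed-size record off the front
 * of a packet.  On allocation failure m_split() returns NULL and attempts
 * to leave the original chain intact, so the caller can simply retry later.
 */
#if 0
static struct mbuf *
example_split_record(struct mbuf *m, int reclen)
{
	struct mbuf *tail;

	tail = m_split(m, reclen, M_DONTWAIT);
	/* on success: m holds bytes [0, reclen), tail holds the rest */
	return tail;
}
#endif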
5745 
5746 /*
5747  * Routine to copy from device local memory into mbufs.
5748  */
5749 struct mbuf *
5750 m_devget(char *buf, int totlen, int off0, struct ifnet *ifp,
5751     void (*copy)(const void *, void *, size_t))
5752 {
5753 	struct mbuf *m;
5754 	struct mbuf *top = NULL, **mp = &top;
5755 	int off = off0, len;
5756 	char *cp;
5757 	char *epkt;
5758 
5759 	cp = buf;
5760 	epkt = cp + totlen;
5761 	if (off) {
5762 		/*
5763 		 * If 'off' is non-zero, packet is trailer-encapsulated,
5764 		 * so we have to skip the type and length fields.
5765 		 */
5766 		cp += off + 2 * sizeof(u_int16_t);
5767 		totlen -= 2 * sizeof(u_int16_t);
5768 	}
5769 	_MGETHDR(m, M_DONTWAIT, MT_DATA);
5770 	if (m == NULL) {
5771 		return NULL;
5772 	}
5773 	m->m_pkthdr.rcvif = ifp;
5774 	m->m_pkthdr.len = totlen;
5775 	m->m_len = MHLEN;
5776 
5777 	while (totlen > 0) {
5778 		if (top != NULL) {
5779 			_MGET(m, M_DONTWAIT, MT_DATA);
5780 			if (m == NULL) {
5781 				m_freem(top);
5782 				return NULL;
5783 			}
5784 			m->m_len = MLEN;
5785 		}
5786 		len = MIN(totlen, epkt - cp);
5787 		if (len >= MINCLSIZE) {
5788 			MCLGET(m, M_DONTWAIT);
5789 			if (m->m_flags & M_EXT) {
5790 				m->m_len = len = MIN(len, m_maxsize(MC_CL));
5791 			} else {
5792 				/* give up when it's out of cluster mbufs */
5793 				if (top != NULL) {
5794 					m_freem(top);
5795 				}
5796 				m_freem(m);
5797 				return NULL;
5798 			}
5799 		} else {
5800 			/*
5801 			 * Place initial small packet/header at end of mbuf.
5802 			 */
5803 			if (len < m->m_len) {
5804 				if (top == NULL &&
5805 				    len + max_linkhdr <= m->m_len) {
5806 					m->m_data += max_linkhdr;
5807 				}
5808 				m->m_len = len;
5809 			} else {
5810 				len = m->m_len;
5811 			}
5812 		}
5813 		if (copy) {
5814 			copy(cp, MTOD(m, caddr_t), (unsigned)len);
5815 		} else {
5816 			bcopy(cp, MTOD(m, caddr_t), (unsigned)len);
5817 		}
5818 		cp += len;
5819 		*mp = m;
5820 		mp = &m->m_next;
5821 		totlen -= len;
5822 		if (cp == epkt) {
5823 			cp = buf;
5824 		}
5825 	}
5826 	return top;
5827 }
5828 
5829 #ifndef MBUF_GROWTH_NORMAL_THRESH
5830 #define MBUF_GROWTH_NORMAL_THRESH 25
5831 #endif
5832 
5833 /*
5834  * Cluster freelist allocation check.
5835  */
5836 static int
5837 m_howmany(int num, size_t bufsize)
5838 {
5839 	int i = 0, j = 0;
5840 	u_int32_t m_mbclusters, m_clusters, m_bigclusters, m_16kclusters;
5841 	u_int32_t m_mbfree, m_clfree, m_bigclfree, m_16kclfree;
5842 	u_int32_t sumclusters, freeclusters;
5843 	u_int32_t percent_pool, percent_kmem;
5844 	u_int32_t mb_growth, mb_growth_thresh;
5845 
5846 	VERIFY(bufsize == m_maxsize(MC_BIGCL) ||
5847 	    bufsize == m_maxsize(MC_16KCL));
5848 
5849 	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
5850 
5851 	/* Numbers in 2K cluster units */
5852 	m_mbclusters = m_total(MC_MBUF) >> NMBPCLSHIFT;
5853 	m_clusters = m_total(MC_CL);
5854 	m_bigclusters = m_total(MC_BIGCL) << NCLPBGSHIFT;
5855 	m_16kclusters = m_total(MC_16KCL);
5856 	sumclusters = m_mbclusters + m_clusters + m_bigclusters;
5857 
5858 	m_mbfree = m_infree(MC_MBUF) >> NMBPCLSHIFT;
5859 	m_clfree = m_infree(MC_CL);
5860 	m_bigclfree = m_infree(MC_BIGCL) << NCLPBGSHIFT;
5861 	m_16kclfree = m_infree(MC_16KCL);
5862 	freeclusters = m_mbfree + m_clfree + m_bigclfree;
5863 
5864 	/* Bail if we've maxed out the mbuf memory map */
5865 	if ((bufsize == m_maxsize(MC_BIGCL) && sumclusters >= nclusters) ||
5866 	    (njcl > 0 && bufsize == m_maxsize(MC_16KCL) &&
5867 	    (m_16kclusters << NCLPJCLSHIFT) >= njcl)) {
5868 		mbwdog_logger("maxed out nclusters (%u >= %u) or njcl (%u >= %u)",
5869 		    sumclusters, nclusters,
5870 		    (m_16kclusters << NCLPJCLSHIFT), njcl);
5871 		return 0;
5872 	}
5873 
5874 	if (bufsize == m_maxsize(MC_BIGCL)) {
5875 		/* Under minimum */
5876 		if (m_bigclusters < m_minlimit(MC_BIGCL)) {
5877 			return m_minlimit(MC_BIGCL) - m_bigclusters;
5878 		}
5879 
5880 		percent_pool =
5881 		    ((sumclusters - freeclusters) * 100) / sumclusters;
5882 		percent_kmem = (sumclusters * 100) / nclusters;
5883 
5884 		/*
5885 		 * If a light/normal user, grow conservatively (75% threshold);
5886 		 * if a heavy user, grow aggressively (50% threshold).
5887 		 */
5888 		if (percent_kmem < MBUF_GROWTH_NORMAL_THRESH) {
5889 			mb_growth = MB_GROWTH_NORMAL;
5890 		} else {
5891 			mb_growth = MB_GROWTH_AGGRESSIVE;
5892 		}
5893 
5894 		if (percent_kmem < 5) {
5895 			/* For initial allocations */
5896 			i = num;
5897 		} else {
5898 			/* Return if >= MBIGCL_LOWAT clusters available */
5899 			if (m_infree(MC_BIGCL) >= MBIGCL_LOWAT &&
5900 			    m_total(MC_BIGCL) >=
5901 			    MBIGCL_LOWAT + m_minlimit(MC_BIGCL)) {
5902 				return 0;
5903 			}
5904 
5905 			/* Ensure at least num clusters are accessible */
5906 			if (num >= m_infree(MC_BIGCL)) {
5907 				i = num - m_infree(MC_BIGCL);
5908 			}
5909 			if (num > m_total(MC_BIGCL) - m_minlimit(MC_BIGCL)) {
5910 				j = num - (m_total(MC_BIGCL) -
5911 				    m_minlimit(MC_BIGCL));
5912 			}
5913 
5914 			i = MAX(i, j);
5915 
5916 			/*
5917 			 * Grow pool if percent_pool > 75 (normal growth)
5918 			 * or percent_pool > 50 (aggressive growth).
5919 			 */
5920 			mb_growth_thresh = 100 - (100 / (1 << mb_growth));
5921 			if (percent_pool > mb_growth_thresh) {
5922 				j = ((sumclusters + num) >> mb_growth) -
5923 				    freeclusters;
5924 			}
5925 			i = MAX(i, j);
5926 		}
5927 
5928 		/* Check to ensure we didn't go over limits */
5929 		if (i + m_bigclusters >= m_maxlimit(MC_BIGCL)) {
5930 			i = m_maxlimit(MC_BIGCL) - m_bigclusters;
5931 		}
5932 		if ((i << 1) + sumclusters >= nclusters) {
5933 			i = (nclusters - sumclusters) >> 1;
5934 		}
5935 		VERIFY((m_total(MC_BIGCL) + i) <= m_maxlimit(MC_BIGCL));
5936 		VERIFY(sumclusters + (i << 1) <= nclusters);
5937 	} else { /* 16K CL */
5938 		VERIFY(njcl > 0);
5939 		/* Ensure at least num clusters are available */
5940 		if (num >= m_16kclfree) {
5941 			i = num - m_16kclfree;
5942 		}
5943 
5944 		/* Always grow 16KCL pool aggressively */
5945 		if (((m_16kclusters + num) >> 1) > m_16kclfree) {
5946 			j = ((m_16kclusters + num) >> 1) - m_16kclfree;
5947 		}
5948 		i = MAX(i, j);
5949 
5950 		/* Check to ensure we don't go over limit */
5951 		if ((i + m_total(MC_16KCL)) >= m_maxlimit(MC_16KCL)) {
5952 			i = m_maxlimit(MC_16KCL) - m_total(MC_16KCL);
5953 		}
5954 	}
5955 	return i;
5956 }
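
/*
 * Worked example for the growth thresholds above (assuming the values
 * MB_GROWTH_NORMAL == 2 and MB_GROWTH_AGGRESSIVE == 1 defined earlier in
 * this file): mb_growth_thresh = 100 - (100 / (1 << 2)) = 75 for normal
 * growth, and 100 - (100 / (1 << 1)) = 50 for aggressive growth, matching
 * the "percent_pool > 75 / > 50" comment in the code.
 */
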
5957 /*
5958  * Return the number of bytes in the mbuf chain, m.
5959  */
5960 unsigned int
5961 m_length(struct mbuf *m)
5962 {
5963 	struct mbuf *m0;
5964 	unsigned int pktlen;
5965 
5966 	if (m->m_flags & M_PKTHDR) {
5967 		return m->m_pkthdr.len;
5968 	}
5969 
5970 	pktlen = 0;
5971 	for (m0 = m; m0 != NULL; m0 = m0->m_next) {
5972 		pktlen += m0->m_len;
5973 	}
5974 	return pktlen;
5975 }
5976 
5977 /*
5978  * Copy data from a buffer back into the indicated mbuf chain,
5979  * starting "off" bytes from the beginning, extending the mbuf
5980  * chain if necessary.
5981  */
5982 void
5983 m_copyback(struct mbuf *m0, int off, int len, const void *cp)
5984 {
5985 #if DEBUG
5986 	struct mbuf *origm = m0;
5987 	int error;
5988 #endif /* DEBUG */
5989 
5990 	if (m0 == NULL) {
5991 		return;
5992 	}
5993 
5994 #if DEBUG
5995 	error =
5996 #endif /* DEBUG */
5997 	m_copyback0(&m0, off, len, cp,
5998 	    M_COPYBACK0_COPYBACK | M_COPYBACK0_EXTEND, M_DONTWAIT);
5999 
6000 #if DEBUG
6001 	if (error != 0 || (m0 != NULL && origm != m0)) {
6002 		panic("m_copyback");
6003 	}
6004 #endif /* DEBUG */
6005 }
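
/*
 * Usage sketch (illustrative only): overwrite bytes in place, letting
 * m_copyback() extend the chain when the range runs past the current end.
 * Because it allocates with M_DONTWAIT internally, a partial write is
 * possible; m_copyback_cow() below reports failure instead.
 */
#if 0
static void
example_patch_u16(struct mbuf *m, int off, uint16_t value)
{
	uint16_t v = htons(value);

	m_copyback(m, off, sizeof(v), &v);
}
#endif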
6006 
6007 struct mbuf *
6008 m_copyback_cow(struct mbuf *m0, int off, int len, const void *cp, int how)
6009 {
6010 	int error;
6011 
6012 	/* don't support chain expansion */
6013 	VERIFY(off + len <= m_length(m0));
6014 
6015 	error = m_copyback0(&m0, off, len, cp,
6016 	    M_COPYBACK0_COPYBACK | M_COPYBACK0_COW, how);
6017 	if (error) {
6018 		/*
6019 		 * no way to recover from partial success.
6020 		 * just free the chain.
6021 		 */
6022 		m_freem(m0);
6023 		return NULL;
6024 	}
6025 	return m0;
6026 }
6027 
6028 /*
6029  * m_makewritable: ensure that the specified range is writable.
6030  */
6031 int
6032 m_makewritable(struct mbuf **mp, int off, int len, int how)
6033 {
6034 	int error;
6035 #if DEBUG
6036 	struct mbuf *n;
6037 	int origlen, reslen;
6038 
6039 	origlen = m_length(*mp);
6040 #endif /* DEBUG */
6041 
6042 #if 0 /* M_COPYALL is large enough */
6043 	if (len == M_COPYALL) {
6044 		len = m_length(*mp) - off; /* XXX */
6045 	}
6046 #endif
6047 
6048 	error = m_copyback0(mp, off, len, NULL,
6049 	    M_COPYBACK0_PRESERVE | M_COPYBACK0_COW, how);
6050 
6051 #if DEBUG
6052 	reslen = 0;
6053 	for (n = *mp; n; n = n->m_next) {
6054 		reslen += n->m_len;
6055 	}
6056 	if (origlen != reslen) {
6057 		panic("m_makewritable: length changed");
6058 	}
6059 	if (((*mp)->m_flags & M_PKTHDR) && reslen != (*mp)->m_pkthdr.len) {
6060 		panic("m_makewritable: inconsist");
6061 	}
6062 #endif /* DEBUG */
6063 
6064 	return error;
6065 }
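
/*
 * Usage sketch (illustrative only): un-share any referenced cluster data in
 * the range about to be modified, e.g. before rewriting a header in place.
 * The chain head may be replaced, hence the struct mbuf ** argument.
 */
#if 0
static int
example_prepare_rewrite(struct mbuf **mp, int off, int len)
{
	int error;

	error = m_makewritable(mp, off, len, M_DONTWAIT);
	if (error != 0) {
		return error;	/* typically ENOBUFS */
	}
	/* bytes [off, off + len) of *mp are now safe to modify */
	return 0;
}
#endif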
6066 
6067 static int
6068 m_copyback0(struct mbuf **mp0, int off, int len, const void *vp, int flags,
6069     int how)
6070 {
6071 	int mlen;
6072 	struct mbuf *m, *n;
6073 	struct mbuf **mp;
6074 	int totlen = 0;
6075 	const char *cp = vp;
6076 
6077 	VERIFY(mp0 != NULL);
6078 	VERIFY(*mp0 != NULL);
6079 	VERIFY((flags & M_COPYBACK0_PRESERVE) == 0 || cp == NULL);
6080 	VERIFY((flags & M_COPYBACK0_COPYBACK) == 0 || cp != NULL);
6081 
6082 	/*
6083 	 * we don't bother to update "totlen" in the case of M_COPYBACK0_COW,
6084 	 * assuming that M_COPYBACK0_EXTEND and M_COPYBACK0_COW are exclusive.
6085 	 */
6086 
6087 	VERIFY((~flags & (M_COPYBACK0_EXTEND | M_COPYBACK0_COW)) != 0);
6088 
6089 	mp = mp0;
6090 	m = *mp;
6091 	while (off > (mlen = m->m_len)) {
6092 		off -= mlen;
6093 		totlen += mlen;
6094 		if (m->m_next == NULL) {
6095 			int tspace;
6096 extend:
6097 			if (!(flags & M_COPYBACK0_EXTEND)) {
6098 				goto out;
6099 			}
6100 
6101 			/*
6102 			 * try to make some space at the end of "m".
6103 			 */
6104 
6105 			mlen = m->m_len;
6106 			if (off + len >= MINCLSIZE &&
6107 			    !(m->m_flags & M_EXT) && m->m_len == 0) {
6108 				MCLGET(m, how);
6109 			}
6110 			tspace = M_TRAILINGSPACE(m);
6111 			if (tspace > 0) {
6112 				tspace = MIN(tspace, off + len);
6113 				VERIFY(tspace > 0);
6114 				bzero(mtod(m, char *) + m->m_len,
6115 				    MIN(off, tspace));
6116 				m->m_len += tspace;
6117 				off += mlen;
6118 				totlen -= mlen;
6119 				continue;
6120 			}
6121 
6122 			/*
6123 			 * need to allocate an mbuf.
6124 			 */
6125 
6126 			if (off + len >= MINCLSIZE) {
6127 				n = m_getcl(how, m->m_type, 0);
6128 			} else {
6129 				n = _M_GET(how, m->m_type);
6130 			}
6131 			if (n == NULL) {
6132 				goto out;
6133 			}
6134 			n->m_len = 0;
6135 			n->m_len = MIN(M_TRAILINGSPACE(n), off + len);
6136 			bzero(mtod(n, char *), MIN(n->m_len, off));
6137 			m->m_next = n;
6138 		}
6139 		mp = &m->m_next;
6140 		m = m->m_next;
6141 	}
6142 	while (len > 0) {
6143 		mlen = m->m_len - off;
6144 		if (mlen != 0 && m_mclhasreference(m)) {
6145 			char *datap;
6146 			int eatlen;
6147 
6148 			/*
6149 			 * this mbuf is read-only.
6150 			 * allocate a new writable mbuf and try again.
6151 			 */
6152 
6153 #if DIAGNOSTIC
6154 			if (!(flags & M_COPYBACK0_COW)) {
6155 				panic("m_copyback0: read-only");
6156 			}
6157 #endif /* DIAGNOSTIC */
6158 
6159 			/*
6160 			 * if we're going to write into the middle of
6161 			 * an mbuf, split it first.
6162 			 */
6163 			if (off > 0 && len < mlen) {
6164 				n = m_split0(m, off, how, 0);
6165 				if (n == NULL) {
6166 					goto enobufs;
6167 				}
6168 				m->m_next = n;
6169 				mp = &m->m_next;
6170 				m = n;
6171 				off = 0;
6172 				continue;
6173 			}
6174 
6175 			/*
6176 			 * XXX TODO coalesce into the trailingspace of
6177 			 * the previous mbuf when possible.
6178 			 */
6179 
6180 			/*
6181 			 * allocate a new mbuf.  copy packet header if needed.
6182 			 */
6183 			n = _M_GET(how, m->m_type);
6184 			if (n == NULL) {
6185 				goto enobufs;
6186 			}
6187 			if (off == 0 && (m->m_flags & M_PKTHDR)) {
6188 				M_COPY_PKTHDR(n, m);
6189 				n->m_len = MHLEN;
6190 			} else {
6191 				if (len >= MINCLSIZE) {
6192 					MCLGET(n, M_DONTWAIT);
6193 				}
6194 				n->m_len =
6195 				    (n->m_flags & M_EXT) ? MCLBYTES : MLEN;
6196 			}
6197 			if (n->m_len > len) {
6198 				n->m_len = len;
6199 			}
6200 
6201 			/*
6202 			 * free the region which has been overwritten,
6203 			 * copying data from the old mbufs if requested.
6204 			 */
6205 			if (flags & M_COPYBACK0_PRESERVE) {
6206 				datap = mtod(n, char *);
6207 			} else {
6208 				datap = NULL;
6209 			}
6210 			eatlen = n->m_len;
6211 			VERIFY(off == 0 || eatlen >= mlen);
6212 			if (off > 0) {
6213 				VERIFY(len >= mlen);
6214 				m->m_len = off;
6215 				m->m_next = n;
6216 				if (datap) {
6217 					m_copydata(m, off, mlen, datap);
6218 					datap += mlen;
6219 				}
6220 				eatlen -= mlen;
6221 				mp = &m->m_next;
6222 				m = m->m_next;
6223 			}
6224 			while (m != NULL && m_mclhasreference(m) &&
6225 			    n->m_type == m->m_type && eatlen > 0) {
6226 				mlen = MIN(eatlen, m->m_len);
6227 				if (datap) {
6228 					m_copydata(m, 0, mlen, datap);
6229 					datap += mlen;
6230 				}
6231 				m->m_data += mlen;
6232 				m->m_len -= mlen;
6233 				eatlen -= mlen;
6234 				if (m->m_len == 0) {
6235 					*mp = m = m_free(m);
6236 				}
6237 			}
6238 			if (eatlen > 0) {
6239 				n->m_len -= eatlen;
6240 			}
6241 			n->m_next = m;
6242 			*mp = m = n;
6243 			continue;
6244 		}
6245 		mlen = MIN(mlen, len);
6246 		if (flags & M_COPYBACK0_COPYBACK) {
6247 			bcopy(cp, mtod(m, caddr_t) + off, (unsigned)mlen);
6248 			cp += mlen;
6249 		}
6250 		len -= mlen;
6251 		mlen += off;
6252 		off = 0;
6253 		totlen += mlen;
6254 		if (len == 0) {
6255 			break;
6256 		}
6257 		if (m->m_next == NULL) {
6258 			goto extend;
6259 		}
6260 		mp = &m->m_next;
6261 		m = m->m_next;
6262 	}
6263 out:
6264 	if (((m = *mp0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen)) {
6265 		VERIFY(flags & M_COPYBACK0_EXTEND);
6266 		m->m_pkthdr.len = totlen;
6267 	}
6268 
6269 	return 0;
6270 
6271 enobufs:
6272 	return ENOBUFS;
6273 }
6274 
6275 uint64_t
6276 mcl_to_paddr(char *addr)
6277 {
6278 	vm_offset_t base_phys;
6279 
6280 	if (!MBUF_IN_MAP(addr)) {
6281 		return 0;
6282 	}
6283 	base_phys = mcl_paddr[atop_64(addr - (char *)mbutl)];
6284 
6285 	if (base_phys == 0) {
6286 		return 0;
6287 	}
6288 	return (uint64_t)(ptoa_64(base_phys) | ((uint64_t)addr & PAGE_MASK));
6289 }
6290 
6291 /*
6292  * Dup the mbuf chain passed in.  The whole thing.  No cute additional cruft.
6293  * And really copy the thing.  That way, we don't "precompute" checksums
6294  * for unsuspecting consumers.  Assumption: m->m_nextpkt == 0.  Trick: for
6295  * small packets, don't dup into a cluster.  That way received packets
6296  * don't take up too much room in the sockbuf (cf. sbspace()).
6297  */
6298 int MDFail;
6299 
6300 struct mbuf *
6301 m_dup(struct mbuf *m, int how)
6302 {
6303 	struct mbuf *n, **np;
6304 	struct mbuf *top;
6305 	int copyhdr = 0;
6306 
6307 	np = &top;
6308 	top = NULL;
6309 	if (m->m_flags & M_PKTHDR) {
6310 		copyhdr = 1;
6311 	}
6312 
6313 	/*
6314 	 * Quick check: if we have one mbuf and its data fits in an
6315 	 * mbuf with packet header, just copy and go.
6316 	 */
6317 	if (m->m_next == NULL) {
6318 		/* Then just move the data into an mbuf and be done... */
6319 		if (copyhdr) {
6320 			if (m->m_pkthdr.len <= MHLEN && m->m_len <= MHLEN) {
6321 				if ((n = _M_GETHDR(how, m->m_type)) == NULL) {
6322 					return NULL;
6323 				}
6324 				n->m_len = m->m_len;
6325 				m_dup_pkthdr(n, m, how);
6326 				bcopy(m->m_data, n->m_data, m->m_len);
6327 				return n;
6328 			}
6329 		} else if (m->m_len <= MLEN) {
6330 			if ((n = _M_GET(how, m->m_type)) == NULL) {
6331 				return NULL;
6332 			}
6333 			bcopy(m->m_data, n->m_data, m->m_len);
6334 			n->m_len = m->m_len;
6335 			return n;
6336 		}
6337 	}
6338 	while (m != NULL) {
6339 #if BLUE_DEBUG
6340 		printf("<%x: %x, %x, %x\n", m, m->m_flags, m->m_len,
6341 		    m->m_data);
6342 #endif
6343 		if (copyhdr) {
6344 			n = _M_GETHDR(how, m->m_type);
6345 		} else {
6346 			n = _M_GET(how, m->m_type);
6347 		}
6348 		if (n == NULL) {
6349 			goto nospace;
6350 		}
6351 		if (m->m_flags & M_EXT) {
6352 			if (m->m_len <= m_maxsize(MC_CL)) {
6353 				MCLGET(n, how);
6354 			} else if (m->m_len <= m_maxsize(MC_BIGCL)) {
6355 				n = m_mbigget(n, how);
6356 			} else if (m->m_len <= m_maxsize(MC_16KCL) && njcl > 0) {
6357 				n = m_m16kget(n, how);
6358 			}
6359 			if (!(n->m_flags & M_EXT)) {
6360 				(void) m_free(n);
6361 				goto nospace;
6362 			}
6363 		} else {
6364 			VERIFY((copyhdr == 1 && m->m_len <= MHLEN) ||
6365 			    (copyhdr == 0 && m->m_len <= MLEN));
6366 		}
6367 		*np = n;
6368 		if (copyhdr) {
6369 			/* Don't use M_COPY_PKTHDR: preserve m_data */
6370 			m_dup_pkthdr(n, m, how);
6371 			copyhdr = 0;
6372 			if (!(n->m_flags & M_EXT)) {
6373 				n->m_data = n->m_pktdat;
6374 			}
6375 		}
6376 		n->m_len = m->m_len;
6377 		/*
6378 		 * Get the dup on the same boundary as the original.
6379 		 * Assume that the two mbufs have the same offset to the data
6380 		 * area (up to word boundaries).
6381 		 */
6382 		bcopy(MTOD(m, caddr_t), MTOD(n, caddr_t), (unsigned)n->m_len);
6383 		m = m->m_next;
6384 		np = &n->m_next;
6385 #if BLUE_DEBUG
6386 		printf(">%x: %x, %x, %x\n", n, n->m_flags, n->m_len,
6387 		    n->m_data);
6388 #endif
6389 	}
6390 
6391 	if (top == NULL) {
6392 		MDFail++;
6393 	}
6394 	return top;
6395 
6396 nospace:
6397 	m_freem(top);
6398 	MDFail++;
6399 	return NULL;
6400 }
6401 
6402 #define MBUF_MULTIPAGES(m)                                              \
6403 	(((m)->m_flags & M_EXT) &&                                      \
6404 	((IS_P2ALIGNED((m)->m_data, PAGE_SIZE)                          \
6405 	&& (m)->m_len > PAGE_SIZE) ||                                   \
6406 	(!IS_P2ALIGNED((m)->m_data, PAGE_SIZE) &&                       \
6407 	P2ROUNDUP((m)->m_data, PAGE_SIZE) < ((uintptr_t)(m)->m_data + (m)->m_len))))
6408 
6409 static struct mbuf *
6410 m_expand(struct mbuf *m, struct mbuf **last)
6411 {
6412 	struct mbuf *top = NULL;
6413 	struct mbuf **nm = &top;
6414 	uintptr_t data0, data;
6415 	unsigned int len0, len;
6416 
6417 	VERIFY(MBUF_MULTIPAGES(m));
6418 	VERIFY(m->m_next == NULL);
6419 	data0 = (uintptr_t)m->m_data;
6420 	len0 = m->m_len;
6421 	*last = top;
6422 
6423 	for (;;) {
6424 		struct mbuf *n;
6425 
6426 		data = data0;
6427 		if (IS_P2ALIGNED(data, PAGE_SIZE) && len0 > PAGE_SIZE) {
6428 			len = PAGE_SIZE;
6429 		} else if (!IS_P2ALIGNED(data, PAGE_SIZE) &&
6430 		    P2ROUNDUP(data, PAGE_SIZE) < (data + len0)) {
6431 			len = P2ROUNDUP(data, PAGE_SIZE) - data;
6432 		} else {
6433 			len = len0;
6434 		}
6435 
6436 		VERIFY(len > 0);
6437 		VERIFY(m->m_flags & M_EXT);
6438 		m->m_data = (void *)data;
6439 		m->m_len = len;
6440 
6441 		*nm = *last = m;
6442 		nm = &m->m_next;
6443 		m->m_next = NULL;
6444 
6445 		data0 += len;
6446 		len0 -= len;
6447 		if (len0 == 0) {
6448 			break;
6449 		}
6450 
6451 		n = _M_RETRY(M_DONTWAIT, MT_DATA);
6452 		if (n == NULL) {
6453 			m_freem(top);
6454 			top = *last = NULL;
6455 			break;
6456 		}
6457 
6458 		n->m_ext = m->m_ext;
6459 		m_incref(m);
6460 		n->m_flags |= M_EXT;
6461 		m = n;
6462 	}
6463 	return top;
6464 }
6465 
6466 struct mbuf *
6467 m_normalize(struct mbuf *m)
6468 {
6469 	struct mbuf *top = NULL;
6470 	struct mbuf **nm = &top;
6471 	boolean_t expanded = FALSE;
6472 
6473 	while (m != NULL) {
6474 		struct mbuf *n;
6475 
6476 		n = m->m_next;
6477 		m->m_next = NULL;
6478 
6479 		/* Does the data cross one or more page boundaries? */
6480 		if (MBUF_MULTIPAGES(m)) {
6481 			struct mbuf *last;
6482 			if ((m = m_expand(m, &last)) == NULL) {
6483 				m_freem(n);
6484 				m_freem(top);
6485 				top = NULL;
6486 				break;
6487 			}
6488 			*nm = m;
6489 			nm = &last->m_next;
6490 			expanded = TRUE;
6491 		} else {
6492 			*nm = m;
6493 			nm = &m->m_next;
6494 		}
6495 		m = n;
6496 	}
6497 	if (expanded) {
6498 		atomic_add_32(&mb_normalized, 1);
6499 	}
6500 	return top;
6501 }
6502 
6503 /*
6504  * Append the specified data to the indicated mbuf chain,
6505  * extending the mbuf chain if the new data does not fit in
6506  * existing space.
6507  *
6508  * Return 1 if able to complete the job; otherwise 0.
6509  */
6510 int
6511 m_append(struct mbuf *m0, int len, caddr_t cp)
6512 {
6513 	struct mbuf *m, *n;
6514 	int remainder, space;
6515 
6516 	for (m = m0; m->m_next != NULL; m = m->m_next) {
6517 		;
6518 	}
6519 	remainder = len;
6520 	space = M_TRAILINGSPACE(m);
6521 	if (space > 0) {
6522 		/*
6523 		 * Copy into available space.
6524 		 */
6525 		if (space > remainder) {
6526 			space = remainder;
6527 		}
6528 		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
6529 		m->m_len += space;
6530 		cp += space;
6531 		remainder -= space;
6532 	}
6533 	while (remainder > 0) {
6534 		/*
6535 		 * Allocate a new mbuf; could check space
6536 		 * and allocate a cluster instead.
6537 		 */
6538 		n = m_get(M_WAITOK, m->m_type);
6539 		if (n == NULL) {
6540 			break;
6541 		}
6542 		n->m_len = min(MLEN, remainder);
6543 		bcopy(cp, mtod(n, caddr_t), n->m_len);
6544 		cp += n->m_len;
6545 		remainder -= n->m_len;
6546 		m->m_next = n;
6547 		m = n;
6548 	}
6549 	if (m0->m_flags & M_PKTHDR) {
6550 		m0->m_pkthdr.len += len - remainder;
6551 	}
6552 	return remainder == 0;
6553 }
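
/*
 * Usage sketch (illustrative only): append a small zero pad, relying on
 * m_append() to reuse trailing space or grow the chain.  A return of 0
 * means an allocation failed partway through.
 */
#if 0
static int
example_append_pad(struct mbuf *m, int padlen)
{
	char zeros[8] = { 0 };

	VERIFY(padlen >= 0 && padlen <= (int)sizeof(zeros));
	return m_append(m, padlen, zeros);
}
#endif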
6554 
6555 struct mbuf *
6556 m_last(struct mbuf *m)
6557 {
6558 	while (m->m_next != NULL) {
6559 		m = m->m_next;
6560 	}
6561 	return m;
6562 }
6563 
6564 unsigned int
6565 m_fixhdr(struct mbuf *m0)
6566 {
6567 	u_int len;
6568 
6569 	VERIFY(m0->m_flags & M_PKTHDR);
6570 
6571 	len = m_length2(m0, NULL);
6572 	m0->m_pkthdr.len = len;
6573 	return len;
6574 }
6575 
6576 unsigned int
6577 m_length2(struct mbuf *m0, struct mbuf **last)
6578 {
6579 	struct mbuf *m;
6580 	u_int len;
6581 
6582 	len = 0;
6583 	for (m = m0; m != NULL; m = m->m_next) {
6584 		len += m->m_len;
6585 		if (m->m_next == NULL) {
6586 			break;
6587 		}
6588 	}
6589 	if (last != NULL) {
6590 		*last = m;
6591 	}
6592 	return len;
6593 }
6594 
6595 /*
6596  * Defragment a mbuf chain, returning the shortest possible chain of mbufs
6597  * and clusters.  If allocation fails and this cannot be completed, NULL will
6598  * be returned, but the passed in chain will be unchanged.  Upon success,
6599  * the original chain will be freed, and the new chain will be returned.
6600  *
6601  * If a non-packet-header mbuf is passed in, the original mbuf chain will
6602  * be returned unharmed.
6603  *
6604  * If offset is specified, the first mbuf in the chain will have a leading
6605  * space of the amount stated by the "off" parameter.
6606  *
6607  * This routine requires that the m_pkthdr.pkt_hdr field of the original
6608  * mbuf chain is cleared by the caller.
6609  */
6610 struct mbuf *
6611 m_defrag_offset(struct mbuf *m0, u_int32_t off, int how)
6612 {
6613 	struct mbuf *m_new = NULL, *m_final = NULL;
6614 	int progress = 0, length, pktlen;
6615 
6616 	if (!(m0->m_flags & M_PKTHDR)) {
6617 		return m0;
6618 	}
6619 
6620 	VERIFY(off < MHLEN);
6621 	m_fixhdr(m0); /* Needed sanity check */
6622 
6623 	pktlen = m0->m_pkthdr.len + off;
6624 	if (pktlen > MHLEN) {
6625 		m_final = m_getcl(how, MT_DATA, M_PKTHDR);
6626 	} else {
6627 		m_final = m_gethdr(how, MT_DATA);
6628 	}
6629 
6630 	if (m_final == NULL) {
6631 		goto nospace;
6632 	}
6633 
6634 	if (off > 0) {
6635 		pktlen -= off;
6636 		m_final->m_data += off;
6637 	}
6638 
6639 	/*
6640 	 * Caller must have handled the contents pointed to by this
6641 	 * pointer before coming here, as otherwise it will point to
6642 	 * the original mbuf which will get freed upon success.
6643 	 */
6644 	VERIFY(m0->m_pkthdr.pkt_hdr == NULL);
6645 
6646 	if (m_dup_pkthdr(m_final, m0, how) == 0) {
6647 		goto nospace;
6648 	}
6649 
6650 	m_new = m_final;
6651 
6652 	while (progress < pktlen) {
6653 		length = pktlen - progress;
6654 		if (length > MCLBYTES) {
6655 			length = MCLBYTES;
6656 		}
6657 		length -= ((m_new == m_final) ? off : 0);
6658 		if (length < 0) {
6659 			goto nospace;
6660 		}
6661 
6662 		if (m_new == NULL) {
6663 			if (length > MLEN) {
6664 				m_new = m_getcl(how, MT_DATA, 0);
6665 			} else {
6666 				m_new = m_get(how, MT_DATA);
6667 			}
6668 			if (m_new == NULL) {
6669 				goto nospace;
6670 			}
6671 		}
6672 
6673 		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
6674 		progress += length;
6675 		m_new->m_len = length;
6676 		if (m_new != m_final) {
6677 			m_cat(m_final, m_new);
6678 		}
6679 		m_new = NULL;
6680 	}
6681 	m_freem(m0);
6682 	m0 = m_final;
6683 	return m0;
6684 nospace:
6685 	if (m_final) {
6686 		m_freem(m_final);
6687 	}
6688 	return NULL;
6689 }
6690 
6691 struct mbuf *
6692 m_defrag(struct mbuf *m0, int how)
6693 {
6694 	return m_defrag_offset(m0, 0, how);
6695 }
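
/*
 * Usage sketch (illustrative only): coalesce a long chain (e.g. ahead of a
 * DMA segment limit).  On failure the input chain is unchanged and still
 * owned by the caller; on success the original has already been freed.
 */
#if 0
static struct mbuf *
example_coalesce(struct mbuf *m)
{
	struct mbuf *n;

	n = m_defrag(m, M_DONTWAIT);
	return (n != NULL) ? n : m;
}
#endif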
6696 
6697 void
6698 m_mchtype(struct mbuf *m, int t)
6699 {
6700 	mtype_stat_inc(t);
6701 	mtype_stat_dec(m->m_type);
6702 	(m)->m_type = t;
6703 }
6704 
6705 void *
6706 m_mtod(struct mbuf *m)
6707 {
6708 	return MTOD(m, void *);
6709 }
6710 
6711 struct mbuf *
6712 m_dtom(void *x)
6713 {
6714 	return (struct mbuf *)((uintptr_t)(x) & ~(MSIZE - 1));
6715 }
6716 
6717 void
6718 m_mcheck(struct mbuf *m)
6719 {
6720 	_MCHECK(m);
6721 }
6722 
6723 /*
6724  * Return a pointer to mbuf/offset of location in mbuf chain.
6725  */
6726 struct mbuf *
6727 m_getptr(struct mbuf *m, int loc, int *off)
6728 {
6729 	while (loc >= 0) {
6730 		/* Normal end of search. */
6731 		if (m->m_len > loc) {
6732 			*off = loc;
6733 			return m;
6734 		} else {
6735 			loc -= m->m_len;
6736 			if (m->m_next == NULL) {
6737 				if (loc == 0) {
6738 					/* Point at the end of valid data. */
6739 					*off = m->m_len;
6740 					return m;
6741 				}
6742 				return NULL;
6743 			}
6744 			m = m->m_next;
6745 		}
6746 	}
6747 	return NULL;
6748 }
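
/*
 * Usage sketch (illustrative only): fetch the byte at a chain-relative
 * offset.  Note that m_getptr() may legitimately return the last mbuf with
 * *off == m_len when loc equals the total length, so check the bound.
 */
#if 0
static uint8_t
example_byte_at(struct mbuf *m, int loc)
{
	int off;
	struct mbuf *n;

	n = m_getptr(m, loc, &off);
	VERIFY(n != NULL && off < n->m_len);
	return *(mtod(n, uint8_t *) + off);
}
#endif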
6749 
6750 /*
6751  * Inform the corresponding mcache(s) that there's a waiter below.
6752  */
6753 static void
6754 mbuf_waiter_inc(mbuf_class_t class, boolean_t comp)
6755 {
6756 	mcache_waiter_inc(m_cache(class));
6757 	if (comp) {
6758 		if (class == MC_CL) {
6759 			mcache_waiter_inc(m_cache(MC_MBUF_CL));
6760 		} else if (class == MC_BIGCL) {
6761 			mcache_waiter_inc(m_cache(MC_MBUF_BIGCL));
6762 		} else if (class == MC_16KCL) {
6763 			mcache_waiter_inc(m_cache(MC_MBUF_16KCL));
6764 		} else {
6765 			mcache_waiter_inc(m_cache(MC_MBUF_CL));
6766 			mcache_waiter_inc(m_cache(MC_MBUF_BIGCL));
6767 		}
6768 	}
6769 }
6770 
6771 /*
6772  * Inform the corresponding mcache(s) that there's no more waiter below.
6773  */
6774 static void
6775 mbuf_waiter_dec(mbuf_class_t class, boolean_t comp)
6776 {
6777 	mcache_waiter_dec(m_cache(class));
6778 	if (comp) {
6779 		if (class == MC_CL) {
6780 			mcache_waiter_dec(m_cache(MC_MBUF_CL));
6781 		} else if (class == MC_BIGCL) {
6782 			mcache_waiter_dec(m_cache(MC_MBUF_BIGCL));
6783 		} else if (class == MC_16KCL) {
6784 			mcache_waiter_dec(m_cache(MC_MBUF_16KCL));
6785 		} else {
6786 			mcache_waiter_dec(m_cache(MC_MBUF_CL));
6787 			mcache_waiter_dec(m_cache(MC_MBUF_BIGCL));
6788 		}
6789 	}
6790 }
6791 
6792 static bool mbuf_watchdog_defunct_active = false;
6793 
6794 static uint32_t
6795 mbuf_watchdog_socket_space(struct socket *so)
6796 {
6797 	if (so == NULL) {
6798 		return 0;
6799 	}
6800 
6801 	return so->so_snd.sb_mbcnt + so->so_rcv.sb_mbcnt;
6802 }
6803 
6804 struct mbuf_watchdog_defunct_args {
6805 	struct proc *top_app;
6806 	uint32_t top_app_space_used;
6807 };
6808 
6809 static int
6810 mbuf_watchdog_defunct_iterate(proc_t p, void *arg)
6811 {
6812 	struct fileproc *fp = NULL;
6813 	struct mbuf_watchdog_defunct_args *args =
6814 	    (struct mbuf_watchdog_defunct_args *)arg;
6815 	uint32_t space_used = 0;
6816 
6817 	proc_fdlock(p);
6818 	fdt_foreach(fp, p) {
6819 		struct fileglob *fg = fp->fp_glob;
6820 		struct socket *so = NULL;
6821 
6822 		if (FILEGLOB_DTYPE(fg) != DTYPE_SOCKET) {
6823 			continue;
6824 		}
6825 		so = fg_get_data(fg);
6826 		/*
6827 		 * We calculate the space without the socket
6828 		 * lock because we don't want to be blocked
6829 		 * by another process that called send() and
6830 		 * is stuck waiting for mbufs.
6831 		 *
6832 		 * These variables are 32-bit so we don't have
6833 		 * to worry about incomplete reads.
6834 		 */
6835 		space_used += mbuf_watchdog_socket_space(so);
6836 	}
6837 	proc_fdunlock(p);
6838 	if (space_used > args->top_app_space_used) {
6839 		if (args->top_app != NULL) {
6840 			proc_rele(args->top_app);
6841 		}
6842 		args->top_app = p;
6843 		args->top_app_space_used = space_used;
6844 
6845 		return PROC_CLAIMED;
6846 	} else {
6847 		return PROC_RETURNED;
6848 	}
6849 }
6850 
6851 extern char *proc_name_address(void *p);
6852 
6853 static void
6854 mbuf_watchdog_defunct(thread_call_param_t arg0, thread_call_param_t arg1)
6855 {
6856 #pragma unused(arg0, arg1)
6857 	struct mbuf_watchdog_defunct_args args = {};
6858 	struct fileproc *fp = NULL;
6859 
6860 	proc_iterate(PROC_ALLPROCLIST,
6861 	    mbuf_watchdog_defunct_iterate, &args, NULL, NULL);
6862 
6863 	/*
6864 	 * Defunct all sockets from this app.
6865 	 */
6866 	if (args.top_app != NULL) {
6867 		/* Restart the watchdog count. */
6868 		lck_mtx_lock(mbuf_mlock);
6869 		microuptime(&mb_wdtstart);
6870 		lck_mtx_unlock(mbuf_mlock);
6871 		os_log(OS_LOG_DEFAULT, "%s: defuncting all sockets from %s.%d",
6872 		    __func__,
6873 		    proc_name_address(args.top_app),
6874 		    proc_pid(args.top_app));
6875 		proc_fdlock(args.top_app);
6876 		fdt_foreach(fp, args.top_app) {
6877 			struct fileglob *fg = fp->fp_glob;
6878 			struct socket *so = NULL;
6879 
6880 			if (FILEGLOB_DTYPE(fg) != DTYPE_SOCKET) {
6881 				continue;
6882 			}
6883 			so = (struct socket *)fp_get_data(fp);
6884 			socket_lock(so, 0);
6885 			if (sosetdefunct(args.top_app, so,
6886 			    SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL,
6887 			    TRUE) == 0) {
6888 				sodefunct(args.top_app, so,
6889 				    SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL);
6890 			}
6891 			socket_unlock(so, 0);
6892 		}
6893 		proc_fdunlock(args.top_app);
6894 		proc_rele(args.top_app);
6895 		mbstat.m_forcedefunct++;
6896 	}
6897 	mbuf_watchdog_defunct_active = false;
6898 }
6899 
6900 /*
6901  * Called during slab (blocking and non-blocking) allocation.  If there
6902  * is at least one waiter, and the time since the first waiter is blocked
6903  * is greater than the watchdog timeout, panic the system.
6904  */
6905 static void
6906 mbuf_watchdog(void)
6907 {
6908 	struct timeval now;
6909 	unsigned int since;
6910 	static thread_call_t defunct_tcall = NULL;
6911 
6912 	if (mb_waiters == 0 || !mb_watchdog) {
6913 		return;
6914 	}
6915 
6916 	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
6917 
6918 	microuptime(&now);
6919 	since = now.tv_sec - mb_wdtstart.tv_sec;
6920 
6921 	if (mbuf_watchdog_defunct_active) {
6922 		/*
6923 		 * Don't panic the system while we are trying
6924 		 * to find sockets to defunct.
6925 		 */
6926 		return;
6927 	}
6928 	if (since >= MB_WDT_MAXTIME) {
6929 		panic_plain("%s: %d waiters stuck for %u secs\n%s", __func__,
6930 		    mb_waiters, since, mbuf_dump());
6931 		/* NOTREACHED */
6932 	}
6933 	/*
6934 	 * Check if we are about to panic the system due
6935 	 * to lack of mbufs and start defuncting sockets
6936 	 * from processes that use too many sockets.
6937 	 *
6938 	 * We're always called with the mbuf_mlock held,
6939 	 * so that also protects mbuf_watchdog_defunct_active.
6940 	 */
6941 	if (since >= MB_WDT_MAXTIME / 2) {
6942 		/*
6943 		 * Start a thread to defunct sockets
6944 		 * from apps that are over-using their socket
6945 		 * buffers.
6946 		 */
6947 		if (defunct_tcall == NULL) {
6948 			defunct_tcall =
6949 			    thread_call_allocate_with_options(mbuf_watchdog_defunct,
6950 			    NULL,
6951 			    THREAD_CALL_PRIORITY_KERNEL,
6952 			    THREAD_CALL_OPTIONS_ONCE);
6953 		}
6954 		if (defunct_tcall != NULL) {
6955 			mbuf_watchdog_defunct_active = true;
6956 			thread_call_enter(defunct_tcall);
6957 		}
6958 	}
6959 }
6960 
6961 /*
6962  * Called during blocking allocation.  Returns TRUE if one or more objects
6963  * are available at the per-CPU caches layer and that allocation should be
6964  * retried at that level.
6965  */
6966 static boolean_t
6967 mbuf_sleep(mbuf_class_t class, unsigned int num, int wait)
6968 {
6969 	boolean_t mcache_retry = FALSE;
6970 
6971 	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
6972 
6973 	/* Check if there's anything at the cache layer */
6974 	if (mbuf_cached_above(class, wait)) {
6975 		mcache_retry = TRUE;
6976 		goto done;
6977 	}
6978 
6979 	/* Nothing?  Then try hard to get it from somewhere */
6980 	m_reclaim(class, num, (wait & MCR_COMP));
6981 
6982 	/* We tried hard and got something? */
6983 	if (m_infree(class) > 0) {
6984 		mbstat.m_wait++;
6985 		goto done;
6986 	} else if (mbuf_cached_above(class, wait)) {
6987 		mbstat.m_wait++;
6988 		mcache_retry = TRUE;
6989 		goto done;
6990 	} else if (wait & MCR_TRYHARD) {
6991 		mcache_retry = TRUE;
6992 		goto done;
6993 	}
6994 
6995 	/*
6996 	 * There's really nothing for us right now; inform the
6997 	 * cache(s) that there is a waiter below and go to sleep.
6998 	 */
6999 	mbuf_waiter_inc(class, (wait & MCR_COMP));
7000 
7001 	VERIFY(!(wait & MCR_NOSLEEP));
7002 
7003 	/*
7004 	 * If this is the first waiter, arm the watchdog timer.  Otherwise
7005 	 * check if we need to panic the system due to watchdog timeout.
7006 	 */
7007 	if (mb_waiters == 0) {
7008 		microuptime(&mb_wdtstart);
7009 	} else {
7010 		mbuf_watchdog();
7011 	}
7012 
7013 	mb_waiters++;
7014 	m_region_expand(class) += m_total(class) + num;
7015 	/* wake up the worker thread */
7016 	if (mbuf_worker_ready &&
7017 	    mbuf_worker_needs_wakeup) {
7018 		wakeup((caddr_t)&mbuf_worker_needs_wakeup);
7019 		mbuf_worker_needs_wakeup = FALSE;
7020 	}
7021 	mbwdog_logger("waiting (%d mbufs in class %s)", num, m_cname(class));
7022 	(void) msleep(mb_waitchan, mbuf_mlock, (PZERO - 1), m_cname(class), NULL);
7023 	mbwdog_logger("woke up (%d mbufs in class %s) ", num, m_cname(class));
7024 
7025 	/* We are now up; stop getting notified until next round */
7026 	mbuf_waiter_dec(class, (wait & MCR_COMP));
7027 
7028 	/* We waited and got something */
7029 	if (m_infree(class) > 0) {
7030 		mbstat.m_wait++;
7031 		goto done;
7032 	} else if (mbuf_cached_above(class, wait)) {
7033 		mbstat.m_wait++;
7034 		mcache_retry = TRUE;
7035 	}
7036 done:
7037 	return mcache_retry;
7038 }
7039 
7040 __attribute__((noreturn))
7041 static void
7042 mbuf_worker_thread(void)
7043 {
7044 	int mbuf_expand;
7045 
7046 	while (1) {
7047 		lck_mtx_lock(mbuf_mlock);
7048 		mbwdog_logger("worker thread running");
7049 		mbuf_worker_run_cnt++;
7050 		mbuf_expand = 0;
7051 		/*
7052 		 * Allocations are based on page size, so if we have depleted
7053 		 * the reserved spaces, try to free mbufs from the major classes.
7054 		 */
7055 #if PAGE_SIZE == 4096
7056 		uint32_t m_mbclusters = m_total(MC_MBUF) >> NMBPCLSHIFT;
7057 		uint32_t m_clusters = m_total(MC_CL);
7058 		uint32_t m_bigclusters = m_total(MC_BIGCL) << NCLPBGSHIFT;
7059 		uint32_t sumclusters = m_mbclusters + m_clusters + m_bigclusters;
7060 		if (sumclusters >= nclusters) {
7061 			mbwdog_logger("reclaiming bigcl");
7062 			mbuf_drain_locked(TRUE);
7063 			m_reclaim(MC_BIGCL, 4, FALSE);
7064 		}
7065 #else
7066 		uint32_t m_16kclusters = m_total(MC_16KCL);
7067 		if (njcl > 0 && (m_16kclusters << NCLPJCLSHIFT) >= njcl) {
7068 			mbwdog_logger("reclaiming 16kcl");
7069 			mbuf_drain_locked(TRUE);
7070 			m_reclaim(MC_16KCL, 4, FALSE);
7071 		}
7072 #endif
7073 		if (m_region_expand(MC_CL) > 0) {
7074 			int n;
7075 			mb_expand_cl_cnt++;
7076 			/* Adjust to the current number of clusters in use */
7077 			n = m_region_expand(MC_CL) -
7078 			    (m_total(MC_CL) - m_infree(MC_CL));
7079 			if ((n + m_total(MC_CL)) > m_maxlimit(MC_CL)) {
7080 				n = m_maxlimit(MC_CL) - m_total(MC_CL);
7081 			}
7082 			if (n > 0) {
7083 				mb_expand_cl_total += n;
7084 			}
7085 			m_region_expand(MC_CL) = 0;
7086 
7087 			if (n > 0) {
7088 				mbwdog_logger("expanding MC_CL by %d", n);
7089 				freelist_populate(MC_CL, n, M_WAIT);
7090 			}
7091 		}
7092 		if (m_region_expand(MC_BIGCL) > 0) {
7093 			int n;
7094 			mb_expand_bigcl_cnt++;
7095 			/* Adjust to the current number of 4 KB clusters in use */
7096 			n = m_region_expand(MC_BIGCL) -
7097 			    (m_total(MC_BIGCL) - m_infree(MC_BIGCL));
7098 			if ((n + m_total(MC_BIGCL)) > m_maxlimit(MC_BIGCL)) {
7099 				n = m_maxlimit(MC_BIGCL) - m_total(MC_BIGCL);
7100 			}
7101 			if (n > 0) {
7102 				mb_expand_bigcl_total += n;
7103 			}
7104 			m_region_expand(MC_BIGCL) = 0;
7105 
7106 			if (n > 0) {
7107 				mbwdog_logger("expanding MC_BIGCL by %d", n);
7108 				freelist_populate(MC_BIGCL, n, M_WAIT);
7109 			}
7110 		}
7111 		if (m_region_expand(MC_16KCL) > 0) {
7112 			int n;
7113 			mb_expand_16kcl_cnt++;
7114 			/* Adjust to the current number of 16 KB clusters in use */
7115 			n = m_region_expand(MC_16KCL) -
7116 			    (m_total(MC_16KCL) - m_infree(MC_16KCL));
7117 			if ((n + m_total(MC_16KCL)) > m_maxlimit(MC_16KCL)) {
7118 				n = m_maxlimit(MC_16KCL) - m_total(MC_16KCL);
7119 			}
7120 			if (n > 0) {
7121 				mb_expand_16kcl_total += n;
7122 			}
7123 			m_region_expand(MC_16KCL) = 0;
7124 
7125 			if (n > 0) {
7126 				mbwdog_logger("expanding MC_16KCL by %d", n);
7127 				(void) freelist_populate(MC_16KCL, n, M_WAIT);
7128 			}
7129 		}
7130 
7131 		/*
7132 		 * Because we can run out of memory before filling the mbuf
7133 		 * map, we should not allocate more clusters than there are
7134 		 * mbufs -- otherwise we could have a large number of useless
7135 		 * clusters allocated.
7136 		 */
7137 		mbwdog_logger("totals: MC_MBUF %d MC_BIGCL %d MC_CL %d MC_16KCL %d",
7138 		    m_total(MC_MBUF), m_total(MC_BIGCL), m_total(MC_CL),
7139 		    m_total(MC_16KCL));
7140 		uint32_t total_mbufs = m_total(MC_MBUF);
7141 		uint32_t total_clusters = m_total(MC_BIGCL) + m_total(MC_CL) +
7142 		    m_total(MC_16KCL);
7143 		if (total_mbufs < total_clusters) {
7144 			mbwdog_logger("expanding MC_MBUF by %d",
7145 			    total_clusters - total_mbufs);
7146 		}
7147 		while (total_mbufs < total_clusters) {
7148 			mb_expand_cnt++;
7149 			if (freelist_populate(MC_MBUF, 1, M_WAIT) == 0) {
7150 				break;
7151 			}
7152 			total_mbufs = m_total(MC_MBUF);
7153 			total_clusters = m_total(MC_BIGCL) + m_total(MC_CL) +
7154 			    m_total(MC_16KCL);
7155 		}
7156 
7157 		mbuf_worker_needs_wakeup = TRUE;
7158 		/*
7159 		 * If there's a deadlock and we're not sending / receiving
7160 		 * packets, net_uptime() won't be updated.  Update it here
7161 		 * so we are sure it's correct.
7162 		 */
7163 		net_update_uptime();
7164 		mbuf_worker_last_runtime = net_uptime();
7165 		assert_wait((caddr_t)&mbuf_worker_needs_wakeup,
7166 		    THREAD_UNINT);
7167 		mbwdog_logger("worker thread sleeping");
7168 		lck_mtx_unlock(mbuf_mlock);
7169 		(void) thread_block((thread_continue_t)mbuf_worker_thread);
7170 	}
7171 }
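/*
 * The worker parks itself on &mbuf_worker_needs_wakeup and re-enters
 * mbuf_worker_thread() as its own continuation.  A sketch of the
 * producer side of the handshake (hypothetical call site; the actual
 * wakeups are issued from the allocation paths elsewhere in this file):
 *
 *	if (mbuf_worker_ready && mbuf_worker_needs_wakeup) {
 *		mbuf_worker_needs_wakeup = FALSE;
 *		wakeup((caddr_t)&mbuf_worker_needs_wakeup);
 *	}
 *
 * Clearing the flag under mbuf_mlock bounds the wakeups to one per
 * sleep cycle.
 */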
7172 
7173 __attribute__((noreturn))
7174 static void
7175 mbuf_worker_thread_init(void)
7176 {
7177 	mbuf_worker_ready++;
7178 	mbuf_worker_thread();
7179 }
7180 
7181 static mcl_slab_t *
7182 slab_get(void *buf)
7183 {
7184 	mcl_slabg_t *slg;
7185 	unsigned int ix, k;
7186 
7187 	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
7188 
7189 	VERIFY(MBUF_IN_MAP(buf));
7190 	ix = ((unsigned char *)buf - mbutl) >> MBSHIFT;
7191 	VERIFY(ix < maxslabgrp);
7192 
7193 	if ((slg = slabstbl[ix]) == NULL) {
7194 		/*
7195 		 * In the current implementation, we never shrink the slabs
7196 		 * table; if we attempt to reallocate a cluster group when
7197 		 * it's already allocated, panic since this is a sign of
7198 		 * memory corruption (slabstbl[ix] got nullified).
7199 		 */
7200 		++slabgrp;
7201 		VERIFY(ix < slabgrp);
7202 		/*
7203 		 * Slab expansion can only be done single-threaded; when
7204 		 * we get here, it must be as a result of m_clalloc() which
7205 		 * is serialized and therefore mb_clalloc_busy must be set.
7206 		 */
7207 		VERIFY(mb_clalloc_busy);
7208 		lck_mtx_unlock(mbuf_mlock);
7209 
7210 		/* This is a new buffer; create the slabs group for it */
7211 		slg = zalloc_permanent_type(mcl_slabg_t);
7212 		slg->slg_slab = zalloc_permanent(sizeof(mcl_slab_t) * NSLABSPMB,
7213 		    ZALIGN(mcl_slab_t));
7214 
7215 		lck_mtx_lock(mbuf_mlock);
7216 		/*
7217 		 * No other thread could have gone into m_clalloc() after
7218 		 * we dropped the lock above, so verify that it's true.
7219 		 */
7220 		VERIFY(mb_clalloc_busy);
7221 
7222 		slabstbl[ix] = slg;
7223 
7224 		/* Chain each slab in the group to its forward neighbor */
7225 		for (k = 1; k < NSLABSPMB; k++) {
7226 			slg->slg_slab[k - 1].sl_next = &slg->slg_slab[k];
7227 		}
7228 		VERIFY(slg->slg_slab[NSLABSPMB - 1].sl_next == NULL);
7229 
7230 		/* And chain the last slab in the previous group to this */
7231 		if (ix > 0) {
7232 			VERIFY(slabstbl[ix - 1]->
7233 			    slg_slab[NSLABSPMB - 1].sl_next == NULL);
7234 			slabstbl[ix - 1]->slg_slab[NSLABSPMB - 1].sl_next =
7235 			    &slg->slg_slab[0];
7236 		}
7237 	}
7238 
7239 	ix = MTOPG(buf) % NSLABSPMB;
7240 	VERIFY(ix < NSLABSPMB);
7241 
7242 	return &slg->slg_slab[ix];
7243 }
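/*
 * Worked example of the two-level lookup above, assuming a 4 KB page
 * size and MBSHIFT == 20 (so one slab group covers 1 MB and
 * NSLABSPMB == 256): for a buffer at mbutl + 0x123000,
 *
 *	group index: 0x123000 >> MBSHIFT == 1
 *	page index:  MTOPG(buf) == 0x123 (291)
 *	slab index:  291 % NSLABSPMB == 35
 *
 * i.e. the slab is slabstbl[1]->slg_slab[35].
 */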
7244 
7245 static void
7246 slab_init(mcl_slab_t *sp, mbuf_class_t class, u_int32_t flags,
7247     void *base, void *head, unsigned int len, int refcnt, int chunks)
7248 {
7249 	sp->sl_class = class;
7250 	sp->sl_flags = flags;
7251 	sp->sl_base = base;
7252 	sp->sl_head = head;
7253 	sp->sl_len = len;
7254 	sp->sl_refcnt = refcnt;
7255 	sp->sl_chunks = chunks;
7256 	slab_detach(sp);
7257 }
7258 
7259 static void
7260 slab_insert(mcl_slab_t *sp, mbuf_class_t class)
7261 {
7262 	VERIFY(slab_is_detached(sp));
7263 	m_slab_cnt(class)++;
7264 	TAILQ_INSERT_TAIL(&m_slablist(class), sp, sl_link);
7265 	sp->sl_flags &= ~SLF_DETACHED;
7266 
7267 	/*
7268 	 * If a buffer spans multiple contiguous pages, clear the detached
7269 	 * flag on the remaining slabs as well.
7270 	 */
7271 	if (class == MC_16KCL) {
7272 		int k;
7273 		for (k = 1; k < NSLABSP16KB; k++) {
7274 			sp = sp->sl_next;
7275 			/* Next slab must already be present */
7276 			VERIFY(sp != NULL && slab_is_detached(sp));
7277 			sp->sl_flags &= ~SLF_DETACHED;
7278 		}
7279 	}
7280 }
7281 
7282 static void
7283 slab_remove(mcl_slab_t *sp, mbuf_class_t class)
7284 {
7285 	int k;
7286 	VERIFY(!slab_is_detached(sp));
7287 	VERIFY(m_slab_cnt(class) > 0);
7288 	m_slab_cnt(class)--;
7289 	TAILQ_REMOVE(&m_slablist(class), sp, sl_link);
7290 	slab_detach(sp);
7291 	if (class == MC_16KCL) {
7292 		for (k = 1; k < NSLABSP16KB; k++) {
7293 			sp = sp->sl_next;
7294 			/* Next slab must already be present */
7295 			VERIFY(sp != NULL);
7296 			VERIFY(!slab_is_detached(sp));
7297 			slab_detach(sp);
7298 		}
7299 	}
7300 }
7301 
7302 static boolean_t
7303 slab_inrange(mcl_slab_t *sp, void *buf)
7304 {
7305 	return (uintptr_t)buf >= (uintptr_t)sp->sl_base &&
7306 	       (uintptr_t)buf < ((uintptr_t)sp->sl_base + sp->sl_len);
7307 }
7308 
7309 #undef panic
7310 
7311 static void
7312 slab_nextptr_panic(mcl_slab_t *sp, void *addr)
7313 {
7314 	int i;
7315 	unsigned int chunk_len = sp->sl_len / sp->sl_chunks;
7316 	uintptr_t buf = (uintptr_t)sp->sl_base;
7317 
7318 	for (i = 0; i < sp->sl_chunks; i++, buf += chunk_len) {
7319 		void *next = ((mcache_obj_t *)buf)->obj_next;
7320 		if (next != addr) {
7321 			continue;
7322 		}
7323 		if (!mclverify) {
7324 			if (next != NULL && !MBUF_IN_MAP(next)) {
7325 				mcache_t *cp = m_cache(sp->sl_class);
7326 				panic("%s: %s buffer %p in slab %p modified "
7327 				    "after free at offset 0: %p out of range "
7328 				    "[%p-%p)\n", __func__, cp->mc_name,
7329 				    (void *)buf, sp, next, mbutl, embutl);
7330 				/* NOTREACHED */
7331 			}
7332 		} else {
7333 			mcache_audit_t *mca = mcl_audit_buf2mca(sp->sl_class,
7334 			    (mcache_obj_t *)buf);
7335 			mcl_audit_verify_nextptr(next, mca);
7336 		}
7337 	}
7338 }
7339 
7340 static void
7341 slab_detach(mcl_slab_t *sp)
7342 {
7343 	sp->sl_link.tqe_next = (mcl_slab_t *)-1;
7344 	sp->sl_link.tqe_prev = (mcl_slab_t **)-1;
7345 	sp->sl_flags |= SLF_DETACHED;
7346 }
7347 
7348 static boolean_t
7349 slab_is_detached(mcl_slab_t *sp)
7350 {
7351 	return (intptr_t)sp->sl_link.tqe_next == -1 &&
7352 	       (intptr_t)sp->sl_link.tqe_prev == -1 &&
7353 	       (sp->sl_flags & SLF_DETACHED);
7354 }
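/*
 * Note that detached slabs carry poisoned TAILQ linkage (-1) rather
 * than NULL: an accidental traversal through a detached slab faults on
 * a non-canonical address instead of silently terminating, and
 * slab_is_detached() can tell "detached" apart from "legitimately at
 * the tail of a slab list", where tqe_next would be NULL.
 */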
7355 
7356 static void
7357 mcl_audit_init(void *buf, mcache_audit_t **mca_list,
7358     mcache_obj_t **con_list, size_t con_size, unsigned int num)
7359 {
7360 	mcache_audit_t *mca, *mca_tail;
7361 	mcache_obj_t *con = NULL;
7362 	boolean_t save_contents = (con_list != NULL);
7363 	unsigned int i, ix;
7364 
7365 	ASSERT(num <= NMBPG);
7366 	ASSERT(con_list == NULL || con_size != 0);
7367 
7368 	ix = MTOPG(buf);
7369 	VERIFY(ix < maxclaudit);
7370 
7371 	/* Make sure we haven't been here before */
7372 	for (i = 0; i < num; i++) {
7373 		VERIFY(mclaudit[ix].cl_audit[i] == NULL);
7374 	}
7375 
7376 	mca = mca_tail = *mca_list;
7377 	if (save_contents) {
7378 		con = *con_list;
7379 	}
7380 
7381 	for (i = 0; i < num; i++) {
7382 		mcache_audit_t *next;
7383 
7384 		next = mca->mca_next;
7385 		bzero(mca, sizeof(*mca));
7386 		mca->mca_next = next;
7387 		mclaudit[ix].cl_audit[i] = mca;
7388 
7389 		/* Attach the contents buffer if requested */
7390 		if (save_contents) {
7391 			mcl_saved_contents_t *msc =
7392 			    (mcl_saved_contents_t *)(void *)con;
7393 
7394 			VERIFY(msc != NULL);
7395 			VERIFY(IS_P2ALIGNED(msc, sizeof(u_int64_t)));
7396 			VERIFY(con_size == sizeof(*msc));
7397 			mca->mca_contents_size = con_size;
7398 			mca->mca_contents = msc;
7399 			con = con->obj_next;
7400 			bzero(mca->mca_contents, mca->mca_contents_size);
7401 		}
7402 
7403 		mca_tail = mca;
7404 		mca = mca->mca_next;
7405 	}
7406 
7407 	if (save_contents) {
7408 		*con_list = con;
7409 	}
7410 
7411 	*mca_list = mca_tail->mca_next;
7412 	mca_tail->mca_next = NULL;
7413 }
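/*
 * Layout note: the audit table is indexed by page, with
 * mclaudit[MTOPG(buf)].cl_audit[i] holding the mcache_audit_t of the
 * i-th object carved out of that page (one slot per mbuf, 2 KB or 4 KB
 * chunk; a 16 KB cluster uses only slot 0 of its first page, see
 * mcl_audit_buf2mca() below).  The optional saved-contents buffer is
 * what lets the audit code snapshot an object across free/alloc.
 */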
7414 
7415 static void
7416 mcl_audit_free(void *buf, unsigned int num)
7417 {
7418 	unsigned int i, ix;
7419 	mcache_audit_t *mca, *mca_list;
7420 
7421 	ix = MTOPG(buf);
7422 	VERIFY(ix < maxclaudit);
7423 
7424 	if (mclaudit[ix].cl_audit[0] != NULL) {
7425 		mca_list = mclaudit[ix].cl_audit[0];
7426 		for (i = 0; i < num; i++) {
7427 			mca = mclaudit[ix].cl_audit[i];
7428 			mclaudit[ix].cl_audit[i] = NULL;
7429 			if (mca->mca_contents) {
7430 				mcache_free(mcl_audit_con_cache,
7431 				    mca->mca_contents);
7432 			}
7433 		}
7434 		mcache_free_ext(mcache_audit_cache,
7435 		    (mcache_obj_t *)mca_list);
7436 	}
7437 }
7438 
7439 /*
7440  * Given an address of a buffer (mbuf/2KB/4KB/16KB), return
7441  * the corresponding audit structure for that buffer.
7442  */
7443 static mcache_audit_t *
7444 mcl_audit_buf2mca(mbuf_class_t class, mcache_obj_t *mobj)
7445 {
7446 	mcache_audit_t *mca = NULL;
7447 	int ix = MTOPG(mobj), m_idx = 0;
7448 	unsigned char *page_addr;
7449 
7450 	VERIFY(ix < maxclaudit);
7451 	VERIFY(IS_P2ALIGNED(mobj, MIN(m_maxsize(class), PAGE_SIZE)));
7452 
7453 	page_addr = PGTOM(ix);
7454 
7455 	switch (class) {
7456 	case MC_MBUF:
7457 		/*
7458 		 * For the mbuf case, find the index of the page
7459 		 * used by the mbuf and use that index to locate the
7460 		 * base address of the page.  Then find out the
7461 		 * mbuf index relative to the page base and use
7462 		 * it to locate the audit structure.
7463 		 */
7464 		m_idx = MBPAGEIDX(page_addr, mobj);
7465 		VERIFY(m_idx < (int)NMBPG);
7466 		mca = mclaudit[ix].cl_audit[m_idx];
7467 		break;
7468 
7469 	case MC_CL:
7470 		/*
7471 		 * Same thing as above, but for 2KB clusters in a page.
7472 		 */
7473 		m_idx = CLPAGEIDX(page_addr, mobj);
7474 		VERIFY(m_idx < (int)NCLPG);
7475 		mca = mclaudit[ix].cl_audit[m_idx];
7476 		break;
7477 
7478 	case MC_BIGCL:
7479 		m_idx = BCLPAGEIDX(page_addr, mobj);
7480 		VERIFY(m_idx < (int)NBCLPG);
7481 		mca = mclaudit[ix].cl_audit[m_idx];
7482 		break;
7483 	case MC_16KCL:
7484 		/*
7485 		 * Same as above, but only return the first element.
7486 		 */
7487 		mca = mclaudit[ix].cl_audit[0];
7488 		break;
7489 
7490 	default:
7491 		VERIFY(0);
7492 		/* NOTREACHED */
7493 	}
7494 
7495 	return mca;
7496 }
7497 
7498 static void
7499 mcl_audit_mbuf(mcache_audit_t *mca, void *addr, boolean_t composite,
7500     boolean_t alloc)
7501 {
7502 	struct mbuf *m = addr;
7503 	mcache_obj_t *next = ((mcache_obj_t *)m)->obj_next;
7504 
7505 	VERIFY(mca->mca_contents != NULL &&
7506 	    mca->mca_contents_size == AUDIT_CONTENTS_SIZE);
7507 
7508 	if (mclverify) {
7509 		mcl_audit_verify_nextptr(next, mca);
7510 	}
7511 
7512 	if (!alloc) {
7513 		/* Save constructed mbuf fields */
7514 		mcl_audit_save_mbuf(m, mca);
7515 		if (mclverify) {
7516 			mcache_set_pattern(MCACHE_FREE_PATTERN, m,
7517 			    m_maxsize(MC_MBUF));
7518 		}
7519 		((mcache_obj_t *)m)->obj_next = next;
7520 		return;
7521 	}
7522 
7523 	/* Check if the buffer has been corrupted while in freelist */
7524 	if (mclverify) {
7525 		mcache_audit_free_verify_set(mca, addr, 0, m_maxsize(MC_MBUF));
7526 	}
7527 	/* Restore constructed mbuf fields */
7528 	mcl_audit_restore_mbuf(m, mca, composite);
7529 }
7530 
7531 static void
7532 mcl_audit_restore_mbuf(struct mbuf *m, mcache_audit_t *mca, boolean_t composite)
7533 {
7534 	struct mbuf *ms = MCA_SAVED_MBUF_PTR(mca);
7535 
7536 	if (composite) {
7537 		struct mbuf *next = m->m_next;
7538 		VERIFY(ms->m_flags == M_EXT && m_get_rfa(ms) != NULL &&
7539 		    MBUF_IS_COMPOSITE(ms));
7540 		VERIFY(mca->mca_contents_size == AUDIT_CONTENTS_SIZE);
7541 		/*
7542 		 * We could have hand-picked the mbuf fields and restored
7543 		 * them individually, but that would be a maintenance
7544 		 * headache.  Instead, restore everything that was saved;
7545 		 * the mbuf layer will recheck and reinitialize anyway.
7546 		 */
7547 		bcopy(ms, m, MCA_SAVED_MBUF_SIZE);
7548 		m->m_next = next;
7549 	} else {
7550 		/*
7551 		 * For a regular mbuf (no cluster attached) there's nothing
7552 		 * to restore other than the type field, which is expected
7553 		 * to be MT_FREE.
7554 		 */
7555 		m->m_type = ms->m_type;
7556 	}
7557 	_MCHECK(m);
7558 }
7559 
7560 static void
7561 mcl_audit_save_mbuf(struct mbuf *m, mcache_audit_t *mca)
7562 {
7563 	VERIFY(mca->mca_contents_size == AUDIT_CONTENTS_SIZE);
7564 	_MCHECK(m);
7565 	bcopy(m, MCA_SAVED_MBUF_PTR(mca), MCA_SAVED_MBUF_SIZE);
7566 }
7567 
7568 static void
7569 mcl_audit_cluster(mcache_audit_t *mca, void *addr, size_t size, boolean_t alloc,
7570     boolean_t save_next)
7571 {
7572 	mcache_obj_t *next = ((mcache_obj_t *)addr)->obj_next;
7573 
7574 	if (!alloc) {
7575 		if (mclverify) {
7576 			mcache_set_pattern(MCACHE_FREE_PATTERN, addr, size);
7577 		}
7578 		if (save_next) {
7579 			mcl_audit_verify_nextptr(next, mca);
7580 			((mcache_obj_t *)addr)->obj_next = next;
7581 		}
7582 	} else if (mclverify) {
7583 		/* Check if the buffer has been corrupted while in freelist */
7584 		mcl_audit_verify_nextptr(next, mca);
7585 		mcache_audit_free_verify_set(mca, addr, 0, size);
7586 	}
7587 }
7588 
7589 static void
7590 mcl_audit_scratch(mcache_audit_t *mca)
7591 {
7592 	void *stack[MCACHE_STACK_DEPTH + 1];
7593 	mcl_scratch_audit_t *msa;
7594 	struct timeval now;
7595 
7596 	VERIFY(mca->mca_contents != NULL);
7597 	msa = MCA_SAVED_SCRATCH_PTR(mca);
7598 
7599 	msa->msa_pthread = msa->msa_thread;
7600 	msa->msa_thread = current_thread();
7601 	bcopy(msa->msa_stack, msa->msa_pstack, sizeof(msa->msa_pstack));
7602 	msa->msa_pdepth = msa->msa_depth;
7603 	bzero(stack, sizeof(stack));
7604 	msa->msa_depth = OSBacktrace(stack, MCACHE_STACK_DEPTH + 1) - 1;
7605 	bcopy(&stack[1], msa->msa_stack, sizeof(msa->msa_stack));
7606 
7607 	msa->msa_ptstamp = msa->msa_tstamp;
7608 	microuptime(&now);
7609 	/* tstamp is in ms relative to mb_start */
7610 	msa->msa_tstamp = ((now.tv_usec - mb_start.tv_usec) / 1000);
7611 	if ((now.tv_sec - mb_start.tv_sec) > 0) {
7612 		msa->msa_tstamp += ((now.tv_sec - mb_start.tv_sec) * 1000);
7613 	}
7614 }
7615 
7616 __abortlike
7617 static void
7618 mcl_audit_mcheck_panic(struct mbuf *m)
7619 {
7620 	char buf[DUMP_MCA_BUF_SIZE];
7621 	mcache_audit_t *mca;
7622 
7623 	MRANGE(m);
7624 	mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m);
7625 
7626 	panic("mcl_audit: freed mbuf %p with type 0x%x (instead of 0x%x)\n%s",
7627 	    m, (u_int16_t)m->m_type, MT_FREE, mcache_dump_mca(buf, mca));
7628 	/* NOTREACHED */
7629 }
7630 
7631 __abortlike
7632 static void
7633 mcl_audit_verify_nextptr_panic(void *next, mcache_audit_t *mca)
7634 {
7635 	char buf[DUMP_MCA_BUF_SIZE];
7636 	panic("mcl_audit: buffer %p modified after free at offset 0: "
7637 	    "%p out of range [%p-%p)\n%s\n",
7638 	    mca->mca_addr, next, mbutl, embutl, mcache_dump_mca(buf, mca));
7639 	/* NOTREACHED */
7640 }
7641 
7642 static void
7643 mcl_audit_verify_nextptr(void *next, mcache_audit_t *mca)
7644 {
7645 	if (next != NULL && !MBUF_IN_MAP(next) &&
7646 	    (next != (void *)MCACHE_FREE_PATTERN || !mclverify)) {
7647 		mcl_audit_verify_nextptr_panic(next, mca);
7648 	}
7649 }
7650 
7651 static uintptr_t
7652 hash_mix(uintptr_t x)
7653 {
7654 #ifndef __LP64__
7655 	x += ~(x << 15);
7656 	x ^=  (x >> 10);
7657 	x +=  (x << 3);
7658 	x ^=  (x >> 6);
7659 	x += ~(x << 11);
7660 	x ^=  (x >> 16);
7661 #else
7662 	x += ~(x << 32);
7663 	x ^=  (x >> 22);
7664 	x += ~(x << 13);
7665 	x ^=  (x >> 8);
7666 	x +=  (x << 3);
7667 	x ^=  (x >> 15);
7668 	x += ~(x << 27);
7669 	x ^=  (x >> 31);
7670 #endif
7671 	return x;
7672 }
7673 
7674 static uint32_t
7675 hashbacktrace(uintptr_t* bt, uint32_t depth, uint32_t max_size)
7676 {
7677 	uintptr_t hash = 0;
7678 	uintptr_t mask = max_size - 1;
7679 
7680 	while (depth) {
7681 		hash += bt[--depth];
7682 	}
7683 
7684 	hash = hash_mix(hash) & mask;
7685 
7686 	assert(hash < max_size);
7687 
7688 	return (uint32_t) hash;
7689 }
7690 
7691 static uint32_t
7692 hashaddr(uintptr_t pt, uint32_t max_size)
7693 {
7694 	uintptr_t hash = 0;
7695 	uintptr_t mask = max_size - 1;
7696 
7697 	hash = hash_mix(pt) & mask;
7698 
7699 	assert(hash < max_size);
7700 
7701 	return (uint32_t) hash;
7702 }
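/*
 * Both hash functions above reduce the mixed value with
 * (max_size - 1), which is only a valid modulus when max_size is a
 * power of two.  E.g. with max_size == 1024 the mask is 0x3ff and the
 * bucket is the low 10 bits of hash_mix(); a non-power-of-two table
 * size would leave some buckets unreachable.
 */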
7703 
7704 /* This function turns on mbuf leak detection */
7705 static void
7706 mleak_activate(void)
7707 {
7708 	mleak_table.mleak_sample_factor = MLEAK_SAMPLE_FACTOR;
7709 	PE_parse_boot_argn("mleak_sample_factor",
7710 	    &mleak_table.mleak_sample_factor,
7711 	    sizeof(mleak_table.mleak_sample_factor));
7712 
7713 	if (mleak_table.mleak_sample_factor == 0) {
7714 		mclfindleak = 0;
7715 	}
7716 
7717 	if (mclfindleak == 0) {
7718 		return;
7719 	}
7720 
7721 	vm_size_t alloc_size =
7722 	    mleak_alloc_buckets * sizeof(struct mallocation);
7723 	vm_size_t trace_size = mleak_trace_buckets * sizeof(struct mtrace);
7724 
7725 	mleak_allocations = zalloc_permanent(alloc_size, ZALIGN(struct mallocation));
7726 	mleak_traces = zalloc_permanent(trace_size, ZALIGN(struct mtrace));
7727 	mleak_stat = zalloc_permanent(MLEAK_STAT_SIZE(MLEAK_NUM_TRACES),
7728 	    ZALIGN(mleak_stat_t));
7729 
7730 	mleak_stat->ml_cnt = MLEAK_NUM_TRACES;
7731 #ifdef __LP64__
7732 	mleak_stat->ml_isaddr64 = 1;
7733 #endif /* __LP64__ */
7734 }
7735 
7736 static void
7737 mleak_logger(u_int32_t num, mcache_obj_t *addr, boolean_t alloc)
7738 {
7739 	int temp;
7740 
7741 	if (mclfindleak == 0) {
7742 		return;
7743 	}
7744 
7745 	if (!alloc) {
7746 		return mleak_free(addr);
7747 	}
7748 
7749 	temp = atomic_add_32_ov(&mleak_table.mleak_capture, 1);
7750 
7751 	if ((temp % mleak_table.mleak_sample_factor) == 0 && addr != NULL) {
7752 		uintptr_t bt[MLEAK_STACK_DEPTH];
7753 		unsigned int logged = backtrace(bt, MLEAK_STACK_DEPTH, NULL, NULL);
7754 		mleak_log(bt, addr, logged, num);
7755 	}
7756 }
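/*
 * Sampling sketch: mleak_capture is a free-running counter bumped on
 * every allocation, and only the allocations where the counter is a
 * multiple of mleak_sample_factor pay for a backtrace capture.  With a
 * factor of N, roughly one in N allocations is recorded; the rest cost
 * a single atomic increment.
 */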
7757 
7758 /*
7759  * This function records the allocation in the mleak_allocations table
7760  * and the backtrace in the mleak_traces table.  If the allocation slot is
7761  * in use, the old record is replaced; if the trace slot is in use by a
7762  * different trace, bail out (or increment the refcount if same trace).
7763  */
7764 static boolean_t
7765 mleak_log(uintptr_t *bt, mcache_obj_t *addr, uint32_t depth, int num)
7766 {
7767 	struct mallocation *allocation;
7768 	struct mtrace *trace;
7769 	uint32_t trace_index;
7770 
7771 	/* Quit if someone else is modifying the tables */
7772 	if (!lck_mtx_try_lock_spin(mleak_lock)) {
7773 		mleak_table.total_conflicts++;
7774 		return FALSE;
7775 	}
7776 
7777 	allocation = &mleak_allocations[hashaddr((uintptr_t)addr,
7778 	    mleak_alloc_buckets)];
7779 	trace_index = hashbacktrace(bt, depth, mleak_trace_buckets);
7780 	trace = &mleak_traces[trace_index];
7781 
7782 	VERIFY(allocation <= &mleak_allocations[mleak_alloc_buckets - 1]);
7783 	VERIFY(trace <= &mleak_traces[mleak_trace_buckets - 1]);
7784 
7785 	allocation->hitcount++;
7786 	trace->hitcount++;
7787 
7788 	/*
7789 	 * If the allocation bucket we want is occupied
7790 	 * and the occupier has the same trace, just bail.
7791 	 */
7792 	if (allocation->element != NULL &&
7793 	    trace_index == allocation->trace_index) {
7794 		mleak_table.alloc_collisions++;
7795 		lck_mtx_unlock(mleak_lock);
7796 		return TRUE;
7797 	}
7798 
7799 	/*
7800 	 * Store the backtrace in the traces array;
7801 	 * an allocs count of zero means the trace bucket is free.
7802 	 */
7803 	if (trace->allocs > 0 &&
7804 	    bcmp(trace->addr, bt, (depth * sizeof(uintptr_t))) != 0) {
7805 		/* Different, unique trace, but the same hash! Bail out. */
7806 		trace->collisions++;
7807 		mleak_table.trace_collisions++;
7808 		lck_mtx_unlock(mleak_lock);
7809 		return TRUE;
7810 	} else if (trace->allocs > 0) {
7811 		/* Same trace, already added, so increment refcount */
7812 		trace->allocs++;
7813 	} else {
7814 		/* Found an unused trace bucket, so record the trace here */
7815 		if (trace->depth != 0) {
7816 			/* this slot was used previously but is now free */
7817 			mleak_table.trace_overwrites++;
7818 		}
7819 		mleak_table.trace_recorded++;
7820 		trace->allocs = 1;
7821 		memcpy(trace->addr, bt, (depth * sizeof(uintptr_t)));
7822 		trace->depth = depth;
7823 		trace->collisions = 0;
7824 	}
7825 
7826 	/* Step 2: Store the allocation record in the allocations array */
7827 	if (allocation->element != NULL) {
7828 		/*
7829 		 * Replace an existing allocation.  No need to preserve
7830 		 * because only a subset of the allocations are being
7831 		 * recorded anyway.
7832 		 */
7833 		mleak_table.alloc_collisions++;
7834 	} else if (allocation->trace_index != 0) {
7835 		mleak_table.alloc_overwrites++;
7836 	}
7837 	allocation->element = addr;
7838 	allocation->trace_index = trace_index;
7839 	allocation->count = num;
7840 	mleak_table.alloc_recorded++;
7841 	mleak_table.outstanding_allocs++;
7842 
7843 	lck_mtx_unlock(mleak_lock);
7844 	return TRUE;
7845 }
7846 
7847 static void
7848 mleak_free(mcache_obj_t *addr)
7849 {
7850 	while (addr != NULL) {
7851 		struct mallocation *allocation = &mleak_allocations
7852 		    [hashaddr((uintptr_t)addr, mleak_alloc_buckets)];
7853 
7854 		if (allocation->element == addr &&
7855 		    allocation->trace_index < mleak_trace_buckets) {
7856 			lck_mtx_lock_spin(mleak_lock);
7857 			if (allocation->element == addr &&
7858 			    allocation->trace_index < mleak_trace_buckets) {
7859 				struct mtrace *trace;
7860 				trace = &mleak_traces[allocation->trace_index];
7861 				/* allocs = 0 means trace bucket is unused */
7862 				if (trace->allocs > 0) {
7863 					trace->allocs--;
7864 				}
7865 				if (trace->allocs == 0) {
7866 					trace->depth = 0;
7867 				}
7868 				/* NULL element means alloc bucket is unused */
7869 				allocation->element = NULL;
7870 				mleak_table.outstanding_allocs--;
7871 			}
7872 			lck_mtx_unlock(mleak_lock);
7873 		}
7874 		addr = addr->obj_next;
7875 	}
7876 }
7877 
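/*
 * mleak_sort_traces() maintains mleak_top_trace[] as a bounded top-N
 * (N == MLEAK_NUM_TRACES) in descending order of outstanding allocs.
 * The first pass fills the array with the first N live buckets using
 * insertion sort; the second pass scans the remaining buckets and,
 * whenever one beats the current minimum (the last element), replaces
 * it and re-inserts.  Cost is O(buckets * N), fine for small N.
 */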
7878 static void
7879 mleak_sort_traces(void)
7880 {
7881 	int i, j, k;
7882 	struct mtrace *swap;
7883 
7884 	for (i = 0; i < MLEAK_NUM_TRACES; i++) {
7885 		mleak_top_trace[i] = NULL;
7886 	}
7887 
7888 	for (i = 0, j = 0; j < MLEAK_NUM_TRACES && i < mleak_trace_buckets; i++) {
7889 		if (mleak_traces[i].allocs <= 0) {
7890 			continue;
7891 		}
7892 
7893 		mleak_top_trace[j] = &mleak_traces[i];
7894 		for (k = j; k > 0; k--) {
7895 			if (mleak_top_trace[k]->allocs <=
7896 			    mleak_top_trace[k - 1]->allocs) {
7897 				break;
7898 			}
7899 
7900 			swap = mleak_top_trace[k - 1];
7901 			mleak_top_trace[k - 1] = mleak_top_trace[k];
7902 			mleak_top_trace[k] = swap;
7903 		}
7904 		j++;
7905 	}
7906 
7907 	j--;
7908 	for (; i < mleak_trace_buckets; i++) {
7909 		if (mleak_traces[i].allocs <= mleak_top_trace[j]->allocs) {
7910 			continue;
7911 		}
7912 
7913 		mleak_top_trace[j] = &mleak_traces[i];
7914 
7915 		for (k = j; k > 0; k--) {
7916 			if (mleak_top_trace[k]->allocs <=
7917 			    mleak_top_trace[k - 1]->allocs) {
7918 				break;
7919 			}
7920 
7921 			swap = mleak_top_trace[k - 1];
7922 			mleak_top_trace[k - 1] = mleak_top_trace[k];
7923 			mleak_top_trace[k] = swap;
7924 		}
7925 	}
7926 }
7927 
7928 static void
7929 mleak_update_stats(void)
7930 {
7931 	mleak_trace_stat_t *mltr;
7932 	int i;
7933 
7934 	VERIFY(mleak_stat != NULL);
7935 #ifdef __LP64__
7936 	VERIFY(mleak_stat->ml_isaddr64);
7937 #else
7938 	VERIFY(!mleak_stat->ml_isaddr64);
7939 #endif /* !__LP64__ */
7940 	VERIFY(mleak_stat->ml_cnt == MLEAK_NUM_TRACES);
7941 
7942 	mleak_sort_traces();
7943 
7944 	mltr = &mleak_stat->ml_trace[0];
7945 	bzero(mltr, sizeof(*mltr) * MLEAK_NUM_TRACES);
7946 	for (i = 0; i < MLEAK_NUM_TRACES; i++) {
7947 		int j;
7948 
7949 		if (mleak_top_trace[i] == NULL ||
7950 		    mleak_top_trace[i]->allocs == 0) {
7951 			continue;
7952 		}
7953 
7954 		mltr->mltr_collisions   = mleak_top_trace[i]->collisions;
7955 		mltr->mltr_hitcount     = mleak_top_trace[i]->hitcount;
7956 		mltr->mltr_allocs       = mleak_top_trace[i]->allocs;
7957 		mltr->mltr_depth        = mleak_top_trace[i]->depth;
7958 
7959 		VERIFY(mltr->mltr_depth <= MLEAK_STACK_DEPTH);
7960 		for (j = 0; j < mltr->mltr_depth; j++) {
7961 			mltr->mltr_addr[j] = mleak_top_trace[i]->addr[j];
7962 		}
7963 
7964 		mltr++;
7965 	}
7966 }
7967 
7968 static struct mbtypes {
7969 	int             mt_type;
7970 	const char      *mt_name;
7971 } mbtypes[] = {
7972 	{ MT_DATA, "data" },
7973 	{ MT_OOBDATA, "oob data" },
7974 	{ MT_CONTROL, "ancillary data" },
7975 	{ MT_HEADER, "packet headers" },
7976 	{ MT_SOCKET, "socket structures" },
7977 	{ MT_PCB, "protocol control blocks" },
7978 	{ MT_RTABLE, "routing table entries" },
7979 	{ MT_HTABLE, "IMP host table entries" },
7980 	{ MT_ATABLE, "address resolution tables" },
7981 	{ MT_FTABLE, "fragment reassembly queue headers" },
7982 	{ MT_SONAME, "socket names and addresses" },
7983 	{ MT_SOOPTS, "socket options" },
7984 	{ MT_RIGHTS, "access rights" },
7985 	{ MT_IFADDR, "interface addresses" },
7986 	{ MT_TAG, "packet tags" },
7987 	{ 0, NULL }
7988 };
7989 
7990 #define MBUF_DUMP_BUF_CHK() {   \
7991 	clen -= k;              \
7992 	if (clen < 1)           \
7993 	        goto done;      \
7994 	c += k;                 \
7995 }
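/*
 * MBUF_DUMP_BUF_CHK() leans on naming conventions inside mbuf_dump():
 * 'k' must hold the byte count returned by the preceding scnprintf(),
 * 'c' and 'clen' are the write cursor and remaining space, and a
 * 'done:' label must exist to bail to once the buffer is exhausted:
 *
 *	k = scnprintf(c, clen, "...");
 *	MBUF_DUMP_BUF_CHK();	// advance c, shrink clen, or goto done
 */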
7996 
7997 static char *
7998 mbuf_dump(void)
7999 {
8000 	unsigned long totmem = 0, totfree = 0, totmbufs, totused, totpct,
8001 	    totreturned = 0;
8002 	u_int32_t m_mbufs = 0, m_clfree = 0, m_bigclfree = 0;
8003 	u_int32_t m_mbufclfree = 0, m_mbufbigclfree = 0;
8004 	u_int32_t m_16kclusters = 0, m_16kclfree = 0, m_mbuf16kclfree = 0;
8005 	int nmbtypes = sizeof(mbstat.m_mtypes) / sizeof(short);
8006 	uint8_t seen[256];
8007 	struct mbtypes *mp;
8008 	mb_class_stat_t *sp;
8009 	mleak_trace_stat_t *mltr;
8010 	char *c = mbuf_dump_buf;
8011 	int i, j, k, clen = MBUF_DUMP_BUF_SIZE;
8012 	bool printed_banner = false;
8013 
8014 	mbuf_dump_buf[0] = '\0';
8015 
8016 	/* synchronize all statistics in the mbuf table */
8017 	mbuf_stat_sync();
8018 	mbuf_mtypes_sync(TRUE);
8019 
8020 	sp = &mb_stat->mbs_class[0];
8021 	for (i = 0; i < mb_stat->mbs_cnt; i++, sp++) {
8022 		u_int32_t mem;
8023 
8024 		if (m_class(i) == MC_MBUF) {
8025 			m_mbufs = sp->mbcl_active;
8026 		} else if (m_class(i) == MC_CL) {
8027 			m_clfree = sp->mbcl_total - sp->mbcl_active;
8028 		} else if (m_class(i) == MC_BIGCL) {
8029 			m_bigclfree = sp->mbcl_total - sp->mbcl_active;
8030 		} else if (njcl > 0 && m_class(i) == MC_16KCL) {
8031 			m_16kclfree = sp->mbcl_total - sp->mbcl_active;
8032 			m_16kclusters = sp->mbcl_total;
8033 		} else if (m_class(i) == MC_MBUF_CL) {
8034 			m_mbufclfree = sp->mbcl_total - sp->mbcl_active;
8035 		} else if (m_class(i) == MC_MBUF_BIGCL) {
8036 			m_mbufbigclfree = sp->mbcl_total - sp->mbcl_active;
8037 		} else if (njcl > 0 && m_class(i) == MC_MBUF_16KCL) {
8038 			m_mbuf16kclfree = sp->mbcl_total - sp->mbcl_active;
8039 		}
8040 
8041 		mem = sp->mbcl_ctotal * sp->mbcl_size;
8042 		totmem += mem;
8043 		totfree += (sp->mbcl_mc_cached + sp->mbcl_infree) *
8044 		    sp->mbcl_size;
8045 		totreturned += sp->mbcl_release_cnt;
8046 	}
8047 
8048 	/* adjust free counts to include composite caches */
8049 	m_clfree += m_mbufclfree;
8050 	m_bigclfree += m_mbufbigclfree;
8051 	m_16kclfree += m_mbuf16kclfree;
8052 
8053 	totmbufs = 0;
8054 	for (mp = mbtypes; mp->mt_name != NULL; mp++) {
8055 		totmbufs += mbstat.m_mtypes[mp->mt_type];
8056 	}
8057 	if (totmbufs > m_mbufs) {
8058 		totmbufs = m_mbufs;
8059 	}
8060 	k = scnprintf(c, clen, "%lu/%u mbufs in use:\n", totmbufs, m_mbufs);
8061 	MBUF_DUMP_BUF_CHK();
8062 
8063 	bzero(&seen, sizeof(seen));
8064 	for (mp = mbtypes; mp->mt_name != NULL; mp++) {
8065 		if (mbstat.m_mtypes[mp->mt_type] != 0) {
8066 			seen[mp->mt_type] = 1;
8067 			k = scnprintf(c, clen, "\t%u mbufs allocated to %s\n",
8068 			    mbstat.m_mtypes[mp->mt_type], mp->mt_name);
8069 			MBUF_DUMP_BUF_CHK();
8070 		}
8071 	}
8072 	seen[MT_FREE] = 1;
8073 	for (i = 0; i < nmbtypes; i++) {
8074 		if (!seen[i] && mbstat.m_mtypes[i] != 0) {
8075 			k = scnprintf(c, clen, "\t%u mbufs allocated to "
8076 			    "<mbuf type %d>\n", mbstat.m_mtypes[i], i);
8077 			MBUF_DUMP_BUF_CHK();
8078 		}
8079 	}
8080 	if ((m_mbufs - totmbufs) > 0) {
8081 		k = scnprintf(c, clen, "\t%lu mbufs allocated to caches\n",
8082 		    m_mbufs - totmbufs);
8083 		MBUF_DUMP_BUF_CHK();
8084 	}
8085 	k = scnprintf(c, clen, "%u/%u mbuf 2KB clusters in use\n"
8086 	    "%u/%u mbuf 4KB clusters in use\n",
8087 	    (unsigned int)(mbstat.m_clusters - m_clfree),
8088 	    (unsigned int)mbstat.m_clusters,
8089 	    (unsigned int)(mbstat.m_bigclusters - m_bigclfree),
8090 	    (unsigned int)mbstat.m_bigclusters);
8091 	MBUF_DUMP_BUF_CHK();
8092 
8093 	if (njcl > 0) {
8094 		k = scnprintf(c, clen, "%u/%u mbuf %uKB clusters in use\n",
8095 		    m_16kclusters - m_16kclfree, m_16kclusters,
8096 		    njclbytes / 1024);
8097 		MBUF_DUMP_BUF_CHK();
8098 	}
8099 	totused = totmem - totfree;
8100 	if (totmem == 0) {
8101 		totpct = 0;
8102 	} else if (totused < (ULONG_MAX / 100)) {
8103 		totpct = (totused * 100) / totmem;
8104 	} else {
8105 		u_long totmem1 = totmem / 100;
8106 		u_long totused1 = totused / 100;
8107 		totpct = (totused1 * 100) / totmem1;
8108 	}
8109 	k = scnprintf(c, clen, "%lu KB allocated to network (approx. %lu%% "
8110 	    "in use)\n", totmem / 1024, totpct);
8111 	MBUF_DUMP_BUF_CHK();
8112 	k = scnprintf(c, clen, "%lu KB returned to the system\n",
8113 	    totreturned / 1024);
8114 	MBUF_DUMP_BUF_CHK();
8115 
8116 	net_update_uptime();
8117 	k = scnprintf(c, clen,
8118 	    "VM allocation failures: contiguous %u, normal %u, one page %u\n",
8119 	    mb_kmem_contig_failed, mb_kmem_failed, mb_kmem_one_failed);
8120 	MBUF_DUMP_BUF_CHK();
8121 	if (mb_kmem_contig_failed_ts || mb_kmem_failed_ts ||
8122 	    mb_kmem_one_failed_ts) {
8123 		k = scnprintf(c, clen,
8124 		    "VM allocation failure timestamps: contiguous %llu "
8125 		    "(size %llu), normal %llu (size %llu), one page %llu "
8126 		    "(now %llu)\n",
8127 		    mb_kmem_contig_failed_ts, mb_kmem_contig_failed_size,
8128 		    mb_kmem_failed_ts, mb_kmem_failed_size,
8129 		    mb_kmem_one_failed_ts, net_uptime());
8130 		MBUF_DUMP_BUF_CHK();
8131 		k = scnprintf(c, clen,
8132 		    "VM return codes: ");
8133 		MBUF_DUMP_BUF_CHK();
8134 		for (i = 0;
8135 		    i < sizeof(mb_kmem_stats) / sizeof(mb_kmem_stats[0]);
8136 		    i++) {
8137 			k = scnprintf(c, clen, "%s: %u ", mb_kmem_stats_labels[i],
8138 			    mb_kmem_stats[i]);
8139 			MBUF_DUMP_BUF_CHK();
8140 		}
8141 		k = scnprintf(c, clen, "\n");
8142 		MBUF_DUMP_BUF_CHK();
8143 	}
8144 	k = scnprintf(c, clen,
8145 	    "worker thread runs: %u, expansions: %llu, cl %llu/%llu, "
8146 	    "bigcl %llu/%llu, 16k %llu/%llu\n", mbuf_worker_run_cnt,
8147 	    mb_expand_cnt, mb_expand_cl_cnt, mb_expand_cl_total,
8148 	    mb_expand_bigcl_cnt, mb_expand_bigcl_total, mb_expand_16kcl_cnt,
8149 	    mb_expand_16kcl_total);
8150 	MBUF_DUMP_BUF_CHK();
8151 	if (mbuf_worker_last_runtime != 0) {
8152 		k = scnprintf(c, clen, "worker thread last run time: "
8153 		    "%llu (%llu seconds ago)\n",
8154 		    mbuf_worker_last_runtime,
8155 		    net_uptime() - mbuf_worker_last_runtime);
8156 		MBUF_DUMP_BUF_CHK();
8157 	}
8158 	if (mbuf_drain_last_runtime != 0) {
8159 		k = scnprintf(c, clen, "drain routine last run time: "
8160 		    "%llu (%llu seconds ago)\n",
8161 		    mbuf_drain_last_runtime,
8162 		    net_uptime() - mbuf_drain_last_runtime);
8163 		MBUF_DUMP_BUF_CHK();
8164 	}
8165 
8166 #if DEBUG || DEVELOPMENT
8167 	k = scnprintf(c, clen, "\nworker thread log:\n%s\n", mbwdog_logging);
8168 	MBUF_DUMP_BUF_CHK();
8169 #endif
8170 
8171 	for (j = 0; j < MTRACELARGE_NUM_TRACES; j++) {
8172 		struct mtracelarge *trace = &mtracelarge_table[j];
8173 		if (trace->size == 0 || trace->depth == 0) {
8174 			continue;
8175 		}
8176 		if (printed_banner == false) {
8177 			k = scnprintf(c, clen,
8178 			    "\nlargest allocation failure backtraces:\n");
8179 			MBUF_DUMP_BUF_CHK();
8180 			printed_banner = true;
8181 		}
8182 		k = scnprintf(c, clen, "size %llu: < ", trace->size);
8183 		MBUF_DUMP_BUF_CHK();
8184 		for (i = 0; i < trace->depth; i++) {
8185 			if (mleak_stat->ml_isaddr64) {
8186 				k = scnprintf(c, clen, "0x%0llx ",
8187 				    (uint64_t)VM_KERNEL_UNSLIDE(
8188 					    trace->addr[i]));
8189 			} else {
8190 				k = scnprintf(c, clen,
8191 				    "0x%08x ",
8192 				    (uint32_t)VM_KERNEL_UNSLIDE(
8193 					    trace->addr[i]));
8194 			}
8195 			MBUF_DUMP_BUF_CHK();
8196 		}
8197 		k = scnprintf(c, clen, ">\n");
8198 		MBUF_DUMP_BUF_CHK();
8199 	}
8200 
8201 	/* mbuf leak detection statistics */
8202 	mleak_update_stats();
8203 
8204 	k = scnprintf(c, clen, "\nmbuf leak detection table:\n");
8205 	MBUF_DUMP_BUF_CHK();
8206 	k = scnprintf(c, clen, "\ttotal captured: %u (one per %u)\n",
8207 	    mleak_table.mleak_capture / mleak_table.mleak_sample_factor,
8208 	    mleak_table.mleak_sample_factor);
8209 	MBUF_DUMP_BUF_CHK();
8210 	k = scnprintf(c, clen, "\ttotal allocs outstanding: %llu\n",
8211 	    mleak_table.outstanding_allocs);
8212 	MBUF_DUMP_BUF_CHK();
8213 	k = scnprintf(c, clen, "\tnew hash recorded: %llu allocs, %llu traces\n",
8214 	    mleak_table.alloc_recorded, mleak_table.trace_recorded);
8215 	MBUF_DUMP_BUF_CHK();
8216 	k = scnprintf(c, clen, "\thash collisions: %llu allocs, %llu traces\n",
8217 	    mleak_table.alloc_collisions, mleak_table.trace_collisions);
8218 	MBUF_DUMP_BUF_CHK();
8219 	k = scnprintf(c, clen, "\toverwrites: %llu allocs, %llu traces\n",
8220 	    mleak_table.alloc_overwrites, mleak_table.trace_overwrites);
8221 	MBUF_DUMP_BUF_CHK();
8222 	k = scnprintf(c, clen, "\tlock conflicts: %llu\n\n",
8223 	    mleak_table.total_conflicts);
8224 	MBUF_DUMP_BUF_CHK();
8225 
8226 	k = scnprintf(c, clen, "top %d outstanding traces:\n",
8227 	    mleak_stat->ml_cnt);
8228 	MBUF_DUMP_BUF_CHK();
8229 	for (i = 0; i < mleak_stat->ml_cnt; i++) {
8230 		mltr = &mleak_stat->ml_trace[i];
8231 		k = scnprintf(c, clen, "[%d] %llu outstanding alloc(s), "
8232 		    "%llu hit(s), %llu collision(s)\n", (i + 1),
8233 		    mltr->mltr_allocs, mltr->mltr_hitcount,
8234 		    mltr->mltr_collisions);
8235 		MBUF_DUMP_BUF_CHK();
8236 	}
8237 
8238 	if (mleak_stat->ml_isaddr64) {
8239 		k = scnprintf(c, clen, MB_LEAK_HDR_64);
8240 	} else {
8241 		k = scnprintf(c, clen, MB_LEAK_HDR_32);
8242 	}
8243 	MBUF_DUMP_BUF_CHK();
8244 
8245 	for (i = 0; i < MLEAK_STACK_DEPTH; i++) {
8246 		k = scnprintf(c, clen, "%2d: ", (i + 1));
8247 		MBUF_DUMP_BUF_CHK();
8248 		for (j = 0; j < mleak_stat->ml_cnt; j++) {
8249 			mltr = &mleak_stat->ml_trace[j];
8250 			if (i < mltr->mltr_depth) {
8251 				if (mleak_stat->ml_isaddr64) {
8252 					k = scnprintf(c, clen, "0x%0llx  ",
8253 					    (uint64_t)VM_KERNEL_UNSLIDE(
8254 						    mltr->mltr_addr[i]));
8255 				} else {
8256 					k = scnprintf(c, clen,
8257 					    "0x%08x  ",
8258 					    (uint32_t)VM_KERNEL_UNSLIDE(
8259 						    mltr->mltr_addr[i]));
8260 				}
8261 			} else {
8262 				if (mleak_stat->ml_isaddr64) {
8263 					k = scnprintf(c, clen,
8264 					    MB_LEAK_SPACING_64);
8265 				} else {
8266 					k = scnprintf(c, clen,
8267 					    MB_LEAK_SPACING_32);
8268 				}
8269 			}
8270 			MBUF_DUMP_BUF_CHK();
8271 		}
8272 		k = scnprintf(c, clen, "\n");
8273 		MBUF_DUMP_BUF_CHK();
8274 	}
8275 done:
8276 	return mbuf_dump_buf;
8277 }
8278 
8279 #undef MBUF_DUMP_BUF_CHK
8280 
8281 /*
8282  * Convert between a regular and a packet header mbuf.  Caller is responsible
8283  * Convert between a regular and a packet header mbuf.  The hdr argument
8284  * selects the target form; this routine updates M_PKTHDR and does the rest.
8285 int
8286 m_reinit(struct mbuf *m, int hdr)
8287 {
8288 	int ret = 0;
8289 
8290 	if (hdr) {
8291 		VERIFY(!(m->m_flags & M_PKTHDR));
8292 		if (!(m->m_flags & M_EXT) &&
8293 		    (m->m_data != m->m_dat || m->m_len > 0)) {
8294 			/*
8295 			 * If there's no external cluster attached and the
8296 			 * mbuf appears to contain user data, we cannot
8297 			 * safely convert this to a packet header mbuf,
8298 			 * as the packet header structure might overlap
8299 			 * with the data.
8300 			 */
8301 			printf("%s: cannot set M_PKTHDR on altered mbuf %llx, "
8302 			    "m_data %llx (expected %llx), "
8303 			    "m_len %d (expected 0)\n",
8304 			    __func__,
8305 			    (uint64_t)VM_KERNEL_ADDRPERM((uintptr_t)m),
8306 			    (uint64_t)VM_KERNEL_ADDRPERM((uintptr_t)m->m_data),
8307 			    (uint64_t)VM_KERNEL_ADDRPERM((uintptr_t)(m->m_dat)), m->m_len);
8308 			ret = EBUSY;
8309 		} else {
8310 			VERIFY((m->m_flags & M_EXT) || m->m_data == m->m_dat);
8311 			m->m_flags |= M_PKTHDR;
8312 			MBUF_INIT_PKTHDR(m);
8313 		}
8314 	} else {
8315 		/* Check for scratch area overflow */
8316 		m_redzone_verify(m);
8317 		/* Free the aux data and tags if there is any */
8318 		m_tag_delete_chain(m, NULL);
8319 		m->m_flags &= ~M_PKTHDR;
8320 	}
8321 
8322 	return ret;
8323 }
8324 
8325 int
8326 m_ext_set_prop(struct mbuf *m, uint32_t o, uint32_t n)
8327 {
8328 	ASSERT(m->m_flags & M_EXT);
8329 	return atomic_test_set_32(&MEXT_PRIV(m), o, n);
8330 }
8331 
8332 uint32_t
8333 m_ext_get_prop(struct mbuf *m)
8334 {
8335 	ASSERT(m->m_flags & M_EXT);
8336 	return MEXT_PRIV(m);
8337 }
8338 
8339 int
8340 m_ext_paired_is_active(struct mbuf *m)
8341 {
8342 	return MBUF_IS_PAIRED(m) ? (MEXT_PREF(m) > MEXT_MINREF(m)) : 1;
8343 }
8344 
8345 void
8346 m_ext_paired_activate(struct mbuf *m)
8347 {
8348 	struct ext_ref *rfa;
8349 	int hdr, type;
8350 	caddr_t extbuf;
8351 	m_ext_free_func_t extfree;
8352 	u_int extsize;
8353 
8354 	VERIFY(MBUF_IS_PAIRED(m));
8355 	VERIFY(MEXT_REF(m) == MEXT_MINREF(m));
8356 	VERIFY(MEXT_PREF(m) == MEXT_MINREF(m));
8357 
8358 	hdr = (m->m_flags & M_PKTHDR);
8359 	type = m->m_type;
8360 	extbuf = m->m_ext.ext_buf;
8361 	extfree = m_get_ext_free(m);
8362 	extsize = m->m_ext.ext_size;
8363 	rfa = m_get_rfa(m);
8364 
8365 	VERIFY(extbuf != NULL && rfa != NULL);
8366 
8367 	/*
8368 	 * Safe to reinitialize packet header tags, since it's
8369 	 * already taken care of at m_free() time.  Similar to
8370 	 * what's done in m_clattach() for the cluster.  Bump
8371 	 * up MEXT_PREF to indicate activation.
8372 	 */
8373 	MBUF_INIT(m, hdr, type);
8374 	MEXT_INIT(m, extbuf, extsize, extfree, (caddr_t)m, rfa,
8375 	    1, 1, 2, EXTF_PAIRED, MEXT_PRIV(m), m);
8376 }
8377 
8378 void
8379 m_scratch_init(struct mbuf *m)
8380 {
8381 	struct pkthdr *pkt = &m->m_pkthdr;
8382 
8383 	VERIFY(m->m_flags & M_PKTHDR);
8384 
8385 	/* See comments in <rdar://problem/14040693> */
8386 	if (pkt->pkt_flags & PKTF_PRIV_GUARDED) {
8387 		panic_plain("Invalid attempt to modify guarded module-private "
8388 		    "area: mbuf %p, pkt_flags 0x%x\n", m, pkt->pkt_flags);
8389 		/* NOTREACHED */
8390 	}
8391 
8392 	bzero(&pkt->pkt_mpriv, sizeof(pkt->pkt_mpriv));
8393 }
8394 
8395 /*
8396  * This routine is reserved for mbuf_get_driver_scratch(); clients inside
8397  * xnu that intend on utilizing the module-private area should directly
8398  * refer to the pkt_mpriv structure in the pkthdr.  They are also expected
8399  * to set and clear PKTF_PRIV_GUARDED, while owning the packet and prior
8400  * to handing it off to another module, respectively.
8401  */
8402 u_int32_t
8403 m_scratch_get(struct mbuf *m, u_int8_t **p)
8404 {
8405 	struct pkthdr *pkt = &m->m_pkthdr;
8406 
8407 	VERIFY(m->m_flags & M_PKTHDR);
8408 
8409 	/* See comments in <rdar://problem/14040693> */
8410 	if (pkt->pkt_flags & PKTF_PRIV_GUARDED) {
8411 		panic_plain("Invalid attempt to access guarded module-private "
8412 		    "area: mbuf %p, pkt_flags 0x%x\n", m, pkt->pkt_flags);
8413 		/* NOTREACHED */
8414 	}
8415 
8416 	if (mcltrace) {
8417 		mcache_audit_t *mca;
8418 
8419 		lck_mtx_lock(mbuf_mlock);
8420 		mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m);
8421 		if (mca->mca_uflags & MB_SCVALID) {
8422 			mcl_audit_scratch(mca);
8423 		}
8424 		lck_mtx_unlock(mbuf_mlock);
8425 	}
8426 
8427 	*p = (u_int8_t *)&pkt->pkt_mpriv;
8428 	return sizeof(pkt->pkt_mpriv);
8429 }
8430 
8431 void
8432 m_add_crumb(struct mbuf *m, uint16_t crumb)
8433 {
8434 	VERIFY(m->m_flags & M_PKTHDR);
8435 
8436 	m->m_pkthdr.pkt_crumbs |= crumb;
8437 }
8438 
8439 static void
8440 m_redzone_init(struct mbuf *m)
8441 {
8442 	VERIFY(m->m_flags & M_PKTHDR);
8443 	/*
8444 	 * Each mbuf has a unique red zone pattern, which is an XOR
8445 	 * of the red zone cookie and the address of the mbuf.
8446 	 */
8447 	m->m_pkthdr.redzone = ((u_int32_t)(uintptr_t)m) ^ mb_redzone_cookie;
8448 }
8449 
8450 static void
8451 m_redzone_verify(struct mbuf *m)
8452 {
8453 	u_int32_t mb_redzone;
8454 
8455 	VERIFY(m->m_flags & M_PKTHDR);
8456 
8457 	mb_redzone = ((u_int32_t)(uintptr_t)m) ^ mb_redzone_cookie;
8458 	if (m->m_pkthdr.redzone != mb_redzone) {
8459 		panic("mbuf %p redzone violation with value 0x%x "
8460 		    "(instead of 0x%x, using cookie 0x%x)\n",
8461 		    m, m->m_pkthdr.redzone, mb_redzone, mb_redzone_cookie);
8462 		/* NOTREACHED */
8463 	}
8464 }
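/*
 * Red zone round trip, as a sketch (the cookie is randomized at boot):
 * with mb_redzone_cookie == 0x5a5a5a5a and an mbuf whose low 32 address
 * bits are 0x12345678, m_redzone_init() stores 0x12345678 ^ 0x5a5a5a5a,
 * and m_redzone_verify() recomputes the same value.  Folding the mbuf
 * address into the pattern makes the expected value unique per mbuf, so
 * a single leaked constant cannot be replayed across mbufs to hide a
 * scratch-area overflow.
 */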
8465 
8466 __private_extern__ inline void
8467 m_set_ext(struct mbuf *m, struct ext_ref *rfa, m_ext_free_func_t ext_free,
8468     caddr_t ext_arg)
8469 {
8470 	VERIFY(m->m_flags & M_EXT);
8471 	if (rfa != NULL) {
8472 		m->m_ext.ext_refflags =
8473 		    (struct ext_ref *)(((uintptr_t)rfa) ^ mb_obscure_extref);
8474 		if (ext_free != NULL) {
8475 			rfa->ext_token = ((uintptr_t)&rfa->ext_token) ^
8476 			    mb_obscure_extfree;
8477 			uintptr_t ext_free_val = ptrauth_nop_cast(uintptr_t, ext_free) ^ rfa->ext_token;
8478 			m->m_ext.ext_free = ptrauth_nop_cast(m_ext_free_func_t, ext_free_val);
8479 			if (ext_arg != NULL) {
8480 				m->m_ext.ext_arg =
8481 				    (caddr_t)(((uintptr_t)ext_arg) ^ rfa->ext_token);
8482 			} else {
8483 				m->m_ext.ext_arg = NULL;
8484 			}
8485 		} else {
8486 			rfa->ext_token = 0;
8487 			m->m_ext.ext_free = NULL;
8488 			m->m_ext.ext_arg = NULL;
8489 		}
8490 	} else {
8491 		/*
8492 		 * If we are going to lose the cookie in ext_token by
8493 		 * resetting the rfa, we should use the global cookie
8494 		 * to obscure the ext_free and ext_arg pointers.
8495 		 */
8496 		if (ext_free != NULL) {
8497 			uintptr_t ext_free_val = ptrauth_nop_cast(uintptr_t, ext_free) ^ mb_obscure_extfree;
8498 			m->m_ext.ext_free = ptrauth_nop_cast(m_ext_free_func_t, ext_free_val);
8499 			if (ext_arg != NULL) {
8500 				m->m_ext.ext_arg =
8501 				    (caddr_t)((uintptr_t)ext_arg ^
8502 				    mb_obscure_extfree);
8503 			} else {
8504 				m->m_ext.ext_arg = NULL;
8505 			}
8506 		} else {
8507 			m->m_ext.ext_free = NULL;
8508 			m->m_ext.ext_arg = NULL;
8509 		}
8510 		m->m_ext.ext_refflags = NULL;
8511 	}
8512 }
8513 
8514 __private_extern__ inline struct ext_ref *
8515 m_get_rfa(struct mbuf *m)
8516 {
8517 	if (m->m_ext.ext_refflags == NULL) {
8518 		return NULL;
8519 	} else {
8520 		return (struct ext_ref *)(((uintptr_t)m->m_ext.ext_refflags) ^ mb_obscure_extref);
8521 	}
8522 }
8523 
8524 __private_extern__ inline m_ext_free_func_t
8525 m_get_ext_free(struct mbuf *m)
8526 {
8527 	struct ext_ref *rfa;
8528 	if (m->m_ext.ext_free == NULL) {
8529 		return NULL;
8530 	}
8531 
8532 	rfa = m_get_rfa(m);
8533 	if (rfa == NULL) {
8534 		uintptr_t ext_free_val = ptrauth_nop_cast(uintptr_t, m->m_ext.ext_free) ^ mb_obscure_extfree;
8535 		return ptrauth_nop_cast(m_ext_free_func_t, ext_free_val);
8536 	} else {
8537 		uintptr_t ext_free_val = ptrauth_nop_cast(uintptr_t, m->m_ext.ext_free) ^ rfa->ext_token;
8538 		return ptrauth_nop_cast(m_ext_free_func_t, ext_free_val);
8539 	}
8540 }
8541 
8542 __private_extern__ inline caddr_t
8543 m_get_ext_arg(struct mbuf *m)
8544 {
8545 	struct ext_ref *rfa;
8546 	if (m->m_ext.ext_arg == NULL) {
8547 		return NULL;
8548 	}
8549 
8550 	rfa = m_get_rfa(m);
8551 	if (rfa == NULL) {
8552 		return (caddr_t)((uintptr_t)m->m_ext.ext_arg ^ mb_obscure_extfree);
8553 	} else {
8554 		return (caddr_t)(((uintptr_t)m->m_ext.ext_arg) ^
8555 		       rfa->ext_token);
8556 	}
8557 }
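/*
 * The obscuring scheme used by m_set_ext() and the getters above is a
 * plain XOR round trip: store (ptr ^ cookie), recover with
 * (stored ^ cookie).  When an ext_ref is present, the per-buffer
 * rfa->ext_token (derived from the token's own address) serves as the
 * cookie; otherwise the boot-time globals mb_obscure_extref and
 * mb_obscure_extfree do.  Either way, a usable free-routine pointer
 * never sits in memory in the clear.
 */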
8558 
8559 /*
8560  * Send a report of mbuf usage when a new peak is reached, the usage is
8561  * at least 6.25% (1/16) of the max limit, and the increase over the
8562  * previous peak is at least 3.125% (1/32).
8563  *
8564  * These fractions are chosen so the checks reduce to shift operations.
8565  */
8566 static boolean_t
8567 mbuf_report_usage(mbuf_class_t cl)
8568 {
8569 	/* if a report is already in progress, nothing to do */
8570 	if (mb_peak_newreport) {
8571 		return TRUE;
8572 	}
8573 
8574 	if (m_total(cl) > m_peak(cl) &&
8575 	    m_total(cl) >= (m_maxlimit(cl) >> 4) &&
8576 	    (m_total(cl) - m_peak(cl)) >= (m_peak(cl) >> 5)) {
8577 		return TRUE;
8578 	}
8579 	return FALSE;
8580 }
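/*
 * Worked example of the shifts above: with m_maxlimit(cl) == 32768 the
 * usage threshold (>> 4) is 2048 buffers, i.e. 6.25%, and with
 * m_peak(cl) == 10000 the growth threshold (>> 5) is 312 buffers, i.e.
 * about 3.125% over the previous peak.
 */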
8581 
8582 __private_extern__ void
8583 mbuf_report_peak_usage(void)
8584 {
8585 	int i = 0;
8586 	u_int64_t uptime;
8587 	struct nstat_sysinfo_data ns_data;
8588 	uint32_t memreleased = 0;
8589 	static uint32_t prevmemreleased;
8590 
8591 	uptime = net_uptime();
8592 	lck_mtx_lock(mbuf_mlock);
8593 
8594 	/* Generate an initial report after 1 week of uptime */
8595 	if (!mb_peak_firstreport &&
8596 	    uptime > MBUF_PEAK_FIRST_REPORT_THRESHOLD) {
8597 		mb_peak_newreport = TRUE;
8598 		mb_peak_firstreport = TRUE;
8599 	}
8600 
8601 	if (!mb_peak_newreport) {
8602 		lck_mtx_unlock(mbuf_mlock);
8603 		return;
8604 	}
8605 
8606 	/*
8607 	 * Since a report is being generated before the 1-week mark,
8608 	 * we do not need to force another one later.
8609 	 */
8610 	if (uptime < MBUF_PEAK_FIRST_REPORT_THRESHOLD) {
8611 		mb_peak_firstreport = TRUE;
8612 	}
8613 
8614 	for (i = 0; i < NELEM(mbuf_table); i++) {
8615 		m_peak(m_class(i)) = m_total(m_class(i));
8616 		memreleased += m_release_cnt(i);
8617 	}
8618 	memreleased -= prevmemreleased;
8619 	prevmemreleased += memreleased;	/* advance to the new cumulative total */
8620 	mb_peak_newreport = FALSE;
8621 	lck_mtx_unlock(mbuf_mlock);
8622 
8623 	bzero(&ns_data, sizeof(ns_data));
8624 	ns_data.flags = NSTAT_SYSINFO_MBUF_STATS;
8625 	ns_data.u.mb_stats.total_256b = m_peak(MC_MBUF);
8626 	ns_data.u.mb_stats.total_2kb = m_peak(MC_CL);
8627 	ns_data.u.mb_stats.total_4kb = m_peak(MC_BIGCL);
8628 	ns_data.u.mb_stats.total_16kb = m_peak(MC_16KCL);
8629 	ns_data.u.mb_stats.sbmb_total = total_sbmb_cnt_peak;
8630 	ns_data.u.mb_stats.sb_atmbuflimit = sbmb_limreached;
8631 	ns_data.u.mb_stats.draincnt = mbstat.m_drain;
8632 	ns_data.u.mb_stats.memreleased = memreleased;
8633 	ns_data.u.mb_stats.sbmb_floor = total_sbmb_cnt_floor;
8634 
8635 	nstat_sysinfo_send_data(&ns_data);
8636 
8637 	/*
8638 	 * Reset the floor whenever we report a new
8639 	 * peak to track the trend (increasing peak usage
8640 	 * is not a leak if mbufs get released
8641 	 * between reports and the floor stays low)
8642 	 */
8643 	total_sbmb_cnt_floor = total_sbmb_cnt_peak;
8644 }
8645 
8646 /*
8647  * Simple routine to avoid taking the lock when we can't run the
8648  * mbuf drain.
8649  */
8650 static int
8651 mbuf_drain_checks(boolean_t ignore_waiters)
8652 {
8653 	if (mb_drain_maxint == 0) {
8654 		return 0;
8655 	}
8656 	if (!ignore_waiters && mb_waiters != 0) {
8657 		return 0;
8658 	}
8659 
8660 	return 1;
8661 }
8662 
8663 /*
8664  * Called by the VM when there's memory pressure or when we exhausted
8665  * the 4k/16k reserved space.
8666  */
8667 static void
8668 mbuf_drain_locked(boolean_t ignore_waiters)
8669 {
8670 	mbuf_class_t mc;
8671 	mcl_slab_t *sp, *sp_tmp, *nsp;
8672 	unsigned int num, k, interval, released = 0;
8673 	unsigned long total_mem = 0, use_mem = 0;
8674 	boolean_t ret, purge_caches = FALSE;
8675 	ppnum_t offset;
8676 	mcache_obj_t *obj;
8677 	unsigned long per;
8678 	static unsigned char scratch[32];
8679 	static ppnum_t scratch_pa = 0;
8680 
8681 	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
8682 	if (!mbuf_drain_checks(ignore_waiters)) {
8683 		return;
8684 	}
8685 	if (scratch_pa == 0) {
8686 		bzero(scratch, sizeof(scratch));
8687 		scratch_pa = pmap_find_phys(kernel_pmap, (addr64_t)scratch);
8688 		VERIFY(scratch_pa);
8689 	} else if (mclverify) {
8690 		/*
8691 		 * Panic if a driver wrote to our scratch memory.
8692 		 */
8693 		for (k = 0; k < sizeof(scratch); k++) {
8694 			if (scratch[k]) {
8695 				panic("suspect DMA to freed address");
8696 			}
8697 		}
8698 	}
8699 	/*
8700 	 * Don't free memory too often as that could cause excessive
8701 	 * waiting times for mbufs.  Purge the caches if the previous drain
8702 	 * ran within five times the minimum interval (mb_drain_maxint).
8703 	 */
8704 	if (mbuf_drain_last_runtime != 0) {
8705 		interval = net_uptime() - mbuf_drain_last_runtime;
8706 		if (interval <= mb_drain_maxint) {
8707 			return;
8708 		}
8709 		if (interval <= mb_drain_maxint * 5) {
8710 			purge_caches = TRUE;
8711 		}
8712 	}
8713 	mbuf_drain_last_runtime = net_uptime();
8714 	/*
8715 	 * Don't free any memory if we're using 60% or more.
8716 	 */
8717 	for (mc = 0; mc < NELEM(mbuf_table); mc++) {
8718 		total_mem += m_total(mc) * m_maxsize(mc);
8719 		use_mem += m_active(mc) * m_maxsize(mc);
8720 	}
8721 	per = (use_mem * 100) / total_mem;
8722 	if (per >= 60) {
8723 		return;
8724 	}
8725 	/*
8726 	 * Purge all the caches.  This effectively disables
8727 	 * caching for a few seconds, but the mbuf worker thread will
8728 	 * re-enable them again.
8729 	 */
8730 	if (purge_caches == TRUE) {
8731 		for (mc = 0; mc < NELEM(mbuf_table); mc++) {
8732 			if (m_total(mc) < m_avgtotal(mc)) {
8733 				continue;
8734 			}
8735 			lck_mtx_unlock(mbuf_mlock);
8736 			ret = mcache_purge_cache(m_cache(mc), FALSE);
8737 			lck_mtx_lock(mbuf_mlock);
8738 			if (ret == TRUE) {
8739 				m_purge_cnt(mc)++;
8740 			}
8741 		}
8742 	}
8743 	/*
8744 	 * Move the objects from the composite class freelist to
8745 	 * the rudimentary slabs list, but keep at least 10% of the average
8746 	 * total in the freelist.
8747 	 */
8748 	for (mc = 0; mc < NELEM(mbuf_table); mc++) {
8749 		while (m_cobjlist(mc) &&
8750 		    m_total(mc) < m_avgtotal(mc) &&
8751 		    m_infree(mc) > 0.1 * m_avgtotal(mc) + m_minlimit(mc)) {
8752 			obj = m_cobjlist(mc);
8753 			m_cobjlist(mc) = obj->obj_next;
8754 			obj->obj_next = NULL;
8755 			num = cslab_free(mc, obj, 1);
8756 			VERIFY(num == 1);
8757 			m_free_cnt(mc)++;
8758 			m_infree(mc)--;
8759 			/* cslab_free() handles m_total */
8760 		}
8761 	}
8762 	/*
8763 	 * Free the buffers present in the slab list up to 10% of the total
8764 	 * average per class.
8765 	 *
8766 	 * We walk the list backwards in an attempt to reduce fragmentation.
8767 	 */
8768 	for (mc = NELEM(mbuf_table) - 1; (int)mc >= 0; mc--) {
8769 		TAILQ_FOREACH_SAFE(sp, &m_slablist(mc), sl_link, sp_tmp) {
8770 			/*
8771 			 * Process only unused slabs occupying memory.
8772 			 */
8773 			if (sp->sl_refcnt != 0 || sp->sl_len == 0 ||
8774 			    sp->sl_base == NULL) {
8775 				continue;
8776 			}
8777 			if (m_total(mc) < m_avgtotal(mc) ||
8778 			    m_infree(mc) < 0.1 * m_avgtotal(mc) + m_minlimit(mc)) {
8779 				break;
8780 			}
8781 			slab_remove(sp, mc);
8782 			switch (mc) {
8783 			case MC_MBUF:
8784 				m_infree(mc) -= NMBPG;
8785 				m_total(mc) -= NMBPG;
8786 				if (mclaudit != NULL) {
8787 					mcl_audit_free(sp->sl_base, NMBPG);
8788 				}
8789 				break;
8790 			case MC_CL:
8791 				m_infree(mc) -= NCLPG;
8792 				m_total(mc) -= NCLPG;
8793 				if (mclaudit != NULL) {
8794 					mcl_audit_free(sp->sl_base, NMBPG);
8795 				}
8796 				break;
8797 			case MC_BIGCL:
8798 			{
8799 				m_infree(mc) -= NBCLPG;
8800 				m_total(mc) -= NBCLPG;
8801 				if (mclaudit != NULL) {
8802 					mcl_audit_free(sp->sl_base, NMBPG);
8803 				}
8804 				break;
8805 			}
8806 			case MC_16KCL:
8807 				m_infree(mc)--;
8808 				m_total(mc)--;
8809 				for (nsp = sp, k = 1; k < NSLABSP16KB; k++) {
8810 					nsp = nsp->sl_next;
8811 					VERIFY(nsp->sl_refcnt == 0 &&
8812 					    nsp->sl_base != NULL &&
8813 					    nsp->sl_len == 0);
8814 					slab_init(nsp, 0, 0, NULL, NULL, 0, 0,
8815 					    0);
8816 					nsp->sl_flags = 0;
8817 				}
8818 				if (mclaudit != NULL) {
8819 					if (sp->sl_len == PAGE_SIZE) {
8820 						mcl_audit_free(sp->sl_base,
8821 						    NMBPG);
8822 					} else {
8823 						mcl_audit_free(sp->sl_base, 1);
8824 					}
8825 				}
8826 				break;
8827 			default:
8828 				/*
8829 				 * The composite classes have their own
8830 				 * freelist (m_cobjlist), so we only
8831 				 * process rudimentary classes here.
8832 				 */
8833 				VERIFY(0);
8834 			}
8835 			m_release_cnt(mc) += m_size(mc);
8836 			released += m_size(mc);
8837 			VERIFY(sp->sl_base != NULL &&
8838 			    sp->sl_len >= PAGE_SIZE);
8839 			offset = MTOPG(sp->sl_base);
8840 			/*
8841 			 * Make sure the IOMapper points to a valid, but
8842 			 * bogus, address.  This should prevent further DMA
8843 			 * accesses to freed memory.
8844 			 */
8845 			IOMapperInsertPage(mcl_paddr_base, offset, scratch_pa);
8846 			mcl_paddr[offset] = 0;
8847 			kmem_free(mb_map, (vm_offset_t)sp->sl_base,
8848 			    sp->sl_len);
8849 			slab_init(sp, 0, 0, NULL, NULL, 0, 0, 0);
8850 			sp->sl_flags = 0;
8851 		}
8852 	}
8853 	mbstat.m_drain++;
8854 	mbstat.m_bigclusters = m_total(MC_BIGCL);
8855 	mbstat.m_clusters = m_total(MC_CL);
8856 	mbstat.m_mbufs = m_total(MC_MBUF);
8857 	mbuf_stat_sync();
8858 	mbuf_mtypes_sync(TRUE);
8859 }
8860 
8861 __private_extern__ void
8862 mbuf_drain(boolean_t ignore_waiters)
8863 {
8864 	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_NOTOWNED);
8865 	if (!mbuf_drain_checks(ignore_waiters)) {
8866 		return;
8867 	}
8868 	lck_mtx_lock(mbuf_mlock);
8869 	mbuf_drain_locked(ignore_waiters);
8870 	lck_mtx_unlock(mbuf_mlock);
8871 }
8872 
8873 
8874 static int
8875 m_drain_force_sysctl SYSCTL_HANDLER_ARGS
8876 {
8877 #pragma unused(arg1, arg2)
8878 	int val = 0, err;
8879 
8880 	err = sysctl_handle_int(oidp, &val, 0, req);
8881 	if (err != 0 || req->newptr == USER_ADDR_NULL) {
8882 		return err;
8883 	}
8884 	if (val) {
8885 		mbuf_drain(TRUE);
8886 	}
8887 
8888 	return err;
8889 }
8890 
8891 #if DEBUG || DEVELOPMENT
8892 __printflike(3, 4)
8893 static void
8894 _mbwdog_logger(const char *func, const int line, const char *fmt, ...)
8895 {
8896 	va_list ap;
8897 	struct timeval now;
8898 	char str[384], p[256];
8899 	int len;
8900 
8901 	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
8902 	if (mbwdog_logging == NULL) {
8903 		/*
8904 		 * This might block under a mutex, which isn't really great,
8905 		 * but this happens once, so we'll live.
8906 		 */
8907 		mbwdog_logging = zalloc_permanent(mbwdog_logging_size,
8908 		    ZALIGN_NONE);
8909 	}
8910 	va_start(ap, fmt);
8911 	vsnprintf(p, sizeof(p), fmt, ap);
8912 	va_end(ap);
8913 	microuptime(&now);
8914 	len = scnprintf(str, sizeof(str),
8915 	    "\n%ld.%d (%d/%llx) %s:%d %s",
8916 	    now.tv_sec, now.tv_usec,
8917 	    proc_getpid(current_proc()),
8918 	    (uint64_t)VM_KERNEL_ADDRPERM(current_thread()),
8919 	    func, line, p);
8920 	if (len < 0) {
8921 		return;
8922 	}
8923 	if (mbwdog_logging_used + len > mbwdog_logging_size) {
8924 		mbwdog_logging_used = mbwdog_logging_used / 2;
8925 		memmove(mbwdog_logging, mbwdog_logging + mbwdog_logging_used,
8926 		    mbwdog_logging_size - mbwdog_logging_used);
8927 		mbwdog_logging[mbwdog_logging_used] = 0;
8928 	}
8929 	strlcat(mbwdog_logging, str, mbwdog_logging_size);
8930 	mbwdog_logging_used += len;
8931 }
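/*
 * When the watchdog log fills up, the oldest half is dropped in place:
 * the newer half is slid to the front with memmove() and the string is
 * re-terminated, so the buffer acts as a crude ring that always keeps
 * the most recent entries.
 */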
8932 
8933 #endif // DEBUG || DEVELOPMENT
8934 
8935 static void
8936 mtracelarge_register(size_t size)
8937 {
8938 	int i;
8939 	struct mtracelarge *trace;
8940 	uintptr_t bt[MLEAK_STACK_DEPTH];
8941 	unsigned int depth;
8942 
8943 	depth = backtrace(bt, MLEAK_STACK_DEPTH, NULL, NULL);
8944 	/* Check if this entry is already on the list. */
8945 	for (i = 0; i < MTRACELARGE_NUM_TRACES; i++) {
8946 		trace = &mtracelarge_table[i];
8947 		if (trace->size == size && trace->depth == depth &&
8948 		    memcmp(bt, trace->addr, depth * sizeof(uintptr_t)) == 0) {
8949 			return;
8950 		}
8951 	}
8952 	for (i = 0; i < MTRACELARGE_NUM_TRACES; i++) {
8953 		trace = &mtracelarge_table[i];
8954 		if (size > trace->size) {
8955 			trace->depth = depth;
8956 			memcpy(trace->addr, bt, depth * sizeof(uintptr_t));
8957 			trace->size = size;
8958 			break;
8959 		}
8960 	}
8961 }
8962 
8963 SYSCTL_DECL(_kern_ipc);
8964 #if DEBUG || DEVELOPMENT
8965 #if SKYWALK
8966 SYSCTL_UINT(_kern_ipc, OID_AUTO, mc_threshold_scale_factor,
8967     CTLFLAG_RW | CTLFLAG_LOCKED, &mc_threshold_scale_down_factor,
8968     MC_THRESHOLD_SCALE_DOWN_FACTOR,
8969     "scale down factor for mbuf cache thresholds");
8970 #endif /* SKYWALK */
8971 #endif
8972 SYSCTL_PROC(_kern_ipc, KIPC_MBSTAT, mbstat,
8973     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
8974     0, 0, mbstat_sysctl, "S,mbstat", "");
8975 SYSCTL_PROC(_kern_ipc, OID_AUTO, mb_stat,
8976     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
8977     0, 0, mb_stat_sysctl, "S,mb_stat", "");
8978 SYSCTL_PROC(_kern_ipc, OID_AUTO, mleak_top_trace,
8979     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
8980     0, 0, mleak_top_trace_sysctl, "S,mb_top_trace", "");
8981 SYSCTL_PROC(_kern_ipc, OID_AUTO, mleak_table,
8982     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
8983     0, 0, mleak_table_sysctl, "S,mleak_table", "");
8984 SYSCTL_INT(_kern_ipc, OID_AUTO, mleak_sample_factor,
8985     CTLFLAG_RW | CTLFLAG_LOCKED, &mleak_table.mleak_sample_factor, 0, "");
8986 SYSCTL_INT(_kern_ipc, OID_AUTO, mb_normalized,
8987     CTLFLAG_RD | CTLFLAG_LOCKED, &mb_normalized, 0, "");
8988 SYSCTL_INT(_kern_ipc, OID_AUTO, mb_watchdog,
8989     CTLFLAG_RW | CTLFLAG_LOCKED, &mb_watchdog, 0, "");
8990 SYSCTL_PROC(_kern_ipc, OID_AUTO, mb_drain_force,
8991     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0,
8992     m_drain_force_sysctl, "I",
8993     "Forces the mbuf garbage collection to run");
8994 SYSCTL_INT(_kern_ipc, OID_AUTO, mb_drain_maxint,
8995     CTLFLAG_RW | CTLFLAG_LOCKED, &mb_drain_maxint, 0,
8996     "Minimum time interval between garbage collection runs");
8997 SYSCTL_INT(_kern_ipc, OID_AUTO, mb_memory_pressure_percentage,
8998     "Usage percentage at which memory pressure is triggered for an mbuf class");
8999     "Percentage of when we trigger memory-pressure for an mbuf-class");
9000