xref: /xnu-11215.41.3/bsd/dev/unix_startup.c (revision 33de042d024d46de5ff4e89f2471de6608e37fa4)
1 /*
2  * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * Copyright (c) 1992,7 NeXT Computer, Inc.
30  *
31  * Unix data structure initialization.
32  *
33  */
34 
35 #include <mach/mach_types.h>
36 
37 #include <kern/startup.h>
38 #include <vm/vm_kern_xnu.h>
39 #include <mach/vm_prot.h>
40 
41 #include <sys/param.h>
42 #include <sys/buf_internal.h>
43 #include <sys/file_internal.h>
44 #include <sys/proc_internal.h>
45 #include <sys/mcache.h>
46 #include <sys/mbuf.h>
47 #include <sys/systm.h>
48 #include <sys/tty.h>
49 #include <sys/vnode.h>
50 #include <sys/sysctl.h>
51 #include <machine/cons.h>
52 #include <pexpert/pexpert.h>
53 #include <sys/socketvar.h>
54 #include <pexpert/pexpert.h>
55 #include <netinet/tcp_var.h>
56 
extern uint32_t kern_maxvnodes;
#if CONFIG_MBUF_MCACHE
extern vm_map_t mb_map;         /* mbuf cluster submap; created in bsd_bufferinit() */
#endif /* CONFIG_MBUF_MCACHE */

#if INET
/* TCP default socket buffer sizes; scaled up in bsd_startupearly() */
extern uint32_t   tcp_sendspace;
extern uint32_t   tcp_recvspace;
#endif

void            bsd_bufferinit(void);

unsigned int    bsd_mbuf_cluster_reserve(boolean_t *);
void bsd_scale_setup(int);
void bsd_exec_setup(int);

/*
 * Declare these as initialized data so we can patch them.
 */

/* buf/iobuf header pool sizes: fixed at NBUF when configured, else sized
 * from physical memory in bsd_get_bufferhdr_map_size() */
#ifdef  NBUF
int             max_nbuf_headers = NBUF;
int             niobuf_headers = (NBUF / 2) + 2048;
int             nbuf_hashelements = NBUF;
int             nbuf_headers = NBUF;
#else
int             max_nbuf_headers = 0;
int             niobuf_headers = 0;
int             nbuf_hashelements = 0;
int             nbuf_headers = 0;
#endif

SYSCTL_INT(_kern, OID_AUTO, nbuf, CTLFLAG_RD | CTLFLAG_LOCKED, &nbuf_headers, 0, "");
SYSCTL_INT(_kern, OID_AUTO, maxnbuf, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_KERN, &max_nbuf_headers, 0, "");

/* non-zero when the buf header count was customized (e.g. via boot-args);
 * suppresses the 16K clip in bsd_get_bufferhdr_map_size() */
__private_extern__ int customnbuf = 0;

#if SOCKETS
/* cached result of bsd_mbuf_cluster_reserve(); non-zero once computed */
static unsigned int mbuf_poolsz;
#endif

vm_map_t        buffer_map;
vm_map_t        bufferhdr_map;
/* set once desiredvnodes has been sized, so it is only computed once */
static int vnodes_sized = 0;

extern void     bsd_startupearly(void);

/* size of the buf header submap, computed during VM bootstrap below */
static vm_map_size_t    bufferhdr_map_size;
SECURITY_READ_ONLY_LATE(struct mach_vm_range)  bufferhdr_range = {};
106 
107 static vm_map_size_t
bsd_get_bufferhdr_map_size(void)108 bsd_get_bufferhdr_map_size(void)
109 {
110 	vm_size_t       size;
111 
112 	/* clip the number of buf headers upto 16k */
113 	if (max_nbuf_headers == 0) {
114 		max_nbuf_headers = (int)atop_kernel(sane_size / 50); /* Get 2% of ram, but no more than we can map */
115 	}
116 	if ((customnbuf == 0) && ((unsigned int)max_nbuf_headers > 16384)) {
117 		max_nbuf_headers = 16384;
118 	}
119 	if (max_nbuf_headers < CONFIG_MIN_NBUF) {
120 		max_nbuf_headers = CONFIG_MIN_NBUF;
121 	}
122 
123 	if (niobuf_headers == 0) {
124 		if (max_nbuf_headers < 4096) {
125 			niobuf_headers = max_nbuf_headers;
126 		} else {
127 			niobuf_headers = (max_nbuf_headers / 2) + 2048;
128 		}
129 	}
130 	if (niobuf_headers < CONFIG_MIN_NIOBUF) {
131 		niobuf_headers = CONFIG_MIN_NIOBUF;
132 	}
133 
134 	size = (max_nbuf_headers + niobuf_headers) * sizeof(struct buf);
135 	size = round_page(size);
136 
137 	return size;
138 }
139 
/*
 * Register a dynamically-sized kernel VA range for the buf headers.
 * The block runs during VM bootstrap; it records the computed submap
 * size in bufferhdr_map_size for later use by bsd_startupearly().
 */
KMEM_RANGE_REGISTER_DYNAMIC(bufferhdr, &bufferhdr_range, ^() {
	return bufferhdr_map_size = bsd_get_bufferhdr_map_size();
});
143 
/*
 * Early BSD data-structure initialization, called from bsd_bufferinit():
 * creates the buf header submap and wires memory for the headers, scales
 * the TCP default socket buffer sizes by the mbuf pool size, and sizes
 * the vnode table from physical memory.
 */
void
bsd_startupearly(void)
{
	/* sized by the bufferhdr range-registration block during VM bootstrap */
	vm_size_t size = bufferhdr_map_size;

	assert(size);

	/* clip the number of hash elements to 200000 */
	if ((customnbuf == 0) && nbuf_hashelements == 0) {
		/* auto-size: same 2%-of-RAM heuristic as the header count */
		nbuf_hashelements = (int)atop_kernel(sane_size / 50);
		if ((unsigned int)nbuf_hashelements > 200000) {
			nbuf_hashelements = 200000;
		}
	} else {
		/* customized/preset: hash table tracks the header count */
		nbuf_hashelements = max_nbuf_headers;
	}

	/*
	 * Create the buf header submap at the VA range registered during VM
	 * bootstrap, then permanently wire zero-filled kernel memory in it
	 * for the headers themselves.  Both calls are NOFAIL/PERMANENT.
	 */
	bufferhdr_map = kmem_suballoc(kernel_map,
	    &bufferhdr_range.min_address,
	    size,
	    VM_MAP_CREATE_NEVER_FAULTS,
	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
	    KMS_PERMANENT | KMS_NOFAIL,
	    VM_KERN_MEMORY_FILE).kmr_submap;

	kmem_alloc(bufferhdr_map,
	    &(vm_offset_t){ bufferhdr_range.min_address },
	    size,
	    KMA_NOFAIL | KMA_PERMANENT | KMA_ZERO | KMA_KOBJECT,
	    VM_KERN_MEMORY_FILE);

	buf_headers = (struct buf *)bufferhdr_range.min_address;

#if SOCKETS
	{
		/* cap on the scaled TCP send/receive space: 128KB each */
		static const unsigned int       maxspace = 128 * 1024;
		int             scale;

#if INET
		/* Scale TCP socket buffer defaults with the mbuf cluster pool */
		if ((scale = nmbclusters / NMBCLUSTERS) > 1) {
			tcp_sendspace *= scale;
			tcp_recvspace *= scale;

			if (tcp_sendspace > maxspace) {
				tcp_sendspace = maxspace;
			}
			if (tcp_recvspace > maxspace) {
				tcp_recvspace = maxspace;
			}
		}
#endif /* INET */
	}
#endif /* SOCKETS */

	if (vnodes_sized == 0) {
		/* a device-tree/default override of kern.maxvnodes wins outright */
		if (!PE_get_default("kern.maxvnodes", &desiredvnodes, sizeof(desiredvnodes))) {
			/*
			 * Size vnodes based on memory
			 * Number vnodes  is (memsize/64k) + 1024
			 * This is the calculation that is used by launchd in tiger
			 * we are clipping the max based on 16G
			 * ie ((16*1024*1024*1024)/(64 *1024)) + 1024 = 263168;
			 * CONFIG_VNODES is set to 263168 for "medium" configurations (the default)
			 * but can be smaller or larger.
			 */
			desiredvnodes  = (int)(sane_size / 65536) + 1024;
#ifdef CONFIG_VNODES
			if (desiredvnodes > CONFIG_VNODES) {
				desiredvnodes = CONFIG_VNODES;
			}
#endif
		}
		vnodes_sized = 1;
	}
}
219 
#if SOCKETS
/*
 * Reserve a kernel VA range for the mbuf cluster pool.  The sizing block
 * runs during VM bootstrap; it also establishes nmbclusters from the
 * (possibly boot-arg-overridden) pool size.
 */
SECURITY_READ_ONLY_LATE(struct mach_vm_range) mb_range = {};
KMEM_RANGE_REGISTER_DYNAMIC(mb, &mb_range, ^() {
	nmbclusters = bsd_mbuf_cluster_reserve(NULL) / MCLBYTES;
	return (vm_map_size_t)(nmbclusters * MCLBYTES);
});
#endif /* SOCKETS */
227 
/*
 * Initialize the BSD buffer cache: run the early sizing/submap setup,
 * create the mbuf submap when the mcache mbuf allocator is configured,
 * and finally initialize the buffer headers so disk labels can be read.
 */
void
bsd_bufferinit(void)
{
	/*
	 * Note: Console device initialized in kminit() from bsd_autoconf()
	 * prior to call to us in bsd_init().
	 */

	bsd_startupearly();

#if CONFIG_MBUF_MCACHE
	/* Carve the mbuf cluster submap out of the range reserved above */
	mb_map = kmem_suballoc(kernel_map,
	    &mb_range.min_address,
	    (vm_size_t) (nmbclusters * MCLBYTES),
	    FALSE,
	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
	    KMS_PERMANENT | KMS_NOFAIL,
	    VM_KERN_MEMORY_MBUF).kmr_submap;
	/* mbutl is the base of the mbuf/cluster address range */
	mbutl = (unsigned char *)mb_range.min_address;
#endif /* CONFIG_MBUF_MCACHE */

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
}
254 
/* 512 MB (K32) or 2 GB (K64) hard limit on size of the mbuf pool */
#if !defined(__LP64__)
#define MAX_MBUF_POOL   (512 << MBSHIFT)
#else
#define MAX_MBUF_POOL   (2ULL << GBSHIFT)
#endif /* !__LP64__ */
/* upper bound on nmbclusters: the pool limit divided by the 2KB cluster size */
#define MAX_NCL         (MAX_MBUF_POOL >> MCLSHIFT)
262 
263 #if SOCKETS
264 /*
265  * this has been broken out into a separate routine that
266  * can be called from the x86 early vm initialization to
267  * determine how much lo memory to reserve on systems with
268  * DMA hardware that can't fully address all of the physical
269  * memory that is present.
270  */
271 unsigned int
bsd_mbuf_cluster_reserve(boolean_t * overridden)272 bsd_mbuf_cluster_reserve(boolean_t *overridden)
273 {
274 	int mbuf_pool = 0, ncl = 0;
275 	static boolean_t was_overridden = FALSE;
276 
277 	/* If called more than once, return the previously calculated size */
278 	if (mbuf_poolsz != 0) {
279 		goto done;
280 	}
281 
282 	/*
283 	 * Some of these are parsed in parse_bsd_args(), but for x86 we get
284 	 * here early from i386_vm_init() and so we parse them now, in order
285 	 * to correctly compute the size of the low-memory VM pool.  It is
286 	 * redundant but rather harmless.
287 	 */
288 	(void) PE_parse_boot_argn("ncl", &ncl, sizeof(ncl));
289 	(void) PE_parse_boot_argn("mbuf_pool", &mbuf_pool, sizeof(mbuf_pool));
290 
291 	/*
292 	 * Convert "mbuf_pool" from MB to # of 2KB clusters; it is
293 	 * equivalent to "ncl", except that it uses different unit.
294 	 */
295 	if (mbuf_pool != 0) {
296 		ncl = (mbuf_pool << MBSHIFT) >> MCLSHIFT;
297 	}
298 
299 	if (sane_size > (64 * 1024 * 1024) || ncl != 0) {
300 		if (ncl || serverperfmode) {
301 			was_overridden = TRUE;
302 		}
303 
304 		if ((nmbclusters = ncl) == 0) {
305 			/* Auto-configure the mbuf pool size */
306 			nmbclusters = mbuf_default_ncl(mem_actual);
307 		} else {
308 			/* Make sure it's not odd in case ncl is manually set */
309 			if (nmbclusters & 0x1) {
310 				--nmbclusters;
311 			}
312 
313 			/* And obey the upper limit */
314 			if (nmbclusters > MAX_NCL) {
315 				nmbclusters = MAX_NCL;
316 			}
317 		}
318 
319 		/* Round it down to nearest multiple of PAGE_SIZE */
320 		nmbclusters = (unsigned int)P2ROUNDDOWN(nmbclusters, NCLPG);
321 	}
322 	mbuf_poolsz = nmbclusters << MCLSHIFT;
323 done:
324 	if (overridden) {
325 		*overridden = was_overridden;
326 	}
327 
328 	return mbuf_poolsz;
329 }
330 #endif
331 
#if defined(__LP64__)
/* networking tunables adjusted by bsd_scale_setup() below */
extern int tcp_tcbhashsize;
extern int max_cached_sock_count;
#endif
336 
337 void
bsd_scale_setup(int scale)338 bsd_scale_setup(int scale)
339 {
340 #if defined(__LP64__)
341 	if ((scale > 0) && (serverperfmode == 0)) {
342 		maxproc *= scale;
343 		maxprocperuid = (maxproc * 2) / 3;
344 		if (scale > 2) {
345 			maxfiles *= scale;
346 			maxfilesperproc = maxfiles / 2;
347 		}
348 	}
349 	/* Apply server scaling rules */
350 	if ((scale > 0) && (serverperfmode != 0)) {
351 		maxproc = 2500 * scale;
352 		hard_maxproc = maxproc;
353 		/* no fp usage */
354 		maxprocperuid = (maxproc * 3) / 4;
355 		maxfiles = (150000 * scale);
356 		maxfilesperproc = maxfiles / 2;
357 		desiredvnodes = maxfiles;
358 		vnodes_sized = 1;
359 		tcp_tfo_backlog = 100 * scale;
360 		if (scale > 4) {
361 			/* clip somaxconn at 32G level */
362 			somaxconn = 2048;
363 			/*
364 			 * For scale > 4 (> 32G), clip
365 			 * tcp_tcbhashsize to 32K
366 			 */
367 			tcp_tcbhashsize = 32 * 1024;
368 
369 			if (scale > 7) {
370 				/* clip at 64G level */
371 				max_cached_sock_count = 165000;
372 			} else {
373 				max_cached_sock_count = 60000 + ((scale - 1) * 15000);
374 			}
375 		} else {
376 			somaxconn = 512 * scale;
377 			tcp_tcbhashsize = 4 * 1024 * scale;
378 			max_cached_sock_count = 60000 + ((scale - 1) * 15000);
379 		}
380 	}
381 
382 	if (maxproc > hard_maxproc) {
383 		hard_maxproc = maxproc;
384 	}
385 #endif
386 	bsd_exec_setup(scale);
387 }
388