xref: /xnu-8792.61.2/bsd/dev/unix_startup.c (revision 42e220869062b56f8d7d0726fd4c88954f87902c)
1 /*
2  * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * Copyright (c) 1992,7 NeXT Computer, Inc.
30  *
31  * Unix data structure initialization.
32  *
33  */
34 
35 #include <mach/mach_types.h>
36 
37 #include <kern/startup.h>
38 #include <vm/vm_kern.h>
39 #include <mach/vm_prot.h>
40 
41 #include <sys/param.h>
42 #include <sys/buf_internal.h>
43 #include <sys/file_internal.h>
44 #include <sys/proc_internal.h>
45 #include <sys/mcache.h>
46 #include <sys/mbuf.h>
47 #include <sys/systm.h>
48 #include <sys/tty.h>
49 #include <sys/vnode.h>
50 #include <sys/sysctl.h>
51 #include <machine/cons.h>
52 #include <pexpert/pexpert.h>
53 #include <sys/socketvar.h>
54 #include <pexpert/pexpert.h>
55 #include <netinet/tcp_var.h>
56 
extern uint32_t kern_maxvnodes;
extern vm_map_t mb_map;         /* mbuf cluster submap; created in bsd_bufferinit() */

#if INET
/* Default TCP socket buffer sizes; scaled up in bsd_startupearly(). */
extern uint32_t   tcp_sendspace;
extern uint32_t   tcp_recvspace;
#endif

/* Forward declarations for routines defined in this file. */
void            bsd_bufferinit(void);

unsigned int    bsd_mbuf_cluster_reserve(boolean_t *);
void bsd_scale_setup(int);
void bsd_exec_setup(int);       /* defined elsewhere; called from bsd_scale_setup() */

/*
 * Declare these as initialized data so we can patch them.
 */

#ifdef  NBUF
int             max_nbuf_headers = NBUF;
int             niobuf_headers = (NBUF / 2) + 2048;
int             nbuf_hashelements = NBUF;
int             nbuf_headers = NBUF;
#else
/* Zero means "auto-size from memory" in bsd_get_bufferhdr_map_size(). */
int             max_nbuf_headers = 0;
int             niobuf_headers = 0;
int             nbuf_hashelements = 0;
int             nbuf_headers = 0;
#endif

/* kern.nbuf (read-only) and kern.maxnbuf (writable) sysctls over the counts above. */
SYSCTL_INT(_kern, OID_AUTO, nbuf, CTLFLAG_RD | CTLFLAG_LOCKED, &nbuf_headers, 0, "");
SYSCTL_INT(_kern, OID_AUTO, maxnbuf, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_KERN, &max_nbuf_headers, 0, "");

/* Nonzero disables the 16k clamp on max_nbuf_headers; set outside this file — TODO confirm caller. */
__private_extern__ int customnbuf = 0;

/* Indicates a server boot when set */
TUNABLE(int, serverperfmode, "serverperfmode", 0);

#if SOCKETS
/* Cached result (bytes) of bsd_mbuf_cluster_reserve(); nonzero once computed. */
static unsigned int mbuf_poolsz;
#endif

vm_map_t        buffer_map;
vm_map_t        bufferhdr_map;  /* submap holding buf headers; created in bsd_startupearly() */
static int vnodes_sized = 0;    /* set once desiredvnodes has been fixed */

extern void     bsd_startupearly(void);

/* Size of bufferhdr_map, computed (and cached) when the KMEM range below is registered. */
static vm_map_size_t    bufferhdr_map_size;
SECURITY_READ_ONLY_LATE(struct mach_vm_range)  bufferhdr_range = {};
107 
108 static vm_map_size_t
bsd_get_bufferhdr_map_size(void)109 bsd_get_bufferhdr_map_size(void)
110 {
111 	vm_size_t       size;
112 
113 	/* clip the number of buf headers upto 16k */
114 	if (max_nbuf_headers == 0) {
115 		max_nbuf_headers = (int)atop_kernel(sane_size / 50); /* Get 2% of ram, but no more than we can map */
116 	}
117 	if ((customnbuf == 0) && ((unsigned int)max_nbuf_headers > 16384)) {
118 		max_nbuf_headers = 16384;
119 	}
120 	if (max_nbuf_headers < CONFIG_MIN_NBUF) {
121 		max_nbuf_headers = CONFIG_MIN_NBUF;
122 	}
123 
124 	if (niobuf_headers == 0) {
125 		if (max_nbuf_headers < 4096) {
126 			niobuf_headers = max_nbuf_headers;
127 		} else {
128 			niobuf_headers = (max_nbuf_headers / 2) + 2048;
129 		}
130 	}
131 	if (niobuf_headers < CONFIG_MIN_NIOBUF) {
132 		niobuf_headers = CONFIG_MIN_NIOBUF;
133 	}
134 
135 	size = (max_nbuf_headers + niobuf_headers) * sizeof(struct buf);
136 	size = round_page(size);
137 
138 	return size;
139 }
140 
/*
 * Register the VM range for the buffer-header submap.  The block runs at
 * range-registration time, caching the computed size in bufferhdr_map_size
 * for later use by bsd_startupearly().
 */
KMEM_RANGE_REGISTER_DYNAMIC(bufferhdr, &bufferhdr_range, ^() {
	return bufferhdr_map_size = bsd_get_bufferhdr_map_size();
});
144 
/*
 * Early BSD data-structure sizing and allocation:
 *  - sizes the buf-header hash (nbuf_hashelements),
 *  - creates bufferhdr_map over the pre-registered bufferhdr_range and
 *    wires zeroed buf headers into it (buf_headers),
 *  - scales the default TCP send/receive spaces with the mbuf pool,
 *  - sizes desiredvnodes (once) from memory unless a default overrides it.
 * Called from bsd_bufferinit() below.
 */
void
bsd_startupearly(void)
{
	/* Size was computed and cached by the bufferhdr KMEM range block. */
	vm_size_t size = bufferhdr_map_size;

	assert(size);

	/* clip the number of hash elements  to 200000 */
	if ((customnbuf == 0) && nbuf_hashelements == 0) {
		/* Auto-size from memory (~2% of RAM in pages), capped at 200000. */
		nbuf_hashelements = (int)atop_kernel(sane_size / 50);
		if ((unsigned int)nbuf_hashelements > 200000) {
			nbuf_hashelements = 200000;
		}
	} else {
		nbuf_hashelements = max_nbuf_headers;
	}

	/* Carve the buffer-header submap out of the pre-registered fixed range. */
	bufferhdr_map = kmem_suballoc(kernel_map,
	    &bufferhdr_range.min_address,
	    size,
	    VM_MAP_CREATE_NEVER_FAULTS,
	    VM_FLAGS_FIXED_RANGE_SUBALLOC,
	    KMS_PERMANENT | KMS_NOFAIL,
	    VM_KERN_MEMORY_FILE).kmr_submap;

	/* Permanently allocate zero-filled header memory; NOFAIL panics on failure. */
	kmem_alloc(bufferhdr_map,
	    &(vm_offset_t){ bufferhdr_range.min_address },
	    size,
	    KMA_NOFAIL | KMA_PERMANENT | KMA_ZERO | KMA_KOBJECT,
	    VM_KERN_MEMORY_FILE);

	buf_headers = (struct buf *)bufferhdr_range.min_address;

#if SOCKETS
	{
		static const unsigned int       maxspace = 128 * 1024;
		int             scale;

#if INET
		/* Scale the default TCP windows with the mbuf pool, capped at 128KB. */
		if ((scale = nmbclusters / NMBCLUSTERS) > 1) {
			tcp_sendspace *= scale;
			tcp_recvspace *= scale;

			if (tcp_sendspace > maxspace) {
				tcp_sendspace = maxspace;
			}
			if (tcp_recvspace > maxspace) {
				tcp_recvspace = maxspace;
			}
		}
#endif /* INET */
	}
#endif /* SOCKETS */

	/* Size the vnode table once; bsd_scale_setup() may also pre-set vnodes_sized. */
	if (vnodes_sized == 0) {
		if (!PE_get_default("kern.maxvnodes", &desiredvnodes, sizeof(desiredvnodes))) {
			/*
			 * Size vnodes based on memory
			 * Number vnodes  is (memsize/64k) + 1024
			 * This is the calculation that is used by launchd in tiger
			 * we are clipping the max based on 16G
			 * ie ((16*1024*1024*1024)/(64 *1024)) + 1024 = 263168;
			 * CONFIG_VNODES is set to 263168 for "medium" configurations (the default)
			 * but can be smaller or larger.
			 */
			desiredvnodes  = (int)(sane_size / 65536) + 1024;
#ifdef CONFIG_VNODES
			if (desiredvnodes > CONFIG_VNODES) {
				desiredvnodes = CONFIG_VNODES;
			}
#endif
		}
		vnodes_sized = 1;
	}
}
220 
#if SOCKETS
/* VM range for the mbuf cluster pool; the mb_map submap is created over it in bsd_bufferinit(). */
SECURITY_READ_ONLY_LATE(struct mach_vm_range) mb_range = {};
/*
 * Register the mbuf range.  The block fixes nmbclusters (via
 * bsd_mbuf_cluster_reserve()) and returns the pool size in bytes.
 */
KMEM_RANGE_REGISTER_DYNAMIC(mb, &mb_range, ^() {
	nmbclusters = bsd_mbuf_cluster_reserve(NULL) / MCLBYTES;
	return (vm_map_size_t)(nmbclusters * MCLBYTES);
});
#endif /* SOCKETS */
228 
/*
 * Buffer-cache bring-up, called from bsd_init(): runs bsd_startupearly(),
 * creates the mbuf cluster submap (mb_map) over the pre-registered
 * mb_range, then initializes the buffer cache via bufinit().
 */
void
bsd_bufferinit(void)
{
	/*
	 * Note: Console device initialized in kminit() from bsd_autoconf()
	 * prior to call to us in bsd_init().
	 */

	bsd_startupearly();

#if SOCKETS
	/* nmbclusters was fixed by the mb KMEM range registration block. */
	mb_map = kmem_suballoc(kernel_map,
	    &mb_range.min_address,
	    (vm_size_t) (nmbclusters * MCLBYTES),
	    FALSE,
	    VM_FLAGS_FIXED_RANGE_SUBALLOC,
	    KMS_PERMANENT | KMS_NOFAIL,
	    VM_KERN_MEMORY_MBUF).kmr_submap;
	mbutl = (unsigned char *)mb_range.min_address;  /* base of the cluster pool */
#endif /* SOCKETS */

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
}
255 
/* 512 MB (K32) or 2 GB (K64) hard limit on size of the mbuf pool */
#if !defined(__LP64__)
#define MAX_MBUF_POOL   (512 << MBSHIFT)
#else
#define MAX_MBUF_POOL   (2ULL << GBSHIFT)
#endif /* !__LP64__ */
/* The same limit expressed as a count of 2KB mbuf clusters. */
#define MAX_NCL         (MAX_MBUF_POOL >> MCLSHIFT)
263 
#if SOCKETS
/*
 * this has been broken out into a separate routine that
 * can be called from the x86 early vm initialization to
 * determine how much lo memory to reserve on systems with
 * DMA hardware that can't fully address all of the physical
 * memory that is present.
 */
/*
 * Returns the mbuf pool size in bytes (computed once, then cached in
 * mbuf_poolsz) and sets nmbclusters as a side effect.  If overridden is
 * non-NULL, *overridden reports whether the size came from an "ncl" /
 * "mbuf_pool" boot-arg or serverperfmode rather than auto-configuration.
 */
unsigned int
bsd_mbuf_cluster_reserve(boolean_t *overridden)
{
	int mbuf_pool = 0, ncl = 0;
	/* sticky across calls so repeat callers get the same answer */
	static boolean_t was_overridden = FALSE;

	/* If called more than once, return the previously calculated size */
	if (mbuf_poolsz != 0) {
		goto done;
	}

	/*
	 * Some of these are parsed in parse_bsd_args(), but for x86 we get
	 * here early from i386_vm_init() and so we parse them now, in order
	 * to correctly compute the size of the low-memory VM pool.  It is
	 * redundant but rather harmless.
	 */
	(void) PE_parse_boot_argn("ncl", &ncl, sizeof(ncl));
	(void) PE_parse_boot_argn("mbuf_pool", &mbuf_pool, sizeof(mbuf_pool));

	/*
	 * Convert "mbuf_pool" from MB to # of 2KB clusters; it is
	 * equivalent to "ncl", except that it uses different unit.
	 *
	 * NOTE(review): (mbuf_pool << MBSHIFT) is signed-int arithmetic and
	 * would overflow for mbuf_pool >= 2048 (2GB); presumably bounded in
	 * practice by the MAX_NCL clamp below — confirm.
	 */
	if (mbuf_pool != 0) {
		ncl = (mbuf_pool << MBSHIFT) >> MCLSHIFT;
	}

	/* Only size the pool on systems with more than 64MB, or when forced. */
	if (sane_size > (64 * 1024 * 1024) || ncl != 0) {
		if (ncl || serverperfmode) {
			was_overridden = TRUE;
		}

		if ((nmbclusters = ncl) == 0) {
			/* Auto-configure the mbuf pool size */
			nmbclusters = mbuf_default_ncl(mem_actual);
		} else {
			/* Make sure it's not odd in case ncl is manually set */
			if (nmbclusters & 0x1) {
				--nmbclusters;
			}

			/* And obey the upper limit */
			if (nmbclusters > MAX_NCL) {
				nmbclusters = MAX_NCL;
			}
		}

		/* Round it down to nearest multiple of PAGE_SIZE */
		nmbclusters = (unsigned int)P2ROUNDDOWN(nmbclusters, NCLPG);
	}
	mbuf_poolsz = nmbclusters << MCLSHIFT;
done:
	if (overridden) {
		*overridden = was_overridden;
	}

	return mbuf_poolsz;
}
#endif
332 
#if defined(__LP64__)
/* Defined in the TCP/socket code; scaled by bsd_scale_setup() on server configs. */
extern int tcp_tcbhashsize;
extern int max_cached_sock_count;
#endif
337 
338 
339 void
bsd_scale_setup(int scale)340 bsd_scale_setup(int scale)
341 {
342 #if defined(__LP64__)
343 	if ((scale > 0) && (serverperfmode == 0)) {
344 		maxproc *= scale;
345 		maxprocperuid = (maxproc * 2) / 3;
346 		if (scale > 2) {
347 			maxfiles *= scale;
348 			maxfilesperproc = maxfiles / 2;
349 		}
350 	}
351 	/* Apply server scaling rules */
352 	if ((scale > 0) && (serverperfmode != 0)) {
353 		maxproc = 2500 * scale;
354 		hard_maxproc = maxproc;
355 		/* no fp usage */
356 		maxprocperuid = (maxproc * 3) / 4;
357 		maxfiles = (150000 * scale);
358 		maxfilesperproc = maxfiles / 2;
359 		desiredvnodes = maxfiles;
360 		vnodes_sized = 1;
361 		tcp_tfo_backlog = 100 * scale;
362 		if (scale > 4) {
363 			/* clip somaxconn at 32G level */
364 			somaxconn = 2048;
365 			/*
366 			 * For scale > 4 (> 32G), clip
367 			 * tcp_tcbhashsize to 32K
368 			 */
369 			tcp_tcbhashsize = 32 * 1024;
370 
371 			if (scale > 7) {
372 				/* clip at 64G level */
373 				max_cached_sock_count = 165000;
374 			} else {
375 				max_cached_sock_count = 60000 + ((scale - 1) * 15000);
376 			}
377 		} else {
378 			somaxconn = 512 * scale;
379 			tcp_tcbhashsize = 4 * 1024 * scale;
380 			max_cached_sock_count = 60000 + ((scale - 1) * 15000);
381 		}
382 	}
383 
384 	if (maxproc > hard_maxproc) {
385 		hard_maxproc = maxproc;
386 	}
387 #endif
388 	bsd_exec_setup(scale);
389 }
390