xref: /xnu-8019.80.24/osfmk/arm/loose_ends.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <mach_assert.h>
30 #include <mach/vm_types.h>
31 #include <mach/mach_time.h>
32 #include <kern/timer.h>
33 #include <kern/clock.h>
34 #include <kern/machine.h>
35 #include <mach/machine.h>
36 #include <mach/machine/vm_param.h>
37 #include <mach_kdp.h>
38 #include <kdp/kdp_udp.h>
39 #if !MACH_KDP
40 #include <kdp/kdp_callout.h>
41 #endif /* !MACH_KDP */
42 #include <arm/cpu_data.h>
43 #include <arm/cpu_data_internal.h>
44 #include <arm/caches_internal.h>
45 
46 #include <vm/vm_kern.h>
47 #include <vm/vm_map.h>
48 #include <vm/pmap.h>
49 
50 #include <arm/misc_protos.h>
51 
52 #include <sys/errno.h>
53 
54 #define INT_SIZE        (BYTE_SIZE * sizeof (int))
55 
56 /* machine_routines_asm.s calls these */
57 extern int copyin_validate(const user_addr_t, uintptr_t, vm_size_t);
58 extern int copyin_user_validate(const user_addr_t, uintptr_t, vm_size_t);
59 extern int copyout_validate(uintptr_t, const user_addr_t, vm_size_t);
60 extern int copyio_user_validate(int, int, user_addr_t, vm_size_t);
61 extern int copyoutstr_prevalidate(const void *, user_addr_t, size_t);
62 
63 void
bcopy_phys(addr64_t src,addr64_t dst,vm_size_t bytes)64 bcopy_phys(addr64_t src, addr64_t dst, vm_size_t bytes)
65 {
66 	unsigned int    src_index;
67 	unsigned int    dst_index;
68 	vm_offset_t     src_offset;
69 	vm_offset_t     dst_offset;
70 	unsigned int    cpu_num;
71 	unsigned int    wimg_bits_src, wimg_bits_dst;
72 	ppnum_t         pn_src = (src >> PAGE_SHIFT);
73 	ppnum_t         pn_dst = (dst >> PAGE_SHIFT);
74 
75 	wimg_bits_src = pmap_cache_attributes(pn_src);
76 	wimg_bits_dst = pmap_cache_attributes(pn_dst);
77 
78 	if (mmu_kvtop_wpreflight(phystokv((pmap_paddr_t) dst)) &&
79 	    ((wimg_bits_src & VM_WIMG_MASK) == VM_WIMG_DEFAULT) &&
80 	    ((wimg_bits_dst & VM_WIMG_MASK) == VM_WIMG_DEFAULT)) {
81 		/* Fast path - dst is writable and both source and destination have default attributes */
82 		bcopy((char *)phystokv((pmap_paddr_t) src), (char *)phystokv((pmap_paddr_t) dst), bytes);
83 		return;
84 	}
85 
86 	src_offset = src & PAGE_MASK;
87 	dst_offset = dst & PAGE_MASK;
88 
89 	if ((src_offset + bytes) > PAGE_SIZE || (dst_offset + bytes) > PAGE_SIZE) {
90 		panic("bcopy extends beyond copy windows");
91 	}
92 
93 	mp_disable_preemption();
94 	cpu_num = cpu_number();
95 	src_index = pmap_map_cpu_windows_copy(pn_src, VM_PROT_READ, wimg_bits_src);
96 	dst_index = pmap_map_cpu_windows_copy(pn_dst, VM_PROT_READ | VM_PROT_WRITE, wimg_bits_dst);
97 
98 	bcopy((char *)(pmap_cpu_windows_copy_addr(cpu_num, src_index) + src_offset),
99 	    (char *)(pmap_cpu_windows_copy_addr(cpu_num, dst_index) + dst_offset),
100 	    bytes);
101 
102 	pmap_unmap_cpu_windows_copy(src_index);
103 	pmap_unmap_cpu_windows_copy(dst_index);
104 	mp_enable_preemption();
105 }
106 
/*
 * Zero physical memory, "non-cacheable" variant.  On this platform it is
 * simply an alias for bzero_phys() -- no special cache treatment is done.
 */
void
bzero_phys_nc(addr64_t src64, vm_size_t bytes)
{
	bzero_phys(src64, bytes);
}
112 
113 /* Zero bytes starting at a physical address */
114 void
bzero_phys(addr64_t src,vm_size_t bytes)115 bzero_phys(addr64_t src, vm_size_t bytes)
116 {
117 	unsigned int    wimg_bits;
118 	ppnum_t         pn = (src >> PAGE_SHIFT);
119 
120 	wimg_bits = pmap_cache_attributes(pn);
121 	if (__probable((wimg_bits & VM_WIMG_MASK) == VM_WIMG_DEFAULT)) {
122 		/* Fast path - default attributes */
123 		bzero((char *)phystokv((pmap_paddr_t) src), bytes);
124 	} else {
125 		mp_disable_preemption();
126 
127 		unsigned int cpu_num = cpu_number();
128 
129 		while (bytes > 0) {
130 			vm_offset_t offset = src & PAGE_MASK;
131 			uint32_t count = PAGE_SIZE - offset;
132 
133 			if (count > bytes) {
134 				count = bytes;
135 			}
136 
137 			unsigned int index = pmap_map_cpu_windows_copy(src >> PAGE_SHIFT, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
138 
139 			bzero((char *)(pmap_cpu_windows_copy_addr(cpu_num, index) + offset), count);
140 
141 			pmap_unmap_cpu_windows_copy(index);
142 
143 			src += count;
144 			bytes -= count;
145 		}
146 
147 		mp_enable_preemption();
148 	}
149 }
150 
151 /*
152  *  Read data from a physical address.
153  */
154 
155 
156 static unsigned int
ml_phys_read_data(pmap_paddr_t paddr,int size)157 ml_phys_read_data(pmap_paddr_t paddr, int size)
158 {
159 	unsigned int    index;
160 	unsigned int    result;
161 	unsigned int    wimg_bits;
162 	ppnum_t         pn = (paddr >> PAGE_SHIFT);
163 	unsigned char   s1;
164 	unsigned short  s2;
165 	vm_offset_t     copywindow_vaddr = 0;
166 
167 	mp_disable_preemption();
168 	wimg_bits = pmap_cache_attributes(pn);
169 	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ, wimg_bits);
170 	copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);
171 
172 	switch (size) {
173 	case 1:
174 		s1 = *(volatile unsigned char *)(copywindow_vaddr);
175 		result = s1;
176 		break;
177 	case 2:
178 		s2 = *(volatile unsigned short *)(copywindow_vaddr);
179 		result = s2;
180 		break;
181 	case 4:
182 	default:
183 		result = *(volatile unsigned int *)(copywindow_vaddr);
184 		break;
185 	}
186 
187 	pmap_unmap_cpu_windows_copy(index);
188 	mp_enable_preemption();
189 
190 	return result;
191 }
192 
193 static unsigned long long
ml_phys_read_long_long(pmap_paddr_t paddr)194 ml_phys_read_long_long(pmap_paddr_t paddr)
195 {
196 	unsigned int    index;
197 	unsigned int    result;
198 	unsigned int    wimg_bits;
199 	ppnum_t         pn = (paddr >> PAGE_SHIFT);
200 
201 	mp_disable_preemption();
202 	wimg_bits = pmap_cache_attributes(pn);
203 	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ, wimg_bits);
204 
205 	result = *(volatile unsigned long long *)(pmap_cpu_windows_copy_addr(cpu_number(), index)
206 	    | ((uint32_t)paddr & PAGE_MASK));
207 
208 	pmap_unmap_cpu_windows_copy(index);
209 	mp_enable_preemption();
210 
211 	return result;
212 }
213 
/* Read a 32-bit word at 32-bit physical address `paddr`. */
unsigned int
ml_phys_read( vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 4);
}
219 
/* Read a 32-bit word at 32-bit physical address `paddr`. */
unsigned int
ml_phys_read_word(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 4);
}
225 
/* Read a 32-bit word at 64-bit physical address `paddr64` (the "_64"
 * refers to the address width, not the data width). */
unsigned int
ml_phys_read_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}
231 
/* Read a 32-bit word at 64-bit physical address `paddr64`. */
unsigned int
ml_phys_read_word_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}
237 
/* Read a 16-bit halfword (zero-extended) at physical address `paddr`. */
unsigned int
ml_phys_read_half(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 2);
}
243 
/* Read a 16-bit halfword (zero-extended) at 64-bit physical address `paddr64`. */
unsigned int
ml_phys_read_half_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 2);
}
249 
/* Read a byte (zero-extended) at physical address `paddr`. */
unsigned int
ml_phys_read_byte(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 1);
}
255 
/* Read a byte (zero-extended) at 64-bit physical address `paddr64`. */
unsigned int
ml_phys_read_byte_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 1);
}
261 
/* Read a 64-bit doubleword at physical address `paddr`. */
unsigned long long
ml_phys_read_double(vm_offset_t paddr)
{
	return ml_phys_read_long_long((pmap_paddr_t)paddr);
}
267 
/* Read a 64-bit doubleword at 64-bit physical address `paddr64`. */
unsigned long long
ml_phys_read_double_64(addr64_t paddr64)
{
	return ml_phys_read_long_long((pmap_paddr_t)paddr64);
}
273 
274 
275 
276 /*
277  *  Write data to a physical address.
278  */
279 
280 static void
ml_phys_write_data(pmap_paddr_t paddr,unsigned long data,int size)281 ml_phys_write_data(pmap_paddr_t paddr, unsigned long data, int size)
282 {
283 	unsigned int    index;
284 	unsigned int    wimg_bits;
285 	ppnum_t         pn = (paddr >> PAGE_SHIFT);
286 	vm_offset_t     copywindow_vaddr = 0;
287 
288 	mp_disable_preemption();
289 	wimg_bits = pmap_cache_attributes(pn);
290 	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
291 	copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t) paddr & PAGE_MASK);
292 
293 	switch (size) {
294 	case 1:
295 		*(volatile unsigned char *)(copywindow_vaddr) = (unsigned char)data;
296 		break;
297 	case 2:
298 		*(volatile unsigned short *)(copywindow_vaddr) = (unsigned short)data;
299 		break;
300 	case 4:
301 	default:
302 		*(volatile unsigned int *)(copywindow_vaddr) = (uint32_t)data;
303 		break;
304 	}
305 
306 	pmap_unmap_cpu_windows_copy(index);
307 	mp_enable_preemption();
308 }
309 
310 static void
ml_phys_write_long_long(pmap_paddr_t paddr,unsigned long long data)311 ml_phys_write_long_long(pmap_paddr_t paddr, unsigned long long data)
312 {
313 	unsigned int    index;
314 	unsigned int    wimg_bits;
315 	ppnum_t         pn = (paddr >> PAGE_SHIFT);
316 
317 	mp_disable_preemption();
318 	wimg_bits = pmap_cache_attributes(pn);
319 	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
320 
321 	*(volatile unsigned long long *)(pmap_cpu_windows_copy_addr(cpu_number(), index)
322 	| ((uint32_t)paddr & PAGE_MASK)) = data;
323 
324 	pmap_unmap_cpu_windows_copy(index);
325 	mp_enable_preemption();
326 }
327 
328 
329 
/* Store the low byte of `data` at physical address `paddr`. */
void
ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 1);
}
335 
/* Store the low byte of `data` at 64-bit physical address `paddr64`. */
void
ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 1);
}
341 
/* Store the low 16 bits of `data` at physical address `paddr`. */
void
ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 2);
}
347 
/* Store the low 16 bits of `data` at 64-bit physical address `paddr64`. */
void
ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 2);
}
353 
/* Store a 32-bit word at physical address `paddr`. */
void
ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}
359 
/* Store a 32-bit word at 64-bit physical address `paddr64` (the "_64"
 * refers to the address width, not the data width). */
void
ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}
365 
/* Store a 32-bit word at physical address `paddr`. */
void
ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}
371 
/* Store a 32-bit word at 64-bit physical address `paddr64`. */
void
ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}
377 
/* Store a 64-bit doubleword at physical address `paddr`. */
void
ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_long_long((pmap_paddr_t)paddr, data);
}
383 
/* Store a 64-bit doubleword at 64-bit physical address `paddr64`. */
void
ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
	ml_phys_write_long_long((pmap_paddr_t)paddr64, data);
}
389 
390 
391 /*
392  * Set indicated bit in bit string.
393  */
394 void
setbit(int bitno,int * s)395 setbit(int bitno, int *s)
396 {
397 	s[bitno / INT_SIZE] |= 1 << (bitno % INT_SIZE);
398 }
399 
400 /*
401  * Clear indicated bit in bit string.
402  */
403 void
clrbit(int bitno,int * s)404 clrbit(int bitno, int *s)
405 {
406 	s[bitno / INT_SIZE] &= ~(1 << (bitno % INT_SIZE));
407 }
408 
409 /*
410  * Test if indicated bit is set in bit string.
411  */
412 int
testbit(int bitno,int * s)413 testbit(int bitno, int *s)
414 {
415 	return s[bitno / INT_SIZE] & (1 << (bitno % INT_SIZE));
416 }
417 
418 /*
419  * Find first bit set in bit string.
420  */
421 int
ffsbit(int * s)422 ffsbit(int *s)
423 {
424 	int             offset;
425 
426 	for (offset = 0; !*s; offset += INT_SIZE, ++s) {
427 		;
428 	}
429 	return offset + __builtin_ctz(*s);
430 }
431 
/*
 * Find first (least significant) bit set; 1-based, 0 when no bit is set.
 */
int
ffs(unsigned int mask)
{
	/*
	 * NOTE: cannot use __builtin_ffs because it generates a call to
	 * 'ffs'
	 */
	return mask ? __builtin_ctz(mask) + 1 : 0;
}
445 
/*
 * Find first (least significant) bit set in a 64-bit mask; 1-based,
 * 0 when no bit is set.
 */
int
ffsll(unsigned long long mask)
{
	/*
	 * NOTE: cannot use __builtin_ffsll because it generates a call to
	 * 'ffsll'
	 */
	return mask ? __builtin_ctzll(mask) + 1 : 0;
}
459 
460 /*
461  * Find last bit set in bit string.
462  */
/*
 * Find last (most significant) bit set; 1-based, 0 when no bit is set.
 */
int
fls(unsigned int mask)
{
	if (mask != 0) {
		return (int)(sizeof(mask) * 8) - __builtin_clz(mask);
	}
	return 0;
}
472 
/*
 * Find last (most significant) bit set in a 64-bit mask; 1-based,
 * 0 when no bit is set.
 */
int
flsll(unsigned long long mask)
{
	if (mask != 0) {
		return (int)(sizeof(mask) * 8) - __builtin_clzll(mask);
	}
	return 0;
}
482 
/*
 * Compare two byte ranges.  Returns 0 when they match; on a mismatch,
 * returns the number of bytes (including the mismatching one) not yet
 * verified -- i.e. the value is only meaningful as zero/nonzero.
 */
int
bcmp(
	const void *pa,
	const void *pb,
	size_t len)
{
	const char *a = (const char *) pa;
	const char *b = (const char *) pb;

	while (len != 0) {
		if (*a != *b) {
			break;
		}
		a++;
		b++;
		len--;
	}

	return len;
}
504 
/*
 * Standard memcmp: lexicographically compare `n` bytes as unsigned chars.
 * Returns <0, 0, or >0 (the difference of the first unequal pair).
 */
int
memcmp(const void *s1, const void *s2, size_t n)
{
	const unsigned char *a = s1;
	const unsigned char *b = s2;

	for (; n != 0; n--, a++, b++) {
		if (*a != *b) {
			return *a - *b;
		}
	}
	return 0;
}
519 
/*
 * Test whether `n` bytes at `s` are all zero.  Returns 0 iff every byte
 * examined is zero; a nonzero result is an OR-accumulation of the data,
 * not a count or index.
 *
 * Assumes `s` is at least 32-bit aligned and `n` is large enough that the
 * head and tail 32-bit loads below are in-bounds (n >= 4; presumably the
 * caller guarantees pointer alignment and a minimum size -- TODO confirm
 * against callers).
 */
unsigned long
memcmp_zero_ptr_aligned(const void *s, size_t n)
{
	uintptr_t p = (uintptr_t)s;
	uintptr_t end = (uintptr_t)s + n;
	uint32_t a, b;

	/* This implementation assumes a 32-bit `unsigned long` (arm32). */
	static_assert(sizeof(unsigned long) == sizeof(uint32_t));

	/* Seed the accumulators with the first and last 32-bit words, which
	 * cover any bytes the 64-bit-aligned loop below may skip. */
	a = *(const uint32_t *)p;
	b = *(const uint32_t *)(end - sizeof(uint32_t));

	/*
	 * align p to the next 64bit boundary
	 * align end to the previous 64bit boundary
	 *
	 * and do a nice ldrd loop.
	 */
	p = (p + sizeof(uint64_t) - 1) & -sizeof(uint64_t);
	end &= -sizeof(uint64_t);

	/* OR every aligned 64-bit word into the two 32-bit accumulators. */
	for (; p < end; p += sizeof(uint64_t)) {
		uint64_t v = *(const uint64_t *)p;
		a |= (uint32_t)v;
		b |= (uint32_t)(v >> 32);
	}

	return a | b;
}
549 
/*
 * Copy `size` bytes between a physical and a virtual address (or between
 * two physical / kernel-mapped addresses), with optional cache maintenance.
 *
 * `which` flag bits, as used below:
 *   cppvPsrc / cppvPsnk - source / sink is a physical address
 *   cppvKmap            - the virtual address is in the kernel map rather
 *                         than the current thread's map
 *   cppvFsrc / cppvFsnk - force a dcache flush of source / sink
 *
 * At least one of source/sink must be physical; otherwise this panics.
 * Returns the copyin()/copyout() result on the user-copy paths, and
 * KERN_SUCCESS on the bcopy path.
 */
kern_return_t
copypv(addr64_t source, addr64_t sink, unsigned int size, int which)
{
	kern_return_t   retval = KERN_SUCCESS;
	void            *from, *to;
	unsigned int    from_wimg_bits, to_wimg_bits;

	from = CAST_DOWN(void *, source);
	to = CAST_DOWN(void *, sink);

	if ((which & (cppvPsrc | cppvPsnk)) == 0) {     /* Make sure that only
		                                         * one is virtual */
		panic("copypv: no more than 1 parameter may be virtual");     /* Not allowed */
	}
	/* Translate physical endpoints through the physical aperture. */
	if (which & cppvPsrc) {
		from = (void *)phystokv((pmap_paddr_t)from);
	}
	if (which & cppvPsnk) {
		to = (void *)phystokv((pmap_paddr_t)to);
	}

	if ((which & (cppvPsrc | cppvKmap)) == 0) {     /* Source is virtual in
		                                         * current map */
		retval = copyin((user_addr_t) from, to, size);
	} else if ((which & (cppvPsnk | cppvKmap)) == 0) { /* Sink is virtual in
		                                            * current map */
		retval = copyout(from, (user_addr_t) to, size);
	} else {                /* both addresses are physical or kernel map */
		bcopy(from, to, size);
	}

	/* Source-side cache maintenance: forced flush, or flush a physical
	 * source whose mapping is neither copyback nor writethrough. */
	if (which & cppvFsrc) {
		flush_dcache64(source, size, ((which & cppvPsrc) == cppvPsrc));
	} else if (which & cppvPsrc) {
		from_wimg_bits = pmap_cache_attributes(source >> PAGE_SHIFT);
		if ((from_wimg_bits != VM_WIMG_COPYBACK) && (from_wimg_bits != VM_WIMG_WTHRU)) {
			flush_dcache64(source, size, TRUE);
		}
	}

	/* Sink-side cache maintenance, analogous to the source side. */
	if (which & cppvFsnk) {
		flush_dcache64(sink, size, ((which & cppvPsnk) == cppvPsnk));
	} else if (which & cppvPsnk) {
		to_wimg_bits = pmap_cache_attributes(sink >> PAGE_SHIFT);
		if (to_wimg_bits != VM_WIMG_COPYBACK) {
			flush_dcache64(sink, size, TRUE);
		}
	}
	return retval;
}
600 
601 /*
602  * Copy sizes bigger than this value will cause a kernel panic.
603  *
604  * Yes, this is an arbitrary fixed limit, but it's almost certainly
605  * a programming error to be copying more than this amount between
606  * user and wired kernel memory in a single invocation on this
607  * platform.
608  */
const int copysize_limit_panic = (64 * 1024 * 1024); /* 64 MiB */
610 
611 static inline bool
is_kernel_to_kernel_copy()612 is_kernel_to_kernel_copy()
613 {
614 	return current_thread()->map->pmap == kernel_pmap;
615 }
616 
617 static int
copy_validate_user(const user_addr_t user_addr,vm_size_t nbytes,bool kern_to_kern_allowed)618 copy_validate_user(const user_addr_t user_addr, vm_size_t nbytes, bool kern_to_kern_allowed)
619 {
620 	user_addr_t user_addr_last = user_addr + nbytes;
621 	thread_t self = current_thread();
622 
623 	if (__improbable(!kern_to_kern_allowed && is_kernel_to_kernel_copy())) {
624 		return EFAULT;
625 	}
626 
627 	if (__improbable((user_addr_last < user_addr) ||
628 	    ((user_addr + nbytes) > vm_map_max(self->map)) ||
629 	    (user_addr < vm_map_min(self->map)))) {
630 		return EFAULT;
631 	}
632 
633 	if (__improbable(nbytes > copysize_limit_panic)) {
634 		panic("%s(%p, ..., %u) - transfer too large", __func__,
635 		    (void *)user_addr, nbytes);
636 	}
637 
638 	return 0;
639 }
640 
641 /*
642  * Validate the arguments to copy{in,out} on this platform.
643  *
644  * Called when nbytes is "large" e.g. more than a page.  Such sizes are
645  * infrequent, and very large sizes are likely indications of attempts
646  * to exploit kernel programming errors (bugs).
647  */
648 static int
copy_validate(const user_addr_t user_addr,uintptr_t kernel_addr,vm_size_t nbytes,bool kern_to_kern_allowed)649 copy_validate(const user_addr_t user_addr,
650     uintptr_t kernel_addr, vm_size_t nbytes, bool kern_to_kern_allowed)
651 {
652 	uintptr_t kernel_addr_last = kernel_addr + nbytes;
653 
654 	if (__improbable(kernel_addr < VM_MIN_KERNEL_ADDRESS ||
655 	    kernel_addr > VM_MAX_KERNEL_ADDRESS ||
656 	    kernel_addr_last < kernel_addr ||
657 	    kernel_addr_last > VM_MAX_KERNEL_ADDRESS)) {
658 		panic("%s(%p, %p, %u) - kaddr not in kernel", __func__,
659 		    (void *)user_addr, (void *)kernel_addr, nbytes);
660 	}
661 
662 	return copy_validate_user(user_addr, nbytes, kern_to_kern_allowed);
663 }
664 
/* Validate a copyin() request; kernel-to-kernel copies are permitted. */
int
copyin_validate(const user_addr_t ua, uintptr_t ka, vm_size_t nbytes)
{
	return copy_validate(ua, ka, nbytes, true);
}
670 
/* Validate a copyin() request; kernel-to-kernel copies are rejected. */
int
copyin_user_validate(const user_addr_t ua, uintptr_t ka, vm_size_t nbytes)
{
	return copy_validate(ua, ka, nbytes, false);
}
676 
/* Validate a copyout() request; kernel-to-kernel copies are permitted. */
int
copyout_validate(uintptr_t ka, const user_addr_t ua, vm_size_t nbytes)
{
	return copy_validate(ua, ka, nbytes, true);
}
682 
/* Validate a user-address-only copy range (no kernel address to check);
 * kernel-to-kernel copies are rejected. */
int
copyio_user_validate(int a __unused, int b __unused,
    user_addr_t ua, vm_size_t nbytes)
{
	return copy_validate_user(ua, nbytes, false);
}
689 
690 int
copyoutstr_prevalidate(const void * __unused kaddr,user_addr_t __unused uaddr,size_t __unused len)691 copyoutstr_prevalidate(const void *__unused kaddr, user_addr_t __unused uaddr, size_t __unused len)
692 {
693 	if (__improbable(is_kernel_to_kernel_copy())) {
694 		return EFAULT;
695 	}
696 
697 	return 0;
698 }
699 
/* Legacy interface; not supported on ARM -- always panics.
 * (The return is unreachable.) */
int
clr_be_bit(void)
{
	panic("clr_be_bit");
	return 0;
}
706 
/* Probe-read a 32-bit value at a physical address; unimplemented on this
 * platform -- always panics.  (The return is unreachable.) */
boolean_t
ml_probe_read(
	__unused vm_offset_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read() unimplemented");
	return 1;
}
715 
/* Probe-read at a 64-bit physical address; unimplemented on this
 * platform -- always panics.  (The return is unreachable.) */
boolean_t
ml_probe_read_64(
	__unused addr64_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read_64() unimplemented");
	return 1;
}
724 
725 
/* Machine-layer thread policy hook: intentionally a no-op on ARM. */
void
ml_thread_policy(
	__unused thread_t thread,
	__unused unsigned policy_id,
	__unused unsigned policy_info)
{
	//    <rdar://problem/7141284>: Reduce print noise
	//	kprintf("ml_thread_policy() unimplemented\n");
}
735 
#if !MACH_KDP
/* Stub: when the kernel debugger is compiled out, callout registration
 * is a no-op. */
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
#pragma unused(fn,arg)
}
#endif
743