xref: /xnu-8019.80.24/osfmk/arm64/loose_ends.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <mach_assert.h>
30 #include <mach/vm_types.h>
31 #include <mach/mach_time.h>
32 #include <kern/timer.h>
33 #include <kern/clock.h>
34 #include <kern/machine.h>
35 #include <mach/machine.h>
36 #include <mach/machine/vm_param.h>
37 #include <mach_kdp.h>
38 #include <kdp/kdp_udp.h>
39 #if !MACH_KDP
40 #include <kdp/kdp_callout.h>
41 #endif /* !MACH_KDP */
42 #include <arm/cpu_data.h>
43 #include <arm/cpu_data_internal.h>
44 #include <arm/caches_internal.h>
45 
46 #include <vm/vm_kern.h>
47 #include <vm/vm_map.h>
48 #include <vm/pmap.h>
49 
50 #include <arm/misc_protos.h>
51 
52 #include <sys/errno.h>
53 
54 #include <libkern/section_keywords.h>
55 #include <libkern/OSDebug.h>
56 
57 #define INT_SIZE        (BYTE_SIZE * sizeof (int))
58 
59 #define BCOPY_PHYS_SRC_IS_PHYS(flags) (((flags) & cppvPsrc) != 0)
60 #define BCOPY_PHYS_DST_IS_PHYS(flags) (((flags) & cppvPsnk) != 0)
61 #define BCOPY_PHYS_SRC_IS_USER(flags) (((flags) & (cppvPsrc | cppvKmap)) == 0)
62 #define BCOPY_PHYS_DST_IS_USER(flags) (((flags) & (cppvPsnk | cppvKmap)) == 0)
63 
/*
 * Copy 'bytes' bytes between any combination of physical, kernel-virtual
 * and user-virtual addresses, as described by 'flags' (cppvPsrc/cppvPsnk
 * mark the source/sink as physical; an address with neither its physical
 * flag nor cppvKmap set is a user-space address -- see the BCOPY_PHYS_*
 * macros above).  Returns KERN_SUCCESS, or the copyin()/copyout() error
 * when a user address is involved.
 */
static kern_return_t
bcopy_phys_internal(addr64_t src, addr64_t dst, vm_size_t bytes, int flags)
{
	unsigned int    src_index;
	unsigned int    dst_index;
	vm_offset_t     src_offset;
	vm_offset_t     dst_offset;
	unsigned int    wimg_bits_src, wimg_bits_dst;
	unsigned int    cpu_num = 0;
	ppnum_t         pn_src;
	ppnum_t         pn_dst;
	addr64_t        end __assert_only;
	kern_return_t   res = KERN_SUCCESS;

	/*
	 * Kernel-addressable ranges must not wrap around the address space;
	 * user ranges are validated by copyin()/copyout() below instead.
	 */
	if (!BCOPY_PHYS_SRC_IS_USER(flags)) {
		assert(!__improbable(os_add_overflow(src, bytes, &end)));
	}
	if (!BCOPY_PHYS_DST_IS_USER(flags)) {
		assert(!__improbable(os_add_overflow(dst, bytes, &end)));
	}

	while ((bytes > 0) && (res == KERN_SUCCESS)) {
		src_offset = src & PAGE_MASK;
		dst_offset = dst & PAGE_MASK;
		boolean_t use_copy_window_src = FALSE;
		boolean_t use_copy_window_dst = FALSE;
		/* Bytes to copy this iteration; clipped to a single page whenever
		 * a page-granular mapping has to be used for either side. */
		vm_size_t count = bytes;
		vm_size_t count2 = bytes;
		if (BCOPY_PHYS_SRC_IS_PHYS(flags)) {
			/* Pages outside the physical aperture go through a
			 * temporary per-CPU copy window. */
			use_copy_window_src = !pmap_valid_address(src);
			pn_src = (ppnum_t)(src >> PAGE_SHIFT);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
			/* Non-coherent IO: pages with non-default cacheability must
			 * also use a copy window with matching WIMG attributes. */
			count = PAGE_SIZE - src_offset;
			wimg_bits_src = pmap_cache_attributes(pn_src);
			if ((wimg_bits_src & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
				use_copy_window_src = TRUE;
			}
#else
			if (use_copy_window_src) {
				wimg_bits_src = pmap_cache_attributes(pn_src);
				count = PAGE_SIZE - src_offset;
			}
#endif
		}
		if (BCOPY_PHYS_DST_IS_PHYS(flags)) {
			// write preflighting needed for things like dtrace which may write static read-only mappings
			use_copy_window_dst = (!pmap_valid_address(dst) || !mmu_kvtop_wpreflight(phystokv((pmap_paddr_t)dst)));
			pn_dst = (ppnum_t)(dst >> PAGE_SHIFT);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
			count2 = PAGE_SIZE - dst_offset;
			wimg_bits_dst = pmap_cache_attributes(pn_dst);
			if ((wimg_bits_dst & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
				use_copy_window_dst = TRUE;
			}
#else
			if (use_copy_window_dst) {
				wimg_bits_dst = pmap_cache_attributes(pn_dst);
				count2 = PAGE_SIZE - dst_offset;
			}
#endif
		}

		char *tmp_src;
		char *tmp_dst;

		/* Copy windows are addressed per-CPU, so preemption must stay
		 * disabled for as long as a window is mapped. */
		if (use_copy_window_src || use_copy_window_dst) {
			mp_disable_preemption();
			cpu_num = cpu_number();
		}

		if (use_copy_window_src) {
			src_index = pmap_map_cpu_windows_copy(pn_src, VM_PROT_READ, wimg_bits_src);
			tmp_src = (char*)(pmap_cpu_windows_copy_addr(cpu_num, src_index) + src_offset);
		} else if (BCOPY_PHYS_SRC_IS_PHYS(flags)) {
			/* phystokv_range() takes 'count' by reference and may adjust
			 * it to the length addressable through this mapping. */
			tmp_src = (char*)phystokv_range((pmap_paddr_t)src, &count);
		} else {
			tmp_src = (char*)src;
		}
		if (use_copy_window_dst) {
			dst_index = pmap_map_cpu_windows_copy(pn_dst, VM_PROT_READ | VM_PROT_WRITE, wimg_bits_dst);
			tmp_dst = (char*)(pmap_cpu_windows_copy_addr(cpu_num, dst_index) + dst_offset);
		} else if (BCOPY_PHYS_DST_IS_PHYS(flags)) {
			tmp_dst = (char*)phystokv_range((pmap_paddr_t)dst, &count2);
		} else {
			tmp_dst = (char*)dst;
		}

		/* Copy the smaller of the two per-side limits, and never more
		 * than the bytes still remaining. */
		if (count > count2) {
			count = count2;
		}
		if (count > bytes) {
			count = bytes;
		}

		if (BCOPY_PHYS_SRC_IS_USER(flags)) {
			res = copyin((user_addr_t)src, tmp_dst, count);
		} else if (BCOPY_PHYS_DST_IS_USER(flags)) {
			res = copyout(tmp_src, (user_addr_t)dst, count);
		} else {
			bcopy(tmp_src, tmp_dst, count);
		}

		if (use_copy_window_src) {
			pmap_unmap_cpu_windows_copy(src_index);
		}
		if (use_copy_window_dst) {
			pmap_unmap_cpu_windows_copy(dst_index);
		}
		if (use_copy_window_src || use_copy_window_dst) {
			mp_enable_preemption();
		}

		src += count;
		dst += count;
		bytes -= count;
	}
	return res;
}
182 
/* Copy 'bytes' bytes between two physical addresses. */
void
bcopy_phys(addr64_t src, addr64_t dst, vm_size_t bytes)
{
	bcopy_phys_internal(src, dst, bytes, cppvPsrc | cppvPsnk);
}
188 
/*
 * Non-cached zeroing entry point; on this platform it simply forwards to
 * bzero_phys(), which already picks a zeroing method safe for the page's
 * cacheability attributes.
 */
void
bzero_phys_nc(addr64_t src64, vm_size_t bytes)
{
	bzero_phys(src64, bytes);
}
194 
195 extern void *secure_memset(void *, int, size_t);
196 
/* Zero bytes starting at a physical address */
void
bzero_phys(addr64_t src, vm_size_t bytes)
{
	unsigned int    wimg_bits;
	unsigned int    cpu_num = cpu_number();
	ppnum_t         pn;
	addr64_t        end __assert_only;

	/* The range must not wrap around the address space. */
	assert(!__improbable(os_add_overflow(src, bytes, &end)));

	vm_offset_t offset = src & PAGE_MASK;
	while (bytes > 0) {
		/* Bytes to zero this iteration; clipped to one page whenever a
		 * page-granular mapping has to be used. */
		vm_size_t count = bytes;

		/* Pages outside the physical aperture go through a temporary
		 * per-CPU copy window. */
		boolean_t use_copy_window = !pmap_valid_address(src);
		pn = (ppnum_t)(src >> PAGE_SHIFT);
		wimg_bits = pmap_cache_attributes(pn);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
		/* Non-coherent IO: non-default cacheability also forces a copy
		 * window with matching WIMG attributes. */
		count = PAGE_SIZE - offset;
		if ((wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
			use_copy_window = TRUE;
		}
#else
		if (use_copy_window) {
			count = PAGE_SIZE - offset;
		}
#endif
		char *buf;
		unsigned int index;
		if (use_copy_window) {
			/* Copy windows are per-CPU: re-read the CPU number and keep
			 * preemption disabled while the window is mapped. */
			mp_disable_preemption();
			cpu_num = cpu_number();
			index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
			buf = (char *)(pmap_cpu_windows_copy_addr(cpu_num, index) + offset);
		} else {
			/* phystokv_range() may adjust 'count' to the length
			 * addressable through this mapping. */
			buf = (char *)phystokv_range((pmap_paddr_t)src, &count);
		}

		if (count > bytes) {
			count = bytes;
		}

		switch (wimg_bits & VM_WIMG_MASK) {
		case VM_WIMG_DEFAULT:
		case VM_WIMG_WCOMB:
		case VM_WIMG_INNERWBACK:
		case VM_WIMG_WTHRU:
#if HAS_UCNORMAL_MEM
		case VM_WIMG_RT:
#endif
			bzero(buf, count);
			break;
		default:
			/* 'dc zva' performed by bzero is not safe for device memory */
			secure_memset((void*)buf, 0, count);
		}

		if (use_copy_window) {
			pmap_unmap_cpu_windows_copy(index);
			mp_enable_preemption();
		}

		src += count;
		bytes -= count;
		/* Only the first iteration can start mid-page. */
		offset = 0;
	}
}
265 
266 /*
267  *  Read data from a physical address.
268  */
269 
270 
/*
 * Read a 1-, 2-, 4- or 8-byte datum from physical address 'paddr'.
 * The access must not span a page boundary (panics otherwise); the value
 * read is zero-extended into the uint64_t return.
 */
static uint64_t
ml_phys_read_data(pmap_paddr_t paddr, int size)
{
	vm_address_t   addr;
	ppnum_t        pn = atop_kernel(paddr);
	ppnum_t        pn_end = atop_kernel(paddr + size - 1);
	uint64_t       result = 0;
	uint8_t        s1;
	uint16_t       s2;
	uint32_t       s4;
	unsigned int   index;
	bool           use_copy_window = true;

	if (__improbable(pn_end != pn)) {
		panic("%s: paddr 0x%llx spans a page boundary", __func__, (uint64_t)paddr);
	}

#ifdef ML_IO_TIMEOUTS_ENABLED
	/* Optionally time the access (with interrupts disabled) so that a
	 * stalled read can be reported, traced, or panicked on below. */
	bool istate, timeread = false;
	uint64_t sabs, eabs;

	uint32_t const report_phy_read_delay = os_atomic_load(&report_phy_read_delay_to, relaxed);
	uint32_t const trace_phy_read_delay = os_atomic_load(&trace_phy_read_delay_to, relaxed);

	if (__improbable(report_phy_read_delay != 0)) {
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timeread = true;
	}
#ifdef ML_IO_SIMULATE_STRETCHED_ENABLED
	/* Back-date the start time to simulate a slow access, for testing. */
	if (__improbable(timeread && simulate_stretched_io)) {
		sabs -= simulate_stretched_io;
	}
#endif /* ML_IO_SIMULATE_STRETCHED_ENABLED */
#endif /* ML_IO_TIMEOUTS_ENABLED */

#if defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__
	/* Prefer the physical aperture when it covers this address. */
	if (pmap_valid_address(paddr)) {
		addr = phystokv(paddr);
		use_copy_window = false;
	}
#endif /* defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__ */

	if (use_copy_window) {
		/* Map the page through a per-CPU copy window; preemption stays
		 * disabled while the window is in use. */
		mp_disable_preemption();
		unsigned int wimg_bits = pmap_cache_attributes(pn);
		index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ, wimg_bits);
		addr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);
	}

	/* Single access of exactly the requested width (volatile so the
	 * compiler cannot split, widen, or elide it). */
	switch (size) {
	case 1:
		s1 = *(volatile uint8_t *)addr;
		result = s1;
		break;
	case 2:
		s2 = *(volatile uint16_t *)addr;
		result = s2;
		break;
	case 4:
		s4 = *(volatile uint32_t *)addr;
		result = s4;
		break;
	case 8:
		result = *(volatile uint64_t *)addr;
		break;
	default:
		panic("Invalid size %d for ml_phys_read_data", size);
		break;
	}

	if (use_copy_window) {
		pmap_unmap_cpu_windows_copy(index);
		mp_enable_preemption();
	}

#ifdef ML_IO_TIMEOUTS_ENABLED
	if (__improbable(timeread)) {
		eabs = mach_absolute_time();

#ifdef ML_IO_IOTRACE_ENABLED
		iotrace(IOTRACE_PHYS_READ, 0, addr, size, result, sabs, eabs - sabs);
#endif /* ML_IO_IOTRACE_ENABLED */

		if (__improbable((eabs - sabs) > report_phy_read_delay)) {
			ml_set_interrupts_enabled(istate);

			if (phy_read_panic && (machine_timeout_suspended() == FALSE)) {
				panic("Read from physical addr 0x%llx took %llu ns, "
				    "result: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
				    (unsigned long long)addr, (eabs - sabs), result, sabs, eabs,
				    (uint64_t)report_phy_read_delay);
			}

			if (report_phy_read_osbt) {
				OSReportWithBacktrace("ml_phys_read_data took %llu us",
				    (eabs - sabs) / NSEC_PER_USEC);
			}
#if CONFIG_DTRACE
			DTRACE_PHYSLAT4(physread, uint64_t, (eabs - sabs),
			    uint64_t, addr, uint32_t, size, uint64_t, result);
#endif /* CONFIG_DTRACE */
		} else if (__improbable(trace_phy_read_delay > 0 && (eabs - sabs) > trace_phy_read_delay)) {
			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PHYS_READ),
			    (eabs - sabs), sabs, addr, result);

			ml_set_interrupts_enabled(istate);
		} else {
			ml_set_interrupts_enabled(istate);
		}
	}
#endif /*  ML_IO_TIMEOUTS_ENABLED */

	return result;
}
386 
/* Read a 32-bit word from physical address 'paddr'. */
unsigned int
ml_phys_read(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
}
392 
/* Read a 32-bit word from physical address 'paddr'. */
unsigned int
ml_phys_read_word(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
}
398 
/* 64-bit-address variant of ml_phys_read(); still reads a 32-bit word. */
unsigned int
ml_phys_read_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}
404 
/* 64-bit-address variant of ml_phys_read_word(); still reads 32 bits. */
unsigned int
ml_phys_read_word_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}
410 
/* Read a 16-bit halfword from physical address 'paddr'. */
unsigned int
ml_phys_read_half(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 2);
}
416 
/* 64-bit-address variant of ml_phys_read_half(). */
unsigned int
ml_phys_read_half_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 2);
}
422 
/* Read a single byte from physical address 'paddr'. */
unsigned int
ml_phys_read_byte(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 1);
}
428 
/* 64-bit-address variant of ml_phys_read_byte(). */
unsigned int
ml_phys_read_byte_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 1);
}
434 
/* Read a 64-bit doubleword from physical address 'paddr'. */
unsigned long long
ml_phys_read_double(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 8);
}
440 
/* 64-bit-address variant of ml_phys_read_double(). */
unsigned long long
ml_phys_read_double_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 8);
}
446 
447 
448 
449 /*
450  *  Write data to a physical address.
451  */
452 
/*
 * Write a 1-, 2-, 4- or 8-byte datum to physical address 'paddr'.
 * The access must not span a page boundary (panics otherwise).
 */
static void
ml_phys_write_data(pmap_paddr_t paddr, uint64_t data, int size)
{
	vm_address_t   addr;
	ppnum_t        pn = atop_kernel(paddr);
	ppnum_t        pn_end = atop_kernel(paddr + size - 1);
	unsigned int   index;
	bool           use_copy_window = true;

	if (__improbable(pn_end != pn)) {
		panic("%s: paddr 0x%llx spans a page boundary", __func__, (uint64_t)paddr);
	}

#ifdef ML_IO_TIMEOUTS_ENABLED
	/* Optionally time the access (with interrupts disabled) so that a
	 * stalled write can be reported, traced, or panicked on below. */
	bool istate, timewrite = false;
	uint64_t sabs, eabs;

	uint32_t const report_phy_write_delay = os_atomic_load(&report_phy_write_delay_to, relaxed);
	uint32_t const trace_phy_write_delay = os_atomic_load(&trace_phy_write_delay_to, relaxed);

	if (__improbable(report_phy_write_delay != 0)) {
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = mach_absolute_time();
		timewrite = true;
	}
#ifdef ML_IO_SIMULATE_STRETCHED_ENABLED
	/* Back-date the start time to simulate a slow access, for testing. */
	if (__improbable(timewrite && simulate_stretched_io)) {
		sabs -= simulate_stretched_io;
	}
#endif /* ML_IO_SIMULATE_STRETCHED_ENABLED */
#endif /* ML_IO_TIMEOUTS_ENABLED */

#if defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__
	/* Prefer the physical aperture when it covers this address. */
	if (pmap_valid_address(paddr)) {
		addr = phystokv(paddr);
		use_copy_window = false;
	}
#endif /* defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__ */

	if (use_copy_window) {
		/* Map the page through a per-CPU copy window; preemption stays
		 * disabled while the window is in use. */
		mp_disable_preemption();
		unsigned int wimg_bits = pmap_cache_attributes(pn);
		index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
		addr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);
	}

	/* Single store of exactly the requested width (volatile so the
	 * compiler cannot split, widen, or elide it). */
	switch (size) {
	case 1:
		*(volatile uint8_t *)addr = (uint8_t)data;
		break;
	case 2:
		*(volatile uint16_t *)addr = (uint16_t)data;
		break;
	case 4:
		*(volatile uint32_t *)addr = (uint32_t)data;
		break;
	case 8:
		*(volatile uint64_t *)addr = data;
		break;
	default:
		panic("Invalid size %d for ml_phys_write_data", size);
	}

	if (use_copy_window) {
		pmap_unmap_cpu_windows_copy(index);
		mp_enable_preemption();
	}

#ifdef ML_IO_TIMEOUTS_ENABLED
	if (__improbable(timewrite)) {
		eabs = mach_absolute_time();

#ifdef ML_IO_IOTRACE_ENABLED
		iotrace(IOTRACE_PHYS_WRITE, 0, paddr, size, data, sabs, eabs - sabs);
#endif /*  ML_IO_IOTRACE_ENABLED */

		if (__improbable((eabs - sabs) > report_phy_write_delay)) {
			ml_set_interrupts_enabled(istate);

			if (phy_write_panic && (machine_timeout_suspended() == FALSE)) {
				panic("Write from physical addr 0x%llx took %llu ns, "
				    "data: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
				    (unsigned long long)paddr, (eabs - sabs), data, sabs, eabs,
				    (uint64_t)report_phy_write_delay);
			}

			if (report_phy_write_osbt) {
				OSReportWithBacktrace("ml_phys_write_data took %llu us",
				    (eabs - sabs) / NSEC_PER_USEC);
			}
#if CONFIG_DTRACE
			DTRACE_PHYSLAT4(physwrite, uint64_t, (eabs - sabs),
			    uint64_t, paddr, uint32_t, size, uint64_t, data);
#endif /* CONFIG_DTRACE */
		} else if (__improbable(trace_phy_write_delay > 0 && (eabs - sabs) > trace_phy_write_delay)) {
			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PHYS_WRITE),
			    (eabs - sabs), sabs, paddr, data);

			ml_set_interrupts_enabled(istate);
		} else {
			ml_set_interrupts_enabled(istate);
		}
	}
#endif /*  ML_IO_TIMEOUTS_ENABLED */
}
558 
/* Write a single byte to physical address 'paddr'. */
void
ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 1);
}
564 
/* 64-bit-address variant of ml_phys_write_byte(). */
void
ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 1);
}
570 
/* Write a 16-bit halfword to physical address 'paddr'. */
void
ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 2);
}
576 
/* 64-bit-address variant of ml_phys_write_half(). */
void
ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 2);
}
582 
/* Write a 32-bit word to physical address 'paddr'. */
void
ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}
588 
/* 64-bit-address variant of ml_phys_write(); still writes 32 bits. */
void
ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}
594 
/* Write a 32-bit word to physical address 'paddr'. */
void
ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}
600 
/* 64-bit-address variant of ml_phys_write_word(); still writes 32 bits. */
void
ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}
606 
/* Write a 64-bit doubleword to physical address 'paddr'. */
void
ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 8);
}
612 
/* 64-bit-address variant of ml_phys_write_double(). */
void
ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 8);
}
618 
619 
620 /*
621  * Set indicated bit in bit string.
622  */
623 void
setbit(int bitno,int * s)624 setbit(int bitno, int *s)
625 {
626 	s[bitno / INT_SIZE] |= 1U << (bitno % INT_SIZE);
627 }
628 
629 /*
630  * Clear indicated bit in bit string.
631  */
632 void
clrbit(int bitno,int * s)633 clrbit(int bitno, int *s)
634 {
635 	s[bitno / INT_SIZE] &= ~(1U << (bitno % INT_SIZE));
636 }
637 
638 /*
639  * Test if indicated bit is set in bit string.
640  */
641 int
testbit(int bitno,int * s)642 testbit(int bitno, int *s)
643 {
644 	return s[bitno / INT_SIZE] & (1U << (bitno % INT_SIZE));
645 }
646 
647 /*
648  * Find first bit set in bit string.
649  */
650 int
ffsbit(int * s)651 ffsbit(int *s)
652 {
653 	int             offset;
654 
655 	for (offset = 0; !*s; offset += INT_SIZE, ++s) {
656 		;
657 	}
658 	return offset + __builtin_ctz(*s);
659 }
660 
/*
 * Find first (least significant) bit set, numbered from 1.
 * Returns 0 when no bit is set.
 */
int
ffs(unsigned int mask)
{
	/*
	 * NOTE: cannot use __builtin_ffs because it generates a call to
	 * 'ffs'
	 */
	return (mask == 0) ? 0 : __builtin_ctz(mask) + 1;
}
674 
/*
 * 64-bit variant of ffs(): first set bit numbered from 1, or 0 for an
 * empty mask.
 */
int
ffsll(unsigned long long mask)
{
	/*
	 * NOTE: cannot use __builtin_ffsll because it generates a call to
	 * 'ffsll'
	 */
	return (mask == 0) ? 0 : __builtin_ctzll(mask) + 1;
}
688 
/*
 * Find last bit set in bit string.
 * The most significant set bit is numbered from 1; 0 means no bit set.
 */
int
fls(unsigned int mask)
{
	if (mask == 0) {
		return 0;
	}

	return (int)(sizeof(mask) * 8) - __builtin_clz(mask);
}
701 
/*
 * 64-bit variant of fls(): most significant set bit numbered from 1,
 * or 0 for an empty mask.
 */
int
flsll(unsigned long long mask)
{
	if (mask == 0) {
		return 0;
	}

	return (int)(sizeof(mask) * 8) - __builtin_clzll(mask);
}
711 
#undef bcmp
/*
 * Compare two byte ranges: returns 0 when the first 'len' bytes are
 * identical, otherwise a non-zero value (the count of bytes that were
 * never examined, clamped for the 32-bit-overflow case below).
 */
int
bcmp(
	const void *pa,
	const void *pb,
	size_t len)
{
	const char *a = (const char *) pa;
	const char *b = (const char *) pb;

	while (len != 0) {
		if (*a++ != *b++) {
			break;
		}
		--len;
	}

	/*
	 * Check for the overflow case but continue to handle the non-overflow
	 * case the same way just in case someone is using the return value
	 * as more than zero/non-zero
	 */
	if ((len & 0xFFFFFFFF00000000ULL) && !(len & 0x00000000FFFFFFFFULL)) {
		return 0xFFFFFFFFL;
	}
	return (int)len;
}
743 
744 #undef memcmp
745 MARK_AS_HIBERNATE_TEXT
746 int
memcmp(const void * s1,const void * s2,size_t n)747 memcmp(const void *s1, const void *s2, size_t n)
748 {
749 	if (n != 0) {
750 		const unsigned char *p1 = s1, *p2 = s2;
751 
752 		do {
753 			if (*p1++ != *p2++) {
754 				return *--p1 - *--p2;
755 			}
756 		} while (--n != 0);
757 	}
758 	return 0;
759 }
760 
761 kern_return_t
copypv(addr64_t source,addr64_t sink,unsigned int size,int which)762 copypv(addr64_t source, addr64_t sink, unsigned int size, int which)
763 {
764 	if ((which & (cppvPsrc | cppvPsnk)) == 0) {     /* Make sure that only one is virtual */
765 		panic("%s: no more than 1 parameter may be virtual", __func__);
766 	}
767 
768 	kern_return_t res = bcopy_phys_internal(source, sink, size, which);
769 
770 #ifndef __ARM_COHERENT_IO__
771 	if (which & cppvFsrc) {
772 		flush_dcache64(source, size, ((which & cppvPsrc) == cppvPsrc));
773 	}
774 
775 	if (which & cppvFsnk) {
776 		flush_dcache64(sink, size, ((which & cppvPsnk) == cppvPsnk));
777 	}
778 #endif
779 
780 	return res;
781 }
782 
/* Not supported on this platform; always panics. */
int
clr_be_bit(void)
{
	panic("clr_be_bit");
	return 0;
}
789 
/* Unimplemented on this platform; panics if ever called. */
boolean_t
ml_probe_read(
	__unused vm_offset_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read() unimplemented");
	return 1;
}
798 
/* Unimplemented on this platform; panics if ever called. */
boolean_t
ml_probe_read_64(
	__unused addr64_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read_64() unimplemented");
	return 1;
}
807 
808 
/* Intentional no-op on this platform (all arguments ignored). */
void
ml_thread_policy(
	__unused thread_t thread,
	__unused unsigned policy_id,
	__unused unsigned policy_info)
{
	//    <rdar://problem/7141284>: Reduce print noise
	//	kprintf("ml_thread_policy() unimplemented\n");
}
818 
/* Common panic stub for unimplemented interfaces; never returns. */
__dead2
void
panic_unimplemented(void)
{
	panic("Not yet implemented.");
}
825 
826 /* ARM64_TODO <rdar://problem/9198953> */
827 void abort(void) __dead2;
828 
/* Minimal kernel abort(): panics and never returns (declared __dead2 above). */
void
abort(void)
{
	panic("Abort.");
}
834 
835 
#if !MACH_KDP
/* Stub when the kernel debugger (KDP) is not built in: callout
 * registrations are silently ignored. */
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
#pragma unused(fn,arg)
}
#endif
843 
844 /*
845  * Get a quick virtual mapping of a physical page and run a callback on that
846  * page's virtual address.
847  *
848  * @param dst64 Physical address to access (doesn't need to be page-aligned).
849  * @param bytes Number of bytes to be accessed. This cannot cross page boundaries.
850  * @param func Callback function to call with the page's virtual address.
851  * @param arg Argument passed directly to `func`.
852  *
853  * @return The return value from `func`.
854  */
855 int
apply_func_phys(addr64_t dst64,vm_size_t bytes,int (* func)(void * buffer,vm_size_t bytes,void * arg),void * arg)856 apply_func_phys(
857 	addr64_t dst64,
858 	vm_size_t bytes,
859 	int (*func)(void * buffer, vm_size_t bytes, void * arg),
860 	void * arg)
861 {
862 	/* The physical aperture is only guaranteed to work with kernel-managed addresses. */
863 	if (!pmap_valid_address(dst64)) {
864 		panic("%s address error: passed in address (%#llx) not a kernel managed address",
865 		    __FUNCTION__, dst64);
866 	}
867 
868 	/* Ensure we stay within a single page */
869 	if (((((uint32_t)dst64 & (ARM_PGBYTES - 1)) + bytes) > ARM_PGBYTES)) {
870 		panic("%s alignment error: tried accessing addresses spanning more than one page %#llx %#lx",
871 		    __FUNCTION__, dst64, bytes);
872 	}
873 
874 	return func((void*)phystokv(dst64), bytes, arg);
875 }
876