xref: /xnu-12377.61.12/osfmk/i386/phys.c (revision 4d495c6e23c53686cf65f45067f79024cf5dcee8)
1 /*
2  * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 
57 #include <mach_ldebug.h>
58 
59 #include <sys/kdebug.h>
60 
61 #include <mach/kern_return.h>
62 #include <mach/thread_status.h>
63 #include <mach/vm_param.h>
64 
65 #include <kern/mach_param.h>
66 #include <kern/task.h>
67 #include <kern/thread.h>
68 #include <kern/sched_prim.h>
69 #include <kern/misc_protos.h>
70 #include <kern/assert.h>
71 #include <kern/spl.h>
72 #include <ipc/ipc_port.h>
73 #include <vm/vm_kern.h>
74 #include <vm/vm_map.h>
75 #include <vm/pmap.h>
76 
77 #include <i386/cpu_data.h>
78 #include <i386/cpu_number.h>
79 #include <i386/thread.h>
80 #include <i386/eflags.h>
81 #include <i386/proc_reg.h>
82 #include <i386/seg.h>
83 #include <i386/tss.h>
84 #include <i386/user_ldt.h>
85 #include <i386/fpu.h>
86 #include <i386/misc_protos.h>
87 
88 /*
89  *	pmap_zero_page zeros the specified (machine independent) page.
90  */
91 void
pmap_zero_page(ppnum_t pn)92 pmap_zero_page(
93 	ppnum_t pn)
94 {
95 	assert(pn != vm_page_fictitious_addr);
96 	assert(pn != vm_page_guard_addr);
97 	bzero_phys((addr64_t)i386_ptob(pn), PAGE_SIZE);
98 }
99 
100 void
pmap_zero_page_with_options(ppnum_t pn,__unused int options)101 pmap_zero_page_with_options(
102 	ppnum_t pn,
103 	__unused int options)
104 {
105 	pmap_zero_page(pn);
106 }
107 
108 /*
109  *	pmap_zero_part_page
110  *	zeros the specified (machine independent) part of a page.
111  */
112 void
pmap_zero_part_page(ppnum_t pn,vm_offset_t offset,vm_size_t len)113 pmap_zero_part_page(
114 	ppnum_t         pn,
115 	vm_offset_t     offset,
116 	vm_size_t       len)
117 {
118 	assert(pn != vm_page_fictitious_addr);
119 	assert(pn != vm_page_guard_addr);
120 	assert(offset + len <= PAGE_SIZE);
121 	bzero_phys((addr64_t)(i386_ptob(pn) + offset), (uint32_t)len);
122 }
123 
/*
 *	pmap_copy_part_page copies the specified (machine independent)
 *	part of a page.
 */
127 void
pmap_copy_part_page(ppnum_t psrc,vm_offset_t src_offset,ppnum_t pdst,vm_offset_t dst_offset,vm_size_t len)128 pmap_copy_part_page(
129 	ppnum_t         psrc,
130 	vm_offset_t     src_offset,
131 	ppnum_t         pdst,
132 	vm_offset_t     dst_offset,
133 	vm_size_t       len)
134 {
135 	pmap_paddr_t src, dst;
136 
137 	assert(psrc != vm_page_fictitious_addr);
138 	assert(pdst != vm_page_fictitious_addr);
139 	assert(psrc != vm_page_guard_addr);
140 	assert(pdst != vm_page_guard_addr);
141 
142 	src = i386_ptob(psrc);
143 	dst = i386_ptob(pdst);
144 
145 	assert((((uintptr_t)dst & PAGE_MASK) + dst_offset + len) <= PAGE_SIZE);
146 	assert((((uintptr_t)src & PAGE_MASK) + src_offset + len) <= PAGE_SIZE);
147 
148 	bcopy_phys((addr64_t)src + (src_offset & INTEL_OFFMASK),
149 	    (addr64_t)dst + (dst_offset & INTEL_OFFMASK),
150 	    len);
151 }
152 
153 /*
154  *      pmap_copy_part_lpage copies part of a virtually addressed page
155  *      to a physically addressed page.
156  */
157 void
pmap_copy_part_lpage(__unused vm_offset_t src,__unused ppnum_t pdst,__unused vm_offset_t dst_offset,__unused vm_size_t len)158 pmap_copy_part_lpage(
159 	__unused vm_offset_t    src,
160 	__unused ppnum_t        pdst,
161 	__unused vm_offset_t    dst_offset,
162 	__unused vm_size_t      len)
163 {
164 	assert(pdst != vm_page_fictitious_addr);
165 	assert(pdst != vm_page_guard_addr);
166 	assert((dst_offset + len) <= PAGE_SIZE);
167 }
168 
169 /*
170  *      pmap_copy_part_rpage copies part of a physically addressed page
171  *      to a virtually addressed page.
172  */
173 void
pmap_copy_part_rpage(__unused ppnum_t psrc,__unused vm_offset_t src_offset,__unused vm_offset_t dst,__unused vm_size_t len)174 pmap_copy_part_rpage(
175 	__unused ppnum_t                psrc,
176 	__unused vm_offset_t    src_offset,
177 	__unused vm_offset_t    dst,
178 	__unused vm_size_t      len)
179 {
180 	assert(psrc != vm_page_fictitious_addr);
181 	assert(psrc != vm_page_guard_addr);
182 	assert((src_offset + len) <= PAGE_SIZE);
183 }
184 
185 /*
186  *	kvtophys(addr)
187  *
188  *	Convert a kernel virtual address to a physical address
189  */
190 addr64_t
kvtophys(vm_offset_t addr)191 kvtophys(
192 	vm_offset_t addr)
193 {
194 	pmap_paddr_t pa;
195 
196 	pa = ((pmap_paddr_t)pmap_find_phys(kernel_pmap, addr)) << INTEL_PGSHIFT;
197 	if (pa) {
198 		pa |= (addr & INTEL_OFFMASK);
199 	}
200 
201 	return (addr64_t)pa;
202 }
203 
204 extern pt_entry_t *debugger_ptep;
205 extern vm_offset_t debugger_window_kva;
206 extern int _bcopy(const void *, void *, vm_size_t);
207 extern int _bcopy2(const void *, void *);
208 extern int _bcopy4(const void *, void *);
209 extern int _bcopy8(const void *, void *);
210 
/*
 * ml_copy_phys: copy `bytes` between two physical addresses, used by the
 * debugger.  At least one endpoint must lie inside the kernel physical map;
 * an endpoint outside it is reached through a dedicated cache-inhibited
 * debugger window page.  Returns 0 on success or the nonzero error from the
 * underlying _bcopy* routine.  Runs with preemption disabled throughout.
 */
__private_extern__ int
ml_copy_phys(addr64_t src64, addr64_t dst64, vm_size_t bytes)
{
	void *src, *dst;
	int err = 0;

	/* Keep this CPU on the debugger window mapping established below. */
	mp_disable_preemption();
	addr64_t debug_pa = 0;

	/* If either destination or source are outside the
	 * physical map, establish a physical window onto the target frame.
	 */
	assert(physmap_enclosed(src64) || physmap_enclosed(dst64));

	if (physmap_enclosed(src64) == FALSE) {
		/* Source is outside the physmap: read it via the window. */
		src = (void *)(debugger_window_kva | (src64 & INTEL_OFFMASK));
		dst = PHYSMAP_PTOV(dst64);
		debug_pa = src64 & PG_FRAME;
	} else if (physmap_enclosed(dst64) == FALSE) {
		/* Destination is outside the physmap: write it via the window. */
		src = PHYSMAP_PTOV(src64);
		dst = (void *)(debugger_window_kva | (dst64 & INTEL_OFFMASK));
		debug_pa = dst64 & PG_FRAME;
	} else {
		/* Both endpoints are directly addressable through the physmap. */
		src = PHYSMAP_PTOV(src64);
		dst = PHYSMAP_PTOV(dst64);
	}
	/* DRK: debugger only routine, we don't bother checking for an
	 * identical mapping.
	 */
	if (debug_pa) {
		if (debugger_window_kva == 0) {
			panic("%s: invoked in non-debug mode", __FUNCTION__);
		}
		/* Establish a cache-inhibited physical window; some platforms
		 * may not cover arbitrary ranges with MTRRs
		 */
		pmap_store_pte(FALSE, debugger_ptep, debug_pa | INTEL_PTE_NCACHE | INTEL_PTE_RW | INTEL_PTE_REF | INTEL_PTE_MOD | INTEL_PTE_VALID);
		/* Flush stale translations so the new window PTE takes effect. */
		pmap_tlbi_range(0, ~0ULL, true, 0);
#if     DEBUG
		kprintf("Remapping debugger physical window at %p to 0x%llx\n", (void *)debugger_window_kva, debug_pa);
#endif
	}
	/* ensure we stay within a page */
	if (((((uint32_t)src64 & (I386_PGBYTES - 1)) + bytes) > I386_PGBYTES) || ((((uint32_t)dst64 & (I386_PGBYTES - 1)) + bytes) > I386_PGBYTES)) {
		panic("ml_copy_phys spans pages, src: 0x%llx, dst: 0x%llx", src64, dst64);
	}

	/*
	 * For device register access from the debugger,
	 * 2-byte/16-bit, 4-byte/32-bit and 8-byte/64-bit copies are handled
	 * by assembly routines ensuring the required access widths.
	 * 1-byte and other copies are handled by the regular _bcopy.
	 */
	switch (bytes) {
	case 2:
		err = _bcopy2(src, dst);
		break;
	case 4:
		err = _bcopy4(src, dst);
		break;
	case 8:
		err = _bcopy8(src, dst);
		break;
	case 1:
	default:
		err = _bcopy(src, dst, bytes);
		break;
	}

	mp_enable_preemption();

	return err;
}
284