xref: /xnu-11215.1.10/osfmk/i386/pmap_common.c (revision 8d741a5de7ff4191bf97d57b9f54c2f6d4a15585)
1 /*
2  * Copyright (c) 2000-2022 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #include <vm/pmap.h>
29 #include <kern/ledger.h>
30 #include <i386/pmap_internal.h>
31 
32 
33 /*
34  *	Each entry in the pv_head_table is locked by a bit in the
35  *	pv_lock_table.  The lock bits are accessed by the physical
36  *	address of the page they lock.
37  */
38 
char    *pv_lock_table;         /* pointer to array of bits */
char    *pv_hash_lock_table;    /* lock bits for the pv hash buckets -- see header comment above */

pv_rooted_entry_t       pv_head_table;          /* array of entries, one per
                                                 * page */
/* Current depth of the user and kernel PV hashed-entry free lists. */
uint32_t                        pv_hashed_free_count = 0;
uint32_t                        pv_hashed_kern_free_count = 0;

/* Log of recent pagetable corruption incidents, flushed via the
 * thread call set up in pmap_pagetable_corruption_log_setup(). */
pmap_pagetable_corruption_record_t pmap_pagetable_corruption_records[PMAP_PAGETABLE_CORRUPTION_MAX_LOG];
uint32_t pmap_pagetable_corruption_incidents;
/* Starts at the maximum positive 64-bit value, i.e. "no incident yet". */
uint64_t pmap_pagetable_corruption_last_abstime = (~(0ULL) >> 1);
uint64_t pmap_pagetable_corruption_interval_abstime;
thread_call_t   pmap_pagetable_corruption_log_call;
static thread_call_data_t       pmap_pagetable_corruption_log_call_data;
boolean_t pmap_pagetable_corruption_timeout = FALSE;

/* Wakeup guard for the replenish thread; cleared by mapping_replenish()
 * just before it blocks (see assert_wait there). */
volatile uint32_t       mappingrecurse = 0;

/* Low-water marks and allocation chunk sizes for the PV entry pools;
 * initialized/scaled from DRAM size in mapping_free_prime(). */
uint32_t  pv_hashed_low_water_mark, pv_hashed_kern_low_water_mark, pv_hashed_alloc_chunk, pv_hashed_kern_alloc_chunk;

thread_t mapping_replenish_thread;
event_t mapping_replenish_event, pmap_user_pv_throttle_event;

/* Statistics for PV-allocation throttling of user mappings. */
uint64_t pmap_pv_throttle_stat, pmap_pv_throttled_waiters;

int pmap_asserts_enabled = (DEBUG);
int pmap_asserts_traced = 0;
66 
67 unsigned int
pmap_cache_attributes(ppnum_t pn)68 pmap_cache_attributes(ppnum_t pn)
69 {
70 	int cacheattr = pmap_get_cache_attributes(pn, FALSE);
71 
72 	if (cacheattr & INTEL_PTE_NCACHE) {
73 		if (cacheattr & INTEL_PTE_PAT) {
74 			/* WC */
75 			return VM_WIMG_WCOMB;
76 		}
77 		return VM_WIMG_IO;
78 	} else {
79 		return VM_WIMG_COPYBACK;
80 	}
81 }
82 
83 void
pmap_batch_set_cache_attributes(const unified_page_list_t * page_list,unsigned int cacheattr)84 pmap_batch_set_cache_attributes(
85 	const unified_page_list_t *page_list,
86 	unsigned int cacheattr)
87 {
88 	unified_page_list_iterator_t iter;
89 
90 	for (unified_page_list_iterator_init(page_list, &iter);
91 	    !unified_page_list_iterator_end(&iter);
92 	    unified_page_list_iterator_next(&iter)) {
93 		bool is_fictitious = false;
94 		const ppnum_t pn = unified_page_list_iterator_page(&iter, &is_fictitious);
95 
96 		if (__probable(!is_fictitious)) {
97 			pmap_set_cache_attributes(pn, cacheattr);
98 		}
99 	}
100 }
101 
/*
 * Override the recorded cacheability attributes of managed physical
 * page 'pn' from the generic VM_MEM_* flags in 'cacheattr', and update
 * its page-table state under the PV lock.  See the inline comment
 * below: the caller is assumed to have disconnected the page.
 */
void
pmap_set_cache_attributes(ppnum_t pn, unsigned int cacheattr)
{
	unsigned int current, template = 0;
	int pai;

	/* Build the PHYS_* template: NOT_CACHEABLE alone maps to
	 * PAT+NCACHE (write-combined), NOT_CACHEABLE|GUARDED to NCACHE
	 * only (cf. the reverse mapping in pmap_cache_attributes()). */
	if (cacheattr & VM_MEM_NOT_CACHEABLE) {
		if (!(cacheattr & VM_MEM_GUARDED)) {
			template |= PHYS_PAT;
		}
		template |= PHYS_NCACHE;
	}

	pmap_intr_assert();

	assert((pn != vm_page_fictitious_addr) && (pn != vm_page_guard_addr));

	pai = ppn_to_pai(pn);

	/* Unmanaged pages carry no tracked attributes; nothing to do. */
	if (!IS_MANAGED_PAGE(pai)) {
		return;
	}

	/* override cache attributes for this phys page
	 * Does not walk through existing mappings to adjust,
	 * assumes page is disconnected
	 */

	LOCK_PVH(pai);

	pmap_update_cache_attributes_locked(pn, template);

	/* Capture the previous cacheability before replacing it. */
	current = pmap_phys_attributes[pai] & PHYS_CACHEABILITY_MASK;
	pmap_phys_attributes[pai] &= ~PHYS_CACHEABILITY_MASK;
	pmap_phys_attributes[pai] = pmap_phys_attributes[pai] | (char)template;

	UNLOCK_PVH(pai);

	/* Cacheable -> uncached transition: synchronize the page's cache
	 * contents with memory (pmap_sync_page_attributes_phys). */
	if ((template & PHYS_NCACHE) && !(current & PHYS_NCACHE)) {
		pmap_sync_page_attributes_phys(pn);
	}
}
144 
/*
 * Return the PTE (or EPT, when 'is_ept') cacheability template bits
 * recorded for physical page 'pn' in pmap_phys_attributes.
 */
unsigned
pmap_get_cache_attributes(ppnum_t pn, boolean_t is_ept)
{
	/* Before any managed pages are registered, report no attributes. */
	if (last_managed_page == 0) {
		return 0;
	}

	/* Unmanaged (e.g. device) pages are always treated as uncached. */
	if (!IS_MANAGED_PAGE(ppn_to_pai(pn))) {
		return PTE_NCACHE(is_ept);
	}

	/*
	 * The cache attributes are read locklessly for efficiency.
	 */
	unsigned int attr = pmap_phys_attributes[ppn_to_pai(pn)];
	unsigned int template = 0;

	/*
	 * The PTA bit is currently unsupported for EPT PTEs.
	 */
	if ((attr & PHYS_PAT) && !is_ept) {
		template |= INTEL_PTE_PAT;
	}

	/*
	 * If the page isn't marked as NCACHE, the default for EPT entries
	 * is WB.
	 */
	if (attr & PHYS_NCACHE) {
		template |= PTE_NCACHE(is_ept);
	} else if (is_ept) {
		template |= INTEL_EPT_WB;
	}

	return template;
}
181 
182 boolean_t
pmap_has_managed_page(ppnum_t first,ppnum_t last)183 pmap_has_managed_page(ppnum_t first, ppnum_t last)
184 {
185 	ppnum_t     pn, kdata_start, kdata_end;
186 	boolean_t   result;
187 	boot_args * args;
188 
189 	args        = (boot_args *) PE_state.bootArgs;
190 
191 	// Allow pages that the booter added to the end of the kernel.
192 	// We may miss reporting some pages in this range that were freed
193 	// with ml_static_free()
194 	kdata_start = atop_32(args->kaddr);
195 	kdata_end   = atop_32(args->kaddr + args->ksize);
196 
197 	assert(last_managed_page);
198 	assert(first <= last);
199 
200 	for (result = FALSE, pn = first;
201 	    !result
202 	    && (pn <= last)
203 	    && (pn <= last_managed_page);
204 	    pn++) {
205 		if ((pn >= kdata_start) && (pn < kdata_end)) {
206 			continue;
207 		}
208 		result = (0 != (pmap_phys_attributes[pn] & PHYS_MANAGED));
209 	}
210 
211 	return result;
212 }
213 
214 boolean_t
pmap_is_noencrypt(ppnum_t pn)215 pmap_is_noencrypt(ppnum_t pn)
216 {
217 	int             pai;
218 
219 	pai = ppn_to_pai(pn);
220 
221 	if (!IS_MANAGED_PAGE(pai)) {
222 		return FALSE;
223 	}
224 
225 	if (pmap_phys_attributes[pai] & PHYS_NOENCRYPT) {
226 		return TRUE;
227 	}
228 
229 	return FALSE;
230 }
231 
232 
233 void
pmap_set_noencrypt(ppnum_t pn)234 pmap_set_noencrypt(ppnum_t pn)
235 {
236 	int             pai;
237 
238 	pai = ppn_to_pai(pn);
239 
240 	if (IS_MANAGED_PAGE(pai)) {
241 		LOCK_PVH(pai);
242 
243 		pmap_phys_attributes[pai] |= PHYS_NOENCRYPT;
244 
245 		UNLOCK_PVH(pai);
246 	}
247 }
248 
249 
250 void
pmap_clear_noencrypt(ppnum_t pn)251 pmap_clear_noencrypt(ppnum_t pn)
252 {
253 	int             pai;
254 
255 	pai = ppn_to_pai(pn);
256 
257 	if (IS_MANAGED_PAGE(pai)) {
258 		/*
259 		 * synchronization at VM layer prevents PHYS_NOENCRYPT
260 		 * from changing state, so we don't need the lock to inspect
261 		 */
262 		if (pmap_phys_attributes[pai] & PHYS_NOENCRYPT) {
263 			LOCK_PVH(pai);
264 
265 			pmap_phys_attributes[pai] &= ~PHYS_NOENCRYPT;
266 
267 			UNLOCK_PVH(pai);
268 		}
269 	}
270 }
271 
/*
 * Intentionally empty stub: pmap garbage-collection throttling is not
 * implemented here.  NOTE(review): presumably retained to satisfy an
 * external interface -- confirm against callers before removing.
 */
void
compute_pmap_gc_throttle(void *arg __unused)
{
}
276 
277 
278 void
pmap_lock_phys_page(ppnum_t pn)279 pmap_lock_phys_page(ppnum_t pn)
280 {
281 	int             pai;
282 
283 	pai = ppn_to_pai(pn);
284 
285 	if (IS_MANAGED_PAGE(pai)) {
286 		LOCK_PVH(pai);
287 	} else {
288 		simple_lock(&phys_backup_lock, LCK_GRP_NULL);
289 	}
290 }
291 
292 
293 void
pmap_unlock_phys_page(ppnum_t pn)294 pmap_unlock_phys_page(ppnum_t pn)
295 {
296 	int             pai;
297 
298 	pai = ppn_to_pai(pn);
299 
300 	if (IS_MANAGED_PAGE(pai)) {
301 		UNLOCK_PVH(pai);
302 	} else {
303 		simple_unlock(&phys_backup_lock);
304 	}
305 }
306 
307 
308 
309 __private_extern__ void
310 pmap_pagetable_corruption_msg_log(int (*log_func)(const char * fmt, ...)__printflike(1, 2))
311 {
312 	if (pmap_pagetable_corruption_incidents > 0) {
313 		int i, j, e = MIN(pmap_pagetable_corruption_incidents, PMAP_PAGETABLE_CORRUPTION_MAX_LOG);
314 		(*log_func)("%u pagetable corruption incident(s) detected, timeout: %u\n", pmap_pagetable_corruption_incidents, pmap_pagetable_corruption_timeout);
315 		for (i = 0; i < e; i++) {
316 			(*log_func)("Incident 0x%x, reason: 0x%x, action: 0x%x, time: 0x%llx\n",
317 			    pmap_pagetable_corruption_records[i].incident,
318 			    pmap_pagetable_corruption_records[i].reason,
319 			    pmap_pagetable_corruption_records[i].action,
320 			    pmap_pagetable_corruption_records[i].abstime);
321 
322 			if (pmap_pagetable_corruption_records[i].adj_ptes_count > 0) {
323 				for (j = 0; j < pmap_pagetable_corruption_records[i].adj_ptes_count; j++) {
324 					(*log_func)("\tAdjacent PTE[%d] = 0x%llx\n", j,
325 					    pmap_pagetable_corruption_records[i].adj_ptes[j]);
326 				}
327 			}
328 		}
329 	}
330 }
331 
/*
 * One-time setup of the thread call used to flush the pagetable
 * corruption log through printf.  Idempotent: initializes only while
 * pmap_pagetable_corruption_log_call is still NULL (invoked from
 * mapping_adjust()).
 */
static inline void
pmap_pagetable_corruption_log_setup(void)
{
	if (pmap_pagetable_corruption_log_call == NULL) {
		/* Convert the logging interval to absolute time units. */
		nanotime_to_absolutetime(PMAP_PAGETABLE_CORRUPTION_INTERVAL, 0, &pmap_pagetable_corruption_interval_abstime);
		thread_call_setup(&pmap_pagetable_corruption_log_call_data,
		    (thread_call_func_t) (void (*)(void))pmap_pagetable_corruption_msg_log,
		    (thread_call_param_t) &printf);
		pmap_pagetable_corruption_log_call = &pmap_pagetable_corruption_log_call_data;
	}
}
343 
344 void
mapping_free_prime(void)345 mapping_free_prime(void)
346 {
347 	unsigned                i;
348 	pv_hashed_entry_t       pvh_e;
349 	pv_hashed_entry_t       pvh_eh;
350 	pv_hashed_entry_t       pvh_et;
351 	int                     pv_cnt;
352 
353 	/* Scale based on DRAM size */
354 	pv_hashed_low_water_mark = MAX(PV_HASHED_LOW_WATER_MARK_DEFAULT, ((uint32_t)(sane_size >> 30)) * 2000);
355 	pv_hashed_low_water_mark = MIN(pv_hashed_low_water_mark, 16000);
356 	/* Alterable via sysctl */
357 	pv_hashed_kern_low_water_mark = MAX(PV_HASHED_KERN_LOW_WATER_MARK_DEFAULT, ((uint32_t)(sane_size >> 30)) * 1000);
358 	pv_hashed_kern_low_water_mark = MIN(pv_hashed_kern_low_water_mark, 16000);
359 	pv_hashed_kern_alloc_chunk = PV_HASHED_KERN_ALLOC_CHUNK_INITIAL;
360 	pv_hashed_alloc_chunk = PV_HASHED_ALLOC_CHUNK_INITIAL;
361 
362 	pv_cnt = 0;
363 	pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;
364 
365 	for (i = 0; i < (5 * PV_HASHED_ALLOC_CHUNK_INITIAL); i++) {
366 		pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
367 
368 		pvh_e->qlink.next = (queue_entry_t)pvh_eh;
369 		pvh_eh = pvh_e;
370 
371 		if (pvh_et == PV_HASHED_ENTRY_NULL) {
372 			pvh_et = pvh_e;
373 		}
374 		pv_cnt++;
375 	}
376 	PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
377 
378 	pv_cnt = 0;
379 	pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;
380 	for (i = 0; i < PV_HASHED_KERN_ALLOC_CHUNK_INITIAL; i++) {
381 		pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
382 
383 		pvh_e->qlink.next = (queue_entry_t)pvh_eh;
384 		pvh_eh = pvh_e;
385 
386 		if (pvh_et == PV_HASHED_ENTRY_NULL) {
387 			pvh_et = pvh_e;
388 		}
389 		pv_cnt++;
390 	}
391 	PV_HASHED_KERN_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
392 }
393 
/* Forward declaration: used as the thread continuation started by
 * mapping_adjust() below. */
void mapping_replenish(void);
395 
396 void
mapping_adjust(void)397 mapping_adjust(void)
398 {
399 	kern_return_t mres;
400 
401 	pmap_pagetable_corruption_log_setup();
402 
403 	mres = kernel_thread_start_priority((thread_continue_t)mapping_replenish, NULL, MAXPRI_KERNEL, &mapping_replenish_thread);
404 	if (mres != KERN_SUCCESS) {
405 		panic("pmap: mapping_replenish_thread creation failed");
406 	}
407 	thread_deallocate(mapping_replenish_thread);
408 }
409 
/* Statistics for the replenish thread and PV reserve consumption. */
unsigned pmap_mapping_thread_wakeups;           /* times the replenish thread woke */
unsigned pmap_kernel_reserve_replenish_stat;    /* entries added to the kernel pool */
unsigned pmap_user_reserve_replenish_stat;      /* entries added to the user pool */
unsigned pmap_kern_reserve_alloc_stat;
414 
/*
 * Body of the dedicated thread (started by mapping_adjust()) that keeps
 * the kernel and user PV hashed-entry free lists above their low-water
 * marks.  Never returns; blocks on mapping_replenish_event between
 * refill passes.
 */
__attribute__((noreturn))
void
mapping_replenish(void)
{
	pv_hashed_entry_t       pvh_e;
	pv_hashed_entry_t       pvh_eh;
	pv_hashed_entry_t       pvh_et;
	int                     pv_cnt;
	unsigned                i;

	/* We qualify for VM privileges...*/
	current_thread()->options |= TH_OPT_VMPRIV;

	for (;;) {
		/* Refill the kernel reserve first, looping until it clears
		 * its low-water mark. */
		while (pv_hashed_kern_free_count < pv_hashed_kern_low_water_mark) {
			pv_cnt = 0;
			pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;

			/* Chain freshly zalloc'ed entries head-first, keeping
			 * the tail for one batched free-list insertion. */
			for (i = 0; i < pv_hashed_kern_alloc_chunk; i++) {
				pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
				pvh_e->qlink.next = (queue_entry_t)pvh_eh;
				pvh_eh = pvh_e;

				if (pvh_et == PV_HASHED_ENTRY_NULL) {
					pvh_et = pvh_e;
				}
				pv_cnt++;
			}
			pmap_kernel_reserve_replenish_stat += pv_cnt;
			PV_HASHED_KERN_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
		}

		pv_cnt = 0;
		pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;

		/* Then top up the user pool -- one chunk per outer pass. */
		if (pv_hashed_free_count < pv_hashed_low_water_mark) {
			for (i = 0; i < pv_hashed_alloc_chunk; i++) {
				pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);

				pvh_e->qlink.next = (queue_entry_t)pvh_eh;
				pvh_eh = pvh_e;

				if (pvh_et == PV_HASHED_ENTRY_NULL) {
					pvh_et = pvh_e;
				}
				pv_cnt++;
			}
			pmap_user_reserve_replenish_stat += pv_cnt;
			PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
		}
/* Wake threads throttled while the kernel reserve was being replenished.
 */
		if (pmap_pv_throttled_waiters) {
			pmap_pv_throttled_waiters = 0;
			thread_wakeup(&pmap_user_pv_throttle_event);
		}
		/* Check if the kernel pool has been depleted since the
		 * first pass, to reduce refill latency.
		 */
		if (pv_hashed_kern_free_count < pv_hashed_kern_low_water_mark) {
			continue;
		}
		/* Block sans continuation to avoid yielding kernel stack */
		assert_wait(&mapping_replenish_event, THREAD_UNINT);
		/* Re-arm the wakeup guard before blocking. */
		mappingrecurse = 0;
		thread_block(THREAD_CONTINUE_NULL);
		pmap_mapping_thread_wakeups++;
	}
}
484 
485 /*
486  *	Set specified attribute bits.
487  */
488 
489 void
phys_attribute_set(ppnum_t pn,int bits)490 phys_attribute_set(
491 	ppnum_t         pn,
492 	int             bits)
493 {
494 	int             pai;
495 
496 	pmap_intr_assert();
497 	assert(pn != vm_page_fictitious_addr);
498 	if (pn == vm_page_guard_addr) {
499 		return;
500 	}
501 
502 	pai = ppn_to_pai(pn);
503 
504 	if (!IS_MANAGED_PAGE(pai)) {
505 		/* Not a managed page.  */
506 		return;
507 	}
508 
509 	LOCK_PVH(pai);
510 	pmap_phys_attributes[pai] = pmap_phys_attributes[pai] | (char)bits;
511 	UNLOCK_PVH(pai);
512 }
513 
/*
 *	Set the modify bit on the specified physical page.
 */

void
pmap_set_modify(ppnum_t pn)
{
	/* Thin wrapper over the generic attribute setter. */
	phys_attribute_set(pn, PHYS_MODIFIED);
}
523 
/*
 *	Clear the modify bits on the specified physical page.
 */

void
pmap_clear_modify(ppnum_t pn)
{
	/* No options, no arg: plain clear of the modified attribute. */
	phys_attribute_clear(pn, PHYS_MODIFIED, 0, NULL);
}
533 
534 /*
535  *	pmap_is_modified:
536  *
537  *	Return whether or not the specified physical page is modified
538  *	by any physical maps.
539  */
540 
541 boolean_t
pmap_is_modified(ppnum_t pn)542 pmap_is_modified(ppnum_t pn)
543 {
544 	if (phys_attribute_test(pn, PHYS_MODIFIED)) {
545 		return TRUE;
546 	}
547 	return FALSE;
548 }
549 
550 
/*
 *	pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */

void
pmap_clear_reference(ppnum_t pn)
{
	/* No options, no arg: plain clear of the referenced attribute. */
	phys_attribute_clear(pn, PHYS_REFERENCED, 0, NULL);
}
562 
/* Set the reference bit on the specified physical page. */
void
pmap_set_reference(ppnum_t pn)
{
	phys_attribute_set(pn, PHYS_REFERENCED);
}
568 
569 /*
570  *	pmap_is_referenced:
571  *
572  *	Return whether or not the specified physical page is referenced
573  *	by any physical maps.
574  */
575 
576 boolean_t
pmap_is_referenced(ppnum_t pn)577 pmap_is_referenced(ppnum_t pn)
578 {
579 	if (phys_attribute_test(pn, PHYS_REFERENCED)) {
580 		return TRUE;
581 	}
582 	return FALSE;
583 }
584 
585 
586 /*
587  * pmap_get_refmod(phys)
588  *  returns the referenced and modified bits of the specified
589  *  physical page.
590  */
591 unsigned int
pmap_get_refmod(ppnum_t pn)592 pmap_get_refmod(ppnum_t pn)
593 {
594 	int             refmod;
595 	unsigned int    retval = 0;
596 
597 	refmod = phys_attribute_test(pn, PHYS_MODIFIED | PHYS_REFERENCED);
598 
599 	if (refmod & PHYS_MODIFIED) {
600 		retval |= VM_MEM_MODIFIED;
601 	}
602 	if (refmod & PHYS_REFERENCED) {
603 		retval |= VM_MEM_REFERENCED;
604 	}
605 
606 	return retval;
607 }
608 
609 
610 void
pmap_clear_refmod_options(ppnum_t pn,unsigned int mask,unsigned int options,void * arg)611 pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *arg)
612 {
613 	unsigned int  x86Mask;
614 
615 	x86Mask = (((mask &   VM_MEM_MODIFIED)?   PHYS_MODIFIED : 0)
616 	    | ((mask & VM_MEM_REFERENCED)? PHYS_REFERENCED : 0));
617 
618 	phys_attribute_clear(pn, x86Mask, options, arg);
619 }
620 
621 /*
622  * pmap_clear_refmod(phys, mask)
623  *  clears the referenced and modified bits as specified by the mask
624  *  of the specified physical page.
625  */
626 void
pmap_clear_refmod(ppnum_t pn,unsigned int mask)627 pmap_clear_refmod(ppnum_t pn, unsigned int mask)
628 {
629 	unsigned int  x86Mask;
630 
631 	x86Mask = (((mask &   VM_MEM_MODIFIED)?   PHYS_MODIFIED : 0)
632 	    | ((mask & VM_MEM_REFERENCED)? PHYS_REFERENCED : 0));
633 
634 	phys_attribute_clear(pn, x86Mask, 0, NULL);
635 }
636 
/* Disconnect all mappings for 'pa' with default options; returns the
 * page's refmod state in VM_MEM_* form (see pmap_disconnect_options). */
unsigned int
pmap_disconnect(ppnum_t pa)
{
	return pmap_disconnect_options(pa, 0, NULL);
}
642 
/*
 *	Routine:
 *		pmap_disconnect_options
 *
 *	Function:
 *		Disconnect all mappings for this page and return reference and change status
 *		in generic format.
 *
 *	Returns 0 (no refmod reported) for guard pages, unmanaged pages,
 *	or when PMAP_OPTIONS_NOREFMOD is requested.
 */
unsigned int
pmap_disconnect_options(ppnum_t pa, unsigned int options, void *arg)
{
	unsigned refmod, vmrefmod = 0;

	pmap_page_protect_options(pa, 0, options, arg);         /* disconnect the page */

	pmap_assert(pa != vm_page_fictitious_addr);
	if ((pa == vm_page_guard_addr) || !IS_MANAGED_PAGE(pa) || (options & PMAP_OPTIONS_NOREFMOD)) {
		return 0;
	}
	/* Read the attributes without the PV lock; the page has just been
	 * disconnected above.  Note pa is used to index directly here
	 * (without ppn_to_pai) -- presumably an identity mapping on this
	 * platform; confirm against ppn_to_pai's definition. */
	refmod = pmap_phys_attributes[pa] & (PHYS_MODIFIED | PHYS_REFERENCED);

	if (refmod & PHYS_MODIFIED) {
		vmrefmod |= VM_MEM_MODIFIED;
	}
	if (refmod & PHYS_REFERENCED) {
		vmrefmod |= VM_MEM_REFERENCED;
	}

	return vmrefmod;
}
674