/*
 * Copyright (c) 2000-2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <vm/pmap.h>
#include <kern/ledger.h>
#include <i386/pmap_internal.h>


/*
 *	Each entry in the pv_head_table is locked by a bit in the
 *	pv_lock_table.  The lock bits are accessed by the physical
 *	address of the page they lock.
 */

char    *pv_lock_table;         /* pointer to array of bits */
char    *pv_hash_lock_table;
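
/*
 * Added commentary (not in the original source): one plausible shape of the
 * per-page bit lock, assuming one lock bit per physical page index (pai).
 * The real LOCK_PVH()/UNLOCK_PVH() macros are defined in i386/pmap_internal.h
 * and may differ in detail, so treat this as a sketch:
 *
 *	char *byte = &pv_lock_table[pai >> 3];
 *	char  mask = (char)(1 << (pai & 7));
 *	// acquire: atomically test-and-set (*byte & mask), spinning until clear
 *	// release: atomically clear the bit
 */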

pv_rooted_entry_t       pv_head_table;          /* array of entries, one per
                                                 * page */
uint32_t                        pv_hashed_free_count = 0;
uint32_t                        pv_hashed_kern_free_count = 0;

pmap_pagetable_corruption_record_t pmap_pagetable_corruption_records[PMAP_PAGETABLE_CORRUPTION_MAX_LOG];
uint32_t pmap_pagetable_corruption_incidents;
uint64_t pmap_pagetable_corruption_last_abstime = (~(0ULL) >> 1);
uint64_t pmap_pagetable_corruption_interval_abstime;
thread_call_t   pmap_pagetable_corruption_log_call;
static thread_call_data_t       pmap_pagetable_corruption_log_call_data;
boolean_t pmap_pagetable_corruption_timeout = FALSE;

volatile uint32_t       mappingrecurse = 0;

uint32_t  pv_hashed_low_water_mark, pv_hashed_kern_low_water_mark, pv_hashed_alloc_chunk, pv_hashed_kern_alloc_chunk;

thread_t mapping_replenish_thread;
event_t mapping_replenish_event, pmap_user_pv_throttle_event;

uint64_t pmap_pv_throttle_stat, pmap_pv_throttled_waiters;

int pmap_asserts_enabled = (DEBUG);
int pmap_asserts_traced = 0;

unsigned int
pmap_cache_attributes(ppnum_t pn)
{
	int cacheattr = pmap_get_cache_attributes(pn, FALSE);

	if (cacheattr & INTEL_PTE_NCACHE) {
		if (cacheattr & INTEL_PTE_PAT) {
			/* WC */
			return VM_WIMG_WCOMB;
		}
		return VM_WIMG_IO;
	} else {
		return VM_WIMG_COPYBACK;
	}
}
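
/*
 * Added summary of the decode above, derived directly from the code:
 *
 *	NCACHE	PAT	result
 *	  0	 x	VM_WIMG_COPYBACK  (write-back, the default)
 *	  1	 0	VM_WIMG_IO        (uncacheable)
 *	  1	 1	VM_WIMG_WCOMB     (write-combined)
 */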

void
pmap_set_cache_attributes(ppnum_t pn, unsigned int cacheattr)
{
	unsigned int current, template = 0;
	int pai;

	if (cacheattr & VM_MEM_NOT_CACHEABLE) {
		if (!(cacheattr & VM_MEM_GUARDED)) {
			template |= PHYS_PAT;
		}
		template |= PHYS_NCACHE;
	}

	pmap_intr_assert();

	assert((pn != vm_page_fictitious_addr) && (pn != vm_page_guard_addr));

	pai = ppn_to_pai(pn);

	if (!IS_MANAGED_PAGE(pai)) {
		return;
	}

	/* Override the cache attributes for this physical page.
	 * This does not walk existing mappings to adjust them;
	 * it assumes the page is disconnected.
	 */

	LOCK_PVH(pai);

	pmap_update_cache_attributes_locked(pn, template);

	current = pmap_phys_attributes[pai] & PHYS_CACHEABILITY_MASK;
	pmap_phys_attributes[pai] &= ~PHYS_CACHEABILITY_MASK;
	pmap_phys_attributes[pai] = pmap_phys_attributes[pai] | (char)template;

	UNLOCK_PVH(pai);

	if ((template & PHYS_NCACHE) && !(current & PHYS_NCACHE)) {
		pmap_sync_page_attributes_phys(pn);
	}
}
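
/*
 * Added commentary: the template built above is the inverse of the decode in
 * pmap_cache_attributes(). A hypothetical caller might request, e.g.:
 *
 *	// write-combined (PAT + NCACHE), e.g. for a frame-buffer page:
 *	pmap_set_cache_attributes(pn, VM_MEM_NOT_CACHEABLE);
 *	// strongly uncacheable (NCACHE only):
 *	pmap_set_cache_attributes(pn, VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED);
 */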

unsigned
pmap_get_cache_attributes(ppnum_t pn, boolean_t is_ept)
{
	if (last_managed_page == 0) {
		return 0;
	}

	if (!IS_MANAGED_PAGE(ppn_to_pai(pn))) {
		return PTE_NCACHE(is_ept);
	}

	/*
	 * The cache attributes are read locklessly for efficiency.
	 */
	unsigned int attr = pmap_phys_attributes[ppn_to_pai(pn)];
	unsigned int template = 0;

	/*
	 * The PTA bit is currently unsupported for EPT PTEs.
	 */
	if ((attr & PHYS_PAT) && !is_ept) {
		template |= INTEL_PTE_PAT;
	}

	/*
	 * If the page isn't marked as NCACHE, the default for EPT entries
	 * is WB.
	 */
	if (attr & PHYS_NCACHE) {
		template |= PTE_NCACHE(is_ept);
	} else if (is_ept) {
		template |= INTEL_EPT_WB;
	}

	return template;
}

boolean_t
pmap_has_managed_page(ppnum_t first, ppnum_t last)
{
	ppnum_t     pn, kdata_start, kdata_end;
	boolean_t   result;
	boot_args * args;

	args        = (boot_args *) PE_state.bootArgs;

	// Allow pages that the booter added to the end of the kernel.
	// We may miss reporting some pages in this range that were freed
	// with ml_static_free()
	kdata_start = atop_32(args->kaddr);
	kdata_end   = atop_32(args->kaddr + args->ksize);

	assert(last_managed_page);
	assert(first <= last);

	for (result = FALSE, pn = first;
	    !result
	    && (pn <= last)
	    && (pn <= last_managed_page);
	    pn++) {
		if ((pn >= kdata_start) && (pn < kdata_end)) {
			continue;
		}
		result = (0 != (pmap_phys_attributes[pn] & PHYS_MANAGED));
	}

	return result;
}

boolean_t
pmap_is_noencrypt(ppnum_t pn)
{
	int             pai;

	pai = ppn_to_pai(pn);

	if (!IS_MANAGED_PAGE(pai)) {
		return FALSE;
	}

	if (pmap_phys_attributes[pai] & PHYS_NOENCRYPT) {
		return TRUE;
	}

	return FALSE;
}


void
pmap_set_noencrypt(ppnum_t pn)
{
	int             pai;

	pai = ppn_to_pai(pn);

	if (IS_MANAGED_PAGE(pai)) {
		LOCK_PVH(pai);

		pmap_phys_attributes[pai] |= PHYS_NOENCRYPT;

		UNLOCK_PVH(pai);
	}
}


void
pmap_clear_noencrypt(ppnum_t pn)
{
	int             pai;

	pai = ppn_to_pai(pn);

	if (IS_MANAGED_PAGE(pai)) {
		/*
		 * Synchronization at the VM layer prevents PHYS_NOENCRYPT
		 * from changing state, so we don't need the lock to inspect it.
		 */
		if (pmap_phys_attributes[pai] & PHYS_NOENCRYPT) {
			LOCK_PVH(pai);

			pmap_phys_attributes[pai] &= ~PHYS_NOENCRYPT;

			UNLOCK_PVH(pai);
		}
	}
}
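
/*
 * Added commentary on the lockless test above: since the VM layer serializes
 * PHYS_NOENCRYPT transitions, the unlocked read merely avoids taking the PV
 * lock when the bit is already clear; the clear itself still happens under
 * LOCK_PVH().
 */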

void
compute_pmap_gc_throttle(void *arg __unused)
{
}


void
pmap_lock_phys_page(ppnum_t pn)
{
	int             pai;

	pai = ppn_to_pai(pn);

	if (IS_MANAGED_PAGE(pai)) {
		LOCK_PVH(pai);
	} else {
		simple_lock(&phys_backup_lock, LCK_GRP_NULL);
	}
}


void
pmap_unlock_phys_page(ppnum_t pn)
{
	int             pai;

	pai = ppn_to_pai(pn);

	if (IS_MANAGED_PAGE(pai)) {
		UNLOCK_PVH(pai);
	} else {
		simple_unlock(&phys_backup_lock);
	}
}



__private_extern__ void
pmap_pagetable_corruption_msg_log(int (*log_func)(const char * fmt, ...)__printflike(1, 2))
{
	if (pmap_pagetable_corruption_incidents > 0) {
		int i, j, e = MIN(pmap_pagetable_corruption_incidents, PMAP_PAGETABLE_CORRUPTION_MAX_LOG);
		(*log_func)("%u pagetable corruption incident(s) detected, timeout: %u\n", pmap_pagetable_corruption_incidents, pmap_pagetable_corruption_timeout);
		for (i = 0; i < e; i++) {
			(*log_func)("Incident 0x%x, reason: 0x%x, action: 0x%x, time: 0x%llx\n",
			    pmap_pagetable_corruption_records[i].incident,
			    pmap_pagetable_corruption_records[i].reason,
			    pmap_pagetable_corruption_records[i].action,
			    pmap_pagetable_corruption_records[i].abstime);

			if (pmap_pagetable_corruption_records[i].adj_ptes_count > 0) {
				for (j = 0; j < pmap_pagetable_corruption_records[i].adj_ptes_count; j++) {
					(*log_func)("\tAdjacent PTE[%d] = 0x%llx\n", j,
					    pmap_pagetable_corruption_records[i].adj_ptes[j]);
				}
			}
		}
	}
}

static inline void
pmap_pagetable_corruption_log_setup(void)
{
	if (pmap_pagetable_corruption_log_call == NULL) {
		nanotime_to_absolutetime(PMAP_PAGETABLE_CORRUPTION_INTERVAL, 0, &pmap_pagetable_corruption_interval_abstime);
		thread_call_setup(&pmap_pagetable_corruption_log_call_data,
		    (thread_call_func_t) pmap_pagetable_corruption_msg_log,
		    (thread_call_param_t) &printf);
		pmap_pagetable_corruption_log_call = &pmap_pagetable_corruption_log_call_data;
	}
}
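
/*
 * Illustrative sketch (added commentary): once set up, the thread call is
 * typically armed from the corruption handler with a deadline derived from
 * the interval computed above, along these lines:
 *
 *	uint64_t deadline;
 *	clock_deadline_for_periodic_event(pmap_pagetable_corruption_interval_abstime,
 *	    mach_absolute_time(), &deadline);
 *	thread_call_enter_delayed(pmap_pagetable_corruption_log_call, deadline);
 *
 * The actual arming code lives elsewhere in this tree (i386/pmap_internal.h),
 * so treat this as a sketch.
 */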

void
mapping_free_prime(void)
{
	unsigned                i;
	pv_hashed_entry_t       pvh_e;
	pv_hashed_entry_t       pvh_eh;
	pv_hashed_entry_t       pvh_et;
	int                     pv_cnt;

	/* Scale based on DRAM size */
	pv_hashed_low_water_mark = MAX(PV_HASHED_LOW_WATER_MARK_DEFAULT, ((uint32_t)(sane_size >> 30)) * 2000);
	pv_hashed_low_water_mark = MIN(pv_hashed_low_water_mark, 16000);
	/* Alterable via sysctl */
	pv_hashed_kern_low_water_mark = MAX(PV_HASHED_KERN_LOW_WATER_MARK_DEFAULT, ((uint32_t)(sane_size >> 30)) * 1000);
	pv_hashed_kern_low_water_mark = MIN(pv_hashed_kern_low_water_mark, 16000);
	pv_hashed_kern_alloc_chunk = PV_HASHED_KERN_ALLOC_CHUNK_INITIAL;
	pv_hashed_alloc_chunk = PV_HASHED_ALLOC_CHUNK_INITIAL;

	pv_cnt = 0;
	pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;

	for (i = 0; i < (5 * PV_HASHED_ALLOC_CHUNK_INITIAL); i++) {
		pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);

		pvh_e->qlink.next = (queue_entry_t)pvh_eh;
		pvh_eh = pvh_e;

		if (pvh_et == PV_HASHED_ENTRY_NULL) {
			pvh_et = pvh_e;
		}
		pv_cnt++;
	}
	PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pv_cnt);

	pv_cnt = 0;
	pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;
	for (i = 0; i < PV_HASHED_KERN_ALLOC_CHUNK_INITIAL; i++) {
		pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);

		pvh_e->qlink.next = (queue_entry_t)pvh_eh;
		pvh_eh = pvh_e;

		if (pvh_et == PV_HASHED_ENTRY_NULL) {
			pvh_et = pvh_e;
		}
		pv_cnt++;
	}
	PV_HASHED_KERN_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
}
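
/*
 * Added commentary: the loops above build a LIFO chain through qlink.next,
 * with pvh_eh tracking the head (most recently pushed entry) and pvh_et the
 * tail (first entry pushed). Passing head, tail, and count to
 * PV_HASHED_FREE_LIST()/PV_HASHED_KERN_FREE_LIST() lets the whole chain be
 * spliced onto the free list in one operation rather than entry by entry.
 */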

void mapping_replenish(void);

void
mapping_adjust(void)
{
	kern_return_t mres;

	pmap_pagetable_corruption_log_setup();

	mres = kernel_thread_start_priority((thread_continue_t)mapping_replenish, NULL, MAXPRI_KERNEL, &mapping_replenish_thread);
	if (mres != KERN_SUCCESS) {
		panic("pmap: mapping_replenish_thread creation failed");
	}
	thread_deallocate(mapping_replenish_thread);
}

unsigned pmap_mapping_thread_wakeups;
unsigned pmap_kernel_reserve_replenish_stat;
unsigned pmap_user_reserve_replenish_stat;
unsigned pmap_kern_reserve_alloc_stat;

__attribute__((noreturn))
void
mapping_replenish(void)
{
	pv_hashed_entry_t       pvh_e;
	pv_hashed_entry_t       pvh_eh;
	pv_hashed_entry_t       pvh_et;
	int                     pv_cnt;
	unsigned                i;

	/* We qualify for VM privileges... */
	current_thread()->options |= TH_OPT_VMPRIV;

	for (;;) {
		while (pv_hashed_kern_free_count < pv_hashed_kern_low_water_mark) {
			pv_cnt = 0;
			pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;

			for (i = 0; i < pv_hashed_kern_alloc_chunk; i++) {
				pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);
				pvh_e->qlink.next = (queue_entry_t)pvh_eh;
				pvh_eh = pvh_e;

				if (pvh_et == PV_HASHED_ENTRY_NULL) {
					pvh_et = pvh_e;
				}
				pv_cnt++;
			}
			pmap_kernel_reserve_replenish_stat += pv_cnt;
			PV_HASHED_KERN_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
		}

		pv_cnt = 0;
		pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL;

		if (pv_hashed_free_count < pv_hashed_low_water_mark) {
			for (i = 0; i < pv_hashed_alloc_chunk; i++) {
				pvh_e = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone);

				pvh_e->qlink.next = (queue_entry_t)pvh_eh;
				pvh_eh = pvh_e;

				if (pvh_et == PV_HASHED_ENTRY_NULL) {
					pvh_et = pvh_e;
				}
				pv_cnt++;
			}
			pmap_user_reserve_replenish_stat += pv_cnt;
			PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pv_cnt);
		}
		/* Wake threads throttled while the kernel reserve was being
		 * replenished.
		 */
		if (pmap_pv_throttled_waiters) {
			pmap_pv_throttled_waiters = 0;
			thread_wakeup(&pmap_user_pv_throttle_event);
		}
		/* Check if the kernel pool has been depleted since the
		 * first pass, to reduce refill latency.
		 */
		if (pv_hashed_kern_free_count < pv_hashed_kern_low_water_mark) {
			continue;
		}
		/* Block sans continuation to avoid yielding kernel stack */
		assert_wait(&mapping_replenish_event, THREAD_UNINT);
		mappingrecurse = 0;
		thread_block(THREAD_CONTINUE_NULL);
		pmap_mapping_thread_wakeups++;
	}
}
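
/*
 * Illustrative sketch (added commentary): consumers wake this thread using
 * mappingrecurse as a latch so that only the first thread to notice a low
 * reserve issues the wakeup. A plausible form of that check (the real one
 * lives in the PV allocation macros in i386/pmap_internal.h):
 *
 *	if (pv_hashed_kern_free_count < pv_hashed_kern_low_water_mark &&
 *	    OSCompareAndSwap(0, 1, &mappingrecurse)) {
 *		thread_wakeup(&mapping_replenish_event);
 *	}
 *
 * Clearing mappingrecurse before blocking above re-arms the latch.
 */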

/*
 *	Set specified attribute bits.
 */

void
phys_attribute_set(
	ppnum_t         pn,
	int             bits)
{
	int             pai;

	pmap_intr_assert();
	assert(pn != vm_page_fictitious_addr);
	if (pn == vm_page_guard_addr) {
		return;
	}

	pai = ppn_to_pai(pn);

	if (!IS_MANAGED_PAGE(pai)) {
		/* Not a managed page.  */
		return;
	}

	LOCK_PVH(pai);
	pmap_phys_attributes[pai] = pmap_phys_attributes[pai] | (char)bits;
	UNLOCK_PVH(pai);
}

/*
 *	Set the modify bit on the specified physical page.
 */

void
pmap_set_modify(ppnum_t pn)
{
	phys_attribute_set(pn, PHYS_MODIFIED);
}

/*
 *	Clear the modify bits on the specified physical page.
 */

void
pmap_clear_modify(ppnum_t pn)
{
	phys_attribute_clear(pn, PHYS_MODIFIED, 0, NULL);
}

/*
 *	pmap_is_modified:
 *
 *	Return whether or not the specified physical page is modified
 *	by any physical maps.
 */

boolean_t
pmap_is_modified(ppnum_t pn)
{
	if (phys_attribute_test(pn, PHYS_MODIFIED)) {
		return TRUE;
	}
	return FALSE;
}


/*
 *	pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */

void
pmap_clear_reference(ppnum_t pn)
{
	phys_attribute_clear(pn, PHYS_REFERENCED, 0, NULL);
}

void
pmap_set_reference(ppnum_t pn)
{
	phys_attribute_set(pn, PHYS_REFERENCED);
}

/*
 *	pmap_is_referenced:
 *
 *	Return whether or not the specified physical page is referenced
 *	by any physical maps.
 */

boolean_t
pmap_is_referenced(ppnum_t pn)
{
	if (phys_attribute_test(pn, PHYS_REFERENCED)) {
		return TRUE;
	}
	return FALSE;
}


/*
 * pmap_get_refmod(phys)
 *  returns the referenced and modified bits of the specified
 *  physical page.
 */
unsigned int
pmap_get_refmod(ppnum_t pn)
{
	int             refmod;
	unsigned int    retval = 0;

	refmod = phys_attribute_test(pn, PHYS_MODIFIED | PHYS_REFERENCED);

	if (refmod & PHYS_MODIFIED) {
		retval |= VM_MEM_MODIFIED;
	}
	if (refmod & PHYS_REFERENCED) {
		retval |= VM_MEM_REFERENCED;
	}

	return retval;
}


void
pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *arg)
{
	unsigned int  x86Mask;

	x86Mask = (((mask & VM_MEM_MODIFIED) ? PHYS_MODIFIED : 0)
	    | ((mask & VM_MEM_REFERENCED) ? PHYS_REFERENCED : 0));

	phys_attribute_clear(pn, x86Mask, options, arg);
}

/*
 * pmap_clear_refmod(phys, mask)
 *  clears the referenced and modified bits as specified by the mask
 *  of the specified physical page.
 */
void
pmap_clear_refmod(ppnum_t pn, unsigned int mask)
{
	unsigned int  x86Mask;

	x86Mask = (((mask & VM_MEM_MODIFIED) ? PHYS_MODIFIED : 0)
	    | ((mask & VM_MEM_REFERENCED) ? PHYS_REFERENCED : 0));

	phys_attribute_clear(pn, x86Mask, 0, NULL);
}

unsigned int
pmap_disconnect(ppnum_t pa)
{
	return pmap_disconnect_options(pa, 0, NULL);
}

/*
 *	Routine:
 *		pmap_disconnect_options
 *
 *	Function:
 *		Disconnect all mappings for this page and return reference and change
 *		status in generic format.
 *
 */
unsigned int
pmap_disconnect_options(ppnum_t pa, unsigned int options, void *arg)
{
	unsigned refmod, vmrefmod = 0;

	pmap_page_protect_options(pa, 0, options, arg);         /* disconnect the page */

	pmap_assert(pa != vm_page_fictitious_addr);
	if ((pa == vm_page_guard_addr) || !IS_MANAGED_PAGE(pa) || (options & PMAP_OPTIONS_NOREFMOD)) {
		return 0;
	}
	refmod = pmap_phys_attributes[pa] & (PHYS_MODIFIED | PHYS_REFERENCED);

	if (refmod & PHYS_MODIFIED) {
		vmrefmod |= VM_MEM_MODIFIED;
	}
	if (refmod & PHYS_REFERENCED) {
		vmrefmod |= VM_MEM_REFERENCED;
	}

	return vmrefmod;
}
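
/*
 * Illustrative usage sketch (added commentary, not in the original source):
 * the VM layer typically disconnects a page and collects its R/M state in a
 * single step before reclaiming it, e.g.:
 *
 *	unsigned int refmod = pmap_disconnect(pn);
 *	if (refmod & VM_MEM_MODIFIED) {
 *		// the page is dirty and must be cleaned before reuse
 *	}
 */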
655