1 /*
2 * Copyright (c) 2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <vm/vm_page.h>
30 #include <vm/pmap.h>
31 #include <kern/ledger.h>
32 #include <kern/thread.h>
33 #if defined(__arm64__)
34 #include <pexpert/arm64/board_config.h>
35 #endif
36 #include <vm/vm_map.h>
37
38 extern ledger_template_t task_ledger_template;
39
40 extern boolean_t arm_force_fast_fault(ppnum_t, vm_prot_t, int, void*);
41 extern kern_return_t arm_fast_fault(pmap_t, vm_map_address_t, vm_prot_t, bool, bool);
42
43 kern_return_t test_pmap_enter_disconnect(unsigned int num_loops);
44 kern_return_t test_pmap_iommu_disconnect(void);
45 kern_return_t test_pmap_extended(void);
46 void test_pmap_call_overhead(unsigned int num_loops);
47 uint64_t test_pmap_page_protect_overhead(unsigned int num_loops, unsigned int num_aliases);
48
49 #define PMAP_TEST_VA (0xDEAD << PAGE_SHIFT)
50
/*
 * Shared state handed to the pmap_disconnect helper thread.
 * Allocated on the test thread's stack; the helper must not touch it
 * after signaling completion via thread_wakeup().
 */
typedef struct {
	pmap_t pmap;             /* pmap under test (not read by the disconnect thread itself) */
	volatile boolean_t stop; /* set by the test thread to ask the helper to exit its loop */
	ppnum_t pn;              /* physical page number the helper repeatedly disconnects */
} pmap_test_thread_args;
56
57 static pmap_t
pmap_create_wrapper(unsigned int flags)58 pmap_create_wrapper(unsigned int flags)
59 {
60 pmap_t new_pmap = NULL;
61 ledger_t ledger;
62 assert(task_ledger_template != NULL);
63 if ((ledger = ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES)) == NULL) {
64 return NULL;
65 }
66 new_pmap = pmap_create_options(ledger, 0, flags);
67 ledger_dereference(ledger);
68 return new_pmap;
69 }
70
71 static void
pmap_disconnect_thread(void * arg,wait_result_t __unused wres)72 pmap_disconnect_thread(void *arg, wait_result_t __unused wres)
73 {
74 pmap_test_thread_args *args = arg;
75 do {
76 pmap_disconnect(args->pn);
77 } while (!args->stop);
78 thread_wakeup((event_t)args);
79 }
80
/*
 * Stress-test racing pmap_enter() against a concurrent pmap_disconnect()
 * of the same physical page.  A helper thread disconnects the page in a
 * tight loop while this thread re-enters a mapping for it num_loops times.
 *
 * Returns KERN_SUCCESS on completion, KERN_FAILURE if the pmap or page
 * could not be allocated, or the error from kernel_thread_start().
 */
kern_return_t
test_pmap_enter_disconnect(unsigned int num_loops)
{
	kern_return_t kr = KERN_SUCCESS;
	thread_t disconnect_thread;
	pmap_t new_pmap = pmap_create_wrapper(0);
	if (new_pmap == NULL) {
		return KERN_FAILURE;
	}
	vm_page_t m = vm_page_grab();
	if (m == VM_PAGE_NULL) {
		pmap_destroy(new_pmap);
		return KERN_FAILURE;
	}
	ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
	pmap_test_thread_args args = {new_pmap, FALSE, phys_page};
	kern_return_t res = kernel_thread_start(pmap_disconnect_thread, &args, &disconnect_thread);
	if (res) {
		/* Helper thread never started: unwind the pmap and the grabbed page. */
		pmap_destroy(new_pmap);
		vm_page_lock_queues();
		vm_page_free(m);
		vm_page_unlock_queues();
		return res;
	}
	/* Drop our thread reference; the helper runs until args.stop is set. */
	thread_deallocate(disconnect_thread);

	while (num_loops-- != 0) {
		kr = pmap_enter(new_pmap, PMAP_TEST_VA, phys_page,
		    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/*
	 * Ordering matters here: assert_wait() must be registered BEFORE
	 * args.stop is set, so the helper's thread_wakeup() cannot fire
	 * before we are waiting and be lost.
	 */
	assert_wait((event_t)&args, THREAD_UNINT);
	args.stop = TRUE;
	thread_block(THREAD_CONTINUE_NULL);

	/* Helper has exited; safe to tear down the mapping, page, and pmap. */
	pmap_remove(new_pmap, PMAP_TEST_VA, PMAP_TEST_VA + PAGE_SIZE);
	vm_page_lock_queues();
	vm_page_free(m);
	vm_page_unlock_queues();
	pmap_destroy(new_pmap);
	return KERN_SUCCESS;
}
124
/*
 * Stub: IOMMU disconnect testing is not implemented in this
 * configuration; trivially reports success.
 */
kern_return_t
test_pmap_iommu_disconnect(void)
{
	return KERN_SUCCESS;
}
130
131
/*
 * Stub: no extended pmap tests in this configuration; trivially succeeds.
 */
kern_return_t
test_pmap_extended(void)
{
	return KERN_SUCCESS;
}
137
138 void
test_pmap_call_overhead(unsigned int num_loops __unused)139 test_pmap_call_overhead(unsigned int num_loops __unused)
140 {
141 #if defined(__arm__) || defined(__arm64__)
142 pmap_t pmap = current_thread()->map->pmap;
143 for (unsigned int i = 0; i < num_loops; ++i) {
144 pmap_nop(pmap);
145 }
146 #endif
147 }
148
/*
 * Measure the cumulative time spent in pmap_page_protect_options()
 * downgrading a page to read-only, with num_aliases mappings of the same
 * physical page installed per iteration, repeated num_loops times.
 *
 * Returns the total duration in mach_absolute_time() units; returns 0 on
 * non-ARM configurations or if setup fails.
 */
uint64_t
test_pmap_page_protect_overhead(unsigned int num_loops __unused, unsigned int num_aliases __unused)
{
	uint64_t duration = 0;
#if defined(__arm__) || defined(__arm64__)
	pmap_t new_pmap = pmap_create_wrapper(0);
	vm_page_t m = vm_page_grab();
	kern_return_t kr = KERN_SUCCESS;

	/* Wire the page (under the page-queues lock) so it cannot be reclaimed mid-test. */
	vm_page_lock_queues();
	if (m != VM_PAGE_NULL) {
		vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE);
	}
	vm_page_unlock_queues();

	/* Bail out if either setup step failed; cleanup handles partial state. */
	if ((new_pmap == NULL) || (m == VM_PAGE_NULL)) {
		goto ppo_cleanup;
	}

	ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);

	for (unsigned int loop = 0; loop < num_loops; ++loop) {
		/* Install num_aliases RW mappings of the same physical page at consecutive VAs. */
		for (unsigned int alias = 0; alias < num_aliases; ++alias) {
			kr = pmap_enter(new_pmap, PMAP_TEST_VA + (PAGE_SIZE * alias), phys_page,
			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
			assert(kr == KERN_SUCCESS);
		}

		/* Time only the protect downgrade, not the mapping setup/teardown. */
		uint64_t start_time = mach_absolute_time();

		pmap_page_protect_options(phys_page, VM_PROT_READ, 0, NULL);

		duration += (mach_absolute_time() - start_time);

		pmap_remove(new_pmap, PMAP_TEST_VA, PMAP_TEST_VA + (num_aliases * PAGE_SIZE));
	}

ppo_cleanup:
	/* Free the page under the page-queues lock, then destroy the pmap. */
	vm_page_lock_queues();
	if (m != VM_PAGE_NULL) {
		vm_page_free(m);
	}
	vm_page_unlock_queues();
	if (new_pmap != NULL) {
		pmap_destroy(new_pmap);
	}
#endif
	return duration;
}
198