xref: /xnu-12377.61.12/osfmk/vm/vm_lock_perf.h (revision 4d495c6e23c53686cf65f45067f79024cf5dcee8)
1 /*
2  * Copyright (c) 2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #pragma once
30 
31 #include <sys/kdebug.h>
32 
/*
 * Master switch for the VM Lock Perf tracing module.
 * This should only be enabled at desk: the events are emitted under the
 * DBG_MACH kdebug class, which is enabled entirely in default traces (see
 * the note further down in this file), so shipping with this on would add
 * the annotations to every trace.
 */
#define ENABLE_VM_LOCK_PERF 0
35 
36 /*
37  * The VM Lock Perf (VMLP) module uses ktrace to gather insights into the
38  * performance profile of the VM subsystem, particularly as it pertains to
39  * locking behavior.
 * We use standard ktrace events, further subdividing the kdebug code field
 * as shown below.
 * The "type" field indicates which type of VMLP event is being reported.
42  * Currently supported types are API, Lock, and Range (see below).
43  * The subcode is type-dependent.
44  * DBG_MACH  VMLP  type subcode function
45  * ╭──────┬───────┬────┬────────┬─╮
 * │  8   │   8   │  5 │   9    │2│
47  * ╰──────┴───────┴────┴────────┴─╯
48  */
49 
50 #pragma mark VM Lock Performance Event IDs
51 
/*
 * Top-level VMLP event categories, carried in the "type" bit-field of the
 * kdebug code (see diagram above). Values are part of the trace format;
 * they are spelled out explicitly so an accidental reorder cannot silently
 * renumber them.
 */
typedef enum __enum_closed {
	VM_LOCK_PERF_API_EVENT   = 1, /* Function start/end (DBG_FUNC_START/END) */
	VM_LOCK_PERF_LOCK_EVENT  = 2, /* Operations on a map lock */
	VM_LOCK_PERF_RANGE_EVENT = 3, /* Reporting an address range */
} vmlp_event_type_t;
57 
/*
 * Packing of the 14-bit kdebug "code" field:
 * the event type occupies bits [13:9], the type-specific subcode bits [8:0].
 */
#define VMLP_CODE_TYPE_OFFSET    (9)
#define VMLP_CODE_TYPE_MASK      (0x1f)
#define VMLP_CODE_SUBCODE_OFFSET (0)
#define VMLP_CODE_SUBCODE_MASK   (0x1ff)

/* Pack a (type, subcode) pair into the kdebug code field. */
#define VMLP_CODE(type, subcode)                                          \
	((((type) & VMLP_CODE_TYPE_MASK) << VMLP_CODE_TYPE_OFFSET) |      \
	(((subcode) & VMLP_CODE_SUBCODE_MASK) << VMLP_CODE_SUBCODE_OFFSET))

/* Full kdebug event ID: class/subclass via MACHDBG_CODE, plus DBG_FUNC_*. */
#define VMLP_EVENTID(type, subcode, function)                             \
	(MACHDBG_CODE(DBG_MACH_VM_LOCK_PERF, VMLP_CODE((type), (subcode))) | (function))
64 
65 #pragma mark Subcodes for API events
66 
/* VM Perf API Name: expands FOO to VMLP_EVENT_API_FOO */
#define VMLPAN(name) VMLP_EVENT_API_ ## name /* VM Perf API Name */

/*
 * Subcodes for API events: one entry per instrumented VM function.
 * Values are assigned implicitly by position, so inserting, removing, or
 * reordering entries renumbers everything after the change -- a breaking
 * change for any tooling that decodes these subcodes from traces. That is
 * why the unused VM_MAP_NON_ALIGNED_TEST entry below is retained.
 * NOTE(review): entries appear to be kept in alphabetical order; inserting
 * a new entry in sorted position is itself a renumbering -- confirm the
 * intended policy before adding entries anywhere but via a breaking change.
 */
typedef enum __enum_closed {
	VMLPAN(FILL_PROCREGIONINFO) = 1,
	VMLPAN(FILL_PROCREGIONINFO_ONLYMAPPEDVNODES),
	VMLPAN(FIND_MAPPING_TO_SLIDE),
	VMLPAN(GET_VMMAP_ENTRIES),
	VMLPAN(GET_VMSUBMAP_ENTRIES),
	VMLPAN(KDP_LIGHTWEIGHT_FAULT),
	VMLPAN(KMEM_ALLOC_GUARD_INTERNAL),
	VMLPAN(KMEM_FREE_GUARD),
	VMLPAN(KMEM_GET_GOBJ_STATS),
	VMLPAN(KMEM_POPULATE_META_LOCKED),
	VMLPAN(KMEM_REALLOC_GUARD),
	VMLPAN(KMEM_SIZE_GUARD),
	VMLPAN(MACH_MAKE_MEMORY_ENTRY_SHARE),
	VMLPAN(MACH_VM_RANGE_CREATE_V1),
	VMLPAN(MOVE_PAGES_TO_QUEUE),
	VMLPAN(TASK_FIND_REGION_DETAILS),
	VMLPAN(TASK_INFO),
	VMLPAN(VM32_REGION_INFO),
	VMLPAN(VM32_REGION_INFO_64),
	VMLPAN(VM32__MAP_EXEC_LOCKDOWN),
	VMLPAN(VMTC_REVALIDATE_LOOKUP),
	VMLPAN(VM_FAULT_COPY),
	VMLPAN(VM_FAULT_INTERNAL),
	VMLPAN(VM_KERN_ALLOCATION_INFO),
	VMLPAN(VM_MAP_APPLE_PROTECTED),
	VMLPAN(VM_MAP_BEHAVIOR_SET),
	VMLPAN(VM_MAP_CAN_REUSE),
	VMLPAN(VM_MAP_CHECK_PROTECTION),
	VMLPAN(VM_MAP_COPYIN_INTERNAL),
	VMLPAN(VM_MAP_COPYOUT_INTERNAL),
	VMLPAN(VM_MAP_COPY_OVERWRITE),
	VMLPAN(VM_MAP_COPY_OVERWRITE_ALIGNED),
	VMLPAN(VM_MAP_COPY_OVERWRITE_NESTED),
	VMLPAN(VM_MAP_COPY_OVERWRITE_UNALIGNED),
	VMLPAN(VM_MAP_CREATE_UPL),
	VMLPAN(VM_MAP_CS_DEBUGGED_SET),
	VMLPAN(VM_MAP_CS_ENFORCEMENT_SET),
	VMLPAN(VM_MAP_DELETE),
	VMLPAN(VM_MAP_DELETE_SUBMAP_RECURSE),
	VMLPAN(VM_MAP_DESTROY),
	VMLPAN(VM_MAP_DISCONNECT_PAGE_MAPPINGS),
	VMLPAN(VM_MAP_ENTER),
	VMLPAN(VM_MAP_ENTER_MEM_OBJECT),
	VMLPAN(VM_MAP_ENTRY_HAS_DEVICE_PAGER),
	VMLPAN(VM_MAP_EXEC_LOCKDOWN),
	VMLPAN(VM_MAP_FIND_SPACE),
	VMLPAN(VM_MAP_FORK),
	VMLPAN(VM_MAP_FORK_COPY),
	VMLPAN(VM_MAP_FREEZE),
	VMLPAN(VM_MAP_GET_PHYS_PAGE),
	VMLPAN(VM_MAP_INHERIT),
	VMLPAN(VM_MAP_INJECT_ERROR),
	VMLPAN(VM_MAP_IS_CORPSE_SOURCE),
	VMLPAN(VM_MAP_LOOKUP_AND_LOCK_OBJECT),
	VMLPAN(VM_MAP_MACHINE_ATTRIBUTE),
	VMLPAN(VM_MAP_MARK_ALIEN),
	VMLPAN(VM_MAP_MSYNC),
	VMLPAN(VM_MAP_NON_ALIGNED_TEST), /* now unused; can be removed on next breaking change */
	VMLPAN(VM_MAP_OVERWRITE_SUBMAP_RECURSE),
	VMLPAN(VM_MAP_PAGEOUT),
	VMLPAN(VM_MAP_PAGE_RANGE_INFO_INTERNAL),
	VMLPAN(VM_MAP_PARTIAL_REAP),
	VMLPAN(VM_MAP_PROTECT),
	VMLPAN(VM_MAP_PURGABLE_CONTROL),
	VMLPAN(VM_MAP_RAISE_MAX_OFFSET),
	VMLPAN(VM_MAP_RAISE_MIN_OFFSET),
	VMLPAN(VM_MAP_RANGE_CONFIGURE),
	VMLPAN(VM_MAP_REGION),
	VMLPAN(VM_MAP_REGION_RECURSE_64),
	VMLPAN(VM_MAP_REMAP),
	VMLPAN(VM_MAP_REMAP_EXTRACT),
	VMLPAN(VM_MAP_REMOVE_AND_UNLOCK),
	VMLPAN(VM_MAP_REMOVE_GUARD),
	VMLPAN(VM_MAP_REUSABLE_PAGES),
	VMLPAN(VM_MAP_REUSE_PAGES),
	VMLPAN(VM_MAP_SET_CACHE_ATTR),
	VMLPAN(VM_MAP_SET_CORPSE_SOURCE),
	VMLPAN(VM_MAP_SET_DATA_LIMIT),
	VMLPAN(VM_MAP_SET_MAX_ADDR),
	VMLPAN(VM_MAP_SET_SIZE_LIMIT),
	VMLPAN(VM_MAP_SET_TPRO_ENFORCEMENT),
	VMLPAN(VM_MAP_SET_TPRO_RANGE),
	VMLPAN(VM_MAP_SET_USER_WIRE_LIMIT),
	VMLPAN(VM_MAP_SHADOW_MAX),
	VMLPAN(VM_MAP_SIGN),
	VMLPAN(VM_MAP_SIMPLIFY),
	VMLPAN(VM_MAP_SINGLE_JIT),
	VMLPAN(VM_MAP_SIZES),
	VMLPAN(VM_MAP_SUBMAP_PMAP_CLEAN),
	VMLPAN(VM_MAP_SWITCH_PROTECT),
	VMLPAN(VM_MAP_TERMINATE),
	VMLPAN(VM_MAP_UNSET_CORPSE_SOURCE),
	VMLPAN(VM_MAP_UNWIRE_NESTED),
	VMLPAN(VM_MAP_WILLNEED),
	VMLPAN(VM_MAP_WIRE_NESTED),
	VMLPAN(VM_MAP_ZERO),
	VMLPAN(VM_PAGE_DIAGNOSE),
	VMLPAN(VM_SHARED_REGION_MAP_FILE),
	VMLPAN(VM_TOGGLE_ENTRY_REUSE),
	VMLPAN(ZONE_METADATA_INIT),
	VMLPAN(ZONE_SUBMAP_ALLOC_SEQUESTERED_VA),
} vmlp_api_event_t;
172 
173 #pragma mark Subcodes for Lock events
174 
/*
 * Subcodes for Lock events, describing operations on a map lock.
 * Values are part of the trace format; they are spelled out explicitly so
 * an accidental reorder cannot silently renumber them.
 */
typedef enum __enum_closed {
	/* Exclusive acquisition/release */
	VMLP_EVENT_LOCK_TRY_EXCL     = 1,
	VMLP_EVENT_LOCK_FAIL_EXCL    = 2,
	VMLP_EVENT_LOCK_REQ_EXCL     = 3,
	VMLP_EVENT_LOCK_GOT_EXCL     = 4,
	VMLP_EVENT_LOCK_UNLOCK_EXCL  = 5,
	VMLP_EVENT_LOCK_DOWNGRADE    = 6,
	/* Shared acquisition/release */
	VMLP_EVENT_LOCK_TRY_SH       = 7,
	VMLP_EVENT_LOCK_FAIL_SH      = 8,
	VMLP_EVENT_LOCK_REQ_SH       = 9,
	VMLP_EVENT_LOCK_GOT_SH       = 10,
	VMLP_EVENT_LOCK_UNLOCK_SH    = 11,
	/* Shared-to-exclusive upgrade */
	VMLP_EVENT_LOCK_TRY_UPGRADE  = 12,
	VMLP_EVENT_LOCK_GOT_UPGRADE  = 13,
	VMLP_EVENT_LOCK_FAIL_UPGRADE = 14,
	/* Waiting while contended */
	VMLP_EVENT_LOCK_SLEEP_BEGIN  = 15,
	VMLP_EVENT_LOCK_SLEEP_END    = 16,
	VMLP_EVENT_LOCK_YIELD_BEGIN  = 17,
	VMLP_EVENT_LOCK_YIELD_END    = 18,
} vmlp_lock_event_t;
195 
196 #pragma mark Subcodes for Range events
197 
/* Subcodes for Range events; currently a single kind. */
typedef enum __enum_closed {
	VMLP_EVENT_RANGE = 1, /* An address range being operated on */
} vmlp_range_event_t;
201 
202 /*
203  * vmlp_* function calls do nothing under normal circumstances...
204  * If we ever change this behavior we need to reconsider whether DBG_MACH is
205  * the right class to be a subclass of given that it is enabled entirely in
206  * default traces.
207  */
208 #if !ENABLE_VM_LOCK_PERF
209 
/*
 * No-op stubs: with the module disabled, every annotation compiles away to
 * nothing. Note that the macro arguments are not evaluated at all, so call
 * sites must not rely on side effects in the argument expressions.
 */
#define vmlp_lock_event_unlocked(event, map)
#define vmlp_lock_event_locked(event, map)
#define vmlp_api_start(func)
#define vmlp_api_end(func, kr)
#define vmlp_range_event(map, addr, size)
#define vmlp_range_event_entry(map, entry)
#define vmlp_range_event_none(map)
#define vmlp_range_event_all(map)
218 
219 #else /* ...but when the module is enabled they emit tracepoints */
220 
221 #pragma mark Debug infra
222 
/*
 * Use stack counters to debug extra or missing end annotations.
 * Should only be turned on while debugging annotations.
 */
#define VMLP_DEBUG_COUNTERS 0

#if VMLP_DEBUG_COUNTERS
/*
 * Invoked automatically (via __attribute__((cleanup))) when the counter
 * declared by VMLP_DEBUG_COUNTER_DECLARE goes out of scope; panics unless
 * vmlp_api_end ran exactly once in the annotated function.
 */
static inline void
__vmlp_debug_counter_check(int *__vmlp_debug_counter)
{
	if (1 != *__vmlp_debug_counter) {
		panic("vmlp_api_end was run %d times in this function (expected 1).", *__vmlp_debug_counter);
	}
}
/* Declared by vmlp_api_start in the caller's scope; checked at scope exit. */
#define VMLP_DEBUG_COUNTER_DECLARE int __vmlp_debug_counter __attribute__((cleanup(__vmlp_debug_counter_check))) = 0
/* Bumped by vmlp_api_end. */
#define VMLP_DEBUG_COUNTER_UPDATE __vmlp_debug_counter++
#else
/* Debug counters disabled: both annotations expand to nothing. */
#define VMLP_DEBUG_COUNTER_DECLARE
#define VMLP_DEBUG_COUNTER_UPDATE
#endif
243 
244 #pragma mark API events
245 
/*
 * Emit the DBG_FUNC_START tracepoint for an API event.
 * The (void) cast keeps `api` "used" on configurations where KDBG
 * expands to nothing.
 */
static inline void
__vmlp_api_start(vmlp_api_event_t api)
{
	(void)api;
	KDBG(VMLP_EVENTID(VM_LOCK_PERF_API_EVENT, api, DBG_FUNC_START));
}
/*
 * Annotate the start of an instrumented function; `func` is the VMLPAN()
 * suffix. Deliberately NOT wrapped in do/while(0): the debug counter
 * declaration must land directly in the caller's scope so its cleanup
 * check fires at function exit (see VMLP_DEBUG_COUNTER_DECLARE).
 */
#define vmlp_api_start(func) VMLP_DEBUG_COUNTER_DECLARE;                       \
	__vmlp_api_start(VMLPAN(func));
254 
/*
 * Emit the DBG_FUNC_END tracepoint for an API event, recording the
 * function's result `kr` in the trace.
 */
static inline void
__vmlp_api_end(vmlp_api_event_t api, uint64_t kr)
{
	(void)api, (void)kr;
	KDBG(VMLP_EVENTID(VM_LOCK_PERF_API_EVENT, api, DBG_FUNC_END), kr);
}
/*
 * Note that post-processing will treat any non-zero kr as failure, so annotate
 * accordingly when APIs do not return a kern_return_t.
 */
#define vmlp_api_end(func, kr) do {                                            \
	VMLP_DEBUG_COUNTER_UPDATE;                                             \
	__vmlp_api_end(VMLPAN(func), kr);                                      \
} while (0)
269 
270 #pragma mark Lock events
271 
/*
 * Emit a Lock event tracepoint carrying the map pointer and a map
 * timestamp (0 when the caller does not hold the map lock; see the
 * wrappers below).
 */
static inline void
__vmlp_lock_event(vmlp_lock_event_t event, vm_map_t map, unsigned int timestamp)
{
	(void)event, (void)map, (void)timestamp;
	KDBG(VMLP_EVENTID(VM_LOCK_PERF_LOCK_EVENT, event, DBG_FUNC_NONE), map, timestamp);
}
/* Annotate a lock event from a context that does NOT hold the map lock. */
static inline void
vmlp_lock_event_unlocked(vmlp_lock_event_t event, vm_map_t map)
{
	/*
	 * If we don't hold a lock on the map it's not safe to access the
	 * timestamp. Pass 0 as placeholder.
	 */
	__vmlp_lock_event(event, map, 0);
}
/*
 * Annotate a lock event from a context that holds the map lock.
 * Map timestamps get incremented at unlock time. Care should be taken to
 * position this annotation before the timestamp increase.
 */
static inline void
vmlp_lock_event_locked(vmlp_lock_event_t event, vm_map_t map)
{
	/*
	 * Postprocessing can use the map timestamp to reorder events that are
	 * causally related but end up having the same ktrace-timestamp and
	 * showing up in reverse order because they occurred on different CPUs.
	 */
	__vmlp_lock_event(event, map, map->timestamp);
}
301 
302 #pragma mark Range events
303 
/*
 * Report the address range [addr, addr + size) being operated on in `map`.
 * NOTE(review): this reads map->timestamp, which per the comments above is
 * only safe with the map lock held -- callers are presumed to hold it;
 * confirm at call sites.
 */
static inline void
vmlp_range_event(vm_map_t map, mach_vm_address_t addr, mach_vm_size_t size)
{
	(void)map, (void)addr, (void)size;
	KDBG(VMLP_EVENTID(VM_LOCK_PERF_RANGE_EVENT, VMLP_EVENT_RANGE, DBG_FUNC_NONE), map, map->timestamp, addr, size);
}
310 
/* Report the range covered by a single map entry. */
static inline void
vmlp_range_event_entry(vm_map_t map, vm_map_entry_t entry)
{
	vmlp_range_event(map, entry->vme_start, entry->vme_end - entry->vme_start);
}
316 
/* Report an empty range (the operation touched no addresses). */
static inline void
vmlp_range_event_none(vm_map_t map)
{
	vmlp_range_event(map, 0, 0);
}
322 
323 static inline void
vmlp_range_event_all(vm_map_t map)324 vmlp_range_event_all(vm_map_t map)
325 {
326 	vmlp_range_event(map, 0, 0xffffffffffffffff);
327 }
328 
329 #endif /* !ENABLE_VM_LOCK_PERF */
330