xref: /xnu-12377.61.12/osfmk/vm/vm_mteinfo_internal.h (revision 4d495c6e23c53686cf65f45067f79024cf5dcee8)
1 /*
2  * Copyright (c) 2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _VM_VM_MTEINFO_INTERNAL_H_
30 #define _VM_VM_MTEINFO_INTERNAL_H_
31 
32 #include <stdint.h>
33 #include <kern/kcdata.h>
34 #include <mach/vm_param.h>
35 #ifndef VM_MTE_FF_VERIFY
36 #include <vm/vm_page.h>
37 #if MACH_KERNEL_PRIVATE
38 #include <vm/vm_page_internal.h>
39 #endif
40 #endif /* VM_MTE_FF_VERIFY */
41 
42 __BEGIN_DECLS
43 #if HAS_MTE
44 
45 #pragma mark Types
46 
47 struct vm_page;
48 
49 /*!
50  * @typedef mte_cell_state_t
51  *
52  * @abstract
53  * This type denotes the state of a cell, which influences which queue it
54  * belongs to.
55  *
56  * @discussion
57  * For any given state untagged covered pages associated with a tag storage page
58  * (or its cell) can be allocated.  However, tagged covered pages can only be
59  * allocated if the associated tag storage cell is in the MTE_STATE_ACTIVE
60  * state.
61  *
62  * @const MTE_STATE_DISABLED
63  * This cell is disabled from being selected as a tag storage page.
64  *
65  * This can happen for:
66  * - recursive tag storage,
67  * - tag storage for iBoot carveouts,
68  * - tag storage for unmanaged memory not using MTE,
69  * - pages with ECC errors that have been retired.
70  *
71  * In the first two cases, the page is usable for regular untaggable usage,
72  * and is on the global free queue, in the latter case the page is retired
73  * and unusable.
74  *
75  * @const MTE_STATE_PINNED
76  * The tag storage page is currently used as non tag storage, and a reclaim was
77  * attempted and failed due to the page being pinned (most likely wired).
78  *
79  * This state is discovered lazily by the refill thread as it would be expensive
80  * to maintain explicitly. It serves as a way to not attempt reclaiming the same
81  * pages over and over again when they are in a state that doesn't permit it.
82  *
83  * This page shall have no covered pages with MTE enabled (the SPTM will
84  * enforce this).
85  *
86  * @const MTE_STATE_DEACTIVATING
87  * List of pages in the process of being deactivated (from MTE_STATE_ACTIVE).
88  *
89  * This page might transiently have pages with MTE enabled,
90  * however none should be in use.
91  *
92  * @const MTE_STATE_CLAIMED
93  * The tag storage page is currently used as non tag storage, and is typically
94  * typed XNU_DEFAULT (though nothing prevents other uses, provided the usage is
95  * relocatable).
96  *
97  * This page shall have no covered pages with MTE enabled (the SPTM will
98  * enforce this).
99  *
100  * @const MTE_STATE_INACTIVE
101  * The tag storage page is currently completely free and unused, and is typed
102  * XNU_DEFAULT.
103  *
104  * This page shall have no covered pages with MTE enabled (the SPTM will
105  * enforce this).
106  *
107  * @const MTE_STATE_RECLAIMING
108  * List of pages which used to be in MTE_STATE_CLAIMED state, that the fill
109  * thread is attempting to relocate.
110  *
111  * This page shall have no covered pages with MTE enabled (the SPTM must
112  * enforce this).
113  *
114  * @const MTE_STATE_ACTIVATING
115  * List of pages in the process of being activated (from MTE_STATE_INACTIVE).
116  *
117  * This page shall have no covered pages with MTE enabled (the SPTM must
118  * enforce this).
119  *
120  * @const MTE_STATE_ACTIVE
121  * The tag storage page is currently typed XNU_TAG_STORAGE and might have
122  * covered pages with MTE enabled.
123  */
/* Note: these values are aliased by mte_cell_list_idx_t; keep the order in sync. */
__enum_closed_decl(mte_cell_state_t, uint8_t, {
	MTE_STATE_DISABLED,     /* never selectable as tag storage (recursive, carveout, unmanaged, retired) */
	MTE_STATE_PINNED,       /* used as regular memory; reclaim failed because the page was pinned */
	MTE_STATE_DEACTIVATING, /* in the process of leaving MTE_STATE_ACTIVE */
	MTE_STATE_CLAIMED,      /* used as regular (non tag storage) memory, typically XNU_DEFAULT */
	MTE_STATE_INACTIVE,     /* completely free and unused, typed XNU_DEFAULT */
	MTE_STATE_RECLAIMING,   /* was MTE_STATE_CLAIMED; fill thread is relocating its contents */
	MTE_STATE_ACTIVATING,   /* in the process of leaving MTE_STATE_INACTIVE */
	MTE_STATE_ACTIVE,       /* typed XNU_TAG_STORAGE; may back covered pages with MTE enabled */
});
134 
135 /*!
136  * @const MTE_BUCKETS_COUNT_MAX
137  * The maximum number of buckets in a cell list.
138  *
139  * Cell list buckets are a function of the number of free covered pages
140  * associated with the tag storage pages being considered:
141  *  - bucket 0: no free covered page.
142  *  - bucket 1: 1 to 8 free covered pages.
143  *  - bucket 2: 9 to 16 free covered pages.
144  *  - bucket 3: 17 to 24 free covered pages.
145  *  - bucket 4: 25 to 32 free covered pages.
146  */
147 #define MTE_BUCKETS_COUNT_MAX           5
148 
149 /*!
150  * @typedef mte_cell_list_idx_t
151  *
152  * @abstract
153  * Represents the index of a cell list inside the mteinfo data structure.
154  *
155  * @discussion
156  * The order of these values matter:
157  * - Lists with single buckets must be first
158  * - Active lists must be last
159  */
__enum_closed_decl(mte_cell_list_idx_t, uint32_t, {
	/* each list index below aliases the matching mte_cell_state_t value */
	MTE_LIST_DISABLED_IDX           = MTE_STATE_DISABLED,
	MTE_LIST_PINNED_IDX             = MTE_STATE_PINNED,
	MTE_LIST_DEACTIVATING_IDX       = MTE_STATE_DEACTIVATING,
	MTE_LIST_CLAIMED_IDX            = MTE_STATE_CLAIMED,
	MTE_LIST_INACTIVE_IDX           = MTE_STATE_INACTIVE,
	MTE_LIST_RECLAIMING_IDX         = MTE_STATE_RECLAIMING,
	MTE_LIST_ACTIVATING_IDX         = MTE_STATE_ACTIVATING,
	MTE_LIST_ACTIVE_0_IDX           = MTE_STATE_ACTIVE,
	/* extra list with no corresponding cell state; active lists must be last */
	MTE_LIST_ACTIVE_IDX             = MTE_STATE_ACTIVE + 1,

	MTE_LISTS_COUNT,
});
173 
174 /*!
175  * @typedef mte_cell_list_t
176  *
177  * @abstract
178  * This data structure represents a segregated list of page queues.
179  *
180  * @discussion
181  * The list is segregated per number of free pages. Each segregation queue is
182  * called a "bucket".
183  *
184  * @field mask
185  * Mask of all the non empty buckets in this list.
186  *
187  * @field count
188  * Total number of cells on the list
189  *
190  * @field buckets
191  * A vector of queues this list covers. The number of buckets depends on the
192  * list and can range from 1 to MTE_BUCKETS_COUNT_MAX
193  */
typedef struct mte_cell_list {
	uint32_t                        mask;     /* one bit set per non empty bucket */
	uint32_t                        count;    /* total number of cells on the list */
	struct mte_cell_queue_head     *buckets;  /* 1 to MTE_BUCKETS_COUNT_MAX queues */
} *mte_cell_list_t;
199 
200 /*!
201  * @abstract
202  * Indices for the mte free queue buckets.
203  *
204  * @discussion
205  * This bucketing is designed to order allocations:
206  *
207  * - untagged allocations will consider buckets in ascending order from
208  *   @c MTE_FREE_UNTAGGABLE through @c MTE_FREE_UNTAGGABLE_ACTIVATING.
209  *
210  * - tagged allocations will consider buckets in descending order from
 *   @c MTE_FREE_ACTIVE_3 through @c MTE_FREE_ACTIVE_0.
212  *
213  * Said another way: lower indices denote buckets where untagged allocations are
214  * more desirable and higher indices buckets where tagged allocations are more
215  * desirable.
216  *
217  *
218  * @const MTE_FREE_UNTAGGABLE_0
219  * The bucket for pages with disabled, pinned, deactivating tag storage
220  * pages, or claimed with 16 or less associated free pages.
221  *
222  * @const MTE_FREE_UNTAGGABLE_1
223  * The bucket for claimed pages with 17 or more associated free pages or
224  * inactive pages with 16 or less associated free pages.
225  *
226  * @const MTE_FREE_UNTAGGABLE_2
227  * The bucket for pages with inactive tag storage pages which have 17 associated
228  * covered free pages or more.
229  *
230  *
231  * @const MTE_FREE_UNTAGGABLE_ACTIVATING
232  * The bucket for pages with activating or reclaiming tag storage pages.
233  *
234  * This bucket is kept "last" because the system has selected these pages
235  * for upgrading into the active pools either from inactive or claimed
236  * as being the best current candidates. In other words, once the activation
237  * is finished, we expect these pages to fall into a high @c MTE_FREE_ACTIVE_*
238  * bucket.
239  *
240  * These transitions are unfortunately not atomic and the untagged workloads
241  * can tap into these during the transitions defeating the purpose of the
242  * upgrades, so by making them last, we protect them from untagged allocations.
243  *
244  *
245  * @const MTE_FREE_NOT_QUEUED
 * This is a pseudo bucket for tag storage pages with no free pages.
247  */
__enum_closed_decl(mte_free_queue_idx_t, uint32_t, {
	/* untagged allocations scan ascending starting here */
	MTE_FREE_UNTAGGABLE_0,          /* disabled/pinned/deactivating, or claimed with <= 16 free */
	MTE_FREE_UNTAGGABLE_1,          /* claimed with >= 17 free, or inactive with <= 16 free */
	MTE_FREE_UNTAGGABLE_2,          /* inactive with >= 17 free */
	/* tagged allocations scan descending starting from MTE_FREE_ACTIVE_3 */
	MTE_FREE_ACTIVE_0,
	MTE_FREE_ACTIVE_1,
	MTE_FREE_ACTIVE_2,
	MTE_FREE_ACTIVE_3,
	/* kept last to shield in-transition pages from untagged allocations */
	MTE_FREE_UNTAGGABLE_ACTIVATING,
	MTE_FREE_NOT_QUEUED,            /* pseudo bucket: tag storage pages with no free pages */
});
259 
260 
261 #pragma mark Counters and Globals
262 
/*!
 * The cell lists, in mte_cell_list_idx_t order.
 *
 * Each list contains this many buckets:
 *  - MTE_LIST_DISABLED_IDX:     1
 *  - MTE_LIST_PINNED_IDX:       1
 *  - MTE_LIST_DEACTIVATING_IDX: 1 (NOTE(review): missing from the original
 *    list; presumably a single bucket like the other transitional lists --
 *    confirm against the definition in the .c file)
 *  - MTE_LIST_CLAIMED_IDX:      MTE_BUCKETS_COUNT_MAX
 *  - MTE_LIST_INACTIVE_IDX:     MTE_BUCKETS_COUNT_MAX
 *  - MTE_LIST_RECLAIMING_IDX:   1
 *  - MTE_LIST_ACTIVATING_IDX:   1
 *  - MTE_LIST_ACTIVE_0_IDX:     MTE_BUCKETS_COUNT_MAX
 *  - MTE_LIST_ACTIVE_IDX:       1
 */
extern struct mte_cell_list mte_info_lists[MTE_LISTS_COUNT];

/*!
 * The MTE free queues, indexed by mte_free_queue_idx_t.
 *
 * Sized MTE_FREE_NOT_QUEUED on purpose: that index is a pseudo bucket for
 * pages with no free pages and has no backing queue.
 */
extern struct vm_page_free_queue mte_free_queues[MTE_FREE_NOT_QUEUED];

/*!
 * The queue of claimable tag storage pages
 * (inactive pages with 8 or fewer associated free pages).
 */
extern struct vm_page_free_queue mte_claimable_queue;
288 
289 #ifndef VM_MTE_FF_VERIFY
290 
291 /*!
292  * @abstract
293  * The type for MTE related per-cpu free queues.
294  *
295  * @field free_tagged_pages
296  * The per-cpu free list of tagged pages.
297  * Pages on this list have the @c VM_PAGE_ON_FREE_LOCAL_Q state.
298  * Their associated cell will have state @c MTE_STATE_ACTIVE.
299  * This list is only accessed by the current CPU with preemption disabled.
300  *
301  * @field free_claimed_pages
302  * The per-cpu free queue of claimed pages.
303  * Pages on this list have the @c VM_PAGE_ON_FREE_LOCAL_Q state.
304  * Their associated cell will have state @c MTE_STATE_CLAIMED.
305  * Access to this queue is protected by the @c free_claimed_lock.
306  *
307  * @field free_claimed_lock
308  * The lock protecting the per-cpu free queue of claimed pages.
309  * This allows for the refill thread to steal claimed pages from this queue.
310  *
311  * @field deactivate_suspend
312  * Per-cpu marker that suspends page deactivations until the current CPU
313  * has finished some untagging (see mteinfo_tag_storage_deactivate_barrier()).
314  */
typedef struct mte_pcpu {
	/* per-cpu free queue of claimed pages; protected by free_claimed_lock */
	vm_page_queue_head_t   free_claimed_pages VM_PAGE_PACKED_ALIGNED;
	/* per-cpu free list of tagged pages; current CPU only, preemption disabled */
	vm_page_t              free_tagged_pages;
	/* allows the refill thread to steal claimed pages from this queue */
	lck_ticket_t           free_claimed_lock;
	/* suspends page deactivations until this CPU finishes untagging */
	uint32_t               deactivate_suspend;
} *mte_pcpu_t;
PERCPU_DECL(struct mte_pcpu, mte_pcpu);
322 
323 /*!
324  * @var vm_cpu_free_count
 * Scalable counter of the number of free pages on CPU free lists.
326  * (not an MTE concept but here for the sake of vm_unix.c)
327  *
328  * @var vm_cpu_free_tagged_count
329  * Scalable counter of the number of free tagged pages on CPU free lists.
330  *
 * @var vm_cpu_free_claimed_count
332  * Scalable counter of the number of free claimed pages on CPU free lists.
333  */
334 SCALABLE_COUNTER_DECLARE(vm_cpu_free_count);
335 SCALABLE_COUNTER_DECLARE(vm_cpu_free_tagged_count);
336 SCALABLE_COUNTER_DECLARE(vm_cpu_free_claimed_count);
337 
338 #endif /* VM_MTE_FF_VERIFY */
339 
340 /*!
341  * @var vm_page_free_taggable_count
342  * Number of free pages in the MTE_FREE_ACTIVE_* MTE free queue buckets.
343  *
344  * @var vm_page_free_unmanaged_tag_storage_count
345  * Number of free unmanaged tag storage pages. These do not participate in the
346  * global free count.
347  *
348  * @var vm_page_recursive_tag_storage_count
349  * Number of recursive tag storage pages.
350  * These should be VM_MEMORY_CLASS_DEAD_TAG_STORAGE.
351  *
352  * @var vm_page_retired_tag_storage_count
353  * Number of retired tag storage pages.
354  * These should be unusable pages due to ECC errors.
355  *
356  * @var vm_page_unmanaged_tag_storage_count
357  * Number of unmanaged tag storage pages.
358  * These should be VM_MEMORY_CLASS_DEAD_TAG_STORAGE.
359  *
360  * @var vm_page_wired_tag_storage_count
361  * Number of tag storage range pages that are wired (note: this is not
 * currently the number of VM_MEMORY_CLASS_TAG_STORAGE pages that are wired).
363  *
364  * @var vm_page_tagged_count
365  * Number of tagged pages in use.
366  *
367  * @var vm_mte_refill_thread_wakeups
368  * The number of times the refill thread was woken up.
369  *
370  * @var vm_page_tag_storage_activation_count
371  * Number of activation (inactive/claimed -> active) transitions ever done.
372  *
373  * @var vm_page_tag_storage_deactivation_count
374  * Number of deactivation (active -> inactive) transitions ever done.
375  *
376  * @var vm_page_tag_storage_reclaim_from_cpu_count
377  * Number of times a claimed tag storage page was successfully reclaimed from
378  * a cpu free list.
379  *
380  * @var vm_page_tag_storage_reclaim_success_count
381  * Number of times a claimed tag storage page was successfully reclaimed.
382  *
383  * @var vm_page_tag_storage_reclaim_failure_count
384  * Number of times a claimed tag storage page failed to be reclaimed.
385  *
386  * @var vm_page_tag_storage_reclaim_wired_failure_count
387  * Number of times a claimed tag storage page failed to be reclaimed because it
388  * was wired.
389  *
390  * @var vm_page_tag_storage_wire_relocation_count
391  * Number of relocations of tag storage pages due to wiring.
392  *
393  * @var vm_page_tag_storage_reclaim_compressor_failure_count
394  * Number of times a claimed tag storage page failed to be reclaimed because it
395  * was used in the compressor pool and getting swapped out.
396  *
397  * @var vm_page_tag_storage_compressor_relocation_count
398  * Number of relocations of tag storage pages due to the compressor.
399  *
400  * @var vm_page_free_wanted_tagged
401  * Number of threads that are waiting for covered tagged pages.  Also the event
402  * those threads wait on.
403  *
404  * @var vm_page_free_wanted_tagged_privileged
 * Number of privileged threads that are waiting for covered tagged pages.  Also
406  * the event those threads wait on.
407  */
408 extern uint32_t vm_page_free_taggable_count;
409 extern uint32_t vm_page_free_unmanaged_tag_storage_count;
410 extern uint32_t vm_page_recursive_tag_storage_count;
411 extern uint32_t vm_page_retired_tag_storage_count;
412 extern uint32_t vm_page_unmanaged_tag_storage_count;
413 extern uint32_t vm_page_wired_tag_storage_count;
414 extern uint32_t vm_page_tagged_count;
415 extern uint64_t vm_mte_refill_thread_wakeups;
416 extern uint64_t vm_page_tag_storage_activation_count;
417 extern uint64_t vm_page_tag_storage_deactivation_count;
418 extern uint64_t vm_page_tag_storage_reclaim_from_cpu_count;
419 extern uint64_t vm_page_tag_storage_reclaim_success_count;
420 extern uint64_t vm_page_tag_storage_reclaim_failure_count;
421 extern uint64_t vm_page_tag_storage_reclaim_wired_failure_count;
422 extern uint64_t vm_page_tag_storage_wire_relocation_count;
423 extern uint64_t vm_page_tag_storage_reclaim_compressor_failure_count;
424 extern uint64_t vm_page_tag_storage_compressor_relocation_count;
425 extern uint32_t vm_page_free_wanted_tagged;
426 extern uint32_t vm_page_free_wanted_tagged_privileged;
427 
428 #ifndef VM_MTE_FF_VERIFY
429 /*!
430  * @var vm_cpu_claimed_count
431  * Scalable counter of the number of claimed tag storage pages allocated.
432  */
433 SCALABLE_COUNTER_DECLARE(vm_cpu_claimed_count);
434 #endif /* VM_MTE_FF_VERIFY */
435 
436 /*!
437  * @var vm_page_tag_storage_reserved
438  * Number of free tag storage pages reserved for the fill thread.
439  */
440 extern uint32_t vm_page_tag_storage_reserved;
441 
442 /*!
443  * @var vm_mte_tag_storage_for_compressor
444  * Whether we use tag storage pages for the compressor pool.
445  */
446 extern bool vm_mte_tag_storage_for_compressor;
447 
448 /*!
449  * @var vm_mte_tag_storage_for_vm_tags_mask
450  * Which VM tags can use tag storage.
451  */
452 extern bitmap_t vm_mte_tag_storage_for_vm_tags_mask[BITMAP_LEN(VM_MEMORY_COUNT)];
453 
454 #if MACH_KERNEL_PRIVATE
455 #pragma mark Tag storage space state machine
456 
457 /*!
458  * @function mteinfo_tag_storage_disabled()
459  *
460  * @abstract
461  * Returns whether a tag storage page is disabled.
462  *
463  * @discussion
464  * Unlike other mteinfo_* functions, this can be called without the free queue
465  * lock held because disabling pages is a one way transition after lockdown,
466  * and before lockdown the kernel is single threaded.
467  *
468  * @param page  The pointer to a page inside the tag storage space.
469  */
470 extern bool mteinfo_tag_storage_disabled(const struct vm_page *page);
471 
472 /*!
473  * @function mteinfo_tag_storage_set_retired()
474  *
475  * @abstract
476  * Mark a tag storage page as retired due to ECC errors.
477  *
478  * @param page  The pointer to a page inside the tag storage space.
479  */
480 extern void mteinfo_tag_storage_set_retired(struct vm_page *page);
481 
482 /*!
483  * @function mteinfo_tag_storage_set_inactive()
484  *
485  * @abstract
486  * Mark a tag storage page as inactive.
487  *
488  * @param page  The pointer to a page inside the tag storage space.
489  * @param init  This is the initial "inactive" transition.
490  */
491 extern void mteinfo_tag_storage_set_inactive(struct vm_page *page, bool init);
492 
493 /*!
494  * @function mteinfo_tag_storage_set_claimed()
495  *
496  * @abstract
497  * Mark a tag storage page as claimed for regular memory usage.
498  *
499  * @discussion
500  * The tag storage page must be either inactive or reclaiming.
501  *
502  * @param page  The pointer to a page inside the tag storage space.
503  */
504 extern void mteinfo_tag_storage_set_claimed(struct vm_page *page);
505 
506 /*!
507  * @function mteinfo_tag_storage_wakeup()
508  *
509  * @abstract
510  * Mark a tag storage page as no longer pinned.
511  *
512  * @discussion
513  * The tag storage page must be in the pinned state.
514  * The page queues lock must be held.
515  *
516  * @param page          The pointer to a page inside the tag storage space.
517  * @param fq_locked     Whether the page free queue lock is held.
518  */
519 extern void mteinfo_tag_storage_wakeup(struct vm_page *page, bool fq_locked);
520 
521 
522 #pragma mark Covered pages state machine
523 
524 /*!
525  * @function mteinfo_covered_page_taggable()
526  *
527  * @abstract
528  * Returns whether a specified covered page has an active tag storage page
529  * associated.
530  *
531  * @param pnum  A page number outside of the tag storage space.
532  */
533 extern bool mteinfo_covered_page_taggable(ppnum_t pnum);
534 
535 /*!
536  * @function mteinfo_covered_page_set_free()
537  *
538  * @abstract
539  * Mark the specified untagged page as free in its tag storage tracking metadata.
540  *
541  * @param pnum          A page number outside of the tag storage space.
 * @param tagged        Whether the page was used as tagged.
543  */
544 extern void mteinfo_covered_page_set_free(ppnum_t pnum, bool tagged);
545 
546 /*!
547  * @function mteinfo_covered_page_set_used()
548  *
549  * @abstract
550  * Mark the specified untagged page as used in its tag storage tracking metadata.
551  *
552  * @param pnum          A page number outside of the tag storage space.
553  * @param tagged        Whether the page will be used as tagged.
554  */
555 extern void mteinfo_covered_page_set_used(ppnum_t pnum, bool tagged);
556 
557 /*!
558  * @function mteinfo_covered_page_set_stolen_tagged()
559  *
560  * @abstract
561  * Mark the specified page as using MTE in its tag storage tracking metadata.
562  *
563  * @discussion
564  * These pages are expected to be "stolen" in that bootstrap has allocated them
565  * through bootstrap allocation strategies (see: bump allocation) before MachVM
566  * is properly initialized and able to call into this module properly.  Because
567  * of this, bootstrap will call into this method to directly tell the module
568  * that the page is used as tagged now.
569  *
570  * @param pnum  A page number outside of the tag storage space,
571  *              the page must be used.
572  */
573 __startup_func
574 extern void mteinfo_covered_page_set_stolen_tagged(ppnum_t pnum);
575 
576 /*!
577  * @function mteinfo_covered_page_clear_tagged()
578  *
579  * @abstract
580  * Mark the specified page as no longer using MTE in its tag storage tracking
581  * metadata, while remaining "in use".
582  *
583  * @param pnum  A page number outside of the tag storage space,
584  *              the page must be used.
585  */
586 extern void mteinfo_covered_page_clear_tagged(ppnum_t pnum);
587 
588 
589 #pragma mark Activate
590 
591 /*!
592  * @function mteinfo_tag_storage_try_activate()
593  *
594  * @abstract
595  * Try to activate tag storage pages in order to make a certain amount of
596  * covered taggable pages available.
597  *
598  * @discussion
599  * This must be called with the page free queue lock held.
600  * This function will have dropped the free queue lock if it returns true.
601  *
602  * @param target        how many covered taggable free pages to try to generate
603  *                      as a result of this activation.
604  * @param spin_mode     whether to take the free page queue lock in spin mode.
605  *
606  * @returns             whether the page free queue lock was dropped
607  *                      (in which case it means pages have been activated,
608  *                      either by this thread or another we synchronized with).
609  */
610 extern bool mteinfo_tag_storage_try_activate(uint32_t target, bool spin_mode);
611 
612 
613 #pragma mark Refill
614 
615 /*!
616  * @function mteinfo_wake_fill_thread()
617  *
618  * @abstract
619  * Wake the fill thread if it has not already been woken.
620  */
621 extern void mteinfo_wake_fill_thread(void);
622 
623 
624 #pragma mark Alloc
625 
626 /*!
627  * @function mteinfo_free_queue_grab()
628  *
629  * @abstract
630  * Gets pages from the MTE free queue.
631  *
632  * @discussion
633  * Clients cannot get more pages than the free queue has; attempting to do so
634  * will cause a panic.
635  *
636  * @param options       The grab options.
637  * @param mem_class     The memory class to allocate from.
638  * @param num_pages     The number of pages to grab.
639  * @param q_state       The vmp_q_state to set on the page.
640  *
641  * @returns
642  * A list of pages; the list will be at least num_pages long.
643  */
644 extern vm_page_list_t mteinfo_free_queue_grab(
645 	vm_grab_options_t       options,
646 	vm_memory_class_t       mem_class,
647 	unsigned int            num_pages,
648 	vm_page_q_state_t       q_state);
649 
650 
651 /*!
652  * @function mteinfo_page_list_fix_tagging()
653  *
654  * @abstract
655  * Fix the tagging for a list returned by @c mteinfo_free_queue_grab().
656  *
657  * @discussion
658  * Preemption must be disabled (under the same preemption disabled
659  * hold as the call to @c mteinfo_free_queue_grab() that preceded).
660  *
661  * @param mem_class     The memory class being allocated.
662  * @param list          The list returned by mteinfo_free_queue_grab().
663  */
664 extern void mteinfo_page_list_fix_tagging(
665 	vm_memory_class_t       mem_class,
666 	vm_page_list_t         *list);
667 
668 
669 #pragma mark Bootstrap API
670 
671 extern void mteinfo_init(uint32_t num_tag_pages);
672 
673 #if HIBERNATION
674 
675 /*!
676  * @abstract
677  * Iterate all free pages from the MTE free queue (covered or tag storage).
678  */
679 extern void mteinfo_free_queue_foreach(void (^block)(vm_page_t));
680 
681 #endif /* HIBERNATION */
682 
683 /*!
684  * @function mteinfo_tag_storage_release_startup()
685  *
686  * @abstract
687  * Marks a tag storage page active or inactive, as appropriate.
688  *
689  * @discussion
690  * The tag storage page does not have to be active, but none of its covered
691  * pages may have been made tagged.  If the tag storage page was active, then
692  * it will be put on a list to be added to the mte_tags_object by @see
693  * mteinfo_tag_storage_startup_list_flush.
694  *
695  * @param page
696  * The tag storage page to be marked.
697  */
698 __startup_func
699 extern void mteinfo_tag_storage_release_startup(vm_page_t page);
700 
701 #endif /* MACH_KERNEL_PRIVATE */
702 #pragma mark Counter methods
703 
704 /*!
705  * @function mteinfo_tag_storage_fragmentation()
706  *
707  * @abstract
708  * Computed value returning the tag storage fragmentation
709  * in parts per thousand.
710  *
711  * @param actual        Whether to show the "actual" fragmentation
712  *                      or what is achievable assuming enough memory
713  *                      pressure.
714  */
715 extern uint32_t mteinfo_tag_storage_fragmentation(bool actual);
716 
717 /*!
718  * @function mteinfo_tag_storage_active()
719  *
720  * @abstract
721  * Computed value returning the number of active tag storage pages.
722  */
723 extern uint32_t mteinfo_tag_storage_active(bool fq_locked);
724 
725 /*!
726  * @function mteinfo_tag_storage_free_pages_for_covered()
727  *
728  * @abstract
729  * Returns the number of covered pages that are free for the tag storage page
730  * associated with this page.
731  *
732  * @param page  The pointer to a page outside of the tag storage space.
733  */
734 extern uint32_t mteinfo_tag_storage_free_pages_for_covered(const struct vm_page *page);
735 
736 /*!
737  * @function mteinfo_increment_wire_count()
738  *
739  * @abstract
740  * Increment the wired tag storage page counter if the given page is tag
741  * storage.
742  *
743  * @discussion
744  * This currently considers all pages in the tag storage range to be tag
745  * storage, whether or not they are VM_MEMORY_CLASS_TAG_STORAGE.  This is due
746  * to how the other page counters are initialized; currently they account for
747  * all tag storage range pages.
748  *
749  * Note that the callers should make sure the reason for wiring isn't because
750  * the page is used for tag storage by checking against the VM_KERN_MEMORY_MTAG
751  * tag which is used in that case.
752  *
753  * @param page
754  * The page being wired.
755  */
756 extern void mteinfo_increment_wire_count(vm_page_t page);
757 
758 /*!
759  * @function mteinfo_decrement_wire_count()
760  *
761  * @abstract
762  * Decrement the wired tag storage page counter if the given page is tag
763  * storage.
764  *
765  * @discussion
766  * This currently considers all pages in the tag storage range to be tag
767  * storage, whether or not they are VM_MEMORY_CLASS_TAG_STORAGE.  This is due
768  * to how the other page counters are initialized; currently they account for
769  * all tag storage range pages.
770  *
771  * Note that this function is a no-op if the page was associated with the
772  * mte_tags_object as it means its wiring was because it's used for tag storage.
773  *
 * @param page          The page being unwired.
 * @param pqs_locked    Whether the page queues lock is held
776  *                      (possibly in spin mode).
777  */
778 extern void mteinfo_decrement_wire_count(vm_page_t page, bool pqs_locked);
779 
780 #ifndef VM_MTE_FF_VERIFY
781 /*!
782  * @function mteinfo_vm_tag_can_use_tag_storage()
783  *
784  * @abstract
785  * Determine if a given VM tag is eligible to dip into tag storage.
786  *
 * @param vm_tag        The VM tag in question.
788  */
789 extern bool mteinfo_vm_tag_can_use_tag_storage(vm_tag_t vm_tag);
790 /*!
791  * @function kdp_mteinfo_snapshot()
792  *
793  * @abstract
794  * Snapshot the current state of all tag storage pages.
795  *
796  * @discussion
797  * Can only be called from debugger context.
798  *
799  * @param cells        Array of struct mte_cell_info (from kcdata.h)
800  * @param count        Size of the array, must match mte_tag_storage_count
801  */
802 extern void kdp_mteinfo_snapshot(struct mte_info_cell __counted_by(count) *cells, size_t count);
803 
804 
805 #endif /* VM_MTE_FF_VERIFY */
806 
807 #endif /* HAS_MTE */
808 __END_DECLS
809 
810 #endif /* _VM_VM_MTEINFO_INTERNAL_H_ */
811