1 #include <mach_ldebug.h>
2 #include <debug.h>
3
4 #include <mach/kern_return.h>
5 #include <mach/mach_host_server.h>
6 #include <mach_debug/lockgroup_info.h>
7
8 #include <os/atomic.h>
9
10 #include <kern/locks.h>
11 #include <kern/misc_protos.h>
12 #include <kern/kalloc.h>
13 #include <kern/thread.h>
14 #include <kern/processor.h>
15 #include <kern/sched_prim.h>
16 #include <kern/debug.h>
17 #include <libkern/section_keywords.h>
18 #include <machine/atomic.h>
19 #include <machine/machine_cpu.h>
20 #include <machine/atomic.h>
21 #include <string.h>
22 #include <kern/kalloc.h>
23
24 #include <sys/kdebug.h>
25 #include <sys/errno.h>
26
27 #if SCHED_HYGIENE_DEBUG
28 static uint64_t
sane_us2abs(uint64_t us)29 sane_us2abs(uint64_t us)
30 {
31 uint64_t t;
32 nanoseconds_to_absolutetime(us * NSEC_PER_USEC, &t);
33 return t;
34 }
35 #endif
36
37 #if !KASAN
38 static void
hw_lck_ticket_test_wait_for_delta(hw_lck_ticket_t * lck,uint8_t delta,int msec)39 hw_lck_ticket_test_wait_for_delta(hw_lck_ticket_t *lck, uint8_t delta, int msec)
40 {
41 hw_lck_ticket_t tmp;
42
43 delta *= HW_LCK_TICKET_LOCK_INCREMENT;
44 for (int i = 0; i < msec * 1000; i++) {
45 tmp.lck_value = os_atomic_load(&lck->lck_value, relaxed);
46 #if CONFIG_PV_TICKET
47 const uint8_t cticket = tmp.cticket &
48 ~HW_LCK_TICKET_LOCK_PVWAITFLAG;
49 #else
50 const uint8_t cticket = tmp.cticket;
51 #endif
52 if ((uint8_t)(tmp.nticket - cticket) == delta) {
53 return;
54 }
55 delay(1);
56 }
57 assert(false);
58 }
59
/*
 * Worker thread for hw_lck_ticket_allow_invalid_test() below.
 *
 * Waits until it can observe the main test thread holding the lock
 * (ticket delta of 1), then attempts an allow-invalid acquisition.
 * The main thread invalidates the lock while this thread is queued,
 * so the acquisition must fail with HW_LOCK_INVALID and return with
 * preemption enabled.  The thread then terminates itself.
 */
__dead2
static void
hw_lck_ticket_allow_invalid_worker(void *arg, wait_result_t __unused wr)
{
	hw_lck_ticket_t *lck = arg;
	hw_lock_status_t rc;

	/* wait until we can observe the test take the lock */
	hw_lck_ticket_test_wait_for_delta(lck, 1, 10);

	rc = hw_lck_ticket_lock_allow_invalid(lck,
	    &hw_lock_test_give_up_policy, NULL);
	assert(rc == HW_LOCK_INVALID); // because the other thread invalidated it
	assert(preemption_enabled());

	thread_terminate_self();
	__builtin_unreachable();
}
78 #endif /* !KASAN */
79
/*
 * sysctl test: exercise hw_lck_ticket_lock_allow_invalid().
 *
 * Checks, in order, that an allow-invalid acquisition:
 *   1. fails on zeroed (never initialized) memory,
 *   2. succeeds on an initialized lock, holding it with preemption
 *      disabled,
 *   3. fails when the lock is invalidated while a waiter holds a
 *      reservation (verified by a helper thread; skipped under KASAN),
 *   4. fails when the lock's backing page has been depopulated.
 *
 * Returns 0 and sets *out = 1 on success; ENOMEM if the backing page
 * could not be allocated.  All other failures assert.
 */
static int
hw_lck_ticket_allow_invalid_test(__unused int64_t in, int64_t *out)
{
	vm_offset_t addr = 0;
	hw_lck_ticket_t *lck;
	kern_return_t kr;
	hw_lock_status_t rc;

	printf("%s: STARTING\n", __func__);

	/* grab a dedicated zero-filled kernel page to host the lock */
	kr = kmem_alloc(kernel_map, &addr, PAGE_SIZE,
	    KMA_ZERO | KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
	if (kr != KERN_SUCCESS) {
		printf("%s: kma failed (%d)\n", __func__, kr);
		return ENOMEM;
	}

	/* phase 1: zeroed memory must be treated as an invalid lock */
	lck = (hw_lck_ticket_t *)addr;
	rc = hw_lck_ticket_lock_allow_invalid(lck,
	    &hw_lock_test_give_up_policy, NULL);
	assert(rc == HW_LOCK_INVALID); // because the lock is 0
	assert(preemption_enabled());

	/* phase 2: an initialized lock must acquire normally */
	hw_lck_ticket_init(lck, NULL);

	assert(hw_lck_ticket_lock_try(lck, NULL));
	assert(!hw_lck_ticket_lock_try(lck, NULL)); /* already held */
	hw_lck_ticket_unlock(lck);

	rc = hw_lck_ticket_lock_allow_invalid(lck,
	    &hw_lock_test_give_up_policy, NULL);
	assert(rc == HW_LOCK_ACQUIRED); // because the lock is initialized
	assert(!preemption_enabled());

#if SCHED_HYGIENE_DEBUG
	if (os_atomic_load(&sched_preemption_disable_threshold_mt, relaxed) < sane_us2abs(20 * 1000)) {
		/*
		 * This test currently relies on timeouts that cannot always
		 * be guaranteed (rdar://84691107). Abandon the measurement if
		 * we have a tight timeout.
		 */
		abandon_preemption_disable_measurement();
	}
#endif

	hw_lck_ticket_unlock(lck);
	assert(preemption_enabled());

#if !KASAN
	/* phase 3: invalidate while a worker thread holds a reservation */
	thread_t th;

	kr = kernel_thread_start_priority(hw_lck_ticket_allow_invalid_worker, lck,
	    BASEPRI_KERNEL, &th);
	assert(kr == KERN_SUCCESS);
	thread_deallocate(th);

	/* invalidate the lock */
	hw_lck_ticket_lock(lck, NULL);

	/* wait for the worker thread to take the reservation */
	hw_lck_ticket_test_wait_for_delta(lck, 2, 20);
	hw_lck_ticket_invalidate(lck);
	hw_lck_ticket_unlock(lck);
	hw_lck_ticket_destroy(lck, NULL);

	/* re-init so phase 4 probes a valid-looking lock on unmapped memory */
	hw_lck_ticket_init(lck, NULL);
#endif /* !KASAN */

	/* phase 4: unmapped memory must be treated as an invalid lock */
	kernel_memory_depopulate(addr, PAGE_SIZE, KMA_KOBJECT,
	    VM_KERN_MEMORY_DIAG);

	rc = hw_lck_ticket_lock_allow_invalid(lck,
	    &hw_lock_test_give_up_policy, NULL);
	assert(rc == HW_LOCK_INVALID); // because the memory is unmapped

	kmem_free(kernel_map, addr, PAGE_SIZE);

	printf("%s: SUCCESS\n", __func__);

	*out = 1;
	return 0;
}
/* Expose the test above as the `hw_lck_ticket_allow_invalid` sysctl test. */
SYSCTL_TEST_REGISTER(hw_lck_ticket_allow_invalid, hw_lck_ticket_allow_invalid_test);
163