/*
 * Copyright (c) 2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#define ATOMIC_PRIVATE 1
#define LOCK_PRIVATE 1

#include <stdint.h>
#include <kern/thread.h>
#include <machine/atomic.h>
#include <kern/locks.h>
#include <kern/lock_stat.h>
#include <machine/machine_cpu.h>
#include <os/atomic_private.h>
#include <i386/x86_hypercall.h>

#include <i386/lapic.h>
#include <i386/mp.h>

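/*
 * Wake a halted vcpu via the KVM_HC_KICK_CPU hypercall (nr 5).
 * Per the Linux KVM hypercall ABI, a0 is a flags word (0 here) and
 * a1 is the APIC ID of the vcpu to be woken from its HLT state.
 */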
static inline void
kvm_hc_kick_cpu(int cpu_number)
{
	const unsigned long KVM_HC_KICK_CPU = 5;
	const unsigned long apicid = ml_get_apicid(cpu_number);
	kvmcompat_hypercall2(KVM_HC_KICK_CPU, 0, apicid);
}

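/*
 * Halt until the next interrupt.  When interrupts were enabled on entry,
 * "sti; hlt" is used: STI keeps interrupts inhibited for one more
 * instruction, so the HLT executes before any interrupt can be taken and
 * a wakeup cannot slip into the window between enabling and halting.
 */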
static inline void
kvm_hc_wait(boolean_t ien)
{
	if (ien) {
		__asm__ volatile ("sti; hlt");
	} else {
		__asm__ volatile ("hlt");
	}
}

/* Bitmask of the (v)cpus currently parked in hw_lck_ticket_lock_wait_pv() */
static cpumask_t ticket_waitmask_pv;

/*
 * The lock has just been released, i.e. we incremented cticket, so it's
 * ready for acquisition by the waiter whose nticket == cticket.
 * Find the waiting vcpu and kick it out of its passive state.
 */
__attribute__((noinline))
void
hw_lck_ticket_unlock_kick_pv(hw_lck_ticket_t *lck, uint8_t ticket)
{
	const cpumask_t wmask = os_atomic_load(&ticket_waitmask_pv, acquire);

	percpu_foreach_base(base) {
		const processor_t ps = PERCPU_GET_WITH_BASE(base, processor);
		const uint32_t tcpunum = ps->cpu_id;

		if ((wmask & cpu_to_cpumask(tcpunum)) == 0) {
			continue; // vcpu not currently waiting for a kick
		}
		const lck_tktlock_pv_info_t ltpi = PERCPU_GET_WITH_BASE(base,
		    lck_tktlock_pv_info);

		const hw_lck_ticket_t *wlck = os_atomic_load(&ltpi->ltpi_lck,
		    acquire);
		if (wlck != lck) {
			continue; // vcpu waiting on a different lock
		}

		const uint8_t wt = os_atomic_load(&ltpi->ltpi_wt, acquire);
		if (wt != ticket) {
			continue; // vcpu doesn't have the right ticket
		}

		kvm_hc_kick_cpu(tcpunum);
		PVTICKET_STATS_INC(kick_count);
		break;
	}
}
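
/*
 * For illustration only: a hedged sketch of how an unlock path might pair
 * with the kick above.  This is not XNU's actual unlock slow path; the
 * function name is hypothetical, and it assumes the waiter set
 * HW_LCK_TICKET_LOCK_PVWAITFLAG in cticket before halting, so the flag is
 * still visible after the releasing increment.
 */
#if 0 /* example sketch, not compiled */
static void
example_unlock_kick(hw_lck_ticket_t *lck, uint8_t new_cticket)
{
	/* new_cticket is the "now serving" value after the releasing store */
	if (__improbable(new_cticket & HW_LCK_TICKET_LOCK_PVWAITFLAG)) {
		/* some vcpu may be halted on this ticket: wake it */
		hw_lck_ticket_unlock_kick_pv(lck,
		    new_cticket & ~HW_LCK_TICKET_LOCK_PVWAITFLAG);
	}
}
#endif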


/*
 * The current vcpu wants 'lck' but the vcpu holding it may not be running.
 * Wait for the holder to kick us (above), just /after/ it increments
 * cticket to drop the lock.
 *
 * Other states are possible, e.g. the lock may have been unlocked just
 * before entering this routine, in which case no kick was sent because the
 * per-cpu wait data hadn't been initialized yet.  Or a kick may arrive
 * immediately after we store the wait data, but before we halt.
 *
 * All we really know is that, by the time we get here, spinning has been
 * unsuccessful.
 */
__attribute__((noinline))
void
hw_lck_ticket_lock_wait_pv(hw_lck_ticket_t *lck, uint8_t mt)
{
	/*
	 * Disable interrupts so we don't lose the kick.
	 * (This also prevents collisions with ticket-lock
	 * acquisition in an interrupt handler.)
	 */

	const boolean_t istate = ml_set_interrupts_enabled(FALSE);

	/* Record the ticket + the lock this cpu is waiting for */

	assert(!preemption_enabled());
	lck_tktlock_pv_info_t ltpi = PERCPU_GET(lck_tktlock_pv_info);

	/*
	 * Clear the lock pointer first: the ticket is published before the
	 * lock pointer becomes visible again, so a concurrent kicker can
	 * never pair this lock with a stale ticket.
	 */
	os_atomic_store(&ltpi->ltpi_lck, NULL, release);
	os_atomic_store(&ltpi->ltpi_wt, mt, release);
	os_atomic_store(&ltpi->ltpi_lck, lck, release);

	/* Mark this cpu as eligible for kicking */

	const cpumask_t kickmask = cpu_to_cpumask(cpu_number());
	os_atomic_or(&ticket_waitmask_pv, kickmask, acq_rel);

	assert((mt & HW_LCK_TICKET_LOCK_PVWAITFLAG) == 0);

	/* Check the "now serving" field one last time */

	const uint8_t cticket = os_atomic_load(&lck->cticket, acquire);
	const uint8_t ccount = cticket & ~HW_LCK_TICKET_LOCK_PVWAITFLAG;

	if (__probable(ccount != mt)) {
		PVTICKET_STATS_INC(wait_count);
		assert(cticket & HW_LCK_TICKET_LOCK_PVWAITFLAG);

		/* wait for a kick (or any other interrupt) */
		kvm_hc_wait(istate);
		/*
		 * Note: if interrupts were enabled at entry to this routine,
		 * they'll be enabled here even though we disabled them above.
		 */
	} else {
		/* it's our turn already: just return and claim the ticket */
		PVTICKET_STATS_INC(already_count);
	}

	os_atomic_andnot(&ticket_waitmask_pv, kickmask, acq_rel);
	os_atomic_store(&ltpi->ltpi_lck, NULL, release);

	(void) ml_set_interrupts_enabled(istate);

	assert(!preemption_enabled());
}
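
/*
 * For illustration only: a hedged sketch of how a contended acquirer might
 * fall back to hw_lck_ticket_lock_wait_pv() once spinning has been
 * unsuccessful.  This is not XNU's actual acquire path; the function name,
 * the spin budget, and the exact point where HW_LCK_TICKET_LOCK_PVWAITFLAG
 * is set in cticket are assumptions made for the example.
 */
#if 0 /* example sketch, not compiled */
static void
example_lock_slow(hw_lck_ticket_t *lck, uint8_t mt)
{
	for (;;) {
		for (uint32_t spins = 0; spins < 1000; spins++) {
			const uint8_t cticket =
			    os_atomic_load(&lck->cticket, acquire);
			if ((cticket & ~HW_LCK_TICKET_LOCK_PVWAITFLAG) == mt) {
				return; /* "now serving" reached our ticket */
			}
			cpu_pause();
		}
		/* advertise a halting waiter, then actually go passive */
		os_atomic_or(&lck->cticket,
		    HW_LCK_TICKET_LOCK_PVWAITFLAG, relaxed);
		hw_lck_ticket_lock_wait_pv(lck, mt);
		/* kicked (or other interrupt): re-check, spin again */
	}
}
#endif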