xref: /xnu-10002.1.13/osfmk/i386/ucode.c (revision 1031c584a5e37aff177559b9f69dbd3c8c3fd30a)
1 /*
2  * Copyright (c) 2017-2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  *  ucode.c
30  *
31  *  Microcode updater interface sysctl
32  */
33 
34 #include <kern/locks.h>
35 #include <i386/ucode.h>
36 #include <sys/errno.h>
37 #include <i386/proc_reg.h>
38 #include <i386/cpuid.h>
39 #include <vm/vm_kern.h>
40 #include <i386/cpu_data.h> // mp_*_preemption
41 #include <i386/mp.h> // mp_cpus_call
42 #include <i386/commpage/commpage.h>
43 #include <i386/fpu.h>
44 #include <machine/cpu_number.h> // cpu_number
45 #include <pexpert/pexpert.h>  // boot-args
46 
#define IA32_BIOS_UPDT_TRIG (0x79) /* microcode update trigger MSR */

/*
 * Most-recently copied-in microcode update blob.  Set by copyin_update()
 * and read by every CPU when (re-)applying the update; intentionally
 * never freed — updates are cumulative and (outside DEBUG builds)
 * ucode_interface() only permits one update per boot.
 */
struct intel_ucupdate *global_update = NULL;
50 
/*
 * Execute the actual update: write the linear address of the update
 * payload into the BIOS update trigger MSR.  Caller must hold
 * ucode_slock (see cpu_apply_microcode()) and guarantee that
 * global_update is non-NULL.
 */
static void
update_microcode(void)
{
	/* SDM Example 9-8 code shows that we load the
	 * address of the UpdateData within the microcode blob,
	 * not the address of the header.
	 */
	wrmsr64(IA32_BIOS_UPDT_TRIG, (uint64_t)(uintptr_t)&global_update->data);
}
61 
62 /* locks */
63 static LCK_GRP_DECLARE(ucode_slock_grp, "uccode_lock");
64 static LCK_SPIN_DECLARE(ucode_slock, &ucode_slock_grp);
65 
66 /* Copy in an update */
67 static int
copyin_update(uint64_t inaddr)68 copyin_update(uint64_t inaddr)
69 {
70 	struct intel_ucupdate update_header;
71 	struct intel_ucupdate *update;
72 	vm_size_t size;
73 	kern_return_t ret;
74 	int error;
75 
76 	/* Copy in enough header to peek at the size */
77 	error = copyin((user_addr_t)inaddr, (void *)&update_header, sizeof(update_header));
78 	if (error) {
79 		return error;
80 	}
81 
82 	/* Get the actual, alleged size */
83 	size = update_header.total_size;
84 
85 	/* huge bogus piece of data that somehow made it through? */
86 	if (size >= 1024 * 1024) {
87 		return ENOMEM;
88 	}
89 
90 	/* Old microcodes? */
91 	if (size == 0) {
92 		size = 2048; /* default update size; see SDM */
93 	}
94 	/*
95 	 * create the buffer for the update
96 	 * It need only be aligned to 16-bytes, according to the SDM.
97 	 * This also wires it down
98 	 */
99 	ret = kmem_alloc(kernel_map, (vm_offset_t *)&update, size,
100 	    KMA_KOBJECT | KMA_DATA, VM_KERN_MEMORY_OSFMK);
101 	if (ret != KERN_SUCCESS) {
102 		return ENOMEM;
103 	}
104 
105 	/* Copy it in */
106 	error = copyin((user_addr_t)inaddr, (void*)update, size);
107 	if (error) {
108 		kmem_free(kernel_map, (vm_offset_t)update, size);
109 		return error;
110 	}
111 
112 	global_update = update;
113 	return 0;
114 }
115 
/*
 * Apply the pending microcode update on the calling CPU, holding
 * ucode_slock so that only one CPU writes the update trigger MSR
 * at a time.
 */
static void
cpu_apply_microcode(void)
{
	/* grab the lock */
	lck_spin_lock(&ucode_slock);

	/* execute the update */
	update_microcode();

	/* release the lock */
	lck_spin_unlock(&ucode_slock);
}
128 
/*
 * Per-CPU update routine (also used as an mp_cpus_call callback):
 * apply the microcode update first, then run cpuid_do_was()
 * (presumably the post-update CPU workarounds — the order microcode
 * first, workarounds second is deliberate).
 */
static void
cpu_update(__unused void *arg)
{
	cpu_apply_microcode();

	cpuid_do_was();
}
136 
137 /*
138  * This is called once by every CPU on a wake from sleep/hibernate
139  * and is meant to re-apply a microcode update that got lost
140  * by sleeping.
141  */
142 void
ucode_update_wake_and_apply_cpu_was()143 ucode_update_wake_and_apply_cpu_was()
144 {
145 	if (global_update) {
146 		kprintf("ucode: Re-applying update after wake (CPU #%d)\n", cpu_number());
147 		cpu_update(NULL);
148 	} else {
149 		cpuid_do_was();
150 #if DEBUG
151 		kprintf("ucode: No update to apply (CPU #%d)\n", cpu_number());
152 #endif
153 	}
154 }
155 
/*
 * Refresh the cached cpuid information after a microcode update.
 *
 * XCR0 is temporarily widened to the maximal supported feature mask so
 * that the xsave save-area sizes cached by cpuid_set_info() are
 * correct, then restored.  The whole sequence runs with interrupts
 * disabled so nothing can observe or clobber the transient XCR0 value.
 */
static void
ucode_cpuid_set_info(void)
{
	uint64_t saved_xcr0, dest_xcr0;
	int need_xcr0_restore = 0;
	boolean_t intrs_enabled = ml_set_interrupts_enabled(FALSE);

	/*
	 * Before we cache the CPUID information, we must configure XCR0 with the maximal set of
	 * features to ensure the save area returned in the xsave leaf is correctly-sized.
	 *
	 * Since we are guaranteed that init_fpu() has already happened, we can use state
	 * variables set there that were already predicated on the presence of explicit
	 * boot-args enables/disables.
	 */

	if (fpu_capability == AVX512 || fpu_capability == AVX) {
		saved_xcr0 = xgetbv(XCR0);
		dest_xcr0 = (fpu_capability == AVX512) ? AVX512_XMASK : AVX_XMASK;
		assert((get_cr4() & CR4_OSXSAVE) != 0);
		if (saved_xcr0 != dest_xcr0) {
			need_xcr0_restore = 1;
			xsetbv(dest_xcr0 >> 32, dest_xcr0 & 0xFFFFFFFFUL);
		}
	}

	cpuid_set_info();

	/* saved_xcr0 is only valid when need_xcr0_restore was set above */
	if (need_xcr0_restore) {
		xsetbv(saved_xcr0 >> 32, saved_xcr0 & 0xFFFFFFFFUL);
	}

	ml_set_interrupts_enabled(intrs_enabled);
}
190 
/* Farm an update out to all CPUs */
static void
xcpu_update(void)
{
	cpumask_t dest_cpumask;

	/*
	 * With preemption disabled, snapshot the set of target CPUs and
	 * apply the update on this CPU first.  CPUMASK_OTHERS is evaluated
	 * while we cannot migrate, so the "other CPUs" set stays accurate
	 * relative to the CPU that just updated itself.
	 */
	mp_disable_preemption();
	dest_cpumask = CPUMASK_OTHERS;
	cpu_apply_microcode();
	/* Update the cpuid info */
	ucode_cpuid_set_info();
	mp_enable_preemption();

	/* Get all other CPUs to perform the update */
	/*
	 * Calling mp_cpus_call with the ASYNC flag ensures that the
	 * IPI dispatch occurs in parallel, but that we will not
	 * proceed until all targeted CPUs complete the microcode
	 * update.
	 */
	mp_cpus_call(dest_cpumask, ASYNC, cpu_update, NULL);

	/* Update the commpage only after we update all CPUs' microcode */
	commpage_post_ucode_update();
}
216 
217 /*
218  * sysctl function
219  *
220  */
221 int
ucode_interface(uint64_t addr)222 ucode_interface(uint64_t addr)
223 {
224 	int error;
225 	char arg[16];
226 
227 	if (PE_parse_boot_argn("-x", arg, sizeof(arg))) {
228 		printf("ucode: no updates in safe mode\n");
229 		return EPERM;
230 	}
231 
232 #if !DEBUG
233 	/*
234 	 * Userland may only call this once per boot. Anything else
235 	 * would not make sense (all updates are cumulative), and also
236 	 * leak memory, because we don't free previous updates.
237 	 */
238 	if (global_update) {
239 		return EPERM;
240 	}
241 #endif
242 
243 	/* Get the whole microcode */
244 	error = copyin_update(addr);
245 
246 	if (error) {
247 		return error;
248 	}
249 
250 	/* Farm out the updates */
251 	xcpu_update();
252 
253 	return 0;
254 }
255