1 /*
2 * Copyright (c) 2000-2011 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach/kern_return.h>
30 #include <kern/zalloc.h>
31 #include <kern/cpu_number.h>
32 #include <kern/cpu_data.h>
33 #include <i386/cpuid.h>
34 #include <i386/mp.h>
35 #include <i386/proc_reg.h>
36 #include <i386/mtrr.h>
37 #include <i386/machine_check.h>
38
/*
 * Shadow copy of one variable-range MTRR register pair.  refcnt tracks
 * how many mtrr_range_add() callers share the slot; firmware-enabled
 * ranges start at 1 (see mtrr_get_var_ranges()).
 */
struct mtrr_var_range {
	uint64_t base;   /* in IA32_MTRR_PHYSBASE format */
	uint64_t mask;   /* in IA32_MTRR_PHYSMASK format */
	uint32_t refcnt; /* var ranges reference count */
};

/* Shadow copy of one fixed-range MTRR register (eight type bytes). */
struct mtrr_fix_range {
	uint64_t types; /* fixed-range type octet */
};

typedef struct mtrr_var_range mtrr_var_range_t;
typedef struct mtrr_fix_range mtrr_fix_range_t;

/*
 * Master copy of the boot processor's MTRR configuration, captured by
 * mtrr_init() and re-applied to other processors by mtrr_update_action()
 * to keep all CPUs consistent.
 */
static struct {
	uint64_t MTRRcap;               /* IA32_MTRRCAP MSR snapshot */
	uint64_t MTRRdefType;           /* IA32_MTRR_DEF_TYPE MSR snapshot */
	mtrr_var_range_t * var_range;   /* array of var_count entries */
	unsigned int var_count;         /* VCNT field from MTRRcap */
	mtrr_fix_range_t fix_range[11]; /* 1x64K + 2x16K + 8x4K registers */
} mtrr_state;

/* set once by mtrr_init(); all other entry points bail out until then */
static boolean_t mtrr_initialized = FALSE;

/* serializes mtrr_state changes and hardware reprogramming */
decl_simple_lock_data(static, mtrr_lock);
#define MTRR_LOCK()     simple_lock(&mtrr_lock, LCK_GRP_NULL);
#define MTRR_UNLOCK()   simple_unlock(&mtrr_lock);
65
//#define MTRR_DEBUG 1
#if MTRR_DEBUG
#define DBG(x...)       kprintf(x)
#else
#define DBG(x...)
#endif

/* Private functions */
static void mtrr_get_var_ranges(mtrr_var_range_t * range, int count);
static void mtrr_set_var_ranges(const mtrr_var_range_t * range, int count);
static void mtrr_get_fix_ranges(mtrr_fix_range_t * range);
static void mtrr_set_fix_ranges(const mtrr_fix_range_t * range);
static void mtrr_update_setup(void * param);
static void mtrr_update_teardown(void * param);
static void mtrr_update_action(void * param);
static void var_range_encode(mtrr_var_range_t * range, addr64_t address,
    uint64_t length, uint32_t type, int valid);
static int var_range_overlap(mtrr_var_range_t * range, addr64_t address,
    uint64_t length, uint32_t type);

/* rendezvous action selector: NULL reprograms MTRRs, non-NULL rewrites PAT */
#define CACHE_CONTROL_MTRR              (NULL)
#define CACHE_CONTROL_PAT               ((void *)1)

/*
 * MTRR MSR bit fields.
 */
#define IA32_MTRR_DEF_TYPE_MT           0x000000ff      /* default memory type */
#define IA32_MTRR_DEF_TYPE_FE           0x00000400      /* fixed-range enable */
#define IA32_MTRR_DEF_TYPE_E            0x00000800      /* MTRR enable */

#define IA32_MTRRCAP_VCNT               0x000000ff      /* # variable ranges */
#define IA32_MTRRCAP_FIX                0x00000100      /* fixed ranges supported */
#define IA32_MTRRCAP_WC                 0x00000400      /* write-combining supported */

/* 0 < bits <= 64 (two-step shift avoids UB when bits == 64) */
#define PHYS_BITS_TO_MASK(bits) \
	((((1ULL << (bits - 1)) - 1) << 1) | 1)

/*
 * Default mask for 36 physical address bits, this can
 * change depending on the cpu model.
 */
static uint64_t mtrr_phys_mask = PHYS_BITS_TO_MASK(36);

#define IA32_MTRR_PHYMASK_VALID         0x0000000000000800ULL
#define IA32_MTRR_PHYSBASE_MASK         (mtrr_phys_mask & ~0x0000000000000FFFULL)
#define IA32_MTRR_PHYSBASE_TYPE         0x00000000000000FFULL

/*
 * Variable-range mask to/from length conversions.
 */
#define MASK_TO_LEN(mask) \
	((~((mask) & IA32_MTRR_PHYSBASE_MASK) & mtrr_phys_mask) + 1)

#define LEN_TO_MASK(len)  \
	(~((len) - 1) & IA32_MTRR_PHYSBASE_MASK)

/* lowest set bit of x (x must be nonzero) */
#define LSB(x)          ((x) & (~((x) - 1)))
124
125 /*
126 * Fetch variable-range MTRR register pairs.
127 */
128 static void
mtrr_get_var_ranges(mtrr_var_range_t * range,int count)129 mtrr_get_var_ranges(mtrr_var_range_t * range, int count)
130 {
131 int i;
132
133 for (i = 0; i < count; i++) {
134 range[i].base = rdmsr64(MSR_IA32_MTRR_PHYSBASE(i));
135 range[i].mask = rdmsr64(MSR_IA32_MTRR_PHYSMASK(i));
136
137 /* bump ref count for firmware configured ranges */
138 if (range[i].mask & IA32_MTRR_PHYMASK_VALID) {
139 range[i].refcnt = 1;
140 } else {
141 range[i].refcnt = 0;
142 }
143 }
144 }
145
146 /*
147 * Update variable-range MTRR register pairs.
148 */
149 static void
mtrr_set_var_ranges(const mtrr_var_range_t * range,int count)150 mtrr_set_var_ranges(const mtrr_var_range_t * range, int count)
151 {
152 int i;
153
154 for (i = 0; i < count; i++) {
155 wrmsr64(MSR_IA32_MTRR_PHYSBASE(i), range[i].base);
156 wrmsr64(MSR_IA32_MTRR_PHYSMASK(i), range[i].mask);
157 }
158 }
159
160 /*
161 * Fetch all fixed-range MTRR's. Note MSR offsets are not consecutive.
162 */
163 static void
mtrr_get_fix_ranges(mtrr_fix_range_t * range)164 mtrr_get_fix_ranges(mtrr_fix_range_t * range)
165 {
166 int i;
167
168 /* assume 11 fix range registers */
169 range[0].types = rdmsr64(MSR_IA32_MTRR_FIX64K_00000);
170 range[1].types = rdmsr64(MSR_IA32_MTRR_FIX16K_80000);
171 range[2].types = rdmsr64(MSR_IA32_MTRR_FIX16K_A0000);
172 for (i = 0; i < 8; i++) {
173 range[3 + i].types = rdmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i);
174 }
175 }
176
177 /*
178 * Update all fixed-range MTRR's.
179 */
180 static void
mtrr_set_fix_ranges(const struct mtrr_fix_range * range)181 mtrr_set_fix_ranges(const struct mtrr_fix_range * range)
182 {
183 int i;
184
185 /* assume 11 fix range registers */
186 wrmsr64(MSR_IA32_MTRR_FIX64K_00000, range[0].types);
187 wrmsr64(MSR_IA32_MTRR_FIX16K_80000, range[1].types);
188 wrmsr64(MSR_IA32_MTRR_FIX16K_A0000, range[2].types);
189 for (i = 0; i < 8; i++) {
190 wrmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i, range[3 + i].types);
191 }
192 }
193
194 static boolean_t
mtrr_check_fix_ranges(const struct mtrr_fix_range * range)195 mtrr_check_fix_ranges(const struct mtrr_fix_range * range)
196 {
197 int i;
198 boolean_t match = TRUE;
199
200 DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
201
202 /* assume 11 fix range registers */
203 match = range[0].types == rdmsr64(MSR_IA32_MTRR_FIX64K_00000) &&
204 range[1].types == rdmsr64(MSR_IA32_MTRR_FIX16K_80000) &&
205 range[2].types == rdmsr64(MSR_IA32_MTRR_FIX16K_A0000);
206 for (i = 0; match && i < 8; i++) {
207 match = range[3 + i].types ==
208 rdmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i);
209 }
210
211 return match;
212 }
213
214 static boolean_t
mtrr_check_var_ranges(mtrr_var_range_t * range,int count)215 mtrr_check_var_ranges(mtrr_var_range_t * range, int count)
216 {
217 int i;
218 boolean_t match = TRUE;
219
220 DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
221
222 for (i = 0; match && i < count; i++) {
223 match = range[i].base == rdmsr64(MSR_IA32_MTRR_PHYSBASE(i)) &&
224 range[i].mask == rdmsr64(MSR_IA32_MTRR_PHYSMASK(i));
225 }
226
227 return match;
228 }
229
#if MTRR_DEBUG
/*
 * Dump the live MTRR MSR contents to the kernel log.
 * Debug builds only; reads the hardware directly, not mtrr_state.
 */
static void
mtrr_msr_dump(void)
{
	int i;
	/* variable-range count comes from the low byte of MTRRcap */
	int count = rdmsr64(MSR_IA32_MTRRCAP) & IA32_MTRRCAP_VCNT;

	DBG("VAR -- BASE -------------- MASK -------------- SIZE\n");
	for (i = 0; i < count; i++) {
		DBG(" %02x    0x%016llx  0x%016llx  0x%llx\n", i,
		    rdmsr64(MSR_IA32_MTRR_PHYSBASE(i)),
		    rdmsr64(MSR_IA32_MTRR_PHYSMASK(i)),
		    MASK_TO_LEN(rdmsr64(MSR_IA32_MTRR_PHYSMASK(i))));
	}
	DBG("\n");

	DBG("FIX64K_00000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX64K_00000));
	DBG("FIX16K_80000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX16K_80000));
	DBG("FIX16K_A0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX16K_A0000));
	DBG(" FIX4K_C0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_C0000));
	DBG(" FIX4K_C8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_C8000));
	DBG(" FIX4K_D0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_D0000));
	DBG(" FIX4K_D8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_D8000));
	DBG(" FIX4K_E0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_E0000));
	DBG(" FIX4K_E8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_E8000));
	DBG(" FIX4K_F0000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_F0000));
	DBG(" FIX4K_F8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_F8000));

	DBG("\nMTRRcap = 0x%llx MTRRdefType = 0x%llx\n",
	    rdmsr64(MSR_IA32_MTRRCAP), rdmsr64(MSR_IA32_MTRR_DEF_TYPE));
}
#endif /* MTRR_DEBUG */
262
263 /*
264 * Called by the boot processor (BP) early during boot to initialize MTRR
265 * support. The MTRR state on the BP is saved, any additional processors
266 * will have the same settings applied to ensure MTRR consistency.
267 */
268 void
mtrr_init(void)269 mtrr_init(void)
270 {
271 /* no reason to init more than once */
272 if (mtrr_initialized == TRUE) {
273 return;
274 }
275
276 /* check for presence of MTRR feature on the processor */
277 if ((cpuid_features() & CPUID_FEATURE_MTRR) == 0) {
278 return; /* no MTRR feature */
279 }
280 /* use a lock to serialize MTRR changes */
281 bzero((void *)&mtrr_state, sizeof(mtrr_state));
282 simple_lock_init(&mtrr_lock, 0);
283
284 mtrr_state.MTRRcap = rdmsr64(MSR_IA32_MTRRCAP);
285 mtrr_state.MTRRdefType = rdmsr64(MSR_IA32_MTRR_DEF_TYPE);
286 mtrr_state.var_count = (unsigned int)(mtrr_state.MTRRcap & IA32_MTRRCAP_VCNT);
287
288 /* allocate storage for variable ranges (can block?) */
289 if (mtrr_state.var_count) {
290 mtrr_state.var_range = (mtrr_var_range_t *)
291 zalloc_permanent_tag(sizeof(mtrr_var_range_t) *
292 mtrr_state.var_count, ZALIGN(mtrr_var_range_t),
293 VM_KERN_MEMORY_CPU);
294 if (mtrr_state.var_range == NULL) {
295 mtrr_state.var_count = 0;
296 }
297 }
298
299 /* fetch the initial firmware configured variable ranges */
300 if (mtrr_state.var_count) {
301 mtrr_get_var_ranges(mtrr_state.var_range,
302 mtrr_state.var_count);
303 }
304
305 /* fetch the initial firmware configured fixed ranges */
306 if (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX) {
307 mtrr_get_fix_ranges(mtrr_state.fix_range);
308 }
309
310 mtrr_initialized = TRUE;
311
312 #if MTRR_DEBUG
313 mtrr_msr_dump(); /* dump firmware settings */
314 #endif
315 }
316
317 /*
318 * Performs the Intel recommended procedure for changing the MTRR
319 * in a MP system. Leverage rendezvous mechanism for the required
320 * barrier synchronization among all processors. This function is
321 * called from the rendezvous IPI handler, and mtrr_update_cpu().
322 */
323 static void
mtrr_update_action(void * cache_control_type)324 mtrr_update_action(void * cache_control_type)
325 {
326 uintptr_t cr0, cr4;
327 uintptr_t tmp;
328
329 cr0 = get_cr0();
330 cr4 = get_cr4();
331
332 /* enter no-fill cache mode */
333 tmp = cr0 | CR0_CD;
334 tmp &= ~CR0_NW;
335 set_cr0(tmp);
336
337 /* flush caches */
338 wbinvd();
339
340 /* clear the PGE flag in CR4 */
341 if (cr4 & CR4_PGE) {
342 set_cr4(cr4 & ~CR4_PGE);
343 } else {
344 set_cr3_raw(get_cr3_raw());
345 }
346
347 if (CACHE_CONTROL_PAT == cache_control_type) {
348 /* Change PA6 attribute field to WC */
349 uint64_t pat = rdmsr64(MSR_IA32_CR_PAT);
350 DBG("CPU%d PAT: was 0x%016llx\n", get_cpu_number(), pat);
351 /*
352 * Intel doc states:
353 * "The IA32_PAT MSR contains eight page attribute fields: PA0 through PA7.
354 * The three low-order bits of each field are used to specify a memory type.
355 * The five high-order bits of each field are reserved, and must be set to all 0s."
356 * So, we zero-out the high 5 bits of the PA6 entry here:
357 */
358 pat &= ~(0xFFULL << 48);
359 pat |= (0x01ULL << 48);
360 wrmsr64(MSR_IA32_CR_PAT, pat);
361 DBG("CPU%d PAT: is 0x%016llx\n",
362 get_cpu_number(), rdmsr64(MSR_IA32_CR_PAT));
363 } else {
364 /* disable all MTRR ranges */
365 wrmsr64(MSR_IA32_MTRR_DEF_TYPE,
366 mtrr_state.MTRRdefType & ~IA32_MTRR_DEF_TYPE_E);
367
368 /* apply MTRR settings */
369 if (mtrr_state.var_count) {
370 mtrr_set_var_ranges(mtrr_state.var_range,
371 mtrr_state.var_count);
372 }
373
374 if (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX) {
375 mtrr_set_fix_ranges(mtrr_state.fix_range);
376 }
377
378 /* enable all MTRR range registers (what if E was not set?) */
379 wrmsr64(MSR_IA32_MTRR_DEF_TYPE,
380 mtrr_state.MTRRdefType | IA32_MTRR_DEF_TYPE_E);
381 }
382
383 /* flush all caches and TLBs a second time */
384 wbinvd();
385 set_cr3_raw(get_cr3_raw());
386 /* restore normal cache mode */
387 set_cr0(cr0);
388
389 /* restore PGE flag */
390 if (cr4 & CR4_PGE) {
391 set_cr4(cr4);
392 }
393
394 DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
395 }
396
/*
 * Rendezvous setup hook: runs on every CPU before mtrr_update_action().
 * Saves the current interrupt state per-CPU and disables interrupts so
 * the cache/MSR reprogramming sequence cannot be interrupted.
 */
static void
mtrr_update_setup(__unused void * param_not_used)
{
	/* disable interrupts before the first barrier */
	current_cpu_datap()->cpu_iflag = ml_set_interrupts_enabled(FALSE);
	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}
404
/*
 * Rendezvous teardown hook: runs on every CPU after mtrr_update_action().
 * Restores the interrupt state saved by mtrr_update_setup().
 */
static void
mtrr_update_teardown(__unused void * param_not_used)
{
	/* restore interrupt flag following MTRR changes */
	ml_set_interrupts_enabled(current_cpu_datap()->cpu_iflag);
	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}
412
413 /*
414 * Update MTRR settings on all processors.
415 */
416 kern_return_t
mtrr_update_all_cpus(void)417 mtrr_update_all_cpus(void)
418 {
419 if (mtrr_initialized == FALSE) {
420 return KERN_NOT_SUPPORTED;
421 }
422
423 MTRR_LOCK();
424 mp_rendezvous(mtrr_update_setup,
425 mtrr_update_action,
426 mtrr_update_teardown, NULL);
427 MTRR_UNLOCK();
428
429 return KERN_SUCCESS;
430 }
431
432 /*
433 * Verify that a processor has been set with the BSP's MTRR settings. Called
434 * during slave processor initialization to check and set MTRR settings
435 * discovered on the boot processor by mtrr_init().
436 */
437 kern_return_t
mtrr_update_cpu(void)438 mtrr_update_cpu(void)
439 {
440 boolean_t match = TRUE;
441
442 if (mtrr_initialized == FALSE) {
443 return KERN_NOT_SUPPORTED;
444 }
445
446 DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
447
448 MTRR_LOCK();
449
450 /* Check MSR_IA32_MTRR_DEF_TYPE MSR */
451 match = mtrr_state.MTRRdefType == rdmsr64(MSR_IA32_MTRR_DEF_TYPE);
452
453 /* Check MSR_IA32_MTRRCAP MSR */
454 if (match) {
455 match = mtrr_state.MTRRcap == rdmsr64(MSR_IA32_MTRRCAP);
456 }
457
458 /* Check variable ranges */
459 if (match && mtrr_state.var_count) {
460 match = mtrr_check_var_ranges(mtrr_state.var_range,
461 mtrr_state.var_count);
462 }
463
464 /* Check fixed ranges */
465 if (match && (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX)) {
466 match = mtrr_check_fix_ranges(mtrr_state.fix_range);
467 }
468
469 #if MTRR_DEBUG
470 if (!match) {
471 mtrr_msr_dump();
472 }
473 #endif
474 if (!match) {
475 DBG("mtrr_update_cpu() setting MTRR for cpu %d\n",
476 get_cpu_number());
477 mtrr_update_action(NULL);
478 }
479 #if MTRR_DEBUG
480 if (!match) {
481 mtrr_msr_dump();
482 }
483 #endif
484
485 MTRR_UNLOCK();
486
487 return KERN_SUCCESS;
488 }
489
490 /*
491 * Add a MTRR range to associate the physical memory range specified
492 * with a given memory caching type.
493 */
494 kern_return_t
mtrr_range_add(addr64_t address,uint64_t length,uint32_t type)495 mtrr_range_add(addr64_t address, uint64_t length, uint32_t type)
496 {
497 mtrr_var_range_t * vr;
498 mtrr_var_range_t * free_range;
499 kern_return_t ret = KERN_NO_SPACE;
500 int overlap;
501 unsigned int i;
502
503 DBG("mtrr_range_add base = 0x%llx, size = 0x%llx, type = %d\n",
504 address, length, type);
505
506 if (mtrr_initialized == FALSE) {
507 return KERN_NOT_SUPPORTED;
508 }
509
510 /* check memory type (GPF exception for undefined types) */
511 if ((type != MTRR_TYPE_UNCACHEABLE) &&
512 (type != MTRR_TYPE_WRITECOMBINE) &&
513 (type != MTRR_TYPE_WRITETHROUGH) &&
514 (type != MTRR_TYPE_WRITEPROTECT) &&
515 (type != MTRR_TYPE_WRITEBACK)) {
516 return KERN_INVALID_ARGUMENT;
517 }
518
519 /* check WC support if requested */
520 if ((type == MTRR_TYPE_WRITECOMBINE) &&
521 (mtrr_state.MTRRcap & IA32_MTRRCAP_WC) == 0) {
522 return KERN_NOT_SUPPORTED;
523 }
524
525 /* leave the fix range area below 1MB alone */
526 if (address < 0x100000 || mtrr_state.var_count == 0) {
527 return KERN_NOT_SUPPORTED;
528 }
529
530 /*
531 * Length must be a power of 2 given by 2^n, where n >= 12.
532 * Base address alignment must be larger than or equal to length.
533 */
534 if ((length < 0x1000) ||
535 (LSB(length) != length) ||
536 (address && (length > LSB(address)))) {
537 return KERN_INVALID_ARGUMENT;
538 }
539
540 MTRR_LOCK();
541
542 /*
543 * Check for overlap and locate a free range.
544 */
545 for (i = 0, free_range = NULL; i < mtrr_state.var_count; i++) {
546 vr = &mtrr_state.var_range[i];
547
548 if (vr->refcnt == 0) {
549 /* free range candidate if no overlaps are found */
550 free_range = vr;
551 continue;
552 }
553
554 overlap = var_range_overlap(vr, address, length, type);
555 if (overlap > 0) {
556 /*
557 * identical overlap permitted, increment ref count.
558 * no hardware update required.
559 */
560 free_range = vr;
561 break;
562 }
563 if (overlap < 0) {
564 /* unsupported overlapping of memory types */
565 free_range = NULL;
566 break;
567 }
568 }
569
570 if (free_range) {
571 if (free_range->refcnt++ == 0) {
572 var_range_encode(free_range, address, length, type, 1);
573 mp_rendezvous(mtrr_update_setup,
574 mtrr_update_action,
575 mtrr_update_teardown, NULL);
576 }
577 ret = KERN_SUCCESS;
578 }
579
580 #if MTRR_DEBUG
581 mtrr_msr_dump();
582 #endif
583
584 MTRR_UNLOCK();
585
586 return ret;
587 }
588
589 /*
590 * Remove a previously added MTRR range. The same arguments used for adding
591 * the memory range must be supplied again.
592 */
593 kern_return_t
mtrr_range_remove(addr64_t address,uint64_t length,uint32_t type)594 mtrr_range_remove(addr64_t address, uint64_t length, uint32_t type)
595 {
596 mtrr_var_range_t * vr;
597 int result = KERN_FAILURE;
598 int cpu_update = 0;
599 unsigned int i;
600
601 DBG("mtrr_range_remove base = 0x%llx, size = 0x%llx, type = %d\n",
602 address, length, type);
603
604 if (mtrr_initialized == FALSE) {
605 return KERN_NOT_SUPPORTED;
606 }
607
608 MTRR_LOCK();
609
610 for (i = 0; i < mtrr_state.var_count; i++) {
611 vr = &mtrr_state.var_range[i];
612
613 if (vr->refcnt &&
614 var_range_overlap(vr, address, length, type) > 0) {
615 /* found specified variable range */
616 if (--mtrr_state.var_range[i].refcnt == 0) {
617 var_range_encode(vr, address, length, type, 0);
618 cpu_update = 1;
619 }
620 result = KERN_SUCCESS;
621 break;
622 }
623 }
624
625 if (cpu_update) {
626 mp_rendezvous(mtrr_update_setup,
627 mtrr_update_action,
628 mtrr_update_teardown, NULL);
629 result = KERN_SUCCESS;
630 }
631
632 #if MTRR_DEBUG
633 mtrr_msr_dump();
634 #endif
635
636 MTRR_UNLOCK();
637
638 return result;
639 }
640
641 /*
642 * Variable range helper routines
643 */
644 static void
var_range_encode(mtrr_var_range_t * range,addr64_t address,uint64_t length,uint32_t type,int valid)645 var_range_encode(mtrr_var_range_t * range, addr64_t address,
646 uint64_t length, uint32_t type, int valid)
647 {
648 range->base = (address & IA32_MTRR_PHYSBASE_MASK) |
649 (type & (uint32_t)IA32_MTRR_PHYSBASE_TYPE);
650
651 range->mask = LEN_TO_MASK(length) |
652 (valid ? IA32_MTRR_PHYMASK_VALID : 0);
653 }
654
655 static int
var_range_overlap(mtrr_var_range_t * range,addr64_t address,uint64_t length,uint32_t type)656 var_range_overlap(mtrr_var_range_t * range, addr64_t address,
657 uint64_t length, uint32_t type)
658 {
659 uint64_t v_address, v_length;
660 uint32_t v_type;
661 int result = 0; /* no overlap, or overlap ok */
662
663 v_address = range->base & IA32_MTRR_PHYSBASE_MASK;
664 v_type = (uint32_t)(range->base & IA32_MTRR_PHYSBASE_TYPE);
665 v_length = MASK_TO_LEN(range->mask);
666
667 /* detect range overlap */
668 if ((v_address >= address && v_address < (address + length)) ||
669 (address >= v_address && address < (v_address + v_length))) {
670 if (v_address == address && v_length == length && v_type == type) {
671 result = 1; /* identical overlap ok */
672 } else if (v_type == MTRR_TYPE_UNCACHEABLE &&
673 type == MTRR_TYPE_UNCACHEABLE) {
674 /* UC ranges can overlap */
675 } else if ((v_type == MTRR_TYPE_UNCACHEABLE &&
676 type == MTRR_TYPE_WRITEBACK) ||
677 (v_type == MTRR_TYPE_WRITEBACK &&
678 type == MTRR_TYPE_UNCACHEABLE)) {
679 /* UC/WB can overlap - effective type becomes UC */
680 } else {
681 /* anything else may cause undefined behavior */
682 result = -1;
683 }
684 }
685
686 return result;
687 }
688
689 /*
690 * Initialize PAT (Page Attribute Table)
691 */
692 void
pat_init(void)693 pat_init(void)
694 {
695 boolean_t istate;
696 uint64_t pat;
697
698 if (!(cpuid_features() & CPUID_FEATURE_PAT)) {
699 return;
700 }
701
702 istate = ml_set_interrupts_enabled(FALSE);
703
704 pat = rdmsr64(MSR_IA32_CR_PAT);
705 DBG("CPU%d PAT: was 0x%016llx\n", get_cpu_number(), pat);
706
707 /* Change PA6 attribute field to WC if required */
708 if ((pat & (0x07ULL << 48)) != (0x01ULL << 48)) {
709 mtrr_update_action(CACHE_CONTROL_PAT);
710 }
711 ml_set_interrupts_enabled(istate);
712 }
713