xref: /xnu-8020.101.4/osfmk/arm/pcb.c (revision e7776783b89a353188416a9a346c6cdb4928faad)
1 /*
2  * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #include <debug.h>
29 
30 #include <types.h>
31 
32 #include <mach/mach_types.h>
33 #include <mach/thread_status.h>
34 #include <mach/vm_types.h>
35 
36 #include <kern/kern_types.h>
37 #include <kern/task.h>
38 #include <kern/thread.h>
39 #include <kern/misc_protos.h>
40 #include <kern/mach_param.h>
41 #include <kern/spl.h>
42 #include <kern/machine.h>
43 #include <kern/kpc.h>
44 
45 #include <arm/proc_reg.h>
46 #include <arm/cpu_data_internal.h>
47 #include <arm/misc_protos.h>
48 #include <arm/cpuid.h>
49 
50 #include <vm/vm_map.h>
51 #include <vm/vm_protos.h>
52 
53 #include <sys/kdebug.h>
54 
55 #include <san/kcov_stksz.h>
56 
57 
58 extern int      debug_task;
59 
60 /* zone for debug_state area */
61 ZONE_DEFINE_TYPE(ads_zone, "arm debug state", arm_debug_state_t, ZC_NONE);
62 
/*
 * Routine:	consider_machine_collect
 *
 * Machine-dependent garbage-collection hook: gives the physical map
 * layer a chance to reclaim unused resources.
 */
void
consider_machine_collect(void)
{
	pmap_gc();
}
72 
/*
 * Routine:	consider_machine_adjust
 *
 * Machine-dependent adjustment hook; nothing to do on this platform.
 */
void
consider_machine_adjust(void)
{
}
81 
82 static inline void
machine_thread_switch_cpu_data(thread_t old,thread_t new)83 machine_thread_switch_cpu_data(thread_t old, thread_t new)
84 {
85 	/*
86 	 * We build with -fno-strict-aliasing, so the load through temporaries
87 	 * is required so that this generates a single load / store pair.
88 	 */
89 	cpu_data_t *datap = old->machine.CpuDatap;
90 	vm_offset_t base  = old->machine.pcpu_data_base;
91 
92 	/* TODO: Should this be ordered? */
93 
94 	/*
95 	 * arm relies on CpuDatap being set for a thread that has run,
96 	 * so we only reset pcpu_data_base.
97 	 */
98 	old->machine.pcpu_data_base = -1;
99 
100 	new->machine.CpuDatap = datap;
101 	new->machine.pcpu_data_base = base;
102 }
103 
104 /*
105  * Routine:	machine_switch_context
106  *
107  */
108 thread_t
machine_switch_context(thread_t old,thread_continue_t continuation,thread_t new)109 machine_switch_context(
110 	thread_t old,
111 	thread_continue_t continuation,
112 	thread_t new)
113 {
114 	thread_t retval;
115 
116 #define machine_switch_context_kprintf(x...) \
117 	/* kprintf("machine_switch_context: " x) */
118 
119 	if (old == new) {
120 		panic("machine_switch_context");
121 	}
122 
123 	kpc_off_cpu(old);
124 
125 	/*
126 	 * If the thread is preempted while performing cache or TLB maintenance,
127 	 * it may be migrated to a different CPU between the completion of the relevant
128 	 * maintenance instruction and the synchronizing DSB.   ARM requires that the
129 	 * synchronizing DSB must be issued *on the PE that issued the maintenance instruction*
130 	 * in order to guarantee completion of the instruction and visibility of its effects.
131 	 * Issue DSB here to enforce that guarantee.  Note that due to __ARM_USER_PROTECT__,
132 	 * pmap_set_pmap() will not update TTBR0 (which ordinarily would include DSB).
133 	 */
134 	__builtin_arm_dsb(DSB_ISH);
135 	pmap_set_pmap(new->map->pmap, new);
136 
137 	machine_thread_switch_cpu_data(old, new);
138 
139 	machine_switch_context_kprintf("old= %x contination = %x new = %x\n", old, continuation, new);
140 	retval = Switch_context(old, continuation, new);
141 	assert(retval != NULL);
142 
143 	return retval;
144 }
145 
146 boolean_t
machine_thread_on_core(thread_t thread)147 machine_thread_on_core(thread_t thread)
148 {
149 	return thread->machine.pcpu_data_base != -1;
150 }
151 
152 /*
153  * Routine:	machine_thread_create
154  *
155  */
156 void
machine_thread_create(thread_t thread,task_t task,bool first_thread)157 machine_thread_create(thread_t thread, task_t task, bool first_thread)
158 {
159 #define machine_thread_create_kprintf(x...)     /* kprintf("machine_thread_create: " x) */
160 
161 	machine_thread_create_kprintf("thread = %x\n", thread);
162 
163 	if (!first_thread) {
164 		thread->machine.CpuDatap = (cpu_data_t *)0;
165 		// setting this offset will cause trying to use it to panic
166 		thread->machine.pcpu_data_base = -1;
167 	}
168 	thread->machine.preemption_count = 0;
169 	thread->machine.cthread_self = 0;
170 #if     __ARM_USER_PROTECT__
171 	{
172 		struct pmap *new_pmap = vm_map_pmap(task->map);
173 
174 		thread->machine.kptw_ttb = ((unsigned int) kernel_pmap->ttep) | TTBR_SETUP;
175 		thread->machine.asid = new_pmap->hw_asid;
176 		thread->machine.uptw_ttb = ((unsigned int) new_pmap->ttep) | TTBR_SETUP;
177 	}
178 #else
179 	(void)task;
180 #endif
181 	machine_thread_state_initialize(thread);
182 }
183 
184 /*
185  * Routine:	machine_thread_destroy
186  *
187  */
188 void
machine_thread_destroy(thread_t thread)189 machine_thread_destroy(
190 	thread_t thread)
191 {
192 	if (thread->machine.DebugData != NULL) {
193 		if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) {
194 			arm_debug_set(NULL);
195 		}
196 		zfree(ads_zone, thread->machine.DebugData);
197 	}
198 }
199 
200 
/*
 * Routine:	machine_thread_init
 *
 * One-time machine-dependent thread-subsystem init; nothing needed here.
 */
void
machine_thread_init(void)
{
}
209 
210 /*
211  * Routine:	machine_thread_template_init
212  *
213  */
214 void
machine_thread_template_init(thread_t __unused thr_template)215 machine_thread_template_init(thread_t __unused thr_template)
216 {
217 	/* Nothing to do on this platform. */
218 }
219 
220 /*
221  * Routine:	get_useraddr
222  *
223  */
224 user_addr_t
get_useraddr()225 get_useraddr()
226 {
227 	return current_thread()->machine.PcbData.pc;
228 }
229 
230 /*
231  * Routine:	machine_stack_detach
232  *
233  */
234 vm_offset_t
machine_stack_detach(thread_t thread)235 machine_stack_detach(
236 	thread_t thread)
237 {
238 	vm_offset_t     stack;
239 
240 	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
241 	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);
242 
243 	stack = thread->kernel_stack;
244 #if CONFIG_KCOV
245 	kcov_stksz_set_thread_stack(thread, stack);
246 #endif
247 	thread->kernel_stack = 0;
248 	thread->machine.kstackptr = 0;
249 
250 	return stack;
251 }
252 
253 
254 /*
255  * Routine:	machine_stack_attach
256  *
257  */
258 void
machine_stack_attach(thread_t thread,vm_offset_t stack)259 machine_stack_attach(
260 	thread_t thread,
261 	vm_offset_t stack)
262 {
263 	struct arm_saved_state *savestate;
264 
265 #define machine_stack_attach_kprintf(x...)      /* kprintf("machine_stack_attach: " x) */
266 
267 	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
268 	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);
269 
270 	thread->kernel_stack = stack;
271 #if CONFIG_KCOV
272 	kcov_stksz_set_thread_stack(thread, 0);
273 #endif
274 	thread->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
275 	thread_initialize_kernel_state(thread);
276 	savestate = (struct arm_saved_state *) thread->machine.kstackptr;
277 
278 	savestate->lr = (uint32_t) thread_continue;
279 	savestate->sp = thread->machine.kstackptr;
280 	savestate->r[7] = 0x0UL;
281 	savestate->r[9] = (uint32_t) NULL;
282 	savestate->cpsr = PSR_SVC_MODE | PSR_INTMASK;
283 	vfp_state_initialize(&savestate->VFPdata);
284 	machine_stack_attach_kprintf("thread = %x pc = %x, sp = %x\n", thread, savestate->lr, savestate->sp);
285 }
286 
287 
288 /*
289  * Routine:	machine_stack_handoff
290  *
291  */
292 void
machine_stack_handoff(thread_t old,thread_t new)293 machine_stack_handoff(
294 	thread_t old,
295 	thread_t new)
296 {
297 	vm_offset_t     stack;
298 
299 	kpc_off_cpu(old);
300 
301 	stack = machine_stack_detach(old);
302 	new->kernel_stack = stack;
303 #if CONFIG_KCOV
304 	kcov_stksz_set_thread_stack(new, 0);
305 #endif
306 	new->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
307 	if (stack == old->reserved_stack) {
308 		assert(new->reserved_stack);
309 		old->reserved_stack = new->reserved_stack;
310 		new->reserved_stack = stack;
311 	}
312 
313 	/*
314 	 * If the thread is preempted while performing cache or TLB maintenance,
315 	 * it may be migrated to a different CPU between the completion of the relevant
316 	 * maintenance instruction and the synchronizing DSB.   ARM requires that the
317 	 * synchronizing DSB must be issued *on the PE that issued the maintenance instruction*
318 	 * in order to guarantee completion of the instruction and visibility of its effects.
319 	 * Issue DSB here to enforce that guarantee.  Note that due to __ARM_USER_PROTECT__,
320 	 * pmap_set_pmap() will not update TTBR0 (which ordinarily would include DSB).
321 	 */
322 	__builtin_arm_dsb(DSB_ISH);
323 	pmap_set_pmap(new->map->pmap, new);
324 
325 	machine_thread_switch_cpu_data(old, new);
326 
327 	machine_set_current_thread(new);
328 	thread_initialize_kernel_state(new);
329 }
330 
331 
332 /*
333  * Routine:	call_continuation
334  *
335  */
336 void
call_continuation(thread_continue_t continuation,void * parameter,wait_result_t wresult,boolean_t enable_interrupts)337 call_continuation(
338 	thread_continue_t continuation,
339 	void *parameter,
340 	wait_result_t wresult,
341 	boolean_t enable_interrupts)
342 {
343 #define call_continuation_kprintf(x...) /* kprintf("call_continuation_kprintf:
344 	                                 *  " x) */
345 
346 	call_continuation_kprintf("thread = %x continuation = %x, stack = %x\n", current_thread(), continuation, current_thread()->machine.kstackptr);
347 	Call_continuation(continuation, parameter, wresult, enable_interrupts);
348 }
349 
350 void
arm_debug_set(arm_debug_state_t * debug_state)351 arm_debug_set(arm_debug_state_t *debug_state)
352 {
353 	/* If this CPU supports the memory-mapped debug interface, use it, otherwise
354 	 * attempt the Extended CP14 interface.  The two routines need to be kept in sync,
355 	 * functionality-wise.
356 	 */
357 	struct cpu_data *cpu_data_ptr;
358 	arm_debug_info_t *debug_info = arm_debug_info();
359 	boolean_t       intr;
360 
361 	intr = ml_set_interrupts_enabled(FALSE);
362 	cpu_data_ptr = getCpuDatap();
363 
364 	// Set current user debug
365 	cpu_data_ptr->cpu_user_debug = debug_state;
366 
367 	if (debug_info->memory_mapped_core_debug) {
368 		int i;
369 		uintptr_t debug_map = cpu_data_ptr->cpu_debug_interface_map;
370 
371 		// unlock debug registers
372 		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
373 
374 		// read DBGPRSR to clear the sticky power-down bit (necessary to access debug registers)
375 		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGPRSR);
376 
377 		// enable monitor mode (needed to set and use debug registers)
378 		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGDSCR) |= ARM_DBGDSCR_MDBGEN;
379 
380 		// first turn off all breakpoints/watchpoints
381 		for (i = 0; i < 16; i++) {
382 			((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGBCR))[i] = 0;
383 			((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGWCR))[i] = 0;
384 		}
385 
386 		// if (debug_state == NULL) disable monitor mode
387 		if (debug_state == NULL) {
388 			*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGDSCR) &= ~ARM_DBGDSCR_MDBGEN;
389 		} else {
390 			for (i = 0; i < 16; i++) {
391 				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGBVR))[i] = debug_state->bvr[i];
392 				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGBCR))[i] = debug_state->bcr[i];
393 				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGWVR))[i] = debug_state->wvr[i];
394 				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGWCR))[i] = debug_state->wcr[i];
395 			}
396 		}
397 
398 		// lock debug registers
399 		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGLAR) = 0;
400 	} else if (debug_info->coprocessor_core_debug) {
401 		arm_debug_set_cp14(debug_state);
402 	}
403 
404 	(void) ml_set_interrupts_enabled(intr);
405 }
406 
407 /*
408  * Duplicate one arm_debug_state_t to another.  "all" parameter
409  * is ignored in the case of ARM -- Is this the right assumption?
410  */
411 void
copy_debug_state(arm_debug_state_t * src,arm_debug_state_t * target,__unused boolean_t all)412 copy_debug_state(
413 	arm_debug_state_t *src,
414 	arm_debug_state_t *target,
415 	__unused boolean_t all)
416 {
417 	bcopy(src, target, sizeof(arm_debug_state_t));
418 }
419 
420 kern_return_t
machine_thread_set_tsd_base(thread_t thread,mach_vm_offset_t tsd_base)421 machine_thread_set_tsd_base(
422 	thread_t                        thread,
423 	mach_vm_offset_t        tsd_base)
424 {
425 	if (get_threadtask(thread) == kernel_task) {
426 		return KERN_INVALID_ARGUMENT;
427 	}
428 
429 	if (tsd_base & 0x3) {
430 		return KERN_INVALID_ARGUMENT;
431 	}
432 
433 	if (tsd_base > UINT32_MAX) {
434 		tsd_base = 0ULL;
435 	}
436 
437 	thread->machine.cthread_self = tsd_base;
438 
439 	/* For current thread, make the TSD base active immediately */
440 	if (thread == current_thread()) {
441 		mp_disable_preemption();
442 		__asm__ volatile (
443                          "mrc    p15, 0, r6, c13, c0, 3\n"
444                          "and	r6, r6, #3\n"
445                          "orr	r6, r6, %0\n"
446                          "mcr	p15, 0, r6, c13, c0, 3\n"
447                          :               /* output */
448                          : "r"((uint32_t)tsd_base)       /* input */
449                          : "r6"          /* clobbered register */
450                 );
451 		mp_enable_preemption();
452 	}
453 
454 	return KERN_SUCCESS;
455 }
456 
457 void
machine_tecs(__unused thread_t thr)458 machine_tecs(__unused thread_t thr)
459 {
460 }
461 
462 int
machine_csv(__unused cpuvn_e cve)463 machine_csv(__unused cpuvn_e cve)
464 {
465 	return 0;
466 }
467