xref: /xnu-11215.1.10/osfmk/kern/kpc_thread.c (revision 8d741a5de7ff4191bf97d57b9f54c2f6d4a15585) !
1 /*
2  * Copyright (c) 2012 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <mach/mach_types.h>
30 #include <kern/processor.h>
31 #include <kern/thread.h>
32 #include <kern/assert.h>
33 #include <kern/locks.h>
34 #include <sys/errno.h>
35 
36 #include <kperf/kperf.h>
37 #include <kperf/buffer.h>
38 #include <kperf/context.h>
39 #include <kperf/sample.h>
40 #include <kperf/action.h>
41 #include <kperf/kperf_kpc.h>
42 #include <kern/kpc.h>
43 
44 #if defined (__arm64__)
45 #include <arm/cpu_data_internal.h>
46 #endif
47 
48 /* global for whether to read PMCs on context switch */
49 int kpc_threads_counting = 0;
50 
51 /* whether to call into KPC when a thread goes off CPU */
52 boolean_t kpc_off_cpu_active = FALSE;
53 
54 /* current config and number of counters in that config */
55 static uint32_t kpc_thread_classes = 0;
56 static uint32_t kpc_thread_classes_count = 0;
57 
58 static LCK_GRP_DECLARE(kpc_thread_lckgrp, "kpc thread");
59 static LCK_MTX_DECLARE(kpc_thread_lock, &kpc_thread_lckgrp);
60 
61 uint32_t
kpc_get_thread_counting(void)62 kpc_get_thread_counting(void)
63 {
64 	uint32_t kpc_thread_classes_tmp;
65 	int kpc_threads_counting_tmp;
66 
67 	/* Make sure we get a consistent snapshot of these values */
68 	lck_mtx_lock(&kpc_thread_lock);
69 
70 	kpc_thread_classes_tmp = kpc_thread_classes;
71 	kpc_threads_counting_tmp = kpc_threads_counting;
72 
73 	lck_mtx_unlock(&kpc_thread_lock);
74 
75 	if (kpc_threads_counting_tmp) {
76 		return kpc_thread_classes_tmp;
77 	} else {
78 		return 0;
79 	}
80 }
81 
/*
 * Enable or disable per-thread counter accumulation.
 *
 * classes: bitmask of counter classes to accumulate per thread;
 *          0 (or a mask with no counters) disables counting.
 * Returns 0 unconditionally.
 *
 * Holds kpc_thread_lock across the whole update so that
 * kpc_thread_classes / kpc_thread_classes_count / kpc_threads_counting
 * change as a unit, and so kpc_off_cpu_update() publishes a value
 * consistent with the new state.
 */
int
kpc_set_thread_counting(uint32_t classes)
{
	uint32_t count;

	lck_mtx_lock(&kpc_thread_lock);

	count = kpc_get_counter_count(classes);

	if ((classes == 0)
	    || (count == 0)) {
		/* shut down */
		kpc_threads_counting = FALSE;
	} else {
		/* stash the config */
		kpc_thread_classes = classes;

		/* work out the size */
		kpc_thread_classes_count = count;
		assert(kpc_thread_classes_count <= KPC_MAX_COUNTERS);

		/* enable switch */
		kpc_threads_counting = TRUE;

		/*
		 * and schedule an AST for this thread...
		 * The calling thread has no counter buffer yet; defer the
		 * allocation to AST context (see kpc_thread_ast_handler)
		 * rather than allocating here.
		 */
		if (!current_thread()->kpc_buf) {
			current_thread()->kperf_ast |= T_KPC_ALLOC;
			act_set_kperf(current_thread());
		}
	}

	/* refresh the off-CPU fast-path flag to match the new state */
	kpc_off_cpu_update();
	lck_mtx_unlock(&kpc_thread_lock);

	return 0;
}
118 
/*
 * Snapshot current PMCs and update counters in the current thread.
 *
 * Each CPU keeps two counter buffers: cpu_kpc_buf[0] holds the PMC
 * values captured at the previous update (i.e. when `thread` went on
 * CPU or was last sampled), cpu_kpc_buf[1] receives the fresh snapshot.
 * The difference between the two is the amount of counting attributable
 * to `thread` since the last update, and is folded into its kpc_buf.
 *
 * Expected to run with `thread` current (or going off-CPU) and
 * interrupts disabled by the caller, so the snapshot/delta/swap
 * sequence is not interleaved with another update on this CPU.
 */
static void
kpc_update_thread_counters( thread_t thread )
{
	uint32_t i;
	uint64_t *tmp = NULL;
	cpu_data_t *cpu = NULL;

	cpu = current_cpu_datap();

	/* 1. stash current PMCs into latest CPU block */
	kpc_get_cpu_counters( FALSE, kpc_thread_classes,
	    NULL, cpu->cpu_kpc_buf[1] );

	/* 2. apply delta to old thread */
	if (thread->kpc_buf) {
		for (i = 0; i < kpc_thread_classes_count; i++) {
			thread->kpc_buf[i] += cpu->cpu_kpc_buf[1][i] - cpu->cpu_kpc_buf[0][i];
		}
	}

	/*
	 * schedule any necessary allocations
	 * (the incoming thread may predate counting being enabled and so
	 * have no buffer yet; request one via AST)
	 */
	if (!current_thread()->kpc_buf) {
		current_thread()->kperf_ast |= T_KPC_ALLOC;
		act_set_kperf(current_thread());
	}

	/*
	 * 3. switch the PMC block pointers so this snapshot becomes the
	 * baseline for the next delta
	 */
	tmp = cpu->cpu_kpc_buf[1];
	cpu->cpu_kpc_buf[1] = cpu->cpu_kpc_buf[0];
	cpu->cpu_kpc_buf[0] = tmp;
}
151 
152 /* get counter values for a thread */
153 int
kpc_get_curthread_counters(uint32_t * inoutcount,uint64_t * buf)154 kpc_get_curthread_counters(uint32_t *inoutcount, uint64_t *buf)
155 {
156 	thread_t thread = current_thread();
157 	boolean_t enabled;
158 
159 	/* buffer too small :( */
160 	if (*inoutcount < kpc_thread_classes_count) {
161 		return EINVAL;
162 	}
163 
164 	/* copy data and actual size */
165 	if (!thread->kpc_buf) {
166 		return EINVAL;
167 	}
168 
169 	enabled = ml_set_interrupts_enabled(FALSE);
170 
171 	/* snap latest version of counters for this thread */
172 	kpc_update_thread_counters( current_thread());
173 
174 	/* copy out */
175 	memcpy( buf, thread->kpc_buf,
176 	    kpc_thread_classes_count * sizeof(*buf));
177 	*inoutcount = kpc_thread_classes_count;
178 
179 	ml_set_interrupts_enabled(enabled);
180 
181 	return 0;
182 }
183 
184 void
kpc_off_cpu_update(void)185 kpc_off_cpu_update(void)
186 {
187 	kpc_off_cpu_active = kpc_threads_counting;
188 }
189 
190 void
kpc_off_cpu_internal(thread_t thread)191 kpc_off_cpu_internal(thread_t thread)
192 {
193 	if (kpc_threads_counting) {
194 		kpc_update_thread_counters(thread);
195 	}
196 }
197 
198 void
kpc_thread_create(thread_t thread)199 kpc_thread_create(thread_t thread)
200 {
201 	/* nothing to do if we're not counting */
202 	if (!kpc_threads_counting) {
203 		return;
204 	}
205 
206 	/* give the new thread a counterbuf */
207 	thread->kpc_buf = kpc_counterbuf_alloc();
208 }
209 
210 void
kpc_thread_destroy(thread_t thread)211 kpc_thread_destroy(thread_t thread)
212 {
213 	uint64_t *buf = NULL;
214 
215 	/* usual case: no kpc buf, just return */
216 	if (!thread->kpc_buf) {
217 		return;
218 	}
219 
220 	/* otherwise, don't leak */
221 	buf = thread->kpc_buf;
222 	thread->kpc_buf = NULL;
223 	kpc_counterbuf_free(buf);
224 }
225 
226 void
kpc_thread_ast_handler(thread_t thread)227 kpc_thread_ast_handler(thread_t thread)
228 {
229 	if (thread->kperf_ast & T_KPC_ALLOC) {
230 		thread->kpc_buf = kpc_counterbuf_alloc();
231 	}
232 }
233