/* xref: /xnu-12377.41.6/osfmk/kperf/kperf_kpc.c (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828) */
/*
 * Copyright (c) 2013 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Sample KPC data into kperf and manage shared context-switch and AST handlers
 */

#include <kperf/kperf.h>
#include <kperf/buffer.h>
#include <kperf/context.h>
#include <kperf/pet.h>
#include <kperf/kperf_kpc.h>
#include <kern/kpc.h> /* kpc_cswitch_context, kpc_threads_counting */
40 void
kperf_kpc_thread_sample(struct kpcdata * kpcd,int sample_config)41 kperf_kpc_thread_sample(struct kpcdata *kpcd, int sample_config)
42 {
43 	BUF_INFO(PERF_KPC_THREAD_SAMPLE | DBG_FUNC_START, sample_config);
44 
45 	kpcd->running = kpc_get_running();
46 	/* let kpc_get_curthread_counters set the correct count */
47 	kpcd->counterc = KPC_MAX_COUNTERS;
48 	if (kpc_get_curthread_counters(&kpcd->counterc,
49 	    kpcd->counterv)) {
50 		/* if thread counters aren't ready, default to 0 */
51 		memset(kpcd->counterv, 0,
52 		    sizeof(uint64_t) * kpcd->counterc);
53 	}
54 	/* help out Instruments by sampling KPC's config */
55 	if (!sample_config) {
56 		kpcd->configc = 0;
57 	} else {
58 		kpcd->configc = kpc_get_config_count(kpcd->running);
59 		kpc_get_config(kpcd->running, kpcd->configv);
60 	}
61 
62 	BUF_INFO(PERF_KPC_THREAD_SAMPLE | DBG_FUNC_END, kpcd->running, kpcd->counterc);
63 }
64 
65 void
kperf_kpc_cpu_sample(struct kpcdata * kpcd,int sample_config)66 kperf_kpc_cpu_sample(struct kpcdata *kpcd, int sample_config)
67 {
68 	BUF_INFO(PERF_KPC_CPU_SAMPLE | DBG_FUNC_START, sample_config);
69 
70 	kpcd->running  = kpc_get_running();
71 	kpcd->counterc = kpc_get_cpu_counters(0, kpcd->running,
72 	    &kpcd->curcpu,
73 	    kpcd->counterv);
74 	if (!sample_config) {
75 		kpcd->configc = 0;
76 	} else {
77 		kpcd->configc = kpc_get_config_count(kpcd->running);
78 		kpc_get_config(kpcd->running, kpcd->configv);
79 	}
80 
81 	BUF_INFO(PERF_KPC_CPU_SAMPLE | DBG_FUNC_END, kpcd->running, kpcd->counterc);
82 }
83 
84 void
kperf_kpc_config_log(const struct kpcdata * kpcd)85 kperf_kpc_config_log(const struct kpcdata *kpcd)
86 {
87 	BUF_DATA(PERF_KPC_CONFIG,
88 	    kpcd->running,
89 	    kpcd->counterc,
90 	    kpc_get_counter_count(KPC_CLASS_FIXED_MASK),
91 	    kpcd->configc);
92 
93 #if __LP64__
94 	unsigned int max = (kpcd->configc + 3) / 4;
95 	for (unsigned int i = 0; i < max; i++) {
96 		uint32_t flag = (i == 0) ? DBG_FUNC_START : ((i == (max - 1)) ? DBG_FUNC_END : DBG_FUNC_NONE);
97 		BUF_DATA(PERF_KPC_CFG_REG | flag,
98 		    kpcd->configv[0 + i * 4], kpcd->configv[1 + i * 4],
99 		    kpcd->configv[2 + i * 4], kpcd->configv[3 + i * 4]);
100 	}
101 #else /* __LP64__ */
102 	unsigned int max = (kpcd->configc + 1) / 2;
103 	for (unsigned int i = 0; i < max; i++) {
104 		uint32_t flag = (i == 0) ? DBG_FUNC_START : ((i == (max - 1)) ? DBG_FUNC_END : DBG_FUNC_NONE);
105 		BUF_DATA(PERF_KPC_CFG_REG32 | flag,
106 		    kpcd->configv[i * 2] >> 32ULL,
107 		    kpcd->configv[i * 2] & 0xffffffffULL,
108 		    kpcd->configv[i * 2 + 1] >> 32ULL,
109 		    kpcd->configv[i * 2 + 1] & 0xffffffffULL);
110 	}
111 #endif /* !__LP64__ */
112 }
113 
114 static void
kperf_kpc_log(uint32_t code,uint32_t code32,const struct kpcdata * kpcd)115 kperf_kpc_log(uint32_t code, uint32_t code32, const struct kpcdata *kpcd)
116 {
117 #if __LP64__
118 #pragma unused(code32)
119 	unsigned int max = (kpcd->counterc + 3) / 4;
120 	/* and the actual counts with one 64-bit argument each */
121 	for (unsigned int i = 0; i < max; i++) {
122 		uint32_t flag = (i == 0) ? DBG_FUNC_START : ((i == (max - 1)) ? DBG_FUNC_END : DBG_FUNC_NONE);
123 		BUF_DATA(code | flag,
124 		    kpcd->counterv[0 + i * 4],
125 		    kpcd->counterv[1 + i * 4],
126 		    kpcd->counterv[2 + i * 4],
127 		    kpcd->counterv[3 + i * 4]);
128 	}
129 #else /* __LP64__ */
130 #pragma unused(code)
131 	unsigned int max = (kpcd->counterc + 1) / 2;
132 	/* and the actual counts with two 32-bit trace arguments each */
133 	for (unsigned int i = 0; i < max; i++) {
134 		uint32_t flag = (i == 0) ? DBG_FUNC_START : ((i == (max - 1)) ? DBG_FUNC_END : DBG_FUNC_NONE);
135 		BUF_DATA(code32 | flag,
136 		    (kpcd->counterv[0 + i * 2] >> 32ULL),
137 		    kpcd->counterv[0 + i * 2] & 0xffffffffULL,
138 		    (kpcd->counterv[1 + i * 2] >> 32ULL),
139 		    kpcd->counterv[1 + i * 2] & 0xffffffffULL);
140 	}
141 #endif /* !__LP64__ */
142 }
143 
144 void
kperf_kpc_cpu_log(const struct kpcdata * kpcd)145 kperf_kpc_cpu_log(const struct kpcdata *kpcd)
146 {
147 	kperf_kpc_log(PERF_KPC_DATA, PERF_KPC_DATA32, kpcd);
148 }
149 
150 void
kperf_kpc_thread_log(const struct kpcdata * kpcd)151 kperf_kpc_thread_log(const struct kpcdata *kpcd)
152 {
153 	kperf_kpc_log(PERF_KPC_DATA_THREAD, PERF_KPC_DATA_THREAD32, kpcd);
154 }
155