// xref: /xnu-8796.101.5/bsd/kern/sys_recount.c (revision aca3beaa3dfbd42498b42c5e5ce20a938e6554e5)
// Copyright (c) 2021 Apple Inc.  All rights reserved.
//
// @APPLE_OSREFERENCE_LICENSE_HEADER_START@
//
// This file contains Original Code and/or Modifications of Original Code
// as defined in and that are subject to the Apple Public Source License
// Version 2.0 (the 'License'). You may not use this file except in
// compliance with the License. The rights granted to you under the License
// may not be used to create, or enable the creation or redistribution of,
// unlawful or unlicensed copies of an Apple operating system, or to
// circumvent, violate, or enable the circumvention or violation of, any
// terms of an Apple operating system software license agreement.
//
// Please obtain a copy of the License at
// http://www.opensource.apple.com/apsl/ and read it before using this file.
//
// The Original Code and all software distributed under the License are
// distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
// EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
// INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
// Please see the License for the specific language governing rights and
// limitations under the License.
//
// @APPLE_OSREFERENCE_LICENSE_HEADER_END@

27 #include <kern/recount.h>
28 #include <machine/machine_routines.h>
29 #include <machine/smp.h>
30 #include <sys/proc_info.h>
31 #include <sys/resource_private.h>
32 #include <sys/sysproto.h>
33 #include <sys/systm.h>
34 #include <sys/types.h>
35 
36 // Recount's BSD-specific implementation for syscalls.
37 
38 #if CONFIG_PERVASIVE_CPI
39 
40 static struct thsc_cpi
_usage_to_cpi(struct recount_usage * usage)41 _usage_to_cpi(struct recount_usage *usage)
42 {
43 	return (struct thsc_cpi){
44 		       .tcpi_instructions = usage->ru_instructions,
45 		       .tcpi_cycles = usage->ru_cycles,
46 	};
47 }
48 
49 static struct thsc_time_cpi
_usage_to_time_cpi(struct recount_usage * usage)50 _usage_to_time_cpi(struct recount_usage *usage)
51 {
52 	return (struct thsc_time_cpi){
53 		       .ttci_instructions = usage->ru_instructions,
54 		       .ttci_cycles = usage->ru_cycles,
55 		       .ttci_system_time_mach = usage->ru_system_time_mach,
56 		       .ttci_user_time_mach = usage->ru_user_time_mach,
57 	};
58 }
59 
60 static struct thsc_time_energy_cpi
_usage_to_time_energy_cpi(struct recount_usage * usage)61 _usage_to_time_energy_cpi(struct recount_usage *usage)
62 {
63 	return (struct thsc_time_energy_cpi){
64 		       .ttec_instructions = usage->ru_instructions,
65 		       .ttec_cycles = usage->ru_cycles,
66 		       .ttec_system_time_mach = usage->ru_system_time_mach,
67 		       .ttec_user_time_mach = usage->ru_user_time_mach,
68 #if CONFIG_PERVASIVE_ENERGY
69 		       .ttec_energy_nj = usage->ru_energy_nj,
70 #endif // CONFIG_PERVASIVE_ENERGY
71 	};
72 }
73 
74 static int
_selfcounts(thread_selfcounts_kind_t kind,user_addr_t buf,size_t size)75 _selfcounts(thread_selfcounts_kind_t kind, user_addr_t buf, size_t size)
76 {
77 	struct recount_usage usage = { 0 };
78 	boolean_t interrupt_state = ml_set_interrupts_enabled(FALSE);
79 	recount_current_thread_usage(&usage);
80 	ml_set_interrupts_enabled(interrupt_state);
81 
82 	switch (kind) {
83 	case THSC_CPI: {
84 		struct thsc_cpi counts = _usage_to_cpi(&usage);
85 		return copyout(&counts, buf, MIN(sizeof(counts), size));
86 	}
87 	case THSC_TIME_CPI: {
88 		struct thsc_time_cpi counts = _usage_to_time_cpi(&usage);
89 		return copyout(&counts, buf, MIN(sizeof(counts), size));
90 	}
91 	case THSC_TIME_ENERGY_CPI: {
92 		struct thsc_time_energy_cpi counts = _usage_to_time_energy_cpi(&usage);
93 		return copyout(&counts, buf, MIN(sizeof(counts), size));
94 	}
95 	default:
96 		panic("recount: unexpected thread_selfcounts kind: %d", kind);
97 	}
98 }
99 
100 static int
_selfcounts_perf_level(thread_selfcounts_kind_t kind,user_addr_t buf,size_t size)101 _selfcounts_perf_level(thread_selfcounts_kind_t kind, user_addr_t buf,
102     size_t size)
103 {
104 	struct recount_usage usages[RCT_CPU_KIND_COUNT] = { 0 };
105 	boolean_t interrupt_state = ml_set_interrupts_enabled(FALSE);
106 	recount_current_thread_perf_level_usage(usages);
107 	ml_set_interrupts_enabled(interrupt_state);
108 
109 	switch (kind) {
110 	case THSC_CPI_PER_PERF_LEVEL: {
111 		struct thsc_cpi counts[RCT_CPU_KIND_COUNT] = { 0 };
112 		for (size_t i = 0; i < RCT_CPU_KIND_COUNT; i++) {
113 			counts[i] = _usage_to_cpi(&usages[i]);
114 		}
115 		return copyout(&counts, buf, MIN(sizeof(counts), size));
116 	}
117 	case THSC_TIME_CPI_PER_PERF_LEVEL: {
118 		struct thsc_time_cpi counts[RCT_CPU_KIND_COUNT] = { 0 };
119 		for (size_t i = 0; i < RCT_CPU_KIND_COUNT; i++) {
120 			counts[i] = _usage_to_time_cpi(&usages[i]);
121 		}
122 		return copyout(&counts, buf, MIN(sizeof(counts), size));
123 	}
124 	case THSC_TIME_ENERGY_CPI_PER_PERF_LEVEL: {
125 		struct thsc_time_energy_cpi counts[RCT_CPU_KIND_COUNT] = { 0 };
126 		for (size_t i = 0; i < RCT_CPU_KIND_COUNT; i++) {
127 			counts[i] = _usage_to_time_energy_cpi(&usages[i]);
128 		}
129 		return copyout(&counts, buf, MIN(sizeof(counts), size));
130 	}
131 	default:
132 		panic("recount: unexpected thread_selfcounts kind: %d", kind);
133 	}
134 }
135 
136 int
thread_selfcounts(__unused struct proc * p,struct thread_selfcounts_args * uap,__unused int * ret_out)137 thread_selfcounts(__unused struct proc *p,
138     struct thread_selfcounts_args *uap, __unused int *ret_out)
139 {
140 	switch (uap->kind) {
141 	case THSC_CPI:
142 	case THSC_TIME_CPI:
143 	case THSC_TIME_ENERGY_CPI:
144 		return _selfcounts(uap->kind, uap->buf, uap->size);
145 
146 	case THSC_CPI_PER_PERF_LEVEL:
147 	case THSC_TIME_CPI_PER_PERF_LEVEL:
148 	case THSC_TIME_ENERGY_CPI_PER_PERF_LEVEL:
149 		return _selfcounts_perf_level(uap->kind, uap->buf, uap->size);
150 
151 	default:
152 		return ENOTSUP;
153 	}
154 }
155 
156 static struct proc_threadcounts_data
_usage_to_proc_threadcounts(struct recount_usage * usage)157 _usage_to_proc_threadcounts(struct recount_usage *usage)
158 {
159 	return (struct proc_threadcounts_data){
160 		       .ptcd_instructions = usage->ru_instructions,
161 		       .ptcd_cycles = usage->ru_cycles,
162 		       .ptcd_system_time_mach = usage->ru_system_time_mach,
163 		       .ptcd_user_time_mach = usage->ru_user_time_mach,
164 #if CONFIG_PERVASIVE_ENERGY
165 		       .ptcd_energy_nj = usage->ru_energy_nj,
166 #endif // CONFIG_PERVASIVE_ENERGY
167 	};
168 }
169 
170 static recount_cpu_kind_t
_perflevel_index_to_cpu_kind(unsigned int perflevel)171 _perflevel_index_to_cpu_kind(unsigned int perflevel)
172 {
173 #if __AMP__
174 	extern cluster_type_t cpu_type_for_perflevel(int perflevel);
175 	cluster_type_t cluster = cpu_type_for_perflevel(perflevel);
176 #else // __AMP__
177 	cluster_type_t cluster = CLUSTER_TYPE_SMP;
178 #endif // !__AMP__
179 
180 	switch (cluster) {
181 	case CLUSTER_TYPE_SMP:
182 		// Default to first index for SMP.
183 		return (recount_cpu_kind_t)0;
184 #if __AMP__
185 	case CLUSTER_TYPE_E:
186 		return RCT_CPU_EFFICIENCY;
187 	case CLUSTER_TYPE_P:
188 		return RCT_CPU_PERFORMANCE;
189 #endif // __AMP__
190 	default:
191 		panic("recount: unexpected CPU type %d for perflevel %d", cluster,
192 		    perflevel);
193 	}
194 }
195 
// Copy out per-perf-level counters for the thread `tid` of process `p` to the
// user buffer `uaddr` (at most `usize` bytes), in the proc_threadcounts
// layout used by proc_info.  On success, writes the number of bytes copied to
// `size_out` and returns 0; returns ESRCH if the task or thread cannot be
// found, or a copyout error.
int
proc_pidthreadcounts(
	struct proc *p,
	uint64_t tid,
	user_addr_t uaddr,
	size_t usize,
	int *size_out)
{
	struct recount_usage usages[RCT_CPU_KIND_COUNT] = { 0 };
	// Keep this in sync with proc_threadcounts_data -- this one just has the
	// array length hard-coded to the maximum.
	struct {
		uint16_t counts_len;
		uint16_t reserved0;
		uint32_t reserved1;
		struct proc_threadcounts_data counts[RCT_CPU_KIND_COUNT];
	} counts = { 0 };

	task_t task = proc_task(p);
	if (task == TASK_NULL) {
		return ESRCH;
	}

	// Look the thread up by ID in the task and sample its counters.
	bool found = recount_task_thread_perf_level_usage(task, tid, usages);
	if (!found) {
		return ESRCH;
	}

	// Report one entry per CPU kind present in the recount topology, clamped
	// to the static maximum.
	const size_t counts_len = MIN(recount_topo_count(RCT_TOPO_CPU_KIND),
	    RCT_CPU_KIND_COUNT);
	counts.counts_len = (uint16_t)counts_len;
	// The number of perflevels for this boot can be constrained by the `cpus=`
	// boot-arg, so determine the runtime number to prevent unexpected calls
	// into the machine-dependent layers from asserting.
	unsigned int cpu_types = ml_get_cpu_types();
	unsigned int level_count = __builtin_popcount(cpu_types);

	for (unsigned int i = 0; i < counts_len; i++) {
		if (i < level_count) {
			// Entries at or beyond level_count are left zero-initialized.
			const recount_cpu_kind_t cpu_kind = _perflevel_index_to_cpu_kind(i);
			counts.counts[i] = _usage_to_proc_threadcounts(&usages[cpu_kind]);
		}
	}
	// Copy out the 8-byte header (counts_len + reserved fields) plus only the
	// populated array entries, further truncated to the caller's buffer size.
	size_t copyout_size = MIN(sizeof(uint64_t) +
	    counts_len * sizeof(struct proc_threadcounts_data), usize);
	assert(copyout_size <= sizeof(counts));
	int error = copyout(&counts, uaddr, copyout_size);
	if (error == 0) {
		*size_out = (int)copyout_size;
	}
	return error;
}
248 
249 #else // CONFIG_PERVASIVE_CPI
250 
// Stub for configurations without CONFIG_PERVASIVE_CPI: per-thread CPI
// counters are unsupported.
int
proc_pidthreadcounts(
	__unused struct proc *p,
	__unused uint64_t tid,
	__unused user_addr_t uaddr,
	__unused size_t usize,
	__unused int *ret_out)
{
	return ENOTSUP;
}
261 
// Stub for configurations without CONFIG_PERVASIVE_CPI: the
// thread_selfcounts(2) syscall is unsupported.
int
thread_selfcounts(__unused struct proc *p,
    __unused struct thread_selfcounts_args *uap, __unused int *ret_out)
{
	return ENOTSUP;
}
268 
269 #endif // !CONFIG_PERVASIVE_CPI
270