1 /*
2 * Copyright (c) 2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <kern/energy_perf.h>
30
31 #include <libsa/types.h>
32 #include <sys/kdebug.h>
33 #include <stddef.h>
34 #include <machine/machine_routines.h>
35
36 #include <kern/coalition.h>
37 #include <kern/task.h>
38 #include <kern/task_ident.h>
39
40 void
gpu_describe(__unused gpu_descriptor_t gdesc)41 gpu_describe(__unused gpu_descriptor_t gdesc)
42 {
43 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_ENERGY_PERF, 1), gdesc->gpu_id, gdesc->gpu_max_domains, 0, 0, 0);
44 }
45
46 uint64_t
gpu_accumulate_time(__unused uint32_t scope,__unused uint32_t gpu_id,__unused uint32_t gpu_domain,__unused uint64_t gpu_accumulated_ns,__unused uint64_t gpu_tstamp_ns)47 gpu_accumulate_time(__unused uint32_t scope, __unused uint32_t gpu_id, __unused uint32_t gpu_domain, __unused uint64_t gpu_accumulated_ns, __unused uint64_t gpu_tstamp_ns)
48 {
49 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_ENERGY_PERF, 2), scope, gpu_id, gpu_domain, gpu_accumulated_ns, gpu_tstamp_ns);
50 ml_gpu_stat_update(gpu_accumulated_ns);
51 return 0;
52 }
53
54 static uint64_t
io_rate_update_cb_default(__unused uint64_t io_rate_flags,__unused uint64_t read_ops_delta,__unused uint64_t write_ops_delta,__unused uint64_t read_bytes_delta,__unused uint64_t write_bytes_delta)55 io_rate_update_cb_default(__unused uint64_t io_rate_flags, __unused uint64_t read_ops_delta, __unused uint64_t write_ops_delta, __unused uint64_t read_bytes_delta, __unused uint64_t write_bytes_delta)
56 {
57 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_ENERGY_PERF, 3), io_rate_flags, read_ops_delta, write_ops_delta, read_bytes_delta, write_bytes_delta);
58 return 0;
59 }
60
/* Currently installed I/O-rate callback; starts as the tracing default. */
io_rate_update_callback_t io_rate_update_cb = io_rate_update_cb_default;
62
63 void
io_rate_update_register(io_rate_update_callback_t io_rate_update_cb_new)64 io_rate_update_register(io_rate_update_callback_t io_rate_update_cb_new)
65 {
66 if (io_rate_update_cb_new != NULL) {
67 io_rate_update_cb = io_rate_update_cb_new;
68 } else {
69 io_rate_update_cb = io_rate_update_cb_default;
70 }
71 }
72
73 uint64_t
io_rate_update(uint64_t io_rate_flags,uint64_t read_ops_delta,uint64_t write_ops_delta,uint64_t read_bytes_delta,uint64_t write_bytes_delta)74 io_rate_update(uint64_t io_rate_flags, uint64_t read_ops_delta, uint64_t write_ops_delta, uint64_t read_bytes_delta, uint64_t write_bytes_delta)
75 {
76 return io_rate_update_cb(io_rate_flags, read_ops_delta, write_ops_delta, read_bytes_delta, write_bytes_delta);
77 }
78
79 static uint64_t
gpu_set_fceiling_cb_default(__unused uint32_t gfr,__unused uint64_t gfp)80 gpu_set_fceiling_cb_default(__unused uint32_t gfr, __unused uint64_t gfp)
81 {
82 return 0ULL;
83 }
84
/* Currently installed GPU frequency-ceiling callback; starts as the no-op default. */
gpu_set_fceiling_t gpu_set_fceiling_cb = gpu_set_fceiling_cb_default;
86
87 void
gpu_fceiling_cb_register(gpu_set_fceiling_t gnewcb)88 gpu_fceiling_cb_register(gpu_set_fceiling_t gnewcb)
89 {
90 if (gnewcb != NULL) {
91 gpu_set_fceiling_cb = gnewcb;
92 } else {
93 gpu_set_fceiling_cb = gpu_set_fceiling_cb_default;
94 }
95 }
96
97 void
gpu_submission_telemetry(__unused uint64_t gpu_ncmds,__unused uint64_t gpu_noutstanding_avg,__unused uint64_t gpu_busy_ns_total,__unused uint64_t gpu_cycles,__unused uint64_t gpu_telemetry_valid_flags,__unused uint64_t gpu_telemetry_misc)98 gpu_submission_telemetry(
99 __unused uint64_t gpu_ncmds,
100 __unused uint64_t gpu_noutstanding_avg,
101 __unused uint64_t gpu_busy_ns_total,
102 __unused uint64_t gpu_cycles,
103 __unused uint64_t gpu_telemetry_valid_flags,
104 __unused uint64_t gpu_telemetry_misc)
105 {
106 }
107
108 kern_return_t
current_energy_id(energy_id_t * energy_id)109 current_energy_id(energy_id_t *energy_id)
110 {
111 coalition_t coalition = task_get_coalition(current_task(),
112 COALITION_TYPE_RESOURCE);
113
114 if (coalition == COALITION_NULL) {
115 *energy_id = ENERGY_ID_NONE;
116 return KERN_FAILURE;
117 }
118
119 uint64_t cid = coalition_id(coalition);
120
121 *energy_id = cid;
122
123 return KERN_SUCCESS;
124 }
125
126 kern_return_t
task_id_token_to_energy_id(mach_port_name_t name,energy_id_t * energy_id)127 task_id_token_to_energy_id(mach_port_name_t name, energy_id_t *energy_id)
128 {
129 if (current_task() == kernel_task) {
130 panic("cannot translate task id token from a kernel thread");
131 }
132
133 task_t task = TASK_NULL;
134 kern_return_t kr = task_id_token_port_name_to_task(name, &task);
135 /* holds task reference upon success */
136
137 if (kr != KERN_SUCCESS) {
138 assert(task == TASK_NULL);
139 return kr;
140 }
141
142 coalition_t coalition = task_get_coalition(task, COALITION_TYPE_RESOURCE);
143
144 assert(coalition != COALITION_NULL);
145
146 uint64_t cid = coalition_id(coalition);
147
148 *energy_id = cid;
149
150 task_deallocate(task);
151
152 return KERN_SUCCESS;
153 }
154
155 kern_return_t
energy_id_report_energy(energy_id_source_t energy_source,energy_id_t self_id,energy_id_t on_behalf_of_id,uint64_t energy)156 energy_id_report_energy(energy_id_source_t energy_source, energy_id_t self_id,
157 energy_id_t on_behalf_of_id, uint64_t energy)
158 {
159 if (energy_source != ENERGY_ID_SOURCE_GPU) {
160 return KERN_NOT_SUPPORTED;
161 }
162
163 if (self_id == ENERGY_ID_NONE) {
164 return KERN_INVALID_ARGUMENT;
165 }
166
167 bool exists;
168
169 if (on_behalf_of_id == ENERGY_ID_NONE) {
170 exists = coalition_add_to_gpu_energy(self_id, CGE_SELF, energy);
171 } else {
172 exists = coalition_add_to_gpu_energy(self_id, CGE_SELF | CGE_OTHERS,
173 energy);
174 coalition_add_to_gpu_energy(on_behalf_of_id, CGE_BILLED,
175 energy);
176 }
177
178 if (exists) {
179 return KERN_SUCCESS;
180 } else {
181 return KERN_NOT_FOUND;
182 }
183 }
184