/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/host_priv.h>
#include <mach/host_special_ports.h>
#include <mach/memory_error_notification.h>

#include <mach/mach_types.h>
#include <mach/host_info.h>
#include <kern/host.h>
#include <kern/locks.h>
#include <kern/ecc.h>
#include <kern/spl.h>
#include <kern/mpsc_queue.h>
#include <kern/thread.h>
#include <kern/thread_call.h>
#include <kern/startup.h>
#include <os/log.h>
#include <pexpert/pexpert.h>
#include <pexpert/device_tree.h>
#include <libkern/OSAtomic.h>
#include <arm/pmap_public.h>
#include <vm/vm_protos.h>

/* New CoreAnalytics ECC logging mechanism */

/**
 * Stubs for targets which do not support ECC.
 */

kern_return_t
ecc_log_memory_error(
        __unused pmap_paddr_t physical_address,
        __unused uint32_t ecc_flags)
{
        return KERN_NOT_SUPPORTED;
}

kern_return_t
ecc_log_memory_error_internal(
        __unused pmap_paddr_t physical_address,
        __unused uint32_t ecc_flags)
{
        return KERN_NOT_SUPPORTED;
}

kern_return_t
ecc_log_memory_error_ce(
        __unused pmap_paddr_t physical_address,
        __unused uint32_t ecc_flags,
        __unused uint32_t ce_count)
{
        return KERN_NOT_SUPPORTED;
}


kern_return_t
kern_ecc_poll_register(
        __unused platform_error_handler_ecc_poll_t poll_func,
        __unused uint32_t max_errors)
{
        return KERN_NOT_SUPPORTED;
}
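
/*
 * Illustrative only: because every stub above returns KERN_NOT_SUPPORTED,
 * a caller can use that value to detect targets without ECC reporting and
 * skip any follow-up work (names below are hypothetical):
 *
 *      if (ecc_log_memory_error(paddr, flags) == KERN_NOT_SUPPORTED) {
 *              // No ECC support on this target; nothing to report.
 *      }
 */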

/*
 * Used to report earlier errors that are discovered only after ECC is
 * enabled. We don't want the VM to panic for these.
 */
kern_return_t
ecc_log_memory_error_delayed(
        __unused pmap_paddr_t physical_address,
        __unused uint32_t ecc_flags)
{
        return KERN_FAILURE;
}

/**
 * MCC Logging
 */

/**
 * TODO: rdar://97394997 (Clean up ECC / MCC logging)
 * We can probably clean some of this up and share some of the code with ECC.
 */
#if XNU_HANDLE_MCC

static struct mpsc_daemon_queue mcc_memory_error_event_queue;
struct _mcc_mem_err_event {
        struct mpsc_queue_chain link;
        mcc_ecc_event_t event;
};
typedef struct _mcc_mem_err_event* mcc_mem_err_event_t;

#define MCC_ECC_NUM_ERRORS (1024)
#define MCC_ERROR_EVENT_QUEUE_PRIORITY MAXPRI_USER
static struct _mcc_mem_err_event mcc_events[MCC_ECC_NUM_ERRORS];
static atomic_int mcc_events_producer_idx = 0;
static atomic_int mcc_events_consumer_idx = 0;
SCALABLE_COUNTER_DEFINE(mcc_dropped_events);
LCK_GRP_DECLARE(mcc_lock_grp, "mcc");
LCK_SPIN_DECLARE(mcc_lock, &mcc_lock_grp);

static inline int
mcc_events_next(int idx)
{
        assert(idx < MCC_ECC_NUM_ERRORS);
        return (idx + 1) % MCC_ECC_NUM_ERRORS;
}
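
/*
 * Illustrative note: with this "next index" scheme, the ring is considered
 * full when mcc_events_next(producer) == consumer, so one slot is always
 * kept empty and the effective capacity is MCC_ECC_NUM_ERRORS - 1 events.
 * For example, with the producer at index 1023, mcc_events_next() wraps
 * back to 0; if the consumer is still at 0, the next event is dropped.
 */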

/* MCC ECC CoreAnalytics Error Logging */
static void
mcc_error_notify_user(mcc_ecc_event_t event)
{
        mach_port_t user_port = MACH_PORT_NULL;

        kern_return_t kr = host_get_memory_error_port(host_priv_self(), &user_port);

        if ((kr != KERN_SUCCESS) || !IPC_PORT_VALID(user_port)) {
                os_log(OS_LOG_DEFAULT, "Failed to get memory error port");
                return;
        }

        mcc_memory_error_notification(user_port, event);

        ipc_port_release_send(user_port);
}
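
/*
 * Illustrative only: the notification lands in whichever userspace daemon
 * registered the memory-error host special port. A hypothetical sketch of
 * that registration, assuming the matching setter macro from
 * mach/host_special_ports.h and a privileged host port in host_priv:
 *
 *      mach_port_t port;
 *      mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);
 *      mach_port_insert_right(mach_task_self(), port, port,
 *          MACH_MSG_TYPE_MAKE_SEND);
 *      host_set_memory_error_port(host_priv, port);
 */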

static void
mcc_memory_error_event_queue_invoke(mpsc_queue_chain_t e, mpsc_daemon_queue_t queue __unused)
{
        mcc_mem_err_event_t event;

        /* The consumer should never be invoked if there is nothing to consume. */
        int mcc_events_consumer_curr_idx = atomic_load(&mcc_events_consumer_idx);
        assert(mcc_events_consumer_curr_idx != atomic_load(&mcc_events_producer_idx));

        event = mpsc_queue_element(e, struct _mcc_mem_err_event, link);
        mcc_error_notify_user(event->event);
        int mcc_events_consumer_next_idx = mcc_events_next(mcc_events_consumer_curr_idx);
        atomic_store(&mcc_events_consumer_idx, mcc_events_consumer_next_idx);
}

static mcc_mem_err_event_t
mcc_memory_error_create_event(mcc_ecc_event_t mcc_event)
{
        mcc_mem_err_event_t ret = NULL;

        /**
         * @note We are unable to dynamically allocate events, because this function can be called from
         * the primary interrupt context. Instead, we allocate from a statically sized ring buffer.
         */
        const boolean_t interrupts_enabled = ml_set_interrupts_enabled(FALSE);
        lck_spin_lock(&mcc_lock);
        int mcc_events_producer_curr_idx = atomic_load(&mcc_events_producer_idx);
        int mcc_events_producer_next_idx = mcc_events_next(mcc_events_producer_curr_idx);
        if (mcc_events_producer_next_idx == atomic_load(&mcc_events_consumer_idx)) {
                /**
                 * The consumer is running behind the producer, and we're in the primary interrupt context.
                 * Drop this event and return NULL to the caller.
                 */
                counter_inc(&mcc_dropped_events);
                ret = NULL;
                goto done;
        }

        mcc_mem_err_event_t event = &mcc_events[mcc_events_producer_curr_idx];
        event->event = mcc_event;
        atomic_store(&mcc_events_producer_idx, mcc_events_producer_next_idx);
        ret = event;

done:
        lck_spin_unlock(&mcc_lock);
        ml_set_interrupts_enabled(interrupts_enabled);
        return ret;
}

__startup_func
static void
mcc_logging_init(void)
{
        mpsc_daemon_queue_init_with_thread(&mcc_memory_error_event_queue,
            mcc_memory_error_event_queue_invoke, MCC_ERROR_EVENT_QUEUE_PRIORITY,
            "daemon.mcc_error-events", MPSC_DAEMON_INIT_INACTIVE);

        mpsc_daemon_queue_activate(&mcc_memory_error_event_queue);
}
STARTUP(THREAD_CALL, STARTUP_RANK_MIDDLE, mcc_logging_init);

#endif /* XNU_HANDLE_MCC */

kern_return_t
mcc_log_memory_error(mcc_ecc_event_t mcc_event __unused)
{
#if XNU_HANDLE_MCC
        mcc_mem_err_event_t event = mcc_memory_error_create_event(mcc_event);
        if (event == NULL) {
                return KERN_RESOURCE_SHORTAGE;
        }
        assert(mcc_memory_error_event_queue.mpd_thread != NULL);
        mpsc_daemon_enqueue(&mcc_memory_error_event_queue,
            &event->link, MPSC_QUEUE_DISABLE_PREEMPTION);
        return KERN_SUCCESS;
#else
        return KERN_FAILURE;
#endif
}
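
/*
 * Illustrative only: a platform error handler running in the primary
 * interrupt context might report an event like so ("raw_status" is a
 * hypothetical value read from the memory controller):
 *
 *      mcc_ecc_event_t ev = { .version = MCC_ECC_V1, .status = raw_status };
 *      if (mcc_log_memory_error(ev) == KERN_RESOURCE_SHORTAGE) {
 *              // Ring was full; the drop is tallied in mcc_dropped_events.
 *      }
 */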

#if (DEBUG || DEVELOPMENT)
static int
mcc_memory_error_notify_test_run(int64_t in, int64_t *out)
{
        printf("Running mcc_memory_error_notify_test for %lld iterations\n", in);
        for (int64_t i = 0; i < in; i++) {
                mcc_ecc_event_t event = {.version = MCC_ECC_V1, .status = (uint32_t)i};
                /**
                 * To accurately test mcc_log_memory_error, we must disable preemption, because it is called
                 * from the primary interrupt context.
                 */
                disable_preemption();
                mcc_log_memory_error(event);
                enable_preemption();
        }

        *out = 1;
        return 0;
}

SYSCTL_TEST_REGISTER(mcc_memory_error_notify_test, mcc_memory_error_notify_test_run);
#endif /* (DEBUG || DEVELOPMENT) */


/* Legacy ECC logging mechanism */

/*
 * ECC data. Not really KPCs, but this still seems like the
 * best home for this code.
 *
 * Circular buffer of events. When we fill up, drop data.
 */
#define ECC_EVENT_BUFFER_COUNT (256)

struct ecc_event ecc_data[ECC_EVENT_BUFFER_COUNT];
static uint32_t ecc_data_next_read;
static uint32_t ecc_data_next_write;
static boolean_t ecc_data_empty = TRUE; // next read == next write : empty or full?
static LCK_GRP_DECLARE(ecc_data_lock_group, "ecc-data");
static LCK_SPIN_DECLARE(ecc_data_lock, &ecc_data_lock_group);
static uint32_t ecc_correction_count;
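
/*
 * Illustrative note: unlike the MCC ring above, this buffer uses every slot
 * and disambiguates the "next read == next write" case with ecc_data_empty:
 * when the indices are equal, the buffer is empty if ecc_data_empty is TRUE
 * and full otherwise.
 */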


uint32_t
ecc_log_get_correction_count()
{
        return ecc_correction_count;
}

kern_return_t
ecc_log_record_event(const struct ecc_event *ev)
{
        spl_t x;

        if (ev->count > ECC_EVENT_INFO_DATA_ENTRIES) {
                panic("Count of %u on ecc event is too large.", (unsigned)ev->count);
        }

        x = splhigh();
        lck_spin_lock(&ecc_data_lock);

        ecc_correction_count++;

        if (ecc_data_next_read == ecc_data_next_write && !ecc_data_empty) {
                lck_spin_unlock(&ecc_data_lock);
                splx(x);
                return KERN_FAILURE;
        }

        bcopy(ev, &ecc_data[ecc_data_next_write], sizeof(*ev));
        ecc_data_next_write++;
        ecc_data_next_write %= ECC_EVENT_BUFFER_COUNT;
        ecc_data_empty = FALSE;

        lck_spin_unlock(&ecc_data_lock);
        splx(x);

        return KERN_SUCCESS;
}
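
/*
 * Illustrative only: a producer fills in an ecc_event and records it; the
 * count field must not exceed ECC_EVENT_INFO_DATA_ENTRIES or the call
 * panics. The initializer below is a hypothetical sketch:
 *
 *      struct ecc_event ev = { .count = 0 };
 *      if (ecc_log_record_event(&ev) == KERN_FAILURE) {
 *              // Buffer was full and the event was dropped, but it still
 *              // bumped ecc_correction_count.
 *      }
 */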


kern_return_t
ecc_log_get_next_event(struct ecc_event *ev)
{
        spl_t x;

        x = splhigh();
        lck_spin_lock(&ecc_data_lock);

        if (ecc_data_empty) {
                assert(ecc_data_next_write == ecc_data_next_read);

                lck_spin_unlock(&ecc_data_lock);
                splx(x);
                return KERN_FAILURE;
        }

        bcopy(&ecc_data[ecc_data_next_read], ev, sizeof(*ev));
        ecc_data_next_read++;
        ecc_data_next_read %= ECC_EVENT_BUFFER_COUNT;

        if (ecc_data_next_read == ecc_data_next_write) {
                ecc_data_empty = TRUE;
        }

        lck_spin_unlock(&ecc_data_lock);
        splx(x);

        return KERN_SUCCESS;
}
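
/*
 * Illustrative only: a consumer drains the buffer until KERN_FAILURE
 * signals that it is empty:
 *
 *      struct ecc_event ev;
 *      while (ecc_log_get_next_event(&ev) == KERN_SUCCESS) {
 *              // Process ev, e.g. forward it to a logging sink.
 *      }
 */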