/*
 * Copyright (c) 2020-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <kern/assert.h>
#include <kern/counter.h>
#include <kern/cpu_data.h>
#include <kern/percpu.h>
#include <kern/kalloc.h>
#include <kern/thread_call.h>
#include <libkern/libkern.h>
#include <sys/queue.h>
#include <vm/vm_kern.h>

#include "log_queue.h"
#include "log_mem.h"

#define LQ_DEFAULT_SZ_ORDER       15    // 32K per slot
#define LQ_DEFAULT_FREE_AFTER_CNT 15000 // Deallocate log queue after N logs
#define LQ_MAX_SZ_ORDER           20    // 1MB per CPU should really be enough and a hard cap
#define LQ_MIN_LOG_SZ_ORDER       5
#define LQ_MAX_LOG_SZ_ORDER       11
#define LQ_BATCH_SIZE             24
#define LQ_MAX_LM_SLOTS           8
#define LQ_LOW_MEM_SCALE          3

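/*
 * lq_mem_set is a bitmask with one bit per memory slot. The macros below
 * mark slot i as populated and usable, test whether it is, and mark it
 * unused again.
 */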
#define LQ_MEM_ENABLE(q, i)  ((q)->lq_mem_set |= (1 << (i)))
#define LQ_MEM_ENABLED(q, i) ((q)->lq_mem_set & (1 << (i)))
#define LQ_MEM_DISABLE(q, i) ((q)->lq_mem_set &= ~(1 << (i)))

OS_ENUM(log_queue_entry_state, uint8_t,
	LOG_QUEUE_ENTRY_STATE_INVALID = 0,
	LOG_QUEUE_ENTRY_STATE_STORED,
	LOG_QUEUE_ENTRY_STATE_DISPATCHED,
	LOG_QUEUE_ENTRY_STATE_SENT,
	LOG_QUEUE_ENTRY_STATE_FAILED
	);

OS_ENUM(lq_mem_state, uint8_t,
	LQ_MEM_STATE_READY = 0,
	LQ_MEM_STATE_ALLOCATING,
	LQ_MEM_STATE_RELEASING
	);

OS_ENUM(lq_req_state, uint8_t,
	LQ_REQ_STATE_INVALID = 0,
	LQ_REQ_STATE_ALLOCATING,
	LQ_REQ_STATE_RELEASING,
	LQ_REQ_STATE_READY
	);

typedef struct log_queue_entry {
	STAILQ_ENTRY(log_queue_entry) lqe_link;
	uint16_t lqe_size;
	uint16_t lqe_lm_id;
	_Atomic log_queue_entry_state_t lqe_state;
	log_payload_s lqe_payload;
} log_queue_entry_s, *log_queue_entry_t;

typedef STAILQ_HEAD(, log_queue_entry) log_queue_list_s, *log_queue_list_t;

typedef struct {
	log_queue_list_s lq_log_list;
	log_queue_list_s lq_dispatch_list;
	logmem_t lq_mem[LQ_MAX_LM_SLOTS];
	size_t lq_mem_set;
	size_t lq_mem_size;
	size_t lq_mem_size_order;
	lq_mem_state_t lq_mem_state;
	thread_call_t lq_mem_handler;
	size_t lq_cnt_mem_active;
	size_t lq_cnt_mem_avail;
	_Atomic lq_req_state_t lq_req_state;
	void *lq_req_mem;
	uint32_t lq_ready : 1;
	uint32_t lq_suspend : 1;
} log_queue_s, *log_queue_t;

extern bool os_log_disabled(void);
/*
 * Log Queue
 *
 * Log queues are allocated and set up per CPU. When the firehose memory is
 * full, logs are stored in a log queue and sent into the firehose once it has
 * free space again. Each log queue (memory) can grow and shrink based on
 * demand by adding/removing additional memory to/from its memory slots. There
 * are LQ_MAX_LM_SLOTS memory slots available for every log queue to use.
 * Memory slots are released when not needed, with one slot always allocated
 * per queue as a minimum.
 *
 * Boot args:
 *
 * lq_size_order: Per-slot memory size defined as a power of 2 exponent
 *                (i.e. 2^lq_size_order bytes). Zero disables queues.
 *
 * lq_nslots:     Number of slots to allocate at boot for each log queue.
 *                Once initial log traffic decreases, log queues release
 *                slots as needed.
 *
 * When a large number of logs is expected, setting the aforementioned
 * boot-args accordingly allows the system to capture the vast majority of
 * logs and avoid drops.
 */
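/*
 * A worked sizing example (illustrative only; the boot-args line below is
 * hypothetical, not a recommended configuration): with the default
 * lq_size_order of 15, each slot provides 2^15 = 32K of log memory, so a
 * queue that grows to all LQ_MAX_LM_SLOTS (8) slots holds about 256K per
 * CPU. Booting with
 *
 *     lq_size_order=17 lq_nslots=4
 *
 * starts each per-CPU queue with four 128K slots (512K total), still within
 * the LQ_MAX_SZ_ORDER (2^20) per-slot cap. The actual per-slot allocation is
 * slightly larger and page-rounded; see logmem_required_size() usage below.
 */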
TUNABLE(size_t, lq_bootarg_size_order, "lq_size_order", LQ_DEFAULT_SZ_ORDER);
TUNABLE(size_t, lq_bootarg_nslots, "lq_nslots", LQ_MAX_LM_SLOTS);

SCALABLE_COUNTER_DEFINE(log_queue_cnt_received);
SCALABLE_COUNTER_DEFINE(log_queue_cnt_rejected_fh);
SCALABLE_COUNTER_DEFINE(log_queue_cnt_queued);
SCALABLE_COUNTER_DEFINE(log_queue_cnt_sent);
SCALABLE_COUNTER_DEFINE(log_queue_cnt_dropped_nomem);
SCALABLE_COUNTER_DEFINE(log_queue_cnt_dropped_off);
SCALABLE_COUNTER_DEFINE(log_queue_cnt_mem_allocated);
SCALABLE_COUNTER_DEFINE(log_queue_cnt_mem_released);
SCALABLE_COUNTER_DEFINE(log_queue_cnt_mem_failed);

static log_queue_s PERCPU_DATA(oslog_queue);
static size_t lq_low_mem_limit;

static void *
log_queue_buffer_alloc(size_t amount)
{
	return kalloc_data_tag(amount, Z_WAITOK_ZERO, VM_KERN_MEMORY_LOG);
}

static void
log_queue_buffer_free(void *addr, size_t amount)
{
	kfree_data(addr, amount);
}

#define log_queue_entry_size(p) (sizeof(log_queue_entry_s) + (p)->lp_data_size)

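/*
 * Entry states are published and observed across CPUs without locking. The
 * macros below pair a release store (publish) with dependency-ordered loads:
 * a reader first loads the state via read_dependency(), then uses
 * read_dependent()/read_dependent_w() to order its payload loads after that
 * state load. A minimal sketch of the intended pattern (hypothetical field
 * names, for illustration only):
 *
 *	writer:                          reader:
 *	entry->field = val;              s = read_dependency(&entry->state);
 *	publish(&entry->state, STORED);  val = read_dependent(&entry->field, s);
 */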
#define publish(a, v) os_atomic_store((a), (v), release)
#define read_dependency(v) os_atomic_load((v), dependency)
#define read_dependent(v, t) os_atomic_load_with_dependency_on((v), (uintptr_t)(t))
#define read_dependent_w(v, t) ({ \
	__auto_type _v = os_atomic_inject_dependency((v), (uintptr_t)(t)); \
	os_atomic_load_wide(_v, dependency); \
})

static log_queue_entry_state_t
log_queue_entry_state(const log_queue_entry_t lqe)
{
	log_queue_entry_state_t state = read_dependency(&lqe->lqe_state);
	assert(state != LOG_QUEUE_ENTRY_STATE_INVALID);
	return state;
}

static log_queue_entry_t
log_queue_entry_alloc(log_queue_t lq, size_t lqe_size)
{
	for (short i = 0; i < LQ_MAX_LM_SLOTS; i++) {
		if (!LQ_MEM_ENABLED(lq, i)) {
			continue;
		}
		log_queue_entry_t lqe = logmem_alloc(&lq->lq_mem[i], &lqe_size);
		if (lqe) {
			assert(lqe_size <= lq->lq_cnt_mem_avail);
			lq->lq_cnt_mem_avail -= lqe_size;
			assert(lqe_size <= UINT16_MAX);
			lqe->lqe_size = (uint16_t)lqe_size;
			lqe->lqe_lm_id = i;
			return lqe;
		}
	}

	return NULL;
}

static void
log_queue_entry_free(log_queue_t lq, log_queue_entry_t lqe)
{
	const size_t lqe_size = lqe->lqe_size;
	const uint16_t lqe_lm_id = lqe->lqe_lm_id;

	bzero(lqe, lqe_size);
	logmem_free(&lq->lq_mem[lqe_lm_id], lqe, lqe_size);
	lq->lq_cnt_mem_avail += lqe_size;
}

static bool
log_queue_add_entry(log_queue_t lq, log_payload_t lp, const uint8_t *lp_data)
{
	log_queue_entry_t lqe = log_queue_entry_alloc(lq, log_queue_entry_size(lp));
	if (!lqe) {
		counter_inc_preemption_disabled(&log_queue_cnt_dropped_nomem);
		return false;
	}
	assert(lqe->lqe_size >= lp->lp_data_size);

	lqe->lqe_payload = *lp;
	(void) memcpy((uint8_t *)lqe + sizeof(*lqe), lp_data, lqe->lqe_payload.lp_data_size);
	STAILQ_INSERT_TAIL(&lq->lq_log_list, lqe, lqe_link);
	publish(&lqe->lqe_state, LOG_QUEUE_ENTRY_STATE_STORED);

	counter_inc_preemption_disabled(&log_queue_cnt_queued);

	return true;
}

/*
 * Remove successfully sent logs from a dispatch list and free them.
 */
static size_t
dispatch_list_cleanup(log_queue_t lq)
{
	log_queue_entry_t lqe, lqe_tmp;
	size_t freed = 0;

	STAILQ_FOREACH_SAFE(lqe, &lq->lq_dispatch_list, lqe_link, lqe_tmp) {
		log_queue_entry_state_t lqe_state = log_queue_entry_state(lqe);
		assert(lqe_state != LOG_QUEUE_ENTRY_STATE_STORED);

		if (lqe_state == LOG_QUEUE_ENTRY_STATE_SENT) {
			STAILQ_REMOVE(&lq->lq_dispatch_list, lqe, log_queue_entry, lqe_link);
			publish(&lqe->lqe_state, LOG_QUEUE_ENTRY_STATE_INVALID);
			log_queue_entry_free(lq, lqe);
			counter_dec_preemption_disabled(&log_queue_cnt_queued);
			freed++;
		}
	}

	return freed;
}
/*
 * Walk the log queue and collect stored logs suitable for dispatching.
 * First, collect previously failed logs, then (if there is still enough
 * space in the batch) grab new logs.
 */
static size_t
log_dispatch_prepare(log_queue_t lq, size_t requested, log_queue_entry_t *buf)
{
	log_queue_entry_t lqe, lqe_tmp;
	size_t collected = 0;

	STAILQ_FOREACH(lqe, &lq->lq_dispatch_list, lqe_link) {
		log_queue_entry_state_t lqe_state = log_queue_entry_state(lqe);
		assert(lqe_state != LOG_QUEUE_ENTRY_STATE_STORED);

		if (lqe_state == LOG_QUEUE_ENTRY_STATE_FAILED) {
			publish(&lqe->lqe_state, LOG_QUEUE_ENTRY_STATE_DISPATCHED);
			buf[collected++] = lqe;
		}

		if (collected == requested) {
			return collected;
		}
	}
	assert(collected < requested);

	STAILQ_FOREACH_SAFE(lqe, &lq->lq_log_list, lqe_link, lqe_tmp) {
		assert(log_queue_entry_state(lqe) == LOG_QUEUE_ENTRY_STATE_STORED);

		STAILQ_REMOVE(&lq->lq_log_list, lqe, log_queue_entry, lqe_link);
		STAILQ_INSERT_TAIL(&lq->lq_dispatch_list, lqe, lqe_link);
		publish(&lqe->lqe_state, LOG_QUEUE_ENTRY_STATE_DISPATCHED);

		buf[collected++] = lqe;
		if (collected == requested) {
			break;
		}
	}

	return collected;
}

/*
 * Send dispatched logs to the firehose. Skip streaming when replaying.
 * Streaming does not process timestamps and would therefore show logs out of
 * order.
 */
static void
log_queue_dispatch_logs(size_t logs_count, log_queue_entry_t *logs)
{
	for (size_t i = 0; i < logs_count; i++) {
		const log_queue_entry_t lqe = logs[i];
		log_queue_entry_state_t lqe_state = log_queue_entry_state(lqe);

		if (lqe_state == LOG_QUEUE_ENTRY_STATE_DISPATCHED) {
			const log_payload_t lqe_lp = &lqe->lqe_payload;

			log_payload_s lp = {
				.lp_ftid = read_dependent_w(&lqe_lp->lp_ftid, lqe_state),
				.lp_timestamp = read_dependent_w(&lqe_lp->lp_timestamp, lqe_state),
				.lp_stream = read_dependent(&lqe_lp->lp_stream, lqe_state),
				.lp_data_size = read_dependent(&lqe_lp->lp_data_size, lqe_state)
			};
			const void *lp_data = (uint8_t *)lqe + sizeof(*lqe);

			/*
			 * The log queue mechanism expects only the state to be
			 * modified here since we are likely running on a
			 * different cpu. Queue cleanup will be done safely
			 * later in dispatch_list_cleanup().
			 */
			if (log_payload_send(&lp, lp_data, false)) {
				publish(&lqe->lqe_state, LOG_QUEUE_ENTRY_STATE_SENT);
				counter_inc(&log_queue_cnt_sent);
			} else {
				publish(&lqe->lqe_state, LOG_QUEUE_ENTRY_STATE_FAILED);
			}
		}
	}
}

static bool
log_queue_empty(const log_queue_t lq)
{
	return STAILQ_EMPTY(&lq->lq_log_list) && STAILQ_EMPTY(&lq->lq_dispatch_list);
}

static boolean_t
log_queue_low_mem(const log_queue_t lq)
{
	return lq->lq_cnt_mem_avail < (lq->lq_cnt_mem_active * lq_low_mem_limit);
}

static lq_req_state_t
log_queue_request_state(log_queue_t lq)
{
	lq_req_state_t req_state = read_dependency(&lq->lq_req_state);
	return req_state;
}

static void
log_queue_mem_init(log_queue_t lq, size_t idx, void *buf, size_t buflen)
{
	assert(buf);
	assert(buflen > 0);
	assert(idx < LQ_MAX_LM_SLOTS);
	assert(!LQ_MEM_ENABLED(lq, idx));

	logmem_init(&lq->lq_mem[idx], buf, buflen, lq->lq_mem_size_order,
	    LQ_MIN_LOG_SZ_ORDER, LQ_MAX_LOG_SZ_ORDER);
}

static void
log_queue_mem_deinit(log_queue_t lq, size_t idx)
{
	assert(idx < LQ_MAX_LM_SLOTS);
	assert(!LQ_MEM_ENABLED(lq, idx));

	logmem_t *lm = &lq->lq_mem[idx];
	bzero((void *)lm, sizeof(*lm));
}

static int
log_queue_mem_free_slot(log_queue_t lq)
{
	assert(LQ_MEM_ENABLED(lq, 0));

	for (int i = 1; i < LQ_MAX_LM_SLOTS; i++) {
		if (!LQ_MEM_ENABLED(lq, i)) {
			return i;
		}
	}
	return -1;
}

static void
log_queue_memory_handler(thread_call_param_t a0, __unused thread_call_param_t a1)
{
	log_queue_t lq = (log_queue_t)a0;
	lq_req_state_t req_state = log_queue_request_state(lq);

	assert(req_state != LQ_REQ_STATE_INVALID);

	if (req_state == LQ_REQ_STATE_ALLOCATING) {
		lq->lq_req_mem = log_queue_buffer_alloc(lq->lq_mem_size);
		publish(&lq->lq_req_state, LQ_REQ_STATE_READY);

		if (lq->lq_req_mem) {
			counter_inc(&log_queue_cnt_mem_allocated);
		} else {
			counter_inc(&log_queue_cnt_mem_failed);
		}
	} else if (req_state == LQ_REQ_STATE_RELEASING) {
		void *buf = read_dependent(&lq->lq_req_mem, req_state);

		log_queue_buffer_free(buf, lq->lq_mem_size);
		lq->lq_req_mem = NULL;
		publish(&lq->lq_req_state, LQ_REQ_STATE_READY);

		counter_inc(&log_queue_cnt_mem_released);
	}
}

static void
log_queue_order_memory(log_queue_t lq)
{
	boolean_t __assert_only running;

	lq->lq_req_mem = NULL;
	publish(&lq->lq_req_state, LQ_REQ_STATE_ALLOCATING);

	running = thread_call_enter(lq->lq_mem_handler);
	assert(!running);
}

static void
log_queue_release_memory(log_queue_t lq, void *buf)
{
	boolean_t __assert_only running;

	assert(buf);
	lq->lq_req_mem = buf;
	publish(&lq->lq_req_state, LQ_REQ_STATE_RELEASING);

	running = thread_call_enter(lq->lq_mem_handler);
	assert(!running);
}

static void
log_queue_mem_enable(log_queue_t lq, size_t i)
{
	logmem_t *lm = &lq->lq_mem[i];
	assert(!LQ_MEM_ENABLED(lq, i));

	LQ_MEM_ENABLE(lq, i);
	lq->lq_cnt_mem_active++;
	lq->lq_cnt_mem_avail += lm->lm_cnt_free;
}

static void
log_queue_mem_disable(log_queue_t lq, size_t i)
{
	logmem_t *lm = &lq->lq_mem[i];
	assert(LQ_MEM_ENABLED(lq, i));

	LQ_MEM_DISABLE(lq, i);
	lq->lq_cnt_mem_active--;
	lq->lq_cnt_mem_avail -= lm->lm_cnt_free;
}

static void *
log_queue_mem_reclaim(log_queue_t lq)
{
	for (int i = 1; i < LQ_MAX_LM_SLOTS; i++) {
		logmem_t *lm = &lq->lq_mem[i];
		if (LQ_MEM_ENABLED(lq, i) && logmem_empty(lm)) {
			assert(lm->lm_mem_size == lq->lq_mem_size);
			void *reclaimed = lm->lm_mem;
			log_queue_mem_disable(lq, i);
			log_queue_mem_deinit(lq, i);
			return reclaimed;
		}
	}
	return NULL;
}

static void
log_queue_mem_reconfigure(log_queue_t lq)
{
	assert(lq->lq_mem_state == LQ_MEM_STATE_ALLOCATING ||
	    lq->lq_mem_state == LQ_MEM_STATE_RELEASING);

	lq_req_state_t req_state = log_queue_request_state(lq);

	if (req_state == LQ_REQ_STATE_READY) {
		if (lq->lq_mem_state == LQ_MEM_STATE_ALLOCATING) {
			void *buf = read_dependent(&lq->lq_req_mem, req_state);
			if (buf) {
				const int i = log_queue_mem_free_slot(lq);
				assert(i > 0);
				log_queue_mem_init(lq, i, buf, lq->lq_mem_size);
				log_queue_mem_enable(lq, i);
			}
		}
		lq->lq_mem_state = LQ_MEM_STATE_READY;
		publish(&lq->lq_req_state, LQ_REQ_STATE_INVALID);
	}
}

static boolean_t
log_queue_needs_memory(log_queue_t lq, boolean_t new_suspend)
{
	if (new_suspend || log_queue_low_mem(lq)) {
		return lq->lq_cnt_mem_active < LQ_MAX_LM_SLOTS;
	}
	return false;
}

static boolean_t
log_queue_can_release_memory(log_queue_t lq)
{
	assert(lq->lq_mem_state == LQ_MEM_STATE_READY);

	if (lq->lq_cnt_mem_active > 1 && log_queue_empty(lq) && !lq->lq_suspend) {
		const uint64_t total_log_cnt = counter_load(&log_queue_cnt_received);
		return total_log_cnt > LQ_DEFAULT_FREE_AFTER_CNT;
	}
	return false;
}

extern boolean_t tasks_suspend_state;

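/*
 * Report a fresh transition into the tasks-suspended state. A new suspend
 * makes log_queue_needs_memory() grow the queue preemptively, presumably
 * because the firehose is not drained while userspace is suspended and
 * incoming logs have to be absorbed by the queues in the meantime.
 */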
static boolean_t
detect_new_suspend(log_queue_t lq)
{
	if (!tasks_suspend_state) {
		lq->lq_suspend = false;
		return false;
	}

	if (!lq->lq_suspend) {
		lq->lq_suspend = true;
		return true;
	}

	return false;
}

static void
log_queue_dispatch(void)
{
	lq_mem_state_t new_mem_state = LQ_MEM_STATE_READY;
	void *reclaimed_memory = NULL;

	disable_preemption();

	log_queue_t lq = PERCPU_GET(oslog_queue);
	if (__improbable(!lq->lq_ready)) {
		enable_preemption();
		return;
	}

	dispatch_list_cleanup(lq);

	log_queue_entry_t logs[LQ_BATCH_SIZE];
	size_t logs_count = log_dispatch_prepare(lq, LQ_BATCH_SIZE, (log_queue_entry_t *)&logs);

	boolean_t new_suspend = detect_new_suspend(lq);

	if (__improbable(lq->lq_mem_state != LQ_MEM_STATE_READY)) {
		log_queue_mem_reconfigure(lq);
	} else if (logs_count == 0 && log_queue_can_release_memory(lq)) {
		reclaimed_memory = log_queue_mem_reclaim(lq);
		if (reclaimed_memory) {
			lq->lq_mem_state = LQ_MEM_STATE_RELEASING;
			new_mem_state = lq->lq_mem_state;
		}
	} else if (log_queue_needs_memory(lq, new_suspend)) {
		lq->lq_mem_state = LQ_MEM_STATE_ALLOCATING;
		new_mem_state = lq->lq_mem_state;
	}

	enable_preemption();

	switch (new_mem_state) {
	case LQ_MEM_STATE_RELEASING:
		assert(logs_count == 0);
		log_queue_release_memory(lq, reclaimed_memory);
		break;
	case LQ_MEM_STATE_ALLOCATING:
		log_queue_order_memory(lq);
	/* FALLTHROUGH */
	case LQ_MEM_STATE_READY:
		log_queue_dispatch_logs(logs_count, logs);
		break;
	default:
		panic("Invalid log memory state %u", new_mem_state);
		break;
	}
}

static bool
log_queue_add(log_payload_t lp, const uint8_t *lp_data)
{
	boolean_t order_memory = false;

	disable_preemption();

	log_queue_t lq = PERCPU_GET(oslog_queue);
	if (__improbable(!lq->lq_ready)) {
		enable_preemption();
		counter_inc(&log_queue_cnt_dropped_off);
		return false;
	}

	boolean_t new_suspend = detect_new_suspend(lq);

	if (__improbable(lq->lq_mem_state != LQ_MEM_STATE_READY)) {
		log_queue_mem_reconfigure(lq);
	} else if (log_queue_needs_memory(lq, new_suspend)) {
		lq->lq_mem_state = LQ_MEM_STATE_ALLOCATING;
		order_memory = true;
	}

	bool added = log_queue_add_entry(lq, lp, lp_data);
	enable_preemption();

	if (order_memory) {
		log_queue_order_memory(lq);
	}

	return added;
}

__startup_func
static size_t
log_queue_init_memory(log_queue_t lq, size_t lm_count)
{
	assert(lm_count <= LQ_MAX_LM_SLOTS);

	for (size_t i = 0; i < lm_count; i++) {
		void *buf = log_queue_buffer_alloc(lq->lq_mem_size);
		if (!buf) {
			return i;
		}
		counter_inc(&log_queue_cnt_mem_allocated);
		log_queue_mem_init(lq, i, buf, lq->lq_mem_size);
		log_queue_mem_enable(lq, i);
	}

	return lm_count;
}

__startup_func
static void
oslog_init_log_queues(void)
{
	if (os_log_disabled()) {
		printf("Log queues disabled: Logging disabled by ATM\n");
		return;
	}

	if (lq_bootarg_size_order == 0) {
		printf("Log queues disabled: Zero lq_size_order boot argument\n");
		return;
	}

	lq_bootarg_size_order = MAX(lq_bootarg_size_order, PAGE_SHIFT);
	lq_bootarg_size_order = MIN(lq_bootarg_size_order, LQ_MAX_SZ_ORDER);

	lq_bootarg_nslots = MAX(lq_bootarg_nslots, 1);
	lq_bootarg_nslots = MIN(lq_bootarg_nslots, LQ_MAX_LM_SLOTS);

	lq_low_mem_limit = MAX(1 << (lq_bootarg_size_order - LQ_LOW_MEM_SCALE), 1024);

	unsigned int slot_count = 0;

	percpu_foreach(lq, oslog_queue) {
		lq->lq_mem_size_order = lq_bootarg_size_order;
		lq->lq_mem_size = round_page(logmem_required_size(lq->lq_mem_size_order, LQ_MIN_LOG_SZ_ORDER));
		lq->lq_mem_handler = thread_call_allocate(log_queue_memory_handler, (thread_call_param_t)lq);
		slot_count += log_queue_init_memory(lq, lq_bootarg_nslots);
		STAILQ_INIT(&lq->lq_log_list);
		STAILQ_INIT(&lq->lq_dispatch_list);
		lq->lq_ready = true;
	}

	printf("Log queues configured: slot count: %u, per-slot size: %u, total size: %u\n",
	    slot_count, (1 << lq_bootarg_size_order),
	    slot_count * (1 << lq_bootarg_size_order));
}
STARTUP(OSLOG, STARTUP_RANK_SECOND, oslog_init_log_queues);

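/*
 * Entry point of the log queue. Try to send the payload directly into the
 * firehose first and, on success, take the opportunity to drain queued logs
 * via log_queue_dispatch(). If the firehose rejects the payload, fall back
 * to storing it in the per-CPU log queue so it can be replayed later.
 */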
bool
log_queue_log(log_payload_t lp, const void *lp_data, bool stream)
{
	assert(lp);
	assert(oslog_is_safe() || startup_phase < STARTUP_SUB_EARLY_BOOT);

	counter_inc(&log_queue_cnt_received);

	if (log_payload_send(lp, lp_data, stream)) {
		counter_inc(&log_queue_cnt_sent);
		log_queue_dispatch();
		return true;
	}
	counter_inc(&log_queue_cnt_rejected_fh);

	if (!log_queue_add(lp, lp_data)) {
		return false;
	}

	return true;
}