xref: /xnu-8019.80.24/libkern/os/log_queue.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2020-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. Please obtain a copy of the License at
10  * http://www.opensource.apple.com/apsl/ and read it before using this
11  * file.
12  *
13  * The Original Code and all software distributed under the License are
14  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18  * Please see the License for the specific language governing rights and
19  * limitations under the License.
20  *           log_queue_failed_intr);
21  *
22  * @APPLE_LICENSE_HEADER_END@
23  */
24 
25 #include <kern/assert.h>
26 #include <kern/counter.h>
27 #include <kern/cpu_data.h>
28 #include <kern/percpu.h>
29 #include <kern/thread_call.h>
30 #include <libkern/libkern.h>
31 #include <sys/queue.h>
32 #include <vm/vm_kern.h>
33 
34 #include "log_queue.h"
35 #include "log_mem.h"
36 
37 #define LQ_DEFAULT_SZ_ORDER 15 // 32K per slot
38 #define LQ_DEFAULT_FREE_AFTER_CNT 15000 // Deallocate log queue after N logs
39 #define LQ_MAX_SZ_ORDER 20 // 1MB per CPU should really be enough and a hard cap
40 #define LQ_MIN_LOG_SZ_ORDER 5
41 #define LQ_MAX_LOG_SZ_ORDER 11
42 #define LQ_BATCH_SIZE 24
43 #define LQ_MAX_LM_SLOTS 8
44 #define LQ_LOW_MEM_SCALE 3
45 
46 #define LQ_MEM_ENABLE(q, i) ((q)->lq_mem_set |= (1 << (i)))
47 #define LQ_MEM_ENABLED(q, i) ((q)->lq_mem_set & (1 << (i)))
48 #define LQ_MEM_DISABLE(q, i) ((q)->lq_mem_set &= ~(1 << (i)))
49 
50 OS_ENUM(log_queue_entry_state, uint8_t,
51     LOG_QUEUE_ENTRY_STATE_INVALID = 0,
52     LOG_QUEUE_ENTRY_STATE_STORED,
53     LOG_QUEUE_ENTRY_STATE_DISPATCHED,
54     LOG_QUEUE_ENTRY_STATE_SENT,
55     LOG_QUEUE_ENTRY_STATE_FAILED
56     );
57 
58 OS_ENUM(lq_mem_state, uint8_t,
59     LQ_MEM_STATE_READY = 0,
60     LQ_MEM_STATE_ALLOCATING,
61     LQ_MEM_STATE_RELEASING
62     );
63 
64 OS_ENUM(lq_req_state, uint8_t,
65     LQ_REQ_STATE_INVALID = 0,
66     LQ_REQ_STATE_ALLOCATING,
67     LQ_REQ_STATE_RELEASING,
68     LQ_REQ_STATE_READY
69     );
70 
71 typedef struct log_queue_entry {
72 	STAILQ_ENTRY(log_queue_entry)   lqe_link;
73 	uint16_t                        lqe_size;
74 	uint16_t                        lqe_lm_id;
75 	_Atomic log_queue_entry_state_t lqe_state;
76 	log_payload_s                   lqe_payload;
77 } log_queue_entry_s, *log_queue_entry_t;
78 
79 typedef STAILQ_HEAD(, log_queue_entry) log_queue_list_s, *log_queue_list_t;
80 
81 typedef struct {
82 	log_queue_list_s        lq_log_list;
83 	log_queue_list_s        lq_dispatch_list;
84 	logmem_t                lq_mem[LQ_MAX_LM_SLOTS];
85 	size_t                  lq_mem_set;
86 	size_t                  lq_mem_size;
87 	size_t                  lq_mem_size_order;
88 	lq_mem_state_t          lq_mem_state;
89 	thread_call_t           lq_mem_handler;
90 	size_t                  lq_cnt_mem_active;
91 	size_t                  lq_cnt_mem_avail;
92 	_Atomic lq_req_state_t  lq_req_state;
93 	void                    *lq_req_mem;
94 	uint32_t                lq_ready : 1;
95 	uint32_t                lq_suspend : 1;
96 } log_queue_s, *log_queue_t;
97 
98 extern bool os_log_disabled(void);
99 
100 /*
101  * Log Queue
102  *
103  * Log queues are allocated and set up per cpu. When a firehose memory is full
104  * logs are stored in a log queue and sent into the firehose once it has a free
105  * space again. Each log queue (memory) can grow and shrink based on demand by
106  * adding/removing additional memory to/from its memory slots. There are
107  * LQ_MAX_LM_SLOTS memory slots available for every log queue to use. Memory
108  * slots are released when not needed, with one slot always allocated per queue
109  * as a minimum.
110  *
111  * Boot args:
112  *
113  * lq_size_order: Per slot memory size defined as a power of 2 exponent
114  *                (i.e. 2^lq_bootarg_size_order). Zero disables queues.
115  *
116  * lq_nslots: Number of allocated slots to boot with per each log queue.
117  *            Once initial log traffic decreases, log queues release
118  *            slots as needed.
119  *
120  * If extensive number of logs is expected, setting aforementioned boot-args as
121  * needed allows to capture the vast majority of logs and avoid drops.
122  */
123 TUNABLE(size_t, lq_bootarg_size_order, "lq_size_order", LQ_DEFAULT_SZ_ORDER);
124 TUNABLE(size_t, lq_bootarg_nslots, "lq_nslots", LQ_MAX_LM_SLOTS);
125 
126 SCALABLE_COUNTER_DEFINE(log_queue_cnt_received);
127 SCALABLE_COUNTER_DEFINE(log_queue_cnt_rejected_fh);
128 SCALABLE_COUNTER_DEFINE(log_queue_cnt_queued);
129 SCALABLE_COUNTER_DEFINE(log_queue_cnt_sent);
130 SCALABLE_COUNTER_DEFINE(log_queue_cnt_dropped_nomem);
131 SCALABLE_COUNTER_DEFINE(log_queue_cnt_dropped_off);
132 SCALABLE_COUNTER_DEFINE(log_queue_cnt_mem_allocated);
133 SCALABLE_COUNTER_DEFINE(log_queue_cnt_mem_released);
134 SCALABLE_COUNTER_DEFINE(log_queue_cnt_mem_failed);
135 
136 static log_queue_s PERCPU_DATA(oslog_queue);
137 static size_t lq_low_mem_limit;
138 
139 static void *
log_queue_buffer_alloc(size_t amount)140 log_queue_buffer_alloc(size_t amount)
141 {
142 	assert(amount > 0);
143 
144 	vm_offset_t addr = 0;
145 	if (kmem_alloc(kernel_map, &addr, amount, VM_KERN_MEMORY_LOG) == KERN_SUCCESS) {
146 		bzero((void *)addr, amount);
147 		return (void *)addr;
148 	}
149 
150 	return NULL;
151 }
152 
153 static void
log_queue_buffer_free(void * addr,size_t amount)154 log_queue_buffer_free(void *addr, size_t amount)
155 {
156 	assert(addr);
157 	assert(amount > 0);
158 
159 	bzero(addr, amount);
160 	kmem_free(kernel_map, (vm_offset_t)addr, round_page(amount));
161 }
162 
163 #define log_queue_entry_size(p) (sizeof(log_queue_entry_s) + (p)->lp_data_size)
164 
165 #define publish(a, v) os_atomic_store((a), (v), release)
166 #define read_dependency(v) os_atomic_load((v), dependency)
167 #define read_dependent(v, t) os_atomic_load_with_dependency_on((v), (uintptr_t)(t))
168 #define read_dependent_w(v, t) ({ \
169 	__auto_type _v = os_atomic_inject_dependency((v), (uintptr_t)(t)); \
170 	os_atomic_load_wide(_v, dependency); \
171 })
172 
173 static log_queue_entry_state_t
log_queue_entry_state(const log_queue_entry_t lqe)174 log_queue_entry_state(const log_queue_entry_t lqe)
175 {
176 	log_queue_entry_state_t state = read_dependency(&lqe->lqe_state);
177 	assert(state != LOG_QUEUE_ENTRY_STATE_INVALID);
178 	return state;
179 }
180 
181 static log_queue_entry_t
log_queue_entry_alloc(log_queue_t lq,size_t lqe_size)182 log_queue_entry_alloc(log_queue_t lq, size_t lqe_size)
183 {
184 	for (short i = 0; i < LQ_MAX_LM_SLOTS; i++) {
185 		if (!LQ_MEM_ENABLED(lq, i)) {
186 			continue;
187 		}
188 		log_queue_entry_t lqe = logmem_alloc(&lq->lq_mem[i], &lqe_size);
189 		if (lqe) {
190 			assert(lqe_size <= lq->lq_cnt_mem_avail);
191 			lq->lq_cnt_mem_avail -= lqe_size;
192 			assert(lqe_size <= UINT16_MAX);
193 			lqe->lqe_size = (uint16_t)lqe_size;
194 			lqe->lqe_lm_id = i;
195 			return lqe;
196 		}
197 	}
198 
199 	return NULL;
200 }
201 
202 static void
log_queue_entry_free(log_queue_t lq,log_queue_entry_t lqe)203 log_queue_entry_free(log_queue_t lq, log_queue_entry_t lqe)
204 {
205 	const size_t lqe_size = lqe->lqe_size;
206 	const uint16_t lqe_lm_id = lqe->lqe_lm_id;
207 
208 	bzero(lqe, lqe_size);
209 	logmem_free(&lq->lq_mem[lqe_lm_id], lqe, lqe_size);
210 	lq->lq_cnt_mem_avail += lqe_size;
211 }
212 
213 static bool
log_queue_add_entry(log_queue_t lq,log_payload_t lp,const uint8_t * lp_data)214 log_queue_add_entry(log_queue_t lq, log_payload_t lp, const uint8_t *lp_data)
215 {
216 	log_queue_entry_t lqe = log_queue_entry_alloc(lq, log_queue_entry_size(lp));
217 	if (!lqe) {
218 		counter_inc_preemption_disabled(&log_queue_cnt_dropped_nomem);
219 		return false;
220 	}
221 	assert(lqe->lqe_size >= lp->lp_data_size);
222 
223 	lqe->lqe_payload = *lp;
224 	(void) memcpy((uint8_t *)lqe + sizeof(*lqe), lp_data, lqe->lqe_payload.lp_data_size);
225 	STAILQ_INSERT_TAIL(&lq->lq_log_list, lqe, lqe_link);
226 	publish(&lqe->lqe_state, LOG_QUEUE_ENTRY_STATE_STORED);
227 
228 	counter_inc_preemption_disabled(&log_queue_cnt_queued);
229 
230 	return true;
231 }
232 
233 /*
234  * Remove successfully sent logs from a dispatch list and free them.
235  */
236 static size_t
dispatch_list_cleanup(log_queue_t lq)237 dispatch_list_cleanup(log_queue_t lq)
238 {
239 	log_queue_entry_t lqe, lqe_tmp;
240 	size_t freed = 0;
241 
242 	STAILQ_FOREACH_SAFE(lqe, &lq->lq_dispatch_list, lqe_link, lqe_tmp) {
243 		log_queue_entry_state_t lqe_state = log_queue_entry_state(lqe);
244 		assert(lqe_state != LOG_QUEUE_ENTRY_STATE_STORED);
245 
246 		if (lqe_state == LOG_QUEUE_ENTRY_STATE_SENT) {
247 			STAILQ_REMOVE(&lq->lq_dispatch_list, lqe, log_queue_entry, lqe_link);
248 			publish(&lqe->lqe_state, LOG_QUEUE_ENTRY_STATE_INVALID);
249 			log_queue_entry_free(lq, lqe);
250 			counter_dec_preemption_disabled(&log_queue_cnt_queued);
251 			freed++;
252 		}
253 	}
254 
255 	return freed;
256 }
257 
258 /*
259  * Walk and collect logs stored in the log queue suitable for dispatching.
260  * First, collect previously failed logs, then (if still enough space) grab new
261  * logs.
262  */
263 static size_t
log_dispatch_prepare(log_queue_t lq,size_t requested,log_queue_entry_t * buf)264 log_dispatch_prepare(log_queue_t lq, size_t requested, log_queue_entry_t *buf)
265 {
266 	log_queue_entry_t lqe, lqe_tmp;
267 	size_t collected = 0;
268 
269 	STAILQ_FOREACH(lqe, &lq->lq_dispatch_list, lqe_link) {
270 		log_queue_entry_state_t lqe_state = log_queue_entry_state(lqe);
271 		assert(lqe_state != LOG_QUEUE_ENTRY_STATE_STORED);
272 
273 		if (lqe_state == LOG_QUEUE_ENTRY_STATE_FAILED) {
274 			publish(&lqe->lqe_state, LOG_QUEUE_ENTRY_STATE_DISPATCHED);
275 			buf[collected++] = lqe;
276 		}
277 
278 		if (collected == requested) {
279 			return collected;
280 		}
281 	}
282 	assert(collected < requested);
283 
284 	STAILQ_FOREACH_SAFE(lqe, &lq->lq_log_list, lqe_link, lqe_tmp) {
285 		assert(log_queue_entry_state(lqe) == LOG_QUEUE_ENTRY_STATE_STORED);
286 
287 		STAILQ_REMOVE(&lq->lq_log_list, lqe, log_queue_entry, lqe_link);
288 		STAILQ_INSERT_TAIL(&lq->lq_dispatch_list, lqe, lqe_link);
289 		publish(&lqe->lqe_state, LOG_QUEUE_ENTRY_STATE_DISPATCHED);
290 
291 		buf[collected++] = lqe;
292 		if (collected == requested) {
293 			break;
294 		}
295 	}
296 
297 	return collected;
298 }
299 
300 /*
301  * Send dispatched logs to the firehose. Skip streaming when replaying.
302  * Streaming does not process timestamps and would therefore show logs out of
303  * order.
304  */
305 static void
log_queue_dispatch_logs(size_t logs_count,log_queue_entry_t * logs)306 log_queue_dispatch_logs(size_t logs_count, log_queue_entry_t *logs)
307 {
308 	for (size_t i = 0; i < logs_count; i++) {
309 		const log_queue_entry_t lqe = logs[i];
310 		log_queue_entry_state_t lqe_state = log_queue_entry_state(lqe);
311 
312 		if (lqe_state == LOG_QUEUE_ENTRY_STATE_DISPATCHED) {
313 			const log_payload_t lqe_lp = &lqe->lqe_payload;
314 
315 			log_payload_s lp = {
316 				.lp_ftid = read_dependent_w(&lqe_lp->lp_ftid, lqe_state),
317 				.lp_timestamp = read_dependent_w(&lqe_lp->lp_timestamp, lqe_state),
318 				.lp_stream = read_dependent(&lqe_lp->lp_stream, lqe_state),
319 				.lp_data_size = read_dependent(&lqe_lp->lp_data_size, lqe_state)
320 			};
321 			const void *lp_data = (uint8_t *)lqe + sizeof(*lqe);
322 
323 			/*
324 			 * The log queue mechanism expects only the state to be
325 			 * modified here since we are likely running on a
326 			 * different cpu. Queue cleanup will be done safely
327 			 * later in dispatch_list_cleanup().
328 			 */
329 			if (log_payload_send(&lp, lp_data, false)) {
330 				publish(&lqe->lqe_state, LOG_QUEUE_ENTRY_STATE_SENT);
331 				counter_inc(&log_queue_cnt_sent);
332 			} else {
333 				publish(&lqe->lqe_state, LOG_QUEUE_ENTRY_STATE_FAILED);
334 			}
335 		}
336 	}
337 }
338 
339 static bool
log_queue_empty(const log_queue_t lq)340 log_queue_empty(const log_queue_t lq)
341 {
342 	return STAILQ_EMPTY(&lq->lq_log_list) && STAILQ_EMPTY(&lq->lq_dispatch_list);
343 }
344 
345 static boolean_t
log_queue_low_mem(const log_queue_t lq)346 log_queue_low_mem(const log_queue_t lq)
347 {
348 	return lq->lq_cnt_mem_avail < (lq->lq_cnt_mem_active * lq_low_mem_limit);
349 }
350 
351 static lq_req_state_t
log_queue_request_state(log_queue_t lq)352 log_queue_request_state(log_queue_t lq)
353 {
354 	lq_req_state_t req_state = read_dependency(&lq->lq_req_state);
355 	return req_state;
356 }
357 
358 static void
log_queue_mem_init(log_queue_t lq,size_t idx,void * buf,size_t buflen)359 log_queue_mem_init(log_queue_t lq, size_t idx, void *buf, size_t buflen)
360 {
361 	assert(buf);
362 	assert(buflen > 0);
363 	assert(idx < LQ_MAX_LM_SLOTS);
364 	assert(!LQ_MEM_ENABLED(lq, idx));
365 
366 	logmem_init(&lq->lq_mem[idx], buf, buflen, lq->lq_mem_size_order,
367 	    LQ_MIN_LOG_SZ_ORDER, LQ_MAX_LOG_SZ_ORDER);
368 }
369 
370 static void
log_queue_mem_deinit(log_queue_t lq,size_t idx)371 log_queue_mem_deinit(log_queue_t lq, size_t idx)
372 {
373 	assert(idx < LQ_MAX_LM_SLOTS);
374 	assert(!LQ_MEM_ENABLED(lq, idx));
375 
376 	logmem_t *lm = &lq->lq_mem[idx];
377 	bzero((void *)lm, sizeof(*lm));
378 }
379 
380 static int
log_queue_mem_free_slot(log_queue_t lq)381 log_queue_mem_free_slot(log_queue_t lq)
382 {
383 	assert(LQ_MEM_ENABLED(lq, 0));
384 
385 	for (int i = 1; i < LQ_MAX_LM_SLOTS; i++) {
386 		if (!LQ_MEM_ENABLED(lq, i)) {
387 			return i;
388 		}
389 	}
390 	return -1;
391 }
392 
393 static void
log_queue_memory_handler(thread_call_param_t a0,__unused thread_call_param_t a1)394 log_queue_memory_handler(thread_call_param_t a0, __unused thread_call_param_t a1)
395 {
396 	log_queue_t lq = (log_queue_t)a0;
397 	lq_req_state_t req_state = log_queue_request_state(lq);
398 
399 	assert(req_state != LQ_REQ_STATE_INVALID);
400 
401 	if (req_state == LQ_REQ_STATE_ALLOCATING) {
402 		lq->lq_req_mem = log_queue_buffer_alloc(lq->lq_mem_size);
403 		publish(&lq->lq_req_state, LQ_REQ_STATE_READY);
404 
405 		if (lq->lq_req_mem) {
406 			counter_inc(&log_queue_cnt_mem_allocated);
407 		} else {
408 			counter_inc(&log_queue_cnt_mem_failed);
409 		}
410 	} else if (req_state == LQ_REQ_STATE_RELEASING) {
411 		void *buf = read_dependent(&lq->lq_req_mem, req_state);
412 
413 		log_queue_buffer_free(buf, lq->lq_mem_size);
414 		lq->lq_req_mem = NULL;
415 		publish(&lq->lq_req_state, LQ_REQ_STATE_READY);
416 
417 		counter_inc(&log_queue_cnt_mem_released);
418 	}
419 }
420 
421 static void
log_queue_order_memory(log_queue_t lq)422 log_queue_order_memory(log_queue_t lq)
423 {
424 	boolean_t __assert_only running;
425 
426 	lq->lq_req_mem = NULL;
427 	publish(&lq->lq_req_state, LQ_REQ_STATE_ALLOCATING);
428 
429 	running = thread_call_enter(lq->lq_mem_handler);
430 	assert(!running);
431 }
432 
433 static void
log_queue_release_memory(log_queue_t lq,void * buf)434 log_queue_release_memory(log_queue_t lq, void *buf)
435 {
436 	boolean_t __assert_only running;
437 
438 	assert(buf);
439 	lq->lq_req_mem = buf;
440 	publish(&lq->lq_req_state, LQ_REQ_STATE_RELEASING);
441 
442 	running = thread_call_enter(lq->lq_mem_handler);
443 	assert(!running);
444 }
445 
446 static void
log_queue_mem_enable(log_queue_t lq,size_t i)447 log_queue_mem_enable(log_queue_t lq, size_t i)
448 {
449 	logmem_t *lm = &lq->lq_mem[i];
450 	assert(!LQ_MEM_ENABLED(lq, i));
451 
452 	LQ_MEM_ENABLE(lq, i);
453 	lq->lq_cnt_mem_active++;
454 	lq->lq_cnt_mem_avail += lm->lm_cnt_free;
455 }
456 
457 static void
log_queue_mem_disable(log_queue_t lq,size_t i)458 log_queue_mem_disable(log_queue_t lq, size_t i)
459 {
460 	logmem_t *lm = &lq->lq_mem[i];
461 	assert(LQ_MEM_ENABLED(lq, i));
462 
463 	LQ_MEM_DISABLE(lq, i);
464 	lq->lq_cnt_mem_active--;
465 	lq->lq_cnt_mem_avail -= lm->lm_cnt_free;
466 }
467 
468 static void *
log_queue_mem_reclaim(log_queue_t lq)469 log_queue_mem_reclaim(log_queue_t lq)
470 {
471 	for (int i = 1; i < LQ_MAX_LM_SLOTS; i++) {
472 		logmem_t *lm = &lq->lq_mem[i];
473 		if (LQ_MEM_ENABLED(lq, i) && logmem_empty(lm)) {
474 			assert(lm->lm_mem_size == lq->lq_mem_size);
475 			void *reclaimed = lm->lm_mem;
476 			log_queue_mem_disable(lq, i);
477 			log_queue_mem_deinit(lq, i);
478 			return reclaimed;
479 		}
480 	}
481 	return NULL;
482 }
483 
484 static void
log_queue_mem_reconfigure(log_queue_t lq)485 log_queue_mem_reconfigure(log_queue_t lq)
486 {
487 	assert(lq->lq_mem_state == LQ_MEM_STATE_ALLOCATING ||
488 	    lq->lq_mem_state == LQ_MEM_STATE_RELEASING);
489 
490 	lq_req_state_t req_state = log_queue_request_state(lq);
491 
492 	if (req_state == LQ_REQ_STATE_READY) {
493 		if (lq->lq_mem_state == LQ_MEM_STATE_ALLOCATING) {
494 			void *buf = read_dependent(&lq->lq_req_mem, req_state);
495 			if (buf) {
496 				const int i = log_queue_mem_free_slot(lq);
497 				assert(i > 0);
498 				log_queue_mem_init(lq, i, buf, lq->lq_mem_size);
499 				log_queue_mem_enable(lq, i);
500 			}
501 		}
502 		lq->lq_mem_state = LQ_MEM_STATE_READY;
503 		publish(&lq->lq_req_state, LQ_REQ_STATE_INVALID);
504 	}
505 }
506 
507 static boolean_t
log_queue_needs_memory(log_queue_t lq,boolean_t new_suspend)508 log_queue_needs_memory(log_queue_t lq, boolean_t new_suspend)
509 {
510 	if (new_suspend || log_queue_low_mem(lq)) {
511 		return lq->lq_cnt_mem_active < LQ_MAX_LM_SLOTS;
512 	}
513 	return false;
514 }
515 
516 static boolean_t
log_queue_can_release_memory(log_queue_t lq)517 log_queue_can_release_memory(log_queue_t lq)
518 {
519 	assert(lq->lq_mem_state == LQ_MEM_STATE_READY);
520 
521 	if (lq->lq_cnt_mem_active > 1 && log_queue_empty(lq) && !lq->lq_suspend) {
522 		const uint64_t total_log_cnt = counter_load(&log_queue_cnt_received);
523 		return total_log_cnt > LQ_DEFAULT_FREE_AFTER_CNT;
524 	}
525 	return false;
526 }
527 
528 extern boolean_t tasks_suspend_state;
529 
530 static boolean_t
detect_new_suspend(log_queue_t lq)531 detect_new_suspend(log_queue_t lq)
532 {
533 	if (!tasks_suspend_state) {
534 		lq->lq_suspend = false;
535 		return false;
536 	}
537 
538 	if (!lq->lq_suspend) {
539 		lq->lq_suspend = true;
540 		return true;
541 	}
542 
543 	return false;
544 }
545 
546 static void
log_queue_dispatch(void)547 log_queue_dispatch(void)
548 {
549 	lq_mem_state_t new_mem_state = LQ_MEM_STATE_READY;
550 	void *reclaimed_memory = NULL;
551 
552 	disable_preemption();
553 
554 	log_queue_t lq = PERCPU_GET(oslog_queue);
555 	if (__improbable(!lq->lq_ready)) {
556 		enable_preemption();
557 		return;
558 	}
559 
560 	dispatch_list_cleanup(lq);
561 
562 	log_queue_entry_t logs[LQ_BATCH_SIZE];
563 	size_t logs_count = log_dispatch_prepare(lq, LQ_BATCH_SIZE, (log_queue_entry_t *)&logs);
564 
565 	boolean_t new_suspend = detect_new_suspend(lq);
566 
567 	if (__improbable(lq->lq_mem_state != LQ_MEM_STATE_READY)) {
568 		log_queue_mem_reconfigure(lq);
569 	} else if (logs_count == 0 && log_queue_can_release_memory(lq)) {
570 		reclaimed_memory = log_queue_mem_reclaim(lq);
571 		if (reclaimed_memory) {
572 			lq->lq_mem_state = LQ_MEM_STATE_RELEASING;
573 			new_mem_state = lq->lq_mem_state;
574 		}
575 	} else if (log_queue_needs_memory(lq, new_suspend)) {
576 		lq->lq_mem_state = LQ_MEM_STATE_ALLOCATING;
577 		new_mem_state = lq->lq_mem_state;
578 	}
579 
580 	enable_preemption();
581 
582 	switch (new_mem_state) {
583 	case LQ_MEM_STATE_RELEASING:
584 		assert(logs_count == 0);
585 		log_queue_release_memory(lq, reclaimed_memory);
586 		break;
587 	case LQ_MEM_STATE_ALLOCATING:
588 		log_queue_order_memory(lq);
589 	/* FALLTHROUGH */
590 	case LQ_MEM_STATE_READY:
591 		log_queue_dispatch_logs(logs_count, logs);
592 		break;
593 	default:
594 		panic("Invalid log memory state %u", new_mem_state);
595 		break;
596 	}
597 }
598 
599 static bool
log_queue_add(log_payload_t lp,const uint8_t * lp_data)600 log_queue_add(log_payload_t lp, const uint8_t *lp_data)
601 {
602 	boolean_t order_memory = false;
603 
604 	disable_preemption();
605 
606 	log_queue_t lq = PERCPU_GET(oslog_queue);
607 	if (__improbable(!lq->lq_ready)) {
608 		enable_preemption();
609 		counter_inc(&log_queue_cnt_dropped_off);
610 		return false;
611 	}
612 
613 	boolean_t new_suspend = detect_new_suspend(lq);
614 
615 	if (__improbable(lq->lq_mem_state != LQ_MEM_STATE_READY)) {
616 		log_queue_mem_reconfigure(lq);
617 	} else if (log_queue_needs_memory(lq, new_suspend)) {
618 		lq->lq_mem_state = LQ_MEM_STATE_ALLOCATING;
619 		order_memory = true;
620 	}
621 
622 	bool added = log_queue_add_entry(lq, lp, lp_data);
623 	enable_preemption();
624 
625 	if (order_memory) {
626 		log_queue_order_memory(lq);
627 	}
628 
629 	return added;
630 }
631 
632 __startup_func
633 static size_t
log_queue_init_memory(log_queue_t lq,size_t lm_count)634 log_queue_init_memory(log_queue_t lq, size_t lm_count)
635 {
636 	assert(lm_count <= LQ_MAX_LM_SLOTS);
637 
638 	for (size_t i = 0; i < lm_count; i++) {
639 		void *buf = log_queue_buffer_alloc(lq->lq_mem_size);
640 		if (!buf) {
641 			return i;
642 		}
643 		counter_inc(&log_queue_cnt_mem_allocated);
644 		log_queue_mem_init(lq, i, buf, lq->lq_mem_size);
645 		log_queue_mem_enable(lq, i);
646 	}
647 
648 	return lm_count;
649 }
650 
651 __startup_func
652 static void
oslog_init_log_queues(void)653 oslog_init_log_queues(void)
654 {
655 	if (os_log_disabled()) {
656 		printf("Log queues disabled: Logging disabled by ATM\n");
657 		return;
658 	}
659 
660 	if (lq_bootarg_size_order == 0) {
661 		printf("Log queues disabled: Zero lq_size_order boot argument\n");
662 		return;
663 	}
664 
665 	lq_bootarg_size_order = MAX(lq_bootarg_size_order, PAGE_SHIFT);
666 	lq_bootarg_size_order = MIN(lq_bootarg_size_order, LQ_MAX_SZ_ORDER);
667 
668 	lq_bootarg_nslots = MAX(lq_bootarg_nslots, 1);
669 	lq_bootarg_nslots = MIN(lq_bootarg_nslots, LQ_MAX_LM_SLOTS);
670 
671 	lq_low_mem_limit = MAX(1 << (lq_bootarg_size_order - LQ_LOW_MEM_SCALE), 1024);
672 
673 	unsigned int slot_count = 0;
674 
675 	percpu_foreach(lq, oslog_queue) {
676 		lq->lq_mem_size_order = lq_bootarg_size_order;
677 		lq->lq_mem_size = round_page(logmem_required_size(lq->lq_mem_size_order, LQ_MIN_LOG_SZ_ORDER));
678 		lq->lq_mem_handler = thread_call_allocate(log_queue_memory_handler, (thread_call_param_t)lq);
679 		slot_count += log_queue_init_memory(lq, lq_bootarg_nslots);
680 		STAILQ_INIT(&lq->lq_log_list);
681 		STAILQ_INIT(&lq->lq_dispatch_list);
682 		lq->lq_ready = true;
683 	}
684 
685 	printf("Log queues configured: slot count: %u, per-slot size: %u, total size: %u\n",
686 	    slot_count, (1 << lq_bootarg_size_order),
687 	    slot_count * (1 << lq_bootarg_size_order));
688 }
689 STARTUP(OSLOG, STARTUP_RANK_SECOND, oslog_init_log_queues);
690 
691 bool
log_queue_log(log_payload_t lp,const void * lp_data,bool stream)692 log_queue_log(log_payload_t lp, const void *lp_data, bool stream)
693 {
694 	assert(lp);
695 	assert(oslog_is_safe() || startup_phase < STARTUP_SUB_EARLY_BOOT);
696 
697 	counter_inc(&log_queue_cnt_received);
698 
699 	if (log_payload_send(lp, lp_data, stream)) {
700 		counter_inc(&log_queue_cnt_sent);
701 		log_queue_dispatch();
702 		return true;
703 	}
704 	counter_inc(&log_queue_cnt_rejected_fh);
705 
706 	if (!log_queue_add(lp, lp_data)) {
707 		return false;
708 	}
709 
710 	return true;
711 }
712