1 /*
2 * Copyright (c) 2000-2022 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1993
31 * The Regents of the University of California. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the University of
44 * California, Berkeley and its contributors.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)subr_log.c 8.3 (Berkeley) 2/14/95
62 */
63
64 /*
65 * Error log buffer for kernel printf's.
66 */
67
68 #include <machine/atomic.h>
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/proc_internal.h>
72 #include <sys/vnode.h>
73 #include <stdbool.h>
74 #include <firehose/tracepoint_private.h>
75 #include <firehose/chunk_private.h>
76 #include <firehose/ioctl_private.h>
77 #include <os/firehose_buffer_private.h>
78
79 #include <os/log_private.h>
80 #include <sys/ioctl.h>
81 #include <sys/msgbuf.h>
82 #include <sys/file_internal.h>
83 #include <sys/errno.h>
84 #include <sys/select.h>
85 #include <sys/kernel.h>
86 #include <kern/thread.h>
87 #include <kern/sched_prim.h>
88 #include <kern/simple_lock.h>
89 #include <sys/lock.h>
90 #include <sys/signalvar.h>
91 #include <sys/conf.h>
92 #include <sys/sysctl.h>
93 #include <sys/queue.h>
94 #include <kern/kalloc.h>
95 #include <pexpert/pexpert.h>
96 #include <mach/mach_port.h>
97 #include <mach/mach_vm.h>
98 #include <mach/vm_map.h>
99 #include <vm/vm_kern.h>
100 #include <kern/task.h>
101 #include <kern/locks.h>
102
103 #define LOG_NBIO 0x02
104 #define LOG_ASYNC 0x04
105 #define LOG_RDWAIT 0x08
106
107 /* All globals should be accessed under bsd_log_lock() or bsd_log_lock_safe() */
108
/*
 * Per-open state for the legacy /dev/klog device; contents are only
 * meaningful while log_open is true (a single opener is allowed).
 */
struct logsoftc {
	int sc_state;           /* LOG_NBIO | LOG_ASYNC | LOG_RDWAIT flags (see above) */
	struct selinfo sc_selp; /* thread waiting for select */
	int sc_pgid;            /* process/group to signal for async I/O (SIGIO) */
	struct msgbuf *sc_mbp;  /* message buffer this reader drains (set at open) */
} logsoftc;
116
/* Static backing store for the boot-time kernel message buffer. */
char smsg_bufc[CONFIG_MSG_BSIZE]; /* static buffer */
/*
 * Statically-initialized firehose chunk used to capture log tracepoints
 * emitted before the real firehose buffer is allocated at startup.
 */
struct firehose_chunk_s oslog_boot_buf = {
	.fc_pos = {
		.fcp_next_entry_offs = offsetof(struct firehose_chunk_s, fc_data),
		.fcp_private_offs = FIREHOSE_CHUNK_SIZE,
		.fcp_refcnt = 1, // indicate that there is a writer to this chunk
		.fcp_stream = firehose_stream_persist,
		.fcp_flag_io = 1, // for now, lets assume this is coming from the io bank
	},
}; /* static buffer */
firehose_chunk_t firehose_boot_chunk = &oslog_boot_buf;
/* The message buffer proper; msg_bufc may be repointed by log_setsize(). */
struct msgbuf msgbuf = {.msg_magic = MSG_MAGIC, .msg_size = sizeof(smsg_bufc), .msg_bufx = 0, .msg_bufr = 0, .msg_bufc = smsg_bufc};
struct msgbuf *msgbufp __attribute__((used)) = &msgbuf;
130
/*
 * Per-open state for the /dev/oslog device; contents are only meaningful
 * while oslog_open is true (a single opener — logd — is allowed).
 */
struct oslogsoftc {
	int sc_state;           /* LOG_NBIO | LOG_ASYNC | LOG_RDWAIT flags (see above) */
	struct selinfo sc_selp; /* thread waiting for select */
	int sc_pgid;            /* process/group to signal for async I/O */
} oslogsoftc;

static bool log_open = false;      /* /dev/klog currently open */
static bool oslog_open = false;    /* /dev/oslog currently open */
static bool os_log_wakeup = false; /* firehose data pending for the oslog reader */

/* Characters dropped because bsd_log_lock() could not be taken (see log_putc). */
uint32_t oslog_msgbuf_dropped_charcount = 0;

/* Firehose buffer geometry; fixed after startup (SECURITY_READ_ONLY_LATE). */
SECURITY_READ_ONLY_LATE(vm_offset_t) kernel_firehose_addr = 0;
SECURITY_READ_ONLY_LATE(uint8_t) __firehose_buffer_kernel_chunk_count =
    FIREHOSE_BUFFER_KERNEL_DEFAULT_CHUNK_COUNT;
SECURITY_READ_ONLY_LATE(uint8_t) __firehose_num_kernel_io_pages =
    FIREHOSE_BUFFER_KERNEL_DEFAULT_IO_PAGES;
149
150 /* defined in osfmk/kern/printf.c */
151 extern bool bsd_log_lock(bool);
152 extern void bsd_log_lock_safe(void);
153 extern void bsd_log_unlock(void);
154
155 extern void logwakeup(void);
156 extern void oslogwakeup(void);
157 extern bool os_log_disabled(void);
158
159 /* XXX wants a linker set so these can be static */
160 extern d_open_t logopen;
161 extern d_close_t logclose;
162 extern d_read_t logread;
163 extern d_ioctl_t logioctl;
164 extern d_select_t logselect;
165
166 /* XXX wants a linker set so these can be static */
167 extern d_open_t oslogopen;
168 extern d_close_t oslogclose;
169 extern d_select_t oslogselect;
170 extern d_ioctl_t oslogioctl;
171
172 /*
173 * Serialize log access. Note that the log can be written at interrupt level,
174 * so any log manipulations that can be done from, or affect, another processor
175 * at interrupt level must be guarded with a spin lock.
176 */
177
178 static int sysctl_kern_msgbuf(struct sysctl_oid *oidp,
179 void *arg1, int arg2, struct sysctl_req *req);
180
181 /*ARGSUSED*/
182 int
logopen(__unused dev_t dev,__unused int flags,__unused int mode,struct proc * p)183 logopen(__unused dev_t dev, __unused int flags, __unused int mode, struct proc *p)
184 {
185 bsd_log_lock_safe();
186
187 if (log_open) {
188 bsd_log_unlock();
189 return EBUSY;
190 }
191
192 /*
193 * Legacy logging has to be supported as long as userspace supports it.
194 */
195 if ((atm_get_diagnostic_config() & ATM_ENABLE_LEGACY_LOGGING)) {
196 logsoftc.sc_mbp = msgbufp;
197 logsoftc.sc_pgid = proc_getpid(p); /* signal process only */
198 log_open = true;
199 bsd_log_unlock();
200 return 0;
201 }
202
203 bsd_log_unlock();
204 return ENOTSUP;
205 }
206
207 /*ARGSUSED*/
208 int
logclose(__unused dev_t dev,__unused int flag,__unused int devtype,__unused struct proc * p)209 logclose(__unused dev_t dev, __unused int flag, __unused int devtype, __unused struct proc *p)
210 {
211 bsd_log_lock_safe();
212
213 logsoftc.sc_state &= ~(LOG_NBIO | LOG_ASYNC);
214 selthreadclear(&logsoftc.sc_selp);
215 log_open = false;
216
217 bsd_log_unlock();
218 return 0;
219 }
220
221 int
oslogopen(__unused dev_t dev,__unused int flags,__unused int mode,struct proc * p)222 oslogopen(__unused dev_t dev, __unused int flags, __unused int mode, struct proc *p)
223 {
224 bsd_log_lock_safe();
225 if (oslog_open) {
226 bsd_log_unlock();
227 return EBUSY;
228 }
229 oslogsoftc.sc_pgid = proc_getpid(p); /* signal process only */
230 oslog_open = true;
231
232 bsd_log_unlock();
233 return 0;
234 }
235
236 int
oslogclose(__unused dev_t dev,__unused int flag,__unused int devtype,__unused struct proc * p)237 oslogclose(__unused dev_t dev, __unused int flag, __unused int devtype, __unused struct proc *p)
238 {
239 bsd_log_lock_safe();
240 oslogsoftc.sc_state &= ~(LOG_NBIO | LOG_ASYNC);
241 selthreadclear(&oslogsoftc.sc_selp);
242 oslog_open = false;
243 bsd_log_unlock();
244 return 0;
245 }
246
247 /*ARGSUSED*/
/*
 * Read from the circular kernel message buffer into a user iovec.
 *
 * Blocks (unless IO_NDELAY/LOG_NBIO) until data is available, sleeping in
 * 5-second slices so a missed wakeup is eventually reevaluated. The log
 * lock is dropped both across tsleep() and across uiomove(), since neither
 * may be called with a spin lock held; indices are re-read/re-committed
 * after each re-acquisition.
 *
 * Returns: 0, EWOULDBLOCK for a non-blocking empty read, or the
 * tsleep/uiomove error.
 */
int
logread(__unused dev_t dev, struct uio *uio, int flag)
{
	int error;
	ssize_t resid;

	bsd_log_lock_safe();

	struct msgbuf *mbp = logsoftc.sc_mbp;

	/* bufr == bufx means the buffer is empty: wait for a writer. */
	while (mbp->msg_bufr == mbp->msg_bufx) {
		if ((flag & IO_NDELAY) || (logsoftc.sc_state & LOG_NBIO)) {
			bsd_log_unlock();
			return EWOULDBLOCK;
		}
		logsoftc.sc_state |= LOG_RDWAIT;
		bsd_log_unlock();
		/*
		 * If the wakeup is missed then wait for 5 sec and reevaluate.
		 * If it times out, carry on.
		 */
		error = tsleep((caddr_t)mbp, (PZERO + 1) | PCATCH, "klog", 5 * hz);
		if (error && error != EWOULDBLOCK) {
			/* interrupted by a signal; lock is not held here */
			return error;
		}
		bsd_log_lock_safe();
	}
	logsoftc.sc_state &= ~LOG_RDWAIT;

	while ((resid = uio_resid(uio)) > 0) {
		size_t l;

		/* Contiguous readable run: up to bufx, or to end of buffer if wrapped. */
		if (mbp->msg_bufx >= mbp->msg_bufr) {
			l = mbp->msg_bufx - mbp->msg_bufr;
		} else {
			l = mbp->msg_size - mbp->msg_bufr;
		}
		if ((l = MIN(l, (size_t)resid)) == 0) {
			break;
		}

		/* Snapshot the read position before dropping the lock for uiomove. */
		const size_t readpos = mbp->msg_bufr;

		bsd_log_unlock();
		/* NOTE(review): uiomove may fault; writers can advance bufx meanwhile. */
		error = uiomove((caddr_t)&mbp->msg_bufc[readpos], (int)l, uio);
		if (error) {
			return error;
		}
		bsd_log_lock_safe();

		/* Commit the consumed range, wrapping at the buffer end. */
		mbp->msg_bufr = (int)(readpos + l);
		if (mbp->msg_bufr >= mbp->msg_size) {
			mbp->msg_bufr = 0;
		}
	}

	bsd_log_unlock();
	return 0;
}
307
308 /*ARGSUSED*/
309 int
logselect(__unused dev_t dev,int rw,void * wql,struct proc * p)310 logselect(__unused dev_t dev, int rw, void * wql, struct proc *p)
311 {
312 if (rw != FREAD) {
313 return 0;
314 }
315
316 bsd_log_lock_safe();
317 if (logsoftc.sc_mbp->msg_bufr == logsoftc.sc_mbp->msg_bufx) {
318 selrecord(p, &logsoftc.sc_selp, wql);
319 bsd_log_unlock();
320 return 0;
321 }
322 bsd_log_unlock();
323
324 return 1;
325 }
326
327 int
oslogselect(__unused dev_t dev,int rw,void * wql,struct proc * p)328 oslogselect(__unused dev_t dev, int rw, void * wql, struct proc *p)
329 {
330 if (rw != FREAD) {
331 return 0;
332 }
333
334 bsd_log_lock_safe();
335 if (os_log_wakeup) {
336 bsd_log_unlock();
337 return 1;
338 }
339 selrecord(p, &oslogsoftc.sc_selp, wql);
340 bsd_log_unlock();
341
342 return 0;
343 }
344
/*
 * Notify consumers of the legacy message buffer that data was appended:
 * wake select() waiters, deliver SIGIO if FIOASYNC was requested, and wake
 * any reader blocked in logread().
 *
 * May be called from contexts where taking the log lock is not always
 * safe; bails out early via oslog_is_safe() in that case.
 */
void
logwakeup(void)
{
	/*
	 * Legacy logging is rarely enabled during a typical system run. Check
	 * log_open without taking a lock as a shortcut.
	 */
	if (!log_open || !oslog_is_safe()) {
		return;
	}

	bsd_log_lock_safe();

	/* Re-check under the lock: the device may have been closed meanwhile. */
	if (!log_open) {
		bsd_log_unlock();
		return;
	}

	selwakeup(&logsoftc.sc_selp);
	if (logsoftc.sc_state & LOG_ASYNC) {
		/* Signal delivery can block, so drop the lock around it. */
		int pgid = logsoftc.sc_pgid;
		bsd_log_unlock();
		if (pgid < 0) {
			gsignal(-pgid, SIGIO);
		} else {
			proc_signal(pgid, SIGIO);
		}
		bsd_log_lock_safe();
	}

	/* log_open is re-tested because the lock was dropped above. */
	if (log_open && (logsoftc.sc_state & LOG_RDWAIT)) {
		wakeup((caddr_t)logsoftc.sc_mbp);
		logsoftc.sc_state &= ~LOG_RDWAIT;
	}

	bsd_log_unlock();
}
382
383 void
oslogwakeup(void)384 oslogwakeup(void)
385 {
386 if (!oslog_is_safe()) {
387 return;
388 }
389
390 bsd_log_lock_safe();
391 if (!oslog_open) {
392 bsd_log_unlock();
393 return;
394 }
395 selwakeup(&oslogsoftc.sc_selp);
396 os_log_wakeup = true;
397 bsd_log_unlock();
398 }
399
400 /*ARGSUSED*/
401 int
logioctl(__unused dev_t dev,u_long com,caddr_t data,__unused int flag,__unused struct proc * p)402 logioctl(__unused dev_t dev, u_long com, caddr_t data, __unused int flag, __unused struct proc *p)
403 {
404 bsd_log_lock_safe();
405
406 const struct msgbuf *mbp = logsoftc.sc_mbp;
407 int l;
408
409 switch (com) {
410 /* return number of characters immediately available */
411 case FIONREAD:
412 l = mbp->msg_bufx - mbp->msg_bufr;
413 if (l < 0) {
414 l += mbp->msg_size;
415 }
416 *(off_t *)data = l;
417 break;
418
419 case FIONBIO:
420 if (*(int *)data) {
421 logsoftc.sc_state |= LOG_NBIO;
422 } else {
423 logsoftc.sc_state &= ~LOG_NBIO;
424 }
425 break;
426
427 case FIOASYNC:
428 if (*(int *)data) {
429 logsoftc.sc_state |= LOG_ASYNC;
430 } else {
431 logsoftc.sc_state &= ~LOG_ASYNC;
432 }
433 break;
434
435 case TIOCSPGRP:
436 logsoftc.sc_pgid = *(int *)data;
437 break;
438
439 case TIOCGPGRP:
440 *(int *)data = logsoftc.sc_pgid;
441 break;
442
443 default:
444 bsd_log_unlock();
445 return -1;
446 }
447
448 bsd_log_unlock();
449 return 0;
450 }
451
452 /*ARGSUSED*/
/*
 * ioctl handler for /dev/oslog.
 *
 * LOGBUFFERMAP: share the kernel firehose buffer read-only into the
 * calling task's address space (memory-entry + mach_vm_map) and return the
 * user address/size via firehose_buffer_map_info_t.
 * LOGFLUSHED: merge the caller's push reply into the firehose state and
 * re-arm/clear the wakeup flag depending on whether more data is pending.
 *
 * NOTE(review): LOGBUFFERMAP failures are not propagated — the function
 * returns 0 and simply leaves *data untouched; userspace presumably
 * detects this via a zero fbmi_addr. Confirm against logd's usage.
 */
int
oslogioctl(__unused dev_t dev, u_long com, caddr_t data, __unused int flag, __unused struct proc *p)
{
	int ret = 0;
	mach_vm_size_t buffer_size = (__firehose_buffer_kernel_chunk_count * FIREHOSE_CHUNK_SIZE);
	firehose_buffer_map_info_t map_info = {0, 0};
	firehose_buffer_t kernel_firehose_buffer = NULL;
	mach_vm_address_t user_addr = 0;
	mach_port_t mem_entry_ptr = MACH_PORT_NULL;
	bool has_more;

	switch (com) {
	/* return number of characters immediately available */

	case LOGBUFFERMAP:
		kernel_firehose_buffer = (firehose_buffer_t)kernel_firehose_addr;

		/* Create a shareable, read-only memory entry for the buffer. */
		ret = mach_make_memory_entry_64(kernel_map,
		    &buffer_size,
		    (mach_vm_offset_t) kernel_firehose_buffer,
		    (MAP_MEM_VM_SHARE | VM_PROT_READ),
		    &mem_entry_ptr,
		    MACH_PORT_NULL);
		if (ret == KERN_SUCCESS) {
			/* Map it anywhere in the caller's task, read-only, shared. */
			ret = mach_vm_map_kernel(get_task_map(current_task()),
			    &user_addr,
			    buffer_size,
			    0, /* mask */
			    VM_MAP_KERNEL_FLAGS_ANYWHERE(),
			    mem_entry_ptr,
			    0, /* offset */
			    FALSE, /* copy */
			    VM_PROT_READ,
			    VM_PROT_READ,
			    VM_INHERIT_SHARE);
		}

		if (ret == KERN_SUCCESS) {
			/* Hand the user-visible address and size back to the caller. */
			map_info.fbmi_addr = (uint64_t) (user_addr);
			map_info.fbmi_size = buffer_size;
			bcopy(&map_info, data, sizeof(firehose_buffer_map_info_t));
		}
		break;
	case LOGFLUSHED:
		/* has_more is true if the firehose still holds unflushed data. */
		has_more = __firehose_merge_updates(*(firehose_push_reply_t *)(data));
		bsd_log_lock_safe();
		os_log_wakeup = has_more;
		if (os_log_wakeup) {
			/* More to drain: wake the reader again immediately. */
			selwakeup(&oslogsoftc.sc_selp);
		}
		bsd_log_unlock();
		break;
	default:
		return -1;
	}
	return 0;
}
510
/*
 * Startup: allocate and register the kernel firehose buffer, honoring the
 * "firehose_chunk_count" and "firehose_io_pages" boot-args (falling back
 * to defaults when absent or invalid).
 */
__startup_func
static void
oslog_init_firehose(void)
{
	if (os_log_disabled()) {
		printf("Firehose disabled: Logging disabled by ATM\n");
		return;
	}

	if (!PE_parse_boot_argn("firehose_chunk_count", &__firehose_buffer_kernel_chunk_count, sizeof(__firehose_buffer_kernel_chunk_count))) {
		__firehose_buffer_kernel_chunk_count = FIREHOSE_BUFFER_KERNEL_DEFAULT_CHUNK_COUNT;
	}
	if (!PE_parse_boot_argn("firehose_io_pages", &__firehose_num_kernel_io_pages, sizeof(__firehose_num_kernel_io_pages))) {
		__firehose_num_kernel_io_pages = FIREHOSE_BUFFER_KERNEL_DEFAULT_IO_PAGES;
	}
	/* Reject inconsistent boot-arg combinations rather than trusting them. */
	if (!__firehose_kernel_configuration_valid(__firehose_buffer_kernel_chunk_count, __firehose_num_kernel_io_pages)) {
		printf("illegal firehose configuration %u/%u, using defaults\n", __firehose_buffer_kernel_chunk_count, __firehose_num_kernel_io_pages);
		__firehose_buffer_kernel_chunk_count = FIREHOSE_BUFFER_KERNEL_DEFAULT_CHUNK_COUNT;
		__firehose_num_kernel_io_pages = FIREHOSE_BUFFER_KERNEL_DEFAULT_IO_PAGES;
	}
	vm_size_t size = __firehose_buffer_kernel_chunk_count * FIREHOSE_CHUNK_SIZE;

	/* +2 pages: one guard page before and one after the buffer. */
	kmem_alloc(kernel_map, &kernel_firehose_addr, size + ptoa(2),
	    KMA_NOFAIL | KMA_PERMANENT | KMA_GUARD_FIRST | KMA_GUARD_LAST |
	    KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_LOG);

	/* Skip past the leading guard page to the usable region. */
	kernel_firehose_addr += PAGE_SIZE;
	/* register buffer with firehose */
	/*
	 * NOTE(review): __firehose_buffer_create's return value replaces
	 * kernel_firehose_addr; presumably it initializes and returns the
	 * same region computed above — confirm against the firehose headers.
	 */
	kernel_firehose_addr = (vm_offset_t)__firehose_buffer_create((size_t *) &size);

	printf("Firehose configured: %u chunks, %u io pages\n",
	    __firehose_buffer_kernel_chunk_count, __firehose_num_kernel_io_pages);
}
STARTUP(OSLOG, STARTUP_RANK_SECOND, oslog_init_firehose);
545
546 /*
547 * log_putc_locked
548 *
549 * Decription: Output a character to the log; assumes the bsd_log_lock() or
550 * bsd_log_lock_safe() is held by the caller.
551 *
552 * Parameters: c Character to output
553 *
554 * Returns: (void)
555 *
556 * Notes: This functions is used for multibyte output to the log; it
557 * should be used preferrentially where possible to ensure that
558 * log entries do not end up interspersed due to preemption or
559 * SMP reentrancy.
560 */
561 void
log_putc_locked(struct msgbuf * mbp,char c)562 log_putc_locked(struct msgbuf *mbp, char c)
563 {
564 mbp->msg_bufc[mbp->msg_bufx++] = c;
565 if (mbp->msg_bufx >= mbp->msg_size) {
566 mbp->msg_bufx = 0;
567 }
568 }
569
570 /*
571 * log_putc
572 *
573 * Decription: Output a character to the log; assumes the bsd_log_lock() or
574 * bsd_log_lock_safe() is NOT held by the caller.
575 *
576 * Parameters: c Character to output
577 *
578 * Returns: (void)
579 *
580 * Notes: This function is used for single byte output to the log. It
581 * primarily exists to maintain binary backward compatibility.
582 */
583 void
log_putc(char c)584 log_putc(char c)
585 {
586 if (!bsd_log_lock(oslog_is_safe())) {
587 os_atomic_inc(&oslog_msgbuf_dropped_charcount, relaxed);
588 return;
589 }
590
591 log_putc_locked(msgbufp, c);
592 int unread_count = msgbufp->msg_bufx - msgbufp->msg_bufr;
593
594 bsd_log_unlock();
595
596 if (unread_count < 0) {
597 unread_count = 0 - unread_count;
598 }
599 if (c == '\n' || unread_count >= (msgbufp->msg_size / 2)) {
600 logwakeup();
601 }
602 }
603
604 /*
605 * it is possible to increase the kernel log buffer size by adding
606 * msgbuf=n
607 * to the kernel command line, and to read the current size using
608 * sysctl kern.msgbuf
609 * If there is no parameter on the kernel command line, the buffer is
610 * allocated statically and is CONFIG_MSG_BSIZE characters in size, otherwise
611 * memory is dynamically allocated. Memory management must already be up.
612 */
/*
 * Resize the kernel message buffer to `size` bytes, preserving as much of
 * the most recent data as fits. The new buffer is allocated before taking
 * the lock; the old contents are copied in, the read/write indices are
 * remapped to the new geometry, and the old buffer is zeroed and (if
 * dynamic) freed afterwards.
 *
 * Returns: 0, EINVAL for size 0 or > MAX_MSG_BSIZE, ENOMEM on allocation
 * failure.
 */
static int
log_setsize(size_t size)
{
	int i, count;
	char *p;

	if (size == 0 || size > MAX_MSG_BSIZE) {
		return EINVAL;
	}

	int new_logsize = (int)size;
	/* Z_ZERO: unreached tail of a larger buffer stays zeroed. */
	char *new_logdata = kalloc_data(size, Z_WAITOK | Z_ZERO);
	if (!new_logdata) {
		printf("Cannot resize system message buffer: Not enough memory\n");
		return ENOMEM;
	}

	bsd_log_lock_safe();

	char *old_logdata = msgbufp->msg_bufc;
	int old_logsize = msgbufp->msg_size;
	int old_bufr = msgbufp->msg_bufr;
	int old_bufx = msgbufp->msg_bufx;

	/* start "new_logsize" bytes before the write pointer */
	if (new_logsize <= old_bufx) {
		count = new_logsize;
		p = old_logdata + old_bufx - count;
	} else {
		/*
		 * if new buffer is bigger, copy what we have and let the
		 * bzero above handle the difference
		 */
		count = MIN(new_logsize, old_logsize);
		p = old_logdata + old_logsize - (count - old_bufx);
	}
	/* Copy `count` bytes, wrapping around the old circular buffer. */
	for (i = 0; i < count; i++) {
		if (p >= old_logdata + old_logsize) {
			p = old_logdata;
		}
		new_logdata[i] = *p++;
	}

	/* New write index: one past the copied data (0 if exactly full). */
	int new_bufx = i;
	if (new_bufx >= new_logsize) {
		new_bufx = 0;
	}
	msgbufp->msg_bufx = new_bufx;

	int new_bufr = old_bufx - old_bufr; /* how much were we trailing bufx by? */
	if (new_bufr < 0) {
		new_bufr += old_logsize;
	}
	new_bufr = new_bufx - new_bufr; /* now relative to oldest data in new buffer */
	if (new_bufr < 0) {
		new_bufr += new_logsize;
	}
	msgbufp->msg_bufr = new_bufr;

	msgbufp->msg_size = new_logsize;
	msgbufp->msg_bufc = new_logdata;

	bsd_log_unlock();

	/*
	 * This memory is now dead - clear it so that it compresses better
	 * in case of suspend to disk etc.
	 */
	bzero(old_logdata, old_logsize);
	if (old_logdata != smsg_bufc) {
		/* dynamic memory that must be freed */
		kfree_data(old_logdata, old_logsize);
	}

	printf("System message buffer configured: %lu bytes\n", size);

	return 0;
}
691
/* kern.msgbuf: read or resize the kernel message buffer (sysctl_kern_msgbuf). */
SYSCTL_PROC(_kern, OID_AUTO, msgbuf,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
    sysctl_kern_msgbuf, "I", "");
695
696 static int
sysctl_kern_msgbuf(struct sysctl_oid * oidp __unused,void * arg1 __unused,int arg2 __unused,struct sysctl_req * req)697 sysctl_kern_msgbuf(struct sysctl_oid *oidp __unused,
698 void *arg1 __unused, int arg2 __unused, struct sysctl_req *req)
699 {
700 int old_bufsize, bufsize;
701 int error;
702
703 bsd_log_lock_safe();
704 old_bufsize = bufsize = msgbufp->msg_size;
705 bsd_log_unlock();
706
707 error = sysctl_io_number(req, bufsize, sizeof(bufsize), &bufsize, NULL);
708 if (error) {
709 return error;
710 }
711
712 if (bufsize < 0) {
713 return EINVAL;
714 }
715
716 if (bufsize != old_bufsize) {
717 error = log_setsize(bufsize);
718 }
719
720 return error;
721 }
722
723 /*
724 * This should be called by /sbin/dmesg only via libproc.
725 * It returns as much data still in the buffer as possible.
726 */
/*
 * This should be called by /sbin/dmesg only via libproc.
 * It returns as much data still in the buffer as possible.
 *
 * Linearizes the circular message buffer into a temporary allocation,
 * stripping "\n<...>" syslog priority sequences and NUL bytes, then copies
 * out at most `buffersize` bytes (the most recent ones, if truncating).
 * *retval receives the number of bytes copied.
 */
int
log_dmesg(user_addr_t buffer, uint32_t buffersize, int32_t *retval)
{
	uint32_t i;
	uint32_t localbuff_size;
	int error = 0, newl, skip;
	char *localbuff, *p, *copystart, ch;
	size_t copysize;

	bsd_log_lock_safe();
	localbuff_size = (msgbufp->msg_size + 2); /* + '\n' + '\0' */
	bsd_log_unlock();

	/* Allocate a temporary non-circular buffer for copyout */
	localbuff = kalloc_data(localbuff_size, Z_WAITOK);
	if (!localbuff) {
		printf("log_dmesg: unable to allocate memory\n");
		return ENOMEM;
	}

	/* in between here, the log could become bigger, but that's fine */
	bsd_log_lock_safe();

	/*
	 * The message buffer is circular; start at the write pointer, and
	 * make one loop up to write pointer - 1.
	 */
	p = msgbufp->msg_bufc + msgbufp->msg_bufx;
	for (i = newl = skip = 0; p != msgbufp->msg_bufc + msgbufp->msg_bufx - 1; ++p) {
		if (p >= msgbufp->msg_bufc + msgbufp->msg_size) {
			p = msgbufp->msg_bufc;
		}
		ch = *p;
		/* Skip "\n<.*>" syslog sequences. */
		if (skip) {
			if (ch == '>') {
				newl = skip = 0;
			}
			continue;
		}
		if (newl && ch == '<') {
			skip = 1;
			continue;
		}
		if (ch == '\0') {
			/* unwritten regions of the buffer hold NULs */
			continue;
		}
		newl = (ch == '\n');
		localbuff[i++] = ch;
		/* The original version of this routine contained a buffer
		 * overflow. At the time, a "small" targeted fix was desired
		 * so the change below to check the buffer bounds was made.
		 * TODO: rewrite this needlessly convoluted routine.
		 */
		if (i == (localbuff_size - 2)) {
			break;
		}
	}
	/* Ensure the output ends with a newline and is NUL-terminated. */
	if (!newl) {
		localbuff[i++] = '\n';
	}
	localbuff[i++] = 0;

	/* If the caller's buffer is too small, keep the most recent bytes. */
	if (buffersize >= i) {
		copystart = localbuff;
		copysize = i;
	} else {
		copystart = localbuff + i - buffersize;
		copysize = buffersize;
	}

	bsd_log_unlock();

	error = copyout(copystart, buffer, copysize);
	if (!error) {
		*retval = (int32_t)copysize;
	}

	kfree_data(localbuff, localbuff_size);
	return error;
}
808
809 #ifdef CONFIG_XNUPOST
810
811 size_t find_pattern_in_buffer(const char *, size_t, size_t);
812
813 /*
814 * returns count of pattern found in systemlog buffer.
815 * stops searching further if count reaches expected_count.
816 */
817 size_t
find_pattern_in_buffer(const char * pattern,size_t len,size_t expected_count)818 find_pattern_in_buffer(const char *pattern, size_t len, size_t expected_count)
819 {
820 if (pattern == NULL || len == 0 || expected_count == 0) {
821 return 0;
822 }
823
824 size_t msg_bufx = msgbufp->msg_bufx;
825 size_t msg_size = msgbufp->msg_size;
826 size_t match_count = 0;
827
828 for (size_t i = 0; i < msg_size; i++) {
829 boolean_t match = TRUE;
830 for (size_t j = 0; j < len; j++) {
831 size_t pos = (msg_bufx + i + j) % msg_size;
832 if (msgbufp->msg_bufc[pos] != pattern[j]) {
833 match = FALSE;
834 break;
835 }
836 }
837 if (match && ++match_count >= expected_count) {
838 break;
839 }
840 }
841
842 return match_count;
843 }
844
845 __startup_func
846 static void
oslog_init_msgbuf(void)847 oslog_init_msgbuf(void)
848 {
849 size_t msgbuf_size = 0;
850
851 if (PE_parse_boot_argn("msgbuf", &msgbuf_size, sizeof(msgbuf_size))) {
852 (void) log_setsize(msgbuf_size);
853 }
854 }
855 STARTUP(OSLOG, STARTUP_RANK_SECOND, oslog_init_msgbuf);
856
857 #endif
858