/*-
 * Copyright (c) 2008-2010 Apple Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
 *     its contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>

#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>

#include <libkern/OSAtomic.h>

#include <bsm/audit.h>
#include <bsm/audit_internal.h>

#include <security/audit/audit_bsd.h>
#include <security/audit/audit.h>
#include <security/audit/audit_private.h>

#include <mach/host_priv.h>
#include <mach/host_special_ports.h>
#include <mach/audit_triggers_server.h>
#include <mach/audit_triggers_types.h>

#include <os/overflow.h>

extern void ipc_port_release_send(ipc_port_t port);

#if CONFIG_AUDIT
struct mhdr {
        size_t            mh_size;
        au_malloc_type_t *mh_type;
        u_long            mh_magic;
        char              mh_data[0];
};

/*
 * The lock group for the audit subsystem.
 */
static LCK_GRP_DECLARE(audit_lck_grp, "Audit");

#define AUDIT_MHMAGIC   0x4D656C53

/*
 * Initialize a condition variable. Must be called before use.
 */
void
_audit_cv_init(struct cv *cvp, const char *desc)
{
        if (desc == NULL) {
                cvp->cv_description = "UNKNOWN";
        } else {
                cvp->cv_description = desc;
        }
        cvp->cv_waiters = 0;
}

/*
 * Destroy a condition variable.
 */
void
_audit_cv_destroy(struct cv *cvp)
{
        cvp->cv_description = NULL;
        cvp->cv_waiters = 0;
}

/*
 * Signal a condition variable, waking up one waiting thread.
 */
void
_audit_cv_signal(struct cv *cvp)
{
        if (cvp->cv_waiters > 0) {
                wakeup_one((caddr_t)cvp);
                cvp->cv_waiters--;
        }
}

/*
 * Broadcast a signal to a condition variable, waking up all waiting threads.
 */
void
_audit_cv_broadcast(struct cv *cvp)
{
        if (cvp->cv_waiters > 0) {
                wakeup((caddr_t)cvp);
                cvp->cv_waiters = 0;
        }
}

/*
 * Wait on a condition variable. A cv_signal or cv_broadcast on the same
 * condition variable will resume the thread. It is recommended that the mutex
 * be held when cv_signal or cv_broadcast are called.
 */
void
_audit_cv_wait(struct cv *cvp, lck_mtx_t *mp, const char *desc)
{
        cvp->cv_waiters++;
        (void) msleep(cvp, mp, PZERO, desc, 0);
}

/*
 * Wait on a condition variable, allowing interruption by signals. Return 0
 * if the thread was resumed with cv_signal or cv_broadcast, EINTR or
 * ERESTART if a signal was caught. If ERESTART is returned the system call
 * should be restarted if possible.
 */
int
_audit_cv_wait_sig(struct cv *cvp, lck_mtx_t *mp, const char *desc)
{
        cvp->cv_waiters++;
        return msleep(cvp, mp, PSOCK | PCATCH, desc, 0);
}
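
/*
 * Illustrative sketch (not part of the audit subsystem, excluded from the
 * build): a typical producer/consumer pairing of the condition-variable and
 * mutex wrappers above.  The names example_mtx, example_cv, example_ready
 * and the two helper functions are hypothetical and exist only for this
 * example; the mutex and condition variable are assumed to have been set up
 * with _audit_mtx_init() and _audit_cv_init() during subsystem init.
 */
#if 0
static struct mtx example_mtx;
static struct cv  example_cv;
static int        example_ready;

static void
example_consumer(void)
{
        lck_mtx_lock(example_mtx.mtx_lock);
        while (example_ready == 0) {
                /* msleep() drops and re-takes the mutex; re-check in a loop. */
                _audit_cv_wait(&example_cv, example_mtx.mtx_lock, "example_cv");
        }
        example_ready = 0;
        lck_mtx_unlock(example_mtx.mtx_lock);
}

static void
example_producer(void)
{
        lck_mtx_lock(example_mtx.mtx_lock);
        example_ready = 1;
        /* Wake one waiter while the mutex is held, as recommended above. */
        _audit_cv_signal(&example_cv);
        lck_mtx_unlock(example_mtx.mtx_lock);
}
#endif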

/*
 * BSD Mutexes.
 */
void
#if DIAGNOSTIC
_audit_mtx_init(struct mtx *mp, const char *lckname)
#else
_audit_mtx_init(struct mtx *mp, __unused const char *lckname)
#endif
{
        mp->mtx_lock = lck_mtx_alloc_init(&audit_lck_grp, LCK_ATTR_NULL);
        KASSERT(mp->mtx_lock != NULL,
            ("_audit_mtx_init: Could not allocate a mutex."));
#if DIAGNOSTIC
        strlcpy(mp->mtx_name, lckname, AU_MAX_LCK_NAME);
#endif
}

void
_audit_mtx_destroy(struct mtx *mp)
{
        if (mp->mtx_lock) {
                lck_mtx_free(mp->mtx_lock, &audit_lck_grp);
                mp->mtx_lock = NULL;
        }
}

/*
 * BSD rw locks.
 */
void
#if DIAGNOSTIC
_audit_rw_init(struct rwlock *lp, const char *lckname)
#else
_audit_rw_init(struct rwlock *lp, __unused const char *lckname)
#endif
{
        lp->rw_lock = lck_rw_alloc_init(&audit_lck_grp, LCK_ATTR_NULL);
        KASSERT(lp->rw_lock != NULL,
            ("_audit_rw_init: Could not allocate a rw lock."));
#if DIAGNOSTIC
        strlcpy(lp->rw_name, lckname, AU_MAX_LCK_NAME);
#endif
}

void
_audit_rw_destroy(struct rwlock *lp)
{
        if (lp->rw_lock) {
                lck_rw_free(lp->rw_lock, &audit_lck_grp);
                lp->rw_lock = NULL;
        }
}

/*
 * Wait on a condition variable in a continuation (i.e. yield kernel stack).
 * A cv_signal or cv_broadcast on the same condition variable will cause
 * the thread to be scheduled.
 */
int
_audit_cv_wait_continuation(struct cv *cvp, lck_mtx_t *mp, thread_continue_t function)
{
        int status = KERN_SUCCESS;

        cvp->cv_waiters++;
        assert_wait(cvp, THREAD_UNINT);
        lck_mtx_unlock(mp);

        status = thread_block(function);

        /* should not be reached, but just in case, re-lock */
        lck_mtx_lock(mp);

        return status;
}

/*
 * Simple recursive lock.
 */
void
#if DIAGNOSTIC
_audit_rlck_init(struct rlck *lp, const char *lckname)
#else
_audit_rlck_init(struct rlck *lp, __unused const char *lckname)
#endif
{
        lp->rl_mtx = lck_mtx_alloc_init(&audit_lck_grp, LCK_ATTR_NULL);
        KASSERT(lp->rl_mtx != NULL,
            ("_audit_rlck_init: Could not allocate a recursive lock."));
#if DIAGNOSTIC
        strlcpy(lp->rl_name, lckname, AU_MAX_LCK_NAME);
#endif
        lp->rl_thread = 0;
        lp->rl_recurse = 0;
}

/*
 * Recursive lock. Allow the same thread to recursively lock the same lock.
 */
void
_audit_rlck_lock(struct rlck *lp)
{
        if (lp->rl_thread == current_thread()) {
                OSAddAtomic(1, &lp->rl_recurse);
                KASSERT(lp->rl_recurse < 10000,
                    ("_audit_rlck_lock: lock nested too deep."));
        } else {
                lck_mtx_lock(lp->rl_mtx);
                lp->rl_thread = current_thread();
                lp->rl_recurse = 1;
        }
}

/*
 * Recursive unlock. The unlock must be done by the same thread that
 * acquired the lock.
 */
void
_audit_rlck_unlock(struct rlck *lp)
{
        KASSERT(lp->rl_thread == current_thread(),
            ("_audit_rlck_unlock(): Don't own lock."));

        /* Note: OSAddAtomic returns the old value. */
        if (OSAddAtomic(-1, &lp->rl_recurse) == 1) {
                lp->rl_thread = 0;
                lck_mtx_unlock(lp->rl_mtx);
        }
}

void
_audit_rlck_destroy(struct rlck *lp)
{
        if (lp->rl_mtx) {
                lck_mtx_free(lp->rl_mtx, &audit_lck_grp);
                lp->rl_mtx = NULL;
        }
}

/*
 * Recursive lock assert. The LCK_MTX_ASSERT_OWNED case must panic when the
 * lock is *not* held by the current thread, matching the panic message.
 */
void
_audit_rlck_assert(struct rlck *lp, u_int assert)
{
        thread_t cthd = current_thread();

        if (assert == LCK_MTX_ASSERT_OWNED && lp->rl_thread != cthd) {
                panic("recursive lock (%p) not held by this thread (%p).",
                    lp, cthd);
        }
        if (assert == LCK_MTX_ASSERT_NOTOWNED && lp->rl_thread != 0) {
                panic("recursive lock (%p) held by thread (%p).",
                    lp, cthd);
        }
}
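
/*
 * Illustrative sketch (excluded from the build): the recursive lock allows
 * the owning thread to re-enter a code path that takes the same lock, e.g.
 * when an outer helper calls an inner helper that locks again.  The
 * example_rlck name and the two helpers are hypothetical.
 */
#if 0
static struct rlck example_rlck;

static void
example_inner(void)
{
        /* Re-entry by the owning thread only bumps rl_recurse; no deadlock. */
        _audit_rlck_lock(&example_rlck);
        /* ... touch state protected by example_rlck ... */
        _audit_rlck_unlock(&example_rlck);
}

static void
example_outer(void)
{
        _audit_rlck_lock(&example_rlck);
        example_inner();
        /* The mutex is only dropped once rl_recurse returns to zero. */
        _audit_rlck_unlock(&example_rlck);
}
#endif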

/*
 * Simple sleep lock.
 */
void
#if DIAGNOSTIC
_audit_slck_init(struct slck *lp, const char *lckname)
#else
_audit_slck_init(struct slck *lp, __unused const char *lckname)
#endif
{
        lp->sl_mtx = lck_mtx_alloc_init(&audit_lck_grp, LCK_ATTR_NULL);
        KASSERT(lp->sl_mtx != NULL,
            ("_audit_slck_init: Could not allocate a sleep lock."));
#if DIAGNOSTIC
        strlcpy(lp->sl_name, lckname, AU_MAX_LCK_NAME);
#endif
        lp->sl_locked = 0;
        lp->sl_waiting = 0;
}

/*
 * Acquire the sleep lock. The 'intr' flag determines if the lock is
 * interruptible. If 'intr' is true then signals or other events can
 * interrupt the sleep.
 */
wait_result_t
_audit_slck_lock(struct slck *lp, int intr)
{
        wait_result_t res = THREAD_AWAKENED;

        lck_mtx_lock(lp->sl_mtx);
        while (lp->sl_locked && res == THREAD_AWAKENED) {
                lp->sl_waiting = 1;
                res = lck_mtx_sleep(lp->sl_mtx, LCK_SLEEP_DEFAULT,
                    (event_t) lp, (intr) ? THREAD_INTERRUPTIBLE : THREAD_UNINT);
        }
        if (res == THREAD_AWAKENED) {
                lp->sl_locked = 1;
        }
        lck_mtx_unlock(lp->sl_mtx);

        return res;
}

/*
 * Release the sleep lock and wake up all the threads waiting for it.
 */
void
_audit_slck_unlock(struct slck *lp)
{
        lck_mtx_lock(lp->sl_mtx);
        lp->sl_locked = 0;
        if (lp->sl_waiting) {
                lp->sl_waiting = 0;

                /* Wake up *all* sleeping threads. */
                wakeup((event_t) lp);
        }
        lck_mtx_unlock(lp->sl_mtx);
}

/*
 * Try to acquire the sleep lock without sleeping. Return non-zero if the
 * lock was acquired, zero otherwise.
 */
int
_audit_slck_trylock(struct slck *lp)
{
        int result;

        lck_mtx_lock(lp->sl_mtx);
        result = !lp->sl_locked;
        if (result) {
                lp->sl_locked = 1;
        }
        lck_mtx_unlock(lp->sl_mtx);

        return result;
}

/*
 * Sleep lock assert.
 */
void
_audit_slck_assert(struct slck *lp, u_int assert)
{
        if (assert == LCK_MTX_ASSERT_OWNED && lp->sl_locked == 0) {
                panic("sleep lock (%p) not held.", lp);
        }
        if (assert == LCK_MTX_ASSERT_NOTOWNED && lp->sl_locked == 1) {
                panic("sleep lock (%p) held.", lp);
        }
}

void
_audit_slck_destroy(struct slck *lp)
{
        if (lp->sl_mtx) {
                lck_mtx_free(lp->sl_mtx, &audit_lck_grp);
                lp->sl_mtx = NULL;
        }
}
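
/*
 * Illustrative sketch (excluded from the build): typical use of the sleep
 * lock, covering both the interruptible blocking path and the non-blocking
 * trylock path.  The example_slck name and the helper are hypothetical.
 */
#if 0
static struct slck example_slck;

static int
example_do_work(int can_sleep)
{
        if (!can_sleep) {
                /* Fail rather than block if the lock is already held. */
                if (!_audit_slck_trylock(&example_slck)) {
                        return EBUSY;
                }
        } else {
                /* Interruptible wait; bail out if a signal wakes us early. */
                if (_audit_slck_lock(&example_slck, 1) != THREAD_AWAKENED) {
                        return EINTR;
                }
        }
        /* ... do work while holding the sleep lock ... */
        _audit_slck_unlock(&example_slck);
        return 0;
}
#endif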

/*
 * XXXss - This code was taken from bsd/netinet6/icmp6.c. Maybe ppsratecheck()
 * should be made global in icmp6.c.
 */
#ifndef timersub
#define timersub(tvp, uvp, vvp)                                         \
        do {                                                            \
                (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec;          \
                (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec;       \
                if ((vvp)->tv_usec < 0) {                               \
                        (vvp)->tv_sec--;                                \
                        (vvp)->tv_usec += 1000000;                      \
                }                                                       \
        } while (0)
#endif

/*
 * Packets (or events) per second limitation.
 */
int
_audit_ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
        struct timeval tv, delta;
        int rv;

        microtime(&tv);

        timersub(&tv, lasttime, &delta);

        /*
         * Check for 0,0 so that the message will be seen at least once.
         * If more than one second has passed since the last update of
         * lasttime, reset the counter.
         *
         * We increment *curpps even when *curpps < maxpps, since some
         * callers may want to use *curpps for statistics as well.
         */
        if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
            delta.tv_sec >= 1) {
                *lasttime = tv;
                *curpps = 0;
                rv = 1;
        } else if (maxpps < 0) {
                rv = 1;
        } else if (*curpps < maxpps) {
                rv = 1;
        } else {
                rv = 0;
        }
        if (*curpps + 1 > 0) {
                *curpps = *curpps + 1;
        }

        return rv;
}
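
/*
 * Illustrative sketch (excluded from the build): rate-limiting a log message
 * with _audit_ppsratecheck.  The static state and the limit of 5 events per
 * second are hypothetical and chosen only for this example.
 */
#if 0
static struct timeval example_lasttime;
static int example_curevents;

static void
example_log_failure(void)
{
        /* Emit at most 5 messages per second; excess events are still counted. */
        if (_audit_ppsratecheck(&example_lasttime, &example_curevents, 5)) {
                printf("audit: example failure (%d events so far)\n",
                    example_curevents);
        }
}
#endif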

int
audit_send_trigger(unsigned int trigger)
{
        mach_port_t audit_port;
        int error;

        error = host_get_audit_control_port(host_priv_self(), &audit_port);
        if (error == KERN_SUCCESS && audit_port != MACH_PORT_NULL) {
                (void)audit_triggers(audit_port, trigger);
                ipc_port_release_send(audit_port);
                return 0;
        } else {
                printf("Cannot get audit control port\n");
                return error;
        }
}

int
audit_send_analytics(char *signing_id, char *process_name)
{
        mach_port_t audit_port;
        int error;

        error = host_get_audit_control_port(host_priv_self(), &audit_port);
        if (error == KERN_SUCCESS && audit_port != MACH_PORT_NULL) {
                (void)audit_analytics(audit_port, signing_id, process_name);
                ipc_port_release_send(audit_port);
                return 0;
        } else {
                printf("Cannot get audit control port for analytics\n");
                return error;
        }
}

#endif /* CONFIG_AUDIT */