/*
 * Copyright (c) 2004-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <stdarg.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/event.h> // for kqueue related stuff
#include <sys/fsevents.h>

#if CONFIG_FSE
#include <sys/namei.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/file_internal.h>
#include <sys/stat.h>
#include <sys/vnode_internal.h>
#include <sys/mount_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/uio.h>
#include <kern/kalloc.h>
#include <sys/dirent.h>
#include <sys/attr.h>
#include <sys/sysctl.h>
#include <sys/ubc.h>
#include <machine/cons.h>
#include <miscfs/specfs/specdev.h>
#include <miscfs/devfs/devfs.h>
#include <sys/filio.h>
#include <kern/locks.h>
#include <libkern/OSAtomic.h>
#include <kern/zalloc.h>
#include <mach/mach_time.h>
#include <kern/thread_call.h>
#include <kern/clock.h>
#include <IOKit/IOBSD.h>

#include <security/audit/audit.h>
#include <bsm/audit_kevents.h>

#include <pexpert/pexpert.h>
#include <libkern/section_keywords.h>

typedef struct kfs_event {
    LIST_ENTRY(kfs_event) kevent_list;
    int16_t type;           // type code of this event
    u_int16_t flags,        // per-event flags
        len;                // the length of the path in "str"
    int32_t refcount;       // number of clients referencing this
    pid_t pid;              // pid of the process that did the op

    uint64_t abstime;       // when this event happened (mach_absolute_time())
    ino64_t ino;
    dev_t dev;
    int32_t mode;
    uid_t uid;
    gid_t gid;

    const char *str;

    struct kfs_event *dest; // if this is a two-file op
} kfs_event;

// flags for the flags field
#define KFSE_COMBINED_EVENTS          0x0001
#define KFSE_CONTAINS_DROPPED_EVENTS  0x0002
#define KFSE_RECYCLED_EVENT           0x0004
#define KFSE_BEING_CREATED            0x0008

LIST_HEAD(kfse_list, kfs_event) kfse_list_head = LIST_HEAD_INITIALIZER(x);
int num_events_outstanding = 0;
int num_pending_rename = 0;


struct fsevent_handle;

typedef struct fs_event_watcher {
    int8_t *event_list;          // the events we're interested in
    int32_t num_events;
    dev_t *devices_not_to_watch; // report events from devices not in this list
    uint32_t num_devices;
    int32_t flags;
    kfs_event **event_queue;
    int32_t eventq_size;         // number of event pointers in queue
    int32_t num_readers;
    int32_t rd;                  // read index into the event_queue
    int32_t wr;                  // write index into the event_queue
    int32_t blockers;
    int32_t my_id;
    uint32_t num_dropped;
    uint64_t max_event_id;
    struct fsevent_handle *fseh;
    pid_t pid;
    char proc_name[(2 * MAXCOMLEN) + 1];
} fs_event_watcher;

// fs_event_watcher flags
#define WATCHER_DROPPED_EVENTS        0x0001
#define WATCHER_CLOSING               0x0002
#define WATCHER_WANTS_COMPACT_EVENTS  0x0004
#define WATCHER_WANTS_EXTENDED_INFO   0x0008
#define WATCHER_APPLE_SYSTEM_SERVICE  0x0010 // fseventsd, coreservicesd, mds, revisiond

#define MAX_WATCHERS 8
static fs_event_watcher *watcher_table[MAX_WATCHERS];

#define DEFAULT_MAX_KFS_EVENTS 4096
static int max_kfs_events = DEFAULT_MAX_KFS_EVENTS;

// we allocate kfs_event structures out of this zone
static zone_t event_zone;
static int fs_event_init = 0;

//
// this array records whether anyone is interested in a
// particular type of event. if no one is, we bail out
// early from the event delivery
//
static int16_t fs_event_type_watchers[FSE_MAX_EVENTS];

// the device currently being unmounted:
static dev_t fsevent_unmount_dev = 0;
// how many ACKs are still outstanding:
static int fsevent_unmount_ack_count = 0;

static int watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse);
static void fsevents_wakeup(fs_event_watcher *watcher);

//
// Locks
//
static LCK_ATTR_DECLARE(fsevent_lock_attr, 0, 0);
static LCK_GRP_DECLARE(fsevent_mutex_group, "fsevent-mutex");
static LCK_GRP_DECLARE(fsevent_rw_group, "fsevent-rw");

static LCK_RW_DECLARE_ATTR(event_handling_lock, // handles locking for event manipulation and recycling
    &fsevent_rw_group, &fsevent_lock_attr);
static LCK_MTX_DECLARE_ATTR(watch_table_lock,
    &fsevent_mutex_group, &fsevent_lock_attr);
static LCK_MTX_DECLARE_ATTR(event_buf_lock,
    &fsevent_mutex_group, &fsevent_lock_attr);
static LCK_MTX_DECLARE_ATTR(event_writer_lock,
    &fsevent_mutex_group, &fsevent_lock_attr);


/* Explicitly declare qsort so compiler doesn't complain */
__private_extern__ void qsort(
    void * array,
    size_t nmembers,
    size_t member_size,
    int (*)(const void *, const void *));

static int
is_ignored_directory(const char *path)
{
    if (!path) {
        return 0;
    }

#define IS_TLD(x) strnstr(__DECONST(char *, path), x, MAXPATHLEN)
    if (IS_TLD("/.Spotlight-V100/") ||
        IS_TLD("/.MobileBackups/") ||
        IS_TLD("/Backups.backupdb/")) {
        return 1;
    }
#undef IS_TLD

    return 0;
}

static void
fsevents_internal_init(void)
{
    int i;

    if (fs_event_init++ != 0) {
        return;
    }

    for (i = 0; i < FSE_MAX_EVENTS; i++) {
        fs_event_type_watchers[i] = 0;
    }

    memset(watcher_table, 0, sizeof(watcher_table));

    PE_get_default("kern.maxkfsevents", &max_kfs_events, sizeof(max_kfs_events));

    event_zone = zone_create_ext("fs-event-buf", sizeof(kfs_event),
        ZC_NOGC | ZC_NOCALLOUT, ZONE_ID_ANY, ^(zone_t z) {
        // mark the zone as exhaustible so that it will not
        // ever grow beyond what we initially filled it with
        zone_set_exhaustible(z, max_kfs_events);
    });

    zone_fill_initially(event_zone, max_kfs_events);
}

static void
lock_watch_table(void)
{
    lck_mtx_lock(&watch_table_lock);
}

static void
unlock_watch_table(void)
{
    lck_mtx_unlock(&watch_table_lock);
}

static void
lock_fs_event_list(void)
{
    lck_mtx_lock(&event_buf_lock);
}

static void
unlock_fs_event_list(void)
{
    lck_mtx_unlock(&event_buf_lock);
}

// forward prototype
static void release_event_ref(kfs_event *kfse);

static boolean_t
watcher_cares_about_dev(fs_event_watcher *watcher, dev_t dev)
{
    unsigned int i;

    // if devices_not_to_watch is NULL then we care about all
    // events from all devices
    if (watcher->devices_not_to_watch == NULL) {
        return true;
    }

    for (i = 0; i < watcher->num_devices; i++) {
        if (dev == watcher->devices_not_to_watch[i]) {
            // found a match! that means we do not
            // want events from this device.
            return false;
        }
    }

    // if we're here it's not in the devices_not_to_watch[]
    // list so that means we do care about it
    return true;
}


int
need_fsevent(int type, vnode_t vp)
{
    if (type >= 0 && type < FSE_MAX_EVENTS && fs_event_type_watchers[type] == 0) {
        return 0;
    }

    // events in /dev aren't really interesting...
    if (vp->v_tag == VT_DEVFS) {
        return 0;
    }

    return 1;
}


#define is_throw_away(x) ((x) == FSE_STAT_CHANGED || (x) == FSE_CONTENT_MODIFIED)


// Ways that an event can be reused:
//
// "combined" events mean that there were two events for
// the same vnode or path and we're combining both events
// into a single event. The primary event gets a bit that
// marks it as having been combined. The secondary event
// is essentially dropped and the kfse structure reused.
//
// "collapsed" means that multiple events below a given
// directory are collapsed into a single event. in this
// case, the directory that we collapse into and all of
// its children must be re-scanned.
//
// "recycled" means that we're completely blowing away
// the event since there are other events that have info
// about the same vnode or path (and one of those other
// events will be marked as combined or collapsed as
// appropriate).
//
#define KFSE_COMBINED   0x0001
#define KFSE_COLLAPSED  0x0002
#define KFSE_RECYCLED   0x0004

int num_dropped = 0;
int num_parent_switch = 0;
int num_recycled_rename = 0;

static struct timeval last_print;

//
// These variables are used to track coalescing multiple identical
// events for the same vnode/pathname. If we get the same event
// type and same vnode/pathname as the previous event, we just drop
// the event since it's superfluous. This improves some micro-
// benchmarks considerably and actually has a real-world impact on
// tests like a Finder copy where multiple stat-changed events can
// get coalesced.
//
static int last_event_type = -1;
static void *last_ptr = NULL;
static char last_str[MAXPATHLEN];
static int last_nlen = 0;
static int last_vid = -1;
static uint64_t last_coalesced_time = 0;
static void *last_event_ptr = NULL;
static pid_t last_pid = -1;
int last_coalesced = 0;
static mach_timebase_info_data_t sTimebaseInfo = { 0, 0 };
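//
// Illustrative sketch (not part of the build): the coalescing check in
// add_fsevent() below boils down to a pure predicate over the state
// above. A hypothetical distillation, assuming a ~1 second coalescing
// window expressed in nanoseconds:
//
//   static bool
//   would_coalesce(int type, pid_t pid, int vid, void *ptr,
//       const char *str, int nlen, uint64_t elapsed_ns)
//   {
//       if (type != last_event_type || pid != last_pid ||
//           elapsed_ns >= 1000000000ULL) {
//           return false;
//       }
//       // same vnode (by pointer + vid), or same pathname
//       return (vid && vid == last_vid && last_ptr == ptr) ||
//              (last_str[0] && last_nlen == nlen && str &&
//               strcmp(last_str, str) == 0);
//   }
//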

#define MAX_HARDLINK_NOTIFICATIONS 128

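//
// add_fsevent() takes a varargs list of (FSE_ARG_* tag, value) pairs
// terminated by FSE_ARG_DONE. A minimal sketch of a typical call site
// (the exact arguments vary by event type; this example assumes a
// vnode-based event and a valid vfs_context_t "ctx"):
//
//   add_fsevent(FSE_CREATE_FILE, ctx,
//       FSE_ARG_VNODE, vp,
//       FSE_ARG_DONE);
//
// Two-file operations (FSE_RENAME, FSE_EXCHANGE, FSE_CLONE) pass two
// path/finfo groups, which is why a second kfs_event ("kfse_dest") is
// allocated for them below.
//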
int
add_fsevent(int type, vfs_context_t ctx, ...)
{
    struct proc *p = vfs_context_proc(ctx);
    int i, arg_type, ret;
    kfs_event *kfse, *kfse_dest = NULL, *cur;
    fs_event_watcher *watcher;
    va_list ap;
    int error = 0, did_alloc = 0;
    int64_t orig_linkcount = -1;
    dev_t dev = 0;
    uint64_t now, elapsed;
    uint64_t orig_linkid = 0, next_linkid = 0;
    uint64_t link_parentid = 0;
    char *pathbuff = NULL, *path_override = NULL;
    char *link_name = NULL;
    vnode_t link_vp = NULL;
    int pathbuff_len;
    uthread_t ut = get_bsdthread_info(current_thread());
    bool do_all_links = true;
    bool do_cache_reset = false;

    if (type == FSE_CONTENT_MODIFIED_NO_HLINK) {
        do_all_links = false;
        type = FSE_CONTENT_MODIFIED;
    }


restart:
    va_start(ap, ctx);

    // ignore bogus event types..
    if (type < 0 || type >= FSE_MAX_EVENTS) {
        return EINVAL;
    }

    // if no one cares about this type of event, bail out
    if (fs_event_type_watchers[type] == 0) {
        va_end(ap);

        return 0;
    }

    now = mach_absolute_time();

    // find a free event and snag it for our use
    // NOTE: do not do anything that would block until
    //       the lock is dropped.
    lock_fs_event_list();

    //
    // check if this event is identical to the previous one...
    // (as long as it's not an event type that can never be the
    // same as a previous event)
    //
    if (path_override == NULL && type != FSE_CREATE_FILE && type != FSE_DELETE && type != FSE_RENAME && type != FSE_EXCHANGE && type != FSE_CHOWN && type != FSE_DOCID_CHANGED && type != FSE_DOCID_CREATED && type != FSE_CLONE) {
        void *ptr = NULL;
        int vid = 0, was_str = 0, nlen = 0;

        for (arg_type = va_arg(ap, int32_t); arg_type != FSE_ARG_DONE; arg_type = va_arg(ap, int32_t)) {
            switch (arg_type) {
            case FSE_ARG_VNODE: {
                ptr = va_arg(ap, void *);
                vid = vnode_vid((struct vnode *)ptr);
                last_str[0] = '\0';
                break;
            }
            case FSE_ARG_STRING: {
                nlen = va_arg(ap, int32_t);
                ptr = va_arg(ap, void *);
                was_str = 1;
                break;
            }
            }
            if (ptr != NULL) {
                break;
            }
        }

        if (sTimebaseInfo.denom == 0) {
            (void) clock_timebase_info(&sTimebaseInfo);
        }

        elapsed = (now - last_coalesced_time);
        if (sTimebaseInfo.denom != sTimebaseInfo.numer) {
            if (sTimebaseInfo.denom == 1) {
                elapsed *= sTimebaseInfo.numer;
            } else {
                // this could overflow... the worst that will happen is that we'll
                // send (or not send) an extra event so I'm not going to worry about
                // doing the math right like dtrace_abs_to_nano() does.
                elapsed = (elapsed * sTimebaseInfo.numer) / (uint64_t)sTimebaseInfo.denom;
            }
        }
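
        //
        // Worked example (illustrative only): mach_absolute_time() ticks
        // convert to nanoseconds as ns = ticks * numer / denom. With a
        // timebase of numer/denom = 125/3 (seen on some Apple silicon
        // machines), 24,000,000 ticks -> 24000000 * 125 / 3 =
        // 1,000,000,000 ns, i.e. exactly one second. When numer == denom
        // the ticks already are nanoseconds, which is the case the code
        // above skips entirely.
        //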

        if (type == last_event_type
            && (elapsed < 1000000000)
            && (last_pid == proc_getpid(p))
            &&
            ((vid && vid == last_vid && last_ptr == ptr)
            ||
            (last_str[0] && last_nlen == nlen && ptr && strcmp(last_str, ptr) == 0))
            ) {
            last_coalesced++;
            unlock_fs_event_list();
            va_end(ap);

            return 0;
        } else {
            last_ptr = ptr;
            if (ptr && was_str) {
                strlcpy(last_str, ptr, sizeof(last_str));
            }
            last_nlen = nlen;
            last_vid = vid;
            last_event_type = type;
            last_coalesced_time = now;
            last_pid = proc_getpid(p);
        }
    }
    va_start(ap, ctx);


    kfse = zalloc_noblock(event_zone);
    if (kfse && (type == FSE_RENAME || type == FSE_EXCHANGE || type == FSE_CLONE)) {
        kfse_dest = zalloc_noblock(event_zone);
        if (kfse_dest == NULL) {
            did_alloc = 1;
            zfree(event_zone, kfse);
            kfse = NULL;
        }
    }


    if (kfse == NULL) { // yikes! no free events
        unlock_fs_event_list();
        lock_watch_table();

        for (i = 0; i < MAX_WATCHERS; i++) {
            watcher = watcher_table[i];
            if (watcher == NULL) {
                continue;
            }

            watcher->flags |= WATCHER_DROPPED_EVENTS;
            fsevents_wakeup(watcher);
        }
        unlock_watch_table();

        {
            struct timeval current_tv;

            num_dropped++;

            // only print a message at most once every 10 seconds
            microuptime(&current_tv);
            if ((current_tv.tv_sec - last_print.tv_sec) > 10) {
                int ii;
                void *junkptr = zalloc_noblock(event_zone), *listhead = kfse_list_head.lh_first;

                printf("add_fsevent: event queue is full! dropping events (num dropped events: %d; num events outstanding: %d).\n", num_dropped, num_events_outstanding);
                printf("add_fsevent: kfse_list head %p ; num_pending_rename %d\n", listhead, num_pending_rename);
                printf("add_fsevent: zalloc sez: %p\n", junkptr);
                printf("add_fsevent: event_zone info: %d 0x%x\n", ((int *)event_zone)[0], ((int *)event_zone)[1]);
                lock_watch_table();
                for (ii = 0; ii < MAX_WATCHERS; ii++) {
                    if (watcher_table[ii] == NULL) {
                        continue;
                    }

                    printf("add_fsevent: watcher %s %p: rd %4d wr %4d q_size %4d flags 0x%x\n",
                        watcher_table[ii]->proc_name,
                        watcher_table[ii],
                        watcher_table[ii]->rd, watcher_table[ii]->wr,
                        watcher_table[ii]->eventq_size, watcher_table[ii]->flags);
                }
                unlock_watch_table();

                last_print = current_tv;
                if (junkptr) {
                    zfree(event_zone, junkptr);
                }
            }
        }

        if (pathbuff) {
            release_pathbuff(pathbuff);
            pathbuff = NULL;
        }
        return ENOSPC;
    }

    memset(kfse, 0, sizeof(kfs_event));
    kfse->refcount = 1;
    OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse->flags);

    last_event_ptr = kfse;
    kfse->type = (int16_t)type;
    kfse->abstime = now;
    kfse->pid = proc_getpid(p);
    if (type == FSE_RENAME || type == FSE_EXCHANGE || type == FSE_CLONE) {
        memset(kfse_dest, 0, sizeof(kfs_event));
        kfse_dest->refcount = 1;
        OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse_dest->flags);
        kfse_dest->type = (int16_t)type;
        kfse_dest->pid = proc_getpid(p);
        kfse_dest->abstime = now;

        kfse->dest = kfse_dest;
    }

    num_events_outstanding++;
    if (kfse->type == FSE_RENAME) {
        num_pending_rename++;
    }
    LIST_INSERT_HEAD(&kfse_list_head, kfse, kevent_list);

    if (kfse->refcount < 1) {
        panic("add_fsevent: line %d: kfse refcount %d but should be at least 1", __LINE__, kfse->refcount);
    }

    unlock_fs_event_list(); // at this point it's safe to unlock

    //
    // now process the arguments passed in and copy them into
    // the kfse
    //

    cur = kfse;

    if (type == FSE_DOCID_CREATED || type == FSE_DOCID_CHANGED) {
        uint64_t val;

        //
        // These events are special and not like the other events. They only
        // have a dev_t, src inode #, dest inode #, and a doc-id. We use the
        // fields that we can in the kfse but have to overlay the dest inode
        // number and the doc-id on the other fields.
        //

        // First the dev_t
        arg_type = va_arg(ap, int32_t);
        if (arg_type == FSE_ARG_DEV) {
            cur->dev = (dev_t)(va_arg(ap, dev_t));
        } else {
            cur->dev = (dev_t)0xbadc0de1;
        }

        // next the source inode #
        arg_type = va_arg(ap, int32_t);
        if (arg_type == FSE_ARG_INO) {
            cur->ino = (ino64_t)(va_arg(ap, ino64_t));
        } else {
            cur->ino = 0xbadc0de2;
        }

        // now the dest inode #
        arg_type = va_arg(ap, int32_t);
        if (arg_type == FSE_ARG_INO) {
            val = (ino64_t)(va_arg(ap, ino64_t));
        } else {
            val = 0xbadc0de2;
        }
        // overlay the dest inode number on the str/dest pointer fields
        __nochk_memcpy(&cur->str, &val, sizeof(ino64_t));


        // and last the document-id
        arg_type = va_arg(ap, int32_t);
        if (arg_type == FSE_ARG_INT32) {
            val = (uint64_t)va_arg(ap, uint32_t);
        } else if (arg_type == FSE_ARG_INT64) {
            val = (uint64_t)va_arg(ap, uint64_t);
        } else {
            val = 0xbadc0de3;
        }

        // the docid is 64-bit and overlays the uid/gid fields
        static_assert(sizeof(cur->uid) + sizeof(cur->gid) == sizeof(val), "gid/uid size mismatch");
        static_assert(offsetof(struct kfs_event, gid) - offsetof(struct kfs_event, uid) == sizeof(cur->uid), "unexpected struct kfs_event layout");
        memcpy(&cur->uid, &val, sizeof(cur->uid));
        memcpy(&cur->gid, (u_int8_t *)&val + sizeof(cur->uid), sizeof(cur->gid));

        goto done_with_args;
    }
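
    //
    // Illustrative sketch (not compiled): the split copy above is a 64-bit
    // store expressed as two 32-bit memcpy()s into the adjacent uid/gid
    // fields; copy_out_kfse() later reads it back with one 64-bit memcpy
    // starting at &uid, which is why the static_asserts insist the two
    // fields are contiguous. The round trip, in miniature:
    //
    //   uint64_t docid = 0x1122334455667788ULL, out;
    //   struct { uint32_t uid, gid; } f;   // stand-in for the kfse fields
    //   memcpy(&f.uid, &docid, sizeof(f.uid));
    //   memcpy(&f.gid, (uint8_t *)&docid + sizeof(f.uid), sizeof(f.gid));
    //   memcpy(&out, &f.uid, sizeof(out)); // one read spanning both fields
    //   assert(out == docid);
    //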

    if (type == FSE_UNMOUNT_PENDING) {
        // Just a dev_t
        arg_type = va_arg(ap, int32_t);
        if (arg_type == FSE_ARG_DEV) {
            cur->dev = (dev_t)(va_arg(ap, dev_t));
        } else {
            cur->dev = (dev_t)0xbadc0de1;
        }

        goto done_with_args;
    }

    for (arg_type = va_arg(ap, int32_t); arg_type != FSE_ARG_DONE; arg_type = va_arg(ap, int32_t)) {
        switch (arg_type) {
        case FSE_ARG_VNODE: {
            // this expands out into multiple arguments to the client
            struct vnode *vp;
            struct vnode_attr va;

            if (kfse->str != NULL) {
                cur = kfse_dest;
            }

            vp = va_arg(ap, struct vnode *);
            if (vp == NULL) {
                panic("add_fsevent: you can't pass me a NULL vnode ptr (type %d)!",
                    cur->type);
            }

            VATTR_INIT(&va);
            VATTR_WANTED(&va, va_fsid);
            VATTR_WANTED(&va, va_fileid);
            VATTR_WANTED(&va, va_mode);
            VATTR_WANTED(&va, va_uid);
            VATTR_WANTED(&va, va_gid);
            VATTR_WANTED(&va, va_nlink);
            if ((ret = vnode_getattr(vp, &va, vfs_context_kernel())) != 0) {
                // printf("add_fsevent: failed to getattr on vp %p (%d)\n", cur->fref.vp, ret);
                cur->str = NULL;
                error = EINVAL;
                goto clean_up;
            }

            cur->dev = dev = (dev_t)va.va_fsid;
            cur->ino = (ino64_t)va.va_fileid;
            cur->mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode;
            cur->uid = va.va_uid;
            cur->gid = va.va_gid;
            if (vp->v_flag & VISHARDLINK) {
                cur->mode |= FSE_MODE_HLINK;
                if ((vp->v_type == VDIR && va.va_dirlinkcount == 0) || (vp->v_type == VREG && va.va_nlink == 0)) {
                    cur->mode |= FSE_MODE_LAST_HLINK;
                }
                if (orig_linkid == 0) {
                    orig_linkid = cur->ino;
                    orig_linkcount = MIN(va.va_nlink, MAX_HARDLINK_NOTIFICATIONS);
                    link_vp = vp;
                    if (vp->v_mount->mnt_kern_flag & MNTK_PATH_FROM_ID && !link_name) {
                        VATTR_INIT(&va);
                        VATTR_WANTED(&va, va_parentid);
                        VATTR_WANTED(&va, va_name);
                        link_name = zalloc(ZV_NAMEI);
                        va.va_name = link_name;
                        if ((ret = vnode_getattr(vp, &va, vfs_context_kernel()) != 0) ||
                            !(VATTR_IS_SUPPORTED(&va, va_name)) ||
                            !(VATTR_IS_SUPPORTED(&va, va_parentid))) {
                            zfree(ZV_NAMEI, link_name);
                            link_name = NULL;
                        }
                        if (link_name) {
                            link_parentid = va.va_parentid;
                        }
                        va.va_name = NULL;
                    }
                }
            }

            // if we haven't gotten the path yet, get it.
            if (pathbuff == NULL && path_override == NULL) {
                pathbuff = get_pathbuff();
                pathbuff_len = MAXPATHLEN;

                pathbuff[0] = '\0';
                if ((ret = vn_getpath_no_firmlink(vp, pathbuff, &pathbuff_len)) != 0 || pathbuff[0] == '\0') {
                    cur->flags |= KFSE_CONTAINS_DROPPED_EVENTS;

                    do {
                        if (vp->v_parent != NULL) {
                            vp = vp->v_parent;
                        } else if (vp->v_mount) {
                            strlcpy(pathbuff, vp->v_mount->mnt_vfsstat.f_mntonname, MAXPATHLEN);
                            break;
                        } else {
                            vp = NULL;
                        }

                        if (vp == NULL) {
                            break;
                        }

                        pathbuff_len = MAXPATHLEN;
                        ret = vn_getpath_no_firmlink(vp, pathbuff, &pathbuff_len);
                    } while (ret == ENOSPC);

                    if (ret != 0 || vp == NULL) {
                        error = ENOENT;
                        goto clean_up;
                    }
                }
            } else if (path_override) {
                pathbuff = path_override;
                pathbuff_len = (int)strlen(path_override) + 1;
            } else {
                strlcpy(pathbuff, "NOPATH", MAXPATHLEN);
                pathbuff_len = (int)strlen(pathbuff) + 1;
            }

            // store the path by adding it to the global string table
            cur->len = (u_int16_t)pathbuff_len;
            cur->str = vfs_addname(pathbuff, pathbuff_len, 0, 0);
            if (cur->str == NULL || cur->str[0] == '\0') {
                panic("add_fsevent: was not able to add path %s to event %p.", pathbuff, cur);
            }

            if (pathbuff != path_override) {
                release_pathbuff(pathbuff);
            }
            pathbuff = NULL;

            break;
        }

        case FSE_ARG_FINFO: {
            fse_info *fse;

            fse = va_arg(ap, fse_info *);

            cur->dev = dev = (dev_t)fse->dev;
            cur->ino = (ino64_t)fse->ino;
            cur->mode = (int32_t)fse->mode;
            cur->uid = (uid_t)fse->uid;
            cur->gid = (uid_t)fse->gid;
            // if it's a hard-link and this is the last link, flag it
            if (fse->mode & FSE_MODE_HLINK) {
                if (fse->nlink == 0) {
                    cur->mode |= FSE_MODE_LAST_HLINK;
                }
                if (orig_linkid == 0) {
                    orig_linkid = cur->ino;
                    orig_linkcount = MIN(fse->nlink, MAX_HARDLINK_NOTIFICATIONS);
                }
            }
            if (cur->mode & FSE_TRUNCATED_PATH) {
                cur->flags |= KFSE_CONTAINS_DROPPED_EVENTS;
                cur->mode &= ~FSE_TRUNCATED_PATH;
            }
            break;
        }

        case FSE_ARG_STRING:
            if (kfse->str != NULL) {
                cur = kfse_dest;
            }

            cur->len = (int16_t)(va_arg(ap, int32_t) & 0x7fff);
            if (cur->len >= 1) {
                cur->str = vfs_addname(va_arg(ap, char *), cur->len, 0, 0);
            } else {
                printf("add_fsevent: funny looking string length: %d\n", (int)cur->len);
                cur->len = 2;
                cur->str = vfs_addname("/", cur->len, 0, 0);
            }
            if (cur->str[0] == 0) {
                printf("add_fsevent: bogus looking string (len %d)\n", cur->len);
            }
            break;

        case FSE_ARG_INT32: {
            uint32_t ival = (uint32_t)va_arg(ap, int32_t);
            kfse->uid = ival;
            break;
        }

        default:
            printf("add_fsevent: unknown type %d\n", arg_type);
            // just skip one 32-bit word and hope we sync up...
            (void)va_arg(ap, int32_t);
        }
    }

done_with_args:
    va_end(ap);

    OSBitAndAtomic16(~KFSE_BEING_CREATED, &kfse->flags);
    if (kfse_dest) {
        OSBitAndAtomic16(~KFSE_BEING_CREATED, &kfse_dest->flags);
    }

    //
    // now we have to go and let everyone who is interested
    // in this type of event know about it
    //
    lock_watch_table();

    for (i = 0; i < MAX_WATCHERS; i++) {
        watcher = watcher_table[i];
        if (watcher == NULL) {
            continue;
        }

        if (type < watcher->num_events
            && watcher->event_list[type] == FSE_REPORT
            && watcher_cares_about_dev(watcher, dev)) {
            if (watcher_add_event(watcher, kfse) != 0) {
                watcher->num_dropped++;
                continue;
            }
        }

        // if (kfse->refcount < 1) {
        //     panic("add_fsevent: line %d: kfse refcount %d but should be at least 1", __LINE__, kfse->refcount);
        // }
    }

    unlock_watch_table();

clean_up:

    if (pathbuff) {
        release_pathbuff(pathbuff);
        pathbuff = NULL;
    }
    // replicate events for sibling hardlinks
    if (do_all_links && (kfse->mode & FSE_MODE_HLINK) && !(kfse->mode & FSE_MODE_LAST_HLINK) && (type == FSE_STAT_CHANGED || type == FSE_CONTENT_MODIFIED || type == FSE_FINDER_INFO_CHANGED || type == FSE_XATTR_MODIFIED)) {
        if (orig_linkcount > 0 && orig_linkid != 0) {
#ifndef APFSIOC_NEXT_LINK
#define APFSIOC_NEXT_LINK _IOWR('J', 10, uint64_t)
#endif
            if (path_override == NULL) {
                path_override = get_pathbuff();
            }
            if (next_linkid == 0) {
                next_linkid = orig_linkid;
            }

            if (link_vp) {
                mount_t mp = NULL;
                vnode_t mnt_rootvp = NULL;
                int iret = -1;

                mp = vnode_mount(link_vp);
                if (mp) {
                    iret = VFS_ROOT(mp, &mnt_rootvp, vfs_context_kernel());
                }

                if (iret == 0 && mnt_rootvp) {
                    iret = VNOP_IOCTL(mnt_rootvp, APFSIOC_NEXT_LINK, (char *)&next_linkid, (int)0, vfs_context_kernel());
                    vnode_put(mnt_rootvp);
                }

                int32_t fsid0;
                int path_override_len = MAXPATHLEN;

                // continue resolving hardlink paths if there is a valid next_linkid retrieved
                // file systems not supporting APFSIOC_NEXT_LINK will skip replicating events for sibling hardlinks
                if (iret == 0 && next_linkid != 0) {
                    fsid0 = link_vp->v_mount->mnt_vfsstat.f_fsid.val[0];
                    ut->uu_flag |= UT_KERN_RAGE_VNODES;
                    if (!do_cache_reset) {
                        do_cache_reset = true;
                    }
                    if ((iret = fsgetpath_internal(ctx, fsid0, next_linkid, MAXPATHLEN, path_override, FSOPT_NOFIRMLINKPATH, &path_override_len)) == 0) {
                        orig_linkcount--;
                        ut->uu_flag &= ~UT_KERN_RAGE_VNODES;

                        if (orig_linkcount >= 0) {
                            release_event_ref(kfse);
                            goto restart;
                        }
                    } else {
                        // failed to get override path
                        // encountered a broken link or the linkid has been deleted before retrieving the path
                        orig_linkcount--;
                        ut->uu_flag &= ~UT_KERN_RAGE_VNODES;

                        if (orig_linkcount >= 0) {
                            goto clean_up;
                        }
                    }
                }
            }
        }
    }

    if (link_name) {
        /*
         * If we call fsgetpath on all the links, it will set the link origin cache
         * to the last link that the path was obtained for.
         * To restore the original link id cache in APFS we need to issue a
         * lookup on the original directory + name for the link.
         */
        if (do_cache_reset) {
            vnode_t dvp = NULLVP;

            if ((ret = VFS_VGET(link_vp->v_mount, (ino64_t)link_parentid, &dvp, vfs_context_kernel())) == 0) {
                vnode_t lvp = NULLVP;

                ret = vnode_lookupat(link_name, 0, &lvp, ctx, dvp);
                if (!ret) {
                    vnode_put(lvp);
                    lvp = NULLVP;
                }
                vnode_put(dvp);
                dvp = NULLVP;
            }
            ret = 0;
        }
        zfree(ZV_NAMEI, link_name);
        link_name = NULL;
    }

    if (path_override) {
        release_pathbuff(path_override);
        path_override = NULL;
    }

    release_event_ref(kfse);

    return error;
}


static void
release_event_ref(kfs_event *kfse)
{
    int old_refcount;
    kfs_event copy, dest_copy;


    old_refcount = OSAddAtomic(-1, &kfse->refcount);
    if (old_refcount > 1) {
        return;
    }

    lock_fs_event_list();
    if (last_event_ptr == kfse) {
        last_event_ptr = NULL;
        last_event_type = -1;
        last_coalesced_time = 0;
    }

    if (kfse->refcount < 0) {
        panic("release_event_ref: bogus kfse refcount %d", kfse->refcount);
    }

    if (kfse->refcount > 0 || kfse->type == FSE_INVALID) {
        // This is very subtle. Either of these conditions can
        // be true if an event got recycled while we were waiting
        // on the fs_event_list lock or the event got recycled,
        // delivered, _and_ free'd by someone else while we were
        // waiting on the fs event list lock. In either case
        // we need to just unlock the list and return without
        // doing anything because if the refcount is > 0 then
        // someone else will take care of free'ing it and when
        // the kfse->type is invalid then someone else already
        // has handled free'ing the event (while we were blocked
        // on the event list lock).
        //
        unlock_fs_event_list();
        return;
    }

    //
    // make a copy of this so we can free things without
    // holding the fs_event_buf lock
    //
    copy = *kfse;
    if (kfse->type != FSE_DOCID_CREATED && kfse->type != FSE_DOCID_CHANGED && kfse->dest && OSAddAtomic(-1, &kfse->dest->refcount) == 1) {
        dest_copy = *kfse->dest;
    } else {
        dest_copy.str = NULL;
        dest_copy.len = 0;
        dest_copy.type = FSE_INVALID;
    }

    kfse->pid = kfse->type;             // save this off for debugging...
    kfse->uid = (uid_t)(long)kfse->str; // save this off for debugging...
    kfse->gid = (gid_t)(long)current_thread();

    kfse->str = (char *)0xdeadbeef;     // XXXdbg - catch any cheaters...

    if (dest_copy.type != FSE_INVALID) {
        kfse->dest->str = (char *)0xbadc0de; // XXXdbg - catch any cheaters...
        kfse->dest->type = FSE_INVALID;

        if (kfse->dest->kevent_list.le_prev != NULL) {
            num_events_outstanding--;
            LIST_REMOVE(kfse->dest, kevent_list);
            memset(&kfse->dest->kevent_list, 0xa5, sizeof(kfse->dest->kevent_list));
        }

        zfree(event_zone, kfse->dest);
    }

    // mark this fsevent as invalid
    {
        int otype;

        otype = kfse->type;
        kfse->type = FSE_INVALID;

        if (kfse->kevent_list.le_prev != NULL) {
            num_events_outstanding--;
            if (otype == FSE_RENAME) {
                num_pending_rename--;
            }
            LIST_REMOVE(kfse, kevent_list);
            memset(&kfse->kevent_list, 0, sizeof(kfse->kevent_list));
        }
    }

    zfree(event_zone, kfse);

    unlock_fs_event_list();

    // if we have a pointer in the union
    if (copy.str && copy.type != FSE_DOCID_CREATED && copy.type != FSE_DOCID_CHANGED) {
        if (copy.len == 0) { // and it's not a string
            panic("no more fref.vp!");
            // vnode_rele_ext(copy.fref.vp, O_EVTONLY, 0);
        } else { // else it's a string
            vfs_removename(copy.str);
        }
    }

    if (dest_copy.type != FSE_INVALID && dest_copy.str) {
        if (dest_copy.len == 0) {
            panic("no more fref.vp!");
            // vnode_rele_ext(dest_copy.fref.vp, O_EVTONLY, 0);
        } else {
            vfs_removename(dest_copy.str);
        }
    }
}
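
//
// Illustrative note (not compiled): OSAddAtomic() returns the value the
// counter held *before* the decrement, so "old_refcount > 1" above means
// another holder remains and this caller must not free. A hypothetical
// distillation of the pattern:
//
//   int32_t refs = 2;
//   if (OSAddAtomic(-1, &refs) > 1) {
//       // first release: 2 was observed, refs is now 1,
//       // someone else still holds a reference
//   }
//   if (OSAddAtomic(-1, &refs) == 1) {
//       // last release: 1 was observed, refs is now 0,
//       // so this caller is responsible for tearing the object down
//   }
//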

#define FSEVENTS_WATCHER_ENTITLEMENT \
    "com.apple.private.vfs.fsevents-watcher"

static int
add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_event_watcher **watcher_out, void *fseh)
{
    int i;
    fs_event_watcher *watcher;

    if (eventq_size <= 0 || eventq_size > 100 * max_kfs_events) {
        eventq_size = max_kfs_events;
    }

    // Note: the event_queue follows the fs_event_watcher struct
    //       in memory so we only have to do one allocation
    watcher = kalloc_type(fs_event_watcher, kfs_event *, eventq_size, Z_WAITOK);
    if (watcher == NULL) {
        return ENOMEM;
    }

    watcher->event_list = event_list;
    watcher->num_events = num_events;
    watcher->devices_not_to_watch = NULL;
    watcher->num_devices = 0;
    watcher->flags = 0;
    watcher->event_queue = (kfs_event **)&watcher[1];
    watcher->eventq_size = eventq_size;
    watcher->rd = 0;
    watcher->wr = 0;
    watcher->blockers = 0;
    watcher->num_readers = 0;
    watcher->max_event_id = 0;
    watcher->fseh = fseh;
    watcher->pid = proc_selfpid();
    proc_selfname(watcher->proc_name, sizeof(watcher->proc_name));

    watcher->num_dropped = 0; // XXXdbg - debugging

    if (IOTaskHasEntitlement(current_task(),
        FSEVENTS_WATCHER_ENTITLEMENT)) {
        watcher->flags |= WATCHER_APPLE_SYSTEM_SERVICE;
    } else if (!strncmp(watcher->proc_name, "fseventsd", sizeof(watcher->proc_name)) ||
        !strncmp(watcher->proc_name, "coreservicesd", sizeof(watcher->proc_name)) ||
        !strncmp(watcher->proc_name, "revisiond", sizeof(watcher->proc_name)) ||
        !strncmp(watcher->proc_name, "mds", sizeof(watcher->proc_name))) {
        printf("fsevents: watcher %s (pid: %d) needs '%s' entitlement\n",
            watcher->proc_name, watcher->pid,
            FSEVENTS_WATCHER_ENTITLEMENT);
        watcher->flags |= WATCHER_APPLE_SYSTEM_SERVICE;
    } else {
        printf("fsevents: watcher %s (pid: %d) - Using /dev/fsevents directly is unsupported. Migrate to FSEventsFramework\n",
            watcher->proc_name, watcher->pid);
    }

    lock_watch_table();

    // find a slot for the new watcher
    for (i = 0; i < MAX_WATCHERS; i++) {
        if (watcher_table[i] == NULL) {
            watcher->my_id = i;
            watcher_table[i] = watcher;
            break;
        }
    }

    if (i >= MAX_WATCHERS) {
        printf("fsevents: too many watchers!\n");
        unlock_watch_table();
        kfree_type(fs_event_watcher, kfs_event *, watcher->eventq_size, watcher);
        return ENOSPC;
    }

    // now update the global list of who's interested in
    // events of a particular type...
    for (i = 0; i < num_events; i++) {
        if (event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
            fs_event_type_watchers[i]++;
        }
    }

    unlock_watch_table();

    *watcher_out = watcher;

    return 0;
}
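
//
// Illustrative sketch (not compiled): the kalloc_type(A, B, n, ...) call
// above makes a single allocation sized for a header of type A followed
// by n elements of type B, which is why the queue can be addressed as
// (kfs_event **)&watcher[1]. In plain C the same layout trick looks like:
//
//   typedef struct hdr { void **queue; long rd, wr, qsize; } hdr_t;
//   hdr_t *h = malloc(sizeof(hdr_t) + n * sizeof(void *));
//   h->queue = (void **)&h[1];  // queue lives right after the header
//   h->queue[0] = some_item;    // a single free(h) releases both
//
// One allocation means one failure point and better locality, at the
// cost of the queue size being fixed for the watcher's lifetime.
//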


static void
remove_watcher(fs_event_watcher *target)
{
    int i, j, counter = 0;
    fs_event_watcher *watcher;
    kfs_event *kfse;

    lock_watch_table();

    for (j = 0; j < MAX_WATCHERS; j++) {
        watcher = watcher_table[j];
        if (watcher != target) {
            continue;
        }

        watcher_table[j] = NULL;

        for (i = 0; i < watcher->num_events; i++) {
            if (watcher->event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) {
                fs_event_type_watchers[i]--;
            }
        }

        if (watcher->flags & WATCHER_CLOSING) {
            unlock_watch_table();
            return;
        }

        // printf("fsevents: removing watcher %p (rd %d wr %d num_readers %d flags 0x%x)\n", watcher, watcher->rd, watcher->wr, watcher->num_readers, watcher->flags);
        watcher->flags |= WATCHER_CLOSING;
        OSAddAtomic(1, &watcher->num_readers);

        unlock_watch_table();

        while (watcher->num_readers > 1 && counter++ < 5000) {
            lock_watch_table();
            fsevents_wakeup(watcher); // in case they're asleep
            unlock_watch_table();

            tsleep(watcher, PRIBIO, "fsevents-close", 1);
        }
        if (counter++ >= 5000) {
            // printf("fsevents: close: still have readers! (%d)\n", watcher->num_readers);
            panic("fsevents: close: still have readers! (%d)", watcher->num_readers);
        }

        // drain the event_queue

        lck_rw_lock_exclusive(&event_handling_lock);
        while (watcher->rd != watcher->wr) {
            kfse = watcher->event_queue[watcher->rd];
            watcher->event_queue[watcher->rd] = NULL;
            watcher->rd = (watcher->rd + 1) % watcher->eventq_size;
            OSSynchronizeIO();
            if (kfse != NULL && kfse->type != FSE_INVALID && kfse->refcount >= 1) {
                release_event_ref(kfse);
            }
        }
        lck_rw_unlock_exclusive(&event_handling_lock);

        kfree_data(watcher->event_list, watcher->num_events * sizeof(int8_t));
        kfree_data(watcher->devices_not_to_watch, watcher->num_devices * sizeof(dev_t));
        kfree_type(fs_event_watcher, kfs_event *, watcher->eventq_size, watcher);
        return;
    }

    unlock_watch_table();
}


#define EVENT_DELAY_IN_MS 10
static thread_call_t event_delivery_timer = NULL;
static int timer_set = 0;


static void
delayed_event_delivery(__unused void *param0, __unused void *param1)
{
    int i;

    lock_watch_table();

    for (i = 0; i < MAX_WATCHERS; i++) {
        if (watcher_table[i] != NULL && watcher_table[i]->rd != watcher_table[i]->wr) {
            fsevents_wakeup(watcher_table[i]);
        }
    }

    timer_set = 0;

    unlock_watch_table();
}


//
// The watch table must be locked before calling this function.
//
static void
schedule_event_wakeup(void)
{
    uint64_t deadline;

    if (event_delivery_timer == NULL) {
        event_delivery_timer = thread_call_allocate((thread_call_func_t)delayed_event_delivery, NULL);
    }

    clock_interval_to_deadline(EVENT_DELAY_IN_MS, 1000 * 1000, &deadline);

    thread_call_enter_delayed(event_delivery_timer, deadline);
    timer_set = 1;
}



#define MAX_NUM_PENDING 16

//
// NOTE: the watch table must be locked before calling
//       this routine.
//
static int
watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse)
{
    if (kfse->abstime > watcher->max_event_id) {
        watcher->max_event_id = kfse->abstime;
    }

    if (((watcher->wr + 1) % watcher->eventq_size) == watcher->rd) {
        watcher->flags |= WATCHER_DROPPED_EVENTS;
        fsevents_wakeup(watcher);
        return ENOSPC;
    }

    OSAddAtomic(1, &kfse->refcount);
    watcher->event_queue[watcher->wr] = kfse;
    OSSynchronizeIO();
    watcher->wr = (watcher->wr + 1) % watcher->eventq_size;

    //
    // wake up the watcher if there are more than MAX_NUM_PENDING events.
    // otherwise schedule a timer (if one isn't already set) which will
    // send any pending events if no more are received in the next
    // EVENT_DELAY_IN_MS milliseconds.
    //
    int32_t num_pending = 0;
    if (watcher->rd < watcher->wr) {
        num_pending = watcher->wr - watcher->rd;
    }

    if (watcher->rd > watcher->wr) {
        num_pending = watcher->wr + watcher->eventq_size - watcher->rd;
    }
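
    //
    // Illustrative note (not compiled): the two branches above are the
    // usual ring-buffer occupancy formula split by wrap-around, and are
    // equivalent to a single modular expression:
    //
    //   num_pending = (wr - rd + qsize) % qsize;
    //
    // e.g. with qsize = 8, rd = 6, wr = 2: (2 - 6 + 8) % 8 = 4 queued
    // entries. One slot is always left empty so that rd == wr can mean
    // "empty" while (wr + 1) % qsize == rd means "full" (the ENOSPC
    // check at the top of this function).
    //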

    if (num_pending > (watcher->eventq_size * 3 / 4) && !(watcher->flags & WATCHER_APPLE_SYSTEM_SERVICE)) {
        /* Non-Apple Service is falling behind, start dropping events for this process */
        lck_rw_lock_exclusive(&event_handling_lock);
        while (watcher->rd != watcher->wr) {
            kfse = watcher->event_queue[watcher->rd];
            watcher->event_queue[watcher->rd] = NULL;
            watcher->rd = (watcher->rd + 1) % watcher->eventq_size;
            OSSynchronizeIO();
            if (kfse != NULL && kfse->type != FSE_INVALID && kfse->refcount >= 1) {
                release_event_ref(kfse);
            }
        }
        watcher->flags |= WATCHER_DROPPED_EVENTS;
        lck_rw_unlock_exclusive(&event_handling_lock);

        printf("fsevents: watcher falling behind: %s (pid: %d) rd: %4d wr: %4d q_size: %4d flags: 0x%x\n",
            watcher->proc_name, watcher->pid, watcher->rd, watcher->wr,
            watcher->eventq_size, watcher->flags);

        fsevents_wakeup(watcher);
    } else if (num_pending > MAX_NUM_PENDING) {
        fsevents_wakeup(watcher);
    } else if (timer_set == 0) {
        schedule_event_wakeup();
    }

    return 0;
}

static int
fill_buff(uint16_t type, int32_t size, const void *data,
    char *buff, int32_t *_buff_idx, int32_t buff_sz,
    struct uio *uio)
{
    int32_t amt, error = 0, buff_idx = *_buff_idx;
    uint16_t tmp;

    //
    // the +1 on the size is to guarantee that the main data
    // copy loop will always copy at least 1 byte
    //
    if ((buff_sz - buff_idx) <= (int)(2 * sizeof(uint16_t) + 1)) {
        if (buff_idx > uio_resid(uio)) {
            error = ENOSPC;
            goto get_out;
        }

        error = uiomove(buff, buff_idx, uio);
        if (error) {
            goto get_out;
        }
        buff_idx = 0;
    }

    // copy out the header (type & size)
    memcpy(&buff[buff_idx], &type, sizeof(uint16_t));
    buff_idx += sizeof(uint16_t);

    tmp = size & 0xffff;
    memcpy(&buff[buff_idx], &tmp, sizeof(uint16_t));
    buff_idx += sizeof(uint16_t);

    // now copy the body of the data, flushing along the way
    // if the buffer fills up.
    //
    while (size > 0) {
        amt = (size < (buff_sz - buff_idx)) ? size : (buff_sz - buff_idx);
        memcpy(&buff[buff_idx], data, amt);

        size -= amt;
        buff_idx += amt;
        data = (const char *)data + amt;
        if (size > (buff_sz - buff_idx)) {
            if (buff_idx > uio_resid(uio)) {
                error = ENOSPC;
                goto get_out;
            }
            error = uiomove(buff, buff_idx, uio);
            if (error) {
                goto get_out;
            }
            buff_idx = 0;
        }

        if (amt == 0) { // just in case...
            break;
        }
    }

get_out:
    *_buff_idx = buff_idx;

    return error;
}
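
//
// Illustrative sketch (not compiled): fill_buff() and copy_out_kfse()
// together emit one record per event onto the read stream: an int32 event
// type, an int32 pid, then a series of arguments each framed as a 16-bit
// FSE_ARG_* tag plus a 16-bit length followed by that many payload bytes,
// terminated by a bare FSE_ARG_DONE tag. A hypothetical userspace reader
// could walk one record like this (error handling elided):
//
//   const char *p = buf;                 // bytes read from /dev/fsevents
//   int32_t type, pid;
//   memcpy(&type, p, 4); p += 4;
//   memcpy(&pid,  p, 4); p += 4;
//   for (;;) {
//       uint16_t tag, len;
//       memcpy(&tag, p, 2); p += 2;
//       if (tag == FSE_ARG_DONE) {
//           break;                       // no length/payload after DONE
//       }
//       memcpy(&len, p, 2); p += 2;
//       // FSE_ARG_STRING payload is the NUL-terminated path,
//       // FSE_ARG_DEV a dev_t, FSE_ARG_INO an ino64_t, etc.
//       p += len;
//   }
//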


static int copy_out_kfse(fs_event_watcher *watcher, kfs_event *kfse, struct uio *uio) __attribute__((noinline));

static int
copy_out_kfse(fs_event_watcher *watcher, kfs_event *kfse, struct uio *uio)
{
    int error;
    uint16_t tmp16;
    int32_t type;
    kfs_event *cur;
    char evbuff[512];
    int evbuff_idx = 0;

    if (kfse->type == FSE_INVALID) {
        panic("fsevents: copy_out_kfse: asked to copy out an invalid event (kfse %p, refcount %d fref ptr %p)", kfse, kfse->refcount, kfse->str);
    }

    if (kfse->flags & KFSE_BEING_CREATED) {
        return 0;
    }

    if (((kfse->type == FSE_RENAME) || (kfse->type == FSE_CLONE)) && kfse->dest == NULL) {
        //
        // This can happen if an event gets recycled but we had a
        // pointer to it in our event queue. The event is the
        // destination of a rename or clone which we'll process separately
        // (that is, another kfse points to this one so it's ok
        // to skip this guy because we'll process it when we process
        // the other one)
        error = 0;
        goto get_out;
    }

    if (watcher->flags & WATCHER_WANTS_EXTENDED_INFO) {
        type = (kfse->type & 0xfff);

        if (kfse->flags & KFSE_CONTAINS_DROPPED_EVENTS) {
            type |= (FSE_CONTAINS_DROPPED_EVENTS << FSE_FLAG_SHIFT);
        } else if (kfse->flags & KFSE_COMBINED_EVENTS) {
            type |= (FSE_COMBINED_EVENTS << FSE_FLAG_SHIFT);
        }
    } else {
        type = (int32_t)kfse->type;
    }

    // copy out the type of the event
    memcpy(evbuff, &type, sizeof(int32_t));
    evbuff_idx += sizeof(int32_t);

    // copy out the pid of the person that generated the event
    memcpy(&evbuff[evbuff_idx], &kfse->pid, sizeof(pid_t));
    evbuff_idx += sizeof(pid_t);

    cur = kfse;

copy_again:

    if (kfse->type == FSE_DOCID_CHANGED || kfse->type == FSE_DOCID_CREATED) {
        dev_t dev = cur->dev;
        ino64_t ino = cur->ino;
        uint64_t ival;

        error = fill_buff(FSE_ARG_DEV, sizeof(dev_t), &dev, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }

        error = fill_buff(FSE_ARG_INO, sizeof(ino64_t), &ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }

        memcpy(&ino, &cur->str, sizeof(ino64_t));
        error = fill_buff(FSE_ARG_INO, sizeof(ino64_t), &ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }

        memcpy(&ival, &cur->uid, sizeof(uint64_t)); // the 64-bit docid overlays the uid/gid fields
        error = fill_buff(FSE_ARG_INT64, sizeof(uint64_t), &ival, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }

        goto done;
    }

    if (kfse->type == FSE_UNMOUNT_PENDING) {
        dev_t dev = cur->dev;

        error = fill_buff(FSE_ARG_DEV, sizeof(dev_t), &dev, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }

        goto done;
    }

    if (cur->str == NULL || cur->str[0] == '\0') {
        printf("copy_out_kfse:2: empty/short path (%s)\n", cur->str);
        error = fill_buff(FSE_ARG_STRING, 2, "/", evbuff, &evbuff_idx, sizeof(evbuff), uio);
    } else {
        error = fill_buff(FSE_ARG_STRING, cur->len, cur->str, evbuff, &evbuff_idx, sizeof(evbuff), uio);
    }
    if (error != 0) {
        goto get_out;
    }

    if (cur->dev == 0 && cur->ino == 0) {
        // this happens when a rename event happens and the
        // destination of the rename did not previously exist.
        // it thus has no other file info so skip copying out
        // the stuff below since it isn't initialized
        goto done;
    }


    if (watcher->flags & WATCHER_WANTS_COMPACT_EVENTS) {
        int32_t finfo_size;

        finfo_size = sizeof(dev_t) + sizeof(ino64_t) + sizeof(int32_t) + sizeof(uid_t) + sizeof(gid_t);
        error = fill_buff(FSE_ARG_FINFO, finfo_size, &cur->ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }
    } else {
        error = fill_buff(FSE_ARG_DEV, sizeof(dev_t), &cur->dev, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }

        error = fill_buff(FSE_ARG_INO, sizeof(ino64_t), &cur->ino, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }

        error = fill_buff(FSE_ARG_MODE, sizeof(int32_t), &cur->mode, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }

        error = fill_buff(FSE_ARG_UID, sizeof(uid_t), &cur->uid, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }

        error = fill_buff(FSE_ARG_GID, sizeof(gid_t), &cur->gid, evbuff, &evbuff_idx, sizeof(evbuff), uio);
        if (error != 0) {
            goto get_out;
        }
    }


    if (cur->dest) {
        cur = cur->dest;
        goto copy_again;
    }

done:
    // very last thing: the time stamp
    error = fill_buff(FSE_ARG_INT64, sizeof(uint64_t), &cur->abstime, evbuff, &evbuff_idx, sizeof(evbuff), uio);
    if (error != 0) {
        goto get_out;
    }

    // check if the FSE_ARG_DONE will fit
    if (sizeof(uint16_t) > sizeof(evbuff) - evbuff_idx) {
        if (evbuff_idx > uio_resid(uio)) {
            error = ENOSPC;
            goto get_out;
        }
        error = uiomove(evbuff, evbuff_idx, uio);
        if (error) {
            goto get_out;
        }
        evbuff_idx = 0;
    }

    tmp16 = FSE_ARG_DONE;
    memcpy(&evbuff[evbuff_idx], &tmp16, sizeof(uint16_t));
    evbuff_idx += sizeof(uint16_t);

    // flush any remaining data in the buffer (and hopefully
    // in most cases this is the only uiomove we'll do)
    if (evbuff_idx > uio_resid(uio)) {
        error = ENOSPC;
    } else {
        error = uiomove(evbuff, evbuff_idx, uio);
    }

get_out:

    return error;
}



static int
fmod_watch(fs_event_watcher *watcher, struct uio *uio)
{
    int error = 0;
    user_ssize_t last_full_event_resid;
    kfs_event *kfse;
    uint16_t tmp16;
    int skipped;

    last_full_event_resid = uio_resid(uio);

    // need at least 2048 bytes of space (maxpathlen + 1 event buf)
    if (uio_resid(uio) < 2048 || watcher == NULL) {
        return EINVAL;
    }

    if (watcher->flags & WATCHER_CLOSING) {
        return 0;
    }

    if (OSAddAtomic(1, &watcher->num_readers) != 0) {
        // don't allow multiple threads to read from the fd at the same time
        OSAddAtomic(-1, &watcher->num_readers);
        return EAGAIN;
    }

restart_watch:
    if (watcher->rd == watcher->wr) {
        if (watcher->flags & WATCHER_CLOSING) {
            OSAddAtomic(-1, &watcher->num_readers);
            return 0;
        }
        OSAddAtomic(1, &watcher->blockers);

        // there's nothing to do, go to sleep
        error = tsleep((caddr_t)watcher, PUSER | PCATCH, "fsevents_empty", 0);

        OSAddAtomic(-1, &watcher->blockers);

        if (error != 0 || (watcher->flags & WATCHER_CLOSING)) {
            OSAddAtomic(-1, &watcher->num_readers);
            return error;
        }
    }

    // if we dropped events, return that as an event first
    if (watcher->flags & WATCHER_DROPPED_EVENTS) {
        int32_t val = FSE_EVENTS_DROPPED;

        error = uiomove((caddr_t)&val, sizeof(int32_t), uio);
        if (error == 0) {
            val = 0; // a fake pid
            error = uiomove((caddr_t)&val, sizeof(int32_t), uio);

            tmp16 = FSE_ARG_DONE; // makes it a consistent msg
            error = uiomove((caddr_t)&tmp16, sizeof(int16_t), uio);

            last_full_event_resid = uio_resid(uio);
        }

        if (error) {
            OSAddAtomic(-1, &watcher->num_readers);
            return error;
        }

        watcher->flags &= ~WATCHER_DROPPED_EVENTS;
    }

    skipped = 0;

    lck_rw_lock_shared(&event_handling_lock);
    while (uio_resid(uio) > 0 && watcher->rd != watcher->wr) {
        if (watcher->flags & WATCHER_CLOSING) {
            break;
        }

        //
        // check if the event is something of interest to us
        // (since it may have been recycled/reused and changed
        // its type or which device it is for)
        //
        kfse = watcher->event_queue[watcher->rd];
        if (!kfse || kfse->type == FSE_INVALID || kfse->type >= watcher->num_events || kfse->refcount < 1) {
            break;
        }

        if (watcher->event_list[kfse->type] == FSE_REPORT) {
            if (!(watcher->flags & WATCHER_APPLE_SYSTEM_SERVICE) &&
                kfse->type != FSE_DOCID_CREATED &&
                kfse->type != FSE_DOCID_CHANGED &&
                is_ignored_directory(kfse->str)) {
                // If this is not an Apple System Service, skip specified directories
                // radar://12034844
                error = 0;
                skipped = 1;
            } else {
                skipped = 0;
                if (last_event_ptr == kfse) {
                    last_event_ptr = NULL;
                    last_event_type = -1;
                    last_coalesced_time = 0;
                }
                error = copy_out_kfse(watcher, kfse, uio);
                if (error != 0) {
                    // if an event won't fit or encountered an error while
                    // we were copying it out, then backup to the last full
                    // event and just bail out. if the error was ENOENT
                    // then we can continue regular processing, otherwise
                    // we should unlock things and return.
                    uio_setresid(uio, last_full_event_resid);
                    if (error != ENOENT) {
                        lck_rw_unlock_shared(&event_handling_lock);
                        error = 0;
                        goto get_out;
                    }
                }

                last_full_event_resid = uio_resid(uio);
            }
        }

        watcher->event_queue[watcher->rd] = NULL;
        watcher->rd = (watcher->rd + 1) % watcher->eventq_size;
        OSSynchronizeIO();
        release_event_ref(kfse);
    }
    lck_rw_unlock_shared(&event_handling_lock);

    if (skipped && error == 0) {
        goto restart_watch;
    }

get_out:
    OSAddAtomic(-1, &watcher->num_readers);

    return error;
}


//
// Shoo watchers away from a volume that's about to be unmounted
// (so that it can be cleanly unmounted).
//
void
fsevent_unmount(__unused struct mount *mp, __unused vfs_context_t ctx)
{
#if !defined(XNU_TARGET_OS_OSX)
    dev_t dev = mp->mnt_vfsstat.f_fsid.val[0];
    int error, waitcount = 0;
    struct timespec ts = {.tv_sec = 1, .tv_nsec = 0};

    // wait for any other pending unmounts to complete
    lock_watch_table();
    while (fsevent_unmount_dev != 0) {
        error = msleep((caddr_t)&fsevent_unmount_dev, &watch_table_lock, PRIBIO, "fsevent_unmount_wait", &ts);
        if (error == EWOULDBLOCK) {
            error = 0;
        }
        if (!error && (++waitcount >= 10)) {
            error = EWOULDBLOCK;
            printf("timeout waiting to signal unmount pending for dev %d (fsevent_unmount_dev %d)\n", dev, fsevent_unmount_dev);
        }
        if (error) {
            // there's a problem, bail out
            unlock_watch_table();
            return;
        }
    }
    if (fs_event_type_watchers[FSE_UNMOUNT_PENDING] == 0) {
        // nobody watching for unmount pending events
        unlock_watch_table();
        return;
    }
    // this is now the current unmount pending
    fsevent_unmount_dev = dev;
    fsevent_unmount_ack_count = fs_event_type_watchers[FSE_UNMOUNT_PENDING];
    unlock_watch_table();

    // send an event to notify the watcher they need to get off the mount
    error = add_fsevent(FSE_UNMOUNT_PENDING, ctx, FSE_ARG_DEV, dev, FSE_ARG_DONE);

    // wait for acknowledgment(s) (give up if it takes too long)
    lock_watch_table();
    waitcount = 0;
    while (fsevent_unmount_dev == dev) {
        error = msleep((caddr_t)&fsevent_unmount_dev, &watch_table_lock, PRIBIO, "fsevent_unmount_pending", &ts);
        if (error == EWOULDBLOCK) {
            error = 0;
        }
        if (!error && (++waitcount >= 10)) {
            error = EWOULDBLOCK;
            printf("unmount pending ack timeout for dev %d\n", dev);
        }
        if (error) {
            // there's a problem, bail out
            if (fsevent_unmount_dev == dev) {
                fsevent_unmount_dev = 0;
                fsevent_unmount_ack_count = 0;
            }
            wakeup((caddr_t)&fsevent_unmount_dev);
            break;
        }
    }
    unlock_watch_table();
#endif /* ! XNU_TARGET_OS_OSX */
}
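
//
// Illustrative sketch (not compiled): the unmount handshake above pairs
// with the FSEVENTS_UNMOUNT_PENDING_ACK ioctl handled further down. A
// hypothetical watcher that receives an FSE_UNMOUNT_PENDING record would
// release anything it holds on the volume and then ack with the dev_t it
// was handed, e.g.:
//
//   // after parsing FSE_ARG_DEV out of the event record:
//   dev_t dev = parsed_dev;
//   // ... close any files/state held on that device ...
//   ioctl(fsevents_fd, FSEVENTS_UNMOUNT_PENDING_ACK, &dev);
//
// Once every watcher registered for FSE_UNMOUNT_PENDING has acked (the
// fsevent_unmount_ack_count bookkeeping), fsevent_unmount() wakes up and
// the unmount proceeds.
//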


//
// /dev/fsevents device code
//
static int fsevents_installed = 0;

typedef struct fsevent_handle {
    UInt32 flags;
    SInt32 active;
    fs_event_watcher *watcher;
    struct klist knotes;
    struct selinfo si;
} fsevent_handle;

#define FSEH_CLOSING 0x0001

static int
fseventsf_read(struct fileproc *fp, struct uio *uio,
    __unused int flags, __unused vfs_context_t ctx)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp_get_data(fp);
    int error;

    error = fmod_watch(fseh->watcher, uio);

    return error;
}


#pragma pack(push, 4)
typedef struct fsevent_dev_filter_args32 {
    uint32_t num_devices;
    user32_addr_t devices;
} fsevent_dev_filter_args32;
typedef struct fsevent_dev_filter_args64 {
    uint32_t num_devices;
    user64_addr_t devices;
} fsevent_dev_filter_args64;
#pragma pack(pop)

#define FSEVENTS_DEVICE_FILTER_32 _IOW('s', 100, fsevent_dev_filter_args32)
#define FSEVENTS_DEVICE_FILTER_64 _IOW('s', 100, fsevent_dev_filter_args64)
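
//
// Illustrative sketch (not compiled): both commands share the number
// ('s', 100); only the pointer width of the devices field differs, and
// the handler below rejects the variant that does not match the caller's
// ABI. A hypothetical 64-bit client excluding two devices might do:
//
//   dev_t skip[2] = { dev_a, dev_b };   // devices to ignore
//   struct fsevent_dev_filter_args64 args = {
//       .num_devices = 2,
//       .devices = (user64_addr_t)(uintptr_t)skip,
//   };
//   ioctl(fsevents_fd, FSEVENTS_DEVICE_FILTER_64, &args);
//
// Passing num_devices == 0 clears the exclusion list, so events from all
// devices are reported again.
//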

static int
fseventsf_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, vfs_context_t ctx)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp_get_data(fp);
    int ret = 0;
    fsevent_dev_filter_args64 *devfilt_args, _devfilt_args;

    OSAddAtomic(1, &fseh->active);
    if (fseh->flags & FSEH_CLOSING) {
        OSAddAtomic(-1, &fseh->active);
        return 0;
    }

    switch (cmd) {
    case FIONBIO:
    case FIOASYNC:
        break;

    case FSEVENTS_WANT_COMPACT_EVENTS: {
        fseh->watcher->flags |= WATCHER_WANTS_COMPACT_EVENTS;
        break;
    }

    case FSEVENTS_WANT_EXTENDED_INFO: {
        fseh->watcher->flags |= WATCHER_WANTS_EXTENDED_INFO;
        break;
    }

    case FSEVENTS_GET_CURRENT_ID: {
        *(uint64_t *)data = fseh->watcher->max_event_id;
        ret = 0;
        break;
    }

    case FSEVENTS_DEVICE_FILTER_32: {
        if (proc_is64bit(vfs_context_proc(ctx))) {
            ret = EINVAL;
            break;
        }
        fsevent_dev_filter_args32 *devfilt_args32 = (fsevent_dev_filter_args32 *)data;

        devfilt_args = &_devfilt_args;
        memset(devfilt_args, 0, sizeof(fsevent_dev_filter_args64));
        devfilt_args->num_devices = devfilt_args32->num_devices;
        devfilt_args->devices = CAST_USER_ADDR_T(devfilt_args32->devices);
        goto handle_dev_filter;
    }

    case FSEVENTS_DEVICE_FILTER_64:
        if (!proc_is64bit(vfs_context_proc(ctx))) {
            ret = EINVAL;
            break;
        }
        devfilt_args = (fsevent_dev_filter_args64 *)data;

handle_dev_filter:
        {
            int new_num_devices, old_num_devices = 0;
            dev_t *devices_not_to_watch, *tmp = NULL;

            if (devfilt_args->num_devices > 256) {
                ret = EINVAL;
                break;
            }

            new_num_devices = devfilt_args->num_devices;
            if (new_num_devices == 0) {
                lock_watch_table();

                tmp = fseh->watcher->devices_not_to_watch;
                fseh->watcher->devices_not_to_watch = NULL;
                old_num_devices = fseh->watcher->num_devices;
                fseh->watcher->num_devices = new_num_devices;

                unlock_watch_table();
                kfree_data(tmp, old_num_devices * sizeof(dev_t));
                break;
            }

            devices_not_to_watch = kalloc_data(new_num_devices * sizeof(dev_t), Z_WAITOK);
            if (devices_not_to_watch == NULL) {
                ret = ENOMEM;
                break;
            }

            ret = copyin((user_addr_t)devfilt_args->devices,
                (void *)devices_not_to_watch,
                new_num_devices * sizeof(dev_t));
            if (ret) {
                kfree_data(devices_not_to_watch, new_num_devices * sizeof(dev_t));
                break;
            }

            lock_watch_table();
            old_num_devices = fseh->watcher->num_devices;
            fseh->watcher->num_devices = new_num_devices;
            tmp = fseh->watcher->devices_not_to_watch;
            fseh->watcher->devices_not_to_watch = devices_not_to_watch;
            unlock_watch_table();

            kfree_data(tmp, old_num_devices * sizeof(dev_t));

            break;
        }

    case FSEVENTS_UNMOUNT_PENDING_ACK: {
        lock_watch_table();
        dev_t dev = *(dev_t *)data;
        if (fsevent_unmount_dev == dev) {
            if (--fsevent_unmount_ack_count <= 0) {
                fsevent_unmount_dev = 0;
                wakeup((caddr_t)&fsevent_unmount_dev);
            }
        } else {
            printf("unexpected unmount pending ack %d (%d)\n", dev, fsevent_unmount_dev);
            ret = EINVAL;
        }
        unlock_watch_table();
        break;
    }

    default:
        ret = EINVAL;
        break;
    }

    OSAddAtomic(-1, &fseh->active);
    return ret;
}


static int
fseventsf_select(struct fileproc *fp, int which, __unused void *wql, vfs_context_t ctx)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp_get_data(fp);
    int ready = 0;

    if ((which != FREAD) || (fseh->watcher->flags & WATCHER_CLOSING)) {
        return 0;
    }

    // if there's nothing in the queue, we're not ready
    if (fseh->watcher->rd != fseh->watcher->wr) {
        ready = 1;
    }

    if (!ready) {
        lock_watch_table();
        selrecord(vfs_context_proc(ctx), &fseh->si, wql);
        unlock_watch_table();
    }

    return ready;
}


#if NOTUSED
static int
fseventsf_stat(__unused struct fileproc *fp, __unused struct stat *sb, __unused vfs_context_t ctx)
{
    return ENOTSUP;
}
#endif

static int
fseventsf_close(struct fileglob *fg, __unused vfs_context_t ctx)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fg_get_data(fg);
    fs_event_watcher *watcher;

    OSBitOrAtomic(FSEH_CLOSING, &fseh->flags);
    while (OSAddAtomic(0, &fseh->active) > 0) {
        tsleep((caddr_t)fseh->watcher, PRIBIO, "fsevents-close", 1);
    }

    watcher = fseh->watcher;
    fg_set_data(fg, NULL);
    fseh->watcher = NULL;

    remove_watcher(watcher);
    selthreadclear(&fseh->si);
    kfree_type(fsevent_handle, fseh);

    return 0;
}

static void
filt_fsevent_detach(struct knote *kn)
{
    fsevent_handle *fseh = (struct fsevent_handle *)kn->kn_hook;

    lock_watch_table();

    KNOTE_DETACH(&fseh->knotes, kn);

    unlock_watch_table();
}

/*
 * Determine whether this knote should be active
 *
 * This is kind of subtle.
 * --First, notice if the vnode has been revoked: if so, override the hint
 * --EVFILT_READ knotes are checked no matter what the hint is
 * --Other knotes activate based on the hint
 * --If the hint is revoke, set special flags and activate
 */
static int
filt_fsevent_common(struct knote *kn, struct kevent_qos_s *kev, long hint)
{
    fsevent_handle *fseh = (struct fsevent_handle *)kn->kn_hook;
    int activate = 0;
    int32_t rd, wr, amt;
    int64_t data = 0;

    if (NOTE_REVOKE == hint) {
        kn->kn_flags |= (EV_EOF | EV_ONESHOT);
        activate = 1;
    }

    rd = fseh->watcher->rd;
    wr = fseh->watcher->wr;
    if (rd <= wr) {
        amt = wr - rd;
    } else {
        amt = fseh->watcher->eventq_size - (rd - wr);
    }

    switch (kn->kn_filter) {
    case EVFILT_READ:
        data = amt;
        activate = (data != 0);
        break;
    case EVFILT_VNODE:
        /* Check events this note matches against the hint */
        if (kn->kn_sfflags & hint) {
            kn->kn_fflags |= hint; /* Set which event occurred */
        }
        if (kn->kn_fflags != 0) {
            activate = 1;
        }
        break;
    default:
        // nothing to do...
        break;
    }

    if (activate && kev) {
        knote_fill_kevent(kn, kev, data);
    }
    return activate;
}
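
/*
 * Worked example of the queue-occupancy math above (illustrative only):
 * with eventq_size == 8, rd == 6 and wr == 2, the writer has wrapped, so
 * amt = 8 - (6 - 2) = 4 events (slots 6, 7, 0, 1) are waiting; with
 * rd == 2 and wr == 6 the simple case applies and amt = 6 - 2 = 4.
 * EVFILT_READ knotes activate whenever amt is non-zero.
 */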

static int
filt_fsevent(struct knote *kn, long hint)
{
    return filt_fsevent_common(kn, NULL, hint);
}

static int
filt_fsevent_touch(struct knote *kn, struct kevent_qos_s *kev)
{
    int res;

    lock_watch_table();

    /* accept new fflags/data as saved */
    kn->kn_sfflags = kev->fflags;
    kn->kn_sdata = kev->data;

    /* restrict the current results to the (smaller?) set of new interest */
    /*
     * For compatibility with previous implementations, we leave kn_fflags
     * as they were before.
     */
    //kn->kn_fflags &= kev->fflags;

    /* determine if the filter is now fired */
    res = filt_fsevent_common(kn, NULL, 0);

    unlock_watch_table();

    return res;
}

static int
filt_fsevent_process(struct knote *kn, struct kevent_qos_s *kev)
{
    int res;

    lock_watch_table();

    res = filt_fsevent_common(kn, kev, 0);

    unlock_watch_table();

    return res;
}

SECURITY_READ_ONLY_EARLY(struct filterops) fsevent_filtops = {
    .f_isfd = 1,
    .f_attach = NULL,
    .f_detach = filt_fsevent_detach,
    .f_event = filt_fsevent,
    .f_touch = filt_fsevent_touch,
    .f_process = filt_fsevent_process,
};

static int
fseventsf_kqfilter(struct fileproc *fp, struct knote *kn,
    __unused struct kevent_qos_s *kev)
{
    fsevent_handle *fseh = (struct fsevent_handle *)fp_get_data(fp);
    int res;

    kn->kn_hook = (void *)fseh;
    kn->kn_filtid = EVFILTID_FSEVENT;

    lock_watch_table();

    KNOTE_ATTACH(&fseh->knotes, kn);

    /* check to see if it is fired already */
    res = filt_fsevent_common(kn, NULL, 0);

    unlock_watch_table();

    return res;
}
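
/*
 * Illustrative sketch (not part of this file): a userspace watcher can
 * wait for queued events with kqueue(2) instead of select(2).  The fd
 * below is assumed to be a cloned /dev/fsevents descriptor; the knote
 * attaches through fseventsf_kqfilter() above and fires once the event
 * queue is non-empty.
 *
 *    struct kevent kev, out;
 *    int kq = kqueue();
 *
 *    EV_SET(&kev, fsevents_fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *    if (kevent(kq, &kev, 1, NULL, 0, NULL) < 0) {
 *        err(1, "kevent register");
 *    }
 *    while (kevent(kq, NULL, 0, &out, 1, NULL) == 1) {
 *        // out.data holds the number of queued events; read() them
 *    }
 */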


static int
fseventsf_drain(struct fileproc *fp, __unused vfs_context_t ctx)
{
    int counter = 0;
    fsevent_handle *fseh = (struct fsevent_handle *)fp_get_data(fp);

    // if there are people still waiting, sleep for 10ms to
    // let them clean up and get out of there. however we
    // also don't want to get stuck forever so if they don't
    // exit after 5 seconds we're tearing things down anyway.
    while (fseh->watcher->blockers && counter++ < 500) {
        // issue wakeup in case anyone is blocked waiting for an event
        // do this each time we wakeup in case the blocker missed
        // the wakeup due to the unprotected test of WATCHER_CLOSING
        // and decision to tsleep in fmod_watch... this bit of
        // latency is a decent tradeoff against not having to
        // take and drop a lock in fmod_watch
        lock_watch_table();
        fsevents_wakeup(fseh->watcher);
        unlock_watch_table();

        tsleep((caddr_t)fseh->watcher, PRIBIO, "watcher-close", 1);
    }

    return 0;
}


static int
fseventsopen(__unused dev_t dev, __unused int flag, __unused int mode, __unused struct proc *p)
{
    if (!kauth_cred_issuser(kauth_cred_get())) {
        return EPERM;
    }

    return 0;
}

static int
fseventsclose(__unused dev_t dev, __unused int flag, __unused int mode, __unused struct proc *p)
{
    return 0;
}

static int
fseventsread(__unused dev_t dev, __unused struct uio *uio, __unused int ioflag)
{
    return EIO;
}


static int
parse_buffer_and_add_events(const char *buffer, size_t bufsize, vfs_context_t ctx, size_t *remainder)
{
    const fse_info *finfo, *dest_finfo;
    const char *path, *ptr, *dest_path, *event_start = buffer;
    size_t path_len, dest_path_len;
    int type, err = 0;

    ptr = buffer;
    while ((ptr + sizeof(int) + sizeof(fse_info) + 1) < buffer + bufsize) {
        type = *(const int *)ptr;
        if (type < 0 || type >= FSE_MAX_EVENTS) {
            err = EINVAL;
            break;
        }

        ptr += sizeof(int);

        finfo = (const fse_info *)ptr;
        ptr += sizeof(fse_info);

        path = ptr;
        while (ptr < buffer + bufsize && *ptr != '\0') {
            ptr++;
        }

        if (ptr >= buffer + bufsize) {
            break;
        }

        ptr++; // advance over the trailing '\0'

        path_len = ptr - path;

        if (type != FSE_RENAME && type != FSE_EXCHANGE && type != FSE_CLONE) {
            event_start = ptr; // record where the next event starts

            err = add_fsevent(type, ctx, FSE_ARG_STRING, path_len, path, FSE_ARG_FINFO, finfo, FSE_ARG_DONE);
            if (err) {
                break;
            }
            continue;
        }

        //
        // if we're here we have to slurp up the destination finfo
        // and path so that we can pass them to the add_fsevent()
        // call. basically it's a copy of the above code.
        //
        dest_finfo = (const fse_info *)ptr;
        ptr += sizeof(fse_info);

        dest_path = ptr;
        while (ptr < buffer + bufsize && *ptr != '\0') {
            ptr++;
        }

        if (ptr >= buffer + bufsize) {
            break;
        }

        ptr++; // advance over the trailing '\0'
        event_start = ptr; // record where the next event starts

        dest_path_len = ptr - dest_path;
        //
        // If the destination inode number is non-zero, generate a rename
        // with both source and destination FSE_ARG_FINFO. Otherwise generate
        // a rename with only one FSE_ARG_FINFO. If you need to inject an
        // exchange with an inode of zero, just make that inode (and its path)
        // come in as the first one, not the second.
        //
        if (dest_finfo->ino) {
            err = add_fsevent(type, ctx,
                FSE_ARG_STRING, path_len, path, FSE_ARG_FINFO, finfo,
                FSE_ARG_STRING, dest_path_len, dest_path, FSE_ARG_FINFO, dest_finfo,
                FSE_ARG_DONE);
        } else {
            err = add_fsevent(type, ctx,
                FSE_ARG_STRING, path_len, path, FSE_ARG_FINFO, finfo,
                FSE_ARG_STRING, dest_path_len, dest_path,
                FSE_ARG_DONE);
        }

        if (err) {
            break;
        }
    }

    //
    // if the last event wasn't complete, set the remainder
    // to be the last event start boundary.
    //
    *remainder = (long)((buffer + bufsize) - event_start);

    return err;
}
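
/*
 * Illustrative layout of the records parsed above (not a public API;
 * field order inferred from this parser).  Each event written to
 * /dev/fsevents is packed as:
 *
 *    [int type][fse_info finfo][path bytes ... '\0']
 *
 * and FSE_RENAME / FSE_EXCHANGE / FSE_CLONE events carry a second
 * finfo/path pair for the destination:
 *
 *    [int type][fse_info src][srcpath '\0'][fse_info dst][dstpath '\0']
 *
 * A record cut off at the end of a write buffer is reported to the
 * caller via *remainder and re-parsed after the next copyin.
 */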


//
// Note: this buffer size can not ever be less than
// 2*MAXPATHLEN + 2*sizeof(fse_info) + sizeof(int)
// because that is the max size for a single event.
// I made it 4k to be a "nice" size. making it
// smaller is not a good idea.
//
#define WRITE_BUFFER_SIZE 4096
static char *write_buffer = NULL;

static int
fseventswrite(__unused dev_t dev, struct uio *uio, __unused int ioflag)
{
    int error = 0;
    size_t count, offset = 0, remainder = 0;
    vfs_context_t ctx = vfs_context_current();

    lck_mtx_lock(&event_writer_lock);

    if (write_buffer == NULL) {
        write_buffer = zalloc_permanent(WRITE_BUFFER_SIZE, ZALIGN_64);
    }

    //
    // this loop copies in and processes the events written.
    // it takes care to copy in reasonable size chunks and
    // process them. if there is an event that spans a chunk
    // boundary we're careful to copy those bytes down to the
    // beginning of the buffer and read the next chunk in just
    // after it.
    //
    while (uio_resid(uio)) {
        count = MIN(WRITE_BUFFER_SIZE - offset, (size_t)uio_resid(uio));

        error = uiomove(write_buffer + offset, (int)count, uio);
        if (error) {
            break;
        }

        error = parse_buffer_and_add_events(write_buffer, offset + count, ctx, &remainder);
        if (error) {
            break;
        }

        //
        // if there's any remainder, copy it down to the beginning
        // of the buffer so that it will get processed the next time
        // through the loop. note that the remainder always starts
        // at an event boundary.
        //
        memmove(write_buffer, (write_buffer + count + offset) - remainder, remainder);
        offset = remainder;
    }

    lck_mtx_unlock(&event_writer_lock);

    return error;
}


static const struct fileops fsevents_fops = {
    .fo_type = DTYPE_FSEVENTS,
    .fo_read = fseventsf_read,
    .fo_write = fo_no_write,
    .fo_ioctl = fseventsf_ioctl,
    .fo_select = fseventsf_select,
    .fo_close = fseventsf_close,
    .fo_kqfilter = fseventsf_kqfilter,
    .fo_drain = fseventsf_drain,
};

typedef struct fsevent_clone_args32 {
    user32_addr_t event_list;
    int32_t num_events;
    int32_t event_queue_depth;
    user32_addr_t fd;
} fsevent_clone_args32;

typedef struct fsevent_clone_args64 {
    user64_addr_t event_list;
    int32_t num_events;
    int32_t event_queue_depth;
    user64_addr_t fd;
} fsevent_clone_args64;

#define FSEVENTS_CLONE_32 _IOW('s', 1, fsevent_clone_args32)
#define FSEVENTS_CLONE_64 _IOW('s', 1, fsevent_clone_args64)
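
/*
 * Illustrative sketch (not part of this file): the canonical userspace
 * setup sequence.  The fd names here are hypothetical; fsevent_clone_args
 * and FSEVENTS_CLONE come from <sys/fsevents.h>, and the kernel remaps
 * them to the 32/64-bit variants above.
 *
 *    int8_t events[FSE_MAX_EVENTS];
 *    int new_fd, dev_fd = open("/dev/fsevents", O_RDONLY);  // root only
 *    fsevent_clone_args args;
 *
 *    memset(events, FSE_REPORT, sizeof(events));  // want everything
 *    args.event_list = events;
 *    args.num_events = FSE_MAX_EVENTS;
 *    args.event_queue_depth = 1024;
 *    args.fd = &new_fd;
 *    if (ioctl(dev_fd, FSEVENTS_CLONE, &args) < 0) {
 *        err(1, "FSEVENTS_CLONE");
 *    }
 *    // read(new_fd, ...) now returns the event stream
 */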

static int
fseventsioctl(__unused dev_t dev, u_long cmd, caddr_t data, __unused int flag, struct proc *p)
{
    struct fileproc *f;
    int fd, error;
    fsevent_handle *fseh = NULL;
    fsevent_clone_args64 *fse_clone_args, _fse_clone;
    int8_t *event_list;
    int is64bit = proc_is64bit(p);

    switch (cmd) {
    case FSEVENTS_CLONE_32: {
        if (is64bit) {
            return EINVAL;
        }
        fsevent_clone_args32 *args32 = (fsevent_clone_args32 *)data;

        fse_clone_args = &_fse_clone;
        memset(fse_clone_args, 0, sizeof(fsevent_clone_args64));

        fse_clone_args->event_list = CAST_USER_ADDR_T(args32->event_list);
        fse_clone_args->num_events = args32->num_events;
        fse_clone_args->event_queue_depth = args32->event_queue_depth;
        fse_clone_args->fd = CAST_USER_ADDR_T(args32->fd);
        goto handle_clone;
    }

    case FSEVENTS_CLONE_64:
        if (!is64bit) {
            return EINVAL;
        }
        fse_clone_args = (fsevent_clone_args64 *)data;

handle_clone:
        if (fse_clone_args->num_events <= 0 || fse_clone_args->num_events > 4096) {
            return EINVAL;
        }

        fseh = kalloc_type(fsevent_handle, Z_WAITOK | Z_ZERO | Z_NOFAIL);

        klist_init(&fseh->knotes);

        event_list = kalloc_data(fse_clone_args->num_events * sizeof(int8_t), Z_WAITOK);
        if (event_list == NULL) {
            kfree_type(fsevent_handle, fseh);
            return ENOMEM;
        }

        error = copyin((user_addr_t)fse_clone_args->event_list,
            (void *)event_list,
            fse_clone_args->num_events * sizeof(int8_t));
        if (error) {
            kfree_data(event_list, fse_clone_args->num_events * sizeof(int8_t));
            kfree_type(fsevent_handle, fseh);
            return error;
        }

        /*
         * Lock down the user's "fd" result buffer so it's safe
         * to hold locks while we copy it out.
         */
        error = vslock((user_addr_t)fse_clone_args->fd,
            sizeof(int32_t));
        if (error) {
            kfree_data(event_list, fse_clone_args->num_events * sizeof(int8_t));
            kfree_type(fsevent_handle, fseh);
            return error;
        }

        error = add_watcher(event_list,
            fse_clone_args->num_events,
            fse_clone_args->event_queue_depth,
            &fseh->watcher,
            fseh);
        if (error) {
            vsunlock((user_addr_t)fse_clone_args->fd,
                sizeof(int32_t), 0);
            kfree_data(event_list, fse_clone_args->num_events * sizeof(int8_t));
            kfree_type(fsevent_handle, fseh);
            return error;
        }

        fseh->watcher->fseh = fseh;

        error = falloc(p, &f, &fd, vfs_context_current());
        if (error) {
            remove_watcher(fseh->watcher);
            vsunlock((user_addr_t)fse_clone_args->fd,
                sizeof(int32_t), 0);
            kfree_data(event_list, fse_clone_args->num_events * sizeof(int8_t));
            kfree_type(fsevent_handle, fseh);
            return error;
        }
        proc_fdlock(p);
        f->fp_glob->fg_flag = FREAD | FWRITE;
        f->fp_glob->fg_ops = &fsevents_fops;
        fp_set_data(f, fseh);

        /*
         * We can safely hold the proc_fdlock across this copyout()
         * because of the vslock() call above. The vslock() call
         * also ensures that we will never get an error, so assert
         * this.
         */
        error = copyout((void *)&fd, (user_addr_t)fse_clone_args->fd, sizeof(int32_t));
        assert(error == 0);

        procfdtbl_releasefd(p, fd, NULL);
        fp_drop(p, fd, f, 1);
        proc_fdunlock(p);

        vsunlock((user_addr_t)fse_clone_args->fd,
            sizeof(int32_t), 1);
        break;

    default:
        error = EINVAL;
        break;
    }

    return error;
}

static void
fsevents_wakeup(fs_event_watcher *watcher)
{
    selwakeup(&watcher->fseh->si);
    KNOTE(&watcher->fseh->knotes, NOTE_WRITE | NOTE_NONE);
    wakeup((caddr_t)watcher);
}


/*
 * A struct describing which functions will get invoked for certain
 * actions.
 */
static const struct cdevsw fsevents_cdevsw =
{
    .d_open = fseventsopen,
    .d_close = fseventsclose,
    .d_read = fseventsread,
    .d_write = fseventswrite,
    .d_ioctl = fseventsioctl,
    .d_stop = (stop_fcn_t *)&nulldev,
    .d_reset = (reset_fcn_t *)&nulldev,
    .d_select = eno_select,
    .d_mmap = eno_mmap,
    .d_strategy = eno_strat,
    .d_reserved_1 = eno_getc,
    .d_reserved_2 = eno_putc,
};


/*
 * Called to initialize our device,
 * and to register ourselves with devfs
 */

void
fsevents_init(void)
{
    int ret;

    if (fsevents_installed) {
        return;
    }

    fsevents_installed = 1;

    ret = cdevsw_add(-1, &fsevents_cdevsw);
    if (ret < 0) {
        fsevents_installed = 0;
        return;
    }

    devfs_make_node(makedev(ret, 0), DEVFS_CHAR,
        UID_ROOT, GID_WHEEL, 0644, "fsevents");

    fsevents_internal_init();
}


char *
get_pathbuff(void)
{
    return zalloc(ZV_NAMEI);
}

void
release_pathbuff(char *path)
{
    if (path == NULL) {
        return;
    }
    zfree(ZV_NAMEI, path);
}

int
get_fse_info(struct vnode *vp, fse_info *fse, __unused vfs_context_t ctx)
{
    struct vnode_attr va;

    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_fsid);
    va.va_vaflags |= VA_REALFSID;
    VATTR_WANTED(&va, va_fileid);
    VATTR_WANTED(&va, va_mode);
    VATTR_WANTED(&va, va_uid);
    VATTR_WANTED(&va, va_gid);
    if (vp->v_flag & VISHARDLINK) {
        if (vp->v_type == VDIR) {
            VATTR_WANTED(&va, va_dirlinkcount);
        } else {
            VATTR_WANTED(&va, va_nlink);
        }
    }

    if (vnode_getattr(vp, &va, vfs_context_kernel()) != 0) {
        memset(fse, 0, sizeof(fse_info));
        return -1;
    }

    return vnode_get_fse_info_from_vap(vp, fse, &va);
}

int
vnode_get_fse_info_from_vap(vnode_t vp, fse_info *fse, struct vnode_attr *vap)
{
    fse->ino = (ino64_t)vap->va_fileid;
    fse->dev = (dev_t)vap->va_fsid;
    fse->mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | vap->va_mode;
    fse->uid = (uid_t)vap->va_uid;
    fse->gid = (gid_t)vap->va_gid;
    if (vp->v_flag & VISHARDLINK) {
        fse->mode |= FSE_MODE_HLINK;
        if (vp->v_type == VDIR) {
            fse->nlink = (uint64_t)vap->va_dirlinkcount;
        } else {
            fse->nlink = (uint64_t)vap->va_nlink;
        }
    }

    return 0;
}
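
/*
 * Illustrative sketch (not part of this file): the pattern VFS code can
 * use with these helpers when it is about to generate an event.  Names
 * are schematic; the real call sites live in the VFS syscall paths.
 *
 *    fse_info finfo;
 *
 *    if (need_fsevent(FSE_DELETE, vp)) {
 *        get_fse_info(vp, &finfo, ctx);  // snapshot ino/dev/mode/uid/gid
 *        add_fsevent(FSE_DELETE, ctx,
 *            FSE_ARG_STRING, strlen(path) + 1, path,
 *            FSE_ARG_FINFO, &finfo,
 *            FSE_ARG_DONE);
 *    }
 */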

void
create_fsevent_from_kevent(vnode_t vp, uint32_t kevents, struct vnode_attr *vap)
{
    int fsevent_type = FSE_CONTENT_MODIFIED, len; // the default is the most pessimistic
    char pathbuf[MAXPATHLEN];
    fse_info fse;

    if (kevents & VNODE_EVENT_DELETE) {
        fsevent_type = FSE_DELETE;
    } else if (kevents & (VNODE_EVENT_EXTEND | VNODE_EVENT_WRITE)) {
        fsevent_type = FSE_CONTENT_MODIFIED;
    } else if (kevents & VNODE_EVENT_LINK) {
        fsevent_type = FSE_CREATE_FILE;
    } else if (kevents & VNODE_EVENT_RENAME) {
        fsevent_type = FSE_CREATE_FILE; // XXXdbg - should use FSE_RENAME but we don't have the destination info
    } else if (kevents & (VNODE_EVENT_FILE_CREATED | VNODE_EVENT_FILE_REMOVED | VNODE_EVENT_DIR_CREATED | VNODE_EVENT_DIR_REMOVED)) {
        fsevent_type = FSE_STAT_CHANGED; // XXXdbg - because vp is a dir and the thing created/removed lived inside it
    } else { // a catch all for VNODE_EVENT_PERMS, VNODE_EVENT_ATTRIB and anything else
        fsevent_type = FSE_STAT_CHANGED;
    }

    // printf("convert_kevent: kevents 0x%x fsevent type 0x%x (for %s)\n", kevents, fsevent_type, vp->v_name ? vp->v_name : "(no-name)");

    fse.dev = vap->va_fsid;
    fse.ino = vap->va_fileid;
    fse.mode = vnode_vttoif(vnode_vtype(vp)) | (uint32_t)vap->va_mode;
    if (vp->v_flag & VISHARDLINK) {
        fse.mode |= FSE_MODE_HLINK;
        if (vp->v_type == VDIR) {
            fse.nlink = vap->va_dirlinkcount;
        } else {
            fse.nlink = vap->va_nlink;
        }
    }

    if (vp->v_type == VDIR) {
        fse.mode |= FSE_REMOTE_DIR_EVENT;
    }

    fse.uid = vap->va_uid;
    fse.gid = vap->va_gid;

    len = sizeof(pathbuf);
    if (vn_getpath_no_firmlink(vp, pathbuf, &len) == 0) {
        add_fsevent(fsevent_type, vfs_context_current(), FSE_ARG_STRING, len, pathbuf, FSE_ARG_FINFO, &fse, FSE_ARG_DONE);
    }
    return;
}

#else /* CONFIG_FSE */

#include <sys/fsevents.h>

/*
 * The get_pathbuff and release_pathbuff routines are used in places not
 * related to fsevents, and it's a handy abstraction, so define trivial
 * versions that don't cache a pool of buffers. This way, we don't have
 * to conditionalize the callers, and they still get the advantage of the
 * pool of buffers if CONFIG_FSE is turned on.
 */
char *
get_pathbuff(void)
{
    return zalloc(ZV_NAMEI);
}

void
release_pathbuff(char *path)
{
    zfree(ZV_NAMEI, path);
}

int
add_fsevent(__unused int type, __unused vfs_context_t ctx, ...)
{
    return 0;
}

int
need_fsevent(__unused int type, __unused vnode_t vp)
{
    return 0;
}

#endif /* CONFIG_FSE */