1 /*
2 * Copyright (c) 2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach/error.h>
30 #include <mach/mach_types.h>
31 #include <mach/mach_traps.h>
32 #include <mach/mach_vm_server.h>
33 #include <mach/mach_port_server.h>
34 #include <mach/mach_host_server.h>
35 #include <mach/mach_voucher_server.h>
36 #include <mach/vm_map.h>
37 #include <mach/mach_vm.h>
38 #include <kern/task.h>
39 #include <kern/ipc_tt.h>
40 #include <kern/kalloc.h>
41 #include <ipc/ipc_space.h>
42 #include <vm/vm_protos.h>
43 #include <vm/vm_kern_xnu.h>
44 #include <vm/vm_reclaim_internal.h>
45 #include <vm/vm_sanitize_internal.h>
46 #include <kdp/kdp_dyld.h>
47
/*
 * Forward declaration of the MIG server routine; used directly by
 * _kernelrpc_mach_port_get_attributes_trap() below.
 */
kern_return_t
mach_port_get_attributes(
	ipc_space_t space,
	mach_port_name_t name,
	int flavor,
	mach_port_info_t info,
	mach_msg_type_number_t *count);

/*
 * Serializes dyld process-info notification updates; taken by
 * task_dyld_process_info_notify_get_trap() below. Defined elsewhere.
 */
extern lck_mtx_t g_dyldinfo_mtx;
57
58 int
_kernelrpc_mach_vm_allocate_trap(struct _kernelrpc_mach_vm_allocate_trap_args * args)59 _kernelrpc_mach_vm_allocate_trap(struct _kernelrpc_mach_vm_allocate_trap_args *args)
60 {
61 mach_vm_offset_t addr;
62 task_t task = port_name_to_current_task_noref(args->target);
63 int rv = MACH_SEND_INVALID_DEST;
64
65 if (task) {
66 if ((rv = mach_copyin(args->addr, (char *)&addr, sizeof(addr)))) {
67 goto done;
68 }
69
70 rv = mach_vm_allocate_external(task->map, &addr, args->size, args->flags);
71 if (rv == KERN_SUCCESS) {
72 rv = mach_copyout(&addr, args->addr, sizeof(addr));
73 }
74 }
75
76 done:
77 return rv;
78 }
79
80 int
_kernelrpc_mach_vm_deallocate_trap(struct _kernelrpc_mach_vm_deallocate_args * args)81 _kernelrpc_mach_vm_deallocate_trap(struct _kernelrpc_mach_vm_deallocate_args *args)
82 {
83 task_t task = port_name_to_current_task_noref(args->target);
84 int rv = MACH_SEND_INVALID_DEST;
85
86 if (task) {
87 rv = mach_vm_deallocate(task->map, args->address, args->size);
88 }
89
90 return rv;
91 }
92
93 int
_kernelrpc_mach_vm_protect_trap(struct _kernelrpc_mach_vm_protect_args * args)94 _kernelrpc_mach_vm_protect_trap(struct _kernelrpc_mach_vm_protect_args *args)
95 {
96 task_t task = port_name_to_current_task_noref(args->target);
97 int rv = MACH_SEND_INVALID_DEST;
98
99 if (task) {
100 rv = mach_vm_protect(task->map, args->address, args->size,
101 args->set_maximum, args->new_protection);
102 }
103
104 return rv;
105 }
106
107 int
_kernelrpc_mach_vm_map_trap(struct _kernelrpc_mach_vm_map_trap_args * args)108 _kernelrpc_mach_vm_map_trap(struct _kernelrpc_mach_vm_map_trap_args *args)
109 {
110 task_t task = port_name_to_current_task_noref(args->target);
111 mach_vm_offset_t addr;
112 int rv = MACH_SEND_INVALID_DEST;
113
114 if (!task) {
115 goto done;
116 }
117
118 if ((rv = mach_copyin(args->addr, (char *)&addr, sizeof(addr)))) {
119 goto done;
120 }
121
122 rv = mach_vm_map_external(task->map, &addr, args->size,
123 args->mask, args->flags, IPC_PORT_NULL, 0, FALSE,
124 args->cur_protection, VM_PROT_ALL, VM_INHERIT_DEFAULT);
125 if (rv == KERN_SUCCESS) {
126 rv = mach_copyout(&addr, args->addr, sizeof(addr));
127 }
128
129 done:
130 return rv;
131 }
132
/*
 * Trap: get or set the purgeability state of a VM object in the
 * target task's map. args->state is in/out user memory.
 */
int
_kernelrpc_mach_vm_purgable_control_trap(
	struct _kernelrpc_mach_vm_purgable_control_trap_args *args)
{
	int state;
	task_t task;
	int rv = MACH_SEND_INVALID_DEST;

	/*
	 * GET_STATE does not mutate the map, so a read-capable task port
	 * is accepted; every other control operation requires the full
	 * (control) task port.
	 */
	if (args->control == VM_PURGABLE_GET_STATE) {
		task = port_name_to_current_task_read_noref(args->target);
	} else {
		task = port_name_to_current_task_noref(args->target);
	}

	if (!task) {
		goto done;
	}

	/* The state word is read even for GET_STATE; copy it in up front. */
	if ((rv = mach_copyin(args->state, (char *)&state, sizeof(state)))) {
		goto done;
	}

	rv = mach_vm_purgable_control(task->map,
	    args->address,
	    args->control,
	    &state);
	if (rv == KERN_SUCCESS) {
		/* Return the (possibly updated) state to the caller. */
		rv = mach_copyout(&state, args->state, sizeof(state));
	}

done:
	return rv;
}
166
167 int
_kernelrpc_mach_port_allocate_trap(struct _kernelrpc_mach_port_allocate_args * args)168 _kernelrpc_mach_port_allocate_trap(struct _kernelrpc_mach_port_allocate_args *args)
169 {
170 task_t task = port_name_to_current_task_noref(args->target);
171 mach_port_name_t name;
172 int rv = MACH_SEND_INVALID_DEST;
173
174 if (task) {
175 rv = mach_port_allocate(task->itk_space, args->right, &name);
176 if (rv == KERN_SUCCESS) {
177 rv = mach_copyout(&name, args->name, sizeof(name));
178 }
179 }
180
181 return rv;
182 }
183
184 int
_kernelrpc_mach_port_deallocate_trap(struct _kernelrpc_mach_port_deallocate_args * args)185 _kernelrpc_mach_port_deallocate_trap(struct _kernelrpc_mach_port_deallocate_args *args)
186 {
187 task_t task = port_name_to_current_task_noref(args->target);
188 int rv = MACH_SEND_INVALID_DEST;
189
190 if (task) {
191 rv = mach_port_deallocate(task->itk_space, args->name);
192 }
193
194 return rv;
195 }
196
197 int
_kernelrpc_mach_port_mod_refs_trap(struct _kernelrpc_mach_port_mod_refs_args * args)198 _kernelrpc_mach_port_mod_refs_trap(struct _kernelrpc_mach_port_mod_refs_args *args)
199 {
200 task_t task = port_name_to_current_task_noref(args->target);
201 int rv = MACH_SEND_INVALID_DEST;
202
203 if (task) {
204 rv = mach_port_mod_refs(task->itk_space,
205 args->name, args->right, args->delta);
206 }
207
208 return rv;
209 }
210
211
212 int
_kernelrpc_mach_port_move_member_trap(struct _kernelrpc_mach_port_move_member_args * args)213 _kernelrpc_mach_port_move_member_trap(struct _kernelrpc_mach_port_move_member_args *args)
214 {
215 task_t task = port_name_to_current_task_noref(args->target);
216 int rv = MACH_SEND_INVALID_DEST;
217
218 if (task) {
219 rv = mach_port_move_member(task->itk_space,
220 args->member, args->after);
221 }
222
223 return rv;
224 }
225
/*
 * Trap: insert a right derived from args->poly (with disposition
 * args->polyPoly) under the name args->name in the target task's space.
 */
int
_kernelrpc_mach_port_insert_right_trap(struct _kernelrpc_mach_port_insert_right_args *args)
{
	task_t task = port_name_to_current_task_noref(args->target);
	ipc_port_t port;
	mach_msg_type_name_t disp;
	int rv = MACH_SEND_INVALID_DEST;

	if (!task) {
		goto done;
	}

	/*
	 * Inserting under the same name the right already has: handle the
	 * send-right dispositions in place without a full copyin/insert.
	 */
	if (args->name == args->poly) {
		switch (args->polyPoly) {
		case MACH_MSG_TYPE_MAKE_SEND:
		case MACH_MSG_TYPE_COPY_SEND:
			/* fastpath MAKE_SEND / COPY_SEND which is the most common case */
			rv = ipc_object_insert_send_right(task->itk_space, args->poly,
			    args->polyPoly);
			goto done;

		default:
			break;
		}
	}

	/* Slow path: copy the right in; on success we own a ref on `port`. */
	rv = ipc_object_copyin(task->itk_space, args->poly, args->polyPoly,
	    IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND, IPC_COPYIN_KERNEL_DESTINATION, NULL, &port);
	if (rv != KERN_SUCCESS) {
		goto done;
	}
	disp = ipc_object_copyin_type(args->polyPoly);

	rv = mach_port_insert_right(task->itk_space, args->name, port, disp);
	/* On failure the copied-in right was not consumed; destroy it. */
	if (rv != KERN_SUCCESS && IP_VALID(port)) {
		ipc_object_destroy(port, disp);
	}

done:
	return rv;
}
267
/*
 * Trap: fetch port attributes for args->name into a user buffer.
 * args->count is in/out: on input the number of info slots the caller
 * provided, on output the number actually filled.
 */
int
_kernelrpc_mach_port_get_attributes_trap(struct _kernelrpc_mach_port_get_attributes_args *args)
{
	/* Read-only operation: a read-capable task port is sufficient. */
	task_read_t task = port_name_to_current_task_read_noref(args->target);
	int rv = MACH_SEND_INVALID_DEST;
	mach_msg_type_number_t count;

	// MIG does not define the type or size of the mach_port_info_t out array
	// anywhere, so derive them from the field in the generated reply struct
#define MACH_PORT_INFO_OUT (((__Reply__mach_port_get_attributes_from_user_t*)NULL)->port_info_out)
#define MACH_PORT_INFO_STACK_LIMIT 80 // current size is 68 == 17 * sizeof(integer_t)
	_Static_assert(sizeof(MACH_PORT_INFO_OUT) < MACH_PORT_INFO_STACK_LIMIT,
	    "mach_port_info_t has grown significantly, reevaluate stack usage");
	const mach_msg_type_number_t max_count = (sizeof(MACH_PORT_INFO_OUT) / sizeof(MACH_PORT_INFO_OUT[0]));
	typeof(MACH_PORT_INFO_OUT[0]) info[max_count];

	if (!task) {
		goto done;
	}

	/*
	 * zero out our stack buffer because not all flavors of
	 * port_get_attributes initialize the whole struct
	 */
	bzero(info, sizeof(MACH_PORT_INFO_OUT));

	if ((rv = mach_copyin(CAST_USER_ADDR_T(args->count), &count, sizeof(count)))) {
		goto done;
	}
	/* Clamp to the kernel-side maximum; never trust the user count. */
	if (count > max_count) {
		count = max_count;
	}

	rv = mach_port_get_attributes(task->itk_space, args->name, args->flavor, info, &count);
	if (rv == KERN_SUCCESS) {
		/* Report the number of slots actually produced. */
		rv = mach_copyout(&count, CAST_USER_ADDR_T(args->count), sizeof(count));
	}
	if (rv == KERN_SUCCESS && count > 0) {
		rv = mach_copyout(info, CAST_USER_ADDR_T(args->info), count * sizeof(info[0]));
	}

done:
	return rv;
}
312
313 int
_kernelrpc_mach_port_insert_member_trap(struct _kernelrpc_mach_port_insert_member_args * args)314 _kernelrpc_mach_port_insert_member_trap(struct _kernelrpc_mach_port_insert_member_args *args)
315 {
316 task_t task = port_name_to_current_task_noref(args->target);
317 int rv = MACH_SEND_INVALID_DEST;
318
319 if (task) {
320 rv = mach_port_insert_member(task->itk_space,
321 args->name, args->pset);
322 }
323
324 return rv;
325 }
326
327
328 int
_kernelrpc_mach_port_extract_member_trap(struct _kernelrpc_mach_port_extract_member_args * args)329 _kernelrpc_mach_port_extract_member_trap(struct _kernelrpc_mach_port_extract_member_args *args)
330 {
331 task_t task = port_name_to_current_task_noref(args->target);
332 int rv = MACH_SEND_INVALID_DEST;
333
334 if (task) {
335 rv = mach_port_extract_member(task->itk_space,
336 args->name, args->pset);
337 }
338
339 return rv;
340 }
341
342 int
_kernelrpc_mach_port_construct_trap(struct _kernelrpc_mach_port_construct_args * args)343 _kernelrpc_mach_port_construct_trap(struct _kernelrpc_mach_port_construct_args *args)
344 {
345 task_t task = port_name_to_current_task_noref(args->target);
346 mach_port_name_t name;
347 int rv = MACH_SEND_INVALID_DEST;
348 mach_port_options_t options;
349
350 if (!task) {
351 goto done;
352 }
353
354 if ((rv = mach_copyin(args->options, (char *)&options, sizeof(options)))) {
355 goto done;
356 }
357
358 rv = mach_port_construct(task->itk_space, &options, args->context, &name);
359 if (rv == KERN_SUCCESS) {
360 rv = mach_copyout(&name, args->name, sizeof(name));
361 }
362
363 done:
364 return rv;
365 }
366
367 int
_kernelrpc_mach_port_destruct_trap(struct _kernelrpc_mach_port_destruct_args * args)368 _kernelrpc_mach_port_destruct_trap(struct _kernelrpc_mach_port_destruct_args *args)
369 {
370 task_t task = port_name_to_current_task_noref(args->target);
371 int rv = MACH_SEND_INVALID_DEST;
372
373 if (task) {
374 rv = mach_port_destruct(task->itk_space,
375 args->name, args->srdelta, args->guard);
376 }
377
378 return rv;
379 }
380
381 int
_kernelrpc_mach_port_guard_trap(struct _kernelrpc_mach_port_guard_args * args)382 _kernelrpc_mach_port_guard_trap(struct _kernelrpc_mach_port_guard_args *args)
383 {
384 task_t task = port_name_to_current_task_noref(args->target);
385 int rv = MACH_SEND_INVALID_DEST;
386
387 if (task) {
388 rv = mach_port_guard(task->itk_space,
389 args->name, args->guard, args->strict);
390 }
391
392 return rv;
393 }
394
395 int
_kernelrpc_mach_port_unguard_trap(struct _kernelrpc_mach_port_unguard_args * args)396 _kernelrpc_mach_port_unguard_trap(struct _kernelrpc_mach_port_unguard_args *args)
397 {
398 task_t task = port_name_to_current_task_noref(args->target);
399 int rv = MACH_SEND_INVALID_DEST;
400
401 if (task) {
402 rv = mach_port_unguard(task->itk_space, args->name, args->guard);
403 }
404
405 return rv;
406 }
407
408 int
_kernelrpc_mach_port_type_trap(struct _kernelrpc_mach_port_type_args * args)409 _kernelrpc_mach_port_type_trap(struct _kernelrpc_mach_port_type_args *args)
410 {
411 task_t task = port_name_to_current_task_noref(args->target);
412 int rv = MACH_SEND_INVALID_DEST;
413 mach_port_type_t type;
414
415 if (task) {
416 rv = mach_port_type(task->itk_space, args->name, &type);
417 if (rv == KERN_SUCCESS) {
418 rv = mach_copyout(&type, args->ptype, sizeof(type));
419 }
420 }
421
422 return rv;
423 }
424
425 int
_kernelrpc_mach_port_request_notification_trap(struct _kernelrpc_mach_port_request_notification_args * args)426 _kernelrpc_mach_port_request_notification_trap(
427 struct _kernelrpc_mach_port_request_notification_args *args)
428 {
429 task_t task = port_name_to_current_task_noref(args->target);
430 int rv = MACH_SEND_INVALID_DEST;
431 ipc_port_t notify, previous;
432 mach_msg_type_name_t disp;
433 mach_port_name_t previous_name = MACH_PORT_NULL;
434
435 if (!task) {
436 goto done;
437 }
438
439 disp = ipc_object_copyin_type(args->notifyPoly);
440 if (disp != MACH_MSG_TYPE_PORT_SEND_ONCE) {
441 goto done;
442 }
443
444 if (MACH_PORT_VALID(args->notify)) {
445 rv = ipc_object_copyin(task->itk_space, args->notify, args->notifyPoly,
446 IPC_OBJECT_COPYIN_FLAGS_NONE, IPC_COPYIN_KERNEL_DESTINATION, NULL, ¬ify);
447 } else {
448 notify = CAST_MACH_NAME_TO_PORT(args->notify);
449 }
450 if (rv != KERN_SUCCESS) {
451 goto done;
452 }
453
454 rv = mach_port_request_notification(task->itk_space, args->name,
455 args->msgid, args->sync, notify, &previous);
456 if (rv != KERN_SUCCESS) {
457 if (IP_VALID(notify)) {
458 ipc_object_destroy(notify, disp);
459 }
460 goto done;
461 }
462
463 if (IP_VALID(previous)) {
464 // Remove once <rdar://problem/45522961> is fixed.
465 // We need to make ith_knote NULL as ipc_object_copyout() uses
466 // thread-argument-passing and its value should not be garbage
467 current_thread()->ith_knote = ITH_KNOTE_NULL;
468 rv = ipc_object_copyout(task->itk_space, previous,
469 MACH_MSG_TYPE_PORT_SEND_ONCE, IPC_OBJECT_COPYOUT_FLAGS_NONE,
470 NULL, &previous_name);
471 if (rv != KERN_SUCCESS) {
472 goto done;
473 }
474 }
475
476 rv = mach_copyout(&previous_name, args->previous, sizeof(previous_name));
477
478 done:
479 return rv;
480 }
481
/*
 * Trap: create a voucher from a user-supplied recipe array and copy the
 * resulting voucher port name out to args->voucher.
 */
kern_return_t
host_create_mach_voucher_trap(struct host_create_mach_voucher_args *args)
{
	host_t host = port_name_to_host(args->host);
	ipc_voucher_t new_voucher = IV_NULL;
	ipc_port_t voucher_port = IPC_PORT_NULL;
	mach_port_name_t voucher_name = 0;
	kern_return_t kr = KERN_SUCCESS;

	if (host == HOST_NULL) {
		return MACH_SEND_INVALID_DEST;
	}
	/* Reject negative and oversized recipe arrays before allocating. */
	if (args->recipes_size < 0) {
		return KERN_INVALID_ARGUMENT;
	}
	if (args->recipes_size > MACH_VOUCHER_ATTR_MAX_RAW_RECIPE_ARRAY_SIZE) {
		return MIG_ARRAY_TOO_LARGE;
	}

	/* keep small recipes on the stack for speed */
	uint8_t buf[MACH_VOUCHER_TRAP_STACK_LIMIT];
	uint8_t *krecipes = buf;

	/* Larger (but still bounded) recipes go to the heap. */
	if (args->recipes_size > MACH_VOUCHER_TRAP_STACK_LIMIT) {
		krecipes = kalloc_data(args->recipes_size, Z_WAITOK);
		if (krecipes == NULL) {
			return KERN_RESOURCE_SHORTAGE;
		}
	}

	if ((kr = mach_copyin(CAST_USER_ADDR_T(args->recipes), (void *)krecipes, args->recipes_size))) {
		goto done;
	}

	kr = host_create_mach_voucher(host, krecipes, args->recipes_size, &new_voucher);
	if (kr != KERN_SUCCESS) {
		goto done;
	}

	/* Convert the voucher to a port and hand a name to the caller's space. */
	voucher_port = convert_voucher_to_port(new_voucher);
	voucher_name = ipc_port_copyout_send(voucher_port, current_space());

	kr = mach_copyout(&voucher_name, args->voucher, sizeof(voucher_name));

done:
	/* Free only when the heap path was taken (same condition as above). */
	if (args->recipes_size > MACH_VOUCHER_TRAP_STACK_LIMIT) {
		kfree_data(krecipes, args->recipes_size);
	}

	return kr;
}
533
/*
 * Trap: extract a single attribute recipe from a voucher into a
 * user buffer. args->recipe_size is in/out: buffer capacity in,
 * bytes produced out.
 */
kern_return_t
mach_voucher_extract_attr_recipe_trap(struct mach_voucher_extract_attr_recipe_args *args)
{
	ipc_voucher_t voucher = IV_NULL;
	kern_return_t kr = KERN_SUCCESS;
	mach_msg_type_number_t sz = 0;

	/* Read the caller's buffer size first; it bounds all later copies. */
	if ((kr = mach_copyin(args->recipe_size, (void *)&sz, sizeof(sz)))) {
		return kr;
	}

	if (sz > MACH_VOUCHER_ATTR_MAX_RAW_RECIPE_ARRAY_SIZE) {
		return MIG_ARRAY_TOO_LARGE;
	}

	/* Takes a voucher reference; released at the end of the function. */
	voucher = convert_port_name_to_voucher(args->voucher_name);
	if (voucher == IV_NULL) {
		return MACH_SEND_INVALID_DEST;
	}

	/* keep small recipes on the stack for speed */
	uint8_t buf[MACH_VOUCHER_TRAP_STACK_LIMIT];
	uint8_t *krecipe = buf;
	mach_msg_type_number_t max_sz = sz;

	if (max_sz > MACH_VOUCHER_TRAP_STACK_LIMIT) {
		krecipe = kalloc_data(max_sz, Z_WAITOK);
		if (!krecipe) {
			kr = KERN_RESOURCE_SHORTAGE;
			goto done;
		}
	}

	if ((kr = mach_copyin(CAST_USER_ADDR_T(args->recipe), (void *)krecipe, max_sz))) {
		goto done;
	}

	kr = mach_voucher_extract_attr_recipe(voucher, args->key,
	    (mach_voucher_attr_raw_recipe_t)krecipe, &sz);
	assert(sz <= max_sz);

	/* Copy the recipe bytes out, then the actual size. */
	if (kr == KERN_SUCCESS && sz > 0) {
		kr = mach_copyout(krecipe, CAST_USER_ADDR_T(args->recipe), sz);
	}
	if (kr == KERN_SUCCESS) {
		kr = mach_copyout(&sz, args->recipe_size, sizeof(sz));
	}


done:
	/* Free only when the heap path was taken (same condition as above). */
	if (max_sz > MACH_VOUCHER_TRAP_STACK_LIMIT) {
		kfree_data(krecipe, max_sz);
	}

	/* Drop the reference taken by convert_port_name_to_voucher(). */
	ipc_voucher_release(voucher);
	return kr;
}
591
592 /*
593 * Mach Trap: task_dyld_process_info_notify_get_trap
594 *
595 * Return an array of active dyld notifier port names for current_task(). User
596 * is responsible for allocating the memory for the mach port names array
597 * and deallocating the port names inside the array returned.
598 *
599 * Does not consume any reference.
600 *
601 * Args:
602 * names_addr: Address for mach port names array. (In param only)
603 * names_count_addr: Number of active dyld notifier ports. (In-Out param)
604 * In: Number of slots available for copyout in caller
605 * Out: Actual number of ports copied out
606 *
607 * Returns:
608 *
609 * KERN_SUCCESS: A valid namesCnt is returned. (Can be zero)
610 * KERN_INVALID_ARGUMENT: Arguments are invalid.
611 * KERN_MEMORY_ERROR: Memory copyio operations failed.
612 * KERN_NO_SPACE: User allocated memory for port names copyout is insufficient.
613 *
614 * Other error code see task_info().
615 */
kern_return_t
task_dyld_process_info_notify_get_trap(struct task_dyld_process_info_notify_get_trap_args *args)
{
	struct task_dyld_info dyld_info;
	mach_msg_type_number_t info_count = TASK_DYLD_INFO_COUNT;
	mach_port_name_t copyout_names[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
	ipc_port_t copyout_ports[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
	ipc_port_t release_ports[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
	uint32_t copyout_count = 0, release_count = 0, active_count = 0;
	mach_vm_address_t ports_addr; /* a user space address */
	mach_port_name_t new_name;
	natural_t user_names_count = 0;
	ipc_port_t sright;
	kern_return_t kr;
	ipc_port_t *portp;
	ipc_entry_t entry;

	/* Both user pointers are required (names array and in/out count). */
	if ((mach_port_name_array_t)args->names_addr == NULL ||
	    (natural_t *)args->names_count_addr == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = mach_copyin((vm_map_address_t)args->names_count_addr, &user_names_count,
	    sizeof(natural_t));
	if (kr) {
		return kr;
	}

	/* Caller offered zero slots; nothing can be copied out. */
	if (user_names_count == 0) {
		return KERN_NO_SPACE;
	}

	kr = task_info(current_task(), TASK_DYLD_INFO, (task_info_t)&dyld_info, &info_count);
	if (kr) {
		return kr;
	}

	/*
	 * Locate the notifyMachPorts field inside the task's
	 * dyld_all_image_infos, whose layout depends on 32- vs 64-bit format.
	 */
	if (dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32) {
		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
		    offsetof(struct user32_dyld_all_image_infos, notifyMachPorts));
	} else {
		ports_addr = (mach_vm_address_t)(dyld_info.all_image_info_addr +
		    offsetof(struct user64_dyld_all_image_infos, notifyMachPorts));
	}

	/* Lock order: g_dyldinfo_mtx before the task's itk lock. */
	lck_mtx_lock(&g_dyldinfo_mtx);
	itk_lock(current_task());

	if (current_task()->itk_dyld_notify == NULL) {
		/* No notifier array: reset the user-space magic and report zero. */
		itk_unlock(current_task());
		(void)copyoutmap_atomic32(current_task()->map, MACH_PORT_NULL,
		    (vm_map_address_t)ports_addr); /* reset magic */
		lck_mtx_unlock(&g_dyldinfo_mtx);

		kr = mach_copyout(&copyout_count, (vm_map_address_t)args->names_count_addr,
		    sizeof(natural_t));
		return kr;
	}

	/*
	 * Walk every notifier slot: live ports get a send right queued for
	 * copyout; dead ones are pulled from the array for release.
	 */
	for (int slot = 0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; slot++) {
		portp = &current_task()->itk_dyld_notify[slot];
		if (*portp == IPC_PORT_NULL) {
			continue;
		} else {
			sright = ipc_port_copy_send_mqueue(*portp);
			if (IP_VALID(sright)) {
				copyout_ports[active_count++] = sright; /* donates */
				sright = IPC_PORT_NULL;
			} else {
				release_ports[release_count++] = *portp; /* donates */
				*portp = IPC_PORT_NULL;
			}
		}
	}

	task_dyld_process_info_update_helper(current_task(), active_count,
	    (vm_map_address_t)ports_addr, release_ports, release_count);
	/* itk_lock, g_dyldinfo_mtx are unlocked upon return */

	for (int i = 0; i < active_count; i++) {
		sright = copyout_ports[i]; /* donates */
		copyout_ports[i] = IPC_PORT_NULL;

		assert(IP_VALID(sright));
		ip_reference(sright);
		/*
		 * Below we consume each send right in copyout_ports, and if copyout_send
		 * succeeds, replace it with a port ref; otherwise release the port ref.
		 *
		 * We can reuse copyout_ports array for this purpose since
		 * copyout_count <= active_count.
		 */
		new_name = ipc_port_copyout_send(sright, current_space()); /* consumes */
		if (MACH_PORT_VALID(new_name)) {
			copyout_names[copyout_count] = new_name;
			copyout_ports[copyout_count] = sright; /* now holds port ref */
			copyout_count++;
		} else {
			ip_release(sright);
		}
	}

	assert(copyout_count <= active_count);

	/* Caller's buffer is too small for the names we already created. */
	if (user_names_count < copyout_count) {
		kr = KERN_NO_SPACE;
		goto copyout_failed;
	}

	/* copyout to caller's local copy */
	kr = mach_copyout(copyout_names, (vm_map_address_t)args->names_addr,
	    copyout_count * sizeof(mach_port_name_t));
	if (kr) {
		goto copyout_failed;
	}

	kr = mach_copyout(&copyout_count, (vm_map_address_t)args->names_count_addr,
	    sizeof(natural_t));
	if (kr) {
		goto copyout_failed;
	}

	/* now, release port refs on copyout_ports */
	for (int i = 0; i < copyout_count; i++) {
		sright = copyout_ports[i];
		assert(IP_VALID(sright));
		ip_release(sright);
	}

	return KERN_SUCCESS;


copyout_failed:
	/*
	 * No locks are held beyond this point.
	 *
	 * Release port refs on copyout_ports, and deallocate ports that we copied out
	 * earlier.
	 */
	for (int i = 0; i < copyout_count; i++) {
		sright = copyout_ports[i];
		assert(IP_VALID(sright));

		if (ipc_right_lookup_write(current_space(), copyout_names[i], &entry)) {
			/* userspace has deallocated the name we copyout */
			ip_release(sright);
			continue;
		}
		/* space is locked and active */
		if (entry->ie_port == sright ||
		    IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_DEAD_NAME) {
			(void)ipc_right_dealloc(current_space(), copyout_names[i], entry); /* unlocks space */
		} else {
			is_write_unlock(current_space());
		}

		/* space is unlocked */
		ip_release(sright);
	}

	return kr;
}
778
779 #if __LP64__
780 mach_error_t
mach_vm_reclaim_update_kernel_accounting_trap(struct mach_vm_reclaim_update_kernel_accounting_trap_args * args)781 mach_vm_reclaim_update_kernel_accounting_trap(
782 struct mach_vm_reclaim_update_kernel_accounting_trap_args *args)
783 {
784 task_t task = port_name_to_current_task_noref(args->target_task);
785 if (task == TASK_NULL) {
786 return MACH_SEND_INVALID_DEST;
787 }
788 if (args->bytes_reclaimed_out == USER_ADDR_NULL) {
789 return KERN_INVALID_ARGUMENT;
790 }
791 #if CONFIG_DEFERRED_RECLAIM
792 mach_error_t err;
793 uint64_t bytes_reclaimed;
794 err = vm_deferred_reclamation_update_accounting_internal(
795 task, &bytes_reclaimed);
796 if (!err) {
797 mach_copyout(&bytes_reclaimed, (user_addr_t)args->bytes_reclaimed_out, sizeof(bytes_reclaimed));
798 }
799 return err;
800 #else /* !CONFIG_DEFERRED_RECLAIM */
801 return KERN_NOT_SUPPORTED;
802 #endif /* CONFIG_DEFERRED_RECLAIM */
803 }
804 #endif /* __LP64__ */
805