1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kern_subr.c 8.3 (Berkeley) 1/21/94
67 */
68
69 #include <machine/atomic.h>
70
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/proc_internal.h>
74 #include <sys/malloc.h>
75 #include <sys/queue.h>
76 #include <vm/pmap.h>
77 #include <sys/uio_internal.h>
78 #include <kern/kalloc.h>
79
80 #include <kdebug.h>
81
82 #include <sys/kdebug.h>
83 #define DBG_UIO_COPYOUT 16
84 #define DBG_UIO_COPYIN 17
85
86 #if DEBUG
87 #include <kern/simple_lock.h>
88
89 static uint32_t uio_t_count = 0;
90 #endif /* DEBUG */
91
/*
 * Segment flag classification helpers.
 *
 * IS_VALID_UIO_SEGFLG: true for every address space flavor this file
 * handles (user / system / physical, legacy and explicit 32/64-bit forms).
 * IS_SYS_SPACE_SEGFLG: true for kernel (system) virtual space flags.
 * IS_PHYS_SEGFLG:      true for physical address space flags.
 */
#define IS_VALID_UIO_SEGFLG(segflg)	\
	( (segflg) == UIO_USERSPACE ||	\
	(segflg) == UIO_SYSSPACE ||	\
	(segflg) == UIO_USERSPACE32 ||	\
	(segflg) == UIO_USERSPACE64 ||	\
	(segflg) == UIO_SYSSPACE32 ||	\
	(segflg) == UIO_USERISPACE ||	\
	(segflg) == UIO_PHYS_USERSPACE ||	\
	(segflg) == UIO_PHYS_SYSSPACE ||	\
	(segflg) == UIO_USERISPACE32 ||	\
	(segflg) == UIO_PHYS_USERSPACE32 ||	\
	(segflg) == UIO_USERISPACE64 ||	\
	(segflg) == UIO_PHYS_USERSPACE64 )

#define IS_SYS_SPACE_SEGFLG(segflg)	\
	( (segflg) == UIO_SYSSPACE ||	\
	(segflg) == UIO_PHYS_SYSSPACE ||	\
	(segflg) == UIO_SYSSPACE32 )

#define IS_PHYS_SEGFLG(segflg)	\
	( (segflg) == UIO_PHYS_USERSPACE ||	\
	(segflg) == UIO_PHYS_SYSSPACE ||	\
	(segflg) == UIO_PHYS_USERSPACE64 ||	\
	(segflg) == UIO_PHYS_USERSPACE32 )
116
117 #if __has_feature(ptrauth_calls)
118 __attribute__((always_inline))
119 static u_int64_t
blend_iov_components(const struct kern_iovec * kiovp)120 blend_iov_components(const struct kern_iovec *kiovp)
121 {
122 return ptrauth_blend_discriminator(
123 (void *)((u_int64_t)&kiovp->iov_base ^ kiovp->iov_len),
124 ptrauth_string_discriminator("kiovp"));
125 }
126 #endif
127
128 __attribute__((always_inline))
129 static u_int64_t
kiovp_get_base(const struct kern_iovec * kiovp)130 kiovp_get_base(const struct kern_iovec *kiovp)
131 {
132 #if __has_feature(ptrauth_calls)
133 if (kiovp->iov_base == 0) {
134 return 0;
135 } else {
136 return (u_int64_t)ptrauth_auth_data((void *)kiovp->iov_base,
137 ptrauth_key_process_independent_data,
138 blend_iov_components(kiovp));
139 }
140 #else
141 return kiovp->iov_base;
142 #endif
143 }
144
145 __attribute__((always_inline))
146 static void
kiovp_set_base(struct kern_iovec * kiovp,u_int64_t addr)147 kiovp_set_base(struct kern_iovec *kiovp, u_int64_t addr)
148 {
149 #if __has_feature(ptrauth_calls)
150 if (addr == 0) {
151 kiovp->iov_base = 0;
152 } else {
153 kiovp->iov_base = (u_int64_t)ptrauth_sign_unauthenticated(
154 (void *)addr, ptrauth_key_process_independent_data,
155 blend_iov_components(kiovp));
156 }
157 #else
158 kiovp->iov_base = addr;
159 #endif
160 }
161
162 static struct kern_iovec *
uio_kiovp(uio_t uio)163 uio_kiovp(uio_t uio)
164 {
165 if (!UIO_IS_SYS_SPACE(uio)) {
166 panic("%s: uio is not sys space", __func__);
167 }
168
169 return (struct kern_iovec *)uio->uio_iovs;
170 }
171
172 static struct user_iovec *
uio_uiovp(uio_t uio)173 uio_uiovp(uio_t uio)
174 {
175 if (!UIO_IS_USER_SPACE(uio)) {
176 panic("%s: uio is not user space", __func__);
177 }
178
179 return (struct user_iovec *)uio->uio_iovs;
180 }
181
182 static void *
uio_advance(uio_t uio)183 uio_advance(uio_t uio)
184 {
185 if (UIO_IS_USER_SPACE(uio)) {
186 uio->uio_iovs = (void *)((uintptr_t)uio->uio_iovs + sizeof(struct user_iovec));
187 } else {
188 uio->uio_iovs = (void *)((uintptr_t)uio->uio_iovs + sizeof(struct kern_iovec));
189 }
190
191 return uio->uio_iovs;
192 }
193
194 /*
195 * Returns: 0 Success
196 * uiomove64:EFAULT
197 *
198 * Notes: The first argument should be a caddr_t, but const poisoning
199 * for typedef'ed types doesn't work in gcc.
200 */
201 int
uiomove(const char * cp,int n,uio_t uio)202 uiomove(const char * cp, int n, uio_t uio)
203 {
204 return uiomove64((const addr64_t)(uintptr_t)cp, n, uio);
205 }
206
/*
 * uiomove64 - move n bytes between the 64-bit address c_cp and the uio,
 * honoring the uio's direction (UIO_READ: kernel -> uio destination,
 * UIO_WRITE: uio source -> kernel) and address space type.
 *
 * Returns:	0			Success
 *	EFAULT
 *	copyout:EFAULT
 *	copyin:EFAULT
 *	copywithin:EFAULT
 *	copypv:EFAULT
 */
int
uiomove64(const addr64_t c_cp, int n, struct uio *uio)
{
	addr64_t cp = c_cp;	/* walking kernel/physical address */
	uint64_t acnt;		/* byte count handled in this pass */
	int error = 0;
	struct kern_iovec *kiovp;
	struct user_iovec *uiovp;

#if DIAGNOSTIC
	if (uio->uio_rw != UIO_READ && uio->uio_rw != UIO_WRITE) {
		panic("uiomove: mode");
	}
#endif

#if LP64_DEBUG
	if (IS_VALID_UIO_SEGFLG(uio->uio_segflg) == 0) {
		panic("invalid uio_segflg");
	}
#endif /* LP64_DEBUG */

	/* loop until the caller's n bytes are moved or the uio is drained */
	while (n > 0 && uio_resid(uio)) {
		/* a count of 0 only advances past exhausted iovecs */
		uio_update(uio, 0);
		acnt = uio_curriovlen(uio);
		if (acnt == 0) {
			continue;
		}
		/* clamp this pass to the caller's remaining byte count */
		if (n > 0 && acnt > (uint64_t)n) {
			acnt = n;
		}

		switch ((int) uio->uio_segflg) {
		case UIO_USERSPACE64:
		case UIO_USERISPACE64:
		case UIO_USERSPACE32:
		case UIO_USERISPACE32:
		case UIO_USERSPACE:
		case UIO_USERISPACE:
			/* kernel virtual <-> user virtual via copyin/copyout */
			uiovp = uio_uiovp(uio);

			// LP64 - 3rd argument in debug code is 64 bit, expected to be 32 bit
			if (uio->uio_rw == UIO_READ) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
				    (int)cp, (uintptr_t)uiovp->iov_base, acnt, 0, 0);

				error = copyout( CAST_DOWN(caddr_t, cp), uiovp->iov_base, (size_t)acnt );

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
				    (int)cp, (uintptr_t)uiovp->iov_base, acnt, 0, 0);
			} else {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
				    (uintptr_t)uiovp->iov_base, (int)cp, acnt, 0, 0);

				error = copyin(uiovp->iov_base, CAST_DOWN(caddr_t, cp), (size_t)acnt);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
				    (uintptr_t)uiovp->iov_base, (int)cp, acnt, 0, 0);
			}
			if (error) {
				return error;
			}
			break;

		case UIO_SYSSPACE32:
		case UIO_SYSSPACE:
			/* kernel virtual <-> kernel virtual */
			kiovp = uio_kiovp(uio);

			if (uio->uio_rw == UIO_READ) {
				error = copywithin(CAST_DOWN(caddr_t, cp), CAST_DOWN(caddr_t, kiovp_get_base(kiovp)),
				    (size_t)acnt);
			} else {
				error = copywithin(CAST_DOWN(caddr_t, kiovp_get_base(kiovp)), CAST_DOWN(caddr_t, cp),
				    (size_t)acnt);
			}
			break;

		case UIO_PHYS_USERSPACE64:
		case UIO_PHYS_USERSPACE32:
		case UIO_PHYS_USERSPACE:
			/* physical <-> user virtual; copypv takes an unsigned int count */
			acnt = MIN(acnt, UINT_MAX);
			uiovp = uio_uiovp(uio);

			if (uio->uio_rw == UIO_READ) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
				    (int)cp, (uintptr_t)uiovp->iov_base, acnt, 1, 0);

				error = copypv((addr64_t)cp, uiovp->iov_base, (unsigned int)acnt, cppvPsrc | cppvNoRefSrc);
				if (error) {    /* Copy physical to virtual */
					error = EFAULT;
				}

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
				    (int)cp, (uintptr_t)uiovp->iov_base, acnt, 1, 0);
			} else {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
				    (uintptr_t)uiovp->iov_base, (int)cp, acnt, 1, 0);

				error = copypv(uiovp->iov_base, (addr64_t)cp, (unsigned int)acnt, cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error) {    /* Copy virtual to physical */
					error = EFAULT;
				}

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
				    (uintptr_t)uiovp->iov_base, (int)cp, acnt, 1, 0);
			}
			if (error) {
				return error;
			}
			break;

		case UIO_PHYS_SYSSPACE:
			/* physical <-> kernel virtual (cppvKmap); copypv count clamp */
			acnt = MIN(acnt, UINT_MAX);
			kiovp = uio_kiovp(uio);

			if (uio->uio_rw == UIO_READ) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
				    (int)cp, (uintptr_t)kiovp_get_base(kiovp), acnt, 2, 0);

				error = copypv((addr64_t)cp, (addr64_t)kiovp_get_base(kiovp), (unsigned int)acnt, cppvKmap | cppvPsrc | cppvNoRefSrc);
				if (error) {    /* Copy physical to virtual */
					error = EFAULT;
				}

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
				    (int)cp, (uintptr_t)kiovp_get_base(kiovp), acnt, 2, 0);
			} else {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
				    (uintptr_t)kiovp_get_base(kiovp), (int)cp, acnt, 2, 0);

				error = copypv((addr64_t)kiovp_get_base(kiovp), (addr64_t)cp, (unsigned int)acnt, cppvKmap | cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error) {    /* Copy virtual to physical */
					error = EFAULT;
				}

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
				    (uintptr_t)kiovp_get_base(kiovp), (int)cp, acnt, 2, 0);
			}
			if (error) {
				return error;
			}
			break;

		default:
			break;
		}
		/* charge the bytes moved against the uio and our own counters */
		uio_update(uio, (user_size_t)acnt);
		cp += acnt;
		n -= acnt;
	}
	return error;
}
366
/*
 * Give next character to user as result of read.
 * Stores the single byte 'c' at the uio's current position and advances
 * the uio by one byte.  Panics if the uio has no residual count or no
 * usable iovec.
 *
 * Returns:	0		Success
 *		EFAULT		subyte() failed for a user address
 */
int
ureadc(int c, struct uio *uio)
{
	struct kern_iovec *kiovp;
	struct user_iovec *uiovp;

	if (uio_resid(uio) <= 0) {
		panic("ureadc: non-positive resid");
	}
	/* skip over any exhausted iovecs to reach a usable one */
	uio_update(uio, 0);
	if (uio->uio_iovcnt == 0) {
		panic("ureadc: non-positive iovcnt");
	}
	if (uio_curriovlen(uio) <= 0) {
		panic("ureadc: non-positive iovlen");
	}

	switch ((int) uio->uio_segflg) {
	case UIO_USERSPACE32:
	case UIO_USERSPACE:
	case UIO_USERISPACE32:
	case UIO_USERISPACE:
	case UIO_USERSPACE64:
	case UIO_USERISPACE64:
		/* user address: store via the copyio layer */
		uiovp = uio_uiovp(uio);

		if (subyte((user_addr_t)uiovp->iov_base, c) < 0) {
			return EFAULT;
		}
		break;

	case UIO_SYSSPACE32:
	case UIO_SYSSPACE:
		/* kernel address: store directly */
		kiovp = uio_kiovp(uio);
		*(CAST_DOWN(caddr_t, kiovp_get_base(kiovp))) = (char)c;
		break;

	default:
		break;
	}
	/* account for the one byte just written */
	uio_update(uio, 1);
	return 0;
}
413
414 LIST_HEAD(generic_hash_head, generic);
415
416 /*
417 * General routine to allocate a hash table.
418 */
419 void *
hashinit(int elements,int type __unused,u_long * hashmask)420 hashinit(int elements, int type __unused, u_long *hashmask)
421 {
422 struct generic_hash_head *hashtbl;
423 vm_size_t hashsize;
424
425 if (elements <= 0) {
426 panic("hashinit: bad cnt");
427 }
428
429 hashsize = 1UL << (fls(elements) - 1);
430 hashtbl = kalloc_type(struct generic_hash_head, hashsize, Z_WAITOK | Z_ZERO);
431 if (hashtbl != NULL) {
432 *hashmask = hashsize - 1;
433 }
434 return hashtbl;
435 }
436
437 void
hashdestroy(void * hash,int type __unused,u_long hashmask)438 hashdestroy(void *hash, int type __unused, u_long hashmask)
439 {
440 assert(powerof2(hashmask + 1));
441 kfree_type(struct generic_hash_head, hashmask + 1, hash);
442 }
443
444 /*
445 * uio_resid - return the residual IO value for the given uio_t
446 */
447 user_ssize_t
uio_resid(uio_t a_uio)448 uio_resid( uio_t a_uio )
449 {
450 #if DEBUG
451 if (a_uio == NULL) {
452 printf("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
453 }
454 /* if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */
455 /* panic("invalid uio_segflg"); */
456 /* } */
457 #endif /* DEBUG */
458
459 /* return 0 if there are no active iovecs */
460 if (a_uio == NULL) {
461 return 0;
462 }
463
464 return a_uio->uio_resid_64;
465 }
466
467 /*
468 * uio_setresid - set the residual IO value for the given uio_t
469 */
470 void
uio_setresid(uio_t a_uio,user_ssize_t a_value)471 uio_setresid( uio_t a_uio, user_ssize_t a_value )
472 {
473 #if DEBUG
474 if (a_uio == NULL) {
475 panic("invalid uio_t");
476 }
477 /* if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */
478 /* panic("invalid uio_segflg"); */
479 /* } */
480 #endif /* DEBUG */
481
482 if (a_uio == NULL) {
483 return;
484 }
485
486 a_uio->uio_resid_64 = a_value;
487 return;
488 }
489
490 /*
491 * uio_curriovbase - return the base address of the current iovec associated
492 * with the given uio_t. May return 0.
493 */
494 user_addr_t
uio_curriovbase(uio_t a_uio)495 uio_curriovbase( uio_t a_uio )
496 {
497 struct kern_iovec *kiovp;
498 struct user_iovec *uiovp;
499
500 #if LP64_DEBUG
501 if (a_uio == NULL) {
502 panic("invalid uio_t");
503 }
504 #endif /* LP64_DEBUG */
505
506 if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
507 return 0;
508 }
509
510 if (UIO_IS_USER_SPACE(a_uio)) {
511 uiovp = uio_uiovp(a_uio);
512 return uiovp->iov_base;
513 }
514
515 kiovp = uio_kiovp(a_uio);
516 return (user_addr_t)kiovp_get_base(kiovp);
517 }
518
519 /*
520 * uio_curriovlen - return the length value of the current iovec associated
521 * with the given uio_t.
522 */
523 user_size_t
uio_curriovlen(uio_t a_uio)524 uio_curriovlen( uio_t a_uio )
525 {
526 struct kern_iovec *kiovp;
527 struct user_iovec *uiovp;
528
529 #if LP64_DEBUG
530 if (a_uio == NULL) {
531 panic("invalid uio_t");
532 }
533 #endif /* LP64_DEBUG */
534
535 if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
536 return 0;
537 }
538
539 if (UIO_IS_USER_SPACE(a_uio)) {
540 uiovp = uio_uiovp(a_uio);
541 return uiovp->iov_len;
542 }
543
544 kiovp = uio_kiovp(a_uio);
545 return (user_size_t)kiovp->iov_len;
546 }
547
/*
 * uio_setcurriovlen - set the length value of the current iovec associated
 * with the given uio_t.  A NULL uio panics under LP64_DEBUG and is
 * otherwise ignored.
 */
__private_extern__ void
uio_setcurriovlen( uio_t a_uio, user_size_t a_value )
{
	struct kern_iovec *kiovp;
	struct user_iovec *uiovp;
	u_int64_t base;

#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("invalid uio_t");
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return;
	}

	if (UIO_IS_USER_SPACE(a_uio)) {
		uiovp = uio_uiovp(a_uio);
		uiovp->iov_len = a_value;
	} else {
#if LP64_DEBUG
		if (a_value > 0xFFFFFFFFull) {
			panic("invalid a_value");
		}
#endif /* LP64_DEBUG */
		kiovp = uio_kiovp(a_uio);
		/*
		 * With pointer authentication the stored iov_base is signed
		 * against the current iov_len, so the base must be read
		 * (authenticated) before the length changes and re-signed
		 * afterwards.  Do not reorder these three statements.
		 */
		base = kiovp_get_base(kiovp);
		kiovp->iov_len = (size_t)a_value;
		kiovp_set_base(kiovp, base);
	}
	return;
}
585
586 /*
587 * uio_iovcnt - return count of active iovecs for the given uio_t
588 */
589 int
uio_iovcnt(uio_t a_uio)590 uio_iovcnt( uio_t a_uio )
591 {
592 #if LP64_DEBUG
593 if (a_uio == NULL) {
594 panic("invalid uio_t");
595 }
596 #endif /* LP64_DEBUG */
597
598 if (a_uio == NULL) {
599 return 0;
600 }
601
602 return a_uio->uio_iovcnt;
603 }
604
605 /*
606 * uio_offset - return the current offset value for the given uio_t
607 */
608 off_t
uio_offset(uio_t a_uio)609 uio_offset( uio_t a_uio )
610 {
611 #if LP64_DEBUG
612 if (a_uio == NULL) {
613 panic("invalid uio_t");
614 }
615 #endif /* LP64_DEBUG */
616
617 if (a_uio == NULL) {
618 return 0;
619 }
620 return a_uio->uio_offset;
621 }
622
623 /*
624 * uio_setoffset - set the current offset value for the given uio_t
625 */
626 void
uio_setoffset(uio_t a_uio,off_t a_offset)627 uio_setoffset( uio_t a_uio, off_t a_offset )
628 {
629 #if LP64_DEBUG
630 if (a_uio == NULL) {
631 panic("invalid uio_t");
632 }
633 #endif /* LP64_DEBUG */
634
635 if (a_uio == NULL) {
636 return;
637 }
638 a_uio->uio_offset = a_offset;
639 return;
640 }
641
642 /*
643 * uio_rw - return the read / write flag for the given uio_t
644 */
645 int
uio_rw(uio_t a_uio)646 uio_rw( uio_t a_uio )
647 {
648 #if LP64_DEBUG
649 if (a_uio == NULL) {
650 panic("invalid uio_t");
651 }
652 #endif /* LP64_DEBUG */
653
654 if (a_uio == NULL) {
655 return -1;
656 }
657 return a_uio->uio_rw;
658 }
659
660 /*
661 * uio_setrw - set the read / write flag for the given uio_t
662 */
663 void
uio_setrw(uio_t a_uio,int a_value)664 uio_setrw( uio_t a_uio, int a_value )
665 {
666 if (a_uio == NULL) {
667 #if LP64_DEBUG
668 panic("invalid uio_t");
669 #endif /* LP64_DEBUG */
670 return;
671 }
672
673 #if LP64_DEBUG
674 if (!(a_value == UIO_READ || a_value == UIO_WRITE)) {
675 panic("invalid a_value");
676 }
677 #endif /* LP64_DEBUG */
678
679 if (a_value == UIO_READ || a_value == UIO_WRITE) {
680 a_uio->uio_rw = a_value;
681 }
682 return;
683 }
684
685 /*
686 * uio_isuserspace - return non zero value if the address space
687 * flag is for a user address space (could be 32 or 64 bit).
688 */
689 int
uio_isuserspace(uio_t a_uio)690 uio_isuserspace( uio_t a_uio )
691 {
692 if (a_uio == NULL) {
693 #if LP64_DEBUG
694 panic("invalid uio_t");
695 #endif /* LP64_DEBUG */
696 return 0;
697 }
698
699 if (UIO_SEG_IS_USER_SPACE(a_uio->uio_segflg)) {
700 return 1;
701 }
702 return 0;
703 }
704
/*
 * uio_init - initialize a caller-allocated uio_t.
 *
 * Parameters:	uio		uio to initialize (must not be NULL)
 *		a_iovcount	number of iovecs (0 .. UIO_MAXIOV)
 *		a_offset	current offset
 *		a_spacetype	type of address space
 *		a_iodirection	UIO_READ or UIO_WRITE
 *		iovecs		backing store for the iovec array
 */
static void
uio_init(uio_t uio,
    int a_iovcount,                                     /* number of iovecs */
    off_t a_offset,                                     /* current offset */
    int a_spacetype,                                    /* type of address space */
    int a_iodirection,                                  /* read or write flag */
    void *iovecs)                                       /* pointer to iovec array */
{
	assert(uio != NULL);
	assert(a_iovcount >= 0 && a_iovcount <= UIO_MAXIOV);
	assert(IS_VALID_UIO_SEGFLG(a_spacetype));
	assert(a_iodirection == UIO_READ || a_iodirection == UIO_WRITE);

	/*
	 * we use uio_segflg to indicate if the uio_t is the new format or
	 * old (pre LP64 support) legacy format
	 * This switch statement should canonicalize incoming space type
	 * to one of UIO_USERSPACE32/64, UIO_PHYS_USERSPACE32/64, or
	 * UIO_SYSSPACE/UIO_PHYS_SYSSPACE
	 */
	switch (a_spacetype) {
	case UIO_USERSPACE:
		uio->uio_segflg = UIO_USERSPACE32;
		break;
	case UIO_SYSSPACE32:
		uio->uio_segflg = UIO_SYSSPACE;
		break;
	case UIO_PHYS_USERSPACE:
		uio->uio_segflg = UIO_PHYS_USERSPACE32;
		break;
	default:
		uio->uio_segflg = a_spacetype;
		break;
	}

	uio->uio_iovbase = iovecs;      /* start of the array, kept for freeing */
	uio->uio_iovs = iovecs;         /* current position, advanced by uio_update() */
	uio->uio_max_iovs = a_iovcount;
	uio->uio_offset = a_offset;
	uio->uio_rw = a_iodirection;
	uio->uio_flags = UIO_FLAGS_INITED;
}
747
748 static void *
uio_alloc_iov_array(int a_spacetype,size_t a_iovcount)749 uio_alloc_iov_array(int a_spacetype, size_t a_iovcount)
750 {
751 if (IS_SYS_SPACE_SEGFLG(a_spacetype) || IS_PHYS_SEGFLG(a_spacetype)) {
752 return kalloc_type(struct kern_iovec, a_iovcount, Z_WAITOK | Z_ZERO);
753 }
754
755 size_t bytes = UIO_SIZEOF_IOVS(a_iovcount);
756 return kalloc_data(bytes, Z_WAITOK | Z_ZERO);
757 }
758
759 static void
uio_free_iov_array(int a_spacetype,void * iovs,size_t a_iovcount)760 uio_free_iov_array(int a_spacetype, void *iovs, size_t a_iovcount)
761 {
762 if (IS_SYS_SPACE_SEGFLG(a_spacetype) || IS_PHYS_SEGFLG(a_spacetype)) {
763 kfree_type(struct kern_iovec, a_iovcount, iovs);
764 } else {
765 size_t bytes = UIO_SIZEOF_IOVS(a_iovcount);
766 kfree_data(iovs, bytes);
767 }
768 }
769
770 /*
771 * uio_create - create an uio_t.
772 * Space is allocated to hold up to a_iovcount number of iovecs. The uio_t
773 * is not fully initialized until all iovecs are added using uio_addiov calls.
774 * a_iovcount is the maximum number of iovecs you may add.
775 */
776 uio_t
uio_create(int a_iovcount,off_t a_offset,int a_spacetype,int a_iodirection)777 uio_create( int a_iovcount, /* number of iovecs */
778 off_t a_offset, /* current offset */
779 int a_spacetype, /* type of address space */
780 int a_iodirection ) /* read or write flag */
781 {
782 uio_t uio;
783 void *iovecs;
784
785 if (a_iovcount < 0 || a_iovcount > UIO_MAXIOV) {
786 return NULL;
787 }
788
789 uio = kalloc_type(struct uio, Z_WAITOK | Z_ZERO | Z_NOFAIL);
790 iovecs = uio_alloc_iov_array(a_spacetype, (size_t)a_iovcount);
791
792 uio_init(uio, a_iovcount, a_offset, a_spacetype, a_iodirection, iovecs);
793
794 /* leave a note that we allocated this uio_t */
795 uio->uio_flags |= UIO_FLAGS_WE_ALLOCED;
796 #if DEBUG
797 os_atomic_inc(&uio_t_count, relaxed);
798 #endif
799
800 return uio;
801 }
802
803
804 /*
805 * uio_createwithbuffer - create an uio_t.
806 * Create a uio_t using the given buffer. The uio_t
807 * is not fully initialized until all iovecs are added using uio_addiov calls.
808 * a_iovcount is the maximum number of iovecs you may add.
809 * This call may fail if the given buffer is not large enough.
810 */
811 __private_extern__ uio_t
uio_createwithbuffer(int a_iovcount,off_t a_offset,int a_spacetype,int a_iodirection,void * a_buf_p,size_t a_buffer_size)812 uio_createwithbuffer( int a_iovcount, /* number of iovecs */
813 off_t a_offset, /* current offset */
814 int a_spacetype, /* type of address space */
815 int a_iodirection, /* read or write flag */
816 void *a_buf_p, /* pointer to a uio_t buffer */
817 size_t a_buffer_size ) /* size of uio_t buffer */
818 {
819 uio_t uio = (uio_t) a_buf_p;
820 void *iovecs = NULL;
821
822 if (a_iovcount < 0 || a_iovcount > UIO_MAXIOV) {
823 return NULL;
824 }
825
826 if (a_buffer_size < UIO_SIZEOF(a_iovcount)) {
827 return NULL;
828 }
829
830 if (a_iovcount > 0) {
831 iovecs = (uint8_t *)uio + sizeof(struct uio);
832 }
833
834 bzero(a_buf_p, a_buffer_size);
835 uio_init(uio, a_iovcount, a_offset, a_spacetype, a_iodirection, iovecs);
836
837 return uio;
838 }
839
840 /*
841 * uio_spacetype - return the address space type for the given uio_t
842 */
843 __private_extern__ int
uio_spacetype(uio_t a_uio)844 uio_spacetype( uio_t a_uio )
845 {
846 if (a_uio == NULL) {
847 #if LP64_DEBUG
848 panic("invalid uio_t");
849 #endif /* LP64_DEBUG */
850 return -1;
851 }
852
853 return a_uio->uio_segflg;
854 }
855
856 /*
857 * uio_iovsaddr - get the address of the iovec array for the given uio_t.
858 * This returns the location of the iovecs within the uio.
859 * NOTE - for compatibility mode we just return the current value in uio_iovs
860 * which will increase as the IO is completed and is NOT embedded within the
861 * uio, it is a seperate array of one or more iovecs.
862 */
863 __private_extern__ struct user_iovec *
uio_iovsaddr(uio_t a_uio)864 uio_iovsaddr( uio_t a_uio )
865 {
866 struct user_iovec * my_addr;
867
868 if (a_uio == NULL) {
869 return NULL;
870 }
871
872 if (UIO_SEG_IS_USER_SPACE(a_uio->uio_segflg)) {
873 /* we need this for compatibility mode. */
874 my_addr = uio_uiovp(a_uio);
875 } else {
876 #if DEBUG
877 panic("uio_iovsaddr called for UIO_SYSSPACE request");
878 #endif
879 my_addr = 0;
880 }
881 return my_addr;
882 }
883
/*
 * uio_reset - reset an uio_t.
 *  Reset the given uio_t to initial values.  The uio_t is not fully initialized
 *  until all iovecs are added using uio_addiov calls.
 *  The a_iovcount value passed in the uio_create is the maximum number of
 *  iovecs you may add.
 */
void
uio_reset( uio_t a_uio,
    off_t a_offset,                     /* current offset */
    int a_spacetype,                    /* type of address space */
    int a_iodirection )                 /* read or write flag */
{
	int my_max_iovs;
	u_int32_t my_old_flags;
	void *my_iovs;

#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("could not allocate uio_t");
	}
	if (!IS_VALID_UIO_SEGFLG(a_spacetype)) {
		panic("invalid address space type");
	}
	if (!(a_iodirection == UIO_READ || a_iodirection == UIO_WRITE)) {
		panic("invalid IO direction flag");
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return;
	}

	/* save the fields that survive the reset before wiping the struct */
	my_old_flags = a_uio->uio_flags;
	my_max_iovs = a_uio->uio_max_iovs;
	my_iovs = a_uio->uio_iovbase;

	bzero(a_uio, sizeof(*a_uio));
	if (my_iovs != NULL) {
		/* clear the iovec array so uio_addiov() sees empty slots */
		bzero(my_iovs, UIO_SIZEOF_IOVS(my_max_iovs));
	}

	/*
	 * we use uio_segflg to indicate if the uio_t is the new format or
	 * old (pre LP64 support) legacy format
	 * This switch statement should canonicalize incoming space type
	 * to one of UIO_USERSPACE32/64, UIO_PHYS_USERSPACE32/64, or
	 * UIO_SYSSPACE/UIO_PHYS_SYSSPACE
	 */
	switch (a_spacetype) {
	case UIO_USERSPACE:
		a_uio->uio_segflg = UIO_USERSPACE32;
		break;
	case UIO_SYSSPACE32:
		a_uio->uio_segflg = UIO_SYSSPACE;
		break;
	case UIO_PHYS_USERSPACE:
		a_uio->uio_segflg = UIO_PHYS_USERSPACE32;
		break;
	default:
		a_uio->uio_segflg = a_spacetype;
		break;
	}

	/* restore the surviving fields; iovs position rewinds to the base */
	a_uio->uio_iovs = my_iovs;
	a_uio->uio_iovbase = my_iovs;
	a_uio->uio_max_iovs = my_max_iovs;
	a_uio->uio_offset = a_offset;
	a_uio->uio_rw = a_iodirection;
	a_uio->uio_flags = my_old_flags;

	return;
}
957
958 /*
959 * uio_free - free a uio_t allocated via uio_init. this also frees all
960 * associated iovecs.
961 */
962 void
uio_free(uio_t a_uio)963 uio_free( uio_t a_uio )
964 {
965 #if DEBUG
966 if (a_uio == NULL) {
967 panic("passing NULL uio_t");
968 }
969 #endif /* LP64_DEBUG */
970
971 if (a_uio != NULL && (a_uio->uio_flags & UIO_FLAGS_WE_ALLOCED) != 0) {
972 #if DEBUG
973 if (os_atomic_dec_orig(&uio_t_count, relaxed) == 0) {
974 panic("uio_t_count underflow");
975 }
976 #endif
977 if (__improbable(a_uio->uio_max_iovs < 0 || a_uio->uio_max_iovs > UIO_MAXIOV)) {
978 panic("%s: bad uio_max_iovs", __func__);
979 }
980
981 uio_free_iov_array(a_uio->uio_segflg, a_uio->uio_iovbase,
982 (size_t)a_uio->uio_max_iovs);
983
984 kfree_type(struct uio, a_uio);
985 }
986 }
987
/*
 * uio_addiov - add an iovec to the given uio_t.  You may call this up to
 * the a_iovcount number that was passed to uio_create.  This call will
 * increment the residual IO count as iovecs are added to the uio_t.
 * returns 0 if add was successful else non zero.
 */
int
uio_addiov( uio_t a_uio, user_addr_t a_baseaddr, user_size_t a_length )
{
	int i;
	user_size_t resid;      /* new residual count, overflow-checked below */
	struct kern_iovec *kiovp;
	struct user_iovec *uiovp;

	if (a_uio == NULL) {
#if DEBUG
		panic("invalid uio_t");
#endif
		return -1;
	}

	/* reject lengths that would overflow the residual counter */
	if (os_add_overflow(a_length, a_uio->uio_resid_64, &resid)) {
#if DEBUG
		panic("invalid length %lu", (unsigned long)a_length);
#endif
		return -1;
	}

	/* claim the first empty slot (len == 0 and base == 0), if any */
	if (UIO_IS_USER_SPACE(a_uio)) {
		uiovp = uio_uiovp(a_uio);
		for (i = 0; i < a_uio->uio_max_iovs; i++) {
			if (uiovp[i].iov_len == 0 &&
			    uiovp[i].iov_base == 0) {
				uiovp[i].iov_len = a_length;
				uiovp[i].iov_base = a_baseaddr;
				a_uio->uio_iovcnt++;
				a_uio->uio_resid_64 = resid;
				return 0;
			}
		}
	} else {
		kiovp = uio_kiovp(a_uio);
		for (i = 0; i < a_uio->uio_max_iovs; i++) {
			if (kiovp[i].iov_len == 0 &&
			    kiovp_get_base(&kiovp[i]) == 0) {
				kiovp[i].iov_len = (u_int64_t)a_length;
				kiovp_set_base(&kiovp[i], (u_int64_t)a_baseaddr);
				a_uio->uio_iovcnt++;
				a_uio->uio_resid_64 = resid;
				return 0;
			}
		}
	}

	/* no free iovec slots left */
	return -1;
}
1044
1045 /*
1046 * uio_getiov - get iovec data associated with the given uio_t. Use
1047 * a_index to iterate over each iovec (0 to (uio_iovcnt(uio_t) - 1)).
1048 * a_baseaddr_p and a_length_p may be NULL.
1049 * returns -1 when a_index is >= uio_t.uio_iovcnt or invalid uio_t.
1050 * returns 0 when data is returned.
1051 */
1052 int
uio_getiov(uio_t a_uio,int a_index,user_addr_t * a_baseaddr_p,user_size_t * a_length_p)1053 uio_getiov( uio_t a_uio,
1054 int a_index,
1055 user_addr_t * a_baseaddr_p,
1056 user_size_t * a_length_p )
1057 {
1058 struct kern_iovec *kiovp;
1059 struct user_iovec *uiovp;
1060
1061 if (a_uio == NULL) {
1062 #if DEBUG
1063 panic("invalid uio_t");
1064 #endif /* DEBUG */
1065 return -1;
1066 }
1067 if (a_index < 0 || a_index >= a_uio->uio_iovcnt) {
1068 return -1;
1069 }
1070
1071 if (UIO_IS_USER_SPACE(a_uio)) {
1072 uiovp = uio_uiovp(a_uio);
1073
1074 if (a_baseaddr_p != NULL) {
1075 *a_baseaddr_p = uiovp[a_index].iov_base;
1076 }
1077 if (a_length_p != NULL) {
1078 *a_length_p = uiovp[a_index].iov_len;
1079 }
1080 } else {
1081 kiovp = uio_kiovp(a_uio);
1082
1083 if (a_baseaddr_p != NULL) {
1084 *a_baseaddr_p = (user_addr_t)kiovp_get_base(&kiovp[a_index]);
1085 }
1086 if (a_length_p != NULL) {
1087 *a_length_p = (user_size_t)kiovp[a_index].iov_len;
1088 }
1089 }
1090
1091 return 0;
1092 }
1093
/*
 * uio_calculateresid - runs through all iovecs associated with this
 * uio_t and calculates (and sets) the residual IO count.
 * Returns 0 on success, EINVAL when the uio is NULL or any single iovec
 * length (or the running total) exceeds LONG_MAX.
 */
__private_extern__ int
uio_calculateresid( uio_t a_uio )
{
	int i;
	u_int64_t resid = 0;
	struct kern_iovec *kiovp;
	struct user_iovec *uiovp;

	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("invalid uio_t");
#endif /* LP64_DEBUG */
		return EINVAL;
	}

	a_uio->uio_iovcnt = a_uio->uio_max_iovs;
	if (UIO_IS_USER_SPACE(a_uio)) {
		uiovp = uio_uiovp(a_uio);
		a_uio->uio_resid_64 = 0;
		/* sum only slots that are actually populated */
		for (i = 0; i < a_uio->uio_max_iovs; i++) {
			if (uiovp[i].iov_len != 0 && uiovp[i].iov_base != 0) {
				if (uiovp[i].iov_len > LONG_MAX) {
					return EINVAL;
				}
				resid += uiovp[i].iov_len;
				if (resid > LONG_MAX) {
					return EINVAL;
				}
			}
		}
		a_uio->uio_resid_64 = (user_size_t)resid;

		/* position to first non zero length iovec (4235922) */
		while (a_uio->uio_iovcnt > 0 && uiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				uiovp = uio_advance(a_uio);
			}
		}
	} else {
		kiovp = uio_kiovp(a_uio);
		a_uio->uio_resid_64 = 0;
		/* sum only slots that are actually populated */
		for (i = 0; i < a_uio->uio_max_iovs; i++) {
			if (kiovp[i].iov_len != 0 && kiovp_get_base(&kiovp[i]) != 0) {
				if (kiovp[i].iov_len > LONG_MAX) {
					return EINVAL;
				}
				resid += kiovp[i].iov_len;
				if (resid > LONG_MAX) {
					return EINVAL;
				}
			}
		}
		a_uio->uio_resid_64 = (user_size_t)resid;

		/* position to first non zero length iovec (4235922) */
		while (a_uio->uio_iovcnt > 0 && kiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				kiovp = uio_advance(a_uio);
			}
		}
	}

	return 0;
}
1164
1165 /*
1166 * uio_update - update the given uio_t for a_count of completed IO.
1167 * This call decrements the current iovec length and residual IO value
1168 * and increments the current iovec base address and offset value.
1169 * If the current iovec length is 0 then advance to the next
1170 * iovec (if any).
1171 * If the a_count passed in is 0, than only do the advancement
1172 * over any 0 length iovec's.
1173 */
1174 void
uio_update(uio_t a_uio,user_size_t a_count)1175 uio_update( uio_t a_uio, user_size_t a_count )
1176 {
1177 struct kern_iovec *kiovp;
1178 struct user_iovec *uiovp;
1179
1180 #if LP64_DEBUG
1181 if (a_uio == NULL) {
1182 panic("invalid uio_t");
1183 }
1184 if (UIO_IS_32_BIT_SPACE(a_uio) && a_count > 0xFFFFFFFFull) {
1185 panic("invalid count value ");
1186 }
1187 #endif /* LP64_DEBUG */
1188
1189 if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
1190 return;
1191 }
1192
1193 if (UIO_IS_USER_SPACE(a_uio)) {
1194 uiovp = uio_uiovp(a_uio);
1195
1196 /*
1197 * if a_count == 0, then we are asking to skip over
1198 * any empty iovs
1199 */
1200 if (a_count) {
1201 if (a_count > uiovp->iov_len) {
1202 uiovp->iov_base += uiovp->iov_len;
1203 uiovp->iov_len = 0;
1204 } else {
1205 uiovp->iov_base += a_count;
1206 uiovp->iov_len -= a_count;
1207 }
1208 if (a_count > (user_size_t)a_uio->uio_resid_64) {
1209 a_uio->uio_offset += a_uio->uio_resid_64;
1210 a_uio->uio_resid_64 = 0;
1211 } else {
1212 a_uio->uio_offset += a_count;
1213 a_uio->uio_resid_64 -= a_count;
1214 }
1215 }
1216 /*
1217 * advance to next iovec if current one is totally consumed
1218 */
1219 while (a_uio->uio_iovcnt > 0 && uiovp->iov_len == 0) {
1220 a_uio->uio_iovcnt--;
1221 if (a_uio->uio_iovcnt > 0) {
1222 uiovp = uio_advance(a_uio);
1223 }
1224 }
1225 } else {
1226 kiovp = uio_kiovp(a_uio);
1227
1228 /*
1229 * if a_count == 0, then we are asking to skip over
1230 * any empty iovs
1231 */
1232 if (a_count) {
1233 u_int64_t prev_base = kiovp_get_base(kiovp);
1234 if (a_count > kiovp->iov_len) {
1235 u_int64_t len = kiovp->iov_len;
1236 kiovp->iov_len = 0;
1237 kiovp_set_base(kiovp, prev_base + len);
1238 } else {
1239 kiovp->iov_len -= a_count;
1240 kiovp_set_base(kiovp, prev_base + a_count);
1241 }
1242 if (a_count > (user_size_t)a_uio->uio_resid_64) {
1243 a_uio->uio_offset += a_uio->uio_resid_64;
1244 a_uio->uio_resid_64 = 0;
1245 } else {
1246 a_uio->uio_offset += a_count;
1247 a_uio->uio_resid_64 -= a_count;
1248 }
1249 }
1250 /*
1251 * advance to next iovec if current one is totally consumed
1252 */
1253 while (a_uio->uio_iovcnt > 0 && kiovp->iov_len == 0) {
1254 a_uio->uio_iovcnt--;
1255 if (a_uio->uio_iovcnt > 0) {
1256 kiovp = uio_advance(a_uio);
1257 }
1258 }
1259 }
1260 return;
1261 }
1262
1263 /*
1264 * uio_pushback - undo uncommitted I/O by subtracting from the
1265 * current base address and offset, and incrementing the residiual
1266 * IO. If the UIO was previously exhausted, this call will panic.
1267 * New code should not use this functionality.
1268 */
1269 __private_extern__ void
uio_pushback(uio_t a_uio,user_size_t a_count)1270 uio_pushback( uio_t a_uio, user_size_t a_count )
1271 {
1272 u_int64_t prev_base;
1273 struct kern_iovec *kiovp;
1274 struct user_iovec *uiovp;
1275
1276 #if LP64_DEBUG
1277 if (a_uio == NULL) {
1278 panic("invalid uio_t");
1279 }
1280 if (UIO_IS_32_BIT_SPACE(a_uio) && a_count > 0xFFFFFFFFull) {
1281 panic("invalid count value ");
1282 }
1283 #endif /* LP64_DEBUG */
1284
1285 if (a_uio == NULL || a_count == 0) {
1286 return;
1287 }
1288
1289 if (a_uio->uio_iovcnt < 1) {
1290 panic("Invalid uio for pushback");
1291 }
1292
1293 if (UIO_IS_USER_SPACE(a_uio)) {
1294 uiovp = uio_uiovp(a_uio);
1295 uiovp->iov_base -= a_count;
1296 uiovp->iov_len += a_count;
1297 } else {
1298 kiovp = uio_kiovp(a_uio);
1299 prev_base = kiovp_get_base(kiovp);
1300 kiovp->iov_len += a_count;
1301 kiovp_set_base(kiovp, prev_base - a_count);
1302 }
1303
1304 a_uio->uio_offset -= a_count;
1305 a_uio->uio_resid_64 += a_count;
1306
1307 return;
1308 }
1309
1310
1311 /*
1312 * uio_duplicate - allocate a new uio and make a copy of the given uio_t.
1313 * may return NULL.
1314 */
1315 uio_t
uio_duplicate(uio_t uio)1316 uio_duplicate(uio_t uio)
1317 {
1318 uio_t new_uio;
1319 size_t n;
1320 struct kern_iovec *kiovp;
1321 struct user_iovec *uiovp;
1322
1323 if (uio->uio_max_iovs < 0 || uio->uio_max_iovs > UIO_MAXIOV) {
1324 return NULL;
1325 }
1326
1327 new_uio = kalloc_type(struct uio, Z_WAITOK | Z_ZERO | Z_NOFAIL);
1328 *new_uio = *uio;
1329
1330 if (new_uio->uio_max_iovs > 0) {
1331 new_uio->uio_iovbase = uio_alloc_iov_array(new_uio->uio_segflg,
1332 (size_t)new_uio->uio_max_iovs);
1333 new_uio->uio_iovs = new_uio->uio_iovbase;
1334
1335 n = UIO_SIZEOF_IOVS(new_uio->uio_iovcnt);
1336 bcopy((const void *)uio->uio_iovs, (void *)new_uio->uio_iovs, n);
1337 if (UIO_IS_SYS_SPACE(new_uio)) {
1338 struct kern_iovec *kiovp_old = uio_kiovp(uio);
1339
1340 kiovp = uio_kiovp(new_uio);
1341
1342 for (n = 0; n < new_uio->uio_max_iovs; ++n) {
1343 kiovp_set_base(&kiovp[n],
1344 kiovp_get_base(&kiovp_old[n]));
1345 }
1346 } else {
1347 uiovp = uio_uiovp(new_uio);
1348 }
1349
1350 /* advance to first nonzero iovec */
1351 for (n = 0; n < new_uio->uio_max_iovs; ++n) {
1352 if (UIO_IS_USER_SPACE(new_uio)) {
1353 if (uiovp->iov_len != 0) {
1354 break;
1355 }
1356
1357 uiovp = uio_advance(new_uio);
1358 } else {
1359 if (kiovp->iov_len != 0) {
1360 break;
1361 }
1362
1363 kiovp = uio_advance(new_uio);
1364 }
1365 }
1366 } else {
1367 new_uio->uio_iovs = NULL;
1368 }
1369
1370 new_uio->uio_flags = UIO_FLAGS_WE_ALLOCED | UIO_FLAGS_INITED;
1371 #if DEBUG
1372 os_atomic_inc(&uio_t_count, relaxed);
1373 #endif
1374
1375 return new_uio;
1376 }
1377
1378 int
copyin_user_iovec_array(user_addr_t uaddr,int spacetype,int count,struct user_iovec * dst)1379 copyin_user_iovec_array(user_addr_t uaddr, int spacetype, int count, struct user_iovec *dst)
1380 {
1381 size_t size_of_iovec = (spacetype == UIO_USERSPACE64 ? sizeof(struct user64_iovec) : sizeof(struct user32_iovec));
1382 int error;
1383 int i;
1384
1385 // copyin to the front of "dst", without regard for putting records in the right places
1386 error = copyin(uaddr, dst, count * size_of_iovec);
1387 if (error) {
1388 return error;
1389 }
1390
1391 // now, unpack the entries in reverse order, so we don't overwrite anything
1392 for (i = count - 1; i >= 0; i--) {
1393 if (spacetype == UIO_USERSPACE64) {
1394 struct user64_iovec iovec = ((struct user64_iovec *)dst)[i];
1395 dst[i].iov_base = (user_addr_t)iovec.iov_base;
1396 dst[i].iov_len = (user_size_t)iovec.iov_len;
1397 } else {
1398 struct user32_iovec iovec = ((struct user32_iovec *)dst)[i];
1399 dst[i].iov_base = iovec.iov_base;
1400 dst[i].iov_len = iovec.iov_len;
1401 }
1402 }
1403
1404 return 0;
1405 }
1406