1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1982, 1986, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)kern_subr.c 8.3 (Berkeley) 1/21/94
67 */
68
69 #include <machine/atomic.h>
70
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/proc_internal.h>
74 #include <sys/malloc.h>
75 #include <sys/queue.h>
76 #include <vm/pmap.h>
77 #include <sys/uio_internal.h>
78 #include <kern/kalloc.h>
79
80 #include <kdebug.h>
81
82 #include <sys/kdebug.h>
83 #define DBG_UIO_COPYOUT 16
84 #define DBG_UIO_COPYIN 17
85
86 #if DEBUG
87 #include <kern/simple_lock.h>
88
89 static uint32_t uio_t_count = 0;
90 #endif /* DEBUG */
91
/*
 * IS_VALID_UIO_SEGFLG - true when 'segflg' is any recognized uio
 * address-space type: user or system, data or instruction space,
 * 32/64-bit or legacy spelling, virtual or physical.
 */
#define IS_VALID_UIO_SEGFLG(segflg)  \
	( (segflg) == UIO_USERSPACE || \
	(segflg) == UIO_SYSSPACE || \
	(segflg) == UIO_USERSPACE32 || \
	(segflg) == UIO_USERSPACE64 || \
	(segflg) == UIO_SYSSPACE32 || \
	(segflg) == UIO_USERISPACE || \
	(segflg) == UIO_PHYS_USERSPACE || \
	(segflg) == UIO_PHYS_SYSSPACE || \
	(segflg) == UIO_USERISPACE32 || \
	(segflg) == UIO_PHYS_USERSPACE32 || \
	(segflg) == UIO_USERISPACE64 || \
	(segflg) == UIO_PHYS_USERSPACE64 )

/*
 * IS_SYS_SPACE_SEGFLG - true when 'segflg' addresses kernel (system)
 * space; such uios carry struct kern_iovec entries.
 */
#define IS_SYS_SPACE_SEGFLG(segflg)     \
	( (segflg) == UIO_SYSSPACE || \
	(segflg) == UIO_PHYS_SYSSPACE ||  \
	(segflg) == UIO_SYSSPACE32 )

/*
 * IS_PHYS_SEGFLG - true when 'segflg' refers to physical addresses
 * (copies go through copypv() rather than copyin/copyout/copywithin).
 */
#define IS_PHYS_SEGFLG(segflg)  \
	( (segflg) == UIO_PHYS_USERSPACE || \
	(segflg) == UIO_PHYS_SYSSPACE || \
	(segflg) == UIO_PHYS_USERSPACE64 || \
	(segflg) == UIO_PHYS_USERSPACE32 )
116
/*
 * kiovp_get_base - return the base address stored in a kernel iovec.
 *
 * With pointer authentication enabled, iov_base is kept signed in memory;
 * authenticate it using a discriminator blended from the address of the
 * iov_base field and the iovec's current iov_len (so relocating the entry
 * or tampering with the length invalidates the pointer).  A zero base is
 * stored unsigned and returned as-is.  Without ptrauth this is a plain load.
 */
__attribute__((always_inline))
static u_int64_t
kiovp_get_base(const struct kern_iovec *kiovp)
{
#if __has_feature(ptrauth_calls)
	if (kiovp->iov_base == 0) {
		return 0;
	} else {
		return (u_int64_t)ptrauth_auth_data((void *)kiovp->iov_base,
		    ptrauth_key_process_independent_data,
		    ptrauth_blend_discriminator(&kiovp->iov_base,
		    kiovp->iov_len));
	}
#else
	return kiovp->iov_base;
#endif
}
134
/*
 * kiovp_set_base - store a base address into a kernel iovec.
 *
 * With pointer authentication enabled, sign 'addr' with a discriminator
 * blended from the address of the iov_base field and the iovec's current
 * iov_len; callers that change iov_len must re-sign the base afterwards
 * (see uio_setcurriovlen).  Zero is stored unsigned so emptiness checks
 * remain plain comparisons.  Without ptrauth this is a plain store.
 */
__attribute__((always_inline))
static void
kiovp_set_base(struct kern_iovec *kiovp, u_int64_t addr)
{
#if __has_feature(ptrauth_calls)
	if (addr == 0) {
		kiovp->iov_base = 0;
	} else {
		kiovp->iov_base = (u_int64_t)ptrauth_sign_unauthenticated(
			(void *)addr, ptrauth_key_process_independent_data,
			ptrauth_blend_discriminator(&kiovp->iov_base,
			kiovp->iov_len));
	}
#else
	kiovp->iov_base = addr;
#endif
}
152
153 static struct kern_iovec *
uio_kiovp(uio_t uio)154 uio_kiovp(uio_t uio)
155 {
156 if (!UIO_IS_SYS_SPACE(uio)) {
157 panic("%s: uio is not sys space", __func__);
158 }
159
160 return (struct kern_iovec *)uio->uio_iovs;
161 }
162
163 static struct user_iovec *
uio_uiovp(uio_t uio)164 uio_uiovp(uio_t uio)
165 {
166 if (!UIO_IS_USER_SPACE(uio)) {
167 panic("%s: uio is not user space", __func__);
168 }
169
170 return (struct user_iovec *)uio->uio_iovs;
171 }
172
173 static void *
uio_advance(uio_t uio)174 uio_advance(uio_t uio)
175 {
176 if (UIO_IS_USER_SPACE(uio)) {
177 uio->uio_iovs = (void *)((uintptr_t)uio->uio_iovs + sizeof(struct user_iovec));
178 } else {
179 uio->uio_iovs = (void *)((uintptr_t)uio->uio_iovs + sizeof(struct kern_iovec));
180 }
181
182 return uio->uio_iovs;
183 }
184
185 /*
186 * Returns: 0 Success
187 * uiomove64:EFAULT
188 *
189 * Notes: The first argument should be a caddr_t, but const poisoning
190 * for typedef'ed types doesn't work in gcc.
191 */
192 int
uiomove(const char * cp,int n,uio_t uio)193 uiomove(const char * cp, int n, uio_t uio)
194 {
195 return uiomove64((const addr64_t)(uintptr_t)cp, n, uio);
196 }
197
/*
 * uiomove64 - move up to 'n' bytes between the address 'c_cp' (kernel
 * virtual, or physical for the UIO_PHYS_* space types) and the buffers
 * described by 'uio'.  Direction follows uio_rw: UIO_READ copies from
 * c_cp into the uio's buffers, UIO_WRITE copies from the uio's buffers
 * to c_cp.  The uio is advanced (offset, residual, iovec cursor) by the
 * number of bytes moved.
 *
 * Returns: 0 Success
 * EFAULT
 * copyout:EFAULT
 * copyin:EFAULT
 * copywithin:EFAULT
 * copypv:EFAULT
 */
int
uiomove64(const addr64_t c_cp, int n, struct uio *uio)
{
	addr64_t cp = c_cp;
	uint64_t acnt;          /* bytes moved on this pass */
	int error = 0;
	struct kern_iovec *kiovp;
	struct user_iovec *uiovp;

#if DIAGNOSTIC
	if (uio->uio_rw != UIO_READ && uio->uio_rw != UIO_WRITE) {
		panic("uiomove: mode");
	}
#endif

#if LP64_DEBUG
	if (IS_VALID_UIO_SEGFLG(uio->uio_segflg) == 0) {
		panic("invalid uio_segflg");
	}
#endif /* LP64_DEBUG */

	while (n > 0 && uio_resid(uio)) {
		/* a count of 0 only skips over leading zero-length iovecs */
		uio_update(uio, 0);
		acnt = uio_curriovlen(uio);
		if (acnt == 0) {
			continue;
		}
		/* clamp this pass to the caller's remaining byte count */
		if (n > 0 && acnt > (uint64_t)n) {
			acnt = n;
		}

		switch ((int) uio->uio_segflg) {
		case UIO_USERSPACE64:
		case UIO_USERISPACE64:
		case UIO_USERSPACE32:
		case UIO_USERISPACE32:
		case UIO_USERSPACE:
		case UIO_USERISPACE:
			/* user virtual space: copyin/copyout across the boundary */
			uiovp = uio_uiovp(uio);

			// LP64 - 3rd argument in debug code is 64 bit, expected to be 32 bit
			if (uio->uio_rw == UIO_READ) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
				    (int)cp, (uintptr_t)uiovp->iov_base, acnt, 0, 0);

				error = copyout( CAST_DOWN(caddr_t, cp), uiovp->iov_base, (size_t)acnt );

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
				    (int)cp, (uintptr_t)uiovp->iov_base, acnt, 0, 0);
			} else {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
				    (uintptr_t)uiovp->iov_base, (int)cp, acnt, 0, 0);

				error = copyin(uiovp->iov_base, CAST_DOWN(caddr_t, cp), (size_t)acnt);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
				    (uintptr_t)uiovp->iov_base, (int)cp, acnt, 0, 0);
			}
			if (error) {
				return error;
			}
			break;

		case UIO_SYSSPACE32:
		case UIO_SYSSPACE:
			/* both sides are kernel virtual: plain in-kernel copy */
			kiovp = uio_kiovp(uio);

			if (uio->uio_rw == UIO_READ) {
				error = copywithin(CAST_DOWN(caddr_t, cp), CAST_DOWN(caddr_t, kiovp_get_base(kiovp)),
				    (size_t)acnt);
			} else {
				error = copywithin(CAST_DOWN(caddr_t, kiovp_get_base(kiovp)), CAST_DOWN(caddr_t, cp),
				    (size_t)acnt);
			}
			break;

		case UIO_PHYS_USERSPACE64:
		case UIO_PHYS_USERSPACE32:
		case UIO_PHYS_USERSPACE:
			/* cp is a physical address; copypv takes an unsigned int count */
			acnt = MIN(acnt, UINT_MAX);
			uiovp = uio_uiovp(uio);

			if (uio->uio_rw == UIO_READ) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
				    (int)cp, (uintptr_t)uiovp->iov_base, acnt, 1, 0);

				error = copypv((addr64_t)cp, uiovp->iov_base, (unsigned int)acnt, cppvPsrc | cppvNoRefSrc);
				if (error) {    /* Copy physical to virtual */
					error = EFAULT;
				}

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
				    (int)cp, (uintptr_t)uiovp->iov_base, acnt, 1, 0);
			} else {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
				    (uintptr_t)uiovp->iov_base, (int)cp, acnt, 1, 0);

				error = copypv(uiovp->iov_base, (addr64_t)cp, (unsigned int)acnt, cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error) {    /* Copy virtual to physical */
					error = EFAULT;
				}

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
				    (uintptr_t)uiovp->iov_base, (int)cp, acnt, 1, 0);
			}
			if (error) {
				return error;
			}
			break;

		case UIO_PHYS_SYSSPACE:
			/* physical <-> kernel virtual; cppvKmap maps through the kernel */
			acnt = MIN(acnt, UINT_MAX);
			kiovp = uio_kiovp(uio);

			if (uio->uio_rw == UIO_READ) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
				    (int)cp, (uintptr_t)kiovp_get_base(kiovp), acnt, 2, 0);

				error = copypv((addr64_t)cp, (addr64_t)kiovp_get_base(kiovp), (unsigned int)acnt, cppvKmap | cppvPsrc | cppvNoRefSrc);
				if (error) {    /* Copy physical to virtual */
					error = EFAULT;
				}

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
				    (int)cp, (uintptr_t)kiovp_get_base(kiovp), acnt, 2, 0);
			} else {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
				    (uintptr_t)kiovp_get_base(kiovp), (int)cp, acnt, 2, 0);

				error = copypv((addr64_t)kiovp_get_base(kiovp), (addr64_t)cp, (unsigned int)acnt, cppvKmap | cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error) {    /* Copy virtual to physical */
					error = EFAULT;
				}

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
				    (uintptr_t)kiovp_get_base(kiovp), (int)cp, acnt, 2, 0);
			}
			if (error) {
				return error;
			}
			break;

		default:
			break;
		}
		/* consume acnt bytes from the uio and advance our source cursor */
		uio_update(uio, (user_size_t)acnt);
		cp += acnt;
		n -= acnt;
	}
	return error;
}
357
/*
 * ureadc - give the next character to the user as the result of a read.
 * Stores the low byte of 'c' at the uio's current iovec base and advances
 * the uio by one byte.  Panics on an exhausted or malformed uio.
 *
 * Returns: 0 on success, EFAULT if the user-space store faults.
 */
int
ureadc(int c, struct uio *uio)
{
	struct kern_iovec *kiovp;
	struct user_iovec *uiovp;

	if (uio_resid(uio) <= 0) {
		panic("ureadc: non-positive resid");
	}
	/* a count of 0 only skips over leading zero-length iovecs */
	uio_update(uio, 0);
	if (uio->uio_iovcnt == 0) {
		panic("ureadc: non-positive iovcnt");
	}
	if (uio_curriovlen(uio) <= 0) {
		panic("ureadc: non-positive iovlen");
	}

	switch ((int) uio->uio_segflg) {
	case UIO_USERSPACE32:
	case UIO_USERSPACE:
	case UIO_USERISPACE32:
	case UIO_USERISPACE:
	case UIO_USERSPACE64:
	case UIO_USERISPACE64:
		/* user space: store the byte with subyte (returns < 0 on fault) */
		uiovp = uio_uiovp(uio);

		if (subyte((user_addr_t)uiovp->iov_base, c) < 0) {
			return EFAULT;
		}
		break;

	case UIO_SYSSPACE32:
	case UIO_SYSSPACE:
		/* kernel space: direct store through the (authenticated) base */
		kiovp = uio_kiovp(uio);
		*(CAST_DOWN(caddr_t, kiovp_get_base(kiovp))) = (char)c;
		break;

	default:
		break;
	}
	/* consume the byte: advances base/offset and decrements resid */
	uio_update(uio, 1);
	return 0;
}
404
405 LIST_HEAD(generic_hash_head, generic);
406
407 /*
408 * General routine to allocate a hash table.
409 */
410 void *
hashinit(int elements,int type __unused,u_long * hashmask)411 hashinit(int elements, int type __unused, u_long *hashmask)
412 {
413 struct generic_hash_head *hashtbl;
414 vm_size_t hashsize;
415
416 if (elements <= 0) {
417 panic("hashinit: bad cnt");
418 }
419
420 hashsize = 1UL << (fls(elements) - 1);
421 hashtbl = kheap_alloc(KHEAP_DEFAULT, hashsize * sizeof(*hashtbl),
422 Z_WAITOK | Z_ZERO);
423 if (hashtbl != NULL) {
424 *hashmask = hashsize - 1;
425 }
426 return hashtbl;
427 }
428
429 void
hashdestroy(void * hash,int type __unused,u_long hashmask)430 hashdestroy(void *hash, int type __unused, u_long hashmask)
431 {
432 struct generic_hash_head *hashtbl = hash;
433 assert(powerof2(hashmask + 1));
434 kheap_free(KHEAP_DEFAULT, hashtbl, (hashmask + 1) * sizeof(*hashtbl));
435 }
436
437 /*
438 * uio_resid - return the residual IO value for the given uio_t
439 */
440 user_ssize_t
uio_resid(uio_t a_uio)441 uio_resid( uio_t a_uio )
442 {
443 #if DEBUG
444 if (a_uio == NULL) {
445 printf("%s :%d - invalid uio_t\n", __FILE__, __LINE__);
446 }
447 /* if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */
448 /* panic("invalid uio_segflg"); */
449 /* } */
450 #endif /* DEBUG */
451
452 /* return 0 if there are no active iovecs */
453 if (a_uio == NULL) {
454 return 0;
455 }
456
457 return a_uio->uio_resid_64;
458 }
459
460 /*
461 * uio_setresid - set the residual IO value for the given uio_t
462 */
463 void
uio_setresid(uio_t a_uio,user_ssize_t a_value)464 uio_setresid( uio_t a_uio, user_ssize_t a_value )
465 {
466 #if DEBUG
467 if (a_uio == NULL) {
468 panic("invalid uio_t");
469 }
470 /* if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */
471 /* panic("invalid uio_segflg"); */
472 /* } */
473 #endif /* DEBUG */
474
475 if (a_uio == NULL) {
476 return;
477 }
478
479 a_uio->uio_resid_64 = a_value;
480 return;
481 }
482
483 /*
484 * uio_curriovbase - return the base address of the current iovec associated
485 * with the given uio_t. May return 0.
486 */
487 user_addr_t
uio_curriovbase(uio_t a_uio)488 uio_curriovbase( uio_t a_uio )
489 {
490 struct kern_iovec *kiovp;
491 struct user_iovec *uiovp;
492
493 #if LP64_DEBUG
494 if (a_uio == NULL) {
495 panic("invalid uio_t");
496 }
497 #endif /* LP64_DEBUG */
498
499 if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
500 return 0;
501 }
502
503 if (UIO_IS_USER_SPACE(a_uio)) {
504 uiovp = uio_uiovp(a_uio);
505 return uiovp->iov_base;
506 }
507
508 kiovp = uio_kiovp(a_uio);
509 return (user_addr_t)kiovp_get_base(kiovp);
510 }
511
512 /*
513 * uio_curriovlen - return the length value of the current iovec associated
514 * with the given uio_t.
515 */
516 user_size_t
uio_curriovlen(uio_t a_uio)517 uio_curriovlen( uio_t a_uio )
518 {
519 struct kern_iovec *kiovp;
520 struct user_iovec *uiovp;
521
522 #if LP64_DEBUG
523 if (a_uio == NULL) {
524 panic("invalid uio_t");
525 }
526 #endif /* LP64_DEBUG */
527
528 if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
529 return 0;
530 }
531
532 if (UIO_IS_USER_SPACE(a_uio)) {
533 uiovp = uio_uiovp(a_uio);
534 return uiovp->iov_len;
535 }
536
537 kiovp = uio_kiovp(a_uio);
538 return (user_size_t)kiovp->iov_len;
539 }
540
/*
 * uio_setcurriovlen - set the length of the current iovec of the
 * given uio_t.  A NULL uio is ignored (panics under LP64_DEBUG).
 */
__private_extern__ void
uio_setcurriovlen( uio_t a_uio, user_size_t a_value )
{
	struct kern_iovec *kiovp;
	struct user_iovec *uiovp;
	u_int64_t base;

#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("invalid uio_t");
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return;
	}

	if (UIO_IS_USER_SPACE(a_uio)) {
		uiovp = uio_uiovp(a_uio);
		uiovp->iov_len = a_value;
	} else {
#if LP64_DEBUG
		if (a_value > 0xFFFFFFFFull) {
			panic("invalid a_value");
		}
#endif /* LP64_DEBUG */
		kiovp = uio_kiovp(a_uio);
		/*
		 * With pointer authentication, iov_len is blended into the
		 * discriminator that signs iov_base (see kiovp_get/set_base).
		 * So: authenticate the base under the OLD length, store the
		 * new length, then re-sign the base under the NEW length.
		 * This ordering is load-bearing.
		 */
		base = kiovp_get_base(kiovp);
		kiovp->iov_len = (size_t)a_value;
		kiovp_set_base(kiovp, base);
	}
	return;
}
578
579 /*
580 * uio_iovcnt - return count of active iovecs for the given uio_t
581 */
582 int
uio_iovcnt(uio_t a_uio)583 uio_iovcnt( uio_t a_uio )
584 {
585 #if LP64_DEBUG
586 if (a_uio == NULL) {
587 panic("invalid uio_t");
588 }
589 #endif /* LP64_DEBUG */
590
591 if (a_uio == NULL) {
592 return 0;
593 }
594
595 return a_uio->uio_iovcnt;
596 }
597
598 /*
599 * uio_offset - return the current offset value for the given uio_t
600 */
601 off_t
uio_offset(uio_t a_uio)602 uio_offset( uio_t a_uio )
603 {
604 #if LP64_DEBUG
605 if (a_uio == NULL) {
606 panic("invalid uio_t");
607 }
608 #endif /* LP64_DEBUG */
609
610 if (a_uio == NULL) {
611 return 0;
612 }
613 return a_uio->uio_offset;
614 }
615
616 /*
617 * uio_setoffset - set the current offset value for the given uio_t
618 */
619 void
uio_setoffset(uio_t a_uio,off_t a_offset)620 uio_setoffset( uio_t a_uio, off_t a_offset )
621 {
622 #if LP64_DEBUG
623 if (a_uio == NULL) {
624 panic("invalid uio_t");
625 }
626 #endif /* LP64_DEBUG */
627
628 if (a_uio == NULL) {
629 return;
630 }
631 a_uio->uio_offset = a_offset;
632 return;
633 }
634
635 /*
636 * uio_rw - return the read / write flag for the given uio_t
637 */
638 int
uio_rw(uio_t a_uio)639 uio_rw( uio_t a_uio )
640 {
641 #if LP64_DEBUG
642 if (a_uio == NULL) {
643 panic("invalid uio_t");
644 }
645 #endif /* LP64_DEBUG */
646
647 if (a_uio == NULL) {
648 return -1;
649 }
650 return a_uio->uio_rw;
651 }
652
653 /*
654 * uio_setrw - set the read / write flag for the given uio_t
655 */
656 void
uio_setrw(uio_t a_uio,int a_value)657 uio_setrw( uio_t a_uio, int a_value )
658 {
659 if (a_uio == NULL) {
660 #if LP64_DEBUG
661 panic("invalid uio_t");
662 #endif /* LP64_DEBUG */
663 return;
664 }
665
666 #if LP64_DEBUG
667 if (!(a_value == UIO_READ || a_value == UIO_WRITE)) {
668 panic("invalid a_value");
669 }
670 #endif /* LP64_DEBUG */
671
672 if (a_value == UIO_READ || a_value == UIO_WRITE) {
673 a_uio->uio_rw = a_value;
674 }
675 return;
676 }
677
678 /*
679 * uio_isuserspace - return non zero value if the address space
680 * flag is for a user address space (could be 32 or 64 bit).
681 */
682 int
uio_isuserspace(uio_t a_uio)683 uio_isuserspace( uio_t a_uio )
684 {
685 if (a_uio == NULL) {
686 #if LP64_DEBUG
687 panic("invalid uio_t");
688 #endif /* LP64_DEBUG */
689 return 0;
690 }
691
692 if (UIO_SEG_IS_USER_SPACE(a_uio->uio_segflg)) {
693 return 1;
694 }
695 return 0;
696 }
697
698 static void
uio_init(uio_t uio,int a_iovcount,off_t a_offset,int a_spacetype,int a_iodirection,void * iovecs)699 uio_init(uio_t uio,
700 int a_iovcount, /* number of iovecs */
701 off_t a_offset, /* current offset */
702 int a_spacetype, /* type of address space */
703 int a_iodirection, /* read or write flag */
704 void *iovecs) /* pointer to iovec array */
705 {
706 assert(uio != NULL);
707 assert(a_iovcount >= 0 && a_iovcount <= UIO_MAXIOV);
708 assert(IS_VALID_UIO_SEGFLG(a_spacetype));
709 assert(a_iodirection == UIO_READ || a_iodirection == UIO_WRITE);
710
711 /*
712 * we use uio_segflg to indicate if the uio_t is the new format or
713 * old (pre LP64 support) legacy format
714 * This switch statement should canonicalize incoming space type
715 * to one of UIO_USERSPACE32/64, UIO_PHYS_USERSPACE32/64, or
716 * UIO_SYSSPACE/UIO_PHYS_SYSSPACE
717 */
718 switch (a_spacetype) {
719 case UIO_USERSPACE:
720 uio->uio_segflg = UIO_USERSPACE32;
721 break;
722 case UIO_SYSSPACE32:
723 uio->uio_segflg = UIO_SYSSPACE;
724 break;
725 case UIO_PHYS_USERSPACE:
726 uio->uio_segflg = UIO_PHYS_USERSPACE32;
727 break;
728 default:
729 uio->uio_segflg = a_spacetype;
730 break;
731 }
732
733 uio->uio_iovbase = iovecs;
734 uio->uio_iovs = iovecs;
735 uio->uio_max_iovs = a_iovcount;
736 uio->uio_offset = a_offset;
737 uio->uio_rw = a_iodirection;
738 uio->uio_flags = UIO_FLAGS_INITED;
739 }
740
741 static void *
uio_alloc_iov_array(int a_spacetype,size_t a_iovcount)742 uio_alloc_iov_array(int a_spacetype, size_t a_iovcount)
743 {
744 if (IS_SYS_SPACE_SEGFLG(a_spacetype) || IS_PHYS_SEGFLG(a_spacetype)) {
745 return kalloc_type(struct kern_iovec, a_iovcount, Z_WAITOK | Z_ZERO);
746 }
747
748 size_t bytes = UIO_SIZEOF_IOVS(a_iovcount);
749 return kalloc_data(bytes, Z_WAITOK | Z_ZERO);
750 }
751
752 static void
uio_free_iov_array(int a_spacetype,void * iovs,size_t a_iovcount)753 uio_free_iov_array(int a_spacetype, void *iovs, size_t a_iovcount)
754 {
755 if (IS_SYS_SPACE_SEGFLG(a_spacetype) || IS_PHYS_SEGFLG(a_spacetype)) {
756 kfree_type(struct kern_iovec, a_iovcount, iovs);
757 } else {
758 size_t bytes = UIO_SIZEOF_IOVS(a_iovcount);
759 kfree_data(iovs, bytes);
760 }
761 }
762
763 /*
764 * uio_create - create an uio_t.
765 * Space is allocated to hold up to a_iovcount number of iovecs. The uio_t
766 * is not fully initialized until all iovecs are added using uio_addiov calls.
767 * a_iovcount is the maximum number of iovecs you may add.
768 */
769 uio_t
uio_create(int a_iovcount,off_t a_offset,int a_spacetype,int a_iodirection)770 uio_create( int a_iovcount, /* number of iovecs */
771 off_t a_offset, /* current offset */
772 int a_spacetype, /* type of address space */
773 int a_iodirection ) /* read or write flag */
774 {
775 uio_t uio;
776 void *iovecs;
777
778 if (a_iovcount < 0 || a_iovcount > UIO_MAXIOV) {
779 return NULL;
780 }
781
782 uio = kalloc_type(struct uio, Z_WAITOK | Z_ZERO | Z_NOFAIL);
783 iovecs = uio_alloc_iov_array(a_spacetype, (size_t)a_iovcount);
784
785 uio_init(uio, a_iovcount, a_offset, a_spacetype, a_iodirection, iovecs);
786
787 /* leave a note that we allocated this uio_t */
788 uio->uio_flags |= UIO_FLAGS_WE_ALLOCED;
789 #if DEBUG
790 os_atomic_inc(&uio_t_count, relaxed);
791 #endif
792
793 return uio;
794 }
795
796
797 /*
798 * uio_createwithbuffer - create an uio_t.
799 * Create a uio_t using the given buffer. The uio_t
800 * is not fully initialized until all iovecs are added using uio_addiov calls.
801 * a_iovcount is the maximum number of iovecs you may add.
802 * This call may fail if the given buffer is not large enough.
803 */
804 __private_extern__ uio_t
uio_createwithbuffer(int a_iovcount,off_t a_offset,int a_spacetype,int a_iodirection,void * a_buf_p,size_t a_buffer_size)805 uio_createwithbuffer( int a_iovcount, /* number of iovecs */
806 off_t a_offset, /* current offset */
807 int a_spacetype, /* type of address space */
808 int a_iodirection, /* read or write flag */
809 void *a_buf_p, /* pointer to a uio_t buffer */
810 size_t a_buffer_size ) /* size of uio_t buffer */
811 {
812 uio_t uio = (uio_t) a_buf_p;
813 void *iovecs = NULL;
814
815 if (a_iovcount < 0 || a_iovcount > UIO_MAXIOV) {
816 return NULL;
817 }
818
819 if (a_buffer_size < UIO_SIZEOF(a_iovcount)) {
820 return NULL;
821 }
822
823 if (a_iovcount > 0) {
824 iovecs = (uint8_t *)uio + sizeof(struct uio);
825 }
826
827 bzero(a_buf_p, a_buffer_size);
828 uio_init(uio, a_iovcount, a_offset, a_spacetype, a_iodirection, iovecs);
829
830 return uio;
831 }
832
833 /*
834 * uio_spacetype - return the address space type for the given uio_t
835 */
836 __private_extern__ int
uio_spacetype(uio_t a_uio)837 uio_spacetype( uio_t a_uio )
838 {
839 if (a_uio == NULL) {
840 #if LP64_DEBUG
841 panic("invalid uio_t");
842 #endif /* LP64_DEBUG */
843 return -1;
844 }
845
846 return a_uio->uio_segflg;
847 }
848
849 /*
850 * uio_iovsaddr - get the address of the iovec array for the given uio_t.
851 * This returns the location of the iovecs within the uio.
852 * NOTE - for compatibility mode we just return the current value in uio_iovs
853 * which will increase as the IO is completed and is NOT embedded within the
854 * uio, it is a seperate array of one or more iovecs.
855 */
856 __private_extern__ struct user_iovec *
uio_iovsaddr(uio_t a_uio)857 uio_iovsaddr( uio_t a_uio )
858 {
859 struct user_iovec * my_addr;
860
861 if (a_uio == NULL) {
862 return NULL;
863 }
864
865 if (UIO_SEG_IS_USER_SPACE(a_uio->uio_segflg)) {
866 /* we need this for compatibility mode. */
867 my_addr = uio_uiovp(a_uio);
868 } else {
869 #if DEBUG
870 panic("uio_iovsaddr called for UIO_SYSSPACE request");
871 #endif
872 my_addr = 0;
873 }
874 return my_addr;
875 }
876
/*
 * uio_reset - reset an uio_t.
 * Reset the given uio_t to initial values.  The uio_t is not fully initialized
 * until all iovecs are added using uio_addiov calls.
 * The a_iovcount value passed in the uio_create is the maximum number of
 * iovecs you may add.
 */
void
uio_reset( uio_t a_uio,
    off_t a_offset,                     /* current offset */
    int a_spacetype,                    /* type of address space */
    int a_iodirection )                 /* read or write flag */
{
	int my_max_iovs;
	u_int32_t my_old_flags;
	void *my_iovs;

#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("could not allocate uio_t");
	}
	if (!IS_VALID_UIO_SEGFLG(a_spacetype)) {
		panic("invalid address space type");
	}
	if (!(a_iodirection == UIO_READ || a_iodirection == UIO_WRITE)) {
		panic("invalid IO direction flag");
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return;
	}

	/* preserve allocation flags and the iovec storage across the wipe */
	my_old_flags = a_uio->uio_flags;
	my_max_iovs = a_uio->uio_max_iovs;
	my_iovs = a_uio->uio_iovbase;

	bzero(a_uio, sizeof(*a_uio));
	if (my_iovs != NULL) {
		/*
		 * NOTE(review): assumes UIO_SIZEOF_IOVS(n) matches the size of
		 * the iovec array for both user and kernel iovec layouts
		 * (uio_alloc_iov_array sizes kernel arrays by kern_iovec) --
		 * confirm the two sizes agree.
		 */
		bzero(my_iovs, UIO_SIZEOF_IOVS(my_max_iovs));
	}

	/*
	 * we use uio_segflg to indicate if the uio_t is the new format or
	 * old (pre LP64 support) legacy format
	 * This switch statement should canonicalize incoming space type
	 * to one of UIO_USERSPACE32/64, UIO_PHYS_USERSPACE32/64, or
	 * UIO_SYSSPACE/UIO_PHYS_SYSSPACE
	 */
	switch (a_spacetype) {
	case UIO_USERSPACE:
		a_uio->uio_segflg = UIO_USERSPACE32;
		break;
	case UIO_SYSSPACE32:
		a_uio->uio_segflg = UIO_SYSSPACE;
		break;
	case UIO_PHYS_USERSPACE:
		a_uio->uio_segflg = UIO_PHYS_USERSPACE32;
		break;
	default:
		a_uio->uio_segflg = a_spacetype;
		break;
	}

	/* restore the preserved fields; cursor rewinds to the array base */
	a_uio->uio_iovs = my_iovs;
	a_uio->uio_iovbase = my_iovs;
	a_uio->uio_max_iovs = my_max_iovs;
	a_uio->uio_offset = a_offset;
	a_uio->uio_rw = a_iodirection;
	a_uio->uio_flags = my_old_flags;

	return;
}
950
/*
 * uio_free - free a uio_t allocated via uio_create (UIO_FLAGS_WE_ALLOCED
 * set); this also frees the associated iovec array.  A uio created with
 * uio_createwithbuffer is caller-owned and is deliberately left alone.
 */
void
uio_free( uio_t a_uio )
{
#if DEBUG
	if (a_uio == NULL) {
		panic("passing NULL uio_t");
	}
#endif /* DEBUG */

	if (a_uio != NULL && (a_uio->uio_flags & UIO_FLAGS_WE_ALLOCED) != 0) {
#if DEBUG
		/* balance the os_atomic_inc in uio_create; underflow means a double free */
		if (os_atomic_dec_orig(&uio_t_count, relaxed) == 0) {
			panic("uio_t_count underflow");
		}
#endif
		/* sanity-check before sizing the iovec free */
		if (__improbable(a_uio->uio_max_iovs < 0 || a_uio->uio_max_iovs > UIO_MAXIOV)) {
			panic("%s: bad uio_max_iovs", __func__);
		}

		uio_free_iov_array(a_uio->uio_segflg, a_uio->uio_iovbase,
		    (size_t)a_uio->uio_max_iovs);

		kfree_type(struct uio, a_uio);
	}
}
980
/*
 * uio_addiov - add an iovec to the given uio_t.  You may call this up to
 * the a_iovcount number that was passed to uio_create.  This call will
 * increment the residual IO count as iovecs are added to the uio_t.
 * returns 0 if add was successful else non zero.
 */
int
uio_addiov( uio_t a_uio, user_addr_t a_baseaddr, user_size_t a_length )
{
	int i;
	user_size_t resid;
	struct kern_iovec *kiovp;
	struct user_iovec *uiovp;

	if (a_uio == NULL) {
#if DEBUG
		panic("invalid uio_t");
#endif
		return -1;
	}

	/* reject lengths that would overflow the residual count */
	if (os_add_overflow(a_length, a_uio->uio_resid_64, &resid)) {
#if DEBUG
		panic("invalid length %lu", (unsigned long)a_length);
#endif
		return -1;
	}

	/*
	 * Scan for the first empty slot (len == 0 && base == 0 -- the state
	 * left by the zeroing in uio_create/uio_reset) and claim it.
	 * Fails with -1 when every slot is in use.
	 */
	if (UIO_IS_USER_SPACE(a_uio)) {
		uiovp = uio_uiovp(a_uio);
		for (i = 0; i < a_uio->uio_max_iovs; i++) {
			if (uiovp[i].iov_len == 0 &&
			    uiovp[i].iov_base == 0) {
				uiovp[i].iov_len = a_length;
				uiovp[i].iov_base = a_baseaddr;
				a_uio->uio_iovcnt++;
				a_uio->uio_resid_64 = resid;
				return 0;
			}
		}
	} else {
		kiovp = uio_kiovp(a_uio);
		for (i = 0; i < a_uio->uio_max_iovs; i++) {
			if (kiovp[i].iov_len == 0 &&
			    kiovp_get_base(&kiovp[i]) == 0) {
				kiovp[i].iov_len = (u_int64_t)a_length;
				/* set len before base: len feeds the ptrauth discriminator */
				kiovp_set_base(&kiovp[i], (u_int64_t)a_baseaddr);
				a_uio->uio_iovcnt++;
				a_uio->uio_resid_64 = resid;
				return 0;
			}
		}
	}

	return -1;
}
1037
1038 /*
1039 * uio_getiov - get iovec data associated with the given uio_t. Use
1040 * a_index to iterate over each iovec (0 to (uio_iovcnt(uio_t) - 1)).
1041 * a_baseaddr_p and a_length_p may be NULL.
1042 * returns -1 when a_index is >= uio_t.uio_iovcnt or invalid uio_t.
1043 * returns 0 when data is returned.
1044 */
1045 int
uio_getiov(uio_t a_uio,int a_index,user_addr_t * a_baseaddr_p,user_size_t * a_length_p)1046 uio_getiov( uio_t a_uio,
1047 int a_index,
1048 user_addr_t * a_baseaddr_p,
1049 user_size_t * a_length_p )
1050 {
1051 struct kern_iovec *kiovp;
1052 struct user_iovec *uiovp;
1053
1054 if (a_uio == NULL) {
1055 #if DEBUG
1056 panic("invalid uio_t");
1057 #endif /* DEBUG */
1058 return -1;
1059 }
1060 if (a_index < 0 || a_index >= a_uio->uio_iovcnt) {
1061 return -1;
1062 }
1063
1064 if (UIO_IS_USER_SPACE(a_uio)) {
1065 uiovp = uio_uiovp(a_uio);
1066
1067 if (a_baseaddr_p != NULL) {
1068 *a_baseaddr_p = uiovp[a_index].iov_base;
1069 }
1070 if (a_length_p != NULL) {
1071 *a_length_p = uiovp[a_index].iov_len;
1072 }
1073 } else {
1074 kiovp = uio_kiovp(a_uio);
1075
1076 if (a_baseaddr_p != NULL) {
1077 *a_baseaddr_p = (user_addr_t)kiovp_get_base(&kiovp[a_index]);
1078 }
1079 if (a_length_p != NULL) {
1080 *a_length_p = (user_size_t)kiovp[a_index].iov_len;
1081 }
1082 }
1083
1084 return 0;
1085 }
1086
/*
 * uio_calculateresid - runs through all iovecs associated with this
 * uio_t and calculates (and sets) the residual IO count.  Also activates
 * all max_iovs entries and advances the cursor past any leading
 * zero-length iovecs.  Returns EINVAL for a NULL uio or when any iovec
 * length (or the running total) exceeds LONG_MAX; 0 otherwise.
 */
__private_extern__ int
uio_calculateresid( uio_t a_uio )
{
	int i;
	u_int64_t resid = 0;
	struct kern_iovec *kiovp;
	struct user_iovec *uiovp;

	if (a_uio == NULL) {
#if LP64_DEBUG
		panic("invalid uio_t");
#endif /* LP64_DEBUG */
		return EINVAL;
	}

	/* every slot becomes active; empty ones contribute nothing below */
	a_uio->uio_iovcnt = a_uio->uio_max_iovs;
	if (UIO_IS_USER_SPACE(a_uio)) {
		uiovp = uio_uiovp(a_uio);
		a_uio->uio_resid_64 = 0;
		for (i = 0; i < a_uio->uio_max_iovs; i++) {
			/* only slots with both a base and a length count */
			if (uiovp[i].iov_len != 0 && uiovp[i].iov_base != 0) {
				if (uiovp[i].iov_len > LONG_MAX) {
					return EINVAL;
				}
				resid += uiovp[i].iov_len;
				if (resid > LONG_MAX) {
					return EINVAL;
				}
			}
		}
		a_uio->uio_resid_64 = (user_size_t)resid;

		/* position to first non zero length iovec (4235922) */
		while (a_uio->uio_iovcnt > 0 && uiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				uiovp = uio_advance(a_uio);
			}
		}
	} else {
		kiovp = uio_kiovp(a_uio);
		a_uio->uio_resid_64 = 0;
		for (i = 0; i < a_uio->uio_max_iovs; i++) {
			/* only slots with both a base and a length count */
			if (kiovp[i].iov_len != 0 && kiovp_get_base(&kiovp[i]) != 0) {
				if (kiovp[i].iov_len > LONG_MAX) {
					return EINVAL;
				}
				resid += kiovp[i].iov_len;
				if (resid > LONG_MAX) {
					return EINVAL;
				}
			}
		}
		a_uio->uio_resid_64 = (user_size_t)resid;

		/* position to first non zero length iovec (4235922) */
		while (a_uio->uio_iovcnt > 0 && kiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				kiovp = uio_advance(a_uio);
			}
		}
	}

	return 0;
}
1157
1158 /*
1159 * uio_update - update the given uio_t for a_count of completed IO.
1160 * This call decrements the current iovec length and residual IO value
1161 * and increments the current iovec base address and offset value.
1162 * If the current iovec length is 0 then advance to the next
1163 * iovec (if any).
 * If the a_count passed in is 0, then only do the advancement
1165 * over any 0 length iovec's.
1166 */
void
uio_update( uio_t a_uio, user_size_t a_count )
{
	struct kern_iovec *kiovp;
	struct user_iovec *uiovp;

#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("invalid uio_t");
	}
	if (UIO_IS_32_BIT_SPACE(a_uio) && a_count > 0xFFFFFFFFull) {
		panic("invalid count value ");
	}
#endif /* LP64_DEBUG */

	/* silently ignore a NULL uio or one with no iovecs left */
	if (a_uio == NULL || a_uio->uio_iovcnt < 1) {
		return;
	}

	if (UIO_IS_USER_SPACE(a_uio)) {
		uiovp = uio_uiovp(a_uio);

		/*
		 * if a_count == 0, then we are asking to skip over
		 * any empty iovs
		 */
		if (a_count) {
			/* consume at most the current iovec's remaining length */
			if (a_count > uiovp->iov_len) {
				uiovp->iov_base += uiovp->iov_len;
				uiovp->iov_len = 0;
			} else {
				uiovp->iov_base += a_count;
				uiovp->iov_len -= a_count;
			}
			/* clamp the offset/residual update to the remaining resid */
			if (a_count > (user_size_t)a_uio->uio_resid_64) {
				a_uio->uio_offset += a_uio->uio_resid_64;
				a_uio->uio_resid_64 = 0;
			} else {
				a_uio->uio_offset += a_count;
				a_uio->uio_resid_64 -= a_count;
			}
		}
		/*
		 * advance to next iovec if current one is totally consumed
		 */
		while (a_uio->uio_iovcnt > 0 && uiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				uiovp = uio_advance(a_uio);
			}
		}
	} else {
		kiovp = uio_kiovp(a_uio);

		/*
		 * if a_count == 0, then we are asking to skip over
		 * any empty iovs
		 */
		if (a_count) {
			/* kernel iovec bases go through get/set accessors */
			u_int64_t prev_base = kiovp_get_base(kiovp);
			if (a_count > kiovp->iov_len) {
				u_int64_t len = kiovp->iov_len;
				/* zero the length before re-storing the base */
				kiovp->iov_len = 0;
				kiovp_set_base(kiovp, prev_base + len);
			} else {
				kiovp->iov_len -= a_count;
				kiovp_set_base(kiovp, prev_base + a_count);
			}
			/* clamp the offset/residual update to the remaining resid */
			if (a_count > (user_size_t)a_uio->uio_resid_64) {
				a_uio->uio_offset += a_uio->uio_resid_64;
				a_uio->uio_resid_64 = 0;
			} else {
				a_uio->uio_offset += a_count;
				a_uio->uio_resid_64 -= a_count;
			}
		}
		/*
		 * advance to next iovec if current one is totally consumed
		 */
		while (a_uio->uio_iovcnt > 0 && kiovp->iov_len == 0) {
			a_uio->uio_iovcnt--;
			if (a_uio->uio_iovcnt > 0) {
				kiovp = uio_advance(a_uio);
			}
		}
	}
	return;
}
1255
1256 /*
1257 * uio_pushback - undo uncommitted I/O by subtracting from the
 * current base address and offset, and incrementing the residual
1259 * IO. If the UIO was previously exhausted, this call will panic.
1260 * New code should not use this functionality.
1261 */
1262 __private_extern__ void
uio_pushback(uio_t a_uio,user_size_t a_count)1263 uio_pushback( uio_t a_uio, user_size_t a_count )
1264 {
1265 u_int64_t prev_base;
1266 struct kern_iovec *kiovp;
1267 struct user_iovec *uiovp;
1268
1269 #if LP64_DEBUG
1270 if (a_uio == NULL) {
1271 panic("invalid uio_t");
1272 }
1273 if (UIO_IS_32_BIT_SPACE(a_uio) && a_count > 0xFFFFFFFFull) {
1274 panic("invalid count value ");
1275 }
1276 #endif /* LP64_DEBUG */
1277
1278 if (a_uio == NULL || a_count == 0) {
1279 return;
1280 }
1281
1282 if (a_uio->uio_iovcnt < 1) {
1283 panic("Invalid uio for pushback");
1284 }
1285
1286 if (UIO_IS_USER_SPACE(a_uio)) {
1287 uiovp = uio_uiovp(a_uio);
1288 uiovp->iov_base -= a_count;
1289 uiovp->iov_len += a_count;
1290 } else {
1291 kiovp = uio_kiovp(a_uio);
1292 prev_base = kiovp_get_base(kiovp);
1293 kiovp->iov_len += a_count;
1294 kiovp_set_base(kiovp, prev_base - a_count);
1295 }
1296
1297 a_uio->uio_offset -= a_count;
1298 a_uio->uio_resid_64 += a_count;
1299
1300 return;
1301 }
1302
1303
1304 /*
1305 * uio_duplicate - allocate a new uio and make a copy of the given uio_t.
1306 * may return NULL.
1307 */
uio_t
uio_duplicate(uio_t uio)
{
	uio_t new_uio;
	size_t n;
	struct kern_iovec *kiovp;
	struct user_iovec *uiovp;

	/* refuse to duplicate a uio with a corrupt/oversized iovec count */
	if (uio->uio_max_iovs < 0 || uio->uio_max_iovs > UIO_MAXIOV) {
		return NULL;
	}

	/* Z_NOFAIL: kalloc_type cannot return NULL here */
	new_uio = kalloc_type(struct uio, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	*new_uio = *uio; /* shallow copy; iovec array is fixed up below */

	if (new_uio->uio_max_iovs > 0) {
		new_uio->uio_iovbase = uio_alloc_iov_array(new_uio->uio_segflg,
		    (size_t)new_uio->uio_max_iovs);
		new_uio->uio_iovs = new_uio->uio_iovbase;

		/* copy the outstanding iovecs (uio_iovcnt of them) from the
		 * source's current position to the start of the new array */
		n = UIO_SIZEOF_IOVS(new_uio->uio_iovcnt);
		bcopy((const void *)uio->uio_iovs, (void *)new_uio->uio_iovs, n);
		if (UIO_IS_SYS_SPACE(new_uio)) {
			struct kern_iovec *kiovp_old = uio_kiovp(uio);

			kiovp = uio_kiovp(new_uio);

			/*
			 * Kernel iovec bases are stored through accessors, so
			 * re-store each base into the new array explicitly.
			 * NOTE(review): this loop walks uio_max_iovs entries
			 * while the bcopy above covered only uio_iovcnt --
			 * confirm the source uio has not been advanced (i.e.
			 * iovcnt == max_iovs) when this runs.
			 */
			for (n = 0; n < new_uio->uio_max_iovs; ++n) {
				kiovp_set_base(&kiovp[n],
				    kiovp_get_base(&kiovp_old[n]));
			}
		} else {
			uiovp = uio_uiovp(new_uio);
		}

		/* advance to first nonzero iovec */
		for (n = 0; n < new_uio->uio_max_iovs; ++n) {
			if (UIO_IS_USER_SPACE(new_uio)) {
				if (uiovp->iov_len != 0) {
					break;
				}

				uiovp = uio_advance(new_uio);
			} else {
				if (kiovp->iov_len != 0) {
					break;
				}

				kiovp = uio_advance(new_uio);
			}
		}
	} else {
		new_uio->uio_iovs = NULL;
	}

	/* the copy owns its iovec array; uio_free must release both */
	new_uio->uio_flags = UIO_FLAGS_WE_ALLOCED | UIO_FLAGS_INITED;
#if DEBUG
	os_atomic_inc(&uio_t_count, relaxed);
#endif

	return new_uio;
}
1370
1371 int
copyin_user_iovec_array(user_addr_t uaddr,int spacetype,int count,struct user_iovec * dst)1372 copyin_user_iovec_array(user_addr_t uaddr, int spacetype, int count, struct user_iovec *dst)
1373 {
1374 size_t size_of_iovec = (spacetype == UIO_USERSPACE64 ? sizeof(struct user64_iovec) : sizeof(struct user32_iovec));
1375 int error;
1376 int i;
1377
1378 // copyin to the front of "dst", without regard for putting records in the right places
1379 error = copyin(uaddr, dst, count * size_of_iovec);
1380 if (error) {
1381 return error;
1382 }
1383
1384 // now, unpack the entries in reverse order, so we don't overwrite anything
1385 for (i = count - 1; i >= 0; i--) {
1386 if (spacetype == UIO_USERSPACE64) {
1387 struct user64_iovec iovec = ((struct user64_iovec *)dst)[i];
1388 dst[i].iov_base = (user_addr_t)iovec.iov_base;
1389 dst[i].iov_len = (user_size_t)iovec.iov_len;
1390 } else {
1391 struct user32_iovec iovec = ((struct user32_iovec *)dst)[i];
1392 dst[i].iov_base = iovec.iov_base;
1393 dst[i].iov_len = iovec.iov_len;
1394 }
1395 }
1396
1397 return 0;
1398 }
1399