1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1993, 1995
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Rick Macklem at The University of Guelph.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)nfs_vfsops.c 8.12 (Berkeley) 5/20/95
65 * FreeBSD-Id: nfs_vfsops.c,v 1.52 1997/11/12 05:42:21 julian Exp $
66 */
67
68 #include <nfs/nfs_conf.h>
69 #if CONFIG_NFS_CLIENT
70
71 /*
72 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
73 * support for mandatory and extensible security protections. This notice
74 * is included in support of clause 2.2 (b) of the Apple Public License,
75 * Version 2.0.
76 */
77
78 #include <sys/param.h>
79 #include <sys/systm.h>
80 #include <sys/conf.h>
81 #include <sys/ioctl.h>
82 #include <sys/signal.h>
83 #include <sys/proc_internal.h> /* for fs rooting to update rootdir in fdp */
84 #include <sys/kauth.h>
85 #include <sys/vnode_internal.h>
86 #include <sys/malloc.h>
87 #include <sys/kernel.h>
88 #include <sys/sysctl.h>
89 #include <sys/mount_internal.h>
90 #include <sys/kpi_mbuf.h>
91 #include <sys/socket.h>
92 #include <sys/un.h>
93 #include <sys/socketvar.h>
94 #include <sys/fcntl.h>
95 #include <sys/quota.h>
96 #include <sys/priv.h>
97 #include <libkern/OSAtomic.h>
98
99 #include <sys/vm.h>
100 #include <sys/vmparam.h>
101
102 #if !defined(NO_MOUNT_PRIVATE)
103 #include <sys/filedesc.h>
104 #endif /* NO_MOUNT_PRIVATE */
105
106 #include <net/if.h>
107 #include <net/route.h>
108 #include <netinet/in.h>
109
110 #include <nfs/rpcv2.h>
111 #include <nfs/krpc.h>
112 #include <nfs/nfsproto.h>
113 #include <nfs/nfs.h>
114 #include <nfs/nfsnode.h>
115 #include <nfs/nfs_gss.h>
116 #include <nfs/nfsmount.h>
117 #include <nfs/xdr_subs.h>
118 #include <nfs/nfsm_subs.h>
119 #include <nfs/nfs_lock.h>
120
121 #include <miscfs/devfs/devfs.h>
122 #include <pexpert/pexpert.h>
123
124 #define NFS_VFS_DBG(...) NFSCLNT_DBG(NFSCLNT_FAC_VFS, 7, ## __VA_ARGS__)
125
/*
 * NFS client globals
 */

/* Allocation zone for struct nfsmount; ZC_ZFREE_CLEARMEM zeroes memory on free. */
ZONE_DECLARE(nfsmnt_zone, "NFS mount",
    sizeof(struct nfsmount), ZC_ZFREE_CLEARMEM);

int nfs_ticks;                  /* clock ticks per NFS interval; computed in nfs_vfs_init() */
static LCK_GRP_DECLARE(nfs_global_grp, "nfs_global");
static LCK_GRP_DECLARE(nfs_mount_grp, "nfs_mount");
LCK_MTX_DECLARE(nfs_global_mutex, &nfs_global_grp);
/* Attribute bitmaps; the v4 ones are initialized/masked in nfs_vfs_init(). */
uint32_t nfs_fs_attr_bitmap[NFS_ATTR_BITMAP_LEN];
uint32_t nfs_object_attr_bitmap[NFS_ATTR_BITMAP_LEN];
uint32_t nfs_getattr_bitmap[NFS_ATTR_BITMAP_LEN];
uint32_t nfs4_getattr_write_bitmap[NFS_ATTR_BITMAP_LEN];
struct nfsclientidlist nfsclientids;    /* NFSv4 client ID list (TAILQ, init'd in nfs_vfs_init) */

/* NFS requests */
struct nfs_reqqhead nfs_reqq;           /* queue of outstanding requests (TAILQ) */
LCK_GRP_DECLARE(nfs_request_grp, "nfs_request");
LCK_MTX_DECLARE(nfs_request_mutex, &nfs_request_grp);
thread_call_t nfs_request_timer_call;   /* retransmit timer; allocated in nfs_vfs_init() */
int nfs_request_timer_on;
u_int64_t nfs_xid = 0;
u_int64_t nfs_xidwrap = 0;              /* to build a (non-wrapping) 64 bit xid */

thread_call_t nfs_buf_timer_call;       /* nfsbuf timer; allocated in nfs_vfs_init() */

/* NFSv4 */
LCK_GRP_DECLARE(nfs_open_grp, "nfs_open");
uint32_t nfs_open_owner_seqnum = 0;
uint32_t nfs_lock_owner_seqnum = 0;
thread_call_t nfs4_callback_timer_call;
int nfs4_callback_timer_on = 0;
char nfs4_default_domain[MAXPATHLEN];

/* nfsiod (async I/O helper threads) */
static LCK_GRP_DECLARE(nfsiod_lck_grp, "nfsiod");
LCK_MTX_DECLARE(nfsiod_mutex, &nfsiod_lck_grp);
struct nfsiodlist nfsiodfree, nfsiodwork;       /* free and working thread lists */
struct nfsiodmountlist nfsiodmounts;            /* mounts with queued async work */
int nfsiod_thread_count = 0;
int nfsiod_thread_max = NFS_DEFASYNCTHREAD;
int nfs_max_async_writes = NFS_DEFMAXASYNCWRITES;

/* Client tunables (defaults; adjustable via sysctl). */
int nfs_iosize = NFS_IOSIZE;
int nfs_access_cache_timeout = NFS_MAXATTRTIMO;
int nfs_access_delete = 1; /* too many servers get this wrong - workaround on by default */
int nfs_access_dotzfs = 1;
int nfs_access_for_getattr = 0;
int nfs_allow_async = 0;
int nfs_statfs_rate_limit = NFS_DEFSTATFSRATELIMIT;
int nfs_lockd_mounts = 0;
int nfs_lockd_request_sent = 0;
int nfs_idmap_ctrl = NFS_IDMAP_CTRL_USE_IDMAP_SERVICE;
int nfs_callback_port = 0;
int nfs_split_open_owner = 0;

int nfs_tprintf_initial_delay = NFS_TPRINTF_INITIAL_DELAY;
int nfs_tprintf_delay = NFS_TPRINTF_DELAY;

int nfs_mount_timeout = NFS_MOUNT_TIMEOUT;
int nfs_mount_quick_timeout = NFS_MOUNT_QUICK_TIMEOUT;

/* Forward declarations for the mount setup/teardown machinery defined below. */
int mountnfs(char *, mount_t, vfs_context_t, vnode_t *);
int nfs_mount_connect(struct nfsmount *);
void nfs_mount_drain_and_cleanup(struct nfsmount *);
void nfs_mount_cleanup(struct nfsmount *);
int nfs_mountinfo_assemble(struct nfsmount *, struct xdrbuf *);
int nfs4_mount_update_path_with_symlink(struct nfsmount *, struct nfs_fs_path *, uint32_t, fhandle_t *, int *, fhandle_t *, vfs_context_t);
/*
 * NFS VFS operations.
 */
int nfs_vfs_mount(mount_t, vnode_t, user_addr_t, vfs_context_t);
int nfs_vfs_start(mount_t, int, vfs_context_t);
int nfs_vfs_unmount(mount_t, int, vfs_context_t);
int nfs_vfs_root(mount_t, vnode_t *, vfs_context_t);
int nfs_vfs_quotactl(mount_t, int, uid_t, caddr_t, vfs_context_t);
int nfs_vfs_getattr(mount_t, struct vfs_attr *, vfs_context_t);
int nfs_vfs_sync(mount_t, int, vfs_context_t);
int nfs_vfs_vget(mount_t, ino64_t, vnode_t *, vfs_context_t);
int nfs_vfs_vptofh(vnode_t, int *, unsigned char *, vfs_context_t);
int nfs_vfs_fhtovp(mount_t, int, unsigned char *, vnode_t *, vfs_context_t);
int nfs_vfs_init(struct vfsconf *);
int nfs_vfs_sysctl(int *, u_int, user_addr_t, size_t *, user_addr_t, size_t, vfs_context_t);

/* The VFS entry-point table registered for the NFS client filesystem. */
const struct vfsops nfs_vfsops = {
	.vfs_mount = nfs_vfs_mount,
	.vfs_start = nfs_vfs_start,
	.vfs_unmount = nfs_vfs_unmount,
	.vfs_root = nfs_vfs_root,
	.vfs_quotactl = nfs_vfs_quotactl,
	.vfs_getattr = nfs_vfs_getattr,
	.vfs_sync = nfs_vfs_sync,
	.vfs_vget = nfs_vfs_vget,
	.vfs_fhtovp = nfs_vfs_fhtovp,
	.vfs_vptofh = nfs_vfs_vptofh,
	.vfs_init = nfs_vfs_init,
	.vfs_sysctl = nfs_vfs_sysctl,
	// We do not support the remaining VFS ops
};
228
229
/*
 * version-specific NFS functions
 */
int nfs3_mount(struct nfsmount *, vfs_context_t, nfsnode_t *);
int nfs4_mount(struct nfsmount *, vfs_context_t, nfsnode_t *);
int nfs3_fsinfo(struct nfsmount *, nfsnode_t, vfs_context_t);
int nfs3_update_statfs(struct nfsmount *, vfs_context_t);
int nfs4_update_statfs(struct nfsmount *, vfs_context_t);
#if !QUOTA
/* Quota support compiled out: leave the function-table slots NULL. */
#define nfs3_getquota   NULL
#define nfs4_getquota   NULL
#else
int nfs3_getquota(struct nfsmount *, vfs_context_t, uid_t, int, struct dqblk *);
int nfs4_getquota(struct nfsmount *, vfs_context_t, uid_t, int, struct dqblk *);
#endif

/* Dispatch table of NFSv3 RPC implementations (selected per-mount via nm_funcs). */
const struct nfs_funcs nfs3_funcs = {
	.nf_mount = nfs3_mount,
	.nf_update_statfs = nfs3_update_statfs,
	.nf_getquota = nfs3_getquota,
	.nf_access_rpc = nfs3_access_rpc,
	.nf_getattr_rpc = nfs3_getattr_rpc,
	.nf_setattr_rpc = nfs3_setattr_rpc,
	.nf_read_rpc_async = nfs3_read_rpc_async,
	.nf_read_rpc_async_finish = nfs3_read_rpc_async_finish,
	.nf_readlink_rpc = nfs3_readlink_rpc,
	.nf_write_rpc_async = nfs3_write_rpc_async,
	.nf_write_rpc_async_finish = nfs3_write_rpc_async_finish,
	.nf_commit_rpc = nfs3_commit_rpc,
	.nf_lookup_rpc_async = nfs3_lookup_rpc_async,
	.nf_lookup_rpc_async_finish = nfs3_lookup_rpc_async_finish,
	.nf_remove_rpc = nfs3_remove_rpc,
	.nf_rename_rpc = nfs3_rename_rpc,
	.nf_setlock_rpc = nfs3_setlock_rpc,
	.nf_unlock_rpc = nfs3_unlock_rpc,
	.nf_getlock_rpc = nfs3_getlock_rpc
};
#if CONFIG_NFS4
/* Dispatch table of NFSv4 RPC implementations (mirrors nfs3_funcs above). */
const struct nfs_funcs nfs4_funcs = {
	.nf_mount = nfs4_mount,
	.nf_update_statfs = nfs4_update_statfs,
	.nf_getquota = nfs4_getquota,
	.nf_access_rpc = nfs4_access_rpc,
	.nf_getattr_rpc = nfs4_getattr_rpc,
	.nf_setattr_rpc = nfs4_setattr_rpc,
	.nf_read_rpc_async = nfs4_read_rpc_async,
	.nf_read_rpc_async_finish = nfs4_read_rpc_async_finish,
	.nf_readlink_rpc = nfs4_readlink_rpc,
	.nf_write_rpc_async = nfs4_write_rpc_async,
	.nf_write_rpc_async_finish = nfs4_write_rpc_async_finish,
	.nf_commit_rpc = nfs4_commit_rpc,
	.nf_lookup_rpc_async = nfs4_lookup_rpc_async,
	.nf_lookup_rpc_async_finish = nfs4_lookup_rpc_async_finish,
	.nf_remove_rpc = nfs4_remove_rpc,
	.nf_rename_rpc = nfs4_rename_rpc,
	.nf_setlock_rpc = nfs4_setlock_rpc,
	.nf_unlock_rpc = nfs4_unlock_rpc,
	.nf_getlock_rpc = nfs4_getlock_rpc
};
#endif
290
/*
 * Called once to initialize data structures...
 *
 * Sets up the global queues, the nfsbuf table, the (optional) NFSv4
 * attribute bitmaps, the timer thread calls, and the buffer-cache hooks.
 * Always returns 0.
 */
int
nfs_vfs_init(__unused struct vfsconf *vfsp)
{
#if CONFIG_NFS4
	int i;
#endif
	/*
	 * Check to see if major data structures haven't bloated.
	 */
	if (sizeof(struct nfsnode) > NFS_NODEALLOC) {
		printf("struct nfsnode bloated (> %dbytes)\n", NFS_NODEALLOC);
		printf("Try reducing NFS_SMALLFH\n");
	}
	if (sizeof(struct nfsmount) > NFS_MNTALLOC) {
		printf("struct nfsmount bloated (> %dbytes)\n", NFS_MNTALLOC);
	}

	/* Convert NFS_TICKINTVL (ms) to clock ticks, rounding; minimum of 1 tick. */
	nfs_ticks = (hz * NFS_TICKINTVL + 500) / 1000;
	if (nfs_ticks < 1) {
		nfs_ticks = 1;
	}

	/* init async I/O thread pool state */
	TAILQ_INIT(&nfsiodfree);
	TAILQ_INIT(&nfsiodwork);
	TAILQ_INIT(&nfsiodmounts);

	/* initialize NFS request list */
	TAILQ_INIT(&nfs_reqq);

	nfs_nbinit();                   /* Init the nfsbuf table */

#if CONFIG_NFS4
	/* NFSv4 stuff */
	NFS4_PER_FS_ATTRIBUTES(nfs_fs_attr_bitmap);
	NFS4_PER_OBJECT_ATTRIBUTES(nfs_object_attr_bitmap);
	NFS4_DEFAULT_WRITE_ATTRIBUTES(nfs4_getattr_write_bitmap);
	NFS4_DEFAULT_ATTRIBUTES(nfs_getattr_bitmap);
	/* Restrict the getattr bitmaps to attributes that apply to objects. */
	for (i = 0; i < NFS_ATTR_BITMAP_LEN; i++) {
		nfs_getattr_bitmap[i] &= nfs_object_attr_bitmap[i];
		nfs4_getattr_write_bitmap[i] &= nfs_object_attr_bitmap[i];
	}
	TAILQ_INIT(&nfsclientids);
#endif

	/* initialize NFS timer callouts */
	nfs_request_timer_call = thread_call_allocate(nfs_request_timer, NULL);
	nfs_buf_timer_call = thread_call_allocate(nfs_buf_timer, NULL);
#if CONFIG_NFS4
	nfs4_callback_timer_call = thread_call_allocate(nfs4_callback_timer, NULL);
#endif

	/*
	 * Assign NFS hooks
	 */
	struct nfs_hooks hooks = { .f_vinvalbuf = nfs_vinvalbuf1, .f_buf_page_inval = nfs_buf_page_inval_internal };
	nfs_register_hooks(&hooks);

	return 0;
}
354
355
/*
 * nfs statfs call
 *
 * Issue an FSSTAT RPC against the mount's root node and refresh the
 * cached space/file counts in nmp->nm_fsattr (under nm_lock).
 * Handles both NFSv3 (64-bit byte counts) and NFSv2 (block counts
 * scaled by the reported block size) reply formats.
 *
 * NOTE: the nfsm_chain_* / nfsmout_if macros accumulate into 'error'
 * and branch to the nfsmout label on failure.
 */
int
nfs3_update_statfs(struct nfsmount *nmp, vfs_context_t ctx)
{
	nfsnode_t np;
	int error = 0, lockerror, status, nfsvers;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	uint32_t val = 0;

	nfsvers = nmp->nm_vers;
	np = nmp->nm_dnp;
	if (!np) {
		return ENXIO;
	}
	/* Hold the root vnode across the RPC. */
	if ((error = vnode_get(NFSTOV(np)))) {
		return error;
	}

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	/* Request is just the file handle of the root node. */
	nfsm_chain_build_alloc_init(error, &nmreq, NFSX_FH(nfsvers));
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	nfsm_chain_build_done(error, &nmreq);
	nfsmout_if(error);
	/* R_SOFT: don't hang the caller if the server is unresponsive. */
	error = nfs_request2(np, NULL, &nmreq, NFSPROC_FSSTAT, vfs_context_thread(ctx),
	    vfs_context_ucred(ctx), NULL, R_SOFT, &nmrep, &xid, &status);
	if (error == ETIMEDOUT) {
		goto nfsmout;
	}
	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	if (nfsvers == NFS_VER3) {
		/* v3 replies carry post-op attributes; fold them into the node cache. */
		nfsm_chain_postop_attr_update(error, &nmrep, np, &xid);
	}
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	if (!error) {
		error = status;
	}
	nfsm_assert(error, NFSTONMP(np), ENXIO);
	nfsmout_if(error);
	lck_mtx_lock(&nmp->nm_lock);
	NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_TOTAL);
	NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_FREE);
	NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_AVAIL);
	if (nfsvers == NFS_VER3) {
		/* NFSv3 FSSTAT: 64-bit byte and file counts, in fixed order. */
		NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_FILES_AVAIL);
		NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_FILES_TOTAL);
		NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_FILES_FREE);
		nmp->nm_fsattr.nfsa_bsize = NFS_FABLKSIZE;
		nfsm_chain_get_64(error, &nmrep, nmp->nm_fsattr.nfsa_space_total);
		nfsm_chain_get_64(error, &nmrep, nmp->nm_fsattr.nfsa_space_free);
		nfsm_chain_get_64(error, &nmrep, nmp->nm_fsattr.nfsa_space_avail);
		nfsm_chain_get_64(error, &nmrep, nmp->nm_fsattr.nfsa_files_total);
		nfsm_chain_get_64(error, &nmrep, nmp->nm_fsattr.nfsa_files_free);
		nfsm_chain_get_64(error, &nmrep, nmp->nm_fsattr.nfsa_files_avail);
		// skip invarsec
	} else {
		/* NFSv2 STATFS: 32-bit block counts scaled by the reply's bsize. */
		nfsm_chain_adv(error, &nmrep, NFSX_UNSIGNED); // skip tsize?
		nfsm_chain_get_32(error, &nmrep, nmp->nm_fsattr.nfsa_bsize);
		nfsm_chain_get_32(error, &nmrep, val);
		nfsmout_if(error);
		if (nmp->nm_fsattr.nfsa_bsize <= 0) {
			/* guard against a bogus server block size */
			nmp->nm_fsattr.nfsa_bsize = NFS_FABLKSIZE;
		}
		nmp->nm_fsattr.nfsa_space_total = (uint64_t)val * nmp->nm_fsattr.nfsa_bsize;
		nfsm_chain_get_32(error, &nmrep, val);
		nfsmout_if(error);
		nmp->nm_fsattr.nfsa_space_free = (uint64_t)val * nmp->nm_fsattr.nfsa_bsize;
		nfsm_chain_get_32(error, &nmrep, val);
		nfsmout_if(error);
		nmp->nm_fsattr.nfsa_space_avail = (uint64_t)val * nmp->nm_fsattr.nfsa_bsize;
	}
	lck_mtx_unlock(&nmp->nm_lock);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	vnode_put(NFSTOV(np));
	return error;
}
442
#if CONFIG_NFS4
/*
 * NFSv4 statfs: refresh nmp->nm_fsattr via a PUTFH+GETATTR compound on
 * the mount's root node, requesting the statfs-related attributes.
 *
 * NOTE: the nfsm_chain_* / nfsmout_if macros accumulate into 'error'
 * and branch to the nfsmout label on failure.
 */
int
nfs4_update_statfs(struct nfsmount *nmp, vfs_context_t ctx)
{
	nfsnode_t np;
	int error = 0, lockerror, status, nfsvers, numops;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfs_vattr nvattr;
	struct nfsreq_secinfo_args si;

	nfsvers = nmp->nm_vers;
	np = nmp->nm_dnp;
	if (!np) {
		return ENXIO;
	}
	/* Hold the root vnode across the RPC. */
	if ((error = vnode_get(NFSTOV(np)))) {
		return error;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	NVATTR_INIT(&nvattr);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH + GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "statfs", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
	/* Request the default attributes plus the statfs-specific set. */
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS4_STATFS_ATTRIBUTES(bitmap);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	/* R_SOFT: don't hang the caller if the server is unresponsive. */
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx),
	    NULL, R_SOFT, &nmrep, &xid, &status);
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_assert(error, NFSTONMP(np), ENXIO);
	nfsmout_if(error);
	/* Parse the returned attributes straight into nm_fsattr (under nm_lock). */
	lck_mtx_lock(&nmp->nm_lock);
	error = nfs4_parsefattr(&nmrep, &nmp->nm_fsattr, &nvattr, NULL, NULL, NULL);
	lck_mtx_unlock(&nmp->nm_lock);
	nfsmout_if(error);
	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	if (!error) {
		/* Also refresh the root node's cached attributes. */
		nfs_loadattrcache(np, &nvattr, &xid, 0);
	}
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_assert(error, NFSTONMP(np), ENXIO);
	nfsmout_if(error);
	/* v4 reports sizes in bytes; use the fake block size for statfs math. */
	nmp->nm_fsattr.nfsa_bsize = NFS_FABLKSIZE;
nfsmout:
	NVATTR_CLEANUP(&nvattr);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	vnode_put(NFSTOV(np));
	return error;
}
#endif /* CONFIG_NFS4 */
517
518
519 /*
520 * Return an NFS volume name from the mntfrom name.
521 */
522 static void
nfs_get_volname(struct mount * mp,char * volname,size_t len,__unused vfs_context_t ctx)523 nfs_get_volname(struct mount *mp, char *volname, size_t len, __unused vfs_context_t ctx)
524 {
525 const char *ptr, *cptr;
526 const char *mntfrom = vfs_statfs(mp)->f_mntfromname;
527 size_t mflen;
528
529
530 mflen = strnlen(mntfrom, MAXPATHLEN + 1);
531
532 if (mflen > MAXPATHLEN || mflen == 0) {
533 strlcpy(volname, "Bad volname", len);
534 return;
535 }
536
537 /* Move back over trailing slashes */
538 for (ptr = &mntfrom[mflen - 1]; ptr != mntfrom && *ptr == '/'; ptr--) {
539 mflen--;
540 }
541
542 /* Find first character after the last slash */
543 cptr = ptr = NULL;
544 for (size_t i = 0; i < mflen; i++) {
545 if (mntfrom[i] == '/') {
546 ptr = &mntfrom[i + 1];
547 }
548 /* And the first character after the first colon */
549 else if (cptr == NULL && mntfrom[i] == ':') {
550 cptr = &mntfrom[i + 1];
551 }
552 }
553
554 /*
555 * No slash or nothing after the last slash
556 * use everything past the first colon
557 */
558 if (ptr == NULL || *ptr == '\0') {
559 ptr = cptr;
560 }
561 /* Otherwise use the mntfrom name */
562 if (ptr == NULL) {
563 ptr = mntfrom;
564 }
565
566 mflen = &mntfrom[mflen] - ptr;
567 len = mflen + 1 < len ? mflen + 1 : len;
568
569 strlcpy(volname, ptr, len);
570 }
571
572
/*
 * The NFS VFS_GETATTR function: "statfs"-type information is retrieved
 * using the nf_update_statfs() function, and other attributes are cobbled
 * together from whatever sources we can (getattr, fsinfo, pathconf).
 */
int
nfs_vfs_getattr(mount_t mp, struct vfs_attr *fsap, vfs_context_t ctx)
{
	struct nfsmount *nmp;
	uint32_t bsize;
	int error = 0, nfsvers;

	nmp = VFSTONFS(mp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	/* Block/space/file counts: refresh from the server (rate-limited). */
	if (VFSATTR_IS_ACTIVE(fsap, f_bsize) ||
	    VFSATTR_IS_ACTIVE(fsap, f_iosize) ||
	    VFSATTR_IS_ACTIVE(fsap, f_blocks) ||
	    VFSATTR_IS_ACTIVE(fsap, f_bfree) ||
	    VFSATTR_IS_ACTIVE(fsap, f_bavail) ||
	    VFSATTR_IS_ACTIVE(fsap, f_bused) ||
	    VFSATTR_IS_ACTIVE(fsap, f_files) ||
	    VFSATTR_IS_ACTIVE(fsap, f_ffree)) {
		int statfsrate = nfs_statfs_rate_limit;
		int refresh = 1;

		/*
		 * Are we rate-limiting statfs RPCs?
		 * (Treat values less than 1 or greater than 1,000,000 as no limit.)
		 */
		if ((statfsrate > 0) && (statfsrate < 1000000)) {
			struct timeval now;
			time_t stamp;

			/*
			 * Quantize uptime into 1/statfsrate-second buckets; only
			 * refresh when we've moved into a new bucket since the
			 * last statfs RPC.
			 */
			microuptime(&now);
			lck_mtx_lock(&nmp->nm_lock);
			stamp = (now.tv_sec * statfsrate) + (now.tv_usec / (1000000 / statfsrate));
			if (stamp != nmp->nm_fsattrstamp) {
				refresh = 1;
				nmp->nm_fsattrstamp = stamp;
			} else {
				refresh = 0;
			}
			lck_mtx_unlock(&nmp->nm_lock);
		}

		if (refresh && !nfs_use_cache(nmp)) {
			error = nmp->nm_funcs->nf_update_statfs(nmp, ctx);
		}
		/* Stale/timed-out refresh is non-fatal; fall back to cached values. */
		if ((error == ESTALE) || (error == ETIMEDOUT)) {
			error = 0;
		}
		if (error) {
			return NFS_MAPERR(error);
		}

		/* Report the cached statistics, converted to bsize-unit blocks. */
		lck_mtx_lock(&nmp->nm_lock);
		VFSATTR_RETURN(fsap, f_iosize, nfs_iosize);
		VFSATTR_RETURN(fsap, f_bsize, nmp->nm_fsattr.nfsa_bsize);
		bsize = nmp->nm_fsattr.nfsa_bsize;
		if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_TOTAL)) {
			VFSATTR_RETURN(fsap, f_blocks, nmp->nm_fsattr.nfsa_space_total / bsize);
		}
		if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_FREE)) {
			VFSATTR_RETURN(fsap, f_bfree, nmp->nm_fsattr.nfsa_space_free / bsize);
		}
		if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_AVAIL)) {
			VFSATTR_RETURN(fsap, f_bavail, nmp->nm_fsattr.nfsa_space_avail / bsize);
		}
		if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_TOTAL) &&
		    NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_FREE)) {
			VFSATTR_RETURN(fsap, f_bused,
			    (nmp->nm_fsattr.nfsa_space_total / bsize) -
			    (nmp->nm_fsattr.nfsa_space_free / bsize));
		}
		if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_FILES_TOTAL)) {
			VFSATTR_RETURN(fsap, f_files, nmp->nm_fsattr.nfsa_files_total);
		}
		if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_FILES_FREE)) {
			VFSATTR_RETURN(fsap, f_ffree, nmp->nm_fsattr.nfsa_files_free);
		}
		lck_mtx_unlock(&nmp->nm_lock);
	}

	if (VFSATTR_IS_ACTIVE(fsap, f_vol_name)) {
		/*%%% IF fail over support is implemented we may need to take nm_lock */
		nfs_get_volname(mp, fsap->f_vol_name, MAXPATHLEN, ctx);
		VFSATTR_SET_SUPPORTED(fsap, f_vol_name);
	}
	if (VFSATTR_IS_ACTIVE(fsap, f_capabilities)
	    ) {
		u_int32_t caps, valid;
		nfsnode_t np = nmp->nm_dnp;

		nfsm_assert(error, VFSTONFS(mp) && np, ENXIO);
		if (error) {
			return NFS_MAPERR(error);
		}
		lck_mtx_lock(&nmp->nm_lock);

		/*
		 * The capabilities[] array defines what this volume supports.
		 *
		 * The valid[] array defines which bits this code understands
		 * the meaning of (whether the volume has that capability or
		 * not).  Any zero bits here means "I don't know what you're
		 * asking about" and the caller cannot tell whether that
		 * capability is present or not.
		 */
		caps = valid = 0;
		/* Format capabilities: derive from server-reported fs attributes. */
		if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SYMLINK_SUPPORT)) {
			valid |= VOL_CAP_FMT_SYMBOLICLINKS;
			if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_SYMLINK) {
				caps |= VOL_CAP_FMT_SYMBOLICLINKS;
			}
		}
		if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_LINK_SUPPORT)) {
			valid |= VOL_CAP_FMT_HARDLINKS;
			if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_LINK) {
				caps |= VOL_CAP_FMT_HARDLINKS;
			}
		}
		if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_CASE_INSENSITIVE)) {
			valid |= VOL_CAP_FMT_CASE_SENSITIVE;
			/* note the inversion: case-SENSITIVE = NOT case-insensitive */
			if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_CASE_INSENSITIVE)) {
				caps |= VOL_CAP_FMT_CASE_SENSITIVE;
			}
		}
		if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_CASE_PRESERVING)) {
			valid |= VOL_CAP_FMT_CASE_PRESERVING;
			if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_CASE_PRESERVING) {
				caps |= VOL_CAP_FMT_CASE_PRESERVING;
			}
		}
		/* Note: VOL_CAP_FMT_2TB_FILESIZE is actually used to test for "large file support" */
		if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXFILESIZE)) {
			/* Is server's max file size at least 4GB? */
			if (nmp->nm_fsattr.nfsa_maxfilesize >= 0x100000000ULL) {
				caps |= VOL_CAP_FMT_2TB_FILESIZE;
			}
		} else if (nfsvers >= NFS_VER3) {
			/*
			 * NFSv3 and up supports 64 bits of file size.
			 * So, we'll just assume maxfilesize >= 4GB
			 */
			caps |= VOL_CAP_FMT_2TB_FILESIZE;
		}
#if CONFIG_NFS4
		if (nfsvers >= NFS_VER4) {
			caps |= VOL_CAP_FMT_HIDDEN_FILES;
			valid |= VOL_CAP_FMT_HIDDEN_FILES;
			// VOL_CAP_FMT_OPENDENYMODES
			//     caps |= VOL_CAP_FMT_OPENDENYMODES;
			//     valid |= VOL_CAP_FMT_OPENDENYMODES;
		}
#endif
		// no version of nfs supports immutable files
		caps |= VOL_CAP_FMT_NO_IMMUTABLE_FILES;
		valid |= VOL_CAP_FMT_NO_IMMUTABLE_FILES;

		fsap->f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] =
		    // VOL_CAP_FMT_PERSISTENTOBJECTIDS |
		    // VOL_CAP_FMT_SYMBOLICLINKS |
		    // VOL_CAP_FMT_HARDLINKS |
		    // VOL_CAP_FMT_JOURNAL |
		    // VOL_CAP_FMT_JOURNAL_ACTIVE |
		    // VOL_CAP_FMT_NO_ROOT_TIMES |
		    // VOL_CAP_FMT_SPARSE_FILES |
		    // VOL_CAP_FMT_ZERO_RUNS |
		    // VOL_CAP_FMT_CASE_SENSITIVE |
		    // VOL_CAP_FMT_CASE_PRESERVING |
		    // VOL_CAP_FMT_FAST_STATFS |
		    // VOL_CAP_FMT_2TB_FILESIZE |
		    // VOL_CAP_FMT_OPENDENYMODES |
		    // VOL_CAP_FMT_HIDDEN_FILES |
		    caps;
		fsap->f_capabilities.valid[VOL_CAPABILITIES_FORMAT] =
		    VOL_CAP_FMT_PERSISTENTOBJECTIDS |
		    // VOL_CAP_FMT_SYMBOLICLINKS |
		    // VOL_CAP_FMT_HARDLINKS |
		    // VOL_CAP_FMT_JOURNAL |
		    // VOL_CAP_FMT_JOURNAL_ACTIVE |
		    // VOL_CAP_FMT_NO_ROOT_TIMES |
		    // VOL_CAP_FMT_SPARSE_FILES |
		    // VOL_CAP_FMT_ZERO_RUNS |
		    // VOL_CAP_FMT_CASE_SENSITIVE |
		    // VOL_CAP_FMT_CASE_PRESERVING |
		    VOL_CAP_FMT_FAST_STATFS |
		    VOL_CAP_FMT_2TB_FILESIZE |
		    // VOL_CAP_FMT_OPENDENYMODES |
		    // VOL_CAP_FMT_HIDDEN_FILES |
		    valid;

		/*
		 * We don't support most of the interfaces.
		 *
		 * We MAY support locking, but we don't have any easy way of
		 * probing.  We can tell if there's no lockd running or if
		 * locks have been disabled for a mount, so we can definitely
		 * answer NO in that case.  Any attempt to send a request to
		 * lockd to test for locking support may cause the lazily-
		 * launched locking daemons to be started unnecessarily.  So
		 * we avoid that.  However, we do record if we ever successfully
		 * perform a lock operation on a mount point, so if it looks
		 * like lock ops have worked, we do report that we support them.
		 */
		caps = valid = 0;
#if CONFIG_NFS4
		if (nfsvers >= NFS_VER4) {
			/* v4 has locking built into the protocol. */
			caps = VOL_CAP_INT_ADVLOCK | VOL_CAP_INT_FLOCK;
			valid = VOL_CAP_INT_ADVLOCK | VOL_CAP_INT_FLOCK;
			if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL) {
				caps |= VOL_CAP_INT_EXTENDED_SECURITY;
			}
			valid |= VOL_CAP_INT_EXTENDED_SECURITY;
			if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR) {
				caps |= VOL_CAP_INT_EXTENDED_ATTR;
			}
			valid |= VOL_CAP_INT_EXTENDED_ATTR;
#if NAMEDSTREAMS
			if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR) {
				caps |= VOL_CAP_INT_NAMEDSTREAMS;
			}
			valid |= VOL_CAP_INT_NAMEDSTREAMS;
#endif
		} else
#endif
		if (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED) {
			/* locks disabled on this mount, so they definitely won't work */
			valid = VOL_CAP_INT_ADVLOCK | VOL_CAP_INT_FLOCK;
		} else if (nmp->nm_state & NFSSTA_LOCKSWORK) {
			caps = VOL_CAP_INT_ADVLOCK | VOL_CAP_INT_FLOCK;
			valid = VOL_CAP_INT_ADVLOCK | VOL_CAP_INT_FLOCK;
		}
		fsap->f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] =
		    // VOL_CAP_INT_SEARCHFS |
		    // VOL_CAP_INT_ATTRLIST |
		    // VOL_CAP_INT_NFSEXPORT |
		    // VOL_CAP_INT_READDIRATTR |
		    // VOL_CAP_INT_EXCHANGEDATA |
		    // VOL_CAP_INT_COPYFILE |
		    // VOL_CAP_INT_ALLOCATE |
		    // VOL_CAP_INT_VOL_RENAME |
		    // VOL_CAP_INT_ADVLOCK |
		    // VOL_CAP_INT_FLOCK |
		    // VOL_CAP_INT_EXTENDED_SECURITY |
		    // VOL_CAP_INT_USERACCESS |
		    // VOL_CAP_INT_MANLOCK |
		    // VOL_CAP_INT_NAMEDSTREAMS |
		    // VOL_CAP_INT_EXTENDED_ATTR |
		    VOL_CAP_INT_REMOTE_EVENT |
		    caps;
		fsap->f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] =
		    VOL_CAP_INT_SEARCHFS |
		    VOL_CAP_INT_ATTRLIST |
		    VOL_CAP_INT_NFSEXPORT |
		    VOL_CAP_INT_READDIRATTR |
		    VOL_CAP_INT_EXCHANGEDATA |
		    VOL_CAP_INT_COPYFILE |
		    VOL_CAP_INT_ALLOCATE |
		    VOL_CAP_INT_VOL_RENAME |
		    // VOL_CAP_INT_ADVLOCK |
		    // VOL_CAP_INT_FLOCK |
		    // VOL_CAP_INT_EXTENDED_SECURITY |
		    // VOL_CAP_INT_USERACCESS |
		    // VOL_CAP_INT_MANLOCK |
		    // VOL_CAP_INT_NAMEDSTREAMS |
		    // VOL_CAP_INT_EXTENDED_ATTR |
		    VOL_CAP_INT_REMOTE_EVENT |
		    valid;

		fsap->f_capabilities.capabilities[VOL_CAPABILITIES_RESERVED1] = 0;
		fsap->f_capabilities.valid[VOL_CAPABILITIES_RESERVED1] = 0;

		fsap->f_capabilities.capabilities[VOL_CAPABILITIES_RESERVED2] = 0;
		fsap->f_capabilities.valid[VOL_CAPABILITIES_RESERVED2] = 0;

		VFSATTR_SET_SUPPORTED(fsap, f_capabilities);
		lck_mtx_unlock(&nmp->nm_lock);
	}

	if (VFSATTR_IS_ACTIVE(fsap, f_attributes)) {
		/* We only support the volume name, capabilities, and attributes attrs. */
		fsap->f_attributes.validattr.commonattr = 0;
		fsap->f_attributes.validattr.volattr =
		    ATTR_VOL_NAME | ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES;
		fsap->f_attributes.validattr.dirattr = 0;
		fsap->f_attributes.validattr.fileattr = 0;
		fsap->f_attributes.validattr.forkattr = 0;

		fsap->f_attributes.nativeattr.commonattr = 0;
		fsap->f_attributes.nativeattr.volattr =
		    ATTR_VOL_NAME | ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES;
		fsap->f_attributes.nativeattr.dirattr = 0;
		fsap->f_attributes.nativeattr.fileattr = 0;
		fsap->f_attributes.nativeattr.forkattr = 0;

		VFSATTR_SET_SUPPORTED(fsap, f_attributes);
	}

	return NFS_MAPERR(error);
}
877
878 /*
879 * nfs version 3 fsinfo rpc call
880 */
881 int
nfs3_fsinfo(struct nfsmount * nmp,nfsnode_t np,vfs_context_t ctx)882 nfs3_fsinfo(struct nfsmount *nmp, nfsnode_t np, vfs_context_t ctx)
883 {
884 int error = 0, lockerror, status, nmlocked = 0;
885 u_int64_t xid;
886 uint32_t val, prefsize, maxsize;
887 struct nfsm_chain nmreq, nmrep;
888
889 nfsm_chain_null(&nmreq);
890 nfsm_chain_null(&nmrep);
891
892 nfsm_chain_build_alloc_init(error, &nmreq, NFSX_FH(nmp->nm_vers));
893 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
894 nfsm_chain_build_done(error, &nmreq);
895 nfsmout_if(error);
896 error = nfs_request(np, NULL, &nmreq, NFSPROC_FSINFO, ctx, NULL, &nmrep, &xid, &status);
897 if ((lockerror = nfs_node_lock(np))) {
898 error = lockerror;
899 }
900 nfsm_chain_postop_attr_update(error, &nmrep, np, &xid);
901 if (!lockerror) {
902 nfs_node_unlock(np);
903 }
904 if (!error) {
905 error = status;
906 }
907 nfsmout_if(error);
908
909 lck_mtx_lock(&nmp->nm_lock);
910 nmlocked = 1;
911
912 nfsm_chain_get_32(error, &nmrep, maxsize);
913 nfsm_chain_get_32(error, &nmrep, prefsize);
914 nfsmout_if(error);
915 nmp->nm_fsattr.nfsa_maxread = maxsize;
916 if (prefsize < nmp->nm_rsize) {
917 nmp->nm_rsize = (prefsize + NFS_FABLKSIZE - 1) &
918 ~(NFS_FABLKSIZE - 1);
919 }
920 if ((maxsize > 0) && (maxsize < nmp->nm_rsize)) {
921 nmp->nm_rsize = maxsize & ~(NFS_FABLKSIZE - 1);
922 if (nmp->nm_rsize == 0) {
923 nmp->nm_rsize = maxsize;
924 }
925 }
926 nfsm_chain_adv(error, &nmrep, NFSX_UNSIGNED); // skip rtmult
927
928 nfsm_chain_get_32(error, &nmrep, maxsize);
929 nfsm_chain_get_32(error, &nmrep, prefsize);
930 nfsmout_if(error);
931 nmp->nm_fsattr.nfsa_maxwrite = maxsize;
932 if (prefsize < nmp->nm_wsize) {
933 nmp->nm_wsize = (prefsize + NFS_FABLKSIZE - 1) &
934 ~(NFS_FABLKSIZE - 1);
935 }
936 if ((maxsize > 0) && (maxsize < nmp->nm_wsize)) {
937 nmp->nm_wsize = maxsize & ~(NFS_FABLKSIZE - 1);
938 if (nmp->nm_wsize == 0) {
939 nmp->nm_wsize = maxsize;
940 }
941 }
942 nfsm_chain_adv(error, &nmrep, NFSX_UNSIGNED); // skip wtmult
943
944 nfsm_chain_get_32(error, &nmrep, prefsize);
945 nfsmout_if(error);
946 if ((prefsize > 0) && (prefsize < nmp->nm_readdirsize)) {
947 nmp->nm_readdirsize = prefsize;
948 }
949 if ((nmp->nm_fsattr.nfsa_maxread > 0) &&
950 (nmp->nm_fsattr.nfsa_maxread < nmp->nm_readdirsize)) {
951 nmp->nm_readdirsize = nmp->nm_fsattr.nfsa_maxread;
952 }
953
954 nfsm_chain_get_64(error, &nmrep, nmp->nm_fsattr.nfsa_maxfilesize);
955
956 nfsm_chain_adv(error, &nmrep, 2 * NFSX_UNSIGNED); // skip time_delta
957
958 /* convert FS properties to our own flags */
959 nfsm_chain_get_32(error, &nmrep, val);
960 nfsmout_if(error);
961 if (val & NFSV3FSINFO_LINK) {
962 nmp->nm_fsattr.nfsa_flags |= NFS_FSFLAG_LINK;
963 }
964 if (val & NFSV3FSINFO_SYMLINK) {
965 nmp->nm_fsattr.nfsa_flags |= NFS_FSFLAG_SYMLINK;
966 }
967 if (val & NFSV3FSINFO_HOMOGENEOUS) {
968 nmp->nm_fsattr.nfsa_flags |= NFS_FSFLAG_HOMOGENEOUS;
969 }
970 if (val & NFSV3FSINFO_CANSETTIME) {
971 nmp->nm_fsattr.nfsa_flags |= NFS_FSFLAG_SET_TIME;
972 }
973 nmp->nm_state |= NFSSTA_GOTFSINFO;
974 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXREAD);
975 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXWRITE);
976 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXFILESIZE);
977 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_LINK_SUPPORT);
978 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SYMLINK_SUPPORT);
979 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_HOMOGENEOUS);
980 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_CANSETTIME);
981 nfsmout:
982 if (nmlocked) {
983 lck_mtx_unlock(&nmp->nm_lock);
984 }
985 nfsm_chain_cleanup(&nmreq);
986 nfsm_chain_cleanup(&nmrep);
987 return error;
988 }
989
990 /*
991 * Convert old style NFS mount args to XDR.
992 */
993 static int
nfs_convert_old_nfs_args(mount_t mp,user_addr_t data,vfs_context_t ctx,int argsversion,int inkernel,char ** xdrbufp)994 nfs_convert_old_nfs_args(mount_t mp, user_addr_t data, vfs_context_t ctx, int argsversion, int inkernel, char **xdrbufp)
995 {
996 int error = 0, args64bit, argsize, numcomps;
997 struct user_nfs_args args;
998 struct nfs_args tempargs;
999 caddr_t argsp;
1000 size_t len;
1001 u_char nfh[NFS4_FHSIZE];
1002 char *mntfrom, *endserverp, *frompath, *p, *cp;
1003 struct sockaddr_storage ss;
1004 void *sinaddr = NULL;
1005 char uaddr[MAX_IPv6_STR_LEN];
1006 uint32_t mattrs[NFS_MATTR_BITMAP_LEN];
1007 uint32_t mflags_mask[NFS_MFLAG_BITMAP_LEN], mflags[NFS_MFLAG_BITMAP_LEN];
1008 uint32_t nfsvers, nfslockmode = 0;
1009 size_t argslength_offset, attrslength_offset, end_offset;
1010 struct xdrbuf xb;
1011
1012 *xdrbufp = NULL;
1013
1014 /* allocate a temporary buffer for mntfrom */
1015 mntfrom = zalloc(ZV_NAMEI);
1016
1017 args64bit = (inkernel || vfs_context_is64bit(ctx));
1018 argsp = args64bit ? (void*)&args : (void*)&tempargs;
1019
1020 argsize = args64bit ? sizeof(args) : sizeof(tempargs);
1021 switch (argsversion) {
1022 case 3:
1023 argsize -= NFS_ARGSVERSION4_INCSIZE;
1024 OS_FALLTHROUGH;
1025 case 4:
1026 argsize -= NFS_ARGSVERSION5_INCSIZE;
1027 OS_FALLTHROUGH;
1028 case 5:
1029 argsize -= NFS_ARGSVERSION6_INCSIZE;
1030 OS_FALLTHROUGH;
1031 case 6:
1032 break;
1033 default:
1034 error = EPROGMISMATCH;
1035 goto nfsmout;
1036 }
1037
1038 /* read in the structure */
1039 if (inkernel) {
1040 bcopy(CAST_DOWN(void *, data), argsp, argsize);
1041 } else {
1042 error = copyin(data, argsp, argsize);
1043 }
1044 nfsmout_if(error);
1045
1046 if (!args64bit) {
1047 args.addrlen = tempargs.addrlen;
1048 args.sotype = tempargs.sotype;
1049 args.proto = tempargs.proto;
1050 args.fhsize = tempargs.fhsize;
1051 args.flags = tempargs.flags;
1052 args.wsize = tempargs.wsize;
1053 args.rsize = tempargs.rsize;
1054 args.readdirsize = tempargs.readdirsize;
1055 args.timeo = tempargs.timeo;
1056 args.retrans = tempargs.retrans;
1057 args.maxgrouplist = tempargs.maxgrouplist;
1058 args.readahead = tempargs.readahead;
1059 args.leaseterm = tempargs.leaseterm;
1060 args.deadthresh = tempargs.deadthresh;
1061 args.addr = CAST_USER_ADDR_T(tempargs.addr);
1062 args.fh = CAST_USER_ADDR_T(tempargs.fh);
1063 args.hostname = CAST_USER_ADDR_T(tempargs.hostname);
1064 args.version = tempargs.version;
1065 if (args.version >= 4) {
1066 args.acregmin = tempargs.acregmin;
1067 args.acregmax = tempargs.acregmax;
1068 args.acdirmin = tempargs.acdirmin;
1069 args.acdirmax = tempargs.acdirmax;
1070 }
1071 if (args.version >= 5) {
1072 args.auth = tempargs.auth;
1073 }
1074 if (args.version >= 6) {
1075 args.deadtimeout = tempargs.deadtimeout;
1076 }
1077 }
1078
1079 if ((args.fhsize < 0) || (args.fhsize > NFS4_FHSIZE)) {
1080 error = EINVAL;
1081 goto nfsmout;
1082 }
1083 if (args.fhsize > 0) {
1084 if (inkernel) {
1085 bcopy(CAST_DOWN(void *, args.fh), (caddr_t)nfh, args.fhsize);
1086 } else {
1087 error = copyin(args.fh, (caddr_t)nfh, args.fhsize);
1088 }
1089 nfsmout_if(error);
1090 }
1091
1092 if (inkernel) {
1093 error = copystr(CAST_DOWN(void *, args.hostname), mntfrom, MAXPATHLEN - 1, &len);
1094 } else {
1095 error = copyinstr(args.hostname, mntfrom, MAXPATHLEN - 1, &len);
1096 }
1097 nfsmout_if(error);
1098 bzero(&mntfrom[len], MAXPATHLEN - len);
1099
1100 /* find the server-side path being mounted */
1101 frompath = mntfrom;
1102 if (*frompath == '[') { /* skip IPv6 literal address */
1103 while (*frompath && (*frompath != ']')) {
1104 frompath++;
1105 }
1106 if (*frompath == ']') {
1107 frompath++;
1108 }
1109 }
1110 while (*frompath && (*frompath != ':')) {
1111 frompath++;
1112 }
1113 endserverp = frompath;
1114 while (*frompath && (*frompath == ':')) {
1115 frompath++;
1116 }
1117 /* count fs location path components */
1118 p = frompath;
1119 while (*p && (*p == '/')) {
1120 p++;
1121 }
1122 numcomps = 0;
1123 while (*p) {
1124 numcomps++;
1125 while (*p && (*p != '/')) {
1126 p++;
1127 }
1128 while (*p && (*p == '/')) {
1129 p++;
1130 }
1131 }
1132
1133 /* copy socket address */
1134 if (inkernel) {
1135 bcopy(CAST_DOWN(void *, args.addr), &ss, args.addrlen);
1136 } else {
1137 if (args.addrlen > sizeof(struct sockaddr_storage)) {
1138 error = EINVAL;
1139 } else {
1140 error = copyin(args.addr, &ss, args.addrlen);
1141 }
1142 }
1143 nfsmout_if(error);
1144 ss.ss_len = args.addrlen;
1145
1146 /* convert address to universal address string */
1147 if (ss.ss_family == AF_INET) {
1148 if (ss.ss_len != sizeof(struct sockaddr_in)) {
1149 error = EINVAL;
1150 } else {
1151 sinaddr = &((struct sockaddr_in*)&ss)->sin_addr;
1152 }
1153 } else if (ss.ss_family == AF_INET6) {
1154 if (ss.ss_len != sizeof(struct sockaddr_in6)) {
1155 error = EINVAL;
1156 } else {
1157 sinaddr = &((struct sockaddr_in6*)&ss)->sin6_addr;
1158 }
1159 } else {
1160 sinaddr = NULL;
1161 }
1162 nfsmout_if(error);
1163
1164 if (!sinaddr || (inet_ntop(ss.ss_family, sinaddr, uaddr, sizeof(uaddr)) != uaddr)) {
1165 error = EINVAL;
1166 goto nfsmout;
1167 }
1168
1169 /* prepare mount flags */
1170 NFS_BITMAP_ZERO(mflags_mask, NFS_MFLAG_BITMAP_LEN);
1171 NFS_BITMAP_ZERO(mflags, NFS_MFLAG_BITMAP_LEN);
1172 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_SOFT);
1173 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_INTR);
1174 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_RESVPORT);
1175 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NOCONNECT);
1176 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_DUMBTIMER);
1177 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_CALLUMNT);
1178 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_RDIRPLUS);
1179 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NONEGNAMECACHE);
1180 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_MUTEJUKEBOX);
1181 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NOQUOTA);
1182 if (args.flags & NFSMNT_SOFT) {
1183 NFS_BITMAP_SET(mflags, NFS_MFLAG_SOFT);
1184 }
1185 if (args.flags & NFSMNT_INT) {
1186 NFS_BITMAP_SET(mflags, NFS_MFLAG_INTR);
1187 }
1188 if (args.flags & NFSMNT_RESVPORT) {
1189 NFS_BITMAP_SET(mflags, NFS_MFLAG_RESVPORT);
1190 }
1191 if (args.flags & NFSMNT_NOCONN) {
1192 NFS_BITMAP_SET(mflags, NFS_MFLAG_NOCONNECT);
1193 }
1194 if (args.flags & NFSMNT_DUMBTIMR) {
1195 NFS_BITMAP_SET(mflags, NFS_MFLAG_DUMBTIMER);
1196 }
1197 if (args.flags & NFSMNT_CALLUMNT) {
1198 NFS_BITMAP_SET(mflags, NFS_MFLAG_CALLUMNT);
1199 }
1200 if (args.flags & NFSMNT_RDIRPLUS) {
1201 NFS_BITMAP_SET(mflags, NFS_MFLAG_RDIRPLUS);
1202 }
1203 if (args.flags & NFSMNT_NONEGNAMECACHE) {
1204 NFS_BITMAP_SET(mflags, NFS_MFLAG_NONEGNAMECACHE);
1205 }
1206 if (args.flags & NFSMNT_MUTEJUKEBOX) {
1207 NFS_BITMAP_SET(mflags, NFS_MFLAG_MUTEJUKEBOX);
1208 }
1209 if (args.flags & NFSMNT_NOQUOTA) {
1210 NFS_BITMAP_SET(mflags, NFS_MFLAG_NOQUOTA);
1211 }
1212
1213 /* prepare mount attributes */
1214 NFS_BITMAP_ZERO(mattrs, NFS_MATTR_BITMAP_LEN);
1215 NFS_BITMAP_SET(mattrs, NFS_MATTR_FLAGS);
1216 NFS_BITMAP_SET(mattrs, NFS_MATTR_NFS_VERSION);
1217 NFS_BITMAP_SET(mattrs, NFS_MATTR_SOCKET_TYPE);
1218 NFS_BITMAP_SET(mattrs, NFS_MATTR_NFS_PORT);
1219 NFS_BITMAP_SET(mattrs, NFS_MATTR_FH);
1220 NFS_BITMAP_SET(mattrs, NFS_MATTR_FS_LOCATIONS);
1221 NFS_BITMAP_SET(mattrs, NFS_MATTR_MNTFLAGS);
1222 NFS_BITMAP_SET(mattrs, NFS_MATTR_MNTFROM);
1223 if (args.flags & NFSMNT_NFSV4) {
1224 nfsvers = 4;
1225 } else if (args.flags & NFSMNT_NFSV3) {
1226 nfsvers = 3;
1227 } else {
1228 nfsvers = 2;
1229 }
1230 if ((args.flags & NFSMNT_RSIZE) && (args.rsize > 0)) {
1231 NFS_BITMAP_SET(mattrs, NFS_MATTR_READ_SIZE);
1232 }
1233 if ((args.flags & NFSMNT_WSIZE) && (args.wsize > 0)) {
1234 NFS_BITMAP_SET(mattrs, NFS_MATTR_WRITE_SIZE);
1235 }
1236 if ((args.flags & NFSMNT_TIMEO) && (args.timeo > 0)) {
1237 NFS_BITMAP_SET(mattrs, NFS_MATTR_REQUEST_TIMEOUT);
1238 }
1239 if ((args.flags & NFSMNT_RETRANS) && (args.retrans > 0)) {
1240 NFS_BITMAP_SET(mattrs, NFS_MATTR_SOFT_RETRY_COUNT);
1241 }
1242 if ((args.flags & NFSMNT_MAXGRPS) && (args.maxgrouplist > 0)) {
1243 NFS_BITMAP_SET(mattrs, NFS_MATTR_MAX_GROUP_LIST);
1244 }
1245 if ((args.flags & NFSMNT_READAHEAD) && (args.readahead > 0)) {
1246 NFS_BITMAP_SET(mattrs, NFS_MATTR_READAHEAD);
1247 }
1248 if ((args.flags & NFSMNT_READDIRSIZE) && (args.readdirsize > 0)) {
1249 NFS_BITMAP_SET(mattrs, NFS_MATTR_READDIR_SIZE);
1250 }
1251 if ((args.flags & NFSMNT_NOLOCKS) ||
1252 (args.flags & NFSMNT_LOCALLOCKS)) {
1253 NFS_BITMAP_SET(mattrs, NFS_MATTR_LOCK_MODE);
1254 if (args.flags & NFSMNT_NOLOCKS) {
1255 nfslockmode = NFS_LOCK_MODE_DISABLED;
1256 } else if (args.flags & NFSMNT_LOCALLOCKS) {
1257 nfslockmode = NFS_LOCK_MODE_LOCAL;
1258 } else {
1259 nfslockmode = NFS_LOCK_MODE_ENABLED;
1260 }
1261 }
1262 if (args.version >= 4) {
1263 if ((args.flags & NFSMNT_ACREGMIN) && (args.acregmin > 0)) {
1264 NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_REG_MIN);
1265 }
1266 if ((args.flags & NFSMNT_ACREGMAX) && (args.acregmax > 0)) {
1267 NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_REG_MAX);
1268 }
1269 if ((args.flags & NFSMNT_ACDIRMIN) && (args.acdirmin > 0)) {
1270 NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MIN);
1271 }
1272 if ((args.flags & NFSMNT_ACDIRMAX) && (args.acdirmax > 0)) {
1273 NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MAX);
1274 }
1275 }
1276 if (args.version >= 5) {
1277 if ((args.flags & NFSMNT_SECFLAVOR) || (args.flags & NFSMNT_SECSYSOK)) {
1278 NFS_BITMAP_SET(mattrs, NFS_MATTR_SECURITY);
1279 }
1280 }
1281 if (args.version >= 6) {
1282 if ((args.flags & NFSMNT_DEADTIMEOUT) && (args.deadtimeout > 0)) {
1283 NFS_BITMAP_SET(mattrs, NFS_MATTR_DEAD_TIMEOUT);
1284 }
1285 }
1286
1287 /* build xdr buffer */
1288 xb_init_buffer(&xb, NULL, 0);
1289 xb_add_32(error, &xb, args.version);
1290 argslength_offset = xb_offset(&xb);
1291 xb_add_32(error, &xb, 0); // args length
1292 xb_add_32(error, &xb, NFS_XDRARGS_VERSION_0);
1293 xb_add_bitmap(error, &xb, mattrs, NFS_MATTR_BITMAP_LEN);
1294 attrslength_offset = xb_offset(&xb);
1295 xb_add_32(error, &xb, 0); // attrs length
1296 xb_add_bitmap(error, &xb, mflags_mask, NFS_MFLAG_BITMAP_LEN); /* mask */
1297 xb_add_bitmap(error, &xb, mflags, NFS_MFLAG_BITMAP_LEN); /* value */
1298 xb_add_32(error, &xb, nfsvers);
1299 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READ_SIZE)) {
1300 xb_add_32(error, &xb, args.rsize);
1301 }
1302 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_WRITE_SIZE)) {
1303 xb_add_32(error, &xb, args.wsize);
1304 }
1305 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READDIR_SIZE)) {
1306 xb_add_32(error, &xb, args.readdirsize);
1307 }
1308 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READAHEAD)) {
1309 xb_add_32(error, &xb, args.readahead);
1310 }
1311 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_REG_MIN)) {
1312 xb_add_32(error, &xb, args.acregmin);
1313 xb_add_32(error, &xb, 0);
1314 }
1315 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_REG_MAX)) {
1316 xb_add_32(error, &xb, args.acregmax);
1317 xb_add_32(error, &xb, 0);
1318 }
1319 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MIN)) {
1320 xb_add_32(error, &xb, args.acdirmin);
1321 xb_add_32(error, &xb, 0);
1322 }
1323 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MAX)) {
1324 xb_add_32(error, &xb, args.acdirmax);
1325 xb_add_32(error, &xb, 0);
1326 }
1327 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_LOCK_MODE)) {
1328 xb_add_32(error, &xb, nfslockmode);
1329 }
1330 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SECURITY)) {
1331 uint32_t flavors[2], i = 0;
1332 if (args.flags & NFSMNT_SECFLAVOR) {
1333 flavors[i++] = args.auth;
1334 }
1335 if ((args.flags & NFSMNT_SECSYSOK) && ((i == 0) || (flavors[0] != RPCAUTH_SYS))) {
1336 flavors[i++] = RPCAUTH_SYS;
1337 }
1338 xb_add_word_array(error, &xb, flavors, i);
1339 }
1340 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MAX_GROUP_LIST)) {
1341 xb_add_32(error, &xb, args.maxgrouplist);
1342 }
1343 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOCKET_TYPE)) {
1344 xb_add_string(error, &xb, ((args.sotype == SOCK_DGRAM) ? "udp" : "tcp"), 3);
1345 }
1346 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_PORT)) {
1347 xb_add_32(error, &xb, ((ss.ss_family == AF_INET) ?
1348 ntohs(((struct sockaddr_in*)&ss)->sin_port) :
1349 ntohs(((struct sockaddr_in6*)&ss)->sin6_port)));
1350 }
1351 /* NFS_MATTR_MOUNT_PORT (not available in old args) */
1352 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_REQUEST_TIMEOUT)) {
1353 /* convert from .1s increments to time */
1354 xb_add_32(error, &xb, args.timeo / 10);
1355 xb_add_32(error, &xb, (args.timeo % 10) * 100000000);
1356 }
1357 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOFT_RETRY_COUNT)) {
1358 xb_add_32(error, &xb, args.retrans);
1359 }
1360 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_DEAD_TIMEOUT)) {
1361 xb_add_32(error, &xb, args.deadtimeout);
1362 xb_add_32(error, &xb, 0);
1363 }
1364 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FH)) {
1365 xb_add_fh(error, &xb, &nfh[0], args.fhsize);
1366 }
1367 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FS_LOCATIONS)) {
1368 xb_add_32(error, &xb, 1); /* fs location count */
1369 xb_add_32(error, &xb, 1); /* server count */
1370 xb_add_string(error, &xb, mntfrom, (endserverp - mntfrom)); /* server name */
1371 xb_add_32(error, &xb, 1); /* address count */
1372 xb_add_string(error, &xb, uaddr, strlen(uaddr)); /* address */
1373 xb_add_32(error, &xb, 0); /* empty server info */
1374 xb_add_32(error, &xb, numcomps); /* pathname component count */
1375 nfsmout_if(error);
1376 p = frompath;
1377 while (*p && (*p == '/')) {
1378 p++;
1379 }
1380 while (*p) {
1381 cp = p;
1382 while (*p && (*p != '/')) {
1383 p++;
1384 }
1385 xb_add_string(error, &xb, cp, (p - cp)); /* component */
1386 nfsmout_if(error);
1387 while (*p && (*p == '/')) {
1388 p++;
1389 }
1390 }
1391 xb_add_32(error, &xb, 0); /* empty fsl info */
1392 }
1393 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MNTFLAGS)) {
1394 xb_add_32(error, &xb, (vfs_flags(mp) & MNT_VISFLAGMASK)); /* VFS MNT_* flags */
1395 }
1396 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MNTFROM)) {
1397 xb_add_string(error, &xb, mntfrom, strlen(mntfrom)); /* fixed f_mntfromname */
1398 }
1399 xb_build_done(error, &xb);
1400
1401 /* update opaque counts */
1402 end_offset = xb_offset(&xb);
1403 error = xb_seek(&xb, argslength_offset);
1404 xb_add_32(error, &xb, end_offset - argslength_offset + XDRWORD /*version*/);
1405 nfsmout_if(error);
1406 error = xb_seek(&xb, attrslength_offset);
1407 xb_add_32(error, &xb, end_offset - attrslength_offset - XDRWORD /*don't include length field*/);
1408
1409 if (!error) {
1410 /* grab the assembled buffer */
1411 *xdrbufp = xb_buffer_base(&xb);
1412 xb.xb_flags &= ~XB_CLEANUP;
1413 }
1414 nfsmout:
1415 xb_cleanup(&xb);
1416 NFS_ZFREE(ZV_NAMEI, mntfrom);
1417 return error;
1418 }
1419
1420 /*
1421 * VFS Operations.
1422 *
1423 * mount system call
1424 */
1425 int
nfs_vfs_mount(mount_t mp,vnode_t vp,user_addr_t data,vfs_context_t ctx)1426 nfs_vfs_mount(mount_t mp, vnode_t vp, user_addr_t data, vfs_context_t ctx)
1427 {
1428 int error = 0, inkernel = vfs_iskernelmount(mp);
1429 uint32_t argsversion, argslength;
1430 char *xdrbuf = NULL;
1431
1432 /* read in version */
1433 if (inkernel) {
1434 bcopy(CAST_DOWN(void *, data), &argsversion, sizeof(argsversion));
1435 } else if ((error = copyin(data, &argsversion, sizeof(argsversion)))) {
1436 return NFS_MAPERR(error);
1437 }
1438
1439 /* If we have XDR args, then all values in the buffer are in network order */
1440 if (argsversion == htonl(NFS_ARGSVERSION_XDR)) {
1441 argsversion = NFS_ARGSVERSION_XDR;
1442 }
1443
1444 switch (argsversion) {
1445 case 3:
1446 case 4:
1447 case 5:
1448 case 6:
1449 /* convert old-style args to xdr */
1450 error = nfs_convert_old_nfs_args(mp, data, ctx, argsversion, inkernel, &xdrbuf);
1451 break;
1452 case NFS_ARGSVERSION_XDR:
1453 /* copy in xdr buffer */
1454 if (inkernel) {
1455 bcopy(CAST_DOWN(void *, (data + XDRWORD)), &argslength, XDRWORD);
1456 } else {
1457 error = copyin((data + XDRWORD), &argslength, XDRWORD);
1458 }
1459 if (error) {
1460 break;
1461 }
1462 argslength = ntohl(argslength);
1463 /* put a reasonable limit on the size of the XDR args */
1464 if (argslength > 16 * 1024) {
1465 error = E2BIG;
1466 break;
1467 }
1468 /* allocate xdr buffer */
1469 xdrbuf = xb_malloc(xdr_rndup(argslength));
1470 if (!xdrbuf) {
1471 error = ENOMEM;
1472 break;
1473 }
1474 if (inkernel) {
1475 bcopy(CAST_DOWN(void *, data), xdrbuf, argslength);
1476 } else {
1477 error = copyin(data, xdrbuf, argslength);
1478 }
1479
1480 if (!inkernel) {
1481 /* Recheck buffer size to avoid double fetch vulnerability */
1482 struct xdrbuf xb;
1483 uint32_t _version, _length;
1484 xb_init_buffer(&xb, xdrbuf, 2 * XDRWORD);
1485 xb_get_32(error, &xb, _version); /* version */
1486 xb_get_32(error, &xb, _length); /* args length */
1487 if (_length != argslength) {
1488 printf("nfs: actual buffer length (%u) does not match the initial value (%u)\n", _length, argslength);
1489 error = EINVAL;
1490 break;
1491 }
1492 }
1493
1494 break;
1495 default:
1496 error = EPROGMISMATCH;
1497 }
1498
1499 if (error) {
1500 if (xdrbuf) {
1501 xb_free(xdrbuf);
1502 }
1503 return NFS_MAPERR(error);
1504 }
1505 error = mountnfs(xdrbuf, mp, ctx, &vp);
1506 return NFS_MAPERR(error);
1507 }
1508
1509 /*
1510 * Common code for mount and mountroot
1511 */
1512
/*
 * Set up an NFSv2/v3 mount.
 *
 * Fetches the mount point's attributes, creates the root nfsnode
 * (returned unlocked in *npp), and gathers general server info:
 * a fixed MAXNAME for v2, or FSINFO plus cached PATHCONF results
 * for v3.  On failure *npp is NULL and the partially-created root
 * vnode (if any) has been released.
 */
int
nfs3_mount(
	struct nfsmount *nmp,
	vfs_context_t ctx,
	nfsnode_t *npp)
{
	int error = 0;
	struct nfs_vattr nvattr;
	u_int64_t xid;

	*npp = NULL;

	/* cannot build a root node without the mount's file handle */
	if (!nmp->nm_fh) {
		return EINVAL;
	}

	/*
	 * Get file attributes for the mountpoint. These are needed
	 * in order to properly create the root vnode.
	 */
	error = nfs3_getattr_rpc(NULL, nmp->nm_mountp, nmp->nm_fh->fh_data, nmp->nm_fh->fh_len, 0,
	    ctx, &nvattr, &xid);
	if (error) {
		goto out;
	}

	/* create/find the root node; NG_MARKROOT tags it as the fs root */
	error = nfs_nget(nmp->nm_mountp, NULL, NULL, nmp->nm_fh->fh_data, nmp->nm_fh->fh_len,
	    &nvattr, &xid, RPCAUTH_UNKNOWN, NG_MARKROOT, npp);
	/* nfs_nget returns the node locked; callers expect it unlocked */
	if (*npp) {
		nfs_node_unlock(*npp);
	}
	if (error) {
		goto out;
	}

	/*
	 * Try to make sure we have all the general info from the server.
	 */
	if (nmp->nm_vers == NFS_VER2) {
		/* v2 has no FSINFO; assume the protocol's fixed name limit */
		NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXNAME);
		nmp->nm_fsattr.nfsa_maxname = NFS_MAXNAMLEN;
	} else if (nmp->nm_vers == NFS_VER3) {
		/* get the NFSv3 FSINFO */
		error = nfs3_fsinfo(nmp, *npp, ctx);
		if (error) {
			goto out;
		}
		/* grab a copy of root info now (even if server does not support FSF_HOMOGENEOUS) */
		struct nfs_fsattr nfsa;
		if (!nfs3_pathconf_rpc(*npp, &nfsa, ctx)) {
			/* cache a copy of the results */
			lck_mtx_lock(&nmp->nm_lock);
			nfs3_pathconf_cache(nmp, &nfsa);
			lck_mtx_unlock(&nmp->nm_lock);
		}
	}
out:
	if (*npp && error) {
		/* release the root node created above so the mount can be torn down */
		vnode_put(NFSTOV(*npp));
		vnode_recycle(NFSTOV(*npp));
		*npp = NULL;
	}
	return error;
}
1578
1579 #if CONFIG_NFS4
1580 /*
1581 * Update an NFSv4 mount path with the contents of the symlink.
1582 *
1583 * Read the link for the given file handle.
1584 * Insert the link's components into the path.
1585 */
1586 int
nfs4_mount_update_path_with_symlink(struct nfsmount * nmp,struct nfs_fs_path * nfsp,uint32_t curcomp,fhandle_t * dirfhp,int * depthp,fhandle_t * fhp,vfs_context_t ctx)1587 nfs4_mount_update_path_with_symlink(struct nfsmount *nmp, struct nfs_fs_path *nfsp, uint32_t curcomp, fhandle_t *dirfhp, int *depthp, fhandle_t *fhp, vfs_context_t ctx)
1588 {
1589 int error = 0, status, numops;
1590 uint32_t len = 0, comp, newcomp, linkcompcount;
1591 u_int64_t xid;
1592 struct nfsm_chain nmreq, nmrep;
1593 struct nfsreq rq, *req = &rq;
1594 struct nfsreq_secinfo_args si;
1595 char *link = NULL, *p, *q, ch;
1596 struct nfs_fs_path nfsp2;
1597
1598 bzero(&nfsp2, sizeof(nfsp2));
1599 if (dirfhp->fh_len) {
1600 NFSREQ_SECINFO_SET(&si, NULL, dirfhp->fh_data, dirfhp->fh_len, nfsp->np_components[curcomp], 0);
1601 } else {
1602 NFSREQ_SECINFO_SET(&si, NULL, NULL, 0, nfsp->np_components[curcomp], 0);
1603 }
1604 nfsm_chain_null(&nmreq);
1605 nfsm_chain_null(&nmrep);
1606
1607 link = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_NOFAIL);
1608
1609 // PUTFH, READLINK
1610 numops = 2;
1611 nfsm_chain_build_alloc_init(error, &nmreq, 12 * NFSX_UNSIGNED);
1612 nfsm_chain_add_compound_header(error, &nmreq, "readlink", nmp->nm_minor_vers, numops);
1613 numops--;
1614 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
1615 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, fhp->fh_data, fhp->fh_len);
1616 numops--;
1617 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_READLINK);
1618 nfsm_chain_build_done(error, &nmreq);
1619 nfsm_assert(error, (numops == 0), EPROTO);
1620 nfsmout_if(error);
1621
1622 error = nfs_request_async(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND,
1623 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
1624 if (!error) {
1625 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
1626 }
1627
1628 nfsm_chain_skip_tag(error, &nmrep);
1629 nfsm_chain_get_32(error, &nmrep, numops);
1630 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
1631 nfsm_chain_op_check(error, &nmrep, NFS_OP_READLINK);
1632 nfsm_chain_get_32(error, &nmrep, len);
1633 nfsmout_if(error);
1634 if (len == 0) {
1635 error = ENOENT;
1636 } else if (len >= MAXPATHLEN) {
1637 len = MAXPATHLEN - 1;
1638 }
1639 nfsm_chain_get_opaque(error, &nmrep, len, link);
1640 nfsmout_if(error);
1641 /* make sure link string is terminated properly */
1642 link[len] = '\0';
1643
1644 /* count the number of components in link */
1645 p = link;
1646 while (*p && (*p == '/')) {
1647 p++;
1648 }
1649 linkcompcount = 0;
1650 while (*p) {
1651 linkcompcount++;
1652 while (*p && (*p != '/')) {
1653 p++;
1654 }
1655 while (*p && (*p == '/')) {
1656 p++;
1657 }
1658 }
1659
1660 /* free up used components */
1661 for (comp = 0; comp <= curcomp; comp++) {
1662 if (nfsp->np_components[comp]) {
1663 kfree_data_addr(nfsp->np_components[comp]);
1664 }
1665 }
1666
1667 /* set up new path */
1668 nfsp2.np_compcount = nfsp->np_compcount - curcomp - 1 + linkcompcount;
1669 MALLOC(nfsp2.np_components, char **, nfsp2.np_compcount * sizeof(char*), M_TEMP, M_WAITOK | M_ZERO);
1670 if (!nfsp2.np_components) {
1671 error = ENOMEM;
1672 goto nfsmout;
1673 }
1674
1675 /* add link components */
1676 p = link;
1677 while (*p && (*p == '/')) {
1678 p++;
1679 }
1680 for (newcomp = 0; newcomp < linkcompcount; newcomp++) {
1681 /* find end of component */
1682 q = p;
1683 while (*q && (*q != '/')) {
1684 q++;
1685 }
1686 nfsp2.np_components[newcomp] = kalloc_data(q - p + 1, Z_WAITOK | Z_ZERO);
1687 if (!nfsp2.np_components[newcomp]) {
1688 error = ENOMEM;
1689 break;
1690 }
1691 ch = *q;
1692 *q = '\0';
1693 strlcpy(nfsp2.np_components[newcomp], p, q - p + 1);
1694 *q = ch;
1695 p = q;
1696 while (*p && (*p == '/')) {
1697 p++;
1698 }
1699 }
1700 nfsmout_if(error);
1701
1702 /* add remaining components */
1703 for (comp = curcomp + 1; comp < nfsp->np_compcount; comp++, newcomp++) {
1704 nfsp2.np_components[newcomp] = nfsp->np_components[comp];
1705 nfsp->np_components[comp] = NULL;
1706 }
1707
1708 /* move new path into place */
1709 FREE(nfsp->np_components, M_TEMP);
1710 nfsp->np_components = nfsp2.np_components;
1711 nfsp->np_compcount = nfsp2.np_compcount;
1712 nfsp2.np_components = NULL;
1713
1714 /* for absolute link, let the caller now that the next dirfh is root */
1715 if (link[0] == '/') {
1716 dirfhp->fh_len = 0;
1717 *depthp = 0;
1718 }
1719 nfsmout:
1720 NFS_ZFREE(ZV_NAMEI, link);
1721 if (nfsp2.np_components) {
1722 for (comp = 0; comp < nfsp2.np_compcount; comp++) {
1723 if (nfsp2.np_components[comp]) {
1724 kfree_data_addr(nfsp2.np_components[comp]);
1725 }
1726 }
1727 FREE(nfsp2.np_components, M_TEMP);
1728 }
1729 nfsm_chain_cleanup(&nmreq);
1730 nfsm_chain_cleanup(&nmrep);
1731 return error;
1732 }
1733
1734 /* Set up an NFSv4 mount */
1735 int
nfs4_mount(struct nfsmount * nmp,vfs_context_t ctx,nfsnode_t * npp)1736 nfs4_mount(
1737 struct nfsmount *nmp,
1738 vfs_context_t ctx,
1739 nfsnode_t *npp)
1740 {
1741 struct nfsm_chain nmreq, nmrep;
1742 int error = 0, numops, status, interval, isdotdot, loopcnt = 0, depth = 0;
1743 struct nfs_fs_path fspath, *nfsp, fspath2;
1744 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], comp, comp2;
1745 fhandle_t fh, dirfh;
1746 struct nfs_vattr nvattr;
1747 u_int64_t xid;
1748 struct nfsreq rq, *req = &rq;
1749 struct nfsreq_secinfo_args si;
1750 struct nfs_sec sec;
1751 struct nfs_fs_locations nfsls;
1752
1753 *npp = NULL;
1754 fh.fh_len = dirfh.fh_len = 0;
1755 TAILQ_INIT(&nmp->nm_open_owners);
1756 TAILQ_INIT(&nmp->nm_delegations);
1757 TAILQ_INIT(&nmp->nm_dreturnq);
1758 nmp->nm_stategenid = 1;
1759 NVATTR_INIT(&nvattr);
1760 bzero(&nfsls, sizeof(nfsls));
1761 nfsm_chain_null(&nmreq);
1762 nfsm_chain_null(&nmrep);
1763
1764 /*
1765 * If no security flavors were specified we'll want to default to the server's
1766 * preferred flavor. For NFSv4.0 we need a file handle and name to get that via
1767 * SECINFO, so we'll do that on the last component of the server path we are
1768 * mounting. If we are mounting the server's root, we'll need to defer the
1769 * SECINFO call to the first successful LOOKUP request.
1770 */
1771 if (!nmp->nm_sec.count) {
1772 nmp->nm_state |= NFSSTA_NEEDSECINFO;
1773 }
1774
1775 /* make a copy of the current location's path */
1776 nfsp = &nmp->nm_locations.nl_locations[nmp->nm_locations.nl_current.nli_loc]->nl_path;
1777 bzero(&fspath, sizeof(fspath));
1778 fspath.np_compcount = nfsp->np_compcount;
1779 if (fspath.np_compcount > 0) {
1780 MALLOC(fspath.np_components, char **, fspath.np_compcount * sizeof(char*), M_TEMP, M_WAITOK | M_ZERO);
1781 if (!fspath.np_components) {
1782 error = ENOMEM;
1783 goto nfsmout;
1784 }
1785 for (comp = 0; comp < nfsp->np_compcount; comp++) {
1786 size_t slen = strlen(nfsp->np_components[comp]);
1787 fspath.np_components[comp] = kalloc_data(slen + 1, Z_WAITOK | Z_ZERO);
1788 if (!fspath.np_components[comp]) {
1789 error = ENOMEM;
1790 break;
1791 }
1792 strlcpy(fspath.np_components[comp], nfsp->np_components[comp], slen + 1);
1793 }
1794 if (error) {
1795 goto nfsmout;
1796 }
1797 }
1798
1799 /* for mirror mounts, we can just use the file handle passed in */
1800 if (nmp->nm_fh) {
1801 dirfh.fh_len = nmp->nm_fh->fh_len;
1802 bcopy(nmp->nm_fh->fh_data, dirfh.fh_data, dirfh.fh_len);
1803 NFSREQ_SECINFO_SET(&si, NULL, dirfh.fh_data, dirfh.fh_len, NULL, 0);
1804 goto gotfh;
1805 }
1806
1807 /* otherwise, we need to get the fh for the directory we are mounting */
1808
1809 /* if no components, just get root */
1810 if (fspath.np_compcount == 0) {
1811 nocomponents:
1812 // PUTROOTFH + GETATTR(FH)
1813 NFSREQ_SECINFO_SET(&si, NULL, NULL, 0, NULL, 0);
1814 numops = 2;
1815 nfsm_chain_build_alloc_init(error, &nmreq, 9 * NFSX_UNSIGNED);
1816 nfsm_chain_add_compound_header(error, &nmreq, "mount", nmp->nm_minor_vers, numops);
1817 numops--;
1818 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTROOTFH);
1819 numops--;
1820 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
1821 NFS_CLEAR_ATTRIBUTES(bitmap);
1822 NFS4_DEFAULT_ATTRIBUTES(bitmap);
1823 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
1824 nfsm_chain_add_bitmap(error, &nmreq, bitmap, NFS_ATTR_BITMAP_LEN);
1825 nfsm_chain_build_done(error, &nmreq);
1826 nfsm_assert(error, (numops == 0), EPROTO);
1827 nfsmout_if(error);
1828 error = nfs_request_async(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND,
1829 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
1830 if (!error) {
1831 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
1832 }
1833 nfsm_chain_skip_tag(error, &nmrep);
1834 nfsm_chain_get_32(error, &nmrep, numops);
1835 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTROOTFH);
1836 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
1837 nfsmout_if(error);
1838 NFS_CLEAR_ATTRIBUTES(nmp->nm_fsattr.nfsa_bitmap);
1839 error = nfs4_parsefattr(&nmrep, &nmp->nm_fsattr, &nvattr, &dirfh, NULL, NULL);
1840 if (!error && !NFS_BITMAP_ISSET(&nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
1841 printf("nfs: mount didn't return filehandle?\n");
1842 error = EBADRPC;
1843 }
1844 nfsmout_if(error);
1845 nfsm_chain_cleanup(&nmrep);
1846 nfsm_chain_null(&nmreq);
1847 NVATTR_CLEANUP(&nvattr);
1848 goto gotfh;
1849 }
1850
1851 /* look up each path component */
1852 for (comp = 0; comp < fspath.np_compcount;) {
1853 isdotdot = 0;
1854 if (fspath.np_components[comp][0] == '.') {
1855 if (fspath.np_components[comp][1] == '\0') {
1856 /* skip "." */
1857 comp++;
1858 continue;
1859 }
1860 /* treat ".." specially */
1861 if ((fspath.np_components[comp][1] == '.') &&
1862 (fspath.np_components[comp][2] == '\0')) {
1863 isdotdot = 1;
1864 }
1865 if (isdotdot && (dirfh.fh_len == 0)) {
1866 /* ".." in root directory is same as "." */
1867 comp++;
1868 continue;
1869 }
1870 }
1871 // PUT(ROOT)FH + LOOKUP(P) + GETFH + GETATTR
1872 if (dirfh.fh_len == 0) {
1873 NFSREQ_SECINFO_SET(&si, NULL, NULL, 0, isdotdot ? NULL : fspath.np_components[comp], 0);
1874 } else {
1875 NFSREQ_SECINFO_SET(&si, NULL, dirfh.fh_data, dirfh.fh_len, isdotdot ? NULL : fspath.np_components[comp], 0);
1876 }
1877 numops = 4;
1878 nfsm_chain_build_alloc_init(error, &nmreq, 18 * NFSX_UNSIGNED);
1879 nfsm_chain_add_compound_header(error, &nmreq, "mount", nmp->nm_minor_vers, numops);
1880 numops--;
1881 if (dirfh.fh_len) {
1882 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
1883 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, dirfh.fh_data, dirfh.fh_len);
1884 } else {
1885 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTROOTFH);
1886 }
1887 numops--;
1888 if (isdotdot) {
1889 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_LOOKUPP);
1890 } else {
1891 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_LOOKUP);
1892 nfsm_chain_add_name(error, &nmreq,
1893 fspath.np_components[comp], strlen(fspath.np_components[comp]), nmp);
1894 }
1895 numops--;
1896 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETFH);
1897 numops--;
1898 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
1899 NFS_CLEAR_ATTRIBUTES(bitmap);
1900 NFS4_DEFAULT_ATTRIBUTES(bitmap);
1901 /* if no namedattr support or component is ".zfs", clear NFS_FATTR_NAMED_ATTR */
1902 if (!NMFLAG(nmp, NAMEDATTR) || !strcmp(fspath.np_components[comp], ".zfs")) {
1903 NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
1904 }
1905 nfsm_chain_add_bitmap(error, &nmreq, bitmap, NFS_ATTR_BITMAP_LEN);
1906 nfsm_chain_build_done(error, &nmreq);
1907 nfsm_assert(error, (numops == 0), EPROTO);
1908 nfsmout_if(error);
1909 error = nfs_request_async(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND,
1910 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
1911 if (!error) {
1912 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
1913 }
1914 nfsm_chain_skip_tag(error, &nmrep);
1915 nfsm_chain_get_32(error, &nmrep, numops);
1916 nfsm_chain_op_check(error, &nmrep, dirfh.fh_len ? NFS_OP_PUTFH : NFS_OP_PUTROOTFH);
1917 nfsm_chain_op_check(error, &nmrep, isdotdot ? NFS_OP_LOOKUPP : NFS_OP_LOOKUP);
1918 nfsmout_if(error);
1919 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETFH);
1920 nfsm_chain_get_32(error, &nmrep, fh.fh_len);
1921 if (fh.fh_len > sizeof(fh.fh_data)) {
1922 error = EBADRPC;
1923 }
1924 nfsmout_if(error);
1925 nfsm_chain_get_opaque(error, &nmrep, fh.fh_len, fh.fh_data);
1926 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
1927 if (!error) {
1928 NFS_CLEAR_ATTRIBUTES(nmp->nm_fsattr.nfsa_bitmap);
1929 error = nfs4_parsefattr(&nmrep, &nmp->nm_fsattr, &nvattr, NULL, NULL, &nfsls);
1930 }
1931 nfsm_chain_cleanup(&nmrep);
1932 nfsm_chain_null(&nmreq);
1933 if (error) {
1934 /* LOOKUP succeeded but GETATTR failed? This could be a referral. */
1935 /* Try the lookup again with a getattr for fs_locations. */
1936 nfs_fs_locations_cleanup(&nfsls);
1937 error = nfs4_get_fs_locations(nmp, NULL, dirfh.fh_data, dirfh.fh_len, fspath.np_components[comp], ctx, &nfsls);
1938 if (!error && (nfsls.nl_numlocs < 1)) {
1939 error = ENOENT;
1940 }
1941 nfsmout_if(error);
1942 if (++loopcnt > MAXSYMLINKS) {
1943 /* too many symlink/referral redirections */
1944 error = ELOOP;
1945 goto nfsmout;
1946 }
1947 /* tear down the current connection */
1948 nfs_disconnect(nmp);
1949 /* replace fs locations */
1950 nfs_fs_locations_cleanup(&nmp->nm_locations);
1951 nmp->nm_locations = nfsls;
1952 bzero(&nfsls, sizeof(nfsls));
1953 /* initiate a connection using the new fs locations */
1954 error = nfs_mount_connect(nmp);
1955 if (!error && !(nmp->nm_locations.nl_current.nli_flags & NLI_VALID)) {
1956 error = EIO;
1957 }
1958 nfsmout_if(error);
1959 /* add new server's remote path to beginning of our path and continue */
1960 nfsp = &nmp->nm_locations.nl_locations[nmp->nm_locations.nl_current.nli_loc]->nl_path;
1961 bzero(&fspath2, sizeof(fspath2));
1962 fspath2.np_compcount = (fspath.np_compcount - comp - 1) + nfsp->np_compcount;
1963 if (fspath2.np_compcount > 0) {
1964 MALLOC(fspath2.np_components, char **, fspath2.np_compcount * sizeof(char*), M_TEMP, M_WAITOK | M_ZERO);
1965 if (!fspath2.np_components) {
1966 error = ENOMEM;
1967 goto nfsmout;
1968 }
1969 for (comp2 = 0; comp2 < nfsp->np_compcount; comp2++) {
1970 size_t slen = strlen(nfsp->np_components[comp2]);
1971 fspath2.np_components[comp2] = kalloc_data(slen + 1, Z_WAITOK | Z_ZERO);
1972 if (!fspath2.np_components[comp2]) {
1973 /* clean up fspath2, then error out */
1974 while (comp2 > 0) {
1975 comp2--;
1976 kfree_data_addr(fspath2.np_components[comp2]);
1977 }
1978 FREE(fspath2.np_components, M_TEMP);
1979 error = ENOMEM;
1980 goto nfsmout;
1981 }
1982 strlcpy(fspath2.np_components[comp2], nfsp->np_components[comp2], slen + 1);
1983 }
1984 if ((fspath.np_compcount - comp - 1) > 0) {
1985 bcopy(&fspath.np_components[comp + 1], &fspath2.np_components[nfsp->np_compcount], (fspath.np_compcount - comp - 1) * sizeof(char*));
1986 }
1987 /* free up unused parts of old path (prior components and component array) */
1988 do {
1989 kfree_data_addr(fspath.np_components[comp]);
1990 } while (comp-- > 0);
1991 FREE(fspath.np_components, M_TEMP);
1992 /* put new path in place */
1993 fspath = fspath2;
1994 }
1995 /* reset dirfh and component index */
1996 dirfh.fh_len = 0;
1997 comp = 0;
1998 NVATTR_CLEANUP(&nvattr);
1999 if (fspath.np_compcount == 0) {
2000 goto nocomponents;
2001 }
2002 continue;
2003 }
2004 nfsmout_if(error);
2005 /* if file handle is for a symlink, then update the path with the symlink contents */
2006 if (NFS_BITMAP_ISSET(&nvattr.nva_bitmap, NFS_FATTR_TYPE) && (nvattr.nva_type == VLNK)) {
2007 if (++loopcnt > MAXSYMLINKS) {
2008 error = ELOOP;
2009 } else {
2010 error = nfs4_mount_update_path_with_symlink(nmp, &fspath, comp, &dirfh, &depth, &fh, ctx);
2011 }
2012 nfsmout_if(error);
2013 /* directory file handle is either left the same or reset to root (if link was absolute) */
2014 /* path traversal starts at beginning of the path again */
2015 comp = 0;
2016 NVATTR_CLEANUP(&nvattr);
2017 nfs_fs_locations_cleanup(&nfsls);
2018 continue;
2019 }
2020 NVATTR_CLEANUP(&nvattr);
2021 nfs_fs_locations_cleanup(&nfsls);
2022 /* not a symlink... */
2023 if ((nmp->nm_state & NFSSTA_NEEDSECINFO) && (comp == (fspath.np_compcount - 1)) && !isdotdot) {
2024 /* need to get SECINFO for the directory being mounted */
2025 if (dirfh.fh_len == 0) {
2026 NFSREQ_SECINFO_SET(&si, NULL, NULL, 0, isdotdot ? NULL : fspath.np_components[comp], 0);
2027 } else {
2028 NFSREQ_SECINFO_SET(&si, NULL, dirfh.fh_data, dirfh.fh_len, isdotdot ? NULL : fspath.np_components[comp], 0);
2029 }
2030 sec.count = NX_MAX_SEC_FLAVORS;
2031 error = nfs4_secinfo_rpc(nmp, &si, vfs_context_ucred(ctx), sec.flavors, &sec.count);
2032 /* [sigh] some implementations return "illegal" error for unsupported ops */
2033 if (error == NFSERR_OP_ILLEGAL) {
2034 error = 0;
2035 }
2036 nfsmout_if(error);
2037 /* set our default security flavor to the first in the list */
2038 if (sec.count) {
2039 nmp->nm_auth = sec.flavors[0];
2040 }
2041 nmp->nm_state &= ~NFSSTA_NEEDSECINFO;
2042 }
2043 /* advance directory file handle, component index, & update depth */
2044 dirfh = fh;
2045 comp++;
2046 if (!isdotdot) { /* going down the hierarchy */
2047 depth++;
2048 } else if (--depth <= 0) { /* going up the hierarchy */
2049 dirfh.fh_len = 0; /* clear dirfh when we hit root */
2050 }
2051 }
2052
2053 gotfh:
2054 /* get attrs for mount point root */
2055 numops = NMFLAG(nmp, NAMEDATTR) ? 3 : 2; // PUTFH + GETATTR + OPENATTR
2056 nfsm_chain_build_alloc_init(error, &nmreq, 25 * NFSX_UNSIGNED);
2057 nfsm_chain_add_compound_header(error, &nmreq, "mount", nmp->nm_minor_vers, numops);
2058 numops--;
2059 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
2060 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, dirfh.fh_data, dirfh.fh_len);
2061 numops--;
2062 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
2063 NFS_CLEAR_ATTRIBUTES(bitmap);
2064 NFS4_DEFAULT_ATTRIBUTES(bitmap);
2065 /* if no namedattr support or last component is ".zfs", clear NFS_FATTR_NAMED_ATTR */
2066 if (!NMFLAG(nmp, NAMEDATTR) || ((fspath.np_compcount > 0) && !strcmp(fspath.np_components[fspath.np_compcount - 1], ".zfs"))) {
2067 NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
2068 }
2069 nfsm_chain_add_bitmap(error, &nmreq, bitmap, NFS_ATTR_BITMAP_LEN);
2070 if (NMFLAG(nmp, NAMEDATTR)) {
2071 numops--;
2072 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_OPENATTR);
2073 nfsm_chain_add_32(error, &nmreq, 0);
2074 }
2075 nfsm_chain_build_done(error, &nmreq);
2076 nfsm_assert(error, (numops == 0), EPROTO);
2077 nfsmout_if(error);
2078 error = nfs_request_async(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND,
2079 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
2080 if (!error) {
2081 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
2082 }
2083 nfsm_chain_skip_tag(error, &nmrep);
2084 nfsm_chain_get_32(error, &nmrep, numops);
2085 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
2086 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
2087 nfsmout_if(error);
2088 NFS_CLEAR_ATTRIBUTES(nmp->nm_fsattr.nfsa_bitmap);
2089 error = nfs4_parsefattr(&nmrep, &nmp->nm_fsattr, &nvattr, NULL, NULL, NULL);
2090 nfsmout_if(error);
2091 if (NMFLAG(nmp, NAMEDATTR)) {
2092 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
2093 if (error == ENOENT) {
2094 error = 0;
2095 }
2096 /* [sigh] some implementations return "illegal" error for unsupported ops */
2097 if (error || !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_NAMED_ATTR)) {
2098 nmp->nm_fsattr.nfsa_flags &= ~NFS_FSFLAG_NAMED_ATTR;
2099 } else {
2100 nmp->nm_fsattr.nfsa_flags |= NFS_FSFLAG_NAMED_ATTR;
2101 }
2102 } else {
2103 nmp->nm_fsattr.nfsa_flags &= ~NFS_FSFLAG_NAMED_ATTR;
2104 }
2105 if (NMFLAG(nmp, NOACL)) { /* make sure ACL support is turned off */
2106 nmp->nm_fsattr.nfsa_flags &= ~NFS_FSFLAG_ACL;
2107 }
2108 if (NMFLAG(nmp, ACLONLY) && !(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL)) {
2109 NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_ACLONLY);
2110 }
2111 if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_FH_EXPIRE_TYPE)) {
2112 uint32_t fhtype = ((nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_FHTYPE_MASK) >> NFS_FSFLAG_FHTYPE_SHIFT);
2113 if (fhtype != NFS_FH_PERSISTENT) {
2114 printf("nfs: warning: non-persistent file handles! for %s\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
2115 }
2116 }
2117
2118 /* make sure it's a directory */
2119 if (!NFS_BITMAP_ISSET(&nvattr.nva_bitmap, NFS_FATTR_TYPE) || (nvattr.nva_type != VDIR)) {
2120 error = ENOTDIR;
2121 goto nfsmout;
2122 }
2123
2124 /* save the NFS fsid */
2125 nmp->nm_fsid = nvattr.nva_fsid;
2126
2127 /* create the root node */
2128 error = nfs_nget(nmp->nm_mountp, NULL, NULL, dirfh.fh_data, dirfh.fh_len, &nvattr, &xid, rq.r_auth, NG_MARKROOT, npp);
2129 nfsmout_if(error);
2130
2131 if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL) {
2132 vfs_setextendedsecurity(nmp->nm_mountp);
2133 }
2134
2135 /* adjust I/O sizes to server limits */
2136 if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXREAD) && (nmp->nm_fsattr.nfsa_maxread > 0)) {
2137 if (nmp->nm_fsattr.nfsa_maxread < (uint64_t)nmp->nm_rsize) {
2138 nmp->nm_rsize = nmp->nm_fsattr.nfsa_maxread & ~(NFS_FABLKSIZE - 1);
2139 if (nmp->nm_rsize == 0) {
2140 nmp->nm_rsize = nmp->nm_fsattr.nfsa_maxread;
2141 }
2142 }
2143 }
2144 if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXWRITE) && (nmp->nm_fsattr.nfsa_maxwrite > 0)) {
2145 if (nmp->nm_fsattr.nfsa_maxwrite < (uint64_t)nmp->nm_wsize) {
2146 nmp->nm_wsize = nmp->nm_fsattr.nfsa_maxwrite & ~(NFS_FABLKSIZE - 1);
2147 if (nmp->nm_wsize == 0) {
2148 nmp->nm_wsize = nmp->nm_fsattr.nfsa_maxwrite;
2149 }
2150 }
2151 }
2152
2153 /* set up lease renew timer */
2154 nmp->nm_renew_timer = thread_call_allocate_with_options(nfs4_renew_timer, nmp, THREAD_CALL_PRIORITY_HIGH, THREAD_CALL_OPTIONS_ONCE);
2155 interval = nmp->nm_fsattr.nfsa_lease / 2;
2156 if (interval < 1) {
2157 interval = 1;
2158 }
2159 nfs_interval_timer_start(nmp->nm_renew_timer, interval * 1000);
2160
2161 nfsmout:
2162 if (fspath.np_components) {
2163 for (comp = 0; comp < fspath.np_compcount; comp++) {
2164 if (fspath.np_components[comp]) {
2165 kfree_data_addr(fspath.np_components[comp]);
2166 }
2167 }
2168 FREE(fspath.np_components, M_TEMP);
2169 }
2170 NVATTR_CLEANUP(&nvattr);
2171 nfs_fs_locations_cleanup(&nfsls);
2172 if (*npp) {
2173 nfs_node_unlock(*npp);
2174 }
2175 nfsm_chain_cleanup(&nmreq);
2176 nfsm_chain_cleanup(&nmrep);
2177 return error;
2178 }
2179 #endif /* CONFIG_NFS4 */
2180
2181 /*
2182 * Thread to handle initial NFS mount connection.
2183 */
2184 void
nfs_mount_connect_thread(void * arg,__unused wait_result_t wr)2185 nfs_mount_connect_thread(void *arg, __unused wait_result_t wr)
2186 {
2187 struct nfsmount *nmp = arg;
2188 int error = 0, savederror = 0, slpflag = (NMFLAG(nmp, INTR) ? PCATCH : 0);
2189 int done = 0, timeo, tries, maxtries;
2190
2191 if (NM_OMFLAG(nmp, MNTQUICK)) {
2192 timeo = nfs_mount_quick_timeout >= 1 ? nfs_mount_quick_timeout : NFS_MOUNT_QUICK_TIMEOUT;
2193 maxtries = 1;
2194 } else {
2195 timeo = nfs_mount_timeout >= 1 ? nfs_mount_timeout : NFS_MOUNT_TIMEOUT;
2196 maxtries = 2;
2197 }
2198
2199 for (tries = 0; tries < maxtries; tries++) {
2200 error = nfs_connect(nmp, 1, timeo);
2201 switch (error) {
2202 case ETIMEDOUT:
2203 case EAGAIN:
2204 case EPIPE:
2205 case EADDRNOTAVAIL:
2206 case ENETDOWN:
2207 case ENETUNREACH:
2208 case ENETRESET:
2209 case ECONNABORTED:
2210 case ECONNRESET:
2211 case EISCONN:
2212 case ENOTCONN:
2213 case ESHUTDOWN:
2214 case ECONNREFUSED:
2215 case EHOSTDOWN:
2216 case EHOSTUNREACH:
2217 /* just keep retrying on any of these errors */
2218 break;
2219 case 0:
2220 default:
2221 /* looks like we got an answer... */
2222 done = 1;
2223 break;
2224 }
2225
2226 /* save the best error */
2227 if (nfs_connect_error_class(error) >= nfs_connect_error_class(savederror)) {
2228 savederror = error;
2229 }
2230 if (done) {
2231 error = savederror;
2232 break;
2233 }
2234
2235 /* pause before next attempt */
2236 if ((error = nfs_sigintr(nmp, NULL, current_thread(), 0))) {
2237 break;
2238 }
2239 error = tsleep(nmp, PSOCK | slpflag, "nfs_mount_connect_retry", 2 * hz);
2240 if (error && (error != EWOULDBLOCK)) {
2241 break;
2242 }
2243 error = savederror;
2244 }
2245
2246 /* update status of mount connect */
2247 lck_mtx_lock(&nmp->nm_lock);
2248 if (!nmp->nm_mounterror) {
2249 nmp->nm_mounterror = error;
2250 }
2251 nmp->nm_state &= ~NFSSTA_MOUNT_THREAD;
2252 lck_mtx_unlock(&nmp->nm_lock);
2253 wakeup(&nmp->nm_nss);
2254 }
2255
2256 int
nfs_mount_connect(struct nfsmount * nmp)2257 nfs_mount_connect(struct nfsmount *nmp)
2258 {
2259 int error = 0, slpflag;
2260 thread_t thd;
2261 struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
2262
2263 /*
2264 * Set up the socket. Perform initial search for a location/server/address to
2265 * connect to and negotiate any unspecified mount parameters. This work is
2266 * done on a kernel thread to satisfy reserved port usage needs.
2267 */
2268 slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
2269 lck_mtx_lock(&nmp->nm_lock);
2270 /* set flag that the thread is running */
2271 nmp->nm_state |= NFSSTA_MOUNT_THREAD;
2272 if (kernel_thread_start(nfs_mount_connect_thread, nmp, &thd) != KERN_SUCCESS) {
2273 nmp->nm_state &= ~NFSSTA_MOUNT_THREAD;
2274 nmp->nm_mounterror = EIO;
2275 printf("nfs mount %s start socket connect thread failed\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
2276 } else {
2277 thread_deallocate(thd);
2278 }
2279
2280 /* wait until mount connect thread is finished/gone */
2281 while (nmp->nm_state & NFSSTA_MOUNT_THREAD) {
2282 error = msleep(&nmp->nm_nss, &nmp->nm_lock, slpflag | PSOCK, "nfsconnectthread", &ts);
2283 if ((error && (error != EWOULDBLOCK)) || ((error = nfs_sigintr(nmp, NULL, current_thread(), 1)))) {
2284 /* record error */
2285 if (!nmp->nm_mounterror) {
2286 nmp->nm_mounterror = error;
2287 }
2288 /* signal the thread that we are aborting */
2289 nmp->nm_sockflags |= NMSOCK_UNMOUNT;
2290 if (nmp->nm_nss) {
2291 wakeup(nmp->nm_nss);
2292 }
2293 /* and continue waiting on it to finish */
2294 slpflag = 0;
2295 }
2296 }
2297 lck_mtx_unlock(&nmp->nm_lock);
2298
2299 /* grab mount connect status */
2300 error = nmp->nm_mounterror;
2301
2302 return error;
2303 }
2304
/* Table of maximum minor version for a given version */
uint32_t maxminorverstab[] = {
	0, /* Version 0 (does not exist) */
	0, /* Version 1 (does not exist) */
	0, /* Version 2 */
	0, /* Version 3 */
	0, /* Version 4 */
};

/* Highest NFS major version with an entry in maxminorverstab (table size minus one). */
#define NFS_MAX_SUPPORTED_VERSION ((long)(sizeof (maxminorverstab) / sizeof (uint32_t) - 1))
/* Maximum supported minor version for major version v (no bounds check; caller must validate v). */
#define NFS_MAX_SUPPORTED_MINOR_VERSION(v) ((long)(maxminorverstab[(v)]))

/* Default version range when mount args specify none: NFSv2.0 through NFSv3.0 (VER2PVER presumably packs major+minor into one value). */
#define DEFAULT_NFS_MIN_VERS VER2PVER(2, 0)
#define DEFAULT_NFS_MAX_VERS VER2PVER(3, 0)
2319
2320 /*
2321 * Common code to mount an NFS file system.
2322 */
2323 int
mountnfs(char * xdrbuf,mount_t mp,vfs_context_t ctx,vnode_t * vpp)2324 mountnfs(
2325 char *xdrbuf,
2326 mount_t mp,
2327 vfs_context_t ctx,
2328 vnode_t *vpp)
2329 {
2330 struct nfsmount *nmp;
2331 nfsnode_t np;
2332 int error = 0;
2333 struct vfsstatfs *sbp;
2334 struct xdrbuf xb;
2335 uint32_t i, val, maxio, iosize, len;
2336 uint32_t *mattrs;
2337 uint32_t *mflags_mask;
2338 uint32_t *mflags;
2339 uint32_t argslength, attrslength;
2340 uid_t set_owner = 0;
2341 struct nfs_location_index firstloc = {
2342 .nli_flags = NLI_VALID,
2343 .nli_loc = 0,
2344 .nli_serv = 0,
2345 .nli_addr = 0
2346 };
2347 static const struct nfs_etype nfs_default_etypes = {
2348 .count = NFS_MAX_ETYPES,
2349 .selected = NFS_MAX_ETYPES,
2350 .etypes = { NFS_AES256_CTS_HMAC_SHA1_96,
2351 NFS_AES128_CTS_HMAC_SHA1_96,
2352 NFS_DES3_CBC_SHA1_KD}
2353 };
2354
2355 /* make sure mbuf constants are set up */
2356 if (!nfs_mbuf_mhlen) {
2357 nfs_mbuf_init();
2358 }
2359
2360 if (vfs_flags(mp) & MNT_UPDATE) {
2361 nmp = VFSTONFS(mp);
2362 /* update paths, file handles, etc, here XXX */
2363 xb_free(xdrbuf);
2364 return 0;
2365 } else {
2366 /* allocate an NFS mount structure for this mount */
2367 nmp = zalloc_flags(nfsmnt_zone, Z_WAITOK | Z_ZERO);
2368 lck_mtx_init(&nmp->nm_lock, &nfs_mount_grp, LCK_ATTR_NULL);
2369 TAILQ_INIT(&nmp->nm_resendq);
2370 TAILQ_INIT(&nmp->nm_iodq);
2371 TAILQ_INIT(&nmp->nm_gsscl);
2372 LIST_INIT(&nmp->nm_monlist);
2373 vfs_setfsprivate(mp, nmp);
2374 vfs_getnewfsid(mp);
2375 nmp->nm_mountp = mp;
2376 vfs_setauthopaque(mp);
2377 /*
2378 * Disable cache_lookup_path for NFS. NFS lookup always needs
2379 * to be called to check if the directory attribute cache is
2380 * valid and possibly purge the directory before calling
2381 * cache_lookup.
2382 */
2383 vfs_setauthcache_ttl(mp, 0);
2384
2385 nfs_nhinit_finish();
2386
2387 nmp->nm_args = xdrbuf;
2388
2389 /* set up defaults */
2390 nmp->nm_ref = 0;
2391 nmp->nm_vers = 0;
2392 nmp->nm_min_vers = DEFAULT_NFS_MIN_VERS;
2393 nmp->nm_max_vers = DEFAULT_NFS_MAX_VERS;
2394 nmp->nm_timeo = NFS_TIMEO;
2395 nmp->nm_retry = NFS_RETRANS;
2396 nmp->nm_sotype = 0;
2397 nmp->nm_sofamily = 0;
2398 nmp->nm_nfsport = 0;
2399 nmp->nm_wsize = NFS_WSIZE;
2400 nmp->nm_rsize = NFS_RSIZE;
2401 nmp->nm_readdirsize = NFS_READDIRSIZE;
2402 nmp->nm_numgrps = NFS_MAXGRPS;
2403 nmp->nm_readahead = NFS_DEFRAHEAD;
2404 nmp->nm_tprintf_delay = nfs_tprintf_delay;
2405 if (nmp->nm_tprintf_delay < 0) {
2406 nmp->nm_tprintf_delay = 0;
2407 }
2408 nmp->nm_tprintf_initial_delay = nfs_tprintf_initial_delay;
2409 if (nmp->nm_tprintf_initial_delay < 0) {
2410 nmp->nm_tprintf_initial_delay = 0;
2411 }
2412 nmp->nm_acregmin = NFS_MINATTRTIMO;
2413 nmp->nm_acregmax = NFS_MAXATTRTIMO;
2414 nmp->nm_acdirmin = NFS_MINDIRATTRTIMO;
2415 nmp->nm_acdirmax = NFS_MAXDIRATTRTIMO;
2416 nmp->nm_etype = nfs_default_etypes;
2417 nmp->nm_auth = RPCAUTH_SYS;
2418 nmp->nm_iodlink.tqe_next = NFSNOLIST;
2419 nmp->nm_deadtimeout = 0;
2420 nmp->nm_curdeadtimeout = 0;
2421 NFS_BITMAP_SET(nmp->nm_flags, NFS_MFLAG_RDIRPLUS); /* enable RDIRPLUS by default. It will be reverted later in case NFSv2 is used */
2422 NFS_BITMAP_SET(nmp->nm_flags, NFS_MFLAG_NOACL);
2423 nmp->nm_realm = NULL;
2424 nmp->nm_principal = NULL;
2425 nmp->nm_sprinc = NULL;
2426 }
2427
2428 mattrs = nmp->nm_mattrs;
2429 mflags = nmp->nm_mflags;
2430 mflags_mask = nmp->nm_mflags_mask;
2431
2432 /* set up NFS mount with args */
2433 xb_init_buffer(&xb, xdrbuf, 2 * XDRWORD);
2434 xb_get_32(error, &xb, val); /* version */
2435 xb_get_32(error, &xb, argslength); /* args length */
2436 nfsmerr_if(error);
2437 xb_init_buffer(&xb, xdrbuf, argslength); /* restart parsing with actual buffer length */
2438 xb_get_32(error, &xb, val); /* version */
2439 xb_get_32(error, &xb, argslength); /* args length */
2440 xb_get_32(error, &xb, val); /* XDR args version */
2441 if (val != NFS_XDRARGS_VERSION_0 || argslength < ((4 + NFS_MATTR_BITMAP_LEN + 1) * XDRWORD)) {
2442 error = EINVAL;
2443 }
2444 len = NFS_MATTR_BITMAP_LEN;
2445 xb_get_bitmap(error, &xb, mattrs, len); /* mount attribute bitmap */
2446 attrslength = 0;
2447 xb_get_32(error, &xb, attrslength); /* attrs length */
2448 if (!error && (attrslength > (argslength - ((4 + NFS_MATTR_BITMAP_LEN + 1) * XDRWORD)))) {
2449 error = EINVAL;
2450 }
2451 nfsmerr_if(error);
2452 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FLAGS)) {
2453 len = NFS_MFLAG_BITMAP_LEN;
2454 xb_get_bitmap(error, &xb, mflags_mask, len); /* mount flag mask */
2455 len = NFS_MFLAG_BITMAP_LEN;
2456 xb_get_bitmap(error, &xb, mflags, len); /* mount flag values */
2457 if (!error) {
2458 /* clear all mask bits and OR in all the ones that are set */
2459 nmp->nm_flags[0] &= ~mflags_mask[0];
2460 nmp->nm_flags[0] |= (mflags_mask[0] & mflags[0]);
2461 }
2462 }
2463 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION)) {
2464 /* Can't specify a single version and a range */
2465 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION_RANGE)) {
2466 error = EINVAL;
2467 }
2468 xb_get_32(error, &xb, nmp->nm_vers);
2469 if (nmp->nm_vers > NFS_MAX_SUPPORTED_VERSION ||
2470 nmp->nm_vers < NFS_VER2) {
2471 error = EINVAL;
2472 }
2473 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_MINOR_VERSION)) {
2474 xb_get_32(error, &xb, nmp->nm_minor_vers);
2475 } else {
2476 nmp->nm_minor_vers = maxminorverstab[nmp->nm_vers];
2477 }
2478 if (nmp->nm_minor_vers > maxminorverstab[nmp->nm_vers]) {
2479 error = EINVAL;
2480 }
2481 nmp->nm_max_vers = nmp->nm_min_vers =
2482 VER2PVER(nmp->nm_vers, nmp->nm_minor_vers);
2483 }
2484 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_MINOR_VERSION)) {
2485 /* should have also gotten NFS version (and already gotten minor version) */
2486 if (!NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION)) {
2487 error = EINVAL;
2488 }
2489 }
2490 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION_RANGE)) {
2491 xb_get_32(error, &xb, nmp->nm_min_vers);
2492 xb_get_32(error, &xb, nmp->nm_max_vers);
2493 if ((nmp->nm_min_vers > nmp->nm_max_vers) ||
2494 (PVER2MAJOR(nmp->nm_max_vers) > NFS_MAX_SUPPORTED_VERSION) ||
2495 (PVER2MINOR(nmp->nm_min_vers) > maxminorverstab[PVER2MAJOR(nmp->nm_min_vers)]) ||
2496 (PVER2MINOR(nmp->nm_max_vers) > maxminorverstab[PVER2MAJOR(nmp->nm_max_vers)])) {
2497 error = EINVAL;
2498 }
2499 }
2500 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READ_SIZE)) {
2501 xb_get_32(error, &xb, nmp->nm_rsize);
2502 }
2503 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_WRITE_SIZE)) {
2504 xb_get_32(error, &xb, nmp->nm_wsize);
2505 }
2506 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READDIR_SIZE)) {
2507 xb_get_32(error, &xb, nmp->nm_readdirsize);
2508 }
2509 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READAHEAD)) {
2510 xb_get_32(error, &xb, nmp->nm_readahead);
2511 }
2512 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_REG_MIN)) {
2513 xb_get_32(error, &xb, nmp->nm_acregmin);
2514 xb_skip(error, &xb, XDRWORD);
2515 }
2516 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_REG_MAX)) {
2517 xb_get_32(error, &xb, nmp->nm_acregmax);
2518 xb_skip(error, &xb, XDRWORD);
2519 }
2520 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MIN)) {
2521 xb_get_32(error, &xb, nmp->nm_acdirmin);
2522 xb_skip(error, &xb, XDRWORD);
2523 }
2524 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MAX)) {
2525 xb_get_32(error, &xb, nmp->nm_acdirmax);
2526 xb_skip(error, &xb, XDRWORD);
2527 }
2528 nfsmerr_if(error);
2529 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_LOCK_MODE)) {
2530 xb_get_32(error, &xb, val);
2531 switch (val) {
2532 case NFS_LOCK_MODE_DISABLED:
2533 case NFS_LOCK_MODE_LOCAL:
2534 #if CONFIG_NFS4
2535 if (nmp->nm_vers >= NFS_VER4) {
2536 /* disabled/local lock mode only allowed on v2/v3 */
2537 error = EINVAL;
2538 break;
2539 }
2540 #endif
2541 OS_FALLTHROUGH;
2542 case NFS_LOCK_MODE_ENABLED:
2543 nmp->nm_lockmode = val;
2544 break;
2545 default:
2546 error = EINVAL;
2547 }
2548 }
2549 nfsmerr_if(error);
2550 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SECURITY)) {
2551 uint32_t seccnt;
2552 xb_get_32(error, &xb, seccnt);
2553 if (!error && ((seccnt < 1) || (seccnt > NX_MAX_SEC_FLAVORS))) {
2554 error = EINVAL;
2555 }
2556 nfsmerr_if(error);
2557 nmp->nm_sec.count = seccnt;
2558 for (i = 0; i < seccnt; i++) {
2559 xb_get_32(error, &xb, nmp->nm_sec.flavors[i]);
2560 /* Check for valid security flavor */
2561 switch (nmp->nm_sec.flavors[i]) {
2562 case RPCAUTH_NONE:
2563 case RPCAUTH_SYS:
2564 case RPCAUTH_KRB5:
2565 case RPCAUTH_KRB5I:
2566 case RPCAUTH_KRB5P:
2567 break;
2568 default:
2569 error = EINVAL;
2570 }
2571 }
2572 /* start with the first flavor */
2573 nmp->nm_auth = nmp->nm_sec.flavors[0];
2574 }
2575 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_KERB_ETYPE)) {
2576 uint32_t etypecnt;
2577 xb_get_32(error, &xb, etypecnt);
2578 if (!error && ((etypecnt < 1) || (etypecnt > NFS_MAX_ETYPES))) {
2579 error = EINVAL;
2580 }
2581 nfsmerr_if(error);
2582 nmp->nm_etype.count = etypecnt;
2583 xb_get_32(error, &xb, nmp->nm_etype.selected);
2584 nfsmerr_if(error);
2585 if (etypecnt) {
2586 nmp->nm_etype.selected = etypecnt; /* Nothing is selected yet, so set selected to count */
2587 for (i = 0; i < etypecnt; i++) {
2588 xb_get_32(error, &xb, nmp->nm_etype.etypes[i]);
2589 /* Check for valid encryption type */
2590 switch (nmp->nm_etype.etypes[i]) {
2591 case NFS_DES3_CBC_SHA1_KD:
2592 case NFS_AES128_CTS_HMAC_SHA1_96:
2593 case NFS_AES256_CTS_HMAC_SHA1_96:
2594 break;
2595 default:
2596 error = EINVAL;
2597 }
2598 }
2599 }
2600 }
2601 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MAX_GROUP_LIST)) {
2602 xb_get_32(error, &xb, nmp->nm_numgrps);
2603 }
2604 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOCKET_TYPE)) {
2605 char sotype[16];
2606
2607 *sotype = '\0';
2608 xb_get_32(error, &xb, val);
2609 if (!error && ((val < 3) || (val > sizeof(sotype)))) {
2610 error = EINVAL;
2611 }
2612 nfsmerr_if(error);
2613 error = xb_get_bytes(&xb, sotype, val, 0);
2614 nfsmerr_if(error);
2615 sotype[val] = '\0';
2616 if (!strcmp(sotype, "tcp")) {
2617 nmp->nm_sotype = SOCK_STREAM;
2618 } else if (!strcmp(sotype, "udp")) {
2619 nmp->nm_sotype = SOCK_DGRAM;
2620 } else if (!strcmp(sotype, "tcp4")) {
2621 nmp->nm_sotype = SOCK_STREAM;
2622 nmp->nm_sofamily = AF_INET;
2623 } else if (!strcmp(sotype, "udp4")) {
2624 nmp->nm_sotype = SOCK_DGRAM;
2625 nmp->nm_sofamily = AF_INET;
2626 } else if (!strcmp(sotype, "tcp6")) {
2627 nmp->nm_sotype = SOCK_STREAM;
2628 nmp->nm_sofamily = AF_INET6;
2629 } else if (!strcmp(sotype, "udp6")) {
2630 nmp->nm_sotype = SOCK_DGRAM;
2631 nmp->nm_sofamily = AF_INET6;
2632 } else if (!strcmp(sotype, "inet4")) {
2633 nmp->nm_sofamily = AF_INET;
2634 } else if (!strcmp(sotype, "inet6")) {
2635 nmp->nm_sofamily = AF_INET6;
2636 } else if (!strcmp(sotype, "inet")) {
2637 nmp->nm_sofamily = 0; /* ok */
2638 } else if (!strcmp(sotype, "ticotsord")) {
2639 nmp->nm_sofamily = AF_LOCAL;
2640 nmp->nm_sotype = SOCK_STREAM;
2641 } else if (!strcmp(sotype, "ticlts")) {
2642 nmp->nm_sofamily = AF_LOCAL;
2643 nmp->nm_sotype = SOCK_DGRAM;
2644 } else {
2645 error = EINVAL;
2646 }
2647 #if CONFIG_NFS4
2648 if (!error && (nmp->nm_vers >= NFS_VER4) && nmp->nm_sotype &&
2649 (nmp->nm_sotype != SOCK_STREAM)) {
2650 error = EINVAL; /* NFSv4 is only allowed over TCP. */
2651 }
2652 #endif
2653 if (error) {
2654 NFS_VFS_DBG("EINVAL sotype = \"%s\"\n", sotype);
2655 }
2656 nfsmerr_if(error);
2657 }
2658 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_PORT)) {
2659 xb_get_32(error, &xb, val);
2660 if (NFS_PORT_INVALID(val)) {
2661 error = EINVAL;
2662 nfsmerr_if(error);
2663 }
2664 nmp->nm_nfsport = (in_port_t)val;
2665 }
2666 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MOUNT_PORT)) {
2667 xb_get_32(error, &xb, val);
2668 if (NFS_PORT_INVALID(val)) {
2669 error = EINVAL;
2670 nfsmerr_if(error);
2671 }
2672 nmp->nm_mountport = (in_port_t)val;
2673 }
2674 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_REQUEST_TIMEOUT)) {
2675 /* convert from time to 0.1s units */
2676 xb_get_32(error, &xb, nmp->nm_timeo);
2677 xb_get_32(error, &xb, val);
2678 nfsmerr_if(error);
2679 if (val >= 1000000000) {
2680 error = EINVAL;
2681 }
2682 nfsmerr_if(error);
2683 nmp->nm_timeo *= 10;
2684 nmp->nm_timeo += (val + 100000000 - 1) / 100000000;
2685 /* now convert to ticks */
2686 nmp->nm_timeo = (nmp->nm_timeo * NFS_HZ + 5) / 10;
2687 }
2688 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOFT_RETRY_COUNT)) {
2689 xb_get_32(error, &xb, val);
2690 if (!error && (val > 1)) {
2691 nmp->nm_retry = val;
2692 }
2693 }
2694 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_DEAD_TIMEOUT)) {
2695 xb_get_32(error, &xb, nmp->nm_deadtimeout);
2696 xb_skip(error, &xb, XDRWORD);
2697 }
2698 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FH)) {
2699 nfsmerr_if(error);
2700 nmp->nm_fh = zalloc(nfs_fhandle_zone);
2701 xb_get_32(error, &xb, nmp->nm_fh->fh_len);
2702 nfsmerr_if(error);
2703 if ((size_t)nmp->nm_fh->fh_len > sizeof(nmp->nm_fh->fh_data)) {
2704 error = EINVAL;
2705 } else {
2706 error = xb_get_bytes(&xb, (char*)&nmp->nm_fh->fh_data[0], nmp->nm_fh->fh_len, 0);
2707 }
2708 }
2709 nfsmerr_if(error);
2710 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FS_LOCATIONS)) {
2711 uint32_t loc, serv, addr, comp;
2712 struct nfs_fs_location *fsl;
2713 struct nfs_fs_server *fss;
2714 struct nfs_fs_path *fsp;
2715
2716 xb_get_32(error, &xb, nmp->nm_locations.nl_numlocs); /* fs location count */
2717 /* sanity check location count */
2718 if (!error && ((nmp->nm_locations.nl_numlocs < 1) || (nmp->nm_locations.nl_numlocs > 256))) {
2719 NFS_VFS_DBG("Invalid number of fs_locations: %d", nmp->nm_locations.nl_numlocs);
2720 error = EINVAL;
2721 }
2722 nfsmerr_if(error);
2723 MALLOC(nmp->nm_locations.nl_locations, struct nfs_fs_location **, nmp->nm_locations.nl_numlocs * sizeof(struct nfs_fs_location*), M_TEMP, M_WAITOK | M_ZERO);
2724 if (!nmp->nm_locations.nl_locations) {
2725 error = ENOMEM;
2726 }
2727 for (loc = 0; loc < nmp->nm_locations.nl_numlocs; loc++) {
2728 nfsmerr_if(error);
2729 fsl = kalloc_type(struct nfs_fs_location,
2730 Z_WAITOK | Z_ZERO | Z_NOFAIL);
2731 nmp->nm_locations.nl_locations[loc] = fsl;
2732 xb_get_32(error, &xb, fsl->nl_servcount); /* server count */
2733 /* sanity check server count */
2734 if (!error && ((fsl->nl_servcount < 1) || (fsl->nl_servcount > 256))) {
2735 NFS_VFS_DBG("Invalid server count %d", fsl->nl_servcount);
2736 error = EINVAL;
2737 }
2738 nfsmerr_if(error);
2739 MALLOC(fsl->nl_servers, struct nfs_fs_server **, fsl->nl_servcount * sizeof(struct nfs_fs_server*), M_TEMP, M_WAITOK | M_ZERO);
2740 if (!fsl->nl_servers) {
2741 error = ENOMEM;
2742 NFS_VFS_DBG("Server count = %d, error = %d\n", fsl->nl_servcount, error);
2743 }
2744 for (serv = 0; serv < fsl->nl_servcount; serv++) {
2745 nfsmerr_if(error);
2746 fss = kalloc_type(struct nfs_fs_server,
2747 Z_WAITOK | Z_ZERO | Z_NOFAIL);
2748 fsl->nl_servers[serv] = fss;
2749 xb_get_32(error, &xb, val); /* server name length */
2750 /* sanity check server name length */
2751 if (!error && (val > MAXPATHLEN)) {
2752 NFS_VFS_DBG("Invalid server name length %d", val);
2753 error = EINVAL;
2754 }
2755 nfsmerr_if(error);
2756 fss->ns_name = kalloc_data(val + 1, Z_WAITOK | Z_ZERO);
2757 if (!fss->ns_name) {
2758 error = ENOMEM;
2759 }
2760 nfsmerr_if(error);
2761 error = xb_get_bytes(&xb, fss->ns_name, val, 0); /* server name */
2762 xb_get_32(error, &xb, fss->ns_addrcount); /* address count */
2763 /* sanity check address count (OK to be zero) */
2764 if (!error && (fss->ns_addrcount > 256)) {
2765 NFS_VFS_DBG("Invalid address count %d", fss->ns_addrcount);
2766 error = EINVAL;
2767 }
2768 nfsmerr_if(error);
2769 if (fss->ns_addrcount > 0) {
2770 MALLOC(fss->ns_addresses, char **, fss->ns_addrcount * sizeof(char *), M_TEMP, M_WAITOK | M_ZERO);
2771 if (!fss->ns_addresses) {
2772 error = ENOMEM;
2773 }
2774 for (addr = 0; addr < fss->ns_addrcount; addr++) {
2775 xb_get_32(error, &xb, val); /* address length */
2776 /* sanity check address length */
2777 if (!error && val > 128) {
2778 NFS_VFS_DBG("Invalid address length %d", val);
2779 error = EINVAL;
2780 }
2781 nfsmerr_if(error);
2782 fss->ns_addresses[addr] = kalloc_data(val + 1, Z_WAITOK | Z_ZERO);
2783 if (!fss->ns_addresses[addr]) {
2784 error = ENOMEM;
2785 }
2786 nfsmerr_if(error);
2787 error = xb_get_bytes(&xb, fss->ns_addresses[addr], val, 0); /* address */
2788 }
2789 }
2790 xb_get_32(error, &xb, val); /* server info length */
2791 xb_skip(error, &xb, val); /* skip server info */
2792 }
2793 /* get pathname */
2794 fsp = &fsl->nl_path;
2795 xb_get_32(error, &xb, fsp->np_compcount); /* component count */
2796 /* sanity check component count */
2797 if (!error && (fsp->np_compcount > MAXPATHLEN)) {
2798 NFS_VFS_DBG("Invalid component count %d", fsp->np_compcount);
2799 error = EINVAL;
2800 }
2801 nfsmerr_if(error);
2802 if (fsp->np_compcount) {
2803 MALLOC(fsp->np_components, char **, fsp->np_compcount * sizeof(char*), M_TEMP, M_WAITOK | M_ZERO);
2804 if (!fsp->np_components) {
2805 error = ENOMEM;
2806 }
2807 }
2808 for (comp = 0; comp < fsp->np_compcount; comp++) {
2809 xb_get_32(error, &xb, val); /* component length */
2810 /* sanity check component length */
2811 if (!error && (val == 0)) {
2812 /*
2813 * Apparently some people think a path with zero components should
2814 * be encoded with one zero-length component. So, just ignore any
2815 * zero length components.
2816 */
2817 comp--;
2818 fsp->np_compcount--;
2819 if (fsp->np_compcount == 0) {
2820 FREE(fsp->np_components, M_TEMP);
2821 fsp->np_components = NULL;
2822 }
2823 continue;
2824 }
2825 if (!error && ((val < 1) || (val > MAXPATHLEN))) {
2826 NFS_VFS_DBG("Invalid component path length %d", val);
2827 error = EINVAL;
2828 }
2829 nfsmerr_if(error);
2830 fsp->np_components[comp] = kalloc_data(val + 1, Z_WAITOK | Z_ZERO);
2831 if (!fsp->np_components[comp]) {
2832 error = ENOMEM;
2833 }
2834 nfsmerr_if(error);
2835 error = xb_get_bytes(&xb, fsp->np_components[comp], val, 0); /* component */
2836 }
2837 xb_get_32(error, &xb, val); /* fs location info length */
2838 NFS_VFS_DBG("Skipping fs location info bytes %d", val);
2839 xb_skip(error, &xb, xdr_rndup(val)); /* skip fs location info */
2840 }
2841 }
2842 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MNTFLAGS)) {
2843 xb_skip(error, &xb, XDRWORD);
2844 }
2845 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MNTFROM)) {
2846 xb_get_32(error, &xb, len);
2847 nfsmerr_if(error);
2848 val = len;
2849 if (val >= sizeof(vfs_statfs(mp)->f_mntfromname)) {
2850 val = sizeof(vfs_statfs(mp)->f_mntfromname) - 1;
2851 }
2852 error = xb_get_bytes(&xb, vfs_statfs(mp)->f_mntfromname, val, 0);
2853 if ((len - val) > 0) {
2854 xb_skip(error, &xb, len - val);
2855 }
2856 nfsmerr_if(error);
2857 vfs_statfs(mp)->f_mntfromname[val] = '\0';
2858 }
2859 nfsmerr_if(error);
2860
2861 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_REALM)) {
2862 xb_get_32(error, &xb, len);
2863 if (!error && ((len < 1) || (len > MAXPATHLEN))) {
2864 error = EINVAL;
2865 }
2866 nfsmerr_if(error);
2867 /* allocate an extra byte for a leading '@' if its not already prepended to the realm */
2868 nmp->nm_realm = kalloc_data(len + 2, Z_WAITOK | Z_ZERO);
2869 if (!nmp->nm_realm) {
2870 error = ENOMEM;
2871 }
2872 nfsmerr_if(error);
2873 error = xb_get_bytes(&xb, nmp->nm_realm, len, 0);
2874 if (error == 0 && *nmp->nm_realm != '@') {
2875 bcopy(nmp->nm_realm, &nmp->nm_realm[1], len);
2876 nmp->nm_realm[0] = '@';
2877 }
2878 }
2879 nfsmerr_if(error);
2880
2881 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_PRINCIPAL)) {
2882 xb_get_32(error, &xb, len);
2883 if (!error && ((len < 1) || (len > MAXPATHLEN))) {
2884 error = EINVAL;
2885 }
2886 nfsmerr_if(error);
2887 nmp->nm_principal = kalloc_data(len + 1, Z_WAITOK | Z_ZERO);
2888 if (!nmp->nm_principal) {
2889 error = ENOMEM;
2890 }
2891 nfsmerr_if(error);
2892 error = xb_get_bytes(&xb, nmp->nm_principal, len, 0);
2893 }
2894 nfsmerr_if(error);
2895
2896 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SVCPRINCIPAL)) {
2897 xb_get_32(error, &xb, len);
2898 if (!error && ((len < 1) || (len > MAXPATHLEN))) {
2899 error = EINVAL;
2900 }
2901 nfsmerr_if(error);
2902 nmp->nm_sprinc = kalloc_data(len + 1, Z_WAITOK | Z_ZERO);
2903 if (!nmp->nm_sprinc) {
2904 error = ENOMEM;
2905 }
2906 nfsmerr_if(error);
2907 error = xb_get_bytes(&xb, nmp->nm_sprinc, len, 0);
2908 }
2909 nfsmerr_if(error);
2910
2911 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_LOCAL_NFS_PORT)) {
2912 if (nmp->nm_nfsport) {
2913 error = EINVAL;
2914 NFS_VFS_DBG("Can't have ports specified over incompatible socket families");
2915 }
2916 nfsmerr_if(error);
2917 xb_get_32(error, &xb, len);
2918 if (!error && ((len < 1) || (len > sizeof(((struct sockaddr_un *)0)->sun_path)))) {
2919 error = EINVAL;
2920 }
2921 nfsmerr_if(error);
2922 MALLOC(nmp->nm_nfs_localport, char *, len + 1, M_TEMP, M_WAITOK | M_ZERO);
2923 if (!nmp->nm_nfs_localport) {
2924 error = ENOMEM;
2925 }
2926 nfsmerr_if(error);
2927 error = xb_get_bytes(&xb, nmp->nm_nfs_localport, len, 0);
2928 nmp->nm_sofamily = AF_LOCAL;
2929 nmp->nm_nfsport = 1; /* We use the now deprecated tpcmux port to indcate that we have an AF_LOCAL port */
2930 NFS_VFS_DBG("Setting nfs local port %s (%d)\n", nmp->nm_nfs_localport, nmp->nm_nfsport);
2931 }
2932 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_LOCAL_MOUNT_PORT)) {
2933 if (nmp->nm_mountport) {
2934 error = EINVAL;
2935 NFS_VFS_DBG("Can't have ports specified over mulitple socket families");
2936 }
2937 nfsmerr_if(error);
2938 xb_get_32(error, &xb, len);
2939 if (!error && ((len < 1) || (len > sizeof(((struct sockaddr_un *)0)->sun_path)))) {
2940 error = EINVAL;
2941 }
2942 nfsmerr_if(error);
2943 MALLOC(nmp->nm_mount_localport, char *, len + 1, M_TEMP, M_WAITOK | M_ZERO);
2944 if (!nmp->nm_mount_localport) {
2945 error = ENOMEM;
2946 }
2947 nfsmerr_if(error);
2948 error = xb_get_bytes(&xb, nmp->nm_mount_localport, len, 0);
2949 nmp->nm_sofamily = AF_LOCAL;
2950 nmp->nm_mountport = 1; /* We use the now deprecated tpcmux port to indcate that we have an AF_LOCAL port */
2951 NFS_VFS_DBG("Setting mount local port %s (%d)\n", nmp->nm_mount_localport, nmp->nm_mountport);
2952 }
2953 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SET_MOUNT_OWNER)) {
2954 xb_get_32(error, &xb, set_owner);
2955 nfsmerr_if(error);
2956 error = vfs_context_suser(ctx);
2957 /*
2958 * root can set owner to whatever, user can set owner to self
2959 */
2960 if ((error) && (set_owner == kauth_cred_getuid(vfs_context_ucred(ctx)))) {
2961 /* ok for non-root can set owner to self */
2962 error = 0;
2963 }
2964 nfsmerr_if(error);
2965 }
2966
2967 /*
2968 * Sanity check/finalize settings.
2969 */
2970
2971 if (nmp->nm_timeo < NFS_MINTIMEO) {
2972 nmp->nm_timeo = NFS_MINTIMEO;
2973 } else if (nmp->nm_timeo > NFS_MAXTIMEO) {
2974 nmp->nm_timeo = NFS_MAXTIMEO;
2975 }
2976 if (nmp->nm_retry > NFS_MAXREXMIT) {
2977 nmp->nm_retry = NFS_MAXREXMIT;
2978 }
2979
2980 if (nmp->nm_numgrps > NFS_MAXGRPS) {
2981 nmp->nm_numgrps = NFS_MAXGRPS;
2982 }
2983 if (nmp->nm_readahead > NFS_MAXRAHEAD) {
2984 nmp->nm_readahead = NFS_MAXRAHEAD;
2985 }
2986 if (nmp->nm_acregmin > nmp->nm_acregmax) {
2987 nmp->nm_acregmin = nmp->nm_acregmax;
2988 }
2989 if (nmp->nm_acdirmin > nmp->nm_acdirmax) {
2990 nmp->nm_acdirmin = nmp->nm_acdirmax;
2991 }
2992
2993 /* need at least one fs location */
2994 if (nmp->nm_locations.nl_numlocs < 1) {
2995 error = EINVAL;
2996 }
2997 nfsmerr_if(error);
2998
2999 if (!NM_OMATTR_GIVEN(nmp, MNTFROM)) {
3000 /* init mount's mntfromname to first location */
3001 nfs_location_mntfromname(&nmp->nm_locations, firstloc,
3002 vfs_statfs(mp)->f_mntfromname,
3003 sizeof(vfs_statfs(mp)->f_mntfromname), 0);
3004 }
3005
3006 /* Need to save the mounting credential for v4. */
3007 nmp->nm_mcred = vfs_context_ucred(ctx);
3008 if (IS_VALID_CRED(nmp->nm_mcred)) {
3009 kauth_cred_ref(nmp->nm_mcred);
3010 }
3011
3012 /*
3013 * If a reserved port is required, check for that privilege.
3014 * (Note that mirror mounts are exempt because the privilege was
3015 * already checked for the original mount.)
3016 */
3017 if (NMFLAG(nmp, RESVPORT) && !vfs_iskernelmount(mp)) {
3018 error = priv_check_cred(nmp->nm_mcred, PRIV_NETINET_RESERVEDPORT, 0);
3019 }
3020 nfsmerr_if(error);
3021
3022 /* set up the version-specific function tables */
3023 if (nmp->nm_vers < NFS_VER4) {
3024 nmp->nm_funcs = &nfs3_funcs;
3025 } else {
3026 #if CONFIG_NFS4
3027 nmp->nm_funcs = &nfs4_funcs;
3028 #else
3029 /* don't go any further if we don't support NFS4 */
3030 nmp->nm_funcs = NULL;
3031 error = ENOTSUP;
3032 nfsmerr_if(error);
3033 #endif
3034 }
3035
3036 /* do mount's initial socket connection */
3037 error = nfs_mount_connect(nmp);
3038 nfsmerr_if(error);
3039
3040 /* sanity check settings now that version/connection is set */
3041 if (nmp->nm_vers == NFS_VER2) { /* ignore RDIRPLUS on NFSv2 */
3042 NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_RDIRPLUS);
3043 }
3044 #if CONFIG_NFS4
3045 if (nmp->nm_vers >= NFS_VER4) {
3046 if (NFS_BITMAP_ISSET(nmp->nm_flags, NFS_MFLAG_ACLONLY)) { /* aclonly trumps noacl */
3047 NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_NOACL);
3048 }
3049 NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_CALLUMNT);
3050 if (nmp->nm_lockmode != NFS_LOCK_MODE_ENABLED) {
3051 error = EINVAL; /* disabled/local lock mode only allowed on v2/v3 */
3052 }
3053 } else {
3054 #endif
3055 /* ignore these if not v4 */
3056 NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_NOCALLBACK);
3057 NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_NAMEDATTR);
3058 NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_NOACL);
3059 NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_ACLONLY);
3060 #if CONFIG_NFS4
3061 }
3062 #endif
3063 nfsmerr_if(error);
3064
3065 if (nmp->nm_sotype == SOCK_DGRAM) {
3066 /* I/O size defaults for UDP are different */
3067 if (!NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READ_SIZE)) {
3068 nmp->nm_rsize = NFS_DGRAM_RSIZE;
3069 }
3070 if (!NFS_BITMAP_ISSET(mattrs, NFS_MATTR_WRITE_SIZE)) {
3071 nmp->nm_wsize = NFS_DGRAM_WSIZE;
3072 }
3073 }
3074
3075 /* round down I/O sizes to multiple of NFS_FABLKSIZE */
3076 nmp->nm_rsize &= ~(NFS_FABLKSIZE - 1);
3077 if (nmp->nm_rsize <= 0) {
3078 nmp->nm_rsize = NFS_FABLKSIZE;
3079 }
3080 nmp->nm_wsize &= ~(NFS_FABLKSIZE - 1);
3081 if (nmp->nm_wsize <= 0) {
3082 nmp->nm_wsize = NFS_FABLKSIZE;
3083 }
3084
3085 /* and limit I/O sizes to maximum allowed */
3086 maxio = (nmp->nm_vers == NFS_VER2) ? NFS_V2MAXDATA :
3087 (nmp->nm_sotype == SOCK_DGRAM) ? NFS_MAXDGRAMDATA : NFS_MAXDATA;
3088 if (maxio > NFS_MAXBSIZE) {
3089 maxio = NFS_MAXBSIZE;
3090 }
3091 if (nmp->nm_rsize > maxio) {
3092 nmp->nm_rsize = maxio;
3093 }
3094 if (nmp->nm_wsize > maxio) {
3095 nmp->nm_wsize = maxio;
3096 }
3097
3098 if (nmp->nm_readdirsize > maxio) {
3099 nmp->nm_readdirsize = maxio;
3100 }
3101 if (nmp->nm_readdirsize > nmp->nm_rsize) {
3102 nmp->nm_readdirsize = nmp->nm_rsize;
3103 }
3104
3105 /* Set up the sockets and related info */
3106 if (nmp->nm_sotype == SOCK_DGRAM) {
3107 TAILQ_INIT(&nmp->nm_cwndq);
3108 }
3109
3110 if (nmp->nm_saddr->sa_family == AF_LOCAL) {
3111 struct sockaddr_un *un = (struct sockaddr_un *)nmp->nm_saddr;
3112 size_t size;
3113 int n = snprintf(vfs_statfs(mp)->f_mntfromname, sizeof(vfs_statfs(mp)->f_mntfromname), "<%s>:", un->sun_path);
3114
3115 if (n > 0 && (size_t)n < sizeof(vfs_statfs(mp)->f_mntfromname)) {
3116 size = sizeof(vfs_statfs(mp)->f_mntfromname) - n;
3117 nfs_location_mntfromname(&nmp->nm_locations, firstloc,
3118 &vfs_statfs(mp)->f_mntfromname[n], size, 1);
3119 }
3120 }
3121
3122
3123 /*
3124 * Get the root node/attributes from the NFS server and
3125 * do any basic, version-specific setup.
3126 */
3127 error = nmp->nm_funcs->nf_mount(nmp, ctx, &np);
3128 nfsmerr_if(error);
3129
3130 /*
3131 * A reference count is needed on the node representing the
3132 * remote root. If this object is not persistent, then backward
3133 * traversals of the mount point (i.e. "..") will not work if
3134 * the node gets flushed out of the cache.
3135 */
3136 nmp->nm_dnp = np;
3137 *vpp = NFSTOV(np);
3138
3139
3140 /* get usecount and drop iocount */
3141 error = vnode_ref(*vpp);
3142 vnode_put(*vpp);
3143 if (error) {
3144 vnode_recycle(*vpp);
3145 goto nfsmerr;
3146 }
3147
3148 /*
3149 * Do statfs to ensure static info gets set to reasonable values.
3150 */
3151 if ((error = nmp->nm_funcs->nf_update_statfs(nmp, ctx))) {
3152 int error2 = vnode_getwithref(*vpp);
3153 vnode_rele(*vpp);
3154 if (!error2) {
3155 vnode_put(*vpp);
3156 }
3157 vnode_recycle(*vpp);
3158 goto nfsmerr;
3159 }
3160 sbp = vfs_statfs(mp);
3161 sbp->f_bsize = nmp->nm_fsattr.nfsa_bsize;
3162 sbp->f_blocks = nmp->nm_fsattr.nfsa_space_total / sbp->f_bsize;
3163 sbp->f_bfree = nmp->nm_fsattr.nfsa_space_free / sbp->f_bsize;
3164 sbp->f_bavail = nmp->nm_fsattr.nfsa_space_avail / sbp->f_bsize;
3165 sbp->f_bused = (nmp->nm_fsattr.nfsa_space_total / sbp->f_bsize) -
3166 (nmp->nm_fsattr.nfsa_space_free / sbp->f_bsize);
3167 sbp->f_files = nmp->nm_fsattr.nfsa_files_total;
3168 sbp->f_ffree = nmp->nm_fsattr.nfsa_files_free;
3169 sbp->f_iosize = nfs_iosize;
3170
3171 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SET_MOUNT_OWNER)) {
3172 sbp->f_owner = set_owner;
3173 }
3174
3175 /*
3176 * Calculate the size used for I/O buffers. Use the larger
3177 * of the two sizes to minimise NFS requests but make sure
3178 * that it is at least one VM page to avoid wasting buffer
3179 * space and to allow easy mmapping of I/O buffers.
3180 * The read/write RPC calls handle the splitting up of
3181 * buffers into multiple requests if the buffer size is
3182 * larger than the I/O size.
3183 */
3184 iosize = max(nmp->nm_rsize, nmp->nm_wsize);
3185 if (iosize < PAGE_SIZE) {
3186 iosize = PAGE_SIZE;
3187 }
3188 nmp->nm_biosize = trunc_page_32(iosize);
3189
3190 /* For NFSv3 and greater, there is a (relatively) reliable ACCESS call. */
3191 if (nmp->nm_vers > NFS_VER2 && !NMFLAG(nmp, NOOPAQUE_AUTH)
3192 ) {
3193 vfs_setauthopaqueaccess(mp);
3194 }
3195
3196 switch (nmp->nm_lockmode) {
3197 case NFS_LOCK_MODE_DISABLED:
3198 break;
3199 case NFS_LOCK_MODE_LOCAL:
3200 vfs_setlocklocal(nmp->nm_mountp);
3201 break;
3202 case NFS_LOCK_MODE_ENABLED:
3203 default:
3204 if (nmp->nm_vers <= NFS_VER3) {
3205 nfs_lockd_mount_register(nmp);
3206 }
3207 break;
3208 }
3209
3210
3211 /* success! */
3212 lck_mtx_lock(&nmp->nm_lock);
3213 nmp->nm_state |= NFSSTA_MOUNTED;
3214
3215 if (nfs_split_open_owner) {
3216 nmp->nm_state |= NFSSTA_SPLIT_OPEN_OWNER;
3217 printf("%s: Open owner is now based on both PID and UID for mount (%s from %s)\n", __FUNCTION__, vfs_statfs(mp)->f_mntfromname, vfs_statfs(mp)->f_mntonname);
3218 }
3219
3220 lck_mtx_unlock(&nmp->nm_lock);
3221 return 0;
3222 nfsmerr:
3223 nfs_mount_drain_and_cleanup(nmp);
3224 return error;
3225 }
3226
3227 #if CONFIG_TRIGGERS
3228
/*
 * __nfs4_unused: annotation for function parameters that are referenced
 * only when NFSv4 support is compiled in.  Expands to nothing when
 * CONFIG_NFS4 is on (the parameter is used), and to __unused otherwise
 * so non-NFSv4 builds don't emit unused-parameter warnings.
 */
#if CONFIG_NFS4
#define __nfs4_unused /* nothing */
#else
#define __nfs4_unused __unused
#endif
3234
3235 /*
3236 * We've detected a file system boundary on the server and
3237 * need to mount a new file system so that our file systems
3238 * MIRROR the file systems on the server.
3239 *
3240 * Build the mount arguments for the new mount and call kernel_mount().
3241 */
/*
 * nfs_mirror_mount_domount
 *
 * We've detected a file system boundary on the server (vp is a trigger
 * node) and need to mount a new file system on top of it so our local
 * mounts mirror the server's file system layout.
 *
 * Builds the XDR-encoded mount arguments for the new mount by copying
 * and editing the current mount's saved argument stream (nmp->nm_args),
 * then performs the mount via vfs_mount_at_path().
 *
 * dvp: parent directory vnode (used to look up fs locations for referrals,
 *      NFSv4 builds only — hence __nfs4_unused on ctx below)
 * vp:  the trigger vnode being mounted on
 *
 * Returns 0 on success, or an errno value.
 */
int
nfs_mirror_mount_domount(vnode_t dvp, vnode_t vp, __nfs4_unused vfs_context_t ctx)
{
	nfsnode_t np = VTONFS(vp);
#if CONFIG_NFS4
	nfsnode_t dnp = VTONFS(dvp);
#endif
	struct nfsmount *nmp = NFSTONMP(np);
	char fstype[MFSTYPENAMELEN], *mntfromname = NULL, *path = NULL, *relpath, *p, *cp;
	int error = 0, pathbuflen = MAXPATHLEN, i, mntflags = 0, referral, skipcopy = 0;
	size_t nlen, rlen, mlen, mlen2, count;
	struct xdrbuf xb, xbnew;                        /* xb: old args (read); xbnew: new args (write) */
	uint32_t mattrs[NFS_MATTR_BITMAP_LEN];
	uint32_t newmattrs[NFS_MATTR_BITMAP_LEN];
	uint32_t newmflags[NFS_MFLAG_BITMAP_LEN];
	uint32_t newmflags_mask[NFS_MFLAG_BITMAP_LEN];
	uint32_t val, relpathcomps;
	uint64_t argslength = 0, argslength_offset, attrslength_offset, end_offset;
	uint32_t numlocs, loc, numserv, serv, numaddr, addr, numcomp, comp;
	char buf[XDRWORD];
	struct nfs_fs_locations nfsls;

	/* a referral trigger gets fresh fs locations from the server instead of a file handle */
	referral = (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL);
	if (referral) {
		bzero(&nfsls, sizeof(nfsls));
	}

	xb_init(&xbnew, XDRBUF_NONE);

	if (!nmp || (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
		return ENXIO;
	}

	/* allocate a couple path buffers we need */
	mntfromname = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_NOFAIL);
	path = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_NOFAIL);

	/* get the path for the directory being mounted on */
	error = vn_getpath(vp, path, &pathbuflen);
	if (error) {
		error = ENOMEM;
		goto nfsmerr;
	}

	/*
	 * Set up the mntfromname for the new mount based on the
	 * current mount's mntfromname and the directory's path
	 * relative to the current mount's mntonname.
	 * Set up relpath to point at the relative path on the current mount.
	 * Also, count the number of components in relpath.
	 * We'll be adding those to each fs location path in the new args.
	 */
	nlen = strlcpy(mntfromname, vfs_statfs(nmp->nm_mountp)->f_mntfromname, MAXPATHLEN);
	if ((nlen > 0) && (mntfromname[nlen - 1] == '/')) { /* avoid double '/' in new name */
		mntfromname[nlen - 1] = '\0';
		nlen--;
	}
	relpath = mntfromname + nlen;
	nlen = strlcat(mntfromname, path + strlen(vfs_statfs(nmp->nm_mountp)->f_mntonname), MAXPATHLEN);
	if (nlen >= MAXPATHLEN) {
		error = ENAMETOOLONG;
		goto nfsmerr;
	}
	/* count the number of components in relpath */
	p = relpath;
	while (*p && (*p == '/')) {
		p++;
	}
	relpathcomps = 0;
	while (*p) {
		relpathcomps++;
		while (*p && (*p != '/')) {
			p++;
		}
		while (*p && (*p == '/')) {
			p++;
		}
	}

	/* grab a copy of the file system type */
	vfs_name(vnode_mount(vp), fstype);

	/* for referrals, fetch the fs locations */
	if (referral) {
		const char *vname = vnode_getname(NFSTOV(np));
		if (!vname) {
			error = ENOENT;
		}
#if CONFIG_NFS4
		else {
			error = nfs4_get_fs_locations(nmp, dnp, NULL, 0, vname, ctx, &nfsls);
			vnode_putname(vname);
			if (!error && (nfsls.nl_numlocs < 1)) {
				error = ENOENT;
			}
		}
#endif
		nfsmerr_if(error);
	}

	/* set up NFS mount args based on current mount args */

/*
 * Copy one XDR word from XBSRC to XBDST (into V along the way).
 * NOTE: reads the enclosing function's `skipcopy` flag — when set,
 * the word is consumed from the source but NOT written to the destination.
 */
#define xb_copy_32(E, XBSRC, XBDST, V) \
	do { \
	        if (E) break; \
	        xb_get_32((E), (XBSRC), (V)); \
	        if (skipcopy) break; \
	        xb_add_32((E), (XBDST), (V)); \
	} while (0)
/* Copy an XDR opaque (length word + payload rounded up to whole words). */
#define xb_copy_opaque(E, XBSRC, XBDST) \
	do { \
	        uint32_t __count = 0, __val; \
	        xb_copy_32((E), (XBSRC), (XBDST), __count); \
	        if (E) break; \
	        __count = nfsm_rndup(__count); \
	        __count /= XDRWORD; \
	        while (__count-- > 0) \
	                xb_copy_32((E), (XBSRC), (XBDST), __val); \
	} while (0)

	/* peek at version + args length first, then re-init over the full args buffer */
	xb_init_buffer(&xb, nmp->nm_args, 2 * XDRWORD);
	xb_get_32(error, &xb, val); /* version */
	xb_get_32(error, &xb, argslength); /* args length */
	xb_init_buffer(&xb, nmp->nm_args, argslength);

	xb_init_buffer(&xbnew, NULL, 0);
	xb_copy_32(error, &xb, &xbnew, val); /* version */
	argslength_offset = xb_offset(&xbnew); /* remember where to back-patch args length */
	xb_copy_32(error, &xb, &xbnew, val); /* args length */
	xb_copy_32(error, &xb, &xbnew, val); /* XDR args version */
	count = NFS_MATTR_BITMAP_LEN;
	xb_get_bitmap(error, &xb, mattrs, count); /* mount attribute bitmap */
	nfsmerr_if(error);
	for (i = 0; i < NFS_MATTR_BITMAP_LEN; i++) {
		newmattrs[i] = mattrs[i];
	}
	if (referral) {
		/* referral: new fs locations replace any fs locations/mntfrom in the old args */
		NFS_BITMAP_SET(newmattrs, NFS_MATTR_FS_LOCATIONS);
		NFS_BITMAP_CLR(newmattrs, NFS_MATTR_MNTFROM);
	} else {
		NFS_BITMAP_SET(newmattrs, NFS_MATTR_FH);
	}
	NFS_BITMAP_SET(newmattrs, NFS_MATTR_FLAGS);
	NFS_BITMAP_SET(newmattrs, NFS_MATTR_MNTFLAGS);
	NFS_BITMAP_SET(newmattrs, NFS_MATTR_SET_MOUNT_OWNER);
	xb_add_bitmap(error, &xbnew, newmattrs, NFS_MATTR_BITMAP_LEN);
	attrslength_offset = xb_offset(&xbnew); /* remember where to back-patch attrs length */
	xb_copy_32(error, &xb, &xbnew, val); /* attrs length */
	NFS_BITMAP_ZERO(newmflags_mask, NFS_MFLAG_BITMAP_LEN);
	NFS_BITMAP_ZERO(newmflags, NFS_MFLAG_BITMAP_LEN);
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FLAGS)) {
		count = NFS_MFLAG_BITMAP_LEN;
		xb_get_bitmap(error, &xb, newmflags_mask, count); /* mount flag mask bitmap */
		count = NFS_MFLAG_BITMAP_LEN;
		xb_get_bitmap(error, &xb, newmflags, count); /* mount flag bitmap */
	}
	/* the new mount is always marked ephemeral so the harvester can reap it */
	NFS_BITMAP_SET(newmflags_mask, NFS_MFLAG_EPHEMERAL);
	NFS_BITMAP_SET(newmflags, NFS_MFLAG_EPHEMERAL);
	xb_add_bitmap(error, &xbnew, newmflags_mask, NFS_MFLAG_BITMAP_LEN);
	xb_add_bitmap(error, &xbnew, newmflags, NFS_MFLAG_BITMAP_LEN);
	/*
	 * Copy each attribute present in the old args, in bitmap order.
	 * The source and destination streams must stay in lock-step, so the
	 * order of these checks mirrors the attribute encoding order.
	 */
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION)) {
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_MINOR_VERSION)) {
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION_RANGE)) {
		xb_copy_32(error, &xb, &xbnew, val);
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READ_SIZE)) {
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_WRITE_SIZE)) {
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READDIR_SIZE)) {
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READAHEAD)) {
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_REG_MIN)) {
		xb_copy_32(error, &xb, &xbnew, val);
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_REG_MAX)) {
		xb_copy_32(error, &xb, &xbnew, val);
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MIN)) {
		xb_copy_32(error, &xb, &xbnew, val);
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MAX)) {
		xb_copy_32(error, &xb, &xbnew, val);
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_LOCK_MODE)) {
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SECURITY)) {
		xb_copy_32(error, &xb, &xbnew, count);
		while (!error && (count-- > 0)) {
			xb_copy_32(error, &xb, &xbnew, val);
		}
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_KERB_ETYPE)) {
		xb_copy_32(error, &xb, &xbnew, count);
		xb_add_32(error, &xbnew, -1);
		while (!error && (count-- > 0)) {
			xb_copy_32(error, &xb, &xbnew, val);
		}
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MAX_GROUP_LIST)) {
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOCKET_TYPE)) {
		xb_copy_opaque(error, &xb, &xbnew);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_PORT)) {
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MOUNT_PORT)) {
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_REQUEST_TIMEOUT)) {
		xb_copy_32(error, &xb, &xbnew, val);
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOFT_RETRY_COUNT)) {
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_DEAD_TIMEOUT)) {
		xb_copy_32(error, &xb, &xbnew, val);
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FH)) {
		/* skip the old file handle; a new one is added below for non-referrals */
		xb_get_32(error, &xb, count);
		xb_skip(error, &xb, count);
	}
	if (!referral) {
		/* set the initial file handle to the directory's file handle */
		xb_add_fh(error, &xbnew, np->n_fhp, np->n_fhsize);
	}
	/* copy/extend/skip fs locations */
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FS_LOCATIONS)) {
		numlocs = numserv = numaddr = numcomp = 0;
		if (referral) { /* don't copy the fs locations for a referral */
			skipcopy = 1;
		}
		xb_copy_32(error, &xb, &xbnew, numlocs); /* location count */
		for (loc = 0; !error && (loc < numlocs); loc++) {
			xb_copy_32(error, &xb, &xbnew, numserv); /* server count */
			for (serv = 0; !error && (serv < numserv); serv++) {
				xb_copy_opaque(error, &xb, &xbnew); /* server name */
				xb_copy_32(error, &xb, &xbnew, numaddr); /* address count */
				for (addr = 0; !error && (addr < numaddr); addr++) {
					xb_copy_opaque(error, &xb, &xbnew); /* address */
				}
				xb_copy_opaque(error, &xb, &xbnew); /* server info */
			}
			/* pathname */
			xb_get_32(error, &xb, numcomp); /* component count */
			if (!skipcopy) {
				uint64_t totalcomps = numcomp + relpathcomps;

				/* set error to ERANGE in the event of overflow */
				if (totalcomps > UINT32_MAX) {
					nfsmerr_if((error = ERANGE));
				}

				xb_add_32(error, &xbnew, (uint32_t) totalcomps); /* new component count */
			}
			for (comp = 0; !error && (comp < numcomp); comp++) {
				xb_copy_opaque(error, &xb, &xbnew); /* component */
			}
			/* add additional components (relpath split on '/') */
			p = relpath;
			while (*p && (*p == '/')) {
				p++;
			}
			while (*p && !error) {
				cp = p;
				while (*p && (*p != '/')) {
					p++;
				}
				xb_add_string(error, &xbnew, cp, (p - cp)); /* component */
				while (*p && (*p == '/')) {
					p++;
				}
			}
			xb_copy_opaque(error, &xb, &xbnew); /* fs location info */
		}
		if (referral) {
			skipcopy = 0;
		}
	}
	if (referral) {
		/* add referral's fs locations */
		xb_add_32(error, &xbnew, nfsls.nl_numlocs); /* FS_LOCATIONS */
		for (loc = 0; !error && (loc < nfsls.nl_numlocs); loc++) {
			xb_add_32(error, &xbnew, nfsls.nl_locations[loc]->nl_servcount);
			for (serv = 0; !error && (serv < nfsls.nl_locations[loc]->nl_servcount); serv++) {
				xb_add_string(error, &xbnew, nfsls.nl_locations[loc]->nl_servers[serv]->ns_name,
				    strlen(nfsls.nl_locations[loc]->nl_servers[serv]->ns_name));
				xb_add_32(error, &xbnew, nfsls.nl_locations[loc]->nl_servers[serv]->ns_addrcount);
				for (addr = 0; !error && (addr < nfsls.nl_locations[loc]->nl_servers[serv]->ns_addrcount); addr++) {
					xb_add_string(error, &xbnew, nfsls.nl_locations[loc]->nl_servers[serv]->ns_addresses[addr],
					    strlen(nfsls.nl_locations[loc]->nl_servers[serv]->ns_addresses[addr]));
				}
				xb_add_32(error, &xbnew, 0); /* empty server info */
			}
			xb_add_32(error, &xbnew, nfsls.nl_locations[loc]->nl_path.np_compcount);
			for (comp = 0; !error && (comp < nfsls.nl_locations[loc]->nl_path.np_compcount); comp++) {
				xb_add_string(error, &xbnew, nfsls.nl_locations[loc]->nl_path.np_components[comp],
				    strlen(nfsls.nl_locations[loc]->nl_path.np_components[comp]));
			}
			xb_add_32(error, &xbnew, 0); /* empty fs location info */
		}
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MNTFLAGS)) {
		xb_get_32(error, &xb, mntflags);
	}
	/*
	 * We add the following mount flags to the ones for the mounted-on mount:
	 * MNT_DONTBROWSE - to keep the mount from showing up as a separate volume
	 * MNT_AUTOMOUNTED - to keep DiskArb from retriggering the mount after
	 *                   an unmount (looking for /.autodiskmounted)
	 */
	mntflags |= (MNT_AUTOMOUNTED | MNT_DONTBROWSE);
	xb_add_32(error, &xbnew, mntflags);
	if (!referral && NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MNTFROM)) {
		/* copy mntfrom string and add relpath */
		rlen = strlen(relpath);
		xb_get_32(error, &xb, mlen);
		nfsmerr_if(error);
		mlen2 = mlen + ((relpath[0] != '/') ? 1 : 0) + rlen;
		xb_add_32(error, &xbnew, mlen2);
		count = mlen / XDRWORD;
		/* copy the original string */
		while (count-- > 0) {
			xb_copy_32(error, &xb, &xbnew, val);
		}
		if (!error && (mlen % XDRWORD)) {
			error = xb_get_bytes(&xb, buf, mlen % XDRWORD, 0);
			if (!error) {
				error = xb_add_bytes(&xbnew, buf, mlen % XDRWORD, 1);
			}
		}
		/* insert a '/' if the relative path doesn't start with one */
		if (!error && (relpath[0] != '/')) {
			buf[0] = '/';
			error = xb_add_bytes(&xbnew, buf, 1, 1);
		}
		/* add the additional relative path */
		if (!error) {
			error = xb_add_bytes(&xbnew, relpath, rlen, 1);
		}
		/* make sure the resulting string has the right number of pad bytes */
		if (!error && (mlen2 != nfsm_rndup(mlen2))) {
			bzero(buf, sizeof(buf));
			count = nfsm_rndup(mlen2) - mlen2;
			error = xb_add_bytes(&xbnew, buf, count, 1);
		}
	}
	/*
	 * The following string copies rely on the fact that we already validated
	 * these data when creating the initial mount point.
	 */
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_REALM)) {
		xb_add_string(error, &xbnew, nmp->nm_realm, strlen(nmp->nm_realm));
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_PRINCIPAL)) {
		xb_add_string(error, &xbnew, nmp->nm_principal, strlen(nmp->nm_principal));
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SVCPRINCIPAL)) {
		xb_add_string(error, &xbnew, nmp->nm_sprinc, strlen(nmp->nm_sprinc));
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_LOCAL_NFS_PORT)) {
		xb_add_string(error, &xbnew, nmp->nm_nfs_localport, strlen(nmp->nm_nfs_localport));
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_LOCAL_MOUNT_PORT)) {
		xb_add_string(error, &xbnew, nmp->nm_mount_localport, strlen(nmp->nm_mount_localport));
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SET_MOUNT_OWNER)) {
		/* drop embedded owner value */
		xb_get_32(error, &xb, count);
	}
	/* New mount always gets same owner as this mount */
	xb_add_32(error, &xbnew, vfs_statfs(vnode_mount(vp))->f_owner);
	xb_build_done(error, &xbnew);

	/* update opaque counts (back-patch the args/attrs length words recorded above) */
	end_offset = xb_offset(&xbnew);
	if (!error) {
		error = xb_seek(&xbnew, argslength_offset);
		argslength = end_offset - argslength_offset + XDRWORD /*version*/;
		xb_add_32(error, &xbnew, argslength);
	}
	if (!error) {
		error = xb_seek(&xbnew, attrslength_offset);
		xb_add_32(error, &xbnew, end_offset - attrslength_offset - XDRWORD /*don't include length field*/);
	}
	nfsmerr_if(error);

	/*
	 * For kernel_mount() call, use the existing mount flags (instead of the
	 * original flags) because flags like MNT_NOSUID and MNT_NODEV may have
	 * been silently enforced. Also, in terms of MACF, the _kernel_ is
	 * performing the mount (and enforcing all of the mount options), so we
	 * use the kernel context for the mount call.
	 */
	mntflags = vfs_flags(vnode_mount(vp)) & MNT_VISFLAGMASK;
	mntflags |= (MNT_AUTOMOUNTED | MNT_DONTBROWSE);

	/* do the mount */
	error = vfs_mount_at_path(fstype, path, dvp, vp, xb_buffer_base(&xbnew), argslength,
	    mntflags, VFS_MOUNT_FLAG_PERMIT_UNMOUNT | VFS_MOUNT_FLAG_NOAUTH);

nfsmerr:
	if (error) {
		printf("nfs: mirror mount of %s on %s failed (%d)\n",
		    mntfromname, path, error);
	}
	/* clean up */
	xb_cleanup(&xbnew);
	if (referral) {
		nfs_fs_locations_cleanup(&nfsls);
	}
	NFS_ZFREE(ZV_NAMEI, path);
	NFS_ZFREE(ZV_NAMEI, mntfromname);
	if (!error) {
		/* make sure the harvester is running so ephemeral mounts get unmounted */
		nfs_ephemeral_mount_harvester_start();
	}
	return error;
}
3679
3680 /*
3681 * trigger vnode functions
3682 */
3683 #define NFS_TRIGGER_DEBUG 1
3684
/*
 * Resolve a mirror-mount trigger vnode.
 *
 * Called by the VFS trigger machinery when a path operation crosses this
 * trigger vnode.  Performs the mirror mount on top of the vnode unless the
 * operation is one that should not fire the trigger (see the switch below),
 * or something is already mounted there.
 *
 * vp    - the trigger vnode
 * cnp   - component name info for the lookup that hit the trigger
 * pop   - the path operation being performed
 * ctx   - caller's VFS context (thread used for the busy reservation)
 *
 * Returns a resolver_result_t built from the node's trigger sequence number:
 * RESOLVER_NOCHANGE, RESOLVER_RESOLVED, or RESOLVER_ERROR (with errno).
 */
resolver_result_t
nfs_mirror_mount_trigger_resolve(
	vnode_t vp,
	const struct componentname *cnp,
	enum path_operation pop,
	__unused int flags,
	__unused void *data,
	vfs_context_t ctx)
{
	nfsnode_t np = VTONFS(vp);
	vnode_t pvp = NULLVP;           /* parent vnode, needed by the mount call */
	int error = 0;
	int didBusy = 0;                /* tracks whether we must clear the busy state on exit */
	resolver_result_t result;

	/*
	 * We have a trigger node that doesn't have anything mounted on it yet.
	 * We'll do the mount if either:
	 * (a) this isn't the last component of the path OR
	 * (b) this is an op that looks like it should trigger the mount.
	 */
	if (cnp->cn_flags & ISLASTCN) {
		switch (pop) {
		case OP_MOUNT:
		case OP_UNMOUNT:
		case OP_STATFS:
		case OP_LINK:
		case OP_UNLINK:
		case OP_RENAME:
		case OP_MKNOD:
		case OP_MKFIFO:
		case OP_SYMLINK:
		case OP_ACCESS:
		case OP_GETATTR:
		case OP_MKDIR:
		case OP_RMDIR:
		case OP_REVOKE:
		case OP_GETXATTR:
		case OP_LISTXATTR:
			/* don't perform the mount for these operations */
			result = vfs_resolver_result(np->n_trigseq, RESOLVER_NOCHANGE, 0);
#ifdef NFS_TRIGGER_DEBUG
			NP(np, "nfs trigger RESOLVE: no change, last %d nameiop %d, seq %d",
			    (cnp->cn_flags & ISLASTCN) ? 1 : 0, cnp->cn_nameiop, np->n_trigseq);
#endif
			return result;
		case OP_OPEN:
		case OP_CHDIR:
		case OP_CHROOT:
		case OP_TRUNCATE:
		case OP_COPYFILE:
		case OP_PATHCONF:
		case OP_READLINK:
		case OP_SETATTR:
		case OP_EXCHANGEDATA:
		case OP_SEARCHFS:
		case OP_FSCTL:
		case OP_SETXATTR:
		case OP_REMOVEXATTR:
		default:
			/* go ahead and do the mount */
			break;
		}
	}

	if (vnode_mountedhere(vp) != NULL) {
		/*
		 * Um... there's already something mounted.
		 * Been there. Done that. Let's just say it succeeded.
		 */
		error = 0;
		goto skipmount;
	}

	/* Take the node busy so only one thread attempts the mount at a time. */
	if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
		result = vfs_resolver_result(np->n_trigseq, RESOLVER_ERROR, error);
#ifdef NFS_TRIGGER_DEBUG
		NP(np, "nfs trigger RESOLVE: busy error %d, last %d nameiop %d, seq %d",
		    error, (cnp->cn_flags & ISLASTCN) ? 1 : 0, cnp->cn_nameiop, np->n_trigseq);
#endif
		return result;
	}
	didBusy = 1;

	/* Check again, in case the mount happened while we were setting busy */
	if (vnode_mountedhere(vp) != NULL) {
		/* Been there. Done that. Let's just say it succeeded. */
		error = 0;
		goto skipmount;
	}
	/* Honor a pending "disarm" request on the node (NDISARMTRIGGER). */
	nfs_node_lock_force(np);
	if (np->n_flag & NDISARMTRIGGER) {
		error = ECANCELED;
		nfs_node_unlock(np);
		goto skipmount;
	}
	nfs_node_unlock(np);

	/* The mount call needs the parent vnode; fail if we can't get it. */
	pvp = vnode_getparent(vp);
	if (pvp == NULLVP) {
		error = EINVAL;
	}
	if (!error) {
		error = nfs_mirror_mount_domount(pvp, vp, ctx);
	}
skipmount:
	/* Only bump the trigger sequence on success (or "already mounted"). */
	if (!error) {
		np->n_trigseq++;
	}
	result = vfs_resolver_result(np->n_trigseq, error ? RESOLVER_ERROR : RESOLVER_RESOLVED, error);
#ifdef NFS_TRIGGER_DEBUG
	NP(np, "nfs trigger RESOLVE: %s %d, last %d nameiop %d, seq %d",
	    error ? "error" : "resolved", error,
	    (cnp->cn_flags & ISLASTCN) ? 1 : 0, cnp->cn_nameiop, np->n_trigseq);
#endif

	if (pvp != NULLVP) {
		vnode_put(pvp);
	}
	if (didBusy) {
		nfs_node_clear_busy(np);
	}
	return result;
}
3809
3810 resolver_result_t
nfs_mirror_mount_trigger_unresolve(vnode_t vp,int flags,__unused void * data,vfs_context_t ctx)3811 nfs_mirror_mount_trigger_unresolve(
3812 vnode_t vp,
3813 int flags,
3814 __unused void *data,
3815 vfs_context_t ctx)
3816 {
3817 nfsnode_t np = VTONFS(vp);
3818 mount_t mp;
3819 int error;
3820 resolver_result_t result;
3821
3822 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
3823 result = vfs_resolver_result(np->n_trigseq, RESOLVER_ERROR, error);
3824 #ifdef NFS_TRIGGER_DEBUG
3825 NP(np, "nfs trigger UNRESOLVE: busy error %d, seq %d", error, np->n_trigseq);
3826 #endif
3827 return result;
3828 }
3829
3830 mp = vnode_mountedhere(vp);
3831 if (!mp) {
3832 error = EINVAL;
3833 }
3834 if (!error) {
3835 error = vfs_unmountbyfsid(&(vfs_statfs(mp)->f_fsid), flags, ctx);
3836 }
3837 if (!error) {
3838 np->n_trigseq++;
3839 }
3840 result = vfs_resolver_result(np->n_trigseq, error ? RESOLVER_ERROR : RESOLVER_UNRESOLVED, error);
3841 #ifdef NFS_TRIGGER_DEBUG
3842 NP(np, "nfs trigger UNRESOLVE: %s %d, seq %d",
3843 error ? "error" : "unresolved", error, np->n_trigseq);
3844 #endif
3845 nfs_node_clear_busy(np);
3846 return result;
3847 }
3848
3849 resolver_result_t
nfs_mirror_mount_trigger_rearm(vnode_t vp,__unused int flags,__unused void * data,vfs_context_t ctx)3850 nfs_mirror_mount_trigger_rearm(
3851 vnode_t vp,
3852 __unused int flags,
3853 __unused void *data,
3854 vfs_context_t ctx)
3855 {
3856 nfsnode_t np = VTONFS(vp);
3857 int error;
3858 resolver_result_t result;
3859
3860 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
3861 result = vfs_resolver_result(np->n_trigseq, RESOLVER_ERROR, error);
3862 #ifdef NFS_TRIGGER_DEBUG
3863 NP(np, "nfs trigger REARM: busy error %d, seq %d", error, np->n_trigseq);
3864 #endif
3865 return result;
3866 }
3867
3868 np->n_trigseq++;
3869 result = vfs_resolver_result(np->n_trigseq,
3870 vnode_mountedhere(vp) ? RESOLVER_RESOLVED : RESOLVER_UNRESOLVED, 0);
3871 #ifdef NFS_TRIGGER_DEBUG
3872 NP(np, "nfs trigger REARM: %s, seq %d",
3873 vnode_mountedhere(vp) ? "resolved" : "unresolved", np->n_trigseq);
3874 #endif
3875 nfs_node_clear_busy(np);
3876 return result;
3877 }
3878
3879 /*
3880 * Periodically attempt to unmount ephemeral (mirror) mounts in an attempt to limit
3881 * the number of unused mounts.
3882 */
3883
3884 #define NFS_EPHEMERAL_MOUNT_HARVEST_INTERVAL 120 /* how often the harvester runs */
3885 struct nfs_ephemeral_mount_harvester_info {
3886 fsid_t fsid; /* FSID that we need to try to unmount */
3887 uint32_t mountcount; /* count of ephemeral mounts seen in scan */
3888 };
3889 /* various globals for the harvester */
3890 static thread_call_t nfs_ephemeral_mount_harvester_timer = NULL;
3891 static int nfs_ephemeral_mount_harvester_on = 0;
3892
3893 kern_return_t thread_terminate(thread_t);
3894
3895 static int
nfs_ephemeral_mount_harvester_callback(mount_t mp,void * arg)3896 nfs_ephemeral_mount_harvester_callback(mount_t mp, void *arg)
3897 {
3898 struct nfs_ephemeral_mount_harvester_info *hinfo = arg;
3899 struct nfsmount *nmp;
3900 struct timeval now;
3901
3902 if (strcmp(vfs_statfs(mp)->f_fstypename, "nfs")) {
3903 return VFS_RETURNED;
3904 }
3905 nmp = VFSTONFS(mp);
3906 if (!nmp || !NMFLAG(nmp, EPHEMERAL)
3907 ) {
3908 return VFS_RETURNED;
3909 }
3910 hinfo->mountcount++;
3911
3912 /* avoid unmounting mounts that have been triggered within the last harvest interval */
3913 microtime(&now);
3914 if ((nmp->nm_mounttime >> 32) > ((uint32_t)now.tv_sec - NFS_EPHEMERAL_MOUNT_HARVEST_INTERVAL)) {
3915 return VFS_RETURNED;
3916 }
3917
3918 if (hinfo->fsid.val[0] || hinfo->fsid.val[1]) {
3919 /* attempt to unmount previously-found ephemeral mount */
3920 vfs_unmountbyfsid(&hinfo->fsid, 0, vfs_context_kernel());
3921 hinfo->fsid.val[0] = hinfo->fsid.val[1] = 0;
3922 }
3923
3924 /*
3925 * We can't call unmount here since we hold a mount iter ref
3926 * on mp so save its fsid for the next call iteration to unmount.
3927 */
3928 hinfo->fsid.val[0] = vfs_statfs(mp)->f_fsid.val[0];
3929 hinfo->fsid.val[1] = vfs_statfs(mp)->f_fsid.val[1];
3930
3931 return VFS_RETURNED;
3932 }
3933
3934 /*
3935 * Spawn a thread to do the ephemeral mount harvesting.
3936 */
3937 static void
nfs_ephemeral_mount_harvester_timer_func(void)3938 nfs_ephemeral_mount_harvester_timer_func(void)
3939 {
3940 thread_t thd;
3941
3942 if (kernel_thread_start(nfs_ephemeral_mount_harvester, NULL, &thd) == KERN_SUCCESS) {
3943 thread_deallocate(thd);
3944 }
3945 }
3946
3947 /*
3948 * Iterate all mounts looking for NFS ephemeral mounts to try to unmount.
3949 */
3950 void
nfs_ephemeral_mount_harvester(__unused void * arg,__unused wait_result_t wr)3951 nfs_ephemeral_mount_harvester(__unused void *arg, __unused wait_result_t wr)
3952 {
3953 struct nfs_ephemeral_mount_harvester_info hinfo;
3954 uint64_t deadline;
3955
3956 hinfo.mountcount = 0;
3957 hinfo.fsid.val[0] = hinfo.fsid.val[1] = 0;
3958 vfs_iterate(VFS_ITERATE_TAIL_FIRST, nfs_ephemeral_mount_harvester_callback, &hinfo);
3959 if (hinfo.fsid.val[0] || hinfo.fsid.val[1]) {
3960 /* attempt to unmount last found ephemeral mount */
3961 vfs_unmountbyfsid(&hinfo.fsid, 0, vfs_context_kernel());
3962 }
3963
3964 lck_mtx_lock(&nfs_global_mutex);
3965 if (!hinfo.mountcount) {
3966 /* no more ephemeral mounts - don't need timer */
3967 nfs_ephemeral_mount_harvester_on = 0;
3968 } else {
3969 /* re-arm the timer */
3970 clock_interval_to_deadline(NFS_EPHEMERAL_MOUNT_HARVEST_INTERVAL, NSEC_PER_SEC, &deadline);
3971 thread_call_enter_delayed(nfs_ephemeral_mount_harvester_timer, deadline);
3972 nfs_ephemeral_mount_harvester_on = 1;
3973 }
3974 lck_mtx_unlock(&nfs_global_mutex);
3975
3976 /* thread done */
3977 thread_terminate(current_thread());
3978 }
3979
3980 /*
3981 * Make sure the NFS ephemeral mount harvester timer is running.
3982 */
3983 void
nfs_ephemeral_mount_harvester_start(void)3984 nfs_ephemeral_mount_harvester_start(void)
3985 {
3986 uint64_t deadline;
3987
3988 lck_mtx_lock(&nfs_global_mutex);
3989 if (nfs_ephemeral_mount_harvester_on) {
3990 lck_mtx_unlock(&nfs_global_mutex);
3991 return;
3992 }
3993 if (nfs_ephemeral_mount_harvester_timer == NULL) {
3994 nfs_ephemeral_mount_harvester_timer = thread_call_allocate((thread_call_func_t)nfs_ephemeral_mount_harvester_timer_func, NULL);
3995 }
3996 clock_interval_to_deadline(NFS_EPHEMERAL_MOUNT_HARVEST_INTERVAL, NSEC_PER_SEC, &deadline);
3997 thread_call_enter_delayed(nfs_ephemeral_mount_harvester_timer, deadline);
3998 nfs_ephemeral_mount_harvester_on = 1;
3999 lck_mtx_unlock(&nfs_global_mutex);
4000 }
4001
4002 #endif
4003
4004 /*
4005 * Send a STAT protocol request to the server to verify statd is running.
4006 * rpc-statd service, which responsible to provide locks for the NFS server, is disabled by default on Ubuntu.
4007 * Please see Radar 45969553 for more info.
4008 */
4009 int
nfs3_check_lockmode(struct nfsmount * nmp,struct sockaddr * sa,int sotype,int timeo)4010 nfs3_check_lockmode(struct nfsmount *nmp, struct sockaddr *sa, int sotype, int timeo)
4011 {
4012 struct sockaddr_storage ss;
4013 int error, port = 0;
4014
4015 if (nmp->nm_lockmode == NFS_LOCK_MODE_ENABLED) {
4016 if (sa->sa_len > sizeof(ss)) {
4017 return EINVAL;
4018 }
4019 bcopy(sa, &ss, MIN(sa->sa_len, sizeof(ss)));
4020 error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss, NULL, RPCPROG_STAT, RPCMNT_VER1, NM_OMFLAG(nmp, MNTUDP) ? SOCK_DGRAM : sotype, timeo);
4021 if (!error) {
4022 if (ss.ss_family == AF_INET) {
4023 port = ntohs(((struct sockaddr_in*)&ss)->sin_port);
4024 } else if (ss.ss_family == AF_INET6) {
4025 port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port);
4026 } else if (ss.ss_family == AF_LOCAL) {
4027 port = (((struct sockaddr_un*)&ss)->sun_path[0] != '\0');
4028 }
4029
4030 if (!port) {
4031 printf("nfs: STAT(NSM) rpc service is not available, unable to mount with current lock mode.\n");
4032 return EPROGUNAVAIL;
4033 }
4034 }
4035 }
4036 return 0;
4037 }
4038
4039 /*
4040 * Send a MOUNT protocol MOUNT request to the server to get the initial file handle (and security).
4041 */
4042 int
nfs3_mount_rpc(struct nfsmount * nmp,struct sockaddr * sa,int sotype,int nfsvers,char * path,vfs_context_t ctx,int timeo,fhandle_t * fh,struct nfs_sec * sec)4043 nfs3_mount_rpc(struct nfsmount *nmp, struct sockaddr *sa, int sotype, int nfsvers, char *path, vfs_context_t ctx, int timeo, fhandle_t *fh, struct nfs_sec *sec)
4044 {
4045 int error = 0, mntproto;
4046 thread_t thd = vfs_context_thread(ctx);
4047 kauth_cred_t cred = vfs_context_ucred(ctx);
4048 uint64_t xid = 0;
4049 size_t slen;
4050 struct nfsm_chain nmreq, nmrep;
4051 mbuf_t mreq;
4052 uint32_t mntvers, mntport, val;
4053 struct sockaddr_storage ss;
4054 struct sockaddr *saddr = (struct sockaddr*)&ss;
4055 struct sockaddr_un *sun = (struct sockaddr_un*)saddr;
4056
4057 nfsm_chain_null(&nmreq);
4058 nfsm_chain_null(&nmrep);
4059
4060 mntvers = (nfsvers == NFS_VER2) ? RPCMNT_VER1 : RPCMNT_VER3;
4061 mntproto = (NM_OMFLAG(nmp, MNTUDP) || (sotype == SOCK_DGRAM)) ? IPPROTO_UDP : IPPROTO_TCP;
4062 sec->count = 0;
4063
4064 bcopy(sa, saddr, min(sizeof(ss), sa->sa_len));
4065 if (saddr->sa_family == AF_INET) {
4066 if (nmp->nm_mountport) {
4067 ((struct sockaddr_in*)saddr)->sin_port = htons(nmp->nm_mountport);
4068 }
4069 mntport = ntohs(((struct sockaddr_in*)saddr)->sin_port);
4070 } else if (saddr->sa_family == AF_INET6) {
4071 if (nmp->nm_mountport) {
4072 ((struct sockaddr_in6*)saddr)->sin6_port = htons(nmp->nm_mountport);
4073 }
4074 mntport = ntohs(((struct sockaddr_in6*)saddr)->sin6_port);
4075 } else { /* Local domain socket */
4076 mntport = ((struct sockaddr_un *)saddr)->sun_path[0]; /* Do we have and address? */
4077 mntproto = IPPROTO_TCP; /* XXX rpcbind only listens on streams sockets for now */
4078 }
4079
4080 while (!mntport) {
4081 error = nfs_portmap_lookup(nmp, ctx, saddr, NULL, RPCPROG_MNT, mntvers,
4082 mntproto == IPPROTO_UDP ? SOCK_DGRAM : SOCK_STREAM, timeo);
4083 nfsmout_if(error);
4084 if (saddr->sa_family == AF_INET) {
4085 mntport = ntohs(((struct sockaddr_in*)saddr)->sin_port);
4086 } else if (saddr->sa_family == AF_INET6) {
4087 mntport = ntohs(((struct sockaddr_in6*)saddr)->sin6_port);
4088 } else if (saddr->sa_family == AF_LOCAL) {
4089 mntport = ((struct sockaddr_un*)saddr)->sun_path[0];
4090 }
4091 if (!mntport) {
4092 /* if not found and TCP, then retry with UDP */
4093 if (mntproto == IPPROTO_UDP) {
4094 error = EPROGUNAVAIL;
4095 break;
4096 }
4097 mntproto = IPPROTO_UDP;
4098 bcopy(sa, saddr, min(sizeof(ss), sa->sa_len));
4099 if (saddr->sa_family == AF_LOCAL) {
4100 strlcpy(sun->sun_path, RPCB_TICLTS_PATH, sizeof(sun->sun_path));
4101 }
4102 }
4103 }
4104 nfsmout_if(error || !mntport);
4105
4106 /* MOUNT protocol MOUNT request */
4107 slen = strlen(path);
4108 nfsm_chain_build_alloc_init(error, &nmreq, NFSX_UNSIGNED + nfsm_rndup(slen));
4109 nfsm_chain_add_name(error, &nmreq, path, slen, nmp);
4110 nfsm_chain_build_done(error, &nmreq);
4111 nfsmout_if(error);
4112 error = nfsm_rpchead2(nmp, (mntproto == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM,
4113 RPCPROG_MNT, mntvers, RPCMNT_MOUNT,
4114 RPCAUTH_SYS, cred, NULL, nmreq.nmc_mhead, &xid, &mreq);
4115 nfsmout_if(error);
4116 nmreq.nmc_mhead = NULL;
4117 error = nfs_aux_request(nmp, thd, saddr, NULL,
4118 ((mntproto == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM),
4119 mreq, R_XID32(xid), 1, timeo, &nmrep);
4120 nfsmout_if(error);
4121 nfsm_chain_get_32(error, &nmrep, val);
4122 if (!error && val) {
4123 error = val;
4124 }
4125 nfsmout_if(error);
4126 nfsm_chain_get_fh(error, &nmrep, nfsvers, fh);
4127 if (!error && (nfsvers > NFS_VER2)) {
4128 sec->count = NX_MAX_SEC_FLAVORS;
4129 error = nfsm_chain_get_secinfo(&nmrep, &sec->flavors[0], &sec->count);
4130 }
4131 nfsmout:
4132 nfsm_chain_cleanup(&nmreq);
4133 nfsm_chain_cleanup(&nmrep);
4134 return error;
4135 }
4136
4137
4138 /*
4139 * Send a MOUNT protocol UNMOUNT request to tell the server we've unmounted it.
4140 */
4141 void
nfs3_umount_rpc(struct nfsmount * nmp,vfs_context_t ctx,int timeo)4142 nfs3_umount_rpc(struct nfsmount *nmp, vfs_context_t ctx, int timeo)
4143 {
4144 int error = 0, mntproto;
4145 thread_t thd = vfs_context_thread(ctx);
4146 kauth_cred_t cred = vfs_context_ucred(ctx);
4147 char *path;
4148 uint64_t xid = 0;
4149 size_t slen;
4150 struct nfsm_chain nmreq, nmrep;
4151 mbuf_t mreq;
4152 uint32_t mntvers;
4153 in_port_t mntport;
4154 struct sockaddr_storage ss;
4155 struct sockaddr *saddr = (struct sockaddr*)&ss;
4156
4157 if (!nmp->nm_saddr) {
4158 return;
4159 }
4160
4161 nfsm_chain_null(&nmreq);
4162 nfsm_chain_null(&nmrep);
4163
4164 mntvers = (nmp->nm_vers == NFS_VER2) ? RPCMNT_VER1 : RPCMNT_VER3;
4165 mntproto = (NM_OMFLAG(nmp, MNTUDP) || (nmp->nm_sotype == SOCK_DGRAM)) ? IPPROTO_UDP : IPPROTO_TCP;
4166 mntport = nmp->nm_mountport;
4167
4168 bcopy(nmp->nm_saddr, saddr, min(sizeof(ss), nmp->nm_saddr->sa_len));
4169 if (saddr->sa_family == AF_INET) {
4170 ((struct sockaddr_in*)saddr)->sin_port = htons(mntport);
4171 } else if (saddr->sa_family == AF_INET6) {
4172 ((struct sockaddr_in6*)saddr)->sin6_port = htons(mntport);
4173 } else { /* Local domain socket */
4174 mntport = ((struct sockaddr_un *)saddr)->sun_path[0]; /* Do we have and address? */
4175 }
4176
4177 while (!mntport) {
4178 error = nfs_portmap_lookup(nmp, ctx, saddr, NULL, RPCPROG_MNT, mntvers, mntproto, timeo);
4179 nfsmout_if(error);
4180 if (saddr->sa_family == AF_INET) {
4181 mntport = ntohs(((struct sockaddr_in*)saddr)->sin_port);
4182 } else if (saddr->sa_family == AF_INET6) {
4183 mntport = ntohs(((struct sockaddr_in6*)saddr)->sin6_port);
4184 } else { /* Local domain socket */
4185 mntport = ((struct sockaddr_un *)saddr)->sun_path[0]; /* Do we have and address? */
4186 }
4187 /* if not found and mntvers > VER1, then retry with VER1 */
4188 if (!mntport) {
4189 if (mntvers > RPCMNT_VER1) {
4190 mntvers = RPCMNT_VER1;
4191 } else if (mntproto == IPPROTO_TCP) {
4192 mntproto = IPPROTO_UDP;
4193 mntvers = (nmp->nm_vers == NFS_VER2) ? RPCMNT_VER1 : RPCMNT_VER3;
4194 } else {
4195 break;
4196 }
4197 bcopy(nmp->nm_saddr, saddr, min(sizeof(ss), nmp->nm_saddr->sa_len));
4198 }
4199 }
4200 nfsmout_if(!mntport);
4201
4202 /* MOUNT protocol UNMOUNT request */
4203 path = &vfs_statfs(nmp->nm_mountp)->f_mntfromname[0];
4204 while (*path && (*path != '/')) {
4205 path++;
4206 }
4207 slen = strlen(path);
4208 nfsm_chain_build_alloc_init(error, &nmreq, NFSX_UNSIGNED + nfsm_rndup(slen));
4209 nfsm_chain_add_name(error, &nmreq, path, slen, nmp);
4210 nfsm_chain_build_done(error, &nmreq);
4211 nfsmout_if(error);
4212 error = nfsm_rpchead2(nmp, (mntproto == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM,
4213 RPCPROG_MNT, RPCMNT_VER1, RPCMNT_UMOUNT,
4214 RPCAUTH_SYS, cred, NULL, nmreq.nmc_mhead, &xid, &mreq);
4215 nfsmout_if(error);
4216 nmreq.nmc_mhead = NULL;
4217 error = nfs_aux_request(nmp, thd, saddr, NULL,
4218 ((mntproto == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM),
4219 mreq, R_XID32(xid), 1, timeo, &nmrep);
4220 nfsmout:
4221 nfsm_chain_cleanup(&nmreq);
4222 nfsm_chain_cleanup(&nmrep);
4223 }
4224
4225 /*
4226 * unmount system call
4227 */
4228 int
nfs_vfs_unmount(mount_t mp,int mntflags,__unused vfs_context_t ctx)4229 nfs_vfs_unmount(
4230 mount_t mp,
4231 int mntflags,
4232 __unused vfs_context_t ctx)
4233 {
4234 struct nfsmount *nmp;
4235 vnode_t vp;
4236 int error, flags = 0;
4237 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
4238
4239 nmp = VFSTONFS(mp);
4240 lck_mtx_lock(&nmp->nm_lock);
4241 /*
4242 * Set the flag indicating that an unmount attempt is in progress.
4243 */
4244 nmp->nm_state |= NFSSTA_UNMOUNTING;
4245 /*
4246 * During a force unmount we want to...
4247 * Mark that we are doing a force unmount.
4248 * Make the mountpoint soft.
4249 */
4250 if (mntflags & MNT_FORCE) {
4251 flags |= FORCECLOSE;
4252 nmp->nm_state |= NFSSTA_FORCE;
4253 NFS_BITMAP_SET(nmp->nm_flags, NFS_MFLAG_SOFT);
4254 }
4255 /*
4256 * Wait for any in-progress monitored node scan to complete.
4257 */
4258 while (nmp->nm_state & NFSSTA_MONITOR_SCAN) {
4259 msleep(&nmp->nm_state, &nmp->nm_lock, PZERO - 1, "nfswaitmonscan", &ts);
4260 }
4261 /*
4262 * Goes something like this..
4263 * - Call vflush() to clear out vnodes for this file system,
4264 * except for the swap files. Deal with them in 2nd pass.
4265 * - Decrement reference on the vnode representing remote root.
4266 * - Clean up the NFS mount structure.
4267 */
4268 vp = NFSTOV(nmp->nm_dnp);
4269 lck_mtx_unlock(&nmp->nm_lock);
4270
4271 /*
4272 * vflush will check for busy vnodes on mountpoint.
4273 * Will do the right thing for MNT_FORCE. That is, we should
4274 * not get EBUSY back.
4275 */
4276 error = vflush(mp, vp, SKIPSWAP | flags);
4277 if (mntflags & MNT_FORCE) {
4278 error = vflush(mp, NULLVP, flags); /* locks vp in the process */
4279 } else {
4280 if (vnode_isinuse(vp, 1)) {
4281 error = EBUSY;
4282 } else {
4283 error = vflush(mp, vp, flags);
4284 }
4285 }
4286 if (error) {
4287 lck_mtx_lock(&nmp->nm_lock);
4288 nmp->nm_state &= ~NFSSTA_UNMOUNTING;
4289 lck_mtx_unlock(&nmp->nm_lock);
4290 return NFS_MAPERR(error);
4291 }
4292
4293 lck_mtx_lock(&nmp->nm_lock);
4294 nmp->nm_dnp = NULL;
4295 lck_mtx_unlock(&nmp->nm_lock);
4296
4297 /*
4298 * Release the root vnode reference held by mountnfs()
4299 */
4300 error = vnode_get(vp);
4301 vnode_rele(vp);
4302 if (!error) {
4303 vnode_put(vp);
4304 }
4305
4306 vflush(mp, NULLVP, FORCECLOSE);
4307
4308 /* Wait for all other references to be released and free the mount */
4309 nfs_mount_drain_and_cleanup(nmp);
4310
4311 return 0;
4312 }
4313
4314 /*
4315 * cleanup/destroy NFS fs locations structure
4316 */
4317 void
nfs_fs_locations_cleanup(struct nfs_fs_locations * nfslsp)4318 nfs_fs_locations_cleanup(struct nfs_fs_locations *nfslsp)
4319 {
4320 struct nfs_fs_location *fsl;
4321 struct nfs_fs_server *fss;
4322 struct nfs_fs_path *fsp;
4323 uint32_t loc, serv, addr, comp;
4324
4325 /* free up fs locations */
4326 if (!nfslsp->nl_numlocs || !nfslsp->nl_locations) {
4327 return;
4328 }
4329
4330 for (loc = 0; loc < nfslsp->nl_numlocs; loc++) {
4331 fsl = nfslsp->nl_locations[loc];
4332 if (!fsl) {
4333 continue;
4334 }
4335 if ((fsl->nl_servcount > 0) && fsl->nl_servers) {
4336 for (serv = 0; serv < fsl->nl_servcount; serv++) {
4337 fss = fsl->nl_servers[serv];
4338 if (!fss) {
4339 continue;
4340 }
4341 if ((fss->ns_addrcount > 0) && fss->ns_addresses) {
4342 for (addr = 0; addr < fss->ns_addrcount; addr++) {
4343 kfree_data_addr(fss->ns_addresses[addr]);
4344 }
4345 FREE(fss->ns_addresses, M_TEMP);
4346 }
4347 kfree_data_addr(fss->ns_name);
4348 kfree_type(struct nfs_fs_server, fss);
4349 }
4350 FREE(fsl->nl_servers, M_TEMP);
4351 }
4352 fsp = &fsl->nl_path;
4353 if (fsp->np_compcount && fsp->np_components) {
4354 for (comp = 0; comp < fsp->np_compcount; comp++) {
4355 if (fsp->np_components[comp]) {
4356 kfree_data_addr(fsp->np_components[comp]);
4357 }
4358 }
4359 FREE(fsp->np_components, M_TEMP);
4360 }
4361 kfree_type(struct nfs_fs_location, fsl);
4362 }
4363 FREE(nfslsp->nl_locations, M_TEMP);
4364 nfslsp->nl_numlocs = 0;
4365 nfslsp->nl_locations = NULL;
4366 }
4367
4368 void
nfs_mount_rele(struct nfsmount * nmp)4369 nfs_mount_rele(struct nfsmount *nmp)
4370 {
4371 int wup = 0;
4372
4373 lck_mtx_lock(&nmp->nm_lock);
4374 if (nmp->nm_ref < 1) {
4375 panic("nfs zombie mount underflow");
4376 }
4377 nmp->nm_ref--;
4378 if (nmp->nm_ref == 0) {
4379 wup = nmp->nm_state & NFSSTA_MOUNT_DRAIN;
4380 }
4381 lck_mtx_unlock(&nmp->nm_lock);
4382 if (wup) {
4383 wakeup(&nmp->nm_ref);
4384 }
4385 }
4386
/*
 * Wait for all references on the nfsmount to be released, then destroy it.
 * Sets NFSSTA_MOUNT_DRAIN so nfs_mount_rele() wakes us on the last release.
 */
void
nfs_mount_drain_and_cleanup(struct nfsmount *nmp)
{
	lck_mtx_lock(&nmp->nm_lock);
	nmp->nm_state |= NFSSTA_MOUNT_DRAIN;
	/* sleep until nfs_mount_rele() drops the last reference */
	while (nmp->nm_ref > 0) {
		msleep(&nmp->nm_ref, &nmp->nm_lock, PZERO - 1, "nfs_mount_drain", NULL);
	}
	assert(nmp->nm_ref == 0);
	lck_mtx_unlock(&nmp->nm_lock);
	nfs_mount_cleanup(nmp);
}
4399
4400 /*
4401 * nfs_mount_zombie
4402 */
4403 void
nfs_mount_zombie(struct nfsmount * nmp,int nm_state_flags)4404 nfs_mount_zombie(struct nfsmount *nmp, int nm_state_flags)
4405 {
4406 struct nfsreq *req, *treq;
4407 struct nfs_reqqhead iodq, resendq;
4408 struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
4409 struct nfs_open_owner *noop, *nextnoop;
4410 nfsnode_t np;
4411 int docallback;
4412
4413 lck_mtx_lock(&nmp->nm_lock);
4414 nmp->nm_state |= nm_state_flags;
4415 nmp->nm_ref++;
4416 lck_mtx_unlock(&nmp->nm_lock);
4417 #if CONFIG_NFS4
4418 /* stop callbacks */
4419 if ((nmp->nm_vers >= NFS_VER4) && !NMFLAG(nmp, NOCALLBACK) && nmp->nm_cbid) {
4420 nfs4_mount_callback_shutdown(nmp);
4421 }
4422 #endif
4423 #if CONFIG_NFS_GSS
4424 /* Destroy any RPCSEC_GSS contexts */
4425 nfs_gss_clnt_ctx_unmount(nmp);
4426 #endif
4427
4428 /* mark the socket for termination */
4429 lck_mtx_lock(&nmp->nm_lock);
4430 nmp->nm_sockflags |= NMSOCK_UNMOUNT;
4431
4432 /* Have the socket thread send the unmount RPC, if requested/appropriate. */
4433 if ((nmp->nm_vers < NFS_VER4) && (nmp->nm_state & NFSSTA_MOUNTED) &&
4434 !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) && NMFLAG(nmp, CALLUMNT)) {
4435 nfs_mount_sock_thread_wake(nmp);
4436 }
4437
4438 /* wait for the socket thread to terminate */
4439 while (nmp->nm_sockthd && current_thread() != nmp->nm_sockthd) {
4440 wakeup(&nmp->nm_sockthd);
4441 msleep(&nmp->nm_sockthd, &nmp->nm_lock, PZERO - 1, "nfswaitsockthd", &ts);
4442 }
4443 lck_mtx_unlock(&nmp->nm_lock);
4444
4445 /* tear down the socket */
4446 nfs_disconnect(nmp);
4447
4448 lck_mtx_lock(&nmp->nm_lock);
4449
4450 #if CONFIG_NFS4
4451 if ((nmp->nm_vers >= NFS_VER4) && !NMFLAG(nmp, NOCALLBACK) && nmp->nm_cbid) {
4452 /* clear out any pending delegation return requests */
4453 while ((np = TAILQ_FIRST(&nmp->nm_dreturnq))) {
4454 TAILQ_REMOVE(&nmp->nm_dreturnq, np, n_dreturn);
4455 np->n_dreturn.tqe_next = NFSNOLIST;
4456 }
4457 }
4458
4459 /* cancel any renew timer */
4460 if ((nmp->nm_vers >= NFS_VER4) && nmp->nm_renew_timer) {
4461 thread_call_cancel(nmp->nm_renew_timer);
4462 thread_call_free(nmp->nm_renew_timer);
4463 nmp->nm_renew_timer = NULL;
4464 }
4465
4466 #endif
4467 lck_mtx_unlock(&nmp->nm_lock);
4468
4469 if (nmp->nm_state & NFSSTA_MOUNTED) {
4470 switch (nmp->nm_lockmode) {
4471 case NFS_LOCK_MODE_DISABLED:
4472 case NFS_LOCK_MODE_LOCAL:
4473 break;
4474 case NFS_LOCK_MODE_ENABLED:
4475 default:
4476 if (nmp->nm_vers <= NFS_VER3) {
4477 nfs_lockd_mount_unregister(nmp);
4478 nmp->nm_lockmode = NFS_LOCK_MODE_DISABLED;
4479 }
4480 break;
4481 }
4482 }
4483
4484 #if CONFIG_NFS4
4485 if ((nmp->nm_vers >= NFS_VER4) && nmp->nm_longid) {
4486 /* remove/deallocate the client ID data */
4487 lck_mtx_lock(&nfs_global_mutex);
4488 TAILQ_REMOVE(&nfsclientids, nmp->nm_longid, nci_link);
4489 if (nmp->nm_longid->nci_id) {
4490 kfree_data_addr(nmp->nm_longid->nci_id);
4491 }
4492 kfree_type(struct nfs_client_id, nmp->nm_longid);
4493 lck_mtx_unlock(&nfs_global_mutex);
4494 }
4495 #endif
4496 /*
4497 * Be sure all requests for this mount are completed
4498 * and removed from the resend queue.
4499 */
4500 TAILQ_INIT(&resendq);
4501 lck_mtx_lock(&nfs_request_mutex);
4502 TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
4503 if (req->r_nmp == nmp) {
4504 lck_mtx_lock(&req->r_mtx);
4505 if (!req->r_error && req->r_nmrep.nmc_mhead == NULL) {
4506 req->r_error = EIO;
4507 }
4508 if (req->r_flags & R_RESENDQ) {
4509 lck_mtx_lock(&nmp->nm_lock);
4510 if ((req->r_flags & R_RESENDQ) && req->r_rchain.tqe_next != NFSREQNOLIST) {
4511 TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
4512 req->r_flags &= ~R_RESENDQ;
4513 req->r_rchain.tqe_next = NFSREQNOLIST;
4514 /*
4515 * Queue up the request so that we can unreference them
4516 * with out holding nfs_request_mutex
4517 */
4518 TAILQ_INSERT_TAIL(&resendq, req, r_rchain);
4519 }
4520 lck_mtx_unlock(&nmp->nm_lock);
4521 }
4522 wakeup(req);
4523 lck_mtx_unlock(&req->r_mtx);
4524 }
4525 }
4526 lck_mtx_unlock(&nfs_request_mutex);
4527
4528 /* Since we've drop the request mutex we can now safely unreference the request */
4529 TAILQ_FOREACH_SAFE(req, &resendq, r_rchain, treq) {
4530 TAILQ_REMOVE(&resendq, req, r_rchain);
4531 /* Make sure we don't try and remove again in nfs_request_destroy */
4532 req->r_rchain.tqe_next = NFSREQNOLIST;
4533 nfs_request_rele(req);
4534 }
4535
4536 /*
4537 * Now handle and outstanding async requests. We need to walk the
4538 * request queue again this time with the nfsiod_mutex held. No
4539 * other iods can grab our requests until we've put them on our own
4540 * local iod queue for processing.
4541 */
4542 TAILQ_INIT(&iodq);
4543 lck_mtx_lock(&nfs_request_mutex);
4544 lck_mtx_lock(&nfsiod_mutex);
4545 TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
4546 if (req->r_nmp == nmp) {
4547 lck_mtx_lock(&req->r_mtx);
4548 if (req->r_callback.rcb_func
4549 && !(req->r_flags & R_WAITSENT) && !(req->r_flags & R_IOD)) {
4550 /*
4551 * Since R_IOD is not set then we need to handle it. If
4552 * we're not on a list add it to our iod queue. Otherwise
4553 * we must already be on nm_iodq which is added to our
4554 * local queue below.
4555 * %%% We should really keep a back pointer to our iod queue
4556 * that we're on.
4557 */
4558 req->r_flags |= R_IOD;
4559 if (req->r_achain.tqe_next == NFSREQNOLIST) {
4560 TAILQ_INSERT_TAIL(&iodq, req, r_achain);
4561 }
4562 }
4563 lck_mtx_unlock(&req->r_mtx);
4564 }
4565 }
4566
4567 /* finish any async I/O RPCs queued up */
4568 if (nmp->nm_iodlink.tqe_next != NFSNOLIST) {
4569 TAILQ_REMOVE(&nfsiodmounts, nmp, nm_iodlink);
4570 }
4571 TAILQ_CONCAT(&iodq, &nmp->nm_iodq, r_achain);
4572 lck_mtx_unlock(&nfsiod_mutex);
4573 lck_mtx_unlock(&nfs_request_mutex);
4574
4575 TAILQ_FOREACH_SAFE(req, &iodq, r_achain, treq) {
4576 TAILQ_REMOVE(&iodq, req, r_achain);
4577 req->r_achain.tqe_next = NFSREQNOLIST;
4578 lck_mtx_lock(&req->r_mtx);
4579 docallback = !(req->r_flags & R_WAITSENT);
4580 lck_mtx_unlock(&req->r_mtx);
4581 if (docallback) {
4582 req->r_callback.rcb_func(req);
4583 }
4584 }
4585
4586 /* clean up common state */
4587 lck_mtx_lock(&nmp->nm_lock);
4588 while ((np = LIST_FIRST(&nmp->nm_monlist))) {
4589 LIST_REMOVE(np, n_monlink);
4590 np->n_monlink.le_next = NFSNOLIST;
4591 }
4592 TAILQ_FOREACH_SAFE(noop, &nmp->nm_open_owners, noo_link, nextnoop) {
4593 os_ref_count_t newcount;
4594
4595 TAILQ_REMOVE(&nmp->nm_open_owners, noop, noo_link);
4596 noop->noo_flags &= ~NFS_OPEN_OWNER_LINK;
4597 newcount = os_ref_release_locked(&noop->noo_refcnt);
4598
4599 if (newcount) {
4600 continue;
4601 }
4602 nfs_open_owner_destroy(noop);
4603 }
4604 lck_mtx_unlock(&nmp->nm_lock);
4605
4606 #if CONFIG_NFS4
4607 /* clean up NFSv4 state */
4608 if (nmp->nm_vers >= NFS_VER4) {
4609 lck_mtx_lock(&nmp->nm_lock);
4610 while ((np = TAILQ_FIRST(&nmp->nm_delegations))) {
4611 TAILQ_REMOVE(&nmp->nm_delegations, np, n_dlink);
4612 np->n_dlink.tqe_next = NFSNOLIST;
4613 }
4614 lck_mtx_unlock(&nmp->nm_lock);
4615 }
4616 #endif
4617 nfs_mount_rele(nmp);
4618 }
4619
4620 /*
4621 * cleanup/destroy an nfsmount
4622 */
4623 void
nfs_mount_cleanup(struct nfsmount * nmp)4624 nfs_mount_cleanup(struct nfsmount *nmp)
4625 {
4626 if (!nmp) {
4627 return;
4628 }
4629
4630 nfs_mount_zombie(nmp, 0);
4631
4632 NFS_VFS_DBG("Unmounting %s from %s\n",
4633 vfs_statfs(nmp->nm_mountp)->f_mntfromname,
4634 vfs_statfs(nmp->nm_mountp)->f_mntonname);
4635 NFS_VFS_DBG("nfs state = 0x%8.8x\n", nmp->nm_state);
4636 NFS_VFS_DBG("nfs socket flags = 0x%8.8x\n", nmp->nm_sockflags);
4637 NFS_VFS_DBG("nfs mount ref count is %d\n", nmp->nm_ref);
4638
4639 if (nmp->nm_mountp) {
4640 vfs_setfsprivate(nmp->nm_mountp, NULL);
4641 }
4642
4643 lck_mtx_lock(&nmp->nm_lock);
4644 if (nmp->nm_ref) {
4645 panic("Some one has grabbed a ref %d state flags = 0x%8.8x", nmp->nm_ref, nmp->nm_state);
4646 }
4647
4648 free_sockaddr(nmp->nm_saddr);
4649
4650 if ((nmp->nm_vers < NFS_VER4) && nmp->nm_rqsaddr) {
4651 kfree_type(struct sockaddr_storage, nmp->nm_rqsaddr);
4652 }
4653
4654 if (IS_VALID_CRED(nmp->nm_mcred)) {
4655 kauth_cred_unref(&nmp->nm_mcred);
4656 }
4657
4658 nfs_fs_locations_cleanup(&nmp->nm_locations);
4659
4660 if (nmp->nm_realm) {
4661 kfree_data_addr(nmp->nm_realm);
4662 }
4663 if (nmp->nm_principal) {
4664 kfree_data_addr(nmp->nm_principal);
4665 }
4666 if (nmp->nm_sprinc) {
4667 kfree_data_addr(nmp->nm_sprinc);
4668 }
4669
4670 if (nmp->nm_args) {
4671 xb_free(nmp->nm_args);
4672 }
4673
4674 lck_mtx_unlock(&nmp->nm_lock);
4675
4676 lck_mtx_destroy(&nmp->nm_lock, &nfs_mount_grp);
4677 if (nmp->nm_fh) {
4678 NFS_ZFREE(nfs_fhandle_zone, nmp->nm_fh);
4679 }
4680
4681
4682 NFS_ZFREE(nfsmnt_zone, nmp);
4683 }
4684
4685 /*
4686 * Return root of a filesystem
4687 */
4688 int
nfs_vfs_root(mount_t mp,vnode_t * vpp,__unused vfs_context_t ctx)4689 nfs_vfs_root(mount_t mp, vnode_t *vpp, __unused vfs_context_t ctx)
4690 {
4691 vnode_t vp;
4692 struct nfsmount *nmp;
4693 int error;
4694 u_int32_t vpid;
4695
4696 nmp = VFSTONFS(mp);
4697 if (!nmp || !nmp->nm_dnp) {
4698 return ENXIO;
4699 }
4700 vp = NFSTOV(nmp->nm_dnp);
4701 vpid = vnode_vid(vp);
4702 while ((error = vnode_getwithvid(vp, vpid))) {
4703 /* vnode_get() may return ENOENT if the dir changes. */
4704 /* If that happens, just try it again, else return the error. */
4705 if ((error != ENOENT) || (vnode_vid(vp) == vpid)) {
4706 return NFS_MAPERR(error);
4707 }
4708 vpid = vnode_vid(vp);
4709 }
4710 *vpp = vp;
4711 return 0;
4712 }
4713
4714 /*
4715 * Do operations associated with quotas
4716 */
4717 #if !QUOTA
int
nfs_vfs_quotactl(
    __unused mount_t mp,
    __unused int cmds,
    __unused uid_t uid,
    __unused caddr_t datap,
    __unused vfs_context_t context)
{
    /* Quota support compiled out (!QUOTA): all quota operations are unsupported. */
    return ENOTSUP;
}
4728 #else
4729
/*
 * Extract the (host-order) port number from an IPv4/IPv6 socket address.
 * For any other address family, returns 0 and, when an error pointer is
 * supplied, reports EIO through it.
 */
static in_port_t
nfs_sa_getport(struct sockaddr *sa, int *error)
{
    in_port_t port;

    switch (sa->sa_family) {
    case AF_INET6:
        port = ntohs(((struct sockaddr_in6 *)sa)->sin6_port);
        break;
    case AF_INET:
        port = ntohs(((struct sockaddr_in *)sa)->sin_port);
        break;
    default:
        port = 0;
        if (error) {
            *error = EIO;
        }
        break;
    }

    return port;
}
4745
/*
 * Store a (host-order) port number into an IPv4/IPv6 socket address.
 * Silently does nothing for other address families.
 */
static void
nfs_sa_setport(struct sockaddr *sa, in_port_t port)
{
    switch (sa->sa_family) {
    case AF_INET6:
        ((struct sockaddr_in6 *)sa)->sin6_port = htons(port);
        break;
    case AF_INET:
        ((struct sockaddr_in *)sa)->sin_port = htons(port);
        break;
    default:
        break;
    }
}
4755
/*
 * Fetch quota information for an NFSv2/v3 mount by talking to the server's
 * rquotad service:
 *   1) lazily allocate/cache the rquotad socket address on the mount,
 *   2) look up the rquota port via the server's portmapper (coordinating
 *      concurrent lookups with NFSSTA_RQUOTAINPROG/WANTRQUOTA),
 *   3) send an RQUOTA_GET request and decode the reply into *dqb.
 *
 * type is USRQUOTA or GRPQUOTA (group quotas need the extended rquota
 * protocol version).  Returns 0 with *dqb filled in, or an errno.
 */
int
nfs3_getquota(struct nfsmount *nmp, vfs_context_t ctx, uid_t id, int type, struct dqblk *dqb)
{
    int error = 0, timeo;
    int rqproto, rqvers = (type == GRPQUOTA) ? RPCRQUOTA_EXT_VER : RPCRQUOTA_VER;
    in_port_t rqport = 0;
    thread_t thd = vfs_context_thread(ctx);
    kauth_cred_t cred = vfs_context_ucred(ctx);
    char *path;
    uint64_t slen, xid = 0;
    struct nfsm_chain nmreq, nmrep;
    mbuf_t mreq;
    uint32_t val = 0, bsize = 0;
    struct sockaddr *rqsaddr;
    struct timeval now;
    struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

    if (!nmp->nm_saddr) {
        return ENXIO;
    }

    if (NMFLAG(nmp, NOQUOTA) || nmp->nm_saddr->sa_family == AF_LOCAL /* XXX for now */) {
        return ENOTSUP;
    }

    /*
     * Allocate an address for rquotad if needed
     *
     * Copy the server address, zero the port, then publish it under the
     * mount lock; if we lost the race with another thread, free our copy.
     */
    if (!nmp->nm_rqsaddr) {
        int need_free = 0;

        rqsaddr = (struct sockaddr *)kalloc_type(struct sockaddr_storage, Z_WAITOK | Z_ZERO);
        bcopy(nmp->nm_saddr, rqsaddr, min(sizeof(struct sockaddr_storage), nmp->nm_saddr->sa_len));
        /* Set the port to zero, will call rpcbind to get the port below */
        nfs_sa_setport(rqsaddr, 0);
        microuptime(&now);

        lck_mtx_lock(&nmp->nm_lock);
        if (!nmp->nm_rqsaddr) {
            nmp->nm_rqsaddr = rqsaddr;
            nmp->nm_rqsaddrstamp = now.tv_sec;
        } else {
            need_free = 1;
        }
        lck_mtx_unlock(&nmp->nm_lock);
        if (need_free) {
            kfree_type(struct sockaddr_storage, rqsaddr);
        }
    }

    timeo = NMFLAG(nmp, SOFT) ? 10 : 60;
    rqproto = IPPROTO_UDP; /* XXX should prefer TCP if mount is TCP */

    /* check if we have a recently cached rquota port */
    microuptime(&now);
    lck_mtx_lock(&nmp->nm_lock);
    rqsaddr = nmp->nm_rqsaddr;
    rqport = nfs_sa_getport(rqsaddr, &error);
    /*
     * Loop (entered with nm_lock held) until we have a fresh port
     * (cached entries go stale after 60 seconds).  Only one thread does
     * the portmap lookup at a time; others sleep on nm_rqsaddr until
     * woken.  NOTE: the "out:" label below is *inside* the loop body so
     * lookup errors still clear NFSSTA_RQUOTAINPROG and wake waiters
     * before the loop condition terminates on error.
     */
    while (!error && (!rqport || ((nmp->nm_rqsaddrstamp + 60) <= (uint32_t)now.tv_sec))) {
        error = nfs_sigintr(nmp, NULL, thd, 1);
        if (error) {
            lck_mtx_unlock(&nmp->nm_lock);
            return error;
        }
        if (nmp->nm_state & NFSSTA_RQUOTAINPROG) {
            /* someone else is doing the lookup; wait for them */
            nmp->nm_state |= NFSSTA_WANTRQUOTA;
            msleep(&nmp->nm_rqsaddr, &nmp->nm_lock, PZERO - 1, "nfswaitrquotaaddr", &ts);
            rqport = nfs_sa_getport(rqsaddr, &error);
            continue;
        }
        nmp->nm_state |= NFSSTA_RQUOTAINPROG;
        lck_mtx_unlock(&nmp->nm_lock);

        /* send portmap request to get rquota port */
        error = nfs_portmap_lookup(nmp, ctx, rqsaddr, NULL, RPCPROG_RQUOTA, rqvers, rqproto, timeo);
        if (error) {
            goto out;
        }
        rqport = nfs_sa_getport(rqsaddr, &error);
        if (error) {
            goto out;
        }

        if (!rqport) {
            /*
             * We overload PMAPPORT for the port if rquotad is not
             * currently registered or up at the server.  In the
             * while loop above, port will be set and we will defer
             * for a bit.  Perhaps the service isn't online yet.
             *
             * Note that precludes using indirect, but we're not doing
             * that here.
             */
            rqport = PMAPPORT;
            nfs_sa_setport(rqsaddr, rqport);
        }
        microuptime(&now);
        /* NOTE(review): stamp is written here without nm_lock held — looks benign but confirm */
        nmp->nm_rqsaddrstamp = now.tv_sec;
out:
        /* re-take the lock for the loop condition; clear in-progress and wake any waiters */
        lck_mtx_lock(&nmp->nm_lock);
        nmp->nm_state &= ~NFSSTA_RQUOTAINPROG;
        if (nmp->nm_state & NFSSTA_WANTRQUOTA) {
            nmp->nm_state &= ~NFSSTA_WANTRQUOTA;
            wakeup(&nmp->nm_rqsaddr);
        }
    }
    lck_mtx_unlock(&nmp->nm_lock);
    if (error) {
        return error;
    }

    /* Using PMAPPORT for unavailable rquota service */
    if (rqport == PMAPPORT) {
        return ENOTSUP;
    }

    /*
     * rquota request: the argument is the exported path, which we take
     * as everything from the first '/' of f_mntfromname (skipping the
     * "host:" prefix).
     */
    nfsm_chain_null(&nmreq);
    nfsm_chain_null(&nmrep);
    path = &vfs_statfs(nmp->nm_mountp)->f_mntfromname[0];
    while (*path && (*path != '/')) {
        path++;
    }
    slen = strlen(path);
    nfsm_chain_build_alloc_init(error, &nmreq, 3 * NFSX_UNSIGNED + nfsm_rndup(slen));
    nfsm_chain_add_name(error, &nmreq, path, slen, nmp);
    if (type == GRPQUOTA) {
        /* extended rquota protocol carries an explicit quota type */
        nfsm_chain_add_32(error, &nmreq, type);
    }
    nfsm_chain_add_32(error, &nmreq, id);
    nfsm_chain_build_done(error, &nmreq);
    nfsmout_if(error);
    error = nfsm_rpchead2(nmp, (rqproto == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM,
        RPCPROG_RQUOTA, rqvers, RPCRQUOTA_GET,
        RPCAUTH_SYS, cred, NULL, nmreq.nmc_mhead, &xid, &mreq);
    nfsmout_if(error);
    nmreq.nmc_mhead = NULL; /* mbufs now owned by mreq */
    error = nfs_aux_request(nmp, thd, rqsaddr, NULL,
        (rqproto == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM,
        mreq, R_XID32(xid), 0, timeo, &nmrep);
    nfsmout_if(error);

    /* parse rquota response */
    nfsm_chain_get_32(error, &nmrep, val);
    if (!error && (val != RQUOTA_STAT_OK)) {
        if (val == RQUOTA_STAT_NOQUOTA) {
            error = ENOENT;
        } else if (val == RQUOTA_STAT_EPERM) {
            error = EPERM;
        } else {
            error = EIO;
        }
    }
    /* block limits come back in units of bsize bytes; convert to bytes */
    nfsm_chain_get_32(error, &nmrep, bsize);
    nfsm_chain_adv(error, &nmrep, NFSX_UNSIGNED);
    nfsm_chain_get_32(error, &nmrep, val);
    nfsmout_if(error);
    dqb->dqb_bhardlimit = (uint64_t)val * bsize;
    nfsm_chain_get_32(error, &nmrep, val);
    nfsmout_if(error);
    dqb->dqb_bsoftlimit = (uint64_t)val * bsize;
    nfsm_chain_get_32(error, &nmrep, val);
    nfsmout_if(error);
    dqb->dqb_curbytes = (uint64_t)val * bsize;
    nfsm_chain_get_32(error, &nmrep, dqb->dqb_ihardlimit);
    nfsm_chain_get_32(error, &nmrep, dqb->dqb_isoftlimit);
    nfsm_chain_get_32(error, &nmrep, dqb->dqb_curinodes);
    nfsm_chain_get_32(error, &nmrep, dqb->dqb_btime);
    nfsm_chain_get_32(error, &nmrep, dqb->dqb_itime);
    nfsmout_if(error);
    dqb->dqb_id = id;
nfsmout:
    nfsm_chain_cleanup(&nmreq);
    nfsm_chain_cleanup(&nmrep);
    return error;
}
4932 #if CONFIG_NFS4
/*
 * Fetch quota information for an NFSv4 mount by issuing a
 * PUTFH+GETATTR compound on the root filehandle requesting the
 * quota_avail_hard/quota_avail_soft/quota_used attributes, which
 * nfs4_parsefattr() decodes into *dqb.
 *
 * Only user quotas exist in NFSv4, and the quota values reported are
 * those of the user whose credential makes the request — so a temporary
 * credential with the requested uid is built when necessary.
 */
int
nfs4_getquota(struct nfsmount *nmp, vfs_context_t ctx, uid_t id, int type, struct dqblk *dqb)
{
    nfsnode_t np;
    int error = 0, status, nfsvers, numops;
    u_int64_t xid;
    struct nfsm_chain nmreq, nmrep;
    uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
    thread_t thd = vfs_context_thread(ctx);
    kauth_cred_t cred = vfs_context_ucred(ctx);
    struct nfsreq_secinfo_args si;

    if (type != USRQUOTA) { /* NFSv4 only supports user quotas */
        return ENOTSUP;
    }

    /* first check that the server supports any of the quota attributes */
    if (!NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_AVAIL_HARD) &&
        !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_AVAIL_SOFT) &&
        !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_USED)) {
        return ENOTSUP;
    }

    /*
     * The credential passed to the server needs to have
     * an effective uid that matches the given uid.
     */
    if (id != kauth_cred_getuid(cred)) {
        /* build a temporary credential with the target uid but the caller's groups */
        struct posix_cred temp_pcred;
        posix_cred_t pcred = posix_cred_get(cred);
        bzero(&temp_pcred, sizeof(temp_pcred));
        temp_pcred.cr_uid = id;
        temp_pcred.cr_ngroups = pcred->cr_ngroups;
        bcopy(pcred->cr_groups, temp_pcred.cr_groups, sizeof(temp_pcred.cr_groups));
        cred = posix_cred_create(&temp_pcred);
        if (!IS_VALID_CRED(cred)) {
            return ENOMEM;
        }
    } else {
        /* take our own reference; released at the end either way */
        kauth_cred_ref(cred);
    }

    nfsvers = nmp->nm_vers;
    np = nmp->nm_dnp;
    if (!np) {
        error = ENXIO;
    }
    if (error || ((error = vnode_get(NFSTOV(np))))) {
        kauth_cred_unref(&cred);
        return error;
    }

    NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
    nfsm_chain_null(&nmreq);
    nfsm_chain_null(&nmrep);

    // PUTFH + GETATTR
    numops = 2;
    nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
    nfsm_chain_add_compound_header(error, &nmreq, "quota", nmp->nm_minor_vers, numops);
    numops--;
    nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
    nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
    numops--;
    nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
    NFS_CLEAR_ATTRIBUTES(bitmap);
    NFS_BITMAP_SET(bitmap, NFS_FATTR_QUOTA_AVAIL_HARD);
    NFS_BITMAP_SET(bitmap, NFS_FATTR_QUOTA_AVAIL_SOFT);
    NFS_BITMAP_SET(bitmap, NFS_FATTR_QUOTA_USED);
    nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
    nfsm_chain_build_done(error, &nmreq);
    nfsm_assert(error, (numops == 0), EPROTO);
    nfsmout_if(error);
    error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, &nmrep, &xid, &status);
    nfsm_chain_skip_tag(error, &nmrep);
    nfsm_chain_get_32(error, &nmrep, numops);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
    nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
    nfsm_assert(error, NFSTONMP(np), ENXIO);
    nfsmout_if(error);
    /* decode the quota attributes directly into the dqblk */
    error = nfs4_parsefattr(&nmrep, NULL, NULL, NULL, dqb, NULL);
    nfsmout_if(error);
    nfsm_assert(error, NFSTONMP(np), ENXIO);
nfsmout:
    nfsm_chain_cleanup(&nmreq);
    nfsm_chain_cleanup(&nmrep);
    vnode_put(NFSTOV(np));
    kauth_cred_unref(&cred);
    return error;
}
5023 #endif /* CONFIG_NFS4 */
5024 int
nfs_vfs_quotactl(mount_t mp,int cmds,uid_t uid,caddr_t datap,vfs_context_t ctx)5025 nfs_vfs_quotactl(mount_t mp, int cmds, uid_t uid, caddr_t datap, vfs_context_t ctx)
5026 {
5027 struct nfsmount *nmp;
5028 int cmd, type, error, nfsvers;
5029 uid_t euid = kauth_cred_getuid(vfs_context_ucred(ctx));
5030 struct dqblk *dqb = (struct dqblk*)datap;
5031
5032 nmp = VFSTONFS(mp);
5033 if (nfs_mount_gone(nmp)) {
5034 return ENXIO;
5035 }
5036 nfsvers = nmp->nm_vers;
5037
5038 if (uid == ~0U) {
5039 uid = euid;
5040 }
5041
5042 /* we can only support Q_GETQUOTA */
5043 cmd = cmds >> SUBCMDSHIFT;
5044 switch (cmd) {
5045 case Q_GETQUOTA:
5046 break;
5047 case Q_QUOTAON:
5048 case Q_QUOTAOFF:
5049 case Q_SETQUOTA:
5050 case Q_SETUSE:
5051 case Q_SYNC:
5052 case Q_QUOTASTAT:
5053 return ENOTSUP;
5054 default:
5055 return EINVAL;
5056 }
5057
5058 type = cmds & SUBCMDMASK;
5059 if ((u_int)type >= MAXQUOTAS) {
5060 return EINVAL;
5061 }
5062 if ((uid != euid) && ((error = vfs_context_suser(ctx)))) {
5063 return NFS_MAPERR(error);
5064 }
5065
5066 if (vfs_busy(mp, LK_NOWAIT)) {
5067 return 0;
5068 }
5069 bzero(dqb, sizeof(*dqb));
5070 error = nmp->nm_funcs->nf_getquota(nmp, ctx, uid, type, dqb);
5071 vfs_unbusy(mp);
5072 return NFS_MAPERR(error);
5073 }
5074 #endif
5075
5076 /*
5077 * Flush out the buffer cache
5078 */
5079 int nfs_sync_callout(vnode_t, void *);
5080
/* argument bundle passed through vnode_iterate() to nfs_sync_callout() */
struct nfs_sync_cargs {
    vfs_context_t ctx;      /* caller's VFS context */
    int waitfor;            /* flush mode, forwarded to nfs_flush() */
    int error;              /* last nonzero error from nfs_flush() */
};
5086
5087 int
nfs_sync_callout(vnode_t vp,void * arg)5088 nfs_sync_callout(vnode_t vp, void *arg)
5089 {
5090 struct nfs_sync_cargs *cargs = (struct nfs_sync_cargs*)arg;
5091 nfsnode_t np = VTONFS(vp);
5092 int error;
5093
5094 if (np->n_flag & NREVOKE) {
5095 vn_revoke(vp, REVOKEALL, cargs->ctx);
5096 return VNODE_RETURNED;
5097 }
5098
5099 if (LIST_EMPTY(&np->n_dirtyblkhd)) {
5100 return VNODE_RETURNED;
5101 }
5102 if (np->n_wrbusy > 0) {
5103 return VNODE_RETURNED;
5104 }
5105 if (np->n_bflag & (NBFLUSHINPROG | NBINVALINPROG)) {
5106 return VNODE_RETURNED;
5107 }
5108
5109 error = nfs_flush(np, cargs->waitfor, vfs_context_thread(cargs->ctx), 0);
5110 if (error) {
5111 cargs->error = error;
5112 }
5113
5114 return VNODE_RETURNED;
5115 }
5116
5117 int
nfs_vfs_sync(mount_t mp,int waitfor,vfs_context_t ctx)5118 nfs_vfs_sync(mount_t mp, int waitfor, vfs_context_t ctx)
5119 {
5120 struct nfs_sync_cargs cargs;
5121
5122 cargs.waitfor = waitfor;
5123 cargs.ctx = ctx;
5124 cargs.error = 0;
5125
5126 vnode_iterate(mp, 0, nfs_sync_callout, &cargs);
5127
5128 return cargs.error;
5129 }
5130
5131 /*
5132 * NFS flat namespace lookup.
5133 * Currently unsupported.
5134 */
5135 /*ARGSUSED*/
int
nfs_vfs_vget(
    __unused mount_t mp,
    __unused ino64_t ino,
    __unused vnode_t *vpp,
    __unused vfs_context_t ctx)
{
    /* NFS has no flat-namespace (inode-number) lookup; always unsupported. */
    return ENOTSUP;
}
5145
5146 /*
5147 * At this point, this should never happen
5148 */
5149 /*ARGSUSED*/
int
nfs_vfs_fhtovp(
    __unused mount_t mp,
    __unused int fhlen,
    __unused unsigned char *fhp,
    __unused vnode_t *vpp,
    __unused vfs_context_t ctx)
{
    /* file-handle-to-vnode conversion is not provided for NFS client mounts */
    return ENOTSUP;
}
5160
5161 /*
5162 * Vnode pointer to File handle, should never happen either
5163 */
5164 /*ARGSUSED*/
int
nfs_vfs_vptofh(
    __unused vnode_t vp,
    __unused int *fhlenp,
    __unused unsigned char *fhp,
    __unused vfs_context_t ctx)
{
    /* vnode-to-file-handle conversion is not provided for NFS client mounts */
    return ENOTSUP;
}
5174
5175 /*
5176 * Vfs start routine, a no-op.
5177 */
5178 /*ARGSUSED*/
int
nfs_vfs_start(
    __unused mount_t mp,
    __unused int flags,
    __unused vfs_context_t ctx)
{
    /* nothing to do after mount; always succeeds */
    return 0;
}
5187
5188 /*
5189 * Build the mount info buffer for NFS_MOUNTINFO.
5190 */
/*
 * Build the mount info buffer for NFS_MOUNTINFO.
 *
 * Assembles an XDR buffer describing the mount for userland:
 *   info version, mount-info attribute/flag bitmaps, the original mount
 *   arguments (verbatim from nm_args), the *current* mount argument
 *   values re-encoded in XDR args format, and the current fs location
 *   index.  Several length fields are written as placeholder zeros and
 *   backpatched at the end once the offsets are known.
 *
 * On success the assembled xdrbuf is handed to the caller via *xb
 * (caller owns cleanup); on error the local buffer is cleaned up here.
 */
int
nfs_mountinfo_assemble(struct nfsmount *nmp, struct xdrbuf *xb)
{
    struct xdrbuf xbinfo, xborig;
    char sotype[16];
    uint32_t origargsvers, origargslength;
    size_t infolength_offset, curargsopaquelength_offset, curargslength_offset, attrslength_offset, curargs_end_offset, end_offset;
    uint32_t miattrs[NFS_MIATTR_BITMAP_LEN];
    uint32_t miflags_mask[NFS_MIFLAG_BITMAP_LEN];
    uint32_t miflags[NFS_MIFLAG_BITMAP_LEN];
    uint32_t mattrs[NFS_MATTR_BITMAP_LEN];
    uint32_t mflags_mask[NFS_MFLAG_BITMAP_LEN];
    uint32_t mflags[NFS_MFLAG_BITMAP_LEN];
    uint32_t loc, serv, addr, comp;
    int i, timeo, error = 0;

    /* set up mount info attr and flag bitmaps */
    NFS_BITMAP_ZERO(miattrs, NFS_MIATTR_BITMAP_LEN);
    NFS_BITMAP_SET(miattrs, NFS_MIATTR_FLAGS);
    NFS_BITMAP_SET(miattrs, NFS_MIATTR_ORIG_ARGS);
    NFS_BITMAP_SET(miattrs, NFS_MIATTR_CUR_ARGS);
    NFS_BITMAP_SET(miattrs, NFS_MIATTR_CUR_LOC_INDEX);
    NFS_BITMAP_ZERO(miflags_mask, NFS_MIFLAG_BITMAP_LEN);
    NFS_BITMAP_ZERO(miflags, NFS_MIFLAG_BITMAP_LEN);
    NFS_BITMAP_SET(miflags_mask, NFS_MIFLAG_DEAD);
    NFS_BITMAP_SET(miflags_mask, NFS_MIFLAG_NOTRESP);
    NFS_BITMAP_SET(miflags_mask, NFS_MIFLAG_RECOVERY);
    if (nmp->nm_state & NFSSTA_DEAD) {
        NFS_BITMAP_SET(miflags, NFS_MIFLAG_DEAD);
    }
    if ((nmp->nm_state & (NFSSTA_TIMEO | NFSSTA_JUKEBOXTIMEO)) ||
        ((nmp->nm_state & NFSSTA_LOCKTIMEO) && (nmp->nm_lockmode == NFS_LOCK_MODE_ENABLED))) {
        NFS_BITMAP_SET(miflags, NFS_MIFLAG_NOTRESP);
    }
    if (nmp->nm_state & NFSSTA_RECOVER) {
        NFS_BITMAP_SET(miflags, NFS_MIFLAG_RECOVERY);
    }

    /* get original mount args length */
    xb_init_buffer(&xborig, nmp->nm_args, 2 * XDRWORD);
    xb_get_32(error, &xborig, origargsvers); /* version */
    xb_get_32(error, &xborig, origargslength); /* args length */
    nfsmerr_if(error);

    /* set up current mount attributes bitmap */
    NFS_BITMAP_ZERO(mattrs, NFS_MATTR_BITMAP_LEN);
    NFS_BITMAP_SET(mattrs, NFS_MATTR_FLAGS);
    NFS_BITMAP_SET(mattrs, NFS_MATTR_NFS_VERSION);
#if CONFIG_NFS4
    if (nmp->nm_vers >= NFS_VER4) {
        NFS_BITMAP_SET(mattrs, NFS_MATTR_NFS_MINOR_VERSION);
    }
#endif
    NFS_BITMAP_SET(mattrs, NFS_MATTR_READ_SIZE);
    NFS_BITMAP_SET(mattrs, NFS_MATTR_WRITE_SIZE);
    NFS_BITMAP_SET(mattrs, NFS_MATTR_READDIR_SIZE);
    NFS_BITMAP_SET(mattrs, NFS_MATTR_READAHEAD);
    NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_REG_MIN);
    NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_REG_MAX);
    NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MIN);
    NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MAX);
    NFS_BITMAP_SET(mattrs, NFS_MATTR_LOCK_MODE);
    NFS_BITMAP_SET(mattrs, NFS_MATTR_SECURITY);
    if (nmp->nm_etype.selected < nmp->nm_etype.count) {
        NFS_BITMAP_SET(mattrs, NFS_MATTR_KERB_ETYPE);
    }
    NFS_BITMAP_SET(mattrs, NFS_MATTR_MAX_GROUP_LIST);
    NFS_BITMAP_SET(mattrs, NFS_MATTR_SOCKET_TYPE);
    if (nmp->nm_saddr->sa_family != AF_LOCAL) {
        NFS_BITMAP_SET(mattrs, NFS_MATTR_NFS_PORT);
    }
    if ((nmp->nm_vers < NFS_VER4) && nmp->nm_mountport && !nmp->nm_mount_localport) {
        NFS_BITMAP_SET(mattrs, NFS_MATTR_MOUNT_PORT);
    }
    NFS_BITMAP_SET(mattrs, NFS_MATTR_REQUEST_TIMEOUT);
    if (NMFLAG(nmp, SOFT)) {
        NFS_BITMAP_SET(mattrs, NFS_MATTR_SOFT_RETRY_COUNT);
    }
    if (nmp->nm_deadtimeout) {
        NFS_BITMAP_SET(mattrs, NFS_MATTR_DEAD_TIMEOUT);
    }
    if (nmp->nm_fh) {
        NFS_BITMAP_SET(mattrs, NFS_MATTR_FH);
    }
    NFS_BITMAP_SET(mattrs, NFS_MATTR_FS_LOCATIONS);
    NFS_BITMAP_SET(mattrs, NFS_MATTR_MNTFLAGS);
    if (origargsvers < NFS_ARGSVERSION_XDR) {
        NFS_BITMAP_SET(mattrs, NFS_MATTR_MNTFROM);
    }
    if (nmp->nm_realm) {
        NFS_BITMAP_SET(mattrs, NFS_MATTR_REALM);
    }
    if (nmp->nm_principal) {
        NFS_BITMAP_SET(mattrs, NFS_MATTR_PRINCIPAL);
    }
    if (nmp->nm_sprinc) {
        NFS_BITMAP_SET(mattrs, NFS_MATTR_SVCPRINCIPAL);
    }
    if (nmp->nm_nfs_localport) {
        NFS_BITMAP_SET(mattrs, NFS_MATTR_LOCAL_NFS_PORT);
    }
    if ((nmp->nm_vers < NFS_VER4) && nmp->nm_mount_localport) {
        NFS_BITMAP_SET(mattrs, NFS_MATTR_LOCAL_MOUNT_PORT);
    }

    /* set up current mount flags bitmap */
    /* first set the flags that we will be setting - either on OR off */
    NFS_BITMAP_ZERO(mflags_mask, NFS_MFLAG_BITMAP_LEN);
    NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_SOFT);
    NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_INTR);
    NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_RESVPORT);
    if (nmp->nm_sotype == SOCK_DGRAM) {
        NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NOCONNECT);
    }
    NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_DUMBTIMER);
    if (nmp->nm_vers < NFS_VER4) {
        NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_CALLUMNT);
    }
    if (nmp->nm_vers >= NFS_VER3) {
        NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_RDIRPLUS);
    }
    NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NONEGNAMECACHE);
    NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_MUTEJUKEBOX);
#if CONFIG_NFS4
    if (nmp->nm_vers >= NFS_VER4) {
        NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_EPHEMERAL);
        NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NOCALLBACK);
        NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NAMEDATTR);
        NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NOACL);
        NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_ACLONLY);
    }
#endif
    NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NFC);
    NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NOQUOTA);
    if (nmp->nm_vers < NFS_VER4) {
        NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_MNTUDP);
    }
    NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_MNTQUICK);
    /* now set the flags that should be set */
    NFS_BITMAP_ZERO(mflags, NFS_MFLAG_BITMAP_LEN);
    if (NMFLAG(nmp, SOFT)) {
        NFS_BITMAP_SET(mflags, NFS_MFLAG_SOFT);
    }
    if (NMFLAG(nmp, INTR)) {
        NFS_BITMAP_SET(mflags, NFS_MFLAG_INTR);
    }
    if (NMFLAG(nmp, RESVPORT)) {
        NFS_BITMAP_SET(mflags, NFS_MFLAG_RESVPORT);
    }
    if ((nmp->nm_sotype == SOCK_DGRAM) && NMFLAG(nmp, NOCONNECT)) {
        NFS_BITMAP_SET(mflags, NFS_MFLAG_NOCONNECT);
    }
    if (NMFLAG(nmp, DUMBTIMER)) {
        NFS_BITMAP_SET(mflags, NFS_MFLAG_DUMBTIMER);
    }
    if ((nmp->nm_vers < NFS_VER4) && NMFLAG(nmp, CALLUMNT)) {
        NFS_BITMAP_SET(mflags, NFS_MFLAG_CALLUMNT);
    }
    if ((nmp->nm_vers >= NFS_VER3) && NMFLAG(nmp, RDIRPLUS)) {
        NFS_BITMAP_SET(mflags, NFS_MFLAG_RDIRPLUS);
    }
    if (NMFLAG(nmp, NONEGNAMECACHE)) {
        NFS_BITMAP_SET(mflags, NFS_MFLAG_NONEGNAMECACHE);
    }
    if (NMFLAG(nmp, MUTEJUKEBOX)) {
        NFS_BITMAP_SET(mflags, NFS_MFLAG_MUTEJUKEBOX);
    }
#if CONFIG_NFS4
    if (nmp->nm_vers >= NFS_VER4) {
        if (NMFLAG(nmp, EPHEMERAL)) {
            NFS_BITMAP_SET(mflags, NFS_MFLAG_EPHEMERAL);
        }
        if (NMFLAG(nmp, NOCALLBACK)) {
            NFS_BITMAP_SET(mflags, NFS_MFLAG_NOCALLBACK);
        }
        if (NMFLAG(nmp, NAMEDATTR)) {
            NFS_BITMAP_SET(mflags, NFS_MFLAG_NAMEDATTR);
        }
        if (NMFLAG(nmp, NOACL)) {
            NFS_BITMAP_SET(mflags, NFS_MFLAG_NOACL);
        }
        if (NMFLAG(nmp, ACLONLY)) {
            NFS_BITMAP_SET(mflags, NFS_MFLAG_ACLONLY);
        }
    }
#endif
    if (NMFLAG(nmp, NFC)) {
        NFS_BITMAP_SET(mflags, NFS_MFLAG_NFC);
    }
    /* also report NOQUOTA when a v4 server supports none of the quota attributes */
    if (NMFLAG(nmp, NOQUOTA) || ((nmp->nm_vers >= NFS_VER4) &&
        !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_AVAIL_HARD) &&
        !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_AVAIL_SOFT) &&
        !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_USED))) {
        NFS_BITMAP_SET(mflags, NFS_MFLAG_NOQUOTA);
    }
    if ((nmp->nm_vers < NFS_VER4) && NMFLAG(nmp, MNTUDP)) {
        NFS_BITMAP_SET(mflags, NFS_MFLAG_MNTUDP);
    }
    if (NMFLAG(nmp, MNTQUICK)) {
        NFS_BITMAP_SET(mflags, NFS_MFLAG_MNTQUICK);
    }

    /* assemble info buffer: */
    xb_init_buffer(&xbinfo, NULL, 0);
    xb_add_32(error, &xbinfo, NFS_MOUNT_INFO_VERSION);
    infolength_offset = xb_offset(&xbinfo);
    xb_add_32(error, &xbinfo, 0); /* total length placeholder, backpatched below */
    xb_add_bitmap(error, &xbinfo, miattrs, NFS_MIATTR_BITMAP_LEN);
    xb_add_bitmap(error, &xbinfo, miflags, NFS_MIFLAG_BITMAP_LEN);
    xb_add_32(error, &xbinfo, origargslength);
    if (!error) {
        /* NFS_MIATTR_ORIG_ARGS: original args copied through verbatim */
        error = xb_add_bytes(&xbinfo, nmp->nm_args, origargslength, 0);
    }

    /* the opaque byte count for the current mount args values: */
    curargsopaquelength_offset = xb_offset(&xbinfo);
    xb_add_32(error, &xbinfo, 0); /* placeholder, backpatched below */

    /* Encode current mount args values */
    xb_add_32(error, &xbinfo, NFS_ARGSVERSION_XDR);
    curargslength_offset = xb_offset(&xbinfo);
    xb_add_32(error, &xbinfo, 0); /* placeholder, backpatched below */
    xb_add_32(error, &xbinfo, NFS_XDRARGS_VERSION_0);
    xb_add_bitmap(error, &xbinfo, mattrs, NFS_MATTR_BITMAP_LEN);
    attrslength_offset = xb_offset(&xbinfo);
    xb_add_32(error, &xbinfo, 0); /* placeholder, backpatched below */
    xb_add_bitmap(error, &xbinfo, mflags_mask, NFS_MFLAG_BITMAP_LEN);
    xb_add_bitmap(error, &xbinfo, mflags, NFS_MFLAG_BITMAP_LEN);
    xb_add_32(error, &xbinfo, nmp->nm_vers);                /* NFS_VERSION */
#if CONFIG_NFS4
    if (nmp->nm_vers >= NFS_VER4) {
        xb_add_32(error, &xbinfo, nmp->nm_minor_vers);      /* NFS_MINOR_VERSION */
    }
#endif
    xb_add_32(error, &xbinfo, nmp->nm_rsize);               /* READ_SIZE */
    xb_add_32(error, &xbinfo, nmp->nm_wsize);               /* WRITE_SIZE */
    xb_add_32(error, &xbinfo, nmp->nm_readdirsize);         /* READDIR_SIZE */
    xb_add_32(error, &xbinfo, nmp->nm_readahead);           /* READAHEAD */
    /* attribute-cache timeouts are encoded as (seconds, nanoseconds) pairs */
    xb_add_32(error, &xbinfo, nmp->nm_acregmin);            /* ATTRCACHE_REG_MIN (sec) */
    xb_add_32(error, &xbinfo, 0);                           /* ATTRCACHE_REG_MIN (nsec) */
    xb_add_32(error, &xbinfo, nmp->nm_acregmax);            /* ATTRCACHE_REG_MAX (sec) */
    xb_add_32(error, &xbinfo, 0);                           /* ATTRCACHE_REG_MAX (nsec) */
    xb_add_32(error, &xbinfo, nmp->nm_acdirmin);            /* ATTRCACHE_DIR_MIN (sec) */
    xb_add_32(error, &xbinfo, 0);                           /* ATTRCACHE_DIR_MIN (nsec) */
    xb_add_32(error, &xbinfo, nmp->nm_acdirmax);            /* ATTRCACHE_DIR_MAX (sec) */
    xb_add_32(error, &xbinfo, 0);                           /* ATTRCACHE_DIR_MAX (nsec) */
    xb_add_32(error, &xbinfo, nmp->nm_lockmode);            /* LOCK_MODE */
    if (nmp->nm_sec.count) {
        xb_add_32(error, &xbinfo, nmp->nm_sec.count);       /* SECURITY */
        nfsmerr_if(error);
        for (i = 0; i < nmp->nm_sec.count; i++) {
            xb_add_32(error, &xbinfo, nmp->nm_sec.flavors[i]);
        }
    } else if (nmp->nm_servsec.count) {
        xb_add_32(error, &xbinfo, nmp->nm_servsec.count);   /* SECURITY */
        nfsmerr_if(error);
        for (i = 0; i < nmp->nm_servsec.count; i++) {
            xb_add_32(error, &xbinfo, nmp->nm_servsec.flavors[i]);
        }
    } else {
        xb_add_32(error, &xbinfo, 1);                       /* SECURITY */
        xb_add_32(error, &xbinfo, nmp->nm_auth);
    }
    if (nmp->nm_etype.selected < nmp->nm_etype.count) {
        /* NFS_MATTR_KERB_ETYPE: count, selected index, then the etype list */
        xb_add_32(error, &xbinfo, nmp->nm_etype.count);
        xb_add_32(error, &xbinfo, nmp->nm_etype.selected);
        for (uint32_t j = 0; j < nmp->nm_etype.count; j++) {
            xb_add_32(error, &xbinfo, nmp->nm_etype.etypes[j]);
        }
        nfsmerr_if(error);
    }
    xb_add_32(error, &xbinfo, nmp->nm_numgrps);             /* MAX_GROUP_LIST */
    nfsmerr_if(error);

    switch (nmp->nm_saddr->sa_family) {
    case AF_INET:
    case AF_INET6:
        snprintf(sotype, sizeof(sotype), "%s%s", (nmp->nm_sotype == SOCK_DGRAM) ? "udp" : "tcp",
            nmp->nm_sofamily ? (nmp->nm_sofamily == AF_INET) ? "4" : "6" : "");
        xb_add_string(error, &xbinfo, sotype, strlen(sotype));      /* SOCKET_TYPE */
        xb_add_32(error, &xbinfo, ntohs(((struct sockaddr_in*)nmp->nm_saddr)->sin_port)); /* NFS_PORT */
        if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MOUNT_PORT)) {
            xb_add_32(error, &xbinfo, nmp->nm_mountport);           /* MOUNT_PORT */
        }
        break;
    case AF_LOCAL:
        strlcpy(sotype, (nmp->nm_sotype == SOCK_DGRAM) ? "ticlts" : "ticotsord", sizeof(sotype));
        xb_add_string(error, &xbinfo, sotype, strlen(sotype));
        break;
    default:
        NFS_VFS_DBG("Unsupported address family %d\n", nmp->nm_saddr->sa_family);
        printf("Unsupported address family %d\n", nmp->nm_saddr->sa_family);
        error = EINVAL;
        break;
    }

    /* REQUEST_TIMEOUT: nm_timeo is in NFS_HZ ticks; encode as (sec, nsec) */
    timeo = (nmp->nm_timeo * 10) / NFS_HZ;
    xb_add_32(error, &xbinfo, timeo / 10);                  /* REQUEST_TIMEOUT (sec) */
    xb_add_32(error, &xbinfo, (timeo % 10) * 100000000);    /* REQUEST_TIMEOUT (nsec) */
    if (NMFLAG(nmp, SOFT)) {
        xb_add_32(error, &xbinfo, nmp->nm_retry);           /* SOFT_RETRY_COUNT */
    }
    if (nmp->nm_deadtimeout) {
        xb_add_32(error, &xbinfo, nmp->nm_deadtimeout);     /* DEAD_TIMEOUT (sec) */
        xb_add_32(error, &xbinfo, 0);                       /* DEAD_TIMEOUT (nsec) */
    }
    if (nmp->nm_fh) {
        xb_add_fh(error, &xbinfo, &nmp->nm_fh->fh_data[0], nmp->nm_fh->fh_len); /* FH */
    }
    /* FS_LOCATIONS: locations -> servers (name + addresses) -> path components */
    xb_add_32(error, &xbinfo, nmp->nm_locations.nl_numlocs);                    /* FS_LOCATIONS */
    for (loc = 0; !error && (loc < nmp->nm_locations.nl_numlocs); loc++) {
        xb_add_32(error, &xbinfo, nmp->nm_locations.nl_locations[loc]->nl_servcount);
        for (serv = 0; !error && (serv < nmp->nm_locations.nl_locations[loc]->nl_servcount); serv++) {
            xb_add_string(error, &xbinfo, nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_name,
                strlen(nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_name));
            xb_add_32(error, &xbinfo, nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount);
            for (addr = 0; !error && (addr < nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount); addr++) {
                xb_add_string(error, &xbinfo, nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addresses[addr],
                    strlen(nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addresses[addr]));
            }
            xb_add_32(error, &xbinfo, 0); /* empty server info */
        }
        xb_add_32(error, &xbinfo, nmp->nm_locations.nl_locations[loc]->nl_path.np_compcount);
        for (comp = 0; !error && (comp < nmp->nm_locations.nl_locations[loc]->nl_path.np_compcount); comp++) {
            xb_add_string(error, &xbinfo, nmp->nm_locations.nl_locations[loc]->nl_path.np_components[comp],
                strlen(nmp->nm_locations.nl_locations[loc]->nl_path.np_components[comp]));
        }
        xb_add_32(error, &xbinfo, 0); /* empty fs location info */
    }
    xb_add_32(error, &xbinfo, vfs_flags(nmp->nm_mountp));           /* MNTFLAGS */
    if (origargsvers < NFS_ARGSVERSION_XDR) {
        xb_add_string(error, &xbinfo, vfs_statfs(nmp->nm_mountp)->f_mntfromname,
            strlen(vfs_statfs(nmp->nm_mountp)->f_mntfromname));     /* MNTFROM */
    }
    if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_REALM)) {
        xb_add_string(error, &xbinfo, nmp->nm_realm, strlen(nmp->nm_realm));
    }
    if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_PRINCIPAL)) {
        xb_add_string(error, &xbinfo, nmp->nm_principal, strlen(nmp->nm_principal));
    }
    if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SVCPRINCIPAL)) {
        xb_add_string(error, &xbinfo, nmp->nm_sprinc, strlen(nmp->nm_sprinc));
    }
    if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_LOCAL_NFS_PORT)) {
        /* AF_LOCAL mounts carry the socket path instead of a port number */
        struct sockaddr_un *un = (struct sockaddr_un *)nmp->nm_saddr;
        xb_add_string(error, &xbinfo, un->sun_path, strlen(un->sun_path));
    }
    if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_LOCAL_MOUNT_PORT)) {
        xb_add_string(error, &xbinfo, nmp->nm_mount_localport, strlen(nmp->nm_mount_localport));
    }
    curargs_end_offset = xb_offset(&xbinfo);

    /* NFS_MIATTR_CUR_LOC_INDEX */
    xb_add_32(error, &xbinfo, nmp->nm_locations.nl_current.nli_flags);
    xb_add_32(error, &xbinfo, nmp->nm_locations.nl_current.nli_loc);
    xb_add_32(error, &xbinfo, nmp->nm_locations.nl_current.nli_serv);
    xb_add_32(error, &xbinfo, nmp->nm_locations.nl_current.nli_addr);

    xb_build_done(error, &xbinfo);

    /* update opaque counts: backpatch the placeholder length words now that offsets are known */
    end_offset = xb_offset(&xbinfo);
    if (!error) {
        error = xb_seek(&xbinfo, attrslength_offset);
        xb_add_32(error, &xbinfo, curargs_end_offset - attrslength_offset - XDRWORD /*don't include length field*/);
    }
    if (!error) {
        error = xb_seek(&xbinfo, curargslength_offset);
        xb_add_32(error, &xbinfo, curargs_end_offset - curargslength_offset + XDRWORD /*version*/);
    }
    if (!error) {
        error = xb_seek(&xbinfo, curargsopaquelength_offset);
        xb_add_32(error, &xbinfo, curargs_end_offset - curargslength_offset + XDRWORD /*version*/);
    }
    if (!error) {
        error = xb_seek(&xbinfo, infolength_offset);
        xb_add_32(error, &xbinfo, end_offset - infolength_offset + XDRWORD /*version*/);
    }
    nfsmerr_if(error);

    /* copy result xdrbuf to caller */
    *xb = xbinfo;

    /* and mark the local copy as not needing cleanup */
    xbinfo.xb_flags &= ~XB_CLEANUP;
nfsmerr:
    xb_cleanup(&xbinfo);
    return error;
}
5580
5581 /*
5582 * Do that sysctl thang...
5583 */
/*
 * nfs_vfs_sysctl:  VFS-level sysctl handler for the NFS client.
 *
 * Handles both the NFS-private selectors (NFS_NFSSTATS, NFS_NFSZEROSTATS,
 * NFS_MOUNTINFO) and the generic "new style" VFS_CTL_* selectors
 * (TIMEO, NOLOCKS, NSTATUS, and — on macOS targets — QUERY).
 *
 * name/namelen: sysctl MIB name (all names at this level are terminal).
 * oldp/oldlenp: user buffer for results; for VFS_CTL_* selectors oldp is
 *               actually a struct sysctl_req pointer (see CAST_DOWN below).
 * newp/newlen:  user buffer with new values, if any.
 * ctx:          caller's VFS context (used for 32/64-bit user ABI detection).
 *
 * Returns 0 or an errno, passed through NFS_MAPERR() on most paths.
 */
int
nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
    user_addr_t newp, size_t newlen, vfs_context_t ctx)
{
	int error = 0, val;
	struct sysctl_req *req = NULL;
	union union_vfsidctl vc;
	mount_t mp;
	struct nfsmount *nmp = NULL;
	struct vfsquery vq;
	struct nfsreq *rq;
	boolean_t is_64_bit;
	fsid_t fsid;
	struct xdrbuf xb;
	struct netfs_status *nsp = NULL;
	int timeoutmask;
	uint totlen, count, numThreads;

	/*
	 * All names at this level are terminal.
	 */
	if (namelen > 1) {
		return ENOTDIR; /* overloaded */
	}
	is_64_bit = vfs_context_is64bit(ctx);

	/* common code for "new style" VFS_CTL sysctl, get the mount. */
	switch (name[0]) {
	case VFS_CTL_TIMEO:
	case VFS_CTL_NOLOCKS:
	case VFS_CTL_NSTATUS:
#if defined(XNU_TARGET_OS_OSX)
	case VFS_CTL_QUERY:
#endif /* XNU_TARGET_OS_OSX */
		/*
		 * For VFS_CTL_* the "oldp" argument is really a pointer to
		 * the in-kernel sysctl request; recover it and read the
		 * vfsidctl (32- or 64-bit layout) to locate the mount.
		 */
		req = CAST_DOWN(struct sysctl_req *, oldp);
		if (req == NULL) {
			return EFAULT;
		}
		error = SYSCTL_IN(req, &vc, is_64_bit? sizeof(vc.vc64):sizeof(vc.vc32));
		if (error) {
			return NFS_MAPERR(error);
		}
		mp = vfs_getvfs(&vc.vc32.vc_fsid); /* works for 32 and 64 */
		if (mp == NULL) {
			return ENOENT;
		}
		nmp = VFSTONFS(mp);
		if (!nmp) {
			return ENOENT;
		}
		bzero(&vq, sizeof(vq));
		/* redirect the request's "new value" pointers at the payload
		 * embedded in the vfsidctl so SYSCTL_IN below reads from it */
		req->newidx = 0;
		if (is_64_bit) {
			req->newptr = vc.vc64.vc_ptr;
			req->newlen = (size_t)vc.vc64.vc_len;
		} else {
			req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
			req->newlen = vc.vc32.vc_len;
		}
		break;
#if !defined(XNU_TARGET_OS_OSX)
	case VFS_CTL_QUERY:
		return EPERM;
#endif /* ! XNU_TARGET_OS_OSX */
	}

	switch (name[0]) {
	case NFS_NFSSTATS:
		/* Export (and optionally import) the global client stats. */
		if (!oldp) {
			/* size probe only */
			*oldlenp = sizeof nfsclntstats;
			return 0;
		}

		if (*oldlenp < sizeof nfsclntstats) {
			*oldlenp = sizeof nfsclntstats;
			return ENOMEM;
		}

		error = copyout(&nfsclntstats, oldp, sizeof nfsclntstats);
		if (error) {
			return NFS_MAPERR(error);
		}

		if (newp && newlen != sizeof nfsclntstats) {
			return EINVAL;
		}

		if (newp) {
			/* allow the caller to overwrite the stats wholesale */
			return copyin(newp, &nfsclntstats, sizeof nfsclntstats);
		}
		return 0;
	case NFS_NFSZEROSTATS:
		bzero(&nfsclntstats, sizeof nfsclntstats);
		return 0;
	case NFS_MOUNTINFO:
		/* read in the fsid */
		if (*oldlenp < sizeof(fsid)) {
			return EINVAL;
		}
		if ((error = copyin(oldp, &fsid, sizeof(fsid)))) {
			return NFS_MAPERR(error);
		}
		/* swizzle it back to host order */
		fsid.val[0] = ntohl(fsid.val[0]);
		fsid.val[1] = ntohl(fsid.val[1]);
		/* find mount and make sure it's NFS */
		if (((mp = vfs_getvfs(&fsid))) == NULL) {
			return ENOENT;
		}
		if (strcmp(vfs_statfs(mp)->f_fstypename, "nfs")) {
			return EINVAL;
		}
		if (((nmp = VFSTONFS(mp))) == NULL) {
			return ENOENT;
		}
		/* build the XDR mount-info blob and copy it out */
		xb_init(&xb, XDRBUF_NONE);
		if ((error = nfs_mountinfo_assemble(nmp, &xb))) {
			return NFS_MAPERR(error);
		}
		if (*oldlenp < xb.xb_u.xb_buffer.xbb_len) {
			error = ENOMEM;
		} else {
			error = copyout(xb_buffer_base(&xb), oldp, xb.xb_u.xb_buffer.xbb_len);
		}
		/* always report the required length, even on ENOMEM */
		*oldlenp = xb.xb_u.xb_buffer.xbb_len;
		xb_cleanup(&xb);
		break;
	case VFS_CTL_NOLOCKS:
		/* get/set whether NFS file locking is disabled on this mount */
		if (req->oldptr != USER_ADDR_NULL) {
			lck_mtx_lock(&nmp->nm_lock);
			val = (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED) ? 1 : 0;
			lck_mtx_unlock(&nmp->nm_lock);
			error = SYSCTL_OUT(req, &val, sizeof(val));
			if (error) {
				return NFS_MAPERR(error);
			}
		}
		if (req->newptr != USER_ADDR_NULL) {
			error = SYSCTL_IN(req, &val, sizeof(val));
			if (error) {
				return NFS_MAPERR(error);
			}
			lck_mtx_lock(&nmp->nm_lock);
			if (nmp->nm_lockmode == NFS_LOCK_MODE_LOCAL) {
				/* can't toggle locks when using local locks */
				error = EINVAL;
#if CONFIG_NFS4
			} else if ((nmp->nm_vers >= NFS_VER4) && val) {
				/* can't disable locks for NFSv4 */
				error = EINVAL;
#endif
			} else if (val) {
				/* disabling: drop lockd registration for v2/v3 */
				if ((nmp->nm_vers <= NFS_VER3) && (nmp->nm_lockmode == NFS_LOCK_MODE_ENABLED)) {
					nfs_lockd_mount_unregister(nmp);
				}
				nmp->nm_lockmode = NFS_LOCK_MODE_DISABLED;
				nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
			} else {
				/* enabling: (re)register with lockd for v2/v3 */
				if ((nmp->nm_vers <= NFS_VER3) && (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED)) {
					nfs_lockd_mount_register(nmp);
				}
				nmp->nm_lockmode = NFS_LOCK_MODE_ENABLED;
			}
			lck_mtx_unlock(&nmp->nm_lock);
		}
		break;
#if defined(XNU_TARGET_OS_OSX)
	case VFS_CTL_QUERY:
		/* report responsiveness/death flags for this mount */
		lck_mtx_lock(&nmp->nm_lock);
		/* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
		int softnobrowse = (NMFLAG(nmp, SOFT) && (vfs_flags(nmp->nm_mountp) & MNT_DONTBROWSE));
		if (!softnobrowse && (nmp->nm_state & NFSSTA_TIMEO)) {
			vq.vq_flags |= VQ_NOTRESP;
		}
		if (!softnobrowse && (nmp->nm_state & NFSSTA_JUKEBOXTIMEO) && !NMFLAG(nmp, MUTEJUKEBOX)) {
			vq.vq_flags |= VQ_NOTRESP;
		}
		if (!softnobrowse && (nmp->nm_state & NFSSTA_LOCKTIMEO) &&
		    (nmp->nm_lockmode == NFS_LOCK_MODE_ENABLED)) {
			vq.vq_flags |= VQ_NOTRESP;
		}
		if (nmp->nm_state & NFSSTA_DEAD) {
			vq.vq_flags |= VQ_DEAD;
		}
		lck_mtx_unlock(&nmp->nm_lock);
		error = SYSCTL_OUT(req, &vq, sizeof(vq));
		break;
#endif /* XNU_TARGET_OS_OSX */
	case VFS_CTL_TIMEO:
		/* get/set the "server not responding" tprintf initial delay */
		if (req->oldptr != USER_ADDR_NULL) {
			lck_mtx_lock(&nmp->nm_lock);
			val = nmp->nm_tprintf_initial_delay;
			lck_mtx_unlock(&nmp->nm_lock);
			error = SYSCTL_OUT(req, &val, sizeof(val));
			if (error) {
				return NFS_MAPERR(error);
			}
		}
		if (req->newptr != USER_ADDR_NULL) {
			error = SYSCTL_IN(req, &val, sizeof(val));
			if (error) {
				return NFS_MAPERR(error);
			}
			lck_mtx_lock(&nmp->nm_lock);
			/* negative values are clamped to zero (disabled) */
			if (val < 0) {
				nmp->nm_tprintf_initial_delay = 0;
			} else {
				nmp->nm_tprintf_initial_delay = val;
			}
			lck_mtx_unlock(&nmp->nm_lock);
		}
		break;
	case VFS_CTL_NSTATUS:
		/*
		 * Return the status of this mount.  This is much more
		 * information than VFS_CTL_QUERY.  In addition to the
		 * vq_flags return the significant mount options along
		 * with the list of threads blocked on the mount and
		 * how long the threads have been waiting.
		 */

		/* lock order: nfs_request_mutex before nm_lock */
		lck_mtx_lock(&nfs_request_mutex);
		lck_mtx_lock(&nmp->nm_lock);

		/*
		 * Count the number of requests waiting for a reply.
		 * Note: there could be multiple requests from the same thread.
		 */
		numThreads = 0;
		TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
			if (rq->r_nmp == nmp) {
				numThreads++;
			}
		}

		/* Calculate total size of result buffer */
		totlen = sizeof(struct netfs_status) + (numThreads * sizeof(uint64_t));

		if (req->oldptr == USER_ADDR_NULL) {            // Caller is querying buffer size
			lck_mtx_unlock(&nmp->nm_lock);
			lck_mtx_unlock(&nfs_request_mutex);
			return SYSCTL_OUT(req, NULL, totlen);
		}
		if (req->oldlen < totlen) {     // Check if caller's buffer is big enough
			lck_mtx_unlock(&nmp->nm_lock);
			lck_mtx_unlock(&nfs_request_mutex);
			return ERANGE;
		}

		nsp = kalloc_data(totlen, Z_WAITOK | Z_ZERO);
		if (nsp == NULL) {
			lck_mtx_unlock(&nmp->nm_lock);
			lck_mtx_unlock(&nfs_request_mutex);
			return ENOMEM;
		}
		timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO;
		if (nmp->nm_state & timeoutmask) {
			nsp->ns_status |= VQ_NOTRESP;
		}
		if (nmp->nm_state & NFSSTA_DEAD) {
			nsp->ns_status |= VQ_DEAD;
		}

		(void) nfs_mountopts(nmp, nsp->ns_mountopts, sizeof(nsp->ns_mountopts));
		nsp->ns_threadcount = numThreads;

		/*
		 * Get the thread ids of threads waiting for a reply
		 * and find the longest wait time.
		 */
		if (numThreads > 0) {
			struct timeval now;
			time_t sendtime;
			uint64_t waittime;

			microuptime(&now);
			count = 0;
			sendtime = now.tv_sec;
			TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
				if (rq->r_nmp == nmp) {
					/* track the oldest (smallest) start time */
					if (rq->r_start < sendtime) {
						sendtime = rq->r_start;
					}
					// A thread_id of zero is used to represent an async I/O request.
					nsp->ns_threadids[count] =
					    rq->r_thread ? thread_tid(rq->r_thread) : 0;
					if (++count >= numThreads) {
						break;
					}
				}
			}
			waittime = now.tv_sec - sendtime;
			nsp->ns_waittime = waittime > UINT32_MAX ? UINT32_MAX : (uint32_t)waittime;
		}

		lck_mtx_unlock(&nmp->nm_lock);
		lck_mtx_unlock(&nfs_request_mutex);

		error = SYSCTL_OUT(req, nsp, totlen);
		kfree_data(nsp, totlen);
		break;
	default:
		return ENOTSUP;
	}
	return NFS_MAPERR(error);
}
5890
5891 #if CONFIG_NFS4
5892
5893 static int
mapname2id(struct nfs_testmapid * map)5894 mapname2id(struct nfs_testmapid *map)
5895 {
5896 int error;
5897 error = nfs4_id2guid(map->ntm_name, &map->ntm_guid, map->ntm_grpflag);
5898 if (error) {
5899 return error;
5900 }
5901
5902 if (map->ntm_grpflag) {
5903 error = kauth_cred_guid2gid(&map->ntm_guid, (gid_t *)&map->ntm_id);
5904 } else {
5905 error = kauth_cred_guid2uid(&map->ntm_guid, (uid_t *)&map->ntm_id);
5906 }
5907
5908 return error;
5909 }
5910
5911 static int
mapid2name(struct nfs_testmapid * map)5912 mapid2name(struct nfs_testmapid *map)
5913 {
5914 int error;
5915 size_t len = sizeof(map->ntm_name);
5916
5917 if (map->ntm_grpflag) {
5918 error = kauth_cred_gid2guid((gid_t)map->ntm_id, &map->ntm_guid);
5919 } else {
5920 error = kauth_cred_uid2guid((uid_t)map->ntm_id, &map->ntm_guid);
5921 }
5922
5923 if (error) {
5924 return error;
5925 }
5926
5927 error = nfs4_guid2id(&map->ntm_guid, map->ntm_name, &len, map->ntm_grpflag);
5928
5929 return error;
5930 }
5931
5932 static int
nfsclnt_testidmap(proc_t p,user_addr_t argp)5933 nfsclnt_testidmap(proc_t p, user_addr_t argp)
5934 {
5935 struct nfs_testmapid mapid;
5936 int error, coerror;
5937 size_t len = sizeof(mapid.ntm_name);
5938
5939 /* Let root make this call. */
5940 error = proc_suser(p);
5941 if (error) {
5942 return error;
5943 }
5944
5945 error = copyin(argp, &mapid, sizeof(mapid));
5946 mapid.ntm_name[MAXIDNAMELEN - 1] = '\0';
5947
5948 if (error) {
5949 return error;
5950 }
5951 switch (mapid.ntm_lookup) {
5952 case NTM_NAME2ID:
5953 error = mapname2id(&mapid);
5954 break;
5955 case NTM_ID2NAME:
5956 error = mapid2name(&mapid);
5957 break;
5958 case NTM_NAME2GUID:
5959 error = nfs4_id2guid(mapid.ntm_name, &mapid.ntm_guid, mapid.ntm_grpflag);
5960 break;
5961 case NTM_GUID2NAME:
5962 error = nfs4_guid2id(&mapid.ntm_guid, mapid.ntm_name, &len, mapid.ntm_grpflag);
5963 break;
5964 default:
5965 return EINVAL;
5966 }
5967
5968 coerror = copyout(&mapid, argp, sizeof(mapid));
5969
5970 return error ? error : coerror;
5971 }
5972 #endif /* CONFIG_NFS4 */
5973
5974 /*
5975 * Setup nfsclnt character device to be used by nfsclnt() system call.
5976 */
5977
/* Non-zero once the nfsclnt character device has been registered. */
static int nfsclnt_device_installed = 0;
/* Handle returned by devfs_make_node() for the nfsclnt device node. */
static void *nfsclnt_devfs = NULL;
/* Forward declaration of the ioctl entry point for nfsclnt_cdevsw. */
static d_ioctl_t nfsclnt_ioctl;
5981
/*
 * Character device switch for the nfsclnt control device.
 * Only ioctl is implemented; all other entry points use the
 * error-returning "eno_*" stubs.
 */
static const struct cdevsw nfsclnt_cdevsw =
{
	.d_open = eno_opcl,
	.d_close = eno_opcl,
	.d_read = eno_rdwrt,
	.d_write = eno_rdwrt,
	.d_ioctl = nfsclnt_ioctl,
	.d_stop = eno_stop,
	.d_reset = eno_reset,
	.d_ttys = NULL,
	.d_select = eno_select,
	.d_mmap = eno_mmap,
	.d_strategy = eno_strat,
	.d_reserved_1 = eno_getc,
	.d_reserved_2 = eno_putc,
	.d_type = 0
};
5999
6000 static int
nfsclnt_ioctl(__unused dev_t dev,u_long cmd,caddr_t data,__unused int flag,struct proc * p)6001 nfsclnt_ioctl(__unused dev_t dev, u_long cmd, caddr_t data,
6002 __unused int flag, struct proc *p)
6003 {
6004 struct lockd_ans la;
6005 int error;
6006 user_addr_t addr = (user_addr_t)data;
6007
6008 switch (cmd) {
6009 case NFSCLNT_LOCKDANS:
6010 error = copyin(addr, &la, sizeof(la));
6011 if (!error) {
6012 error = nfslockdans(p, &la);
6013 }
6014 break;
6015 case NFSCLNT_LOCKDNOTIFY:
6016 error = nfslockdnotify(p, addr);
6017 break;
6018 #if CONFIG_NFS4
6019 case NFSCLNT_TESTIDMAP:
6020 error = nfsclnt_testidmap(p, addr);
6021 break;
6022 #endif
6023 default:
6024 error = EINVAL;
6025 }
6026
6027 return NFS_MAPERR(error);
6028 }
6029
6030 int
nfsclnt_device_add(void)6031 nfsclnt_device_add(void)
6032 {
6033 int ret;
6034
6035 if (nfsclnt_device_installed) {
6036 return 0;
6037 }
6038
6039 nfsclnt_device_installed = 1;
6040
6041 ret = cdevsw_add(-1, &nfsclnt_cdevsw);
6042 if (ret < 0) {
6043 printf("nfsclnt_device_add: cdevsw_add failed on nfsclnt control device, err %d\n", ret);
6044 nfsclnt_device_installed = 0;
6045 return -1;
6046 }
6047
6048 nfsclnt_devfs = devfs_make_node(makedev(ret, 0), DEVFS_CHAR,
6049 UID_ROOT, GID_WHEEL, 0666, NFSCLNT_DEVICE, 0);
6050
6051 if (nfsclnt_devfs == NULL) {
6052 printf("nfsclnt_device_add: devfs_make_node failed on nfsclnt control device\n");
6053 nfsclnt_device_installed = 0;
6054 return -1;
6055 }
6056
6057 printf("nfsclnt_device_add: nfsclnt chardev was added successfully\n");
6058 return 0;
6059 }
6060
6061 #endif /* CONFIG_NFS_CLIENT */
6062