1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30 * Copyright (c) 1989, 1993, 1995
31 * The Regents of the University of California. All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Rick Macklem at The University of Guelph.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by the University of
47 * California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 * may be used to endorse or promote products derived from this software
50 * without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 * @(#)nfs_vfsops.c 8.12 (Berkeley) 5/20/95
65 * FreeBSD-Id: nfs_vfsops.c,v 1.52 1997/11/12 05:42:21 julian Exp $
66 */
67
68 #include <nfs/nfs_conf.h>
69 #if CONFIG_NFS_CLIENT
70
71 /*
72 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
73 * support for mandatory and extensible security protections. This notice
74 * is included in support of clause 2.2 (b) of the Apple Public License,
75 * Version 2.0.
76 */
77
78 #include <sys/param.h>
79 #include <sys/systm.h>
80 #include <sys/conf.h>
81 #include <sys/ioctl.h>
82 #include <sys/signal.h>
83 #include <sys/proc_internal.h> /* for fs rooting to update rootdir in fdp */
84 #include <sys/kauth.h>
85 #include <sys/vnode_internal.h>
86 #include <sys/malloc.h>
87 #include <sys/kernel.h>
88 #include <sys/sysctl.h>
89 #include <sys/mount_internal.h>
90 #include <sys/kpi_mbuf.h>
91 #include <sys/socket.h>
92 #include <sys/un.h>
93 #include <sys/socketvar.h>
94 #include <sys/fcntl.h>
95 #include <sys/quota.h>
96 #include <sys/priv.h>
97 #include <libkern/OSAtomic.h>
98 #include <IOKit/IOLib.h>
99
100 #include <sys/vm.h>
101 #include <sys/vmparam.h>
102
103 #if !defined(NO_MOUNT_PRIVATE)
104 #include <sys/filedesc.h>
105 #endif /* NO_MOUNT_PRIVATE */
106
107 #include <net/if.h>
108 #include <net/route.h>
109 #include <netinet/in.h>
110
111 #include <nfs/rpcv2.h>
112 #include <nfs/krpc.h>
113 #include <nfs/nfsproto.h>
114 #include <nfs/nfs.h>
115 #include <nfs/nfsnode.h>
116 #include <nfs/nfs_gss.h>
117 #include <nfs/nfsmount.h>
118 #include <nfs/xdr_subs.h>
119 #include <nfs/nfsm_subs.h>
120 #include <nfs/nfs_lock.h>
121
122 #include <miscfs/devfs/devfs.h>
123 #include <pexpert/pexpert.h>
124
125 #define NFS_VFS_DBG(...) NFSCLNT_DBG(NFSCLNT_FAC_VFS, 7, ## __VA_ARGS__)
126
127 /*
128 * NFS client globals
129 */
130
131 ZONE_DEFINE(nfsmnt_zone, "NFS mount",
132 sizeof(struct nfsmount), ZC_ZFREE_CLEARMEM);
133
134 int nfs_ticks;
135 static LCK_GRP_DECLARE(nfs_global_grp, "nfs_global");
136 static LCK_GRP_DECLARE(nfs_mount_grp, "nfs_mount");
137 LCK_MTX_DECLARE(nfs_global_mutex, &nfs_global_grp);
138 uint32_t nfs_fs_attr_bitmap[NFS_ATTR_BITMAP_LEN];
139 uint32_t nfs_object_attr_bitmap[NFS_ATTR_BITMAP_LEN];
140 uint32_t nfs_getattr_bitmap[NFS_ATTR_BITMAP_LEN];
141 uint32_t nfs4_getattr_write_bitmap[NFS_ATTR_BITMAP_LEN];
142 struct nfsclientidlist nfsclientids;
143
144 /* NFS requests */
145 struct nfs_reqqhead nfs_reqq;
146 LCK_GRP_DECLARE(nfs_request_grp, "nfs_request");
147 LCK_MTX_DECLARE(nfs_request_mutex, &nfs_request_grp);
148 thread_call_t nfs_request_timer_call;
149 int nfs_request_timer_on;
150 u_int64_t nfs_xid = 0;
151 u_int64_t nfs_xidwrap = 0; /* to build a (non-wrapping) 64 bit xid */
152
153 thread_call_t nfs_buf_timer_call;
154
155 /* NFSv4 */
156 LCK_GRP_DECLARE(nfs_open_grp, "nfs_open");
157 uint32_t nfs_open_owner_seqnum = 0;
158 uint32_t nfs_lock_owner_seqnum = 0;
159 thread_call_t nfs4_callback_timer_call;
160 int nfs4_callback_timer_on = 0;
161 char nfs4_default_domain[MAXPATHLEN];
162
163 /* nfsiod */
164 static LCK_GRP_DECLARE(nfsiod_lck_grp, "nfsiod");
165 LCK_MTX_DECLARE(nfsiod_mutex, &nfsiod_lck_grp);
166 struct nfsiodlist nfsiodfree, nfsiodwork;
167 struct nfsiodmountlist nfsiodmounts;
168 int nfsiod_thread_count = 0;
169 int nfsiod_thread_max = NFS_DEFASYNCTHREAD;
170 int nfs_max_async_writes = NFS_DEFMAXASYNCWRITES;
171
172 int nfs_iosize = NFS_IOSIZE;
173 int nfs_access_cache_timeout = NFS_MAXATTRTIMO;
174 int nfs_access_delete = 1; /* too many servers get this wrong - workaround on by default */
175 int nfs_access_dotzfs = 1;
176 int nfs_access_for_getattr = 0;
177 int nfs_allow_async = 0;
178 int nfs_statfs_rate_limit = NFS_DEFSTATFSRATELIMIT;
179 int nfs_lockd_mounts = 0;
180 int nfs_lockd_request_sent = 0;
181 int nfs_idmap_ctrl = NFS_IDMAP_CTRL_USE_IDMAP_SERVICE;
182 int nfs_callback_port = 0;
183 int nfs_split_open_owner = 0;
184
185 int nfs_tprintf_initial_delay = NFS_TPRINTF_INITIAL_DELAY;
186 int nfs_tprintf_delay = NFS_TPRINTF_DELAY;
187
188 int nfs_mount_timeout = NFS_MOUNT_TIMEOUT;
189 int nfs_mount_quick_timeout = NFS_MOUNT_QUICK_TIMEOUT;
190
191 int mountnfs(char *, mount_t, vfs_context_t, vnode_t *);
192 int nfs_mount_connect(struct nfsmount *);
193 void nfs_mount_drain_and_cleanup(struct nfsmount *);
194 void nfs_mount_cleanup(struct nfsmount *);
195 int nfs_mountinfo_assemble(struct nfsmount *, struct xdrbuf *);
196 int nfs4_mount_update_path_with_symlink(struct nfsmount *, struct nfs_fs_path *, uint32_t, fhandle_t *, int *, fhandle_t *, vfs_context_t);
197
198 /*
199 * NFS VFS operations.
200 */
201 int nfs_vfs_mount(mount_t, vnode_t, user_addr_t, vfs_context_t);
202 int nfs_vfs_start(mount_t, int, vfs_context_t);
203 int nfs_vfs_unmount(mount_t, int, vfs_context_t);
204 int nfs_vfs_root(mount_t, vnode_t *, vfs_context_t);
205 int nfs_vfs_quotactl(mount_t, int, uid_t, caddr_t, vfs_context_t);
206 int nfs_vfs_getattr(mount_t, struct vfs_attr *, vfs_context_t);
207 int nfs_vfs_sync(mount_t, int, vfs_context_t);
208 int nfs_vfs_vget(mount_t, ino64_t, vnode_t *, vfs_context_t);
209 int nfs_vfs_vptofh(vnode_t, int *, unsigned char *, vfs_context_t);
210 int nfs_vfs_fhtovp(mount_t, int, unsigned char *, vnode_t *, vfs_context_t);
211 int nfs_vfs_init(struct vfsconf *);
212 int nfs_vfs_sysctl(int *, u_int, user_addr_t, size_t *, user_addr_t, size_t, vfs_context_t);
213
/*
 * VFS operations vector for the NFS client, handed to the VFS layer
 * when the filesystem is registered.
 */
const struct vfsops nfs_vfsops = {
	.vfs_mount = nfs_vfs_mount,
	.vfs_start = nfs_vfs_start,
	.vfs_unmount = nfs_vfs_unmount,
	.vfs_root = nfs_vfs_root,
	.vfs_quotactl = nfs_vfs_quotactl,
	.vfs_getattr = nfs_vfs_getattr,
	.vfs_sync = nfs_vfs_sync,
	.vfs_vget = nfs_vfs_vget,
	.vfs_fhtovp = nfs_vfs_fhtovp,
	.vfs_vptofh = nfs_vfs_vptofh,
	.vfs_init = nfs_vfs_init,
	.vfs_sysctl = nfs_vfs_sysctl,
	// We do not support the remaining VFS ops
};
229
230
231 /*
232 * version-specific NFS functions
233 */
234 int nfs3_mount(struct nfsmount *, vfs_context_t, nfsnode_t *);
235 int nfs4_mount(struct nfsmount *, vfs_context_t, nfsnode_t *);
236 int nfs3_fsinfo(struct nfsmount *, nfsnode_t, vfs_context_t);
237 int nfs3_update_statfs(struct nfsmount *, vfs_context_t);
238 int nfs4_update_statfs(struct nfsmount *, vfs_context_t);
239 #if !QUOTA
240 #define nfs3_getquota NULL
241 #define nfs4_getquota NULL
242 #else
243 int nfs3_getquota(struct nfsmount *, vfs_context_t, uid_t, int, struct dqblk *);
244 int nfs4_getquota(struct nfsmount *, vfs_context_t, uid_t, int, struct dqblk *);
245 #endif
246
/*
 * NFSv3 protocol dispatch table: per-mount nm_funcs points here for
 * v2/v3 mounts so common code can call version-specific RPC routines.
 */
const struct nfs_funcs nfs3_funcs = {
	.nf_mount = nfs3_mount,
	.nf_update_statfs = nfs3_update_statfs,
	.nf_getquota = nfs3_getquota,
	.nf_access_rpc = nfs3_access_rpc,
	.nf_getattr_rpc = nfs3_getattr_rpc,
	.nf_setattr_rpc = nfs3_setattr_rpc,
	.nf_read_rpc_async = nfs3_read_rpc_async,
	.nf_read_rpc_async_finish = nfs3_read_rpc_async_finish,
	.nf_readlink_rpc = nfs3_readlink_rpc,
	.nf_write_rpc_async = nfs3_write_rpc_async,
	.nf_write_rpc_async_finish = nfs3_write_rpc_async_finish,
	.nf_commit_rpc = nfs3_commit_rpc,
	.nf_lookup_rpc_async = nfs3_lookup_rpc_async,
	.nf_lookup_rpc_async_finish = nfs3_lookup_rpc_async_finish,
	.nf_remove_rpc = nfs3_remove_rpc,
	.nf_rename_rpc = nfs3_rename_rpc,
	.nf_setlock_rpc = nfs3_setlock_rpc,
	.nf_unlock_rpc = nfs3_unlock_rpc,
	.nf_getlock_rpc = nfs3_getlock_rpc
};
268 #if CONFIG_NFS4
/*
 * NFSv4 protocol dispatch table: same shape as nfs3_funcs, used by
 * v4 mounts (only built when CONFIG_NFS4 is enabled).
 */
const struct nfs_funcs nfs4_funcs = {
	.nf_mount = nfs4_mount,
	.nf_update_statfs = nfs4_update_statfs,
	.nf_getquota = nfs4_getquota,
	.nf_access_rpc = nfs4_access_rpc,
	.nf_getattr_rpc = nfs4_getattr_rpc,
	.nf_setattr_rpc = nfs4_setattr_rpc,
	.nf_read_rpc_async = nfs4_read_rpc_async,
	.nf_read_rpc_async_finish = nfs4_read_rpc_async_finish,
	.nf_readlink_rpc = nfs4_readlink_rpc,
	.nf_write_rpc_async = nfs4_write_rpc_async,
	.nf_write_rpc_async_finish = nfs4_write_rpc_async_finish,
	.nf_commit_rpc = nfs4_commit_rpc,
	.nf_lookup_rpc_async = nfs4_lookup_rpc_async,
	.nf_lookup_rpc_async_finish = nfs4_lookup_rpc_async_finish,
	.nf_remove_rpc = nfs4_remove_rpc,
	.nf_rename_rpc = nfs4_rename_rpc,
	.nf_setlock_rpc = nfs4_setlock_rpc,
	.nf_unlock_rpc = nfs4_unlock_rpc,
	.nf_getlock_rpc = nfs4_getlock_rpc
};
290 #endif
291
292 /*
293 * Called once to initialize data structures...
294 */
int
nfs_vfs_init(__unused struct vfsconf *vfsp)
{
#if CONFIG_NFS4
	int i;
#endif
	/*
	 * Check to see if major data structures haven't bloated.
	 * (Diagnostic only -- initialization continues regardless.)
	 */
	if (sizeof(struct nfsnode) > NFS_NODEALLOC) {
		printf("struct nfsnode bloated (> %dbytes)\n", NFS_NODEALLOC);
		printf("Try reducing NFS_SMALLFH\n");
	}
	if (sizeof(struct nfsmount) > NFS_MNTALLOC) {
		printf("struct nfsmount bloated (> %dbytes)\n", NFS_MNTALLOC);
	}

	/* convert NFS_TICKINTVL (milliseconds) into clock ticks, rounded; minimum 1 */
	nfs_ticks = (hz * NFS_TICKINTVL + 500) / 1000;
	if (nfs_ticks < 1) {
		nfs_ticks = 1;
	}

	/* init async I/O thread pool state */
	TAILQ_INIT(&nfsiodfree);
	TAILQ_INIT(&nfsiodwork);
	TAILQ_INIT(&nfsiodmounts);

	/* initialize NFS request list */
	TAILQ_INIT(&nfs_reqq);

	nfs_nbinit();                   /* Init the nfsbuf table */

#if CONFIG_NFS4
	/* NFSv4 stuff */
	NFS4_PER_FS_ATTRIBUTES(nfs_fs_attr_bitmap);
	NFS4_PER_OBJECT_ATTRIBUTES(nfs_object_attr_bitmap);
	NFS4_DEFAULT_WRITE_ATTRIBUTES(nfs4_getattr_write_bitmap);
	NFS4_DEFAULT_ATTRIBUTES(nfs_getattr_bitmap);
	/* restrict the default GETATTR bitmaps to attributes valid per-object */
	for (i = 0; i < NFS_ATTR_BITMAP_LEN; i++) {
		nfs_getattr_bitmap[i] &= nfs_object_attr_bitmap[i];
		nfs4_getattr_write_bitmap[i] &= nfs_object_attr_bitmap[i];
	}
	TAILQ_INIT(&nfsclientids);
#endif

	/* initialize NFS timer callouts */
	nfs_request_timer_call = thread_call_allocate(nfs_request_timer, NULL);
	nfs_buf_timer_call = thread_call_allocate(nfs_buf_timer, NULL);
#if CONFIG_NFS4
	nfs4_callback_timer_call = thread_call_allocate(nfs4_callback_timer, NULL);
#endif

	/*
	 * Assign NFS hooks so other kernel subsystems can invalidate
	 * NFS buffers/pages without linking against this module directly.
	 */
	struct nfs_hooks hooks = { .f_vinvalbuf = nfs_vinvalbuf1, .f_buf_page_inval = nfs_buf_page_inval_internal };
	nfs_register_hooks(&hooks);

	return 0;
}
355
356 bool
nfs_fs_path_init(struct nfs_fs_path * fsp,uint32_t count)357 nfs_fs_path_init(struct nfs_fs_path *fsp, uint32_t count)
358 {
359 if (count) {
360 fsp->np_components = kalloc_type(char *, count, Z_WAITOK | Z_ZERO);
361 if (fsp->np_components == NULL) {
362 /*
363 * keep np_compcount initialized so that parsing still
364 * happens.
365 */
366 fsp->np_compcount = count;
367 fsp->np_compsize = 0;
368 return false;
369 }
370 } else {
371 fsp->np_components = NULL;
372 }
373 fsp->np_compcount = fsp->np_compsize = count;
374 return true;
375 }
376
/*
 * Move the contents of src into dst.  dst's previous contents are freed
 * first; on return src is zeroed, i.e. ownership of the component array
 * has transferred to dst.  dst and src must not alias.
 */
void
nfs_fs_path_replace(struct nfs_fs_path *dst, struct nfs_fs_path *src)
{
	nfs_fs_path_destroy(dst);
	*dst = *src;
	bzero(src, sizeof(*src));
}
384
385 void
nfs_fs_path_destroy(struct nfs_fs_path * fsp)386 nfs_fs_path_destroy(struct nfs_fs_path *fsp)
387 {
388 if (fsp->np_components) {
389 for (uint32_t i = 0; i < fsp->np_compcount; i++) {
390 if (fsp->np_components[i]) {
391 kfree_data_addr(fsp->np_components[i]);
392 }
393 }
394 kfree_type(char *, fsp->np_compsize, fsp->np_components);
395 }
396 bzero(fsp, sizeof(*fsp));
397 }
398
399 /*
400 * nfs statfs call
401 */
int
nfs3_update_statfs(struct nfsmount *nmp, vfs_context_t ctx)
{
	nfsnode_t np;
	int error = 0, lockerror, status, nfsvers;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	uint32_t val = 0;

	nfsvers = nmp->nm_vers;
	np = nmp->nm_dnp;               /* root directory node of the mount */
	if (!np) {
		return ENXIO;
	}
	if ((error = vnode_get(NFSTOV(np)))) {
		return error;
	}

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	/*
	 * Build the FSSTAT request: just the root file handle.
	 * (The nfsm_chain_* macros accumulate into "error" and the
	 * nfsmout_if() macros jump to the nfsmout label on failure.)
	 */
	nfsm_chain_build_alloc_init(error, &nmreq, NFSX_FH(nfsvers));
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	nfsm_chain_build_done(error, &nmreq);
	nfsmout_if(error);
	/* R_SOFT: issued as a soft request so a statfs can't hang indefinitely */
	error = nfs_request2(np, NULL, &nmreq, NFSPROC_FSSTAT, vfs_context_thread(ctx),
	    vfs_context_ucred(ctx), NULL, R_SOFT, &nmrep, &xid, &status);
	if (error == ETIMEDOUT) {
		goto nfsmout;
	}
	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	if (nfsvers == NFS_VER3) {
		/* v3 replies carry post-op attributes; fold them into the node's cache */
		nfsm_chain_postop_attr_update(error, &nmrep, np, &xid);
	}
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	if (!error) {
		error = status;         /* no transport error: report the NFS status */
	}
	nfsm_assert(error, NFSTONMP(np), ENXIO);
	nfsmout_if(error);
	/* record the parsed values in the mount's fs attributes under nm_lock */
	lck_mtx_lock(&nmp->nm_lock);
	NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_TOTAL);
	NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_FREE);
	NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_AVAIL);
	if (nfsvers == NFS_VER3) {
		NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_FILES_AVAIL);
		NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_FILES_TOTAL);
		NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_FILES_FREE);
		nmp->nm_fsattr.nfsa_bsize = NFS_FABLKSIZE;
		/* NFSv3 FSSTAT returns 64-bit byte and file counts directly */
		nfsm_chain_get_64(error, &nmrep, nmp->nm_fsattr.nfsa_space_total);
		nfsm_chain_get_64(error, &nmrep, nmp->nm_fsattr.nfsa_space_free);
		nfsm_chain_get_64(error, &nmrep, nmp->nm_fsattr.nfsa_space_avail);
		nfsm_chain_get_64(error, &nmrep, nmp->nm_fsattr.nfsa_files_total);
		nfsm_chain_get_64(error, &nmrep, nmp->nm_fsattr.nfsa_files_free);
		nfsm_chain_get_64(error, &nmrep, nmp->nm_fsattr.nfsa_files_avail);
		// skip invarsec
	} else {
		/* NFSv2 STATFS: 32-bit block counts that must be scaled by bsize */
		nfsm_chain_adv(error, &nmrep, NFSX_UNSIGNED); // skip tsize?
		nfsm_chain_get_32(error, &nmrep, nmp->nm_fsattr.nfsa_bsize);
		nfsm_chain_get_32(error, &nmrep, val);
		nfsmout_if(error);
		/* guard against a bogus server block size before scaling */
		if (nmp->nm_fsattr.nfsa_bsize <= 0) {
			nmp->nm_fsattr.nfsa_bsize = NFS_FABLKSIZE;
		}
		nmp->nm_fsattr.nfsa_space_total = (uint64_t)val * nmp->nm_fsattr.nfsa_bsize;
		nfsm_chain_get_32(error, &nmrep, val);
		nfsmout_if(error);
		nmp->nm_fsattr.nfsa_space_free = (uint64_t)val * nmp->nm_fsattr.nfsa_bsize;
		nfsm_chain_get_32(error, &nmrep, val);
		nfsmout_if(error);
		nmp->nm_fsattr.nfsa_space_avail = (uint64_t)val * nmp->nm_fsattr.nfsa_bsize;
	}
	lck_mtx_unlock(&nmp->nm_lock);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	vnode_put(NFSTOV(np));
	return error;
}
485
486 #if CONFIG_NFS4
int
nfs4_update_statfs(struct nfsmount *nmp, vfs_context_t ctx)
{
	nfsnode_t np;
	int error = 0, lockerror, status, nfsvers, numops;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	struct nfs_vattr nvattr;
	struct nfsreq_secinfo_args si;

	nfsvers = nmp->nm_vers;
	np = nmp->nm_dnp;               /* root directory node of the mount */
	if (!np) {
		return ENXIO;
	}
	if ((error = vnode_get(NFSTOV(np)))) {
		return error;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	NVATTR_INIT(&nvattr);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH + GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "statfs", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
	/* request the default attribute set plus the statfs-specific attributes */
	NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
	NFS4_STATFS_ATTRIBUTES(bitmap);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	/* R_SOFT: issued as a soft request so a statfs can't hang indefinitely */
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND,
	    vfs_context_thread(ctx), vfs_context_ucred(ctx),
	    NULL, R_SOFT, &nmrep, &xid, &status);
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_assert(error, NFSTONMP(np), ENXIO);
	nfsmout_if(error);
	/* parse the fattr into the mount's fs attributes under nm_lock */
	lck_mtx_lock(&nmp->nm_lock);
	error = nfs4_parsefattr(&nmrep, &nmp->nm_fsattr, &nvattr, NULL, NULL, NULL);
	lck_mtx_unlock(&nmp->nm_lock);
	nfsmout_if(error);
	if ((lockerror = nfs_node_lock(np))) {
		error = lockerror;
	}
	if (!error) {
		/* refresh the root node's cached attributes as well */
		nfs_loadattrcache(np, &nvattr, &xid, 0);
	}
	if (!lockerror) {
		nfs_node_unlock(np);
	}
	nfsm_assert(error, NFSTONMP(np), ENXIO);
	nfsmout_if(error);
	nmp->nm_fsattr.nfsa_bsize = NFS_FABLKSIZE;
nfsmout:
	NVATTR_CLEANUP(&nvattr);
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	vnode_put(NFSTOV(np));
	return error;
}
559 #endif /* CONFIG_NFS4 */
560
561 /*
562 * Return an NFS volume name from the mntfrom name.
563 */
static void
nfs_get_volname(struct mount *mp, char *volname, size_t len, __unused vfs_context_t ctx)
{
	const char *ptr, *cptr;
	const char *mntfrom = vfs_statfs(mp)->f_mntfromname;
	size_t mflen;

	mflen = strnlen(mntfrom, MAXPATHLEN + 1);

	/* reject an empty name or one that isn't NUL-terminated within MAXPATHLEN */
	if (mflen > MAXPATHLEN || mflen == 0) {
		strlcpy(volname, "Bad volname", len);
		return;
	}

	/* Move back over trailing slashes */
	for (ptr = &mntfrom[mflen - 1]; ptr != mntfrom && *ptr == '/'; ptr--) {
		mflen--;
	}

	/* Find first character after the last slash */
	cptr = ptr = NULL;
	for (size_t i = 0; i < mflen; i++) {
		if (mntfrom[i] == '/') {
			ptr = &mntfrom[i + 1];
		}
		/* And the first character after the first colon */
		else if (cptr == NULL && mntfrom[i] == ':') {
			cptr = &mntfrom[i + 1];
		}
	}

	/*
	 * No slash or nothing after the last slash
	 * use everything past the first colon
	 */
	if (ptr == NULL || *ptr == '\0') {
		ptr = cptr;
	}
	/* Otherwise use the mntfrom name */
	if (ptr == NULL) {
		ptr = mntfrom;
	}

	/* copy only up to the (slash-trimmed) end of the chosen component */
	mflen = &mntfrom[mflen] - ptr;
	/* never ask strlcpy for more than the caller's buffer can hold */
	len = mflen + 1 < len ? mflen + 1 : len;

	strlcpy(volname, ptr, len);
}
612
613 /*
614 * The NFS VFS_GETATTR function: "statfs"-type information is retrieved
615 * using the nf_update_statfs() function, and other attributes are cobbled
616 * together from whatever sources we can (getattr, fsinfo, pathconf).
617 */
int
nfs_vfs_getattr(mount_t mp, struct vfs_attr *fsap, vfs_context_t ctx)
{
	struct nfsmount *nmp;
	uint32_t bsize;
	int error = 0, nfsvers;

	nmp = VFSTONFS(mp);
	if (nfs_mount_gone(nmp)) {
		return ENXIO;
	}
	nfsvers = nmp->nm_vers;

	/* "statfs"-type attributes: may require a fresh RPC via nf_update_statfs() */
	if (VFSATTR_IS_ACTIVE(fsap, f_bsize) ||
	    VFSATTR_IS_ACTIVE(fsap, f_iosize) ||
	    VFSATTR_IS_ACTIVE(fsap, f_blocks) ||
	    VFSATTR_IS_ACTIVE(fsap, f_bfree) ||
	    VFSATTR_IS_ACTIVE(fsap, f_bavail) ||
	    VFSATTR_IS_ACTIVE(fsap, f_bused) ||
	    VFSATTR_IS_ACTIVE(fsap, f_files) ||
	    VFSATTR_IS_ACTIVE(fsap, f_ffree)) {
		int statfsrate = nfs_statfs_rate_limit;
		int refresh = 1;

		/*
		 * Are we rate-limiting statfs RPCs?
		 * (Treat values less than 1 or greater than 1,000,000 as no limit.)
		 */
		if ((statfsrate > 0) && (statfsrate < 1000000)) {
			struct timeval now;
			time_t stamp;

			microuptime(&now);
			lck_mtx_lock(&nmp->nm_lock);
			/* quantize uptime into 1/statfsrate-second buckets; refresh once per bucket */
			stamp = (now.tv_sec * statfsrate) + (now.tv_usec / (1000000 / statfsrate));
			if (stamp != nmp->nm_fsattrstamp) {
				refresh = 1;
				nmp->nm_fsattrstamp = stamp;
			} else {
				refresh = 0;
			}
			lck_mtx_unlock(&nmp->nm_lock);
		}

		if (refresh && !nfs_use_cache(nmp)) {
			error = nmp->nm_funcs->nf_update_statfs(nmp, ctx);
		}
		/* stale handle or timed-out refresh: fall back to the cached values below */
		if ((error == ESTALE) || (error == ETIMEDOUT)) {
			error = 0;
		}
		if (error) {
			return NFS_MAPERR(error);
		}

		lck_mtx_lock(&nmp->nm_lock);
		VFSATTR_RETURN(fsap, f_iosize, nfs_iosize);
		VFSATTR_RETURN(fsap, f_bsize, nmp->nm_fsattr.nfsa_bsize);
		bsize = nmp->nm_fsattr.nfsa_bsize;
		/* only report counts the server actually supplied (per nfsa_bitmap) */
		if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_TOTAL)) {
			VFSATTR_RETURN(fsap, f_blocks, nmp->nm_fsattr.nfsa_space_total / bsize);
		}
		if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_FREE)) {
			VFSATTR_RETURN(fsap, f_bfree, nmp->nm_fsattr.nfsa_space_free / bsize);
		}
		if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_AVAIL)) {
			VFSATTR_RETURN(fsap, f_bavail, nmp->nm_fsattr.nfsa_space_avail / bsize);
		}
		if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_TOTAL) &&
		    NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_FREE)) {
			VFSATTR_RETURN(fsap, f_bused,
			    (nmp->nm_fsattr.nfsa_space_total / bsize) -
			    (nmp->nm_fsattr.nfsa_space_free / bsize));
		}
		if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_FILES_TOTAL)) {
			VFSATTR_RETURN(fsap, f_files, nmp->nm_fsattr.nfsa_files_total);
		}
		if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_FILES_FREE)) {
			VFSATTR_RETURN(fsap, f_ffree, nmp->nm_fsattr.nfsa_files_free);
		}
		lck_mtx_unlock(&nmp->nm_lock);
	}

	if (VFSATTR_IS_ACTIVE(fsap, f_vol_name)) {
		/*%%% IF fail over support is implemented we may need to take nm_lock */
		/* NOTE(review): assumes fsap->f_vol_name is at least MAXPATHLEN bytes -- confirm with VFS callers */
		nfs_get_volname(mp, fsap->f_vol_name, MAXPATHLEN, ctx);
		VFSATTR_SET_SUPPORTED(fsap, f_vol_name);
	}
	if (VFSATTR_IS_ACTIVE(fsap, f_capabilities)) {
		u_int32_t caps, valid;
		nfsnode_t np = nmp->nm_dnp;

		nfsm_assert(error, VFSTONFS(mp) && np, ENXIO);
		if (error) {
			return NFS_MAPERR(error);
		}
		lck_mtx_lock(&nmp->nm_lock);

		/*
		 * The capabilities[] array defines what this volume supports.
		 *
		 * The valid[] array defines which bits this code understands
		 * the meaning of (whether the volume has that capability or
		 * not). Any zero bits here means "I don't know what you're
		 * asking about" and the caller cannot tell whether that
		 * capability is present or not.
		 */
		caps = valid = 0;
		if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SYMLINK_SUPPORT)) {
			valid |= VOL_CAP_FMT_SYMBOLICLINKS;
			if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_SYMLINK) {
				caps |= VOL_CAP_FMT_SYMBOLICLINKS;
			}
		}
		if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_LINK_SUPPORT)) {
			valid |= VOL_CAP_FMT_HARDLINKS;
			if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_LINK) {
				caps |= VOL_CAP_FMT_HARDLINKS;
			}
		}
		if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_CASE_INSENSITIVE)) {
			valid |= VOL_CAP_FMT_CASE_SENSITIVE;
			/* note the inversion: server reports INSENSITIVE, VFS asks SENSITIVE */
			if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_CASE_INSENSITIVE)) {
				caps |= VOL_CAP_FMT_CASE_SENSITIVE;
			}
		}
		if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_CASE_PRESERVING)) {
			valid |= VOL_CAP_FMT_CASE_PRESERVING;
			if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_CASE_PRESERVING) {
				caps |= VOL_CAP_FMT_CASE_PRESERVING;
			}
		}
		/* Note: VOL_CAP_FMT_2TB_FILESIZE is actually used to test for "large file support" */
		if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXFILESIZE)) {
			/* Is server's max file size at least 4GB? */
			if (nmp->nm_fsattr.nfsa_maxfilesize >= 0x100000000ULL) {
				caps |= VOL_CAP_FMT_2TB_FILESIZE;
			}
		} else if (nfsvers >= NFS_VER3) {
			/*
			 * NFSv3 and up supports 64 bits of file size.
			 * So, we'll just assume maxfilesize >= 4GB
			 */
			caps |= VOL_CAP_FMT_2TB_FILESIZE;
		}
#if CONFIG_NFS4
		if (nfsvers >= NFS_VER4) {
			caps |= VOL_CAP_FMT_HIDDEN_FILES;
			valid |= VOL_CAP_FMT_HIDDEN_FILES;
			// VOL_CAP_FMT_OPENDENYMODES
			// caps |= VOL_CAP_FMT_OPENDENYMODES;
			// valid |= VOL_CAP_FMT_OPENDENYMODES;
		}
#endif
		// no version of nfs supports immutable files
		caps |= VOL_CAP_FMT_NO_IMMUTABLE_FILES;
		valid |= VOL_CAP_FMT_NO_IMMUTABLE_FILES;

		fsap->f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] =
		    // VOL_CAP_FMT_PERSISTENTOBJECTIDS |
		    // VOL_CAP_FMT_SYMBOLICLINKS |
		    // VOL_CAP_FMT_HARDLINKS |
		    // VOL_CAP_FMT_JOURNAL |
		    // VOL_CAP_FMT_JOURNAL_ACTIVE |
		    // VOL_CAP_FMT_NO_ROOT_TIMES |
		    // VOL_CAP_FMT_SPARSE_FILES |
		    // VOL_CAP_FMT_ZERO_RUNS |
		    // VOL_CAP_FMT_CASE_SENSITIVE |
		    // VOL_CAP_FMT_CASE_PRESERVING |
		    // VOL_CAP_FMT_FAST_STATFS |
		    // VOL_CAP_FMT_2TB_FILESIZE |
		    // VOL_CAP_FMT_OPENDENYMODES |
		    // VOL_CAP_FMT_HIDDEN_FILES |
		    caps;
		fsap->f_capabilities.valid[VOL_CAPABILITIES_FORMAT] =
		    VOL_CAP_FMT_PERSISTENTOBJECTIDS |
		    // VOL_CAP_FMT_SYMBOLICLINKS |
		    // VOL_CAP_FMT_HARDLINKS |
		    // VOL_CAP_FMT_JOURNAL |
		    // VOL_CAP_FMT_JOURNAL_ACTIVE |
		    // VOL_CAP_FMT_NO_ROOT_TIMES |
		    // VOL_CAP_FMT_SPARSE_FILES |
		    // VOL_CAP_FMT_ZERO_RUNS |
		    // VOL_CAP_FMT_CASE_SENSITIVE |
		    // VOL_CAP_FMT_CASE_PRESERVING |
		    VOL_CAP_FMT_FAST_STATFS |
		    VOL_CAP_FMT_2TB_FILESIZE |
		    // VOL_CAP_FMT_OPENDENYMODES |
		    // VOL_CAP_FMT_HIDDEN_FILES |
		    valid;

		/*
		 * We don't support most of the interfaces.
		 *
		 * We MAY support locking, but we don't have any easy way of
		 * probing. We can tell if there's no lockd running or if
		 * locks have been disabled for a mount, so we can definitely
		 * answer NO in that case. Any attempt to send a request to
		 * lockd to test for locking support may cause the lazily-
		 * launched locking daemons to be started unnecessarily. So
		 * we avoid that. However, we do record if we ever successfully
		 * perform a lock operation on a mount point, so if it looks
		 * like lock ops have worked, we do report that we support them.
		 */
		caps = valid = 0;
#if CONFIG_NFS4
		if (nfsvers >= NFS_VER4) {
			/* v4 locking is built into the protocol, no lockd needed */
			caps = VOL_CAP_INT_ADVLOCK | VOL_CAP_INT_FLOCK;
			valid = VOL_CAP_INT_ADVLOCK | VOL_CAP_INT_FLOCK;
			if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL) {
				caps |= VOL_CAP_INT_EXTENDED_SECURITY;
			}
			valid |= VOL_CAP_INT_EXTENDED_SECURITY;
			if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR) {
				caps |= VOL_CAP_INT_EXTENDED_ATTR;
			}
			valid |= VOL_CAP_INT_EXTENDED_ATTR;
#if NAMEDSTREAMS
			if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR) {
				caps |= VOL_CAP_INT_NAMEDSTREAMS;
			}
			valid |= VOL_CAP_INT_NAMEDSTREAMS;
#endif
		} else
#endif
		if (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED) {
			/* locks disabled on this mount, so they definitely won't work */
			valid = VOL_CAP_INT_ADVLOCK | VOL_CAP_INT_FLOCK;
		} else if (nmp->nm_state & NFSSTA_LOCKSWORK) {
			caps = VOL_CAP_INT_ADVLOCK | VOL_CAP_INT_FLOCK;
			valid = VOL_CAP_INT_ADVLOCK | VOL_CAP_INT_FLOCK;
		}
		fsap->f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] =
		    // VOL_CAP_INT_SEARCHFS |
		    // VOL_CAP_INT_ATTRLIST |
		    // VOL_CAP_INT_NFSEXPORT |
		    // VOL_CAP_INT_READDIRATTR |
		    // VOL_CAP_INT_EXCHANGEDATA |
		    // VOL_CAP_INT_COPYFILE |
		    // VOL_CAP_INT_ALLOCATE |
		    // VOL_CAP_INT_VOL_RENAME |
		    // VOL_CAP_INT_ADVLOCK |
		    // VOL_CAP_INT_FLOCK |
		    // VOL_CAP_INT_EXTENDED_SECURITY |
		    // VOL_CAP_INT_USERACCESS |
		    // VOL_CAP_INT_MANLOCK |
		    // VOL_CAP_INT_NAMEDSTREAMS |
		    // VOL_CAP_INT_EXTENDED_ATTR |
		    VOL_CAP_INT_REMOTE_EVENT |
		    caps;
		fsap->f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] =
		    VOL_CAP_INT_SEARCHFS |
		    VOL_CAP_INT_ATTRLIST |
		    VOL_CAP_INT_NFSEXPORT |
		    VOL_CAP_INT_READDIRATTR |
		    VOL_CAP_INT_EXCHANGEDATA |
		    VOL_CAP_INT_COPYFILE |
		    VOL_CAP_INT_ALLOCATE |
		    VOL_CAP_INT_VOL_RENAME |
		    // VOL_CAP_INT_ADVLOCK |
		    // VOL_CAP_INT_FLOCK |
		    // VOL_CAP_INT_EXTENDED_SECURITY |
		    // VOL_CAP_INT_USERACCESS |
		    // VOL_CAP_INT_MANLOCK |
		    // VOL_CAP_INT_NAMEDSTREAMS |
		    // VOL_CAP_INT_EXTENDED_ATTR |
		    VOL_CAP_INT_REMOTE_EVENT |
		    valid;

		fsap->f_capabilities.capabilities[VOL_CAPABILITIES_RESERVED1] = 0;
		fsap->f_capabilities.valid[VOL_CAPABILITIES_RESERVED1] = 0;

		fsap->f_capabilities.capabilities[VOL_CAPABILITIES_RESERVED2] = 0;
		fsap->f_capabilities.valid[VOL_CAPABILITIES_RESERVED2] = 0;

		VFSATTR_SET_SUPPORTED(fsap, f_capabilities);
		lck_mtx_unlock(&nmp->nm_lock);
	}

	if (VFSATTR_IS_ACTIVE(fsap, f_attributes)) {
		/* the only volume attributes we implement are name, capabilities, attributes */
		fsap->f_attributes.validattr.commonattr = 0;
		fsap->f_attributes.validattr.volattr =
		    ATTR_VOL_NAME | ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES;
		fsap->f_attributes.validattr.dirattr = 0;
		fsap->f_attributes.validattr.fileattr = 0;
		fsap->f_attributes.validattr.forkattr = 0;

		fsap->f_attributes.nativeattr.commonattr = 0;
		fsap->f_attributes.nativeattr.volattr =
		    ATTR_VOL_NAME | ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES;
		fsap->f_attributes.nativeattr.dirattr = 0;
		fsap->f_attributes.nativeattr.fileattr = 0;
		fsap->f_attributes.nativeattr.forkattr = 0;

		VFSATTR_SET_SUPPORTED(fsap, f_attributes);
	}

	return NFS_MAPERR(error);
}
916
917 /*
918 * nfs version 3 fsinfo rpc call
919 */
920 int
nfs3_fsinfo(struct nfsmount * nmp,nfsnode_t np,vfs_context_t ctx)921 nfs3_fsinfo(struct nfsmount *nmp, nfsnode_t np, vfs_context_t ctx)
922 {
923 int error = 0, lockerror, status, nmlocked = 0;
924 u_int64_t xid;
925 uint32_t val, prefsize, maxsize;
926 struct nfsm_chain nmreq, nmrep;
927
928 nfsm_chain_null(&nmreq);
929 nfsm_chain_null(&nmrep);
930
931 nfsm_chain_build_alloc_init(error, &nmreq, NFSX_FH(nmp->nm_vers));
932 nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
933 nfsm_chain_build_done(error, &nmreq);
934 nfsmout_if(error);
935 error = nfs_request(np, NULL, &nmreq, NFSPROC_FSINFO, ctx, NULL, &nmrep, &xid, &status);
936 if ((lockerror = nfs_node_lock(np))) {
937 error = lockerror;
938 }
939 nfsm_chain_postop_attr_update(error, &nmrep, np, &xid);
940 if (!lockerror) {
941 nfs_node_unlock(np);
942 }
943 if (!error) {
944 error = status;
945 }
946 nfsmout_if(error);
947
948 lck_mtx_lock(&nmp->nm_lock);
949 nmlocked = 1;
950
951 nfsm_chain_get_32(error, &nmrep, maxsize);
952 nfsm_chain_get_32(error, &nmrep, prefsize);
953 nfsmout_if(error);
954 nmp->nm_fsattr.nfsa_maxread = maxsize;
955 if (prefsize < nmp->nm_rsize) {
956 nmp->nm_rsize = (prefsize + NFS_FABLKSIZE - 1) &
957 ~(NFS_FABLKSIZE - 1);
958 }
959 if ((maxsize > 0) && (maxsize < nmp->nm_rsize)) {
960 nmp->nm_rsize = maxsize & ~(NFS_FABLKSIZE - 1);
961 if (nmp->nm_rsize == 0) {
962 nmp->nm_rsize = maxsize;
963 }
964 }
965 nfsm_chain_adv(error, &nmrep, NFSX_UNSIGNED); // skip rtmult
966
967 nfsm_chain_get_32(error, &nmrep, maxsize);
968 nfsm_chain_get_32(error, &nmrep, prefsize);
969 nfsmout_if(error);
970 nmp->nm_fsattr.nfsa_maxwrite = maxsize;
971 if (prefsize < nmp->nm_wsize) {
972 nmp->nm_wsize = (prefsize + NFS_FABLKSIZE - 1) &
973 ~(NFS_FABLKSIZE - 1);
974 }
975 if ((maxsize > 0) && (maxsize < nmp->nm_wsize)) {
976 nmp->nm_wsize = maxsize & ~(NFS_FABLKSIZE - 1);
977 if (nmp->nm_wsize == 0) {
978 nmp->nm_wsize = maxsize;
979 }
980 }
981 nfsm_chain_adv(error, &nmrep, NFSX_UNSIGNED); // skip wtmult
982
983 nfsm_chain_get_32(error, &nmrep, prefsize);
984 nfsmout_if(error);
985 if ((prefsize > 0) && (prefsize < nmp->nm_readdirsize)) {
986 nmp->nm_readdirsize = prefsize;
987 }
988 if ((nmp->nm_fsattr.nfsa_maxread > 0) &&
989 (nmp->nm_fsattr.nfsa_maxread < nmp->nm_readdirsize)) {
990 nmp->nm_readdirsize = nmp->nm_fsattr.nfsa_maxread;
991 }
992
993 nfsm_chain_get_64(error, &nmrep, nmp->nm_fsattr.nfsa_maxfilesize);
994
995 nfsm_chain_adv(error, &nmrep, 2 * NFSX_UNSIGNED); // skip time_delta
996
997 /* convert FS properties to our own flags */
998 nfsm_chain_get_32(error, &nmrep, val);
999 nfsmout_if(error);
1000 if (val & NFSV3FSINFO_LINK) {
1001 nmp->nm_fsattr.nfsa_flags |= NFS_FSFLAG_LINK;
1002 }
1003 if (val & NFSV3FSINFO_SYMLINK) {
1004 nmp->nm_fsattr.nfsa_flags |= NFS_FSFLAG_SYMLINK;
1005 }
1006 if (val & NFSV3FSINFO_HOMOGENEOUS) {
1007 nmp->nm_fsattr.nfsa_flags |= NFS_FSFLAG_HOMOGENEOUS;
1008 }
1009 if (val & NFSV3FSINFO_CANSETTIME) {
1010 nmp->nm_fsattr.nfsa_flags |= NFS_FSFLAG_SET_TIME;
1011 }
1012 nmp->nm_state |= NFSSTA_GOTFSINFO;
1013 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXREAD);
1014 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXWRITE);
1015 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXFILESIZE);
1016 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_LINK_SUPPORT);
1017 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SYMLINK_SUPPORT);
1018 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_HOMOGENEOUS);
1019 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_CANSETTIME);
1020 nfsmout:
1021 if (nmlocked) {
1022 lck_mtx_unlock(&nmp->nm_lock);
1023 }
1024 nfsm_chain_cleanup(&nmreq);
1025 nfsm_chain_cleanup(&nmrep);
1026 return error;
1027 }
1028
1029 /*
1030 * Convert old style NFS mount args to XDR.
1031 */
1032 static int
nfs_convert_old_nfs_args(mount_t mp,user_addr_t data,vfs_context_t ctx,int argsversion,int inkernel,char ** xdrbufp)1033 nfs_convert_old_nfs_args(mount_t mp, user_addr_t data, vfs_context_t ctx, int argsversion, int inkernel, char **xdrbufp)
1034 {
1035 int error = 0, args64bit, argsize, numcomps;
1036 struct user_nfs_args args;
1037 struct nfs_args tempargs;
1038 caddr_t argsp;
1039 size_t len;
1040 u_char nfh[NFS4_FHSIZE];
1041 char *mntfrom, *endserverp, *frompath, *p, *cp;
1042 struct sockaddr_storage ss;
1043 void *sinaddr = NULL;
1044 char uaddr[MAX_IPv6_STR_LEN];
1045 uint32_t mattrs[NFS_MATTR_BITMAP_LEN];
1046 uint32_t mflags_mask[NFS_MFLAG_BITMAP_LEN], mflags[NFS_MFLAG_BITMAP_LEN];
1047 uint32_t nfsvers, nfslockmode = 0;
1048 size_t argslength_offset, attrslength_offset, end_offset;
1049 struct xdrbuf xb;
1050
1051 *xdrbufp = NULL;
1052
1053 /* allocate a temporary buffer for mntfrom */
1054 mntfrom = zalloc(ZV_NAMEI);
1055
1056 args64bit = (inkernel || vfs_context_is64bit(ctx));
1057 argsp = args64bit ? (void*)&args : (void*)&tempargs;
1058
1059 argsize = args64bit ? sizeof(args) : sizeof(tempargs);
1060 switch (argsversion) {
1061 case 3:
1062 argsize -= NFS_ARGSVERSION4_INCSIZE;
1063 OS_FALLTHROUGH;
1064 case 4:
1065 argsize -= NFS_ARGSVERSION5_INCSIZE;
1066 OS_FALLTHROUGH;
1067 case 5:
1068 argsize -= NFS_ARGSVERSION6_INCSIZE;
1069 OS_FALLTHROUGH;
1070 case 6:
1071 break;
1072 default:
1073 error = EPROGMISMATCH;
1074 goto nfsmout;
1075 }
1076
1077 /* read in the structure */
1078 if (inkernel) {
1079 bcopy(CAST_DOWN(void *, data), argsp, argsize);
1080 } else {
1081 error = copyin(data, argsp, argsize);
1082 }
1083 nfsmout_if(error);
1084
1085 if (!args64bit) {
1086 args.addrlen = tempargs.addrlen;
1087 args.sotype = tempargs.sotype;
1088 args.proto = tempargs.proto;
1089 args.fhsize = tempargs.fhsize;
1090 args.flags = tempargs.flags;
1091 args.wsize = tempargs.wsize;
1092 args.rsize = tempargs.rsize;
1093 args.readdirsize = tempargs.readdirsize;
1094 args.timeo = tempargs.timeo;
1095 args.retrans = tempargs.retrans;
1096 args.maxgrouplist = tempargs.maxgrouplist;
1097 args.readahead = tempargs.readahead;
1098 args.leaseterm = tempargs.leaseterm;
1099 args.deadthresh = tempargs.deadthresh;
1100 args.addr = CAST_USER_ADDR_T(tempargs.addr);
1101 args.fh = CAST_USER_ADDR_T(tempargs.fh);
1102 args.hostname = CAST_USER_ADDR_T(tempargs.hostname);
1103 args.version = tempargs.version;
1104 if (args.version >= 4) {
1105 args.acregmin = tempargs.acregmin;
1106 args.acregmax = tempargs.acregmax;
1107 args.acdirmin = tempargs.acdirmin;
1108 args.acdirmax = tempargs.acdirmax;
1109 }
1110 if (args.version >= 5) {
1111 args.auth = tempargs.auth;
1112 }
1113 if (args.version >= 6) {
1114 args.deadtimeout = tempargs.deadtimeout;
1115 }
1116 }
1117
1118 if ((args.fhsize < 0) || (args.fhsize > NFS4_FHSIZE)) {
1119 error = EINVAL;
1120 goto nfsmout;
1121 }
1122 if (args.fhsize > 0) {
1123 if (inkernel) {
1124 bcopy(CAST_DOWN(void *, args.fh), (caddr_t)nfh, args.fhsize);
1125 } else {
1126 error = copyin(args.fh, (caddr_t)nfh, args.fhsize);
1127 }
1128 nfsmout_if(error);
1129 }
1130
1131 if (inkernel) {
1132 error = copystr(CAST_DOWN(void *, args.hostname), mntfrom, MAXPATHLEN - 1, &len);
1133 } else {
1134 error = copyinstr(args.hostname, mntfrom, MAXPATHLEN - 1, &len);
1135 }
1136 nfsmout_if(error);
1137 bzero(&mntfrom[len], MAXPATHLEN - len);
1138
1139 /* find the server-side path being mounted */
1140 frompath = mntfrom;
1141 if (*frompath == '[') { /* skip IPv6 literal address */
1142 while (*frompath && (*frompath != ']')) {
1143 frompath++;
1144 }
1145 if (*frompath == ']') {
1146 frompath++;
1147 }
1148 }
1149 while (*frompath && (*frompath != ':')) {
1150 frompath++;
1151 }
1152 endserverp = frompath;
1153 while (*frompath && (*frompath == ':')) {
1154 frompath++;
1155 }
1156 /* count fs location path components */
1157 p = frompath;
1158 while (*p && (*p == '/')) {
1159 p++;
1160 }
1161 numcomps = 0;
1162 while (*p) {
1163 numcomps++;
1164 while (*p && (*p != '/')) {
1165 p++;
1166 }
1167 while (*p && (*p == '/')) {
1168 p++;
1169 }
1170 }
1171
1172 /* copy socket address */
1173 if (inkernel) {
1174 bcopy(CAST_DOWN(void *, args.addr), &ss, args.addrlen);
1175 } else {
1176 if (args.addrlen > sizeof(struct sockaddr_storage)) {
1177 error = EINVAL;
1178 } else {
1179 error = copyin(args.addr, &ss, args.addrlen);
1180 }
1181 }
1182 nfsmout_if(error);
1183 ss.ss_len = args.addrlen;
1184
1185 /* convert address to universal address string */
1186 if (ss.ss_family == AF_INET) {
1187 if (ss.ss_len != sizeof(struct sockaddr_in)) {
1188 error = EINVAL;
1189 } else {
1190 sinaddr = &((struct sockaddr_in*)&ss)->sin_addr;
1191 }
1192 } else if (ss.ss_family == AF_INET6) {
1193 if (ss.ss_len != sizeof(struct sockaddr_in6)) {
1194 error = EINVAL;
1195 } else {
1196 sinaddr = &((struct sockaddr_in6*)&ss)->sin6_addr;
1197 }
1198 } else {
1199 sinaddr = NULL;
1200 }
1201 nfsmout_if(error);
1202
1203 if (!sinaddr || (inet_ntop(ss.ss_family, sinaddr, uaddr, sizeof(uaddr)) != uaddr)) {
1204 error = EINVAL;
1205 goto nfsmout;
1206 }
1207
1208 /* prepare mount flags */
1209 NFS_BITMAP_ZERO(mflags_mask, NFS_MFLAG_BITMAP_LEN);
1210 NFS_BITMAP_ZERO(mflags, NFS_MFLAG_BITMAP_LEN);
1211 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_SOFT);
1212 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_INTR);
1213 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_RESVPORT);
1214 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NOCONNECT);
1215 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_DUMBTIMER);
1216 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_CALLUMNT);
1217 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_RDIRPLUS);
1218 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NONEGNAMECACHE);
1219 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_MUTEJUKEBOX);
1220 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NOQUOTA);
1221 if (args.flags & NFSMNT_SOFT) {
1222 NFS_BITMAP_SET(mflags, NFS_MFLAG_SOFT);
1223 }
1224 if (args.flags & NFSMNT_INT) {
1225 NFS_BITMAP_SET(mflags, NFS_MFLAG_INTR);
1226 }
1227 if (args.flags & NFSMNT_RESVPORT) {
1228 NFS_BITMAP_SET(mflags, NFS_MFLAG_RESVPORT);
1229 }
1230 if (args.flags & NFSMNT_NOCONN) {
1231 NFS_BITMAP_SET(mflags, NFS_MFLAG_NOCONNECT);
1232 }
1233 if (args.flags & NFSMNT_DUMBTIMR) {
1234 NFS_BITMAP_SET(mflags, NFS_MFLAG_DUMBTIMER);
1235 }
1236 if (args.flags & NFSMNT_CALLUMNT) {
1237 NFS_BITMAP_SET(mflags, NFS_MFLAG_CALLUMNT);
1238 }
1239 if (args.flags & NFSMNT_RDIRPLUS) {
1240 NFS_BITMAP_SET(mflags, NFS_MFLAG_RDIRPLUS);
1241 }
1242 if (args.flags & NFSMNT_NONEGNAMECACHE) {
1243 NFS_BITMAP_SET(mflags, NFS_MFLAG_NONEGNAMECACHE);
1244 }
1245 if (args.flags & NFSMNT_MUTEJUKEBOX) {
1246 NFS_BITMAP_SET(mflags, NFS_MFLAG_MUTEJUKEBOX);
1247 }
1248 if (args.flags & NFSMNT_NOQUOTA) {
1249 NFS_BITMAP_SET(mflags, NFS_MFLAG_NOQUOTA);
1250 }
1251
1252 /* prepare mount attributes */
1253 NFS_BITMAP_ZERO(mattrs, NFS_MATTR_BITMAP_LEN);
1254 NFS_BITMAP_SET(mattrs, NFS_MATTR_FLAGS);
1255 NFS_BITMAP_SET(mattrs, NFS_MATTR_NFS_VERSION);
1256 NFS_BITMAP_SET(mattrs, NFS_MATTR_SOCKET_TYPE);
1257 NFS_BITMAP_SET(mattrs, NFS_MATTR_NFS_PORT);
1258 NFS_BITMAP_SET(mattrs, NFS_MATTR_FH);
1259 NFS_BITMAP_SET(mattrs, NFS_MATTR_FS_LOCATIONS);
1260 NFS_BITMAP_SET(mattrs, NFS_MATTR_MNTFLAGS);
1261 NFS_BITMAP_SET(mattrs, NFS_MATTR_MNTFROM);
1262 if (args.flags & NFSMNT_NFSV4) {
1263 nfsvers = 4;
1264 } else if (args.flags & NFSMNT_NFSV3) {
1265 nfsvers = 3;
1266 } else {
1267 nfsvers = 2;
1268 }
1269 if ((args.flags & NFSMNT_RSIZE) && (args.rsize > 0)) {
1270 NFS_BITMAP_SET(mattrs, NFS_MATTR_READ_SIZE);
1271 }
1272 if ((args.flags & NFSMNT_WSIZE) && (args.wsize > 0)) {
1273 NFS_BITMAP_SET(mattrs, NFS_MATTR_WRITE_SIZE);
1274 }
1275 if ((args.flags & NFSMNT_TIMEO) && (args.timeo > 0)) {
1276 NFS_BITMAP_SET(mattrs, NFS_MATTR_REQUEST_TIMEOUT);
1277 }
1278 if ((args.flags & NFSMNT_RETRANS) && (args.retrans > 0)) {
1279 NFS_BITMAP_SET(mattrs, NFS_MATTR_SOFT_RETRY_COUNT);
1280 }
1281 if ((args.flags & NFSMNT_MAXGRPS) && (args.maxgrouplist > 0)) {
1282 NFS_BITMAP_SET(mattrs, NFS_MATTR_MAX_GROUP_LIST);
1283 }
1284 if ((args.flags & NFSMNT_READAHEAD) && (args.readahead > 0)) {
1285 NFS_BITMAP_SET(mattrs, NFS_MATTR_READAHEAD);
1286 }
1287 if ((args.flags & NFSMNT_READDIRSIZE) && (args.readdirsize > 0)) {
1288 NFS_BITMAP_SET(mattrs, NFS_MATTR_READDIR_SIZE);
1289 }
1290 if ((args.flags & NFSMNT_NOLOCKS) ||
1291 (args.flags & NFSMNT_LOCALLOCKS)) {
1292 NFS_BITMAP_SET(mattrs, NFS_MATTR_LOCK_MODE);
1293 if (args.flags & NFSMNT_NOLOCKS) {
1294 nfslockmode = NFS_LOCK_MODE_DISABLED;
1295 } else if (args.flags & NFSMNT_LOCALLOCKS) {
1296 nfslockmode = NFS_LOCK_MODE_LOCAL;
1297 } else {
1298 nfslockmode = NFS_LOCK_MODE_ENABLED;
1299 }
1300 }
1301 if (args.version >= 4) {
1302 if ((args.flags & NFSMNT_ACREGMIN) && (args.acregmin > 0)) {
1303 NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_REG_MIN);
1304 }
1305 if ((args.flags & NFSMNT_ACREGMAX) && (args.acregmax > 0)) {
1306 NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_REG_MAX);
1307 }
1308 if ((args.flags & NFSMNT_ACDIRMIN) && (args.acdirmin > 0)) {
1309 NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MIN);
1310 }
1311 if ((args.flags & NFSMNT_ACDIRMAX) && (args.acdirmax > 0)) {
1312 NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MAX);
1313 }
1314 }
1315 if (args.version >= 5) {
1316 if ((args.flags & NFSMNT_SECFLAVOR) || (args.flags & NFSMNT_SECSYSOK)) {
1317 NFS_BITMAP_SET(mattrs, NFS_MATTR_SECURITY);
1318 }
1319 }
1320 if (args.version >= 6) {
1321 if ((args.flags & NFSMNT_DEADTIMEOUT) && (args.deadtimeout > 0)) {
1322 NFS_BITMAP_SET(mattrs, NFS_MATTR_DEAD_TIMEOUT);
1323 }
1324 }
1325
1326 /* build xdr buffer */
1327 xb_init_buffer(&xb, NULL, 0);
1328 xb_add_32(error, &xb, args.version);
1329 argslength_offset = xb_offset(&xb);
1330 xb_add_32(error, &xb, 0); // args length
1331 xb_add_32(error, &xb, NFS_XDRARGS_VERSION_0);
1332 xb_add_bitmap(error, &xb, mattrs, NFS_MATTR_BITMAP_LEN);
1333 attrslength_offset = xb_offset(&xb);
1334 xb_add_32(error, &xb, 0); // attrs length
1335 xb_add_bitmap(error, &xb, mflags_mask, NFS_MFLAG_BITMAP_LEN); /* mask */
1336 xb_add_bitmap(error, &xb, mflags, NFS_MFLAG_BITMAP_LEN); /* value */
1337 xb_add_32(error, &xb, nfsvers);
1338 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READ_SIZE)) {
1339 xb_add_32(error, &xb, args.rsize);
1340 }
1341 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_WRITE_SIZE)) {
1342 xb_add_32(error, &xb, args.wsize);
1343 }
1344 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READDIR_SIZE)) {
1345 xb_add_32(error, &xb, args.readdirsize);
1346 }
1347 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READAHEAD)) {
1348 xb_add_32(error, &xb, args.readahead);
1349 }
1350 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_REG_MIN)) {
1351 xb_add_32(error, &xb, args.acregmin);
1352 xb_add_32(error, &xb, 0);
1353 }
1354 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_REG_MAX)) {
1355 xb_add_32(error, &xb, args.acregmax);
1356 xb_add_32(error, &xb, 0);
1357 }
1358 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MIN)) {
1359 xb_add_32(error, &xb, args.acdirmin);
1360 xb_add_32(error, &xb, 0);
1361 }
1362 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MAX)) {
1363 xb_add_32(error, &xb, args.acdirmax);
1364 xb_add_32(error, &xb, 0);
1365 }
1366 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_LOCK_MODE)) {
1367 xb_add_32(error, &xb, nfslockmode);
1368 }
1369 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SECURITY)) {
1370 uint32_t flavors[2], i = 0;
1371 if (args.flags & NFSMNT_SECFLAVOR) {
1372 flavors[i++] = args.auth;
1373 }
1374 if ((args.flags & NFSMNT_SECSYSOK) && ((i == 0) || (flavors[0] != RPCAUTH_SYS))) {
1375 flavors[i++] = RPCAUTH_SYS;
1376 }
1377 xb_add_word_array(error, &xb, flavors, i);
1378 }
1379 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MAX_GROUP_LIST)) {
1380 xb_add_32(error, &xb, args.maxgrouplist);
1381 }
1382 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOCKET_TYPE)) {
1383 xb_add_string(error, &xb, ((args.sotype == SOCK_DGRAM) ? "udp" : "tcp"), 3);
1384 }
1385 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_PORT)) {
1386 xb_add_32(error, &xb, ((ss.ss_family == AF_INET) ?
1387 ntohs(((struct sockaddr_in*)&ss)->sin_port) :
1388 ntohs(((struct sockaddr_in6*)&ss)->sin6_port)));
1389 }
1390 /* NFS_MATTR_MOUNT_PORT (not available in old args) */
1391 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_REQUEST_TIMEOUT)) {
1392 /* convert from .1s increments to time */
1393 xb_add_32(error, &xb, args.timeo / 10);
1394 xb_add_32(error, &xb, (args.timeo % 10) * 100000000);
1395 }
1396 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOFT_RETRY_COUNT)) {
1397 xb_add_32(error, &xb, args.retrans);
1398 }
1399 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_DEAD_TIMEOUT)) {
1400 xb_add_32(error, &xb, args.deadtimeout);
1401 xb_add_32(error, &xb, 0);
1402 }
1403 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FH)) {
1404 xb_add_fh(error, &xb, &nfh[0], args.fhsize);
1405 }
1406 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FS_LOCATIONS)) {
1407 xb_add_32(error, &xb, 1); /* fs location count */
1408 xb_add_32(error, &xb, 1); /* server count */
1409 xb_add_string(error, &xb, mntfrom, (endserverp - mntfrom)); /* server name */
1410 xb_add_32(error, &xb, 1); /* address count */
1411 xb_add_string(error, &xb, uaddr, strlen(uaddr)); /* address */
1412 xb_add_32(error, &xb, 0); /* empty server info */
1413 xb_add_32(error, &xb, numcomps); /* pathname component count */
1414 nfsmout_if(error);
1415 p = frompath;
1416 while (*p && (*p == '/')) {
1417 p++;
1418 }
1419 while (*p) {
1420 cp = p;
1421 while (*p && (*p != '/')) {
1422 p++;
1423 }
1424 xb_add_string(error, &xb, cp, (p - cp)); /* component */
1425 nfsmout_if(error);
1426 while (*p && (*p == '/')) {
1427 p++;
1428 }
1429 }
1430 xb_add_32(error, &xb, 0); /* empty fsl info */
1431 }
1432 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MNTFLAGS)) {
1433 xb_add_32(error, &xb, (vfs_flags(mp) & MNT_VISFLAGMASK)); /* VFS MNT_* flags */
1434 }
1435 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MNTFROM)) {
1436 xb_add_string(error, &xb, mntfrom, strlen(mntfrom)); /* fixed f_mntfromname */
1437 }
1438 xb_build_done(error, &xb);
1439
1440 /* update opaque counts */
1441 end_offset = xb_offset(&xb);
1442 error = xb_seek(&xb, argslength_offset);
1443 xb_add_32(error, &xb, end_offset - argslength_offset + XDRWORD /*version*/);
1444 nfsmout_if(error);
1445 error = xb_seek(&xb, attrslength_offset);
1446 xb_add_32(error, &xb, end_offset - attrslength_offset - XDRWORD /*don't include length field*/);
1447
1448 if (!error) {
1449 /* grab the assembled buffer */
1450 *xdrbufp = xb_buffer_base(&xb);
1451 xb.xb_flags &= ~XB_CLEANUP;
1452 }
1453 nfsmout:
1454 xb_cleanup(&xb);
1455 NFS_ZFREE(ZV_NAMEI, mntfrom);
1456 return error;
1457 }
1458
1459 /*
1460 * VFS Operations.
1461 *
1462 * mount system call
1463 */
1464 int
nfs_vfs_mount(mount_t mp,vnode_t vp,user_addr_t data,vfs_context_t ctx)1465 nfs_vfs_mount(mount_t mp, vnode_t vp, user_addr_t data, vfs_context_t ctx)
1466 {
1467 int error = 0, inkernel = vfs_iskernelmount(mp);
1468 uint32_t argsversion, argslength;
1469 char *xdrbuf = NULL;
1470
1471 /* read in version */
1472 if (inkernel) {
1473 bcopy(CAST_DOWN(void *, data), &argsversion, sizeof(argsversion));
1474 } else if ((error = copyin(data, &argsversion, sizeof(argsversion)))) {
1475 return NFS_MAPERR(error);
1476 }
1477
1478 /* If we have XDR args, then all values in the buffer are in network order */
1479 if (argsversion == htonl(NFS_ARGSVERSION_XDR)) {
1480 argsversion = NFS_ARGSVERSION_XDR;
1481 }
1482
1483 switch (argsversion) {
1484 case 3:
1485 case 4:
1486 case 5:
1487 case 6:
1488 /* convert old-style args to xdr */
1489 error = nfs_convert_old_nfs_args(mp, data, ctx, argsversion, inkernel, &xdrbuf);
1490 break;
1491 case NFS_ARGSVERSION_XDR:
1492 /* copy in xdr buffer */
1493 if (inkernel) {
1494 bcopy(CAST_DOWN(void *, (data + XDRWORD)), &argslength, XDRWORD);
1495 } else {
1496 error = copyin((data + XDRWORD), &argslength, XDRWORD);
1497 }
1498 if (error) {
1499 break;
1500 }
1501 argslength = ntohl(argslength);
1502 /* put a reasonable limit on the size of the XDR args */
1503 if (argslength > 16 * 1024) {
1504 error = E2BIG;
1505 break;
1506 }
1507 /* allocate xdr buffer */
1508 xdrbuf = xb_malloc(xdr_rndup(argslength));
1509 if (!xdrbuf) {
1510 error = ENOMEM;
1511 break;
1512 }
1513 if (inkernel) {
1514 bcopy(CAST_DOWN(void *, data), xdrbuf, argslength);
1515 } else {
1516 error = copyin(data, xdrbuf, argslength);
1517 }
1518
1519 if (!inkernel) {
1520 /* Recheck buffer size to avoid double fetch vulnerability */
1521 struct xdrbuf xb;
1522 uint32_t _version, _length;
1523 xb_init_buffer(&xb, xdrbuf, 2 * XDRWORD);
1524 xb_get_32(error, &xb, _version); /* version */
1525 xb_get_32(error, &xb, _length); /* args length */
1526 if (_length != argslength) {
1527 printf("nfs: actual buffer length (%u) does not match the initial value (%u)\n", _length, argslength);
1528 error = EINVAL;
1529 break;
1530 }
1531 }
1532
1533 break;
1534 default:
1535 error = EPROGMISMATCH;
1536 }
1537
1538 if (error) {
1539 if (xdrbuf) {
1540 xb_free(xdrbuf);
1541 }
1542 return NFS_MAPERR(error);
1543 }
1544 error = mountnfs(xdrbuf, mp, ctx, &vp);
1545 return NFS_MAPERR(error);
1546 }
1547
1548 /*
1549 * Common code for mount and mountroot
1550 */
1551
1552 /* Set up an NFSv2/v3 mount */
1553 int
nfs3_mount(struct nfsmount * nmp,vfs_context_t ctx,nfsnode_t * npp)1554 nfs3_mount(
1555 struct nfsmount *nmp,
1556 vfs_context_t ctx,
1557 nfsnode_t *npp)
1558 {
1559 int error = 0;
1560 struct nfs_vattr nvattr;
1561 u_int64_t xid;
1562
1563 *npp = NULL;
1564
1565 if (!nmp->nm_fh) {
1566 return EINVAL;
1567 }
1568
1569 /*
1570 * Get file attributes for the mountpoint. These are needed
1571 * in order to properly create the root vnode.
1572 */
1573 error = nfs3_getattr_rpc(NULL, nmp->nm_mountp, nmp->nm_fh->fh_data, nmp->nm_fh->fh_len, 0,
1574 ctx, &nvattr, &xid);
1575 if (error) {
1576 goto out;
1577 }
1578
1579 error = nfs_nget(nmp->nm_mountp, NULL, NULL, nmp->nm_fh->fh_data, nmp->nm_fh->fh_len,
1580 &nvattr, &xid, RPCAUTH_UNKNOWN, NG_MARKROOT, npp);
1581 if (*npp) {
1582 nfs_node_unlock(*npp);
1583 }
1584 if (error) {
1585 goto out;
1586 }
1587
1588 /*
1589 * Try to make sure we have all the general info from the server.
1590 */
1591 if (nmp->nm_vers == NFS_VER2) {
1592 NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXNAME);
1593 nmp->nm_fsattr.nfsa_maxname = NFS_MAXNAMLEN;
1594 } else if (nmp->nm_vers == NFS_VER3) {
1595 /* get the NFSv3 FSINFO */
1596 error = nfs3_fsinfo(nmp, *npp, ctx);
1597 if (error) {
1598 goto out;
1599 }
1600 /* grab a copy of root info now (even if server does not support FSF_HOMOGENEOUS) */
1601 struct nfs_fsattr nfsa;
1602 if (!nfs3_pathconf_rpc(*npp, &nfsa, ctx)) {
1603 /* cache a copy of the results */
1604 lck_mtx_lock(&nmp->nm_lock);
1605 nfs3_pathconf_cache(nmp, &nfsa);
1606 lck_mtx_unlock(&nmp->nm_lock);
1607 }
1608 }
1609 out:
1610 if (*npp && error) {
1611 vnode_put(NFSTOV(*npp));
1612 vnode_recycle(NFSTOV(*npp));
1613 *npp = NULL;
1614 }
1615 return error;
1616 }
1617
1618 #if CONFIG_NFS4
1619 /*
1620 * Update an NFSv4 mount path with the contents of the symlink.
1621 *
1622 * Read the link for the given file handle.
1623 * Insert the link's components into the path.
1624 */
1625 int
nfs4_mount_update_path_with_symlink(struct nfsmount * nmp,struct nfs_fs_path * nfsp,uint32_t curcomp,fhandle_t * dirfhp,int * depthp,fhandle_t * fhp,vfs_context_t ctx)1626 nfs4_mount_update_path_with_symlink(struct nfsmount *nmp, struct nfs_fs_path *nfsp, uint32_t curcomp, fhandle_t *dirfhp, int *depthp, fhandle_t *fhp, vfs_context_t ctx)
1627 {
1628 int error = 0, status, numops;
1629 uint32_t len = 0, comp, newcomp, linkcompcount;
1630 u_int64_t xid;
1631 struct nfsm_chain nmreq, nmrep;
1632 struct nfsreq rq, *req = &rq;
1633 struct nfsreq_secinfo_args si;
1634 char *link = NULL, *p, *q, ch;
1635 struct nfs_fs_path nfsp2;
1636
1637 bzero(&nfsp2, sizeof(nfsp2));
1638 if (dirfhp->fh_len) {
1639 NFSREQ_SECINFO_SET(&si, NULL, dirfhp->fh_data, dirfhp->fh_len, nfsp->np_components[curcomp], 0);
1640 } else {
1641 NFSREQ_SECINFO_SET(&si, NULL, NULL, 0, nfsp->np_components[curcomp], 0);
1642 }
1643 nfsm_chain_null(&nmreq);
1644 nfsm_chain_null(&nmrep);
1645
1646 link = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_NOFAIL);
1647
1648 // PUTFH, READLINK
1649 numops = 2;
1650 nfsm_chain_build_alloc_init(error, &nmreq, 12 * NFSX_UNSIGNED);
1651 nfsm_chain_add_compound_header(error, &nmreq, "readlink", nmp->nm_minor_vers, numops);
1652 numops--;
1653 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
1654 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, fhp->fh_data, fhp->fh_len);
1655 numops--;
1656 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_READLINK);
1657 nfsm_chain_build_done(error, &nmreq);
1658 nfsm_assert(error, (numops == 0), EPROTO);
1659 nfsmout_if(error);
1660
1661 error = nfs_request_async(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND,
1662 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
1663 if (!error) {
1664 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
1665 }
1666
1667 nfsm_chain_skip_tag(error, &nmrep);
1668 nfsm_chain_get_32(error, &nmrep, numops);
1669 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
1670 nfsm_chain_op_check(error, &nmrep, NFS_OP_READLINK);
1671 nfsm_chain_get_32(error, &nmrep, len);
1672 nfsmout_if(error);
1673 if (len == 0) {
1674 error = ENOENT;
1675 } else if (len >= MAXPATHLEN) {
1676 len = MAXPATHLEN - 1;
1677 }
1678 nfsm_chain_get_opaque(error, &nmrep, len, link);
1679 nfsmout_if(error);
1680 /* make sure link string is terminated properly */
1681 link[len] = '\0';
1682
1683 /* count the number of components in link */
1684 p = link;
1685 while (*p && (*p == '/')) {
1686 p++;
1687 }
1688 linkcompcount = 0;
1689 while (*p) {
1690 linkcompcount++;
1691 while (*p && (*p != '/')) {
1692 p++;
1693 }
1694 while (*p && (*p == '/')) {
1695 p++;
1696 }
1697 }
1698
1699 /* set up new path */
1700 error = nfs_fs_path_init(&nfsp2, nfsp->np_compcount - curcomp + 1 + linkcompcount);
1701 if (error) {
1702 goto nfsmout;
1703 }
1704
1705 /* add link components */
1706 p = link;
1707 while (*p && (*p == '/')) {
1708 p++;
1709 }
1710 for (newcomp = 0; newcomp < linkcompcount; newcomp++) {
1711 /* find end of component */
1712 q = p;
1713 while (*q && (*q != '/')) {
1714 q++;
1715 }
1716 nfsp2.np_components[newcomp] = kalloc_data(q - p + 1, Z_WAITOK | Z_ZERO);
1717 if (!nfsp2.np_components[newcomp]) {
1718 error = ENOMEM;
1719 break;
1720 }
1721 ch = *q;
1722 *q = '\0';
1723 strlcpy(nfsp2.np_components[newcomp], p, q - p + 1);
1724 *q = ch;
1725 p = q;
1726 while (*p && (*p == '/')) {
1727 p++;
1728 }
1729 }
1730 nfsmout_if(error);
1731
1732 /* add remaining components */
1733 for (comp = curcomp + 1; comp < nfsp->np_compcount; comp++, newcomp++) {
1734 nfsp2.np_components[newcomp] = nfsp->np_components[comp];
1735 nfsp->np_components[comp] = NULL;
1736 }
1737 nfsp->np_compcount = curcomp + 1;
1738
1739 nfs_fs_path_replace(nfsp, &nfsp2);
1740
1741 /* for absolute link, let the caller now that the next dirfh is root */
1742 if (link[0] == '/') {
1743 dirfhp->fh_len = 0;
1744 *depthp = 0;
1745 }
1746 nfsmout:
1747 NFS_ZFREE(ZV_NAMEI, link);
1748 nfs_fs_path_destroy(&nfsp2);
1749 nfsm_chain_cleanup(&nmreq);
1750 nfsm_chain_cleanup(&nmrep);
1751 return error;
1752 }
1753
1754 /* Set up an NFSv4 mount */
1755 int
nfs4_mount(struct nfsmount * nmp,vfs_context_t ctx,nfsnode_t * npp)1756 nfs4_mount(
1757 struct nfsmount *nmp,
1758 vfs_context_t ctx,
1759 nfsnode_t *npp)
1760 {
1761 struct nfsm_chain nmreq, nmrep;
1762 int error = 0, numops, status, interval, isdotdot, loopcnt = 0, depth = 0;
1763 struct nfs_fs_path fspath, *nfsp, fspath2;
1764 uint32_t bitmap[NFS_ATTR_BITMAP_LEN], comp, comp2;
1765 fhandle_t fh, dirfh;
1766 struct nfs_vattr nvattr;
1767 u_int64_t xid;
1768 struct nfsreq rq, *req = &rq;
1769 struct nfsreq_secinfo_args si;
1770 struct nfs_sec sec;
1771 struct nfs_fs_locations nfsls;
1772
1773 *npp = NULL;
1774 fh.fh_len = dirfh.fh_len = 0;
1775 TAILQ_INIT(&nmp->nm_open_owners);
1776 TAILQ_INIT(&nmp->nm_delegations);
1777 TAILQ_INIT(&nmp->nm_dreturnq);
1778 nmp->nm_stategenid = 1;
1779 NVATTR_INIT(&nvattr);
1780 bzero(&nfsls, sizeof(nfsls));
1781 nfsm_chain_null(&nmreq);
1782 nfsm_chain_null(&nmrep);
1783
1784 /*
1785 * If no security flavors were specified we'll want to default to the server's
1786 * preferred flavor. For NFSv4.0 we need a file handle and name to get that via
1787 * SECINFO, so we'll do that on the last component of the server path we are
1788 * mounting. If we are mounting the server's root, we'll need to defer the
1789 * SECINFO call to the first successful LOOKUP request.
1790 */
1791 if (!nmp->nm_sec.count) {
1792 nmp->nm_state |= NFSSTA_NEEDSECINFO;
1793 }
1794
1795 /* make a copy of the current location's path */
1796 nfsp = &nmp->nm_locations.nl_locations[nmp->nm_locations.nl_current.nli_loc]->nl_path;
1797 if (nfs_fs_path_init(&fspath, nfsp->np_compcount)) {
1798 fspath.np_compsize = fspath.np_compcount;
1799 for (comp = 0; comp < nfsp->np_compcount; comp++) {
1800 size_t slen = strlen(nfsp->np_components[comp]);
1801 fspath.np_components[comp] = kalloc_data(slen + 1, Z_WAITOK | Z_ZERO);
1802 if (!fspath.np_components[comp]) {
1803 error = ENOMEM;
1804 goto nfsmout;
1805 }
1806 strlcpy(fspath.np_components[comp], nfsp->np_components[comp], slen + 1);
1807 }
1808 } else {
1809 error = ENOMEM;
1810 goto nfsmout;
1811 }
1812
1813 /* for mirror mounts, we can just use the file handle passed in */
1814 if (nmp->nm_fh) {
1815 dirfh.fh_len = nmp->nm_fh->fh_len;
1816 bcopy(nmp->nm_fh->fh_data, dirfh.fh_data, dirfh.fh_len);
1817 NFSREQ_SECINFO_SET(&si, NULL, dirfh.fh_data, dirfh.fh_len, NULL, 0);
1818 goto gotfh;
1819 }
1820
1821 /* otherwise, we need to get the fh for the directory we are mounting */
1822
1823 /* if no components, just get root */
1824 if (fspath.np_compcount == 0) {
1825 nocomponents:
1826 // PUTROOTFH + GETATTR(FH)
1827 NFSREQ_SECINFO_SET(&si, NULL, NULL, 0, NULL, 0);
1828 numops = 2;
1829 nfsm_chain_build_alloc_init(error, &nmreq, 9 * NFSX_UNSIGNED);
1830 nfsm_chain_add_compound_header(error, &nmreq, "mount", nmp->nm_minor_vers, numops);
1831 numops--;
1832 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTROOTFH);
1833 numops--;
1834 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
1835 NFS_CLEAR_ATTRIBUTES(bitmap);
1836 NFS4_DEFAULT_ATTRIBUTES(bitmap);
1837 NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
1838 nfsm_chain_add_bitmap(error, &nmreq, bitmap, NFS_ATTR_BITMAP_LEN);
1839 nfsm_chain_build_done(error, &nmreq);
1840 nfsm_assert(error, (numops == 0), EPROTO);
1841 nfsmout_if(error);
1842 error = nfs_request_async(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND,
1843 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
1844 if (!error) {
1845 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
1846 }
1847 nfsm_chain_skip_tag(error, &nmrep);
1848 nfsm_chain_get_32(error, &nmrep, numops);
1849 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTROOTFH);
1850 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
1851 nfsmout_if(error);
1852 NFS_CLEAR_ATTRIBUTES(nmp->nm_fsattr.nfsa_bitmap);
1853 error = nfs4_parsefattr(&nmrep, &nmp->nm_fsattr, &nvattr, &dirfh, NULL, NULL);
1854 if (!error && !NFS_BITMAP_ISSET(&nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) {
1855 printf("nfs: mount didn't return filehandle?\n");
1856 error = EBADRPC;
1857 }
1858 nfsmout_if(error);
1859 nfsm_chain_cleanup(&nmrep);
1860 nfsm_chain_null(&nmreq);
1861 NVATTR_CLEANUP(&nvattr);
1862 goto gotfh;
1863 }
1864
1865 /* look up each path component */
1866 for (comp = 0; comp < fspath.np_compcount;) {
1867 isdotdot = 0;
1868 if (fspath.np_components[comp][0] == '.') {
1869 if (fspath.np_components[comp][1] == '\0') {
1870 /* skip "." */
1871 comp++;
1872 continue;
1873 }
1874 /* treat ".." specially */
1875 if ((fspath.np_components[comp][1] == '.') &&
1876 (fspath.np_components[comp][2] == '\0')) {
1877 isdotdot = 1;
1878 }
1879 if (isdotdot && (dirfh.fh_len == 0)) {
1880 /* ".." in root directory is same as "." */
1881 comp++;
1882 continue;
1883 }
1884 }
1885 // PUT(ROOT)FH + LOOKUP(P) + GETFH + GETATTR
1886 if (dirfh.fh_len == 0) {
1887 NFSREQ_SECINFO_SET(&si, NULL, NULL, 0, isdotdot ? NULL : fspath.np_components[comp], 0);
1888 } else {
1889 NFSREQ_SECINFO_SET(&si, NULL, dirfh.fh_data, dirfh.fh_len, isdotdot ? NULL : fspath.np_components[comp], 0);
1890 }
1891 numops = 4;
1892 nfsm_chain_build_alloc_init(error, &nmreq, 18 * NFSX_UNSIGNED);
1893 nfsm_chain_add_compound_header(error, &nmreq, "mount", nmp->nm_minor_vers, numops);
1894 numops--;
1895 if (dirfh.fh_len) {
1896 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
1897 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, dirfh.fh_data, dirfh.fh_len);
1898 } else {
1899 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTROOTFH);
1900 }
1901 numops--;
1902 if (isdotdot) {
1903 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_LOOKUPP);
1904 } else {
1905 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_LOOKUP);
1906 nfsm_chain_add_name(error, &nmreq,
1907 fspath.np_components[comp], strlen(fspath.np_components[comp]), nmp);
1908 }
1909 numops--;
1910 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETFH);
1911 numops--;
1912 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
1913 NFS_CLEAR_ATTRIBUTES(bitmap);
1914 NFS4_DEFAULT_ATTRIBUTES(bitmap);
1915 /* if no namedattr support or component is ".zfs", clear NFS_FATTR_NAMED_ATTR */
1916 if (!NMFLAG(nmp, NAMEDATTR) || !strcmp(fspath.np_components[comp], ".zfs")) {
1917 NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
1918 }
1919 nfsm_chain_add_bitmap(error, &nmreq, bitmap, NFS_ATTR_BITMAP_LEN);
1920 nfsm_chain_build_done(error, &nmreq);
1921 nfsm_assert(error, (numops == 0), EPROTO);
1922 nfsmout_if(error);
1923 error = nfs_request_async(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND,
1924 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
1925 if (!error) {
1926 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
1927 }
1928 nfsm_chain_skip_tag(error, &nmrep);
1929 nfsm_chain_get_32(error, &nmrep, numops);
1930 nfsm_chain_op_check(error, &nmrep, dirfh.fh_len ? NFS_OP_PUTFH : NFS_OP_PUTROOTFH);
1931 nfsm_chain_op_check(error, &nmrep, isdotdot ? NFS_OP_LOOKUPP : NFS_OP_LOOKUP);
1932 nfsmout_if(error);
1933 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETFH);
1934 nfsm_chain_get_32(error, &nmrep, fh.fh_len);
1935 if (fh.fh_len > sizeof(fh.fh_data)) {
1936 error = EBADRPC;
1937 }
1938 nfsmout_if(error);
1939 nfsm_chain_get_opaque(error, &nmrep, fh.fh_len, fh.fh_data);
1940 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
1941 if (!error) {
1942 NFS_CLEAR_ATTRIBUTES(nmp->nm_fsattr.nfsa_bitmap);
1943 error = nfs4_parsefattr(&nmrep, &nmp->nm_fsattr, &nvattr, NULL, NULL, &nfsls);
1944 }
1945 nfsm_chain_cleanup(&nmrep);
1946 nfsm_chain_null(&nmreq);
1947 if (error) {
1948 /* LOOKUP succeeded but GETATTR failed? This could be a referral. */
1949 /* Try the lookup again with a getattr for fs_locations. */
1950 nfs_fs_locations_cleanup(&nfsls);
1951 error = nfs4_get_fs_locations(nmp, NULL, dirfh.fh_data, dirfh.fh_len, fspath.np_components[comp], ctx, &nfsls);
1952 if (!error && (nfsls.nl_numlocs < 1)) {
1953 error = ENOENT;
1954 }
1955 nfsmout_if(error);
1956 if (++loopcnt > MAXSYMLINKS) {
1957 /* too many symlink/referral redirections */
1958 error = ELOOP;
1959 goto nfsmout;
1960 }
1961 /* tear down the current connection */
1962 nfs_disconnect(nmp);
1963 /* replace fs locations */
1964 nfs_fs_locations_cleanup(&nmp->nm_locations);
1965 nmp->nm_locations = nfsls;
1966 bzero(&nfsls, sizeof(nfsls));
1967 /* initiate a connection using the new fs locations */
1968 error = nfs_mount_connect(nmp);
1969 if (!error && !(nmp->nm_locations.nl_current.nli_flags & NLI_VALID)) {
1970 error = EIO;
1971 }
1972 nfsmout_if(error);
1973 /* add new server's remote path to beginning of our path and continue */
1974 nfsp = &nmp->nm_locations.nl_locations[nmp->nm_locations.nl_current.nli_loc]->nl_path;
1975 if (nfs_fs_path_init(&fspath2, (fspath.np_compcount - comp + 1) + nfsp->np_compcount)) {
1976 fspath2.np_compsize = fspath2.np_compcount;
1977 for (comp2 = 0; comp2 < nfsp->np_compcount; comp2++) {
1978 size_t slen = strlen(nfsp->np_components[comp2]);
1979 fspath2.np_components[comp2] = kalloc_data(slen + 1, Z_WAITOK | Z_ZERO);
1980 if (!fspath2.np_components[comp2]) {
1981 /* clean up fspath2, then error out */
1982 nfs_fs_path_destroy(&fspath2);
1983 error = ENOMEM;
1984 goto nfsmout;
1985 }
1986 strlcpy(fspath2.np_components[comp2], nfsp->np_components[comp2], slen + 1);
1987 }
1988 if ((fspath.np_compcount - comp - 1) > 0) {
1989 bcopy(&fspath.np_components[comp + 1], &fspath2.np_components[nfsp->np_compcount], (fspath.np_compcount - comp - 1) * sizeof(char*));
1990 }
1991
1992 nfs_fs_path_replace(&fspath, &fspath2);
1993 } else {
1994 error = ENOMEM;
1995 goto nfsmout;
1996 }
1997
1998 /* reset dirfh and component index */
1999 dirfh.fh_len = 0;
2000 comp = 0;
2001 NVATTR_CLEANUP(&nvattr);
2002 if (fspath.np_compcount == 0) {
2003 goto nocomponents;
2004 }
2005 continue;
2006 }
2007 nfsmout_if(error);
2008 /* if file handle is for a symlink, then update the path with the symlink contents */
2009 if (NFS_BITMAP_ISSET(&nvattr.nva_bitmap, NFS_FATTR_TYPE) && (nvattr.nva_type == VLNK)) {
2010 if (++loopcnt > MAXSYMLINKS) {
2011 error = ELOOP;
2012 } else {
2013 error = nfs4_mount_update_path_with_symlink(nmp, &fspath, comp, &dirfh, &depth, &fh, ctx);
2014 }
2015 nfsmout_if(error);
2016 /* directory file handle is either left the same or reset to root (if link was absolute) */
2017 /* path traversal starts at beginning of the path again */
2018 comp = 0;
2019 NVATTR_CLEANUP(&nvattr);
2020 nfs_fs_locations_cleanup(&nfsls);
2021 continue;
2022 }
2023 NVATTR_CLEANUP(&nvattr);
2024 nfs_fs_locations_cleanup(&nfsls);
2025 /* not a symlink... */
2026 if ((nmp->nm_state & NFSSTA_NEEDSECINFO) && (comp == (fspath.np_compcount - 1)) && !isdotdot) {
2027 /* need to get SECINFO for the directory being mounted */
2028 if (dirfh.fh_len == 0) {
2029 NFSREQ_SECINFO_SET(&si, NULL, NULL, 0, isdotdot ? NULL : fspath.np_components[comp], 0);
2030 } else {
2031 NFSREQ_SECINFO_SET(&si, NULL, dirfh.fh_data, dirfh.fh_len, isdotdot ? NULL : fspath.np_components[comp], 0);
2032 }
2033 sec.count = NX_MAX_SEC_FLAVORS;
2034 error = nfs4_secinfo_rpc(nmp, &si, vfs_context_ucred(ctx), sec.flavors, &sec.count);
2035 /* [sigh] some implementations return "illegal" error for unsupported ops */
2036 if (error == NFSERR_OP_ILLEGAL) {
2037 error = 0;
2038 }
2039 nfsmout_if(error);
2040 /* set our default security flavor to the first in the list */
2041 if (sec.count) {
2042 nmp->nm_auth = sec.flavors[0];
2043 }
2044 nmp->nm_state &= ~NFSSTA_NEEDSECINFO;
2045 }
2046 /* advance directory file handle, component index, & update depth */
2047 dirfh = fh;
2048 comp++;
2049 if (!isdotdot) { /* going down the hierarchy */
2050 depth++;
2051 } else if (--depth <= 0) { /* going up the hierarchy */
2052 dirfh.fh_len = 0; /* clear dirfh when we hit root */
2053 }
2054 }
2055
2056 gotfh:
2057 /* get attrs for mount point root */
2058 numops = NMFLAG(nmp, NAMEDATTR) ? 3 : 2; // PUTFH + GETATTR + OPENATTR
2059 nfsm_chain_build_alloc_init(error, &nmreq, 25 * NFSX_UNSIGNED);
2060 nfsm_chain_add_compound_header(error, &nmreq, "mount", nmp->nm_minor_vers, numops);
2061 numops--;
2062 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
2063 nfsm_chain_add_fh(error, &nmreq, NFS_VER4, dirfh.fh_data, dirfh.fh_len);
2064 numops--;
2065 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
2066 NFS_CLEAR_ATTRIBUTES(bitmap);
2067 NFS4_DEFAULT_ATTRIBUTES(bitmap);
2068 /* if no namedattr support or last component is ".zfs", clear NFS_FATTR_NAMED_ATTR */
2069 if (!NMFLAG(nmp, NAMEDATTR) || ((fspath.np_compcount > 0) && !strcmp(fspath.np_components[fspath.np_compcount - 1], ".zfs"))) {
2070 NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR);
2071 }
2072 nfsm_chain_add_bitmap(error, &nmreq, bitmap, NFS_ATTR_BITMAP_LEN);
2073 if (NMFLAG(nmp, NAMEDATTR)) {
2074 numops--;
2075 nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_OPENATTR);
2076 nfsm_chain_add_32(error, &nmreq, 0);
2077 }
2078 nfsm_chain_build_done(error, &nmreq);
2079 nfsm_assert(error, (numops == 0), EPROTO);
2080 nfsmout_if(error);
2081 error = nfs_request_async(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND,
2082 vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
2083 if (!error) {
2084 error = nfs_request_async_finish(req, &nmrep, &xid, &status);
2085 }
2086 nfsm_chain_skip_tag(error, &nmrep);
2087 nfsm_chain_get_32(error, &nmrep, numops);
2088 nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
2089 nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
2090 nfsmout_if(error);
2091 NFS_CLEAR_ATTRIBUTES(nmp->nm_fsattr.nfsa_bitmap);
2092 error = nfs4_parsefattr(&nmrep, &nmp->nm_fsattr, &nvattr, NULL, NULL, NULL);
2093 nfsmout_if(error);
2094 if (NMFLAG(nmp, NAMEDATTR)) {
2095 nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
2096 if (error == ENOENT) {
2097 error = 0;
2098 }
2099 /* [sigh] some implementations return "illegal" error for unsupported ops */
2100 if (error || !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_NAMED_ATTR)) {
2101 nmp->nm_fsattr.nfsa_flags &= ~NFS_FSFLAG_NAMED_ATTR;
2102 } else {
2103 nmp->nm_fsattr.nfsa_flags |= NFS_FSFLAG_NAMED_ATTR;
2104 }
2105 } else {
2106 nmp->nm_fsattr.nfsa_flags &= ~NFS_FSFLAG_NAMED_ATTR;
2107 }
2108 if (NMFLAG(nmp, NOACL)) { /* make sure ACL support is turned off */
2109 nmp->nm_fsattr.nfsa_flags &= ~NFS_FSFLAG_ACL;
2110 }
2111 if (NMFLAG(nmp, ACLONLY) && !(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL)) {
2112 NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_ACLONLY);
2113 }
2114 if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_FH_EXPIRE_TYPE)) {
2115 uint32_t fhtype = ((nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_FHTYPE_MASK) >> NFS_FSFLAG_FHTYPE_SHIFT);
2116 if (fhtype != NFS_FH_PERSISTENT) {
2117 printf("nfs: warning: non-persistent file handles! for %s\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
2118 }
2119 }
2120
2121 /* make sure it's a directory */
2122 if (!NFS_BITMAP_ISSET(&nvattr.nva_bitmap, NFS_FATTR_TYPE) || (nvattr.nva_type != VDIR)) {
2123 error = ENOTDIR;
2124 goto nfsmout;
2125 }
2126
2127 /* save the NFS fsid */
2128 nmp->nm_fsid = nvattr.nva_fsid;
2129
2130 /* create the root node */
2131 error = nfs_nget(nmp->nm_mountp, NULL, NULL, dirfh.fh_data, dirfh.fh_len, &nvattr, &xid, rq.r_auth, NG_MARKROOT, npp);
2132 nfsmout_if(error);
2133
2134 if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL) {
2135 vfs_setextendedsecurity(nmp->nm_mountp);
2136 }
2137
2138 /* adjust I/O sizes to server limits */
2139 if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXREAD) && (nmp->nm_fsattr.nfsa_maxread > 0)) {
2140 if (nmp->nm_fsattr.nfsa_maxread < (uint64_t)nmp->nm_rsize) {
2141 nmp->nm_rsize = nmp->nm_fsattr.nfsa_maxread & ~(NFS_FABLKSIZE - 1);
2142 if (nmp->nm_rsize == 0) {
2143 nmp->nm_rsize = nmp->nm_fsattr.nfsa_maxread;
2144 }
2145 }
2146 }
2147 if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXWRITE) && (nmp->nm_fsattr.nfsa_maxwrite > 0)) {
2148 if (nmp->nm_fsattr.nfsa_maxwrite < (uint64_t)nmp->nm_wsize) {
2149 nmp->nm_wsize = nmp->nm_fsattr.nfsa_maxwrite & ~(NFS_FABLKSIZE - 1);
2150 if (nmp->nm_wsize == 0) {
2151 nmp->nm_wsize = nmp->nm_fsattr.nfsa_maxwrite;
2152 }
2153 }
2154 }
2155
2156 /* set up lease renew timer */
2157 nmp->nm_renew_timer = thread_call_allocate_with_options(nfs4_renew_timer, nmp, THREAD_CALL_PRIORITY_HIGH, THREAD_CALL_OPTIONS_ONCE);
2158 interval = nmp->nm_fsattr.nfsa_lease / 2;
2159 if (interval < 1) {
2160 interval = 1;
2161 }
2162 nfs_interval_timer_start(nmp->nm_renew_timer, interval * 1000);
2163
2164 nfsmout:
2165 nfs_fs_path_destroy(&fspath);
2166 NVATTR_CLEANUP(&nvattr);
2167 nfs_fs_locations_cleanup(&nfsls);
2168 if (*npp) {
2169 nfs_node_unlock(*npp);
2170 }
2171 nfsm_chain_cleanup(&nmreq);
2172 nfsm_chain_cleanup(&nmrep);
2173 return error;
2174 }
2175 #endif /* CONFIG_NFS4 */
2176
2177 /*
2178 * Thread to handle initial NFS mount connection.
2179 */
2180 void
nfs_mount_connect_thread(void * arg,__unused wait_result_t wr)2181 nfs_mount_connect_thread(void *arg, __unused wait_result_t wr)
2182 {
2183 struct nfsmount *nmp = arg;
2184 int error = 0, savederror = 0, slpflag = (NMFLAG(nmp, INTR) ? PCATCH : 0);
2185 int done = 0, timeo, tries, maxtries;
2186
2187 if (NM_OMFLAG(nmp, MNTQUICK)) {
2188 timeo = nfs_mount_quick_timeout >= 1 ? nfs_mount_quick_timeout : NFS_MOUNT_QUICK_TIMEOUT;
2189 maxtries = 1;
2190 } else {
2191 timeo = nfs_mount_timeout >= 1 ? nfs_mount_timeout : NFS_MOUNT_TIMEOUT;
2192 maxtries = 2;
2193 }
2194
2195 for (tries = 0; tries < maxtries; tries++) {
2196 error = nfs_connect(nmp, 1, timeo);
2197 switch (error) {
2198 case ETIMEDOUT:
2199 case EAGAIN:
2200 case EPIPE:
2201 case EADDRNOTAVAIL:
2202 case ENETDOWN:
2203 case ENETUNREACH:
2204 case ENETRESET:
2205 case ECONNABORTED:
2206 case ECONNRESET:
2207 case EISCONN:
2208 case ENOTCONN:
2209 case ESHUTDOWN:
2210 case ECONNREFUSED:
2211 case EHOSTDOWN:
2212 case EHOSTUNREACH:
2213 /* just keep retrying on any of these errors */
2214 break;
2215 case 0:
2216 default:
2217 /* looks like we got an answer... */
2218 done = 1;
2219 break;
2220 }
2221
2222 /* save the best error */
2223 if (nfs_connect_error_class(error) >= nfs_connect_error_class(savederror)) {
2224 savederror = error;
2225 }
2226 if (done) {
2227 error = savederror;
2228 break;
2229 }
2230
2231 /* pause before next attempt */
2232 if ((error = nfs_sigintr(nmp, NULL, current_thread(), 0))) {
2233 break;
2234 }
2235 error = tsleep(nmp, PSOCK | slpflag, "nfs_mount_connect_retry", 2 * hz);
2236 if (error && (error != EWOULDBLOCK)) {
2237 break;
2238 }
2239 error = savederror;
2240 }
2241
2242 /* update status of mount connect */
2243 lck_mtx_lock(&nmp->nm_lock);
2244 if (!nmp->nm_mounterror) {
2245 nmp->nm_mounterror = error;
2246 }
2247 nmp->nm_state &= ~NFSSTA_MOUNT_THREAD;
2248 lck_mtx_unlock(&nmp->nm_lock);
2249 wakeup(&nmp->nm_nss);
2250 }
2251
/*
 * Kick off the mount connect thread and wait for it to finish.
 *
 * The actual socket setup (location/server/address search and mount
 * parameter negotiation) is done by nfs_mount_connect_thread() on a
 * separate kernel thread so that reserved-port binding requirements
 * can be satisfied.  Returns the connect status from nm_mounterror.
 */
int
nfs_mount_connect(struct nfsmount *nmp)
{
	int error = 0, slpflag;
	thread_t thd;
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };

	/*
	 * Set up the socket. Perform initial search for a location/server/address to
	 * connect to and negotiate any unspecified mount parameters. This work is
	 * done on a kernel thread to satisfy reserved port usage needs.
	 */
	slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0;
	lck_mtx_lock(&nmp->nm_lock);
	/* set flag that the thread is running */
	nmp->nm_state |= NFSSTA_MOUNT_THREAD;
	if (kernel_thread_start(nfs_mount_connect_thread, nmp, &thd) != KERN_SUCCESS) {
		/* couldn't start the thread: clear the flag and record the failure */
		nmp->nm_state &= ~NFSSTA_MOUNT_THREAD;
		nmp->nm_mounterror = EIO;
		printf("nfs mount %s start socket connect thread failed\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname);
	} else {
		/* drop the reference returned by kernel_thread_start() */
		thread_deallocate(thd);
	}

	/* wait until mount connect thread is finished/gone */
	while (nmp->nm_state & NFSSTA_MOUNT_THREAD) {
		/* sleep up to 2s at a time so nfs_sigintr() gets checked periodically */
		error = msleep(&nmp->nm_nss, &nmp->nm_lock, slpflag | PSOCK, "nfsconnectthread", &ts);
		if ((error && (error != EWOULDBLOCK)) || ((error = nfs_sigintr(nmp, NULL, current_thread(), 1)))) {
			/* record error */
			if (!nmp->nm_mounterror) {
				nmp->nm_mounterror = error;
			}
			/* signal the thread that we are aborting */
			nmp->nm_sockflags |= NMSOCK_UNMOUNT;
			if (nmp->nm_nss) {
				wakeup(nmp->nm_nss);
			}
			/* and continue waiting on it to finish; further waits are
			 * uninterruptible so we don't loop on the same signal */
			slpflag = 0;
		}
	}
	lck_mtx_unlock(&nmp->nm_lock);

	/* grab mount connect status (thread has exited, so no concurrent writer) */
	error = nmp->nm_mounterror;

	return error;
}
2300
/* Table of maximum minor version for a given version */
/* Indexed by NFS major version number; entries for versions 0 and 1
 * exist only so the index matches the version number directly. */
uint32_t maxminorverstab[] = {
	0, /* Version 0 (does not exist) */
	0, /* Version 1 (does not exist) */
	0, /* Version 2 */
	0, /* Version 3 */
	0, /* Version 4 */
};

/* Highest NFS major version this client supports (last valid table index) */
#define NFS_MAX_SUPPORTED_VERSION       ((long)(sizeof (maxminorverstab) / sizeof (uint32_t) - 1))
/* Highest supported minor version for major version v */
#define NFS_MAX_SUPPORTED_MINOR_VERSION(v)      ((long)(maxminorverstab[(v)]))

/* Default packed version range when the mount args specify none: v2.0 through v3.0 */
#define DEFAULT_NFS_MIN_VERS VER2PVER(2, 0)
#define DEFAULT_NFS_MAX_VERS VER2PVER(3, 0)
2315
2316 /*
2317 * Common code to mount an NFS file system.
2318 */
2319 int
mountnfs(char * xdrbuf,mount_t mp,vfs_context_t ctx,vnode_t * vpp)2320 mountnfs(
2321 char *xdrbuf,
2322 mount_t mp,
2323 vfs_context_t ctx,
2324 vnode_t *vpp)
2325 {
2326 struct nfsmount *nmp;
2327 nfsnode_t np;
2328 int error = 0;
2329 struct vfsstatfs *sbp;
2330 struct xdrbuf xb;
2331 uint32_t i, val, maxio, iosize, len;
2332 uint32_t *mattrs;
2333 uint32_t *mflags_mask;
2334 uint32_t *mflags;
2335 uint32_t argslength, attrslength;
2336 uid_t set_owner = 0;
2337 struct nfs_location_index firstloc = {
2338 .nli_flags = NLI_VALID,
2339 .nli_loc = 0,
2340 .nli_serv = 0,
2341 .nli_addr = 0
2342 };
2343 static const struct nfs_etype nfs_default_etypes = {
2344 .count = NFS_MAX_ETYPES,
2345 .selected = NFS_MAX_ETYPES,
2346 .etypes = { NFS_AES256_CTS_HMAC_SHA1_96,
2347 NFS_AES128_CTS_HMAC_SHA1_96,
2348 NFS_DES3_CBC_SHA1_KD}
2349 };
2350
2351 /* make sure mbuf constants are set up */
2352 if (!nfs_mbuf_mhlen) {
2353 nfs_mbuf_init();
2354 }
2355
2356 if (vfs_flags(mp) & MNT_UPDATE) {
2357 nmp = VFSTONFS(mp);
2358 /* update paths, file handles, etc, here XXX */
2359 xb_free(xdrbuf);
2360 return 0;
2361 } else {
2362 /* allocate an NFS mount structure for this mount */
2363 nmp = zalloc_flags(nfsmnt_zone, Z_WAITOK | Z_ZERO);
2364 lck_mtx_init(&nmp->nm_lock, &nfs_mount_grp, LCK_ATTR_NULL);
2365 TAILQ_INIT(&nmp->nm_resendq);
2366 TAILQ_INIT(&nmp->nm_iodq);
2367 TAILQ_INIT(&nmp->nm_gsscl);
2368 LIST_INIT(&nmp->nm_monlist);
2369 vfs_setfsprivate(mp, nmp);
2370 vfs_getnewfsid(mp);
2371 nmp->nm_mountp = mp;
2372 vfs_setauthopaque(mp);
2373 /*
2374 * Disable cache_lookup_path for NFS. NFS lookup always needs
2375 * to be called to check if the directory attribute cache is
2376 * valid and possibly purge the directory before calling
2377 * cache_lookup.
2378 */
2379 vfs_setauthcache_ttl(mp, 0);
2380
2381 nfs_nhinit_finish();
2382
2383 nmp->nm_args = xdrbuf;
2384
2385 /* set up defaults */
2386 nmp->nm_ref = 0;
2387 nmp->nm_vers = 0;
2388 nmp->nm_min_vers = DEFAULT_NFS_MIN_VERS;
2389 nmp->nm_max_vers = DEFAULT_NFS_MAX_VERS;
2390 nmp->nm_timeo = NFS_TIMEO;
2391 nmp->nm_retry = NFS_RETRANS;
2392 nmp->nm_sotype = 0;
2393 nmp->nm_sofamily = 0;
2394 nmp->nm_nfsport = 0;
2395 nmp->nm_wsize = NFS_WSIZE;
2396 nmp->nm_rsize = NFS_RSIZE;
2397 nmp->nm_readdirsize = NFS_READDIRSIZE;
2398 nmp->nm_numgrps = NFS_MAXGRPS;
2399 nmp->nm_readahead = NFS_DEFRAHEAD;
2400 nmp->nm_tprintf_delay = nfs_tprintf_delay;
2401 if (nmp->nm_tprintf_delay < 0) {
2402 nmp->nm_tprintf_delay = 0;
2403 }
2404 nmp->nm_tprintf_initial_delay = nfs_tprintf_initial_delay;
2405 if (nmp->nm_tprintf_initial_delay < 0) {
2406 nmp->nm_tprintf_initial_delay = 0;
2407 }
2408 nmp->nm_acregmin = NFS_MINATTRTIMO;
2409 nmp->nm_acregmax = NFS_MAXATTRTIMO;
2410 nmp->nm_acdirmin = NFS_MINDIRATTRTIMO;
2411 nmp->nm_acdirmax = NFS_MAXDIRATTRTIMO;
2412 nmp->nm_etype = nfs_default_etypes;
2413 nmp->nm_auth = RPCAUTH_SYS;
2414 nmp->nm_iodlink.tqe_next = NFSNOLIST;
2415 nmp->nm_deadtimeout = 0;
2416 nmp->nm_curdeadtimeout = 0;
2417 NFS_BITMAP_SET(nmp->nm_flags, NFS_MFLAG_RDIRPLUS); /* enable RDIRPLUS by default. It will be reverted later in case NFSv2 is used */
2418 NFS_BITMAP_SET(nmp->nm_flags, NFS_MFLAG_NOACL);
2419 nmp->nm_realm = NULL;
2420 nmp->nm_principal = NULL;
2421 nmp->nm_sprinc = NULL;
2422 }
2423
2424 mattrs = nmp->nm_mattrs;
2425 mflags = nmp->nm_mflags;
2426 mflags_mask = nmp->nm_mflags_mask;
2427
2428 /* set up NFS mount with args */
2429 xb_init_buffer(&xb, xdrbuf, 2 * XDRWORD);
2430 xb_get_32(error, &xb, val); /* version */
2431 xb_get_32(error, &xb, argslength); /* args length */
2432 nfsmerr_if(error);
2433 xb_init_buffer(&xb, xdrbuf, argslength); /* restart parsing with actual buffer length */
2434 xb_get_32(error, &xb, val); /* version */
2435 xb_get_32(error, &xb, argslength); /* args length */
2436 xb_get_32(error, &xb, val); /* XDR args version */
2437 if (val != NFS_XDRARGS_VERSION_0 || argslength < ((4 + NFS_MATTR_BITMAP_LEN + 1) * XDRWORD)) {
2438 error = EINVAL;
2439 }
2440 len = NFS_MATTR_BITMAP_LEN;
2441 xb_get_bitmap(error, &xb, mattrs, len); /* mount attribute bitmap */
2442 attrslength = 0;
2443 xb_get_32(error, &xb, attrslength); /* attrs length */
2444 if (!error && (attrslength > (argslength - ((4 + NFS_MATTR_BITMAP_LEN + 1) * XDRWORD)))) {
2445 error = EINVAL;
2446 }
2447 nfsmerr_if(error);
2448 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FLAGS)) {
2449 len = NFS_MFLAG_BITMAP_LEN;
2450 xb_get_bitmap(error, &xb, mflags_mask, len); /* mount flag mask */
2451 len = NFS_MFLAG_BITMAP_LEN;
2452 xb_get_bitmap(error, &xb, mflags, len); /* mount flag values */
2453 if (!error) {
2454 /* clear all mask bits and OR in all the ones that are set */
2455 nmp->nm_flags[0] &= ~mflags_mask[0];
2456 nmp->nm_flags[0] |= (mflags_mask[0] & mflags[0]);
2457 }
2458 }
2459 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION)) {
2460 /* Can't specify a single version and a range */
2461 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION_RANGE)) {
2462 error = EINVAL;
2463 }
2464 xb_get_32(error, &xb, nmp->nm_vers);
2465 if (nmp->nm_vers > NFS_MAX_SUPPORTED_VERSION ||
2466 nmp->nm_vers < NFS_VER2) {
2467 error = EINVAL;
2468 }
2469 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_MINOR_VERSION)) {
2470 xb_get_32(error, &xb, nmp->nm_minor_vers);
2471 } else {
2472 nmp->nm_minor_vers = maxminorverstab[nmp->nm_vers];
2473 }
2474 if (nmp->nm_minor_vers > maxminorverstab[nmp->nm_vers]) {
2475 error = EINVAL;
2476 }
2477 nmp->nm_max_vers = nmp->nm_min_vers =
2478 VER2PVER(nmp->nm_vers, nmp->nm_minor_vers);
2479 }
2480 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_MINOR_VERSION)) {
2481 /* should have also gotten NFS version (and already gotten minor version) */
2482 if (!NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION)) {
2483 error = EINVAL;
2484 }
2485 }
2486 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION_RANGE)) {
2487 xb_get_32(error, &xb, nmp->nm_min_vers);
2488 xb_get_32(error, &xb, nmp->nm_max_vers);
2489 if ((nmp->nm_min_vers > nmp->nm_max_vers) ||
2490 (PVER2MAJOR(nmp->nm_max_vers) > NFS_MAX_SUPPORTED_VERSION) ||
2491 (PVER2MINOR(nmp->nm_min_vers) > maxminorverstab[PVER2MAJOR(nmp->nm_min_vers)]) ||
2492 (PVER2MINOR(nmp->nm_max_vers) > maxminorverstab[PVER2MAJOR(nmp->nm_max_vers)])) {
2493 error = EINVAL;
2494 }
2495 }
2496 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READ_SIZE)) {
2497 xb_get_32(error, &xb, nmp->nm_rsize);
2498 }
2499 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_WRITE_SIZE)) {
2500 xb_get_32(error, &xb, nmp->nm_wsize);
2501 }
2502 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READDIR_SIZE)) {
2503 xb_get_32(error, &xb, nmp->nm_readdirsize);
2504 }
2505 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READAHEAD)) {
2506 xb_get_32(error, &xb, nmp->nm_readahead);
2507 }
2508 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_REG_MIN)) {
2509 xb_get_32(error, &xb, nmp->nm_acregmin);
2510 xb_skip(error, &xb, XDRWORD);
2511 }
2512 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_REG_MAX)) {
2513 xb_get_32(error, &xb, nmp->nm_acregmax);
2514 xb_skip(error, &xb, XDRWORD);
2515 }
2516 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MIN)) {
2517 xb_get_32(error, &xb, nmp->nm_acdirmin);
2518 xb_skip(error, &xb, XDRWORD);
2519 }
2520 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MAX)) {
2521 xb_get_32(error, &xb, nmp->nm_acdirmax);
2522 xb_skip(error, &xb, XDRWORD);
2523 }
2524 nfsmerr_if(error);
2525 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_LOCK_MODE)) {
2526 xb_get_32(error, &xb, val);
2527 switch (val) {
2528 case NFS_LOCK_MODE_DISABLED:
2529 case NFS_LOCK_MODE_LOCAL:
2530 #if CONFIG_NFS4
2531 if (nmp->nm_vers >= NFS_VER4) {
2532 /* disabled/local lock mode only allowed on v2/v3 */
2533 error = EINVAL;
2534 break;
2535 }
2536 #endif
2537 OS_FALLTHROUGH;
2538 case NFS_LOCK_MODE_ENABLED:
2539 nmp->nm_lockmode = val;
2540 break;
2541 default:
2542 error = EINVAL;
2543 }
2544 }
2545 nfsmerr_if(error);
2546 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SECURITY)) {
2547 uint32_t seccnt;
2548 xb_get_32(error, &xb, seccnt);
2549 if (!error && ((seccnt < 1) || (seccnt > NX_MAX_SEC_FLAVORS))) {
2550 error = EINVAL;
2551 }
2552 nfsmerr_if(error);
2553 nmp->nm_sec.count = seccnt;
2554 for (i = 0; i < seccnt; i++) {
2555 xb_get_32(error, &xb, nmp->nm_sec.flavors[i]);
2556 /* Check for valid security flavor */
2557 switch (nmp->nm_sec.flavors[i]) {
2558 case RPCAUTH_NONE:
2559 case RPCAUTH_SYS:
2560 case RPCAUTH_KRB5:
2561 case RPCAUTH_KRB5I:
2562 case RPCAUTH_KRB5P:
2563 break;
2564 default:
2565 error = EINVAL;
2566 }
2567 }
2568 /* start with the first flavor */
2569 nmp->nm_auth = nmp->nm_sec.flavors[0];
2570 }
2571 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_KERB_ETYPE)) {
2572 uint32_t etypecnt;
2573 xb_get_32(error, &xb, etypecnt);
2574 if (!error && ((etypecnt < 1) || (etypecnt > NFS_MAX_ETYPES))) {
2575 error = EINVAL;
2576 }
2577 nfsmerr_if(error);
2578 nmp->nm_etype.count = etypecnt;
2579 xb_get_32(error, &xb, nmp->nm_etype.selected);
2580 nfsmerr_if(error);
2581 if (etypecnt) {
2582 nmp->nm_etype.selected = etypecnt; /* Nothing is selected yet, so set selected to count */
2583 for (i = 0; i < etypecnt; i++) {
2584 xb_get_32(error, &xb, nmp->nm_etype.etypes[i]);
2585 /* Check for valid encryption type */
2586 switch (nmp->nm_etype.etypes[i]) {
2587 case NFS_DES3_CBC_SHA1_KD:
2588 case NFS_AES128_CTS_HMAC_SHA1_96:
2589 case NFS_AES256_CTS_HMAC_SHA1_96:
2590 break;
2591 default:
2592 error = EINVAL;
2593 }
2594 }
2595 }
2596 }
2597 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MAX_GROUP_LIST)) {
2598 xb_get_32(error, &xb, nmp->nm_numgrps);
2599 }
2600 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOCKET_TYPE)) {
2601 char sotype[16];
2602
2603 *sotype = '\0';
2604 xb_get_32(error, &xb, val);
2605 if (!error && ((val < 3) || (val > sizeof(sotype)))) {
2606 error = EINVAL;
2607 }
2608 nfsmerr_if(error);
2609 error = xb_get_bytes(&xb, sotype, val, 0);
2610 nfsmerr_if(error);
2611 sotype[val] = '\0';
2612 if (!strcmp(sotype, "tcp")) {
2613 nmp->nm_sotype = SOCK_STREAM;
2614 } else if (!strcmp(sotype, "udp")) {
2615 nmp->nm_sotype = SOCK_DGRAM;
2616 } else if (!strcmp(sotype, "tcp4")) {
2617 nmp->nm_sotype = SOCK_STREAM;
2618 nmp->nm_sofamily = AF_INET;
2619 } else if (!strcmp(sotype, "udp4")) {
2620 nmp->nm_sotype = SOCK_DGRAM;
2621 nmp->nm_sofamily = AF_INET;
2622 } else if (!strcmp(sotype, "tcp6")) {
2623 nmp->nm_sotype = SOCK_STREAM;
2624 nmp->nm_sofamily = AF_INET6;
2625 } else if (!strcmp(sotype, "udp6")) {
2626 nmp->nm_sotype = SOCK_DGRAM;
2627 nmp->nm_sofamily = AF_INET6;
2628 } else if (!strcmp(sotype, "inet4")) {
2629 nmp->nm_sofamily = AF_INET;
2630 } else if (!strcmp(sotype, "inet6")) {
2631 nmp->nm_sofamily = AF_INET6;
2632 } else if (!strcmp(sotype, "inet")) {
2633 nmp->nm_sofamily = 0; /* ok */
2634 } else if (!strcmp(sotype, "ticotsord")) {
2635 nmp->nm_sofamily = AF_LOCAL;
2636 nmp->nm_sotype = SOCK_STREAM;
2637 } else if (!strcmp(sotype, "ticlts")) {
2638 nmp->nm_sofamily = AF_LOCAL;
2639 nmp->nm_sotype = SOCK_DGRAM;
2640 } else {
2641 error = EINVAL;
2642 }
2643 #if CONFIG_NFS4
2644 if (!error && (nmp->nm_vers >= NFS_VER4) && nmp->nm_sotype &&
2645 (nmp->nm_sotype != SOCK_STREAM)) {
2646 error = EINVAL; /* NFSv4 is only allowed over TCP. */
2647 }
2648 #endif
2649 if (error) {
2650 NFS_VFS_DBG("EINVAL sotype = \"%s\"\n", sotype);
2651 }
2652 nfsmerr_if(error);
2653 }
2654 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_PORT)) {
2655 xb_get_32(error, &xb, val);
2656 if (NFS_PORT_INVALID(val)) {
2657 error = EINVAL;
2658 nfsmerr_if(error);
2659 }
2660 nmp->nm_nfsport = (in_port_t)val;
2661 }
2662 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MOUNT_PORT)) {
2663 xb_get_32(error, &xb, val);
2664 if (NFS_PORT_INVALID(val)) {
2665 error = EINVAL;
2666 nfsmerr_if(error);
2667 }
2668 nmp->nm_mountport = (in_port_t)val;
2669 }
2670 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_REQUEST_TIMEOUT)) {
2671 /* convert from time to 0.1s units */
2672 xb_get_32(error, &xb, nmp->nm_timeo);
2673 xb_get_32(error, &xb, val);
2674 nfsmerr_if(error);
2675 if (val >= 1000000000) {
2676 error = EINVAL;
2677 }
2678 nfsmerr_if(error);
2679 nmp->nm_timeo *= 10;
2680 nmp->nm_timeo += (val + 100000000 - 1) / 100000000;
2681 /* now convert to ticks */
2682 nmp->nm_timeo = (nmp->nm_timeo * NFS_HZ + 5) / 10;
2683 }
2684 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOFT_RETRY_COUNT)) {
2685 xb_get_32(error, &xb, val);
2686 if (!error && (val > 1)) {
2687 nmp->nm_retry = val;
2688 }
2689 }
2690 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_DEAD_TIMEOUT)) {
2691 xb_get_32(error, &xb, nmp->nm_deadtimeout);
2692 xb_skip(error, &xb, XDRWORD);
2693 }
2694 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FH)) {
2695 nfsmerr_if(error);
2696 nmp->nm_fh = zalloc(nfs_fhandle_zone);
2697 xb_get_32(error, &xb, nmp->nm_fh->fh_len);
2698 nfsmerr_if(error);
2699 if ((size_t)nmp->nm_fh->fh_len > sizeof(nmp->nm_fh->fh_data)) {
2700 error = EINVAL;
2701 } else {
2702 error = xb_get_bytes(&xb, (char*)&nmp->nm_fh->fh_data[0], nmp->nm_fh->fh_len, 0);
2703 }
2704 }
2705 nfsmerr_if(error);
2706 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FS_LOCATIONS)) {
2707 uint32_t loc, serv, addr, comp;
2708 struct nfs_fs_location *fsl;
2709 struct nfs_fs_server *fss;
2710 struct nfs_fs_path *fsp;
2711
2712 xb_get_32(error, &xb, nmp->nm_locations.nl_numlocs); /* fs location count */
2713 /* sanity check location count */
2714 if (!error && ((nmp->nm_locations.nl_numlocs < 1) || (nmp->nm_locations.nl_numlocs > 256))) {
2715 NFS_VFS_DBG("Invalid number of fs_locations: %d", nmp->nm_locations.nl_numlocs);
2716 error = EINVAL;
2717 }
2718 nfsmerr_if(error);
2719 nmp->nm_locations.nl_locations = kalloc_type(struct nfs_fs_location *,
2720 nmp->nm_locations.nl_numlocs, Z_WAITOK | Z_ZERO);
2721 if (!nmp->nm_locations.nl_locations) {
2722 error = ENOMEM;
2723 }
2724 for (loc = 0; loc < nmp->nm_locations.nl_numlocs; loc++) {
2725 nfsmerr_if(error);
2726 fsl = kalloc_type(struct nfs_fs_location,
2727 Z_WAITOK | Z_ZERO | Z_NOFAIL);
2728 nmp->nm_locations.nl_locations[loc] = fsl;
2729 xb_get_32(error, &xb, fsl->nl_servcount); /* server count */
2730 /* sanity check server count */
2731 if (!error && ((fsl->nl_servcount < 1) || (fsl->nl_servcount > 256))) {
2732 NFS_VFS_DBG("Invalid server count %d", fsl->nl_servcount);
2733 error = EINVAL;
2734 }
2735 nfsmerr_if(error);
2736 fsl->nl_servers = kalloc_type(struct nfs_fs_server *,
2737 fsl->nl_servcount, Z_WAITOK | Z_ZERO);
2738 if (!fsl->nl_servers) {
2739 error = ENOMEM;
2740 NFS_VFS_DBG("Server count = %d, error = %d\n", fsl->nl_servcount, error);
2741 }
2742 for (serv = 0; serv < fsl->nl_servcount; serv++) {
2743 nfsmerr_if(error);
2744 fss = kalloc_type(struct nfs_fs_server,
2745 Z_WAITOK | Z_ZERO | Z_NOFAIL);
2746 fsl->nl_servers[serv] = fss;
2747 xb_get_32(error, &xb, val); /* server name length */
2748 /* sanity check server name length */
2749 if (!error && (val > MAXPATHLEN)) {
2750 NFS_VFS_DBG("Invalid server name length %d", val);
2751 error = EINVAL;
2752 }
2753 nfsmerr_if(error);
2754 fss->ns_name = kalloc_data(val + 1, Z_WAITOK | Z_ZERO);
2755 if (!fss->ns_name) {
2756 error = ENOMEM;
2757 }
2758 nfsmerr_if(error);
2759 error = xb_get_bytes(&xb, fss->ns_name, val, 0); /* server name */
2760 xb_get_32(error, &xb, fss->ns_addrcount); /* address count */
2761 /* sanity check address count (OK to be zero) */
2762 if (!error && (fss->ns_addrcount > 256)) {
2763 NFS_VFS_DBG("Invalid address count %d", fss->ns_addrcount);
2764 error = EINVAL;
2765 }
2766 nfsmerr_if(error);
2767 if (fss->ns_addrcount > 0) {
2768 fss->ns_addresses = kalloc_type(char *,
2769 fss->ns_addrcount, Z_WAITOK | Z_ZERO);
2770 if (!fss->ns_addresses) {
2771 error = ENOMEM;
2772 }
2773 for (addr = 0; addr < fss->ns_addrcount; addr++) {
2774 xb_get_32(error, &xb, val); /* address length */
2775 /* sanity check address length */
2776 if (!error && val > 128) {
2777 NFS_VFS_DBG("Invalid address length %d", val);
2778 error = EINVAL;
2779 }
2780 nfsmerr_if(error);
2781 fss->ns_addresses[addr] = kalloc_data(val + 1, Z_WAITOK | Z_ZERO);
2782 if (!fss->ns_addresses[addr]) {
2783 error = ENOMEM;
2784 }
2785 nfsmerr_if(error);
2786 error = xb_get_bytes(&xb, fss->ns_addresses[addr], val, 0); /* address */
2787 }
2788 }
2789 xb_get_32(error, &xb, val); /* server info length */
2790 xb_skip(error, &xb, val); /* skip server info */
2791 }
2792 /* get pathname */
2793 fsp = &fsl->nl_path;
2794 xb_get_32(error, &xb, fsp->np_compcount); /* component count */
2795 /* sanity check component count */
2796 if (!error && (fsp->np_compcount > MAXPATHLEN)) {
2797 NFS_VFS_DBG("Invalid component count %d", fsp->np_compcount);
2798 error = EINVAL;
2799 }
2800 nfsmerr_if(error);
2801 if (!nfs_fs_path_init(fsp, fsp->np_compcount)) {
2802 error = ENOMEM;
2803 }
2804 for (comp = 0; comp < fsp->np_compcount; comp++) {
2805 xb_get_32(error, &xb, val); /* component length */
2806 /* sanity check component length */
2807 if (!error && (val == 0)) {
2808 /*
2809 * Apparently some people think a path with zero components should
2810 * be encoded with one zero-length component. So, just ignore any
2811 * zero length components.
2812 */
2813 comp--;
2814 fsp->np_compcount--;
2815 if (fsp->np_compcount == 0) {
2816 nfs_fs_path_destroy(fsp);
2817 }
2818 continue;
2819 }
2820 if (!error && ((val < 1) || (val > MAXPATHLEN))) {
2821 NFS_VFS_DBG("Invalid component path length %d", val);
2822 error = EINVAL;
2823 }
2824 nfsmerr_if(error);
2825 fsp->np_components[comp] = kalloc_data(val + 1, Z_WAITOK | Z_ZERO);
2826 if (!fsp->np_components[comp]) {
2827 error = ENOMEM;
2828 }
2829 nfsmerr_if(error);
2830 error = xb_get_bytes(&xb, fsp->np_components[comp], val, 0); /* component */
2831 }
2832 xb_get_32(error, &xb, val); /* fs location info length */
2833 NFS_VFS_DBG("Skipping fs location info bytes %d", val);
2834 xb_skip(error, &xb, xdr_rndup(val)); /* skip fs location info */
2835 }
2836 }
2837 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MNTFLAGS)) {
2838 xb_skip(error, &xb, XDRWORD);
2839 }
2840 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MNTFROM)) {
2841 xb_get_32(error, &xb, len);
2842 nfsmerr_if(error);
2843 val = len;
2844 if (val >= sizeof(vfs_statfs(mp)->f_mntfromname)) {
2845 val = sizeof(vfs_statfs(mp)->f_mntfromname) - 1;
2846 }
2847 error = xb_get_bytes(&xb, vfs_statfs(mp)->f_mntfromname, val, 0);
2848 if ((len - val) > 0) {
2849 xb_skip(error, &xb, len - val);
2850 }
2851 nfsmerr_if(error);
2852 vfs_statfs(mp)->f_mntfromname[val] = '\0';
2853 }
2854 nfsmerr_if(error);
2855
2856 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_REALM)) {
2857 xb_get_32(error, &xb, len);
2858 if (!error && ((len < 1) || (len > MAXPATHLEN))) {
2859 error = EINVAL;
2860 }
2861 nfsmerr_if(error);
2862 /* allocate an extra byte for a leading '@' if its not already prepended to the realm */
2863 nmp->nm_realm = kalloc_data(len + 2, Z_WAITOK | Z_ZERO);
2864 if (!nmp->nm_realm) {
2865 error = ENOMEM;
2866 }
2867 nfsmerr_if(error);
2868 error = xb_get_bytes(&xb, nmp->nm_realm, len, 0);
2869 if (error == 0 && *nmp->nm_realm != '@') {
2870 bcopy(nmp->nm_realm, &nmp->nm_realm[1], len);
2871 nmp->nm_realm[0] = '@';
2872 }
2873 }
2874 nfsmerr_if(error);
2875
2876 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_PRINCIPAL)) {
2877 xb_get_32(error, &xb, len);
2878 if (!error && ((len < 1) || (len > MAXPATHLEN))) {
2879 error = EINVAL;
2880 }
2881 nfsmerr_if(error);
2882 nmp->nm_principal = kalloc_data(len + 1, Z_WAITOK | Z_ZERO);
2883 if (!nmp->nm_principal) {
2884 error = ENOMEM;
2885 }
2886 nfsmerr_if(error);
2887 error = xb_get_bytes(&xb, nmp->nm_principal, len, 0);
2888 }
2889 nfsmerr_if(error);
2890
2891 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SVCPRINCIPAL)) {
2892 xb_get_32(error, &xb, len);
2893 if (!error && ((len < 1) || (len > MAXPATHLEN))) {
2894 error = EINVAL;
2895 }
2896 nfsmerr_if(error);
2897 nmp->nm_sprinc = kalloc_data(len + 1, Z_WAITOK | Z_ZERO);
2898 if (!nmp->nm_sprinc) {
2899 error = ENOMEM;
2900 }
2901 nfsmerr_if(error);
2902 error = xb_get_bytes(&xb, nmp->nm_sprinc, len, 0);
2903 }
2904 nfsmerr_if(error);
2905
2906 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_LOCAL_NFS_PORT)) {
2907 if (nmp->nm_nfsport) {
2908 error = EINVAL;
2909 NFS_VFS_DBG("Can't have ports specified over incompatible socket families");
2910 }
2911 nfsmerr_if(error);
2912 xb_get_32(error, &xb, len);
2913 if (!error && ((len < 1) || (len > sizeof(((struct sockaddr_un *)0)->sun_path)))) {
2914 error = EINVAL;
2915 }
2916 nfsmerr_if(error);
2917 nmp->nm_nfs_localport = kalloc_data(len + 1, Z_WAITOK | Z_ZERO);
2918 if (!nmp->nm_nfs_localport) {
2919 error = ENOMEM;
2920 }
2921 nfsmerr_if(error);
2922 error = xb_get_bytes(&xb, nmp->nm_nfs_localport, len, 0);
2923 nmp->nm_sofamily = AF_LOCAL;
2924 nmp->nm_nfsport = 1; /* We use the now deprecated tpcmux port to indcate that we have an AF_LOCAL port */
2925 NFS_VFS_DBG("Setting nfs local port %s (%d)\n", nmp->nm_nfs_localport, nmp->nm_nfsport);
2926 }
2927 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_LOCAL_MOUNT_PORT)) {
2928 if (nmp->nm_mountport) {
2929 error = EINVAL;
2930 NFS_VFS_DBG("Can't have ports specified over mulitple socket families");
2931 }
2932 nfsmerr_if(error);
2933 xb_get_32(error, &xb, len);
2934 if (!error && ((len < 1) || (len > sizeof(((struct sockaddr_un *)0)->sun_path)))) {
2935 error = EINVAL;
2936 }
2937 nfsmerr_if(error);
2938 nmp->nm_mount_localport = kalloc_data(len + 1, Z_WAITOK | Z_ZERO);
2939 if (!nmp->nm_mount_localport) {
2940 error = ENOMEM;
2941 }
2942 nfsmerr_if(error);
2943 error = xb_get_bytes(&xb, nmp->nm_mount_localport, len, 0);
2944 nmp->nm_sofamily = AF_LOCAL;
2945 nmp->nm_mountport = 1; /* We use the now deprecated tpcmux port to indcate that we have an AF_LOCAL port */
2946 NFS_VFS_DBG("Setting mount local port %s (%d)\n", nmp->nm_mount_localport, nmp->nm_mountport);
2947 }
2948 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SET_MOUNT_OWNER)) {
2949 xb_get_32(error, &xb, set_owner);
2950 nfsmerr_if(error);
2951 error = vfs_context_suser(ctx);
2952 /*
2953 * root can set owner to whatever, user can set owner to self
2954 */
2955 if ((error) && (set_owner == kauth_cred_getuid(vfs_context_ucred(ctx)))) {
2956 /* ok for non-root can set owner to self */
2957 error = 0;
2958 }
2959 nfsmerr_if(error);
2960 }
2961
2962 /*
2963 * Sanity check/finalize settings.
2964 */
2965
2966 if (nmp->nm_timeo < NFS_MINTIMEO) {
2967 nmp->nm_timeo = NFS_MINTIMEO;
2968 } else if (nmp->nm_timeo > NFS_MAXTIMEO) {
2969 nmp->nm_timeo = NFS_MAXTIMEO;
2970 }
2971 if (nmp->nm_retry > NFS_MAXREXMIT) {
2972 nmp->nm_retry = NFS_MAXREXMIT;
2973 }
2974
2975 if (nmp->nm_numgrps > NFS_MAXGRPS) {
2976 nmp->nm_numgrps = NFS_MAXGRPS;
2977 }
2978 if (nmp->nm_readahead > NFS_MAXRAHEAD) {
2979 nmp->nm_readahead = NFS_MAXRAHEAD;
2980 }
2981 if (nmp->nm_acregmin > nmp->nm_acregmax) {
2982 nmp->nm_acregmin = nmp->nm_acregmax;
2983 }
2984 if (nmp->nm_acdirmin > nmp->nm_acdirmax) {
2985 nmp->nm_acdirmin = nmp->nm_acdirmax;
2986 }
2987
2988 /* need at least one fs location */
2989 if (nmp->nm_locations.nl_numlocs < 1) {
2990 error = EINVAL;
2991 }
2992 nfsmerr_if(error);
2993
2994 if (!NM_OMATTR_GIVEN(nmp, MNTFROM)) {
2995 /* init mount's mntfromname to first location */
2996 nfs_location_mntfromname(&nmp->nm_locations, firstloc,
2997 vfs_statfs(mp)->f_mntfromname,
2998 sizeof(vfs_statfs(mp)->f_mntfromname), 0);
2999 }
3000
3001 /* Need to save the mounting credential for v4. */
3002 nmp->nm_mcred = vfs_context_ucred(ctx);
3003 if (IS_VALID_CRED(nmp->nm_mcred)) {
3004 kauth_cred_ref(nmp->nm_mcred);
3005 }
3006
3007 /*
3008 * If a reserved port is required, check for that privilege.
3009 * (Note that mirror mounts are exempt because the privilege was
3010 * already checked for the original mount.)
3011 */
3012 if (NMFLAG(nmp, RESVPORT) && !vfs_iskernelmount(mp)) {
3013 error = priv_check_cred(nmp->nm_mcred, PRIV_NETINET_RESERVEDPORT, 0);
3014 }
3015 nfsmerr_if(error);
3016
3017 /* set up the version-specific function tables */
3018 if (nmp->nm_vers < NFS_VER4) {
3019 nmp->nm_funcs = &nfs3_funcs;
3020 } else {
3021 #if CONFIG_NFS4
3022 nmp->nm_funcs = &nfs4_funcs;
3023 #else
3024 /* don't go any further if we don't support NFS4 */
3025 nmp->nm_funcs = NULL;
3026 error = ENOTSUP;
3027 nfsmerr_if(error);
3028 #endif
3029 }
3030
3031 /* do mount's initial socket connection */
3032 error = nfs_mount_connect(nmp);
3033 nfsmerr_if(error);
3034
3035 /* sanity check settings now that version/connection is set */
3036 if (nmp->nm_vers == NFS_VER2) { /* ignore RDIRPLUS on NFSv2 */
3037 NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_RDIRPLUS);
3038 }
3039 #if CONFIG_NFS4
3040 if (nmp->nm_vers >= NFS_VER4) {
3041 if (NFS_BITMAP_ISSET(nmp->nm_flags, NFS_MFLAG_ACLONLY)) { /* aclonly trumps noacl */
3042 NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_NOACL);
3043 }
3044 NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_CALLUMNT);
3045 if (nmp->nm_lockmode != NFS_LOCK_MODE_ENABLED) {
3046 error = EINVAL; /* disabled/local lock mode only allowed on v2/v3 */
3047 }
3048 } else {
3049 #endif
3050 /* ignore these if not v4 */
3051 NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_NOCALLBACK);
3052 NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_NAMEDATTR);
3053 NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_NOACL);
3054 NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_ACLONLY);
3055 #if CONFIG_NFS4
3056 }
3057 #endif
3058 nfsmerr_if(error);
3059
3060 if (nmp->nm_sotype == SOCK_DGRAM) {
3061 /* I/O size defaults for UDP are different */
3062 if (!NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READ_SIZE)) {
3063 nmp->nm_rsize = NFS_DGRAM_RSIZE;
3064 }
3065 if (!NFS_BITMAP_ISSET(mattrs, NFS_MATTR_WRITE_SIZE)) {
3066 nmp->nm_wsize = NFS_DGRAM_WSIZE;
3067 }
3068 }
3069
3070 /* round down I/O sizes to multiple of NFS_FABLKSIZE */
3071 nmp->nm_rsize &= ~(NFS_FABLKSIZE - 1);
3072 if (nmp->nm_rsize <= 0) {
3073 nmp->nm_rsize = NFS_FABLKSIZE;
3074 }
3075 nmp->nm_wsize &= ~(NFS_FABLKSIZE - 1);
3076 if (nmp->nm_wsize <= 0) {
3077 nmp->nm_wsize = NFS_FABLKSIZE;
3078 }
3079
3080 /* and limit I/O sizes to maximum allowed */
3081 maxio = (nmp->nm_vers == NFS_VER2) ? NFS_V2MAXDATA :
3082 (nmp->nm_sotype == SOCK_DGRAM) ? NFS_MAXDGRAMDATA : NFS_MAXDATA;
3083 if (maxio > NFS_MAXBSIZE) {
3084 maxio = NFS_MAXBSIZE;
3085 }
3086 if (nmp->nm_rsize > maxio) {
3087 nmp->nm_rsize = maxio;
3088 }
3089 if (nmp->nm_wsize > maxio) {
3090 nmp->nm_wsize = maxio;
3091 }
3092
3093 if (nmp->nm_readdirsize > maxio) {
3094 nmp->nm_readdirsize = maxio;
3095 }
3096 if (nmp->nm_readdirsize > nmp->nm_rsize) {
3097 nmp->nm_readdirsize = nmp->nm_rsize;
3098 }
3099
3100 /* Set up the sockets and related info */
3101 if (nmp->nm_sotype == SOCK_DGRAM) {
3102 TAILQ_INIT(&nmp->nm_cwndq);
3103 }
3104
3105 if (nmp->nm_saddr->sa_family == AF_LOCAL) {
3106 struct sockaddr_un *un = (struct sockaddr_un *)nmp->nm_saddr;
3107 size_t size;
3108 int n = snprintf(vfs_statfs(mp)->f_mntfromname, sizeof(vfs_statfs(mp)->f_mntfromname), "<%s>:", un->sun_path);
3109
3110 if (n > 0 && (size_t)n < sizeof(vfs_statfs(mp)->f_mntfromname)) {
3111 size = sizeof(vfs_statfs(mp)->f_mntfromname) - n;
3112 nfs_location_mntfromname(&nmp->nm_locations, firstloc,
3113 &vfs_statfs(mp)->f_mntfromname[n], size, 1);
3114 }
3115 }
3116
3117 /*
3118 * Get the root node/attributes from the NFS server and
3119 * do any basic, version-specific setup.
3120 */
3121 error = nmp->nm_funcs->nf_mount(nmp, ctx, &np);
3122 nfsmerr_if(error);
3123
3124 /*
3125 * A reference count is needed on the node representing the
3126 * remote root. If this object is not persistent, then backward
3127 * traversals of the mount point (i.e. "..") will not work if
3128 * the node gets flushed out of the cache.
3129 */
3130 nmp->nm_dnp = np;
3131 *vpp = NFSTOV(np);
3132
3133 /* get usecount and drop iocount */
3134 error = vnode_ref(*vpp);
3135 vnode_put(*vpp);
3136 if (error) {
3137 vnode_recycle(*vpp);
3138 goto nfsmerr;
3139 }
3140
3141 /*
3142 * Do statfs to ensure static info gets set to reasonable values.
3143 */
3144 if ((error = nmp->nm_funcs->nf_update_statfs(nmp, ctx))) {
3145 int error2 = vnode_getwithref(*vpp);
3146 vnode_rele(*vpp);
3147 if (!error2) {
3148 vnode_put(*vpp);
3149 }
3150 vnode_recycle(*vpp);
3151 goto nfsmerr;
3152 }
3153 sbp = vfs_statfs(mp);
3154 sbp->f_bsize = nmp->nm_fsattr.nfsa_bsize;
3155 sbp->f_blocks = nmp->nm_fsattr.nfsa_space_total / sbp->f_bsize;
3156 sbp->f_bfree = nmp->nm_fsattr.nfsa_space_free / sbp->f_bsize;
3157 sbp->f_bavail = nmp->nm_fsattr.nfsa_space_avail / sbp->f_bsize;
3158 sbp->f_bused = (nmp->nm_fsattr.nfsa_space_total / sbp->f_bsize) -
3159 (nmp->nm_fsattr.nfsa_space_free / sbp->f_bsize);
3160 sbp->f_files = nmp->nm_fsattr.nfsa_files_total;
3161 sbp->f_ffree = nmp->nm_fsattr.nfsa_files_free;
3162 sbp->f_iosize = nfs_iosize;
3163
3164 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SET_MOUNT_OWNER)) {
3165 sbp->f_owner = set_owner;
3166 }
3167
3168 /*
3169 * Calculate the size used for I/O buffers. Use the larger
3170 * of the two sizes to minimise NFS requests but make sure
3171 * that it is at least one VM page to avoid wasting buffer
3172 * space and to allow easy mmapping of I/O buffers.
3173 * The read/write RPC calls handle the splitting up of
3174 * buffers into multiple requests if the buffer size is
3175 * larger than the I/O size.
3176 */
3177 iosize = max(nmp->nm_rsize, nmp->nm_wsize);
3178 if (iosize < PAGE_SIZE) {
3179 iosize = PAGE_SIZE;
3180 }
3181 nmp->nm_biosize = trunc_page_32(iosize);
3182
3183 /* For NFSv3 and greater, there is a (relatively) reliable ACCESS call. */
3184 if (nmp->nm_vers > NFS_VER2 && !NMFLAG(nmp, NOOPAQUE_AUTH)) {
3185 vfs_setauthopaqueaccess(mp);
3186 }
3187
3188 switch (nmp->nm_lockmode) {
3189 case NFS_LOCK_MODE_DISABLED:
3190 break;
3191 case NFS_LOCK_MODE_LOCAL:
3192 vfs_setlocklocal(nmp->nm_mountp);
3193 break;
3194 case NFS_LOCK_MODE_ENABLED:
3195 default:
3196 if (nmp->nm_vers <= NFS_VER3) {
3197 nfs_lockd_mount_register(nmp);
3198 }
3199 break;
3200 }
3201
3202 /* success! */
3203 lck_mtx_lock(&nmp->nm_lock);
3204 nmp->nm_state |= NFSSTA_MOUNTED;
3205
3206 if (nfs_split_open_owner) {
3207 nmp->nm_state |= NFSSTA_SPLIT_OPEN_OWNER;
3208 printf("%s: Open owner is now based on both PID and UID for mount (%s from %s)\n", __FUNCTION__, vfs_statfs(mp)->f_mntfromname, vfs_statfs(mp)->f_mntonname);
3209 }
3210
3211 lck_mtx_unlock(&nmp->nm_lock);
3212 return 0;
3213 nfsmerr:
3214 nfs_mount_drain_and_cleanup(nmp);
3215 return error;
3216 }
3217
3218 #if CONFIG_TRIGGERS
3219
3220 #if CONFIG_NFS4
3221 #define __nfs4_unused /* nothing */
3222 #else
3223 #define __nfs4_unused __unused
3224 #endif
3225
3226 /*
3227 * We've detected a file system boundary on the server and
3228 * need to mount a new file system so that our file systems
3229 * MIRROR the file systems on the server.
3230 *
3231 * Build the mount arguments for the new mount and call kernel_mount().
3232 */
/*
 * nfs_mirror_mount_domount
 *
 * dvp - the NFS directory vnode containing the trigger
 * vp  - the trigger vnode at the server-side file system boundary
 * ctx - caller's VFS context (used only when NFSv4 is configured, for
 *       fetching referral fs locations)
 *
 * Builds XDR-encoded mount arguments for the new (mirror) mount by copying
 * and transforming the current mount's saved arguments (nmp->nm_args), then
 * calls vfs_mount_at_path() to perform the mount.  Returns 0 on success or
 * an errno value.
 */
int
nfs_mirror_mount_domount(vnode_t dvp, vnode_t vp, __nfs4_unused vfs_context_t ctx)
{
	nfsnode_t np = VTONFS(vp);
#if CONFIG_NFS4
	nfsnode_t dnp = VTONFS(dvp);
#endif
	struct nfsmount *nmp = NFSTONMP(np);
	char fstype[MFSTYPENAMELEN], *mntfromname = NULL, *path = NULL, *relpath, *p, *cp;
	int error = 0, pathbuflen = MAXPATHLEN, i, mntflags = 0, referral, skipcopy = 0;
	size_t nlen, rlen, mlen, mlen2, count;
	struct xdrbuf xb, xbnew;
	uint32_t mattrs[NFS_MATTR_BITMAP_LEN];
	uint32_t newmattrs[NFS_MATTR_BITMAP_LEN];
	uint32_t newmflags[NFS_MFLAG_BITMAP_LEN];
	uint32_t newmflags_mask[NFS_MFLAG_BITMAP_LEN];
	uint32_t val, relpathcomps;
	uint64_t argslength = 0, argslength_offset, attrslength_offset, end_offset;
	uint32_t numlocs, loc, numserv, serv, numaddr, addr, numcomp, comp;
	char buf[XDRWORD];
	struct nfs_fs_locations nfsls;

	/*
	 * A referral trigger means the server redirected us elsewhere: the new
	 * mount will use fs locations fetched from the server (nfsls) instead
	 * of this node's file handle.
	 */
	referral = (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL);
	if (referral) {
		bzero(&nfsls, sizeof(nfsls));
	}

	xb_init(&xbnew, XDRBUF_NONE);

	/* bail if the mount is gone or being forcibly torn down */
	if (!nmp || (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
		return ENXIO;
	}

	/* allocate a couple path buffers we need */
	mntfromname = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_NOFAIL);
	path = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_NOFAIL);

	/* get the path for the directory being mounted on */
	error = vn_getpath(vp, path, &pathbuflen);
	if (error) {
		error = ENOMEM;
		goto nfsmerr;
	}

	/*
	 * Set up the mntfromname for the new mount based on the
	 * current mount's mntfromname and the directory's path
	 * relative to the current mount's mntonname.
	 * Set up relpath to point at the relative path on the current mount.
	 * Also, count the number of components in relpath.
	 * We'll be adding those to each fs location path in the new args.
	 */
	nlen = strlcpy(mntfromname, vfs_statfs(nmp->nm_mountp)->f_mntfromname, MAXPATHLEN);
	if ((nlen > 0) && (mntfromname[nlen - 1] == '/')) { /* avoid double '/' in new name */
		mntfromname[nlen - 1] = '\0';
		nlen--;
	}
	/* relpath points at the suffix strlcat appends below */
	relpath = mntfromname + nlen;
	nlen = strlcat(mntfromname, path + strlen(vfs_statfs(nmp->nm_mountp)->f_mntonname), MAXPATHLEN);
	if (nlen >= MAXPATHLEN) {
		error = ENAMETOOLONG;
		goto nfsmerr;
	}
	/* count the number of components in relpath */
	p = relpath;
	while (*p && (*p == '/')) {
		p++;
	}
	relpathcomps = 0;
	while (*p) {
		relpathcomps++;
		while (*p && (*p != '/')) {
			p++;
		}
		while (*p && (*p == '/')) {
			p++;
		}
	}

	/* grab a copy of the file system type */
	vfs_name(vnode_mount(vp), fstype);

	/* for referrals, fetch the fs locations */
	if (referral) {
		const char *vname = vnode_getname(NFSTOV(np));
		if (!vname) {
			error = ENOENT;
		}
#if CONFIG_NFS4
		else {
			error = nfs4_get_fs_locations(nmp, dnp, NULL, 0, vname, ctx, &nfsls);
			vnode_putname(vname);
			if (!error && (nfsls.nl_numlocs < 1)) {
				error = ENOENT;
			}
		}
#endif
		nfsmerr_if(error);
	}

	/* set up NFS mount args based on current mount args */

	/*
	 * Helper macros for transcribing XDR words/opaques from the old args
	 * (XBSRC) into the new args (XBDST).  Note that both consult the
	 * function-local `skipcopy` flag: when set, values are consumed from
	 * the source but NOT written to the destination (used to drop the
	 * original fs locations when building referral args).
	 */
#define xb_copy_32(E, XBSRC, XBDST, V) \
	do { \
	        if (E) break; \
	        xb_get_32((E), (XBSRC), (V)); \
	        if (skipcopy) break; \
	        xb_add_32((E), (XBDST), (V)); \
	} while (0)
#define xb_copy_opaque(E, XBSRC, XBDST) \
	do { \
	        uint32_t __count = 0, __val; \
	        xb_copy_32((E), (XBSRC), (XBDST), __count); \
	        if (E) break; \
	        __count = nfsm_rndup(__count); \
	        __count /= XDRWORD; \
	        while (__count-- > 0) \
	                xb_copy_32((E), (XBSRC), (XBDST), __val); \
	} while (0)

	/* peek at version and args length, then re-init over the full args */
	xb_init_buffer(&xb, nmp->nm_args, 2 * XDRWORD);
	xb_get_32(error, &xb, val); /* version */
	xb_get_32(error, &xb, argslength); /* args length */
	xb_init_buffer(&xb, nmp->nm_args, argslength);

	xb_init_buffer(&xbnew, NULL, 0);
	xb_copy_32(error, &xb, &xbnew, val); /* version */
	/* remember where the length fields land so they can be patched at the end */
	argslength_offset = xb_offset(&xbnew);
	xb_copy_32(error, &xb, &xbnew, val); /* args length */
	xb_copy_32(error, &xb, &xbnew, val); /* XDR args version */
	count = NFS_MATTR_BITMAP_LEN;
	xb_get_bitmap(error, &xb, mattrs, count); /* mount attribute bitmap */
	nfsmerr_if(error);
	for (i = 0; i < NFS_MATTR_BITMAP_LEN; i++) {
		newmattrs[i] = mattrs[i];
	}
	/* referrals carry fs locations instead of a file handle (and no mntfrom) */
	if (referral) {
		NFS_BITMAP_SET(newmattrs, NFS_MATTR_FS_LOCATIONS);
		NFS_BITMAP_CLR(newmattrs, NFS_MATTR_MNTFROM);
	} else {
		NFS_BITMAP_SET(newmattrs, NFS_MATTR_FH);
	}
	NFS_BITMAP_SET(newmattrs, NFS_MATTR_FLAGS);
	NFS_BITMAP_SET(newmattrs, NFS_MATTR_MNTFLAGS);
	NFS_BITMAP_SET(newmattrs, NFS_MATTR_SET_MOUNT_OWNER);
	xb_add_bitmap(error, &xbnew, newmattrs, NFS_MATTR_BITMAP_LEN);
	attrslength_offset = xb_offset(&xbnew);
	xb_copy_32(error, &xb, &xbnew, val); /* attrs length */
	NFS_BITMAP_ZERO(newmflags_mask, NFS_MFLAG_BITMAP_LEN);
	NFS_BITMAP_ZERO(newmflags, NFS_MFLAG_BITMAP_LEN);
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FLAGS)) {
		count = NFS_MFLAG_BITMAP_LEN;
		xb_get_bitmap(error, &xb, newmflags_mask, count); /* mount flag mask bitmap */
		count = NFS_MFLAG_BITMAP_LEN;
		xb_get_bitmap(error, &xb, newmflags, count); /* mount flag bitmap */
	}
	/* mark the new mount as ephemeral so it gets harvested when idle */
	NFS_BITMAP_SET(newmflags_mask, NFS_MFLAG_EPHEMERAL);
	NFS_BITMAP_SET(newmflags, NFS_MFLAG_EPHEMERAL);
	xb_add_bitmap(error, &xbnew, newmflags_mask, NFS_MFLAG_BITMAP_LEN);
	xb_add_bitmap(error, &xbnew, newmflags, NFS_MFLAG_BITMAP_LEN);
	/*
	 * The attributes below are copied verbatim, in the exact order the
	 * mount-args XDR stream encodes them; the order of these checks must
	 * match the encoder's.
	 */
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION)) {
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_MINOR_VERSION)) {
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION_RANGE)) {
		xb_copy_32(error, &xb, &xbnew, val);
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READ_SIZE)) {
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_WRITE_SIZE)) {
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READDIR_SIZE)) {
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READAHEAD)) {
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_REG_MIN)) {
		xb_copy_32(error, &xb, &xbnew, val);
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_REG_MAX)) {
		xb_copy_32(error, &xb, &xbnew, val);
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MIN)) {
		xb_copy_32(error, &xb, &xbnew, val);
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MAX)) {
		xb_copy_32(error, &xb, &xbnew, val);
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_LOCK_MODE)) {
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SECURITY)) {
		xb_copy_32(error, &xb, &xbnew, count);
		while (!error && (count-- > 0)) {
			xb_copy_32(error, &xb, &xbnew, val);
		}
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_KERB_ETYPE)) {
		xb_copy_32(error, &xb, &xbnew, count);
		/*
		 * NOTE(review): a -1 word is inserted ahead of the copied
		 * etype list — presumably a selected-etype sentinel consumed
		 * by the mount args decoder; confirm against the decoder.
		 */
		xb_add_32(error, &xbnew, -1);
		while (!error && (count-- > 0)) {
			xb_copy_32(error, &xb, &xbnew, val);
		}
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MAX_GROUP_LIST)) {
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOCKET_TYPE)) {
		xb_copy_opaque(error, &xb, &xbnew);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_PORT)) {
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MOUNT_PORT)) {
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_REQUEST_TIMEOUT)) {
		xb_copy_32(error, &xb, &xbnew, val);
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOFT_RETRY_COUNT)) {
		xb_copy_32(error, &xb, &xbnew, val);
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_DEAD_TIMEOUT)) {
		xb_copy_32(error, &xb, &xbnew, val);
		xb_copy_32(error, &xb, &xbnew, val);
	}
	/* the old file handle (if any) is consumed but never copied */
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FH)) {
		xb_get_32(error, &xb, count);
		xb_skip(error, &xb, count);
	}
	if (!referral) {
		/* set the initial file handle to the directory's file handle */
		xb_add_fh(error, &xbnew, np->n_fhp, np->n_fhsize);
	}
	/* copy/extend/skip fs locations */
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FS_LOCATIONS)) {
		numlocs = numserv = numaddr = numcomp = 0;
		if (referral) { /* don't copy the fs locations for a referral */
			skipcopy = 1;
		}
		xb_copy_32(error, &xb, &xbnew, numlocs); /* location count */
		for (loc = 0; !error && (loc < numlocs); loc++) {
			xb_copy_32(error, &xb, &xbnew, numserv); /* server count */
			for (serv = 0; !error && (serv < numserv); serv++) {
				xb_copy_opaque(error, &xb, &xbnew); /* server name */
				xb_copy_32(error, &xb, &xbnew, numaddr); /* address count */
				for (addr = 0; !error && (addr < numaddr); addr++) {
					xb_copy_opaque(error, &xb, &xbnew); /* address */
				}
				xb_copy_opaque(error, &xb, &xbnew); /* server info */
			}
			/* pathname */
			xb_get_32(error, &xb, numcomp); /* component count */
			if (!skipcopy) {
				/* 64-bit sum so the overflow check below is reliable */
				uint64_t totalcomps = numcomp + relpathcomps;

				/* set error to ERANGE in the event of overflow */
				if (totalcomps > UINT32_MAX) {
					nfsmerr_if((error = ERANGE));
				}

				xb_add_32(error, &xbnew, (uint32_t) totalcomps); /* new component count */
			}
			for (comp = 0; !error && (comp < numcomp); comp++) {
				xb_copy_opaque(error, &xb, &xbnew); /* component */
			}
			/* add additional components (the relative path to the trigger) */
			p = relpath;
			while (*p && (*p == '/')) {
				p++;
			}
			while (*p && !error) {
				cp = p;
				while (*p && (*p != '/')) {
					p++;
				}
				xb_add_string(error, &xbnew, cp, (p - cp)); /* component */
				while (*p && (*p == '/')) {
					p++;
				}
			}
			xb_copy_opaque(error, &xb, &xbnew); /* fs location info */
		}
		if (referral) {
			skipcopy = 0;
		}
	}
	if (referral) {
		/* add referral's fs locations */
		xb_add_32(error, &xbnew, nfsls.nl_numlocs); /* FS_LOCATIONS */
		for (loc = 0; !error && (loc < nfsls.nl_numlocs); loc++) {
			xb_add_32(error, &xbnew, nfsls.nl_locations[loc]->nl_servcount);
			for (serv = 0; !error && (serv < nfsls.nl_locations[loc]->nl_servcount); serv++) {
				xb_add_string(error, &xbnew, nfsls.nl_locations[loc]->nl_servers[serv]->ns_name,
				    strlen(nfsls.nl_locations[loc]->nl_servers[serv]->ns_name));
				xb_add_32(error, &xbnew, nfsls.nl_locations[loc]->nl_servers[serv]->ns_addrcount);
				for (addr = 0; !error && (addr < nfsls.nl_locations[loc]->nl_servers[serv]->ns_addrcount); addr++) {
					xb_add_string(error, &xbnew, nfsls.nl_locations[loc]->nl_servers[serv]->ns_addresses[addr],
					    strlen(nfsls.nl_locations[loc]->nl_servers[serv]->ns_addresses[addr]));
				}
				xb_add_32(error, &xbnew, 0); /* empty server info */
			}
			xb_add_32(error, &xbnew, nfsls.nl_locations[loc]->nl_path.np_compcount);
			for (comp = 0; !error && (comp < nfsls.nl_locations[loc]->nl_path.np_compcount); comp++) {
				xb_add_string(error, &xbnew, nfsls.nl_locations[loc]->nl_path.np_components[comp],
				    strlen(nfsls.nl_locations[loc]->nl_path.np_components[comp]));
			}
			xb_add_32(error, &xbnew, 0); /* empty fs location info */
		}
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MNTFLAGS)) {
		xb_get_32(error, &xb, mntflags);
	}
	/*
	 * We add the following mount flags to the ones for the mounted-on mount:
	 * MNT_DONTBROWSE - to keep the mount from showing up as a separate volume
	 * MNT_AUTOMOUNTED - to keep DiskArb from retriggering the mount after
	 *                   an unmount (looking for /.autodiskmounted)
	 */
	mntflags |= (MNT_AUTOMOUNTED | MNT_DONTBROWSE);
	xb_add_32(error, &xbnew, mntflags);
	if (!referral && NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MNTFROM)) {
		/* copy mntfrom string and add relpath */
		rlen = strlen(relpath);
		xb_get_32(error, &xb, mlen);
		nfsmerr_if(error);
		/* mlen2 = old length + optional separator + relative path length */
		mlen2 = mlen + ((relpath[0] != '/') ? 1 : 0) + rlen;
		xb_add_32(error, &xbnew, mlen2);
		count = mlen / XDRWORD;
		/* copy the original string */
		while (count-- > 0) {
			xb_copy_32(error, &xb, &xbnew, val);
		}
		/* copy the trailing partial word, unpadded */
		if (!error && (mlen % XDRWORD)) {
			error = xb_get_bytes(&xb, buf, mlen % XDRWORD, 0);
			if (!error) {
				error = xb_add_bytes(&xbnew, buf, mlen % XDRWORD, 1);
			}
		}
		/* insert a '/' if the relative path doesn't start with one */
		if (!error && (relpath[0] != '/')) {
			buf[0] = '/';
			error = xb_add_bytes(&xbnew, buf, 1, 1);
		}
		/* add the additional relative path */
		if (!error) {
			error = xb_add_bytes(&xbnew, relpath, rlen, 1);
		}
		/* make sure the resulting string has the right number of pad bytes */
		if (!error && (mlen2 != nfsm_rndup(mlen2))) {
			bzero(buf, sizeof(buf));
			count = nfsm_rndup(mlen2) - mlen2;
			error = xb_add_bytes(&xbnew, buf, count, 1);
		}
	}
	/*
	 * The following string copies rely on the fact that we already validated
	 * these data when creating the initial mount point.
	 */
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_REALM)) {
		xb_add_string(error, &xbnew, nmp->nm_realm, strlen(nmp->nm_realm));
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_PRINCIPAL)) {
		xb_add_string(error, &xbnew, nmp->nm_principal, strlen(nmp->nm_principal));
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SVCPRINCIPAL)) {
		xb_add_string(error, &xbnew, nmp->nm_sprinc, strlen(nmp->nm_sprinc));
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_LOCAL_NFS_PORT)) {
		xb_add_string(error, &xbnew, nmp->nm_nfs_localport, strlen(nmp->nm_nfs_localport));
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_LOCAL_MOUNT_PORT)) {
		xb_add_string(error, &xbnew, nmp->nm_mount_localport, strlen(nmp->nm_mount_localport));
	}
	if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SET_MOUNT_OWNER)) {
		/* drop embedded owner value */
		xb_get_32(error, &xb, count);
	}
	/* New mount always gets same owner as this mount */
	xb_add_32(error, &xbnew, vfs_statfs(vnode_mount(vp))->f_owner);
	xb_build_done(error, &xbnew);

	/* update opaque counts (patch the length fields recorded earlier) */
	end_offset = xb_offset(&xbnew);
	if (!error) {
		error = xb_seek(&xbnew, argslength_offset);
		argslength = end_offset - argslength_offset + XDRWORD /*version*/;
		xb_add_32(error, &xbnew, argslength);
	}
	if (!error) {
		error = xb_seek(&xbnew, attrslength_offset);
		xb_add_32(error, &xbnew, end_offset - attrslength_offset - XDRWORD /*don't include length field*/);
	}
	nfsmerr_if(error);

	/*
	 * For kernel_mount() call, use the existing mount flags (instead of the
	 * original flags) because flags like MNT_NOSUID and MNT_NODEV may have
	 * been silently enforced. Also, in terms of MACF, the _kernel_ is
	 * performing the mount (and enforcing all of the mount options), so we
	 * use the kernel context for the mount call.
	 */
	mntflags = vfs_flags(vnode_mount(vp)) & MNT_VISFLAGMASK;
	mntflags |= (MNT_AUTOMOUNTED | MNT_DONTBROWSE);

	/* do the mount */
	error = vfs_mount_at_path(fstype, path, dvp, vp, xb_buffer_base(&xbnew), argslength,
	    mntflags, VFS_MOUNT_FLAG_PERMIT_UNMOUNT | VFS_MOUNT_FLAG_NOAUTH);

nfsmerr:
	if (error) {
		printf("nfs: mirror mount of %s on %s failed (%d)\n",
		    mntfromname, path, error);
	}
	/* clean up */
	xb_cleanup(&xbnew);
	if (referral) {
		nfs_fs_locations_cleanup(&nfsls);
	}
	NFS_ZFREE(ZV_NAMEI, path);
	NFS_ZFREE(ZV_NAMEI, mntfromname);
	if (!error) {
		/* make sure the harvester is running to reap this ephemeral mount later */
		nfs_ephemeral_mount_harvester_start();
	}
	return error;
}
3670
3671 /*
3672 * trigger vnode functions
3673 */
3674 #define NFS_TRIGGER_DEBUG 1
3675
/*
 * Trigger-resolve callback for a mirror-mount trigger vnode.
 *
 * Decides whether the path operation (pop) should cause the mirrored
 * file system to be mounted on vp, performs the mount via
 * nfs_mirror_mount_domount() if so, and returns a resolver result
 * carrying the node's trigger sequence number (n_trigseq), which is
 * bumped on every successful state change.
 */
resolver_result_t
nfs_mirror_mount_trigger_resolve(
	vnode_t vp,
	const struct componentname *cnp,
	enum path_operation pop,
	__unused int flags,
	__unused void *data,
	vfs_context_t ctx)
{
	nfsnode_t np = VTONFS(vp);
	vnode_t pvp = NULLVP;   /* parent vnode, obtained only if we attempt the mount */
	int error = 0;
	int didBusy = 0;        /* set once nfs_node_set_busy() succeeds, so we clear it on exit */
	resolver_result_t result;

	/*
	 * We have a trigger node that doesn't have anything mounted on it yet.
	 * We'll do the mount if either:
	 * (a) this isn't the last component of the path OR
	 * (b) this is an op that looks like it should trigger the mount.
	 */
	if (cnp->cn_flags & ISLASTCN) {
		switch (pop) {
		case OP_MOUNT:
		case OP_UNMOUNT:
		case OP_STATFS:
		case OP_LINK:
		case OP_UNLINK:
		case OP_RENAME:
		case OP_MKNOD:
		case OP_MKFIFO:
		case OP_SYMLINK:
		case OP_ACCESS:
		case OP_GETATTR:
		case OP_MKDIR:
		case OP_RMDIR:
		case OP_REVOKE:
		case OP_GETXATTR:
		case OP_LISTXATTR:
			/* don't perform the mount for these operations */
			result = vfs_resolver_result(np->n_trigseq, RESOLVER_NOCHANGE, 0);
#ifdef NFS_TRIGGER_DEBUG
			NP(np, "nfs trigger RESOLVE: no change, last %d nameiop %d, seq %d",
			    (cnp->cn_flags & ISLASTCN) ? 1 : 0, cnp->cn_nameiop, np->n_trigseq);
#endif
			return result;
		case OP_OPEN:
		case OP_CHDIR:
		case OP_CHROOT:
		case OP_TRUNCATE:
		case OP_COPYFILE:
		case OP_PATHCONF:
		case OP_READLINK:
		case OP_SETATTR:
		case OP_EXCHANGEDATA:
		case OP_SEARCHFS:
		case OP_FSCTL:
		case OP_SETXATTR:
		case OP_REMOVEXATTR:
		default:
			/* go ahead and do the mount */
			break;
		}
	}

	if (vnode_mountedhere(vp) != NULL) {
		/*
		 * Um... there's already something mounted.
		 * Been there. Done that. Let's just say it succeeded.
		 */
		error = 0;
		goto skipmount;
	}

	/* Serialize with other operations on this node before mounting. */
	if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
		result = vfs_resolver_result(np->n_trigseq, RESOLVER_ERROR, error);
#ifdef NFS_TRIGGER_DEBUG
		NP(np, "nfs trigger RESOLVE: busy error %d, last %d nameiop %d, seq %d",
		    error, (cnp->cn_flags & ISLASTCN) ? 1 : 0, cnp->cn_nameiop, np->n_trigseq);
#endif
		return result;
	}
	didBusy = 1;

	/* Check again, in case the mount happened while we were setting busy */
	if (vnode_mountedhere(vp) != NULL) {
		/* Been there. Done that. Let's just say it succeeded. */
		error = 0;
		goto skipmount;
	}
	/* A disarmed trigger (NDISARMTRIGGER) cancels the mount attempt. */
	nfs_node_lock_force(np);
	if (np->n_flag & NDISARMTRIGGER) {
		error = ECANCELED;
		nfs_node_unlock(np);
		goto skipmount;
	}
	nfs_node_unlock(np);

	/* The mount needs the parent directory vnode. */
	pvp = vnode_getparent(vp);
	if (pvp == NULLVP) {
		error = EINVAL;
	}
	if (!error) {
		error = nfs_mirror_mount_domount(pvp, vp, ctx);
	}
skipmount:
	if (!error) {
		np->n_trigseq++;
	}
	result = vfs_resolver_result(np->n_trigseq, error ? RESOLVER_ERROR : RESOLVER_RESOLVED, error);
#ifdef NFS_TRIGGER_DEBUG
	NP(np, "nfs trigger RESOLVE: %s %d, last %d nameiop %d, seq %d",
	    error ? "error" : "resolved", error,
	    (cnp->cn_flags & ISLASTCN) ? 1 : 0, cnp->cn_nameiop, np->n_trigseq);
#endif

	/* Drop the parent reference from vnode_getparent(), if taken. */
	if (pvp != NULLVP) {
		vnode_put(pvp);
	}
	if (didBusy) {
		nfs_node_clear_busy(np);
	}
	return result;
}
3800
3801 resolver_result_t
nfs_mirror_mount_trigger_unresolve(vnode_t vp,int flags,__unused void * data,vfs_context_t ctx)3802 nfs_mirror_mount_trigger_unresolve(
3803 vnode_t vp,
3804 int flags,
3805 __unused void *data,
3806 vfs_context_t ctx)
3807 {
3808 nfsnode_t np = VTONFS(vp);
3809 mount_t mp;
3810 int error;
3811 resolver_result_t result;
3812
3813 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
3814 result = vfs_resolver_result(np->n_trigseq, RESOLVER_ERROR, error);
3815 #ifdef NFS_TRIGGER_DEBUG
3816 NP(np, "nfs trigger UNRESOLVE: busy error %d, seq %d", error, np->n_trigseq);
3817 #endif
3818 return result;
3819 }
3820
3821 mp = vnode_mountedhere(vp);
3822 if (!mp) {
3823 error = EINVAL;
3824 }
3825 if (!error) {
3826 error = vfs_unmountbyfsid(&(vfs_statfs(mp)->f_fsid), flags, ctx);
3827 }
3828 if (!error) {
3829 np->n_trigseq++;
3830 }
3831 result = vfs_resolver_result(np->n_trigseq, error ? RESOLVER_ERROR : RESOLVER_UNRESOLVED, error);
3832 #ifdef NFS_TRIGGER_DEBUG
3833 NP(np, "nfs trigger UNRESOLVE: %s %d, seq %d",
3834 error ? "error" : "unresolved", error, np->n_trigseq);
3835 #endif
3836 nfs_node_clear_busy(np);
3837 return result;
3838 }
3839
3840 resolver_result_t
nfs_mirror_mount_trigger_rearm(vnode_t vp,__unused int flags,__unused void * data,vfs_context_t ctx)3841 nfs_mirror_mount_trigger_rearm(
3842 vnode_t vp,
3843 __unused int flags,
3844 __unused void *data,
3845 vfs_context_t ctx)
3846 {
3847 nfsnode_t np = VTONFS(vp);
3848 int error;
3849 resolver_result_t result;
3850
3851 if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
3852 result = vfs_resolver_result(np->n_trigseq, RESOLVER_ERROR, error);
3853 #ifdef NFS_TRIGGER_DEBUG
3854 NP(np, "nfs trigger REARM: busy error %d, seq %d", error, np->n_trigseq);
3855 #endif
3856 return result;
3857 }
3858
3859 np->n_trigseq++;
3860 result = vfs_resolver_result(np->n_trigseq,
3861 vnode_mountedhere(vp) ? RESOLVER_RESOLVED : RESOLVER_UNRESOLVED, 0);
3862 #ifdef NFS_TRIGGER_DEBUG
3863 NP(np, "nfs trigger REARM: %s, seq %d",
3864 vnode_mountedhere(vp) ? "resolved" : "unresolved", np->n_trigseq);
3865 #endif
3866 nfs_node_clear_busy(np);
3867 return result;
3868 }
3869
3870 /*
3871 * Periodically attempt to unmount ephemeral (mirror) mounts in an attempt to limit
3872 * the number of unused mounts.
3873 */
3874
#define NFS_EPHEMERAL_MOUNT_HARVEST_INTERVAL 120 /* how often the harvester runs (seconds) */
/* Per-scan state passed through vfs_iterate() to the harvester callback. */
struct nfs_ephemeral_mount_harvester_info {
	fsid_t fsid;            /* FSID that we need to try to unmount */
	uint32_t mountcount;    /* count of ephemeral mounts seen in scan */
};
/* various globals for the harvester; both protected by nfs_global_mutex */
static thread_call_t nfs_ephemeral_mount_harvester_timer = NULL;
static int nfs_ephemeral_mount_harvester_on = 0;

/* not exported by the headers we include, so declare it here */
kern_return_t thread_terminate(thread_t);
3885
/*
 * vfs_iterate() callback: count ephemeral NFS mounts and attempt to
 * unmount idle ones.
 *
 * We hold a mount iteration reference on mp, so we cannot unmount mp
 * itself from inside the callback.  Instead we stash its fsid in
 * hinfo->fsid and unmount it on a later callback invocation (the last
 * candidate is unmounted by the caller after the iteration completes).
 */
static int
nfs_ephemeral_mount_harvester_callback(mount_t mp, void *arg)
{
	struct nfs_ephemeral_mount_harvester_info *hinfo = arg;
	struct nfsmount *nmp;
	struct timeval now;

	/* only interested in NFS mounts */
	if (strcmp(vfs_statfs(mp)->f_fstypename, "nfs")) {
		return VFS_RETURNED;
	}
	nmp = VFSTONFS(mp);
	/* ... and only ephemeral (mirror) mounts */
	if (!nmp || !NMFLAG(nmp, EPHEMERAL)) {
		return VFS_RETURNED;
	}
	hinfo->mountcount++;

	/* avoid unmounting mounts that have been triggered within the last harvest interval */
	/* (nm_mounttime >> 32 appears to be the mount time in whole seconds — see setter) */
	microtime(&now);
	if ((nmp->nm_mounttime >> 32) > ((uint32_t)now.tv_sec - NFS_EPHEMERAL_MOUNT_HARVEST_INTERVAL)) {
		return VFS_RETURNED;
	}

	if (hinfo->fsid.val[0] || hinfo->fsid.val[1]) {
		/* attempt to unmount previously-found ephemeral mount */
		vfs_unmountbyfsid(&hinfo->fsid, 0, vfs_context_kernel());
		hinfo->fsid.val[0] = hinfo->fsid.val[1] = 0;
	}

	/*
	 * We can't call unmount here since we hold a mount iter ref
	 * on mp so save its fsid for the next call iteration to unmount.
	 */
	hinfo->fsid.val[0] = vfs_statfs(mp)->f_fsid.val[0];
	hinfo->fsid.val[1] = vfs_statfs(mp)->f_fsid.val[1];

	return VFS_RETURNED;
}
3923
3924 /*
3925 * Spawn a thread to do the ephemeral mount harvesting.
3926 */
3927 static void
nfs_ephemeral_mount_harvester_timer_func(void)3928 nfs_ephemeral_mount_harvester_timer_func(void)
3929 {
3930 thread_t thd;
3931
3932 if (kernel_thread_start(nfs_ephemeral_mount_harvester, NULL, &thd) == KERN_SUCCESS) {
3933 thread_deallocate(thd);
3934 }
3935 }
3936
3937 /*
3938 * Iterate all mounts looking for NFS ephemeral mounts to try to unmount.
3939 */
/*
 * Harvester thread body: iterate all mounts looking for NFS ephemeral
 * mounts to try to unmount, then either re-arm the harvest timer (if any
 * ephemeral mounts remain) or shut the harvester down.  The thread
 * terminates itself when done.
 */
void
nfs_ephemeral_mount_harvester(__unused void *arg, __unused wait_result_t wr)
{
	struct nfs_ephemeral_mount_harvester_info hinfo;
	uint64_t deadline;

	hinfo.mountcount = 0;
	hinfo.fsid.val[0] = hinfo.fsid.val[1] = 0;
	vfs_iterate(VFS_ITERATE_TAIL_FIRST, nfs_ephemeral_mount_harvester_callback, &hinfo);
	/* the callback defers each candidate's unmount; finish the last one here */
	if (hinfo.fsid.val[0] || hinfo.fsid.val[1]) {
		/* attempt to unmount last found ephemeral mount */
		vfs_unmountbyfsid(&hinfo.fsid, 0, vfs_context_kernel());
	}

	lck_mtx_lock(&nfs_global_mutex);
	if (!hinfo.mountcount) {
		/* no more ephemeral mounts - don't need timer */
		nfs_ephemeral_mount_harvester_on = 0;
	} else {
		/* re-arm the timer */
		clock_interval_to_deadline(NFS_EPHEMERAL_MOUNT_HARVEST_INTERVAL, NSEC_PER_SEC, &deadline);
		thread_call_enter_delayed(nfs_ephemeral_mount_harvester_timer, deadline);
		nfs_ephemeral_mount_harvester_on = 1;
	}
	lck_mtx_unlock(&nfs_global_mutex);

	/* thread done */
	thread_terminate(current_thread());
}
3969
3970 /*
3971 * Make sure the NFS ephemeral mount harvester timer is running.
3972 */
3973 void
nfs_ephemeral_mount_harvester_start(void)3974 nfs_ephemeral_mount_harvester_start(void)
3975 {
3976 uint64_t deadline;
3977
3978 lck_mtx_lock(&nfs_global_mutex);
3979 if (nfs_ephemeral_mount_harvester_on) {
3980 lck_mtx_unlock(&nfs_global_mutex);
3981 return;
3982 }
3983 if (nfs_ephemeral_mount_harvester_timer == NULL) {
3984 nfs_ephemeral_mount_harvester_timer = thread_call_allocate((thread_call_func_t)nfs_ephemeral_mount_harvester_timer_func, NULL);
3985 }
3986 clock_interval_to_deadline(NFS_EPHEMERAL_MOUNT_HARVEST_INTERVAL, NSEC_PER_SEC, &deadline);
3987 thread_call_enter_delayed(nfs_ephemeral_mount_harvester_timer, deadline);
3988 nfs_ephemeral_mount_harvester_on = 1;
3989 lck_mtx_unlock(&nfs_global_mutex);
3990 }
3991
3992 #endif
3993
3994 /*
3995 * Send a STAT protocol request to the server to verify statd is running.
3996 * rpc-statd service, which responsible to provide locks for the NFS server, is disabled by default on Ubuntu.
3997 * Please see Radar 45969553 for more info.
3998 */
3999 int
nfs3_check_lockmode(struct nfsmount * nmp,struct sockaddr * sa,int sotype,int timeo)4000 nfs3_check_lockmode(struct nfsmount *nmp, struct sockaddr *sa, int sotype, int timeo)
4001 {
4002 struct sockaddr_storage ss;
4003 int error, port = 0;
4004
4005 if (nmp->nm_lockmode == NFS_LOCK_MODE_ENABLED) {
4006 if (sa->sa_len > sizeof(ss)) {
4007 return EINVAL;
4008 }
4009 bcopy(sa, &ss, MIN(sa->sa_len, sizeof(ss)));
4010 error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss, NULL, RPCPROG_STAT, RPCMNT_VER1, NM_OMFLAG(nmp, MNTUDP) ? SOCK_DGRAM : sotype, timeo);
4011 if (!error) {
4012 if (ss.ss_family == AF_INET) {
4013 port = ntohs(((struct sockaddr_in*)&ss)->sin_port);
4014 } else if (ss.ss_family == AF_INET6) {
4015 port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port);
4016 } else if (ss.ss_family == AF_LOCAL) {
4017 port = (((struct sockaddr_un*)&ss)->sun_path[0] != '\0');
4018 }
4019
4020 if (!port) {
4021 printf("nfs: STAT(NSM) rpc service is not available, unable to mount with current lock mode.\n");
4022 return EPROGUNAVAIL;
4023 }
4024 }
4025 }
4026 return 0;
4027 }
4028
4029 /*
4030 * Send a MOUNT protocol MOUNT request to the server to get the initial file handle (and security).
4031 */
/*
 * Send a MOUNT protocol MOUNT request for "path" to the server at "sa".
 *
 * On success, returns the initial file handle in *fh and (for v3) the
 * server's advertised security flavors in *sec.  If no mount port is
 * known, the portmapper is queried, retrying TCP then UDP.
 * Note: many nfsm_* macros below bail to the "nfsmout" label on error.
 */
int
nfs3_mount_rpc(struct nfsmount *nmp, struct sockaddr *sa, int sotype, int nfsvers, char *path, vfs_context_t ctx, int timeo, fhandle_t *fh, struct nfs_sec *sec)
{
	int error = 0, mntproto;
	thread_t thd = vfs_context_thread(ctx);
	kauth_cred_t cred = vfs_context_ucred(ctx);
	uint64_t xid = 0;
	size_t slen;
	struct nfsm_chain nmreq, nmrep;
	mbuf_t mreq;
	uint32_t mntvers, mntport, val;
	struct sockaddr_storage ss;
	struct sockaddr *saddr = (struct sockaddr*)&ss;
	struct sockaddr_un *sun = (struct sockaddr_un*)saddr;

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	/* MOUNT v1 pairs with NFSv2, MOUNT v3 with NFSv3 */
	mntvers = (nfsvers == NFS_VER2) ? RPCMNT_VER1 : RPCMNT_VER3;
	mntproto = (NM_OMFLAG(nmp, MNTUDP) || (sotype == SOCK_DGRAM)) ? IPPROTO_UDP : IPPROTO_TCP;
	sec->count = 0;

	/* work on a local copy of the address so we can rewrite the port */
	bcopy(sa, saddr, min(sizeof(ss), sa->sa_len));
	if (saddr->sa_family == AF_INET) {
		if (nmp->nm_mountport) {
			((struct sockaddr_in*)saddr)->sin_port = htons(nmp->nm_mountport);
		}
		mntport = ntohs(((struct sockaddr_in*)saddr)->sin_port);
	} else if (saddr->sa_family == AF_INET6) {
		if (nmp->nm_mountport) {
			((struct sockaddr_in6*)saddr)->sin6_port = htons(nmp->nm_mountport);
		}
		mntport = ntohs(((struct sockaddr_in6*)saddr)->sin6_port);
	} else { /* Local domain socket */
		mntport = ((struct sockaddr_un *)saddr)->sun_path[0]; /* Do we have an address? */
		mntproto = IPPROTO_TCP; /* XXX rpcbind only listens on streams sockets for now */
	}

	/* no port known yet: ask the portmapper, falling back TCP -> UDP */
	while (!mntport) {
		error = nfs_portmap_lookup(nmp, ctx, saddr, NULL, RPCPROG_MNT, mntvers,
		    mntproto == IPPROTO_UDP ? SOCK_DGRAM : SOCK_STREAM, timeo);
		nfsmout_if(error);
		if (saddr->sa_family == AF_INET) {
			mntport = ntohs(((struct sockaddr_in*)saddr)->sin_port);
		} else if (saddr->sa_family == AF_INET6) {
			mntport = ntohs(((struct sockaddr_in6*)saddr)->sin6_port);
		} else if (saddr->sa_family == AF_LOCAL) {
			mntport = ((struct sockaddr_un*)saddr)->sun_path[0];
		}
		if (!mntport) {
			/* if not found and TCP, then retry with UDP */
			if (mntproto == IPPROTO_UDP) {
				error = EPROGUNAVAIL;
				break;
			}
			mntproto = IPPROTO_UDP;
			/* restore the original address clobbered by the lookup */
			bcopy(sa, saddr, min(sizeof(ss), sa->sa_len));
			if (saddr->sa_family == AF_LOCAL) {
				strlcpy(sun->sun_path, RPCB_TICLTS_PATH, sizeof(sun->sun_path));
			}
		}
	}
	nfsmout_if(error || !mntport);

	/* MOUNT protocol MOUNT request */
	slen = strlen(path);
	nfsm_chain_build_alloc_init(error, &nmreq, NFSX_UNSIGNED + nfsm_rndup(slen));
	nfsm_chain_add_name(error, &nmreq, path, slen, nmp);
	nfsm_chain_build_done(error, &nmreq);
	nfsmout_if(error);
	error = nfsm_rpchead2(nmp, (mntproto == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM,
	    RPCPROG_MNT, mntvers, RPCMNT_MOUNT,
	    RPCAUTH_SYS, cred, NULL, nmreq.nmc_mhead, &xid, &mreq);
	nfsmout_if(error);
	/* mreq now owns the chain head; don't let cleanup free it twice */
	nmreq.nmc_mhead = NULL;
	error = nfs_aux_request(nmp, thd, saddr, NULL,
	    ((mntproto == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM),
	    mreq, R_XID32(xid), 1, timeo, &nmrep);
	nfsmout_if(error);
	/* first word of the reply is the MOUNT status */
	nfsm_chain_get_32(error, &nmrep, val);
	if (!error && val) {
		error = val;
	}
	nfsmout_if(error);
	nfsm_chain_get_fh(error, &nmrep, nfsvers, fh);
	/* MOUNT v3 replies also carry the list of auth flavors */
	if (!error && (nfsvers > NFS_VER2)) {
		sec->count = NX_MAX_SEC_FLAVORS;
		error = nfsm_chain_get_secinfo(&nmrep, &sec->flavors[0], &sec->count);
	}
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
4126
4127
4128 /*
4129 * Send a MOUNT protocol UNMOUNT request to tell the server we've unmounted it.
4130 */
/*
 * Send a MOUNT protocol UNMOUNT request to tell the server we've
 * unmounted it.  Best-effort: errors are ignored and nothing is
 * returned.  If no mount port is known, the portmapper is queried,
 * retrying first with MOUNT v1 and then with UDP.
 */
void
nfs3_umount_rpc(struct nfsmount *nmp, vfs_context_t ctx, int timeo)
{
	int error = 0, mntproto;
	thread_t thd = vfs_context_thread(ctx);
	kauth_cred_t cred = vfs_context_ucred(ctx);
	char *path;
	uint64_t xid = 0;
	size_t slen;
	struct nfsm_chain nmreq, nmrep;
	mbuf_t mreq;
	uint32_t mntvers;
	in_port_t mntport;
	struct sockaddr_storage ss;
	struct sockaddr *saddr = (struct sockaddr*)&ss;

	/* nothing to do without a server address */
	if (!nmp->nm_saddr) {
		return;
	}

	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	/* MOUNT v1 pairs with NFSv2, MOUNT v3 with NFSv3 */
	mntvers = (nmp->nm_vers == NFS_VER2) ? RPCMNT_VER1 : RPCMNT_VER3;
	mntproto = (NM_OMFLAG(nmp, MNTUDP) || (nmp->nm_sotype == SOCK_DGRAM)) ? IPPROTO_UDP : IPPROTO_TCP;
	mntport = nmp->nm_mountport;

	/* work on a local copy of the server address so we can rewrite the port */
	bcopy(nmp->nm_saddr, saddr, min(sizeof(ss), nmp->nm_saddr->sa_len));
	if (saddr->sa_family == AF_INET) {
		((struct sockaddr_in*)saddr)->sin_port = htons(mntport);
	} else if (saddr->sa_family == AF_INET6) {
		((struct sockaddr_in6*)saddr)->sin6_port = htons(mntport);
	} else { /* Local domain socket */
		mntport = ((struct sockaddr_un *)saddr)->sun_path[0]; /* Do we have an address? */
	}

	/* no port known yet: ask the portmapper, with version/proto fallback */
	while (!mntport) {
		error = nfs_portmap_lookup(nmp, ctx, saddr, NULL, RPCPROG_MNT, mntvers, mntproto, timeo);
		nfsmout_if(error);
		if (saddr->sa_family == AF_INET) {
			mntport = ntohs(((struct sockaddr_in*)saddr)->sin_port);
		} else if (saddr->sa_family == AF_INET6) {
			mntport = ntohs(((struct sockaddr_in6*)saddr)->sin6_port);
		} else { /* Local domain socket */
			mntport = ((struct sockaddr_un *)saddr)->sun_path[0]; /* Do we have an address? */
		}
		/* if not found and mntvers > VER1, then retry with VER1 */
		if (!mntport) {
			if (mntvers > RPCMNT_VER1) {
				mntvers = RPCMNT_VER1;
			} else if (mntproto == IPPROTO_TCP) {
				mntproto = IPPROTO_UDP;
				mntvers = (nmp->nm_vers == NFS_VER2) ? RPCMNT_VER1 : RPCMNT_VER3;
			} else {
				break;
			}
			/* restore the original address clobbered by the lookup */
			bcopy(nmp->nm_saddr, saddr, min(sizeof(ss), nmp->nm_saddr->sa_len));
		}
	}
	nfsmout_if(!mntport);

	/* MOUNT protocol UNMOUNT request */
	/* strip the "host:" prefix from f_mntfromname to get the server path */
	path = &vfs_statfs(nmp->nm_mountp)->f_mntfromname[0];
	while (*path && (*path != '/')) {
		path++;
	}
	slen = strlen(path);
	nfsm_chain_build_alloc_init(error, &nmreq, NFSX_UNSIGNED + nfsm_rndup(slen));
	nfsm_chain_add_name(error, &nmreq, path, slen, nmp);
	nfsm_chain_build_done(error, &nmreq);
	nfsmout_if(error);
	error = nfsm_rpchead2(nmp, (mntproto == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM,
	    RPCPROG_MNT, RPCMNT_VER1, RPCMNT_UMOUNT,
	    RPCAUTH_SYS, cred, NULL, nmreq.nmc_mhead, &xid, &mreq);
	nfsmout_if(error);
	/* mreq now owns the chain head; don't let cleanup free it twice */
	nmreq.nmc_mhead = NULL;
	/* fire off the request; the reply (if any) is discarded below */
	error = nfs_aux_request(nmp, thd, saddr, NULL,
	    ((mntproto == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM),
	    mreq, R_XID32(xid), 1, timeo, &nmrep);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
}
4214
4215 /*
4216 * unmount system call
4217 */
/*
 * unmount system call
 *
 * Flush all vnodes for the mount (honoring MNT_FORCE), drop the root
 * vnode reference held since mount time, then drain remaining mount
 * references and free the nfsmount.  Returns EBUSY (mapped) if the
 * root is still in use on a non-forced unmount.
 */
int
nfs_vfs_unmount(
	mount_t mp,
	int mntflags,
	__unused vfs_context_t ctx)
{
	struct nfsmount *nmp;
	vnode_t vp;
	int error, flags = 0, inuse = 1;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	nmp = VFSTONFS(mp);
	lck_mtx_lock(&nmp->nm_lock);
	/*
	 * Set the flag indicating that an unmount attempt is in progress.
	 */
	nmp->nm_state |= NFSSTA_UNMOUNTING;
	/*
	 * During a force unmount we want to...
	 * Mark that we are doing a force unmount.
	 * Make the mountpoint soft.
	 */
	if (mntflags & MNT_FORCE) {
		flags |= FORCECLOSE;
		nmp->nm_state |= NFSSTA_FORCE;
		NFS_BITMAP_SET(nmp->nm_flags, NFS_MFLAG_SOFT);
	}
	/*
	 * Wait for any in-progress monitored node scan to complete.
	 */
	while (nmp->nm_state & NFSSTA_MONITOR_SCAN) {
		msleep(&nmp->nm_state, &nmp->nm_lock, PZERO - 1, "nfswaitmonscan", &ts);
	}
	/*
	 * Goes something like this..
	 * - Call vflush() to clear out vnodes for this file system,
	 *   except for the swap files. Deal with them in 2nd pass.
	 * - Decrement reference on the vnode representing remote root.
	 * - Clean up the NFS mount structure.
	 */
	vp = NFSTOV(nmp->nm_dnp);
	lck_mtx_unlock(&nmp->nm_lock);

	/*
	 * vflush will check for busy vnodes on mountpoint.
	 * Will do the right thing for MNT_FORCE. That is, we should
	 * not get EBUSY back.
	 */
	error = vflush(mp, vp, SKIPSWAP | flags);
	if (mntflags & MNT_FORCE) {
		error = vflush(mp, NULLVP, flags); /* locks vp in the process */
	} else {
		/* non-forced: fail with EBUSY if the root is still in use */
		if ((nmp->nm_state & NFSSTA_TIMEO) && vfs_isunmount(mp)) {
			/* mount is timed out: give pending requests a kick, then re-check use */
			if (vnode_isinuse(vp, 1)) {
				nfs_request_timer(nmp, NULL);
				IOSleep(100);
			} else {
				inuse = 0;
			}
		}
		if (inuse && vnode_isinuse(vp, 1)) {
			error = EBUSY;
		} else {
			error = vflush(mp, vp, flags);
		}
	}
	if (error) {
		/* unmount failed: clear the in-progress flag and bail */
		lck_mtx_lock(&nmp->nm_lock);
		nmp->nm_state &= ~NFSSTA_UNMOUNTING;
		lck_mtx_unlock(&nmp->nm_lock);
		return NFS_MAPERR(error);
	}

	lck_mtx_lock(&nmp->nm_lock);
	nmp->nm_dnp = NULL;
	lck_mtx_unlock(&nmp->nm_lock);

	/*
	 * Release the root vnode reference held by mountnfs()
	 */
	error = vnode_get(vp);
	vnode_rele(vp);
	if (!error) {
		vnode_put(vp);
	}

	/* final forced flush to reclaim anything left */
	vflush(mp, NULLVP, FORCECLOSE);

	/* Wait for all other references to be released and free the mount */
	nfs_mount_drain_and_cleanup(nmp);

	return 0;
}
4311
4312 /*
4313 * cleanup/destroy NFS fs locations structure
4314 */
/*
 * cleanup/destroy NFS fs locations structure
 *
 * Frees the entire nested locations -> servers -> addresses hierarchy
 * and resets nfslsp to an empty state.  Safe to call on an already
 * empty structure.
 */
void
nfs_fs_locations_cleanup(struct nfs_fs_locations *nfslsp)
{
	struct nfs_fs_location *fsl;
	struct nfs_fs_server *fss;
	uint32_t loc, serv, addr;

	/* free up fs locations */
	if (!nfslsp->nl_numlocs || !nfslsp->nl_locations) {
		return;
	}

	for (loc = 0; loc < nfslsp->nl_numlocs; loc++) {
		fsl = nfslsp->nl_locations[loc];
		if (!fsl) {
			continue;
		}
		if ((fsl->nl_servcount > 0) && fsl->nl_servers) {
			/* free each server's address strings, then the server itself */
			for (serv = 0; serv < fsl->nl_servcount; serv++) {
				fss = fsl->nl_servers[serv];
				if (!fss) {
					continue;
				}
				if ((fss->ns_addrcount > 0) && fss->ns_addresses) {
					for (addr = 0; addr < fss->ns_addrcount; addr++) {
						kfree_data_addr(fss->ns_addresses[addr]);
					}
					kfree_type(char *, fss->ns_addrcount,
					    fss->ns_addresses);
				}
				kfree_data_addr(fss->ns_name);
				kfree_type(struct nfs_fs_server, fss);
			}
			kfree_type(struct nfs_fs_server *, fsl->nl_servcount, fsl->nl_servers);
		}
		nfs_fs_path_destroy(&fsl->nl_path);
		kfree_type(struct nfs_fs_location, fsl);
	}
	kfree_type(struct nfs_fs_location *, nfslsp->nl_numlocs, nfslsp->nl_locations);
	/* leave the structure in a safe, reusable state */
	nfslsp->nl_numlocs = 0;
	nfslsp->nl_locations = NULL;
}
4357
4358 void
nfs_mount_rele(struct nfsmount * nmp)4359 nfs_mount_rele(struct nfsmount *nmp)
4360 {
4361 int wup = 0;
4362
4363 lck_mtx_lock(&nmp->nm_lock);
4364 if (nmp->nm_ref < 1) {
4365 panic("nfs zombie mount underflow");
4366 }
4367 nmp->nm_ref--;
4368 if (nmp->nm_ref == 0) {
4369 wup = nmp->nm_state & NFSSTA_MOUNT_DRAIN;
4370 }
4371 lck_mtx_unlock(&nmp->nm_lock);
4372 if (wup) {
4373 wakeup(&nmp->nm_ref);
4374 }
4375 }
4376
/*
 * Wait for all outstanding references on the mount to be released
 * (nfs_mount_rele() wakes us via &nmp->nm_ref), then tear the
 * nfsmount down with nfs_mount_cleanup().
 */
void
nfs_mount_drain_and_cleanup(struct nfsmount *nmp)
{
	lck_mtx_lock(&nmp->nm_lock);
	/* tell nfs_mount_rele() to wake us when the last reference drops */
	nmp->nm_state |= NFSSTA_MOUNT_DRAIN;
	while (nmp->nm_ref > 0) {
		msleep(&nmp->nm_ref, &nmp->nm_lock, PZERO - 1, "nfs_mount_drain", NULL);
	}
	assert(nmp->nm_ref == 0);
	lck_mtx_unlock(&nmp->nm_lock);
	nfs_mount_cleanup(nmp);
}
4389
4390 /*
4391 * nfs_mount_zombie
4392 */
/*
 * nfs_mount_zombie
 *
 * Drive an nfsmount into the "zombie" state: shut down callbacks and
 * GSS contexts, terminate the socket thread and connection, abort all
 * outstanding requests (sync, resend-queued, and async), and release
 * per-mount state (monitored nodes, open owners, v4 delegations,
 * client ID).  nm_state_flags (e.g. NFSSTA_FORCE/NFSSTA_DEAD) are
 * OR'd into nm_state first.  A temporary mount reference is held for
 * the duration and dropped at the end.
 */
void
nfs_mount_zombie(struct nfsmount *nmp, int nm_state_flags)
{
	struct nfsreq *req, *treq;
	struct nfs_reqqhead iodq, resendq;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct nfs_open_owner *noop, *nextnoop;
	nfsnode_t np;
	int docallback;

	lck_mtx_lock(&nmp->nm_lock);
	nmp->nm_state |= nm_state_flags;
	nmp->nm_ref++;
	lck_mtx_unlock(&nmp->nm_lock);
#if CONFIG_NFS4
	/* stop callbacks */
	if ((nmp->nm_vers >= NFS_VER4) && !NMFLAG(nmp, NOCALLBACK) && nmp->nm_cbid) {
		nfs4_mount_callback_shutdown(nmp);
	}
#endif
#if CONFIG_NFS_GSS
	/* Destroy any RPCSEC_GSS contexts */
	nfs_gss_clnt_ctx_unmount(nmp);
#endif

	/* mark the socket for termination */
	lck_mtx_lock(&nmp->nm_lock);
	nmp->nm_sockflags |= NMSOCK_UNMOUNT;

	/* Have the socket thread send the unmount RPC, if requested/appropriate. */
	if ((nmp->nm_vers < NFS_VER4) && (nmp->nm_state & NFSSTA_MOUNTED) &&
	    !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) && NMFLAG(nmp, CALLUMNT)) {
		nfs_mount_sock_thread_wake(nmp);
	}

	/* wait for the socket thread to terminate */
	while (nmp->nm_sockthd && current_thread() != nmp->nm_sockthd) {
		wakeup(&nmp->nm_sockthd);
		msleep(&nmp->nm_sockthd, &nmp->nm_lock, PZERO - 1, "nfswaitsockthd", &ts);
	}
	lck_mtx_unlock(&nmp->nm_lock);

	/* tear down the socket */
	nfs_disconnect(nmp);

	lck_mtx_lock(&nmp->nm_lock);

#if CONFIG_NFS4
	if ((nmp->nm_vers >= NFS_VER4) && !NMFLAG(nmp, NOCALLBACK) && nmp->nm_cbid) {
		/* clear out any pending delegation return requests */
		while ((np = TAILQ_FIRST(&nmp->nm_dreturnq))) {
			TAILQ_REMOVE(&nmp->nm_dreturnq, np, n_dreturn);
			np->n_dreturn.tqe_next = NFSNOLIST;
		}
	}

	/* cancel any renew timer */
	if ((nmp->nm_vers >= NFS_VER4) && nmp->nm_renew_timer) {
		thread_call_cancel(nmp->nm_renew_timer);
		thread_call_free(nmp->nm_renew_timer);
		nmp->nm_renew_timer = NULL;
	}

#endif
	lck_mtx_unlock(&nmp->nm_lock);

	/* unregister from lockd if this mount was using it */
	if (nmp->nm_state & NFSSTA_MOUNTED) {
		switch (nmp->nm_lockmode) {
		case NFS_LOCK_MODE_DISABLED:
		case NFS_LOCK_MODE_LOCAL:
			break;
		case NFS_LOCK_MODE_ENABLED:
		default:
			if (nmp->nm_vers <= NFS_VER3) {
				nfs_lockd_mount_unregister(nmp);
				nmp->nm_lockmode = NFS_LOCK_MODE_DISABLED;
			}
			break;
		}
	}

#if CONFIG_NFS4
	if ((nmp->nm_vers >= NFS_VER4) && nmp->nm_longid) {
		/* remove/deallocate the client ID data */
		lck_mtx_lock(&nfs_global_mutex);
		TAILQ_REMOVE(&nfsclientids, nmp->nm_longid, nci_link);
		if (nmp->nm_longid->nci_id) {
			kfree_data_addr(nmp->nm_longid->nci_id);
		}
		kfree_type(struct nfs_client_id, nmp->nm_longid);
		lck_mtx_unlock(&nfs_global_mutex);
	}
#endif
	/*
	 * Be sure all requests for this mount are completed
	 * and removed from the resend queue.
	 */
	TAILQ_INIT(&resendq);
	lck_mtx_lock(&nfs_request_mutex);
	TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
		if (req->r_nmp == nmp) {
			lck_mtx_lock(&req->r_mtx);
			/* fail any request that hasn't received a reply yet */
			if (!req->r_error && req->r_nmrep.nmc_mhead == NULL) {
				req->r_error = EIO;
			}
			if (req->r_flags & R_RESENDQ) {
				lck_mtx_lock(&nmp->nm_lock);
				/* re-check under nm_lock: the flag may have been cleared */
				if ((req->r_flags & R_RESENDQ) && req->r_rchain.tqe_next != NFSREQNOLIST) {
					TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
					req->r_flags &= ~R_RESENDQ;
					req->r_rchain.tqe_next = NFSREQNOLIST;
					/*
					 * Queue up the request so that we can unreference them
					 * without holding nfs_request_mutex
					 */
					TAILQ_INSERT_TAIL(&resendq, req, r_rchain);
				}
				lck_mtx_unlock(&nmp->nm_lock);
			}
			wakeup(req);
			lck_mtx_unlock(&req->r_mtx);
		}
	}
	lck_mtx_unlock(&nfs_request_mutex);

	/* Since we've dropped the request mutex we can now safely unreference the request */
	TAILQ_FOREACH_SAFE(req, &resendq, r_rchain, treq) {
		TAILQ_REMOVE(&resendq, req, r_rchain);
		/* Make sure we don't try and remove again in nfs_request_destroy */
		req->r_rchain.tqe_next = NFSREQNOLIST;
		nfs_request_rele(req);
	}

	/*
	 * Now handle any outstanding async requests. We need to walk the
	 * request queue again this time with the nfsiod_mutex held. No
	 * other iods can grab our requests until we've put them on our own
	 * local iod queue for processing.
	 */
	TAILQ_INIT(&iodq);
	lck_mtx_lock(&nfs_request_mutex);
	lck_mtx_lock(&nfsiod_mutex);
	TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
		if (req->r_nmp == nmp) {
			lck_mtx_lock(&req->r_mtx);
			if (req->r_callback.rcb_func
			    && !(req->r_flags & R_WAITSENT) && !(req->r_flags & R_IOD)) {
				/*
				 * Since R_IOD is not set then we need to handle it. If
				 * we're not on a list add it to our iod queue. Otherwise
				 * we must already be on nm_iodq which is added to our
				 * local queue below.
				 * %%% We should really keep a back pointer to our iod queue
				 * that we're on.
				 */
				req->r_flags |= R_IOD;
				if (req->r_achain.tqe_next == NFSREQNOLIST) {
					TAILQ_INSERT_TAIL(&iodq, req, r_achain);
				}
			}
			lck_mtx_unlock(&req->r_mtx);
		}
	}

	/* finish any async I/O RPCs queued up */
	if (nmp->nm_iodlink.tqe_next != NFSNOLIST) {
		TAILQ_REMOVE(&nfsiodmounts, nmp, nm_iodlink);
	}
	TAILQ_CONCAT(&iodq, &nmp->nm_iodq, r_achain);
	lck_mtx_unlock(&nfsiod_mutex);
	lck_mtx_unlock(&nfs_request_mutex);

	/* invoke the completion callback for each queued async request */
	TAILQ_FOREACH_SAFE(req, &iodq, r_achain, treq) {
		TAILQ_REMOVE(&iodq, req, r_achain);
		req->r_achain.tqe_next = NFSREQNOLIST;
		lck_mtx_lock(&req->r_mtx);
		docallback = !(req->r_flags & R_WAITSENT);
		lck_mtx_unlock(&req->r_mtx);
		if (docallback) {
			req->r_callback.rcb_func(req);
		}
	}

	/* clean up common state */
	lck_mtx_lock(&nmp->nm_lock);
	/* empty the monitored-node list */
	while ((np = LIST_FIRST(&nmp->nm_monlist))) {
		LIST_REMOVE(np, n_monlink);
		np->n_monlink.le_next = NFSNOLIST;
	}
	/* unlink and release all open owners */
	TAILQ_FOREACH_SAFE(noop, &nmp->nm_open_owners, noo_link, nextnoop) {
		os_ref_count_t newcount;

		TAILQ_REMOVE(&nmp->nm_open_owners, noop, noo_link);
		noop->noo_flags &= ~NFS_OPEN_OWNER_LINK;
		newcount = os_ref_release_locked(&noop->noo_refcnt);

		if (newcount) {
			continue;
		}
		nfs_open_owner_destroy(noop);
	}
	lck_mtx_unlock(&nmp->nm_lock);

#if CONFIG_NFS4
	/* clean up NFSv4 state */
	if (nmp->nm_vers >= NFS_VER4) {
		lck_mtx_lock(&nmp->nm_lock);
		/* empty the delegation list */
		while ((np = TAILQ_FIRST(&nmp->nm_delegations))) {
			TAILQ_REMOVE(&nmp->nm_delegations, np, n_dlink);
			np->n_dlink.tqe_next = NFSNOLIST;
		}
		lck_mtx_unlock(&nmp->nm_lock);
	}
#endif
	/* drop the reference taken at the top of this function */
	nfs_mount_rele(nmp);
}
4609
4610 /*
4611 * cleanup/destroy an nfsmount
4612 */
/*
 * cleanup/destroy an nfsmount
 *
 * Final teardown: zombify the mount, detach it from the VFS mount
 * structure, free all remaining per-mount allocations, destroy the
 * mount lock, and free the nfsmount itself.  Must only be called once
 * all references are gone (panics otherwise).
 */
void
nfs_mount_cleanup(struct nfsmount *nmp)
{
	if (!nmp) {
		return;
	}

	/* abort requests, stop threads, release per-mount state */
	nfs_mount_zombie(nmp, 0);

	NFS_VFS_DBG("Unmounting %s from %s\n",
	    vfs_statfs(nmp->nm_mountp)->f_mntfromname,
	    vfs_statfs(nmp->nm_mountp)->f_mntonname);
	NFS_VFS_DBG("nfs state = 0x%8.8x\n", nmp->nm_state);
	NFS_VFS_DBG("nfs socket flags = 0x%8.8x\n", nmp->nm_sockflags);
	NFS_VFS_DBG("nfs mount ref count is %d\n", nmp->nm_ref);

	/* detach the nfsmount from the mount_t */
	if (nmp->nm_mountp) {
		vfs_setfsprivate(nmp->nm_mountp, NULL);
	}

	lck_mtx_lock(&nmp->nm_lock);
	if (nmp->nm_ref) {
		panic("Some one has grabbed a ref %d state flags = 0x%8.8x", nmp->nm_ref, nmp->nm_state);
	}

	free_sockaddr(nmp->nm_saddr);

	/* rquotad address is only kept for pre-v4 mounts */
	if ((nmp->nm_vers < NFS_VER4) && nmp->nm_rqsaddr) {
		struct sockaddr_storage **nm_rqsaddr_ptr =
		    (struct sockaddr_storage **)&nmp->nm_rqsaddr;
		kfree_type(struct sockaddr_storage, *nm_rqsaddr_ptr);
	}

	if (IS_VALID_CRED(nmp->nm_mcred)) {
		kauth_cred_unref(&nmp->nm_mcred);
	}

	nfs_fs_locations_cleanup(&nmp->nm_locations);

	if (nmp->nm_realm) {
		kfree_data_addr(nmp->nm_realm);
	}
	if (nmp->nm_principal) {
		kfree_data_addr(nmp->nm_principal);
	}
	if (nmp->nm_sprinc) {
		kfree_data_addr(nmp->nm_sprinc);
	}

	if (nmp->nm_args) {
		xb_free(nmp->nm_args);
	}
	if (nmp->nm_nfs_localport) {
		kfree_data_addr(nmp->nm_nfs_localport);
	}
	if (nmp->nm_mount_localport) {
		kfree_data_addr(nmp->nm_mount_localport);
	}

	lck_mtx_unlock(&nmp->nm_lock);

	/* nobody else can reach nmp now, so it's safe to destroy the lock */
	lck_mtx_destroy(&nmp->nm_lock, &nfs_mount_grp);
	if (nmp->nm_fh) {
		NFS_ZFREE(nfs_fhandle_zone, nmp->nm_fh);
	}

	NFS_ZFREE(nfsmnt_zone, nmp);
}
4681
4682 /*
4683 * Return root of a filesystem
4684 */
4685 int
nfs_vfs_root(mount_t mp,vnode_t * vpp,__unused vfs_context_t ctx)4686 nfs_vfs_root(mount_t mp, vnode_t *vpp, __unused vfs_context_t ctx)
4687 {
4688 vnode_t vp;
4689 struct nfsmount *nmp;
4690 int error;
4691 u_int32_t vpid;
4692
4693 nmp = VFSTONFS(mp);
4694 if (!nmp || !nmp->nm_dnp) {
4695 return ENXIO;
4696 }
4697 vp = NFSTOV(nmp->nm_dnp);
4698 vpid = vnode_vid(vp);
4699 while ((error = vnode_getwithvid(vp, vpid))) {
4700 /* vnode_get() may return ENOENT if the dir changes. */
4701 /* If that happens, just try it again, else return the error. */
4702 if ((error != ENOENT) || (vnode_vid(vp) == vpid)) {
4703 return NFS_MAPERR(error);
4704 }
4705 vpid = vnode_vid(vp);
4706 }
4707 *vpp = vp;
4708 return 0;
4709 }
4710
4711 /*
4712 * Do operations associated with quotas
4713 */
4714 #if !QUOTA
int
nfs_vfs_quotactl(
	__unused mount_t mp,
	__unused int cmds,
	__unused uid_t uid,
	__unused caddr_t datap,
	__unused vfs_context_t context)
{
	/* quota support was not compiled in (QUOTA is off) */
	return ENOTSUP;
}
4725 #else
4726
/*
 * Return the port number (in host byte order) stored in an IPv4/IPv6
 * socket address.  Any other address family yields 0 and, if the caller
 * supplied an error pointer, EIO.
 */
static in_port_t
nfs_sa_getport(struct sockaddr *sa, int *error)
{
	switch (sa->sa_family) {
	case AF_INET6:
		return ntohs(((struct sockaddr_in6*)sa)->sin6_port);
	case AF_INET:
		return ntohs(((struct sockaddr_in*)sa)->sin_port);
	default:
		if (error) {
			*error = EIO;
		}
		return 0;
	}
}
4742
/*
 * Store a port number (given in host byte order) into an IPv4/IPv6
 * socket address.  Other address families are left untouched.
 */
static void
nfs_sa_setport(struct sockaddr *sa, in_port_t port)
{
	switch (sa->sa_family) {
	case AF_INET6:
		((struct sockaddr_in6*)sa)->sin6_port = htons(port);
		break;
	case AF_INET:
		((struct sockaddr_in*)sa)->sin_port = htons(port);
		break;
	default:
		break;
	}
}
4752
/*
 * Query the server's rquotad for quota information (pre-v4 NFS).
 *
 * Looks up (and caches for 60 seconds) the rquota port via portmap, then
 * sends an RPCRQUOTA_GET request and converts the reply into *dqb.
 * Returns ENOTSUP when quotas are disabled or rquotad is unavailable,
 * ENOENT/EPERM/EIO for the corresponding rquota status codes.
 */
int
nfs3_getquota(struct nfsmount *nmp, vfs_context_t ctx, uid_t id, int type, struct dqblk *dqb)
{
	int error = 0, timeo;
	int rqproto, rqvers = (type == GRPQUOTA) ? RPCRQUOTA_EXT_VER : RPCRQUOTA_VER;
	in_port_t rqport = 0;
	thread_t thd = vfs_context_thread(ctx);
	kauth_cred_t cred = vfs_context_ucred(ctx);
	char *path;
	uint64_t slen, xid = 0;
	struct nfsm_chain nmreq, nmrep;
	mbuf_t mreq;
	uint32_t val = 0, bsize = 0;
	struct sockaddr *rqsaddr;
	struct timeval now;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	if (!nmp->nm_saddr) {
		return ENXIO;
	}

	if (NMFLAG(nmp, NOQUOTA) || nmp->nm_saddr->sa_family == AF_LOCAL /* XXX for now */) {
		return ENOTSUP;
	}

	/*
	 * Allocate an address for rquotad if needed
	 */
	if (!nmp->nm_rqsaddr) {
		int need_free = 0;

		rqsaddr = (struct sockaddr *)kalloc_type(struct sockaddr_storage, Z_WAITOK | Z_ZERO);
		bcopy(nmp->nm_saddr, rqsaddr, min(sizeof(struct sockaddr_storage), nmp->nm_saddr->sa_len));
		/* Set the port to zero, will call rpcbind to get the port below */
		nfs_sa_setport(rqsaddr, 0);
		microuptime(&now);

		/* another thread may have raced us; only install ours if still unset */
		lck_mtx_lock(&nmp->nm_lock);
		if (!nmp->nm_rqsaddr) {
			nmp->nm_rqsaddr = rqsaddr;
			nmp->nm_rqsaddrstamp = now.tv_sec;
		} else {
			need_free = 1;
		}
		lck_mtx_unlock(&nmp->nm_lock);
		if (need_free) {
			struct sockaddr_storage *rqsaddr_storage =
			    (struct sockaddr_storage *)rqsaddr;
			kfree_type(struct sockaddr_storage, rqsaddr_storage);
		}
	}

	timeo = NMFLAG(nmp, SOFT) ? 10 : 60;
	rqproto = IPPROTO_UDP; /* XXX should prefer TCP if mount is TCP */

	/* check if we have a recently cached rquota port */
	microuptime(&now);
	lck_mtx_lock(&nmp->nm_lock);
	rqsaddr = nmp->nm_rqsaddr;
	rqport = nfs_sa_getport(rqsaddr, &error);
	/* refresh the port if it is unset or the cached value is stale (>60s old) */
	while (!error && (!rqport || ((nmp->nm_rqsaddrstamp + 60) <= (uint32_t)now.tv_sec))) {
		error = nfs_sigintr(nmp, NULL, thd, 1);
		if (error) {
			lck_mtx_unlock(&nmp->nm_lock);
			return error;
		}
		if (nmp->nm_state & NFSSTA_RQUOTAINPROG) {
			/* another thread is doing the portmap lookup; wait for it */
			nmp->nm_state |= NFSSTA_WANTRQUOTA;
			msleep(&nmp->nm_rqsaddr, &nmp->nm_lock, PZERO - 1, "nfswaitrquotaaddr", &ts);
			rqport = nfs_sa_getport(rqsaddr, &error);
			continue;
		}
		nmp->nm_state |= NFSSTA_RQUOTAINPROG;
		lck_mtx_unlock(&nmp->nm_lock);

		/* send portmap request to get rquota port */
		error = nfs_portmap_lookup(nmp, ctx, rqsaddr, NULL, RPCPROG_RQUOTA, rqvers, rqproto, timeo);
		if (error) {
			goto out;
		}
		rqport = nfs_sa_getport(rqsaddr, &error);
		if (error) {
			goto out;
		}

		if (!rqport) {
			/*
			 * We overload PMAPPORT for the port if rquotad is not
			 * currently registered or up at the server. In the
			 * while loop above, port will be set and we will defer
			 * for a bit. Perhaps the service isn't online yet.
			 *
			 * Note that precludes using indirect, but we're not doing
			 * that here.
			 */
			rqport = PMAPPORT;
			nfs_sa_setport(rqsaddr, rqport);
		}
		microuptime(&now);
		nmp->nm_rqsaddrstamp = now.tv_sec;
out:
		/* clear the in-progress marker and wake any waiters (still in the loop) */
		lck_mtx_lock(&nmp->nm_lock);
		nmp->nm_state &= ~NFSSTA_RQUOTAINPROG;
		if (nmp->nm_state & NFSSTA_WANTRQUOTA) {
			nmp->nm_state &= ~NFSSTA_WANTRQUOTA;
			wakeup(&nmp->nm_rqsaddr);
		}
	}
	lck_mtx_unlock(&nmp->nm_lock);
	if (error) {
		return error;
	}

	/* Using PMAPPORT for unavailable rquota service */
	if (rqport == PMAPPORT) {
		return ENOTSUP;
	}

	/* rquota request */
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);
	/* the rquota path argument is the export path: everything after "host:" */
	path = &vfs_statfs(nmp->nm_mountp)->f_mntfromname[0];
	while (*path && (*path != '/')) {
		path++;
	}
	slen = strlen(path);
	nfsm_chain_build_alloc_init(error, &nmreq, 3 * NFSX_UNSIGNED + nfsm_rndup(slen));
	nfsm_chain_add_name(error, &nmreq, path, slen, nmp);
	if (type == GRPQUOTA) {
		/* the extended (v2) protocol also carries the quota type */
		nfsm_chain_add_32(error, &nmreq, type);
	}
	nfsm_chain_add_32(error, &nmreq, id);
	nfsm_chain_build_done(error, &nmreq);
	nfsmout_if(error);
	error = nfsm_rpchead2(nmp, (rqproto == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM,
	    RPCPROG_RQUOTA, rqvers, RPCRQUOTA_GET,
	    RPCAUTH_SYS, cred, NULL, nmreq.nmc_mhead, &xid, &mreq);
	nfsmout_if(error);
	nmreq.nmc_mhead = NULL;
	error = nfs_aux_request(nmp, thd, rqsaddr, NULL,
	    (rqproto == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM,
	    mreq, R_XID32(xid), 0, timeo, &nmrep);
	nfsmout_if(error);

	/* parse rquota response */
	nfsm_chain_get_32(error, &nmrep, val);
	if (!error && (val != RQUOTA_STAT_OK)) {
		if (val == RQUOTA_STAT_NOQUOTA) {
			error = ENOENT;
		} else if (val == RQUOTA_STAT_EPERM) {
			error = EPERM;
		} else {
			error = EIO;
		}
	}
	/* block counts in the reply are in units of bsize bytes */
	nfsm_chain_get_32(error, &nmrep, bsize);
	nfsm_chain_adv(error, &nmrep, NFSX_UNSIGNED);
	nfsm_chain_get_32(error, &nmrep, val);
	nfsmout_if(error);
	dqb->dqb_bhardlimit = (uint64_t)val * bsize;
	nfsm_chain_get_32(error, &nmrep, val);
	nfsmout_if(error);
	dqb->dqb_bsoftlimit = (uint64_t)val * bsize;
	nfsm_chain_get_32(error, &nmrep, val);
	nfsmout_if(error);
	dqb->dqb_curbytes = (uint64_t)val * bsize;
	nfsm_chain_get_32(error, &nmrep, dqb->dqb_ihardlimit);
	nfsm_chain_get_32(error, &nmrep, dqb->dqb_isoftlimit);
	nfsm_chain_get_32(error, &nmrep, dqb->dqb_curinodes);
	nfsm_chain_get_32(error, &nmrep, dqb->dqb_btime);
	nfsm_chain_get_32(error, &nmrep, dqb->dqb_itime);
	nfsmout_if(error);
	dqb->dqb_id = id;
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	return error;
}
4931 #if CONFIG_NFS4
/*
 * Query quota information over NFSv4 by fetching the quota attributes
 * (QUOTA_AVAIL_HARD/SOFT, QUOTA_USED) of the mount's root with a
 * PUTFH+GETATTR compound.  NFSv4 only reports per-user quotas, so the
 * request is sent with a credential whose effective uid is the queried uid.
 */
int
nfs4_getquota(struct nfsmount *nmp, vfs_context_t ctx, uid_t id, int type, struct dqblk *dqb)
{
	nfsnode_t np;
	int error = 0, status, nfsvers, numops;
	u_int64_t xid;
	struct nfsm_chain nmreq, nmrep;
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
	thread_t thd = vfs_context_thread(ctx);
	kauth_cred_t cred = vfs_context_ucred(ctx);
	struct nfsreq_secinfo_args si;

	if (type != USRQUOTA) { /* NFSv4 only supports user quotas */
		return ENOTSUP;
	}

	/* first check that the server supports any of the quota attributes */
	if (!NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_AVAIL_HARD) &&
	    !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_AVAIL_SOFT) &&
	    !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_USED)) {
		return ENOTSUP;
	}

	/*
	 * The credential passed to the server needs to have
	 * an effective uid that matches the given uid.
	 */
	if (id != kauth_cred_getuid(cred)) {
		/* build a temporary credential: target uid, caller's group list */
		struct posix_cred temp_pcred;
		posix_cred_t pcred = posix_cred_get(cred);
		bzero(&temp_pcred, sizeof(temp_pcred));
		temp_pcred.cr_uid = id;
		temp_pcred.cr_ngroups = pcred->cr_ngroups;
		bcopy(pcred->cr_groups, temp_pcred.cr_groups, sizeof(temp_pcred.cr_groups));
		cred = posix_cred_create(&temp_pcred);
		if (!IS_VALID_CRED(cred)) {
			return ENOMEM;
		}
	} else {
		kauth_cred_ref(cred);
	}

	nfsvers = nmp->nm_vers;
	np = nmp->nm_dnp;
	if (!np) {
		error = ENXIO;
	}
	if (error || ((error = vnode_get(NFSTOV(np))))) {
		kauth_cred_unref(&cred);
		return error;
	}

	NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);

	// PUTFH + GETATTR
	numops = 2;
	nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
	nfsm_chain_add_compound_header(error, &nmreq, "quota", nmp->nm_minor_vers, numops);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_PUTFH);
	nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
	numops--;
	nfsm_chain_add_v4_op(error, &nmreq, NFS_OP_GETATTR);
	NFS_CLEAR_ATTRIBUTES(bitmap);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_QUOTA_AVAIL_HARD);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_QUOTA_AVAIL_SOFT);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_QUOTA_USED);
	nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL);
	nfsm_chain_build_done(error, &nmreq);
	nfsm_assert(error, (numops == 0), EPROTO);
	nfsmout_if(error);
	error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, &nmrep, &xid, &status);
	nfsm_chain_skip_tag(error, &nmrep);
	nfsm_chain_get_32(error, &nmrep, numops);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
	nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
	nfsm_assert(error, NFSTONMP(np), ENXIO);
	nfsmout_if(error);
	/* nfs4_parsefattr fills the dqblk from the returned quota attributes */
	error = nfs4_parsefattr(&nmrep, NULL, NULL, NULL, dqb, NULL);
	nfsmout_if(error);
	nfsm_assert(error, NFSTONMP(np), ENXIO);
nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);
	vnode_put(NFSTOV(np));
	kauth_cred_unref(&cred);
	return error;
}
5022 #endif /* CONFIG_NFS4 */
5023 int
nfs_vfs_quotactl(mount_t mp,int cmds,uid_t uid,caddr_t datap,vfs_context_t ctx)5024 nfs_vfs_quotactl(mount_t mp, int cmds, uid_t uid, caddr_t datap, vfs_context_t ctx)
5025 {
5026 struct nfsmount *nmp;
5027 int cmd, type, error, nfsvers;
5028 uid_t euid = kauth_cred_getuid(vfs_context_ucred(ctx));
5029 struct dqblk *dqb = (struct dqblk*)datap;
5030
5031 nmp = VFSTONFS(mp);
5032 if (nfs_mount_gone(nmp)) {
5033 return ENXIO;
5034 }
5035 nfsvers = nmp->nm_vers;
5036
5037 if (uid == ~0U) {
5038 uid = euid;
5039 }
5040
5041 /* we can only support Q_GETQUOTA */
5042 cmd = cmds >> SUBCMDSHIFT;
5043 switch (cmd) {
5044 case Q_GETQUOTA:
5045 break;
5046 case Q_QUOTAON:
5047 case Q_QUOTAOFF:
5048 case Q_SETQUOTA:
5049 case Q_SETUSE:
5050 case Q_SYNC:
5051 case Q_QUOTASTAT:
5052 return ENOTSUP;
5053 default:
5054 return EINVAL;
5055 }
5056
5057 type = cmds & SUBCMDMASK;
5058 if ((u_int)type >= MAXQUOTAS) {
5059 return EINVAL;
5060 }
5061 if ((uid != euid) && ((error = vfs_context_suser(ctx)))) {
5062 return NFS_MAPERR(error);
5063 }
5064
5065 if (vfs_busy(mp, LK_NOWAIT)) {
5066 return 0;
5067 }
5068 bzero(dqb, sizeof(*dqb));
5069 error = nmp->nm_funcs->nf_getquota(nmp, ctx, uid, type, dqb);
5070 vfs_unbusy(mp);
5071 return NFS_MAPERR(error);
5072 }
5073 #endif
5074
5075 /*
5076 * Flush out the buffer cache
5077 */
int nfs_sync_callout(vnode_t, void *);

/* per-iteration context handed to nfs_sync_callout() by nfs_vfs_sync() */
struct nfs_sync_cargs {
	vfs_context_t ctx;	/* caller's VFS context */
	int waitfor;		/* flush mode, passed through to nfs_flush() */
	int error;		/* last non-zero error returned by nfs_flush() */
};
5085
5086 int
nfs_sync_callout(vnode_t vp,void * arg)5087 nfs_sync_callout(vnode_t vp, void *arg)
5088 {
5089 struct nfs_sync_cargs *cargs = (struct nfs_sync_cargs*)arg;
5090 nfsnode_t np = VTONFS(vp);
5091 int error;
5092
5093 if (np->n_flag & NREVOKE) {
5094 vn_revoke(vp, REVOKEALL, cargs->ctx);
5095 return VNODE_RETURNED;
5096 }
5097
5098 if (LIST_EMPTY(&np->n_dirtyblkhd)) {
5099 return VNODE_RETURNED;
5100 }
5101 if (np->n_wrbusy > 0) {
5102 return VNODE_RETURNED;
5103 }
5104 if (np->n_bflag & (NBFLUSHINPROG | NBINVALINPROG)) {
5105 return VNODE_RETURNED;
5106 }
5107
5108 error = nfs_flush(np, cargs->waitfor, vfs_context_thread(cargs->ctx), 0);
5109 if (error) {
5110 cargs->error = error;
5111 }
5112
5113 return VNODE_RETURNED;
5114 }
5115
5116 int
nfs_vfs_sync(mount_t mp,int waitfor,vfs_context_t ctx)5117 nfs_vfs_sync(mount_t mp, int waitfor, vfs_context_t ctx)
5118 {
5119 struct nfs_sync_cargs cargs;
5120
5121 cargs.waitfor = waitfor;
5122 cargs.ctx = ctx;
5123 cargs.error = 0;
5124
5125 vnode_iterate(mp, 0, nfs_sync_callout, &cargs);
5126
5127 return cargs.error;
5128 }
5129
5130 /*
5131 * NFS flat namespace lookup.
5132 * Currently unsupported.
5133 */
5134 /*ARGSUSED*/
int
nfs_vfs_vget(
	__unused mount_t mp,
	__unused ino64_t ino,
	__unused vnode_t *vpp,
	__unused vfs_context_t ctx)
{
	/* NFS has no flat namespace, so lookup-by-inode-number is not possible */
	return ENOTSUP;
}
5144
5145 /*
5146 * At this point, this should never happen
5147 */
5148 /*ARGSUSED*/
int
nfs_vfs_fhtovp(
	__unused mount_t mp,
	__unused int fhlen,
	__unused unsigned char *fhp,
	__unused vnode_t *vpp,
	__unused vfs_context_t ctx)
{
	/* file-handle-to-vnode conversion is not supported on the client side */
	return ENOTSUP;
}
5159
5160 /*
5161 * Vnode pointer to File handle, should never happen either
5162 */
5163 /*ARGSUSED*/
int
nfs_vfs_vptofh(
	__unused vnode_t vp,
	__unused int *fhlenp,
	__unused unsigned char *fhp,
	__unused vfs_context_t ctx)
{
	/* vnode-to-file-handle conversion is not supported on the client side */
	return ENOTSUP;
}
5173
5174 /*
5175 * Vfs start routine, a no-op.
5176 */
5177 /*ARGSUSED*/
int
nfs_vfs_start(
	__unused mount_t mp,
	__unused int flags,
	__unused vfs_context_t ctx)
{
	/* nothing to do after mount completes; always succeeds */
	return 0;
}
5186
5187 /*
5188 * Build the mount info buffer for NFS_MOUNTINFO.
5189 */
5190 int
nfs_mountinfo_assemble(struct nfsmount * nmp,struct xdrbuf * xb)5191 nfs_mountinfo_assemble(struct nfsmount *nmp, struct xdrbuf *xb)
5192 {
5193 struct xdrbuf xbinfo, xborig;
5194 char sotype[16];
5195 uint32_t origargsvers, origargslength;
5196 size_t infolength_offset, curargsopaquelength_offset, curargslength_offset, attrslength_offset, curargs_end_offset, end_offset;
5197 uint32_t miattrs[NFS_MIATTR_BITMAP_LEN];
5198 uint32_t miflags_mask[NFS_MIFLAG_BITMAP_LEN];
5199 uint32_t miflags[NFS_MIFLAG_BITMAP_LEN];
5200 uint32_t mattrs[NFS_MATTR_BITMAP_LEN];
5201 uint32_t mflags_mask[NFS_MFLAG_BITMAP_LEN];
5202 uint32_t mflags[NFS_MFLAG_BITMAP_LEN];
5203 uint32_t loc, serv, addr, comp;
5204 int i, timeo, error = 0;
5205
5206 /* set up mount info attr and flag bitmaps */
5207 NFS_BITMAP_ZERO(miattrs, NFS_MIATTR_BITMAP_LEN);
5208 NFS_BITMAP_SET(miattrs, NFS_MIATTR_FLAGS);
5209 NFS_BITMAP_SET(miattrs, NFS_MIATTR_ORIG_ARGS);
5210 NFS_BITMAP_SET(miattrs, NFS_MIATTR_CUR_ARGS);
5211 NFS_BITMAP_SET(miattrs, NFS_MIATTR_CUR_LOC_INDEX);
5212 NFS_BITMAP_ZERO(miflags_mask, NFS_MIFLAG_BITMAP_LEN);
5213 NFS_BITMAP_ZERO(miflags, NFS_MIFLAG_BITMAP_LEN);
5214 NFS_BITMAP_SET(miflags_mask, NFS_MIFLAG_DEAD);
5215 NFS_BITMAP_SET(miflags_mask, NFS_MIFLAG_NOTRESP);
5216 NFS_BITMAP_SET(miflags_mask, NFS_MIFLAG_RECOVERY);
5217 if (nmp->nm_state & NFSSTA_DEAD) {
5218 NFS_BITMAP_SET(miflags, NFS_MIFLAG_DEAD);
5219 }
5220 if ((nmp->nm_state & (NFSSTA_TIMEO | NFSSTA_JUKEBOXTIMEO)) ||
5221 ((nmp->nm_state & NFSSTA_LOCKTIMEO) && (nmp->nm_lockmode == NFS_LOCK_MODE_ENABLED))) {
5222 NFS_BITMAP_SET(miflags, NFS_MIFLAG_NOTRESP);
5223 }
5224 if (nmp->nm_state & NFSSTA_RECOVER) {
5225 NFS_BITMAP_SET(miflags, NFS_MIFLAG_RECOVERY);
5226 }
5227
5228 /* get original mount args length */
5229 xb_init_buffer(&xborig, nmp->nm_args, 2 * XDRWORD);
5230 xb_get_32(error, &xborig, origargsvers); /* version */
5231 xb_get_32(error, &xborig, origargslength); /* args length */
5232 nfsmerr_if(error);
5233
5234 /* set up current mount attributes bitmap */
5235 NFS_BITMAP_ZERO(mattrs, NFS_MATTR_BITMAP_LEN);
5236 NFS_BITMAP_SET(mattrs, NFS_MATTR_FLAGS);
5237 NFS_BITMAP_SET(mattrs, NFS_MATTR_NFS_VERSION);
5238 #if CONFIG_NFS4
5239 if (nmp->nm_vers >= NFS_VER4) {
5240 NFS_BITMAP_SET(mattrs, NFS_MATTR_NFS_MINOR_VERSION);
5241 }
5242 #endif
5243 NFS_BITMAP_SET(mattrs, NFS_MATTR_READ_SIZE);
5244 NFS_BITMAP_SET(mattrs, NFS_MATTR_WRITE_SIZE);
5245 NFS_BITMAP_SET(mattrs, NFS_MATTR_READDIR_SIZE);
5246 NFS_BITMAP_SET(mattrs, NFS_MATTR_READAHEAD);
5247 NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_REG_MIN);
5248 NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_REG_MAX);
5249 NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MIN);
5250 NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MAX);
5251 NFS_BITMAP_SET(mattrs, NFS_MATTR_LOCK_MODE);
5252 NFS_BITMAP_SET(mattrs, NFS_MATTR_SECURITY);
5253 if (nmp->nm_etype.selected < nmp->nm_etype.count) {
5254 NFS_BITMAP_SET(mattrs, NFS_MATTR_KERB_ETYPE);
5255 }
5256 NFS_BITMAP_SET(mattrs, NFS_MATTR_MAX_GROUP_LIST);
5257 NFS_BITMAP_SET(mattrs, NFS_MATTR_SOCKET_TYPE);
5258 if (nmp->nm_saddr->sa_family != AF_LOCAL) {
5259 NFS_BITMAP_SET(mattrs, NFS_MATTR_NFS_PORT);
5260 }
5261 if ((nmp->nm_vers < NFS_VER4) && nmp->nm_mountport && !nmp->nm_mount_localport) {
5262 NFS_BITMAP_SET(mattrs, NFS_MATTR_MOUNT_PORT);
5263 }
5264 NFS_BITMAP_SET(mattrs, NFS_MATTR_REQUEST_TIMEOUT);
5265 if (NMFLAG(nmp, SOFT)) {
5266 NFS_BITMAP_SET(mattrs, NFS_MATTR_SOFT_RETRY_COUNT);
5267 }
5268 if (nmp->nm_deadtimeout) {
5269 NFS_BITMAP_SET(mattrs, NFS_MATTR_DEAD_TIMEOUT);
5270 }
5271 if (nmp->nm_fh) {
5272 NFS_BITMAP_SET(mattrs, NFS_MATTR_FH);
5273 }
5274 NFS_BITMAP_SET(mattrs, NFS_MATTR_FS_LOCATIONS);
5275 NFS_BITMAP_SET(mattrs, NFS_MATTR_MNTFLAGS);
5276 if (origargsvers < NFS_ARGSVERSION_XDR) {
5277 NFS_BITMAP_SET(mattrs, NFS_MATTR_MNTFROM);
5278 }
5279 if (nmp->nm_realm) {
5280 NFS_BITMAP_SET(mattrs, NFS_MATTR_REALM);
5281 }
5282 if (nmp->nm_principal) {
5283 NFS_BITMAP_SET(mattrs, NFS_MATTR_PRINCIPAL);
5284 }
5285 if (nmp->nm_sprinc) {
5286 NFS_BITMAP_SET(mattrs, NFS_MATTR_SVCPRINCIPAL);
5287 }
5288 if (nmp->nm_nfs_localport) {
5289 NFS_BITMAP_SET(mattrs, NFS_MATTR_LOCAL_NFS_PORT);
5290 }
5291 if ((nmp->nm_vers < NFS_VER4) && nmp->nm_mount_localport) {
5292 NFS_BITMAP_SET(mattrs, NFS_MATTR_LOCAL_MOUNT_PORT);
5293 }
5294
5295 /* set up current mount flags bitmap */
5296 /* first set the flags that we will be setting - either on OR off */
5297 NFS_BITMAP_ZERO(mflags_mask, NFS_MFLAG_BITMAP_LEN);
5298 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_SOFT);
5299 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_INTR);
5300 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_RESVPORT);
5301 if (nmp->nm_sotype == SOCK_DGRAM) {
5302 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NOCONNECT);
5303 }
5304 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_DUMBTIMER);
5305 if (nmp->nm_vers < NFS_VER4) {
5306 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_CALLUMNT);
5307 }
5308 if (nmp->nm_vers >= NFS_VER3) {
5309 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_RDIRPLUS);
5310 }
5311 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NONEGNAMECACHE);
5312 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_MUTEJUKEBOX);
5313 #if CONFIG_NFS4
5314 if (nmp->nm_vers >= NFS_VER4) {
5315 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_EPHEMERAL);
5316 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NOCALLBACK);
5317 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NAMEDATTR);
5318 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NOACL);
5319 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_ACLONLY);
5320 }
5321 #endif
5322 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NFC);
5323 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NOQUOTA);
5324 if (nmp->nm_vers < NFS_VER4) {
5325 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_MNTUDP);
5326 }
5327 NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_MNTQUICK);
5328 /* now set the flags that should be set */
5329 NFS_BITMAP_ZERO(mflags, NFS_MFLAG_BITMAP_LEN);
5330 if (NMFLAG(nmp, SOFT)) {
5331 NFS_BITMAP_SET(mflags, NFS_MFLAG_SOFT);
5332 }
5333 if (NMFLAG(nmp, INTR)) {
5334 NFS_BITMAP_SET(mflags, NFS_MFLAG_INTR);
5335 }
5336 if (NMFLAG(nmp, RESVPORT)) {
5337 NFS_BITMAP_SET(mflags, NFS_MFLAG_RESVPORT);
5338 }
5339 if ((nmp->nm_sotype == SOCK_DGRAM) && NMFLAG(nmp, NOCONNECT)) {
5340 NFS_BITMAP_SET(mflags, NFS_MFLAG_NOCONNECT);
5341 }
5342 if (NMFLAG(nmp, DUMBTIMER)) {
5343 NFS_BITMAP_SET(mflags, NFS_MFLAG_DUMBTIMER);
5344 }
5345 if ((nmp->nm_vers < NFS_VER4) && NMFLAG(nmp, CALLUMNT)) {
5346 NFS_BITMAP_SET(mflags, NFS_MFLAG_CALLUMNT);
5347 }
5348 if ((nmp->nm_vers >= NFS_VER3) && NMFLAG(nmp, RDIRPLUS)) {
5349 NFS_BITMAP_SET(mflags, NFS_MFLAG_RDIRPLUS);
5350 }
5351 if (NMFLAG(nmp, NONEGNAMECACHE)) {
5352 NFS_BITMAP_SET(mflags, NFS_MFLAG_NONEGNAMECACHE);
5353 }
5354 if (NMFLAG(nmp, MUTEJUKEBOX)) {
5355 NFS_BITMAP_SET(mflags, NFS_MFLAG_MUTEJUKEBOX);
5356 }
5357 #if CONFIG_NFS4
5358 if (nmp->nm_vers >= NFS_VER4) {
5359 if (NMFLAG(nmp, EPHEMERAL)) {
5360 NFS_BITMAP_SET(mflags, NFS_MFLAG_EPHEMERAL);
5361 }
5362 if (NMFLAG(nmp, NOCALLBACK)) {
5363 NFS_BITMAP_SET(mflags, NFS_MFLAG_NOCALLBACK);
5364 }
5365 if (NMFLAG(nmp, NAMEDATTR)) {
5366 NFS_BITMAP_SET(mflags, NFS_MFLAG_NAMEDATTR);
5367 }
5368 if (NMFLAG(nmp, NOACL)) {
5369 NFS_BITMAP_SET(mflags, NFS_MFLAG_NOACL);
5370 }
5371 if (NMFLAG(nmp, ACLONLY)) {
5372 NFS_BITMAP_SET(mflags, NFS_MFLAG_ACLONLY);
5373 }
5374 }
5375 #endif
5376 if (NMFLAG(nmp, NFC)) {
5377 NFS_BITMAP_SET(mflags, NFS_MFLAG_NFC);
5378 }
5379 if (NMFLAG(nmp, NOQUOTA) || ((nmp->nm_vers >= NFS_VER4) &&
5380 !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_AVAIL_HARD) &&
5381 !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_AVAIL_SOFT) &&
5382 !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_USED))) {
5383 NFS_BITMAP_SET(mflags, NFS_MFLAG_NOQUOTA);
5384 }
5385 if ((nmp->nm_vers < NFS_VER4) && NMFLAG(nmp, MNTUDP)) {
5386 NFS_BITMAP_SET(mflags, NFS_MFLAG_MNTUDP);
5387 }
5388 if (NMFLAG(nmp, MNTQUICK)) {
5389 NFS_BITMAP_SET(mflags, NFS_MFLAG_MNTQUICK);
5390 }
5391
5392 /* assemble info buffer: */
5393 xb_init_buffer(&xbinfo, NULL, 0);
5394 xb_add_32(error, &xbinfo, NFS_MOUNT_INFO_VERSION);
5395 infolength_offset = xb_offset(&xbinfo);
5396 xb_add_32(error, &xbinfo, 0);
5397 xb_add_bitmap(error, &xbinfo, miattrs, NFS_MIATTR_BITMAP_LEN);
5398 xb_add_bitmap(error, &xbinfo, miflags, NFS_MIFLAG_BITMAP_LEN);
5399 xb_add_32(error, &xbinfo, origargslength);
5400 if (!error) {
5401 error = xb_add_bytes(&xbinfo, nmp->nm_args, origargslength, 0);
5402 }
5403
5404 /* the opaque byte count for the current mount args values: */
5405 curargsopaquelength_offset = xb_offset(&xbinfo);
5406 xb_add_32(error, &xbinfo, 0);
5407
5408 /* Encode current mount args values */
5409 xb_add_32(error, &xbinfo, NFS_ARGSVERSION_XDR);
5410 curargslength_offset = xb_offset(&xbinfo);
5411 xb_add_32(error, &xbinfo, 0);
5412 xb_add_32(error, &xbinfo, NFS_XDRARGS_VERSION_0);
5413 xb_add_bitmap(error, &xbinfo, mattrs, NFS_MATTR_BITMAP_LEN);
5414 attrslength_offset = xb_offset(&xbinfo);
5415 xb_add_32(error, &xbinfo, 0);
5416 xb_add_bitmap(error, &xbinfo, mflags_mask, NFS_MFLAG_BITMAP_LEN);
5417 xb_add_bitmap(error, &xbinfo, mflags, NFS_MFLAG_BITMAP_LEN);
5418 xb_add_32(error, &xbinfo, nmp->nm_vers); /* NFS_VERSION */
5419 #if CONFIG_NFS4
5420 if (nmp->nm_vers >= NFS_VER4) {
5421 xb_add_32(error, &xbinfo, nmp->nm_minor_vers); /* NFS_MINOR_VERSION */
5422 }
5423 #endif
5424 xb_add_32(error, &xbinfo, nmp->nm_rsize); /* READ_SIZE */
5425 xb_add_32(error, &xbinfo, nmp->nm_wsize); /* WRITE_SIZE */
5426 xb_add_32(error, &xbinfo, nmp->nm_readdirsize); /* READDIR_SIZE */
5427 xb_add_32(error, &xbinfo, nmp->nm_readahead); /* READAHEAD */
5428 xb_add_32(error, &xbinfo, nmp->nm_acregmin); /* ATTRCACHE_REG_MIN */
5429 xb_add_32(error, &xbinfo, 0); /* ATTRCACHE_REG_MIN */
5430 xb_add_32(error, &xbinfo, nmp->nm_acregmax); /* ATTRCACHE_REG_MAX */
5431 xb_add_32(error, &xbinfo, 0); /* ATTRCACHE_REG_MAX */
5432 xb_add_32(error, &xbinfo, nmp->nm_acdirmin); /* ATTRCACHE_DIR_MIN */
5433 xb_add_32(error, &xbinfo, 0); /* ATTRCACHE_DIR_MIN */
5434 xb_add_32(error, &xbinfo, nmp->nm_acdirmax); /* ATTRCACHE_DIR_MAX */
5435 xb_add_32(error, &xbinfo, 0); /* ATTRCACHE_DIR_MAX */
5436 xb_add_32(error, &xbinfo, nmp->nm_lockmode); /* LOCK_MODE */
5437 if (nmp->nm_sec.count) {
5438 xb_add_32(error, &xbinfo, nmp->nm_sec.count); /* SECURITY */
5439 nfsmerr_if(error);
5440 for (i = 0; i < nmp->nm_sec.count; i++) {
5441 xb_add_32(error, &xbinfo, nmp->nm_sec.flavors[i]);
5442 }
5443 } else if (nmp->nm_servsec.count) {
5444 xb_add_32(error, &xbinfo, nmp->nm_servsec.count); /* SECURITY */
5445 nfsmerr_if(error);
5446 for (i = 0; i < nmp->nm_servsec.count; i++) {
5447 xb_add_32(error, &xbinfo, nmp->nm_servsec.flavors[i]);
5448 }
5449 } else {
5450 xb_add_32(error, &xbinfo, 1); /* SECURITY */
5451 xb_add_32(error, &xbinfo, nmp->nm_auth);
5452 }
5453 if (nmp->nm_etype.selected < nmp->nm_etype.count) {
5454 xb_add_32(error, &xbinfo, nmp->nm_etype.count);
5455 xb_add_32(error, &xbinfo, nmp->nm_etype.selected);
5456 for (uint32_t j = 0; j < nmp->nm_etype.count; j++) {
5457 xb_add_32(error, &xbinfo, nmp->nm_etype.etypes[j]);
5458 }
5459 nfsmerr_if(error);
5460 }
5461 xb_add_32(error, &xbinfo, nmp->nm_numgrps); /* MAX_GROUP_LIST */
5462 nfsmerr_if(error);
5463
5464 switch (nmp->nm_saddr->sa_family) {
5465 case AF_INET:
5466 case AF_INET6:
5467 snprintf(sotype, sizeof(sotype), "%s%s", (nmp->nm_sotype == SOCK_DGRAM) ? "udp" : "tcp",
5468 nmp->nm_sofamily ? (nmp->nm_sofamily == AF_INET) ? "4" : "6" : "");
5469 xb_add_string(error, &xbinfo, sotype, strlen(sotype)); /* SOCKET_TYPE */
5470 xb_add_32(error, &xbinfo, ntohs(((struct sockaddr_in*)nmp->nm_saddr)->sin_port)); /* NFS_PORT */
5471 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MOUNT_PORT)) {
5472 xb_add_32(error, &xbinfo, nmp->nm_mountport); /* MOUNT_PORT */
5473 }
5474 break;
5475 case AF_LOCAL:
5476 strlcpy(sotype, (nmp->nm_sotype == SOCK_DGRAM) ? "ticlts" : "ticotsord", sizeof(sotype));
5477 xb_add_string(error, &xbinfo, sotype, strlen(sotype));
5478 break;
5479 default:
5480 NFS_VFS_DBG("Unsupported address family %d\n", nmp->nm_saddr->sa_family);
5481 printf("Unsupported address family %d\n", nmp->nm_saddr->sa_family);
5482 error = EINVAL;
5483 break;
5484 }
5485
5486 timeo = (nmp->nm_timeo * 10) / NFS_HZ;
5487 xb_add_32(error, &xbinfo, timeo / 10); /* REQUEST_TIMEOUT */
5488 xb_add_32(error, &xbinfo, (timeo % 10) * 100000000); /* REQUEST_TIMEOUT */
5489 if (NMFLAG(nmp, SOFT)) {
5490 xb_add_32(error, &xbinfo, nmp->nm_retry); /* SOFT_RETRY_COUNT */
5491 }
5492 if (nmp->nm_deadtimeout) {
5493 xb_add_32(error, &xbinfo, nmp->nm_deadtimeout); /* DEAD_TIMEOUT */
5494 xb_add_32(error, &xbinfo, 0); /* DEAD_TIMEOUT */
5495 }
5496 if (nmp->nm_fh) {
5497 xb_add_fh(error, &xbinfo, &nmp->nm_fh->fh_data[0], nmp->nm_fh->fh_len); /* FH */
5498 }
5499 xb_add_32(error, &xbinfo, nmp->nm_locations.nl_numlocs); /* FS_LOCATIONS */
5500 for (loc = 0; !error && (loc < nmp->nm_locations.nl_numlocs); loc++) {
5501 xb_add_32(error, &xbinfo, nmp->nm_locations.nl_locations[loc]->nl_servcount);
5502 for (serv = 0; !error && (serv < nmp->nm_locations.nl_locations[loc]->nl_servcount); serv++) {
5503 xb_add_string(error, &xbinfo, nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_name,
5504 strlen(nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_name));
5505 xb_add_32(error, &xbinfo, nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount);
5506 for (addr = 0; !error && (addr < nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount); addr++) {
5507 xb_add_string(error, &xbinfo, nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addresses[addr],
5508 strlen(nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addresses[addr]));
5509 }
5510 xb_add_32(error, &xbinfo, 0); /* empty server info */
5511 }
5512 xb_add_32(error, &xbinfo, nmp->nm_locations.nl_locations[loc]->nl_path.np_compcount);
5513 for (comp = 0; !error && (comp < nmp->nm_locations.nl_locations[loc]->nl_path.np_compcount); comp++) {
5514 xb_add_string(error, &xbinfo, nmp->nm_locations.nl_locations[loc]->nl_path.np_components[comp],
5515 strlen(nmp->nm_locations.nl_locations[loc]->nl_path.np_components[comp]));
5516 }
5517 xb_add_32(error, &xbinfo, 0); /* empty fs location info */
5518 }
5519 xb_add_32(error, &xbinfo, vfs_flags(nmp->nm_mountp)); /* MNTFLAGS */
5520 if (origargsvers < NFS_ARGSVERSION_XDR) {
5521 xb_add_string(error, &xbinfo, vfs_statfs(nmp->nm_mountp)->f_mntfromname,
5522 strlen(vfs_statfs(nmp->nm_mountp)->f_mntfromname)); /* MNTFROM */
5523 }
5524 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_REALM)) {
5525 xb_add_string(error, &xbinfo, nmp->nm_realm, strlen(nmp->nm_realm));
5526 }
5527 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_PRINCIPAL)) {
5528 xb_add_string(error, &xbinfo, nmp->nm_principal, strlen(nmp->nm_principal));
5529 }
5530 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SVCPRINCIPAL)) {
5531 xb_add_string(error, &xbinfo, nmp->nm_sprinc, strlen(nmp->nm_sprinc));
5532 }
5533 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_LOCAL_NFS_PORT)) {
5534 struct sockaddr_un *un = (struct sockaddr_un *)nmp->nm_saddr;
5535 xb_add_string(error, &xbinfo, un->sun_path, strlen(un->sun_path));
5536 }
5537 if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_LOCAL_MOUNT_PORT)) {
5538 xb_add_string(error, &xbinfo, nmp->nm_mount_localport, strlen(nmp->nm_mount_localport));
5539 }
5540 curargs_end_offset = xb_offset(&xbinfo);
5541
5542 /* NFS_MIATTR_CUR_LOC_INDEX */
5543 xb_add_32(error, &xbinfo, nmp->nm_locations.nl_current.nli_flags);
5544 xb_add_32(error, &xbinfo, nmp->nm_locations.nl_current.nli_loc);
5545 xb_add_32(error, &xbinfo, nmp->nm_locations.nl_current.nli_serv);
5546 xb_add_32(error, &xbinfo, nmp->nm_locations.nl_current.nli_addr);
5547
5548 xb_build_done(error, &xbinfo);
5549
5550 /* update opaque counts */
5551 end_offset = xb_offset(&xbinfo);
5552 if (!error) {
5553 error = xb_seek(&xbinfo, attrslength_offset);
5554 xb_add_32(error, &xbinfo, curargs_end_offset - attrslength_offset - XDRWORD /*don't include length field*/);
5555 }
5556 if (!error) {
5557 error = xb_seek(&xbinfo, curargslength_offset);
5558 xb_add_32(error, &xbinfo, curargs_end_offset - curargslength_offset + XDRWORD /*version*/);
5559 }
5560 if (!error) {
5561 error = xb_seek(&xbinfo, curargsopaquelength_offset);
5562 xb_add_32(error, &xbinfo, curargs_end_offset - curargslength_offset + XDRWORD /*version*/);
5563 }
5564 if (!error) {
5565 error = xb_seek(&xbinfo, infolength_offset);
5566 xb_add_32(error, &xbinfo, end_offset - infolength_offset + XDRWORD /*version*/);
5567 }
5568 nfsmerr_if(error);
5569
5570 /* copy result xdrbuf to caller */
5571 *xb = xbinfo;
5572
5573 /* and mark the local copy as not needing cleanup */
5574 xbinfo.xb_flags &= ~XB_CLEANUP;
5575 nfsmerr:
5576 xb_cleanup(&xbinfo);
5577 return error;
5578 }
5579
5580 /*
5581 * Do that sysctl thang...
5582 */
5583 int
nfs_vfs_sysctl(int * name,u_int namelen,user_addr_t oldp,size_t * oldlenp,user_addr_t newp,size_t newlen,vfs_context_t ctx)5584 nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
5585 user_addr_t newp, size_t newlen, vfs_context_t ctx)
5586 {
5587 int error = 0, val;
5588 struct sysctl_req *req = NULL;
5589 union union_vfsidctl vc;
5590 mount_t mp;
5591 struct nfsmount *nmp = NULL;
5592 struct vfsquery vq;
5593 struct nfsreq *rq;
5594 boolean_t is_64_bit;
5595 fsid_t fsid;
5596 struct xdrbuf xb;
5597 struct netfs_status *nsp = NULL;
5598 int timeoutmask;
5599 uint totlen, count, numThreads;
5600
5601 /*
5602 * All names at this level are terminal.
5603 */
5604 if (namelen > 1) {
5605 return ENOTDIR; /* overloaded */
5606 }
5607 is_64_bit = vfs_context_is64bit(ctx);
5608
5609 /* common code for "new style" VFS_CTL sysctl, get the mount. */
5610 switch (name[0]) {
5611 case VFS_CTL_TIMEO:
5612 case VFS_CTL_NOLOCKS:
5613 case VFS_CTL_NSTATUS:
5614 #if defined(XNU_TARGET_OS_OSX)
5615 case VFS_CTL_QUERY:
5616 #endif /* XNU_TARGET_OS_OSX */
5617 req = CAST_DOWN(struct sysctl_req *, oldp);
5618 if (req == NULL) {
5619 return EFAULT;
5620 }
5621 error = SYSCTL_IN(req, &vc, is_64_bit? sizeof(vc.vc64):sizeof(vc.vc32));
5622 if (error) {
5623 return NFS_MAPERR(error);
5624 }
5625 mp = vfs_getvfs(&vc.vc32.vc_fsid); /* works for 32 and 64 */
5626 if (mp == NULL) {
5627 return ENOENT;
5628 }
5629 nmp = VFSTONFS(mp);
5630 if (!nmp) {
5631 return ENOENT;
5632 }
5633 bzero(&vq, sizeof(vq));
5634 req->newidx = 0;
5635 if (is_64_bit) {
5636 req->newptr = vc.vc64.vc_ptr;
5637 req->newlen = (size_t)vc.vc64.vc_len;
5638 } else {
5639 req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
5640 req->newlen = vc.vc32.vc_len;
5641 }
5642 break;
5643 #if !defined(XNU_TARGET_OS_OSX)
5644 case VFS_CTL_QUERY:
5645 return EPERM;
5646 #endif /* ! XNU_TARGET_OS_OSX */
5647 }
5648
5649 switch (name[0]) {
5650 case NFS_NFSSTATS:
5651 if (!oldp) {
5652 *oldlenp = sizeof nfsclntstats;
5653 return 0;
5654 }
5655
5656 if (*oldlenp < sizeof nfsclntstats) {
5657 *oldlenp = sizeof nfsclntstats;
5658 return ENOMEM;
5659 }
5660
5661 error = copyout(&nfsclntstats, oldp, sizeof nfsclntstats);
5662 if (error) {
5663 return NFS_MAPERR(error);
5664 }
5665
5666 if (newp && newlen != sizeof nfsclntstats) {
5667 return EINVAL;
5668 }
5669
5670 if (newp) {
5671 return copyin(newp, &nfsclntstats, sizeof nfsclntstats);
5672 }
5673 return 0;
5674 case NFS_NFSZEROSTATS:
5675 bzero(&nfsclntstats, sizeof nfsclntstats);
5676 return 0;
5677 case NFS_MOUNTINFO:
5678 /* read in the fsid */
5679 if (*oldlenp < sizeof(fsid)) {
5680 return EINVAL;
5681 }
5682 if ((error = copyin(oldp, &fsid, sizeof(fsid)))) {
5683 return NFS_MAPERR(error);
5684 }
5685 /* swizzle it back to host order */
5686 fsid.val[0] = ntohl(fsid.val[0]);
5687 fsid.val[1] = ntohl(fsid.val[1]);
5688 /* find mount and make sure it's NFS */
5689 if (((mp = vfs_getvfs(&fsid))) == NULL) {
5690 return ENOENT;
5691 }
5692 if (strcmp(vfs_statfs(mp)->f_fstypename, "nfs")) {
5693 return EINVAL;
5694 }
5695 if (((nmp = VFSTONFS(mp))) == NULL) {
5696 return ENOENT;
5697 }
5698 xb_init(&xb, XDRBUF_NONE);
5699 if ((error = nfs_mountinfo_assemble(nmp, &xb))) {
5700 return NFS_MAPERR(error);
5701 }
5702 if (*oldlenp < xb.xb_u.xb_buffer.xbb_len) {
5703 error = ENOMEM;
5704 } else {
5705 error = copyout(xb_buffer_base(&xb), oldp, xb.xb_u.xb_buffer.xbb_len);
5706 }
5707 *oldlenp = xb.xb_u.xb_buffer.xbb_len;
5708 xb_cleanup(&xb);
5709 break;
5710 case VFS_CTL_NOLOCKS:
5711 if (req->oldptr != USER_ADDR_NULL) {
5712 lck_mtx_lock(&nmp->nm_lock);
5713 val = (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED) ? 1 : 0;
5714 lck_mtx_unlock(&nmp->nm_lock);
5715 error = SYSCTL_OUT(req, &val, sizeof(val));
5716 if (error) {
5717 return NFS_MAPERR(error);
5718 }
5719 }
5720 if (req->newptr != USER_ADDR_NULL) {
5721 error = SYSCTL_IN(req, &val, sizeof(val));
5722 if (error) {
5723 return NFS_MAPERR(error);
5724 }
5725 lck_mtx_lock(&nmp->nm_lock);
5726 if (nmp->nm_lockmode == NFS_LOCK_MODE_LOCAL) {
5727 /* can't toggle locks when using local locks */
5728 error = EINVAL;
5729 #if CONFIG_NFS4
5730 } else if ((nmp->nm_vers >= NFS_VER4) && val) {
5731 /* can't disable locks for NFSv4 */
5732 error = EINVAL;
5733 #endif
5734 } else if (val) {
5735 if ((nmp->nm_vers <= NFS_VER3) && (nmp->nm_lockmode == NFS_LOCK_MODE_ENABLED)) {
5736 nfs_lockd_mount_unregister(nmp);
5737 }
5738 nmp->nm_lockmode = NFS_LOCK_MODE_DISABLED;
5739 nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
5740 } else {
5741 if ((nmp->nm_vers <= NFS_VER3) && (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED)) {
5742 nfs_lockd_mount_register(nmp);
5743 }
5744 nmp->nm_lockmode = NFS_LOCK_MODE_ENABLED;
5745 }
5746 lck_mtx_unlock(&nmp->nm_lock);
5747 }
5748 break;
5749 #if defined(XNU_TARGET_OS_OSX)
5750 case VFS_CTL_QUERY:
5751 lck_mtx_lock(&nmp->nm_lock);
5752 /* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
5753 int softnobrowse = (NMFLAG(nmp, SOFT) && (vfs_flags(nmp->nm_mountp) & MNT_DONTBROWSE));
5754 if (!softnobrowse && (nmp->nm_state & NFSSTA_TIMEO)) {
5755 vq.vq_flags |= VQ_NOTRESP;
5756 }
5757 if (!softnobrowse && (nmp->nm_state & NFSSTA_JUKEBOXTIMEO) && !NMFLAG(nmp, MUTEJUKEBOX)) {
5758 vq.vq_flags |= VQ_NOTRESP;
5759 }
5760 if (!softnobrowse && (nmp->nm_state & NFSSTA_LOCKTIMEO) &&
5761 (nmp->nm_lockmode == NFS_LOCK_MODE_ENABLED)) {
5762 vq.vq_flags |= VQ_NOTRESP;
5763 }
5764 if (nmp->nm_state & NFSSTA_DEAD) {
5765 vq.vq_flags |= VQ_DEAD;
5766 }
5767 lck_mtx_unlock(&nmp->nm_lock);
5768 error = SYSCTL_OUT(req, &vq, sizeof(vq));
5769 break;
5770 #endif /* XNU_TARGET_OS_OSX */
5771 case VFS_CTL_TIMEO:
5772 if (req->oldptr != USER_ADDR_NULL) {
5773 lck_mtx_lock(&nmp->nm_lock);
5774 val = nmp->nm_tprintf_initial_delay;
5775 lck_mtx_unlock(&nmp->nm_lock);
5776 error = SYSCTL_OUT(req, &val, sizeof(val));
5777 if (error) {
5778 return NFS_MAPERR(error);
5779 }
5780 }
5781 if (req->newptr != USER_ADDR_NULL) {
5782 error = SYSCTL_IN(req, &val, sizeof(val));
5783 if (error) {
5784 return NFS_MAPERR(error);
5785 }
5786 lck_mtx_lock(&nmp->nm_lock);
5787 if (val < 0) {
5788 nmp->nm_tprintf_initial_delay = 0;
5789 } else {
5790 nmp->nm_tprintf_initial_delay = val;
5791 }
5792 lck_mtx_unlock(&nmp->nm_lock);
5793 }
5794 break;
5795 case VFS_CTL_NSTATUS:
5796 /*
5797 * Return the status of this mount. This is much more
5798 * information than VFS_CTL_QUERY. In addition to the
5799 * vq_flags return the significant mount options along
5800 * with the list of threads blocked on the mount and
5801 * how long the threads have been waiting.
5802 */
5803
5804 lck_mtx_lock(&nfs_request_mutex);
5805 lck_mtx_lock(&nmp->nm_lock);
5806
5807 /*
5808 * Count the number of requests waiting for a reply.
5809 * Note: there could be multiple requests from the same thread.
5810 */
5811 numThreads = 0;
5812 TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
5813 if (rq->r_nmp == nmp) {
5814 numThreads++;
5815 }
5816 }
5817
5818 /* Calculate total size of result buffer */
5819 totlen = sizeof(struct netfs_status) + (numThreads * sizeof(uint64_t));
5820
5821 if (req->oldptr == USER_ADDR_NULL) { // Caller is querying buffer size
5822 lck_mtx_unlock(&nmp->nm_lock);
5823 lck_mtx_unlock(&nfs_request_mutex);
5824 return SYSCTL_OUT(req, NULL, totlen);
5825 }
5826 if (req->oldlen < totlen) { // Check if caller's buffer is big enough
5827 lck_mtx_unlock(&nmp->nm_lock);
5828 lck_mtx_unlock(&nfs_request_mutex);
5829 return ERANGE;
5830 }
5831
5832 nsp = kalloc_data(totlen, Z_WAITOK | Z_ZERO);
5833 if (nsp == NULL) {
5834 lck_mtx_unlock(&nmp->nm_lock);
5835 lck_mtx_unlock(&nfs_request_mutex);
5836 return ENOMEM;
5837 }
5838 timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO;
5839 if (nmp->nm_state & timeoutmask) {
5840 nsp->ns_status |= VQ_NOTRESP;
5841 }
5842 if (nmp->nm_state & NFSSTA_DEAD) {
5843 nsp->ns_status |= VQ_DEAD;
5844 }
5845
5846 (void) nfs_mountopts(nmp, nsp->ns_mountopts, sizeof(nsp->ns_mountopts));
5847 nsp->ns_threadcount = numThreads;
5848
5849 /*
5850 * Get the thread ids of threads waiting for a reply
5851 * and find the longest wait time.
5852 */
5853 if (numThreads > 0) {
5854 struct timeval now;
5855 time_t sendtime;
5856 uint64_t waittime;
5857
5858 microuptime(&now);
5859 count = 0;
5860 sendtime = now.tv_sec;
5861 TAILQ_FOREACH(rq, &nfs_reqq, r_chain) {
5862 if (rq->r_nmp == nmp) {
5863 if (rq->r_start < sendtime) {
5864 sendtime = rq->r_start;
5865 }
5866 // A thread_id of zero is used to represent an async I/O request.
5867 nsp->ns_threadids[count] =
5868 rq->r_thread ? thread_tid(rq->r_thread) : 0;
5869 if (++count >= numThreads) {
5870 break;
5871 }
5872 }
5873 }
5874 waittime = now.tv_sec - sendtime;
5875 nsp->ns_waittime = waittime > UINT32_MAX ? UINT32_MAX : (uint32_t)waittime;
5876 }
5877
5878 lck_mtx_unlock(&nmp->nm_lock);
5879 lck_mtx_unlock(&nfs_request_mutex);
5880
5881 error = SYSCTL_OUT(req, nsp, totlen);
5882 kfree_data(nsp, totlen);
5883 break;
5884 default:
5885 return ENOTSUP;
5886 }
5887 return NFS_MAPERR(error);
5888 }
5889
5890 #if CONFIG_NFS4
5891
5892 static int
mapname2id(struct nfs_testmapid * map)5893 mapname2id(struct nfs_testmapid *map)
5894 {
5895 int error;
5896 error = nfs4_id2guid(map->ntm_name, &map->ntm_guid, map->ntm_grpflag);
5897 if (error) {
5898 return error;
5899 }
5900
5901 if (map->ntm_grpflag) {
5902 error = kauth_cred_guid2gid(&map->ntm_guid, (gid_t *)&map->ntm_id);
5903 } else {
5904 error = kauth_cred_guid2uid(&map->ntm_guid, (uid_t *)&map->ntm_id);
5905 }
5906
5907 return error;
5908 }
5909
5910 static int
mapid2name(struct nfs_testmapid * map)5911 mapid2name(struct nfs_testmapid *map)
5912 {
5913 int error;
5914 size_t len = sizeof(map->ntm_name);
5915
5916 if (map->ntm_grpflag) {
5917 error = kauth_cred_gid2guid((gid_t)map->ntm_id, &map->ntm_guid);
5918 } else {
5919 error = kauth_cred_uid2guid((uid_t)map->ntm_id, &map->ntm_guid);
5920 }
5921
5922 if (error) {
5923 return error;
5924 }
5925
5926 error = nfs4_guid2id(&map->ntm_guid, map->ntm_name, &len, map->ntm_grpflag);
5927
5928 return error;
5929 }
5930
5931 static int
nfsclnt_testidmap(proc_t p,user_addr_t argp)5932 nfsclnt_testidmap(proc_t p, user_addr_t argp)
5933 {
5934 struct nfs_testmapid mapid;
5935 int error, coerror;
5936 size_t len = sizeof(mapid.ntm_name);
5937
5938 /* Let root make this call. */
5939 error = proc_suser(p);
5940 if (error) {
5941 return error;
5942 }
5943
5944 error = copyin(argp, &mapid, sizeof(mapid));
5945 mapid.ntm_name[MAXIDNAMELEN - 1] = '\0';
5946
5947 if (error) {
5948 return error;
5949 }
5950 switch (mapid.ntm_lookup) {
5951 case NTM_NAME2ID:
5952 error = mapname2id(&mapid);
5953 break;
5954 case NTM_ID2NAME:
5955 error = mapid2name(&mapid);
5956 break;
5957 case NTM_NAME2GUID:
5958 error = nfs4_id2guid(mapid.ntm_name, &mapid.ntm_guid, mapid.ntm_grpflag);
5959 break;
5960 case NTM_GUID2NAME:
5961 error = nfs4_guid2id(&mapid.ntm_guid, mapid.ntm_name, &len, mapid.ntm_grpflag);
5962 break;
5963 default:
5964 return EINVAL;
5965 }
5966
5967 coerror = copyout(&mapid, argp, sizeof(mapid));
5968
5969 return error ? error : coerror;
5970 }
5971 #endif /* CONFIG_NFS4 */
5972
5973 /*
5974 * Setup nfsclnt character device to be used by nfsclnt() system call.
5975 */
5976
static int nfsclnt_device_installed = 0;        /* nonzero once the chardev is registered */
static void *nfsclnt_devfs = NULL;              /* devfs node handle from devfs_make_node() */
static d_ioctl_t nfsclnt_ioctl;                 /* forward declaration for the cdevsw table */
5980
/*
 * Character-device switch table for the nfsclnt control device.
 * Only ioctl is implemented; every other entry point is a stub
 * that returns an error.
 */
static const struct cdevsw nfsclnt_cdevsw =
{
	.d_open = eno_opcl,
	.d_close = eno_opcl,
	.d_read = eno_rdwrt,
	.d_write = eno_rdwrt,
	.d_ioctl = nfsclnt_ioctl,
	.d_stop = eno_stop,
	.d_reset = eno_reset,
	.d_ttys = NULL,
	.d_select = eno_select,
	.d_mmap = eno_mmap,
	.d_strategy = eno_strat,
	.d_reserved_1 = eno_getc,
	.d_reserved_2 = eno_putc,
	.d_type = 0
};
5998
5999 static int
nfsclnt_ioctl(__unused dev_t dev,u_long cmd,caddr_t data,__unused int flag,struct proc * p)6000 nfsclnt_ioctl(__unused dev_t dev, u_long cmd, caddr_t data,
6001 __unused int flag, struct proc *p)
6002 {
6003 struct lockd_ans la;
6004 int error;
6005 user_addr_t addr = (user_addr_t)data;
6006
6007 switch (cmd) {
6008 case NFSCLNT_LOCKDANS:
6009 error = copyin(addr, &la, sizeof(la));
6010 if (!error) {
6011 error = nfslockdans(p, &la);
6012 }
6013 break;
6014 case NFSCLNT_LOCKDNOTIFY:
6015 error = nfslockdnotify(p, addr);
6016 break;
6017 #if CONFIG_NFS4
6018 case NFSCLNT_TESTIDMAP:
6019 error = nfsclnt_testidmap(p, addr);
6020 break;
6021 #endif
6022 default:
6023 error = EINVAL;
6024 }
6025
6026 return NFS_MAPERR(error);
6027 }
6028
6029 int
nfsclnt_device_add(void)6030 nfsclnt_device_add(void)
6031 {
6032 int ret;
6033
6034 if (nfsclnt_device_installed) {
6035 return 0;
6036 }
6037
6038 nfsclnt_device_installed = 1;
6039
6040 ret = cdevsw_add(-1, &nfsclnt_cdevsw);
6041 if (ret < 0) {
6042 printf("nfsclnt_device_add: cdevsw_add failed on nfsclnt control device, err %d\n", ret);
6043 nfsclnt_device_installed = 0;
6044 return -1;
6045 }
6046
6047 nfsclnt_devfs = devfs_make_node(makedev(ret, 0), DEVFS_CHAR,
6048 UID_ROOT, GID_WHEEL, 0666, NFSCLNT_DEVICE);
6049
6050 if (nfsclnt_devfs == NULL) {
6051 printf("nfsclnt_device_add: devfs_make_node failed on nfsclnt control device\n");
6052 nfsclnt_device_installed = 0;
6053 return -1;
6054 }
6055
6056 printf("nfsclnt_device_add: nfsclnt chardev was added successfully\n");
6057 return 0;
6058 }
6059
6060 #endif /* CONFIG_NFS_CLIENT */
6061