xref: /xnu-8020.121.3/bsd/nfs/nfs_gss.c (revision fdd8201d7b966f0c3ea610489d29bd841d358941)
1 /*
2  * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <nfs/nfs_conf.h>
30 #if CONFIG_NFS
31 
32 /*************
33  * These functions implement RPCSEC_GSS security for the NFS client and server.
34  * The code is specific to the use of Kerberos v5 and the use of DES MAC MD5
35  * protection as described in Internet RFC 2203 and 2623.
36  *
37  * In contrast to the original AUTH_SYS authentication, RPCSEC_GSS is stateful.
38  * It requires the client and server negotiate a secure connection as part of a
39  * security context. The context state is maintained in client and server structures.
40  * On the client side, each user of an NFS mount is assigned their own context,
41  * identified by UID, on their first use of the mount, and it persists until the
42  * unmount or until the context is renewed.  Each user context has a corresponding
43  * server context which the server maintains until the client destroys it, or
44  * until the context expires.
45  *
46  * The client and server contexts are set up dynamically.  When a user attempts
47  * to send an NFS request, if there is no context for the user, then one is
48  * set up via an exchange of NFS null procedure calls as described in RFC 2203.
49  * During this exchange, the client and server pass a security token that is
50  * forwarded via Mach upcall to the gssd, which invokes the GSS-API to authenticate
51  * the user to the server (and vice-versa). The client and server also receive
52  * a unique session key that can be used to digitally sign the credentials and
53  * verifier or optionally to provide data integrity and/or privacy.
54  *
55  * Once the context is complete, the client and server enter a normal data
56  * exchange phase - beginning with the NFS request that prompted the context
57  * creation. During this phase, the client's RPC header contains an RPCSEC_GSS
58  * credential and verifier, and the server returns a verifier as well.
59  * For simple authentication, the verifier contains a signed checksum of the
60  * RPC header, including the credential.  The server's verifier has a signed
61  * checksum of the current sequence number.
62  *
63  * Each client call contains a sequence number that nominally increases by one
64  * on each request.  The sequence number is intended to prevent replay attacks.
65  * Since the protocol can be used over UDP, there is some allowance for
66  * out-of-sequence requests, so the server checks whether the sequence numbers
67  * are within a sequence "window". If a sequence number is outside the lower
68  * bound of the window, the server silently drops the request. This has some
69  * implications for retransmission. If a request needs to be retransmitted, the
70  * client must bump the sequence number even if the request XID is unchanged.
71  *
72  * When the NFS mount is unmounted, the client sends a "destroy" credential
73  * to delete the server's context for each user of the mount. Since it's
74  * possible for the client to crash or disconnect without sending the destroy
75  * message, the server has a thread that reaps contexts that have been idle
76  * too long.
77  */
78 
79 #include <stdint.h>
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc.h>
83 #include <sys/kauth.h>
84 #include <sys/kernel.h>
85 #include <sys/mount_internal.h>
86 #include <sys/vnode.h>
87 #include <sys/ubc.h>
88 #include <sys/malloc.h>
89 #include <sys/kpi_mbuf.h>
90 #include <sys/ucred.h>
91 
92 #include <kern/host.h>
93 #include <kern/task.h>
94 #include <libkern/libkern.h>
95 
96 #include <mach/task.h>
97 #include <mach/host_special_ports.h>
98 #include <mach/host_priv.h>
99 #include <mach/thread_act.h>
100 #include <mach/mig_errors.h>
101 #include <mach/vm_map.h>
102 #include <vm/vm_map.h>
103 #include <vm/vm_kern.h>
104 #include <gssd/gssd_mach.h>
105 
106 #include <nfs/rpcv2.h>
107 #include <nfs/nfsproto.h>
108 #include <nfs/nfs.h>
109 #include <nfs/nfsnode.h>
110 #include <nfs/nfs_gss.h>
111 #include <nfs/nfsmount.h>
112 #include <nfs/xdr_subs.h>
113 #include <nfs/nfsm_subs.h>
114 #include <nfs/nfs_gss.h>
115 #include <mach_assert.h>
116 #include <kern/assert.h>
117 
118 #define NFS_GSS_MACH_MAX_RETRIES 3
119 
120 #define NFS_GSS_DBG(...) NFSCLNT_DBG(NFSCLNT_FAC_GSS, 7, ## __VA_ARGS__)
121 #define NFS_GSS_ISDBG  (NFSCLNT_DEBUG_FACILITY &  NFSCLNT_FAC_GSS)
122 
123 #define NFSRV_GSS_DBG(...) NFSRV_DBG(NFSRV_FAC_GSS, 7, ## __VA_ARGS__)
124 
125 #if CONFIG_NFS_SERVER
126 u_long nfs_gss_svc_ctx_hash;
127 struct nfs_gss_svc_ctx_hashhead *nfs_gss_svc_ctx_hashtbl;
128 static LCK_GRP_DECLARE(nfs_gss_svc_grp, "rpcsec_gss_svc");
129 static LCK_MTX_DECLARE(nfs_gss_svc_ctx_mutex, &nfs_gss_svc_grp);
130 uint32_t nfsrv_gss_context_ttl = GSS_CTX_EXPIRE;
131 #define GSS_SVC_CTX_TTL ((uint64_t)max(2*GSS_CTX_PEND, nfsrv_gss_context_ttl) * NSEC_PER_SEC)
132 #endif /* CONFIG_NFS_SERVER */
133 
134 #if CONFIG_NFS_CLIENT
135 LCK_GRP_DECLARE(nfs_gss_clnt_grp, "rpcsec_gss_clnt");
136 #endif /* CONFIG_NFS_CLIENT */
137 
138 #define KRB5_MAX_MIC_SIZE 128
139 uint8_t krb5_mech_oid[11] = { 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x12, 0x01, 0x02, 0x02 };
140 static uint8_t xdrpad[] = { 0x00, 0x00, 0x00, 0x00};
141 
142 #if CONFIG_NFS_CLIENT
143 static int      nfs_gss_clnt_ctx_find(struct nfsreq *);
144 static int      nfs_gss_clnt_ctx_init(struct nfsreq *, struct nfs_gss_clnt_ctx *);
145 static int      nfs_gss_clnt_ctx_init_retry(struct nfsreq *, struct nfs_gss_clnt_ctx *);
146 static int      nfs_gss_clnt_ctx_callserver(struct nfsreq *, struct nfs_gss_clnt_ctx *);
147 static uint8_t  *nfs_gss_clnt_svcname(struct nfsmount *, gssd_nametype *, size_t *);
148 static int      nfs_gss_clnt_gssd_upcall(struct nfsreq *, struct nfs_gss_clnt_ctx *, uint32_t);
149 void            nfs_gss_clnt_ctx_neg_cache_reap(struct nfsmount *);
150 static void     nfs_gss_clnt_ctx_clean(struct nfs_gss_clnt_ctx *);
151 static int      nfs_gss_clnt_ctx_copy(struct nfs_gss_clnt_ctx *, struct nfs_gss_clnt_ctx **);
152 static void     nfs_gss_clnt_ctx_destroy(struct nfs_gss_clnt_ctx *);
153 static void     nfs_gss_clnt_log_error(struct nfsreq *, struct nfs_gss_clnt_ctx *, uint32_t, uint32_t);
154 #endif /* CONFIG_NFS_CLIENT */
155 
156 #if CONFIG_NFS_SERVER
157 static struct nfs_gss_svc_ctx *nfs_gss_svc_ctx_find(uint32_t);
158 static void     nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *);
159 static void     nfs_gss_svc_ctx_timer(void *, void *);
160 static int      nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx *);
161 static int      nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *, uint32_t);
162 
163 /* This is only used by server code */
164 static void     nfs_gss_nfsm_chain(struct nfsm_chain *, mbuf_t);
165 #endif /* CONFIG_NFS_SERVER */
166 
167 static void     host_release_special_port(mach_port_t);
168 static mach_port_t host_copy_special_port(mach_port_t);
169 static void     nfs_gss_mach_alloc_buffer(u_char *, size_t, vm_map_copy_t *);
170 static int      nfs_gss_mach_vmcopyout(vm_map_copy_t, uint32_t, u_char *);
171 
172 static int      nfs_gss_mchain_length(mbuf_t);
173 static int      nfs_gss_append_chain(struct nfsm_chain *, mbuf_t);
174 static int      nfs_gss_seqbits_size(uint32_t);
175 
176 #if CONFIG_NFS_SERVER
177 thread_call_t nfs_gss_svc_ctx_timer_call;
178 int nfs_gss_timer_on = 0;
179 uint32_t nfs_gss_ctx_count = 0;
180 const uint32_t nfs_gss_ctx_max = GSS_SVC_MAXCONTEXTS;
181 #endif /* CONFIG_NFS_SERVER */
182 
183 /*
184  * Common RPCSEC_GSS support routines
185  */
186 
187 static errno_t
rpc_gss_prepend_32(mbuf_t * mb,uint32_t value)188 rpc_gss_prepend_32(mbuf_t *mb, uint32_t value)
189 {
190 	int error;
191 	uint32_t *data;
192 
193 #if 0
194 	data = mbuf_data(*mb);
195 	/*
196 	 * If a wap token comes back and is not aligned
197 	 * get a new buffer (which should be aligned) to put the
198 	 * length in.
199 	 */
200 	if ((uintptr_t)data & 0x3) {
201 		mbuf_t nmb;
202 
203 		error = mbuf_get(MBUF_WAITOK, MBUF_TYPE_DATA, &nmb);
204 		if (error) {
205 			return error;
206 		}
207 		mbuf_setnext(nmb, *mb);
208 		*mb = nmb;
209 	}
210 #endif
211 	error = mbuf_prepend(mb, sizeof(uint32_t), MBUF_WAITOK);
212 	if (error) {
213 		return error;
214 	}
215 
216 	data = mbuf_data(*mb);
217 	*data = txdr_unsigned(value);
218 
219 	return 0;
220 }
221 
222 /*
223  * Prepend the sequence number to the xdr encode argumen or result
224  * Sequence number is prepended in its own mbuf.
225  *
226  * On successful return mbp_head will point to the old mbuf chain
227  * prepended  with a new mbuf that has the sequence number.
228  */
229 
230 static errno_t
rpc_gss_data_create(mbuf_t * mbp_head,uint32_t seqnum)231 rpc_gss_data_create(mbuf_t *mbp_head, uint32_t seqnum)
232 {
233 	int error;
234 	mbuf_t mb;
235 	struct nfsm_chain nmc;
236 	struct nfsm_chain *nmcp = &nmc;
237 	uint8_t *data;
238 
239 	error = mbuf_get(MBUF_WAITOK, MBUF_TYPE_DATA, &mb);
240 	if (error) {
241 		return error;
242 	}
243 	data = mbuf_data(mb);
244 #if 0
245 	/* Reserve space for prepending */
246 	len = mbuf_maxlen(mb);
247 	len = (len & ~0x3) - NFSX_UNSIGNED;
248 	printf("%s: data = %p, len = %d\n", __func__, data, (int)len);
249 	error = mbuf_setdata(mb, data + len, 0);
250 	if (error || mbuf_trailingspace(mb)) {
251 		printf("%s: data = %p trailingspace = %d error = %d\n", __func__, mbuf_data(mb), (int)mbuf_trailingspace(mb), error);
252 	}
253 #endif
254 	/* Reserve 16 words for prepending */
255 	error = mbuf_setdata(mb, data + 16 * sizeof(uint32_t), 0);
256 	nfsm_chain_init(nmcp, mb);
257 	nfsm_chain_add_32(error, nmcp, seqnum);
258 	nfsm_chain_build_done(error, nmcp);
259 	if (error) {
260 		return EINVAL;
261 	}
262 	mbuf_setnext(nmcp->nmc_mcur, *mbp_head);
263 	*mbp_head = nmcp->nmc_mhead;
264 
265 	return 0;
266 }
267 
268 /*
269  * Create an rpc_gss_integ_data_t given an argument or result in mb_head.
270  * On successful return mb_head will point to the rpc_gss_integ_data_t of length len.
271  *      Note mb_head will now point to a 4 byte sequence number. len does not include
272  *	any extra xdr padding.
273  * Returns 0 on success, else an errno_t
274  */
275 
276 static errno_t
rpc_gss_integ_data_create(gss_ctx_id_t ctx,mbuf_t * mb_head,uint32_t seqnum,uint32_t * len)277 rpc_gss_integ_data_create(gss_ctx_id_t ctx, mbuf_t *mb_head, uint32_t seqnum, uint32_t *len)
278 {
279 	uint32_t error;
280 	uint32_t major;
281 	uint32_t length;
282 	gss_buffer_desc mic;
283 	struct nfsm_chain nmc = {};
284 
285 	/* Length of the argument or result */
286 	length = nfs_gss_mchain_length(*mb_head);
287 	if (len) {
288 		*len = length;
289 	}
290 	error = rpc_gss_data_create(mb_head, seqnum);
291 	if (error) {
292 		return error;
293 	}
294 
295 	/*
296 	 * length is the length of the rpc_gss_data
297 	 */
298 	length += NFSX_UNSIGNED;  /* Add the sequence number to the length */
299 	major = gss_krb5_get_mic_mbuf(&error, ctx, 0, *mb_head, 0, length, &mic);
300 	if (major != GSS_S_COMPLETE) {
301 		printf("gss_krb5_get_mic_mbuf failed %d\n", error);
302 		return error;
303 	}
304 
305 	error = rpc_gss_prepend_32(mb_head, length);
306 	if (error) {
307 		return error;
308 	}
309 
310 	nfsm_chain_dissect_init(error, &nmc, *mb_head);
311 	/* Append GSS mic token by advancing rpc_gss_data_t length + NFSX_UNSIGNED (size of the length field) */
312 	nfsm_chain_adv(error, &nmc, length + NFSX_UNSIGNED);
313 	nfsm_chain_finish_mbuf(error, &nmc); // Force the mic into its own sub chain.
314 	nfsm_chain_add_32(error, &nmc, mic.length);
315 	nfsm_chain_add_opaque(error, &nmc, mic.value, mic.length);
316 	nfsm_chain_build_done(error, &nmc);
317 	gss_release_buffer(NULL, &mic);
318 
319 //	printmbuf("rpc_gss_integ_data_create done", *mb_head, 0, 0);
320 	assert(nmc.nmc_mhead == *mb_head);
321 
322 	return error;
323 }
324 
325 /*
326  * Create an rpc_gss_priv_data_t out of the supplied raw arguments or results in mb_head.
327  * On successful return mb_head will point to a wrap token of lenght len.
328  *	Note len does not include any xdr padding
329  * Returns 0 on success, else an errno_t
330  */
/*
 * Wrap (privacy-protect) the argument or result chain in *mb_head.
 * The sequence number is prepended first, the whole rpc_gss_data is
 * passed to gss_krb5_wrap_mbuf() in place, and the resulting wrap token
 * is framed as an XDR opaque: a 4-byte length up front and zero pad
 * bytes to a 4-byte boundary at the end (pad appended in its own mbuf).
 * On return *len (if non-NULL) is the wrap-token length, excluding pad.
 */
static errno_t
rpc_gss_priv_data_create(gss_ctx_id_t ctx, mbuf_t *mb_head, uint32_t seqnum, uint32_t *len)
{
	uint32_t error;
	uint32_t major;
	struct nfsm_chain nmc;
	uint32_t pad;
	uint32_t length;

	/* Prepend the sequence number in its own mbuf */
	error = rpc_gss_data_create(mb_head, seqnum);
	if (error) {
		return error;
	}

	/* Wrap the rpc_gss_data (seqnum + args/results) in place */
	length = nfs_gss_mchain_length(*mb_head);
	major = gss_krb5_wrap_mbuf(&error, ctx, 1, 0, mb_head, 0, length, NULL);
	if (major != GSS_S_COMPLETE) {
		return error;
	}

	/* Re-measure: the chain now holds the wrap token */
	length = nfs_gss_mchain_length(*mb_head);
	if (len) {
		*len = length;
	}
	pad = nfsm_pad(length);

	/* Prepend the opaque length of the rpc_gss_priv_data */
	error = rpc_gss_prepend_32(mb_head, length);

	if (error) {
		return error;
	}
	if (pad) {
		nfsm_chain_dissect_init(error, &nmc, *mb_head);
		/* Advance past the opaque size field and the token itself */
		nfsm_chain_adv(error, &nmc, NFSX_UNSIGNED + length);
		/* Force the pad bytes into their own mbuf at the end of the token */
		nfsm_chain_finish_mbuf(error, &nmc);
		nfsm_chain_add_opaque_nopad(error, &nmc, xdrpad, pad);
		nfsm_chain_build_done(error, &nmc);
	}

	return error;
}
374 
375 #if CONFIG_NFS_CLIENT
376 
377 /*
378  * Restore the argument or result from an rpc_gss_integ_data mbuf chain
379  * We have a four byte seqence number, len arguments, and an opaque
380  * encoded mic, possibly followed by some pad bytes. The mic and possible
381  * pad bytes are on their own sub mbuf chains.
382  *
383  * On successful return mb_head is the chain of the xdr args or results sans
384  * the sequence number and mic and return 0. Otherwise return an errno.
385  *
386  */
387 static errno_t
rpc_gss_integ_data_restore(gss_ctx_id_t ctx __unused,mbuf_t * mb_head,size_t len)388 rpc_gss_integ_data_restore(gss_ctx_id_t ctx __unused, mbuf_t *mb_head, size_t len)
389 {
390 	mbuf_t mb = *mb_head;
391 	mbuf_t tail = NULL, next;
392 
393 	/* Chop of the opaque length and seq number */
394 	mbuf_adj(mb, 2 * NFSX_UNSIGNED);
395 
396 	/* should only be one, ... but */
397 	for (; mb; mb = next) {
398 		next = mbuf_next(mb);
399 		if (mbuf_len(mb) == 0) {
400 			mbuf_free(mb);
401 		} else {
402 			break;
403 		}
404 	}
405 	*mb_head = mb;
406 
407 	for (; mb && len; mb = mbuf_next(mb)) {
408 		tail = mb;
409 		if (mbuf_len(mb) <= len) {
410 			len -= mbuf_len(mb);
411 		} else {
412 			return EBADRPC;
413 		}
414 	}
415 	/* drop the mic */
416 	if (tail) {
417 		mbuf_setnext(tail, NULL);
418 		mbuf_freem(mb);
419 	}
420 
421 	return 0;
422 }
423 
424 /*
425  * Restore the argument or result rfom an rpc_gss_priv_data mbuf chain
426  * mb_head points to the wrap token of length len.
427  *
428  * On successful return mb_head is our original xdr arg or result an
429  * the return value is 0. Otherise return an errno
430  */
/*
 * Unwrap an rpc_gss_priv_data chain back into the original argument or
 * result. *mb_head holds an XDR opaque: a 4-byte length, a wrap token of
 * length len, and possibly pad bytes (in their own sub chain). The pad
 * is stripped, the token is unwrapped in place with QOP_REVERSE, and the
 * leading sequence number and any empty mbufs are dropped.
 */
static errno_t
rpc_gss_priv_data_restore(gss_ctx_id_t ctx, mbuf_t *mb_head, size_t len)
{
	uint32_t major, error;
	mbuf_t mb = *mb_head, next;
	size_t plen, length;
	gss_qop_t qop = GSS_C_QOP_REVERSE;

	/* Chop off the opaque length */
	mbuf_adj(mb, NFSX_UNSIGNED);
	/* If we have padding, drop it */
	plen = nfsm_pad(len);
	if (plen) {
		mbuf_t tail = NULL;

		/* Walk to the end of the wrap token; the pad follows in its own sub chain */
		for (length = 0; length < len && mb; mb = mbuf_next(mb)) {
			tail = mb;
			length += mbuf_len(mb);
		}
		/* The token must end exactly on an mbuf boundary with the pad after it */
		if ((length != len) || (mb == NULL) || (tail == NULL)) {
			return EBADRPC;
		}

		mbuf_freem(mb);
		mbuf_setnext(tail, NULL);
	}

	major = gss_krb5_unwrap_mbuf(&error, ctx, mb_head, 0, len, NULL, &qop);
	if (major != GSS_S_COMPLETE) {
		printf("gss_krb5_unwrap_mbuf failed. major = %d minor = %d\n", (int)major, error);
		return error;
	}
	mb = *mb_head;

	/* Drop the sequence number */
	mbuf_adj(mb, NFSX_UNSIGNED);
	assert(mbuf_len(mb) == 0);

	/* Chop off any empty mbufs */
	for (mb = *mb_head; mb; mb = next) {
		next = mbuf_next(mb);
		if (mbuf_len(mb) == 0) {
			mbuf_free(mb);
		} else {
			break;
		}
	}
	*mb_head = mb;

	return 0;
}
482 
483 /*
484  * Find the context for a particular user.
485  *
486  * If the context doesn't already exist
487  * then create a new context for this user.
488  *
489  * Note that the code allows superuser (uid == 0)
490  * to adopt the context of another user.
491  *
492  * We'll match on the audit session ids, since those
493  * processes will have acccess to the same credential cache.
494  */
495 
496 #define kauth_cred_getasid(cred) ((cred)->cr_audit.as_aia_p->ai_asid)
497 #define kauth_cred_getauid(cred) ((cred)->cr_audit.as_aia_p->ai_auid)
498 
499 #define SAFE_CAST_INTTYPE( type, intval ) \
500 	( (type)(intval)/(sizeof(type) < sizeof(intval) ? 0 : 1) )
501 
502 uid_t
nfs_cred_getasid2uid(kauth_cred_t cred)503 nfs_cred_getasid2uid(kauth_cred_t cred)
504 {
505 	uid_t result = SAFE_CAST_INTTYPE(uid_t, kauth_cred_getasid(cred));
506 	return result;
507 }
508 
509 /*
510  * Debugging
511  */
/*
 * Debug helper: log every client GSS context on the mount with its
 * audit-session id / audit uid, reference count, and flags.
 * Holds nm_lock across the walk and each context's mutex while reading.
 */
static void
nfs_gss_clnt_ctx_dump(struct nfsmount *nmp)
{
	struct nfs_gss_clnt_ctx *cp;

	lck_mtx_lock(&nmp->nm_lock);
	NFS_GSS_DBG("Enter\n");
	TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
		lck_mtx_lock(&cp->gss_clnt_mtx);
		printf("context %d/%d: refcnt = %d, flags = %x\n",
		    kauth_cred_getasid(cp->gss_clnt_cred),
		    kauth_cred_getauid(cp->gss_clnt_cred),
		    cp->gss_clnt_refcnt, cp->gss_clnt_flags);
		lck_mtx_unlock(&cp->gss_clnt_mtx);
	}
	NFS_GSS_DBG("Exit\n");
	lck_mtx_unlock(&nmp->nm_lock);
}
530 
/*
 * Format a human-readable description of a client GSS context into buf
 * ("[server] principal asid/uid ..."), for use in debug logging.
 * Handles a NULL context and contexts using the default credential.
 * Returns buf so callers can use it inline in a printf argument list.
 */
static char *
nfs_gss_clnt_ctx_name(struct nfsmount *nmp, struct nfs_gss_clnt_ctx *cp, char *buf, int len)
{
	char *np;
	size_t nlen;
	const char *server = "";

	if (nmp && nmp->nm_mountp) {
		server = vfs_statfs(nmp->nm_mountp)->f_mntfromname;
	}

	if (cp == NULL) {
		snprintf(buf, len, "[%s] NULL context", server);
		return buf;
	}

	if (cp->gss_clnt_principal && !cp->gss_clnt_display) {
		/* Use the raw principal set on the context (not NUL-terminated; length-bounded below) */
		np = (char *)cp->gss_clnt_principal;
		nlen = cp->gss_clnt_prinlen;
	} else {
		/* Otherwise fall back to the display name returned by gssd, if any */
		np = cp->gss_clnt_display;
		nlen = np ? strlen(cp->gss_clnt_display) : 0;
	}
	if (nlen) {
		/* Clamp the precision to INT_MAX since %.*s takes an int */
		snprintf(buf, len, "[%s] %.*s %d/%d %s", server, nlen > INT_MAX ? INT_MAX : (int)nlen, np,
		    kauth_cred_getasid(cp->gss_clnt_cred),
		    kauth_cred_getuid(cp->gss_clnt_cred),
		    cp->gss_clnt_principal ? "" : "[from default cred] ");
	} else {
		snprintf(buf, len, "[%s] using default %d/%d ", server,
		    kauth_cred_getasid(cp->gss_clnt_cred),
		    kauth_cred_getuid(cp->gss_clnt_cred));
	}
	return buf;
}
566 
567 #define NFS_CTXBUFSZ 80
568 #define NFS_GSS_CTX(req, cp) nfs_gss_clnt_ctx_name((req)->r_nmp, cp ? cp : (req)->r_gss_ctx, CTXBUF, sizeof(CTXBUF))
569 
570 #define NFS_GSS_CLNT_CTX_DUMP(nmp)              \
571 	do {                  \
572 	        if (NFS_GSS_ISDBG && (NFSCLNT_DEBUG_FLAGS & 0x2))   \
573 	                nfs_gss_clnt_ctx_dump((nmp));   \
574 	} while (0)
575 
576 static int
nfs_gss_clnt_ctx_cred_match(kauth_cred_t cred1,kauth_cred_t cred2)577 nfs_gss_clnt_ctx_cred_match(kauth_cred_t cred1, kauth_cred_t cred2)
578 {
579 	if (kauth_cred_getasid(cred1) == kauth_cred_getasid(cred2)) {
580 		return 1;
581 	}
582 	return 0;
583 }
584 
585 /*
586  * Busy the mount for each principal set on the mount
587  * so that the automounter will not unmount the file
588  * system underneath us. With out this, if an unmount
589  * occurs the principal that is set for an audit session
590  * will be lost and we may end up with a different identity.
591  *
592  * Note setting principals on the mount is a bad idea. This
593  * really should be handle by KIM (Kerberos Identity Management)
594  * so that defaults can be set by service identities.
595  */
596 
597 static int
nfs_gss_clnt_mnt_ref(struct nfsmount * nmp)598 nfs_gss_clnt_mnt_ref(struct nfsmount *nmp)
599 {
600 	int error;
601 	vnode_t rvp;
602 
603 	if (nmp == NULL ||
604 	    !(vfs_flags(nmp->nm_mountp) & MNT_AUTOMOUNTED)) {
605 		return EINVAL;
606 	}
607 
608 	/* NOTE: providing 2 as inode number makes vfs_getbyid() to call VFS_ROOT() instead of VFS_GET */
609 	error = vfs_getbyid(&vfs_statfs(nmp->nm_mountp)->f_fsid, 2, &rvp, NULL);
610 	if (!error) {
611 		error = vnode_ref(rvp);
612 		vnode_put(rvp);
613 	}
614 
615 	return error;
616 }
617 
618 /*
619  * Unbusy the mount. See above comment,
620  */
621 
622 static int
nfs_gss_clnt_mnt_rele(struct nfsmount * nmp)623 nfs_gss_clnt_mnt_rele(struct nfsmount *nmp)
624 {
625 	int error;
626 	vnode_t rvp;
627 
628 	if (nmp == NULL ||
629 	    !(vfs_flags(nmp->nm_mountp) & MNT_AUTOMOUNTED)) {
630 		return EINVAL;
631 	}
632 
633 	/* NOTE: providing 2 as inode number makes vfs_getbyid() to call VFS_ROOT() instead of VFS_GET */
634 	error = vfs_getbyid(&vfs_statfs(nmp->nm_mountp)->f_fsid, 2, &rvp, NULL);
635 	if (!error) {
636 		vnode_rele(rvp);
637 		vnode_put(rvp);
638 	}
639 
640 	return error;
641 }
642 
643 int nfs_root_steals_ctx = 0;
644 
/*
 * Find (or create) the client GSS context matching req's credential and,
 * optionally, a specific principal (of name type nt, length plen).
 *
 * Walks the mount's context list under nm_lock:
 *   - contexts marked GSS_CTX_DESTROY are skipped;
 *   - a matching context with a different principal is marked for
 *     destruction (carried in treq so it can be unreferenced after the
 *     locks are dropped) and a new context is created;
 *   - an invalid (GSS_CTX_INVAL) context in the negative cache window
 *     returns NFSERR_EAUTH without calling gssd again;
 *   - an expired context with references is cloned; without references
 *     it is recycled in place;
 *   - otherwise the context is referenced and returned in req->r_gss_ctx.
 * A new or recycled context is initialized via nfs_gss_clnt_ctx_init_retry().
 * Returns 0 on success or an errno / NFSERR_EAUTH.
 */
static int
nfs_gss_clnt_ctx_find_principal(struct nfsreq *req, uint8_t *principal, size_t plen, uint32_t nt)
{
	struct nfsmount *nmp = req->r_nmp;
	struct nfs_gss_clnt_ctx *cp, *tcp;
	struct nfsreq *treq;
	int error = 0;
	struct timeval now;
	char CTXBUF[NFS_CTXBUFSZ];

	/* treq carries any displaced context so it can be unreferenced at the end */
	treq = zalloc_flags(nfs_req_zone, Z_WAITOK | Z_ZERO);
	treq->r_nmp = nmp;

	microuptime(&now);
	lck_mtx_lock(&nmp->nm_lock);
	TAILQ_FOREACH_SAFE(cp, &nmp->nm_gsscl, gss_clnt_entries, tcp) {
		lck_mtx_lock(&cp->gss_clnt_mtx);
		if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
			NFS_GSS_DBG("Found destroyed context %s refcnt = %d continuing\n",
			    NFS_GSS_CTX(req, cp),
			    cp->gss_clnt_refcnt);
			lck_mtx_unlock(&cp->gss_clnt_mtx);
			continue;
		}
		if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, req->r_cred)) {
			/* Move-to-front so frequently used contexts are found quickly */
			if (nmp->nm_gsscl.tqh_first != cp) {
				TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
				TAILQ_INSERT_HEAD(&nmp->nm_gsscl, cp, gss_clnt_entries);
			}
			if (principal) {
				/*
				 * If we have a principal, but it does not match the current cred
				 * mark it for removal
				 */
				if (cp->gss_clnt_prinlen != plen || cp->gss_clnt_prinnt != nt ||
				    bcmp(cp->gss_clnt_principal, principal, plen) != 0) {
					cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);
					cp->gss_clnt_refcnt++;
					lck_mtx_unlock(&cp->gss_clnt_mtx);
					NFS_GSS_DBG("Marking %s for deletion because %s does not match\n",
					    NFS_GSS_CTX(req, cp), principal);
					NFS_GSS_DBG("len = (%zu,%zu), nt = (%d,%d)\n", cp->gss_clnt_prinlen, plen,
					    cp->gss_clnt_prinnt, nt);
					/* Hand the doomed context to treq; unreferenced below after locks drop */
					treq->r_gss_ctx  = cp;
					cp = NULL;
					break;
				}
			}
			if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
				/*
				 * If we're still being used and we're not expired
				 * just return and don't bother gssd again. Note if
				 * gss_clnt_nctime is zero it is about to be set to now.
				 */
				if (cp->gss_clnt_nctime + GSS_NEG_CACHE_TO >= now.tv_sec || cp->gss_clnt_nctime == 0) {
					NFS_GSS_DBG("Context %s (refcnt = %d) not expired returning EAUTH nctime = %ld now = %ld\n",
					    NFS_GSS_CTX(req, cp), cp->gss_clnt_refcnt, cp->gss_clnt_nctime, now.tv_sec);
					lck_mtx_unlock(&cp->gss_clnt_mtx);
					lck_mtx_unlock(&nmp->nm_lock);
					NFS_ZFREE(nfs_req_zone, treq);
					return NFSERR_EAUTH;
				}
				if (cp->gss_clnt_refcnt) {
					struct nfs_gss_clnt_ctx *ncp;
					/*
					 * If this context has references, we can't use it so we mark it for
					 * destruction and create a new context based on this one in the
					 * same manner as renewing one.
					 */
					cp->gss_clnt_flags |= GSS_CTX_DESTROY;
					NFS_GSS_DBG("Context %s has expired but we still have %d references\n",
					    NFS_GSS_CTX(req, cp), cp->gss_clnt_refcnt);
					error = nfs_gss_clnt_ctx_copy(cp, &ncp);
					lck_mtx_unlock(&cp->gss_clnt_mtx);
					if (error) {
						lck_mtx_unlock(&nmp->nm_lock);
						NFS_ZFREE(nfs_req_zone, treq);
						return error;
					}
					cp = ncp;
					break;
				} else {
					/* No references: recycle this context in place (re-inserted below) */
					if (cp->gss_clnt_nctime) {
						nmp->nm_ncentries--;
					}
					lck_mtx_unlock(&cp->gss_clnt_mtx);
					TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
					break;
				}
			}
			/* Found a valid context to return */
			cp->gss_clnt_refcnt++;
			req->r_gss_ctx = cp;
			lck_mtx_unlock(&cp->gss_clnt_mtx);
			lck_mtx_unlock(&nmp->nm_lock);
			NFS_ZFREE(nfs_req_zone, treq);
			return 0;
		}
		lck_mtx_unlock(&cp->gss_clnt_mtx);
	}

	if (!cp && nfs_root_steals_ctx && principal == NULL && kauth_cred_getuid(req->r_cred) == 0) {
		/*
		 * If superuser is trying to get access, then co-opt
		 * the first valid context in the list.
		 * XXX Ultimately, we need to allow superuser to
		 * go ahead and attempt to set up its own context
		 * in case one is set up for it.
		 */
		TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
			if (!(cp->gss_clnt_flags & (GSS_CTX_INVAL | GSS_CTX_DESTROY))) {
				nfs_gss_clnt_ctx_ref(req, cp);
				lck_mtx_unlock(&nmp->nm_lock);
				NFS_GSS_DBG("Root stole context %s\n", NFS_GSS_CTX(req, NULL));
				NFS_ZFREE(nfs_req_zone, treq);
				return 0;
			}
		}
	}

	NFS_GSS_DBG("Context %s%sfound in Neg Cache @  %ld\n",
	    NFS_GSS_CTX(req, cp),
	    cp == NULL ? " not " : "",
	    cp == NULL ? 0L : cp->gss_clnt_nctime);

	/*
	 * Not found - create a new context
	 */

	if (cp == NULL) {
		cp = kalloc_type(struct nfs_gss_clnt_ctx,
		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
		cp->gss_clnt_cred = req->r_cred;
		kauth_cred_ref(cp->gss_clnt_cred);
		lck_mtx_init(&cp->gss_clnt_mtx, &nfs_gss_clnt_grp, LCK_ATTR_NULL);
		cp->gss_clnt_ptime = now.tv_sec - GSS_PRINT_DELAY;
		if (principal) {
			cp->gss_clnt_principal = kalloc_data(plen + 1, Z_WAITOK | Z_ZERO);
			memcpy(cp->gss_clnt_principal, principal, plen);
			cp->gss_clnt_prinlen = plen;
			cp->gss_clnt_prinnt = nt;
			cp->gss_clnt_flags |= GSS_CTX_STICKY;
			/* Busy the automounted mount while a principal is set on it */
			if (!nfs_gss_clnt_mnt_ref(nmp)) {
				cp->gss_clnt_flags |= GSS_CTX_USECOUNT;
			}
		}
	} else {
		/* Recycling an expired, unreferenced context found above */
		uint32_t oldflags = cp->gss_clnt_flags;
		nfs_gss_clnt_ctx_clean(cp);
		if (principal) {
			/*
			 * If we have a principal and we found a matching audit
			 * session, then to get here, the principal had to match.
			 * In walking the context list if it has a principal
			 * or the principal is not set then we mark the context
			 * for destruction and set cp to NULL and we fall to the
			 * if clause above. If the context still has references
			 * again we copy the context which will preserve the principal
			 * and we end up here with the correct principal set.
			 * If we don't have references then the principal must have
			 * matched and we will fall through here.
			 */
			cp->gss_clnt_flags |= GSS_CTX_STICKY;

			/*
			 * We are preserving old flags if it set, and we take a ref if not set.
			 * Also, because of the short circuit we will not take extra refs here.
			 */
			if ((oldflags & GSS_CTX_USECOUNT) || !nfs_gss_clnt_mnt_ref(nmp)) {
				cp->gss_clnt_flags |= GSS_CTX_USECOUNT;
			}
		}
	}

	/* Claim the context for this thread during setup (see nfs_gss_clnt_cred_put) */
	cp->gss_clnt_thread = current_thread();
	nfs_gss_clnt_ctx_ref(req, cp);
	TAILQ_INSERT_HEAD(&nmp->nm_gsscl, cp, gss_clnt_entries);
	lck_mtx_unlock(&nmp->nm_lock);

	error = nfs_gss_clnt_ctx_init_retry(req, cp); // Initialize new context
	if (error) {
		NFS_GSS_DBG("nfs_gss_clnt_ctx_init_retry returned %d for %s\n", error, NFS_GSS_CTX(req, cp));
		nfs_gss_clnt_ctx_unref(req);
	}

	/* Remove any old matching context that had a different principal */
	nfs_gss_clnt_ctx_unref(treq);
	NFS_ZFREE(nfs_req_zone, treq);
	return error;
}
835 
/*
 * Find (or create) the GSS context for req's credential using the
 * default principal (no explicit principal/name type supplied).
 */
static int
nfs_gss_clnt_ctx_find(struct nfsreq *req)
{
	return nfs_gss_clnt_ctx_find_principal(req, NULL, 0, 0);
}
841 
842 /*
843  * Inserts an RPCSEC_GSS credential into an RPC header.
844  * After the credential is inserted, the code continues
845  * to build the verifier which contains a signed checksum
846  * of the RPC header.
847  */
848 
int
nfs_gss_clnt_cred_put(struct nfsreq *req, struct nfsm_chain *nmc, mbuf_t args)
{
	struct nfs_gss_clnt_ctx *cp;
	uint32_t seqnum = 0;
	uint32_t major;
	uint32_t error = 0;
	int slpflag, recordmark = 0, offset;
	struct gss_seq *gsp;
	gss_buffer_desc mic;

	slpflag = (PZERO - 1);
	if (req->r_nmp) {
		/* Allow the sleeps below to be interrupted on "intr" mounts */
		slpflag |= (NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;
		/* Stream transports carry an RPC record mark ahead of the header */
		recordmark = (req->r_nmp->nm_sotype == SOCK_STREAM);
	}

retry:
	if (req->r_gss_ctx == NULL) {
		/*
		 * Find the context for this user.
		 * If no context is found, one will
		 * be created.
		 */
		error = nfs_gss_clnt_ctx_find(req);
		if (error) {
			return error;
		}
	}
	cp = req->r_gss_ctx;

	/*
	 * If the context thread isn't null, then the context isn't
	 * yet complete and is for the exclusive use of the thread
	 * doing the context setup. Wait until the context thread
	 * is null.
	 */
	lck_mtx_lock(&cp->gss_clnt_mtx);
	if (cp->gss_clnt_thread && cp->gss_clnt_thread != current_thread()) {
		cp->gss_clnt_flags |= GSS_NEEDCTX;
		/* PDROP: msleep releases gss_clnt_mtx before blocking */
		msleep(cp, &cp->gss_clnt_mtx, slpflag | PDROP, "ctxwait", NULL);
		slpflag &= ~PCATCH;
		if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
			return error;
		}
		/* Drop our reference and start over; the context may have changed */
		nfs_gss_clnt_ctx_unref(req);
		goto retry;
	}
	lck_mtx_unlock(&cp->gss_clnt_mtx);

	if (cp->gss_clnt_flags & GSS_CTX_COMPLETE) {
		/*
		 * Get a sequence number for this request.
		 * Check whether the oldest request in the window is complete.
		 * If it's still pending, then wait until it's done before
		 * we allocate a new sequence number and allow this request
		 * to proceed.
		 */
		lck_mtx_lock(&cp->gss_clnt_mtx);
		while (win_getbit(cp->gss_clnt_seqbits,
		    ((cp->gss_clnt_seqnum - cp->gss_clnt_seqwin) + 1) % cp->gss_clnt_seqwin)) {
			cp->gss_clnt_flags |= GSS_NEEDSEQ;
			/* PDROP: the mutex is released while we sleep */
			msleep(cp, &cp->gss_clnt_mtx, slpflag | PDROP, "seqwin", NULL);
			slpflag &= ~PCATCH;
			if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) {
				return error;
			}
			lck_mtx_lock(&cp->gss_clnt_mtx);
			if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
				/* Renewed while we were waiting */
				lck_mtx_unlock(&cp->gss_clnt_mtx);
				nfs_gss_clnt_ctx_unref(req);
				goto retry;
			}
		}
		seqnum = ++cp->gss_clnt_seqnum;
		win_setbit(cp->gss_clnt_seqbits, seqnum % cp->gss_clnt_seqwin);
		lck_mtx_unlock(&cp->gss_clnt_mtx);

		/* Record the sequence number so the reply verifier can be matched later */
		gsp = kalloc_type(struct gss_seq, Z_WAITOK | Z_ZERO | Z_NOFAIL);
		gsp->gss_seqnum = seqnum;
		SLIST_INSERT_HEAD(&req->r_gss_seqlist, gsp, gss_seqnext);
	}

	/* Insert the credential */
	nfsm_chain_add_32(error, nmc, RPCSEC_GSS);
	nfsm_chain_add_32(error, nmc, 5 * NFSX_UNSIGNED + cp->gss_clnt_handle_len);
	nfsm_chain_add_32(error, nmc, RPCSEC_GSS_VERS_1);
	nfsm_chain_add_32(error, nmc, cp->gss_clnt_proc);
	nfsm_chain_add_32(error, nmc, seqnum);
	nfsm_chain_add_32(error, nmc, cp->gss_clnt_service);
	nfsm_chain_add_32(error, nmc, cp->gss_clnt_handle_len);
	if (cp->gss_clnt_handle_len > 0) {
		if (cp->gss_clnt_handle == NULL) {
			return EBADRPC;
		}
		nfsm_chain_add_opaque(error, nmc, cp->gss_clnt_handle, cp->gss_clnt_handle_len);
	}
	if (error) {
		return error;
	}
	/*
	 * Now add the verifier
	 */
	if (cp->gss_clnt_proc == RPCSEC_GSS_INIT ||
	    cp->gss_clnt_proc == RPCSEC_GSS_CONTINUE_INIT) {
		/*
		 * If the context is still being created
		 * then use a null verifier.
		 */
		nfsm_chain_add_32(error, nmc, RPCAUTH_NULL);    // flavor
		nfsm_chain_add_32(error, nmc, 0);               // length
		nfsm_chain_build_done(error, nmc);
		if (!error) {
			nfs_gss_append_chain(nmc, args);
		}
		return error;
	}

	/* Compute the MIC over the RPC header, skipping any record mark */
	offset = recordmark ? NFSX_UNSIGNED : 0; // record mark
	nfsm_chain_build_done(error, nmc);

	major = gss_krb5_get_mic_mbuf((uint32_t *)&error, cp->gss_clnt_ctx_id, 0, nmc->nmc_mhead, offset, 0, &mic);
	if (major != GSS_S_COMPLETE) {
		printf("gss_krb5_get_mic_buf failed %d\n", error);
		return error;
	}

	nfsm_chain_add_32(error, nmc, RPCSEC_GSS);      // flavor
	nfsm_chain_add_32(error, nmc, mic.length);              // length
	nfsm_chain_add_opaque(error, nmc, mic.value, mic.length);
	(void)gss_release_buffer(NULL, &mic);
	nfsm_chain_build_done(error, nmc);
	if (error) {
		return error;
	}

	/*
	 * Now we may have to compute integrity or encrypt the call args
	 * per RFC 2203 Section 5.3.2
	 */
	switch (cp->gss_clnt_service) {
	case RPCSEC_GSS_SVC_NONE:
		if (args) {
			nfs_gss_append_chain(nmc, args);
		}
		break;
	case RPCSEC_GSS_SVC_INTEGRITY:
		/*
		 * r_gss_arglen is the length of args mbuf going into the routine.
		 * Its used to find the mic if we need to restore the args.
		 */
		/* Note the mbufs that were used in r_mrest are being encapsulated in the rpc_gss_integ_data_t */
		assert(req->r_mrest == args);
		nfsm_chain_finish_mbuf(error, nmc);
		if (error) {
			return error;
		}
		error = rpc_gss_integ_data_create(cp->gss_clnt_ctx_id, &args, seqnum, &req->r_gss_arglen);
		if (error) {
			break;
		}
		req->r_mrest = args;
		req->r_gss_argoff = nfsm_chain_offset(nmc);
		nfs_gss_append_chain(nmc, args);
		break;
	case RPCSEC_GSS_SVC_PRIVACY:
		/*
		 * r_gss_arglen is the length of the wrap token sans any padding length.
		 * Its used to find any XDR padding of the wrap token.
		 */
		/* Note the mbufs that were used in r_mrest are being encapsulated in the rpc_gss_priv_data_t */
		assert(req->r_mrest == args);
		nfsm_chain_finish_mbuf(error, nmc);
		if (error) {
			return error;
		}
		error = rpc_gss_priv_data_create(cp->gss_clnt_ctx_id, &args, seqnum, &req->r_gss_arglen);
		if (error) {
			break;
		}
		req->r_mrest = args;
		req->r_gss_argoff = nfsm_chain_offset(nmc);
		nfs_gss_append_chain(nmc, args);
		break;
	default:
		return EINVAL;
	}

	return error;
}
1040 
1041 /*
1042  * When receiving a reply, the client checks the verifier
1043  * returned by the server. Check that the verifier is the
1044  * correct type, then extract the sequence number checksum
1045  * from the token in the credential and compare it with a
1046  * computed checksum of the sequence number in the request
1047  * that was sent.
1048  */
int
nfs_gss_clnt_verf_get(
	struct nfsreq *req,
	struct nfsm_chain *nmc,
	uint32_t verftype,
	uint32_t verflen,
	uint32_t *accepted_statusp)
{
	gss_buffer_desc cksum;
	uint32_t seqnum = 0;
	uint32_t major;
	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
	struct nfsm_chain nmc_tmp;
	struct gss_seq *gsp;
	uint32_t reslen;
	int error = 0;
	mbuf_t results_mbuf, prev_mbuf, pad_mbuf;
	size_t ressize, offset;

	reslen = 0;
	*accepted_statusp = 0;

	if (cp == NULL) {
		return NFSERR_EAUTH;
	}
	/*
	 * If it's not an RPCSEC_GSS verifier, then it has to
	 * be a null verifier that resulted from either
	 * a CONTINUE_NEEDED reply during context setup or
	 * from the reply to an AUTH_UNIX call from a dummy
	 * context that resulted from a fallback to sec=sys.
	 */
	if (verftype != RPCSEC_GSS) {
		if (verftype != RPCAUTH_NULL) {
			return NFSERR_EAUTH;
		}
		if (cp->gss_clnt_flags & GSS_CTX_COMPLETE) {
			return NFSERR_EAUTH;
		}
		if (verflen > 0) {
			nfsm_chain_adv(error, nmc, nfsm_rndup(verflen));
		}
		nfsm_chain_get_32(error, nmc, *accepted_statusp);
		return error;
	}

	/*
	 * If we received an RPCSEC_GSS verifier but the
	 * context isn't yet complete, then it must be
	 * the context complete message from the server.
	 * The verifier will contain an encrypted checksum
	 * of the window but we don't have the session key
	 * yet so we can't decrypt it. Stash the verifier
	 * and check it later in nfs_gss_clnt_ctx_init() when
	 * the context is complete.
	 */
	if (!(cp->gss_clnt_flags & GSS_CTX_COMPLETE)) {
		if (verflen > KRB5_MAX_MIC_SIZE) {
			return EBADRPC;
		}
		cp->gss_clnt_verf = (u_char *)kalloc_data(verflen, Z_WAITOK | Z_ZERO);
		if (cp->gss_clnt_verf == NULL) {
			return ENOMEM;
		}
		cp->gss_clnt_verflen = verflen;
		nfsm_chain_get_opaque(error, nmc, verflen, cp->gss_clnt_verf);
		nfsm_chain_get_32(error, nmc, *accepted_statusp);
		return error;
	}

	/* Bound the server-supplied verifier length before allocating for it */
	if (verflen > KRB5_MAX_MIC_SIZE) {
		return EBADRPC;
	}
	cksum.length = verflen;
	cksum.value = kalloc_data(verflen, Z_WAITOK | Z_NOFAIL);

	/*
	 * Get the gss mic
	 */
	nfsm_chain_get_opaque(error, nmc, verflen, cksum.value);
	if (error) {
		kfree_data(cksum.value, verflen);
		goto nfsmout;
	}

	/*
	 * Search the request sequence numbers for this reply, starting
	 * with the most recent, looking for a checksum that matches
	 * the one in the verifier returned by the server.
	 */
	SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) {
		gss_buffer_desc seqnum_buf;
		uint32_t network_seqnum = htonl(gsp->gss_seqnum);

		seqnum_buf.length = sizeof(network_seqnum);
		seqnum_buf.value = &network_seqnum;
		major = gss_krb5_verify_mic(NULL, cp->gss_clnt_ctx_id, &seqnum_buf, &cksum, NULL);
		if (major == GSS_S_COMPLETE) {
			break;
		}
	}
	kfree_data(cksum.value, verflen);
	if (gsp == NULL) {
		/* No request sequence number matched the verifier */
		return NFSERR_EAUTH;
	}

	/*
	 * Get the RPC accepted status
	 */
	nfsm_chain_get_32(error, nmc, *accepted_statusp);
	if (*accepted_statusp != RPC_SUCCESS) {
		return 0;
	}

	/*
	 * Now we may have to check integrity or decrypt the results
	 * per RFC 2203 Section 5.3.2
	 */
	switch (cp->gss_clnt_service) {
	case RPCSEC_GSS_SVC_NONE:
		/* nothing to do */
		break;
	case RPCSEC_GSS_SVC_INTEGRITY:
		/*
		 * Here's what we expect in the integrity results from RFC 2203:
		 *
		 * - length of seq num + results (4 bytes)
		 * - sequence number (4 bytes)
		 * - results (variable bytes)
		 * - length of checksum token
		 * - checksum of seqnum + results
		 */

		nfsm_chain_get_32(error, nmc, reslen);          // length of results
		if (reslen > NFS_MAXPACKET) {
			error = EBADRPC;
			goto nfsmout;
		}

		/* Advance and fetch the mic */
		nmc_tmp = *nmc;
		nfsm_chain_adv(error, &nmc_tmp, reslen);        // skip over the results
		nfsm_chain_get_32(error, &nmc_tmp, cksum.length);
		if (cksum.length > KRB5_MAX_MIC_SIZE) {
			error = EBADRPC;
			goto nfsmout;
		}
		cksum.value = kalloc_data(cksum.length, Z_WAITOK | Z_NOFAIL);
		nfsm_chain_get_opaque(error, &nmc_tmp, cksum.length, cksum.value);
		//XXX chop off the cksum?

		/* Call verify mic */
		offset = nfsm_chain_offset(nmc);
		major = gss_krb5_verify_mic_mbuf((uint32_t *)&error, cp->gss_clnt_ctx_id, nmc->nmc_mhead, offset, reslen, &cksum, NULL);
		kfree_data(cksum.value, cksum.length);
		if (major != GSS_S_COMPLETE) {
			printf("client results: gss_krb5_verify_mic_mbuf failed %d\n", error);
			error = EBADRPC;
			goto nfsmout;
		}

		/*
		 * Get the sequence number prepended to the results
		 * and compare it against the header.
		 */
		nfsm_chain_get_32(error, nmc, seqnum);
		if (gsp->gss_seqnum != seqnum) {
			error = EBADRPC;
			goto nfsmout;
		}
#if 0
		SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) {
			if (seqnum == gsp->gss_seqnum) {
				break;
			}
		}
		if (gsp == NULL) {
			error = EBADRPC;
			goto nfsmout;
		}
#endif
		break;
	case RPCSEC_GSS_SVC_PRIVACY:
		/*
		 * Here's what we expect in the privacy results:
		 *
		 * opaque encoding of the wrap token
		 * - length of wrap token
		 * - wrap token
		 */
		prev_mbuf = nmc->nmc_mcur;
		nfsm_chain_get_32(error, nmc, reslen);          // length of results
		if (reslen == 0 || reslen > NFS_MAXPACKET) {
			error = EBADRPC;
			goto nfsmout;
		}

		/* Get the wrap token (current mbuf in the chain starting at the current offset) */
		offset = nmc->nmc_ptr - (caddr_t)mbuf_data(nmc->nmc_mcur);

		/* split out the wrap token */
		ressize = reslen;
		error = gss_normalize_mbuf(nmc->nmc_mcur, offset, &ressize, &results_mbuf, &pad_mbuf, 0);
		if (error) {
			goto nfsmout;
		}

		/* Discard any XDR padding that followed the wrap token */
		if (pad_mbuf) {
			assert(nfsm_pad(reslen) == mbuf_len(pad_mbuf));
			mbuf_free(pad_mbuf);
		}

		major = gss_krb5_unwrap_mbuf((uint32_t *)&error, cp->gss_clnt_ctx_id, &results_mbuf, 0, ressize, NULL, NULL);
		if (major) {
			printf("%s unwraped failed %d\n", __func__, error);
			goto nfsmout;
		}

		/* Now replace the wrapped arguments with the unwrapped ones */
		mbuf_setnext(prev_mbuf, results_mbuf);
		nmc->nmc_mcur = results_mbuf;
		nmc->nmc_ptr = mbuf_data(results_mbuf);
		nmc->nmc_left = mbuf_len(results_mbuf);

		/*
		 * Get the sequence number prepended to the results
		 * and compare it against the header
		 */
		nfsm_chain_get_32(error, nmc, seqnum);
		if (gsp->gss_seqnum != seqnum) {
			printf("%s bad seqnum\n", __func__);
			error = EBADRPC;
			goto nfsmout;
		}
#if 0
		SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) {
			if (seqnum == gsp->gss_seqnum) {
				break;
			}
		}
		if (gsp == NULL) {
			error = EBADRPC;
			goto nfsmout;
		}
#endif
		break;
	}
nfsmout:
	return error;
}
1299 
1300 /*
1301  * An RPCSEC_GSS request with no integrity or privacy consists
1302  * of just the header mbufs followed by the arg mbufs.
1303  *
 * However, with integrity or privacy the original mbufs have other
 * mbufs prepended and appended to them, which means we have to do
 * some work to restore the arg mbuf chain to its previous state in
 * case we need to retransmit.
1308  *
1309  * The location and length of the args is marked by two fields
1310  * in the request structure: r_gss_argoff and r_gss_arglen,
1311  * which are stashed when the NFS request is built.
1312  */
int
nfs_gss_clnt_args_restore(struct nfsreq *req)
{
	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
	struct nfsm_chain mchain, *nmc = &mchain;
	int error = 0, merr;

	if (cp == NULL) {
		return NFSERR_EAUTH;
	}

	/* The args are only encapsulated once the context is complete */
	if ((cp->gss_clnt_flags & GSS_CTX_COMPLETE) == 0) {
		return ENEEDAUTH;
	}

	/* Nothing to restore for SVC_NONE */
	if (cp->gss_clnt_service == RPCSEC_GSS_SVC_NONE) {
		return 0;
	}

	nfsm_chain_dissect_init(error, nmc, req->r_mhead);      // start at RPC header
	nfsm_chain_adv(error, nmc, req->r_gss_argoff);          // advance to args
	if (error) {
		return error;
	}

	/* Strip the integrity or privacy encapsulation from the saved args */
	if (cp->gss_clnt_service == RPCSEC_GSS_SVC_INTEGRITY) {
		error = rpc_gss_integ_data_restore(cp->gss_clnt_ctx_id, &req->r_mrest, req->r_gss_arglen);
	} else {
		error = rpc_gss_priv_data_restore(cp->gss_clnt_ctx_id, &req->r_mrest, req->r_gss_arglen);
	}

	/* Reattach the restored args to the RPC header chain */
	merr = mbuf_setnext(nmc->nmc_mcur, req->r_mrest);  /* Should always succeed */
	assert(merr == 0);

	return error ? error : merr;
}
1350 
1351 /*
1352  * This function sets up  a new context on the client.
1353  * Context setup alternates upcalls to the gssd with NFS nullproc calls
1354  * to the server.  Each of these calls exchanges an opaque token, obtained
1355  * via the gssd's calls into the GSS-API on either the client or the server.
1356  * This cycle of calls ends when the client's upcall to the gssd and the
1357  * server's response both return GSS_S_COMPLETE.  At this point, the client
1358  * should have its session key and a handle that it can use to refer to its
1359  * new context on the server.
1360  */
static int
nfs_gss_clnt_ctx_init(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
{
	struct nfsmount *nmp = req->r_nmp;
	gss_buffer_desc cksum, window;
	uint32_t network_seqnum;
	int client_complete = 0;
	int server_complete = 0;
	int error = 0;
	int retrycnt = 0;
	uint32_t major;

	/* Initialize a new client context */

	if (cp->gss_clnt_svcname == NULL) {
		cp->gss_clnt_svcname = nfs_gss_clnt_svcname(nmp, &cp->gss_clnt_svcnt, &cp->gss_clnt_svcnamlen);
		if (cp->gss_clnt_svcname == NULL) {
			error = NFSERR_EAUTH;
			goto nfsmout;
		}
	}

	cp->gss_clnt_proc = RPCSEC_GSS_INIT;

	/* Map the mount's auth flavor to the RPCSEC_GSS service level */
	cp->gss_clnt_service =
	    req->r_auth == RPCAUTH_KRB5  ? RPCSEC_GSS_SVC_NONE :
	    req->r_auth == RPCAUTH_KRB5I ? RPCSEC_GSS_SVC_INTEGRITY :
	    req->r_auth == RPCAUTH_KRB5P ? RPCSEC_GSS_SVC_PRIVACY : 0;

	/*
	 * Now loop around alternating gss_init_sec_context and
	 * gss_accept_sec_context upcalls to the gssd on the client
	 * and server side until the context is complete - or fails.
	 */
	for (;;) {
retry:
		/* Upcall to the gss_init_sec_context in the gssd */
		error = nfs_gss_clnt_gssd_upcall(req, cp, retrycnt);
		if (error) {
			goto nfsmout;
		}

		if (cp->gss_clnt_major == GSS_S_COMPLETE) {
			client_complete = 1;
			NFS_GSS_DBG("Client complete\n");
			if (server_complete) {
				break;
			}
		} else if (cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
			/*
			 * We may have gotten here because the accept sec context
			 * from the server failed and sent back a GSS token that
			 * encapsulates a kerberos error token per RFC 1964/4121
			 * with a status of GSS_S_CONTINUE_NEEDED. That caused us
			 * to loop to the above up call and received the now
			 * decoded errors.
			 */
			retrycnt++;
			cp->gss_clnt_gssd_flags |= GSSD_RESTART;
			NFS_GSS_DBG("Retrying major = %x minor = %d\n", cp->gss_clnt_major, (int)cp->gss_clnt_minor);
			goto retry;
		}

		/*
		 * Pass the token to the server.
		 */
		error = nfs_gss_clnt_ctx_callserver(req, cp);
		if (error) {
			if (error == ENEEDAUTH &&
			    (cp->gss_clnt_proc == RPCSEC_GSS_INIT ||
			    cp->gss_clnt_proc == RPCSEC_GSS_CONTINUE_INIT)) {
				/*
				 * We got here because the server had a problem
				 * trying to establish a context and sent that there
				 * was a context problem at the rpc sec layer. Perhaps
				 * gss_accept_sec_context succeeded  in user space,
				 * but the kernel could not handle the etype
				 * to generate the mic for the verifier of the rpc_sec
				 * window size.
				 */
				retrycnt++;
				cp->gss_clnt_gssd_flags |= GSSD_RESTART;
				NFS_GSS_DBG("Retrying major = %x minor = %d\n", cp->gss_clnt_major, (int)cp->gss_clnt_minor);
				goto retry;
			}
			goto nfsmout;
		}
		if (cp->gss_clnt_major == GSS_S_COMPLETE) {
			NFS_GSS_DBG("Server complete\n");
			server_complete = 1;
			if (client_complete) {
				break;
			}
		} else if (cp->gss_clnt_major == GSS_S_CONTINUE_NEEDED) {
			cp->gss_clnt_proc = RPCSEC_GSS_CONTINUE_INIT;
		} else {
			/* Server didn't like us. Try something else */
			retrycnt++;
			cp->gss_clnt_gssd_flags |= GSSD_RESTART;
			NFS_GSS_DBG("Retrying major = %x minor = %d\n", cp->gss_clnt_major, (int)cp->gss_clnt_minor);
		}
	}

	/*
	 * The context is apparently established successfully
	 */
	lck_mtx_lock(&cp->gss_clnt_mtx);
	cp->gss_clnt_flags |= GSS_CTX_COMPLETE;
	lck_mtx_unlock(&cp->gss_clnt_mtx);
	cp->gss_clnt_proc = RPCSEC_GSS_DATA;

	/*
	 * Verify the MIC of the sequence window that was stashed in
	 * gss_clnt_verf by nfs_gss_clnt_verf_get() before the session
	 * key was available.
	 */
	network_seqnum = htonl(cp->gss_clnt_seqwin);
	window.length = sizeof(cp->gss_clnt_seqwin);
	window.value = &network_seqnum;
	cksum.value = cp->gss_clnt_verf;
	cksum.length = cp->gss_clnt_verflen;
	major = gss_krb5_verify_mic((uint32_t *)&error, cp->gss_clnt_ctx_id, &window, &cksum, NULL);
	kfree_data(cp->gss_clnt_verf, cp->gss_clnt_verflen);
	cp->gss_clnt_verflen = 0;
	if (major != GSS_S_COMPLETE) {
		printf("%s: could not verify window\n", __func__);
		error = NFSERR_EAUTH;
		goto nfsmout;
	}

	/*
	 * Set an initial sequence number somewhat randomized.
	 * Start small so we don't overflow GSS_MAXSEQ too quickly.
	 * Add the size of the sequence window so seqbits arithmetic
	 * doesn't go negative.
	 */
	cp->gss_clnt_seqnum = (random() & 0xffff) + cp->gss_clnt_seqwin;

	/*
	 * Allocate a bitmap to keep track of which requests
	 * are pending within the sequence number window.
	 */
	cp->gss_clnt_seqbits = kalloc_data(nfs_gss_seqbits_size(cp->gss_clnt_seqwin), Z_WAITOK | Z_ZERO);
	if (cp->gss_clnt_seqbits == NULL) {
		error = NFSERR_EAUTH;
	}

nfsmout:
	/*
	 * If the error is ENEEDAUTH we're not done, so no need
	 * to wake up other threads again. This thread will retry in
	 * the find or renew routines.
	 */
	if (error == ENEEDAUTH) {
		NFS_GSS_DBG("Returning ENEEDAUTH\n");
		return error;
	}

	/*
	 * If there's an error, just mark it as invalid.
	 * It will be removed when the reference count
	 * drops to zero.
	 */
	lck_mtx_lock(&cp->gss_clnt_mtx);
	if (error) {
		cp->gss_clnt_flags |= GSS_CTX_INVAL;
	}

	/*
	 * Wake any threads waiting to use the context
	 */
	cp->gss_clnt_thread = NULL;
	if (cp->gss_clnt_flags & GSS_NEEDCTX) {
		cp->gss_clnt_flags &= ~GSS_NEEDCTX;
		wakeup(cp);
	}
	lck_mtx_unlock(&cp->gss_clnt_mtx);

	NFS_GSS_DBG("Returning error = %d\n", error);
	return error;
}
1537 
1538 /*
1539  * This function calls nfs_gss_clnt_ctx_init() to set up a new context.
1540  * But if there's a failure in trying to establish the context it keeps
1541  * retrying at progressively longer intervals in case the failure is
1542  * due to some transient condition.  For instance, the server might be
1543  * failing the context setup because directory services is not coming
1544  * up in a timely fashion.
1545  */
static int
nfs_gss_clnt_ctx_init_retry(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
{
	struct nfsmount *nmp = req->r_nmp;
	struct timeval now;
	time_t waituntil;
	int error, slpflag;
	int retries = 0;
	int timeo = NFS_TRYLATERDEL;

	if (nfs_mount_gone(nmp)) {
		error = ENXIO;
		goto bad;
	}

	/* For an "intr" mount allow a signal to interrupt the retries */
	slpflag = (NMFLAG(nmp, INTR) && !(req->r_flags & R_NOINTR)) ? PCATCH : 0;

	/* ENEEDAUTH indicates a possibly-transient setup failure; wait and retry */
	while ((error = nfs_gss_clnt_ctx_init(req, cp)) == ENEEDAUTH) {
		microuptime(&now);
		waituntil = now.tv_sec + timeo;
		while (now.tv_sec < waituntil) {
			tsleep(NULL, PSOCK | slpflag, "nfs_gss_clnt_ctx_init_retry", hz);
			slpflag = 0;
			error = nfs_sigintr(req->r_nmp, req, current_thread(), 0);
			if (error) {
				goto bad;
			}
			microuptime(&now);
		}

		retries++;
		/* If it's a soft mount just give up after a while */
		if ((NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && (retries > nmp->nm_retry)) {
			error = ETIMEDOUT;
			goto bad;
		}
		/* Exponential backoff, capped at 60 seconds */
		timeo *= 2;
		if (timeo > 60) {
			timeo = 60;
		}
	}

	if (error == 0) {
		return 0;       // success
	}
bad:
	/*
	 * Give up on this context
	 */
	lck_mtx_lock(&cp->gss_clnt_mtx);
	cp->gss_clnt_flags |= GSS_CTX_INVAL;

	/*
	 * Wake any threads waiting to use the context
	 */
	cp->gss_clnt_thread = NULL;
	if (cp->gss_clnt_flags & GSS_NEEDCTX) {
		cp->gss_clnt_flags &= ~GSS_NEEDCTX;
		wakeup(cp);
	}
	lck_mtx_unlock(&cp->gss_clnt_mtx);

	return error;
}
1611 
1612 /*
1613  * Call the NFS server using a null procedure for context setup.
1614  * Even though it's a null procedure and nominally has no arguments
1615  * RFC 2203 requires that the GSS-API token be passed as an argument
1616  * and received as a reply.
1617  */
static int
nfs_gss_clnt_ctx_callserver(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
{
	struct nfsm_chain nmreq, nmrep;
	int error = 0, status;
	uint32_t major = cp->gss_clnt_major, minor = cp->gss_clnt_minor;
	int sz;
	/* Take temporaries for the deallocations */
	const uint32_t prev_gss_clnt_tokenlen = cp->gss_clnt_tokenlen;
	const uint32_t prev_gss_clnt_handle_len = cp->gss_clnt_handle_len;

	if (nfs_mount_gone(req->r_nmp)) {
		return ENXIO;
	}
	nfsm_chain_null(&nmreq);
	nfsm_chain_null(&nmrep);
	sz = NFSX_UNSIGNED + nfsm_rndup(cp->gss_clnt_tokenlen);
	/* Marshal the outgoing GSS token as the null-proc "argument" */
	nfsm_chain_build_alloc_init(error, &nmreq, sz);
	nfsm_chain_add_32(error, &nmreq, cp->gss_clnt_tokenlen);
	if (cp->gss_clnt_tokenlen > 0) {
		nfsm_chain_add_opaque(error, &nmreq, cp->gss_clnt_token, cp->gss_clnt_tokenlen);
	}
	nfsm_chain_build_done(error, &nmreq);
	if (error) {
		goto nfsmout;
	}

	/* Call the server */
	error = nfs_request_gss(req->r_nmp->nm_mountp, &nmreq, req->r_thread, req->r_cred,
	    (req->r_flags & R_OPTMASK), cp, &nmrep, &status);
	/* The request token is no longer needed; free it with its original length */
	if (cp->gss_clnt_token != NULL) {
		kfree_data(cp->gss_clnt_token, prev_gss_clnt_tokenlen);
	}
	if (!error) {
		error = status;
	}
	if (error) {
		goto nfsmout;
	}

	/* Get the server's reply */

	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_handle_len);
	if (cp->gss_clnt_handle != NULL) {
		kfree_data(cp->gss_clnt_handle, prev_gss_clnt_handle_len);
	}
	/* Bound the server-supplied handle length before allocating for it */
	if (cp->gss_clnt_handle_len > 0 && cp->gss_clnt_handle_len < GSS_MAX_CTX_HANDLE_LEN) {
		cp->gss_clnt_handle = (u_char *)kalloc_data(cp->gss_clnt_handle_len, Z_WAITOK);
		if (cp->gss_clnt_handle == NULL) {
			error = ENOMEM;
			goto nfsmout;
		}
		nfsm_chain_get_opaque(error, &nmrep, cp->gss_clnt_handle_len, cp->gss_clnt_handle);
	} else {
		error = EBADRPC;
	}
	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_major);
	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_minor);
	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_seqwin);
	nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_tokenlen);
	if (error) {
		goto nfsmout;
	}
	/* Bound the server-supplied token length before allocating for it */
	if (cp->gss_clnt_tokenlen > 0 && cp->gss_clnt_tokenlen < GSS_MAX_TOKEN_LEN) {
		cp->gss_clnt_token = (u_char *)kalloc_data(cp->gss_clnt_tokenlen, Z_WAITOK);
		if (cp->gss_clnt_token == NULL) {
			error = ENOMEM;
			goto nfsmout;
		}
		nfsm_chain_get_opaque(error, &nmrep, cp->gss_clnt_tokenlen, cp->gss_clnt_token);
	} else {
		error = EBADRPC;
	}

	/*
	 * Make sure any unusual errors are expanded and logged by gssd
	 */
	if (cp->gss_clnt_major != GSS_S_COMPLETE &&
	    cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
		printf("nfs_gss_clnt_ctx_callserver: gss_clnt_major = %d\n", cp->gss_clnt_major);
		nfs_gss_clnt_log_error(req, cp, major, minor);
	}

nfsmout:
	nfsm_chain_cleanup(&nmreq);
	nfsm_chain_cleanup(&nmrep);

	return error;
}
1707 
1708 /*
1709  * We construct the service principal as a gss hostbased service principal of
1710  * the form nfs@<server>, unless the servers principal was passed down in the
1711  * mount arguments. If the arguments don't specify the service principal, the
 * server name is extracted from the location passed in the mount argument if
 * available.  Otherwise we assume a format of <server>:<path> in the
1714  * mntfromname. We don't currently support url's or other bizarre formats like
1715  * path@server. Mount_url will convert the nfs url into <server>:<path> when
1716  * calling mount, so this works out well in practice.
1717  *
1718  */
1719 
1720 static uint8_t *
nfs_gss_clnt_svcname(struct nfsmount * nmp,gssd_nametype * nt,size_t * len)1721 nfs_gss_clnt_svcname(struct nfsmount *nmp, gssd_nametype *nt, size_t *len)
1722 {
1723 	char *svcname, *d, *server;
1724 	int lindx, sindx;
1725 
1726 	if (nfs_mount_gone(nmp)) {
1727 		return NULL;
1728 	}
1729 
1730 	if (nmp->nm_sprinc) {
1731 		*len = strlen(nmp->nm_sprinc) + 1;
1732 		svcname = kalloc_data(*len, Z_WAITOK);
1733 		*nt = GSSD_HOSTBASED;
1734 		if (svcname == NULL) {
1735 			return NULL;
1736 		}
1737 		strlcpy(svcname, nmp->nm_sprinc, *len);
1738 
1739 		return (uint8_t *)svcname;
1740 	}
1741 
1742 	*nt = GSSD_HOSTBASED;
1743 	if (nmp->nm_locations.nl_numlocs && !(NFS_GSS_ISDBG && (NFSCLNT_DEBUG_FLAGS & 0x1))) {
1744 		lindx = nmp->nm_locations.nl_current.nli_loc;
1745 		sindx = nmp->nm_locations.nl_current.nli_serv;
1746 		server = nmp->nm_locations.nl_locations[lindx]->nl_servers[sindx]->ns_name;
1747 		*len = (uint32_t)strlen(server);
1748 	} else {
1749 		/* Older binaries using older mount args end up here */
1750 		server = vfs_statfs(nmp->nm_mountp)->f_mntfromname;
1751 		NFS_GSS_DBG("nfs getting gss svcname from %s\n", server);
1752 		d = strchr(server, ':');
1753 		*len = (uint32_t)(d ? (d - server) : strlen(server));
1754 	}
1755 
1756 	*len +=  5; /* "nfs@" plus null */
1757 	svcname = kalloc_data(*len, Z_WAITOK);
1758 	strlcpy(svcname, "nfs", *len);
1759 	strlcat(svcname, "@", *len);
1760 	strlcat(svcname, server, *len);
1761 	NFS_GSS_DBG("nfs svcname = %s\n", svcname);
1762 
1763 	return (uint8_t *)svcname;
1764 }
1765 
1766 /*
1767  * Get a mach port to talk to gssd.
1768  * gssd lives in the root bootstrap, so we call gssd's lookup routine
1769  * to get a send right to talk to a new gssd instance that launchd has launched
1770  * based on the cred's uid and audit session id.
1771  */
1772 
static mach_port_t
nfs_gss_clnt_get_upcall_port(kauth_cred_t credp)
{
	mach_port_t gssd_host_port, uc_port = IPC_PORT_NULL;
	kern_return_t kr;
	au_asid_t asid;
	uid_t uid;

	kr = host_get_gssd_port(host_priv_self(), &gssd_host_port);
	if (kr != KERN_SUCCESS) {
		printf("nfs_gss_get_upcall_port: can't get gssd port, status %x (%d)\n", kr, kr);
		return IPC_PORT_NULL;
	}
	if (!IPC_PORT_VALID(gssd_host_port)) {
		/* NOTE(review): gssd_host_port is not released on this path, unlike the success path below — confirm no right is leaked */
		printf("nfs_gss_get_upcall_port: gssd port not valid\n");
		return IPC_PORT_NULL;
	}

	/* Use the audit uid; fall back to the effective uid if unaudited */
	asid = kauth_cred_getasid(credp);
	uid = kauth_cred_getauid(credp);
	if (uid == AU_DEFAUDITID) {
		uid = kauth_cred_getuid(credp);
	}
	kr = mach_gss_lookup(gssd_host_port, uid, asid, &uc_port);
	if (kr != KERN_SUCCESS) {
		printf("nfs_gss_clnt_get_upcall_port: mach_gssd_lookup failed: status %x (%d)\n", kr, kr);
	}
	/* Done with the host port; uc_port (if valid) is returned to the caller */
	host_release_special_port(gssd_host_port);

	return uc_port;
}
1804 
1805 
1806 static void
nfs_gss_clnt_log_error(struct nfsreq * req,struct nfs_gss_clnt_ctx * cp,uint32_t major,uint32_t minor)1807 nfs_gss_clnt_log_error(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp, uint32_t major, uint32_t minor)
1808 {
1809 #define GETMAJERROR(x) (((x) >> GSS_C_ROUTINE_ERROR_OFFSET) & GSS_C_ROUTINE_ERROR_MASK)
1810 	struct nfsmount *nmp = req->r_nmp;
1811 	char who[] = "client";
1812 	uint32_t gss_error = GETMAJERROR(cp->gss_clnt_major);
1813 	const char *procn = "unkown";
1814 	proc_t proc;
1815 	char namebuf[MAXCOMLEN + 1];
1816 	pid_t pid = -1;
1817 	struct timeval now;
1818 
1819 	if (req->r_thread) {
1820 		proc = (proc_t)get_bsdthreadtask_info(req->r_thread);
1821 		if (proc) {
1822 			pid = proc_pid(proc);
1823 			proc_name(pid, namebuf, sizeof(namebuf));
1824 			if (*namebuf) {
1825 				procn = namebuf;
1826 			}
1827 		}
1828 	} else {
1829 		procn = "kernproc";
1830 		pid = 0;
1831 	}
1832 
1833 	microuptime(&now);
1834 	if ((cp->gss_clnt_major != major || cp->gss_clnt_minor != minor ||
1835 	    cp->gss_clnt_ptime + GSS_PRINT_DELAY < now.tv_sec) &&
1836 	    (nmp->nm_state & NFSSTA_MOUNTED)) {
1837 		/*
1838 		 * Will let gssd do some logging in hopes that it can translate
1839 		 * the minor code.
1840 		 */
1841 		if (cp->gss_clnt_minor && cp->gss_clnt_minor != minor) {
1842 			(void) mach_gss_log_error(
1843 				cp->gss_clnt_mport,
1844 				vfs_statfs(nmp->nm_mountp)->f_mntfromname,
1845 				kauth_cred_getuid(cp->gss_clnt_cred),
1846 				who,
1847 				cp->gss_clnt_major,
1848 				cp->gss_clnt_minor);
1849 		}
1850 		gss_error = gss_error ? gss_error : cp->gss_clnt_major;
1851 
1852 		/*
1853 		 *%%% It would be really nice to get the terminal from the proc or auditinfo_addr struct and print that here.
1854 		 */
1855 		printf("NFS: gssd auth failure by %s on audit session %d uid %d proc %s/%d for mount %s. Error: major = %d minor = %d\n",
1856 		    cp->gss_clnt_display ? cp->gss_clnt_display : who, kauth_cred_getasid(req->r_cred), kauth_cred_getuid(req->r_cred),
1857 		    procn, pid, vfs_statfs(nmp->nm_mountp)->f_mntfromname, gss_error, (int32_t)cp->gss_clnt_minor);
1858 		cp->gss_clnt_ptime = now.tv_sec;
1859 		switch (gss_error) {
1860 		case 7: printf("NFS: gssd does not have credentials for session %d/%d, (kinit)?\n",
1861 			    kauth_cred_getasid(req->r_cred), kauth_cred_getauid(req->r_cred));
1862 			break;
1863 		case 11: printf("NFS: gssd has expired credentals for session %d/%d, (kinit)?\n",
1864 			    kauth_cred_getasid(req->r_cred), kauth_cred_getauid(req->r_cred));
1865 			break;
1866 		}
1867 	} else {
1868 		NFS_GSS_DBG("NFS: gssd auth failure by %s on audit session %d uid %d proc %s/%d for mount %s. Error: major = %d minor = %d\n",
1869 		    cp->gss_clnt_display ? cp->gss_clnt_display : who, kauth_cred_getasid(req->r_cred), kauth_cred_getuid(req->r_cred),
1870 		    procn, pid, vfs_statfs(nmp->nm_mountp)->f_mntfromname, gss_error, (int32_t)cp->gss_clnt_minor);
1871 	}
1872 }
1873 
1874 /*
1875  * Make an upcall to the gssd using Mach RPC
1876  * The upcall is made using a host special port.
1877  * This allows launchd to fire up the gssd in the
1878  * user's session.  This is important, since gssd
1879  * must have access to the user's credential cache.
1880  */
1881 static int
nfs_gss_clnt_gssd_upcall(struct nfsreq * req,struct nfs_gss_clnt_ctx * cp,uint32_t retrycnt)1882 nfs_gss_clnt_gssd_upcall(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp, uint32_t retrycnt)
1883 {
1884 	kern_return_t kr;
1885 	gssd_byte_buffer octx = NULL;
1886 	uint32_t lucidlen = 0;
1887 	void *lucid_ctx_buffer;
1888 	int retry_cnt = 0;
1889 	vm_map_copy_t itoken = NULL;
1890 	gssd_byte_buffer otoken = NULL;
1891 	mach_msg_type_number_t otokenlen;
1892 	int error = 0;
1893 	uint8_t *principal = NULL;
1894 	size_t plen = 0;
1895 	int32_t nt = GSSD_STRING_NAME;
1896 	vm_map_copy_t pname = NULL;
1897 	vm_map_copy_t svcname = NULL;
1898 	char display_name[MAX_DISPLAY_STR] = "";
1899 	uint32_t ret_flags;
1900 	struct nfsmount *nmp = req->r_nmp;
1901 	uint32_t major = cp->gss_clnt_major, minor = cp->gss_clnt_minor;
1902 	uint32_t selected = (uint32_t)-1;
1903 	struct nfs_etype etype;
1904 
1905 	if (nmp == NULL || vfs_isforce(nmp->nm_mountp) || (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) {
1906 		return ENXIO;
1907 	}
1908 
1909 	if (cp->gss_clnt_gssd_flags & GSSD_RESTART) {
1910 		if (cp->gss_clnt_token) {
1911 			kfree_data(cp->gss_clnt_token, cp->gss_clnt_tokenlen);
1912 		}
1913 		cp->gss_clnt_token = NULL;
1914 		cp->gss_clnt_tokenlen = 0;
1915 		cp->gss_clnt_proc = RPCSEC_GSS_INIT;
1916 		/* Server's handle isn't valid. Don't reuse */
1917 		if (cp->gss_clnt_handle != NULL) {
1918 			kfree_data(cp->gss_clnt_handle, cp->gss_clnt_handle_len);
1919 		}
1920 		cp->gss_clnt_handle_len = 0;
1921 	}
1922 
1923 	NFS_GSS_DBG("Retrycnt = %d nm_etype.count = %d\n", retrycnt, nmp->nm_etype.count);
1924 	if (retrycnt >= nmp->nm_etype.count) {
1925 		return EACCES;
1926 	}
1927 
1928 	/* Copy the mount etypes to an order set of etypes to try */
1929 	etype = nmp->nm_etype;
1930 
1931 	/*
1932 	 * If we've already selected an etype, lets put that first in our
1933 	 * array of etypes to try, since overwhelmingly, that is likely
1934 	 * to be the etype we want.
1935 	 */
1936 	if (etype.selected < etype.count) {
1937 		etype.etypes[0] = nmp->nm_etype.etypes[etype.selected];
1938 		for (uint32_t i = 0; i < etype.selected; i++) {
1939 			etype.etypes[i + 1] = nmp->nm_etype.etypes[i];
1940 		}
1941 		for (uint32_t i = etype.selected + 1; i < etype.count; i++) {
1942 			etype.etypes[i] = nmp->nm_etype.etypes[i];
1943 		}
1944 	}
1945 
1946 	/* Remove the ones we've already have tried */
1947 	for (uint32_t i = retrycnt; i < etype.count; i++) {
1948 		etype.etypes[i - retrycnt] = etype.etypes[i];
1949 	}
1950 	etype.count = etype.count - retrycnt;
1951 
1952 	NFS_GSS_DBG("etype count = %d preferred etype = %d\n", etype.count, etype.etypes[0]);
1953 
1954 	/*
1955 	 * NFS currently only supports default principals or
1956 	 * principals based on the uid of the caller, unless
1957 	 * the principal to use for the mounting cred was specified
1958 	 * in the mount argmuments. If the realm to use was specified
1959 	 * then will send that up as the principal since the realm is
1960 	 * preceed by an "@" gssd that will try and select the default
1961 	 * principal for that realm.
1962 	 */
1963 
1964 	if (cp->gss_clnt_principal && cp->gss_clnt_prinlen) {
1965 		principal = cp->gss_clnt_principal;
1966 		plen = cp->gss_clnt_prinlen;
1967 		nt = cp->gss_clnt_prinnt;
1968 	} else if (nmp->nm_principal && IS_VALID_CRED(nmp->nm_mcred) && req->r_cred == nmp->nm_mcred) {
1969 		plen = (uint32_t)strlen(nmp->nm_principal);
1970 		principal = (uint8_t *)nmp->nm_principal;
1971 		cp->gss_clnt_prinnt = nt = GSSD_USER;
1972 	} else if (nmp->nm_realm) {
1973 		plen = (uint32_t)strlen(nmp->nm_realm);
1974 		principal = (uint8_t *)nmp->nm_realm;
1975 		nt = GSSD_USER;
1976 	}
1977 
1978 	if (!IPC_PORT_VALID(cp->gss_clnt_mport)) {
1979 		cp->gss_clnt_mport = nfs_gss_clnt_get_upcall_port(req->r_cred);
1980 		if (cp->gss_clnt_mport == IPC_PORT_NULL) {
1981 			goto out;
1982 		}
1983 	}
1984 
1985 	if (plen) {
1986 		nfs_gss_mach_alloc_buffer(principal, plen, &pname);
1987 	}
1988 	if (cp->gss_clnt_svcnamlen) {
1989 		nfs_gss_mach_alloc_buffer(cp->gss_clnt_svcname, cp->gss_clnt_svcnamlen, &svcname);
1990 	}
1991 	if (cp->gss_clnt_tokenlen) {
1992 		nfs_gss_mach_alloc_buffer(cp->gss_clnt_token, cp->gss_clnt_tokenlen, &itoken);
1993 	}
1994 
1995 	/* Always want to export the lucid context */
1996 	cp->gss_clnt_gssd_flags |= GSSD_LUCID_CONTEXT;
1997 
1998 retry:
1999 	kr = mach_gss_init_sec_context_v3(
2000 		cp->gss_clnt_mport,
2001 		GSSD_KRB5_MECH,
2002 		(gssd_byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_clnt_tokenlen,
2003 		kauth_cred_getuid(cp->gss_clnt_cred),
2004 		nt,
2005 		(gssd_byte_buffer)pname, (mach_msg_type_number_t) plen,
2006 		cp->gss_clnt_svcnt,
2007 		(gssd_byte_buffer)svcname, (mach_msg_type_number_t) cp->gss_clnt_svcnamlen,
2008 		GSSD_MUTUAL_FLAG,
2009 		(gssd_etype_list)etype.etypes, (mach_msg_type_number_t)etype.count,
2010 		&cp->gss_clnt_gssd_flags,
2011 		&cp->gss_clnt_context,
2012 		&cp->gss_clnt_cred_handle,
2013 		&ret_flags,
2014 		&octx, (mach_msg_type_number_t *) &lucidlen,
2015 		&otoken, &otokenlen,
2016 		cp->gss_clnt_display ? NULL : display_name,
2017 		&cp->gss_clnt_major,
2018 		&cp->gss_clnt_minor);
2019 
2020 	/* Clear the RESTART flag */
2021 	cp->gss_clnt_gssd_flags &= ~GSSD_RESTART;
2022 	if (cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
2023 		/* We're done with the gssd handles */
2024 		cp->gss_clnt_context = 0;
2025 		cp->gss_clnt_cred_handle = 0;
2026 	}
2027 
2028 	if (kr != KERN_SUCCESS) {
2029 		printf("nfs_gss_clnt_gssd_upcall: mach_gss_init_sec_context failed: %x (%d)\n", kr, kr);
2030 		if (kr == MIG_SERVER_DIED && cp->gss_clnt_cred_handle == 0 &&
2031 		    retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES &&
2032 		    !vfs_isforce(nmp->nm_mountp) && (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) == 0) {
2033 			if (plen) {
2034 				nfs_gss_mach_alloc_buffer(principal, plen, &pname);
2035 			}
2036 			if (cp->gss_clnt_svcnamlen) {
2037 				nfs_gss_mach_alloc_buffer(cp->gss_clnt_svcname, cp->gss_clnt_svcnamlen, &svcname);
2038 			}
2039 			if (cp->gss_clnt_tokenlen > 0) {
2040 				nfs_gss_mach_alloc_buffer(cp->gss_clnt_token, cp->gss_clnt_tokenlen, &itoken);
2041 			}
2042 			goto retry;
2043 		}
2044 
2045 		host_release_special_port(cp->gss_clnt_mport);
2046 		cp->gss_clnt_mport = IPC_PORT_NULL;
2047 		goto out;
2048 	}
2049 
2050 	if (cp->gss_clnt_display == NULL && *display_name != '\0') {
2051 		size_t dlen = strnlen(display_name, MAX_DISPLAY_STR) + 1;  /* Add extra byte to include '\0' */
2052 
2053 		if (dlen < MAX_DISPLAY_STR) {
2054 			cp->gss_clnt_display = kalloc_data(dlen, Z_WAITOK);
2055 			if (cp->gss_clnt_display == NULL) {
2056 				goto skip;
2057 			}
2058 			bcopy(display_name, cp->gss_clnt_display, dlen);
2059 		} else {
2060 			goto skip;
2061 		}
2062 	}
2063 skip:
2064 	/*
2065 	 * Make sure any unusual errors are expanded and logged by gssd
2066 	 *
2067 	 * XXXX, we need to rethink this and just have gssd return a string for the major and minor codes.
2068 	 */
2069 	if (cp->gss_clnt_major != GSS_S_COMPLETE &&
2070 	    cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) {
2071 		NFS_GSS_DBG("Up call returned error\n");
2072 		nfs_gss_clnt_log_error(req, cp, major, minor);
2073 		/* Server's handle isn't valid. Don't reuse */
2074 		if (cp->gss_clnt_handle != NULL) {
2075 			kfree_data(cp->gss_clnt_handle, cp->gss_clnt_handle_len);
2076 		}
2077 		cp->gss_clnt_handle_len = 0;
2078 	}
2079 
2080 	if (lucidlen > 0) {
2081 		if (lucidlen > MAX_LUCIDLEN) {
2082 			printf("nfs_gss_clnt_gssd_upcall: bad context length (%d)\n", lucidlen);
2083 			vm_map_copy_discard((vm_map_copy_t) octx);
2084 			vm_map_copy_discard((vm_map_copy_t) otoken);
2085 			goto out;
2086 		}
2087 		lucid_ctx_buffer = kalloc_data(lucidlen, Z_WAITOK | Z_ZERO);
2088 		error = nfs_gss_mach_vmcopyout((vm_map_copy_t) octx, lucidlen, lucid_ctx_buffer);
2089 		if (error) {
2090 			vm_map_copy_discard((vm_map_copy_t) otoken);
2091 			kfree_data(lucid_ctx_buffer, lucidlen);
2092 			goto out;
2093 		}
2094 
2095 		if (cp->gss_clnt_ctx_id) {
2096 			gss_krb5_destroy_context(cp->gss_clnt_ctx_id);
2097 		}
2098 		cp->gss_clnt_ctx_id = gss_krb5_make_context(lucid_ctx_buffer, lucidlen);
2099 		kfree_data(lucid_ctx_buffer, lucidlen);
2100 		if (cp->gss_clnt_ctx_id == NULL) {
2101 			printf("Failed to make context from lucid_ctx_buffer\n");
2102 			goto out;
2103 		}
2104 		for (uint32_t i = 0; i < nmp->nm_etype.count; i++) {
2105 			if (nmp->nm_etype.etypes[i] == cp->gss_clnt_ctx_id->gss_cryptor.etype) {
2106 				selected = i;
2107 				break;
2108 			}
2109 		}
2110 	}
2111 
2112 	/* Free context token used as input */
2113 	if (cp->gss_clnt_token) {
2114 		kfree_data(cp->gss_clnt_token, cp->gss_clnt_tokenlen);
2115 	}
2116 	cp->gss_clnt_tokenlen = 0;
2117 
2118 	if (otokenlen > 0) {
2119 		/* Set context token to gss output token */
2120 		cp->gss_clnt_token = (u_char *)kalloc_data(otokenlen, Z_WAITOK);
2121 		if (cp->gss_clnt_token == NULL) {
2122 			printf("nfs_gss_clnt_gssd_upcall: could not allocate %d bytes\n", otokenlen);
2123 			vm_map_copy_discard((vm_map_copy_t) otoken);
2124 			return ENOMEM;
2125 		}
2126 		error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, otokenlen, cp->gss_clnt_token);
2127 		if (error) {
2128 			printf("Could not copyout gss token\n");
2129 			kfree_data(cp->gss_clnt_token, otokenlen);
2130 			return NFSERR_EAUTH;
2131 		}
2132 		cp->gss_clnt_tokenlen = otokenlen;
2133 	}
2134 
2135 	if (selected != (uint32_t)-1) {
2136 		nmp->nm_etype.selected = selected;
2137 		NFS_GSS_DBG("etype selected = %d\n", nmp->nm_etype.etypes[selected]);
2138 	}
2139 	NFS_GSS_DBG("Up call succeeded major = %d\n", cp->gss_clnt_major);
2140 	return 0;
2141 
2142 out:
2143 	if (cp->gss_clnt_token) {
2144 		kfree_data(cp->gss_clnt_token, cp->gss_clnt_tokenlen);
2145 	}
2146 	cp->gss_clnt_tokenlen = 0;
2147 	/* Server's handle isn't valid. Don't reuse */
2148 	if (cp->gss_clnt_handle != NULL) {
2149 		kfree_data(cp->gss_clnt_handle, cp->gss_clnt_handle_len);
2150 	}
2151 	cp->gss_clnt_handle_len = 0;
2152 
2153 	NFS_GSS_DBG("Up call returned NFSERR_EAUTH");
2154 	return NFSERR_EAUTH;
2155 }
2156 
2157 /*
2158  * Invoked at the completion of an RPC call that uses an RPCSEC_GSS
2159  * credential. The sequence number window that the server returns
2160  * at context setup indicates the maximum number of client calls that
2161  * can be outstanding on a context. The client maintains a bitmap that
2162  * represents the server's window.  Each pending request has a bit set
2163  * in the window bitmap.  When a reply comes in or times out, we reset
2164  * the bit in the bitmap and if there are any other threads waiting for
2165  * a context slot we notify the waiting thread(s).
2166  *
2167  * Note that if a request is retransmitted, it will have a single XID
2168  * but it may be associated with multiple sequence numbers.  So we
2169  * may have to reset multiple sequence number bits in the window bitmap.
2170  */
void
nfs_gss_clnt_rpcdone(struct nfsreq *req)
{
	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
	struct gss_seq *gsp, *ngsp;
	int i = 0;

	if (cp == NULL || !(cp->gss_clnt_flags & GSS_CTX_COMPLETE)) {
		return; // no context - don't bother
	}
	/*
	 * Reset the bit for this request in the
	 * sequence number window to indicate it's done.
	 * We do this even if the request timed out.
	 */
	lck_mtx_lock(&cp->gss_clnt_mtx);
	/* Only the most recent seqnum is cleared here; older ones have
	 * already fallen out of the server's window. */
	gsp = SLIST_FIRST(&req->r_gss_seqlist);
	if (gsp && gsp->gss_seqnum > (cp->gss_clnt_seqnum - cp->gss_clnt_seqwin)) {
		win_resetbit(cp->gss_clnt_seqbits,
		    gsp->gss_seqnum % cp->gss_clnt_seqwin);
	}

	/*
	 * Limit the seqnum list to GSS_CLNT_SEQLISTMAX entries
	 * (a retransmitted request accumulates one entry per send;
	 * trim the oldest entries beyond the cap).
	 */
	SLIST_FOREACH_SAFE(gsp, &req->r_gss_seqlist, gss_seqnext, ngsp) {
		if (++i > GSS_CLNT_SEQLISTMAX) {
			SLIST_REMOVE(&req->r_gss_seqlist, gsp, gss_seq, gss_seqnext);
			kfree_type(struct gss_seq, gsp);
		}
	}

	/*
	 * If there's a thread waiting for
	 * the window to advance, wake it up.
	 */
	if (cp->gss_clnt_flags & GSS_NEEDSEQ) {
		cp->gss_clnt_flags &= ~GSS_NEEDSEQ;
		wakeup(cp);
	}
	lck_mtx_unlock(&cp->gss_clnt_mtx);
}
2213 
2214 /*
2215  * Create a reference to a context from a request
2216  * and bump the reference count
2217  */
2218 void
nfs_gss_clnt_ctx_ref(struct nfsreq * req,struct nfs_gss_clnt_ctx * cp)2219 nfs_gss_clnt_ctx_ref(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp)
2220 {
2221 	req->r_gss_ctx = cp;
2222 
2223 	lck_mtx_lock(&cp->gss_clnt_mtx);
2224 	cp->gss_clnt_refcnt++;
2225 	lck_mtx_unlock(&cp->gss_clnt_mtx);
2226 }
2227 
2228 /*
2229  * Remove a context reference from a request
2230  * If the reference count drops to zero, and the
2231  * context is invalid, destroy the context
2232  */
void
nfs_gss_clnt_ctx_unref(struct nfsreq *req)
{
	struct nfsmount *nmp = req->r_nmp;
	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
	int on_neg_cache = 0;	/* context was already counted in the negative cache */
	int neg_cache = 0;	/* context should now enter the negative cache */
	int destroy = 0;	/* context should be torn down */
	struct timeval now;
	char CTXBUF[NFS_CTXBUFSZ];	/* scratch for NFS_GSS_CTX debug formatting */

	if (cp == NULL) {
		return;
	}

	/* Detach the context from the request before dropping the ref */
	req->r_gss_ctx = NULL;

	lck_mtx_lock(&cp->gss_clnt_mtx);
	if (--cp->gss_clnt_refcnt < 0) {
		panic("Over release of gss context!");
	}

	if (cp->gss_clnt_refcnt == 0) {
		/* Last reference: release the crypto context if invalid */
		if ((cp->gss_clnt_flags & GSS_CTX_INVAL) &&
		    cp->gss_clnt_ctx_id) {
			gss_krb5_destroy_context(cp->gss_clnt_ctx_id);
			cp->gss_clnt_ctx_id = NULL;
		}
		if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
			destroy = 1;
			/* Drop the mount use count this context held, if any */
			if ((cp->gss_clnt_flags & GSS_CTX_USECOUNT) && !nfs_gss_clnt_mnt_rele(nmp)) {
				cp->gss_clnt_flags &= ~GSS_CTX_USECOUNT;
			}
			/* Nonzero nctime means it's counted in nm_ncentries */
			if (cp->gss_clnt_nctime) {
				on_neg_cache = 1;
			}
		}
	}
	/* Invalid but not doomed: timestamp it for the negative cache */
	if (!destroy && cp->gss_clnt_nctime == 0 &&
	    (cp->gss_clnt_flags & GSS_CTX_INVAL)) {
		microuptime(&now);
		cp->gss_clnt_nctime = now.tv_sec;
		neg_cache = 1;
	}
	lck_mtx_unlock(&cp->gss_clnt_mtx);
	if (destroy) {
		NFS_GSS_DBG("Destroying context %s\n", NFS_GSS_CTX(req, cp));
		if (nmp) {
			lck_mtx_lock(&nmp->nm_lock);
			/* NFSNOLIST marks a context already off the mount's list */
			if (cp->gss_clnt_entries.tqe_next != NFSNOLIST) {
				TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
			}
			if (on_neg_cache) {
				nmp->nm_ncentries--;
			}
			lck_mtx_unlock(&nmp->nm_lock);
		}
		nfs_gss_clnt_ctx_destroy(cp);
	} else if (neg_cache) {
		NFS_GSS_DBG("Entering context %s into negative cache\n", NFS_GSS_CTX(req, cp));
		if (nmp) {
			lck_mtx_lock(&nmp->nm_lock);
			nmp->nm_ncentries++;
			/* Trim the negative cache while we hold nm_lock */
			nfs_gss_clnt_ctx_neg_cache_reap(nmp);
			lck_mtx_unlock(&nmp->nm_lock);
		}
	}
	NFS_GSS_CLNT_CTX_DUMP(nmp);
}
2302 
/*
 * Try to reap any old, unreferenced entries from the
 * negative cache queue.
 */
2307 void
nfs_gss_clnt_ctx_neg_cache_reap(struct nfsmount * nmp)2308 nfs_gss_clnt_ctx_neg_cache_reap(struct nfsmount *nmp)
2309 {
2310 	struct nfs_gss_clnt_ctx *cp, *tcp;
2311 	struct timeval now;
2312 	int reaped = 0;
2313 
2314 	/* Try and reap old, unreferenced, expired contexts */
2315 	microuptime(&now);
2316 
2317 	NFS_GSS_DBG("Reaping contexts ncentries = %d\n", nmp->nm_ncentries);
2318 
2319 	TAILQ_FOREACH_SAFE(cp, &nmp->nm_gsscl, gss_clnt_entries, tcp) {
2320 		int destroy = 0;
2321 
2322 		/* Don't reap STICKY contexts */
2323 		if ((cp->gss_clnt_flags & GSS_CTX_STICKY) ||
2324 		    !(cp->gss_clnt_flags & GSS_CTX_INVAL)) {
2325 			continue;
2326 		}
2327 		/* Keep up to GSS_MAX_NEG_CACHE_ENTRIES */
2328 		if (nmp->nm_ncentries <= GSS_MAX_NEG_CACHE_ENTRIES) {
2329 			break;
2330 		}
2331 		/* Contexts too young */
2332 		if (cp->gss_clnt_nctime + GSS_NEG_CACHE_TO >= now.tv_sec) {
2333 			continue;
2334 		}
2335 		/* Not referenced, remove it. */
2336 		lck_mtx_lock(&cp->gss_clnt_mtx);
2337 		if (cp->gss_clnt_refcnt == 0) {
2338 			cp->gss_clnt_flags |= GSS_CTX_DESTROY;
2339 			destroy = 1;
2340 		}
2341 		lck_mtx_unlock(&cp->gss_clnt_mtx);
2342 		if (destroy) {
2343 			TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
2344 			nmp->nm_ncentries++;
2345 			reaped++;
2346 			nfs_gss_clnt_ctx_destroy(cp);
2347 		}
2348 	}
2349 	NFS_GSS_DBG("Reaped %d contexts ncentries = %d\n", reaped, nmp->nm_ncentries);
2350 }
2351 
2352 /*
2353  * Clean a context to be cached
2354  */
static void
nfs_gss_clnt_ctx_clean(struct nfs_gss_clnt_ctx *cp)
{
	/* Preserve gss_clnt_mtx */
	assert(cp->gss_clnt_thread == NULL);  /* Will be set to this thread */
	/* gss_clnt_entries  we should not be on any list at this point */
	cp->gss_clnt_flags = 0;
	/* gss_clnt_refcnt should be zero */
	assert(cp->gss_clnt_refcnt == 0);
	/*
	 * We are who we are preserve:
	 * gss_clnt_cred
	 * gss_clnt_principal
	 * gss_clnt_prinlen
	 * gss_clnt_prinnt
	 * gss_clnt_display
	 */
	/* gss_clnt_proc will be set in nfs_gss_clnt_ctx_init */
	cp->gss_clnt_seqnum = 0;
	/* Preserve gss_clnt_service, we're not changing flavors */
	if (cp->gss_clnt_handle) {
		kfree_data(cp->gss_clnt_handle, cp->gss_clnt_handle_len);
	}
	cp->gss_clnt_handle_len = 0;
	cp->gss_clnt_nctime = 0;
	if (cp->gss_clnt_seqbits) {
		kfree_data(cp->gss_clnt_seqbits, nfs_gss_seqbits_size(cp->gss_clnt_seqwin));
	}
	cp->gss_clnt_seqwin = 0;
	/* Preserve gss_clnt_mport. Still talking to the same gssd */
	if (cp->gss_clnt_verf) {
		kfree_data(cp->gss_clnt_verf, cp->gss_clnt_verflen);
	}
	/* NOTE(review): gss_clnt_verflen is not zeroed here, unlike the
	 * other freed buffers' lengths — presumably harmless since the
	 * pointer is cleared; confirm against kfree_data() semantics. */
	/* Service name might change on failover, so reset it */
	if (cp->gss_clnt_svcname) {
		kfree_data(cp->gss_clnt_svcname, cp->gss_clnt_svcnamlen);
		cp->gss_clnt_svcnt = 0;
	}
	cp->gss_clnt_svcnamlen = 0;
	cp->gss_clnt_cred_handle = 0;
	cp->gss_clnt_context = 0;
	if (cp->gss_clnt_token) {
		kfree_data(cp->gss_clnt_token, cp->gss_clnt_tokenlen);
	}
	cp->gss_clnt_tokenlen = 0;
	/* XXX gss_clnt_ctx_id ??? */
	/*
	 * Preserve:
	 * gss_clnt_gssd_flags
	 * gss_clnt_major
	 * gss_clnt_minor
	 * gss_clnt_ptime
	 */
}
2409 
2410 /*
2411  * Copy a source context to a new context. This is used to create a new context
2412  * with the identity of the old context for renewal. The old context is invalid
2413  * at this point but may have reference still to it, so it is not safe to use that
2414  * context.
2415  */
2416 static int
nfs_gss_clnt_ctx_copy(struct nfs_gss_clnt_ctx * scp,struct nfs_gss_clnt_ctx ** dcpp)2417 nfs_gss_clnt_ctx_copy(struct nfs_gss_clnt_ctx *scp, struct nfs_gss_clnt_ctx **dcpp)
2418 {
2419 	struct nfs_gss_clnt_ctx *dcp;
2420 
2421 	*dcpp = (struct nfs_gss_clnt_ctx *)NULL;
2422 	dcp = kalloc_type(struct nfs_gss_clnt_ctx, Z_WAITOK | Z_ZERO | Z_NOFAIL);
2423 	lck_mtx_init(&dcp->gss_clnt_mtx, &nfs_gss_clnt_grp, LCK_ATTR_NULL);
2424 	dcp->gss_clnt_cred = scp->gss_clnt_cred;
2425 	kauth_cred_ref(dcp->gss_clnt_cred);
2426 	dcp->gss_clnt_prinlen = scp->gss_clnt_prinlen;
2427 	dcp->gss_clnt_prinnt = scp->gss_clnt_prinnt;
2428 	if (scp->gss_clnt_principal) {
2429 		dcp->gss_clnt_principal = kalloc_data(dcp->gss_clnt_prinlen, Z_WAITOK | Z_ZERO);
2430 		if (dcp->gss_clnt_principal == NULL) {
2431 			kfree_type(struct nfs_gss_clnt_ctx, dcp);
2432 			return ENOMEM;
2433 		}
2434 		bcopy(scp->gss_clnt_principal, dcp->gss_clnt_principal, dcp->gss_clnt_prinlen);
2435 	}
2436 	/* Note we don't preserve the display name, that will be set by a successful up call */
2437 	dcp->gss_clnt_service = scp->gss_clnt_service;
2438 	dcp->gss_clnt_mport = host_copy_special_port(scp->gss_clnt_mport);
2439 	dcp->gss_clnt_ctx_id = NULL;   /* Will be set from successful upcall */
2440 	dcp->gss_clnt_gssd_flags = scp->gss_clnt_gssd_flags;
2441 	dcp->gss_clnt_major = scp->gss_clnt_major;
2442 	dcp->gss_clnt_minor = scp->gss_clnt_minor;
2443 	dcp->gss_clnt_ptime = scp->gss_clnt_ptime;
2444 
2445 	*dcpp = dcp;
2446 
2447 	return 0;
2448 }
2449 
2450 /*
2451  * Remove a context
2452  */
static void
nfs_gss_clnt_ctx_destroy(struct nfs_gss_clnt_ctx *cp)
{
	/* Caller must hold the last reference; cp must be off the mount's list */
	NFS_GSS_DBG("Destroying context %d/%d\n",
	    kauth_cred_getasid(cp->gss_clnt_cred),
	    kauth_cred_getauid(cp->gss_clnt_cred));

	/* Drop our send right to the per-session gssd */
	host_release_special_port(cp->gss_clnt_mport);
	cp->gss_clnt_mport = IPC_PORT_NULL;

	lck_mtx_destroy(&cp->gss_clnt_mtx, &nfs_gss_clnt_grp);

	if (IS_VALID_CRED(cp->gss_clnt_cred)) {
		kauth_cred_unref(&cp->gss_clnt_cred);
	}
	/* Mark as off-list so list-removal checks elsewhere are benign */
	cp->gss_clnt_entries.tqe_next = NFSNOLIST;
	cp->gss_clnt_entries.tqe_prev = NFSNOLIST;
	if (cp->gss_clnt_principal) {
		kfree_data_addr(cp->gss_clnt_principal);
	}
	if (cp->gss_clnt_display) {
		kfree_data_addr(cp->gss_clnt_display);
	}
	if (cp->gss_clnt_ctx_id) {
		gss_krb5_destroy_context(cp->gss_clnt_ctx_id);
		cp->gss_clnt_ctx_id = NULL;
	}

	/* Free the remaining negotiation state (handle, seq bits, tokens, ...) */
	nfs_gss_clnt_ctx_clean(cp);

	kfree_type(struct nfs_gss_clnt_ctx, cp);
}
2485 
2486 /*
2487  * The context for a user is invalid.
2488  * Mark the context as invalid, then
2489  * create a new context.
2490  */
int
nfs_gss_clnt_ctx_renew(struct nfsreq *req)
{
	struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx;
	struct nfs_gss_clnt_ctx *ncp;
	struct nfsmount *nmp;
	int error = 0;
	char CTXBUF[NFS_CTXBUFSZ];	/* scratch for NFS_GSS_CTX debug formatting */

	if (cp == NULL) {
		return 0;
	}

	if (req->r_nmp == NULL) {
		return ENXIO;
	}
	nmp = req->r_nmp;

	lck_mtx_lock(&cp->gss_clnt_mtx);
	if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
		lck_mtx_unlock(&cp->gss_clnt_mtx);
		nfs_gss_clnt_ctx_unref(req);
		return 0;     // already being renewed
	}

	/* Mark the old context dead so no new users pick it up */
	cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);

	/* Wake any thread blocked waiting on this context */
	if (cp->gss_clnt_flags & (GSS_NEEDCTX | GSS_NEEDSEQ)) {
		cp->gss_clnt_flags &= ~GSS_NEEDSEQ;
		wakeup(cp);
	}
	lck_mtx_unlock(&cp->gss_clnt_mtx);

	if (cp->gss_clnt_proc == RPCSEC_GSS_DESTROY) {
		return EACCES;  /* Destroying a context is best effort. Don't renew. */
	}
	/*
	 * If we're setting up a context let nfs_gss_clnt_ctx_init know this is not working
	 * and to try some other etype.
	 */
	if (cp->gss_clnt_proc != RPCSEC_GSS_DATA) {
		return ENEEDAUTH;
	}
	/* Clone the identity of the old context into a fresh one */
	error =  nfs_gss_clnt_ctx_copy(cp, &ncp);
	NFS_GSS_DBG("Renewing context %s\n", NFS_GSS_CTX(req, ncp));
	/* Drop the request's reference to the old (now invalid) context */
	nfs_gss_clnt_ctx_unref(req);
	if (error) {
		return error;
	}

	lck_mtx_lock(&nmp->nm_lock);
	/*
	 * Note we don't bother taking the new context mutex as we're
	 * not findable at the moment.
	 */
	ncp->gss_clnt_thread = current_thread();
	nfs_gss_clnt_ctx_ref(req, ncp);
	TAILQ_INSERT_HEAD(&nmp->nm_gsscl, ncp, gss_clnt_entries);
	lck_mtx_unlock(&nmp->nm_lock);

	error = nfs_gss_clnt_ctx_init_retry(req, ncp); // Initialize new context
	if (error) {
		nfs_gss_clnt_ctx_unref(req);
	}

	return error;
}
2558 
2559 
2560 /*
2561  * Destroy all the contexts associated with a mount.
2562  * The contexts are also destroyed by the server.
2563  */
void
nfs_gss_clnt_ctx_unmount(struct nfsmount *nmp)
{
	struct nfs_gss_clnt_ctx *cp;
	struct nfsm_chain nmreq, nmrep;
	int error, status;
	struct nfsreq *req;

	if (!nmp) {
		return;
	}

	/* A scratch request used only to carry the context reference */
	req = zalloc(nfs_req_zone);
	req->r_nmp = nmp;
	lck_mtx_lock(&nmp->nm_lock);
	while ((cp = TAILQ_FIRST(&nmp->nm_gsscl))) {
		TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries);
		cp->gss_clnt_entries.tqe_next = NFSNOLIST;
		lck_mtx_lock(&cp->gss_clnt_mtx);
		/* Already being torn down elsewhere; skip it */
		if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
			lck_mtx_unlock(&cp->gss_clnt_mtx);
			continue;
		}
		/* Hold a reference while we work on it with nm_lock dropped */
		cp->gss_clnt_refcnt++;
		lck_mtx_unlock(&cp->gss_clnt_mtx);
		req->r_gss_ctx = cp;

		lck_mtx_unlock(&nmp->nm_lock);
		/*
		 * Tell the server to destroy its context.
		 * But don't bother if it's a forced unmount.
		 */
		if (!nfs_mount_gone(nmp) &&
		    (cp->gss_clnt_flags & (GSS_CTX_INVAL | GSS_CTX_DESTROY | GSS_CTX_COMPLETE)) == GSS_CTX_COMPLETE) {
			cp->gss_clnt_proc = RPCSEC_GSS_DESTROY;

			error = 0;
			nfsm_chain_null(&nmreq);
			nfsm_chain_null(&nmrep);
			nfsm_chain_build_alloc_init(error, &nmreq, 0);
			nfsm_chain_build_done(error, &nmreq);
			if (!error) {
				/* Best effort: result of the DESTROY RPC is ignored */
				nfs_request_gss(nmp->nm_mountp, &nmreq,
				    current_thread(), cp->gss_clnt_cred, 0, cp, &nmrep, &status);
			}
			nfsm_chain_cleanup(&nmreq);
			nfsm_chain_cleanup(&nmrep);
		}

		/*
		 * Mark the context invalid then drop
		 * the reference to remove it if its
		 * refcount is zero.
		 */
		lck_mtx_lock(&cp->gss_clnt_mtx);
		cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);
		lck_mtx_unlock(&cp->gss_clnt_mtx);
		nfs_gss_clnt_ctx_unref(req);
		lck_mtx_lock(&nmp->nm_lock);
	}
	lck_mtx_unlock(&nmp->nm_lock);
	assert(TAILQ_EMPTY(&nmp->nm_gsscl));
	NFS_ZFREE(nfs_req_zone, req);
}
2628 
2629 
2630 /*
2631  * Removes a mounts context for a credential
2632  */
int
nfs_gss_clnt_ctx_remove(struct nfsmount *nmp, kauth_cred_t cred)
{
	struct nfs_gss_clnt_ctx *cp, *tcp;
	struct nfsreq *req;

	/* A scratch request used only to carry the context reference */
	req = zalloc(nfs_req_zone);
	req->r_nmp = nmp;

	NFS_GSS_DBG("Enter\n");
	NFS_GSS_CLNT_CTX_DUMP(nmp);
	lck_mtx_lock(&nmp->nm_lock);
	TAILQ_FOREACH_SAFE(cp, &nmp->nm_gsscl, gss_clnt_entries, tcp) {
		lck_mtx_lock(&cp->gss_clnt_mtx);
		if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, cred)) {
			/* Skip contexts already on their way out */
			if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
				NFS_GSS_DBG("Found destroyed context %d/%d. refcnt = %d continuing\n",
				    kauth_cred_getasid(cp->gss_clnt_cred),
				    kauth_cred_getauid(cp->gss_clnt_cred),
				    cp->gss_clnt_refcnt);
				lck_mtx_unlock(&cp->gss_clnt_mtx);
				continue;
			}
			/* Take a ref and doom the context, then let unref reap it */
			cp->gss_clnt_refcnt++;
			cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY);
			lck_mtx_unlock(&cp->gss_clnt_mtx);
			req->r_gss_ctx = cp;
			lck_mtx_unlock(&nmp->nm_lock);
			/*
			 * Drop the reference to remove it if its
			 * refcount is zero.
			 */
			NFS_GSS_DBG("Removed context %d/%d refcnt = %d\n",
			    kauth_cred_getasid(cp->gss_clnt_cred),
			    kauth_cred_getuid(cp->gss_clnt_cred),
			    cp->gss_clnt_refcnt);
			nfs_gss_clnt_ctx_unref(req);
			NFS_ZFREE(nfs_req_zone, req);
			return 0;
		}
		lck_mtx_unlock(&cp->gss_clnt_mtx);
	}

	lck_mtx_unlock(&nmp->nm_lock);

	NFS_ZFREE(nfs_req_zone, req);
	NFS_GSS_DBG("Returning ENOENT\n");
	return ENOENT;
}
2682 
2683 /*
2684  * Sets a mounts principal for a session associated with cred.
2685  */
2686 int
nfs_gss_clnt_ctx_set_principal(struct nfsmount * nmp,vfs_context_t ctx,uint8_t * principal,size_t princlen,uint32_t nametype)2687 nfs_gss_clnt_ctx_set_principal(struct nfsmount *nmp, vfs_context_t ctx,
2688     uint8_t *principal, size_t princlen, uint32_t nametype)
2689 {
2690 	struct nfsreq *req;
2691 	int error;
2692 
2693 	NFS_GSS_DBG("Enter:\n");
2694 
2695 	req = zalloc_flags(nfs_req_zone, Z_WAITOK | Z_ZERO);
2696 	req->r_nmp = nmp;
2697 	req->r_auth = nmp->nm_auth;
2698 	req->r_thread = vfs_context_thread(ctx);
2699 	req->r_cred = vfs_context_ucred(ctx);
2700 
2701 	error = nfs_gss_clnt_ctx_find_principal(req, principal, princlen, nametype);
2702 	NFS_GSS_DBG("nfs_gss_clnt_ctx_find_principal returned %d\n", error);
2703 	/*
2704 	 * We don't care about auth errors. Those would indicate that the context is in the
2705 	 * neagative cache and if and when the user has credentials for the principal
2706 	 * we should be good to go in that we will select those credentials for this principal.
2707 	 */
2708 	if (error == EACCES || error == EAUTH || error == ENEEDAUTH) {
2709 		error = 0;
2710 	}
2711 
2712 	/* We're done with this request */
2713 	nfs_gss_clnt_ctx_unref(req);
2714 	NFS_ZFREE(nfs_req_zone, req);
2715 	return error;
2716 }
2717 
2718 /*
2719  * Gets a mounts principal from a session associated with cred
2720  */
int
nfs_gss_clnt_ctx_get_principal(struct nfsmount *nmp, vfs_context_t ctx,
    struct user_nfs_gss_principal *p)
{
	struct nfsreq *req;
	int error = 0;
	struct nfs_gss_clnt_ctx *cp;
	kauth_cred_t cred = vfs_context_ucred(ctx);
	const char *princ = NULL;
	char CTXBUF[NFS_CTXBUFSZ];	/* scratch buffer used by the NFS_GSS_CTX() debug macro */

	/* Make sure the members of the struct user_nfs_gss_principal are initialized */
	p->nametype = GSSD_STRING_NAME;
	p->principal = USER_ADDR_NULL;
	p->princlen = 0;
	p->flags = 0;

	/* Scratch request: carrier for the context reference and debug macros. */
	req = zalloc_flags(nfs_req_zone, Z_WAITOK);
	req->r_nmp = nmp;
	lck_mtx_lock(&nmp->nm_lock);
	/* Walk the mount's context list looking for one matching the caller's cred. */
	TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) {
		lck_mtx_lock(&cp->gss_clnt_mtx);
		if (cp->gss_clnt_flags & GSS_CTX_DESTROY) {
			/* Skip contexts already scheduled for destruction. */
			NFS_GSS_DBG("Found destroyed context %s refcnt = %d continuing\n",
			    NFS_GSS_CTX(req, cp),
			    cp->gss_clnt_refcnt);
			lck_mtx_unlock(&cp->gss_clnt_mtx);
			continue;
		}
		if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, cred)) {
			/* Match: hold a reference while we copy fields out below. */
			cp->gss_clnt_refcnt++;
			lck_mtx_unlock(&cp->gss_clnt_mtx);
			goto out;
		}
		lck_mtx_unlock(&cp->gss_clnt_mtx);
	}

out:
	if (cp == NULL) {
		lck_mtx_unlock(&nmp->nm_lock);
		p->flags |= NFS_IOC_NO_CRED_FLAG;  /* No credentials, valid or invalid on this mount */
		NFS_GSS_DBG("No context found for session %d by uid %d\n",
		    kauth_cred_getasid(cred), kauth_cred_getuid(cred));
		NFS_ZFREE(nfs_req_zone, req);
		return 0;
	}

	/* Indicate if the cred is INVALID */
	if (cp->gss_clnt_flags & GSS_CTX_INVAL) {
		p->flags |= NFS_IOC_INVALID_CRED_FLAG;
	}

	/* We have set a principal on the mount */
	if (cp->gss_clnt_principal) {
		princ = (char *)cp->gss_clnt_principal;
		p->princlen = cp->gss_clnt_prinlen;
		p->nametype = cp->gss_clnt_prinnt;
	} else if (cp->gss_clnt_display) {
		/* We have a successful use of the default credential */
		princ = cp->gss_clnt_display;
		p->princlen = strlen(cp->gss_clnt_display);
	}

	/*
	 * If neither of the above is true we have an invalid default credential
	 * So from above p->principal is USER_ADDR_NULL and princ is NULL
	 */

	if (princ) {
		char *pp;

		/*
		 * NOTE(review): kalloc_data() may return NULL even with Z_WAITOK
		 * for some sizes; bcopy would then fault — confirm allocation is
		 * guaranteed for princlen-sized requests here.
		 */
		pp = kalloc_data(p->princlen, Z_WAITOK);
		bcopy(princ, pp, p->princlen);
		p->principal = CAST_USER_ADDR_T(pp);
	}

	lck_mtx_unlock(&nmp->nm_lock);

	/* Hand the referenced context to the scratch request so unref drops it. */
	req->r_gss_ctx = cp;
	NFS_GSS_DBG("Found context %s\n", NFS_GSS_CTX(req, NULL));
	nfs_gss_clnt_ctx_unref(req);
	NFS_ZFREE(nfs_req_zone, req);
	return error;
}
2805 #endif /* CONFIG_NFS_CLIENT */
2806 
2807 /*************
2808  *
2809  * Server functions
2810  */
2811 
2812 #if CONFIG_NFS_SERVER
2813 
2814 /*
2815  * Initialization when NFS starts
2816  */
void
nfs_gss_svc_init(void)
{
	/* Hash table of server-side RPCSEC_GSS contexts, looked up by handle. */
	nfs_gss_svc_ctx_hashtbl = hashinit(SVC_CTX_HASHSZ, M_TEMP, &nfs_gss_svc_ctx_hash);

	/* Callout that reaps stale contexts; armed when contexts exist. */
	nfs_gss_svc_ctx_timer_call = thread_call_allocate(nfs_gss_svc_ctx_timer, NULL);
}
2824 
2825 /*
2826  * Find a server context based on a handle value received
2827  * in an RPCSEC_GSS credential.
2828  */
static struct nfs_gss_svc_ctx *
nfs_gss_svc_ctx_find(uint32_t handle)
{
	struct nfs_gss_svc_ctx_hashhead *head;
	struct nfs_gss_svc_ctx *cp;
	uint64_t timenow;

	/* Handle 0 is the "unassigned/retired" sentinel; it never matches. */
	if (handle == 0) {
		return NULL;
	}

	head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(handle)];
	/*
	 * Don't return a context that is going to expire in GSS_CTX_PEND seconds
	 */
	clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC, &timenow);

	lck_mtx_lock(&nfs_gss_svc_ctx_mutex);

	LIST_FOREACH(cp, head, gss_svc_entries) {
		if (cp->gss_svc_handle == handle) {
			if (timenow > cp->gss_svc_incarnation + GSS_SVC_CTX_TTL) {
				/*
				 * Context has or is about to expire. Don't use.
				 * We'll return null and the client will have to create
				 * a new context.
				 */
				cp->gss_svc_handle = 0;
				/*
				 * Make sure though that we stay around for GSS_CTX_PEND seconds
				 * for other threads that might be using the context.
				 */
				cp->gss_svc_incarnation = timenow;

				cp = NULL;
				break;
			}
			/* Live context: take a reference for the caller. */
			lck_mtx_lock(&cp->gss_svc_mtx);
			cp->gss_svc_refcnt++;
			lck_mtx_unlock(&cp->gss_svc_mtx);
			break;
		}
	}

	lck_mtx_unlock(&nfs_gss_svc_ctx_mutex);

	return cp;
}
2877 
2878 /*
2879  * Insert a new server context into the hash table
2880  * and start the context reap thread if necessary.
2881  */
static void
nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *cp)
{
	struct nfs_gss_svc_ctx_hashhead *head;
	struct nfs_gss_svc_ctx *p;

	lck_mtx_lock(&nfs_gss_svc_ctx_mutex);

	/*
	 * Give the client a random handle so that if we reboot
	 * it's unlikely the client will get a bad context match.
	 * Make sure it's not zero or already assigned.
	 */
retry:
	cp->gss_svc_handle = random();
	if (cp->gss_svc_handle == 0) {
		goto retry;
	}
	head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(cp->gss_svc_handle)];
	/* Re-roll on collision with any context already in this hash chain. */
	LIST_FOREACH(p, head, gss_svc_entries)
	if (p->gss_svc_handle == cp->gss_svc_handle) {
		goto retry;
	}

	/* Stamp an initial lifetime so the reaper doesn't collect a fresh context. */
	clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
	    &cp->gss_svc_incarnation);
	LIST_INSERT_HEAD(head, cp, gss_svc_entries);
	nfs_gss_ctx_count++;

	/* Arm the reap timer if this is the first cached context. */
	if (!nfs_gss_timer_on) {
		nfs_gss_timer_on = 1;

		nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
		    min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC);
	}

	lck_mtx_unlock(&nfs_gss_svc_ctx_mutex);
}
2920 
2921 /*
2922  * This function is called via the kernel's callout
2923  * mechanism.  It runs only when there are
2924  * cached RPCSEC_GSS contexts.
2925  */
2926 void
nfs_gss_svc_ctx_timer(__unused void * param1,__unused void * param2)2927 nfs_gss_svc_ctx_timer(__unused void *param1, __unused void *param2)
2928 {
2929 	struct nfs_gss_svc_ctx *cp, *next;
2930 	uint64_t timenow;
2931 	int contexts = 0;
2932 	int i;
2933 
2934 	lck_mtx_lock(&nfs_gss_svc_ctx_mutex);
2935 	clock_get_uptime(&timenow);
2936 
2937 	NFSRV_GSS_DBG("is running\n");
2938 
2939 	/*
2940 	 * Scan all the hash chains
2941 	 */
2942 	for (i = 0; i < SVC_CTX_HASHSZ; i++) {
2943 		/*
2944 		 * For each hash chain, look for entries
2945 		 * that haven't been used in a while.
2946 		 */
2947 		LIST_FOREACH_SAFE(cp, &nfs_gss_svc_ctx_hashtbl[i], gss_svc_entries, next) {
2948 			contexts++;
2949 			if (timenow > cp->gss_svc_incarnation +
2950 			    (cp->gss_svc_handle ? GSS_SVC_CTX_TTL : 0)
2951 			    && cp->gss_svc_refcnt == 0) {
2952 				/*
2953 				 * A stale context - remove it
2954 				 */
2955 				LIST_REMOVE(cp, gss_svc_entries);
2956 				NFSRV_GSS_DBG("Removing contex for %d\n", cp->gss_svc_uid);
2957 				if (cp->gss_svc_seqbits) {
2958 					kfree_data(cp->gss_svc_seqbits, nfs_gss_seqbits_size(cp->gss_svc_seqwin));
2959 				}
2960 				lck_mtx_destroy(&cp->gss_svc_mtx, &nfs_gss_svc_grp);
2961 				kfree_type(struct nfs_gss_svc_ctx, cp);
2962 				contexts--;
2963 			}
2964 		}
2965 	}
2966 
2967 	nfs_gss_ctx_count = contexts;
2968 
2969 	/*
2970 	 * If there are still some cached contexts left,
2971 	 * set up another callout to check on them later.
2972 	 */
2973 	nfs_gss_timer_on = nfs_gss_ctx_count > 0;
2974 	if (nfs_gss_timer_on) {
2975 		nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
2976 		    min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC);
2977 	}
2978 
2979 	lck_mtx_unlock(&nfs_gss_svc_ctx_mutex);
2980 }
2981 
2982 /*
2983  * Here the server receives an RPCSEC_GSS credential in an
2984  * RPC call header.  First there's some checking to make sure
2985  * the credential is appropriate - whether the context is still
2986  * being set up, or is complete.  Then we use the handle to find
2987  * the server's context and validate the verifier, which contains
2988  * a signed checksum of the RPC header. If the verifier checks
2989  * out, we extract the user's UID and groups from the context
2990  * and use it to set up a UNIX credential for the user's request.
2991  */
int
nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
{
	uint32_t vers, proc, seqnum, service;
	uint32_t handle, handle_len;
	uint32_t major;
	struct nfs_gss_svc_ctx *cp = NULL;
	uint32_t flavor = 0;
	int error = 0;
	uint32_t arglen;
	size_t argsize, start, header_len;
	gss_buffer_desc cksum;
	struct nfsm_chain nmc_tmp;
	mbuf_t reply_mbuf, prev_mbuf, pad_mbuf;

	vers = proc = seqnum = service = handle_len = 0;
	arglen = 0;

	/* The credential body starts with the RPCSEC_GSS version. */
	nfsm_chain_get_32(error, nmc, vers);
	if (vers != RPCSEC_GSS_VERS_1) {
		error = NFSERR_AUTHERR | AUTH_REJECTCRED;
		goto nfsmout;
	}

	/* Then: gss procedure, sequence number, service level, handle length. */
	nfsm_chain_get_32(error, nmc, proc);
	nfsm_chain_get_32(error, nmc, seqnum);
	nfsm_chain_get_32(error, nmc, service);
	nfsm_chain_get_32(error, nmc, handle_len);
	if (error) {
		goto nfsmout;
	}

	/*
	 * Make sure context setup/destroy is being done with a nullproc
	 */
	if (proc != RPCSEC_GSS_DATA && nd->nd_procnum != NFSPROC_NULL) {
		error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
		goto nfsmout;
	}

	/*
	 * If the sequence number is greater than the max
	 * allowable, reject and have the client init a
	 * new context.
	 */
	if (seqnum > GSS_MAXSEQ) {
		error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
		goto nfsmout;
	}

	/* Map the requested GSS service level to the local auth flavor. */
	nd->nd_sec =
	    service == RPCSEC_GSS_SVC_NONE ?      RPCAUTH_KRB5 :
	    service == RPCSEC_GSS_SVC_INTEGRITY ? RPCAUTH_KRB5I :
	    service == RPCSEC_GSS_SVC_PRIVACY ?   RPCAUTH_KRB5P : 0;

	if (proc == RPCSEC_GSS_INIT) {
		/*
		 * Limit the total number of contexts
		 */
		if (nfs_gss_ctx_count > nfs_gss_ctx_max) {
			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
			goto nfsmout;
		}

		/*
		 * Set up a new context
		 */
		cp = kalloc_type(struct nfs_gss_svc_ctx,
		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
		lck_mtx_init(&cp->gss_svc_mtx, &nfs_gss_svc_grp, LCK_ATTR_NULL);
		cp->gss_svc_refcnt = 1;
	} else {
		/*
		 * Use the handle to find the context
		 */
		if (handle_len != sizeof(handle)) {
			error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
			goto nfsmout;
		}
		nfsm_chain_get_32(error, nmc, handle);
		if (error) {
			goto nfsmout;
		}
		/* nfs_gss_svc_ctx_find() returns the context with a reference held. */
		cp = nfs_gss_svc_ctx_find(handle);
		if (cp == NULL) {
			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
			goto nfsmout;
		}
	}

	cp->gss_svc_proc = proc;

	if (proc == RPCSEC_GSS_DATA || proc == RPCSEC_GSS_DESTROY) {
		struct posix_cred temp_pcred;

		if (cp->gss_svc_seqwin == 0) {
			/*
			 * Context isn't complete
			 */
			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
			goto nfsmout;
		}

		if (!nfs_gss_svc_seqnum_valid(cp, seqnum)) {
			/*
			 * Sequence number is bad
			 */
			error = EINVAL; // drop the request
			goto nfsmout;
		}

		/*
		 * Validate the verifier.
		 * The verifier contains an encrypted checksum
		 * of the call header from the XID up to and
		 * including the credential.  We compute the
		 * checksum and compare it with what came in
		 * the verifier.
		 */
		header_len = nfsm_chain_offset(nmc);
		nfsm_chain_get_32(error, nmc, flavor);
		nfsm_chain_get_32(error, nmc, cksum.length);
		if (error) {
			goto nfsmout;
		}
		if (flavor != RPCSEC_GSS || cksum.length > KRB5_MAX_MIC_SIZE) {
			error = NFSERR_AUTHERR | AUTH_BADVERF;
		} else {
			cksum.value = kalloc_data(cksum.length, Z_WAITOK | Z_NOFAIL);
			nfsm_chain_get_opaque(error, nmc, cksum.length, cksum.value);
		}
		if (error) {
			/*
			 * NOTE(review): if nfsm_chain_get_opaque() failed after the
			 * allocation above, cksum.value appears to leak on this
			 * path — confirm and release it here if so.
			 */
			goto nfsmout;
		}

		/* Now verify the client's call header checksum */
		major = gss_krb5_verify_mic_mbuf((uint32_t *)&error, cp->gss_svc_ctx_id, nmc->nmc_mhead, 0, header_len, &cksum, NULL);
		(void)gss_release_buffer(NULL, &cksum);
		if (major != GSS_S_COMPLETE) {
			printf("Server header: gss_krb5_verify_mic_mbuf failed %d\n", error);
			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
			goto nfsmout;
		}

		nd->nd_gss_seqnum = seqnum;

		/*
		 * Set up the user's cred
		 */
		bzero(&temp_pcred, sizeof(temp_pcred));
		temp_pcred.cr_uid = cp->gss_svc_uid;
		bcopy(cp->gss_svc_gids, temp_pcred.cr_groups,
		    sizeof(gid_t) * cp->gss_svc_ngroups);
		temp_pcred.cr_ngroups = (short)cp->gss_svc_ngroups;

		nd->nd_cr = posix_cred_create(&temp_pcred);
		if (nd->nd_cr == NULL) {
			error = ENOMEM;
			goto nfsmout;
		}
		/* Refresh the context's last-use stamp. */
		clock_get_uptime(&cp->gss_svc_incarnation);

		/*
		 * If the call arguments are integrity or privacy protected
		 * then we need to check them here.
		 */
		switch (service) {
		case RPCSEC_GSS_SVC_NONE:
			/* nothing to do */
			break;
		case RPCSEC_GSS_SVC_INTEGRITY:
			/*
			 * Here's what we expect in the integrity call args:
			 *
			 * - length of seq num + call args (4 bytes)
			 * - sequence number (4 bytes)
			 * - call args (variable bytes)
			 * - length of checksum token
			 * - checksum of seqnum + call args
			 */
			nfsm_chain_get_32(error, nmc, arglen);          // length of args
			if (arglen > NFS_MAXPACKET) {
				error = EBADRPC;
				goto nfsmout;
			}

			/* Walk a copy of the chain past the args to reach the MIC. */
			nmc_tmp = *nmc;
			nfsm_chain_adv(error, &nmc_tmp, arglen);
			nfsm_chain_get_32(error, &nmc_tmp, cksum.length);
			cksum.value = NULL;
			if (cksum.length > 0 && cksum.length < GSS_MAX_MIC_LEN) {
				cksum.value = kalloc_data(cksum.length, Z_WAITOK | Z_NOFAIL);
			} else {
				error = EBADRPC;
				goto nfsmout;
			}
			/*
			 * NOTE(review): an error from this get_opaque is not checked
			 * before the MIC verification below — confirm the subsequent
			 * verify failure is the intended handling of that case.
			 */
			nfsm_chain_get_opaque(error, &nmc_tmp, cksum.length, cksum.value);

			/* Verify the checksum over the call args */
			start = nfsm_chain_offset(nmc);

			major = gss_krb5_verify_mic_mbuf((uint32_t *)&error, cp->gss_svc_ctx_id,
			    nmc->nmc_mhead, start, arglen, &cksum, NULL);
			kfree_data(cksum.value, cksum.length);
			if (major != GSS_S_COMPLETE) {
				printf("Server args: gss_krb5_verify_mic_mbuf failed %d\n", error);
				error = EBADRPC;
				goto nfsmout;
			}

			/*
			 * Get the sequence number prepended to the args
			 * and compare it against the one sent in the
			 * call credential.
			 */
			nfsm_chain_get_32(error, nmc, seqnum);
			if (seqnum != nd->nd_gss_seqnum) {
				error = EBADRPC;                        // returns as GARBAGEARGS
				goto nfsmout;
			}
			break;
		case RPCSEC_GSS_SVC_PRIVACY:
			/*
			 * Here's what we expect in the privacy call args:
			 *
			 * - length of wrap token
			 * - wrap token (37-40 bytes)
			 */
			prev_mbuf = nmc->nmc_mcur;
			nfsm_chain_get_32(error, nmc, arglen);          // length of args
			if (arglen > NFS_MAXPACKET) {
				error = EBADRPC;
				goto nfsmout;
			}

			/* Get the wrap token (current mbuf in the chain starting at the current offset) */
			start = nmc->nmc_ptr - (caddr_t)mbuf_data(nmc->nmc_mcur);

			/* split out the wrap token */
			argsize = arglen;
			error = gss_normalize_mbuf(nmc->nmc_mcur, start, &argsize, &reply_mbuf, &pad_mbuf, 0);
			if (error) {
				goto nfsmout;
			}

			assert(argsize == arglen);
			if (pad_mbuf) {
				assert(nfsm_pad(arglen) == mbuf_len(pad_mbuf));
				mbuf_free(pad_mbuf);
			} else {
				assert(nfsm_pad(arglen) == 0);
			}

			/* Decrypt the wrap token in place. */
			major = gss_krb5_unwrap_mbuf((uint32_t *)&error, cp->gss_svc_ctx_id, &reply_mbuf, 0, arglen, NULL, NULL);
			if (major != GSS_S_COMPLETE) {
				printf("%s: gss_krb5_unwrap_mbuf failes %d\n", __func__, error);
				goto nfsmout;
			}

			/* Now replace the wrapped arguments with the unwrapped ones */
			mbuf_setnext(prev_mbuf, reply_mbuf);
			nmc->nmc_mcur = reply_mbuf;
			nmc->nmc_ptr = mbuf_data(reply_mbuf);
			nmc->nmc_left = mbuf_len(reply_mbuf);

			/*
			 * - sequence number (4 bytes)
			 * - call args
			 */

			// nfsm_chain_reverse(nmc, nfsm_pad(toklen));

			/*
			 * Get the sequence number prepended to the args
			 * and compare it against the one sent in the
			 * call credential.
			 */
			nfsm_chain_get_32(error, nmc, seqnum);
			if (seqnum != nd->nd_gss_seqnum) {
				printf("%s: Sequence number mismatch seqnum = %d nd->nd_gss_seqnum = %d\n",
				    __func__, seqnum, nd->nd_gss_seqnum);
				printmbuf("reply_mbuf", nmc->nmc_mhead, 0, 0);
				printf("reply_mbuf %p nmc_head %p\n", reply_mbuf, nmc->nmc_mhead);
				error = EBADRPC;                        // returns as GARBAGEARGS
				goto nfsmout;
			}
			break;
		}
	} else {
		uint32_t verflen;
		/*
		 * If the proc is RPCSEC_GSS_INIT or RPCSEC_GSS_CONTINUE_INIT
		 * then we expect a null verifier.
		 */
		nfsm_chain_get_32(error, nmc, flavor);
		nfsm_chain_get_32(error, nmc, verflen);
		if (error || flavor != RPCAUTH_NULL || verflen > 0) {
			error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
		}
		if (error) {
			/* A context freshly allocated above was never published; free it. */
			if (proc == RPCSEC_GSS_INIT) {
				lck_mtx_destroy(&cp->gss_svc_mtx, &nfs_gss_svc_grp);
				kfree_type(struct nfs_gss_svc_ctx, cp);
				cp = NULL;
			}
			goto nfsmout;
		}
	}

	/* Success: hand the (referenced) context to the request descriptor. */
	nd->nd_gss_context = cp;
	return 0;
nfsmout:
	if (cp) {
		nfs_gss_svc_ctx_deref(cp);
	}
	return error;
}
3309 
3310 /*
3311  * Insert the server's verifier into the RPC reply header.
3312  * It contains a signed checksum of the sequence number that
3313  * was received in the RPC call.
3314  * Then go on to add integrity or privacy if necessary.
3315  */
3316 int
nfs_gss_svc_verf_put(struct nfsrv_descript * nd,struct nfsm_chain * nmc)3317 nfs_gss_svc_verf_put(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
3318 {
3319 	struct nfs_gss_svc_ctx *cp;
3320 	int error = 0;
3321 	gss_buffer_desc cksum, seqbuf;
3322 	uint32_t network_seqnum;
3323 	cp = nd->nd_gss_context;
3324 	uint32_t major;
3325 
3326 	if (cp->gss_svc_major != GSS_S_COMPLETE) {
3327 		/*
3328 		 * If the context isn't yet complete
3329 		 * then return a null verifier.
3330 		 */
3331 		nfsm_chain_add_32(error, nmc, RPCAUTH_NULL);
3332 		nfsm_chain_add_32(error, nmc, 0);
3333 		return error;
3334 	}
3335 
3336 	/*
3337 	 * Compute checksum of the request seq number
3338 	 * If it's the final reply of context setup
3339 	 * then return the checksum of the context
3340 	 * window size.
3341 	 */
3342 	seqbuf.length = NFSX_UNSIGNED;
3343 	if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
3344 	    cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) {
3345 		network_seqnum = htonl(cp->gss_svc_seqwin);
3346 	} else {
3347 		network_seqnum = htonl(nd->nd_gss_seqnum);
3348 	}
3349 	seqbuf.value = &network_seqnum;
3350 
3351 	major = gss_krb5_get_mic((uint32_t *)&error, cp->gss_svc_ctx_id, 0, &seqbuf, &cksum);
3352 	if (major != GSS_S_COMPLETE) {
3353 		return error;
3354 	}
3355 
3356 	/*
3357 	 * Now wrap it in a token and add
3358 	 * the verifier to the reply.
3359 	 */
3360 	nfsm_chain_add_32(error, nmc, RPCSEC_GSS);
3361 	nfsm_chain_add_32(error, nmc, cksum.length);
3362 	nfsm_chain_add_opaque(error, nmc, cksum.value, cksum.length);
3363 	gss_release_buffer(NULL, &cksum);
3364 
3365 	return error;
3366 }
3367 
3368 /*
3369  * The results aren't available yet, but if they need to be
3370  * checksummed for integrity protection or encrypted, then
3371  * we can record the start offset here, insert a place-holder
3372  * for the results length, as well as the sequence number.
3373  * The rest of the work is done later by nfs_gss_svc_protect_reply()
3374  * when the results are available.
3375  */
3376 int
nfs_gss_svc_prepare_reply(struct nfsrv_descript * nd,struct nfsm_chain * nmc)3377 nfs_gss_svc_prepare_reply(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
3378 {
3379 	struct nfs_gss_svc_ctx *cp = nd->nd_gss_context;
3380 	int error = 0;
3381 
3382 	if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
3383 	    cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) {
3384 		return 0;
3385 	}
3386 
3387 	switch (nd->nd_sec) {
3388 	case RPCAUTH_KRB5:
3389 		/* Nothing to do */
3390 		break;
3391 	case RPCAUTH_KRB5I:
3392 	case RPCAUTH_KRB5P:
3393 		nd->nd_gss_mb = nmc->nmc_mcur;                  // record current mbuf
3394 		nfsm_chain_finish_mbuf(error, nmc);             // split the chain here
3395 		break;
3396 	}
3397 
3398 	return error;
3399 }
3400 
3401 /*
3402  * The results are checksummed or encrypted for return to the client
3403  */
int
nfs_gss_svc_protect_reply(struct nfsrv_descript *nd, mbuf_t mrep __unused)
{
	struct nfs_gss_svc_ctx *cp = nd->nd_gss_context;
	struct nfsm_chain nmrep_res, *nmc_res = &nmrep_res;
	mbuf_t mb, results;
	uint32_t reslen;
	int error = 0;

	/* XXX
	 * Using a reference to the mbuf where we previously split the reply
	 * mbuf chain, we split the mbuf chain argument into two mbuf chains,
	 * one that allows us to prepend a length field or token, (nmc_pre)
	 * and the second which holds just the results that we're going to
	 * checksum and/or encrypt.  When we're done, we join the chains back
	 * together.
	 */

	mb = nd->nd_gss_mb;                             // the mbuf where we split
	results = mbuf_next(mb);                        // first mbuf in the results
	error = mbuf_setnext(mb, NULL);                 // disconnect the chains
	if (error) {
		return error;
	}
	nfs_gss_nfsm_chain(nmc_res, mb);                // set up the prepend chain
	nfsm_chain_build_done(error, nmc_res);
	if (error) {
		return error;
	}

	/* Wrap the results per the negotiated service: MIC for krb5i, seal for krb5p. */
	if (nd->nd_sec == RPCAUTH_KRB5I) {
		error = rpc_gss_integ_data_create(cp->gss_svc_ctx_id, &results, nd->nd_gss_seqnum, &reslen);
	} else {
		/* RPCAUTH_KRB5P */
		error = rpc_gss_priv_data_create(cp->gss_svc_ctx_id, &results, nd->nd_gss_seqnum, &reslen);
	}
	/*
	 * NOTE(review): the create error above is not checked before
	 * rejoining the chains — presumably nfsm_chain_build_done()
	 * short-circuits on error; confirm.
	 */
	nfs_gss_append_chain(nmc_res, results); // Append the results mbufs
	nfsm_chain_build_done(error, nmc_res);

	return error;
}
3445 
3446 /*
3447  * This function handles the context setup calls from the client.
3448  * Essentially, it implements the NFS null procedure calls when
3449  * an RPCSEC_GSS credential is used.
3450  * This is the context maintenance function.  It creates and
3451  * destroys server contexts at the whim of the client.
3452  * During context creation, it receives GSS-API tokens from the
3453  * client, passes them up to gssd, and returns a received token
3454  * back to the client in the null procedure reply.
3455  */
int
nfs_gss_svc_ctx_init(struct nfsrv_descript *nd, struct nfsrv_sock *slp, mbuf_t *mrepp)
{
	struct nfs_gss_svc_ctx *cp = NULL;
	int error = 0;
	int autherr = 0;	/* non-zero: reply with NFSERR_AUTHERR and tear down */
	struct nfsm_chain *nmreq, nmrep;
	int sz;

	nmreq = &nd->nd_nmreq;
	nfsm_chain_null(&nmrep);
	*mrepp = NULL;
	cp = nd->nd_gss_context;
	nd->nd_repstat = 0;

	switch (cp->gss_svc_proc) {
	case RPCSEC_GSS_INIT:
		/* First round: publish the new context in the hash table. */
		nfs_gss_svc_ctx_insert(cp);
		OS_FALLTHROUGH;

	case RPCSEC_GSS_CONTINUE_INIT:
		/* Get the token from the request */
		nfsm_chain_get_32(error, nmreq, cp->gss_svc_tokenlen);
		cp->gss_svc_token = NULL;
		if (cp->gss_svc_tokenlen > 0 && cp->gss_svc_tokenlen < GSS_MAX_TOKEN_LEN) {
			cp->gss_svc_token = kalloc_data(cp->gss_svc_tokenlen, Z_WAITOK);
		}
		if (cp->gss_svc_token == NULL) {
			autherr = RPCSEC_GSS_CREDPROBLEM;
			break;
		}
		nfsm_chain_get_opaque(error, nmreq, cp->gss_svc_tokenlen, cp->gss_svc_token);

		/* Use the token in a gss_accept_sec_context upcall */
		error = nfs_gss_svc_gssd_upcall(cp);
		if (error) {
			autherr = RPCSEC_GSS_CREDPROBLEM;
			if (error == NFSERR_EAUTH) {
				error = 0;
			}
			break;
		}

		/*
		 * If the context isn't complete, pass the new token
		 * back to the client for another round.
		 */
		if (cp->gss_svc_major != GSS_S_COMPLETE) {
			break;
		}

		/*
		 * Now the server context is complete.
		 * Finish setup.
		 */
		clock_get_uptime(&cp->gss_svc_incarnation);

		/* Allocate the replay-detection bitmap for the sequence window. */
		cp->gss_svc_seqwin = GSS_SVC_SEQWINDOW;
		cp->gss_svc_seqbits = kalloc_data(nfs_gss_seqbits_size(cp->gss_svc_seqwin), Z_WAITOK | Z_ZERO);
		if (cp->gss_svc_seqbits == NULL) {
			autherr = RPCSEC_GSS_CREDPROBLEM;
			break;
		}
		break;

	case RPCSEC_GSS_DATA:
		/* Just a nullproc ping - do nothing */
		break;

	case RPCSEC_GSS_DESTROY:
		/*
		 * Don't destroy the context immediately because
		 * other active requests might still be using it.
		 * Instead, schedule it for destruction after
		 * GSS_CTX_PEND time has elapsed.
		 */
		cp = nfs_gss_svc_ctx_find(cp->gss_svc_handle);
		if (cp != NULL) {
			cp->gss_svc_handle = 0; // so it can't be found
			lck_mtx_lock(&cp->gss_svc_mtx);
			clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
			    &cp->gss_svc_incarnation);
			lck_mtx_unlock(&cp->gss_svc_mtx);
		}
		break;
	default:
		autherr = RPCSEC_GSS_CREDPROBLEM;
		break;
	}

	/* Now build the reply  */

	if (nd->nd_repstat == 0) {
		nd->nd_repstat = autherr ? (NFSERR_AUTHERR | autherr) : NFSERR_RETVOID;
	}
	sz = 7 * NFSX_UNSIGNED + nfsm_rndup(cp->gss_svc_tokenlen); // size of results
	error = nfsrv_rephead(nd, slp, &nmrep, sz);
	*mrepp = nmrep.nmc_mhead;
	if (error || autherr) {
		goto nfsmout;
	}

	/* Setup rounds return: handle, major/minor status, seq window, out token. */
	if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
	    cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) {
		nfsm_chain_add_32(error, &nmrep, sizeof(cp->gss_svc_handle));
		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_handle);

		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_major);
		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_minor);
		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_seqwin);

		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_tokenlen);
		if (cp->gss_svc_token != NULL) {
			nfsm_chain_add_opaque(error, &nmrep, cp->gss_svc_token, cp->gss_svc_tokenlen);
			kfree_data_addr(cp->gss_svc_token);
		}
	}

nfsmout:
	if (autherr != 0) {
		/*
		 * Auth failure: unpublish and destroy the context.
		 * NOTE(review): this assumes cp is on the hash list for every
		 * autherr path (e.g. the default case) — confirm LIST_REMOVE
		 * is always safe here.
		 */
		nd->nd_gss_context = NULL;
		LIST_REMOVE(cp, gss_svc_entries);
		if (cp->gss_svc_seqbits != NULL) {
			kfree_data(cp->gss_svc_seqbits, nfs_gss_seqbits_size(cp->gss_svc_seqwin));
		}
		if (cp->gss_svc_token != NULL) {
			kfree_data_addr(cp->gss_svc_token);
		}
		lck_mtx_destroy(&cp->gss_svc_mtx, &nfs_gss_svc_grp);
		kfree_type(struct nfs_gss_svc_ctx, cp);
	}

	nfsm_chain_build_done(error, &nmrep);
	if (error) {
		nfsm_chain_cleanup(&nmrep);
		*mrepp = NULL;
	}
	return error;
}
3595 
3596 /*
3597  * This is almost a mirror-image of the client side upcall.
3598  * It passes and receives a token, but invokes gss_accept_sec_context.
3599  * If it's the final call of the context setup, then gssd also returns
3600  * the session key and the user's UID.
3601  */
static int
nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx *cp)
{
	kern_return_t kr;
	mach_port_t mp;
	int retry_cnt = 0;
	gssd_byte_buffer octx = NULL;
	uint32_t lucidlen = 0;
	void *lucid_ctx_buffer;
	uint32_t ret_flags;
	vm_map_copy_t itoken = NULL;
	gssd_byte_buffer otoken = NULL;
	mach_msg_type_number_t otokenlen;
	int error = 0;
	char svcname[] = "nfs";

	/* Get a send right to the gssd daemon's host special port. */
	kr = host_get_gssd_port(host_priv_self(), &mp);
	if (kr != KERN_SUCCESS) {
		printf("nfs_gss_svc_gssd_upcall: can't get gssd port, status %x (%d)\n", kr, kr);
		goto out;
	}
	if (!IPC_PORT_VALID(mp)) {
		printf("nfs_gss_svc_gssd_upcall: gssd port not valid\n");
		goto out;
	}

	/* Copy the input token into out-of-line Mach memory for the MIG call. */
	if (cp->gss_svc_tokenlen > 0) {
		nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken);
	}

retry:
	/* NOTE(review): unconditional printf — looks like leftover debug output. */
	printf("Calling mach_gss_accept_sec_context\n");
	kr = mach_gss_accept_sec_context(
		mp,
		(gssd_byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_svc_tokenlen,
		svcname,
		0,
		&cp->gss_svc_context,
		&cp->gss_svc_cred_handle,
		&ret_flags,
		&cp->gss_svc_uid,
		cp->gss_svc_gids,
		&cp->gss_svc_ngroups,
		&octx, (mach_msg_type_number_t *) &lucidlen,
		&otoken, &otokenlen,
		&cp->gss_svc_major,
		&cp->gss_svc_minor);

	/* NOTE(review): unconditional printf — looks like leftover debug output. */
	printf("mach_gss_accept_sec_context returned %d\n", kr);
	if (kr != KERN_SUCCESS) {
		printf("nfs_gss_svc_gssd_upcall failed: %x (%d)\n", kr, kr);
		/* Retry only if gssd died before any context state was established. */
		if (kr == MIG_SERVER_DIED && cp->gss_svc_context == 0 &&
		    retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES) {
			/* The OOL token was consumed by the failed send; rebuild it. */
			if (cp->gss_svc_tokenlen > 0) {
				nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken);
			}
			goto retry;
		}
		host_release_special_port(mp);
		goto out;
	}

	host_release_special_port(mp);

	/*
	 * A non-zero lucidlen means gssd returned a completed ("lucid")
	 * security context; turn it into a kernel crypto context.
	 */
	if (lucidlen > 0) {
		if (lucidlen > MAX_LUCIDLEN) {
			printf("nfs_gss_svc_gssd_upcall: bad context length (%d)\n", lucidlen);
			vm_map_copy_discard((vm_map_copy_t) octx);
			vm_map_copy_discard((vm_map_copy_t) otoken);
			goto out;
		}
		lucid_ctx_buffer = kalloc_data(lucidlen, Z_WAITOK | Z_ZERO);
		error = nfs_gss_mach_vmcopyout((vm_map_copy_t) octx, lucidlen, lucid_ctx_buffer);
		if (error) {
			vm_map_copy_discard((vm_map_copy_t) otoken);
			kfree_data(lucid_ctx_buffer, lucidlen);
			goto out;
		}
		/* Replace any context from an earlier (re-)negotiation round. */
		if (cp->gss_svc_ctx_id) {
			gss_krb5_destroy_context(cp->gss_svc_ctx_id);
		}
		cp->gss_svc_ctx_id = gss_krb5_make_context(lucid_ctx_buffer, lucidlen);
		kfree_data(lucid_ctx_buffer, lucidlen);
		if (cp->gss_svc_ctx_id == NULL) {
			printf("Failed to make context from lucid_ctx_buffer\n");
			goto out;
		}
	}

	/* Free context token used as input */
	if (cp->gss_svc_token) {
		kfree_data(cp->gss_svc_token, cp->gss_svc_tokenlen);
	}
	cp->gss_svc_token = NULL;
	cp->gss_svc_tokenlen = 0;

	if (otokenlen > 0) {
		/* Set context token to gss output token */
		cp->gss_svc_token = kalloc_data(otokenlen, Z_WAITOK);
		if (cp->gss_svc_token == NULL) {
			printf("nfs_gss_svc_gssd_upcall: could not allocate %d bytes\n", otokenlen);
			vm_map_copy_discard((vm_map_copy_t) otoken);
			return ENOMEM;
		}
		error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, otokenlen, cp->gss_svc_token);
		if (error) {
			kfree_data(cp->gss_svc_token, otokenlen);
			return NFSERR_EAUTH;
		}
		cp->gss_svc_tokenlen = otokenlen;
	}

	return 0;

out:
	/* Failure: release the input token and report an auth error. */
	kfree_data(cp->gss_svc_token, cp->gss_svc_tokenlen);
	cp->gss_svc_tokenlen = 0;

	return NFSERR_EAUTH;
}
3722 
3723 /*
3724  * Validate the sequence number in the credential as described
3725  * in RFC 2203 Section 5.3.3.1
3726  *
3727  * Here the window of valid sequence numbers is represented by
3728  * a bitmap.  As each sequence number is received, its bit is
3729  * set in the bitmap.  An invalid sequence number lies below
3730  * the lower bound of the window, or is within the window but
3731  * has its bit already set.
3732  */
static int
nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *cp, uint32_t seq)
{
	/*
	 * NOTE(review): gss_svc_seqbits/gss_svc_seqwin are read before taking
	 * gss_svc_mtx — safe only if both are stable for the context's
	 * lifetime once established; confirm against the setup path.
	 */
	uint32_t *bits = cp->gss_svc_seqbits;
	uint32_t win = cp->gss_svc_seqwin;
	uint32_t i;

	lck_mtx_lock(&cp->gss_svc_mtx);

	/*
	 * If greater than the window upper bound,
	 * move the window up, and set the bit.
	 */
	if (seq > cp->gss_svc_seqmax) {
		if (seq - cp->gss_svc_seqmax > win) {
			/* Jumped past the entire window: no old bits survive. */
			bzero(bits, nfs_gss_seqbits_size(win));
		} else {
			/* Clear the bits of the sequence numbers being skipped over. */
			for (i = cp->gss_svc_seqmax + 1; i < seq; i++) {
				win_resetbit(bits, i % win);
			}
		}
		/* Record this sequence number and advance the window's upper bound. */
		win_setbit(bits, seq % win);
		cp->gss_svc_seqmax = seq;
		lck_mtx_unlock(&cp->gss_svc_mtx);
		return 1;
	}

	/*
	 * Invalid if below the lower bound of the window
	 *
	 * NOTE(review): unsigned subtraction — presumably seqmax >= win (or the
	 * wrapped comparison is still intended) once the window is full; verify.
	 */
	if (seq <= cp->gss_svc_seqmax - win) {
		lck_mtx_unlock(&cp->gss_svc_mtx);
		return 0;
	}

	/*
	 * In the window, invalid if the bit is already set
	 * (i.e. this sequence number was already seen — a replay).
	 */
	if (win_getbit(bits, seq % win)) {
		lck_mtx_unlock(&cp->gss_svc_mtx);
		return 0;
	}
	/* First sighting of an in-window sequence number: mark and accept. */
	win_setbit(bits, seq % win);
	lck_mtx_unlock(&cp->gss_svc_mtx);
	return 1;
}
3779 
3780 /*
3781  * Drop a reference to a context
3782  *
3783  * Note that it's OK for the context to exist
3784  * with a refcount of zero.  The refcount isn't
3785  * checked until we're about to reap an expired one.
3786  */
3787 void
nfs_gss_svc_ctx_deref(struct nfs_gss_svc_ctx * cp)3788 nfs_gss_svc_ctx_deref(struct nfs_gss_svc_ctx *cp)
3789 {
3790 	lck_mtx_lock(&cp->gss_svc_mtx);
3791 	if (cp->gss_svc_refcnt > 0) {
3792 		cp->gss_svc_refcnt--;
3793 	} else {
3794 		printf("nfs_gss_ctx_deref: zero refcount\n");
3795 	}
3796 	lck_mtx_unlock(&cp->gss_svc_mtx);
3797 }
3798 
3799 /*
3800  * Called at NFS server shutdown - destroy all contexts
3801  */
3802 void
nfs_gss_svc_cleanup(void)3803 nfs_gss_svc_cleanup(void)
3804 {
3805 	struct nfs_gss_svc_ctx_hashhead *head;
3806 	struct nfs_gss_svc_ctx *cp, *ncp;
3807 	int i;
3808 
3809 	lck_mtx_lock(&nfs_gss_svc_ctx_mutex);
3810 
3811 	/*
3812 	 * Run through all the buckets
3813 	 */
3814 	for (i = 0; i < SVC_CTX_HASHSZ; i++) {
3815 		/*
3816 		 * Remove and free all entries in the bucket
3817 		 */
3818 		head = &nfs_gss_svc_ctx_hashtbl[i];
3819 		LIST_FOREACH_SAFE(cp, head, gss_svc_entries, ncp) {
3820 			LIST_REMOVE(cp, gss_svc_entries);
3821 			if (cp->gss_svc_seqbits) {
3822 				kfree_data(cp->gss_svc_seqbits, nfs_gss_seqbits_size(cp->gss_svc_seqwin));
3823 			}
3824 			lck_mtx_destroy(&cp->gss_svc_mtx, &nfs_gss_svc_grp);
3825 			kfree_type(struct nfs_gss_svc_ctx, cp);
3826 		}
3827 	}
3828 
3829 	lck_mtx_unlock(&nfs_gss_svc_ctx_mutex);
3830 }
3831 
3832 #endif /* CONFIG_NFS_SERVER */
3833 
3834 
3835 /*************
3836  * The following functions are used by both client and server.
3837  */
3838 
3839 /*
3840  * Release a host special port that was obtained by host_get_special_port
3841  * or one of its macros (host_get_gssd_port in this case).
3842  * This really should be in a public kpi.
3843  */
3844 
3845 /* This should be in a public header if this routine is not */
3846 extern void ipc_port_release_send(ipc_port_t);
3847 extern ipc_port_t ipc_port_copy_send(ipc_port_t);
3848 
3849 static void
host_release_special_port(mach_port_t mp)3850 host_release_special_port(mach_port_t mp)
3851 {
3852 	if (IPC_PORT_VALID(mp)) {
3853 		ipc_port_release_send(mp);
3854 	}
3855 }
3856 
3857 static mach_port_t
host_copy_special_port(mach_port_t mp)3858 host_copy_special_port(mach_port_t mp)
3859 {
3860 	return ipc_port_copy_send(mp);
3861 }
3862 
3863 /*
3864  * The token that is sent and received in the gssd upcall
3865  * has unbounded variable length.  Mach RPC does not pass
3866  * the token in-line.  Instead it uses page mapping to handle
3867  * these parameters.  This function allocates a VM buffer
3868  * to hold the token for an upcall and copies the token
3869  * (received from the client) into it.  The VM buffer is
3870  * marked with a src_destroy flag so that the upcall will
3871  * automatically de-allocate the buffer when the upcall is
3872  * complete.
3873  */
3874 static void
nfs_gss_mach_alloc_buffer(u_char * buf,size_t buflen,vm_map_copy_t * addr)3875 nfs_gss_mach_alloc_buffer(u_char *buf, size_t buflen, vm_map_copy_t *addr)
3876 {
3877 	kern_return_t kr;
3878 	vm_offset_t kmem_buf;
3879 	vm_size_t tbuflen;
3880 
3881 	*addr = NULL;
3882 	if (buf == NULL || buflen == 0) {
3883 		return;
3884 	}
3885 
3886 	tbuflen = vm_map_round_page(buflen, vm_map_page_mask(ipc_kernel_map));
3887 
3888 	if (tbuflen < buflen) {
3889 		printf("nfs_gss_mach_alloc_buffer: vm_map_round_page failed\n");
3890 		return;
3891 	}
3892 
3893 	kr = kmem_alloc(ipc_kernel_map, &kmem_buf, tbuflen,
3894 	    KMA_DATA, VM_KERN_MEMORY_FILE);
3895 	if (kr != 0) {
3896 		printf("nfs_gss_mach_alloc_buffer: vm_allocate failed\n");
3897 		return;
3898 	}
3899 
3900 	bcopy(buf, (char *)kmem_buf, buflen);
3901 	bzero((char *)kmem_buf + buflen, tbuflen - buflen);
3902 
3903 	kr = vm_map_unwire(ipc_kernel_map, kmem_buf, kmem_buf + tbuflen, FALSE);
3904 	if (kr != 0) {
3905 		printf("nfs_gss_mach_alloc_buffer: vm_map_unwire failed\n");
3906 		return;
3907 	}
3908 
3909 	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t) kmem_buf,
3910 	    (vm_map_size_t) buflen, TRUE, addr);
3911 	if (kr != 0) {
3912 		printf("nfs_gss_mach_alloc_buffer: vm_map_copyin failed\n");
3913 		return;
3914 	}
3915 }
3916 
3917 /*
3918  * Here we handle a token received from the gssd via an upcall.
 * The received token resides in an allocated VM buffer.
3920  * We copy the token out of this buffer to a chunk of malloc'ed
3921  * memory of the right size, then de-allocate the VM buffer.
3922  */
3923 static int
nfs_gss_mach_vmcopyout(vm_map_copy_t in,uint32_t len,u_char * out)3924 nfs_gss_mach_vmcopyout(vm_map_copy_t in, uint32_t len, u_char *out)
3925 {
3926 	vm_map_offset_t map_data;
3927 	vm_offset_t data;
3928 	int error;
3929 
3930 	error = vm_map_copyout(ipc_kernel_map, &map_data, in);
3931 	if (error) {
3932 		return error;
3933 	}
3934 
3935 	data = CAST_DOWN(vm_offset_t, map_data);
3936 	bcopy((void *) data, out, len);
3937 	vm_deallocate(ipc_kernel_map, data, len);
3938 
3939 	return 0;
3940 }
3941 
3942 /*
3943  * Return the number of bytes in an mbuf chain.
3944  */
3945 static int
nfs_gss_mchain_length(mbuf_t mhead)3946 nfs_gss_mchain_length(mbuf_t mhead)
3947 {
3948 	mbuf_t mb;
3949 	int len = 0;
3950 
3951 	for (mb = mhead; mb; mb = mbuf_next(mb)) {
3952 		len += mbuf_len(mb);
3953 	}
3954 
3955 	return len;
3956 }
3957 
3958 /*
3959  * Return the size for the sequence numbers bitmap.
3960  */
3961 static int
nfs_gss_seqbits_size(uint32_t win)3962 nfs_gss_seqbits_size(uint32_t win)
3963 {
3964 	return nfsm_rndup((win + 7) / 8);
3965 }
3966 
3967 /*
3968  * Append an args or results mbuf chain to the header chain
3969  */
3970 static int
nfs_gss_append_chain(struct nfsm_chain * nmc,mbuf_t mc)3971 nfs_gss_append_chain(struct nfsm_chain *nmc, mbuf_t mc)
3972 {
3973 	int error = 0;
3974 	mbuf_t mb, tail;
3975 
3976 	/* Connect the mbuf chains */
3977 	error = mbuf_setnext(nmc->nmc_mcur, mc);
3978 	if (error) {
3979 		return error;
3980 	}
3981 
3982 	/* Find the last mbuf in the chain */
3983 	tail = NULL;
3984 	for (mb = mc; mb; mb = mbuf_next(mb)) {
3985 		tail = mb;
3986 	}
3987 
3988 	nmc->nmc_mcur = tail;
3989 	nmc->nmc_ptr = (caddr_t) mbuf_data(tail) + mbuf_len(tail);
3990 	nmc->nmc_left = mbuf_trailingspace(tail);
3991 
3992 	return 0;
3993 }
3994 
3995 #if CONFIG_NFS_SERVER /* Only used by CONFIG_NFS_SERVER */
3996 /*
3997  * Convert an mbuf chain to an NFS mbuf chain
3998  */
3999 static void
nfs_gss_nfsm_chain(struct nfsm_chain * nmc,mbuf_t mc)4000 nfs_gss_nfsm_chain(struct nfsm_chain *nmc, mbuf_t mc)
4001 {
4002 	mbuf_t mb, tail;
4003 
4004 	/* Find the last mbuf in the chain */
4005 	tail = NULL;
4006 	for (mb = mc; mb; mb = mbuf_next(mb)) {
4007 		tail = mb;
4008 	}
4009 
4010 	nmc->nmc_mhead = mc;
4011 	nmc->nmc_mcur = tail;
4012 	nmc->nmc_ptr = (caddr_t) mbuf_data(tail) + mbuf_len(tail);
4013 	nmc->nmc_left = mbuf_trailingspace(tail);
4014 	nmc->nmc_flags = 0;
4015 }
4016 #endif /* CONFIG_NFS_SERVER */
4017 
4018 
4019 #if 0
4020 #define DISPLAYLEN 16
4021 #define MAXDISPLAYLEN 256
4022 
/*
 * Debug-only helper (compiled out by the surrounding #if 0):
 * print up to MAXDISPLAYLEN bytes of a buffer as rows of
 * DISPLAYLEN space-separated hex byte values.
 */
static void
hexdump(const char *msg, void *data, size_t len)
{
	size_t i, j;
	u_char *d = data;
	char *p, disbuf[3 * DISPLAYLEN + 1];

	printf("NFS DEBUG %s len=%d:\n", msg, (uint32_t)len);
	if (len > MAXDISPLAYLEN) {
		len = MAXDISPLAYLEN;
	}

	for (i = 0; i < len; i += DISPLAYLEN) {
		/* Format one row ("xx " per byte, 3 chars each) into disbuf. */
		for (p = disbuf, j = 0; (j + i) < len && j < DISPLAYLEN; j++, p += 3) {
			snprintf(p, 4, "%02x ", d[i + j]);
		}
		printf("\t%s\n", disbuf);
	}
}
4042 #endif
4043 
4044 #endif /* CONFIG_NFS */
4045