1 /*
2 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <nfs/nfs_conf.h>
30 #if CONFIG_NFS_SERVER
31
32 /*************
33 * These functions implement RPCSEC_GSS security for the NFS client and server.
34 * The code is specific to the use of Kerberos v5 and the use of DES MAC MD5
35 * protection as described in Internet RFC 2203 and 2623.
36 *
37 * In contrast to the original AUTH_SYS authentication, RPCSEC_GSS is stateful.
38 * It requires the client and server negotiate a secure connection as part of a
39 * security context. The context state is maintained in client and server structures.
40 * On the client side, each user of an NFS mount is assigned their own context,
41 * identified by UID, on their first use of the mount, and it persists until the
42 * unmount or until the context is renewed. Each user context has a corresponding
43 * server context which the server maintains until the client destroys it, or
44 * until the context expires.
45 *
46 * The client and server contexts are set up dynamically. When a user attempts
47 * to send an NFS request, if there is no context for the user, then one is
48 * set up via an exchange of NFS null procedure calls as described in RFC 2203.
49 * During this exchange, the client and server pass a security token that is
50 * forwarded via Mach upcall to the gssd, which invokes the GSS-API to authenticate
51 * the user to the server (and vice-versa). The client and server also receive
52 * a unique session key that can be used to digitally sign the credentials and
53 * verifier or optionally to provide data integrity and/or privacy.
54 *
55 * Once the context is complete, the client and server enter a normal data
56 * exchange phase - beginning with the NFS request that prompted the context
57 * creation. During this phase, the client's RPC header contains an RPCSEC_GSS
58 * credential and verifier, and the server returns a verifier as well.
59 * For simple authentication, the verifier contains a signed checksum of the
60 * RPC header, including the credential. The server's verifier has a signed
61 * checksum of the current sequence number.
62 *
63 * Each client call contains a sequence number that nominally increases by one
64 * on each request. The sequence number is intended to prevent replay attacks.
65 * Since the protocol can be used over UDP, there is some allowance for
66 * out-of-sequence requests, so the server checks whether the sequence numbers
67 * are within a sequence "window". If a sequence number is outside the lower
68 * bound of the window, the server silently drops the request. This has some
69 * implications for retransmission. If a request needs to be retransmitted, the
70 * client must bump the sequence number even if the request XID is unchanged.
71 *
72 * When the NFS mount is unmounted, the client sends a "destroy" credential
73 * to delete the server's context for each user of the mount. Since it's
74 * possible for the client to crash or disconnect without sending the destroy
75 * message, the server has a thread that reaps contexts that have been idle
76 * too long.
77 */
78
79 #include <sys/systm.h>
80 #include <sys/kauth.h>
81 #include <sys/param.h>
82 #include <sys/mbuf.h>
83 #include <sys/mount_internal.h>
84 #include <sys/kpi_mbuf.h>
85
86 #include <kern/host.h>
87
88 #include <mach/host_priv.h>
89 #include <mach/vm_map.h>
90 #include <vm/vm_map.h>
91 #include <vm/vm_kern_xnu.h>
92 #include <gssd/gssd_mach.h>
93
94 #include <nfs/rpcv2.h>
95 #include <nfs/nfsproto.h>
96 #include <nfs/nfs.h>
97 #include <nfs/nfs_gss.h>
98 #include <nfs/xdr_subs.h>
99 #include <nfs/nfsm_subs.h>
100 #include <nfs/nfs_gss.h>
101
102 #define NFS_GSS_MACH_MAX_RETRIES 3
103
104 #define NFSRV_GSS_DBG(...) NFSRV_DBG(NFSRV_FAC_GSS, 7, ## __VA_ARGS__)
105
106 u_long nfs_gss_svc_ctx_hash;
107 struct nfs_gss_svc_ctx_hashhead *nfs_gss_svc_ctx_hashtbl;
108 static LCK_GRP_DECLARE(nfs_gss_svc_grp, "rpcsec_gss_svc");
109 static LCK_MTX_DECLARE(nfs_gss_svc_ctx_mutex, &nfs_gss_svc_grp);
110 uint32_t nfsrv_gss_context_ttl = GSS_CTX_EXPIRE;
111 #define GSS_SVC_CTX_TTL ((uint64_t)max(2*GSS_CTX_PEND, nfsrv_gss_context_ttl) * NSEC_PER_SEC)
112
113 #define KRB5_MAX_MIC_SIZE 128
114 static uint8_t xdrpad[] = { 0x00, 0x00, 0x00, 0x00};
115
116 static struct nfs_gss_svc_ctx *nfs_gss_svc_ctx_find(uint32_t);
117 static void nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *);
118 static void nfs_gss_svc_ctx_timer(void *, void *);
119 static int nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx *);
120 static int nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *, uint32_t);
121
122 /* This is only used by server code */
123 static void nfs_gss_nfsm_chain(struct nfsm_chain *, mbuf_t);
124
125 static void host_release_special_port(mach_port_t);
126 static void nfs_gss_mach_alloc_buffer(u_char *, size_t, vm_map_copy_t *);
127 static int nfs_gss_mach_vmcopyout(vm_map_copy_t, uint32_t, u_char *);
128
129 static int nfs_gss_mchain_length(mbuf_t);
130 static int nfs_gss_append_chain(struct nfsm_chain *, mbuf_t);
131 static int nfs_gss_seqbits_size(uint32_t);
132
133 thread_call_t nfs_gss_svc_ctx_timer_call;
134 int nfs_gss_timer_on = 0;
135 uint32_t nfs_gss_ctx_count = 0;
136 const uint32_t nfs_gss_ctx_max = GSS_SVC_MAXCONTEXTS;
137
138 /*
139 * Common RPCSEC_GSS support routines
140 */
141
142 static errno_t
rpc_gss_prepend_32(mbuf_t * mb,uint32_t value)143 rpc_gss_prepend_32(mbuf_t *mb, uint32_t value)
144 {
145 int error;
146 uint32_t *data;
147
148 #if 0
149 data = mtod(*mb, uint32_t *);
150 /*
151 * If a wap token comes back and is not aligned
152 * get a new buffer (which should be aligned) to put the
153 * length in.
154 */
155 if ((uintptr_t)data & 0x3) {
156 mbuf_t nmb;
157
158 error = mbuf_get(MBUF_WAITOK, MBUF_TYPE_DATA, &nmb);
159 if (error) {
160 return error;
161 }
162 mbuf_setnext(nmb, *mb);
163 *mb = nmb;
164 }
165 #endif
166 error = mbuf_prepend(mb, sizeof(uint32_t), MBUF_WAITOK);
167 if (error) {
168 return error;
169 }
170
171 data = mtod(*mb, uint32_t *);
172 *data = txdr_unsigned(value);
173
174 return 0;
175 }
176
177 /*
 * Prepend the sequence number to the xdr encoded argument or result.
179 * Sequence number is prepended in its own mbuf.
180 *
181 * On successful return mbp_head will point to the old mbuf chain
182 * prepended with a new mbuf that has the sequence number.
183 */
184
static errno_t
rpc_gss_data_create(mbuf_t *mbp_head, uint32_t seqnum)
{
	int error;
	mbuf_t mb;
	struct nfsm_chain nmc;
	struct nfsm_chain *nmcp = &nmc;
	uint8_t *data;

	/* Get a fresh mbuf to hold the sequence number by itself */
	error = mbuf_get(MBUF_WAITOK, MBUF_TYPE_DATA, &mb);
	if (error) {
		return error;
	}
	data = mtod(mb, uint8_t *);
#if 0
	/* Reserve space for prepending */
	len = mbuf_maxlen(mb);
	len = (len & ~0x3) - NFSX_UNSIGNED;
	printf("%s: data = %p, len = %d\n", __func__, data, (int)len);
	error = mbuf_setdata(mb, data + len, 0);
	if (error || mbuf_trailingspace(mb)) {
		printf("%s: data = %p trailingspace = %d error = %d\n", __func__, mtod(mb, caddr_t), (int)mbuf_trailingspace(mb), error);
	}
#endif
	/* Reserve 16 words for prepending */
	error = mbuf_setdata(mb, data + 16 * sizeof(uint32_t), 0);
	/* nfsm_chain_* macros below are no-ops once error is set */
	nfsm_chain_init(nmcp, mb);
	nfsm_chain_add_32(error, nmcp, seqnum);
	nfsm_chain_build_done(error, nmcp);
	if (error) {
		return EINVAL;
	}
	/* Splice the original chain behind the new sequence-number mbuf */
	mbuf_setnext(nmcp->nmc_mcur, *mbp_head);
	*mbp_head = nmcp->nmc_mhead;

	return 0;
}
222
223 /*
224 * Create an rpc_gss_integ_data_t given an argument or result in mb_head.
225 * On successful return mb_head will point to the rpc_gss_integ_data_t of length len.
226 * Note mb_head will now point to a 4 byte sequence number. len does not include
227 * any extra xdr padding.
228 * Returns 0 on success, else an errno_t
229 */
230
231 static errno_t
rpc_gss_integ_data_create(gss_ctx_id_t ctx,mbuf_t * mb_head,uint32_t seqnum,uint32_t * len)232 rpc_gss_integ_data_create(gss_ctx_id_t ctx, mbuf_t *mb_head, uint32_t seqnum, uint32_t *len)
233 {
234 uint32_t error;
235 uint32_t major;
236 uint32_t length;
237 gss_buffer_desc mic;
238 struct nfsm_chain nmc = {};
239
240 /* Length of the argument or result */
241 length = nfs_gss_mchain_length(*mb_head);
242 if (len) {
243 *len = length;
244 }
245 error = rpc_gss_data_create(mb_head, seqnum);
246 if (error) {
247 return error;
248 }
249
250 /*
251 * length is the length of the rpc_gss_data
252 */
253 length += NFSX_UNSIGNED; /* Add the sequence number to the length */
254 major = gss_krb5_get_mic_mbuf(&error, ctx, 0, *mb_head, 0, length, &mic);
255 if (major != GSS_S_COMPLETE) {
256 printf("gss_krb5_get_mic_mbuf failed %d\n", error);
257 return error;
258 }
259
260 error = rpc_gss_prepend_32(mb_head, length);
261 if (error) {
262 return error;
263 }
264
265 nfsm_chain_dissect_init(error, &nmc, *mb_head);
266 /* Append GSS mic token by advancing rpc_gss_data_t length + NFSX_UNSIGNED (size of the length field) */
267 nfsm_chain_adv(error, &nmc, length + NFSX_UNSIGNED);
268 nfsm_chain_finish_mbuf(error, &nmc); // Force the mic into its own sub chain.
269 nfsm_chain_add_32(error, &nmc, mic.length);
270 nfsm_chain_add_opaque(error, &nmc, mic.value, mic.length);
271 nfsm_chain_build_done(error, &nmc);
272 gss_release_buffer(NULL, &mic);
273
274 // printmbuf("rpc_gss_integ_data_create done", *mb_head, 0, 0);
275 assert(nmc.nmc_mhead == *mb_head);
276
277 return error;
278 }
279
280 /*
281 * Create an rpc_gss_priv_data_t out of the supplied raw arguments or results in mb_head.
 * On successful return mb_head will point to a wrap token of length len.
283 * Note len does not include any xdr padding
284 * Returns 0 on success, else an errno_t
285 */
static errno_t
rpc_gss_priv_data_create(gss_ctx_id_t ctx, mbuf_t *mb_head, uint32_t seqnum, uint32_t *len)
{
	uint32_t error;
	uint32_t major;
	struct nfsm_chain nmc;
	uint32_t pad;
	uint32_t length;

	/* Prepend the sequence number (in its own mbuf) to the data */
	error = rpc_gss_data_create(mb_head, seqnum);
	if (error) {
		return error;
	}

	/* Wrap (encrypt) the sequence number plus args/results in place */
	length = nfs_gss_mchain_length(*mb_head);
	major = gss_krb5_wrap_mbuf(&error, ctx, 1, 0, mb_head, 0, length, NULL);
	if (major != GSS_S_COMPLETE) {
		return error;
	}

	/* Recompute: length is now the size of the wrap token */
	length = nfs_gss_mchain_length(*mb_head);
	if (len) {
		*len = length;
	}
	pad = nfsm_pad(length);

	/* Prepend the opaque length of rep rpc_gss_priv_data */
	error = rpc_gss_prepend_32(mb_head, length);

	if (error) {
		return error;
	}
	if (pad) {
		/* The opaque wrap token needs XDR padding to a 4-byte boundary */
		nfsm_chain_dissect_init(error, &nmc, *mb_head);
		/* Advance the opaque size of length and length data */
		nfsm_chain_adv(error, &nmc, NFSX_UNSIGNED + length);
		nfsm_chain_finish_mbuf(error, &nmc);
		nfsm_chain_add_opaque_nopad(error, &nmc, xdrpad, pad);
		nfsm_chain_build_done(error, &nmc);
	}

	return error;
}
329
330 /*************
331 *
332 * Server functions
333 */
334
335 /*
336 * Initialization when NFS starts
337 */
void
nfs_gss_svc_init(void)
{
	/* Hash table of server-side GSS contexts, keyed by context handle */
	nfs_gss_svc_ctx_hashtbl = hashinit(SVC_CTX_HASHSZ, M_TEMP, &nfs_gss_svc_ctx_hash);

	/* Callout used by nfs_gss_svc_ctx_timer() to reap idle contexts */
	nfs_gss_svc_ctx_timer_call = thread_call_allocate(nfs_gss_svc_ctx_timer, NULL);
}
345
346 /*
347 * Find a server context based on a handle value received
348 * in an RPCSEC_GSS credential.
349 */
static struct nfs_gss_svc_ctx *
nfs_gss_svc_ctx_find(uint32_t handle)
{
	struct nfs_gss_svc_ctx_hashhead *head;
	struct nfs_gss_svc_ctx *cp;
	uint64_t timenow;

	/* Handle zero is never assigned (see nfs_gss_svc_ctx_insert) */
	if (handle == 0) {
		return NULL;
	}

	head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(handle)];
	/*
	 * Don't return a context that is going to expire in GSS_CTX_PEND seconds
	 */
	clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC, &timenow);

	lck_mtx_lock(&nfs_gss_svc_ctx_mutex);

	LIST_FOREACH(cp, head, gss_svc_entries) {
		if (cp->gss_svc_handle == handle) {
			if (timenow > cp->gss_svc_incarnation + GSS_SVC_CTX_TTL) {
				/*
				 * Context has or is about to expire. Don't use.
				 * We'll return null and the client will have to create
				 * a new context.
				 */
				cp->gss_svc_handle = 0;
				/*
				 * Make sure though that we stay around for GSS_CTX_PEND seconds
				 * for other threads that might be using the context.
				 */
				cp->gss_svc_incarnation = timenow;

				cp = NULL;
				break;
			}
			/* Live context: take a reference for the caller to drop */
			lck_mtx_lock(&cp->gss_svc_mtx);
			cp->gss_svc_refcnt++;
			lck_mtx_unlock(&cp->gss_svc_mtx);
			break;
		}
	}

	lck_mtx_unlock(&nfs_gss_svc_ctx_mutex);

	return cp;
}
398
399 /*
400 * Insert a new server context into the hash table
401 * and start the context reap thread if necessary.
402 */
static void
nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *cp)
{
	struct nfs_gss_svc_ctx_hashhead *head;
	struct nfs_gss_svc_ctx *p;

	lck_mtx_lock(&nfs_gss_svc_ctx_mutex);

	/*
	 * Give the client a random handle so that if we reboot
	 * it's unlikely the client will get a bad context match.
	 * Make sure it's not zero or already assigned.
	 */
retry:
	cp->gss_svc_handle = random();
	if (cp->gss_svc_handle == 0) {
		goto retry;
	}
	head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(cp->gss_svc_handle)];
	LIST_FOREACH(p, head, gss_svc_entries)
	if (p->gss_svc_handle == cp->gss_svc_handle) {
		goto retry;
	}

	/* A new context gets GSS_CTX_PEND seconds to finish its setup */
	clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
	    &cp->gss_svc_incarnation);
	LIST_INSERT_HEAD(head, cp, gss_svc_entries);
	nfs_gss_ctx_count++;

	/* Lazily start the reaper callout the first time a context is cached */
	if (!nfs_gss_timer_on) {
		nfs_gss_timer_on = 1;

		nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
		    min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC);
	}

	lck_mtx_unlock(&nfs_gss_svc_ctx_mutex);
}
441
442 /*
443 * This function is called via the kernel's callout
444 * mechanism. It runs only when there are
445 * cached RPCSEC_GSS contexts.
446 */
447 void
nfs_gss_svc_ctx_timer(__unused void * param1,__unused void * param2)448 nfs_gss_svc_ctx_timer(__unused void *param1, __unused void *param2)
449 {
450 struct nfs_gss_svc_ctx *cp, *next;
451 uint64_t timenow;
452 int contexts = 0;
453 int i;
454
455 lck_mtx_lock(&nfs_gss_svc_ctx_mutex);
456 clock_get_uptime(&timenow);
457
458 NFSRV_GSS_DBG("is running\n");
459
460 /*
461 * Scan all the hash chains
462 */
463 for (i = 0; i < SVC_CTX_HASHSZ; i++) {
464 /*
465 * For each hash chain, look for entries
466 * that haven't been used in a while.
467 */
468 LIST_FOREACH_SAFE(cp, &nfs_gss_svc_ctx_hashtbl[i], gss_svc_entries, next) {
469 contexts++;
470 if (timenow > cp->gss_svc_incarnation +
471 (cp->gss_svc_handle ? GSS_SVC_CTX_TTL : 0)
472 && cp->gss_svc_refcnt == 0) {
473 /*
474 * A stale context - remove it
475 */
476 LIST_REMOVE(cp, gss_svc_entries);
477 NFSRV_GSS_DBG("Removing contex for %d\n", cp->gss_svc_uid);
478 if (cp->gss_svc_seqbits) {
479 kfree_data(cp->gss_svc_seqbits, nfs_gss_seqbits_size(cp->gss_svc_seqwin));
480 }
481 lck_mtx_destroy(&cp->gss_svc_mtx, &nfs_gss_svc_grp);
482 kfree_type(struct nfs_gss_svc_ctx, cp);
483 contexts--;
484 }
485 }
486 }
487
488 nfs_gss_ctx_count = contexts;
489
490 /*
491 * If there are still some cached contexts left,
492 * set up another callout to check on them later.
493 */
494 nfs_gss_timer_on = nfs_gss_ctx_count > 0;
495 if (nfs_gss_timer_on) {
496 nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
497 min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC);
498 }
499
500 lck_mtx_unlock(&nfs_gss_svc_ctx_mutex);
501 }
502
503 /*
504 * Here the server receives an RPCSEC_GSS credential in an
505 * RPC call header. First there's some checking to make sure
506 * the credential is appropriate - whether the context is still
507 * being set up, or is complete. Then we use the handle to find
508 * the server's context and validate the verifier, which contains
509 * a signed checksum of the RPC header. If the verifier checks
510 * out, we extract the user's UID and groups from the context
511 * and use it to set up a UNIX credential for the user's request.
512 */
int
nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
{
	uint32_t vers, proc, seqnum, service;
	uint32_t handle, handle_len;
	uint32_t major;
	struct nfs_gss_svc_ctx *cp = NULL;
	uint32_t flavor = 0;
	int error = 0;
	uint32_t arglen;
	size_t argsize, start, header_len;
	gss_buffer_desc cksum;
	struct nfsm_chain nmc_tmp;
	mbuf_t reply_mbuf, prev_mbuf, pad_mbuf;

	vers = proc = seqnum = service = handle_len = 0;
	arglen = 0;

	/*
	 * Pull apart the RPCSEC_GSS credential body:
	 * version, gss procedure, sequence number, service level, handle.
	 */
	nfsm_chain_get_32(error, nmc, vers);
	if (vers != RPCSEC_GSS_VERS_1) {
		error = NFSERR_AUTHERR | AUTH_REJECTCRED;
		goto nfsmout;
	}

	nfsm_chain_get_32(error, nmc, proc);
	nfsm_chain_get_32(error, nmc, seqnum);
	nfsm_chain_get_32(error, nmc, service);
	nfsm_chain_get_32(error, nmc, handle_len);
	if (error) {
		goto nfsmout;
	}

	/*
	 * Make sure context setup/destroy is being done with a nullproc
	 */
	if (proc != RPCSEC_GSS_DATA && nd->nd_procnum != NFSPROC_NULL) {
		error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
		goto nfsmout;
	}

	/*
	 * If the sequence number is greater than the max
	 * allowable, reject and have the client init a
	 * new context.
	 */
	if (seqnum > GSS_MAXSEQ) {
		error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
		goto nfsmout;
	}

	/* Map the requested GSS service level to an RPC auth flavor */
	nd->nd_sec =
	    service == RPCSEC_GSS_SVC_NONE ? RPCAUTH_KRB5 :
	    service == RPCSEC_GSS_SVC_INTEGRITY ? RPCAUTH_KRB5I :
	    service == RPCSEC_GSS_SVC_PRIVACY ? RPCAUTH_KRB5P : 0;

	if (proc == RPCSEC_GSS_INIT) {
		/*
		 * Limit the total number of contexts
		 */
		if (nfs_gss_ctx_count > nfs_gss_ctx_max) {
			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
			goto nfsmout;
		}

		/*
		 * Set up a new context
		 */
		cp = kalloc_type(struct nfs_gss_svc_ctx,
		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
		lck_mtx_init(&cp->gss_svc_mtx, &nfs_gss_svc_grp, LCK_ATTR_NULL);
		cp->gss_svc_refcnt = 1;
	} else {
		/*
		 * Use the handle to find the context
		 */
		if (handle_len != sizeof(handle)) {
			error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
			goto nfsmout;
		}
		nfsm_chain_get_32(error, nmc, handle);
		if (error) {
			goto nfsmout;
		}
		/* Takes a reference on the context; dropped at nfsmout on error */
		cp = nfs_gss_svc_ctx_find(handle);
		if (cp == NULL) {
			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
			goto nfsmout;
		}
	}

	cp->gss_svc_proc = proc;

	if (proc == RPCSEC_GSS_DATA || proc == RPCSEC_GSS_DESTROY) {
		struct posix_cred temp_pcred;

		if (cp->gss_svc_seqwin == 0) {
			/*
			 * Context isn't complete
			 */
			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
			goto nfsmout;
		}

		if (!nfs_gss_svc_seqnum_valid(cp, seqnum)) {
			/*
			 * Sequence number is bad
			 */
			error = EINVAL; // drop the request
			goto nfsmout;
		}

		/*
		 * Validate the verifier.
		 * The verifier contains an encrypted checksum
		 * of the call header from the XID up to and
		 * including the credential. We compute the
		 * checksum and compare it with what came in
		 * the verifier.
		 */
		header_len = nfsm_chain_offset(nmc);
		nfsm_chain_get_32(error, nmc, flavor);
		nfsm_chain_get_32(error, nmc, cksum.length);
		if (error) {
			goto nfsmout;
		}
		if (flavor != RPCSEC_GSS || cksum.length > KRB5_MAX_MIC_SIZE) {
			error = NFSERR_AUTHERR | AUTH_BADVERF;
		} else {
			cksum.value = kalloc_data(cksum.length, Z_WAITOK | Z_NOFAIL);
			nfsm_chain_get_opaque(error, nmc, cksum.length, cksum.value);
		}
		if (error) {
			goto nfsmout;
		}

		/* Now verify the client's call header checksum */
		major = gss_krb5_verify_mic_mbuf((uint32_t *)&error, cp->gss_svc_ctx_id, nmc->nmc_mhead, 0, header_len, &cksum, NULL);
		/* NOTE(review): cksum.value came from kalloc_data; assumes gss_release_buffer frees it compatibly — confirm */
		(void)gss_release_buffer(NULL, &cksum);
		if (major != GSS_S_COMPLETE) {
			printf("Server header: gss_krb5_verify_mic_mbuf failed %d\n", error);
			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
			goto nfsmout;
		}

		nd->nd_gss_seqnum = seqnum;

		/*
		 * Set up the user's cred
		 */
		bzero(&temp_pcred, sizeof(temp_pcred));
		temp_pcred.cr_uid = cp->gss_svc_uid;
		bcopy(cp->gss_svc_gids, temp_pcred.cr_groups,
		    sizeof(gid_t) * cp->gss_svc_ngroups);
		temp_pcred.cr_ngroups = (short)cp->gss_svc_ngroups;

		nd->nd_cr = posix_cred_create(&temp_pcred);
		if (nd->nd_cr == NULL) {
			error = ENOMEM;
			goto nfsmout;
		}
		clock_get_uptime(&cp->gss_svc_incarnation);

		/*
		 * If the call arguments are integrity or privacy protected
		 * then we need to check them here.
		 */
		switch (service) {
		case RPCSEC_GSS_SVC_NONE:
			/* nothing to do */
			break;
		case RPCSEC_GSS_SVC_INTEGRITY:
			/*
			 * Here's what we expect in the integrity call args:
			 *
			 * - length of seq num + call args (4 bytes)
			 * - sequence number (4 bytes)
			 * - call args (variable bytes)
			 * - length of checksum token
			 * - checksum of seqnum + call args
			 */
			nfsm_chain_get_32(error, nmc, arglen);          // length of args
			if (arglen > NFS_MAXPACKET) {
				error = EBADRPC;
				goto nfsmout;
			}

			/* Peek past the args with a copy of the chain to reach the mic */
			nmc_tmp = *nmc;
			nfsm_chain_adv(error, &nmc_tmp, arglen);
			nfsm_chain_get_32(error, &nmc_tmp, cksum.length);
			cksum.value = NULL;
			if (cksum.length > 0 && cksum.length < GSS_MAX_MIC_LEN) {
				cksum.value = kalloc_data(cksum.length, Z_WAITOK | Z_NOFAIL);
			} else {
				error = EBADRPC;
				goto nfsmout;
			}
			nfsm_chain_get_opaque(error, &nmc_tmp, cksum.length, cksum.value);

			/* Verify the checksum over the call args */
			start = nfsm_chain_offset(nmc);

			major = gss_krb5_verify_mic_mbuf((uint32_t *)&error, cp->gss_svc_ctx_id,
			    nmc->nmc_mhead, start, arglen, &cksum, NULL);
			kfree_data(cksum.value, cksum.length);
			if (major != GSS_S_COMPLETE) {
				printf("Server args: gss_krb5_verify_mic_mbuf failed %d\n", error);
				error = EBADRPC;
				goto nfsmout;
			}

			/*
			 * Get the sequence number prepended to the args
			 * and compare it against the one sent in the
			 * call credential.
			 */
			nfsm_chain_get_32(error, nmc, seqnum);
			if (seqnum != nd->nd_gss_seqnum) {
				error = EBADRPC;                        // returns as GARBAGEARGS
				goto nfsmout;
			}
			break;
		case RPCSEC_GSS_SVC_PRIVACY:
			/*
			 * Here's what we expect in the privacy call args:
			 *
			 * - length of wrap token
			 * - wrap token (37-40 bytes)
			 */
			prev_mbuf = nmc->nmc_mcur;
			nfsm_chain_get_32(error, nmc, arglen);          // length of args
			if (arglen > NFS_MAXPACKET) {
				error = EBADRPC;
				goto nfsmout;
			}

			/* Get the wrap token (current mbuf in the chain starting at the current offset) */
			start = nmc->nmc_ptr - mtod(nmc->nmc_mcur, caddr_t);

			/* split out the wrap token */
			argsize = arglen;
			error = gss_normalize_mbuf(nmc->nmc_mcur, start, &argsize, &reply_mbuf, &pad_mbuf, 0);
			if (error) {
				goto nfsmout;
			}

			assert(argsize == arglen);
			if (pad_mbuf) {
				assert(nfsm_pad(arglen) == mbuf_len(pad_mbuf));
				mbuf_free(pad_mbuf);
			} else {
				assert(nfsm_pad(arglen) == 0);
			}

			/* Decrypt the wrap token in place */
			major = gss_krb5_unwrap_mbuf((uint32_t *)&error, cp->gss_svc_ctx_id, &reply_mbuf, 0, arglen, NULL, NULL);
			if (major != GSS_S_COMPLETE) {
				printf("%s: gss_krb5_unwrap_mbuf failes %d\n", __func__, error);
				goto nfsmout;
			}

			/* Now replace the wrapped arguments with the unwrapped ones */
			mbuf_setnext(prev_mbuf, reply_mbuf);
			nmc->nmc_mcur = reply_mbuf;
			nmc->nmc_ptr = mtod(reply_mbuf, caddr_t);
			nmc->nmc_left = mbuf_len(reply_mbuf);

			/*
			 * - sequence number (4 bytes)
			 * - call args
			 */

			// nfsm_chain_reverse(nmc, nfsm_pad(toklen));

			/*
			 * Get the sequence number prepended to the args
			 * and compare it against the one sent in the
			 * call credential.
			 */
			nfsm_chain_get_32(error, nmc, seqnum);
			if (seqnum != nd->nd_gss_seqnum) {
				printf("%s: Sequence number mismatch seqnum = %d nd->nd_gss_seqnum = %d\n",
				    __func__, seqnum, nd->nd_gss_seqnum);
				printmbuf("reply_mbuf", nmc->nmc_mhead, 0, 0);
				printf("reply_mbuf %p nmc_head %p\n", reply_mbuf, nmc->nmc_mhead);
				error = EBADRPC;                        // returns as GARBAGEARGS
				goto nfsmout;
			}
			break;
		}
	} else {
		uint32_t verflen;
		/*
		 * If the proc is RPCSEC_GSS_INIT or RPCSEC_GSS_CONTINUE_INIT
		 * then we expect a null verifier.
		 */
		nfsm_chain_get_32(error, nmc, flavor);
		nfsm_chain_get_32(error, nmc, verflen);
		if (error || flavor != RPCAUTH_NULL || verflen > 0) {
			error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
		}
		if (error) {
			if (proc == RPCSEC_GSS_INIT) {
				/* A brand-new context never made it into the hash table; free it here */
				lck_mtx_destroy(&cp->gss_svc_mtx, &nfs_gss_svc_grp);
				kfree_type(struct nfs_gss_svc_ctx, cp);
				cp = NULL;
			}
			goto nfsmout;
		}
	}

	nd->nd_gss_context = cp;
	return 0;
nfsmout:
	if (cp) {
		nfs_gss_svc_ctx_deref(cp);
	}
	return error;
}
830
831 /*
832 * Insert the server's verifier into the RPC reply header.
833 * It contains a signed checksum of the sequence number that
834 * was received in the RPC call.
835 * Then go on to add integrity or privacy if necessary.
836 */
837 int
nfs_gss_svc_verf_put(struct nfsrv_descript * nd,struct nfsm_chain * nmc)838 nfs_gss_svc_verf_put(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
839 {
840 struct nfs_gss_svc_ctx *cp;
841 int error = 0;
842 gss_buffer_desc cksum, seqbuf;
843 uint32_t network_seqnum;
844 cp = nd->nd_gss_context;
845 uint32_t major;
846
847 if (cp->gss_svc_major != GSS_S_COMPLETE) {
848 /*
849 * If the context isn't yet complete
850 * then return a null verifier.
851 */
852 nfsm_chain_add_32(error, nmc, RPCAUTH_NULL);
853 nfsm_chain_add_32(error, nmc, 0);
854 return error;
855 }
856
857 /*
858 * Compute checksum of the request seq number
859 * If it's the final reply of context setup
860 * then return the checksum of the context
861 * window size.
862 */
863 seqbuf.length = NFSX_UNSIGNED;
864 if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
865 cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) {
866 network_seqnum = htonl(cp->gss_svc_seqwin);
867 } else {
868 network_seqnum = htonl(nd->nd_gss_seqnum);
869 }
870 seqbuf.value = &network_seqnum;
871
872 major = gss_krb5_get_mic((uint32_t *)&error, cp->gss_svc_ctx_id, 0, &seqbuf, &cksum);
873 if (major != GSS_S_COMPLETE) {
874 return error;
875 }
876
877 /*
878 * Now wrap it in a token and add
879 * the verifier to the reply.
880 */
881 nfsm_chain_add_32(error, nmc, RPCSEC_GSS);
882 nfsm_chain_add_32(error, nmc, cksum.length);
883 nfsm_chain_add_opaque(error, nmc, cksum.value, cksum.length);
884 gss_release_buffer(NULL, &cksum);
885
886 return error;
887 }
888
889 /*
890 * The results aren't available yet, but if they need to be
891 * checksummed for integrity protection or encrypted, then
892 * we can record the start offset here, insert a place-holder
893 * for the results length, as well as the sequence number.
894 * The rest of the work is done later by nfs_gss_svc_protect_reply()
895 * when the results are available.
896 */
897 int
nfs_gss_svc_prepare_reply(struct nfsrv_descript * nd,struct nfsm_chain * nmc)898 nfs_gss_svc_prepare_reply(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
899 {
900 struct nfs_gss_svc_ctx *cp = nd->nd_gss_context;
901 int error = 0;
902
903 if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
904 cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) {
905 return 0;
906 }
907
908 switch (nd->nd_sec) {
909 case RPCAUTH_KRB5:
910 /* Nothing to do */
911 break;
912 case RPCAUTH_KRB5I:
913 case RPCAUTH_KRB5P:
914 nd->nd_gss_mb = nmc->nmc_mcur; // record current mbuf
915 nfsm_chain_finish_mbuf(error, nmc); // split the chain here
916 break;
917 }
918
919 return error;
920 }
921
922 /*
923 * The results are checksummed or encrypted for return to the client
924 */
925 int
nfs_gss_svc_protect_reply(struct nfsrv_descript * nd,mbuf_t mrep __unused)926 nfs_gss_svc_protect_reply(struct nfsrv_descript *nd, mbuf_t mrep __unused)
927 {
928 struct nfs_gss_svc_ctx *cp = nd->nd_gss_context;
929 struct nfsm_chain nmrep_res, *nmc_res = &nmrep_res;
930 mbuf_t mb, results;
931 uint32_t reslen;
932 int error = 0;
933
934 /* XXX
935 * Using a reference to the mbuf where we previously split the reply
936 * mbuf chain, we split the mbuf chain argument into two mbuf chains,
937 * one that allows us to prepend a length field or token, (nmc_pre)
938 * and the second which holds just the results that we're going to
939 * checksum and/or encrypt. When we're done, we join the chains back
940 * together.
941 */
942
943 mb = nd->nd_gss_mb; // the mbuf where we split
944 results = mbuf_next(mb); // first mbuf in the results
945 error = mbuf_setnext(mb, NULL); // disconnect the chains
946 if (error) {
947 return error;
948 }
949 nfs_gss_nfsm_chain(nmc_res, mb); // set up the prepend chain
950 nfsm_chain_build_done(error, nmc_res);
951 if (error) {
952 return error;
953 }
954
955 if (nd->nd_sec == RPCAUTH_KRB5I) {
956 error = rpc_gss_integ_data_create(cp->gss_svc_ctx_id, &results, nd->nd_gss_seqnum, &reslen);
957 } else {
958 /* RPCAUTH_KRB5P */
959 error = rpc_gss_priv_data_create(cp->gss_svc_ctx_id, &results, nd->nd_gss_seqnum, &reslen);
960 }
961 nfs_gss_append_chain(nmc_res, results); // Append the results mbufs
962 nfsm_chain_build_done(error, nmc_res);
963
964 return error;
965 }
966
967 /*
968 * This function handles the context setup calls from the client.
969 * Essentially, it implements the NFS null procedure calls when
970 * an RPCSEC_GSS credential is used.
971 * This is the context maintenance function. It creates and
972 * destroys server contexts at the whim of the client.
973 * During context creation, it receives GSS-API tokens from the
974 * client, passes them up to gssd, and returns a received token
975 * back to the client in the null procedure reply.
976 */
977 int
nfs_gss_svc_ctx_init(struct nfsrv_descript * nd,struct nfsrv_sock * slp,mbuf_t * mrepp)978 nfs_gss_svc_ctx_init(struct nfsrv_descript *nd, struct nfsrv_sock *slp, mbuf_t *mrepp)
979 {
980 struct nfs_gss_svc_ctx *cp = NULL;
981 int error = 0;
982 int autherr = 0;
983 struct nfsm_chain *nmreq, nmrep;
984 int sz;
985
986 nmreq = &nd->nd_nmreq;
987 nfsm_chain_null(&nmrep);
988 *mrepp = NULL;
989 cp = nd->nd_gss_context;
990 nd->nd_repstat = 0;
991
992 switch (cp->gss_svc_proc) {
993 case RPCSEC_GSS_INIT:
994 nfs_gss_svc_ctx_insert(cp);
995 OS_FALLTHROUGH;
996
997 case RPCSEC_GSS_CONTINUE_INIT:
998 /* Get the token from the request */
999 nfsm_chain_get_32(error, nmreq, cp->gss_svc_tokenlen);
1000 cp->gss_svc_token = NULL;
1001 if (cp->gss_svc_tokenlen > 0 && cp->gss_svc_tokenlen < GSS_MAX_TOKEN_LEN) {
1002 cp->gss_svc_token = kalloc_data(cp->gss_svc_tokenlen, Z_WAITOK);
1003 }
1004 if (cp->gss_svc_token == NULL) {
1005 autherr = RPCSEC_GSS_CREDPROBLEM;
1006 break;
1007 }
1008 nfsm_chain_get_opaque(error, nmreq, cp->gss_svc_tokenlen, cp->gss_svc_token);
1009
1010 /* Use the token in a gss_accept_sec_context upcall */
1011 error = nfs_gss_svc_gssd_upcall(cp);
1012 if (error) {
1013 autherr = RPCSEC_GSS_CREDPROBLEM;
1014 if (error == NFSERR_EAUTH) {
1015 error = 0;
1016 }
1017 break;
1018 }
1019
1020 /*
1021 * If the context isn't complete, pass the new token
1022 * back to the client for another round.
1023 */
1024 if (cp->gss_svc_major != GSS_S_COMPLETE) {
1025 break;
1026 }
1027
1028 /*
1029 * Now the server context is complete.
1030 * Finish setup.
1031 */
1032 clock_get_uptime(&cp->gss_svc_incarnation);
1033
1034 cp->gss_svc_seqwin = GSS_SVC_SEQWINDOW;
1035 cp->gss_svc_seqbits = kalloc_data(nfs_gss_seqbits_size(cp->gss_svc_seqwin), Z_WAITOK | Z_ZERO);
1036 if (cp->gss_svc_seqbits == NULL) {
1037 autherr = RPCSEC_GSS_CREDPROBLEM;
1038 break;
1039 }
1040 break;
1041
1042 case RPCSEC_GSS_DATA:
1043 /* Just a nullproc ping - do nothing */
1044 break;
1045
1046 case RPCSEC_GSS_DESTROY:
1047 /*
1048 * Don't destroy the context immediately because
1049 * other active requests might still be using it.
1050 * Instead, schedule it for destruction after
1051 * GSS_CTX_PEND time has elapsed.
1052 */
1053 cp = nfs_gss_svc_ctx_find(cp->gss_svc_handle);
1054 if (cp != NULL) {
1055 cp->gss_svc_handle = 0; // so it can't be found
1056 lck_mtx_lock(&cp->gss_svc_mtx);
1057 clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
1058 &cp->gss_svc_incarnation);
1059 lck_mtx_unlock(&cp->gss_svc_mtx);
1060 }
1061 break;
1062 default:
1063 autherr = RPCSEC_GSS_CREDPROBLEM;
1064 break;
1065 }
1066
1067 /* Now build the reply */
1068
1069 if (nd->nd_repstat == 0) {
1070 nd->nd_repstat = autherr ? (NFSERR_AUTHERR | autherr) : NFSERR_RETVOID;
1071 }
1072 sz = 7 * NFSX_UNSIGNED + nfsm_rndup(cp->gss_svc_tokenlen); // size of results
1073 error = nfsrv_rephead(nd, slp, &nmrep, sz);
1074 *mrepp = nmrep.nmc_mhead;
1075 if (error || autherr) {
1076 goto nfsmout;
1077 }
1078
1079 if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
1080 cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) {
1081 nfsm_chain_add_32(error, &nmrep, sizeof(cp->gss_svc_handle));
1082 nfsm_chain_add_32(error, &nmrep, cp->gss_svc_handle);
1083
1084 nfsm_chain_add_32(error, &nmrep, cp->gss_svc_major);
1085 nfsm_chain_add_32(error, &nmrep, cp->gss_svc_minor);
1086 nfsm_chain_add_32(error, &nmrep, cp->gss_svc_seqwin);
1087
1088 nfsm_chain_add_32(error, &nmrep, cp->gss_svc_tokenlen);
1089 if (cp->gss_svc_token != NULL) {
1090 nfsm_chain_add_opaque(error, &nmrep, cp->gss_svc_token, cp->gss_svc_tokenlen);
1091 kfree_data_addr(cp->gss_svc_token);
1092 }
1093 }
1094
1095 nfsmout:
1096 if (autherr != 0) {
1097 nd->nd_gss_context = NULL;
1098 LIST_REMOVE(cp, gss_svc_entries);
1099 if (cp->gss_svc_seqbits != NULL) {
1100 kfree_data(cp->gss_svc_seqbits, nfs_gss_seqbits_size(cp->gss_svc_seqwin));
1101 }
1102 if (cp->gss_svc_token != NULL) {
1103 kfree_data_addr(cp->gss_svc_token);
1104 }
1105 lck_mtx_destroy(&cp->gss_svc_mtx, &nfs_gss_svc_grp);
1106 kfree_type(struct nfs_gss_svc_ctx, cp);
1107 }
1108
1109 nfsm_chain_build_done(error, &nmrep);
1110 if (error) {
1111 nfsm_chain_cleanup(&nmrep);
1112 *mrepp = NULL;
1113 }
1114 return error;
1115 }
1116
1117 /*
1118 * This is almost a mirror-image of the client side upcall.
1119 * It passes and receives a token, but invokes gss_accept_sec_context.
1120 * If it's the final call of the context setup, then gssd also returns
1121 * the session key and the user's UID.
1122 */
static int
nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx *cp)
{
	kern_return_t kr;
	mach_port_t mp;
	int retry_cnt = 0;
	gssd_byte_buffer octx = NULL;           /* lucid context blob returned by gssd */
	uint32_t lucidlen = 0;
	void *lucid_ctx_buffer;
	uint32_t ret_flags;
	vm_map_copy_t itoken = NULL;            /* input token, passed out-of-line */
	gssd_byte_buffer otoken = NULL;         /* output token returned by gssd */
	mach_msg_type_number_t otokenlen;
	int error = 0;
	char svcname[] = "nfs";

	/* Obtain a send right to gssd's host special port */
	kr = host_get_gssd_port(host_priv_self(), &mp);
	if (kr != KERN_SUCCESS) {
		printf("nfs_gss_svc_gssd_upcall: can't get gssd port, status %x (%d)\n", kr, kr);
		goto out;
	}
	if (!IPC_PORT_VALID(mp)) {
		printf("nfs_gss_svc_gssd_upcall: gssd port not valid\n");
		goto out;
	}

	/* Copy the client's token into a VM copy object for the out-of-line argument */
	if (cp->gss_svc_tokenlen > 0) {
		nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken);
	}

retry:
	/* NOTE(review): these unconditional printfs fire on every context setup — looks like leftover debug logging; confirm before release */
	printf("Calling mach_gss_accept_sec_context\n");
	kr = mach_gss_accept_sec_context(
		mp,
		(gssd_byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_svc_tokenlen,
		svcname,
		0,
		&cp->gss_svc_context,
		&cp->gss_svc_cred_handle,
		&ret_flags,
		&cp->gss_svc_uid,
		cp->gss_svc_gids,
		&cp->gss_svc_ngroups,
		&octx, (mach_msg_type_number_t *) &lucidlen,
		&otoken, &otokenlen,
		&cp->gss_svc_major,
		&cp->gss_svc_minor);

	printf("mach_gss_accept_sec_context returned %d\n", kr);
	if (kr != KERN_SUCCESS) {
		printf("nfs_gss_svc_gssd_upcall failed: %x (%d)\n", kr, kr);
		/*
		 * If gssd died before any context state was established,
		 * retry a bounded number of times, rebuilding the input
		 * token buffer first (presumably the previous copy object
		 * was consumed by the failed send — confirm against MIG
		 * out-of-line ownership rules).
		 */
		if (kr == MIG_SERVER_DIED && cp->gss_svc_context == 0 &&
		    retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES) {
			if (cp->gss_svc_tokenlen > 0) {
				nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken);
			}
			goto retry;
		}
		host_release_special_port(mp);
		goto out;
	}

	host_release_special_port(mp);

	/*
	 * A non-zero lucidlen means gssd returned the completed context in
	 * its lucid (exportable) form; build the kernel crypto context from it.
	 */
	if (lucidlen > 0) {
		if (lucidlen > MAX_LUCIDLEN) {
			printf("nfs_gss_svc_gssd_upcall: bad context length (%d)\n", lucidlen);
			vm_map_copy_discard((vm_map_copy_t) octx);
			vm_map_copy_discard((vm_map_copy_t) otoken);
			goto out;
		}
		lucid_ctx_buffer = kalloc_data(lucidlen, Z_WAITOK | Z_ZERO);
		error = nfs_gss_mach_vmcopyout((vm_map_copy_t) octx, lucidlen, lucid_ctx_buffer);
		if (error) {
			vm_map_copy_discard((vm_map_copy_t) octx);
			vm_map_copy_discard((vm_map_copy_t) otoken);
			kfree_data(lucid_ctx_buffer, lucidlen);
			goto out;
		}
		/* Replace any context left over from an earlier negotiation round */
		if (cp->gss_svc_ctx_id) {
			gss_krb5_destroy_context(cp->gss_svc_ctx_id);
		}
		cp->gss_svc_ctx_id = gss_krb5_make_context(lucid_ctx_buffer, lucidlen);
		kfree_data(lucid_ctx_buffer, lucidlen);
		if (cp->gss_svc_ctx_id == NULL) {
			printf("Failed to make context from lucid_ctx_buffer\n");
			goto out;
		}
	}

	/* Free context token used as input */
	if (cp->gss_svc_token) {
		kfree_data(cp->gss_svc_token, cp->gss_svc_tokenlen);
	}
	cp->gss_svc_token = NULL;
	cp->gss_svc_tokenlen = 0;

	if (otokenlen > 0) {
		/* Set context token to gss output token */
		cp->gss_svc_token = kalloc_data(otokenlen, Z_WAITOK);
		if (cp->gss_svc_token == NULL) {
			printf("nfs_gss_svc_gssd_upcall: could not allocate %d bytes\n", otokenlen);
			vm_map_copy_discard((vm_map_copy_t) otoken);
			return ENOMEM;
		}
		error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, otokenlen, cp->gss_svc_token);
		if (error) {
			vm_map_copy_discard((vm_map_copy_t) otoken);
			kfree_data(cp->gss_svc_token, otokenlen);
			return NFSERR_EAUTH;
		}
		cp->gss_svc_tokenlen = otokenlen;
	}

	return 0;

out:
	/* Error exit: release the stored input token and report an auth failure */
	kfree_data(cp->gss_svc_token, cp->gss_svc_tokenlen);
	cp->gss_svc_tokenlen = 0;

	return NFSERR_EAUTH;
}
1245
1246 /*
1247 * Validate the sequence number in the credential as described
1248 * in RFC 2203 Section 5.3.3.1
1249 *
1250 * Here the window of valid sequence numbers is represented by
1251 * a bitmap. As each sequence number is received, its bit is
1252 * set in the bitmap. An invalid sequence number lies below
1253 * the lower bound of the window, or is within the window but
1254 * has its bit already set.
1255 */
1256 static int
nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx * cp,uint32_t seq)1257 nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *cp, uint32_t seq)
1258 {
1259 uint32_t *bits = cp->gss_svc_seqbits;
1260 uint32_t win = cp->gss_svc_seqwin;
1261 uint32_t i;
1262
1263 lck_mtx_lock(&cp->gss_svc_mtx);
1264
1265 /*
1266 * If greater than the window upper bound,
1267 * move the window up, and set the bit.
1268 */
1269 if (seq > cp->gss_svc_seqmax) {
1270 if (seq - cp->gss_svc_seqmax > win) {
1271 bzero(bits, nfs_gss_seqbits_size(win));
1272 } else {
1273 for (i = cp->gss_svc_seqmax + 1; i < seq; i++) {
1274 win_resetbit(bits, i % win);
1275 }
1276 }
1277 win_setbit(bits, seq % win);
1278 cp->gss_svc_seqmax = seq;
1279 lck_mtx_unlock(&cp->gss_svc_mtx);
1280 return 1;
1281 }
1282
1283 /*
1284 * Invalid if below the lower bound of the window
1285 */
1286 if (seq <= cp->gss_svc_seqmax - win) {
1287 lck_mtx_unlock(&cp->gss_svc_mtx);
1288 return 0;
1289 }
1290
1291 /*
1292 * In the window, invalid if the bit is already set
1293 */
1294 if (win_getbit(bits, seq % win)) {
1295 lck_mtx_unlock(&cp->gss_svc_mtx);
1296 return 0;
1297 }
1298 win_setbit(bits, seq % win);
1299 lck_mtx_unlock(&cp->gss_svc_mtx);
1300 return 1;
1301 }
1302
1303 /*
1304 * Drop a reference to a context
1305 *
1306 * Note that it's OK for the context to exist
1307 * with a refcount of zero. The refcount isn't
1308 * checked until we're about to reap an expired one.
1309 */
1310 void
nfs_gss_svc_ctx_deref(struct nfs_gss_svc_ctx * cp)1311 nfs_gss_svc_ctx_deref(struct nfs_gss_svc_ctx *cp)
1312 {
1313 lck_mtx_lock(&cp->gss_svc_mtx);
1314 if (cp->gss_svc_refcnt > 0) {
1315 cp->gss_svc_refcnt--;
1316 } else {
1317 printf("nfs_gss_ctx_deref: zero refcount\n");
1318 }
1319 lck_mtx_unlock(&cp->gss_svc_mtx);
1320 }
1321
1322 /*
1323 * Called at NFS server shutdown - destroy all contexts
1324 */
1325 void
nfs_gss_svc_cleanup(void)1326 nfs_gss_svc_cleanup(void)
1327 {
1328 struct nfs_gss_svc_ctx_hashhead *head;
1329 struct nfs_gss_svc_ctx *cp, *ncp;
1330 int i;
1331
1332 lck_mtx_lock(&nfs_gss_svc_ctx_mutex);
1333
1334 /*
1335 * Run through all the buckets
1336 */
1337 for (i = 0; i < SVC_CTX_HASHSZ; i++) {
1338 /*
1339 * Remove and free all entries in the bucket
1340 */
1341 head = &nfs_gss_svc_ctx_hashtbl[i];
1342 LIST_FOREACH_SAFE(cp, head, gss_svc_entries, ncp) {
1343 LIST_REMOVE(cp, gss_svc_entries);
1344 if (cp->gss_svc_seqbits) {
1345 kfree_data(cp->gss_svc_seqbits, nfs_gss_seqbits_size(cp->gss_svc_seqwin));
1346 }
1347 lck_mtx_destroy(&cp->gss_svc_mtx, &nfs_gss_svc_grp);
1348 kfree_type(struct nfs_gss_svc_ctx, cp);
1349 }
1350 }
1351
1352 lck_mtx_unlock(&nfs_gss_svc_ctx_mutex);
1353 }
1354
1355 /*************
1356 * The following functions are used by both client and server.
1357 */
1358
1359 /*
1360 * Release a host special port that was obtained by host_get_special_port
1361 * or one of its macros (host_get_gssd_port in this case).
1362 * This really should be in a public kpi.
1363 */
1364
1365 /* This should be in a public header if this routine is not */
1366 static void
host_release_special_port(mach_port_t mp)1367 host_release_special_port(mach_port_t mp)
1368 {
1369 if (IPC_PORT_VALID(mp)) {
1370 ipc_port_release_send(mp);
1371 }
1372 }
1373
1374 /*
1375 * The token that is sent and received in the gssd upcall
1376 * has unbounded variable length. Mach RPC does not pass
1377 * the token in-line. Instead it uses page mapping to handle
1378 * these parameters. This function allocates a VM buffer
1379 * to hold the token for an upcall and copies the token
1380 * (received from the client) into it. The VM buffer is
1381 * marked with a src_destroy flag so that the upcall will
1382 * automatically de-allocate the buffer when the upcall is
1383 * complete.
1384 */
1385 static void
nfs_gss_mach_alloc_buffer(u_char * buf,size_t buflen,vm_map_copy_t * addr)1386 nfs_gss_mach_alloc_buffer(u_char *buf, size_t buflen, vm_map_copy_t *addr)
1387 {
1388 kern_return_t kr;
1389 vm_offset_t kmem_buf;
1390 vm_size_t tbuflen;
1391
1392 *addr = NULL;
1393 if (buf == NULL || buflen == 0) {
1394 return;
1395 }
1396
1397 tbuflen = vm_map_round_page(buflen, vm_map_page_mask(ipc_kernel_map));
1398
1399 if (tbuflen < buflen) {
1400 printf("nfs_gss_mach_alloc_buffer: vm_map_round_page failed\n");
1401 return;
1402 }
1403
1404 kr = kmem_alloc(ipc_kernel_map, &kmem_buf, tbuflen,
1405 KMA_DATA, VM_KERN_MEMORY_FILE);
1406 if (kr != 0) {
1407 printf("nfs_gss_mach_alloc_buffer: vm_allocate failed\n");
1408 return;
1409 }
1410
1411 bcopy(buf, (char *)kmem_buf, buflen);
1412 bzero((char *)kmem_buf + buflen, tbuflen - buflen);
1413
1414 kr = vm_map_unwire(ipc_kernel_map, kmem_buf, kmem_buf + tbuflen, FALSE);
1415 if (kr != 0) {
1416 printf("nfs_gss_mach_alloc_buffer: vm_map_unwire failed\n");
1417 return;
1418 }
1419
1420 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t) kmem_buf,
1421 (vm_map_size_t) buflen, TRUE, addr);
1422 if (kr != 0) {
1423 printf("nfs_gss_mach_alloc_buffer: vm_map_copyin failed\n");
1424 return;
1425 }
1426 }
1427
1428 /*
1429 * Here we handle a token received from the gssd via an upcall.
 * The received token resides in an allocated VM buffer.
1431 * We copy the token out of this buffer to a chunk of malloc'ed
1432 * memory of the right size, then de-allocate the VM buffer.
1433 */
1434 static int
nfs_gss_mach_vmcopyout(vm_map_copy_t in,uint32_t len,u_char * out)1435 nfs_gss_mach_vmcopyout(vm_map_copy_t in, uint32_t len, u_char *out)
1436 {
1437 vm_map_offset_t map_data;
1438 vm_offset_t data;
1439 int error;
1440
1441 error = vm_map_copyout(ipc_kernel_map, &map_data, in);
1442 if (error) {
1443 return error;
1444 }
1445
1446 data = CAST_DOWN(vm_offset_t, map_data);
1447 bcopy((void *) data, out, len);
1448 vm_deallocate(ipc_kernel_map, data, len);
1449
1450 return 0;
1451 }
1452
1453 /*
1454 * Return the number of bytes in an mbuf chain.
1455 */
1456 static int
nfs_gss_mchain_length(mbuf_t mhead)1457 nfs_gss_mchain_length(mbuf_t mhead)
1458 {
1459 mbuf_t mb;
1460 int len = 0;
1461
1462 for (mb = mhead; mb; mb = mbuf_next(mb)) {
1463 len += mbuf_len(mb);
1464 }
1465
1466 return len;
1467 }
1468
1469 /*
1470 * Return the size for the sequence numbers bitmap.
1471 */
1472 static int
nfs_gss_seqbits_size(uint32_t win)1473 nfs_gss_seqbits_size(uint32_t win)
1474 {
1475 return nfsm_rndup((win + 7) / 8);
1476 }
1477
1478 /*
1479 * Append an args or results mbuf chain to the header chain
1480 */
1481 static int
nfs_gss_append_chain(struct nfsm_chain * nmc,mbuf_t mc)1482 nfs_gss_append_chain(struct nfsm_chain *nmc, mbuf_t mc)
1483 {
1484 int error = 0;
1485 mbuf_t mb, tail;
1486
1487 /* Connect the mbuf chains */
1488 error = mbuf_setnext(nmc->nmc_mcur, mc);
1489 if (error) {
1490 return error;
1491 }
1492
1493 /* Find the last mbuf in the chain */
1494 tail = NULL;
1495 for (mb = mc; mb; mb = mbuf_next(mb)) {
1496 tail = mb;
1497 }
1498
1499 nmc->nmc_mcur = tail;
1500 nmc->nmc_ptr = mtod(tail, caddr_t) + mbuf_len(tail);
1501 nmc->nmc_left = mbuf_trailingspace(tail);
1502
1503 return 0;
1504 }
1505
1506 /*
1507 * Convert an mbuf chain to an NFS mbuf chain
1508 */
1509 static void
nfs_gss_nfsm_chain(struct nfsm_chain * nmc,mbuf_t mc)1510 nfs_gss_nfsm_chain(struct nfsm_chain *nmc, mbuf_t mc)
1511 {
1512 mbuf_t mb, tail;
1513
1514 /* Find the last mbuf in the chain */
1515 tail = NULL;
1516 for (mb = mc; mb; mb = mbuf_next(mb)) {
1517 tail = mb;
1518 }
1519
1520 nmc->nmc_mhead = mc;
1521 nmc->nmc_mcur = tail;
1522 nmc->nmc_ptr = mtod(tail, caddr_t) + mbuf_len(tail);
1523 nmc->nmc_left = mbuf_trailingspace(tail);
1524 nmc->nmc_flags = 0;
1525 }
1526
1527 #if 0
1528 #define DISPLAYLEN 16
1529 #define MAXDISPLAYLEN 256
1530
1531 static void
1532 hexdump(const char *msg, void *data, size_t len)
1533 {
1534 size_t i, j;
1535 u_char *d = data;
1536 char *p, disbuf[3 * DISPLAYLEN + 1];
1537
1538 printf("NFS DEBUG %s len=%d:\n", msg, (uint32_t)len);
1539 if (len > MAXDISPLAYLEN) {
1540 len = MAXDISPLAYLEN;
1541 }
1542
1543 for (i = 0; i < len; i += DISPLAYLEN) {
1544 for (p = disbuf, j = 0; (j + i) < len && j < DISPLAYLEN; j++, p += 3) {
1545 snprintf(p, 4, "%02x ", d[i + j]);
1546 }
1547 printf("\t%s\n", disbuf);
1548 }
1549 }
1550 #endif
1551
1552 #endif /* CONFIG_NFS_SERVER */
1553