1 /*
2 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <nfs/nfs_conf.h>
30 #if CONFIG_NFS_SERVER
31
32 /*************
33 * These functions implement RPCSEC_GSS security for the NFS client and server.
34 * The code is specific to the use of Kerberos v5 and the use of DES MAC MD5
35 * protection as described in Internet RFC 2203 and 2623.
36 *
37 * In contrast to the original AUTH_SYS authentication, RPCSEC_GSS is stateful.
38 * It requires the client and server negotiate a secure connection as part of a
39 * security context. The context state is maintained in client and server structures.
40 * On the client side, each user of an NFS mount is assigned their own context,
41 * identified by UID, on their first use of the mount, and it persists until the
42 * unmount or until the context is renewed. Each user context has a corresponding
43 * server context which the server maintains until the client destroys it, or
44 * until the context expires.
45 *
46 * The client and server contexts are set up dynamically. When a user attempts
47 * to send an NFS request, if there is no context for the user, then one is
48 * set up via an exchange of NFS null procedure calls as described in RFC 2203.
49 * During this exchange, the client and server pass a security token that is
50 * forwarded via Mach upcall to the gssd, which invokes the GSS-API to authenticate
51 * the user to the server (and vice-versa). The client and server also receive
52 * a unique session key that can be used to digitally sign the credentials and
53 * verifier or optionally to provide data integrity and/or privacy.
54 *
55 * Once the context is complete, the client and server enter a normal data
56 * exchange phase - beginning with the NFS request that prompted the context
57 * creation. During this phase, the client's RPC header contains an RPCSEC_GSS
58 * credential and verifier, and the server returns a verifier as well.
59 * For simple authentication, the verifier contains a signed checksum of the
60 * RPC header, including the credential. The server's verifier has a signed
61 * checksum of the current sequence number.
62 *
63 * Each client call contains a sequence number that nominally increases by one
64 * on each request. The sequence number is intended to prevent replay attacks.
65 * Since the protocol can be used over UDP, there is some allowance for
66 * out-of-sequence requests, so the server checks whether the sequence numbers
67 * are within a sequence "window". If a sequence number is outside the lower
68 * bound of the window, the server silently drops the request. This has some
69 * implications for retransmission. If a request needs to be retransmitted, the
70 * client must bump the sequence number even if the request XID is unchanged.
71 *
72 * When the NFS mount is unmounted, the client sends a "destroy" credential
73 * to delete the server's context for each user of the mount. Since it's
74 * possible for the client to crash or disconnect without sending the destroy
75 * message, the server has a thread that reaps contexts that have been idle
76 * too long.
77 */
78
79 #include <sys/systm.h>
80 #include <sys/kauth.h>
81 #include <sys/mount_internal.h>
82 #include <sys/kpi_mbuf.h>
83
84 #include <kern/host.h>
85
86 #include <mach/host_priv.h>
87 #include <mach/vm_map.h>
88 #include <vm/vm_map.h>
89 #include <vm/vm_kern_xnu.h>
90 #include <gssd/gssd_mach.h>
91
92 #include <nfs/rpcv2.h>
93 #include <nfs/nfsproto.h>
94 #include <nfs/nfs.h>
95 #include <nfs/nfs_gss.h>
96 #include <nfs/xdr_subs.h>
97 #include <nfs/nfsm_subs.h>
98 #include <nfs/nfs_gss.h>
99
100 #define NFS_GSS_MACH_MAX_RETRIES 3
101
102 #define NFSRV_GSS_DBG(...) NFSRV_DBG(NFSRV_FAC_GSS, 7, ## __VA_ARGS__)
103
104 u_long nfs_gss_svc_ctx_hash;
105 struct nfs_gss_svc_ctx_hashhead *nfs_gss_svc_ctx_hashtbl;
106 static LCK_GRP_DECLARE(nfs_gss_svc_grp, "rpcsec_gss_svc");
107 static LCK_MTX_DECLARE(nfs_gss_svc_ctx_mutex, &nfs_gss_svc_grp);
108 uint32_t nfsrv_gss_context_ttl = GSS_CTX_EXPIRE;
109 #define GSS_SVC_CTX_TTL ((uint64_t)max(2*GSS_CTX_PEND, nfsrv_gss_context_ttl) * NSEC_PER_SEC)
110
111 #define KRB5_MAX_MIC_SIZE 128
112 static uint8_t xdrpad[] = { 0x00, 0x00, 0x00, 0x00};
113
114 static struct nfs_gss_svc_ctx *nfs_gss_svc_ctx_find(uint32_t);
115 static void nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *);
116 static void nfs_gss_svc_ctx_timer(void *, void *);
117 static int nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx *);
118 static int nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *, uint32_t);
119
120 /* This is only used by server code */
121 static void nfs_gss_nfsm_chain(struct nfsm_chain *, mbuf_t);
122
123 static void host_release_special_port(mach_port_t);
124 static void nfs_gss_mach_alloc_buffer(u_char *, size_t, vm_map_copy_t *);
125 static int nfs_gss_mach_vmcopyout(vm_map_copy_t, uint32_t, u_char *);
126
127 static int nfs_gss_mchain_length(mbuf_t);
128 static int nfs_gss_append_chain(struct nfsm_chain *, mbuf_t);
129 static int nfs_gss_seqbits_size(uint32_t);
130
131 thread_call_t nfs_gss_svc_ctx_timer_call;
132 int nfs_gss_timer_on = 0;
133 uint32_t nfs_gss_ctx_count = 0;
134 const uint32_t nfs_gss_ctx_max = GSS_SVC_MAXCONTEXTS;
135
136 /*
137 * Common RPCSEC_GSS support routines
138 */
139
140 static errno_t
rpc_gss_prepend_32(mbuf_t * mb,uint32_t value)141 rpc_gss_prepend_32(mbuf_t *mb, uint32_t value)
142 {
143 int error;
144 uint32_t *data;
145
146 #if 0
147 data = mbuf_data(*mb);
148 /*
149 * If a wap token comes back and is not aligned
150 * get a new buffer (which should be aligned) to put the
151 * length in.
152 */
153 if ((uintptr_t)data & 0x3) {
154 mbuf_t nmb;
155
156 error = mbuf_get(MBUF_WAITOK, MBUF_TYPE_DATA, &nmb);
157 if (error) {
158 return error;
159 }
160 mbuf_setnext(nmb, *mb);
161 *mb = nmb;
162 }
163 #endif
164 error = mbuf_prepend(mb, sizeof(uint32_t), MBUF_WAITOK);
165 if (error) {
166 return error;
167 }
168
169 data = mbuf_data(*mb);
170 *data = txdr_unsigned(value);
171
172 return 0;
173 }
174
175 /*
176 * Prepend the sequence number to the xdr encode argumen or result
177 * Sequence number is prepended in its own mbuf.
178 *
179 * On successful return mbp_head will point to the old mbuf chain
180 * prepended with a new mbuf that has the sequence number.
181 */
182
183 static errno_t
rpc_gss_data_create(mbuf_t * mbp_head,uint32_t seqnum)184 rpc_gss_data_create(mbuf_t *mbp_head, uint32_t seqnum)
185 {
186 int error;
187 mbuf_t mb;
188 struct nfsm_chain nmc;
189 struct nfsm_chain *nmcp = &nmc;
190 uint8_t *data;
191
192 error = mbuf_get(MBUF_WAITOK, MBUF_TYPE_DATA, &mb);
193 if (error) {
194 return error;
195 }
196 data = mbuf_data(mb);
197 #if 0
198 /* Reserve space for prepending */
199 len = mbuf_maxlen(mb);
200 len = (len & ~0x3) - NFSX_UNSIGNED;
201 printf("%s: data = %p, len = %d\n", __func__, data, (int)len);
202 error = mbuf_setdata(mb, data + len, 0);
203 if (error || mbuf_trailingspace(mb)) {
204 printf("%s: data = %p trailingspace = %d error = %d\n", __func__, mbuf_data(mb), (int)mbuf_trailingspace(mb), error);
205 }
206 #endif
207 /* Reserve 16 words for prepending */
208 error = mbuf_setdata(mb, data + 16 * sizeof(uint32_t), 0);
209 nfsm_chain_init(nmcp, mb);
210 nfsm_chain_add_32(error, nmcp, seqnum);
211 nfsm_chain_build_done(error, nmcp);
212 if (error) {
213 return EINVAL;
214 }
215 mbuf_setnext(nmcp->nmc_mcur, *mbp_head);
216 *mbp_head = nmcp->nmc_mhead;
217
218 return 0;
219 }
220
221 /*
222 * Create an rpc_gss_integ_data_t given an argument or result in mb_head.
223 * On successful return mb_head will point to the rpc_gss_integ_data_t of length len.
224 * Note mb_head will now point to a 4 byte sequence number. len does not include
225 * any extra xdr padding.
226 * Returns 0 on success, else an errno_t
227 */
228
229 static errno_t
rpc_gss_integ_data_create(gss_ctx_id_t ctx,mbuf_t * mb_head,uint32_t seqnum,uint32_t * len)230 rpc_gss_integ_data_create(gss_ctx_id_t ctx, mbuf_t *mb_head, uint32_t seqnum, uint32_t *len)
231 {
232 uint32_t error;
233 uint32_t major;
234 uint32_t length;
235 gss_buffer_desc mic;
236 struct nfsm_chain nmc = {};
237
238 /* Length of the argument or result */
239 length = nfs_gss_mchain_length(*mb_head);
240 if (len) {
241 *len = length;
242 }
243 error = rpc_gss_data_create(mb_head, seqnum);
244 if (error) {
245 return error;
246 }
247
248 /*
249 * length is the length of the rpc_gss_data
250 */
251 length += NFSX_UNSIGNED; /* Add the sequence number to the length */
252 major = gss_krb5_get_mic_mbuf(&error, ctx, 0, *mb_head, 0, length, &mic);
253 if (major != GSS_S_COMPLETE) {
254 printf("gss_krb5_get_mic_mbuf failed %d\n", error);
255 return error;
256 }
257
258 error = rpc_gss_prepend_32(mb_head, length);
259 if (error) {
260 return error;
261 }
262
263 nfsm_chain_dissect_init(error, &nmc, *mb_head);
264 /* Append GSS mic token by advancing rpc_gss_data_t length + NFSX_UNSIGNED (size of the length field) */
265 nfsm_chain_adv(error, &nmc, length + NFSX_UNSIGNED);
266 nfsm_chain_finish_mbuf(error, &nmc); // Force the mic into its own sub chain.
267 nfsm_chain_add_32(error, &nmc, mic.length);
268 nfsm_chain_add_opaque(error, &nmc, mic.value, mic.length);
269 nfsm_chain_build_done(error, &nmc);
270 gss_release_buffer(NULL, &mic);
271
272 // printmbuf("rpc_gss_integ_data_create done", *mb_head, 0, 0);
273 assert(nmc.nmc_mhead == *mb_head);
274
275 return error;
276 }
277
278 /*
279 * Create an rpc_gss_priv_data_t out of the supplied raw arguments or results in mb_head.
280 * On successful return mb_head will point to a wrap token of lenght len.
281 * Note len does not include any xdr padding
282 * Returns 0 on success, else an errno_t
283 */
static errno_t
rpc_gss_priv_data_create(gss_ctx_id_t ctx, mbuf_t *mb_head, uint32_t seqnum, uint32_t *len)
{
	uint32_t error;
	uint32_t major;         /* GSS-API major status */
	struct nfsm_chain nmc;
	uint32_t pad;           /* XDR pad bytes needed after the wrap token */
	uint32_t length;

	/* Prepend the sequence number to form the rpc_gss_data_t */
	error = rpc_gss_data_create(mb_head, seqnum);
	if (error) {
		return error;
	}

	/* Encrypt (wrap) the whole rpc_gss_data_t in place */
	length = nfs_gss_mchain_length(*mb_head);
	major = gss_krb5_wrap_mbuf(&error, ctx, 1, 0, mb_head, 0, length, NULL);
	if (major != GSS_S_COMPLETE) {
		return error;
	}

	/* Report the wrap-token length (excludes XDR padding) to the caller */
	length = nfs_gss_mchain_length(*mb_head);
	if (len) {
		*len = length;
	}
	pad = nfsm_pad(length);

	/* Prepend the opaque length of the rpc_gss_priv_data */
	error = rpc_gss_prepend_32(mb_head, length);

	if (error) {
		return error;
	}
	if (pad) {
		/* Walk past the token and append the XDR padding in its own mbuf */
		nfsm_chain_dissect_init(error, &nmc, *mb_head);
		/* Advance the opaque size of length and length data */
		nfsm_chain_adv(error, &nmc, NFSX_UNSIGNED + length);
		nfsm_chain_finish_mbuf(error, &nmc);
		nfsm_chain_add_opaque_nopad(error, &nmc, xdrpad, pad);
		nfsm_chain_build_done(error, &nmc);
	}

	return error;
}
327
328 /*************
329 *
330 * Server functions
331 */
332
333 /*
334 * Initialization when NFS starts
335 */
void
nfs_gss_svc_init(void)
{
	/* Hash table of server-side GSS contexts, keyed by context handle */
	nfs_gss_svc_ctx_hashtbl = hashinit(SVC_CTX_HASHSZ, M_TEMP, &nfs_gss_svc_ctx_hash);

	/* Reaper callout; armed by nfs_gss_svc_ctx_insert() when needed */
	nfs_gss_svc_ctx_timer_call = thread_call_allocate(nfs_gss_svc_ctx_timer, NULL);
}
343
344 /*
345 * Find a server context based on a handle value received
346 * in an RPCSEC_GSS credential.
347 */
static struct nfs_gss_svc_ctx *
nfs_gss_svc_ctx_find(uint32_t handle)
{
	struct nfs_gss_svc_ctx_hashhead *head;
	struct nfs_gss_svc_ctx *cp;
	uint64_t timenow;

	/* Handle 0 is never assigned (see nfs_gss_svc_ctx_insert) */
	if (handle == 0) {
		return NULL;
	}

	head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(handle)];
	/*
	 * Don't return a context that is going to expire in GSS_CTX_PEND seconds
	 */
	clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC, &timenow);

	lck_mtx_lock(&nfs_gss_svc_ctx_mutex);

	LIST_FOREACH(cp, head, gss_svc_entries) {
		if (cp->gss_svc_handle == handle) {
			if (timenow > cp->gss_svc_incarnation + GSS_SVC_CTX_TTL) {
				/*
				 * Context has or is about to expire. Don't use.
				 * We'll return null and the client will have to create
				 * a new context.
				 */
				cp->gss_svc_handle = 0;
				/*
				 * Make sure though that we stay around for GSS_CTX_PEND seconds
				 * for other threads that might be using the context.
				 */
				cp->gss_svc_incarnation = timenow;

				cp = NULL;
				break;
			}
			/* Found a live context: take a reference for the caller */
			lck_mtx_lock(&cp->gss_svc_mtx);
			cp->gss_svc_refcnt++;
			lck_mtx_unlock(&cp->gss_svc_mtx);
			break;
		}
	}

	lck_mtx_unlock(&nfs_gss_svc_ctx_mutex);

	/* Referenced context, or NULL if not found / expired */
	return cp;
}
396
397 /*
398 * Insert a new server context into the hash table
399 * and start the context reap thread if necessary.
400 */
static void
nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *cp)
{
	struct nfs_gss_svc_ctx_hashhead *head;
	struct nfs_gss_svc_ctx *p;

	lck_mtx_lock(&nfs_gss_svc_ctx_mutex);

	/*
	 * Give the client a random handle so that if we reboot
	 * it's unlikely the client will get a bad context match.
	 * Make sure it's not zero or already assigned.
	 */
retry:
	cp->gss_svc_handle = random();
	if (cp->gss_svc_handle == 0) {
		goto retry;
	}
	/* Reject a handle already in use by another context in this bucket */
	head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(cp->gss_svc_handle)];
	LIST_FOREACH(p, head, gss_svc_entries)
	if (p->gss_svc_handle == cp->gss_svc_handle) {
		goto retry;
	}

	/* Start the context's lifetime clock GSS_CTX_PEND seconds out */
	clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
	    &cp->gss_svc_incarnation);
	LIST_INSERT_HEAD(head, cp, gss_svc_entries);
	nfs_gss_ctx_count++;

	/* Arm the reaper callout if it isn't already running */
	if (!nfs_gss_timer_on) {
		nfs_gss_timer_on = 1;

		nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
		    min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC);
	}

	lck_mtx_unlock(&nfs_gss_svc_ctx_mutex);
}
439
440 /*
441 * This function is called via the kernel's callout
442 * mechanism. It runs only when there are
443 * cached RPCSEC_GSS contexts.
444 */
void
nfs_gss_svc_ctx_timer(__unused void *param1, __unused void *param2)
{
	struct nfs_gss_svc_ctx *cp, *next;
	uint64_t timenow;
	int contexts = 0;       /* running count of contexts still cached */
	int i;

	lck_mtx_lock(&nfs_gss_svc_ctx_mutex);
	clock_get_uptime(&timenow);

	NFSRV_GSS_DBG("is running\n");

	/*
	 * Scan all the hash chains
	 */
	for (i = 0; i < SVC_CTX_HASHSZ; i++) {
		/*
		 * For each hash chain, look for entries
		 * that haven't been used in a while.
		 */
		LIST_FOREACH_SAFE(cp, &nfs_gss_svc_ctx_hashtbl[i], gss_svc_entries, next) {
			contexts++;
			/*
			 * A zeroed handle means the context was already
			 * invalidated (expired or destroyed), so it gets no
			 * TTL grace; otherwise wait out GSS_SVC_CTX_TTL.
			 * Never reap a context still referenced by a request.
			 */
			if (timenow > cp->gss_svc_incarnation +
			    (cp->gss_svc_handle ? GSS_SVC_CTX_TTL : 0)
			    && cp->gss_svc_refcnt == 0) {
				/*
				 * A stale context - remove it
				 */
				LIST_REMOVE(cp, gss_svc_entries);
				NFSRV_GSS_DBG("Removing contex for %d\n", cp->gss_svc_uid);
				if (cp->gss_svc_seqbits) {
					kfree_data(cp->gss_svc_seqbits, nfs_gss_seqbits_size(cp->gss_svc_seqwin));
				}
				lck_mtx_destroy(&cp->gss_svc_mtx, &nfs_gss_svc_grp);
				kfree_type(struct nfs_gss_svc_ctx, cp);
				contexts--;
			}
		}
	}

	nfs_gss_ctx_count = contexts;

	/*
	 * If there are still some cached contexts left,
	 * set up another callout to check on them later.
	 */
	nfs_gss_timer_on = nfs_gss_ctx_count > 0;
	if (nfs_gss_timer_on) {
		nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call,
		    min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC);
	}

	lck_mtx_unlock(&nfs_gss_svc_ctx_mutex);
}
500
501 /*
502 * Here the server receives an RPCSEC_GSS credential in an
503 * RPC call header. First there's some checking to make sure
504 * the credential is appropriate - whether the context is still
505 * being set up, or is complete. Then we use the handle to find
506 * the server's context and validate the verifier, which contains
507 * a signed checksum of the RPC header. If the verifier checks
508 * out, we extract the user's UID and groups from the context
509 * and use it to set up a UNIX credential for the user's request.
510 */
int
nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
{
	uint32_t vers, proc, seqnum, service;
	uint32_t handle, handle_len;
	uint32_t major;                 /* GSS-API major status */
	struct nfs_gss_svc_ctx *cp = NULL;
	uint32_t flavor = 0;
	int error = 0;
	uint32_t arglen;
	size_t argsize, start, header_len;
	gss_buffer_desc cksum;
	struct nfsm_chain nmc_tmp;
	mbuf_t reply_mbuf, prev_mbuf, pad_mbuf;

	vers = proc = seqnum = service = handle_len = 0;
	arglen = 0;

	/* Parse the RPCSEC_GSS credential body (RFC 2203 sec 5.1) */
	nfsm_chain_get_32(error, nmc, vers);
	if (vers != RPCSEC_GSS_VERS_1) {
		error = NFSERR_AUTHERR | AUTH_REJECTCRED;
		goto nfsmout;
	}

	nfsm_chain_get_32(error, nmc, proc);
	nfsm_chain_get_32(error, nmc, seqnum);
	nfsm_chain_get_32(error, nmc, service);
	nfsm_chain_get_32(error, nmc, handle_len);
	if (error) {
		goto nfsmout;
	}

	/*
	 * Make sure context setup/destroy is being done with a nullproc
	 */
	if (proc != RPCSEC_GSS_DATA && nd->nd_procnum != NFSPROC_NULL) {
		error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
		goto nfsmout;
	}

	/*
	 * If the sequence number is greater than the max
	 * allowable, reject and have the client init a
	 * new context.
	 */
	if (seqnum > GSS_MAXSEQ) {
		error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
		goto nfsmout;
	}

	/* Map the requested GSS service level to an RPCAUTH flavor */
	nd->nd_sec =
	    service == RPCSEC_GSS_SVC_NONE ? RPCAUTH_KRB5 :
	    service == RPCSEC_GSS_SVC_INTEGRITY ? RPCAUTH_KRB5I :
	    service == RPCSEC_GSS_SVC_PRIVACY ? RPCAUTH_KRB5P : 0;

	if (proc == RPCSEC_GSS_INIT) {
		/*
		 * Limit the total number of contexts
		 */
		if (nfs_gss_ctx_count > nfs_gss_ctx_max) {
			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
			goto nfsmout;
		}

		/*
		 * Set up a new context
		 */
		cp = kalloc_type(struct nfs_gss_svc_ctx,
		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
		lck_mtx_init(&cp->gss_svc_mtx, &nfs_gss_svc_grp, LCK_ATTR_NULL);
		cp->gss_svc_refcnt = 1;
	} else {
		/*
		 * Use the handle to find the context
		 */
		if (handle_len != sizeof(handle)) {
			error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
			goto nfsmout;
		}
		nfsm_chain_get_32(error, nmc, handle);
		if (error) {
			goto nfsmout;
		}
		/* Takes a reference on the context; dropped on error below */
		cp = nfs_gss_svc_ctx_find(handle);
		if (cp == NULL) {
			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
			goto nfsmout;
		}
	}

	cp->gss_svc_proc = proc;

	if (proc == RPCSEC_GSS_DATA || proc == RPCSEC_GSS_DESTROY) {
		struct posix_cred temp_pcred;

		if (cp->gss_svc_seqwin == 0) {
			/*
			 * Context isn't complete
			 */
			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
			goto nfsmout;
		}

		if (!nfs_gss_svc_seqnum_valid(cp, seqnum)) {
			/*
			 * Sequence number is bad
			 */
			error = EINVAL; // drop the request
			goto nfsmout;
		}

		/*
		 * Validate the verifier.
		 * The verifier contains an encrypted checksum
		 * of the call header from the XID up to and
		 * including the credential. We compute the
		 * checksum and compare it with what came in
		 * the verifier.
		 */
		header_len = nfsm_chain_offset(nmc);
		nfsm_chain_get_32(error, nmc, flavor);
		nfsm_chain_get_32(error, nmc, cksum.length);
		if (error) {
			goto nfsmout;
		}
		if (flavor != RPCSEC_GSS || cksum.length > KRB5_MAX_MIC_SIZE) {
			error = NFSERR_AUTHERR | AUTH_BADVERF;
		} else {
			cksum.value = kalloc_data(cksum.length, Z_WAITOK | Z_NOFAIL);
			nfsm_chain_get_opaque(error, nmc, cksum.length, cksum.value);
		}
		if (error) {
			goto nfsmout;
		}

		/* Now verify the client's call header checksum */
		major = gss_krb5_verify_mic_mbuf((uint32_t *)&error, cp->gss_svc_ctx_id, nmc->nmc_mhead, 0, header_len, &cksum, NULL);
		(void)gss_release_buffer(NULL, &cksum);
		if (major != GSS_S_COMPLETE) {
			printf("Server header: gss_krb5_verify_mic_mbuf failed %d\n", error);
			error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM;
			goto nfsmout;
		}

		nd->nd_gss_seqnum = seqnum;

		/*
		 * Set up the user's cred
		 */
		bzero(&temp_pcred, sizeof(temp_pcred));
		temp_pcred.cr_uid = cp->gss_svc_uid;
		bcopy(cp->gss_svc_gids, temp_pcred.cr_groups,
		    sizeof(gid_t) * cp->gss_svc_ngroups);
		temp_pcred.cr_ngroups = (short)cp->gss_svc_ngroups;

		nd->nd_cr = posix_cred_create(&temp_pcred);
		if (nd->nd_cr == NULL) {
			error = ENOMEM;
			goto nfsmout;
		}
		/* Mark the context as recently used */
		clock_get_uptime(&cp->gss_svc_incarnation);

		/*
		 * If the call arguments are integrity or privacy protected
		 * then we need to check them here.
		 */
		switch (service) {
		case RPCSEC_GSS_SVC_NONE:
			/* nothing to do */
			break;
		case RPCSEC_GSS_SVC_INTEGRITY:
			/*
			 * Here's what we expect in the integrity call args:
			 *
			 * - length of seq num + call args (4 bytes)
			 * - sequence number (4 bytes)
			 * - call args (variable bytes)
			 * - length of checksum token
			 * - checksum of seqnum + call args
			 */
			nfsm_chain_get_32(error, nmc, arglen);          // length of args
			if (arglen > NFS_MAXPACKET) {
				error = EBADRPC;
				goto nfsmout;
			}

			/* Use a copy of the chain to reach the trailing MIC */
			nmc_tmp = *nmc;
			nfsm_chain_adv(error, &nmc_tmp, arglen);
			nfsm_chain_get_32(error, &nmc_tmp, cksum.length);
			cksum.value = NULL;
			if (cksum.length > 0 && cksum.length < GSS_MAX_MIC_LEN) {
				cksum.value = kalloc_data(cksum.length, Z_WAITOK | Z_NOFAIL);
			} else {
				error = EBADRPC;
				goto nfsmout;
			}
			nfsm_chain_get_opaque(error, &nmc_tmp, cksum.length, cksum.value);

			/* Verify the checksum over the call args */
			start = nfsm_chain_offset(nmc);

			major = gss_krb5_verify_mic_mbuf((uint32_t *)&error, cp->gss_svc_ctx_id,
			    nmc->nmc_mhead, start, arglen, &cksum, NULL);
			kfree_data(cksum.value, cksum.length);
			if (major != GSS_S_COMPLETE) {
				printf("Server args: gss_krb5_verify_mic_mbuf failed %d\n", error);
				error = EBADRPC;
				goto nfsmout;
			}

			/*
			 * Get the sequence number prepended to the args
			 * and compare it against the one sent in the
			 * call credential.
			 */
			nfsm_chain_get_32(error, nmc, seqnum);
			if (seqnum != nd->nd_gss_seqnum) {
				error = EBADRPC; // returns as GARBAGEARGS
				goto nfsmout;
			}
			break;
		case RPCSEC_GSS_SVC_PRIVACY:
			/*
			 * Here's what we expect in the privacy call args:
			 *
			 * - length of wrap token
			 * - wrap token (37-40 bytes)
			 */
			prev_mbuf = nmc->nmc_mcur;
			nfsm_chain_get_32(error, nmc, arglen);          // length of args
			if (arglen > NFS_MAXPACKET) {
				error = EBADRPC;
				goto nfsmout;
			}

			/* Get the wrap token (current mbuf in the chain starting at the current offset) */
			start = nmc->nmc_ptr - (caddr_t)mbuf_data(nmc->nmc_mcur);

			/* split out the wrap token */
			argsize = arglen;
			error = gss_normalize_mbuf(nmc->nmc_mcur, start, &argsize, &reply_mbuf, &pad_mbuf, 0);
			if (error) {
				goto nfsmout;
			}

			assert(argsize == arglen);
			if (pad_mbuf) {
				assert(nfsm_pad(arglen) == mbuf_len(pad_mbuf));
				mbuf_free(pad_mbuf);
			} else {
				assert(nfsm_pad(arglen) == 0);
			}

			/* Decrypt the wrap token in place */
			major = gss_krb5_unwrap_mbuf((uint32_t *)&error, cp->gss_svc_ctx_id, &reply_mbuf, 0, arglen, NULL, NULL);
			if (major != GSS_S_COMPLETE) {
				printf("%s: gss_krb5_unwrap_mbuf failes %d\n", __func__, error);
				goto nfsmout;
			}

			/* Now replace the wrapped arguments with the unwrapped ones */
			mbuf_setnext(prev_mbuf, reply_mbuf);
			nmc->nmc_mcur = reply_mbuf;
			nmc->nmc_ptr = mbuf_data(reply_mbuf);
			nmc->nmc_left = mbuf_len(reply_mbuf);

			/*
			 * - sequence number (4 bytes)
			 * - call args
			 */

			// nfsm_chain_reverse(nmc, nfsm_pad(toklen));

			/*
			 * Get the sequence number prepended to the args
			 * and compare it against the one sent in the
			 * call credential.
			 */
			nfsm_chain_get_32(error, nmc, seqnum);
			if (seqnum != nd->nd_gss_seqnum) {
				printf("%s: Sequence number mismatch seqnum = %d nd->nd_gss_seqnum = %d\n",
				    __func__, seqnum, nd->nd_gss_seqnum);
				printmbuf("reply_mbuf", nmc->nmc_mhead, 0, 0);
				printf("reply_mbuf %p nmc_head %p\n", reply_mbuf, nmc->nmc_mhead);
				error = EBADRPC; // returns as GARBAGEARGS
				goto nfsmout;
			}
			break;
		}
	} else {
		uint32_t verflen;
		/*
		 * If the proc is RPCSEC_GSS_INIT or RPCSEC_GSS_CONTINUE_INIT
		 * then we expect a null verifier.
		 */
		nfsm_chain_get_32(error, nmc, flavor);
		nfsm_chain_get_32(error, nmc, verflen);
		if (error || flavor != RPCAUTH_NULL || verflen > 0) {
			error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM;
		}
		if (error) {
			/* A brand-new INIT context was never inserted; free it here */
			if (proc == RPCSEC_GSS_INIT) {
				lck_mtx_destroy(&cp->gss_svc_mtx, &nfs_gss_svc_grp);
				kfree_type(struct nfs_gss_svc_ctx, cp);
				cp = NULL;
			}
			goto nfsmout;
		}
	}

	/* Success: hand the referenced context to the request descriptor */
	nd->nd_gss_context = cp;
	return 0;
nfsmout:
	if (cp) {
		nfs_gss_svc_ctx_deref(cp);
	}
	return error;
}
828
829 /*
830 * Insert the server's verifier into the RPC reply header.
831 * It contains a signed checksum of the sequence number that
832 * was received in the RPC call.
833 * Then go on to add integrity or privacy if necessary.
834 */
835 int
nfs_gss_svc_verf_put(struct nfsrv_descript * nd,struct nfsm_chain * nmc)836 nfs_gss_svc_verf_put(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
837 {
838 struct nfs_gss_svc_ctx *cp;
839 int error = 0;
840 gss_buffer_desc cksum, seqbuf;
841 uint32_t network_seqnum;
842 cp = nd->nd_gss_context;
843 uint32_t major;
844
845 if (cp->gss_svc_major != GSS_S_COMPLETE) {
846 /*
847 * If the context isn't yet complete
848 * then return a null verifier.
849 */
850 nfsm_chain_add_32(error, nmc, RPCAUTH_NULL);
851 nfsm_chain_add_32(error, nmc, 0);
852 return error;
853 }
854
855 /*
856 * Compute checksum of the request seq number
857 * If it's the final reply of context setup
858 * then return the checksum of the context
859 * window size.
860 */
861 seqbuf.length = NFSX_UNSIGNED;
862 if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
863 cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) {
864 network_seqnum = htonl(cp->gss_svc_seqwin);
865 } else {
866 network_seqnum = htonl(nd->nd_gss_seqnum);
867 }
868 seqbuf.value = &network_seqnum;
869
870 major = gss_krb5_get_mic((uint32_t *)&error, cp->gss_svc_ctx_id, 0, &seqbuf, &cksum);
871 if (major != GSS_S_COMPLETE) {
872 return error;
873 }
874
875 /*
876 * Now wrap it in a token and add
877 * the verifier to the reply.
878 */
879 nfsm_chain_add_32(error, nmc, RPCSEC_GSS);
880 nfsm_chain_add_32(error, nmc, cksum.length);
881 nfsm_chain_add_opaque(error, nmc, cksum.value, cksum.length);
882 gss_release_buffer(NULL, &cksum);
883
884 return error;
885 }
886
887 /*
888 * The results aren't available yet, but if they need to be
889 * checksummed for integrity protection or encrypted, then
890 * we can record the start offset here, insert a place-holder
891 * for the results length, as well as the sequence number.
892 * The rest of the work is done later by nfs_gss_svc_protect_reply()
893 * when the results are available.
894 */
895 int
nfs_gss_svc_prepare_reply(struct nfsrv_descript * nd,struct nfsm_chain * nmc)896 nfs_gss_svc_prepare_reply(struct nfsrv_descript *nd, struct nfsm_chain *nmc)
897 {
898 struct nfs_gss_svc_ctx *cp = nd->nd_gss_context;
899 int error = 0;
900
901 if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
902 cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) {
903 return 0;
904 }
905
906 switch (nd->nd_sec) {
907 case RPCAUTH_KRB5:
908 /* Nothing to do */
909 break;
910 case RPCAUTH_KRB5I:
911 case RPCAUTH_KRB5P:
912 nd->nd_gss_mb = nmc->nmc_mcur; // record current mbuf
913 nfsm_chain_finish_mbuf(error, nmc); // split the chain here
914 break;
915 }
916
917 return error;
918 }
919
920 /*
921 * The results are checksummed or encrypted for return to the client
922 */
923 int
nfs_gss_svc_protect_reply(struct nfsrv_descript * nd,mbuf_t mrep __unused)924 nfs_gss_svc_protect_reply(struct nfsrv_descript *nd, mbuf_t mrep __unused)
925 {
926 struct nfs_gss_svc_ctx *cp = nd->nd_gss_context;
927 struct nfsm_chain nmrep_res, *nmc_res = &nmrep_res;
928 mbuf_t mb, results;
929 uint32_t reslen;
930 int error = 0;
931
932 /* XXX
933 * Using a reference to the mbuf where we previously split the reply
934 * mbuf chain, we split the mbuf chain argument into two mbuf chains,
935 * one that allows us to prepend a length field or token, (nmc_pre)
936 * and the second which holds just the results that we're going to
937 * checksum and/or encrypt. When we're done, we join the chains back
938 * together.
939 */
940
941 mb = nd->nd_gss_mb; // the mbuf where we split
942 results = mbuf_next(mb); // first mbuf in the results
943 error = mbuf_setnext(mb, NULL); // disconnect the chains
944 if (error) {
945 return error;
946 }
947 nfs_gss_nfsm_chain(nmc_res, mb); // set up the prepend chain
948 nfsm_chain_build_done(error, nmc_res);
949 if (error) {
950 return error;
951 }
952
953 if (nd->nd_sec == RPCAUTH_KRB5I) {
954 error = rpc_gss_integ_data_create(cp->gss_svc_ctx_id, &results, nd->nd_gss_seqnum, &reslen);
955 } else {
956 /* RPCAUTH_KRB5P */
957 error = rpc_gss_priv_data_create(cp->gss_svc_ctx_id, &results, nd->nd_gss_seqnum, &reslen);
958 }
959 nfs_gss_append_chain(nmc_res, results); // Append the results mbufs
960 nfsm_chain_build_done(error, nmc_res);
961
962 return error;
963 }
964
965 /*
966 * This function handles the context setup calls from the client.
967 * Essentially, it implements the NFS null procedure calls when
968 * an RPCSEC_GSS credential is used.
969 * This is the context maintenance function. It creates and
970 * destroys server contexts at the whim of the client.
971 * During context creation, it receives GSS-API tokens from the
972 * client, passes them up to gssd, and returns a received token
973 * back to the client in the null procedure reply.
974 */
int
nfs_gss_svc_ctx_init(struct nfsrv_descript *nd, struct nfsrv_sock *slp, mbuf_t *mrepp)
{
	struct nfs_gss_svc_ctx *cp = NULL;
	int error = 0;
	int autherr = 0;		/* RPCSEC_GSS-level auth error to return to the client */
	struct nfsm_chain *nmreq, nmrep;
	int sz;

	nmreq = &nd->nd_nmreq;
	nfsm_chain_null(&nmrep);
	*mrepp = NULL;
	cp = nd->nd_gss_context;	/* context located/created by the cred parsing code */
	nd->nd_repstat = 0;

	switch (cp->gss_svc_proc) {
	case RPCSEC_GSS_INIT:
		/* Brand-new context: hash it in so later calls can find it */
		nfs_gss_svc_ctx_insert(cp);
		OS_FALLTHROUGH;

	case RPCSEC_GSS_CONTINUE_INIT:
		/* Get the token from the request */
		nfsm_chain_get_32(error, nmreq, cp->gss_svc_tokenlen);
		cp->gss_svc_token = NULL;
		/* Reject zero-length or oversized tokens by leaving gss_svc_token NULL */
		if (cp->gss_svc_tokenlen > 0 && cp->gss_svc_tokenlen < GSS_MAX_TOKEN_LEN) {
			cp->gss_svc_token = kalloc_data(cp->gss_svc_tokenlen, Z_WAITOK);
		}
		if (cp->gss_svc_token == NULL) {
			autherr = RPCSEC_GSS_CREDPROBLEM;
			break;
		}
		nfsm_chain_get_opaque(error, nmreq, cp->gss_svc_tokenlen, cp->gss_svc_token);

		/* Use the token in a gss_accept_sec_context upcall */
		error = nfs_gss_svc_gssd_upcall(cp);
		if (error) {
			autherr = RPCSEC_GSS_CREDPROBLEM;
			/* NFSERR_EAUTH is reported via autherr, not as an RPC-level error */
			if (error == NFSERR_EAUTH) {
				error = 0;
			}
			break;
		}

		/*
		 * If the context isn't complete, pass the new token
		 * back to the client for another round.
		 */
		if (cp->gss_svc_major != GSS_S_COMPLETE) {
			break;
		}

		/*
		 * Now the server context is complete.
		 * Finish setup.
		 */
		clock_get_uptime(&cp->gss_svc_incarnation);

		/* Allocate the zero-filled sequence-number window bitmap (RFC 2203 5.3.3.1) */
		cp->gss_svc_seqwin = GSS_SVC_SEQWINDOW;
		cp->gss_svc_seqbits = kalloc_data(nfs_gss_seqbits_size(cp->gss_svc_seqwin), Z_WAITOK | Z_ZERO);
		if (cp->gss_svc_seqbits == NULL) {
			autherr = RPCSEC_GSS_CREDPROBLEM;
			break;
		}
		break;

	case RPCSEC_GSS_DATA:
		/* Just a nullproc ping - do nothing */
		break;

	case RPCSEC_GSS_DESTROY:
		/*
		 * Don't destroy the context immediately because
		 * other active requests might still be using it.
		 * Instead, schedule it for destruction after
		 * GSS_CTX_PEND time has elapsed.
		 */
		cp = nfs_gss_svc_ctx_find(cp->gss_svc_handle);
		if (cp != NULL) {
			/* NOTE(review): handle is cleared before gss_svc_mtx is taken — confirm
			 * the hash-table lock in ctx_find is sufficient to make this safe. */
			cp->gss_svc_handle = 0; // so it can't be found
			lck_mtx_lock(&cp->gss_svc_mtx);
			clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC,
			    &cp->gss_svc_incarnation);
			lck_mtx_unlock(&cp->gss_svc_mtx);
		}
		break;
	default:
		autherr = RPCSEC_GSS_CREDPROBLEM;
		break;
	}

	/* Now build the reply */

	if (nd->nd_repstat == 0) {
		nd->nd_repstat = autherr ? (NFSERR_AUTHERR | autherr) : NFSERR_RETVOID;
	}
	/* NOTE(review): in the DESTROY case cp may have been reassigned to NULL by
	 * nfs_gss_svc_ctx_find above, which would fault here — TODO confirm whether
	 * that path is reachable (e.g. a second DESTROY for the same handle). */
	sz = 7 * NFSX_UNSIGNED + nfsm_rndup(cp->gss_svc_tokenlen); // size of results
	error = nfsrv_rephead(nd, slp, &nmrep, sz);
	*mrepp = nmrep.nmc_mhead;
	if (error || autherr) {
		goto nfsmout;
	}

	if (cp->gss_svc_proc == RPCSEC_GSS_INIT ||
	    cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) {
		/* Reply body: handle, major/minor status, sequence window, output token */
		nfsm_chain_add_32(error, &nmrep, sizeof(cp->gss_svc_handle));
		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_handle);

		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_major);
		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_minor);
		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_seqwin);

		nfsm_chain_add_32(error, &nmrep, cp->gss_svc_tokenlen);
		if (cp->gss_svc_token != NULL) {
			nfsm_chain_add_opaque(error, &nmrep, cp->gss_svc_token, cp->gss_svc_tokenlen);
			kfree_data_addr(cp->gss_svc_token);
		}
	}

nfsmout:
	/* On an auth error, tear the context down completely */
	if (autherr != 0) {
		nd->nd_gss_context = NULL;
		LIST_REMOVE(cp, gss_svc_entries);
		if (cp->gss_svc_seqbits != NULL) {
			kfree_data(cp->gss_svc_seqbits, nfs_gss_seqbits_size(cp->gss_svc_seqwin));
		}
		if (cp->gss_svc_token != NULL) {
			kfree_data_addr(cp->gss_svc_token);
		}
		lck_mtx_destroy(&cp->gss_svc_mtx, &nfs_gss_svc_grp);
		kfree_type(struct nfs_gss_svc_ctx, cp);
	}

	nfsm_chain_build_done(error, &nmrep);
	if (error) {
		nfsm_chain_cleanup(&nmrep);
		*mrepp = NULL;
	}
	return error;
}
1114
1115 /*
1116 * This is almost a mirror-image of the client side upcall.
1117 * It passes and receives a token, but invokes gss_accept_sec_context.
1118 * If it's the final call of the context setup, then gssd also returns
1119 * the session key and the user's UID.
1120 */
static int
nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx *cp)
{
	kern_return_t kr;
	mach_port_t mp;
	int retry_cnt = 0;
	gssd_byte_buffer octx = NULL;		/* lucid context returned by gssd */
	uint32_t lucidlen = 0;
	void *lucid_ctx_buffer;
	uint32_t ret_flags;
	vm_map_copy_t itoken = NULL;		/* input token, passed out-of-line */
	gssd_byte_buffer otoken = NULL;		/* output token, received out-of-line */
	mach_msg_type_number_t otokenlen;
	int error = 0;
	char svcname[] = "nfs";

	kr = host_get_gssd_port(host_priv_self(), &mp);
	if (kr != KERN_SUCCESS) {
		printf("nfs_gss_svc_gssd_upcall: can't get gssd port, status %x (%d)\n", kr, kr);
		goto out;
	}
	if (!IPC_PORT_VALID(mp)) {
		printf("nfs_gss_svc_gssd_upcall: gssd port not valid\n");
		goto out;
	}

	/* Wrap the client's token in a VM buffer; consumed by the Mach call */
	if (cp->gss_svc_tokenlen > 0) {
		nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken);
	}

retry:
	printf("Calling mach_gss_accept_sec_context\n");
	kr = mach_gss_accept_sec_context(
		mp,
		(gssd_byte_buffer) itoken, (mach_msg_type_number_t) cp->gss_svc_tokenlen,
		svcname,
		0,
		&cp->gss_svc_context,
		&cp->gss_svc_cred_handle,
		&ret_flags,
		&cp->gss_svc_uid,
		cp->gss_svc_gids,
		&cp->gss_svc_ngroups,
		&octx, (mach_msg_type_number_t *) &lucidlen,
		&otoken, &otokenlen,
		&cp->gss_svc_major,
		&cp->gss_svc_minor);

	printf("mach_gss_accept_sec_context returned %d\n", kr);
	if (kr != KERN_SUCCESS) {
		printf("nfs_gss_svc_gssd_upcall failed: %x (%d)\n", kr, kr);
		/*
		 * If gssd died before any context state was established,
		 * rebuild the (consumed) input buffer and retry a few times.
		 */
		if (kr == MIG_SERVER_DIED && cp->gss_svc_context == 0 &&
		    retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES) {
			if (cp->gss_svc_tokenlen > 0) {
				nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken);
			}
			goto retry;
		}
		host_release_special_port(mp);
		goto out;
	}

	host_release_special_port(mp);

	/* lucidlen > 0 means gssd returned a completed context */
	if (lucidlen > 0) {
		if (lucidlen > MAX_LUCIDLEN) {
			printf("nfs_gss_svc_gssd_upcall: bad context length (%d)\n", lucidlen);
			vm_map_copy_discard((vm_map_copy_t) octx);
			vm_map_copy_discard((vm_map_copy_t) otoken);
			goto out;
		}
		/* NOTE(review): kalloc_data result is not NULL-checked before the
		 * vmcopyout below — presumably Z_WAITOK cannot fail for this size;
		 * confirm, or add a check. */
		lucid_ctx_buffer = kalloc_data(lucidlen, Z_WAITOK | Z_ZERO);
		error = nfs_gss_mach_vmcopyout((vm_map_copy_t) octx, lucidlen, lucid_ctx_buffer);
		if (error) {
			vm_map_copy_discard((vm_map_copy_t) octx);
			vm_map_copy_discard((vm_map_copy_t) otoken);
			kfree_data(lucid_ctx_buffer, lucidlen);
			goto out;
		}
		/* Replace any context from an earlier round */
		if (cp->gss_svc_ctx_id) {
			gss_krb5_destroy_context(cp->gss_svc_ctx_id);
		}
		cp->gss_svc_ctx_id = gss_krb5_make_context(lucid_ctx_buffer, lucidlen);
		kfree_data(lucid_ctx_buffer, lucidlen);
		if (cp->gss_svc_ctx_id == NULL) {
			printf("Failed to make context from lucid_ctx_buffer\n");
			goto out;
		}
	}

	/* Free context token used as input */
	if (cp->gss_svc_token) {
		kfree_data(cp->gss_svc_token, cp->gss_svc_tokenlen);
	}
	cp->gss_svc_token = NULL;
	cp->gss_svc_tokenlen = 0;

	if (otokenlen > 0) {
		/* Set context token to gss output token */
		cp->gss_svc_token = kalloc_data(otokenlen, Z_WAITOK);
		if (cp->gss_svc_token == NULL) {
			printf("nfs_gss_svc_gssd_upcall: could not allocate %d bytes\n", otokenlen);
			vm_map_copy_discard((vm_map_copy_t) otoken);
			return ENOMEM;
		}
		error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, otokenlen, cp->gss_svc_token);
		if (error) {
			vm_map_copy_discard((vm_map_copy_t) otoken);
			kfree_data(cp->gss_svc_token, otokenlen);
			return NFSERR_EAUTH;
		}
		cp->gss_svc_tokenlen = otokenlen;
	}

	return 0;

out:
	/* Failure: drop the stored input token and report an auth error */
	kfree_data(cp->gss_svc_token, cp->gss_svc_tokenlen);
	cp->gss_svc_tokenlen = 0;

	return NFSERR_EAUTH;
}
1243
1244 /*
1245 * Validate the sequence number in the credential as described
1246 * in RFC 2203 Section 5.3.3.1
1247 *
1248 * Here the window of valid sequence numbers is represented by
1249 * a bitmap. As each sequence number is received, its bit is
1250 * set in the bitmap. An invalid sequence number lies below
1251 * the lower bound of the window, or is within the window but
1252 * has its bit already set.
1253 */
static int
nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *cp, uint32_t seq)
{
	uint32_t *bits = cp->gss_svc_seqbits;	/* window bitmap, one bit per seqnum */
	uint32_t win = cp->gss_svc_seqwin;	/* window size */
	uint32_t i;

	lck_mtx_lock(&cp->gss_svc_mtx);

	/*
	 * If greater than the window upper bound,
	 * move the window up, and set the bit.
	 */
	if (seq > cp->gss_svc_seqmax) {
		if (seq - cp->gss_svc_seqmax > win) {
			/* Jumped past the whole window: start a fresh bitmap */
			bzero(bits, nfs_gss_seqbits_size(win));
		} else {
			/* Clear the bits for the seqnums the window slides over */
			for (i = cp->gss_svc_seqmax + 1; i < seq; i++) {
				win_resetbit(bits, i % win);
			}
		}
		win_setbit(bits, seq % win);
		cp->gss_svc_seqmax = seq;
		lck_mtx_unlock(&cp->gss_svc_mtx);
		return 1;
	}

	/*
	 * Invalid if below the lower bound of the window
	 * NOTE(review): gss_svc_seqmax - win underflows (unsigned) while
	 * seqmax < win — presumably benign since the wrapped bound exceeds
	 * any small seq, but confirm early-context behavior.
	 */
	if (seq <= cp->gss_svc_seqmax - win) {
		lck_mtx_unlock(&cp->gss_svc_mtx);
		return 0;
	}

	/*
	 * In the window, invalid if the bit is already set
	 */
	if (win_getbit(bits, seq % win)) {
		lck_mtx_unlock(&cp->gss_svc_mtx);
		return 0;
	}
	win_setbit(bits, seq % win);
	lck_mtx_unlock(&cp->gss_svc_mtx);
	return 1;
}
1300
1301 /*
1302 * Drop a reference to a context
1303 *
1304 * Note that it's OK for the context to exist
1305 * with a refcount of zero. The refcount isn't
1306 * checked until we're about to reap an expired one.
1307 */
1308 void
nfs_gss_svc_ctx_deref(struct nfs_gss_svc_ctx * cp)1309 nfs_gss_svc_ctx_deref(struct nfs_gss_svc_ctx *cp)
1310 {
1311 lck_mtx_lock(&cp->gss_svc_mtx);
1312 if (cp->gss_svc_refcnt > 0) {
1313 cp->gss_svc_refcnt--;
1314 } else {
1315 printf("nfs_gss_ctx_deref: zero refcount\n");
1316 }
1317 lck_mtx_unlock(&cp->gss_svc_mtx);
1318 }
1319
1320 /*
1321 * Called at NFS server shutdown - destroy all contexts
1322 */
1323 void
nfs_gss_svc_cleanup(void)1324 nfs_gss_svc_cleanup(void)
1325 {
1326 struct nfs_gss_svc_ctx_hashhead *head;
1327 struct nfs_gss_svc_ctx *cp, *ncp;
1328 int i;
1329
1330 lck_mtx_lock(&nfs_gss_svc_ctx_mutex);
1331
1332 /*
1333 * Run through all the buckets
1334 */
1335 for (i = 0; i < SVC_CTX_HASHSZ; i++) {
1336 /*
1337 * Remove and free all entries in the bucket
1338 */
1339 head = &nfs_gss_svc_ctx_hashtbl[i];
1340 LIST_FOREACH_SAFE(cp, head, gss_svc_entries, ncp) {
1341 LIST_REMOVE(cp, gss_svc_entries);
1342 if (cp->gss_svc_seqbits) {
1343 kfree_data(cp->gss_svc_seqbits, nfs_gss_seqbits_size(cp->gss_svc_seqwin));
1344 }
1345 lck_mtx_destroy(&cp->gss_svc_mtx, &nfs_gss_svc_grp);
1346 kfree_type(struct nfs_gss_svc_ctx, cp);
1347 }
1348 }
1349
1350 lck_mtx_unlock(&nfs_gss_svc_ctx_mutex);
1351 }
1352
1353 /*************
1354 * The following functions are used by both client and server.
1355 */
1356
1357 /*
1358 * Release a host special port that was obtained by host_get_special_port
1359 * or one of its macros (host_get_gssd_port in this case).
1360 * This really should be in a public kpi.
1361 */
1362
1363 /* This should be in a public header if this routine is not */
1364 static void
host_release_special_port(mach_port_t mp)1365 host_release_special_port(mach_port_t mp)
1366 {
1367 if (IPC_PORT_VALID(mp)) {
1368 ipc_port_release_send(mp);
1369 }
1370 }
1371
1372 /*
1373 * The token that is sent and received in the gssd upcall
1374 * has unbounded variable length. Mach RPC does not pass
1375 * the token in-line. Instead it uses page mapping to handle
1376 * these parameters. This function allocates a VM buffer
1377 * to hold the token for an upcall and copies the token
1378 * (received from the client) into it. The VM buffer is
1379 * marked with a src_destroy flag so that the upcall will
1380 * automatically de-allocate the buffer when the upcall is
1381 * complete.
1382 */
1383 static void
nfs_gss_mach_alloc_buffer(u_char * buf,size_t buflen,vm_map_copy_t * addr)1384 nfs_gss_mach_alloc_buffer(u_char *buf, size_t buflen, vm_map_copy_t *addr)
1385 {
1386 kern_return_t kr;
1387 vm_offset_t kmem_buf;
1388 vm_size_t tbuflen;
1389
1390 *addr = NULL;
1391 if (buf == NULL || buflen == 0) {
1392 return;
1393 }
1394
1395 tbuflen = vm_map_round_page(buflen, vm_map_page_mask(ipc_kernel_map));
1396
1397 if (tbuflen < buflen) {
1398 printf("nfs_gss_mach_alloc_buffer: vm_map_round_page failed\n");
1399 return;
1400 }
1401
1402 kr = kmem_alloc(ipc_kernel_map, &kmem_buf, tbuflen,
1403 KMA_DATA, VM_KERN_MEMORY_FILE);
1404 if (kr != 0) {
1405 printf("nfs_gss_mach_alloc_buffer: vm_allocate failed\n");
1406 return;
1407 }
1408
1409 bcopy(buf, (char *)kmem_buf, buflen);
1410 bzero((char *)kmem_buf + buflen, tbuflen - buflen);
1411
1412 kr = vm_map_unwire(ipc_kernel_map, kmem_buf, kmem_buf + tbuflen, FALSE);
1413 if (kr != 0) {
1414 printf("nfs_gss_mach_alloc_buffer: vm_map_unwire failed\n");
1415 return;
1416 }
1417
1418 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t) kmem_buf,
1419 (vm_map_size_t) buflen, TRUE, addr);
1420 if (kr != 0) {
1421 printf("nfs_gss_mach_alloc_buffer: vm_map_copyin failed\n");
1422 return;
1423 }
1424 }
1425
1426 /*
1427 * Here we handle a token received from the gssd via an upcall.
1428 * The received token resides in an allocate VM buffer.
1429 * We copy the token out of this buffer to a chunk of malloc'ed
1430 * memory of the right size, then de-allocate the VM buffer.
1431 */
1432 static int
nfs_gss_mach_vmcopyout(vm_map_copy_t in,uint32_t len,u_char * out)1433 nfs_gss_mach_vmcopyout(vm_map_copy_t in, uint32_t len, u_char *out)
1434 {
1435 vm_map_offset_t map_data;
1436 vm_offset_t data;
1437 int error;
1438
1439 error = vm_map_copyout(ipc_kernel_map, &map_data, in);
1440 if (error) {
1441 return error;
1442 }
1443
1444 data = CAST_DOWN(vm_offset_t, map_data);
1445 bcopy((void *) data, out, len);
1446 vm_deallocate(ipc_kernel_map, data, len);
1447
1448 return 0;
1449 }
1450
1451 /*
1452 * Return the number of bytes in an mbuf chain.
1453 */
1454 static int
nfs_gss_mchain_length(mbuf_t mhead)1455 nfs_gss_mchain_length(mbuf_t mhead)
1456 {
1457 mbuf_t mb;
1458 int len = 0;
1459
1460 for (mb = mhead; mb; mb = mbuf_next(mb)) {
1461 len += mbuf_len(mb);
1462 }
1463
1464 return len;
1465 }
1466
1467 /*
1468 * Return the size for the sequence numbers bitmap.
1469 */
1470 static int
nfs_gss_seqbits_size(uint32_t win)1471 nfs_gss_seqbits_size(uint32_t win)
1472 {
1473 return nfsm_rndup((win + 7) / 8);
1474 }
1475
1476 /*
1477 * Append an args or results mbuf chain to the header chain
1478 */
1479 static int
nfs_gss_append_chain(struct nfsm_chain * nmc,mbuf_t mc)1480 nfs_gss_append_chain(struct nfsm_chain *nmc, mbuf_t mc)
1481 {
1482 int error = 0;
1483 mbuf_t mb, tail;
1484
1485 /* Connect the mbuf chains */
1486 error = mbuf_setnext(nmc->nmc_mcur, mc);
1487 if (error) {
1488 return error;
1489 }
1490
1491 /* Find the last mbuf in the chain */
1492 tail = NULL;
1493 for (mb = mc; mb; mb = mbuf_next(mb)) {
1494 tail = mb;
1495 }
1496
1497 nmc->nmc_mcur = tail;
1498 nmc->nmc_ptr = (caddr_t) mbuf_data(tail) + mbuf_len(tail);
1499 nmc->nmc_left = mbuf_trailingspace(tail);
1500
1501 return 0;
1502 }
1503
1504 /*
1505 * Convert an mbuf chain to an NFS mbuf chain
1506 */
1507 static void
nfs_gss_nfsm_chain(struct nfsm_chain * nmc,mbuf_t mc)1508 nfs_gss_nfsm_chain(struct nfsm_chain *nmc, mbuf_t mc)
1509 {
1510 mbuf_t mb, tail;
1511
1512 /* Find the last mbuf in the chain */
1513 tail = NULL;
1514 for (mb = mc; mb; mb = mbuf_next(mb)) {
1515 tail = mb;
1516 }
1517
1518 nmc->nmc_mhead = mc;
1519 nmc->nmc_mcur = tail;
1520 nmc->nmc_ptr = (caddr_t) mbuf_data(tail) + mbuf_len(tail);
1521 nmc->nmc_left = mbuf_trailingspace(tail);
1522 nmc->nmc_flags = 0;
1523 }
1524
#if 0
/* Debug-only hex dumper, compiled out; kept for ad-hoc troubleshooting. */
#define DISPLAYLEN 16		/* bytes shown per output line */
#define MAXDISPLAYLEN 256	/* cap on total bytes dumped */

/*
 * Print up to MAXDISPLAYLEN bytes of `data` as hex, DISPLAYLEN
 * bytes per line, prefixed by `msg` and the (untruncated) length.
 */
static void
hexdump(const char *msg, void *data, size_t len)
{
	size_t i, j;
	u_char *d = data;
	char *p, disbuf[3 * DISPLAYLEN + 1];	/* "xx " per byte + NUL */

	printf("NFS DEBUG %s len=%d:\n", msg, (uint32_t)len);
	if (len > MAXDISPLAYLEN) {
		len = MAXDISPLAYLEN;
	}

	for (i = 0; i < len; i += DISPLAYLEN) {
		for (p = disbuf, j = 0; (j + i) < len && j < DISPLAYLEN; j++, p += 3) {
			snprintf(p, 4, "%02x ", d[i + j]);
		}
		printf("\t%s\n", disbuf);
	}
}
#endif
1549
1550 #endif /* CONFIG_NFS_SERVER */
1551