xref: /xnu-11215.61.5/bsd/vfs/vfs_cprotect.c (revision 4f1223e81cd707a65cc109d0b8ad6653699da3c4)
1 /*
2  * Copyright (c) 2015-2023 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <sys/cprotect.h>
30 #include <sys/malloc.h>
31 #include <sys/mount_internal.h>
32 #include <sys/filio.h>
33 #include <sys/content_protection.h>
34 #include <libkern/crypto/sha1.h>
35 #include <libkern/libkern.h>
36 //for write protection
37 #include <vm/vm_kern_xnu.h>
38 #include <vm/vm_map_xnu.h>
39 #include <mach/mach_vm.h>
40 
/* Byte-offset pointer arithmetic: (base + offset) cast to 'type'. */
#define PTR_ADD(type, base, offset)             (type)((uintptr_t)(base) + (offset))

// -- struct cpx --

/*
 * This structure contains the unwrapped key and is passed to the lower layers.
 * It is private so users must use the accessors declared in sys/cprotect.h
 * to read/write it.
 */

// cpx_flags defined in cprotect.h
enum {
	CPX_SEP_WRAPPEDKEY                      = 0x01, // cached key is SEP-wrapped, not raw
	CPX_IV_AES_CTX_INITIALIZED      = 0x02, // cpx_iv_aes_ctx_ptr holds a usable AES key
	CPX_USE_OFFSET_FOR_IV           = 0x04, // derive IVs from the file offset

	// Using AES IV context generated from key
	CPX_IV_AES_CTX_VFS                      = 0x08,
	CPX_SYNTHETIC_OFFSET_FOR_IV = 0x10,
	CPX_COMPOSITEKEY            = 0x20,

	//write page protection
	CPX_WRITE_PROTECTABLE           = 0x40  // backing page came from kmem_alloc; may be RO-protected
};

/*
 * variable-length CPX structure. See fixed-length variant in cprotect.h
 */
struct cpx {
#if DEBUG
	uint32_t                cpx_magic1;     // head guard word (see cpx_magic1 constant)
#endif
	aes_encrypt_ctx         *cpx_iv_aes_ctx_ptr;// Pointer to context used for generating the IV
	cpx_flags_t             cpx_flags;
	uint16_t                cpx_max_key_len;// capacity of cpx_cached_key, in bytes
	uint16_t                cpx_key_len;    // bytes of cpx_cached_key currently valid
	//fixed length up to here.  cpx_cached_key is variable-length
	uint8_t                 cpx_cached_key[];
};

/* Allows us to switch between CPX types */
typedef union cpxunion {
	struct cpx cpx_var;
	fcpx_t cpx_fixed;
} cpxunion_t;

/* Both zones hold key material, so ZC_ZFREE_CLEARMEM scrubs them on free. */
ZONE_DEFINE(cpx_zone, "cpx",
    sizeof(struct fcpx), ZC_ZFREE_CLEARMEM);
ZONE_DEFINE(aes_ctz_zone, "AES ctx",
    sizeof(aes_encrypt_ctx), ZC_ZFREE_CLEARMEM);

// Note: see struct fcpx defined in sys/cprotect.h

// -- cpx_t accessors --
95 
96 size_t
cpx_size(size_t key_len)97 cpx_size(size_t key_len)
98 {
99 	// This should pick up the 'magic' word in DEBUG for free.
100 	size_t size = sizeof(struct cpx) + key_len;
101 
102 	return size;
103 }
104 
105 size_t
cpx_sizex(const struct cpx * cpx)106 cpx_sizex(const struct cpx *cpx)
107 {
108 	return cpx_size(cpx->cpx_max_key_len);
109 }
110 
/*
 * Allocate a cpx able to cache up to 'key_len' key bytes.
 * Returns NULL only when 'key_len' exceeds VFS_CP_MAX_CACHEBUFLEN in the
 * zalloc configuration.  'needs_ctx' requests an IV AES context alongside
 * the cpx (honored only in the zalloc path below).
 */
cpx_t
cpx_alloc(size_t key_len, bool needs_ctx)
{
	cpx_t cpx = NULL;

#if CONFIG_KEYPAGE_WP
#pragma unused(key_len, needs_ctx)

	/*
	 * Macs only use 1 key per volume, so force it into its own page.
	 * This way, we can write-protect as needed.
	 */

	assert(cpx_size(key_len) <= PAGE_SIZE);
	// KMA_NOFAIL: this allocation cannot return NULL, so cpx is valid below.
	kmem_alloc(kernel_map, (vm_offset_t *)&cpx, PAGE_SIZE,
	    KMA_DATA | KMA_NOFAIL, VM_KERN_MEMORY_FILE);
	//mark the page as protectable, since kmem_alloc succeeded.
	cpx->cpx_flags |= CPX_WRITE_PROTECTABLE;
	// NOTE(review): needs_ctx is ignored on this path — no IV AES context is
	// allocated when keypage write-protection is enabled; confirm callers
	// never rely on one here.
#else
	/* If key page write protection disabled, just switch to zalloc */

	// error out if you try to request a key that's too big
	if (key_len > VFS_CP_MAX_CACHEBUFLEN) {
		return NULL;
	}

	// the actual key array is fixed-length, but the amount of usable content can vary, via 'key_len'
	cpx = zalloc_flags(cpx_zone, Z_WAITOK | Z_ZERO);

	// if our encryption type needs it, alloc the context
	if (needs_ctx) {
		cpx_alloc_ctx(cpx);
	}

#endif
	// stamps DEBUG guard words, clears flags/key_len, records capacity
	cpx_init(cpx, key_len);

	return cpx;
}
150 
151 int
cpx_alloc_ctx(cpx_t cpx)152 cpx_alloc_ctx(cpx_t cpx)
153 {
154 #if CONFIG_KEYPAGE_WP
155 	(void) cpx;
156 #else
157 	if (cpx->cpx_iv_aes_ctx_ptr) {
158 		// already allocated?
159 		return 0;
160 	}
161 
162 	cpx->cpx_iv_aes_ctx_ptr = zalloc_flags(aes_ctz_zone, Z_WAITOK | Z_ZERO);
163 #endif // CONFIG_KEYPAGE_WP
164 
165 	return 0;
166 }
167 
168 void
cpx_free_ctx(cpx_t cpx)169 cpx_free_ctx(cpx_t cpx)
170 {
171 #if CONFIG_KEYPAGE_WP
172 	(void) cpx;
173 # else
174 	if (cpx->cpx_iv_aes_ctx_ptr) {
175 		zfree(aes_ctz_zone, cpx->cpx_iv_aes_ctx_ptr);
176 	}
177 #endif // CONFIG_KEYPAGE_WP
178 }
179 
180 void
cpx_writeprotect(cpx_t cpx)181 cpx_writeprotect(cpx_t cpx)
182 {
183 #if CONFIG_KEYPAGE_WP
184 	void *cpxstart = (void*)cpx;
185 	if (cpx->cpx_flags & CPX_WRITE_PROTECTABLE) {
186 		mach_vm_protect(kernel_map, (vm_map_offset_t)cpxstart, PAGE_SIZE, false, (VM_PROT_READ));
187 	}
188 #else
189 	(void) cpx;
190 #endif
191 	return;
192 }
193 
#if DEBUG
/* Guard words stamped at the head and tail of every cpx in DEBUG builds. */
static const uint32_t cpx_magic1 = 0x7b787063;          // cpx{
static const uint32_t cpx_magic2 = 0x7870637d;          // }cpx
#endif
198 
/*
 * Release a cpx allocated by cpx_alloc(), scrubbing key material first.
 */
void
cpx_free(cpx_t cpx)
{
#if DEBUG
	/* Verify both guard words before trusting the structure. */
	assert(cpx->cpx_magic1 == cpx_magic1);
	assert(*PTR_ADD(uint32_t *, cpx, cpx_sizex(cpx) - 4) == cpx_magic2);
#endif

#if CONFIG_KEYPAGE_WP
	/* unprotect the page before bzeroing */
	void *cpxstart = (void*)cpx;
	if (cpx->cpx_flags & CPX_WRITE_PROTECTABLE) {
		mach_vm_protect(kernel_map, (vm_map_offset_t)cpxstart, PAGE_SIZE, false, (VM_PROT_DEFAULT));

		//now zero the memory after un-protecting it
		bzero(cpx->cpx_cached_key, cpx->cpx_max_key_len);

		//If we are here, then we used kmem_alloc to get the page. Must use kmem_free to drop it.
		kmem_free(kernel_map, (vm_offset_t)cpx, PAGE_SIZE);
		return;
	}
	// NOTE(review): if CPX_WRITE_PROTECTABLE is clear we fall through without
	// freeing anything.  cpx_alloc() always sets the flag in this config —
	// confirm no other allocation path can reach here.
#else
	// free the context if it wasn't already freed
	cpx_free_ctx(cpx);
	// cpx_zone is ZC_ZFREE_CLEARMEM, so the cached key is scrubbed on free.
	zfree(cpx_zone, cpx);
	return;
#endif
}
227 
/*
 * Initialize a freshly allocated cpx: no flags, no key yet, capacity of
 * 'key_len' bytes.  In DEBUG, stamps guard words at both ends so
 * cpx_free() can detect overruns.
 */
void
cpx_init(cpx_t cpx, size_t key_len)
{
#if DEBUG
	cpx->cpx_magic1 = cpx_magic1;
	// tail guard lives in the last 4 bytes of the allocation
	*PTR_ADD(uint32_t *, cpx, cpx_size(key_len) - 4) = cpx_magic2;
#endif
	cpx->cpx_flags = 0;
	cpx->cpx_key_len = 0;
	assert(key_len <= UINT16_MAX);
	cpx->cpx_max_key_len = (uint16_t)key_len;
}
240 
241 bool
cpx_is_sep_wrapped_key(const struct cpx * cpx)242 cpx_is_sep_wrapped_key(const struct cpx *cpx)
243 {
244 	return ISSET(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY);
245 }
246 
247 void
cpx_set_is_sep_wrapped_key(struct cpx * cpx,bool v)248 cpx_set_is_sep_wrapped_key(struct cpx *cpx, bool v)
249 {
250 	if (v) {
251 		SET(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY);
252 	} else {
253 		CLR(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY);
254 	}
255 }
256 
257 bool
cpx_is_composite_key(const struct cpx * cpx)258 cpx_is_composite_key(const struct cpx *cpx)
259 {
260 	return ISSET(cpx->cpx_flags, CPX_COMPOSITEKEY);
261 }
262 
263 void
cpx_set_is_composite_key(struct cpx * cpx,bool v)264 cpx_set_is_composite_key(struct cpx *cpx, bool v)
265 {
266 	if (v) {
267 		SET(cpx->cpx_flags, CPX_COMPOSITEKEY);
268 	} else {
269 		CLR(cpx->cpx_flags, CPX_COMPOSITEKEY);
270 	}
271 }
272 
273 bool
cpx_use_offset_for_iv(const struct cpx * cpx)274 cpx_use_offset_for_iv(const struct cpx *cpx)
275 {
276 	return ISSET(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV);
277 }
278 
279 void
cpx_set_use_offset_for_iv(struct cpx * cpx,bool v)280 cpx_set_use_offset_for_iv(struct cpx *cpx, bool v)
281 {
282 	if (v) {
283 		SET(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV);
284 	} else {
285 		CLR(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV);
286 	}
287 }
288 
289 bool
cpx_synthetic_offset_for_iv(const struct cpx * cpx)290 cpx_synthetic_offset_for_iv(const struct cpx *cpx)
291 {
292 	return ISSET(cpx->cpx_flags, CPX_SYNTHETIC_OFFSET_FOR_IV);
293 }
294 
295 void
cpx_set_synthetic_offset_for_iv(struct cpx * cpx,bool v)296 cpx_set_synthetic_offset_for_iv(struct cpx *cpx, bool v)
297 {
298 	if (v) {
299 		SET(cpx->cpx_flags, CPX_SYNTHETIC_OFFSET_FOR_IV);
300 	} else {
301 		CLR(cpx->cpx_flags, CPX_SYNTHETIC_OFFSET_FOR_IV);
302 	}
303 }
304 
305 uint16_t
cpx_max_key_len(const struct cpx * cpx)306 cpx_max_key_len(const struct cpx *cpx)
307 {
308 	return cpx->cpx_max_key_len;
309 }
310 
311 uint16_t
cpx_key_len(const struct cpx * cpx)312 cpx_key_len(const struct cpx *cpx)
313 {
314 	return cpx->cpx_key_len;
315 }
316 
317 void
cpx_set_key_len(struct cpx * cpx,uint16_t key_len)318 cpx_set_key_len(struct cpx *cpx, uint16_t key_len)
319 {
320 	cpx->cpx_key_len = key_len;
321 
322 	if (ISSET(cpx->cpx_flags, CPX_IV_AES_CTX_VFS)) {
323 		/*
324 		 * We assume that if the key length is being modified, the key
325 		 * has changed.  As a result, un-set any bits related to the
326 		 * AES context, if needed. They should be re-generated
327 		 * on-demand.
328 		 */
329 		CLR(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED | CPX_IV_AES_CTX_VFS);
330 	}
331 }
332 
333 bool
cpx_has_key(const struct cpx * cpx)334 cpx_has_key(const struct cpx *cpx)
335 {
336 	return cpx->cpx_key_len > 0;
337 }
338 
339 #pragma clang diagnostic push
340 #pragma clang diagnostic ignored "-Wcast-qual"
341 void *
cpx_key(const struct cpx * cpx)342 cpx_key(const struct cpx *cpx)
343 {
344 	return (void *)cpx->cpx_cached_key;
345 }
346 #pragma clang diagnostic pop
347 
348 void
cpx_set_aes_iv_key(struct cpx * cpx,void * iv_key)349 cpx_set_aes_iv_key(struct cpx *cpx, void *iv_key)
350 {
351 	if (cpx->cpx_iv_aes_ctx_ptr) {
352 		aes_encrypt_key128(iv_key, cpx->cpx_iv_aes_ctx_ptr);
353 		SET(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED | CPX_USE_OFFSET_FOR_IV);
354 		CLR(cpx->cpx_flags, CPX_IV_AES_CTX_VFS);
355 	}
356 }
357 
/*
 * Return the AES context used for IV generation, lazily deriving it from
 * the cached key (SHA-1 of the key bytes) when not yet initialized.
 */
aes_encrypt_ctx *
cpx_iv_aes_ctx(struct cpx *cpx)
{
	if (ISSET(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED)) {
		return cpx->cpx_iv_aes_ctx_ptr;
	}

	SHA1_CTX sha1ctxt;
	uint8_t digest[SHA_DIGEST_LENGTH]; /* Kiv */

	/* First init the cp_cache_iv_key[] */
	SHA1Init(&sha1ctxt);

	/*
	 * We can only use this when the keys are generated in the AP; As a result
	 * we only use the first 32 bytes of key length in the cache key
	 */
	// NOTE(review): despite the comment above, all cpx_key_len bytes are
	// hashed here, not just the first 32 — confirm which is intended.
	SHA1Update(&sha1ctxt, cpx->cpx_cached_key, cpx->cpx_key_len);
	SHA1Final(digest, &sha1ctxt);

	// installs the derived key and sets INITIALIZED (if a ctx is allocated)
	cpx_set_aes_iv_key(cpx, digest);
	// mark as VFS-derived so a later key change invalidates this context
	SET(cpx->cpx_flags, CPX_IV_AES_CTX_VFS);

	return cpx->cpx_iv_aes_ctx_ptr;
}
383 
384 void
cpx_flush(cpx_t cpx)385 cpx_flush(cpx_t cpx)
386 {
387 	bzero(cpx->cpx_cached_key, cpx->cpx_max_key_len);
388 	if (cpx->cpx_iv_aes_ctx_ptr) {
389 		bzero(cpx->cpx_iv_aes_ctx_ptr, sizeof(aes_encrypt_ctx));
390 	}
391 	cpx->cpx_flags = 0;
392 	cpx->cpx_key_len = 0;
393 }
394 
395 bool
cpx_can_copy(const struct cpx * src,const struct cpx * dst)396 cpx_can_copy(const struct cpx *src, const struct cpx *dst)
397 {
398 	return src->cpx_key_len <= dst->cpx_max_key_len;
399 }
400 
/*
 * Copy key material and flags from 'src' into 'dst'.  Caller must check
 * cpx_can_copy() first; no capacity check is performed here.
 */
void
cpx_copy(const struct cpx *src, cpx_t dst)
{
	uint16_t key_len = cpx_key_len(src);
	cpx_set_key_len(dst, key_len);
	memcpy(cpx_key(dst), cpx_key(src), key_len);
	dst->cpx_flags = src->cpx_flags;
	if (ISSET(dst->cpx_flags, CPX_IV_AES_CTX_INITIALIZED)) {
		// NOTE(review): assumes dst has its own context allocated whenever
		// src's INITIALIZED flag is set — confirm callers guarantee this.
		*(dst->cpx_iv_aes_ctx_ptr) = *(src->cpx_iv_aes_ctx_ptr); // deep copy
	}
}
412 
/* Discriminator for the union inside cp_vfs_callback_arg. */
typedef unsigned char cp_vfs_callback_arg_type_t;
enum {
	CP_TYPE_LOCK_STATE   = 0,
	CP_TYPE_EP_STATE     = 1,
	CP_TYPE_CX_STATE     = 2,
};

/* Argument threaded through vfs_iterate() to cp_vfs_callback(). */
typedef struct {
	cp_vfs_callback_arg_type_t type;        /* selects the valid union member */
	union {
		cp_lock_state_t lock_state;
		cp_ep_state_t   ep_state;
		cp_cx_state_t   cx_state;
	};
	int             valid_uuid;             /* nonzero: only notify volume_uuid's mount */
	uuid_t          volume_uuid;
} cp_vfs_callback_arg;
430 
/*
 * vfs_iterate() callback: deliver a lock/EP/CX state change to one mount
 * via the matching FIODEVICE* ioctl.  When 'valid_uuid' is set, only the
 * mount whose f_uuid matches 'volume_uuid' is notified; mounts that cannot
 * report a uuid are skipped.  Always returns 0 so iteration continues.
 */
static int
cp_vfs_callback(mount_t mp, void *arg)
{
	cp_vfs_callback_arg *callback_arg = (cp_vfs_callback_arg *)arg;

	if (callback_arg->valid_uuid) {
		struct vfs_attr va;
		VFSATTR_INIT(&va);
		VFSATTR_WANTED(&va, f_uuid);

		// can't read this mount's uuid: not our target, skip it
		if (vfs_getattr(mp, &va, vfs_context_current())) {
			return 0;
		}

		if (!VFSATTR_IS_SUPPORTED(&va, f_uuid)) {
			return 0;
		}

		if (memcmp(va.f_uuid, callback_arg->volume_uuid, sizeof(uuid_t))) {
			return 0;
		}
	}

	// forward the state change; the ioctl result is intentionally ignored
	switch (callback_arg->type) {
	case(CP_TYPE_LOCK_STATE):
		VFS_IOCTL(mp, FIODEVICELOCKED, (void *)(uintptr_t)callback_arg->lock_state, 0, vfs_context_kernel());
		break;
	case(CP_TYPE_EP_STATE):
		VFS_IOCTL(mp, FIODEVICEEPSTATE, (void *)(uintptr_t)callback_arg->ep_state, 0, vfs_context_kernel());
		break;
	case(CP_TYPE_CX_STATE):
		VFS_IOCTL(mp, FIODEVICECXSTATE, (void *)(uintptr_t)callback_arg->cx_state, 0, vfs_context_kernel());
		break;
	default:
		break;
	}
	return 0;
}
469 
470 int
cp_key_store_action(cp_key_store_action_t action)471 cp_key_store_action(cp_key_store_action_t action)
472 {
473 	cp_vfs_callback_arg callback_arg;
474 
475 	memset(callback_arg.volume_uuid, 0, sizeof(uuid_t));
476 	callback_arg.valid_uuid = 0;
477 
478 	switch (action) {
479 	case CP_ACTION_LOCKED:
480 	case CP_ACTION_UNLOCKED:
481 		callback_arg.type = CP_TYPE_LOCK_STATE;
482 		callback_arg.lock_state = (action == CP_ACTION_LOCKED ? CP_LOCKED_STATE : CP_UNLOCKED_STATE);
483 		return vfs_iterate(0, cp_vfs_callback, (void *)&callback_arg);
484 	case CP_ACTION_EP_INVALIDATED:
485 		callback_arg.type = CP_TYPE_EP_STATE;
486 		callback_arg.ep_state = CP_EP_INVALIDATED;
487 		return vfs_iterate(0, cp_vfs_callback, (void *)&callback_arg);
488 	case CP_ACTION_CX_EXPIRED:
489 		callback_arg.type = CP_TYPE_CX_STATE;
490 		callback_arg.cx_state = CP_CX_EXPIRED;
491 		return vfs_iterate(0, cp_vfs_callback, (void *)&callback_arg);
492 	default:
493 		return -1;
494 	}
495 }
496 
497 int
cp_key_store_action_for_volume(uuid_t volume_uuid,cp_key_store_action_t action)498 cp_key_store_action_for_volume(uuid_t volume_uuid, cp_key_store_action_t action)
499 {
500 	cp_vfs_callback_arg callback_arg;
501 
502 	memcpy(callback_arg.volume_uuid, volume_uuid, sizeof(uuid_t));
503 	callback_arg.valid_uuid = 1;
504 
505 	switch (action) {
506 	case CP_ACTION_LOCKED:
507 	case CP_ACTION_UNLOCKED:
508 		callback_arg.type = CP_TYPE_LOCK_STATE;
509 		callback_arg.lock_state = (action == CP_ACTION_LOCKED ? CP_LOCKED_STATE : CP_UNLOCKED_STATE);
510 		return vfs_iterate(0, cp_vfs_callback, (void *)&callback_arg);
511 	case CP_ACTION_EP_INVALIDATED:
512 		callback_arg.type = CP_TYPE_EP_STATE;
513 		callback_arg.ep_state = CP_EP_INVALIDATED;
514 		return vfs_iterate(0, cp_vfs_callback, (void *)&callback_arg);
515 	case CP_ACTION_CX_EXPIRED:
516 		callback_arg.type = CP_TYPE_CX_STATE;
517 		callback_arg.cx_state = CP_CX_EXPIRED;
518 		return vfs_iterate(0, cp_vfs_callback, (void *)&callback_arg);
519 	default:
520 		return -1;
521 	}
522 }
523 
524 int
cp_is_valid_class(int isdir,int32_t protectionclass)525 cp_is_valid_class(int isdir, int32_t protectionclass)
526 {
527 	/*
528 	 * The valid protection classes are from 0 -> N
529 	 * We use a signed argument to detect unassigned values from
530 	 * directory entry creation time in HFS.
531 	 */
532 	if (isdir) {
533 		/* Directories are not allowed to have F, but they can have "NONE" */
534 		return (protectionclass == PROTECTION_CLASS_CX) ||
535 		       ((protectionclass >= PROTECTION_CLASS_DIR_NONE) &&
536 		       (protectionclass <= PROTECTION_CLASS_D));
537 	} else {
538 		return (protectionclass >= PROTECTION_CLASS_A) &&
539 		       (protectionclass <= PROTECTION_CLASS_CX);
540 	}
541 }
542 
543 /*
544  * Parses versions of the form 12A316, i.e. <major><minor><revision> and
545  * returns a uint32_t in the form 0xaabbcccc where aa = <major>,
546  * bb = <ASCII char>, cccc = <revision>.
547  */
548 static cp_key_os_version_t
parse_os_version(const char * vers)549 parse_os_version(const char *vers)
550 {
551 	const char *p = vers;
552 
553 	int a = 0;
554 	while (*p >= '0' && *p <= '9') {
555 		a = a * 10 + *p - '0';
556 		++p;
557 	}
558 
559 	if (!a) {
560 		return 0;
561 	}
562 
563 	int b = *p++;
564 	if (!b) {
565 		return 0;
566 	}
567 
568 	int c = 0;
569 	while (*p >= '0' && *p <= '9') {
570 		c = c * 10 + *p - '0';
571 		++p;
572 	}
573 
574 	if (!c) {
575 		return 0;
576 	}
577 
578 	return (a & 0xff) << 24 | b << 16 | (c & 0xffff);
579 }
580 
581 cp_key_os_version_t
cp_os_version(void)582 cp_os_version(void)
583 {
584 	static cp_key_os_version_t cp_os_version;
585 
586 	if (cp_os_version) {
587 		return cp_os_version;
588 	}
589 
590 	if (!osversion[0]) {
591 		return 0;
592 	}
593 
594 	cp_os_version = parse_os_version(osversion);
595 	if (!cp_os_version) {
596 		printf("cp_os_version: unable to parse osversion `%s'\n", osversion);
597 		cp_os_version = 1;
598 	}
599 
600 	return cp_os_version;
601 }
602