xref: /xnu-12377.81.4/osfmk/arm/cpuid.c (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796)
1 /*
2  * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 
32 #include <pexpert/pexpert.h>
33 #include <arm/cpuid.h>
34 #include <arm/cpuid_internal.h>
35 #include <arm/cpu_data_internal.h>
36 #include <arm64/proc_reg.h>
37 #include <kern/lock_rw.h>
38 #include <vm/vm_page.h>
39 
40 #include <libkern/section_keywords.h>
41 
42 /* Temporary types to aid decoding,
43  * Everything in Little Endian */
44 
/*
 * Decoded view of the CLIDR (Cache Level ID Register).
 * Bitfields are declared LSB-first (little endian bit order).
 * Ctype values: 0x2 = data cache, 0x4 = unified cache (see do_cacheid()).
 */
typedef struct {
	uint32_t
	    Ctype1:3, /* 2:0 - Level 1 cache type */
	    Ctype2:3, /* 5:3 - Level 2 cache type */
	    Ctype3:3, /* 8:6 - Level 3 cache type */
	    Ctypes:15, /* 23:9 - Don't Care (levels 4-7; unused here) */
	    LoC:3, /* 26:24 - Level of Coherency */
	    LoU:3, /* 29:27 - Level of Unification */
	    RAZ:2; /* 31:30 - Read-As-Zero */
} arm_cache_clidr_t;
55 
/* Pun between the raw CLIDR register value and its decoded bitfields. */
typedef union {
	arm_cache_clidr_t bits;
	uint32_t          value;
} arm_cache_clidr_info_t;
60 
61 
/*
 * Decoded view of the CCSIDR (Cache Size ID Register) for the cache level
 * currently selected via CSSELR.  Fields are encoded: do_cacheid() applies
 * the +1 / power-of-two adjustments when computing actual geometry.
 */
typedef struct {
	uint32_t
	    LineSize:3, /* 2:0 - (log2 of words per line) - 2; see do_cacheid() */
	    Assoc:10, /* 12:3 - Associativity of cache, minus 1 */
	    NumSets:15, /* 27:13 - Number of sets in cache, minus 1 */
	    c_type:4; /* 31:28 - Cache type (WA/RA/WB/WT flags) */
} arm_cache_ccsidr_t;
69 
70 
/* Pun between the raw CCSIDR register value and its decoded bitfields. */
typedef union {
	arm_cache_ccsidr_t bits;
	uint32_t           value;
} arm_cache_ccsidr_info_t;
75 
76 /* Statics */
77 
/* Decoded MIDR/arch identification; populated once by do_cpuid(). */
static SECURITY_READ_ONLY_LATE(arm_cpu_info_t) cpuid_cpu_info;
/* Points at the cpuid_cache_info[] slot of whichever cluster type booted first. */
static SECURITY_READ_ONLY_LATE(cache_info_t *) cpuid_cache_info_boot_cpu;
/* Per-cluster-type cache geometry; each slot filled once by do_cacheid(). */
static cache_info_t cpuid_cache_info[MAX_CPU_TYPES] = { 0 };
/* One bit per cluster type, set when that type's geometry parse has started. */
static _Atomic uint8_t cpuid_cache_info_bitmap = 0;
82 
83 /* Code */
84 
/*
 * Read and cache the CPU identification registers.
 * Stores the raw MIDR value and the derived architecture version in
 * cpuid_cpu_info, later exposed through cpuid_info().
 */
__private_extern__
void
do_cpuid(void)
{
	cpuid_cpu_info.value = machine_read_midr();
#if (__ARM_ARCH__ == 8)

#if defined(HAS_APPLE_PAC)
	/* Cores with pointer authentication report the extended v8E arch. */
	cpuid_cpu_info.arm_info.arm_arch = CPU_ARCH_ARMv8E;
#else /* defined(HAS_APPLE_PAC) */
	cpuid_cpu_info.arm_info.arm_arch = CPU_ARCH_ARMv8;
#endif /* defined(HAS_APPLE_PAC) */

#else /* (__ARM_ARCH__ != 8) */
#error Unsupported arch
#endif /* (__ARM_ARCH__ != 8) */
}
102 
/*
 * Return the cached CPU identification info.
 * Only meaningful after do_cpuid() has populated it.
 */
arm_cpu_info_t *
cpuid_info(void)
{
	return &cpuid_cpu_info;
}
108 
109 int
cpuid_get_cpufamily(void)110 cpuid_get_cpufamily(void)
111 {
112 	int cpufamily = 0;
113 
114 	switch (cpuid_info()->arm_info.arm_implementor) {
115 	case CPU_VID_ARM:
116 		switch (cpuid_info()->arm_info.arm_part) {
117 		case CPU_PART_CORTEXA9:
118 			cpufamily = CPUFAMILY_ARM_14;
119 			break;
120 		case CPU_PART_CORTEXA8:
121 			cpufamily = CPUFAMILY_ARM_13;
122 			break;
123 		case CPU_PART_CORTEXA7:
124 			cpufamily = CPUFAMILY_ARM_15;
125 			break;
126 		case CPU_PART_1136JFS:
127 		case CPU_PART_1176JZFS:
128 			cpufamily = CPUFAMILY_ARM_11;
129 			break;
130 		case CPU_PART_926EJS:
131 		case CPU_PART_920T:
132 			cpufamily = CPUFAMILY_ARM_9;
133 			break;
134 		default:
135 			cpufamily = CPUFAMILY_UNKNOWN;
136 			break;
137 		}
138 		break;
139 
140 	case CPU_VID_INTEL:
141 		cpufamily = CPUFAMILY_ARM_XSCALE;
142 		break;
143 
144 	case CPU_VID_APPLE:
145 		switch (cpuid_info()->arm_info.arm_part) {
146 		case CPU_PART_TYPHOON:
147 		case CPU_PART_TYPHOON_CAPRI:
148 			cpufamily = CPUFAMILY_ARM_TYPHOON;
149 			break;
150 		case CPU_PART_TWISTER:
151 		case CPU_PART_TWISTER_ELBA_MALTA:
152 			cpufamily = CPUFAMILY_ARM_TWISTER;
153 			break;
154 		case CPU_PART_HURRICANE:
155 		case CPU_PART_HURRICANE_MYST:
156 			cpufamily = CPUFAMILY_ARM_HURRICANE;
157 			break;
158 		case CPU_PART_MONSOON:
159 		case CPU_PART_MISTRAL:
160 			cpufamily = CPUFAMILY_ARM_MONSOON_MISTRAL;
161 			break;
162 		case CPU_PART_VORTEX:
163 		case CPU_PART_TEMPEST:
164 		case CPU_PART_TEMPEST_M9:
165 		case CPU_PART_VORTEX_ARUBA:
166 		case CPU_PART_TEMPEST_ARUBA:
167 			cpufamily = CPUFAMILY_ARM_VORTEX_TEMPEST;
168 			break;
169 		case CPU_PART_LIGHTNING:
170 		case CPU_PART_THUNDER:
171 		case CPU_PART_THUNDER_M10:
172 			cpufamily = CPUFAMILY_ARM_LIGHTNING_THUNDER;
173 			break;
174 		case CPU_PART_FIRESTORM_JADE_CHOP:
175 		case CPU_PART_FIRESTORM_JADE_DIE:
176 		case CPU_PART_ICESTORM_JADE_CHOP:
177 		case CPU_PART_ICESTORM_JADE_DIE:
178 		case CPU_PART_FIRESTORM_SICILY:
179 		case CPU_PART_ICESTORM_SICILY:
180 		case CPU_PART_FIRESTORM_TONGA:
181 		case CPU_PART_ICESTORM_TONGA:
182 			cpufamily = CPUFAMILY_ARM_FIRESTORM_ICESTORM;
183 			break;
184 		case CPU_PART_BLIZZARD_STATEN:
185 		case CPU_PART_AVALANCHE_STATEN:
186 		case CPU_PART_BLIZZARD_RHODES_CHOP:
187 		case CPU_PART_AVALANCHE_RHODES_CHOP:
188 		case CPU_PART_BLIZZARD_RHODES_DIE:
189 		case CPU_PART_AVALANCHE_RHODES_DIE:
190 		case CPU_PART_BLIZZARD_ELLIS:
191 		case CPU_PART_AVALANCHE_ELLIS:
192 			cpufamily = CPUFAMILY_ARM_BLIZZARD_AVALANCHE;
193 			break;
194 		case CPU_PART_EVEREST:
195 		case CPU_PART_SAWTOOTH:
196 		case CPU_PART_SAWTOOTH_M11:
197 			cpufamily = CPUFAMILY_ARM_EVEREST_SAWTOOTH;
198 			break;
199 		case CPU_PART_ECORE_IBIZA:
200 		case CPU_PART_PCORE_IBIZA:
201 			cpufamily = CPUFAMILY_ARM_IBIZA;
202 			break;
203 		case CPU_PART_ECORE_PALMA:
204 		case CPU_PART_PCORE_PALMA:
205 			cpufamily = CPUFAMILY_ARM_PALMA;
206 			break;
207 		case CPU_PART_ECORE_COLL:
208 		case CPU_PART_PCORE_COLL:
209 			cpufamily = CPUFAMILY_ARM_COLL;
210 			break;
211 		case CPU_PART_ECORE_LOBOS:
212 		case CPU_PART_PCORE_LOBOS:
213 			cpufamily = CPUFAMILY_ARM_LOBOS;
214 			break;
215 		case CPU_PART_ECORE_DONAN:
216 		case CPU_PART_PCORE_DONAN:
217 			cpufamily = CPUFAMILY_ARM_DONAN;
218 			break;
219 		case CPU_PART_ECORE_BRAVA_S:
220 		case CPU_PART_PCORE_BRAVA_S:
221 		case CPU_PART_ECORE_BRAVA_C:
222 		case CPU_PART_PCORE_BRAVA_C:
223 			cpufamily = CPUFAMILY_ARM_BRAVA;
224 			break;
225 		case CPU_PART_ECORE_HIDRA:
226 		case CPU_PART_PCORE_HIDRA:
227 			cpufamily = CPUFAMILY_ARM_HIDRA;
228 			break;
229 		default:
230 			cpufamily = CPUFAMILY_UNKNOWN;
231 			break;
232 		}
233 		break;
234 
235 	default:
236 		cpufamily = CPUFAMILY_UNKNOWN;
237 		break;
238 	}
239 
240 	return cpufamily;
241 }
242 
243 int
cpuid_get_cpusubfamily(void)244 cpuid_get_cpusubfamily(void)
245 {
246 	int cpusubfamily = CPUSUBFAMILY_UNKNOWN;
247 
248 	if (cpuid_info()->arm_info.arm_implementor != CPU_VID_APPLE) {
249 		return cpusubfamily;
250 	}
251 
252 	switch (cpuid_info()->arm_info.arm_part) {
253 	case CPU_PART_TYPHOON:
254 	case CPU_PART_TWISTER:
255 	case CPU_PART_HURRICANE:
256 	case CPU_PART_MONSOON:
257 	case CPU_PART_MISTRAL:
258 	case CPU_PART_VORTEX:
259 	case CPU_PART_TEMPEST:
260 	case CPU_PART_LIGHTNING:
261 	case CPU_PART_THUNDER:
262 	case CPU_PART_FIRESTORM_SICILY:
263 	case CPU_PART_ICESTORM_SICILY:
264 	case CPU_PART_BLIZZARD_ELLIS:
265 	case CPU_PART_AVALANCHE_ELLIS:
266 	case CPU_PART_SAWTOOTH:
267 	case CPU_PART_EVEREST:
268 		cpusubfamily = CPUSUBFAMILY_ARM_HP;
269 		break;
270 	case CPU_PART_TYPHOON_CAPRI:
271 	case CPU_PART_TWISTER_ELBA_MALTA:
272 	case CPU_PART_HURRICANE_MYST:
273 	case CPU_PART_VORTEX_ARUBA:
274 	case CPU_PART_TEMPEST_ARUBA:
275 	case CPU_PART_FIRESTORM_TONGA:
276 	case CPU_PART_ICESTORM_TONGA:
277 	case CPU_PART_BLIZZARD_STATEN:
278 	case CPU_PART_AVALANCHE_STATEN:
279 		cpusubfamily = CPUSUBFAMILY_ARM_HG;
280 		break;
281 	case CPU_PART_TEMPEST_M9:
282 	case CPU_PART_THUNDER_M10:
283 	case CPU_PART_SAWTOOTH_M11:
284 		cpusubfamily = CPUSUBFAMILY_ARM_M;
285 		break;
286 	case CPU_PART_ECORE_IBIZA:
287 	case CPU_PART_PCORE_IBIZA:
288 		cpusubfamily = CPUSUBFAMILY_ARM_HG;
289 		break;
290 	case CPU_PART_ECORE_COLL:
291 	case CPU_PART_PCORE_COLL:
292 		cpusubfamily = CPUSUBFAMILY_ARM_HP;
293 		break;
294 	case CPU_PART_ECORE_PALMA:
295 	case CPU_PART_PCORE_PALMA:
296 		cpusubfamily = CPUSUBFAMILY_ARM_HC_HD;
297 		break;
298 	case CPU_PART_ECORE_LOBOS:
299 	case CPU_PART_PCORE_LOBOS:
300 		cpusubfamily = CPUSUBFAMILY_ARM_HS;
301 		break;
302 	case CPU_PART_FIRESTORM_JADE_CHOP:
303 	case CPU_PART_ICESTORM_JADE_CHOP:
304 		cpusubfamily = CPUSUBFAMILY_ARM_HS;
305 		break;
306 	case CPU_PART_FIRESTORM_JADE_DIE:
307 	case CPU_PART_ICESTORM_JADE_DIE:
308 		cpusubfamily = CPUSUBFAMILY_ARM_HC_HD;
309 		break;
310 	case CPU_PART_BLIZZARD_RHODES_CHOP:
311 	case CPU_PART_AVALANCHE_RHODES_CHOP:
312 		cpusubfamily = CPUSUBFAMILY_ARM_HS;
313 		break;
314 	case CPU_PART_BLIZZARD_RHODES_DIE:
315 	case CPU_PART_AVALANCHE_RHODES_DIE:
316 		cpusubfamily = CPUSUBFAMILY_ARM_HC_HD;
317 		break;
318 	case CPU_PART_ECORE_DONAN:
319 	case CPU_PART_PCORE_DONAN:
320 		cpusubfamily = CPUSUBFAMILY_ARM_HG;
321 		break;
322 	case CPU_PART_ECORE_BRAVA_S:
323 	case CPU_PART_PCORE_BRAVA_S:
324 		cpusubfamily = CPUSUBFAMILY_ARM_HS;
325 		break;
326 	case CPU_PART_ECORE_BRAVA_C:
327 	case CPU_PART_PCORE_BRAVA_C:
328 		cpusubfamily = CPUSUBFAMILY_ARM_HC_HD;
329 		break;
330 	case CPU_PART_ECORE_HIDRA:
331 	case CPU_PART_PCORE_HIDRA:
332 		cpusubfamily = CPUSUBFAMILY_ARM_HG;
333 		break;
334 	default:
335 		cpusubfamily = CPUSUBFAMILY_UNKNOWN;
336 		break;
337 	}
338 
339 	return cpusubfamily;
340 }
341 
/* Read and cache the debug-architecture ID registers via the machine layer. */
void
do_debugid(void)
{
	machine_do_debugid();
}
347 
/* Return the machine layer's cached debug info (populated by do_debugid()). */
arm_debug_info_t *
arm_debug_info(void)
{
	return machine_arm_debug_info();
}
353 
/*
 * Read and cache the MVFP (floating point / SIMD) ID registers via the
 * machine layer.
 *
 * Fix: the previous body was `return machine_do_mvfpid();`.  A `return`
 * with an expression is a constraint violation in a void function under
 * ISO C (C11 6.8.6.4p1); plain call form is equivalent and conforming.
 */
void
do_mvfpid(void)
{
	machine_do_mvfpid();
}
359 
/* Return the machine layer's cached MVFP info (populated by do_mvfpid()). */
arm_mvfp_info_t *
arm_mvfp_info(void)
{
	return machine_arm_mvfp_info();
}
366 
367 
/*
 * Parse the cache geometry (CLIDR/CCSIDR) visible from the calling CPU and
 * record it in the cpuid_cache_info[] slot for this CPU's cluster type.
 * Runs to completion exactly once per cluster type; later callers of the
 * same type return immediately.  The first CPU through here also becomes
 * the "boot CPU" whose slot cache_info() returns.
 */
void
do_cacheid(void)
{
	arm_cache_clidr_info_t arm_cache_clidr_info;
	arm_cache_ccsidr_info_t arm_cache_ccsidr_info;

	/*
	 * We only need to parse cache geometry parameters once per cluster type.
	 * Skip this if some other core of the same type has already parsed them.
	 * The atomic fetch-or makes exactly one core per type the winner.
	 */
	cluster_type_t cluster_type = ml_get_topology_info()->cpus[ml_get_cpu_number_local()].cluster_type;
	uint8_t prev_cpuid_cache_info_bitmap = os_atomic_or_orig(&cpuid_cache_info_bitmap,
	    (uint8_t)(1 << cluster_type), acq_rel);
	if (prev_cpuid_cache_info_bitmap & (1 << cluster_type)) {
		return;
	}

	cache_info_t *cpuid_cache_info_p = &cpuid_cache_info[cluster_type];

	arm_cache_clidr_info.value = machine_read_clidr();


	/*
	 * For compatibility purposes with existing callers, let's cache the boot CPU
	 * cache parameters and return those upon any call to cache_info();
	 * prev bitmap == 0 means no cluster type had started yet, i.e. we are first.
	 */
	if (prev_cpuid_cache_info_bitmap == 0) {
		cpuid_cache_info_boot_cpu = cpuid_cache_info_p;
	}

	/* Select L1 data/unified cache (CSSELR chooses which level CCSIDR reflects) */

	machine_write_csselr(CSSELR_L1, CSSELR_DATA_UNIFIED);
	arm_cache_ccsidr_info.value = machine_read_ccsidr();

	/* CLIDR Ctype1 == 0x4 means the L1 cache is unified (not split I/D). */
	cpuid_cache_info_p->c_unified = (arm_cache_clidr_info.bits.Ctype1 == 0x4) ? 1 : 0;

	switch (arm_cache_ccsidr_info.bits.c_type) {
	case 0x1:
		cpuid_cache_info_p->c_type = CACHE_WRITE_ALLOCATION;
		break;
	case 0x2:
		cpuid_cache_info_p->c_type = CACHE_READ_ALLOCATION;
		break;
	case 0x4:
		cpuid_cache_info_p->c_type = CACHE_WRITE_BACK;
		break;
	case 0x8:
		cpuid_cache_info_p->c_type = CACHE_WRITE_THROUGH;
		break;
	default:
		cpuid_cache_info_p->c_type = CACHE_UNKNOWN;
	}

	/* Line size in bytes: 4 * 2^(LineSize + 2) words-to-bytes decode. */
	cpuid_cache_info_p->c_linesz = 4 * (1 << (arm_cache_ccsidr_info.bits.LineSize + 2));
	cpuid_cache_info_p->c_assoc = (arm_cache_ccsidr_info.bits.Assoc + 1);

	/* I cache size (provisional; recomputed below if the L1 is split) */
	cpuid_cache_info_p->c_isize = (arm_cache_ccsidr_info.bits.NumSets + 1) * cpuid_cache_info_p->c_linesz * cpuid_cache_info_p->c_assoc;

	/* D cache size = sets * line size * ways */
	cpuid_cache_info_p->c_dsize = (arm_cache_ccsidr_info.bits.NumSets + 1) * cpuid_cache_info_p->c_linesz * cpuid_cache_info_p->c_assoc;


	/* Probe the outer cache level, if CLIDR reports one (0x4 unified, 0x2 data). */
	if ((arm_cache_clidr_info.bits.Ctype3 == 0x4) ||
	    (arm_cache_clidr_info.bits.Ctype2 == 0x4) || (arm_cache_clidr_info.bits.Ctype2 == 0x2)) {
		if (arm_cache_clidr_info.bits.Ctype3 == 0x4) {
			/* Select L3 (LLC) if the SoC is new enough to have that.
			 * This will be the second-level cache for the highest-performing ACC. */
			machine_write_csselr(CSSELR_L3, CSSELR_DATA_UNIFIED);
		} else {
			/* Select L2 data cache */
			machine_write_csselr(CSSELR_L2, CSSELR_DATA_UNIFIED);
		}
		arm_cache_ccsidr_info.value = machine_read_ccsidr();

		/* NOTE: c_linesz/c_assoc are overwritten with the outer level's values. */
		cpuid_cache_info_p->c_linesz = 4 * (1 << (arm_cache_ccsidr_info.bits.LineSize + 2));
		cpuid_cache_info_p->c_assoc = (arm_cache_ccsidr_info.bits.Assoc + 1);
		cpuid_cache_info_p->c_l2size = (arm_cache_ccsidr_info.bits.NumSets + 1) * cpuid_cache_info_p->c_linesz * cpuid_cache_info_p->c_assoc;
		cpuid_cache_info_p->c_inner_cache_size = cpuid_cache_info_p->c_dsize;
		cpuid_cache_info_p->c_bulksize_op = cpuid_cache_info_p->c_l2size;

		/* capri has a 2MB L2 cache unlike every other SoC up to this
		 * point with a 1MB L2 cache, so to get the same performance
		 * gain from coloring, we have to double the number of colors.
		 * Note that in general (and in fact as it's implemented in
		 * i386/cpuid.c), the number of colors is calculated as the
		 * cache line size * the number of sets divided by the page
		 * size. Also note that for H8 devices and up, the page size
		 * will be 16k instead of 4, which will reduce the number of
		 * colors required. Thus, this is really a temporary solution
		 * for capri specifically that we may want to generalize later:
		 *
		 * TODO: Are there any special considerations for our unusual
		 * cache geometries (3MB)?
		 */
		vm_cache_geometry_colors = ((arm_cache_ccsidr_info.bits.NumSets + 1) * cpuid_cache_info_p->c_linesz) / PAGE_SIZE;
		kprintf(" vm_cache_geometry_colors: %d\n", vm_cache_geometry_colors);
	} else {
		/* No outer cache level: bulk cache operations span the L1 D-cache. */
		cpuid_cache_info_p->c_l2size = 0;

		cpuid_cache_info_p->c_inner_cache_size = cpuid_cache_info_p->c_dsize;
		cpuid_cache_info_p->c_bulksize_op = cpuid_cache_info_p->c_dsize;
	}

	/* Split L1: re-read CCSIDR for the instruction side to get the true I size. */
	if (cpuid_cache_info_p->c_unified == 0) {
		machine_write_csselr(CSSELR_L1, CSSELR_INSTR);
		arm_cache_ccsidr_info.value = machine_read_ccsidr();
		uint32_t c_linesz = 4 * (1 << (arm_cache_ccsidr_info.bits.LineSize + 2));
		uint32_t c_assoc = (arm_cache_ccsidr_info.bits.Assoc + 1);
		/* I cache size */
		cpuid_cache_info_p->c_isize = (arm_cache_ccsidr_info.bits.NumSets + 1) * c_linesz * c_assoc;
	}

	if (cpuid_cache_info_p == cpuid_cache_info_boot_cpu) {
		/* Boot CPU's slot: no one can be waiting on it yet. */
		cpuid_cache_info_p->c_valid = true;
	} else {
		/* Publish with release ordering, then wake cache_info_type() waiters. */
		os_atomic_store(&cpuid_cache_info_p->c_valid, true, release);
		thread_wakeup((event_t)&cpuid_cache_info_p->c_valid);
	}

	kprintf("%s() - %u bytes %s cache (I:%u D:%u (%s)), %u-way assoc, %u bytes/line\n",
	    __FUNCTION__,
	    cpuid_cache_info_p->c_dsize + cpuid_cache_info_p->c_isize,
	    ((cpuid_cache_info_p->c_type == CACHE_WRITE_BACK) ? "WB" :
	    (cpuid_cache_info_p->c_type == CACHE_WRITE_THROUGH ? "WT" : "Unknown")),
	    cpuid_cache_info_p->c_isize,
	    cpuid_cache_info_p->c_dsize,
	    (cpuid_cache_info_p->c_unified) ? "unified" : "separate",
	    cpuid_cache_info_p->c_assoc,
	    cpuid_cache_info_p->c_linesz);
}
500 
/*
 * Compatibility accessor: return the cache parameters of the boot CPU's
 * cluster type.  Callers that care about a specific cluster type should
 * use cache_info_type() instead.
 */
cache_info_t   *
cache_info(void)
{
	return cpuid_cache_info_boot_cpu;
}
506 
/*
 * Return the cache parameters for the given cluster type, blocking
 * (uninterruptibly) until a CPU of that type has completed do_cacheid().
 */
cache_info_t   *
cache_info_type(cluster_type_t cluster_type)
{
	assert((cluster_type >= 0) && (cluster_type < MAX_CPU_TYPES));
	cache_info_t *ret = &cpuid_cache_info[cluster_type];

	/*
	 * cpuid_cache_info_boot_cpu is always populated by the time
	 * cache_info_type() is callable.  Other clusters may not have completed
	 * do_cacheid() yet.
	 */
	if (ret == cpuid_cache_info_boot_cpu) {
		return ret;
	}

	/*
	 * Missed-wakeup-safe wait: register the wait first, then re-check the
	 * condition before actually blocking, so a thread_wakeup() issued by
	 * do_cacheid() between the loop test and assert_wait() is not lost.
	 */
	while (!os_atomic_load(&ret->c_valid, acquire)) {
		assert_wait((event_t)&ret->c_valid, THREAD_UNINT);
		if (os_atomic_load(&ret->c_valid, relaxed)) {
			/* Became valid after assert_wait(); cancel the pending wait. */
			clear_wait(current_thread(), THREAD_AWAKENED);
		} else {
			thread_block(THREAD_CONTINUE_NULL);
		}
	}

	return ret;
}
533