1 /* 2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved. 3 * 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ 5 * 6 * This file contains Original Code and/or Modifications of Original Code 7 * as defined in and that are subject to the Apple Public Source License 8 * Version 2.0 (the 'License'). You may not use this file except in 9 * compliance with the License. The rights granted to you under the License 10 * may not be used to create, or enable the creation or redistribution of, 11 * unlawful or unlicensed copies of an Apple operating system, or to 12 * circumvent, violate, or enable the circumvention or violation of, any 13 * terms of an Apple operating system software license agreement. 14 * 15 * Please obtain a copy of the License at 16 * http://www.opensource.apple.com/apsl/ and read it before using this file. 17 * 18 * The Original Code and all software distributed under the License are 19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 23 * Please see the License for the specific language governing rights and 24 * limitations under the License. 25 * 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ 27 */ 28 29 #ifndef _MACH_THREAD_POLICY_H_ 30 #define _MACH_THREAD_POLICY_H_ 31 32 #include <mach/mach_types.h> 33 34 /* 35 * These are the calls for accessing the policy parameters 36 * of a particular thread. 
 *
 * The extra 'get_default' parameter to the second call is
 * IN/OUT as follows:
 * 1) if asserted on the way in it indicates that the default
 * values should be returned, not the ones currently set, in
 * this case 'get_default' will always be asserted on return;
 * 2) if unasserted on the way in, the current settings are
 * desired and if still unasserted on return, then the info
 * returned reflects the current settings, otherwise if
 * 'get_default' returns asserted, it means that there are no
 * current settings due to other parameters taking precedence,
 * and the default ones are being returned instead.
 */

/* Selects which policy flavor a thread_policy_set/get call refers to. */
typedef natural_t	thread_policy_flavor_t;
/* Generic pointer to a flavor-specific policy structure, passed as an
 * array of integer_t ('count' below is measured in integer_t units). */
typedef integer_t	*thread_policy_t;

/*
 * kern_return_t	thread_policy_set(
 *					thread_t			thread,
 *					thread_policy_flavor_t		flavor,
 *					thread_policy_t			policy_info,
 *					mach_msg_type_number_t		count);
 *
 * kern_return_t	thread_policy_get(
 *					thread_t			thread,
 *					thread_policy_flavor_t		flavor,
 *					thread_policy_t			policy_info,
 *					mach_msg_type_number_t		*count,
 *					boolean_t			*get_default);
 */

/*
 * Defined flavors.
 */
/*
 * THREAD_STANDARD_POLICY:
 *
 * This is the standard (fair) scheduling mode, assigned to new
 * threads.  The thread will be given processor time in a manner
 * which apportions approximately equal share to long running
 * computations.
 *
 * Parameters:
 *	[none]
 */

#define THREAD_STANDARD_POLICY		1

struct thread_standard_policy {
	/* This flavor takes no parameters; placeholder field only. */
	natural_t		no_data;
};

typedef struct thread_standard_policy	thread_standard_policy_data_t;
typedef struct thread_standard_policy	*thread_standard_policy_t;

/* No parameters, so the count is 0 (unlike the sizeof-derived counts below). */
#define THREAD_STANDARD_POLICY_COUNT	0

/*
 * THREAD_EXTENDED_POLICY:
 *
 * Extended form of THREAD_STANDARD_POLICY, which supplies a
 * hint indicating whether this is a long running computation.
 *
 * Parameters:
 *
 * timeshare: TRUE (the default) results in identical scheduling
 * behavior as THREAD_STANDARD_POLICY.
 */

/* NOTE: deliberately shares the flavor value 1 with THREAD_STANDARD_POLICY. */
#define THREAD_EXTENDED_POLICY		1

struct thread_extended_policy {
	boolean_t		timeshare;	/* TRUE: behave as THREAD_STANDARD_POLICY */
};

typedef struct thread_extended_policy	thread_extended_policy_data_t;
typedef struct thread_extended_policy	*thread_extended_policy_t;

/* Size of the policy data, expressed in integer_t units. */
#define THREAD_EXTENDED_POLICY_COUNT	((mach_msg_type_number_t) \
	(sizeof (thread_extended_policy_data_t) / sizeof (integer_t)))

/*
 * THREAD_TIME_CONSTRAINT_POLICY:
 *
 * This scheduling mode is for threads which have real time
 * constraints on their execution.
 *
 * Parameters:
 *
 * period: This is the nominal amount of time between separate
 * processing arrivals, specified in absolute time units.  A
 * value of 0 indicates that there is no inherent periodicity in
 * the computation.
 *
 * computation: This is the nominal amount of computation
 * time needed during a separate processing arrival, specified
 * in absolute time units.  The thread may be preempted after
 * the computation time has elapsed.
 * If (computation < constraint/2) it will be forced to
 * constraint/2 to avoid unintended preemption and associated
 * timer interrupts.
 *
 * constraint: This is the maximum amount of real time that
 * may elapse from the start of a separate processing arrival
 * to the end of computation for logically correct functioning,
 * specified in absolute time units.  Must be (>= computation).
 * Note that latency = (constraint - computation).
 *
 * preemptible: IGNORED (This indicates that the computation may be
 * interrupted, subject to the constraint specified above.)
 */

#define THREAD_TIME_CONSTRAINT_POLICY	2

struct thread_time_constraint_policy {
	uint32_t		period;		/* nominal time between arrivals, abs time units (0 = aperiodic) */
	uint32_t		computation;	/* nominal computation time per arrival, abs time units */
	uint32_t		constraint;	/* max real time from arrival to end of computation; >= computation */
	boolean_t		preemptible;	/* IGNORED (see flavor comment above) */
};

typedef struct thread_time_constraint_policy	\
		thread_time_constraint_policy_data_t;
typedef struct thread_time_constraint_policy	\
		*thread_time_constraint_policy_t;

/* Size of the policy data, expressed in integer_t units. */
#define THREAD_TIME_CONSTRAINT_POLICY_COUNT	((mach_msg_type_number_t) \
	(sizeof (thread_time_constraint_policy_data_t) / sizeof (integer_t)))

/*
 * THREAD_PRECEDENCE_POLICY:
 *
 * This may be used to indicate the relative value of the
 * computation compared to the other threads in the task.
 *
 * Parameters:
 *
 * importance: The importance is specified as a signed value.
 */

#define THREAD_PRECEDENCE_POLICY	3

struct thread_precedence_policy {
	integer_t		importance;	/* signed relative importance within the task */
};

typedef struct thread_precedence_policy		thread_precedence_policy_data_t;
typedef struct thread_precedence_policy		*thread_precedence_policy_t;

/* Size of the policy data, expressed in integer_t units. */
#define THREAD_PRECEDENCE_POLICY_COUNT	((mach_msg_type_number_t) \
	(sizeof (thread_precedence_policy_data_t) / sizeof (integer_t)))

/*
 * THREAD_AFFINITY_POLICY:
 *
 * This policy is experimental.
 * This may be used to express affinity relationships
 * between threads in the task.  Threads with the same affinity tag will
 * be scheduled to share an L2 cache if possible.  That is, affinity tags
 * are a hint to the scheduler for thread placement.
 *
 * The namespace of affinity tags is generally local to one task.  However,
 * a child task created after the assignment of affinity tags by its parent
 * will share that namespace.  In particular, a family of forked processes
 * may be created with a shared affinity namespace.
 *
 * Parameters:
 * tag: The affinity set identifier.
 */

#define THREAD_AFFINITY_POLICY		4

struct thread_affinity_policy {
	integer_t	affinity_tag;	/* affinity set identifier (scheduler hint; see comment above) */
};

/* Reserved 'null' affinity tag value. */
#define THREAD_AFFINITY_TAG_NULL	0

typedef struct thread_affinity_policy	thread_affinity_policy_data_t;
typedef struct thread_affinity_policy	*thread_affinity_policy_t;

/* Size of the policy data, expressed in integer_t units. */
#define THREAD_AFFINITY_POLICY_COUNT	((mach_msg_type_number_t) \
	(sizeof (thread_affinity_policy_data_t) / sizeof (integer_t)))

/*
 * THREAD_BACKGROUND_POLICY:
 */

#define THREAD_BACKGROUND_POLICY	5

struct thread_background_policy {
	integer_t	priority;
};

#define THREAD_BACKGROUND_POLICY_DARWIN_BG	0x1000

typedef struct thread_background_policy	thread_background_policy_data_t;
typedef struct thread_background_policy	*thread_background_policy_t;

/* Size of the policy data, expressed in integer_t units. */
#define THREAD_BACKGROUND_POLICY_COUNT	((mach_msg_type_number_t) \
	(sizeof (thread_background_policy_data_t) / sizeof (integer_t)))

/* NOTE: flavor 6 (THREAD_POLICY_STATE) is defined in the PRIVATE section below. */

#define THREAD_LATENCY_QOS_POLICY	7
typedef integer_t	thread_latency_qos_t;

struct thread_latency_qos_policy {
	thread_latency_qos_t thread_latency_qos_tier;	/* timer latency QoS tier */
};

typedef struct thread_latency_qos_policy	thread_latency_qos_policy_data_t;
typedef struct thread_latency_qos_policy	*thread_latency_qos_policy_t;

/* Size of the policy data, expressed in integer_t units. */
#define THREAD_LATENCY_QOS_POLICY_COUNT	((mach_msg_type_number_t) \
	(sizeof (thread_latency_qos_policy_data_t) / sizeof (integer_t)))

#define THREAD_THROUGHPUT_QOS_POLICY	8
typedef integer_t	thread_throughput_qos_t;

struct thread_throughput_qos_policy {
	thread_throughput_qos_t thread_throughput_qos_tier;	/* computation throughput QoS tier */
};

typedef struct thread_throughput_qos_policy	thread_throughput_qos_policy_data_t;
typedef struct thread_throughput_qos_policy	*thread_throughput_qos_policy_t;

/* Size of the policy data, expressed in integer_t units. */
#define THREAD_THROUGHPUT_QOS_POLICY_COUNT	((mach_msg_type_number_t) \
	(sizeof (thread_throughput_qos_policy_data_t) / sizeof (integer_t)))

#ifdef PRIVATE

/*
 * THREAD_POLICY_STATE:
 */
#define THREAD_POLICY_STATE		6

#define THREAD_POLICY_STATE_FLAG_STATIC_PARAM	0x1

struct thread_policy_state {
	integer_t requested;
	integer_t effective;
	integer_t pending;
	integer_t flags;			/* THREAD_POLICY_STATE_FLAG_* */
	/* NOTE(review): the thps_*_policy fields presumably mirror the
	 * thread_requested_policy / thread_effective_policy bitfields
	 * defined at the bottom of this file — confirm against the
	 * kernel's task policy code. */
	uint64_t thps_requested_policy;
	uint64_t thps_effective_policy;
	uint32_t thps_user_promotions;
	uint32_t thps_user_promotion_basepri;
	uint32_t thps_ipc_overrides;
	uint32_t reserved32;
	uint64_t reserved[2];
};

typedef struct thread_policy_state	thread_policy_state_data_t;
typedef struct thread_policy_state	*thread_policy_state_t;

/* Size of the policy data, expressed in integer_t units. */
#define THREAD_POLICY_STATE_COUNT	((mach_msg_type_number_t) \
	(sizeof (thread_policy_state_data_t) / sizeof (integer_t)))

/*
 * THREAD_QOS_POLICY:
 */
#define THREAD_QOS_POLICY	9

/* QoS classes; higher numeric value means a higher service class
 * (see the override discussion below, where UTILITY may be raised
 * to USER_INTERACTIVE). */
typedef uint8_t thread_qos_t;
#define THREAD_QOS_UNSPECIFIED		0
#define THREAD_QOS_DEFAULT		THREAD_QOS_UNSPECIFIED	/* Temporary rename */
#define THREAD_QOS_MAINTENANCE		1
#define THREAD_QOS_BACKGROUND		2
#define THREAD_QOS_UTILITY		3
#define THREAD_QOS_LEGACY		4	/* i.e. default workq threads */
#define THREAD_QOS_USER_INITIATED	5
#define THREAD_QOS_USER_INTERACTIVE	6

/* One past the highest valid QoS class value. */
#define THREAD_QOS_LAST			7

#define THREAD_QOS_MIN_TIER_IMPORTANCE	(-15)

/*
 * Overrides are inputs to the task/thread policy engine that
 * temporarily elevate the effective QoS of a thread without changing
 * its steady-state (and round-trip-able) requested QoS.  The
 * interfaces into the kernel allow the caller to associate a resource
 * and type that describe the reason/lifecycle of the override.  For
 * instance, a contended pthread_mutex_t held by a UTILITY thread
 * might get an override to USER_INTERACTIVE, with the resource
 * being the userspace address of the pthread_mutex_t.  When the
 * owning thread releases that resource, it can call into the
 * task policy subsystem to drop the override because of that resource,
 * although if more contended locks are held by the thread, the
 * effective QoS may remain overridden for longer.
 *
 * THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX is used for contended
 * pthread_mutex_t's via the pthread kext.  The holder gets an override
 * with resource=&mutex and a count of 1 by the initial contender.
 * Subsequent contenders raise the QoS value, until the holder
 * decrements the count to 0 and the override is released.
 *
 * THREAD_QOS_OVERRIDE_TYPE_PTHREAD_RWLOCK is unimplemented and has no
 * specified semantics.
 *
 * THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE are explicitly
 * paired start/end overrides on a target thread.  The resource can
 * either be a memory allocation in userspace, or the pthread_t of the
 * overrider if no allocation was used.
 *
 * THREAD_QOS_OVERRIDE_TYPE_WILDCARD is a catch-all which will reset every
 * resource matching the resource value.  Passing
 * THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD as well will reset everything.
 */

#define THREAD_QOS_OVERRIDE_TYPE_UNKNOWN			(0)
#define THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX			(1)
#define THREAD_QOS_OVERRIDE_TYPE_PTHREAD_RWLOCK			(2)	/* unimplemented; see comment above */
#define THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE	(3)
/* NOTE: value 4 is intentionally skipped in this numbering. */
#define THREAD_QOS_OVERRIDE_TYPE_WILDCARD			(5)

/* A special resource value to indicate a resource wildcard */
#define THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD	(~((user_addr_t)0))

struct thread_qos_policy {
	integer_t qos_tier;		/* a THREAD_QOS_* class value */
	integer_t tier_importance;	/* see THREAD_QOS_MIN_TIER_IMPORTANCE */
};

typedef struct thread_qos_policy	thread_qos_policy_data_t;
typedef struct thread_qos_policy	*thread_qos_policy_t;

/* Size of the policy data, expressed in integer_t units. */
#define THREAD_QOS_POLICY_COUNT	((mach_msg_type_number_t) \
	(sizeof (thread_qos_policy_data_t) / sizeof (integer_t)))

/*
 * THREAD_TIME_CONSTRAINT_WITH_PRIORITY_POLICY:
 *
 * This scheduling mode is for threads which have real time
 * constraints on their execution with support for multiple
 * real time priorities.
 *
 * Threads are ordered by highest priority first then, for
 * threads of the same priority, by earliest deadline first.
 * But if sched_rt_runq_strict_priority is false, a lower priority
 * thread with an earlier deadline will be preferred over a higher
 * priority thread with a later deadline, as long as both threads'
 * computations will fit before the later deadline.
 *
 * Parameters:
 *
 * period: This is the nominal amount of time between separate
 * processing arrivals, specified in absolute time units.  A
 * value of 0 indicates that there is no inherent periodicity in
 * the computation.
 *
 * computation: This is the nominal amount of computation
 * time needed during a separate processing arrival, specified
 * in absolute time units.  The thread may be preempted after
 * the computation time has elapsed.
 * If (computation < constraint/2) it will be forced to
 * constraint/2 to avoid unintended preemption and associated
 * timer interrupts.
 *
 * constraint: This is the maximum amount of real time that
 * may elapse from the start of a separate processing arrival
 * to the end of computation for logically correct functioning,
 * specified in absolute time units.  Must be (>= computation).
 * Note that latency = (constraint - computation).
 *
 * preemptible: IGNORED (This indicates that the computation may be
 * interrupted, subject to the constraint specified above.)
 *
 * priority: This is the scheduling priority of the thread.
 * User processes may only set the default priority of
 * TIME_CONSTRAINT_POLICY_DEFAULT_PRIORITY.  Higher priorities
 * up to TIME_CONSTRAINT_POLICY_MAXIMUM_PRIORITY are reserved
 * for system use and attempts to set them will fail.
 */

#define THREAD_TIME_CONSTRAINT_WITH_PRIORITY_POLICY	10

struct thread_time_constraint_with_priority_policy {
	uint32_t		period;		/* nominal time between arrivals, abs time units (0 = aperiodic) */
	uint32_t		computation;	/* nominal computation time per arrival, abs time units */
	uint32_t		constraint;	/* max real time from arrival to end of computation; >= computation */
	boolean_t		preemptible;	/* IGNORED (see flavor comment above) */
	uint32_t		priority;	/* scheduling priority; see TIME_CONSTRAINT_POLICY_*_PRIORITY */
};

typedef struct thread_time_constraint_with_priority_policy \
		thread_time_constraint_with_priority_policy_data_t;
typedef struct thread_time_constraint_with_priority_policy \
		*thread_time_constraint_with_priority_policy_t;

/* Size of the policy data, expressed in integer_t units. */
#define THREAD_TIME_CONSTRAINT_WITH_PRIORITY_POLICY_COUNT	((mach_msg_type_number_t) \
	(sizeof (thread_time_constraint_with_priority_policy_data_t) / sizeof (integer_t)))

/* Only DEFAULT is settable by user processes; values above it, up to
 * MAXIMUM, are reserved for system use (see flavor comment above). */
#define TIME_CONSTRAINT_POLICY_DEFAULT_PRIORITY	97
#define TIME_CONSTRAINT_POLICY_MAXIMUM_PRIORITY	127

#endif /* PRIVATE */

#ifdef PRIVATE

/*
 * Internal bitfields are privately exported for revlocked tracing tools
 * like msa to decode tracepoints.
 *
 * These struct definitions *will* change in the future.
 * When they do, we will update THREAD_POLICY_INTERNAL_STRUCT_VERSION.
 */

#define THREAD_POLICY_INTERNAL_STRUCT_VERSION 7

/*
 * Requested (input) thread policy bits.  The bitfield widths sum to
 * exactly 64 bits (40 used + 24 reserved).
 * NOTE(review): the thrp_int_* / thrp_ext_* pairs appear to track
 * internally- vs externally-applied settings — confirm against the
 * kernel's task policy engine.
 */
struct thread_requested_policy {
	uint64_t	thrp_int_darwinbg       :1,	/* marked as darwinbg via setpriority */
			thrp_ext_darwinbg       :1,
			thrp_int_iotier         :2,	/* IO throttle tier */
			thrp_ext_iotier         :2,
			thrp_int_iopassive      :1,	/* should IOs cause lower tiers to be throttled */
			thrp_ext_iopassive      :1,
			thrp_latency_qos        :3,	/* Timer latency QoS */
			thrp_through_qos        :3,	/* Computation throughput QoS */

			thrp_pidbind_bg         :1,	/* task i'm bound to is marked 'watchbg' */
			thrp_qos                :3,	/* thread qos class */
			thrp_qos_relprio        :4,	/* thread qos relative priority (store as inverse, -10 -> 0xA) */
			thrp_qos_override       :3,	/* thread qos class override */
			thrp_qos_promote        :3,	/* thread qos class from promotion */
			thrp_qos_kevent_override:3,	/* thread qos class from kevent override */
			thrp_terminated         :1,	/* heading for termination */
			thrp_qos_workq_override :3,	/* thread qos class override (workq) */
			thrp_qos_wlsvc_override :3,	/* workloop servicer qos class override */
			thrp_iotier_kevent_override :2,	/* thread iotier from kevent override */

			thrp_reserved           :24;	/* pad to 64 bits */
};

/*
 * Effective (output) thread policy bits, after overrides/promotions are
 * applied.  The bitfield widths sum to exactly 64 bits (25 used + 39
 * reserved).
 */
struct thread_effective_policy {
	uint64_t	thep_darwinbg           :1,	/* marked as 'background', and sockets are marked bg when created */
			thep_io_tier            :2,	/* effective throttle tier */
			thep_io_passive         :1,	/* should IOs cause lower tiers to be throttled */
			thep_all_sockets_bg     :1,	/* All existing sockets in process are marked as bg (thread: all created by thread) */
			thep_new_sockets_bg     :1,	/* Newly created sockets should be marked as bg */
			thep_terminated         :1,	/* all throttles have been removed for quick exit or SIGTERM handling */
			thep_qos_ui_is_urgent   :1,	/* bump UI-Interactive QoS up to the urgent preemption band */
			thep_latency_qos        :3,	/* Timer latency QoS level */
			thep_through_qos        :3,	/* Computation throughput QoS level */

			thep_qos                :3,	/* thread qos class */
			thep_qos_relprio        :4,	/* thread qos relative priority (store as inverse, -10 -> 0xA) */
			thep_qos_promote        :3,	/* thread qos class used for promotion */
			thep_promote_above_task :1,	/* thread is promoted above task-level clamp */

			thep_reserved           :39;	/* pad to 64 bits */
};

#endif /* PRIVATE */


#endif /* _MACH_THREAD_POLICY_H_ */