1 /*
2 * Copyright (c) 2025 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include "checker.h"
30 #include <kern/assert.h>
31
/* Initial bucket count; resizing always doubles, so capacity stays a power of 2. */
#define WATCHPOINT_MAP_INITIAL_CAPACITY 4096 /* must be power of 2 */
/* Grow the table once count/capacity reaches this threshold. */
#define WATCHPOINT_MAP_MAX_LOAD_FACTOR 0.75
34
/*
 * Mix the raw address bits and reduce them to a bucket index.
 * Two xor-shift/multiply rounds spread nearby addresses across
 * different buckets; the final modulo maps into [0, capacity).
 */
static inline size_t
hash_address(uintptr_t addr, size_t capacity)
{
	size_t h = (size_t)addr;
	h ^= h >> 16;
	h *= 31;
	h ^= h >> 16;
	h *= 31;
	h ^= h >> 16;
	return h % capacity;
}
44
/*
 * One active watchpoint: a memory access a fiber has announced but not yet
 * completed.  For diagnostics either the bare pc or a full backtrace is
 * stored in the union, selected by has_backtrace.
 */
struct watchpoint_entry {
	union {
		void *pc; /* program point of the memory operation instruction */
		struct backtrace_array *backtrace; /* backtrace collected at program point of the memory operation instruction */
	};
	uintptr_t address; /* address of the memory operation happening on the fiber with id=fiber_id */
	int fiber_id; /* id of the fiber in which the memory operation is happening */
	uint8_t size; /* size of the memory operation (up to 16 bytes) */
	uint8_t access_type; /* enum access_type */
	uint8_t has_backtrace; /* if true, use the backtrace field of the union */
};
56
57 static void
watchpoint_entry_init(struct watchpoint_entry * entry,uintptr_t address,enum access_type type,size_t size,fiber_t fiber)58 watchpoint_entry_init(struct watchpoint_entry* entry, uintptr_t address, enum access_type type, size_t size, fiber_t fiber)
59 {
60 FIBERS_ASSERT(entry != NULL, "watchpoint_entry_init: null entry");
61 FIBERS_ASSERT(size <= 16, "watchpoint_entry_init: invalid size"); // no access is bigger than sizeof(int128)
62
63 entry->address = address;
64 entry->fiber_id = fiber->id;
65 entry->access_type = (uint8_t)type;
66 entry->size = (uint8_t)size;
67 // default to pc=0
68 entry->pc = 0;
69 entry->has_backtrace = 0;
70 }
71
/* Chain node for one hash bucket (separate chaining). */
struct watchpoint_node {
	struct watchpoint_entry entry;
	struct watchpoint_node *next;
};
76
// A simple hashmap to store watchpoint_entry indexed by watchpoint_entry.address
struct watchpoint_map {
	struct watchpoint_node **buckets; /* array of `capacity` bucket heads */
	size_t count; /* number of stored entries */
	size_t capacity; /* number of buckets; power of 2 (initial value doubled on resize) */
};
83
84 static bool
watchpoint_map_is_initialized(struct watchpoint_map * map)85 watchpoint_map_is_initialized(struct watchpoint_map* map)
86 {
87 return map->capacity != 0 && map->buckets != NULL;
88 }
89
90 void
watchpoint_map_init(struct watchpoint_map * map)91 watchpoint_map_init(struct watchpoint_map* map)
92 {
93 map->count = 0;
94 map->capacity = WATCHPOINT_MAP_INITIAL_CAPACITY;
95 map->buckets = calloc(map->capacity, sizeof(struct watchpoint_node*));
96 }
97
98 // Currently the watchpoint map has a global scope, so this function is unnecessary
99 // We keep it here for future usage
100 /*
101 * void
102 * watchpoint_map_destroy(struct watchpoint_map* map)
103 * {
104 * if (map->buckets) {
105 * for (size_t i = 0; i < map->capacity; ++i) {
106 * struct watchpoint_node* current = map->buckets[i];
107 * while (current != NULL) {
108 * struct watchpoint_node* to_free = current;
109 * current = current->next;
110 * free(to_free);
111 * }
112 * }
113 * free(map->buckets);
114 * }
115 * map->buckets = NULL;
116 * map->count = 0;
117 * map->capacity = 0;
118 * }
119 */
120
121 static bool
watchpoint_map_resize(struct watchpoint_map * map,size_t new_capacity)122 watchpoint_map_resize(struct watchpoint_map* map, size_t new_capacity)
123 {
124 if (new_capacity < map->count) {
125 return false;
126 }
127 struct watchpoint_node** new_buckets = calloc(new_capacity, sizeof(struct watchpoint_node*));
128 if (new_buckets == NULL) {
129 return false;
130 }
131
132 /* rehash all existing entries into the new buckets */
133 for (size_t i = 0; i < map->capacity; ++i) {
134 struct watchpoint_node* current = map->buckets[i];
135 while (current != NULL) {
136 struct watchpoint_node* node_to_move = current;
137 current = current->next;
138
139 size_t new_index = hash_address(node_to_move->entry.address, new_capacity);
140 node_to_move->next = new_buckets[new_index];
141 new_buckets[new_index] = node_to_move;
142 }
143 }
144
145 free(map->buckets);
146 map->buckets = new_buckets;
147 map->capacity = new_capacity;
148 return true;
149 }
150
151 void
watchpoint_map_add(struct watchpoint_map * map,struct watchpoint_entry entry)152 watchpoint_map_add(struct watchpoint_map* map, struct watchpoint_entry entry)
153 {
154 if (((double)map->count / map->capacity) >= WATCHPOINT_MAP_MAX_LOAD_FACTOR) {
155 watchpoint_map_resize(map, map->capacity * 2);
156 }
157
158 struct watchpoint_node* new_node = malloc(sizeof(struct watchpoint_node));
159 new_node->entry = entry;
160 new_node->next = NULL;
161
162 size_t index = hash_address(entry.address, map->capacity);
163 new_node->next = map->buckets[index];
164 map->buckets[index] = new_node;
165 map->count++;
166 }
167
168 bool
watchpoint_map_find_remove(struct watchpoint_map * map,uintptr_t address,fiber_t fiber,struct watchpoint_entry * removed_entry)169 watchpoint_map_find_remove(struct watchpoint_map* map, uintptr_t address, fiber_t fiber, struct watchpoint_entry* removed_entry)
170 {
171 size_t index = hash_address(address, map->capacity);
172
173 struct watchpoint_node* current = map->buckets[index];
174 struct watchpoint_node* prev = NULL;
175
176 while (current != NULL) {
177 if (current->entry.address == address && current->entry.fiber_id == fiber->id) {
178 if (removed_entry) {
179 memcpy(removed_entry, ¤t->entry, sizeof(struct watchpoint_entry));
180 }
181
182 if (prev == NULL) {
183 map->buckets[index] = current->next;
184 } else {
185 prev->next = current->next;
186 }
187 free(current);
188 map->count--;
189 return true;
190 }
191 prev = current;
192 current = current->next;
193 }
194
195 return false;
196 }
197
198 static void
report_race(uintptr_t current_addr,size_t current_size,enum access_type current_type,struct watchpoint_entry * conflicting_entry)199 report_race(uintptr_t current_addr, size_t current_size, enum access_type current_type, struct watchpoint_entry* conflicting_entry)
200 {
201 raw_printf("==== Warning: Fibers data race checker violation ====\n");
202 raw_printf("%s of size %d at %p by fiber %d\n", current_type == ACCESS_TYPE_STORE ? "Write" : "Read", current_size, (void*)current_addr, fibers_current->id);
203 if (fibers_debug) {
204 print_current_backtrace();
205 }
206
207 raw_printf("Previous %s of size %d at %p by fiber %d\n", conflicting_entry->access_type == ACCESS_TYPE_STORE ? "write" : "read", conflicting_entry->size, (void*)conflicting_entry->address, conflicting_entry->fiber_id);
208 if (conflicting_entry->has_backtrace) {
209 print_collected_backtrace(conflicting_entry->backtrace);
210 } else {
211 struct backtrace_array bt = { .buffer = {(void*)conflicting_entry->pc}, .nptrs = 1 };
212 print_collected_backtrace(&bt);
213 }
214
215 if (fibers_abort_on_error) {
216 abort();
217 }
218 }
219
220 static inline void
report_missing_race(uintptr_t current_addr,size_t current_size,enum access_type current_type)221 report_missing_race(uintptr_t current_addr, size_t current_size, enum access_type current_type)
222 {
223 raw_printf("==== Warning: Fibers data race checker violation ====\n");
224 raw_printf("%s of size %d at %p by fiber %d\n", current_type == ACCESS_TYPE_STORE ? "Write" : "Read", current_size, (void*)current_addr, fibers_current->id);
225 if (fibers_debug) {
226 print_current_backtrace();
227 }
228
229 raw_printf("Watchpoint was unexpectedly missing or modified by another fiber during yield.\n");
230 if (fibers_abort_on_error) {
231 abort();
232 }
233 }
234
235 void
report_value_race(uintptr_t current_addr,size_t current_size,enum access_type current_type)236 report_value_race(uintptr_t current_addr, size_t current_size, enum access_type current_type)
237 {
238 raw_printf("==== Warning: Fibers data race checker violation ====\n");
239 raw_printf("%s of size %d at %p by fiber %d\n", current_type == ACCESS_TYPE_STORE ? "Write" : "Read", current_size, (void*)current_addr, fibers_current->id);
240 if (fibers_debug) {
241 print_current_backtrace();
242 }
243
244 raw_printf("Value was modified in between the operation by another fiber during yield.\n");
245 if (fibers_abort_on_error) {
246 abort();
247 }
248 }
249
/*
 * Half-open interval overlap test for [addr1, addr1+size1) and
 * [addr2, addr2+size2).  Empty ranges never overlap, and a range whose
 * endpoint wraps around the address space is conservatively treated as
 * disjoint.
 */
static inline bool
ranges_overlap(uintptr_t addr1, size_t size1, uintptr_t addr2, size_t size2)
{
	if (size1 == 0 || size2 == 0) {
		return false;
	}

	uintptr_t end1 = addr1 + size1;
	uintptr_t end2 = addr2 + size2;

	/* endpoint overflow -> treat as non-overlapping */
	if (end1 < addr1 || end2 < addr2) {
		return false;
	}

	return addr1 < end2 && addr2 < end1;
}
263
264 /*
265 * Check for conflicting memory accesses to the same region happening in another fiber.
266 * Concurrent loads are allowed, loads in-between a store are not.
267 */
268 static inline bool
check_for_conflicts(struct watchpoint_map * map,uintptr_t current_addr,size_t current_size,enum access_type current_type)269 check_for_conflicts(struct watchpoint_map* map, uintptr_t current_addr, size_t current_size, enum access_type current_type)
270 {
271 /* range: [current_addr - 16, current_addr + 16] (33 addresses) */
272 uintptr_t start_check_addr = (current_addr > 16) ? (current_addr - 16) : 0;
273 uintptr_t end_check_addr = current_addr + 16;
274
275 for (uintptr_t check_addr = start_check_addr;; ++check_addr) {
276 size_t index = hash_address(check_addr, map->capacity);
277 struct watchpoint_node* node = map->buckets[index];
278
279 while (node != NULL) {
280 struct watchpoint_entry* existing_entry = &node->entry;
281
282 if (ranges_overlap(current_addr, current_size, existing_entry->address, existing_entry->size)) {
283 if (current_type == ACCESS_TYPE_STORE) {
284 /* any access in between a store is a race */
285 report_race(current_addr, current_size, current_type, existing_entry);
286 return true;
287 } else if (existing_entry->access_type == ACCESS_TYPE_STORE) {
288 /* allow other loads in between a load, but not a store */
289 report_race(current_addr, current_size, current_type, existing_entry);
290 return true;
291 }
292 }
293 node = node->next;
294 }
295 if (check_addr == end_check_addr) {
296 break;
297 }
298 }
299
300 return false;
301 }
302
/* Global watchpoint table shared by all fibers; lazily initialized on first use. */
static struct watchpoint_map checker_watchpoints;
304
305 bool
check_and_set_watchpoint(void * pc,uintptr_t address,size_t size,enum access_type access_type)306 check_and_set_watchpoint(void *pc, uintptr_t address, size_t size, enum access_type access_type)
307 {
308 if (!watchpoint_map_is_initialized(&checker_watchpoints)) {
309 watchpoint_map_init(&checker_watchpoints);
310 }
311
312 if (check_for_conflicts(&checker_watchpoints, address, size, access_type)) {
313 return false;
314 } else {
315 struct watchpoint_entry new_watchpoint;
316 watchpoint_entry_init(&new_watchpoint, address, access_type, size, fibers_current);
317 if (fibers_debug) {
318 new_watchpoint.backtrace = collect_current_backtrace();
319 new_watchpoint.has_backtrace = 1;
320 } else {
321 new_watchpoint.pc = pc;
322 }
323
324 watchpoint_map_add(&checker_watchpoints, new_watchpoint);
325 return true;
326 }
327 }
328
329 void
post_check_and_remove_watchpoint(uintptr_t address,size_t size,enum access_type access_type)330 post_check_and_remove_watchpoint(uintptr_t address, size_t size, enum access_type access_type)
331 {
332 struct watchpoint_entry removed_entry;
333 if (watchpoint_map_find_remove(&checker_watchpoints, address, fibers_current, &removed_entry)) {
334 FIBERS_ASSERT(removed_entry.address == address, "race? internal error");
335 FIBERS_ASSERT(removed_entry.access_type == access_type, "race? internal error");
336 FIBERS_ASSERT(removed_entry.size == size, "race? internal error");
337 FIBERS_ASSERT(removed_entry.fiber_id == fibers_current->id, "race? internal error");
338
339 if (removed_entry.has_backtrace) {
340 free(removed_entry.backtrace);
341 }
342 } else {
343 report_missing_race(address, size, access_type);
344 }
345 }
346