#!/usr/bin/python3

# format_vm_parameter_validation.py
# Pretty-print the output of tests/vm/vm_parameter_validation.c
#
# usage:
#     vm_parameter_validation | format_vm_parameter_validation.py

import re
import sys
import copy
import itertools

# magic return values used for in-band signalling
# fixme duplicated in vm_parameter_validation.c
# fixme also duplicated in other_return_values below
RESULT_SUCCESS = 0
RESULT_BUSTED = -99
RESULT_IGNORED = -98
RESULT_ZEROSIZE = -97
RESULT_PANIC = -96
RESULT_GUARD = -95
RESULT_MISMATCH = -94
RESULT_OUT_PARAM_BAD = -93
RESULT_MACH_SEND_INVALID_MEMORY = 0x1000000c

# output formatting: 3-character glyphs for the magic return values above
# (cell width must match format_col_width / format_default below)
format_result = {
    RESULT_SUCCESS:      '  .',
    RESULT_BUSTED:       ' **',
    RESULT_MISMATCH:     ' ##',
    RESULT_IGNORED:      '   ',
    RESULT_ZEROSIZE:     '  o',
    RESULT_PANIC:        ' pp',
    RESULT_GUARD:        ' gg',
    RESULT_OUT_PARAM_BAD: ' ot',
    RESULT_MACH_SEND_INVALID_MEMORY: ' mi',
}

format_default = '%3d'          # fallback: print the raw return code, 3 columns wide
format_col_width = 3
format_empty_col = format_col_width * ' '
format_indent_width = 4
format_indent = format_indent_width * ' '


class Result:
    """The outcome of one trial.

    ret:        the return value from the tested function
    parameters: sequence of the input parameter names for that trial
                (for example ["start PGSZ-2", "size -1"])
    """
    def __init__(self, new_ret, new_parameters):
        self.ret = new_ret
        self.parameters = new_parameters

    def __repr__(self):
        return str(self.ret) + " = " + str(self.parameters)


class Test:
    """The results of all trials in one test.

    testname: the name of the test (including the function being tested)
    config:   a string describing OS, CPU, etc
    compat:   code for error compatibility
    results:  a list of Result, one per trial
    """
    def __init__(self, new_name, new_config, new_compat, new_results=None):
        self.testname = new_name
        self.config = new_config
        self.compat = new_compat
        # bug fix: the old mutable default argument (new_results=[]) made every
        # Test constructed without explicit results share one list
        self.results = [] if new_results is None else new_results


def print_column_labels(labels, indent_width, col_width):
    """Print vertical column labels under some output.

    example output given indent=2 col_width=4 labels=[foo,bar,baz,qux]:
      |   |   |   |
      |   |   |   qux
      |   |   baz
      |   bar
      foo
    """
    indent = indent_width * ' '
    empty_column = '|' + (col_width - 1) * ' '

    unprinted = len(labels)
    print(indent + unprinted * empty_column)

    for label in reversed(labels):
        unprinted -= 1
        print(indent + unprinted * empty_column + label)


def print_one_result(ret):
    """Pretty-print one function return code (no trailing newline)."""
    if ret in format_result:
        print(format_result[ret], end='')
    else:
        print(format_default % (ret), end='')


def error_code_values_for_test(test):
    """Choose the appropriate error code table for a test
    (either errno_return_values or kern_return_values)."""
    errno_fns = ('mprotect', 'msync', 'minherit', 'mincore', 'mlock', 'munlock',
                 'mmap', 'munmap', 'mremap_encrypted', 'vslock', 'vsunlock',
                 'madvise', 'useracc')
    # str.startswith accepts a tuple of prefixes; clearer than the old for/else loop
    if test.testname.startswith(errno_fns):
        return errno_return_values
    return kern_return_values


def print_legend(test):
    """Print a helpful description of the return values seen in test's results.

    fixme these won't include RESULT_MISMATCH
    """
    # find all error codes represented in the results
    codes = {result.ret for result in test.results}

    known_return_values = error_code_values_for_test(test)

    # print the names of the detected error codes
    output = []
    for code in sorted(codes):
        if code in known_return_values:
            output.append(str(code) + ': ' + known_return_values[code])
        elif code in other_return_values:
            output.append(other_return_values[code])
        elif code != 0:
            # unknown non-success code
            output.append(str(code) + ': ????')

    print(format_indent + '(' + ', '.join(output) + ')')


# display names for error codes returned in errno
errno_return_values = {
    1: 'EPERM',
    9: 'EBADF',
    12: 'ENOMEM',
    13: 'EACCES',
    14: 'EFAULT',
    22: 'EINVAL',
}

# display names for error codes returned in kern_return_t
kern_return_values = {
    1: 'KERN_INVALID_ADDRESS',
    2: 'KERN_PROTECTION_FAILURE',
    3: 'KERN_NO_SPACE',
    4: 'KERN_INVALID_ARGUMENT',
    5: 'KERN_FAILURE',
    6: 'KERN_RESOURCE_SHORTAGE',
    7: 'KERN_NOT_RECEIVER',
    8: 'KERN_NO_ACCESS',
    9: 'KERN_MEMORY_FAILURE',
    10: 'KERN_MEMORY_ERROR',
    11: 'KERN_ALREADY_IN_SET',
    12: 'KERN_NOT_IN_SET',
    13: 'KERN_NAME_EXISTS',
    14: 'KERN_ABORTED',
    15: 'KERN_INVALID_NAME',
    16: 'KERN_INVALID_TASK',
    17: 'KERN_INVALID_RIGHT',
    18: 'KERN_INVALID_VALUE',
    19: 'KERN_UREFS_OVERFLOW',
    20: 'KERN_INVALID_CAPABILITY',
    21: 'KERN_RIGHT_EXISTS',
    22: 'KERN_INVALID_HOST',
    23: 'KERN_MEMORY_PRESENT',
    24: 'KERN_MEMORY_DATA_MOVED',
    25: 'KERN_MEMORY_RESTART_COPY',
    26: 'KERN_INVALID_PROCESSOR_SET',
    27: 'KERN_POLICY_LIMIT',
    28: 'KERN_INVALID_POLICY',
    29: 'KERN_INVALID_OBJECT',
    30: 'KERN_ALREADY_WAITING',
    31: 'KERN_DEFAULT_SET',
    32: 'KERN_EXCEPTION_PROTECTED',
    33: 'KERN_INVALID_LEDGER',
    34: 'KERN_INVALID_MEMORY_CONTROL',
    35: 'KERN_INVALID_SECURITY',
    36: 'KERN_NOT_DEPRESSED',
    37: 'KERN_TERMINATED',
    38: 'KERN_LOCK_SET_DESTROYED',
    39: 'KERN_LOCK_UNSTABLE',
    40: 'KERN_LOCK_OWNED',
    41: 'KERN_LOCK_OWNED_SELF',
    42: 'KERN_SEMAPHORE_DESTROYED',
    43: 'KERN_RPC_SERVER_TERMINATED',
    44: 'KERN_RPC_TERMINATE_ORPHAN',
    45: 'KERN_RPC_CONTINUE_ORPHAN',
    46: 'KERN_NOT_SUPPORTED',
    47: 'KERN_NODE_DOWN',
    48: 'KERN_NOT_WAITING',
    49: 'KERN_OPERATION_TIMED_OUT',
    50: 'KERN_CODESIGN_ERROR',
    51: 'KERN_POLICY_STATIC',
    52: 'KERN_INSUFFICIENT_BUFFER_SIZE',
    53: 'KERN_DENIED',
    54: 'KERN_MISSING_KC',
    55: 'KERN_INVALID_KC',
    56: 'KERN_NOT_FOUND',
    100: 'KERN_RETURN_MAX',
    -304: 'MIG_BAD_ARGUMENTS (server type check failure)',
    0x1000000c: 'MACH_SEND_INVALID_MEMORY',
}

# display names for the special return values used by the test machinery
other_return_values = {
    RESULT_BUSTED: format_result[RESULT_BUSTED].lstrip() + ': trial broken, not performed',
    RESULT_IGNORED: '<empty> trial ignored, not performed',
    RESULT_ZEROSIZE: format_result[RESULT_ZEROSIZE].lstrip() + ': size == 0',
    RESULT_PANIC: format_result[RESULT_PANIC].lstrip() + ': trial is believed to panic, not performed',
    RESULT_GUARD: format_result[RESULT_GUARD].lstrip() + ': trial is believed to throw EXC_GUARD, not performed',
    RESULT_OUT_PARAM_BAD: format_result[RESULT_OUT_PARAM_BAD].lstrip() + ': trial set incorrect values to out parameters',
}


def replace_error_code_return(test, line):
    """Inside line, replace 'return 123;' with 'return ERR_CODE_NAME;'."""
    known_return_values = error_code_values_for_test(test)
    for code, name in known_return_values.items():
        line = line.replace('return ' + str(code) + ';', 'return ' + name + ';')
    return line


def dimensions(results):
    """Number of parameter dimensions in results (0 if results is empty)."""
    if len(results) == 0:
        return 0
    return len(results[0].parameters)


def count_each_dimension(results):
    """Given one k-dimensional results,
    return a list of k counts that is the size of each dimension.

    Assumes results are ordered with the last parameter varying fastest
    (row-major order) — TODO confirm against the generator in
    vm_parameter_validation.c.
    """
    if len(results) == 0:
        return []
    first = results[0].parameters
    k = dimensions(results)
    counts = []
    step = 1
    # walk the dimensions from innermost (fastest-varying) to outermost
    for dim in range(k - 1, -1, -1):
        count = len(results) // step
        for i in range(0, len(results), step):
            cur = results[i].parameters
            if i != 0 and cur[dim] == first[dim]:
                # this dimension's first label repeated: it wraps every i//step entries
                count = i // step
                break
        step *= count
        counts.append(count)

    counts.reverse()
    return counts


def iterate_dimension(results, dim=0):
    """Reduce one k-dimensional results to many (k-1) dimensional results.

    Yields a sequence of [results, name] pairs
    where results has k-1 dimensions
    and name is the parameter name from the removed dimension.
    """
    if len(results) == 0:
        return

    k = dimensions(results)
    dim_counts = count_each_dimension(results)

    # number of consecutive results sharing one value of dimension `dim`
    inner_count = 1
    for d in range(dim + 1, k):
        inner_count *= dim_counts[d]

    # stride between repeats of the same `dim` value caused by outer dimensions
    outer_step = len(results)
    for d in range(0, dim):
        outer_step = int(outer_step / dim_counts[d])

    for r in range(dim_counts[dim]):
        start = r * inner_count
        name = results[start].parameters[dim]
        new_results = []
        for i in range(start, len(results), outer_step):
            for j in range(inner_count):
                # deepcopy so deleting the parameter doesn't mutate the input
                new_result = copy.deepcopy(results[i + j])
                del new_result.parameters[dim]
                new_results.append(new_result)
        yield [new_results, name]


def _check_overrides(results, overrides):
    """Validate that overrides has the same shape as results.
    Return overrides if usable, otherwise print a warning and return None.
    (The old code warned that mismatched overrides were ignored but then
    used them anyway, risking IndexError.)"""
    if not overrides:
        return None
    if len(overrides) != len(results):
        print("WARNING: override results have a different height; overrides ignored")
        return None
    for i, result in enumerate(results):
        if len(overrides[i].parameters) != len(result.parameters):
            print("WARNING: override results have a different width; overrides ignored")
            return None
    return overrides


def print_results_2D(results, overrides=None):
    """Print the results of a test that has two parameters (for example a test of start/size).
    If overrides!=None, use any non-SUCCESS return values from overrides in place of the other results.
    """
    overrides = _check_overrides(results, overrides)

    columns = []
    prev_row_label = ''
    first_row_label = ''
    for i, result in enumerate(results):
        if first_row_label == '':
            # record first row's name so we can use it to find columns
            # (assumes every row has the same column labels)
            first_row_label = result.parameters[0]

        if result.parameters[0] == first_row_label:
            # record column names in the first row
            columns.append(result.parameters[1])

        if result.parameters[0] != prev_row_label:
            # new row: finish the previous row's label, begin a new line
            if prev_row_label != '':
                print(format_indent + prev_row_label)
            print(format_indent, end='')
            prev_row_label = result.parameters[0]

        if overrides and overrides[i].ret != RESULT_SUCCESS:
            print_one_result(overrides[i].ret)
        else:
            print_one_result(result.ret)

    if prev_row_label:
        print(format_indent + prev_row_label)
    print_column_labels(columns, format_indent_width + format_col_width - 1, format_col_width)


def print_results_2D_try_condensed(results):
    """Print 2D results; if every trial returned the same code,
    print one condensed line instead of the full table."""
    if 0 == len(results):
        return
    singleton = results[0].ret
    if any(result.ret != singleton for result in results):
        print_results_2D(results)
        return
    # every return code is identical: print it once
    # (dropped the old unused rows/cols bookkeeping, which could IndexError
    # on parameter names without a space)
    print_one_result(singleton)
    print(" for all pairs")


def print_results_3D(results, testname):
    """Print 3D results as multiple 2D tables."""
    # foreach parameter[1], print 2D table of parameter[0] and parameter[2]
    for results2D, name in iterate_dimension(results, 1):
        print(testname + ': ' + name)
        print_results_2D(results2D)

    # foreach parameter[0], print 2D table of parameter[1] and parameter[2]
    # This is redundant but can be useful for human readers.
    for results2D, name in iterate_dimension(results, 0):
        print(testname + ': ' + name)
        print_results_2D(results2D)


def print_results_4D(results):
    """Print 4D results: for each combination of the 3rd, 4th, ... parameter
    values, print a 2D table of the first two parameters.
    Consecutive combinations whose tables are identical are printed only once."""
    # Make a map[(3rd_param, 4th_param, ...)] = list of matching results
    map_of_results = {}
    for result in results:
        k = tuple(result.parameters[2:])
        map_of_results.setdefault(k, []).append(result)

    # prepare to iterate, collapsing runs of identical tables
    prev_matrix = []
    iterable = []
    for k, result_list in map_of_results.items():
        one_2d_result = []
        matrix = []
        for result in result_list:
            repl_result = Result(result.ret, (result.parameters[0], result.parameters[1]))
            one_2d_result.append(repl_result)
            matrix.append(result.ret)
        if matrix == prev_matrix:
            # if the return codes are the same everywhere, we will print successive tables only once
            # note that this assumes that the sets of 2D labels are the same everywhere, and doesn't check that assumption
            iterable[-1][0].append(k)
        else:
            iterable.append(([k], one_2d_result))
        prev_matrix = matrix

    # print
    for keys, results_2d in iterable:
        print(keys)
        print_results_2D_try_condensed(results_2d)


def print_results_1D(results, overrides=None):
    """Print the results of a test that has one parameter
    (for example a test of addr only, or size only).
    If overrides!=None, use any non-SUCCESS return values from overrides in place of the other results.
    """
    overrides = _check_overrides(results, overrides)

    for i, result in enumerate(results):
        # indent, value, indent, label
        print(format_indent, end='')
        if overrides and overrides[i].ret != RESULT_SUCCESS:
            print_one_result(overrides[i].ret)
        else:
            print_one_result(result.ret)
        print(format_indent + result.parameters[0])


def print_results_nD(results, testname, overrides=None):
    """Dispatch to the printer matching the dimensionality of results.
    (overrides are only honored for 1D and 2D results.)"""
    k = dimensions(results)
    if k == 1:
        print_results_1D(results, overrides)
    elif k == 2:
        print_results_2D(results, overrides)
    elif k == 3:
        print_results_3D(results, testname)
    elif k == 4:
        print_results_4D(results)
    else:
        print(format_indent + 'too many dimensions')


def main():
    data = sys.stdin.readlines()

    # remove any lines that don't start with "TESTNAME" or "TESTCONFIG" or "RESULT"
    # (including darwintest output like "PASS" or "FAIL")
    # and print them now
    # Also verify that the counts of "TEST BEGIN" == "TEST END"
    # (they will mismatch if a test suite crashed)
    testbegincount = 0
    testendcount = 0
    testlines = []
    for line in data:
        unmodified_line = line
        # count TEST BEGIN and TEST END
        if 'TEST BEGIN' in line:
            testbegincount += 1
        if 'TEST END' in line:
            testendcount += 1
        # remove any T_LOG() timestamp prefixes and KTEST prefixes
        # (raw strings: \s and \d are invalid escapes in ordinary string literals)
        line = re.sub(r'^\s*\d+:\d+:\d+ ', '', line)
        line = re.sub(r'^\[KTEST\]\s+[A-Z]+\s+\d+\s+(\d+\s+)?\S+\s+\d+\s+', '', line)
        line = line.lstrip()

        if (line.startswith('TESTNAME') or line.startswith('RESULT')
                or line.startswith('TESTCONFIG') or line.startswith('TESTCOMPAT')):
            testlines.append(line)  # line is test output
        elif line == '':
            pass  # ignore empty lines
        else:
            print(unmodified_line, end='')  # line is other output

    # parse test output into Test and Result objects

    testnum = 0

    def group_by_test(line):
        # every TESTNAME line starts a new group; following lines share its number
        nonlocal testnum
        if line.startswith('TESTNAME '):
            testnum += 1
        return testnum

    tests = []
    for _, group in itertools.groupby(testlines, group_by_test):
        lines = list(group)

        # each group is TESTNAME, then TESTCONFIG, then RESULT lines
        name = lines.pop(0).removeprefix('TESTNAME ').rstrip()
        config = lines.pop(0).removeprefix('TESTCONFIG ').rstrip()
        compat = []
        results = []
        for line in lines:
            if line.startswith('RESULT'):
                components = line.removeprefix('RESULT ').rstrip().split(', ')
                ret = int(components.pop(0))
                results.append(Result(ret, components))

        tests.append(Test(name, config, compat, results))

    print('found %d tests' % (len(tests)))

    # stats to print at the end
    test_count = len(tests)
    all_configurations = set()

    # print test output
    for test in tests:
        # print test name and test config on separate lines
        # `diff` handles this better than putting both on the same line
        print('test ' + test.testname)

        print(format_indent + 'config ' + test.config)
        all_configurations.add(test.config)

        if len(test.results) == 0:
            print(format_indent + 'no results')
        else:
            print_legend(test)
            print_results_nD(test.results, test.testname)

        print('end ' + test.testname)

    print()
    print(str(test_count) + ' test(s) performed')

    if testbegincount != testendcount:
        print('### error: %d TEST BEGINs, %d TEST ENDs - some tests may have crashed'
              % (testbegincount, testendcount))

    print(str(len(all_configurations)) + ' configuration(s) tested:')
    for config in sorted(all_configurations):
        print(format_indent + '[' + config + ']')


# bug fix: guard the entry point so importing this module doesn't read stdin
if __name__ == '__main__':
    main()