Diffstat (limited to 'core/testing')
-rw-r--r--  core/testing/events.odin                   48
-rw-r--r--  core/testing/logging.odin                  71
-rw-r--r--  core/testing/reporting.odin               329
-rw-r--r--  core/testing/runner.odin                  822
-rw-r--r--  core/testing/runner_other.odin             14
-rw-r--r--  core/testing/runner_windows.odin          235
-rw-r--r--  core/testing/signal_handler.odin           33
-rw-r--r--  core/testing/signal_handler_libc.odin     149
-rw-r--r--  core/testing/signal_handler_other.odin     19
-rw-r--r--  core/testing/testing.odin                  69
10 files changed, 1478 insertions, 311 deletions
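
The new files below replace the old per-platform runners with a single threaded runner that communicates over `core:sync/chan`: each test thread owns the send side of a buffered channel and the runner polls the receive sides. As a minimal, hypothetical sketch of that flow (the `Event` union and names here are illustrative only; the real event types live in events.odin and the real polling loop in runner.odin):

package chan_flow_sketch

import "core:fmt"
import "core:sync/chan"

// Hypothetical event type for illustration; the real ones
// (Event_New_Test, Event_State_Change, ...) are defined in events.odin below.
Event :: union {
	int,    // e.g. a test index
	string, // e.g. a formatted log line
}

Event_Channel :: chan.Chan(Event)

main :: proc() {
	// The runner allocates one buffered channel per worker thread
	// (see BUFFERED_EVENTS_PER_CHANNEL in runner.odin).
	ch, err := chan.create_buffered(Event_Channel, 16, context.allocator)
	assert(err == nil)
	defer chan.destroy(&ch)

	// A test thread holds only the send side (see `T.channel` in testing.odin).
	sender := chan.as_send(ch)
	chan.send(sender, 42)
	chan.send(sender, "a log message")

	// The runner polls the receive side without blocking.
	for {
		ev, ok := chan.try_recv(ch)
		if !ok {
			break
		}
		fmt.println(ev)
	}
}
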
diff --git a/core/testing/events.odin b/core/testing/events.odin new file mode 100644 index 000000000..bab35aaad --- /dev/null +++ b/core/testing/events.odin @@ -0,0 +1,48 @@ +//+private +package testing + +import "base:runtime" +import "core:sync/chan" +import "core:time" + +Test_State :: enum { + Ready, + Running, + Successful, + Failed, +} + +Update_Channel :: chan.Chan(Channel_Event) +Update_Channel_Sender :: chan.Chan(Channel_Event, .Send) + +Task_Channel :: struct { + channel: Update_Channel, + test_index: int, +} + +Event_New_Test :: struct { + test_index: int, +} + +Event_State_Change :: struct { + new_state: Test_State, +} + +Event_Set_Fail_Timeout :: struct { + at_time: time.Time, + location: runtime.Source_Code_Location, +} + +Event_Log_Message :: struct { + level: runtime.Logger_Level, + text: string, + time: time.Time, + formatted_text: string, +} + +Channel_Event :: union { + Event_New_Test, + Event_State_Change, + Event_Set_Fail_Timeout, + Event_Log_Message, +} diff --git a/core/testing/logging.odin b/core/testing/logging.odin new file mode 100644 index 000000000..5bbbffeae --- /dev/null +++ b/core/testing/logging.odin @@ -0,0 +1,71 @@ +//+private +package testing + +import "base:runtime" +import "core:fmt" +import pkg_log "core:log" +import "core:strings" +import "core:sync/chan" +import "core:time" + +Default_Test_Logger_Opts :: runtime.Logger_Options { + .Level, + .Terminal_Color, + .Short_File_Path, + .Line, + .Procedure, + .Date, .Time, +} + +Log_Message :: struct { + level: runtime.Logger_Level, + text: string, + time: time.Time, + // `text` may be allocated differently, depending on where a log message + // originates from. + allocator: runtime.Allocator, +} + +test_logger_proc :: proc(logger_data: rawptr, level: runtime.Logger_Level, text: string, options: runtime.Logger_Options, location := #caller_location) { + t := cast(^T)logger_data + + if level >= .Error { + t.error_count += 1 + } + + cloned_text, clone_error := strings.clone(text, t._log_allocator) + assert(clone_error == nil, "Error while cloning string in test thread logger proc.") + + now := time.now() + + chan.send(t.channel, Event_Log_Message { + level = level, + text = cloned_text, + time = now, + formatted_text = format_log_text(level, text, options, location, now, t._log_allocator), + }) +} + +runner_logger_proc :: proc(logger_data: rawptr, level: runtime.Logger_Level, text: string, options: runtime.Logger_Options, location := #caller_location) { + log_messages := cast(^[dynamic]Log_Message)logger_data + + now := time.now() + + append(log_messages, Log_Message { + level = level, + text = format_log_text(level, text, options, location, now), + time = now, + allocator = context.allocator, + }) +} + +format_log_text :: proc(level: runtime.Logger_Level, text: string, options: runtime.Logger_Options, location: runtime.Source_Code_Location, at_time: time.Time, allocator := context.allocator) -> string{ + backing: [1024]byte + buf := strings.builder_from_bytes(backing[:]) + + pkg_log.do_level_header(options, &buf, level) + pkg_log.do_time_header(options, &buf, at_time) + pkg_log.do_location_header(options, &buf, location) + + return fmt.aprintf("%s%s", strings.to_string(buf), text, allocator = allocator) +} diff --git a/core/testing/reporting.odin b/core/testing/reporting.odin new file mode 100644 index 000000000..92e144ccc --- /dev/null +++ b/core/testing/reporting.odin @@ -0,0 +1,329 @@ +//+private +package testing + +import "base:runtime" +import "core:encoding/ansi" +import "core:fmt" +import "core:io" 
+import "core:mem" +import "core:path/filepath" +import "core:strings" + +// Definitions of colors for use in the test runner. +SGR_RESET :: ansi.CSI + ansi.RESET + ansi.SGR +SGR_READY :: ansi.CSI + ansi.FG_BRIGHT_BLACK + ansi.SGR +SGR_RUNNING :: ansi.CSI + ansi.FG_YELLOW + ansi.SGR +SGR_SUCCESS :: ansi.CSI + ansi.FG_GREEN + ansi.SGR +SGR_FAILED :: ansi.CSI + ansi.FG_RED + ansi.SGR + +MAX_PROGRESS_WIDTH :: 100 + +// More than enough bytes to cover long package names, long test names, dozens +// of ANSI codes, et cetera. +LINE_BUFFER_SIZE :: (MAX_PROGRESS_WIDTH * 8 + 224) * runtime.Byte + +PROGRESS_COLUMN_SPACING :: 2 + +Package_Run :: struct { + name: string, + header: string, + + frame_ready: bool, + + redraw_buffer: [LINE_BUFFER_SIZE]byte, + redraw_string: string, + + last_change_state: Test_State, + last_change_name: string, + + tests: []Internal_Test, + test_states: []Test_State, +} + +Report :: struct { + packages: []Package_Run, + packages_by_name: map[string]^Package_Run, + + pkg_column_len: int, + test_column_len: int, + progress_width: int, + + all_tests: []Internal_Test, + all_test_states: []Test_State, +} + +// Organize all tests by package and sort out test state data. +make_report :: proc(internal_tests: []Internal_Test) -> (report: Report, error: runtime.Allocator_Error) { + assert(len(internal_tests) > 0, "make_report called with no tests") + + packages: [dynamic]Package_Run + + report.all_tests = internal_tests + report.all_test_states = make([]Test_State, len(internal_tests)) or_return + + // First, figure out what belongs where. + #no_bounds_check cur_pkg := internal_tests[0].pkg + pkg_start: int + + // This loop assumes the tests are sorted by package already. + for it, index in internal_tests { + if cur_pkg != it.pkg { + #no_bounds_check { + append(&packages, Package_Run { + name = cur_pkg, + tests = report.all_tests[pkg_start:index], + test_states = report.all_test_states[pkg_start:index], + }) or_return + } + + when PROGRESS_WIDTH == 0 { + report.progress_width = max(report.progress_width, index - pkg_start) + } + + pkg_start = index + report.pkg_column_len = max(report.pkg_column_len, len(cur_pkg)) + cur_pkg = it.pkg + } + report.test_column_len = max(report.test_column_len, len(it.name)) + } + + // Handle the last (or only) package. + #no_bounds_check { + append(&packages, Package_Run { + name = cur_pkg, + header = cur_pkg, + tests = report.all_tests[pkg_start:], + test_states = report.all_test_states[pkg_start:], + }) or_return + } + when PROGRESS_WIDTH == 0 { + report.progress_width = max(report.progress_width, len(internal_tests) - pkg_start) + } else { + report.progress_width = PROGRESS_WIDTH + } + report.progress_width = min(report.progress_width, MAX_PROGRESS_WIDTH) + + report.pkg_column_len = PROGRESS_COLUMN_SPACING + max(report.pkg_column_len, len(cur_pkg)) + + shrink(&packages) or_return + + for &pkg in packages { + pkg.header = fmt.aprintf("%- *[1]s[", pkg.name, report.pkg_column_len) + assert(len(pkg.header) > 0, "Error allocating package header string.") + + // This is safe because the array is done resizing, and it has the same + // lifetime as the map. + report.packages_by_name[pkg.name] = &pkg + } + + // It's okay to discard the dynamic array's allocator information here, + // because its capacity has been shrunk to its length, it was allocated by + // the caller's context allocator, and it will be deallocated by the same. + // + // `delete_slice` is equivalent to `delete_dynamic_array` in this case. 
+ report.packages = packages[:] + + return +} + +destroy_report :: proc(report: ^Report) { + for pkg in report.packages { + delete(pkg.header) + } + + delete(report.packages) + delete(report.packages_by_name) + delete(report.all_test_states) +} + +redraw_package :: proc(w: io.Writer, report: Report, pkg: ^Package_Run) { + if pkg.frame_ready { + io.write_string(w, pkg.redraw_string) + return + } + + // Write the output line here so we can cache it. + line_builder := strings.builder_from_bytes(pkg.redraw_buffer[:]) + line_writer := strings.to_writer(&line_builder) + + highest_run_index: int + failed_count: int + done_count: int + #no_bounds_check for i := 0; i < len(pkg.test_states); i += 1 { + switch pkg.test_states[i] { + case .Ready: + continue + case .Running: + highest_run_index = max(highest_run_index, i) + case .Successful: + done_count += 1 + case .Failed: + failed_count += 1 + done_count += 1 + } + } + + start := max(0, highest_run_index - (report.progress_width - 1)) + end := min(start + report.progress_width, len(pkg.test_states)) + + // This variable is to keep track of the last ANSI code emitted, in + // order to avoid repeating the same code over in a sequence. + // + // This should help reduce screen flicker. + last_state := Test_State(-1) + + io.write_string(line_writer, pkg.header) + + #no_bounds_check for state in pkg.test_states[start:end] { + switch state { + case .Ready: + if last_state != state { + io.write_string(line_writer, SGR_READY) + last_state = state + } + case .Running: + if last_state != state { + io.write_string(line_writer, SGR_RUNNING) + last_state = state + } + case .Successful: + if last_state != state { + io.write_string(line_writer, SGR_SUCCESS) + last_state = state + } + case .Failed: + if last_state != state { + io.write_string(line_writer, SGR_FAILED) + last_state = state + } + } + io.write_byte(line_writer, '|') + } + + for _ in 0 ..< report.progress_width - (end - start) { + io.write_byte(line_writer, ' ') + } + + io.write_string(line_writer, SGR_RESET + "] ") + + ticker: string + if done_count == len(pkg.test_states) { + ticker = "[package done]" + if failed_count > 0 { + ticker = fmt.tprintf("%s (" + SGR_FAILED + "%i" + SGR_RESET + " failed)", ticker, failed_count) + } + } else { + if len(pkg.last_change_name) == 0 { + #no_bounds_check pkg.last_change_name = pkg.tests[0].name + } + + switch pkg.last_change_state { + case .Ready: + ticker = fmt.tprintf(SGR_READY + "%s" + SGR_RESET, pkg.last_change_name) + case .Running: + ticker = fmt.tprintf(SGR_RUNNING + "%s" + SGR_RESET, pkg.last_change_name) + case .Failed: + ticker = fmt.tprintf(SGR_FAILED + "%s" + SGR_RESET, pkg.last_change_name) + case .Successful: + ticker = fmt.tprintf(SGR_SUCCESS + "%s" + SGR_RESET, pkg.last_change_name) + } + } + + if done_count == len(pkg.test_states) { + fmt.wprintfln(line_writer, " % 4i :: %s", + len(pkg.test_states), + ticker, + ) + } else { + fmt.wprintfln(line_writer, "% 4i/% 4i :: %s", + done_count, + len(pkg.test_states), + ticker, + ) + } + + pkg.redraw_string = strings.to_string(line_builder) + pkg.frame_ready = true + io.write_string(w, pkg.redraw_string) +} + +redraw_report :: proc(w: io.Writer, report: Report) { + // If we print a line longer than the user's terminal can handle, it may + // wrap around, shifting the progress report out of alignment. 
+ // + // There are ways to get the current terminal width, and that would be the + // ideal way to handle this, but it would require system-specific code such + // as setting STDIN to be non-blocking in order to read the response from + // the ANSI DSR escape code, or reading environment variables. + // + // The DECAWM escape codes control whether or not the terminal will wrap + // long lines or overwrite the last visible character. + // This should be fine for now. + // + // Note that we only do this for the animated summary; log messages are + // still perfectly fine to wrap, as they're printed in their own batch, + // whereas the animation depends on each package being only on one line. + // + // Of course, if you resize your terminal while it's printing, things can + // still break... + fmt.wprint(w, ansi.CSI + ansi.DECAWM_OFF) + for &pkg in report.packages { + redraw_package(w, report, &pkg) + } + fmt.wprint(w, ansi.CSI + ansi.DECAWM_ON) +} + +needs_to_redraw :: proc(report: Report) -> bool { + for pkg in report.packages { + if !pkg.frame_ready { + return true + } + } + + return false +} + +draw_status_bar :: proc(w: io.Writer, threads_string: string, total_done_count, total_test_count: int) { + if total_done_count == total_test_count { + // All tests are done; print a blank line to maintain the same height + // of the progress report. + fmt.wprintln(w) + } else { + fmt.wprintfln(w, + "%s % 4i/% 4i :: total", + threads_string, + total_done_count, + total_test_count) + } +} + +write_memory_report :: proc(w: io.Writer, tracker: ^mem.Tracking_Allocator, pkg, name: string) { + fmt.wprintf(w, + "<% 10M/% 10M> <% 10M> (% 5i/% 5i) :: %s.%s", + tracker.current_memory_allocated, + tracker.total_memory_allocated, + tracker.peak_memory_allocated, + tracker.total_free_count, + tracker.total_allocation_count, + pkg, + name) + + for ptr, entry in tracker.allocation_map { + fmt.wprintf(w, + "\n +++ leak % 10M @ %p [%s:%i:%s()]", + entry.size, + ptr, + filepath.base(entry.location.file_path), + entry.location.line, + entry.location.procedure) + } + + for entry in tracker.bad_free_array { + fmt.wprintf(w, + "\n +++ bad free @ %p [%s:%i:%s()]", + entry.memory, + filepath.base(entry.location.file_path), + entry.location.line, + entry.location.procedure) + } +} diff --git a/core/testing/runner.odin b/core/testing/runner.odin index 0039f1939..328186c35 100644 --- a/core/testing/runner.odin +++ b/core/testing/runner.odin @@ -1,73 +1,823 @@ //+private package testing +import "base:intrinsics" +import "base:runtime" +import "core:bytes" +import "core:encoding/ansi" +@require import "core:encoding/base64" +import "core:fmt" import "core:io" +@require import pkg_log "core:log" +import "core:mem" import "core:os" import "core:slice" +@require import "core:strings" +import "core:sync/chan" +import "core:thread" +import "core:time" -reset_t :: proc(t: ^T) { - clear(&t.cleanups) - t.error_count = 0 +// Specify how many threads to use when running tests. +TEST_THREADS : int : #config(ODIN_TEST_THREADS, 0) +// Track the memory used by each test. +TRACKING_MEMORY : bool : #config(ODIN_TEST_TRACK_MEMORY, true) +// Always report how much memory is used, even when there are no leaks or bad frees. +ALWAYS_REPORT_MEMORY : bool : #config(ODIN_TEST_ALWAYS_REPORT_MEMORY, false) +// Specify how much memory each thread allocator starts with. +PER_THREAD_MEMORY : int : #config(ODIN_TEST_THREAD_MEMORY, mem.ROLLBACK_STACK_DEFAULT_BLOCK_SIZE) +// Select a specific set of tests to run by name. 
+// Each test is separated by a comma and may optionally include the package name. +// This may be useful when running tests on multiple packages with `-all-packages`. +// The format is: `package.test_name,test_name_only,...` +TEST_NAMES : string : #config(ODIN_TEST_NAMES, "") +// Show the fancy animated progress report. +FANCY_OUTPUT : bool : #config(ODIN_TEST_FANCY, true) +// Copy failed tests to the clipboard when done. +USE_CLIPBOARD : bool : #config(ODIN_TEST_CLIPBOARD, false) +// How many test results to show at a time per package. +PROGRESS_WIDTH : int : #config(ODIN_TEST_PROGRESS_WIDTH, 24) +// This is the random seed that will be sent to each test. +// If it is unspecified, it will be set to the system cycle counter at startup. +SHARED_RANDOM_SEED : u64 : #config(ODIN_TEST_RANDOM_SEED, 0) +// Set the lowest log level for this test run. +LOG_LEVEL : string : #config(ODIN_TEST_LOG_LEVEL, "info") + + +get_log_level :: #force_inline proc() -> runtime.Logger_Level { + when ODIN_DEBUG { + // Always use .Debug in `-debug` mode. + return .Debug + } else { + when LOG_LEVEL == "debug" { return .Debug } + else when LOG_LEVEL == "info" { return .Info } + else when LOG_LEVEL == "warning" { return .Warning } + else when LOG_LEVEL == "error" { return .Error } + else when LOG_LEVEL == "fatal" { return .Fatal } + else { + #panic("Unknown `ODIN_TEST_LOG_LEVEL`: \"" + LOG_LEVEL + "\", possible levels are: \"debug\", \"info\", \"warning\", \"error\", or \"fatal\".") + } + } } + end_t :: proc(t: ^T) { for i := len(t.cleanups)-1; i >= 0; i -= 1 { - c := t.cleanups[i] + #no_bounds_check c := t.cleanups[i] + context = c.ctx c.procedure(c.user_data) } + + delete(t.cleanups) + t.cleanups = {} +} + +Task_Data :: struct { + it: Internal_Test, + t: T, + allocator_index: int, +} + +Task_Timeout :: struct { + test_index: int, + at_time: time.Time, + location: runtime.Source_Code_Location, +} + +run_test_task :: proc(task: thread.Task) { + data := cast(^Task_Data)(task.data) + + setup_task_signal_handler(task.user_index) + + chan.send(data.t.channel, Event_New_Test { + test_index = task.user_index, + }) + + chan.send(data.t.channel, Event_State_Change { + new_state = .Running, + }) + + context.assertion_failure_proc = test_assertion_failure_proc + + context.logger = { + procedure = test_logger_proc, + data = &data.t, + lowest_level = get_log_level(), + options = Default_Test_Logger_Opts, + } + + free_all(context.temp_allocator) + + data.it.p(&data.t) + + end_t(&data.t) + + new_state : Test_State = .Failed if failed(&data.t) else .Successful + + chan.send(data.t.channel, Event_State_Change { + new_state = new_state, + }) } runner :: proc(internal_tests: []Internal_Test) -> bool { - stream := os.stream_from_handle(os.stdout) - w := io.to_writer(stream) + BATCH_BUFFER_SIZE :: 32 * mem.Kilobyte + POOL_BLOCK_SIZE :: 16 * mem.Kilobyte + CLIPBOARD_BUFFER_SIZE :: 16 * mem.Kilobyte + + BUFFERED_EVENTS_PER_CHANNEL :: 16 + RESERVED_LOG_MESSAGES :: 64 + RESERVED_TEST_FAILURES :: 64 + + ERROR_STRING_TIMEOUT : string : "Test timed out." + ERROR_STRING_UNKNOWN : string : "Test failed for unknown reasons." + OSC_WINDOW_TITLE : string : ansi.OSC + ansi.WINDOW_TITLE + ";Odin test runner (%i/%i)" + ansi.ST + + safe_delete_string :: proc(s: string, allocator := context.allocator) { + // Guard against bad frees on static strings. 
+ switch raw_data(s) { + case raw_data(ERROR_STRING_TIMEOUT), raw_data(ERROR_STRING_UNKNOWN): + return + case: + delete(s, allocator) + } + } + + stdout := io.to_writer(os.stream_from_handle(os.stdout)) + stderr := io.to_writer(os.stream_from_handle(os.stderr)) + + // -- Prepare test data. + + alloc_error: mem.Allocator_Error + + when TEST_NAMES != "" { + select_internal_tests: [dynamic]Internal_Test + defer delete(select_internal_tests) + + { + index_list := TEST_NAMES + for selector in strings.split_iterator(&index_list, ",") { + // Temp allocator is fine since we just need to identify which test it's referring to. + split_selector := strings.split(selector, ".", context.temp_allocator) - t := &T{} - t.w = w - reserve(&t.cleanups, 1024) - defer delete(t.cleanups) + found := false + switch len(split_selector) { + case 1: + // Only the test name? + #no_bounds_check name := split_selector[0] + find_test_by_name: for it in internal_tests { + if it.name == name { + found = true + _, alloc_error = append(&select_internal_tests, it) + fmt.assertf(alloc_error == nil, "Error appending to select internal tests: %v", alloc_error) + break find_test_by_name + } + } + case 2: + #no_bounds_check pkg := split_selector[0] + #no_bounds_check name := split_selector[1] + find_test_by_pkg_and_name: for it in internal_tests { + if it.pkg == pkg && it.name == name { + found = true + _, alloc_error = append(&select_internal_tests, it) + fmt.assertf(alloc_error == nil, "Error appending to select internal tests: %v", alloc_error) + break find_test_by_pkg_and_name + } + } + } + if !found { + fmt.wprintfln(stderr, "No test found for the name: %q", selector) + } + } + } + + // Intentional shadow with user-specified tests. + internal_tests := select_internal_tests[:] + } + total_failure_count := 0 total_success_count := 0 - total_test_count := len(internal_tests) + total_done_count := 0 + total_test_count := len(internal_tests) + + when !FANCY_OUTPUT { + // This is strictly for updating the window title when the progress + // report is disabled. We're otherwise able to depend on the call to + // `needs_to_redraw`. + last_done_count := -1 + } + + if total_test_count == 0 { + // Exit early. + fmt.wprintln(stdout, "No tests to run.") + return true + } + + for it in internal_tests { + // NOTE(Feoramund): The old test runner skipped over tests with nil + // procedures, but I couldn't find any case where they occurred. + // This assert stands to prevent any oversight on my part. + fmt.assertf(it.p != nil, "Test %s.%s has <nil> procedure.", it.pkg, it.name) + } slice.sort_by(internal_tests, proc(a, b: Internal_Test) -> bool { - if a.pkg < b.pkg { - return true + if a.pkg == b.pkg { + return a.name < b.name + } else { + return a.pkg < b.pkg } - return a.name < b.name }) - prev_pkg := "" + // -- Set thread count. - for it in internal_tests { - if it.p == nil { - total_test_count -= 1 - continue + when TEST_THREADS == 0 { + thread_count := os.processor_core_count() + } else { + thread_count := max(1, TEST_THREADS) + } + + thread_count = min(thread_count, total_test_count) + + // -- Allocate. 
+ + pool_stack: mem.Rollback_Stack + alloc_error = mem.rollback_stack_init(&pool_stack, POOL_BLOCK_SIZE) + fmt.assertf(alloc_error == nil, "Error allocating memory for thread pool: %v", alloc_error) + defer mem.rollback_stack_destroy(&pool_stack) + + pool: thread.Pool + thread.pool_init(&pool, mem.rollback_stack_allocator(&pool_stack), thread_count) + defer thread.pool_destroy(&pool) + + task_channels: []Task_Channel = --- + task_channels, alloc_error = make([]Task_Channel, thread_count) + fmt.assertf(alloc_error == nil, "Error allocating memory for update channels: %v", alloc_error) + defer delete(task_channels) + + for &task_channel, index in task_channels { + task_channel.channel, alloc_error = chan.create_buffered(Update_Channel, BUFFERED_EVENTS_PER_CHANNEL, context.allocator) + fmt.assertf(alloc_error == nil, "Error allocating memory for update channel #%i: %v", index, alloc_error) + } + defer for &task_channel in task_channels { + chan.destroy(&task_channel.channel) + } + + // This buffer is used to batch writes to STDOUT or STDERR, to help reduce + // screen flickering. + batch_buffer: bytes.Buffer + bytes.buffer_init_allocator(&batch_buffer, 0, BATCH_BUFFER_SIZE) + batch_writer := io.to_writer(bytes.buffer_to_stream(&batch_buffer)) + defer bytes.buffer_destroy(&batch_buffer) + + report: Report = --- + report, alloc_error = make_report(internal_tests) + fmt.assertf(alloc_error == nil, "Error allocating memory for test report: %v", alloc_error) + defer destroy_report(&report) + + when FANCY_OUTPUT { + // We cannot make use of the ANSI save/restore cursor codes, because they + // work by absolute screen coordinates. This will cause unnecessary + // scrollback if we print at the bottom of someone's terminal. + ansi_redraw_string := fmt.aprintf( + // ANSI for "go up N lines then erase the screen from the cursor forward." + ansi.CSI + "%i" + ansi.CPL + ansi.CSI + ansi.ED + + // We'll combine this with the window title format string, since it + // can be printed at the same time. + "%s", + // 1 extra line for the status bar. + 1 + len(report.packages), OSC_WINDOW_TITLE) + assert(len(ansi_redraw_string) > 0, "Error allocating ANSI redraw string.") + defer delete(ansi_redraw_string) + + thread_count_status_string: string = --- + { + PADDING :: PROGRESS_COLUMN_SPACING + PROGRESS_WIDTH + + unpadded := fmt.tprintf("%i thread%s", thread_count, "" if thread_count == 1 else "s") + thread_count_status_string = fmt.aprintf("%- *[1]s", unpadded, report.pkg_column_len + PADDING) + assert(len(thread_count_status_string) > 0, "Error allocating thread count status string.") } + defer delete(thread_count_status_string) + } + + task_data_slots: []Task_Data = --- + task_data_slots, alloc_error = make([]Task_Data, thread_count) + fmt.assertf(alloc_error == nil, "Error allocating memory for task data slots: %v", alloc_error) + defer delete(task_data_slots) - free_all(context.temp_allocator) - reset_t(t) - defer end_t(t) + // Tests rotate through these allocators as they finish. 
+ task_allocators: []mem.Rollback_Stack = --- + task_allocators, alloc_error = make([]mem.Rollback_Stack, thread_count) + fmt.assertf(alloc_error == nil, "Error allocating memory for task allocators: %v", alloc_error) + defer delete(task_allocators) - if prev_pkg != it.pkg { - prev_pkg = it.pkg - logf(t, "[Package: %s]", it.pkg) + when TRACKING_MEMORY { + task_memory_trackers: []mem.Tracking_Allocator = --- + task_memory_trackers, alloc_error = make([]mem.Tracking_Allocator, thread_count) + fmt.assertf(alloc_error == nil, "Error allocating memory for memory trackers: %v", alloc_error) + defer delete(task_memory_trackers) + } + + #no_bounds_check for i in 0 ..< thread_count { + alloc_error = mem.rollback_stack_init(&task_allocators[i], PER_THREAD_MEMORY) + fmt.assertf(alloc_error == nil, "Error allocating memory for task allocator #%i: %v", i, alloc_error) + when TRACKING_MEMORY { + mem.tracking_allocator_init(&task_memory_trackers[i], mem.rollback_stack_allocator(&task_allocators[i])) } + } - logf(t, "[Test: %s]", it.name) + defer #no_bounds_check for i in 0 ..< thread_count { + when TRACKING_MEMORY { + mem.tracking_allocator_destroy(&task_memory_trackers[i]) + } + mem.rollback_stack_destroy(&task_allocators[i]) + } - run_internal_test(t, it) + task_timeouts: [dynamic]Task_Timeout = --- + task_timeouts, alloc_error = make([dynamic]Task_Timeout, 0, thread_count) + fmt.assertf(alloc_error == nil, "Error allocating memory for task timeouts: %v", alloc_error) + defer delete(task_timeouts) - if failed(t) { - logf(t, "[%s : FAILURE]", it.name) - } else { - logf(t, "[%s : SUCCESS]", it.name) - total_success_count += 1 + failed_test_reason_map: map[int]string = --- + failed_test_reason_map, alloc_error = make(map[int]string, RESERVED_TEST_FAILURES) + fmt.assertf(alloc_error == nil, "Error allocating memory for failed test reasons: %v", alloc_error) + defer delete(failed_test_reason_map) + + log_messages: [dynamic]Log_Message = --- + log_messages, alloc_error = make([dynamic]Log_Message, 0, RESERVED_LOG_MESSAGES) + fmt.assertf(alloc_error == nil, "Error allocating memory for log message queue: %v", alloc_error) + defer delete(log_messages) + + sorted_failed_test_reasons: [dynamic]int = --- + sorted_failed_test_reasons, alloc_error = make([dynamic]int, 0, RESERVED_TEST_FAILURES) + fmt.assertf(alloc_error == nil, "Error allocating memory for sorted failed test reasons: %v", alloc_error) + defer delete(sorted_failed_test_reasons) + + when USE_CLIPBOARD { + clipboard_buffer: bytes.Buffer + bytes.buffer_init_allocator(&clipboard_buffer, 0, CLIPBOARD_BUFFER_SIZE) + defer bytes.buffer_destroy(&clipboard_buffer) + } + + when SHARED_RANDOM_SEED == 0 { + shared_random_seed := cast(u64)intrinsics.read_cycle_counter() + } else { + shared_random_seed := SHARED_RANDOM_SEED + } + + // -- Setup initial tasks. + + // NOTE(Feoramund): This is the allocator that will be used by threads to + // persist log messages past their lifetimes. It has its own variable name + // in the event it needs to be changed from `context.allocator` without + // digging through the source to divine everywhere it is used for that. 
+ shared_log_allocator := context.allocator + + context.logger = { + procedure = runner_logger_proc, + data = &log_messages, + lowest_level = get_log_level(), + options = Default_Test_Logger_Opts - {.Short_File_Path, .Line, .Procedure}, + } + + run_index: int + + setup_tasks: for &data, task_index in task_data_slots { + setup_next_test: for run_index < total_test_count { + #no_bounds_check it := internal_tests[run_index] + defer run_index += 1 + + data.it = it + data.t.seed = shared_random_seed + #no_bounds_check data.t.channel = chan.as_send(task_channels[task_index].channel) + data.t._log_allocator = shared_log_allocator + data.allocator_index = task_index + + #no_bounds_check when TRACKING_MEMORY { + task_allocator := mem.tracking_allocator(&task_memory_trackers[task_index]) + } else { + task_allocator := mem.rollback_stack_allocator(&task_allocators[task_index]) + } + + thread.pool_add_task(&pool, task_allocator, run_test_task, &data, run_index) + + continue setup_tasks } } - logf(t, "----------------------------------------") - if total_test_count == 0 { - log(t, "NO TESTS RAN") + + // -- Run tests. + + setup_signal_handler() + + fmt.wprint(stdout, ansi.CSI + ansi.DECTCEM_HIDE) + + when FANCY_OUTPUT { + signals_were_raised := false + + redraw_report(stdout, report) + draw_status_bar(stdout, thread_count_status_string, total_done_count, total_test_count) + } + + when TEST_THREADS == 0 { + pkg_log.infof("Starting test runner with %i thread%s. Set with -define:ODIN_TEST_THREADS=n.", + thread_count, + "" if thread_count == 1 else "s") + } else { + pkg_log.infof("Starting test runner with %i thread%s.", + thread_count, + "" if thread_count == 1 else "s") + } + + when SHARED_RANDOM_SEED == 0 { + pkg_log.infof("The random seed sent to every test is: %v. Set with -define:ODIN_TEST_RANDOM_SEED=n.", shared_random_seed) } else { - logf(t, "%d/%d SUCCESSFUL", total_success_count, total_test_count) + pkg_log.infof("The random seed sent to every test is: %v.", shared_random_seed) + } + + when TRACKING_MEMORY { + when ALWAYS_REPORT_MEMORY { + pkg_log.info("Memory tracking is enabled. Tests will log their memory usage when complete.") + } else { + pkg_log.info("Memory tracking is enabled. Tests will log their memory usage if there's an issue.") + } + pkg_log.info("< Final Mem/ Total Mem> < Peak Mem> (#Free/Alloc) :: [package.test_name]") + } else when ALWAYS_REPORT_MEMORY { + pkg_log.warn("ODIN_TEST_ALWAYS_REPORT_MEMORY is true, but ODIN_TRACK_MEMORY is false.") + } + + start_time := time.now() + + thread.pool_start(&pool) + main_loop: for !thread.pool_is_empty(&pool) { + { + events_pending := thread.pool_num_done(&pool) > 0 + + if !events_pending { + poll_tasks: for &task_channel in task_channels { + if chan.len(task_channel.channel) > 0 { + events_pending = true + break poll_tasks + } + } + } + + if !events_pending { + // Keep the main thread from pegging a core at 100% usage. 
+ time.sleep(1 * time.Microsecond) + } + } + + cycle_pool: for task in thread.pool_pop_done(&pool) { + data := cast(^Task_Data)(task.data) + + when TRACKING_MEMORY { + #no_bounds_check tracker := &task_memory_trackers[data.allocator_index] + + memory_is_in_bad_state := len(tracker.allocation_map) + len(tracker.bad_free_array) > 0 + + when ALWAYS_REPORT_MEMORY { + should_report := true + } else { + should_report := memory_is_in_bad_state + } + + if should_report { + write_memory_report(batch_writer, tracker, data.it.pkg, data.it.name) + + pkg_log.log(.Warning if memory_is_in_bad_state else .Info, bytes.buffer_to_string(&batch_buffer)) + bytes.buffer_reset(&batch_buffer) + } + + mem.tracking_allocator_reset(tracker) + } + + free_all(task.allocator) + + if run_index < total_test_count { + #no_bounds_check it := internal_tests[run_index] + defer run_index += 1 + + data.it = it + data.t.seed = shared_random_seed + data.t.error_count = 0 + + thread.pool_add_task(&pool, task.allocator, run_test_task, data, run_index) + } + } + + handle_events: for &task_channel in task_channels { + for ev in chan.try_recv(task_channel.channel) { + switch event in ev { + case Event_New_Test: + task_channel.test_index = event.test_index + + case Event_State_Change: + #no_bounds_check report.all_test_states[task_channel.test_index] = event.new_state + + #no_bounds_check it := internal_tests[task_channel.test_index] + #no_bounds_check pkg := report.packages_by_name[it.pkg] + + #partial switch event.new_state { + case .Failed: + if task_channel.test_index not_in failed_test_reason_map { + failed_test_reason_map[task_channel.test_index] = ERROR_STRING_UNKNOWN + } + total_failure_count += 1 + total_done_count += 1 + case .Successful: + total_success_count += 1 + total_done_count += 1 + } + + when ODIN_DEBUG { + pkg_log.debugf("Test #%i %s.%s changed state to %v.", task_channel.test_index, it.pkg, it.name, event.new_state) + } + + pkg.last_change_state = event.new_state + pkg.last_change_name = it.name + pkg.frame_ready = false + + case Event_Set_Fail_Timeout: + _, alloc_error = append(&task_timeouts, Task_Timeout { + test_index = task_channel.test_index, + at_time = event.at_time, + location = event.location, + }) + fmt.assertf(alloc_error == nil, "Error appending to task timeouts: %v", alloc_error) + + case Event_Log_Message: + _, alloc_error = append(&log_messages, Log_Message { + level = event.level, + text = event.formatted_text, + time = event.time, + allocator = shared_log_allocator, + }) + fmt.assertf(alloc_error == nil, "Error appending to log messages: %v", alloc_error) + + if event.level >= .Error { + // Save the message for the final summary. + if old_error, ok := failed_test_reason_map[task_channel.test_index]; ok { + safe_delete_string(old_error, shared_log_allocator) + } + failed_test_reason_map[task_channel.test_index] = event.text + } else { + delete(event.text, shared_log_allocator) + } + } + } + } + + check_timeouts: for i := len(task_timeouts) - 1; i >= 0; i -= 1 { + #no_bounds_check timeout := &task_timeouts[i] + + if time.since(timeout.at_time) < 0 { + continue check_timeouts + } + + defer unordered_remove(&task_timeouts, i) + + #no_bounds_check if report.all_test_states[timeout.test_index] > .Running { + continue check_timeouts + } + + if !thread.pool_stop_task(&pool, timeout.test_index) { + // The task may have stopped a split second after we started + // checking, but we haven't handled the new state yet. 
+ continue check_timeouts + } + + #no_bounds_check report.all_test_states[timeout.test_index] = .Failed + #no_bounds_check it := internal_tests[timeout.test_index] + #no_bounds_check pkg := report.packages_by_name[it.pkg] + pkg.frame_ready = false + + if old_error, ok := failed_test_reason_map[timeout.test_index]; ok { + safe_delete_string(old_error, shared_log_allocator) + } + failed_test_reason_map[timeout.test_index] = ERROR_STRING_TIMEOUT + total_failure_count += 1 + total_done_count += 1 + + now := time.now() + _, alloc_error = append(&log_messages, Log_Message { + level = .Error, + text = format_log_text(.Error, ERROR_STRING_TIMEOUT, Default_Test_Logger_Opts, timeout.location, now), + time = now, + allocator = context.allocator, + }) + fmt.assertf(alloc_error == nil, "Error appending to log messages: %v", alloc_error) + + find_task_data: for &data in task_data_slots { + if data.it.pkg == it.pkg && data.it.name == it.name { + end_t(&data.t) + break find_task_data + } + } + } + + if should_stop_runner() { + fmt.wprintln(stderr, "\nCaught interrupt signal. Stopping all tests.") + thread.pool_shutdown(&pool) + break main_loop + } + + when FANCY_OUTPUT { + // Because the bounds checking procs send directly to STDERR with + // no way to redirect or handle them, we need to at least try to + // let the user see those messages when using the animated progress + // report. This flag may be set by the block of code below if a + // signal is raised. + // + // It'll be purely by luck if the output is interleaved properly, + // given the nature of non-thread-safe printing. + // + // At worst, if Odin did not print any error for this signal, we'll + // just re-display the progress report. The fatal log error message + // should be enough to clue the user in that something dire has + // occurred. + bypass_progress_overwrite := false + } + + if test_index, reason, ok := should_stop_test(); ok { + #no_bounds_check report.all_test_states[test_index] = .Failed + #no_bounds_check it := internal_tests[test_index] + #no_bounds_check pkg := report.packages_by_name[it.pkg] + pkg.frame_ready = false + + fmt.assertf(thread.pool_stop_task(&pool, test_index), + "A signal (%v) was raised to stop test #%i %s.%s, but it was unable to be found.", + reason, test_index, it.pkg, it.name) + + if test_index not_in failed_test_reason_map { + // We only write a new error message here if there wasn't one + // already, because the message we can provide based only on + // the signal won't be very useful, whereas asserts and panics + // will provide a user-written error message. + failed_test_reason_map[test_index] = fmt.aprintf("Signal caught: %v", reason, allocator = shared_log_allocator) + pkg_log.fatalf("Caught signal to stop test #%i %s.%s for: %v.", test_index, it.pkg, it.name, reason) + + } + + when FANCY_OUTPUT { + bypass_progress_overwrite = true + signals_were_raised = true + } + + total_failure_count += 1 + total_done_count += 1 + } + + // -- Redraw. 
+ + when FANCY_OUTPUT { + if len(log_messages) == 0 && !needs_to_redraw(report) { + continue main_loop + } + + if !bypass_progress_overwrite { + fmt.wprintf(stdout, ansi_redraw_string, total_done_count, total_test_count) + } + } else { + if total_done_count != last_done_count { + fmt.wprintf(stdout, OSC_WINDOW_TITLE, total_done_count, total_test_count) + last_done_count = total_done_count + } + + if len(log_messages) == 0 { + continue main_loop + } + } + + // Because each thread has its own messenger channel, log messages + // arrive in chunks that are in-order, but when they're merged with the + // logs from other threads, they become out-of-order. + slice.stable_sort_by(log_messages[:], proc(a, b: Log_Message) -> bool { + return time.diff(a.time, b.time) > 0 + }) + + for message in log_messages { + fmt.wprintln(batch_writer, message.text) + delete(message.text, message.allocator) + } + + fmt.wprint(stderr, bytes.buffer_to_string(&batch_buffer)) + clear(&log_messages) + bytes.buffer_reset(&batch_buffer) + + when FANCY_OUTPUT { + redraw_report(batch_writer, report) + draw_status_bar(batch_writer, thread_count_status_string, total_done_count, total_test_count) + fmt.wprint(stdout, bytes.buffer_to_string(&batch_buffer)) + bytes.buffer_reset(&batch_buffer) + } + } + + // -- All tests are complete, or the runner has been interrupted. + + // NOTE(Feoramund): If you've arrived here after receiving signal 11 or + // SIGSEGV on the main runner thread, while using a UNIX-like platform, + // there is the possibility that you may have encountered a rare edge case + // involving the joining of threads. + // + // At the time of writing, the thread library is undergoing a rewrite that + // should solve this problem; it is not an issue with the test runner itself. + thread.pool_join(&pool) + + finished_in := time.since(start_time) + + when !FANCY_OUTPUT { + // One line to space out the results, since we don't have the status + // bar in plain mode. + fmt.wprintln(batch_writer) + } + + fmt.wprintf(batch_writer, + "Finished %i test%s in %v.", + total_done_count, + "" if total_done_count == 1 else "s", + finished_in) + + if total_done_count != total_test_count { + not_run_count := total_test_count - total_done_count + fmt.wprintf(batch_writer, + " " + SGR_READY + "%i" + SGR_RESET + " %s left undone.", + not_run_count, + "test was" if not_run_count == 1 else "tests were") + } + + if total_success_count == total_test_count { + fmt.wprintfln(batch_writer, + " %s " + SGR_SUCCESS + "successful." + SGR_RESET, + "The test was" if total_test_count == 1 else "All tests were") + } else if total_failure_count > 0 { + if total_failure_count == total_test_count { + fmt.wprintfln(batch_writer, + " %s " + SGR_FAILED + "failed." 
+ SGR_RESET, + "The test" if total_test_count == 1 else "All tests") + } else { + fmt.wprintfln(batch_writer, + " " + SGR_FAILED + "%i" + SGR_RESET + " test%s failed.", + total_failure_count, + "" if total_failure_count == 1 else "s") + } + + for test_index in failed_test_reason_map { + _, alloc_error = append(&sorted_failed_test_reasons, test_index) + fmt.assertf(alloc_error == nil, "Error appending to sorted failed test reasons: %v", alloc_error) + } + + slice.sort(sorted_failed_test_reasons[:]) + + for test_index in sorted_failed_test_reasons { + #no_bounds_check last_error := failed_test_reason_map[test_index] + #no_bounds_check it := internal_tests[test_index] + pkg_and_name := fmt.tprintf("%s.%s", it.pkg, it.name) + fmt.wprintfln(batch_writer, " - %- *[1]s\t%s", + pkg_and_name, + report.pkg_column_len + report.test_column_len, + last_error) + safe_delete_string(last_error, shared_log_allocator) + } + + if total_success_count > 0 { + when USE_CLIPBOARD { + clipboard_writer := io.to_writer(bytes.buffer_to_stream(&clipboard_buffer)) + fmt.wprint(clipboard_writer, "-define:ODIN_TEST_NAMES=") + for test_index in sorted_failed_test_reasons { + #no_bounds_check it := internal_tests[test_index] + fmt.wprintf(clipboard_writer, "%s.%s,", it.pkg, it.name) + } + + encoded_names := base64.encode(bytes.buffer_to_bytes(&clipboard_buffer), allocator = context.temp_allocator) + + fmt.wprintf(batch_writer, + ansi.OSC + ansi.CLIPBOARD + ";c;%s" + ansi.ST + + "\nThe name%s of the failed test%s been copied to your clipboard.", + encoded_names, + "" if total_failure_count == 1 else "s", + " has" if total_failure_count == 1 else "s have") + } else { + fmt.wprintf(batch_writer, "\nTo run only the failed test%s, use:\n\t-define:ODIN_TEST_NAMES=", + "" if total_failure_count == 1 else "s") + for test_index in sorted_failed_test_reasons { + #no_bounds_check it := internal_tests[test_index] + fmt.wprintf(batch_writer, "%s.%s,", it.pkg, it.name) + } + fmt.wprint(batch_writer, "\n\nIf your terminal supports OSC 52, you may use -define:ODIN_TEST_CLIPBOARD to have this copied directly to your clipboard.") + } + + fmt.wprintln(batch_writer) + } } + + fmt.wprint(stdout, ansi.CSI + ansi.DECTCEM_SHOW) + + when FANCY_OUTPUT { + if signals_were_raised { + fmt.wprintln(batch_writer, ` +Signals were raised during this test run. Log messages are likely to have collided with each other. +To partly mitigate this, redirect STDERR to a file or use the -define:ODIN_TEST_FANCY=false option.`) + } + } + + fmt.wprintln(stderr, bytes.buffer_to_string(&batch_buffer)) + return total_success_count == total_test_count } diff --git a/core/testing/runner_other.odin b/core/testing/runner_other.odin deleted file mode 100644 index f3271d209..000000000 --- a/core/testing/runner_other.odin +++ /dev/null @@ -1,14 +0,0 @@ -//+private -//+build !windows -package testing - -import "core:time" - -run_internal_test :: proc(t: ^T, it: Internal_Test) { - // TODO(bill): Catch panics on other platforms - it.p(t) -} - -_fail_timeout :: proc(t: ^T, duration: time.Duration, loc := #caller_location) { - -}
\ No newline at end of file diff --git a/core/testing/runner_windows.odin b/core/testing/runner_windows.odin deleted file mode 100644 index 15264355b..000000000 --- a/core/testing/runner_windows.odin +++ /dev/null @@ -1,235 +0,0 @@ -//+private -//+build windows -package testing - -import win32 "core:sys/windows" -import "base:runtime" -import "base:intrinsics" -import "core:time" - -Sema :: struct { - count: i32, -} - -sema_reset :: proc "contextless" (s: ^Sema) { - intrinsics.atomic_store(&s.count, 0) -} -sema_wait :: proc "contextless" (s: ^Sema) { - for { - original_count := s.count - for original_count == 0 { - win32.WaitOnAddress(&s.count, &original_count, size_of(original_count), win32.INFINITE) - original_count = s.count - } - if original_count == intrinsics.atomic_compare_exchange_strong(&s.count, original_count-1, original_count) { - return - } - } -} -sema_wait_with_timeout :: proc "contextless" (s: ^Sema, duration: time.Duration) -> bool { - if duration <= 0 { - return false - } - for { - - original_count := intrinsics.atomic_load(&s.count) - for start := time.tick_now(); original_count == 0; /**/ { - if intrinsics.atomic_load(&s.count) != original_count { - remaining := duration - time.tick_since(start) - if remaining < 0 { - return false - } - ms := u32(remaining/time.Millisecond) - if !win32.WaitOnAddress(&s.count, &original_count, size_of(original_count), ms) { - return false - } - } - original_count = s.count - } - if original_count == intrinsics.atomic_compare_exchange_strong(&s.count, original_count-1, original_count) { - return true - } - } -} - -sema_post :: proc "contextless" (s: ^Sema, count := 1) { - intrinsics.atomic_add(&s.count, i32(count)) - if count == 1 { - win32.WakeByAddressSingle(&s.count) - } else { - win32.WakeByAddressAll(&s.count) - } -} - - - -Thread_Proc :: #type proc(^Thread) - -MAX_USER_ARGUMENTS :: 8 - -Thread :: struct { - using specific: Thread_Os_Specific, - procedure: Thread_Proc, - - t: ^T, - it: Internal_Test, - success: bool, - - init_context: Maybe(runtime.Context), - - creation_allocator: runtime.Allocator, - - internal_fail_timeout: time.Duration, - internal_fail_timeout_loc: runtime.Source_Code_Location, -} - -Thread_Os_Specific :: struct { - win32_thread: win32.HANDLE, - win32_thread_id: win32.DWORD, - done: bool, // see note in `is_done` -} - -thread_create :: proc(procedure: Thread_Proc) -> ^Thread { - __windows_thread_entry_proc :: proc "system" (t_: rawptr) -> win32.DWORD { - t := (^Thread)(t_) - context = t.init_context.? 
or_else runtime.default_context() - - t.procedure(t) - - if t.init_context == nil { - if context.temp_allocator.data == &runtime.global_default_temp_allocator_data { - runtime.default_temp_allocator_destroy(auto_cast context.temp_allocator.data) - } - } - - intrinsics.atomic_store(&t.done, true) - return 0 - } - - - thread := new(Thread) - if thread == nil { - return nil - } - thread.creation_allocator = context.allocator - - win32_thread_id: win32.DWORD - win32_thread := win32.CreateThread(nil, 0, __windows_thread_entry_proc, thread, win32.CREATE_SUSPENDED, &win32_thread_id) - if win32_thread == nil { - free(thread, thread.creation_allocator) - return nil - } - thread.procedure = procedure - thread.win32_thread = win32_thread - thread.win32_thread_id = win32_thread_id - thread.init_context = context - - return thread -} - -thread_start :: proc "contextless" (thread: ^Thread) { - win32.ResumeThread(thread.win32_thread) -} - -thread_join_and_destroy :: proc(thread: ^Thread) { - if thread.win32_thread != win32.INVALID_HANDLE { - win32.WaitForSingleObject(thread.win32_thread, win32.INFINITE) - win32.CloseHandle(thread.win32_thread) - thread.win32_thread = win32.INVALID_HANDLE - } - free(thread, thread.creation_allocator) -} - -thread_terminate :: proc "contextless" (thread: ^Thread, exit_code: int) { - win32.TerminateThread(thread.win32_thread, u32(exit_code)) -} - - -_fail_timeout :: proc(t: ^T, duration: time.Duration, loc := #caller_location) { - assert(global_fail_timeout_thread == nil, "set_fail_timeout previously called", loc) - - thread := thread_create(proc(thread: ^Thread) { - t := thread.t - timeout := thread.internal_fail_timeout - if !sema_wait_with_timeout(&global_fail_timeout_semaphore, timeout) { - fail_now(t, "TIMEOUT", thread.internal_fail_timeout_loc) - } - }) - thread.internal_fail_timeout = duration - thread.internal_fail_timeout_loc = loc - thread.t = t - global_fail_timeout_thread = thread - thread_start(thread) -} - -global_fail_timeout_thread: ^Thread -global_fail_timeout_semaphore: Sema - -global_threaded_runner_semaphore: Sema -global_exception_handler: rawptr -global_current_thread: ^Thread -global_current_t: ^T - -run_internal_test :: proc(t: ^T, it: Internal_Test) { - thread := thread_create(proc(thread: ^Thread) { - exception_handler_proc :: proc "system" (ExceptionInfo: ^win32.EXCEPTION_POINTERS) -> win32.LONG { - switch ExceptionInfo.ExceptionRecord.ExceptionCode { - case - win32.EXCEPTION_DATATYPE_MISALIGNMENT, - win32.EXCEPTION_BREAKPOINT, - win32.EXCEPTION_ACCESS_VIOLATION, - win32.EXCEPTION_ILLEGAL_INSTRUCTION, - win32.EXCEPTION_ARRAY_BOUNDS_EXCEEDED, - win32.EXCEPTION_STACK_OVERFLOW: - - sema_post(&global_threaded_runner_semaphore) - return win32.EXCEPTION_EXECUTE_HANDLER - } - - return win32.EXCEPTION_CONTINUE_SEARCH - } - global_exception_handler = win32.AddVectoredExceptionHandler(0, exception_handler_proc) - - context.assertion_failure_proc = proc(prefix, message: string, loc: runtime.Source_Code_Location) -> ! { - errorf(global_current_t, "%s %s", prefix, message, loc=loc) - intrinsics.trap() - } - - t := thread.t - - global_fail_timeout_thread = nil - sema_reset(&global_fail_timeout_semaphore) - - thread.it.p(t) - - sema_post(&global_fail_timeout_semaphore) - if global_fail_timeout_thread != nil do thread_join_and_destroy(global_fail_timeout_thread) - - thread.success = true - sema_post(&global_threaded_runner_semaphore) - }) - - sema_reset(&global_threaded_runner_semaphore) - global_current_t = t - - t._fail_now = proc() -> ! 
{ - intrinsics.trap() - } - - thread.t = t - thread.it = it - thread.success = false - thread_start(thread) - - sema_wait(&global_threaded_runner_semaphore) - thread_terminate(thread, int(!thread.success)) - thread_join_and_destroy(thread) - - win32.RemoveVectoredExceptionHandler(global_exception_handler) - - if !thread.success && t.error_count == 0 { - t.error_count += 1 - } - - return -} diff --git a/core/testing/signal_handler.odin b/core/testing/signal_handler.odin new file mode 100644 index 000000000..891f6bbb6 --- /dev/null +++ b/core/testing/signal_handler.odin @@ -0,0 +1,33 @@ +//+private +package testing + +import "base:runtime" +import pkg_log "core:log" + +Stop_Reason :: enum { + Unknown, + Illegal_Instruction, + Arithmetic_Error, + Segmentation_Fault, +} + +test_assertion_failure_proc :: proc(prefix, message: string, loc: runtime.Source_Code_Location) -> ! { + pkg_log.fatalf("%s: %s", prefix, message, location = loc) + runtime.trap() +} + +setup_signal_handler :: proc() { + _setup_signal_handler() +} + +setup_task_signal_handler :: proc(test_index: int) { + _setup_task_signal_handler(test_index) +} + +should_stop_runner :: proc() -> bool { + return _should_stop_runner() +} + +should_stop_test :: proc() -> (test_index: int, reason: Stop_Reason, ok: bool) { + return _should_stop_test() +} diff --git a/core/testing/signal_handler_libc.odin b/core/testing/signal_handler_libc.odin new file mode 100644 index 000000000..0ab34776e --- /dev/null +++ b/core/testing/signal_handler_libc.odin @@ -0,0 +1,149 @@ +//+private +//+build windows, linux, darwin, freebsd, openbsd, netbsd, haiku +package testing + +import "base:intrinsics" +import "core:c/libc" +import "core:encoding/ansi" +import "core:sync" +import "core:os" +@require import "core:sys/unix" + +@(private="file") stop_runner_flag: libc.sig_atomic_t + +@(private="file") stop_test_gate: sync.Mutex +@(private="file") stop_test_index: libc.sig_atomic_t +@(private="file") stop_test_reason: libc.sig_atomic_t +@(private="file") stop_test_alert: libc.sig_atomic_t + +@(private="file", thread_local) +local_test_index: libc.sig_atomic_t + +@(private="file") +stop_runner_callback :: proc "c" (sig: libc.int) { + prev := intrinsics.atomic_add(&stop_runner_flag, 1) + + // If the flag was already set (if this is the second signal sent for example), + // consider this a forced (not graceful) exit. + if prev > 0 { + os.exit(int(sig)) + } +} + +@(private="file") +stop_test_callback :: proc "c" (sig: libc.int) { + if local_test_index == -1 { + // We're the test runner, and we ourselves have caught a signal from + // which there is no recovery. + // + // The most we can do now is make sure the user's cursor is visible, + // nuke the entire processs, and hope a useful core dump survives. + + // NOTE(Feoramund): Using these write calls in a signal handler is + // undefined behavior in C99 but possibly tolerated in POSIX 2008. + // Either way, we may as well try to salvage what we can. + show_cursor := ansi.CSI + ansi.DECTCEM_SHOW + libc.fwrite(raw_data(show_cursor), size_of(byte), len(show_cursor), libc.stdout) + libc.fflush(libc.stdout) + + // This is an attempt at being compliant by avoiding printf. 
+ sigbuf: [8]byte + sigstr: string + { + signum := cast(int)sig + i := len(sigbuf) - 2 + for signum > 0 { + m := signum % 10 + signum /= 10 + sigbuf[i] = cast(u8)('0' + m) + i -= 1 + } + sigstr = cast(string)sigbuf[1 + i:len(sigbuf) - 1] + } + + advisory_a := ` +The test runner's main thread has caught an unrecoverable error (signal ` + advisory_b := `) and will now forcibly terminate. +This is a dire bug and should be reported to the Odin developers. +` + libc.fwrite(raw_data(advisory_a), size_of(byte), len(advisory_a), libc.stderr) + libc.fwrite(raw_data(sigstr), size_of(byte), len(sigstr), libc.stderr) + libc.fwrite(raw_data(advisory_b), size_of(byte), len(advisory_b), libc.stderr) + + // Try to get a core dump. + libc.abort() + } + + if sync.mutex_guard(&stop_test_gate) { + intrinsics.atomic_store(&stop_test_index, local_test_index) + intrinsics.atomic_store(&stop_test_reason, cast(libc.sig_atomic_t)sig) + intrinsics.atomic_store(&stop_test_alert, 1) + + for { + // Idle until this thread is terminated by the runner, + // otherwise we may continue to generate signals. + intrinsics.cpu_relax() + + when ODIN_OS != .Windows { + // NOTE(Feoramund): Some UNIX-like platforms may require this. + // + // During testing, I found that NetBSD 10.0 refused to + // terminate a task thread, even when its thread had been + // properly set to PTHREAD_CANCEL_ASYNCHRONOUS. + // + // The runner would stall after returning from `pthread_cancel`. + + unix.pthread_testcancel() + } + } + } +} + +_setup_signal_handler :: proc() { + local_test_index = -1 + + // Catch user interrupt / CTRL-C. + libc.signal(libc.SIGINT, stop_runner_callback) + // Catch polite termination request. + libc.signal(libc.SIGTERM, stop_runner_callback) + + // For tests: + // Catch asserts and panics. + libc.signal(libc.SIGILL, stop_test_callback) + // Catch arithmetic errors. + libc.signal(libc.SIGFPE, stop_test_callback) + // Catch segmentation faults (illegal memory access). + libc.signal(libc.SIGSEGV, stop_test_callback) +} + +_setup_task_signal_handler :: proc(test_index: int) { + local_test_index = cast(libc.sig_atomic_t)test_index +} + +_should_stop_runner :: proc() -> bool { + return intrinsics.atomic_load(&stop_runner_flag) == 1 +} + +@(private="file") +unlock_stop_test_gate :: proc(_: int, _: Stop_Reason, ok: bool) { + if ok { + sync.mutex_unlock(&stop_test_gate) + } +} + +@(deferred_out=unlock_stop_test_gate) +_should_stop_test :: proc() -> (test_index: int, reason: Stop_Reason, ok: bool) { + if intrinsics.atomic_load(&stop_test_alert) == 1 { + intrinsics.atomic_store(&stop_test_alert, 0) + + test_index = cast(int)intrinsics.atomic_load(&stop_test_index) + switch intrinsics.atomic_load(&stop_test_reason) { + case libc.SIGFPE: reason = .Arithmetic_Error + case libc.SIGILL: reason = .Illegal_Instruction + case libc.SIGSEGV: reason = .Segmentation_Fault + } + ok = true + } + + return +} diff --git a/core/testing/signal_handler_other.odin b/core/testing/signal_handler_other.odin new file mode 100644 index 000000000..04981f5af --- /dev/null +++ b/core/testing/signal_handler_other.odin @@ -0,0 +1,19 @@ +//+private +//+build !windows !linux !darwin !freebsd !openbsd !netbsd !haiku +package testing + +_setup_signal_handler :: proc() { + // Do nothing. +} + +_setup_task_signal_handler :: proc(test_index: int) { + // Do nothing. 
+} + +_should_stop_runner :: proc() -> bool { + return false +} + +_should_stop_test :: proc() -> (test_index: int, reason: Stop_Reason, ok: bool) { + return 0, {}, false +} diff --git a/core/testing/testing.odin b/core/testing/testing.odin index a8c5ffa48..92b4d391d 100644 --- a/core/testing/testing.odin +++ b/core/testing/testing.odin @@ -1,10 +1,11 @@ package testing -import "core:fmt" -import "core:io" -import "core:time" import "base:intrinsics" +import "base:runtime" +import pkg_log "core:log" import "core:reflect" +import "core:sync/chan" +import "core:time" _ :: reflect // alias reflect to nothing to force visibility for -vet @@ -22,44 +23,55 @@ Internal_Test :: struct { Internal_Cleanup :: struct { procedure: proc(rawptr), user_data: rawptr, + ctx: runtime.Context, } T :: struct { error_count: int, - w: io.Writer, + // If your test needs to perform random operations, it's advised to use + // this value to seed a local random number generator rather than relying + // on the non-thread-safe global one. + // + // This way, your results will be deterministic. + // + // This value is chosen at startup of the test runner, logged, and may be + // specified by the user. It is the same for all tests of a single run. + seed: u64, + + channel: Update_Channel_Sender, cleanups: [dynamic]Internal_Cleanup, + // This allocator is shared between the test runner and its threads for + // cloning log strings, so they can outlive the lifetime of individual + // tests during channel transmission. + _log_allocator: runtime.Allocator, + _fail_now: proc() -> !, } +@(deprecated="prefer `log.error`") error :: proc(t: ^T, args: ..any, loc := #caller_location) { - fmt.wprintf(t.w, "%v: ", loc) - fmt.wprintln(t.w, ..args) - t.error_count += 1 + pkg_log.error(..args, location = loc) } +@(deprecated="prefer `log.errorf`") errorf :: proc(t: ^T, format: string, args: ..any, loc := #caller_location) { - fmt.wprintf(t.w, "%v: ", loc) - fmt.wprintf(t.w, format, ..args) - fmt.wprintln(t.w) - t.error_count += 1 + pkg_log.errorf(format, ..args, location = loc) } fail :: proc(t: ^T, loc := #caller_location) { - error(t, "FAIL", loc=loc) - t.error_count += 1 + pkg_log.error("FAIL", location=loc) } fail_now :: proc(t: ^T, msg := "", loc := #caller_location) { if msg != "" { - error(t, "FAIL:", msg, loc=loc) + pkg_log.error("FAIL:", msg, location=loc) } else { - error(t, "FAIL", loc=loc) + pkg_log.error("FAIL", location=loc) } - t.error_count += 1 if t._fail_now != nil { t._fail_now() } @@ -69,32 +81,34 @@ failed :: proc(t: ^T) -> bool { return t.error_count != 0 } +@(deprecated="prefer `log.info`") log :: proc(t: ^T, args: ..any, loc := #caller_location) { - fmt.wprintln(t.w, ..args) + pkg_log.info(..args, location = loc) } +@(deprecated="prefer `log.infof`") logf :: proc(t: ^T, format: string, args: ..any, loc := #caller_location) { - fmt.wprintf(t.w, format, ..args) - fmt.wprintln(t.w) + pkg_log.infof(format, ..args, location = loc) } -// cleanup registers a procedure and user_data, which will be called when the test, and all its subtests, complete -// cleanup procedures will be called in LIFO (last added, first called) order. +// cleanup registers a procedure and user_data, which will be called when the test, and all its subtests, complete. +// Cleanup procedures will be called in LIFO (last added, first called) order. +// Each procedure will use a copy of the context at the time of registering. 
cleanup :: proc(t: ^T, procedure: proc(rawptr), user_data: rawptr) { - append(&t.cleanups, Internal_Cleanup{procedure, user_data}) + append(&t.cleanups, Internal_Cleanup{procedure, user_data, context}) } expect :: proc(t: ^T, ok: bool, msg: string = "", loc := #caller_location) -> bool { if !ok { - error(t, msg, loc=loc) + pkg_log.error(msg, location=loc) } return ok } expectf :: proc(t: ^T, ok: bool, format: string, args: ..any, loc := #caller_location) -> bool { if !ok { - errorf(t, format, ..args, loc=loc) + pkg_log.errorf(format, ..args, location=loc) } return ok } @@ -102,12 +116,15 @@ expectf :: proc(t: ^T, ok: bool, format: string, args: ..any, loc := #caller_loc expect_value :: proc(t: ^T, value, expected: $T, loc := #caller_location) -> bool where intrinsics.type_is_comparable(T) { ok := value == expected || reflect.is_nil(value) && reflect.is_nil(expected) if !ok { - errorf(t, "expected %v, got %v", expected, value, loc=loc) + pkg_log.errorf("expected %v, got %v", expected, value, location=loc) } return ok } set_fail_timeout :: proc(t: ^T, duration: time.Duration, loc := #caller_location) { - _fail_timeout(t, duration, loc) + chan.send(t.channel, Event_Set_Fail_Timeout { + at_time = time.time_add(time.now(), duration), + location = loc, + }) } |
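
For reference, a minimal sketch (not part of this commit) of what a test looks like against the updated user-facing API in testing.odin: `core:log` in place of the now-deprecated `testing.log`/`testing.error` procs, the shared per-run `t.seed`, `set_fail_timeout`, and a cleanup proc that runs with a copy of the registering context. The invocation in the leading comment is illustrative only; the defines and their defaults are documented at the top of runner.odin.

// Hypothetical invocation (values are examples, not defaults):
//   odin test . -define:ODIN_TEST_THREADS=4 -define:ODIN_TEST_FANCY=false
package example_tests

import "core:log"
import "core:testing"
import "core:time"

@(test)
addition_works :: proc(t: ^testing.T) {
	testing.set_fail_timeout(t, 10 * time.Second)

	testing.cleanup(t, proc(_: rawptr) {
		// Cleanups run in LIFO order after the test completes,
		// using a copy of the context captured at registration.
		log.info("cleanup ran")
	}, nil)

	log.infof("shared random seed for this run: %v", t.seed)
	testing.expect_value(t, 2 + 2, 4)
}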