diff options
| author | Feoramund <161657516+Feoramund@users.noreply.github.com> | 2024-05-27 19:44:19 -0400 |
|---|---|---|
| committer | Feoramund <161657516+Feoramund@users.noreply.github.com> | 2024-06-02 14:34:31 -0400 |
| commit | b6c4dfb68d5d7d2ac883eaa18409eb8e7d6f2e9c (patch) | |
| tree | ddd589e66937fda42aea76aedb0056c42cca2572 /core/testing/runner.odin | |
| parent | 95c2e020fff74f8e2e193db595c25726a8b9a99e (diff) | |
Refactor the test runner
Changes
- Support multi-threaded testing.
- Support `set_fail_timeout` on all platforms.
- Display an animated progress report.
- Set up all tests with a context logger.
- Give each test its own custom allocator.
- Support tracking test memory usage.
- Display a summary of the failed tests at the end.
- Let users select only specific tests to run.
- Support copying the names of failed tests to the clipboard so they can be rerun.
- Support catching SIGINT (CTRL-C) to cancel early.
- Run cleanup procs with the context that was recorded when they were registered (see the sketch after this list).
- Write all log messages to STDERR for easy redirection.
- Possibly more I've forgotten.
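
Taken together, the context logger, `set_fail_timeout`, and cleanup-context changes determine what an individual test can rely on. Below is a minimal sketch of a test exercising all three; the package name, test name, and test body are illustrative, not part of this commit:

```odin
package example

import "core:log"
import "core:testing"
import "core:time"

@(test)
example_test :: proc(t: ^testing.T) {
	// Fail (and cancel) this test if it runs longer than 10 seconds.
	// With this refactor, the timeout works on all platforms.
	testing.set_fail_timeout(t, 10 * time.Second)

	// The runner installs a per-test logger into `context`, so ordinary
	// `core:log` calls are captured and written to STDERR.
	log.info("starting work")

	// Cleanup procs now run with the context that was current when they
	// were registered.
	testing.cleanup(t, proc(user_data: rawptr) {
		log.info("cleaning up")
	}, nil)

	testing.expect_value(t, 2 + 2, 4)
}
```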
New Options
- `-define:test_threads=N`: Specify the thread count; the default of 0 uses the processor core count.
- `-define:test_thread_memory=B`: Specify the initial memory block size, in bytes, given to each thread's allocator.
- `-define:test_track_memory=true`: Track the memory usage of individual tests.
- `-define:test_fancy=false`: Disable the animated progress report.
- `-define:test_select=package.test_name,...`: Run only the selected tests (see the example invocation after this list).
- `-define:test_clipboard=true`: Copy names of failed tests to the clipboard.
- `-define:test_progress_width=24`: Change the width of the animated progress bars.
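
As a usage sketch, a hypothetical invocation combining several of these options might look like the following; the `tests` directory and the test names are placeholders:

```sh
odin test tests \
    -define:test_threads=8 \
    -define:test_track_memory=true \
    -define:test_select=mypkg.parser_test,mypkg.lexer_test
```

On failure, the runner prints (or, with `-define:test_clipboard=true`, copies to the clipboard) a ready-made `-define:test_select=...` string so the failed subset can be rerun directly.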
Diffstat (limited to 'core/testing/runner.odin')
| -rw-r--r-- | core/testing/runner.odin | 678 |
1 file changed, 640 insertions, 38 deletions
diff --git a/core/testing/runner.odin b/core/testing/runner.odin
index 0039f1939..ce5aa112a 100644
--- a/core/testing/runner.odin
+++ b/core/testing/runner.odin
@@ -1,73 +1,675 @@
 //+private
 package testing

+import "base:intrinsics"
+import "base:runtime"
+import "core:bytes"
+import "core:encoding/ansi"
+import "core:encoding/base64"
+import "core:fmt"
 import "core:io"
+import pkg_log "core:log"
+import "core:mem"
 import "core:os"
 import "core:slice"
+import "core:strings"
+import "core:sync/chan"
+import "core:thread"
+import "core:time"
+
+// Keep `-vet` happy.
+base64_encode :: base64.encode
+_ :: pkg_log
+_ :: strings
+
+// Specify how many threads to use when running tests.
+TEST_THREADS      : int    : #config(test_threads, 0)
+// Track the memory used by each test.
+TRACKING_MEMORY   : bool   : #config(test_track_memory, false)
+// Specify how much memory each thread allocator starts with.
+PER_THREAD_MEMORY : int    : #config(test_thread_memory, mem.ROLLBACK_STACK_DEFAULT_BLOCK_SIZE)
+// Select a specific set of tests to run by name.
+TEST_SELECT       : string : #config(test_select, "")
+// Show the fancy animated progress report.
+FANCY_OUTPUT      : bool   : #config(test_fancy, true)
+// Copy failed tests to the clipboard when done.
+USE_CLIPBOARD     : bool   : #config(test_clipboard, false)
+// How many test results to show at a time per package.
+PROGRESS_WIDTH    : int    : #config(test_progress_width, 24)
+
-reset_t :: proc(t: ^T) {
-	clear(&t.cleanups)
-	t.error_count = 0
-}

 end_t :: proc(t: ^T) {
 	for i := len(t.cleanups)-1; i >= 0; i -= 1 {
-		c := t.cleanups[i]
+		#no_bounds_check c := t.cleanups[i]
+		context = c.ctx
 		c.procedure(c.user_data)
 	}
+
+	delete(t.cleanups)
+	t.cleanups = {}
+}
+
+Task_Data :: struct {
+	it: Internal_Test,
+	t: T,
+	allocator_index: int,
+}
+
+Task_Timeout :: struct {
+	test_index: int,
+	at_time: time.Time,
+	location: runtime.Source_Code_Location,
+}
+
+run_test_task :: proc(task: thread.Task) {
+	data := cast(^Task_Data)(task.data)
+
+	chan.send(data.t.channel, Event_New_Test {
+		test_index = task.user_index,
+	})
+
+	chan.send(data.t.channel, Event_State_Change {
+		new_state = .Running,
+	})
+
+	context.logger = {
+		procedure = test_logger_proc,
+		data = &data.t,
+		lowest_level = .Debug if ODIN_DEBUG else .Info,
+		options = Default_Test_Logger_Opts,
+	}
+
+	free_all(context.temp_allocator)
+
+	run_internal_test(&data.t, data.it)
+
+	end_t(&data.t)
+
+	new_state : Test_State = .Failed if failed(&data.t) else .Successful
+
+	chan.send(data.t.channel, Event_State_Change {
+		new_state = new_state,
+	})
 }

 runner :: proc(internal_tests: []Internal_Test) -> bool {
-	stream := os.stream_from_handle(os.stdout)
-	w := io.to_writer(stream)
+	BATCH_BUFFER_SIZE     :: 32 * mem.Kilobyte
+	POOL_BLOCK_SIZE       :: 16 * mem.Kilobyte
+	CLIPBOARD_BUFFER_SIZE :: 16 * mem.Kilobyte
+
+	BUFFERED_EVENTS_PER_CHANNEL :: 16
+	RESERVED_LOG_MESSAGES       :: 64
+	RESERVED_TEST_FAILURES      :: 64
+
+	ERROR_STRING_TIMEOUT : string : "Test timed out."
+	ERROR_STRING_UNKNOWN : string : "Test failed for unknown reasons."
+	OSC_WINDOW_TITLE     : string : ansi.OSC + ansi.WINDOW_TITLE + ";Odin test runner (%i/%i)" + ansi.ST
+
+	safe_delete_string :: proc(s: string, allocator := context.allocator) {
+		// Guard against bad frees on static strings.
+		switch raw_data(s) {
+		case raw_data(ERROR_STRING_TIMEOUT), raw_data(ERROR_STRING_UNKNOWN):
+			return
+		case:
+			delete(s, allocator)
+		}
+	}
+
+	stdout := io.to_writer(os.stream_from_handle(os.stdout))
+	stderr := io.to_writer(os.stream_from_handle(os.stderr))
+
+	// -- Prepare test data.
+
-	t := &T{}
-	t.w = w
-	reserve(&t.cleanups, 1024)
-	defer delete(t.cleanups)
+	alloc_error: mem.Allocator_Error
+	when TEST_SELECT != "" {
+		select_internal_tests: [dynamic]Internal_Test
+		defer delete(select_internal_tests)
+
+		{
+			index_list := TEST_SELECT
+			for selector in strings.split_iterator(&index_list, ",") {
+				// Temp allocator is fine since we just need to identify which test it's referring to.
+				split_selector := strings.split(selector, ".", context.temp_allocator)
+
+				found := false
+				switch len(split_selector) {
+				case 1:
+					// Only the test name?
+					#no_bounds_check name := split_selector[0]
+					find_test_by_name: for it in internal_tests {
+						if it.name == name {
+							found = true
+							_, alloc_error = append(&select_internal_tests, it)
+							fmt.assertf(alloc_error == nil, "Error appending to select internal tests: %v", alloc_error)
+							break find_test_by_name
+						}
+					}
+				case 2:
+					#no_bounds_check pkg := split_selector[0]
+					#no_bounds_check name := split_selector[1]
+					find_test_by_pkg_and_name: for it in internal_tests {
+						if it.pkg == pkg && it.name == name {
+							found = true
+							_, alloc_error = append(&select_internal_tests, it)
+							fmt.assertf(alloc_error == nil, "Error appending to select internal tests: %v", alloc_error)
+							break find_test_by_pkg_and_name
+						}
+					}
+				}
+				if !found {
+					fmt.wprintfln(stderr, "No test found for the name: %q", selector)
+				}
+			}
+		}
+
+		// Intentional shadow with user-specified tests.
+		internal_tests := select_internal_tests[:]
+	}
+
+	total_failure_count := 0
 	total_success_count := 0
-	total_test_count := len(internal_tests)
+	total_done_count := 0
+	total_test_count := len(internal_tests)
+
+	when !FANCY_OUTPUT {
+		// This is strictly for updating the window title when the progress
+		// report is disabled. We're otherwise able to depend on the call to
+		// `needs_to_redraw`.
+		last_done_count := -1
+	}
+
+	if total_test_count == 0 {
+		// Exit early.
+		fmt.wprintln(stdout, "No tests to run.")
+		return true
+	}

-	slice.sort_by(internal_tests, proc(a, b: Internal_Test) -> bool {
-		if a.pkg < b.pkg {
-			return true
+	for it in internal_tests {
+		// NOTE(Feoramund): The old test runner skipped over tests with nil
+		// procedures, but I couldn't find any case where they occurred.
+		// This assert stands to prevent any oversight on my part.
+		fmt.assertf(it.p != nil, "Test %s.%s has <nil> procedure.", it.pkg, it.name)
+	}
+
+	slice.stable_sort_by(internal_tests, proc(a, b: Internal_Test) -> bool {
+		if a.pkg == b.pkg {
+			return a.name < b.name
+		} else {
+			return a.pkg < b.pkg
 		}
-		return a.name < b.name
 	})

-	prev_pkg := ""
+	// -- Set thread count.

-	for it in internal_tests {
-		if it.p == nil {
-			total_test_count -= 1
-			continue
+	when TEST_THREADS == 0 {
+		thread_count := os.processor_core_count()
+	} else {
+		thread_count := max(1, TEST_THREADS)
+	}
+
+	thread_count = min(thread_count, total_test_count)
+
+	// -- Allocate.
+
+	pool_stack: mem.Rollback_Stack
+	alloc_error = mem.rollback_stack_init(&pool_stack, POOL_BLOCK_SIZE)
+	fmt.assertf(alloc_error == nil, "Error allocating memory for thread pool: %v", alloc_error)
+	defer mem.rollback_stack_destroy(&pool_stack)
+
+	pool: thread.Pool
+	thread.pool_init(&pool, mem.rollback_stack_allocator(&pool_stack), thread_count)
+	defer thread.pool_destroy(&pool)
+
+	task_channels: []Task_Channel = ---
+	task_channels, alloc_error = make([]Task_Channel, thread_count)
+	fmt.assertf(alloc_error == nil, "Error allocating memory for update channels: %v", alloc_error)
+	defer delete(task_channels)
+
+	for &task_channel, index in task_channels {
+		task_channel.channel, alloc_error = chan.create_buffered(Update_Channel, BUFFERED_EVENTS_PER_CHANNEL, context.allocator)
+		fmt.assertf(alloc_error == nil, "Error allocating memory for update channel #%i: %v", index, alloc_error)
+	}
+	defer for &task_channel in task_channels {
+		chan.destroy(&task_channel.channel)
+	}
+
+	// This buffer is used to batch writes to STDOUT or STDERR, to help reduce
+	// screen flickering.
+	batch_buffer: bytes.Buffer
+	bytes.buffer_init_allocator(&batch_buffer, 0, BATCH_BUFFER_SIZE)
+	batch_writer := io.to_writer(bytes.buffer_to_stream(&batch_buffer))
+	defer bytes.buffer_destroy(&batch_buffer)
+
+	report: Report = ---
+	report, alloc_error = make_report(internal_tests)
+	fmt.assertf(alloc_error == nil, "Error allocating memory for test report: %v", alloc_error)
+	defer destroy_report(&report)
+
+	when FANCY_OUTPUT {
+		// We cannot make use of the ANSI save/restore cursor codes, because they
+		// work by absolute screen coordinates. This will cause unnecessary
+		// scrollback if we print at the bottom of someone's terminal.
+		ansi_redraw_string := fmt.aprintf(
+			// ANSI for "go up N lines then erase the screen from the cursor forward."
+			ansi.CSI + "%i" + ansi.CPL + ansi.CSI + ansi.ED +
+			// We'll combine this with the window title format string, since it
+			// can be printed at the same time.
+			"%s",
+			// 1 extra line for the status bar.
+			1 + len(report.packages), OSC_WINDOW_TITLE)
+		assert(len(ansi_redraw_string) > 0, "Error allocating ANSI redraw string.")
+		defer delete(ansi_redraw_string)
+
+		thread_count_status_string: string = ---
+		{
+			PADDING :: PROGRESS_COLUMN_SPACING + PROGRESS_WIDTH
+
+			unpadded := fmt.tprintf("%i thread%s", thread_count, "" if thread_count == 1 else "s")
+			thread_count_status_string = fmt.aprintf("%- *[1]s", unpadded, report.pkg_column_len + PADDING)
+			assert(len(thread_count_status_string) > 0, "Error allocating thread count status string.")
+		}
+		defer delete(thread_count_status_string)
+	}
+
+	task_data_slots: []Task_Data = ---
+	task_data_slots, alloc_error = make([]Task_Data, thread_count)
+	fmt.assertf(alloc_error == nil, "Error allocating memory for task data slots: %v", alloc_error)
+	defer delete(task_data_slots)
+
+	safe_heap: mem.Mutex_Allocator
+	mem.mutex_allocator_init(&safe_heap, context.allocator)
+	safe_heap_allocator := mem.mutex_allocator(&safe_heap)
+
+	// Tests rotate through these allocators as they finish.
+	task_allocators: []mem.Rollback_Stack = ---
+	task_allocators, alloc_error = make([]mem.Rollback_Stack, thread_count)
+	fmt.assertf(alloc_error == nil, "Error allocating memory for task allocators: %v", alloc_error)
+	defer delete(task_allocators)
+
+	when TRACKING_MEMORY {
+		task_memory_trackers: []mem.Tracking_Allocator = ---
+		task_memory_trackers, alloc_error = make([]mem.Tracking_Allocator, thread_count)
+		fmt.assertf(alloc_error == nil, "Error allocating memory for memory trackers: %v", alloc_error)
+		defer delete(task_memory_trackers)
+	}
+
+	#no_bounds_check for i in 0 ..< thread_count {
+		alloc_error = mem.rollback_stack_init(&task_allocators[i], PER_THREAD_MEMORY, block_allocator = safe_heap_allocator)
+		fmt.assertf(alloc_error == nil, "Error allocating memory for task allocator #%i: %v", i, alloc_error)
+		when TRACKING_MEMORY {
+			mem.tracking_allocator_init(&task_memory_trackers[i], mem.rollback_stack_allocator(&task_allocators[i]))
+		}
+	}
+
+	defer #no_bounds_check for i in 0 ..< thread_count {
+		when TRACKING_MEMORY {
+			mem.tracking_allocator_destroy(&task_memory_trackers[i])
+		}
+		mem.rollback_stack_destroy(&task_allocators[i])
+	}
+
+	task_timeouts: [dynamic]Task_Timeout = ---
+	task_timeouts, alloc_error = make([dynamic]Task_Timeout, 0, thread_count)
+	fmt.assertf(alloc_error == nil, "Error allocating memory for task timeouts: %v", alloc_error)
+	defer delete(task_timeouts)
+
+	failed_test_reason_map: map[int]string = ---
+	failed_test_reason_map, alloc_error = make(map[int]string, RESERVED_TEST_FAILURES)
+	fmt.assertf(alloc_error == nil, "Error allocating memory for failed test reasons: %v", alloc_error)
+	defer delete(failed_test_reason_map)
+
+	log_messages: [dynamic]Log_Message = ---
+	log_messages, alloc_error = make([dynamic]Log_Message, 0, RESERVED_LOG_MESSAGES)
+	fmt.assertf(alloc_error == nil, "Error allocating memory for log message queue: %v", alloc_error)
+	defer delete(log_messages)
+
+	sorted_failed_test_reasons: [dynamic]int = ---
+	sorted_failed_test_reasons, alloc_error = make([dynamic]int, 0, RESERVED_TEST_FAILURES)
+	fmt.assertf(alloc_error == nil, "Error allocating memory for sorted failed test reasons: %v", alloc_error)
+	defer delete(sorted_failed_test_reasons)
+
+	when USE_CLIPBOARD {
+		clipboard_buffer: bytes.Buffer
+		bytes.buffer_init_allocator(&clipboard_buffer, 0, CLIPBOARD_BUFFER_SIZE)
+		defer bytes.buffer_destroy(&clipboard_buffer)
+	}
+
+	// -- Setup initial tasks.
+
+	// NOTE(Feoramund): This is the allocator that will be used by threads to
+	// persist log messages past their lifetimes. It has its own variable name
+	// in the event it needs to be changed from `safe_heap_allocator` without
+	// digging through the source to divine everywhere it is used for that.
+	shared_log_allocator := safe_heap_allocator
+
+	context.allocator = safe_heap_allocator
+
+	context.logger = {
+		procedure = runner_logger_proc,
+		data = &log_messages,
+		lowest_level = .Debug if ODIN_DEBUG else .Info,
+		options = Default_Test_Logger_Opts - {.Short_File_Path, .Line, .Procedure},
+	}
+
+	run_index: int
+
+	setup_tasks: for &data, task_index in task_data_slots {
+		setup_next_test: for run_index < total_test_count {
+			#no_bounds_check it := internal_tests[run_index]
+			defer run_index += 1
+
+			data.it = it
+			#no_bounds_check data.t.channel = chan.as_send(task_channels[task_index].channel)
+			data.t._log_allocator = shared_log_allocator
+			data.allocator_index = task_index
+
+			#no_bounds_check when TRACKING_MEMORY {
+				task_allocator := mem.tracking_allocator(&task_memory_trackers[task_index])
+			} else {
+				task_allocator := mem.rollback_stack_allocator(&task_allocators[task_index])
+			}
+
+			thread.pool_add_task(&pool, task_allocator, run_test_task, &data, run_index)
+
+			continue setup_tasks
+		}
+	}
+
+	// -- Run tests.
+
+	setup_signal_handler()
+
+	fmt.wprint(stdout, ansi.CSI + ansi.DECTCEM_HIDE)
+
+	when FANCY_OUTPUT {
+		redraw_report(stdout, report)
+		draw_status_bar(stdout, thread_count_status_string, total_done_count, total_test_count)
+	}
+
+	when TRACKING_MEMORY {
+		pkg_log.info("Memory tracking is enabled. Tests will log their memory usage when complete.")
+		pkg_log.info("< Final Mem/ Total Mem> < Peak Mem> (#Free/Alloc) :: [package.test_name]")
+	}
+
+	start_time := time.now()
+
+	thread.pool_start(&pool)
+	main_loop: for !thread.pool_is_empty(&pool) {
+
+		cycle_pool: for task in thread.pool_pop_done(&pool) {
+			data := cast(^Task_Data)(task.data)
+
+			when TRACKING_MEMORY {
+				#no_bounds_check tracker := &task_memory_trackers[data.allocator_index]
+
+				write_memory_report(batch_writer, tracker, data.it.pkg, data.it.name)
+
+				pkg_log.info(bytes.buffer_to_string(&batch_buffer))
+				bytes.buffer_reset(&batch_buffer)
+
+				mem.tracking_allocator_reset(tracker)
+			}
+
+			free_all(task.allocator)
+
+			if run_index < total_test_count {
+				#no_bounds_check it := internal_tests[run_index]
+				defer run_index += 1
+
+				data.it = it
+				data.t.error_count = 0
+
+				thread.pool_add_task(&pool, task.allocator, run_test_task, data, run_index)
+			}
+		}
+
+		handle_events: for &task_channel in task_channels {
+			for ev in chan.try_recv(task_channel.channel) {
+				switch event in ev {
+				case Event_New_Test:
+					task_channel.test_index = event.test_index
+
+				case Event_State_Change:
+					#no_bounds_check report.all_test_states[task_channel.test_index] = event.new_state
+
+					#no_bounds_check it := internal_tests[task_channel.test_index]
+					#no_bounds_check pkg := report.packages_by_name[it.pkg]
+
+					#partial switch event.new_state {
+					case .Failed:
+						if task_channel.test_index not_in failed_test_reason_map {
+							failed_test_reason_map[task_channel.test_index] = ERROR_STRING_UNKNOWN
+						}
+						total_failure_count += 1
+						total_done_count += 1
+					case .Successful:
+						total_success_count += 1
+						total_done_count += 1
+					}
+
+					when ODIN_DEBUG {
+						pkg_log.debugf("Test #%i %s.%s changed state to %v.", task_channel.test_index, it.pkg, it.name, event.new_state)
+					}
+
+					pkg.last_change_state = event.new_state
+					pkg.last_change_name = it.name
+					pkg.frame_ready = false
+
+				case Event_Set_Fail_Timeout:
+					_, alloc_error = append(&task_timeouts, Task_Timeout {
+						test_index = task_channel.test_index,
+						at_time = event.at_time,
+						location = event.location,
+					})
+					fmt.assertf(alloc_error == nil, "Error appending to task timeouts: %v", alloc_error)
+
+				case Event_Log_Message:
+					_, alloc_error = append(&log_messages, Log_Message {
+						level = event.level,
+						text = event.formatted_text,
+						time = event.time,
+						allocator = shared_log_allocator,
+					})
+					fmt.assertf(alloc_error == nil, "Error appending to log messages: %v", alloc_error)
+
+					if event.level >= .Error {
+						// Save the message for the final summary.
+						if old_error, ok := failed_test_reason_map[task_channel.test_index]; ok {
+							safe_delete_string(old_error, shared_log_allocator)
+						}
+						failed_test_reason_map[task_channel.test_index] = event.text
+					} else {
+						delete(event.text, shared_log_allocator)
+					}
+				}
+			}
 		}

-		free_all(context.temp_allocator)
-		reset_t(t)
-		defer end_t(t)
+		check_timeouts: for i := len(task_timeouts) - 1; i >= 0; i -= 1 {
+			#no_bounds_check timeout := &task_timeouts[i]
+
+			if time.since(timeout.at_time) < 0 {
+				continue check_timeouts
+			}
+
+			defer unordered_remove(&task_timeouts, i)
+
+			#no_bounds_check if report.all_test_states[timeout.test_index] > .Running {
+				continue check_timeouts
+			}
+
+			if !thread.pool_stop_task(&pool, timeout.test_index) {
+				// The task may have stopped a split second after we started
+				// checking, but we haven't handled the new state yet.
+				continue check_timeouts
+			}
+
+			#no_bounds_check report.all_test_states[timeout.test_index] = .Failed
+			#no_bounds_check it := internal_tests[timeout.test_index]
+			#no_bounds_check pkg := report.packages_by_name[it.pkg]
+			pkg.frame_ready = false
+
+			if old_error, ok := failed_test_reason_map[timeout.test_index]; ok {
+				safe_delete_string(old_error, shared_log_allocator)
+			}
+			failed_test_reason_map[timeout.test_index] = ERROR_STRING_TIMEOUT
+			total_failure_count += 1
+			total_done_count += 1
+
+			now := time.now()
+			_, alloc_error = append(&log_messages, Log_Message {
+				level = .Error,
+				text = format_log_text(.Error, ERROR_STRING_TIMEOUT, Default_Test_Logger_Opts, timeout.location, now),
+				time = now,
+				allocator = context.allocator,
+			})
+			fmt.assertf(alloc_error == nil, "Error appending to log messages: %v", alloc_error)
+
+			find_task_data: for &data in task_data_slots {
+				if data.it.pkg == it.pkg && data.it.name == it.name {
+					end_t(&data.t)
+					break find_task_data
+				}
+			}
+		}

-		if prev_pkg != it.pkg {
-			prev_pkg = it.pkg
-			logf(t, "[Package: %s]", it.pkg)
+		if should_abort() {
+			fmt.wprintln(stderr, "\nCaught interrupt signal. Stopping all tests.")
+			thread.pool_shutdown(&pool)
+			break main_loop
 		}

-		logf(t, "[Test: %s]", it.name)
+		// -- Redraw.

-		run_internal_test(t, it)
+		when FANCY_OUTPUT {
+			if len(log_messages) == 0 && !needs_to_redraw(report) {
+				continue main_loop
+			}

-		if failed(t) {
-			logf(t, "[%s : FAILURE]", it.name)
+			fmt.wprintf(stdout, ansi_redraw_string, total_done_count, total_test_count)
 		} else {
-			logf(t, "[%s : SUCCESS]", it.name)
-			total_success_count += 1
+			if total_done_count != last_done_count {
+				fmt.wprintf(stdout, OSC_WINDOW_TITLE, total_done_count, total_test_count)
+				last_done_count = total_done_count
+			}
+
+			if len(log_messages) == 0 {
+				continue main_loop
+			}
+		}
+
+		// Because each thread has its own messenger channel, log messages
+		// arrive in chunks that are in-order, but when they're merged with the
+		// logs from other threads, they become out-of-order.
+		slice.stable_sort_by(log_messages[:], proc(a, b: Log_Message) -> bool {
+			return time.diff(a.time, b.time) > 0
+		})
+
+		for message in log_messages {
+			fmt.wprintln(batch_writer, message.text)
+			delete(message.text, message.allocator)
+		}
+
+		fmt.wprint(stderr, bytes.buffer_to_string(&batch_buffer))
+		clear(&log_messages)
+		bytes.buffer_reset(&batch_buffer)
+
+		when FANCY_OUTPUT {
+			redraw_report(batch_writer, report)
+			draw_status_bar(batch_writer, thread_count_status_string, total_done_count, total_test_count)
+			fmt.wprint(stdout, bytes.buffer_to_string(&batch_buffer))
+			bytes.buffer_reset(&batch_buffer)
 		}
 	}

-	logf(t, "----------------------------------------")
-	if total_test_count == 0 {
-		log(t, "NO TESTS RAN")
-	} else {
-		logf(t, "%d/%d SUCCESSFUL", total_success_count, total_test_count)
+
+	// -- All tests are complete, or the runner has been interrupted.
+
+	thread.pool_join(&pool)
+
+	finished_in := time.since(start_time)
+
+	fmt.wprintf(batch_writer,
+		"\nFinished %i test%s in %v.",
+		total_done_count,
+		"" if total_done_count == 1 else "s",
+		finished_in)
+
+	if total_done_count != total_test_count {
+		not_run_count := total_test_count - total_done_count
+		fmt.wprintf(batch_writer,
+			" " + SGR_READY + "%i" + SGR_RESET + " %s left undone.",
+			not_run_count,
+			"test was" if not_run_count == 1 else "tests were")
+	}
+
+	if total_success_count == total_test_count {
+		fmt.wprintfln(batch_writer,
+			" %s " + SGR_SUCCESS + "successful." + SGR_RESET,
+			"The test was" if total_test_count == 1 else "All tests were")
+	} else if total_failure_count > 0 {
+		if total_failure_count == total_test_count {
+			fmt.wprintfln(batch_writer,
+				" %s " + SGR_FAILED + "failed." + SGR_RESET,
+				"The test" if total_test_count == 1 else "All tests")
+		} else {
+			fmt.wprintfln(batch_writer,
+				" " + SGR_FAILED + "%i" + SGR_RESET + " test%s failed.",
+				total_failure_count,
+				"" if total_failure_count == 1 else "s")
+		}
+
+		for test_index in failed_test_reason_map {
+			_, alloc_error = append(&sorted_failed_test_reasons, test_index)
+			fmt.assertf(alloc_error == nil, "Error appending to sorted failed test reasons: %v", alloc_error)
+		}
+
+		slice.sort(sorted_failed_test_reasons[:])
+
+		for test_index in sorted_failed_test_reasons {
+			#no_bounds_check last_error := failed_test_reason_map[test_index]
+			#no_bounds_check it := internal_tests[test_index]
+			pkg_and_name := fmt.tprintf("%s.%s", it.pkg, it.name)
+			fmt.wprintfln(batch_writer, " - %- *[1]s\t%s",
+				pkg_and_name,
+				report.pkg_column_len + report.test_column_len,
+				last_error)
+			safe_delete_string(last_error, shared_log_allocator)
+		}
+
+		if total_success_count > 0 {
+			when USE_CLIPBOARD {
+				clipboard_writer := io.to_writer(bytes.buffer_to_stream(&clipboard_buffer))
+				fmt.wprint(clipboard_writer, "-define:test_select=")
+				for test_index in sorted_failed_test_reasons {
+					#no_bounds_check it := internal_tests[test_index]
+					fmt.wprintf(clipboard_writer, "%s.%s,", it.pkg, it.name)
+				}
+
+				encoded_names := base64_encode(bytes.buffer_to_bytes(&clipboard_buffer), allocator = context.temp_allocator)
+
+				fmt.wprintf(batch_writer,
+					ansi.OSC + ansi.CLIPBOARD + ";c;%s" + ansi.ST +
+					"\nThe name%s of the failed test%s been copied to your clipboard.",
+					encoded_names,
+					"" if total_failure_count == 1 else "s",
+					" has" if total_failure_count == 1 else "s have")
+			} else {
+				fmt.wprintf(batch_writer, "\nTo run only the failed test%s, use:\n\t-define:test_select=",
+					"" if total_failure_count == 1 else "s")
+				for test_index in sorted_failed_test_reasons {
+					#no_bounds_check it := internal_tests[test_index]
+					fmt.wprintf(batch_writer, "%s.%s,", it.pkg, it.name)
+				}
+			}
+
+			fmt.wprintln(batch_writer)
+		}
 	}
+
+	fmt.wprint(batch_writer, ansi.CSI + ansi.DECTCEM_SHOW)
+
+	fmt.wprintln(stderr, bytes.buffer_to_string(&batch_buffer))
+
 	return total_success_count == total_test_count
 }