diff --git a/lib/compiler/aro/aro/Diagnostics.zig b/lib/compiler/aro/aro/Diagnostics.zig index f894403648b4..8f80e4393dc8 100644 --- a/lib/compiler/aro/aro/Diagnostics.zig +++ b/lib/compiler/aro/aro/Diagnostics.zig @@ -528,7 +528,7 @@ const MsgWriter = struct { config: std.io.tty.Config, fn init(config: std.io.tty.Config) MsgWriter { - std.debug.getStderrMutex().lock(); + std.debug.lockStdErr(); return .{ .w = std.io.bufferedWriter(std.io.getStdErr().writer()), .config = config, @@ -537,7 +537,7 @@ const MsgWriter = struct { pub fn deinit(m: *MsgWriter) void { m.w.flush() catch {}; - std.debug.getStderrMutex().unlock(); + std.debug.unlockStdErr(); } pub fn print(m: *MsgWriter, comptime fmt: []const u8, args: anytype) void { diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index 35dcbb882bda..86ad68133ac3 100644 --- a/lib/compiler/build_runner.zig +++ b/lib/compiler/build_runner.zig @@ -289,13 +289,14 @@ pub fn main() !void { .windows_api => {}, } - var progress: std.Progress = .{ .dont_print_on_dumb = true }; - const main_progress_node = progress.start("", 0); + const main_progress_node = std.Progress.start(.{ + .disable_printing = (color == .off), + }); builder.debug_log_scopes = debug_log_scopes.items; builder.resolveInstallPrefix(install_prefix, dir_list); { - var prog_node = main_progress_node.start("user build.zig logic", 0); + var prog_node = main_progress_node.start("Configure", 0); defer prog_node.end(); try builder.runBuild(root); } @@ -385,7 +386,7 @@ fn runStepNames( arena: std.mem.Allocator, b: *std.Build, step_names: []const []const u8, - parent_prog_node: *std.Progress.Node, + parent_prog_node: std.Progress.Node, thread_pool_options: std.Thread.Pool.Options, run: *Run, seed: u32, @@ -452,7 +453,7 @@ fn runStepNames( { defer parent_prog_node.end(); - var step_prog = parent_prog_node.start("steps", step_stack.count()); + const step_prog = parent_prog_node.start("steps", step_stack.count()); defer step_prog.end(); var wait_group: std.Thread.WaitGroup = .{}; @@ -467,7 +468,7 @@ fn runStepNames( if (step.state == .skipped_oom) continue; thread_pool.spawnWg(&wait_group, workerMakeOneStep, .{ - &wait_group, &thread_pool, b, step, &step_prog, run, + &wait_group, &thread_pool, b, step, step_prog, run, }); } } @@ -891,7 +892,7 @@ fn workerMakeOneStep( thread_pool: *std.Thread.Pool, b: *std.Build, s: *Step, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, run: *Run, ) void { // First, check the conditions for running this step. If they are not met, @@ -941,11 +942,10 @@ fn workerMakeOneStep( } } - var sub_prog_node = prog_node.start(s.name, 0); - sub_prog_node.activate(); + const sub_prog_node = prog_node.start(s.name, 0); defer sub_prog_node.end(); - const make_result = s.make(&sub_prog_node); + const make_result = s.make(sub_prog_node); // No matter the result, we want to display error/warning messages. 
const show_compile_errors = !run.prominent_compile_errors and @@ -954,8 +954,8 @@ fn workerMakeOneStep( const show_stderr = s.result_stderr.len > 0; if (show_error_msgs or show_compile_errors or show_stderr) { - sub_prog_node.context.lock_stderr(); - defer sub_prog_node.context.unlock_stderr(); + std.debug.lockStdErr(); + defer std.debug.unlockStdErr(); printErrorMessages(b, s, run) catch {}; } @@ -1225,7 +1225,7 @@ fn cleanExit() void { process.exit(0); } -const Color = enum { auto, off, on }; +const Color = std.zig.Color; const Summary = enum { all, new, failures, none }; fn get_tty_conf(color: Color, stderr: File) std.io.tty.Config { diff --git a/lib/compiler/resinator/cli.zig b/lib/compiler/resinator/cli.zig index deee1ed54a3b..32b4389997de 100644 --- a/lib/compiler/resinator/cli.zig +++ b/lib/compiler/resinator/cli.zig @@ -108,8 +108,8 @@ pub const Diagnostics = struct { } pub fn renderToStdErr(self: *Diagnostics, args: []const []const u8, config: std.io.tty.Config) void { - std.debug.getStderrMutex().lock(); - defer std.debug.getStderrMutex().unlock(); + std.debug.lockStdErr(); + defer std.debug.unlockStdErr(); const stderr = std.io.getStdErr().writer(); self.renderToWriter(args, stderr, config) catch return; } diff --git a/lib/compiler/resinator/errors.zig b/lib/compiler/resinator/errors.zig index 909824c594ce..5cfa766ac124 100644 --- a/lib/compiler/resinator/errors.zig +++ b/lib/compiler/resinator/errors.zig @@ -60,8 +60,8 @@ pub const Diagnostics = struct { } pub fn renderToStdErr(self: *Diagnostics, cwd: std.fs.Dir, source: []const u8, tty_config: std.io.tty.Config, source_mappings: ?SourceMappings) void { - std.debug.getStderrMutex().lock(); - defer std.debug.getStderrMutex().unlock(); + std.debug.lockStdErr(); + defer std.debug.unlockStdErr(); const stderr = std.io.getStdErr().writer(); for (self.errors.items) |err_details| { renderErrorMessage(self.allocator, stderr, tty_config, cwd, err_details, source, self.strings.items, source_mappings) catch return; diff --git a/lib/compiler/resinator/main.zig b/lib/compiler/resinator/main.zig index 3dd59857588e..e056e80252e1 100644 --- a/lib/compiler/resinator/main.zig +++ b/lib/compiler/resinator/main.zig @@ -50,12 +50,6 @@ pub fn main() !void { }, }; - if (zig_integration) { - // Send progress with a special string to indicate that the building of the - // resinator binary is finished and we've moved on to actually compiling the .rc file - try error_handler.server.serveStringMessage(.progress, ""); - } - var options = options: { var cli_diagnostics = cli.Diagnostics.init(allocator); defer cli_diagnostics.deinit(); diff --git a/lib/compiler/test_runner.zig b/lib/compiler/test_runner.zig index 194e84b8eab0..dc82545e5497 100644 --- a/lib/compiler/test_runner.zig +++ b/lib/compiler/test_runner.zig @@ -129,12 +129,11 @@ fn mainTerminal() void { var ok_count: usize = 0; var skip_count: usize = 0; var fail_count: usize = 0; - var progress = std.Progress{ - .dont_print_on_dumb = true, - }; - const root_node = progress.start("Test", test_fn_list.len); - const have_tty = progress.terminal != null and - (progress.supports_ansi_escape_codes or progress.is_windows_terminal); + const root_node = std.Progress.start(.{ + .root_name = "Test", + .estimated_total_items = test_fn_list.len, + }); + const have_tty = std.io.getStdErr().isTty(); var async_frame_buffer: []align(builtin.target.stackAlignment()) u8 = undefined; // TODO this is on the next line (using `undefined` above) because otherwise zig incorrectly @@ -151,11 +150,9 @@ fn mainTerminal() void 
{ } std.testing.log_level = .warn; - var test_node = root_node.start(test_fn.name, 0); - test_node.activate(); - progress.refresh(); + const test_node = root_node.start(test_fn.name, 0); if (!have_tty) { - std.debug.print("{d}/{d} {s}... ", .{ i + 1, test_fn_list.len, test_fn.name }); + std.debug.print("{d}/{d} {s}...", .{ i + 1, test_fn_list.len, test_fn.name }); } if (test_fn.func()) |_| { ok_count += 1; @@ -164,12 +161,22 @@ fn mainTerminal() void { } else |err| switch (err) { error.SkipZigTest => { skip_count += 1; - progress.log("SKIP\n", .{}); + if (have_tty) { + std.debug.print("{d}/{d} {s}...SKIP\n", .{ i + 1, test_fn_list.len, test_fn.name }); + } else { + std.debug.print("SKIP\n", .{}); + } test_node.end(); }, else => { fail_count += 1; - progress.log("FAIL ({s})\n", .{@errorName(err)}); + if (have_tty) { + std.debug.print("{d}/{d} {s}...FAIL ({s})\n", .{ + i + 1, test_fn_list.len, test_fn.name, @errorName(err), + }); + } else { + std.debug.print("FAIL ({s})\n", .{@errorName(err)}); + } if (@errorReturnTrace()) |trace| { std.debug.dumpStackTrace(trace.*); } diff --git a/lib/std/Build.zig b/lib/std/Build.zig index a2e8a7c564e1..4443fa404c74 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -1059,7 +1059,7 @@ pub fn getUninstallStep(b: *Build) *Step { return &b.uninstall_tls.step; } -fn makeUninstall(uninstall_step: *Step, prog_node: *std.Progress.Node) anyerror!void { +fn makeUninstall(uninstall_step: *Step, prog_node: std.Progress.Node) anyerror!void { _ = prog_node; const uninstall_tls: *TopLevelStep = @fieldParentPtr("step", uninstall_step); const b: *Build = @fieldParentPtr("uninstall_tls", uninstall_tls); @@ -2281,10 +2281,10 @@ pub const LazyPath = union(enum) { .cwd_relative => |p| return src_builder.pathFromCwd(p), .generated => |gen| { var file_path: []const u8 = gen.file.step.owner.pathFromRoot(gen.file.path orelse { - std.debug.getStderrMutex().lock(); + std.debug.lockStdErr(); const stderr = std.io.getStdErr(); dumpBadGetPathHelp(gen.file.step, stderr, src_builder, asking_step) catch {}; - std.debug.getStderrMutex().unlock(); + std.debug.unlockStdErr(); @panic("misconfigured build script"); }); @@ -2351,8 +2351,8 @@ fn dumpBadDirnameHelp( comptime msg: []const u8, args: anytype, ) anyerror!void { - debug.getStderrMutex().lock(); - defer debug.getStderrMutex().unlock(); + debug.lockStdErr(); + defer debug.unlockStdErr(); const stderr = io.getStdErr(); const w = stderr.writer(); diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index 01bea6c0cef5..b6aed17076ce 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -58,7 +58,7 @@ pub const TestResults = struct { } }; -pub const MakeFn = *const fn (step: *Step, prog_node: *std.Progress.Node) anyerror!void; +pub const MakeFn = *const fn (step: *Step, prog_node: std.Progress.Node) anyerror!void; pub const State = enum { precheck_unstarted, @@ -176,7 +176,7 @@ pub fn init(options: StepOptions) Step { /// If the Step's `make` function reports `error.MakeFailed`, it indicates they /// have already reported the error. Otherwise, we add a simple error report /// here. 
-pub fn make(s: *Step, prog_node: *std.Progress.Node) error{ MakeFailed, MakeSkipped }!void { +pub fn make(s: *Step, prog_node: std.Progress.Node) error{ MakeFailed, MakeSkipped }!void { const arena = s.owner.allocator; s.makeFn(s, prog_node) catch |err| switch (err) { @@ -217,7 +217,7 @@ pub fn getStackTrace(s: *Step) ?std.builtin.StackTrace { }; } -fn makeNoOp(step: *Step, prog_node: *std.Progress.Node) anyerror!void { +fn makeNoOp(step: *Step, prog_node: std.Progress.Node) anyerror!void { _ = prog_node; var all_cached = true; @@ -303,7 +303,7 @@ pub fn addError(step: *Step, comptime fmt: []const u8, args: anytype) error{OutO pub fn evalZigProcess( s: *Step, argv: []const []const u8, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, ) !?[]const u8 { assert(argv.len != 0); const b = s.owner; @@ -319,6 +319,7 @@ pub fn evalZigProcess( child.stdout_behavior = .Pipe; child.stderr_behavior = .Pipe; child.request_resource_usage_statistics = true; + child.progress_node = prog_node; child.spawn() catch |err| return s.fail("unable to spawn {s}: {s}", .{ argv[0], @errorName(err), @@ -337,11 +338,6 @@ pub fn evalZigProcess( const Header = std.zig.Server.Message.Header; var result: ?[]const u8 = null; - var node_name: std.ArrayListUnmanaged(u8) = .{}; - defer node_name.deinit(gpa); - var sub_prog_node = prog_node.start("", 0); - defer sub_prog_node.end(); - const stdout = poller.fifo(.stdout); poll: while (true) { @@ -379,11 +375,6 @@ pub fn evalZigProcess( .extra = extra_array, }; }, - .progress => { - node_name.clearRetainingCapacity(); - try node_name.appendSlice(gpa, body); - sub_prog_node.setName(node_name.items); - }, .emit_bin_path => { const EbpHdr = std.zig.Server.Message.EmitBinPath; const ebp_hdr = @as(*align(1) const EbpHdr, @ptrCast(body)); diff --git a/lib/std/Build/Step/CheckFile.zig b/lib/std/Build/Step/CheckFile.zig index b3323f9e98b9..b7ce2ded6136 100644 --- a/lib/std/Build/Step/CheckFile.zig +++ b/lib/std/Build/Step/CheckFile.zig @@ -46,7 +46,7 @@ pub fn setName(check_file: *CheckFile, name: []const u8) void { check_file.step.name = name; } -fn make(step: *Step, prog_node: *std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { _ = prog_node; const b = step.owner; const check_file: *CheckFile = @fieldParentPtr("step", step); diff --git a/lib/std/Build/Step/CheckObject.zig b/lib/std/Build/Step/CheckObject.zig index fa0ccc339d3d..84c9c62abb40 100644 --- a/lib/std/Build/Step/CheckObject.zig +++ b/lib/std/Build/Step/CheckObject.zig @@ -550,7 +550,7 @@ pub fn checkComputeCompare( check_object.checks.append(check) catch @panic("OOM"); } -fn make(step: *Step, prog_node: *std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { _ = prog_node; const b = step.owner; const gpa = b.allocator; diff --git a/lib/std/Build/Step/Compile.zig b/lib/std/Build/Step/Compile.zig index f660ef64a6b8..e27dd656198a 100644 --- a/lib/std/Build/Step/Compile.zig +++ b/lib/std/Build/Step/Compile.zig @@ -967,7 +967,7 @@ fn getGeneratedFilePath(compile: *Compile, comptime tag_name: []const u8, asking const maybe_path: ?*GeneratedFile = @field(compile, tag_name); const generated_file = maybe_path orelse { - std.debug.getStderrMutex().lock(); + std.debug.lockStdErr(); const stderr = std.io.getStdErr(); std.Build.dumpBadGetPathHelp(&compile.step, stderr, compile.step.owner, asking_step) catch {}; @@ -976,7 +976,7 @@ fn getGeneratedFilePath(compile: *Compile, comptime tag_name: []const u8, asking }; const path = generated_file.path orelse { - 
std.debug.getStderrMutex().lock(); + std.debug.lockStdErr(); const stderr = std.io.getStdErr(); std.Build.dumpBadGetPathHelp(&compile.step, stderr, compile.step.owner, asking_step) catch {}; @@ -987,7 +987,7 @@ fn getGeneratedFilePath(compile: *Compile, comptime tag_name: []const u8, asking return path; } -fn make(step: *Step, prog_node: *std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { const b = step.owner; const arena = b.allocator; const compile: *Compile = @fieldParentPtr("step", step); diff --git a/lib/std/Build/Step/ConfigHeader.zig b/lib/std/Build/Step/ConfigHeader.zig index 4a0e64e8d054..212ea605ed19 100644 --- a/lib/std/Build/Step/ConfigHeader.zig +++ b/lib/std/Build/Step/ConfigHeader.zig @@ -164,7 +164,7 @@ fn putValue(config_header: *ConfigHeader, field_name: []const u8, comptime T: ty } } -fn make(step: *Step, prog_node: *std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { _ = prog_node; const b = step.owner; const config_header: *ConfigHeader = @fieldParentPtr("step", step); diff --git a/lib/std/Build/Step/Fmt.zig b/lib/std/Build/Step/Fmt.zig index 3010d701b1ac..f346c6cc3949 100644 --- a/lib/std/Build/Step/Fmt.zig +++ b/lib/std/Build/Step/Fmt.zig @@ -36,7 +36,7 @@ pub fn create(owner: *std.Build, options: Options) *Fmt { return fmt; } -fn make(step: *Step, prog_node: *std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { // zig fmt is fast enough that no progress is needed. _ = prog_node; diff --git a/lib/std/Build/Step/InstallArtifact.zig b/lib/std/Build/Step/InstallArtifact.zig index c56bafcfb53d..bd1d5db4a977 100644 --- a/lib/std/Build/Step/InstallArtifact.zig +++ b/lib/std/Build/Step/InstallArtifact.zig @@ -115,7 +115,7 @@ pub fn create(owner: *std.Build, artifact: *Step.Compile, options: Options) *Ins return install_artifact; } -fn make(step: *Step, prog_node: *std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { _ = prog_node; const install_artifact: *InstallArtifact = @fieldParentPtr("step", step); const b = step.owner; diff --git a/lib/std/Build/Step/InstallDir.zig b/lib/std/Build/Step/InstallDir.zig index 1722b975f70c..0a6edafb338e 100644 --- a/lib/std/Build/Step/InstallDir.zig +++ b/lib/std/Build/Step/InstallDir.zig @@ -56,7 +56,7 @@ pub fn create(owner: *std.Build, options: Options) *InstallDir { return install_dir; } -fn make(step: *Step, prog_node: *std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { _ = prog_node; const b = step.owner; const install_dir: *InstallDir = @fieldParentPtr("step", step); diff --git a/lib/std/Build/Step/InstallFile.zig b/lib/std/Build/Step/InstallFile.zig index 6fa6d6bc9994..8202a9d79648 100644 --- a/lib/std/Build/Step/InstallFile.zig +++ b/lib/std/Build/Step/InstallFile.zig @@ -36,7 +36,7 @@ pub fn create( return install_file; } -fn make(step: *Step, prog_node: *std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { _ = prog_node; const b = step.owner; const install_file: *InstallFile = @fieldParentPtr("step", step); diff --git a/lib/std/Build/Step/ObjCopy.zig b/lib/std/Build/Step/ObjCopy.zig index 515736dbc117..966764adcc1e 100644 --- a/lib/std/Build/Step/ObjCopy.zig +++ b/lib/std/Build/Step/ObjCopy.zig @@ -90,7 +90,7 @@ pub fn getOutputSeparatedDebug(objcopy: *const ObjCopy) ?std.Build.LazyPath { return if (objcopy.output_file_debug) |*file| .{ .generated = .{ .file = file } } else null; } -fn make(step: *Step, prog_node: 
*std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { const b = step.owner; const objcopy: *ObjCopy = @fieldParentPtr("step", step); diff --git a/lib/std/Build/Step/Options.zig b/lib/std/Build/Step/Options.zig index c4daed73fff6..2937cf70e1ee 100644 --- a/lib/std/Build/Step/Options.zig +++ b/lib/std/Build/Step/Options.zig @@ -410,7 +410,7 @@ pub fn getOutput(options: *Options) LazyPath { return .{ .generated = .{ .file = &options.generated_file } }; } -fn make(step: *Step, prog_node: *std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { // This step completes so quickly that no progress is necessary. _ = prog_node; diff --git a/lib/std/Build/Step/RemoveDir.zig b/lib/std/Build/Step/RemoveDir.zig index 64a3c72668ee..6483a684aaea 100644 --- a/lib/std/Build/Step/RemoveDir.zig +++ b/lib/std/Build/Step/RemoveDir.zig @@ -22,7 +22,7 @@ pub fn create(owner: *std.Build, dir_path: []const u8) *RemoveDir { return remove_dir; } -fn make(step: *Step, prog_node: *std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { // TODO update progress node while walking file system. // Should the standard library support this use case?? _ = prog_node; diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index fec5b5ab679c..1ecc3334c480 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -23,6 +23,11 @@ cwd: ?Build.LazyPath, /// Override this field to modify the environment, or use setEnvironmentVariable env_map: ?*EnvMap, +/// When `true` prevents `ZIG_PROGRESS` environment variable from being passed +/// to the child process, which otherwise would be used for the child to send +/// progress updates to the parent. +disable_zig_progress: bool, + /// Configures whether the Run step is considered to have side-effects, and also /// whether the Run step will inherit stdio streams, forwarding them to the /// parent process, in which case will require a global lock to prevent other @@ -152,6 +157,7 @@ pub fn create(owner: *std.Build, name: []const u8) *Run { .argv = .{}, .cwd = null, .env_map = null, + .disable_zig_progress = false, .stdio = .infer_from_args, .stdin = .none, .extra_file_dependencies = &.{}, @@ -574,7 +580,7 @@ const IndexedOutput = struct { tag: @typeInfo(Arg).Union.tag_type.?, output: *Output, }; -fn make(step: *Step, prog_node: *std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { const b = step.owner; const arena = b.allocator; const run: *Run = @fieldParentPtr("step", step); @@ -878,7 +884,7 @@ fn runCommand( argv: []const []const u8, has_side_effects: bool, output_dir_path: []const u8, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, ) !void { const step = &run.step; const b = step.owner; @@ -1195,7 +1201,7 @@ fn spawnChildAndCollect( run: *Run, argv: []const []const u8, has_side_effects: bool, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, ) !ChildProcResult { const b = run.step.owner; const arena = b.allocator; @@ -1235,6 +1241,10 @@ fn spawnChildAndCollect( child.stdin_behavior = .Pipe; } + if (run.stdio != .zig_test and !run.disable_zig_progress) { + child.progress_node = prog_node; + } + try child.spawn(); var timer = try std.time.Timer.start(); @@ -1264,7 +1274,7 @@ const StdIoResult = struct { fn evalZigTest( run: *Run, child: *std.process.Child, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, ) !StdIoResult { const gpa = run.step.owner.allocator; const arena = 
run.step.owner.allocator; @@ -1291,7 +1301,7 @@ fn evalZigTest( var metadata: ?TestMetadata = null; var sub_prog_node: ?std.Progress.Node = null; - defer if (sub_prog_node) |*n| n.end(); + defer if (sub_prog_node) |n| n.end(); poll: while (true) { while (stdout.readableLength() < @sizeOf(Header)) { @@ -1406,7 +1416,7 @@ const TestMetadata = struct { expected_panic_msgs: []const u32, string_bytes: []const u8, next_index: u32, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, fn testName(tm: TestMetadata, index: u32) []const u8 { return std.mem.sliceTo(tm.string_bytes[tm.names[index]..], 0); @@ -1421,7 +1431,7 @@ fn requestNextTest(in: fs.File, metadata: *TestMetadata, sub_prog_node: *?std.Pr if (metadata.expected_panic_msgs[i] != 0) continue; const name = metadata.testName(i); - if (sub_prog_node.*) |*n| n.end(); + if (sub_prog_node.*) |n| n.end(); sub_prog_node.* = metadata.prog_node.start(name, 0); try sendRunTestMessage(in, i); diff --git a/lib/std/Build/Step/TranslateC.zig b/lib/std/Build/Step/TranslateC.zig index cb1b48e3c061..e07744c2da15 100644 --- a/lib/std/Build/Step/TranslateC.zig +++ b/lib/std/Build/Step/TranslateC.zig @@ -116,7 +116,7 @@ pub fn defineCMacroRaw(translate_c: *TranslateC, name_and_value: []const u8) voi translate_c.c_macros.append(translate_c.step.owner.dupe(name_and_value)) catch @panic("OOM"); } -fn make(step: *Step, prog_node: *std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { const b = step.owner; const translate_c: *TranslateC = @fieldParentPtr("step", step); diff --git a/lib/std/Build/Step/WriteFile.zig b/lib/std/Build/Step/WriteFile.zig index 401c5b78ece2..0639573b8fea 100644 --- a/lib/std/Build/Step/WriteFile.zig +++ b/lib/std/Build/Step/WriteFile.zig @@ -198,7 +198,7 @@ fn maybeUpdateName(write_file: *WriteFile) void { } } -fn make(step: *Step, prog_node: *std.Progress.Node) !void { +fn make(step: *Step, prog_node: std.Progress.Node) !void { _ = prog_node; const b = step.owner; const write_file: *WriteFile = @fieldParentPtr("step", step); diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 19f90e86a91c..59cc559d5d98 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -1,10 +1,4 @@ -//! This API is non-allocating, non-fallible, and thread-safe. -//! The tradeoff is that users of this API must provide the storage -//! for each `Progress.Node`. -//! -//! Initialize the struct directly, overriding these fields as desired: -//! * `refresh_rate_ms` -//! * `initial_delay_ms` +//! This API is non-allocating, non-fallible, thread-safe, and lock-free. const std = @import("std"); const builtin = @import("builtin"); @@ -12,436 +6,1293 @@ const windows = std.os.windows; const testing = std.testing; const assert = std.debug.assert; const Progress = @This(); +const posix = std.posix; +const is_big_endian = builtin.cpu.arch.endian() == .big; +const is_windows = builtin.os.tag == .windows; /// `null` if the current node (and its children) should /// not print on update() -terminal: ?std.fs.File = undefined, +terminal: std.fs.File, -/// Is this a windows API terminal (note: this is not the same as being run on windows -/// because other terminals exist like MSYS/git-bash) -is_windows_terminal: bool = false, +terminal_mode: TerminalMode, -/// Whether the terminal supports ANSI escape codes. -supports_ansi_escape_codes: bool = false, +update_thread: ?std.Thread, -/// If the terminal is "dumb", don't print output. 
-/// This can be useful if you don't want to print all -/// the stages of code generation if there are a lot. -/// You should not use it if the user should see output -/// for example showing the user what tests run. -dont_print_on_dumb: bool = false, +/// Atomically set by SIGWINCH as well as the root done() function. +redraw_event: std.Thread.ResetEvent, +/// Indicates a request to shut down and reset global state. +/// Accessed atomically. +done: bool, -root: Node = undefined, +refresh_rate_ns: u64, +initial_delay_ns: u64, -/// Keeps track of how much time has passed since the beginning. -/// Used to compare with `initial_delay_ms` and `refresh_rate_ms`. -timer: ?std.time.Timer = null, +rows: u16, +cols: u16, +/// Tracks the number of newlines that have been actually written to the terminal. +written_newline_count: u16, +/// Tracks the number of newlines that will be written to the terminal if the +/// draw buffer is sent. +accumulated_newline_count: u16, -/// When the previous refresh was written to the terminal. -/// Used to compare with `refresh_rate_ms`. -prev_refresh_timestamp: u64 = undefined, +/// Accessed only by the update thread. +draw_buffer: []u8, -/// This buffer represents the maximum number of bytes written to the terminal -/// with each refresh. -output_buffer: [100]u8 = undefined, +/// This is in a separate array from `node_storage` but with the same length so +/// that it can be iterated over efficiently without trashing too much of the +/// CPU cache. +node_parents: []Node.Parent, +node_storage: []Node.Storage, +node_freelist: []Node.OptionalIndex, +node_freelist_first: Node.OptionalIndex, +node_end_index: u32, -/// How many nanoseconds between writing updates to the terminal. -refresh_rate_ns: u64 = 50 * std.time.ns_per_ms, +pub const TerminalMode = union(enum) { + off, + ansi_escape_codes, + /// This is not the same as being run on windows because other terminals + /// exist like MSYS/git-bash. + windows_api: if (is_windows) WindowsApi else void, -/// How many nanoseconds to keep the output hidden -initial_delay_ns: u64 = 500 * std.time.ns_per_ms, - -done: bool = true, - -/// Protects the `refresh` function, as well as `node.recently_updated_child`. -/// Without this, callsites would call `Node.end` and then free `Node` memory -/// while it was still being accessed by the `refresh` function. -update_mutex: std.Thread.Mutex = .{}, + pub const WindowsApi = struct { + /// The output code page of the console. + code_page: windows.UINT, + }; +}; -/// Keeps track of how many columns in the terminal have been output, so that -/// we can move the cursor back later. -columns_written: usize = undefined, +pub const Options = struct { + /// User-provided buffer with static lifetime. + /// + /// Used to store the entire write buffer sent to the terminal. Progress output will be truncated if it + /// cannot fit into this buffer which will look bad but not cause any malfunctions. + /// + /// Must be at least 200 bytes. + draw_buffer: []u8 = &default_draw_buffer, + /// How many nanoseconds between writing updates to the terminal. + refresh_rate_ns: u64 = 80 * std.time.ns_per_ms, + /// How many nanoseconds to keep the output hidden + initial_delay_ns: u64 = 200 * std.time.ns_per_ms, + /// If provided, causes the progress item to have a denominator. + /// 0 means unknown. + estimated_total_items: usize = 0, + root_name: []const u8 = "", + disable_printing: bool = false, +}; /// Represents one unit of progress. 
Each node can have children nodes, or /// one can use integers with `update`. pub const Node = struct { - context: *Progress, - parent: ?*Node, - name: []const u8, - unit: []const u8 = "", - /// Must be handled atomically to be thread-safe. - recently_updated_child: ?*Node = null, - /// Must be handled atomically to be thread-safe. 0 means null. - unprotected_estimated_total_items: usize, - /// Must be handled atomically to be thread-safe. - unprotected_completed_items: usize, + index: OptionalIndex, + + pub const max_name_len = 40; + + const Storage = extern struct { + /// Little endian. + completed_count: u32, + /// 0 means unknown. + /// Little endian. + estimated_total_count: u32, + name: [max_name_len]u8, + + /// Not thread-safe. + fn getIpcFd(s: Storage) ?posix.fd_t { + return if (s.estimated_total_count == std.math.maxInt(u32)) switch (@typeInfo(posix.fd_t)) { + .Int => @bitCast(s.completed_count), + .Pointer => @ptrFromInt(s.completed_count), + else => @compileError("unsupported fd_t of " ++ @typeName(posix.fd_t)), + } else null; + } + + /// Thread-safe. + fn setIpcFd(s: *Storage, fd: posix.fd_t) void { + const integer: u32 = switch (@typeInfo(posix.fd_t)) { + .Int => @bitCast(fd), + .Pointer => @intFromPtr(fd), + else => @compileError("unsupported fd_t of " ++ @typeName(posix.fd_t)), + }; + // `estimated_total_count` max int indicates the special state that + // causes `completed_count` to be treated as a file descriptor, so + // the order here matters. + @atomicStore(u32, &s.completed_count, integer, .monotonic); + @atomicStore(u32, &s.estimated_total_count, std.math.maxInt(u32), .release); + } + + /// Not thread-safe. + fn byteSwap(s: *Storage) void { + s.completed_count = @byteSwap(s.completed_count); + s.estimated_total_count = @byteSwap(s.estimated_total_count); + } + + comptime { + assert((@sizeOf(Storage) % 4) == 0); + } + }; + + const Parent = enum(u8) { + /// Unallocated storage. + unused = std.math.maxInt(u8) - 1, + /// Indicates root node. + none = std.math.maxInt(u8), + /// Index into `node_storage`. + _, + + fn unwrap(i: @This()) ?Index { + return switch (i) { + .unused, .none => return null, + else => @enumFromInt(@intFromEnum(i)), + }; + } + }; + + pub const OptionalIndex = enum(u8) { + none = std.math.maxInt(u8), + /// Index into `node_storage`. + _, + + pub fn unwrap(i: @This()) ?Index { + if (i == .none) return null; + return @enumFromInt(@intFromEnum(i)); + } + + fn toParent(i: @This()) Parent { + assert(@intFromEnum(i) != @intFromEnum(Parent.unused)); + return @enumFromInt(@intFromEnum(i)); + } + }; + + /// Index into `node_storage`. + pub const Index = enum(u8) { + _, + + fn toParent(i: @This()) Parent { + assert(@intFromEnum(i) != @intFromEnum(Parent.unused)); + assert(@intFromEnum(i) != @intFromEnum(Parent.none)); + return @enumFromInt(@intFromEnum(i)); + } + + pub fn toOptional(i: @This()) OptionalIndex { + return @enumFromInt(@intFromEnum(i)); + } + }; /// Create a new child progress node. Thread-safe. - /// Call `Node.end` when done. - /// TODO solve https://github.com/ziglang/zig/issues/2765 and then change this - /// API to set `self.parent.recently_updated_child` with the return value. - /// Until that is fixed you probably want to call `activate` on the return value. + /// /// Passing 0 for `estimated_total_items` means unknown. 
- pub fn start(self: *Node, name: []const u8, estimated_total_items: usize) Node { - return Node{ - .context = self.context, - .parent = self, - .name = name, - .unprotected_estimated_total_items = estimated_total_items, - .unprotected_completed_items = 0, - }; + pub fn start(node: Node, name: []const u8, estimated_total_items: usize) Node { + if (noop_impl) { + assert(node.index == .none); + return .{ .index = .none }; + } + const node_index = node.index.unwrap() orelse return .{ .index = .none }; + const parent = node_index.toParent(); + + const freelist_head = &global_progress.node_freelist_first; + var opt_free_index = @atomicLoad(Node.OptionalIndex, freelist_head, .seq_cst); + while (opt_free_index.unwrap()) |free_index| { + const freelist_ptr = freelistByIndex(free_index); + opt_free_index = @cmpxchgWeak(Node.OptionalIndex, freelist_head, opt_free_index, freelist_ptr.*, .seq_cst, .seq_cst) orelse { + // We won the allocation race. + return init(free_index, parent, name, estimated_total_items); + }; + } + + const free_index = @atomicRmw(u32, &global_progress.node_end_index, .Add, 1, .monotonic); + if (free_index >= global_progress.node_storage.len) { + // Ran out of node storage memory. Progress for this node will not be tracked. + _ = @atomicRmw(u32, &global_progress.node_end_index, .Sub, 1, .monotonic); + return .{ .index = .none }; + } + + return init(@enumFromInt(free_index), parent, name, estimated_total_items); } /// This is the same as calling `start` and then `end` on the returned `Node`. Thread-safe. - pub fn completeOne(self: *Node) void { - if (self.parent) |parent| { - @atomicStore(?*Node, &parent.recently_updated_child, self, .release); - } - _ = @atomicRmw(usize, &self.unprotected_completed_items, .Add, 1, .monotonic); - self.context.maybeRefresh(); + pub fn completeOne(n: Node) void { + const index = n.index.unwrap() orelse return; + const storage = storageByIndex(index); + _ = @atomicRmw(u32, &storage.completed_count, .Add, 1, .monotonic); + } + + /// Thread-safe. + pub fn setCompletedItems(n: Node, completed_items: usize) void { + const index = n.index.unwrap() orelse return; + const storage = storageByIndex(index); + @atomicStore(u32, &storage.completed_count, std.math.lossyCast(u32, completed_items), .monotonic); + } + + /// Thread-safe. 0 means unknown. + pub fn setEstimatedTotalItems(n: Node, count: usize) void { + const index = n.index.unwrap() orelse return; + const storage = storageByIndex(index); + // Avoid u32 max int which is used to indicate a special state. + const saturated = @min(std.math.maxInt(u32) - 1, count); + @atomicStore(u32, &storage.estimated_total_count, saturated, .monotonic); + } + + /// Thread-safe. + pub fn increaseEstimatedTotalItems(n: Node, count: usize) void { + const index = n.index.unwrap() orelse return; + const storage = storageByIndex(index); + _ = @atomicRmw(u32, &storage.estimated_total_count, .Add, std.math.lossyCast(u32, count), .monotonic); } /// Finish a started `Node`. Thread-safe. 
- pub fn end(self: *Node) void { - self.context.maybeRefresh(); - if (self.parent) |parent| { - { - self.context.update_mutex.lock(); - defer self.context.update_mutex.unlock(); - _ = @cmpxchgStrong(?*Node, &parent.recently_updated_child, self, null, .monotonic, .monotonic); + pub fn end(n: Node) void { + if (noop_impl) { + assert(n.index == .none); + return; + } + const index = n.index.unwrap() orelse return; + const parent_ptr = parentByIndex(index); + if (parent_ptr.unwrap()) |parent_index| { + _ = @atomicRmw(u32, &storageByIndex(parent_index).completed_count, .Add, 1, .monotonic); + @atomicStore(Node.Parent, parent_ptr, .unused, .seq_cst); + + const freelist_head = &global_progress.node_freelist_first; + var first = @atomicLoad(Node.OptionalIndex, freelist_head, .seq_cst); + while (true) { + freelistByIndex(index).* = first; + first = @cmpxchgWeak(Node.OptionalIndex, freelist_head, first, index.toOptional(), .seq_cst, .seq_cst) orelse break; } - parent.completeOne(); } else { - self.context.update_mutex.lock(); - defer self.context.update_mutex.unlock(); - self.context.done = true; - self.context.refreshWithHeldLock(); + @atomicStore(bool, &global_progress.done, true, .seq_cst); + global_progress.redraw_event.set(); + if (global_progress.update_thread) |thread| thread.join(); } } - /// Tell the parent node that this node is actively being worked on. Thread-safe. - pub fn activate(self: *Node) void { - if (self.parent) |parent| { - @atomicStore(?*Node, &parent.recently_updated_child, self, .release); - self.context.maybeRefresh(); - } + /// Posix-only. Used by `std.process.Child`. Thread-safe. + pub fn setIpcFd(node: Node, fd: posix.fd_t) void { + const index = node.index.unwrap() orelse return; + assert(fd >= 0); + assert(fd != posix.STDOUT_FILENO); + assert(fd != posix.STDIN_FILENO); + assert(fd != posix.STDERR_FILENO); + storageByIndex(index).setIpcFd(fd); } - /// Thread-safe. - pub fn setName(self: *Node, name: []const u8) void { - const progress = self.context; - progress.update_mutex.lock(); - defer progress.update_mutex.unlock(); - self.name = name; - if (self.parent) |parent| { - @atomicStore(?*Node, &parent.recently_updated_child, self, .release); - if (parent.parent) |grand_parent| { - @atomicStore(?*Node, &grand_parent.recently_updated_child, parent, .release); - } - if (progress.timer) |*timer| progress.maybeRefreshWithHeldLock(timer); - } + fn storageByIndex(index: Node.Index) *Node.Storage { + return &global_progress.node_storage[@intFromEnum(index)]; } - /// Thread-safe. - pub fn setUnit(self: *Node, unit: []const u8) void { - const progress = self.context; - progress.update_mutex.lock(); - defer progress.update_mutex.unlock(); - self.unit = unit; - if (self.parent) |parent| { - @atomicStore(?*Node, &parent.recently_updated_child, self, .release); - if (parent.parent) |grand_parent| { - @atomicStore(?*Node, &grand_parent.recently_updated_child, parent, .release); - } - if (progress.timer) |*timer| progress.maybeRefreshWithHeldLock(timer); - } + fn parentByIndex(index: Node.Index) *Node.Parent { + return &global_progress.node_parents[@intFromEnum(index)]; } - /// Thread-safe. 0 means unknown. - pub fn setEstimatedTotalItems(self: *Node, count: usize) void { - @atomicStore(usize, &self.unprotected_estimated_total_items, count, .monotonic); + fn freelistByIndex(index: Node.Index) *Node.OptionalIndex { + return &global_progress.node_freelist[@intFromEnum(index)]; } - /// Thread-safe. 
- pub fn setCompletedItems(self: *Node, completed_items: usize) void { - @atomicStore(usize, &self.unprotected_completed_items, completed_items, .monotonic); + fn init(free_index: Index, parent: Parent, name: []const u8, estimated_total_items: usize) Node { + assert(parent != .unused); + + const storage = storageByIndex(free_index); + storage.* = .{ + .completed_count = 0, + .estimated_total_count = std.math.lossyCast(u32, estimated_total_items), + .name = [1]u8{0} ** max_name_len, + }; + const name_len = @min(max_name_len, name.len); + @memcpy(storage.name[0..name_len], name[0..name_len]); + + const parent_ptr = parentByIndex(free_index); + assert(parent_ptr.* == .unused); + @atomicStore(Node.Parent, parent_ptr, parent, .release); + + return .{ .index = free_index.toOptional() }; } }; -/// Create a new progress node. +var global_progress: Progress = .{ + .terminal = undefined, + .terminal_mode = .off, + .update_thread = null, + .redraw_event = .{}, + .refresh_rate_ns = undefined, + .initial_delay_ns = undefined, + .rows = 0, + .cols = 0, + .written_newline_count = 0, + .accumulated_newline_count = 0, + .draw_buffer = undefined, + .done = false, + + .node_parents = &node_parents_buffer, + .node_storage = &node_storage_buffer, + .node_freelist = &node_freelist_buffer, + .node_freelist_first = .none, + .node_end_index = 0, +}; + +const node_storage_buffer_len = 200; +var node_parents_buffer: [node_storage_buffer_len]Node.Parent = undefined; +var node_storage_buffer: [node_storage_buffer_len]Node.Storage = undefined; +var node_freelist_buffer: [node_storage_buffer_len]Node.OptionalIndex = undefined; + +var default_draw_buffer: [4096]u8 = undefined; + +var debug_start_trace = std.debug.Trace.init; + +const noop_impl = builtin.single_threaded or switch (builtin.os.tag) { + .wasi, .freestanding => true, + else => false, +}; + +/// Initializes a global Progress instance. +/// +/// Asserts there is only one global Progress instance. +/// /// Call `Node.end` when done. -/// TODO solve https://github.com/ziglang/zig/issues/2765 and then change this -/// API to return Progress rather than accept it as a parameter. -/// `estimated_total_items` value of 0 means unknown. -pub fn start(self: *Progress, name: []const u8, estimated_total_items: usize) *Node { - const stderr = std.io.getStdErr(); - self.terminal = null; - if (stderr.supportsAnsiEscapeCodes()) { - self.terminal = stderr; - self.supports_ansi_escape_codes = true; - } else if (builtin.os.tag == .windows and stderr.isTty()) { - self.is_windows_terminal = true; - self.terminal = stderr; - } else if (builtin.os.tag != .windows) { - // we are in a "dumb" terminal like in acme or writing to a file - self.terminal = stderr; - } - self.root = Node{ - .context = self, - .parent = null, - .name = name, - .unprotected_estimated_total_items = estimated_total_items, - .unprotected_completed_items = 0, +pub fn start(options: Options) Node { + // Ensure there is only 1 global Progress object. 
+ if (global_progress.node_end_index != 0) { + debug_start_trace.dump(); + unreachable; + } + debug_start_trace.add("first initialized here"); + + @memset(global_progress.node_parents, .unused); + const root_node = Node.init(@enumFromInt(0), .none, options.root_name, options.estimated_total_items); + global_progress.done = false; + global_progress.node_end_index = 1; + + assert(options.draw_buffer.len >= 200); + global_progress.draw_buffer = options.draw_buffer; + global_progress.refresh_rate_ns = options.refresh_rate_ns; + global_progress.initial_delay_ns = options.initial_delay_ns; + + if (noop_impl) + return .{ .index = .none }; + + if (std.process.parseEnvVarInt("ZIG_PROGRESS", u31, 10)) |ipc_fd| { + global_progress.update_thread = std.Thread.spawn(.{}, ipcThreadRun, .{ + @as(posix.fd_t, switch (@typeInfo(posix.fd_t)) { + .Int => ipc_fd, + .Pointer => @ptrFromInt(ipc_fd), + else => @compileError("unsupported fd_t of " ++ @typeName(posix.fd_t)), + }), + }) catch |err| { + std.log.warn("failed to spawn IPC thread for communicating progress to parent: {s}", .{@errorName(err)}); + return .{ .index = .none }; + }; + } else |env_err| switch (env_err) { + error.EnvironmentVariableNotFound => { + if (options.disable_printing) { + return .{ .index = .none }; + } + const stderr = std.io.getStdErr(); + global_progress.terminal = stderr; + if (stderr.supportsAnsiEscapeCodes()) { + global_progress.terminal_mode = .ansi_escape_codes; + } else if (is_windows and stderr.isTty()) { + global_progress.terminal_mode = TerminalMode{ .windows_api = .{ + .code_page = windows.kernel32.GetConsoleOutputCP(), + } }; + } + + if (global_progress.terminal_mode == .off) { + return .{ .index = .none }; + } + + if (have_sigwinch) { + var act: posix.Sigaction = .{ + .handler = .{ .sigaction = handleSigWinch }, + .mask = posix.empty_sigset, + .flags = (posix.SA.SIGINFO | posix.SA.RESTART), + }; + posix.sigaction(posix.SIG.WINCH, &act, null) catch |err| { + std.log.warn("failed to install SIGWINCH signal handler for noticing terminal resizes: {s}", .{@errorName(err)}); + }; + } + + if (switch (global_progress.terminal_mode) { + .off => unreachable, // handled a few lines above + .ansi_escape_codes => std.Thread.spawn(.{}, updateThreadRun, .{}), + .windows_api => if (is_windows) std.Thread.spawn(.{}, windowsApiUpdateThreadRun, .{}) else unreachable, + }) |thread| { + global_progress.update_thread = thread; + } else |err| { + std.log.warn("unable to spawn thread for printing progress to terminal: {s}", .{@errorName(err)}); + return .{ .index = .none }; + } + }, + else => |e| { + std.log.warn("invalid ZIG_PROGRESS file descriptor integer: {s}", .{@errorName(e)}); + return .{ .index = .none }; + }, + } + + return root_node; +} + +/// Returns whether a resize is needed to learn the terminal size. +fn wait(timeout_ns: u64) bool { + const resize_flag = if (global_progress.redraw_event.timedWait(timeout_ns)) |_| + true + else |err| switch (err) { + error.Timeout => false, }; - self.columns_written = 0; - self.prev_refresh_timestamp = 0; - self.timer = std.time.Timer.start() catch null; - self.done = false; - return &self.root; + global_progress.redraw_event.reset(); + return resize_flag or (global_progress.cols == 0); } -/// Updates the terminal if enough time has passed since last update. Thread-safe. 
-pub fn maybeRefresh(self: *Progress) void { - if (self.timer) |*timer| { - if (!self.update_mutex.tryLock()) return; - defer self.update_mutex.unlock(); - maybeRefreshWithHeldLock(self, timer); +fn updateThreadRun() void { + // Store this data in the thread so that it does not need to be part of the + // linker data of the main executable. + var serialized_buffer: Serialized.Buffer = undefined; + + { + const resize_flag = wait(global_progress.initial_delay_ns); + if (@atomicLoad(bool, &global_progress.done, .seq_cst)) return; + maybeUpdateSize(resize_flag); + + const buffer = computeRedraw(&serialized_buffer); + if (stderr_mutex.tryLock()) { + defer stderr_mutex.unlock(); + write(buffer) catch return; + } + } + + while (true) { + const resize_flag = wait(global_progress.refresh_rate_ns); + + if (@atomicLoad(bool, &global_progress.done, .seq_cst)) { + stderr_mutex.lock(); + defer stderr_mutex.unlock(); + return clearWrittenWithEscapeCodes() catch {}; + } + + maybeUpdateSize(resize_flag); + + const buffer = computeRedraw(&serialized_buffer); + if (stderr_mutex.tryLock()) { + defer stderr_mutex.unlock(); + write(buffer) catch return; + } + } +} + +fn windowsApiUpdateThreadRun() void { + var serialized_buffer: Serialized.Buffer = undefined; + + { + const resize_flag = wait(global_progress.initial_delay_ns); + if (@atomicLoad(bool, &global_progress.done, .seq_cst)) return; + maybeUpdateSize(resize_flag); + + const buffer = computeRedraw(&serialized_buffer); + if (stderr_mutex.tryLock()) { + defer stderr_mutex.unlock(); + write(buffer) catch return; + } + } + + while (true) { + const resize_flag = wait(global_progress.refresh_rate_ns); + + if (@atomicLoad(bool, &global_progress.done, .seq_cst)) { + stderr_mutex.lock(); + defer stderr_mutex.unlock(); + return clearWrittenWindowsApi() catch {}; + } + + maybeUpdateSize(resize_flag); + + const buffer = computeRedraw(&serialized_buffer); + if (stderr_mutex.tryLock()) { + defer stderr_mutex.unlock(); + clearWrittenWindowsApi() catch return; + write(buffer) catch return; + } } } -fn maybeRefreshWithHeldLock(self: *Progress, timer: *std.time.Timer) void { - const now = timer.read(); - if (now < self.initial_delay_ns) return; - // TODO I have observed this to happen sometimes. I think we need to follow Rust's - // lead and guarantee monotonically increasing times in the std lib itself. - if (now < self.prev_refresh_timestamp) return; - if (now - self.prev_refresh_timestamp < self.refresh_rate_ns) return; - return self.refreshWithHeldLock(); +/// Allows the caller to freely write to stderr until `unlockStdErr` is called. +/// +/// During the lock, any `std.Progress` information is cleared from the terminal. +pub fn lockStdErr() void { + stderr_mutex.lock(); + clearWrittenWithEscapeCodes() catch {}; +} + +pub fn unlockStdErr() void { + stderr_mutex.unlock(); } -/// Updates the terminal and resets `self.next_refresh_timestamp`. Thread-safe. -pub fn refresh(self: *Progress) void { - if (!self.update_mutex.tryLock()) return; - defer self.update_mutex.unlock(); +fn ipcThreadRun(fd: posix.fd_t) anyerror!void { + // Store this data in the thread so that it does not need to be part of the + // linker data of the main executable. 
+ var serialized_buffer: Serialized.Buffer = undefined; + + { + _ = wait(global_progress.initial_delay_ns); - return self.refreshWithHeldLock(); + if (@atomicLoad(bool, &global_progress.done, .seq_cst)) + return; + + const serialized = serialize(&serialized_buffer); + writeIpc(fd, serialized) catch |err| switch (err) { + error.BrokenPipe => return, + }; + } + + while (true) { + _ = wait(global_progress.refresh_rate_ns); + + if (@atomicLoad(bool, &global_progress.done, .seq_cst)) + return; + + const serialized = serialize(&serialized_buffer); + writeIpc(fd, serialized) catch |err| switch (err) { + error.BrokenPipe => return, + }; + } } -fn clearWithHeldLock(p: *Progress, end_ptr: *usize) void { - const file = p.terminal orelse return; - var end = end_ptr.*; - if (p.columns_written > 0) { - // restore the cursor position by moving the cursor - // `columns_written` cells to the left, then clear the rest of the - // line - if (p.supports_ansi_escape_codes) { - end += (std.fmt.bufPrint(p.output_buffer[end..], "\x1b[{d}D", .{p.columns_written}) catch unreachable).len; - end += (std.fmt.bufPrint(p.output_buffer[end..], "\x1b[0K", .{}) catch unreachable).len; - } else if (builtin.os.tag == .windows) winapi: { - std.debug.assert(p.is_windows_terminal); - - var info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined; - if (windows.kernel32.GetConsoleScreenBufferInfo(file.handle, &info) != windows.TRUE) { - // stop trying to write to this file - p.terminal = null; - break :winapi; - } +const start_sync = "\x1b[?2026h"; +const up_one_line = "\x1bM"; +const clear = "\x1b[J"; +const save = "\x1b7"; +const restore = "\x1b8"; +const finish_sync = "\x1b[?2026l"; - var cursor_pos = windows.COORD{ - .X = info.dwCursorPosition.X - @as(windows.SHORT, @intCast(p.columns_written)), - .Y = info.dwCursorPosition.Y, +const TreeSymbol = enum { + /// ├─ + tee, + /// │ + line, + /// └─ + langle, + + const Encoding = enum { + ansi_escapes, + code_page_437, + utf8, + ascii, + }; + + /// The escape sequence representation as a string literal + fn escapeSeq(symbol: TreeSymbol) *const [9:0]u8 { + return switch (symbol) { + .tee => "\x1B\x28\x30\x74\x71\x1B\x28\x42 ", + .line => "\x1B\x28\x30\x78\x1B\x28\x42 ", + .langle => "\x1B\x28\x30\x6d\x71\x1B\x28\x42 ", + }; + } + + fn bytes(symbol: TreeSymbol, encoding: Encoding) []const u8 { + return switch (encoding) { + .ansi_escapes => escapeSeq(symbol), + .code_page_437 => switch (symbol) { + .tee => "\xC3\xC4 ", + .line => "\xB3 ", + .langle => "\xC0\xC4 ", + }, + .utf8 => switch (symbol) { + .tee => "├─ ", + .line => "│ ", + .langle => "└─ ", + }, + .ascii => switch (symbol) { + .tee => "|- ", + .line => "| ", + .langle => "+- ", + }, + }; + } + + fn maxByteLen(symbol: TreeSymbol) usize { + var max: usize = 0; + inline for (@typeInfo(Encoding).Enum.fields) |field| { + const len = symbol.bytes(@field(Encoding, field.name)).len; + max = @max(max, len); + } + return max; + } +}; + +fn appendTreeSymbol(symbol: TreeSymbol, buf: []u8, start_i: usize) usize { + switch (global_progress.terminal_mode) { + .off => unreachable, + .ansi_escape_codes => { + const bytes = symbol.escapeSeq(); + buf[start_i..][0..bytes.len].* = bytes.*; + return start_i + bytes.len; + }, + .windows_api => |windows_api| { + const bytes = if (!is_windows) unreachable else switch (windows_api.code_page) { + // Code page 437 is the default code page and contains the box drawing symbols + 437 => symbol.bytes(.code_page_437), + // UTF-8 + 65001 => symbol.bytes(.utf8), + // Fall back to ASCII approximation + else => 
symbol.bytes(.ascii), }; + @memcpy(buf[start_i..][0..bytes.len], bytes); + return start_i + bytes.len; + }, + } +} - if (cursor_pos.X < 0) - cursor_pos.X = 0; - - const fill_chars = @as(windows.DWORD, @intCast(info.dwSize.X - cursor_pos.X)); - - var written: windows.DWORD = undefined; - if (windows.kernel32.FillConsoleOutputAttribute( - file.handle, - info.wAttributes, - fill_chars, - cursor_pos, - &written, - ) != windows.TRUE) { - // stop trying to write to this file - p.terminal = null; - break :winapi; - } - if (windows.kernel32.FillConsoleOutputCharacterW( - file.handle, - ' ', - fill_chars, - cursor_pos, - &written, - ) != windows.TRUE) { - // stop trying to write to this file - p.terminal = null; - break :winapi; - } - if (windows.kernel32.SetConsoleCursorPosition(file.handle, cursor_pos) != windows.TRUE) { - // stop trying to write to this file - p.terminal = null; - break :winapi; +fn clearWrittenWithEscapeCodes() anyerror!void { + if (global_progress.written_newline_count == 0) return; + + var i: usize = 0; + const buf = global_progress.draw_buffer; + + buf[i..][0..start_sync.len].* = start_sync.*; + i += start_sync.len; + + i = computeClear(buf, i); + + buf[i..][0..finish_sync.len].* = finish_sync.*; + i += finish_sync.len; + + global_progress.accumulated_newline_count = 0; + try write(buf[0..i]); +} + +fn computeClear(buf: []u8, start_i: usize) usize { + var i = start_i; + + const prev_nl_n = global_progress.written_newline_count; + if (prev_nl_n > 0) { + buf[i] = '\r'; + i += 1; + for (0..prev_nl_n) |_| { + buf[i..][0..up_one_line.len].* = up_one_line.*; + i += up_one_line.len; + } + } + + buf[i..][0..clear.len].* = clear.*; + i += clear.len; + + return i; +} + +/// U+25BA or ► +const windows_api_start_marker = 0x25BA; + +fn clearWrittenWindowsApi() error{Unexpected}!void { + // This uses a 'marker' strategy. The idea is: + // - Always write a marker (in this case U+25BA or ►) at the beginning of the progress + // - Get the current cursor position (at the end of the progress) + // - Subtract the number of lines written to get the expected start of the progress + // - Check to see if the first character at the start of the progress is the marker + // - If it's not the marker, keep checking the line before until we find it + // - Clear the screen from that position down, and set the cursor position to the start + // + // This strategy works even if there is line wrapping, and can handle the window + // being resized/scrolled arbitrarily. + // + // Notes: + // - Ideally, the marker would be a zero-width character, but the Windows console + // doesn't seem to support rendering zero-width characters (they show up as a space) + // - This same marker idea could technically be done with an attribute instead + // (https://learn.microsoft.com/en-us/windows/console/console-screen-buffers#character-attributes) + // but it must be a valid attribute and it actually needs to apply to the first + // character in order to be readable via ReadConsoleOutputAttribute. It doesn't seem + // like any of the available attributes are invisible/benign. 
+ const prev_nl_n = global_progress.written_newline_count; + if (prev_nl_n > 0) { + const handle = global_progress.terminal.handle; + const screen_area = @as(windows.DWORD, global_progress.cols) * global_progress.rows; + + var console_info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined; + if (windows.kernel32.GetConsoleScreenBufferInfo(handle, &console_info) == 0) { + return error.Unexpected; + } + const cursor_pos = console_info.dwCursorPosition; + const expected_y = cursor_pos.Y - @as(i16, @intCast(prev_nl_n)); + var start_pos = windows.COORD{ .X = 0, .Y = expected_y }; + while (start_pos.Y >= 0) { + var wchar: [1]u16 = undefined; + var num_console_chars_read: windows.DWORD = undefined; + if (windows.kernel32.ReadConsoleOutputCharacterW(handle, &wchar, wchar.len, start_pos, &num_console_chars_read) == 0) { + return error.Unexpected; } + + if (wchar[0] == windows_api_start_marker) break; + start_pos.Y -= 1; } else { - // we are in a "dumb" terminal like in acme or writing to a file - p.output_buffer[end] = '\n'; - end += 1; + // If we couldn't find the marker, then just assume that no lines wrapped + start_pos = .{ .X = 0, .Y = expected_y }; + } + var num_chars_written: windows.DWORD = undefined; + if (windows.kernel32.FillConsoleOutputCharacterW(handle, ' ', screen_area, start_pos, &num_chars_written) == 0) { + return error.Unexpected; + } + if (windows.kernel32.SetConsoleCursorPosition(handle, start_pos) == 0) { + return error.Unexpected; } - - p.columns_written = 0; } - end_ptr.* = end; } -fn refreshWithHeldLock(self: *Progress) void { - const is_dumb = !self.supports_ansi_escape_codes and !self.is_windows_terminal; - if (is_dumb and self.dont_print_on_dumb) return; +const Children = struct { + child: Node.OptionalIndex, + sibling: Node.OptionalIndex, +}; + +const Serialized = struct { + parents: []Node.Parent, + storage: []Node.Storage, - const file = self.terminal orelse return; + const Buffer = struct { + parents: [node_storage_buffer_len]Node.Parent, + storage: [node_storage_buffer_len]Node.Storage, + map: [node_storage_buffer_len]Node.Index, - var end: usize = 0; - clearWithHeldLock(self, &end); + parents_copy: [node_storage_buffer_len]Node.Parent, + storage_copy: [node_storage_buffer_len]Node.Storage, + ipc_metadata_copy: [node_storage_buffer_len]SavedMetadata, - if (!self.done) { - var need_ellipse = false; - var maybe_node: ?*Node = &self.root; - while (maybe_node) |node| { - if (need_ellipse) { - self.bufWrite(&end, "... ", .{}); + ipc_metadata: [node_storage_buffer_len]SavedMetadata, + }; +}; + +fn serialize(serialized_buffer: *Serialized.Buffer) Serialized { + var serialized_len: usize = 0; + var any_ipc = false; + + // Iterate all of the nodes and construct a serializable copy of the state that can be examined + // without atomics. + const end_index = @atomicLoad(u32, &global_progress.node_end_index, .monotonic); + const node_parents = global_progress.node_parents[0..end_index]; + const node_storage = global_progress.node_storage[0..end_index]; + for (node_parents, node_storage, 0..) 
|*parent_ptr, *storage_ptr, i| { + var begin_parent = @atomicLoad(Node.Parent, parent_ptr, .acquire); + while (begin_parent != .unused) { + const dest_storage = &serialized_buffer.storage[serialized_len]; + @memcpy(&dest_storage.name, &storage_ptr.name); + dest_storage.estimated_total_count = @atomicLoad(u32, &storage_ptr.estimated_total_count, .acquire); + dest_storage.completed_count = @atomicLoad(u32, &storage_ptr.completed_count, .monotonic); + const end_parent = @atomicLoad(Node.Parent, parent_ptr, .acquire); + if (begin_parent == end_parent) { + any_ipc = any_ipc or (dest_storage.getIpcFd() != null); + serialized_buffer.parents[serialized_len] = begin_parent; + serialized_buffer.map[i] = @enumFromInt(serialized_len); + serialized_len += 1; + break; } - need_ellipse = false; - const eti = @atomicLoad(usize, &node.unprotected_estimated_total_items, .monotonic); - const completed_items = @atomicLoad(usize, &node.unprotected_completed_items, .monotonic); - const current_item = completed_items + 1; - if (node.name.len != 0 or eti > 0) { - if (node.name.len != 0) { - self.bufWrite(&end, "{s}", .{node.name}); - need_ellipse = true; - } - if (eti > 0) { - if (need_ellipse) self.bufWrite(&end, " ", .{}); - self.bufWrite(&end, "[{d}/{d}{s}] ", .{ current_item, eti, node.unit }); - need_ellipse = false; - } else if (completed_items != 0) { - if (need_ellipse) self.bufWrite(&end, " ", .{}); - self.bufWrite(&end, "[{d}{s}] ", .{ current_item, node.unit }); - need_ellipse = false; + + begin_parent = end_parent; + } + } + + // Remap parents to point inside serialized arrays. + for (serialized_buffer.parents[0..serialized_len]) |*parent| { + parent.* = switch (parent.*) { + .unused => unreachable, + .none => .none, + _ => |p| serialized_buffer.map[@intFromEnum(p)].toParent(), + }; + } + + // Find nodes which correspond to child processes. 
+ if (any_ipc) + serialized_len = serializeIpc(serialized_len, serialized_buffer); + + return .{ + .parents = serialized_buffer.parents[0..serialized_len], + .storage = serialized_buffer.storage[0..serialized_len], + }; +} + +const SavedMetadata = struct { + ipc_fd: u16, + main_index: u8, + start_index: u8, + nodes_len: u8, + + fn getIpcFd(metadata: SavedMetadata) posix.fd_t { + return if (is_windows) + @ptrFromInt(@as(usize, metadata.ipc_fd) << 2) + else + metadata.ipc_fd; + } + + fn setIpcFd(fd: posix.fd_t) u16 { + return @intCast(if (is_windows) + @shrExact(@intFromPtr(fd), 2) + else + fd); + } +}; + +var ipc_metadata_len: u8 = 0; +var remaining_read_trash_bytes: usize = 0; + +fn serializeIpc(start_serialized_len: usize, serialized_buffer: *Serialized.Buffer) usize { + const ipc_metadata_copy = &serialized_buffer.ipc_metadata_copy; + const ipc_metadata = &serialized_buffer.ipc_metadata; + + var serialized_len = start_serialized_len; + var pipe_buf: [2 * 4096]u8 align(4) = undefined; + + const old_ipc_metadata = ipc_metadata_copy[0..ipc_metadata_len]; + ipc_metadata_len = 0; + + main_loop: for ( + serialized_buffer.parents[0..serialized_len], + serialized_buffer.storage[0..serialized_len], + 0.., + ) |main_parent, *main_storage, main_index| { + if (main_parent == .unused) continue; + const fd = main_storage.getIpcFd() orelse continue; + var bytes_read: usize = 0; + while (true) { + const n = posix.read(fd, pipe_buf[bytes_read..]) catch |err| switch (err) { + error.WouldBlock => break, + else => |e| { + std.log.debug("failed to read child progress data: {s}", .{@errorName(e)}); + main_storage.completed_count = 0; + main_storage.estimated_total_count = 0; + continue :main_loop; + }, + }; + if (n == 0) break; + if (remaining_read_trash_bytes > 0) { + assert(bytes_read == 0); + if (remaining_read_trash_bytes >= n) { + remaining_read_trash_bytes -= n; + continue; } + const src = pipe_buf[remaining_read_trash_bytes..n]; + std.mem.copyForwards(u8, &pipe_buf, src); + remaining_read_trash_bytes = 0; + bytes_read = src.len; + continue; } - maybe_node = @atomicLoad(?*Node, &node.recently_updated_child, .acquire); + bytes_read += n; } - if (need_ellipse) { - self.bufWrite(&end, "... ", .{}); + // Ignore all but the last message on the pipe. + var input: []u8 = pipe_buf[0..bytes_read]; + if (input.len == 0) { + serialized_len = useSavedIpcData(serialized_len, serialized_buffer, main_storage, main_index, old_ipc_metadata); + continue; + } + + const storage, const parents = while (true) { + const subtree_len: usize = input[0]; + const expected_bytes = 1 + subtree_len * (@sizeOf(Node.Storage) + @sizeOf(Node.Parent)); + if (input.len < expected_bytes) { + // Ignore short reads. We'll handle the next full message when it comes instead. + assert(remaining_read_trash_bytes == 0); + remaining_read_trash_bytes = expected_bytes - input.len; + serialized_len = useSavedIpcData(serialized_len, serialized_buffer, main_storage, main_index, old_ipc_metadata); + continue :main_loop; + } + if (input.len > expected_bytes) { + input = input[expected_bytes..]; + continue; + } + const storage_bytes = input[1..][0 .. subtree_len * @sizeOf(Node.Storage)]; + const parents_bytes = input[1 + storage_bytes.len ..][0 .. 
subtree_len * @sizeOf(Node.Parent)]; + break .{ + std.mem.bytesAsSlice(Node.Storage, storage_bytes), + std.mem.bytesAsSlice(Node.Parent, parents_bytes), + }; + }; + + const nodes_len: u8 = @intCast(@min(parents.len - 1, serialized_buffer.storage.len - serialized_len)); + + // Remember in case the pipe is empty on next update. + ipc_metadata[ipc_metadata_len] = .{ + .ipc_fd = SavedMetadata.setIpcFd(fd), + .start_index = @intCast(serialized_len), + .nodes_len = nodes_len, + .main_index = @intCast(main_index), + }; + ipc_metadata_len += 1; + + // Mount the root here. + copyRoot(main_storage, &storage[0]); + if (is_big_endian) main_storage.byteSwap(); + + // Copy the rest of the tree to the end. + const storage_dest = serialized_buffer.storage[serialized_len..][0..nodes_len]; + @memcpy(storage_dest, storage[1..][0..nodes_len]); + + // Always little-endian over the pipe. + if (is_big_endian) for (storage_dest) |*s| s.byteSwap(); + + // Patch up parent pointers taking into account how the subtree is mounted. + for (serialized_buffer.parents[serialized_len..][0..nodes_len], parents[1..][0..nodes_len]) |*dest, p| { + dest.* = switch (p) { + // Fix bad data so the rest of the code does not see `unused`. + .none, .unused => .none, + // Root node is being mounted here. + @as(Node.Parent, @enumFromInt(0)) => @enumFromInt(main_index), + // Other nodes mounted at the end. + // Don't trust child data; if the data is outside the expected range, ignore the data. + // This also handles the case when data was truncated. + _ => |off| if (@intFromEnum(off) > nodes_len) + .none + else + @enumFromInt(serialized_len + @intFromEnum(off) - 1), + }; } + + serialized_len += nodes_len; } - _ = file.write(self.output_buffer[0..end]) catch { - // stop trying to write to this file - self.terminal = null; + // Save a copy in case any pipes are empty on the next update. 
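+    // useSavedIpcData falls back to these copies when a child has sent nothing new;
+    // ipc_metadata records where each child's subtree lives inside the copies.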
+ @memcpy(serialized_buffer.parents_copy[0..serialized_len], serialized_buffer.parents[0..serialized_len]); + @memcpy(serialized_buffer.storage_copy[0..serialized_len], serialized_buffer.storage[0..serialized_len]); + @memcpy(ipc_metadata_copy[0..ipc_metadata_len], ipc_metadata[0..ipc_metadata_len]); + + return serialized_len; +} + +fn copyRoot(dest: *Node.Storage, src: *align(1) Node.Storage) void { + dest.* = .{ + .completed_count = src.completed_count, + .estimated_total_count = src.estimated_total_count, + .name = if (src.name[0] == 0) dest.name else src.name, }; - if (self.timer) |*timer| { - self.prev_refresh_timestamp = timer.read(); +} + +fn findOld(ipc_fd: posix.fd_t, old_metadata: []const SavedMetadata) ?*const SavedMetadata { + for (old_metadata) |*m| { + if (m.getIpcFd() == ipc_fd) + return m; } + return null; } -pub fn log(self: *Progress, comptime format: []const u8, args: anytype) void { - const file = self.terminal orelse { - std.debug.print(format, args); - return; +fn useSavedIpcData( + start_serialized_len: usize, + serialized_buffer: *Serialized.Buffer, + main_storage: *Node.Storage, + main_index: usize, + old_metadata: []const SavedMetadata, +) usize { + const parents_copy = &serialized_buffer.parents_copy; + const storage_copy = &serialized_buffer.storage_copy; + const ipc_metadata = &serialized_buffer.ipc_metadata; + + const ipc_fd = main_storage.getIpcFd().?; + const saved_metadata = findOld(ipc_fd, old_metadata) orelse { + main_storage.completed_count = 0; + main_storage.estimated_total_count = 0; + return start_serialized_len; }; - self.refresh(); - file.writer().print(format, args) catch { - self.terminal = null; - return; + + const start_index = saved_metadata.start_index; + const nodes_len = @min(saved_metadata.nodes_len, serialized_buffer.storage.len - start_serialized_len); + const old_main_index = saved_metadata.main_index; + + ipc_metadata[ipc_metadata_len] = .{ + .ipc_fd = SavedMetadata.setIpcFd(ipc_fd), + .start_index = @intCast(start_serialized_len), + .nodes_len = nodes_len, + .main_index = @intCast(main_index), }; - self.columns_written = 0; -} + ipc_metadata_len += 1; + + const parents = parents_copy[start_index..][0..nodes_len]; + const storage = storage_copy[start_index..][0..nodes_len]; -/// Allows the caller to freely write to stderr until unlock_stderr() is called. -/// During the lock, the progress information is cleared from the terminal. 
-pub fn lock_stderr(p: *Progress) void { - p.update_mutex.lock(); - if (p.terminal) |file| { - var end: usize = 0; - clearWithHeldLock(p, &end); - _ = file.write(p.output_buffer[0..end]) catch { - // stop trying to write to this file - p.terminal = null; + copyRoot(main_storage, &storage_copy[old_main_index]); + + @memcpy(serialized_buffer.storage[start_serialized_len..][0..storage.len], storage); + + for (serialized_buffer.parents[start_serialized_len..][0..parents.len], parents) |*dest, p| { + dest.* = switch (p) { + .none, .unused => .none, + _ => |prev| d: { + if (@intFromEnum(prev) == old_main_index) { + break :d @enumFromInt(main_index); + } else if (@intFromEnum(prev) > nodes_len) { + break :d .none; + } else { + break :d @enumFromInt(@intFromEnum(prev) - start_index + start_serialized_len); + } + }, }; } - std.debug.getStderrMutex().lock(); + + return start_serialized_len + storage.len; +} + +fn computeRedraw(serialized_buffer: *Serialized.Buffer) []u8 { + const serialized = serialize(serialized_buffer); + + // Now we can analyze our copy of the graph without atomics, reconstructing + // children lists which do not exist in the canonical data. These are + // needed for tree traversal below. + + var children_buffer: [node_storage_buffer_len]Children = undefined; + const children = children_buffer[0..serialized.parents.len]; + + @memset(children, .{ .child = .none, .sibling = .none }); + + for (serialized.parents, 0..) |parent, child_index_usize| { + const child_index: Node.Index = @enumFromInt(child_index_usize); + assert(parent != .unused); + const parent_index = parent.unwrap() orelse continue; + const children_node = &children[@intFromEnum(parent_index)]; + if (children_node.child.unwrap()) |existing_child_index| { + const existing_child = &children[@intFromEnum(existing_child_index)]; + children[@intFromEnum(child_index)].sibling = existing_child.sibling; + existing_child.sibling = child_index.toOptional(); + } else { + children_node.child = child_index.toOptional(); + } + } + + // The strategy is: keep the cursor at the end, and then with every redraw: + // move cursor to beginning of line, move cursor up N lines, erase to end of screen, write + + var i: usize = 0; + const buf = global_progress.draw_buffer; + + buf[i..][0..start_sync.len].* = start_sync.*; + i += start_sync.len; + + switch (global_progress.terminal_mode) { + .off => unreachable, + .ansi_escape_codes => i = computeClear(buf, i), + .windows_api => if (!is_windows) unreachable, + } + + global_progress.accumulated_newline_count = 0; + const root_node_index: Node.Index = @enumFromInt(0); + i = computeNode(buf, i, serialized, children, root_node_index); + + buf[i..][0..finish_sync.len].* = finish_sync.*; + i += finish_sync.len; + + return buf[0..i]; +} + +fn computePrefix( + buf: []u8, + start_i: usize, + serialized: Serialized, + children: []const Children, + node_index: Node.Index, +) usize { + var i = start_i; + const parent_index = serialized.parents[@intFromEnum(node_index)].unwrap() orelse return i; + if (serialized.parents[@intFromEnum(parent_index)] == .none) return i; + if (@intFromEnum(serialized.parents[@intFromEnum(parent_index)]) == 0 and + serialized.storage[0].name[0] == 0) + { + return i; + } + i = computePrefix(buf, i, serialized, children, parent_index); + if (children[@intFromEnum(parent_index)].sibling == .none) { + const prefix = " "; + const upper_bound_len = prefix.len + line_upper_bound_len; + if (i + upper_bound_len > buf.len) return buf.len; + buf[i..][0..prefix.len].* = prefix.*; + i += 
prefix.len; + } else { + const upper_bound_len = comptime (TreeSymbol.line.maxByteLen() + line_upper_bound_len); + if (i + upper_bound_len > buf.len) return buf.len; + i = appendTreeSymbol(.line, buf, i); + } + return i; +} + +const line_upper_bound_len = @max(TreeSymbol.tee.maxByteLen(), TreeSymbol.langle.maxByteLen()) + + "[4294967296/4294967296] ".len + Node.max_name_len + finish_sync.len; + +fn computeNode( + buf: []u8, + start_i: usize, + serialized: Serialized, + children: []const Children, + node_index: Node.Index, +) usize { + var i = start_i; + i = computePrefix(buf, i, serialized, children, node_index); + + if (i + line_upper_bound_len > buf.len) + return start_i; + + const storage = &serialized.storage[@intFromEnum(node_index)]; + const estimated_total = storage.estimated_total_count; + const completed_items = storage.completed_count; + const name = if (std.mem.indexOfScalar(u8, &storage.name, 0)) |end| storage.name[0..end] else &storage.name; + const parent = serialized.parents[@intFromEnum(node_index)]; + + if (parent != .none) p: { + if (@intFromEnum(parent) == 0 and serialized.storage[0].name[0] == 0) { + break :p; + } + if (children[@intFromEnum(node_index)].sibling == .none) { + i = appendTreeSymbol(.langle, buf, i); + } else { + i = appendTreeSymbol(.tee, buf, i); + } + } + + const is_empty_root = @intFromEnum(node_index) == 0 and serialized.storage[0].name[0] == 0; + if (!is_empty_root) { + if (name.len != 0 or estimated_total > 0) { + if (estimated_total > 0) { + i += (std.fmt.bufPrint(buf[i..], "[{d}/{d}] ", .{ completed_items, estimated_total }) catch &.{}).len; + } else if (completed_items != 0) { + i += (std.fmt.bufPrint(buf[i..], "[{d}] ", .{completed_items}) catch &.{}).len; + } + if (name.len != 0) { + i += (std.fmt.bufPrint(buf[i..], "{s}", .{name}) catch &.{}).len; + } + } + + i = @min(global_progress.cols + start_i, i); + buf[i] = '\n'; + i += 1; + global_progress.accumulated_newline_count += 1; + } + + if (global_progress.withinRowLimit()) { + if (children[@intFromEnum(node_index)].child.unwrap()) |child| { + i = computeNode(buf, i, serialized, children, child); + } + } + + if (global_progress.withinRowLimit()) { + if (children[@intFromEnum(node_index)].sibling.unwrap()) |sibling| { + i = computeNode(buf, i, serialized, children, sibling); + } + } + + return i; } -pub fn unlock_stderr(p: *Progress) void { - std.debug.getStderrMutex().unlock(); - p.update_mutex.unlock(); +fn withinRowLimit(p: *Progress) bool { + // The +2 here is so that the PS1 is not scrolled off the top of the terminal. + // one because we keep the cursor on the next line + // one more to account for the PS1 + return p.accumulated_newline_count + 2 < p.rows; } -fn bufWrite(self: *Progress, end: *usize, comptime format: []const u8, args: anytype) void { - if (std.fmt.bufPrint(self.output_buffer[end.*..], format, args)) |written| { - const amt = written.len; - end.* += amt; - self.columns_written += amt; +fn write(buf: []const u8) anyerror!void { + try global_progress.terminal.writeAll(buf); + global_progress.written_newline_count = global_progress.accumulated_newline_count; +} + +var remaining_write_trash_bytes: usize = 0; + +fn writeIpc(fd: posix.fd_t, serialized: Serialized) error{BrokenPipe}!void { + // Byteswap if necessary to ensure little endian over the pipe. This is + // needed because the parent or child process might be running in qemu. 
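+    // The parent performs the matching byteSwap in serializeIpc when it reads
+    // this data back on a big-endian host.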
+ if (is_big_endian) for (serialized.storage) |*s| s.byteSwap(); + + assert(serialized.parents.len == serialized.storage.len); + const serialized_len: u8 = @intCast(serialized.parents.len); + const header = std.mem.asBytes(&serialized_len); + const storage = std.mem.sliceAsBytes(serialized.storage); + const parents = std.mem.sliceAsBytes(serialized.parents); + + var vecs: [3]posix.iovec_const = .{ + .{ .base = header.ptr, .len = header.len }, + .{ .base = storage.ptr, .len = storage.len }, + .{ .base = parents.ptr, .len = parents.len }, + }; + + while (remaining_write_trash_bytes > 0) { + // We do this in a separate write call to give a better chance for the + // writev below to be in a single packet. + const n = @min(parents.len, remaining_write_trash_bytes); + if (posix.write(fd, parents[0..n])) |written| { + remaining_write_trash_bytes -= written; + continue; + } else |err| switch (err) { + error.WouldBlock => return, + error.BrokenPipe => return error.BrokenPipe, + else => |e| { + std.log.debug("failed to send progress to parent process: {s}", .{@errorName(e)}); + return error.BrokenPipe; + }, + } + } + + // If this write would block we do not want to keep trying, but we need to + // know if a partial message was written. + if (posix.writev(fd, &vecs)) |written| { + const total = header.len + storage.len + parents.len; + if (written < total) { + remaining_write_trash_bytes = total - written; + } } else |err| switch (err) { - error.NoSpaceLeft => { - self.columns_written += self.output_buffer.len - end.*; - end.* = self.output_buffer.len; - const suffix = "... "; - @memcpy(self.output_buffer[self.output_buffer.len - suffix.len ..], suffix); + error.WouldBlock => {}, + error.BrokenPipe => return error.BrokenPipe, + else => |e| { + std.log.debug("failed to send progress to parent process: {s}", .{@errorName(e)}); + return error.BrokenPipe; }, } } -test "basic functionality" { - var disable = true; - _ = &disable; - if (disable) { - // This test is disabled because it uses time.sleep() and is therefore slow. It also - // prints bogus progress data to stderr. - return error.SkipZigTest; - } - var progress = Progress{}; - const root_node = progress.start("", 100); - defer root_node.end(); - - const speed_factor = std.time.ns_per_ms; - - const sub_task_names = [_][]const u8{ - "reticulating splines", - "adjusting shoes", - "climbing towers", - "pouring juice", - }; - var next_sub_task: usize = 0; +fn maybeUpdateSize(resize_flag: bool) void { + if (!resize_flag) return; - var i: usize = 0; - while (i < 100) : (i += 1) { - var node = root_node.start(sub_task_names[next_sub_task], 5); - node.activate(); - next_sub_task = (next_sub_task + 1) % sub_task_names.len; + const fd = global_progress.terminal.handle; - node.completeOne(); - std.time.sleep(5 * speed_factor); - node.completeOne(); - node.completeOne(); - std.time.sleep(5 * speed_factor); - node.completeOne(); - node.completeOne(); - std.time.sleep(5 * speed_factor); + if (is_windows) { + var info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined; - node.end(); + if (windows.kernel32.GetConsoleScreenBufferInfo(fd, &info) != windows.FALSE) { + // In the old Windows console, dwSize.Y is the line count of the + // entire scrollback buffer, so we use this instead so that we + // always get the size of the screen. 
+ const screen_height = info.srWindow.Bottom - info.srWindow.Top; + global_progress.rows = @intCast(screen_height); + global_progress.cols = @intCast(info.dwSize.X); + } else { + std.log.debug("failed to determine terminal size; using conservative guess 80x25", .{}); + global_progress.rows = 25; + global_progress.cols = 80; + } + } else { + var winsize: posix.winsize = .{ + .ws_row = 0, + .ws_col = 0, + .ws_xpixel = 0, + .ws_ypixel = 0, + }; - std.time.sleep(5 * speed_factor); - } - { - var node = root_node.start("this is a really long name designed to activate the truncation code. let's find out if it works", 0); - node.activate(); - std.time.sleep(10 * speed_factor); - progress.refresh(); - std.time.sleep(10 * speed_factor); - node.end(); + const err = posix.system.ioctl(fd, posix.T.IOCGWINSZ, @intFromPtr(&winsize)); + if (posix.errno(err) == .SUCCESS) { + global_progress.rows = winsize.ws_row; + global_progress.cols = winsize.ws_col; + } else { + std.log.debug("failed to determine terminal size; using conservative guess 80x25", .{}); + global_progress.rows = 25; + global_progress.cols = 80; + } } } + +fn handleSigWinch(sig: i32, info: *const posix.siginfo_t, ctx_ptr: ?*anyopaque) callconv(.C) void { + _ = info; + _ = ctx_ptr; + assert(sig == posix.SIG.WINCH); + global_progress.redraw_event.set(); +} + +const have_sigwinch = switch (builtin.os.tag) { + .linux, + .plan9, + .solaris, + .netbsd, + .openbsd, + .haiku, + .macos, + .ios, + .watchos, + .tvos, + .visionos, + .dragonfly, + .freebsd, + => true, + + else => false, +}; + +var stderr_mutex: std.Thread.Mutex = .{}; diff --git a/lib/std/debug.zig b/lib/std/debug.zig index 980b027f0ac6..1073d6d3abd9 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -77,19 +77,28 @@ const PdbOrDwarf = union(enum) { } }; -var stderr_mutex = std.Thread.Mutex{}; +/// Allows the caller to freely write to stderr until `unlockStdErr` is called. +/// +/// During the lock, any `std.Progress` information is cleared from the terminal. +pub fn lockStdErr() void { + std.Progress.lockStdErr(); +} + +pub fn unlockStdErr() void { + std.Progress.unlockStdErr(); +} /// Print to stderr, unbuffered, and silently returning on failure. Intended /// for use in "printf debugging." Use `std.log` functions for proper logging. pub fn print(comptime fmt: []const u8, args: anytype) void { - stderr_mutex.lock(); - defer stderr_mutex.unlock(); + lockStdErr(); + defer unlockStdErr(); const stderr = io.getStdErr().writer(); nosuspend stderr.print(fmt, args) catch return; } pub fn getStderrMutex() *std.Thread.Mutex { - return &stderr_mutex; + @compileError("deprecated. call std.debug.lockStdErr() and std.debug.unlockStdErr() instead which will integrate properly with std.Progress"); } /// TODO multithreaded awareness @@ -107,8 +116,8 @@ pub fn getSelfDebugInfo() !*DebugInfo { /// Tries to print a hexadecimal view of the bytes, unbuffered, and ignores any error returned. /// Obtains the stderr mutex while dumping. 
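+/// The mutex is taken via `lockStdErr`, so any `std.Progress` output is cleared first.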
pub fn dump_hex(bytes: []const u8) void { - stderr_mutex.lock(); - defer stderr_mutex.unlock(); + lockStdErr(); + defer unlockStdErr(); dump_hex_fallible(bytes) catch {}; } @@ -2750,13 +2759,19 @@ pub const Trace = ConfigurableTrace(2, 4, builtin.mode == .Debug); pub fn ConfigurableTrace(comptime size: usize, comptime stack_frame_count: usize, comptime is_enabled: bool) type { return struct { - addrs: [actual_size][stack_frame_count]usize = undefined, - notes: [actual_size][]const u8 = undefined, - index: Index = 0, + addrs: [actual_size][stack_frame_count]usize, + notes: [actual_size][]const u8, + index: Index, const actual_size = if (enabled) size else 0; const Index = if (enabled) usize else u0; + pub const init: @This() = .{ + .addrs = undefined, + .notes = undefined, + .index = 0, + }; + pub const enabled = is_enabled; pub const add = if (enabled) addNoInline else addNoOp; diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig index 2d1f4402d647..1a7475e1c598 100644 --- a/lib/std/fmt.zig +++ b/lib/std/fmt.zig @@ -9,7 +9,7 @@ const assert = std.debug.assert; const mem = std.mem; const unicode = std.unicode; const meta = std.meta; -const lossyCast = std.math.lossyCast; +const lossyCast = math.lossyCast; const expectFmt = std.testing.expectFmt; pub const default_max_depth = 3; @@ -1494,10 +1494,20 @@ pub fn Formatter(comptime format_fn: anytype) type { /// Ignores '_' character in `buf`. /// See also `parseUnsigned`. pub fn parseInt(comptime T: type, buf: []const u8, base: u8) ParseIntError!T { + return parseIntWithGenericCharacter(T, u8, buf, base); +} + +/// Like `parseInt`, but with a generic `Character` type. +pub fn parseIntWithGenericCharacter( + comptime Result: type, + comptime Character: type, + buf: []const Character, + base: u8, +) ParseIntError!Result { if (buf.len == 0) return error.InvalidCharacter; - if (buf[0] == '+') return parseWithSign(T, buf[1..], base, .pos); - if (buf[0] == '-') return parseWithSign(T, buf[1..], base, .neg); - return parseWithSign(T, buf, base, .pos); + if (buf[0] == '+') return parseIntWithSign(Result, Character, buf[1..], base, .pos); + if (buf[0] == '-') return parseIntWithSign(Result, Character, buf[1..], base, .neg); + return parseIntWithSign(Result, Character, buf, base, .pos); } test parseInt { @@ -1560,12 +1570,13 @@ test parseInt { try std.testing.expectEqual(@as(i5, -16), try std.fmt.parseInt(i5, "-10", 16)); } -fn parseWithSign( - comptime T: type, - buf: []const u8, +fn parseIntWithSign( + comptime Result: type, + comptime Character: type, + buf: []const Character, base: u8, comptime sign: enum { pos, neg }, -) ParseIntError!T { +) ParseIntError!Result { if (buf.len == 0) return error.InvalidCharacter; var buf_base = base; @@ -1575,7 +1586,7 @@ fn parseWithSign( buf_base = 10; // Detect the base by looking at buf prefix. if (buf.len > 2 and buf[0] == '0') { - switch (std.ascii.toLower(buf[1])) { + if (math.cast(u8, buf[1])) |c| switch (std.ascii.toLower(c)) { 'b' => { buf_base = 2; buf_start = buf[2..]; @@ -1589,7 +1600,7 @@ fn parseWithSign( buf_start = buf[2..]; }, else => {}, - } + }; } } @@ -1598,33 +1609,33 @@ fn parseWithSign( .neg => math.sub, }; - // accumulate into U which is always 8 bits or larger. this prevents - // `buf_base` from overflowing T. - const info = @typeInfo(T); - const U = std.meta.Int(info.Int.signedness, @max(8, info.Int.bits)); - var x: U = 0; + // accumulate into Accumulate which is always 8 bits or larger. this prevents + // `buf_base` from overflowing Result. 
+ const info = @typeInfo(Result); + const Accumulate = std.meta.Int(info.Int.signedness, @max(8, info.Int.bits)); + var accumulate: Accumulate = 0; if (buf_start[0] == '_' or buf_start[buf_start.len - 1] == '_') return error.InvalidCharacter; for (buf_start) |c| { if (c == '_') continue; - const digit = try charToDigit(c, buf_base); - if (x != 0) { - x = try math.mul(U, x, math.cast(U, buf_base) orelse return error.Overflow); + const digit = try charToDigit(math.cast(u8, c) orelse return error.InvalidCharacter, buf_base); + if (accumulate != 0) { + accumulate = try math.mul(Accumulate, accumulate, math.cast(Accumulate, buf_base) orelse return error.Overflow); } else if (sign == .neg) { // The first digit of a negative number. // Consider parsing "-4" as an i3. // This should work, but positive 4 overflows i3, so we can't cast the digit to T and subtract. - x = math.cast(U, -@as(i8, @intCast(digit))) orelse return error.Overflow; + accumulate = math.cast(Accumulate, -@as(i8, @intCast(digit))) orelse return error.Overflow; continue; } - x = try add(U, x, math.cast(U, digit) orelse return error.Overflow); + accumulate = try add(Accumulate, accumulate, math.cast(Accumulate, digit) orelse return error.Overflow); } - return if (T == U) - x + return if (Result == Accumulate) + accumulate else - math.cast(T, x) orelse return error.Overflow; + math.cast(Result, accumulate) orelse return error.Overflow; } /// Parses the string `buf` as unsigned representation in the specified base @@ -1639,7 +1650,7 @@ fn parseWithSign( /// Ignores '_' character in `buf`. /// See also `parseInt`. pub fn parseUnsigned(comptime T: type, buf: []const u8, base: u8) ParseIntError!T { - return parseWithSign(T, buf, base, .pos); + return parseIntWithSign(T, u8, buf, base, .pos); } test parseUnsigned { diff --git a/lib/std/io/tty.zig b/lib/std/io/tty.zig index baf54a1fdf86..cdeaba81c51e 100644 --- a/lib/std/io/tty.zig +++ b/lib/std/io/tty.zig @@ -24,7 +24,7 @@ pub fn detectConfig(file: File) Config { if (native_os == .windows and file.isTty()) { var info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined; - if (windows.kernel32.GetConsoleScreenBufferInfo(file.handle, &info) != windows.TRUE) { + if (windows.kernel32.GetConsoleScreenBufferInfo(file.handle, &info) == windows.FALSE) { return if (force_color == true) .escape_codes else .no_color; } return .{ .windows_api = .{ diff --git a/lib/std/json/dynamic.zig b/lib/std/json/dynamic.zig index a56d37bf0ba5..a1849b0fed3e 100644 --- a/lib/std/json/dynamic.zig +++ b/lib/std/json/dynamic.zig @@ -52,8 +52,8 @@ pub const Value = union(enum) { } pub fn dump(self: Value) void { - std.debug.getStderrMutex().lock(); - defer std.debug.getStderrMutex().unlock(); + std.debug.lockStdErr(); + defer std.debug.unlockStdErr(); const stderr = std.io.getStdErr().writer(); stringify(self, .{}, stderr) catch return; diff --git a/lib/std/log.zig b/lib/std/log.zig index 0562d09c51ce..b2c05112b029 100644 --- a/lib/std/log.zig +++ b/lib/std/log.zig @@ -45,8 +45,8 @@ //! const prefix = "[" ++ comptime level.asText() ++ "] " ++ scope_prefix; //! //! // Print the message to stderr, silently ignoring any errors -//! std.debug.getStderrMutex().lock(); -//! defer std.debug.getStderrMutex().unlock(); +//! std.debug.lockStdErr(); +//! defer std.debug.unlockStdErr(); //! const stderr = std.io.getStdErr().writer(); //! nosuspend stderr.print(prefix ++ format ++ "\n", args) catch return; //! 
} @@ -152,8 +152,8 @@ pub fn defaultLog( var bw = std.io.bufferedWriter(stderr); const writer = bw.writer(); - std.debug.getStderrMutex().lock(); - defer std.debug.getStderrMutex().unlock(); + std.debug.lockStdErr(); + defer std.debug.unlockStdErr(); nosuspend { writer.print(level_txt ++ prefix2 ++ format ++ "\n", args) catch return; bw.flush() catch return; diff --git a/lib/std/os/windows/kernel32.zig b/lib/std/os/windows/kernel32.zig index a5c295487861..3d72b6f1a26e 100644 --- a/lib/std/os/windows/kernel32.zig +++ b/lib/std/os/windows/kernel32.zig @@ -175,6 +175,15 @@ pub extern "kernel32" fn FillConsoleOutputCharacterW(hConsoleOutput: HANDLE, cCh pub extern "kernel32" fn FillConsoleOutputAttribute(hConsoleOutput: HANDLE, wAttribute: WORD, nLength: DWORD, dwWriteCoord: COORD, lpNumberOfAttrsWritten: *DWORD) callconv(WINAPI) BOOL; pub extern "kernel32" fn SetConsoleCursorPosition(hConsoleOutput: HANDLE, dwCursorPosition: COORD) callconv(WINAPI) BOOL; +pub extern "kernel32" fn WriteConsoleW(hConsoleOutput: HANDLE, lpBuffer: [*]const u16, nNumberOfCharsToWrite: DWORD, lpNumberOfCharsWritten: ?*DWORD, lpReserved: ?LPVOID) callconv(WINAPI) BOOL; +pub extern "kernel32" fn ReadConsoleOutputCharacterW( + hConsoleOutput: windows.HANDLE, + lpCharacter: [*]u16, + nLength: windows.DWORD, + dwReadCoord: windows.COORD, + lpNumberOfCharsRead: *windows.DWORD, +) callconv(windows.WINAPI) windows.BOOL; + pub extern "kernel32" fn GetCurrentDirectoryW(nBufferLength: DWORD, lpBuffer: ?[*]WCHAR) callconv(WINAPI) DWORD; pub extern "kernel32" fn GetCurrentThread() callconv(WINAPI) HANDLE; diff --git a/lib/std/process.zig b/lib/std/process.zig index 5bdee4971d43..e55ce7ff4148 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -431,6 +431,26 @@ pub fn hasEnvVarConstant(comptime key: []const u8) bool { } } +pub const ParseEnvVarIntError = std.fmt.ParseIntError || error{EnvironmentVariableNotFound}; + +/// Parses an environment variable as an integer. +/// +/// Since the key is comptime-known, no allocation is needed. +/// +/// On Windows, `key` must be valid UTF-8. +pub fn parseEnvVarInt(comptime key: []const u8, comptime I: type, base: u8) ParseEnvVarIntError!I { + if (native_os == .windows) { + const key_w = comptime std.unicode.utf8ToUtf16LeStringLiteral(key); + const text = getenvW(key_w) orelse return error.EnvironmentVariableNotFound; + return std.fmt.parseIntWithGenericCharacter(I, u16, text, base); + } else if (native_os == .wasi and !builtin.link_libc) { + @compileError("parseEnvVarInt is not supported for WASI without libc"); + } else { + const text = posix.getenv(key) orelse return error.EnvironmentVariableNotFound; + return std.fmt.parseInt(I, text, base); + } +} + pub const HasEnvVarError = error{ OutOfMemory, @@ -1740,6 +1760,7 @@ pub fn cleanExit() void { if (builtin.mode == .Debug) { return; } else { + std.debug.lockStdErr(); exit(0); } } @@ -1790,24 +1811,143 @@ test raiseFileDescriptorLimit { raiseFileDescriptorLimit(); } -pub fn createNullDelimitedEnvMap(arena: mem.Allocator, env_map: *const EnvMap) ![:null]?[*:0]u8 { - const envp_count = env_map.count(); +pub const CreateEnvironOptions = struct { + /// `null` means to leave the `ZIG_PROGRESS` environment variable unmodified. + /// If non-null, negative means to remove the environment variable, and >= 0 + /// means to provide it with the given integer. + zig_progress_fd: ?i32 = null, +}; + +/// Creates a null-deliminated environment variable block in the format +/// expected by POSIX, from a hash map plus options. 
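+///
+/// For example, a caller that wants the child to see `ZIG_PROGRESS=3` might
+/// write something like:
+///
+///     const envp = try createEnvironFromMap(arena, &env_map, .{ .zig_progress_fd = 3 });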
+pub fn createEnvironFromMap(
+    arena: Allocator,
+    map: *const EnvMap,
+    options: CreateEnvironOptions,
+) Allocator.Error![:null]?[*:0]u8 {
+    const ZigProgressAction = enum { nothing, edit, delete, add };
+    const zig_progress_action: ZigProgressAction = a: {
+        const fd = options.zig_progress_fd orelse break :a .nothing;
+        const contains = map.get("ZIG_PROGRESS") != null;
+        if (fd >= 0) {
+            break :a if (contains) .edit else .add;
+        } else {
+            if (contains) break :a .delete;
+        }
+        break :a .nothing;
+    };
+
+    const envp_count: usize = c: {
+        var count: usize = map.count();
+        switch (zig_progress_action) {
+            .add => count += 1,
+            .delete => count -= 1,
+            .nothing, .edit => {},
+        }
+        break :c count;
+    };
+
+    const envp_buf = try arena.allocSentinel(?[*:0]u8, envp_count, null);
+    var i: usize = 0;
+
+    if (zig_progress_action == .add) {
+        envp_buf[i] = try std.fmt.allocPrintZ(arena, "ZIG_PROGRESS={d}", .{options.zig_progress_fd.?});
+        i += 1;
+    }
+
     {
-        var it = env_map.iterator();
-        var i: usize = 0;
-        while (it.next()) |pair| : (i += 1) {
-            const env_buf = try arena.allocSentinel(u8, pair.key_ptr.len + pair.value_ptr.len + 1, 0);
-            @memcpy(env_buf[0..pair.key_ptr.len], pair.key_ptr.*);
-            env_buf[pair.key_ptr.len] = '=';
-            @memcpy(env_buf[pair.key_ptr.len + 1 ..][0..pair.value_ptr.len], pair.value_ptr.*);
-            envp_buf[i] = env_buf.ptr;
+        var it = map.iterator();
+        while (it.next()) |pair| {
+            if (mem.eql(u8, pair.key_ptr.*, "ZIG_PROGRESS")) switch (zig_progress_action) {
+                .add => unreachable,
+                .delete => continue,
+                .edit => {
+                    envp_buf[i] = try std.fmt.allocPrintZ(arena, "{s}={d}", .{
+                        pair.key_ptr.*, options.zig_progress_fd.?,
+                    });
+                    i += 1;
+                    continue;
+                },
+                .nothing => {},
+            };
+
+            envp_buf[i] = try std.fmt.allocPrintZ(arena, "{s}={s}", .{ pair.key_ptr.*, pair.value_ptr.* });
+            i += 1;
+        }
+    }
+
+    assert(i == envp_count);
+    return envp_buf;
+}
+
+/// Creates a null-delimited environment variable block in the format
+/// expected by POSIX, from an existing environment block plus options.
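+///
+/// For example, `ZIG_PROGRESS` can be stripped from the inherited environment
+/// with something like:
+///
+///     const envp = try createEnvironFromExisting(arena, std.c.environ, .{ .zig_progress_fd = -1 });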
+pub fn createEnvironFromExisting( + arena: Allocator, + existing: [*:null]const ?[*:0]const u8, + options: CreateEnvironOptions, +) Allocator.Error![:null]?[*:0]u8 { + const existing_count, const contains_zig_progress = c: { + var count: usize = 0; + var contains = false; + while (existing[count]) |line| : (count += 1) { + contains = contains or mem.eql(u8, mem.sliceTo(line, '='), "ZIG_PROGRESS"); + } + break :c .{ count, contains }; + }; + const ZigProgressAction = enum { nothing, edit, delete, add }; + const zig_progress_action: ZigProgressAction = a: { + const fd = options.zig_progress_fd orelse break :a .nothing; + if (fd >= 0) { + break :a if (contains_zig_progress) .edit else .add; + } else { + if (contains_zig_progress) break :a .delete; + } + break :a .nothing; + }; + + const envp_count: usize = c: { + var count: usize = existing_count; + switch (zig_progress_action) { + .add => count += 1, + .delete => count -= 1, + .nothing, .edit => {}, } - assert(i == envp_count); + break :c count; + }; + + const envp_buf = try arena.allocSentinel(?[*:0]u8, envp_count, null); + var i: usize = 0; + var existing_index: usize = 0; + + if (zig_progress_action == .add) { + envp_buf[i] = try std.fmt.allocPrintZ(arena, "ZIG_PROGRESS={d}", .{options.zig_progress_fd.?}); + i += 1; + } + + while (existing[existing_index]) |line| : (existing_index += 1) { + if (mem.eql(u8, mem.sliceTo(line, '='), "ZIG_PROGRESS")) switch (zig_progress_action) { + .add => unreachable, + .delete => continue, + .edit => { + envp_buf[i] = try std.fmt.allocPrintZ(arena, "ZIG_PROGRESS={d}", .{options.zig_progress_fd.?}); + i += 1; + continue; + }, + .nothing => {}, + }; + envp_buf[i] = try arena.dupeZ(u8, mem.span(line)); + i += 1; } + + assert(i == envp_count); return envp_buf; } +pub fn createNullDelimitedEnvMap(arena: mem.Allocator, env_map: *const EnvMap) Allocator.Error![:null]?[*:0]u8 { + return createEnvironFromMap(arena, env_map, .{}); +} + test createNullDelimitedEnvMap { const allocator = testing.allocator; var envmap = EnvMap.init(allocator); diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index 48ab67800e97..0599763c67e0 100644 --- a/lib/std/process/Child.zig +++ b/lib/std/process/Child.zig @@ -12,6 +12,7 @@ const EnvMap = std.process.EnvMap; const maxInt = std.math.maxInt; const assert = std.debug.assert; const native_os = builtin.os.tag; +const Allocator = std.mem.Allocator; const ChildProcess = @This(); pub const Id = switch (native_os) { @@ -92,6 +93,16 @@ request_resource_usage_statistics: bool = false, /// `spawn`. resource_usage_statistics: ResourceUsageStatistics = .{}, +/// When populated, a pipe will be created for the child process to +/// communicate progress back to the parent. The file descriptor of the +/// write end of the pipe will be specified in the `ZIG_PROGRESS` +/// environment variable inside the child process. The progress reported by +/// the child will be attached to this progress node in the parent process. +/// +/// The child's progress tree will be grafted into the parent's progress tree, +/// by substituting this node with the child's root node. 
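+///
+/// For example, a parent that wants to observe the child's progress can set
+/// this before spawning:
+///
+///     child.progress_node = node;
+///     try child.spawn();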
+progress_node: std.Progress.Node = .{ .index = .none }, + pub const ResourceUsageStatistics = struct { rusage: @TypeOf(rusage_init) = rusage_init, @@ -205,9 +216,9 @@ pub fn init(argv: []const []const u8, allocator: mem.Allocator) ChildProcess { .stdin = null, .stdout = null, .stderr = null, - .stdin_behavior = StdIo.Inherit, - .stdout_behavior = StdIo.Inherit, - .stderr_behavior = StdIo.Inherit, + .stdin_behavior = .Inherit, + .stdout_behavior = .Inherit, + .stderr_behavior = .Inherit, .expand_arg0 = .no_expand, }; } @@ -538,22 +549,22 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { // turns out, we `dup2` everything anyway, so there's no need! const pipe_flags: posix.O = .{ .CLOEXEC = true }; - const stdin_pipe = if (self.stdin_behavior == StdIo.Pipe) try posix.pipe2(pipe_flags) else undefined; - errdefer if (self.stdin_behavior == StdIo.Pipe) { + const stdin_pipe = if (self.stdin_behavior == .Pipe) try posix.pipe2(pipe_flags) else undefined; + errdefer if (self.stdin_behavior == .Pipe) { destroyPipe(stdin_pipe); }; - const stdout_pipe = if (self.stdout_behavior == StdIo.Pipe) try posix.pipe2(pipe_flags) else undefined; - errdefer if (self.stdout_behavior == StdIo.Pipe) { + const stdout_pipe = if (self.stdout_behavior == .Pipe) try posix.pipe2(pipe_flags) else undefined; + errdefer if (self.stdout_behavior == .Pipe) { destroyPipe(stdout_pipe); }; - const stderr_pipe = if (self.stderr_behavior == StdIo.Pipe) try posix.pipe2(pipe_flags) else undefined; - errdefer if (self.stderr_behavior == StdIo.Pipe) { + const stderr_pipe = if (self.stderr_behavior == .Pipe) try posix.pipe2(pipe_flags) else undefined; + errdefer if (self.stderr_behavior == .Pipe) { destroyPipe(stderr_pipe); }; - const any_ignore = (self.stdin_behavior == StdIo.Ignore or self.stdout_behavior == StdIo.Ignore or self.stderr_behavior == StdIo.Ignore); + const any_ignore = (self.stdin_behavior == .Ignore or self.stdout_behavior == .Ignore or self.stderr_behavior == .Ignore); const dev_null_fd = if (any_ignore) posix.openZ("/dev/null", .{ .ACCMODE = .RDWR }, 0) catch |err| switch (err) { error.PathAlreadyExists => unreachable, @@ -572,6 +583,16 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { if (any_ignore) posix.close(dev_null_fd); } + const prog_pipe: [2]posix.fd_t = p: { + if (self.progress_node.index == .none) { + break :p .{ -1, -1 }; + } else { + // We use CLOEXEC for the same reason as in `pipe_flags`. + break :p try posix.pipe2(.{ .NONBLOCK = true, .CLOEXEC = true }); + } + }; + errdefer destroyPipe(prog_pipe); + var arena_allocator = std.heap.ArenaAllocator.init(self.allocator); defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); @@ -588,16 +609,25 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { const argv_buf = try arena.allocSentinel(?[*:0]const u8, self.argv.len, null); for (self.argv, 0..) 
|arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr; - const envp = m: { + const prog_fileno = 3; + comptime assert(@max(posix.STDIN_FILENO, posix.STDOUT_FILENO, posix.STDERR_FILENO) + 1 == prog_fileno); + + const envp: [*:null]const ?[*:0]const u8 = m: { + const prog_fd: i32 = if (prog_pipe[1] == -1) -1 else prog_fileno; if (self.env_map) |env_map| { - const envp_buf = try process.createNullDelimitedEnvMap(arena, env_map); - break :m envp_buf.ptr; + break :m (try process.createEnvironFromMap(arena, env_map, .{ + .zig_progress_fd = prog_fd, + })).ptr; } else if (builtin.link_libc) { - break :m std.c.environ; + break :m (try process.createEnvironFromExisting(arena, std.c.environ, .{ + .zig_progress_fd = prog_fd, + })).ptr; } else if (builtin.output_mode == .Exe) { // Then we have Zig start code and this works. // TODO type-safety for null-termination of `os.environ`. - break :m @as([*:null]const ?[*:0]const u8, @ptrCast(std.os.environ.ptr)); + break :m (try process.createEnvironFromExisting(arena, @ptrCast(std.os.environ.ptr), .{ + .zig_progress_fd = prog_fd, + })).ptr; } else { // TODO come up with a solution for this. @compileError("missing std lib enhancement: ChildProcess implementation has no way to collect the environment variables to forward to the child process"); @@ -631,6 +661,10 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { posix.chdir(cwd) catch |err| forkChildErrReport(err_pipe[1], err); } + // Must happen after fchdir above, the cwd file descriptor might be + // equal to prog_fileno and be clobbered by this dup2 call. + if (prog_pipe[1] != -1) posix.dup2(prog_pipe[1], prog_fileno) catch |err| forkChildErrReport(err_pipe[1], err); + if (self.gid) |gid| { posix.setregid(gid, gid) catch |err| forkChildErrReport(err_pipe[1], err); } @@ -648,18 +682,18 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { // we are the parent const pid: i32 = @intCast(pid_result); - if (self.stdin_behavior == StdIo.Pipe) { - self.stdin = File{ .handle = stdin_pipe[1] }; + if (self.stdin_behavior == .Pipe) { + self.stdin = .{ .handle = stdin_pipe[1] }; } else { self.stdin = null; } - if (self.stdout_behavior == StdIo.Pipe) { - self.stdout = File{ .handle = stdout_pipe[0] }; + if (self.stdout_behavior == .Pipe) { + self.stdout = .{ .handle = stdout_pipe[0] }; } else { self.stdout = null; } - if (self.stderr_behavior == StdIo.Pipe) { - self.stderr = File{ .handle = stderr_pipe[0] }; + if (self.stderr_behavior == .Pipe) { + self.stderr = .{ .handle = stderr_pipe[0] }; } else { self.stderr = null; } @@ -668,15 +702,20 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void { self.err_pipe = err_pipe; self.term = null; - if (self.stdin_behavior == StdIo.Pipe) { + if (self.stdin_behavior == .Pipe) { posix.close(stdin_pipe[0]); } - if (self.stdout_behavior == StdIo.Pipe) { + if (self.stdout_behavior == .Pipe) { posix.close(stdout_pipe[1]); } - if (self.stderr_behavior == StdIo.Pipe) { + if (self.stderr_behavior == .Pipe) { posix.close(stderr_pipe[1]); } + + if (prog_pipe[1] != -1) { + posix.close(prog_pipe[1]); + } + self.progress_node.setIpcFd(prog_pipe[0]); } fn spawnWindows(self: *ChildProcess) SpawnError!void { @@ -962,7 +1001,7 @@ fn setUpChildIo(stdio: StdIo, pipe_fd: i32, std_fileno: i32, dev_null_fd: i32) ! 
} fn destroyPipe(pipe: [2]posix.fd_t) void { - posix.close(pipe[0]); + if (pipe[0] != -1) posix.close(pipe[0]); if (pipe[0] != pipe[1]) posix.close(pipe[1]); } diff --git a/lib/std/zig.zig b/lib/std/zig.zig index 03921ba77312..26c06780820f 100644 --- a/lib/std/zig.zig +++ b/lib/std/zig.zig @@ -718,7 +718,7 @@ pub const LazySrcLoc = union(enum) { /// where in semantic analysis the value got set. pub const TracedOffset = struct { x: i32, - trace: std.debug.Trace = .{}, + trace: std.debug.Trace = std.debug.Trace.init, const want_tracing = false; }; diff --git a/lib/std/zig/ErrorBundle.zig b/lib/std/zig/ErrorBundle.zig index 5a6651b24847..2d69ff901dbb 100644 --- a/lib/std/zig/ErrorBundle.zig +++ b/lib/std/zig/ErrorBundle.zig @@ -155,8 +155,8 @@ pub const RenderOptions = struct { }; pub fn renderToStdErr(eb: ErrorBundle, options: RenderOptions) void { - std.debug.getStderrMutex().lock(); - defer std.debug.getStderrMutex().unlock(); + std.debug.lockStdErr(); + defer std.debug.unlockStdErr(); const stderr = std.io.getStdErr(); return renderToWriter(eb, options, stderr.writer()) catch return; } diff --git a/lib/std/zig/Server.zig b/lib/std/zig/Server.zig index 10e14a55fc68..7f8de00b4aeb 100644 --- a/lib/std/zig/Server.zig +++ b/lib/std/zig/Server.zig @@ -14,8 +14,6 @@ pub const Message = struct { zig_version, /// Body is an ErrorBundle. error_bundle, - /// Body is a UTF-8 string. - progress, /// Body is a EmitBinPath. emit_bin_path, /// Body is a TestMetadata diff --git a/src/Compilation.zig b/src/Compilation.zig index 03b981812e35..507cbfc6d55a 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -1273,8 +1273,8 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil if (options.verbose_llvm_cpu_features) { if (options.root_mod.resolved_target.llvm_cpu_features) |cf| print: { const target = options.root_mod.resolved_target.result; - std.debug.getStderrMutex().lock(); - defer std.debug.getStderrMutex().unlock(); + std.debug.lockStdErr(); + defer std.debug.unlockStdErr(); const stderr = std.io.getStdErr().writer(); nosuspend { stderr.print("compilation: {s}\n", .{options.root_name}) catch break :print; @@ -1934,7 +1934,7 @@ pub fn getTarget(self: Compilation) Target { /// Only legal to call when cache mode is incremental and a link file is present. pub fn hotCodeSwap( comp: *Compilation, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, pid: std.process.Child.Id, ) !void { const lf = comp.bin_file.?; @@ -1966,7 +1966,7 @@ fn cleanupAfterUpdate(comp: *Compilation) void { } /// Detect changes to source files, perform semantic analysis, and update the output files. -pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void { +pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { const tracy_trace = trace(@src()); defer tracy_trace.end(); @@ -2256,7 +2256,7 @@ pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void } } -fn flush(comp: *Compilation, arena: Allocator, prog_node: *std.Progress.Node) !void { +fn flush(comp: *Compilation, arena: Allocator, prog_node: std.Progress.Node) !void { if (comp.bin_file) |lf| { // This is needed before reading the error flags. 
lf.flush(arena, prog_node) catch |err| switch (err) { @@ -2566,13 +2566,11 @@ pub fn emitLlvmObject( default_emit: Emit, bin_emit_loc: ?EmitLoc, llvm_object: *LlvmObject, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, ) !void { if (build_options.only_c) @compileError("unreachable"); - var sub_prog_node = prog_node.start("LLVM Emit Object", 0); - sub_prog_node.activate(); - sub_prog_node.context.refresh(); + const sub_prog_node = prog_node.start("LLVM Emit Object", 0); defer sub_prog_node.end(); try llvm_object.emit(.{ @@ -3249,32 +3247,20 @@ pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Module.File) !void { pub fn performAllTheWork( comp: *Compilation, - main_progress_node: *std.Progress.Node, + main_progress_node: std.Progress.Node, ) error{ TimerUnsupported, OutOfMemory }!void { // Here we queue up all the AstGen tasks first, followed by C object compilation. // We wait until the AstGen tasks are all completed before proceeding to the // (at least for now) single-threaded main work queue. However, C object compilation // only needs to be finished by the end of this function. - var zir_prog_node = main_progress_node.start("AST Lowering", 0); - defer zir_prog_node.end(); - - var wasm_prog_node = main_progress_node.start("Compile Autodocs", 0); - defer wasm_prog_node.end(); - - var c_obj_prog_node = main_progress_node.start("Compile C Objects", comp.c_source_files.len); - defer c_obj_prog_node.end(); - - var win32_resource_prog_node = main_progress_node.start("Compile Win32 Resources", comp.rc_source_files.len); - defer win32_resource_prog_node.end(); - comp.work_queue_wait_group.reset(); defer comp.work_queue_wait_group.wait(); if (!build_options.only_c and !build_options.only_core_functionality) { if (comp.docs_emit != null) { comp.thread_pool.spawnWg(&comp.work_queue_wait_group, workerDocsCopy, .{comp}); - comp.work_queue_wait_group.spawnManager(workerDocsWasm, .{ comp, &wasm_prog_node }); + comp.work_queue_wait_group.spawnManager(workerDocsWasm, .{ comp, main_progress_node }); } } @@ -3282,6 +3268,9 @@ pub fn performAllTheWork( const astgen_frame = tracy.namedFrame("astgen"); defer astgen_frame.end(); + const zir_prog_node = main_progress_node.start("AST Lowering", 0); + defer zir_prog_node.end(); + comp.astgen_wait_group.reset(); defer comp.astgen_wait_group.wait(); @@ -3313,7 +3302,7 @@ pub fn performAllTheWork( while (comp.astgen_work_queue.readItem()) |file| { comp.thread_pool.spawnWg(&comp.astgen_wait_group, workerAstGenFile, .{ - comp, file, &zir_prog_node, &comp.astgen_wait_group, .root, + comp, file, zir_prog_node, &comp.astgen_wait_group, .root, }); } @@ -3325,14 +3314,14 @@ pub fn performAllTheWork( while (comp.c_object_work_queue.readItem()) |c_object| { comp.thread_pool.spawnWg(&comp.work_queue_wait_group, workerUpdateCObject, .{ - comp, c_object, &c_obj_prog_node, + comp, c_object, main_progress_node, }); } if (!build_options.only_core_functionality) { while (comp.win32_resource_work_queue.readItem()) |win32_resource| { comp.thread_pool.spawnWg(&comp.work_queue_wait_group, workerUpdateWin32Resource, .{ - comp, win32_resource, &win32_resource_prog_node, + comp, win32_resource, main_progress_node, }); } } @@ -3342,11 +3331,13 @@ pub fn performAllTheWork( try reportMultiModuleErrors(mod); try mod.flushRetryableFailures(); mod.sema_prog_node = main_progress_node.start("Semantic Analysis", 0); - mod.sema_prog_node.activate(); + mod.codegen_prog_node = main_progress_node.start("Code Generation", 0); } defer if (comp.module) |mod| { 
mod.sema_prog_node.end(); mod.sema_prog_node = undefined; + mod.codegen_prog_node.end(); + mod.codegen_prog_node = undefined; }; while (true) { @@ -3379,7 +3370,7 @@ pub fn performAllTheWork( } } -fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !void { +fn processOneJob(comp: *Compilation, job: Job, prog_node: std.Progress.Node) !void { switch (job) { .codegen_decl => |decl_index| { const module = comp.module.?; @@ -3803,7 +3794,10 @@ fn docsCopyModule(comp: *Compilation, module: *Package.Module, name: []const u8, } } -fn workerDocsWasm(comp: *Compilation, prog_node: *std.Progress.Node) void { +fn workerDocsWasm(comp: *Compilation, parent_prog_node: std.Progress.Node) void { + const prog_node = parent_prog_node.start("Compile Autodocs", 0); + defer prog_node.end(); + workerDocsWasmFallible(comp, prog_node) catch |err| { comp.lockAndSetMiscFailure(.docs_wasm, "unable to build autodocs: {s}", .{ @errorName(err), @@ -3811,7 +3805,7 @@ fn workerDocsWasm(comp: *Compilation, prog_node: *std.Progress.Node) void { }; } -fn workerDocsWasmFallible(comp: *Compilation, prog_node: *std.Progress.Node) anyerror!void { +fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) anyerror!void { const gpa = comp.gpa; var arena_allocator = std.heap.ArenaAllocator.init(gpa); @@ -3952,12 +3946,11 @@ const AstGenSrc = union(enum) { fn workerAstGenFile( comp: *Compilation, file: *Module.File, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, wg: *WaitGroup, src: AstGenSrc, ) void { - var child_prog_node = prog_node.start(file.sub_file_path, 0); - child_prog_node.activate(); + const child_prog_node = prog_node.start(file.sub_file_path, 0); defer child_prog_node.end(); const mod = comp.module.?; @@ -4265,7 +4258,7 @@ pub fn cImport(comp: *Compilation, c_src: []const u8, owner_mod: *Package.Module fn workerUpdateCObject( comp: *Compilation, c_object: *CObject, - progress_node: *std.Progress.Node, + progress_node: std.Progress.Node, ) void { comp.updateCObject(c_object, progress_node) catch |err| switch (err) { error.AnalysisFail => return, @@ -4282,7 +4275,7 @@ fn workerUpdateCObject( fn workerUpdateWin32Resource( comp: *Compilation, win32_resource: *Win32Resource, - progress_node: *std.Progress.Node, + progress_node: std.Progress.Node, ) void { comp.updateWin32Resource(win32_resource, progress_node) catch |err| switch (err) { error.AnalysisFail => return, @@ -4300,7 +4293,7 @@ fn buildCompilerRtOneShot( comp: *Compilation, output_mode: std.builtin.OutputMode, out: *?CRTFile, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, ) void { comp.buildOutputFromZig( "compiler_rt.zig", @@ -4427,7 +4420,7 @@ fn reportRetryableEmbedFileError( } } -fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.Progress.Node) !void { +fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Progress.Node) !void { if (comp.config.c_frontend == .aro) { return comp.failCObj(c_object, "aro does not support compiling C objects yet", .{}); } @@ -4467,9 +4460,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P const c_source_basename = std.fs.path.basename(c_object.src.src_path); - c_obj_prog_node.activate(); - var child_progress_node = c_obj_prog_node.start(c_source_basename, 0); - child_progress_node.activate(); + const child_progress_node = c_obj_prog_node.start(c_source_basename, 0); defer child_progress_node.end(); // Special case when doing build-obj for just one C file. 
When there are more than one object @@ -4731,7 +4722,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P }; } -fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32_resource_prog_node: *std.Progress.Node) !void { +fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32_resource_prog_node: std.Progress.Node) !void { if (!std.process.can_spawn) { return comp.failWin32Resource(win32_resource, "{s} does not support spawning a child process", .{@tagName(builtin.os.tag)}); } @@ -4763,9 +4754,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32 _ = comp.failed_win32_resources.swapRemove(win32_resource); } - win32_resource_prog_node.activate(); - var child_progress_node = win32_resource_prog_node.start(src_basename, 0); - child_progress_node.activate(); + const child_progress_node = win32_resource_prog_node.start(src_basename, 0); defer child_progress_node.end(); var man = comp.obtainWin32ResourceCacheManifest(); @@ -4833,7 +4822,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32 }); try argv.appendSlice(&.{ "--", in_rc_path, out_res_path }); - try spawnZigRc(comp, win32_resource, src_basename, arena, argv.items, &child_progress_node); + try spawnZigRc(comp, win32_resource, arena, argv.items, child_progress_node); break :blk digest; }; @@ -4901,7 +4890,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32 try argv.appendSlice(rc_src.extra_flags); try argv.appendSlice(&.{ "--", rc_src.src_path, out_res_path }); - try spawnZigRc(comp, win32_resource, src_basename, arena, argv.items, &child_progress_node); + try spawnZigRc(comp, win32_resource, arena, argv.items, child_progress_node); // Read depfile and update cache manifest { @@ -4966,10 +4955,9 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32 fn spawnZigRc( comp: *Compilation, win32_resource: *Win32Resource, - src_basename: []const u8, arena: Allocator, argv: []const []const u8, - child_progress_node: *std.Progress.Node, + child_progress_node: std.Progress.Node, ) !void { var node_name: std.ArrayListUnmanaged(u8) = .{}; defer node_name.deinit(arena); @@ -4978,6 +4966,7 @@ fn spawnZigRc( child.stdin_behavior = .Ignore; child.stdout_behavior = .Pipe; child.stderr_behavior = .Pipe; + child.progress_node = child_progress_node; child.spawn() catch |err| { return comp.failWin32Resource(win32_resource, "unable to spawn {s} rc: {s}", .{ argv[0], @errorName(err) }); @@ -5019,22 +5008,6 @@ fn spawnZigRc( }; return comp.failWin32ResourceWithOwnedBundle(win32_resource, error_bundle); }, - .progress => { - node_name.clearRetainingCapacity(); - // is a special string that indicates that the child - // process has reached resinator's main function - if (std.mem.eql(u8, body, "")) { - child_progress_node.setName(src_basename); - } - // Ignore 0-length strings since if multiple zig rc commands - // are executed at the same time, only one will send progress strings - // while the other(s) will send empty strings. - else if (body.len > 0) { - try node_name.appendSlice(arena, "build 'zig rc'... 
"); - try node_name.appendSlice(arena, body); - child_progress_node.setName(node_name.items); - } - }, else => {}, // ignore other messages } @@ -5937,8 +5910,8 @@ pub fn lockAndParseLldStderr(comp: *Compilation, prefix: []const u8, stderr: []c } pub fn dump_argv(argv: []const []const u8) void { - std.debug.getStderrMutex().lock(); - defer std.debug.getStderrMutex().unlock(); + std.debug.lockStdErr(); + defer std.debug.unlockStdErr(); const stderr = std.io.getStdErr().writer(); for (argv[0 .. argv.len - 1]) |arg| { nosuspend stderr.print("{s} ", .{arg}) catch return; @@ -5989,14 +5962,13 @@ pub fn updateSubCompilation( parent_comp: *Compilation, sub_comp: *Compilation, misc_task: MiscTask, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, ) !void { { - var sub_node = prog_node.start(@tagName(misc_task), 0); - sub_node.activate(); + const sub_node = prog_node.start(@tagName(misc_task), 0); defer sub_node.end(); - try sub_comp.update(prog_node); + try sub_comp.update(sub_node); } // Look for compilation errors in this sub compilation @@ -6024,7 +5996,7 @@ fn buildOutputFromZig( output_mode: std.builtin.OutputMode, out: *?CRTFile, misc_task_tag: MiscTask, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, ) !void { const tracy_trace = trace(@src()); defer tracy_trace.end(); @@ -6131,7 +6103,7 @@ pub fn build_crt_file( root_name: []const u8, output_mode: std.builtin.OutputMode, misc_task_tag: MiscTask, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, /// These elements have to get mutated to add the owner module after it is /// created within this function. c_source_files: []CSourceFile, diff --git a/src/Module.zig b/src/Module.zig index c571c851fe57..ef410fad4e33 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -66,6 +66,7 @@ root_mod: *Package.Module, main_mod: *Package.Module, std_mod: *Package.Module, sema_prog_node: std.Progress.Node = undefined, +codegen_prog_node: std.Progress.Node = undefined, /// Used by AstGen worker to load and store ZIR cache. global_zir_cache: Compilation.Directory, @@ -2942,11 +2943,12 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { const tracy = trace(@src()); defer tracy.end(); + const ip = &mod.intern_pool; const decl = mod.declPtr(decl_index); log.debug("ensureDeclAnalyzed '{d}' (name '{}')", .{ @intFromEnum(decl_index), - decl.name.fmt(&mod.intern_pool), + decl.name.fmt(ip), }); // Determine whether or not this Decl is outdated, i.e. requires re-analysis @@ -2991,10 +2993,6 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { try mod.deleteDeclExports(decl_index); } - var decl_prog_node = mod.sema_prog_node.start("", 0); - decl_prog_node.activate(); - defer decl_prog_node.end(); - const sema_result: SemaDeclResult = blk: { if (decl.zir_decl_index == .none and !mod.declIsRoot(decl_index)) { // Anonymous decl. We don't semantically analyze these. 
@@ -3012,6 +3010,9 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { }; } + const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(mod)).toSlice(ip), 0); + defer decl_prog_node.end(); + break :blk mod.semaDecl(decl_index) catch |err| switch (err) { error.AnalysisFail => { if (decl.analysis == .in_progress) { @@ -3215,6 +3216,9 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In }; } + const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(zcu)).toSlice(ip), 0); + defer codegen_prog_node.end(); + if (comp.bin_file) |lf| { lf.updateFunc(zcu, func_index, air, liveness) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, @@ -4500,6 +4504,9 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato log.debug("finish func name '{}'", .{(decl.fullyQualifiedName(mod) catch break :blk).fmt(ip)}); } + const decl_prog_node = mod.sema_prog_node.start((try decl.fullyQualifiedName(mod)).toSlice(ip), 0); + defer decl_prog_node.end(); + mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.Depender.wrap(.{ .func = func_index })); var comptime_err_ret_trace = std.ArrayList(SrcLoc).init(gpa); @@ -5316,7 +5323,7 @@ fn handleUpdateExports( pub fn populateTestFunctions( mod: *Module, - main_progress_node: *std.Progress.Node, + main_progress_node: std.Progress.Node, ) !void { const gpa = mod.gpa; const ip = &mod.intern_pool; @@ -5333,13 +5340,13 @@ pub fn populateTestFunctions( // We have to call `ensureDeclAnalyzed` here in case `builtin.test_functions` // was not referenced by start code. mod.sema_prog_node = main_progress_node.start("Semantic Analysis", 0); - mod.sema_prog_node.activate(); defer { mod.sema_prog_node.end(); mod.sema_prog_node = undefined; } try mod.ensureDeclAnalyzed(decl_index); } + const decl = mod.declPtr(decl_index); const test_fn_ty = decl.typeOf(mod).slicePtrFieldType(mod).childType(mod); @@ -5440,21 +5447,32 @@ pub fn populateTestFunctions( decl.val = new_val; decl.has_tv = true; } - try mod.linkerUpdateDecl(decl_index); + { + mod.codegen_prog_node = main_progress_node.start("Code Generation", 0); + defer { + mod.codegen_prog_node.end(); + mod.codegen_prog_node = undefined; + } + + try mod.linkerUpdateDecl(decl_index); + } } pub fn linkerUpdateDecl(zcu: *Zcu, decl_index: Decl.Index) !void { const comp = zcu.comp; + const decl = zcu.declPtr(decl_index); + + const codegen_prog_node = zcu.codegen_prog_node.start((try decl.fullyQualifiedName(zcu)).toSlice(&zcu.intern_pool), 0); + defer codegen_prog_node.end(); + if (comp.bin_file) |lf| { lf.updateDecl(zcu, decl_index) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => { - const decl = zcu.declPtr(decl_index); decl.analysis = .codegen_failure; }, else => { - const decl = zcu.declPtr(decl_index); const gpa = zcu.gpa; try zcu.failed_decls.ensureUnusedCapacity(gpa, 1); zcu.failed_decls.putAssumeCapacityNoClobber(decl_index, try ErrorMsg.create( @@ -5472,7 +5490,6 @@ pub fn linkerUpdateDecl(zcu: *Zcu, decl_index: Decl.Index) !void { llvm_object.updateDecl(zcu, decl_index) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => { - const decl = zcu.declPtr(decl_index); decl.analysis = .codegen_failure; }, }; diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig index 97cbd36bd0b5..6528c2a53ffa 100644 --- a/src/Package/Fetch.zig +++ b/src/Package/Fetch.zig @@ -35,7 +35,7 @@ name_tok: 
std.zig.Ast.TokenIndex, lazy_status: LazyStatus, parent_package_root: Cache.Path, parent_manifest_ast: ?*const std.zig.Ast, -prog_node: *std.Progress.Node, +prog_node: std.Progress.Node, job_queue: *JobQueue, /// If true, don't add an error for a missing hash. This flag is not passed /// down to recursive dependencies. It's intended to be used only be the CLI. @@ -720,8 +720,7 @@ fn queueJobsForDeps(f: *Fetch) RunError!void { }; } - // job_queue mutex is locked so this is OK. - f.prog_node.unprotected_estimated_total_items += new_fetch_index; + f.prog_node.increaseEstimatedTotalItems(new_fetch_index); break :nf .{ new_fetches[0..new_fetch_index], prog_names[0..new_fetch_index] }; }; @@ -751,9 +750,8 @@ pub fn relativePathDigest( } pub fn workerRun(f: *Fetch, prog_name: []const u8) void { - var prog_node = f.prog_node.start(prog_name, 0); + const prog_node = f.prog_node.start(prog_name, 0); defer prog_node.end(); - prog_node.activate(); run(f) catch |err| switch (err) { error.OutOfMemory => f.oom_flag = true, @@ -1311,9 +1309,8 @@ fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource) anyerror!Unpac var index_file = try pack_dir.createFile("pkg.idx", .{ .read = true }); defer index_file.close(); { - var index_prog_node = f.prog_node.start("Index pack", 0); + const index_prog_node = f.prog_node.start("Index pack", 0); defer index_prog_node.end(); - index_prog_node.activate(); var index_buffered_writer = std.io.bufferedWriter(index_file.writer()); try git.indexPack(gpa, pack_file, index_buffered_writer.writer()); try index_buffered_writer.flush(); @@ -1321,9 +1318,8 @@ fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource) anyerror!Unpac } { - var checkout_prog_node = f.prog_node.start("Checkout", 0); + const checkout_prog_node = f.prog_node.start("Checkout", 0); defer checkout_prog_node.end(); - checkout_prog_node.activate(); var repository = try git.Repository.init(gpa, pack_file, index_file); defer repository.deinit(); var diagnostics: git.Diagnostics = .{ .allocator = arena }; diff --git a/src/glibc.zig b/src/glibc.zig index 5ec0442d6a8e..6474a23dce02 100644 --- a/src/glibc.zig +++ b/src/glibc.zig @@ -160,7 +160,7 @@ pub const CRTFile = enum { libc_nonshared_a, }; -pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: *std.Progress.Node) !void { +pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: std.Progress.Node) !void { if (!build_options.have_llvm) { return error.ZigCompilerNotBuiltWithLLVMExtensions; } @@ -658,7 +658,7 @@ pub const BuiltSharedObjects = struct { const all_map_basename = "all.map"; -pub fn buildSharedObjects(comp: *Compilation, prog_node: *std.Progress.Node) !void { +pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) !void { const tracy = trace(@src()); defer tracy.end(); @@ -1065,7 +1065,7 @@ fn buildSharedLib( bin_directory: Compilation.Directory, asm_file_basename: []const u8, lib: Lib, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, ) !void { const tracy = trace(@src()); defer tracy.end(); diff --git a/src/libcxx.zig b/src/libcxx.zig index b1b2014cb576..1c48f775271b 100644 --- a/src/libcxx.zig +++ b/src/libcxx.zig @@ -113,7 +113,7 @@ pub const BuildError = error{ ZigCompilerNotBuiltWithLLVMExtensions, }; -pub fn buildLibCXX(comp: *Compilation, prog_node: *std.Progress.Node) BuildError!void { +pub fn buildLibCXX(comp: *Compilation, prog_node: std.Progress.Node) BuildError!void { if (!build_options.have_llvm) { return error.ZigCompilerNotBuiltWithLLVMExtensions; } @@ 
-357,7 +357,7 @@ pub fn buildLibCXX(comp: *Compilation, prog_node: *std.Progress.Node) BuildError comp.libcxx_static_lib = try sub_compilation.toCrtFile(); } -pub fn buildLibCXXABI(comp: *Compilation, prog_node: *std.Progress.Node) BuildError!void { +pub fn buildLibCXXABI(comp: *Compilation, prog_node: std.Progress.Node) BuildError!void { if (!build_options.have_llvm) { return error.ZigCompilerNotBuiltWithLLVMExtensions; } diff --git a/src/libtsan.zig b/src/libtsan.zig index 28dba65772fd..1aa32e6ff07a 100644 --- a/src/libtsan.zig +++ b/src/libtsan.zig @@ -13,7 +13,7 @@ pub const BuildError = error{ TSANUnsupportedCPUArchitecture, }; -pub fn buildTsan(comp: *Compilation, prog_node: *std.Progress.Node) BuildError!void { +pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!void { if (!build_options.have_llvm) { return error.ZigCompilerNotBuiltWithLLVMExtensions; } diff --git a/src/libunwind.zig b/src/libunwind.zig index 808ea298ab9c..77838768272b 100644 --- a/src/libunwind.zig +++ b/src/libunwind.zig @@ -14,7 +14,7 @@ pub const BuildError = error{ ZigCompilerNotBuiltWithLLVMExtensions, }; -pub fn buildStaticLib(comp: *Compilation, prog_node: *std.Progress.Node) BuildError!void { +pub fn buildStaticLib(comp: *Compilation, prog_node: std.Progress.Node) BuildError!void { if (!build_options.have_llvm) { return error.ZigCompilerNotBuiltWithLLVMExtensions; } diff --git a/src/link.zig b/src/link.zig index c05b545a892e..ef09639dcffd 100644 --- a/src/link.zig +++ b/src/link.zig @@ -535,7 +535,7 @@ pub const File = struct { /// Commit pending changes and write headers. Takes into account final output mode /// and `use_lld`, not only `effectiveOutputMode`. /// `arena` has the lifetime of the call to `Compilation.update`. - pub fn flush(base: *File, arena: Allocator, prog_node: *std.Progress.Node) FlushError!void { + pub fn flush(base: *File, arena: Allocator, prog_node: std.Progress.Node) FlushError!void { if (build_options.only_c) { assert(base.tag == .c); return @as(*C, @fieldParentPtr("base", base)).flush(arena, prog_node); @@ -572,7 +572,7 @@ pub const File = struct { /// Commit pending changes and write headers. Works based on `effectiveOutputMode` /// rather than final output mode. 
- pub fn flushModule(base: *File, arena: Allocator, prog_node: *std.Progress.Node) FlushError!void { + pub fn flushModule(base: *File, arena: Allocator, prog_node: std.Progress.Node) FlushError!void { switch (base.tag) { inline else => |tag| { if (tag != .c and build_options.only_c) unreachable; @@ -688,7 +688,7 @@ pub const File = struct { } } - pub fn linkAsArchive(base: *File, arena: Allocator, prog_node: *std.Progress.Node) FlushError!void { + pub fn linkAsArchive(base: *File, arena: Allocator, prog_node: std.Progress.Node) FlushError!void { const tracy = trace(@src()); defer tracy.end(); @@ -966,7 +966,7 @@ pub const File = struct { base: File, arena: Allocator, llvm_object: *LlvmObject, - prog_node: *std.Progress.Node, + prog_node: std.Progress.Node, ) !void { return base.comp.emitLlvmObject(arena, base.emit, .{ .directory = null, diff --git a/src/link/C.zig b/src/link/C.zig index 07814c9e7101..305af4015639 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -370,7 +370,7 @@ pub fn updateDeclLineNumber(self: *C, zcu: *Zcu, decl_index: InternPool.DeclInde _ = decl_index; } -pub fn flush(self: *C, arena: Allocator, prog_node: *std.Progress.Node) !void { +pub fn flush(self: *C, arena: Allocator, prog_node: std.Progress.Node) !void { return self.flushModule(arena, prog_node); } @@ -389,14 +389,13 @@ fn abiDefines(self: *C, target: std.Target) !std.ArrayList(u8) { return defines; } -pub fn flushModule(self: *C, arena: Allocator, prog_node: *std.Progress.Node) !void { +pub fn flushModule(self: *C, arena: Allocator, prog_node: std.Progress.Node) !void { _ = arena; // Has the same lifetime as the call to Compilation.update. const tracy = trace(@src()); defer tracy.end(); - var sub_prog_node = prog_node.start("Flush Module", 0); - sub_prog_node.activate(); + const sub_prog_node = prog_node.start("Flush Module", 0); defer sub_prog_node.end(); const comp = self.base.comp; diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 31cfe1ca9d20..d24d69d913bd 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1702,7 +1702,7 @@ fn resolveGlobalSymbol(self: *Coff, current: SymbolWithLoc) !void { gop.value_ptr.* = current; } -pub fn flush(self: *Coff, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flush(self: *Coff, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { const comp = self.base.comp; const use_lld = build_options.have_llvm and comp.config.use_lld; if (use_lld) { @@ -1714,7 +1714,7 @@ pub fn flush(self: *Coff, arena: Allocator, prog_node: *std.Progress.Node) link. 
} } -pub fn flushModule(self: *Coff, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flushModule(self: *Coff, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1726,8 +1726,7 @@ pub fn flushModule(self: *Coff, arena: Allocator, prog_node: *std.Progress.Node) return; } - var sub_prog_node = prog_node.start("COFF Flush", 0); - sub_prog_node.activate(); + const sub_prog_node = prog_node.start("COFF Flush", 0); defer sub_prog_node.end(); const module = comp.module orelse return error.LinkingWithoutZigSourceUnimplemented; diff --git a/src/link/Coff/lld.zig b/src/link/Coff/lld.zig index 47753cbf0135..c2620c1fe930 100644 --- a/src/link/Coff/lld.zig +++ b/src/link/Coff/lld.zig @@ -16,7 +16,7 @@ const Allocator = mem.Allocator; const Coff = @import("../Coff.zig"); const Compilation = @import("../../Compilation.zig"); -pub fn linkWithLLD(self: *Coff, arena: Allocator, prog_node: *std.Progress.Node) !void { +pub fn linkWithLLD(self: *Coff, arena: Allocator, prog_node: std.Progress.Node) !void { const tracy = trace(@src()); defer tracy.end(); @@ -38,9 +38,7 @@ pub fn linkWithLLD(self: *Coff, arena: Allocator, prog_node: *std.Progress.Node) } } else null; - var sub_prog_node = prog_node.start("LLD Link", 0); - sub_prog_node.activate(); - sub_prog_node.context.refresh(); + const sub_prog_node = prog_node.start("LLD Link", 0); defer sub_prog_node.end(); const is_lib = comp.config.output_mode == .Lib; diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 8a3192f93ea3..eb27b4449ed8 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -1064,7 +1064,7 @@ pub fn markDirty(self: *Elf, shdr_index: u32) void { } } -pub fn flush(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flush(self: *Elf, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { const use_lld = build_options.have_llvm and self.base.comp.config.use_lld; if (use_lld) { return self.linkWithLLD(arena, prog_node); @@ -1072,7 +1072,7 @@ pub fn flush(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node) link.F try self.flushModule(arena, prog_node); } -pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flushModule(self: *Elf, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1085,8 +1085,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node) if (use_lld) return; } - var sub_prog_node = prog_node.start("ELF Flush", 0); - sub_prog_node.activate(); + const sub_prog_node = prog_node.start("ELF Flush", 0); defer sub_prog_node.end(); const target = comp.root_mod.resolved_target.result; @@ -2147,7 +2146,7 @@ fn scanRelocs(self: *Elf) !void { } } -fn linkWithLLD(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node) !void { +fn linkWithLLD(self: *Elf, arena: Allocator, prog_node: std.Progress.Node) !void { const tracy = trace(@src()); defer tracy.end(); @@ -2169,9 +2168,7 @@ fn linkWithLLD(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node) !voi } } else null; - var sub_prog_node = prog_node.start("LLD Link", 0); - sub_prog_node.activate(); - sub_prog_node.context.refresh(); + const sub_prog_node = prog_node.start("LLD Link", 0); defer sub_prog_node.end(); const output_mode = comp.config.output_mode; diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 
7bf195f5f9cb..947a2665de6f 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -360,11 +360,11 @@ pub fn deinit(self: *MachO) void { self.unwind_records.deinit(gpa); } -pub fn flush(self: *MachO, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flush(self: *MachO, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { try self.flushModule(arena, prog_node); } -pub fn flushModule(self: *MachO, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flushModule(self: *MachO, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { const tracy = trace(@src()); defer tracy.end(); @@ -375,8 +375,7 @@ pub fn flushModule(self: *MachO, arena: Allocator, prog_node: *std.Progress.Node try self.base.emitLlvmObject(arena, llvm_object, prog_node); } - var sub_prog_node = prog_node.start("MachO Flush", 0); - sub_prog_node.activate(); + const sub_prog_node = prog_node.start("MachO Flush", 0); defer sub_prog_node.end(); const directory = self.base.emit.directory; diff --git a/src/link/NvPtx.zig b/src/link/NvPtx.zig index 111b59fc3ba5..8e1ebc9726ee 100644 --- a/src/link/NvPtx.zig +++ b/src/link/NvPtx.zig @@ -106,11 +106,11 @@ pub fn freeDecl(self: *NvPtx, decl_index: InternPool.DeclIndex) void { return self.llvm_object.freeDecl(decl_index); } -pub fn flush(self: *NvPtx, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flush(self: *NvPtx, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { return self.flushModule(arena, prog_node); } -pub fn flushModule(self: *NvPtx, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flushModule(self: *NvPtx, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { if (build_options.skip_non_native) @panic("Attempted to compile for architecture that was disabled by build configuration"); diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index a45142a12ef4..328d669b585d 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -604,7 +604,7 @@ fn allocateGotIndex(self: *Plan9) usize { } } -pub fn flush(self: *Plan9, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flush(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { const comp = self.base.comp; const use_lld = build_options.have_llvm and comp.config.use_lld; assert(!use_lld); @@ -663,7 +663,7 @@ fn atomCount(self: *Plan9) usize { return data_decl_count + fn_decl_count + unnamed_const_count + lazy_atom_count + extern_atom_count + anon_atom_count; } -pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { if (build_options.skip_non_native and builtin.object_format != .plan9) { @panic("Attempted to compile for object format that was disabled by build configuration"); } @@ -677,8 +677,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, prog_node: *std.Progress.Node const tracy = trace(@src()); defer tracy.end(); - var sub_prog_node = prog_node.start("Flush Module", 0); - sub_prog_node.activate(); + const sub_prog_node = prog_node.start("Flush Module", 0); defer sub_prog_node.end(); log.debug("flushModule", .{}); diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index 27c905cc615e..0cc238f140af 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ 
-193,11 +193,11 @@ pub fn freeDecl(self: *SpirV, decl_index: InternPool.DeclIndex) void { _ = decl_index; } -pub fn flush(self: *SpirV, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flush(self: *SpirV, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { return self.flushModule(arena, prog_node); } -pub fn flushModule(self: *SpirV, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flushModule(self: *SpirV, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { if (build_options.skip_non_native) { @panic("Attempted to compile for architecture that was disabled by build configuration"); } @@ -205,8 +205,7 @@ pub fn flushModule(self: *SpirV, arena: Allocator, prog_node: *std.Progress.Node const tracy = trace(@src()); defer tracy.end(); - var sub_prog_node = prog_node.start("Flush Module", 0); - sub_prog_node.activate(); + const sub_prog_node = prog_node.start("Flush Module", 0); defer sub_prog_node.end(); const spv = &self.object.spv; @@ -253,7 +252,7 @@ pub fn flushModule(self: *SpirV, arena: Allocator, prog_node: *std.Progress.Node const module = try spv.finalize(arena, target); errdefer arena.free(module); - const linked_module = self.linkModule(arena, module, &sub_prog_node) catch |err| switch (err) { + const linked_module = self.linkModule(arena, module, sub_prog_node) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, else => |other| { log.err("error while linking: {s}\n", .{@errorName(other)}); @@ -264,7 +263,7 @@ pub fn flushModule(self: *SpirV, arena: Allocator, prog_node: *std.Progress.Node try self.base.file.?.writeAll(std.mem.sliceAsBytes(linked_module)); } -fn linkModule(self: *SpirV, a: Allocator, module: []Word, progress: *std.Progress.Node) ![]Word { +fn linkModule(self: *SpirV, a: Allocator, module: []Word, progress: std.Progress.Node) ![]Word { _ = self; const lower_invocation_globals = @import("SpirV/lower_invocation_globals.zig"); diff --git a/src/link/SpirV/deduplicate.zig b/src/link/SpirV/deduplicate.zig index 4cf5ebf65ac0..292ff0e86821 100644 --- a/src/link/SpirV/deduplicate.zig +++ b/src/link/SpirV/deduplicate.zig @@ -418,9 +418,8 @@ const EntityHashContext = struct { } }; -pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule, progress: *std.Progress.Node) !void { - var sub_node = progress.start("deduplicate", 0); - sub_node.activate(); +pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule, progress: std.Progress.Node) !void { + const sub_node = progress.start("deduplicate", 0); defer sub_node.end(); var arena = std.heap.ArenaAllocator.init(parser.a); diff --git a/src/link/SpirV/lower_invocation_globals.zig b/src/link/SpirV/lower_invocation_globals.zig index ee992112c8ee..edf16a7cd80f 100644 --- a/src/link/SpirV/lower_invocation_globals.zig +++ b/src/link/SpirV/lower_invocation_globals.zig @@ -682,9 +682,8 @@ const ModuleBuilder = struct { } }; -pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule, progress: *std.Progress.Node) !void { - var sub_node = progress.start("Lower invocation globals", 6); - sub_node.activate(); +pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule, progress: std.Progress.Node) !void { + const sub_node = progress.start("Lower invocation globals", 6); defer sub_node.end(); var arena = std.heap.ArenaAllocator.init(parser.a); diff --git a/src/link/SpirV/prune_unused.zig b/src/link/SpirV/prune_unused.zig index cefdaddd93c6..a604d62349f6 100644 --- 
a/src/link/SpirV/prune_unused.zig +++ b/src/link/SpirV/prune_unused.zig @@ -255,9 +255,8 @@ fn removeIdsFromMap(a: Allocator, map: anytype, info: ModuleInfo, alive_marker: } } -pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule, progress: *std.Progress.Node) !void { - var sub_node = progress.start("Prune unused IDs", 0); - sub_node.activate(); +pub fn run(parser: *BinaryModule.Parser, binary: *BinaryModule, progress: std.Progress.Node) !void { + const sub_node = progress.start("Prune unused IDs", 0); defer sub_node.end(); var arena = std.heap.ArenaAllocator.init(parser.a); diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index bf345813df2d..da6425326b76 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -2464,7 +2464,7 @@ fn appendDummySegment(wasm: *Wasm) !void { }); } -pub fn flush(wasm: *Wasm, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flush(wasm: *Wasm, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { const comp = wasm.base.comp; const use_lld = build_options.have_llvm and comp.config.use_lld; @@ -2475,7 +2475,7 @@ pub fn flush(wasm: *Wasm, arena: Allocator, prog_node: *std.Progress.Node) link. } /// Uses the in-house linker to link one or multiple object -and archive files into a WebAssembly binary. -pub fn flushModule(wasm: *Wasm, arena: Allocator, prog_node: *std.Progress.Node) link.File.FlushError!void { +pub fn flushModule(wasm: *Wasm, arena: Allocator, prog_node: std.Progress.Node) link.File.FlushError!void { const tracy = trace(@src()); defer tracy.end(); @@ -2486,8 +2486,7 @@ pub fn flushModule(wasm: *Wasm, arena: Allocator, prog_node: *std.Progress.Node) if (use_lld) return; } - var sub_prog_node = prog_node.start("Wasm Flush", 0); - sub_prog_node.activate(); + const sub_prog_node = prog_node.start("Wasm Flush", 0); defer sub_prog_node.end(); const directory = wasm.base.emit.directory; // Just an alias to make it shorter to type. 
@@ -3323,7 +3322,7 @@ fn emitImport(wasm: *Wasm, writer: anytype, import: types.Import) !void { } } -fn linkWithLLD(wasm: *Wasm, arena: Allocator, prog_node: *std.Progress.Node) !void { +fn linkWithLLD(wasm: *Wasm, arena: Allocator, prog_node: std.Progress.Node) !void { const tracy = trace(@src()); defer tracy.end(); @@ -3350,9 +3349,7 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, prog_node: *std.Progress.Node) !vo } } else null; - var sub_prog_node = prog_node.start("LLD Link", 0); - sub_prog_node.activate(); - sub_prog_node.context.refresh(); + const sub_prog_node = prog_node.start("LLD Link", 0); defer sub_prog_node.end(); const is_obj = comp.config.output_mode == .Obj; diff --git a/src/main.zig b/src/main.zig index 099ceb27f90c..22349dd36a27 100644 --- a/src/main.zig +++ b/src/main.zig @@ -3404,11 +3404,16 @@ fn buildOutputType( }, } + const root_prog_node = std.Progress.start(.{ + .disable_printing = (color == .off), + }); + defer root_prog_node.end(); + if (arg_mode == .translate_c) { - return cmdTranslateC(comp, arena, null); + return cmdTranslateC(comp, arena, null, root_prog_node); } - updateModule(comp, color) catch |err| switch (err) { + updateModule(comp, color, root_prog_node) catch |err| switch (err) { error.SemanticAnalyzeFail => { assert(listen == .none); saveState(comp, debug_incremental); @@ -4028,22 +4033,7 @@ fn serve( var child_pid: ?std.process.Child.Id = null; - var progress: std.Progress = .{ - .terminal = null, - .root = .{ - .context = undefined, - .parent = null, - .name = "", - .unprotected_estimated_total_items = 0, - .unprotected_completed_items = 0, - }, - .columns_written = 0, - .prev_refresh_timestamp = 0, - .timer = null, - .done = false, - }; - const main_progress_node = &progress.root; - main_progress_node.context = &progress; + const main_progress_node = std.Progress.start(.{}); while (true) { const hdr = try server.receiveMessage(); @@ -4051,7 +4041,6 @@ fn serve( switch (hdr.tag) { .exit => return cleanExit(), .update => { - assert(main_progress_node.recently_updated_child == null); tracy.frameMark(); if (arg_mode == .translate_c) { @@ -4059,7 +4048,7 @@ fn serve( defer arena_instance.deinit(); const arena = arena_instance.allocator(); var output: Compilation.CImportResult = undefined; - try cmdTranslateC(comp, arena, &output); + try cmdTranslateC(comp, arena, &output, main_progress_node); defer output.deinit(gpa); if (output.errors.errorMessageCount() != 0) { try server.serveErrorBundle(output.errors); @@ -4075,21 +4064,7 @@ fn serve( try comp.makeBinFileWritable(); } - if (builtin.single_threaded) { - try comp.update(main_progress_node); - } else { - var reset: std.Thread.ResetEvent = .{}; - - var progress_thread = try std.Thread.spawn(.{}, progressThread, .{ - &progress, &server, &reset, - }); - defer { - reset.set(); - progress_thread.join(); - } - - try comp.update(main_progress_node); - } + try comp.update(main_progress_node); try comp.makeBinFileExecutable(); try serveUpdateResults(&server, comp); @@ -4116,7 +4091,6 @@ fn serve( }, .hot_update => { tracy.frameMark(); - assert(main_progress_node.recently_updated_child == null); if (child_pid) |pid| { try comp.hotCodeSwap(main_progress_node, pid); try serveUpdateResults(&server, comp); @@ -4146,63 +4120,6 @@ fn serve( } } -fn progressThread(progress: *std.Progress, server: *const Server, reset: *std.Thread.ResetEvent) void { - while (true) { - if (reset.timedWait(500 * std.time.ns_per_ms)) |_| { - // The Compilation update has completed. 
- return; - } else |err| switch (err) { - error.Timeout => {}, - } - - var buf: std.BoundedArray(u8, 160) = .{}; - - { - progress.update_mutex.lock(); - defer progress.update_mutex.unlock(); - - var need_ellipse = false; - var maybe_node: ?*std.Progress.Node = &progress.root; - while (maybe_node) |node| { - if (need_ellipse) { - buf.appendSlice("... ") catch {}; - } - need_ellipse = false; - const eti = @atomicLoad(usize, &node.unprotected_estimated_total_items, .monotonic); - const completed_items = @atomicLoad(usize, &node.unprotected_completed_items, .monotonic); - const current_item = completed_items + 1; - if (node.name.len != 0 or eti > 0) { - if (node.name.len != 0) { - buf.appendSlice(node.name) catch {}; - need_ellipse = true; - } - if (eti > 0) { - if (need_ellipse) buf.appendSlice(" ") catch {}; - buf.writer().print("[{d}/{d}] ", .{ current_item, eti }) catch {}; - need_ellipse = false; - } else if (completed_items != 0) { - if (need_ellipse) buf.appendSlice(" ") catch {}; - buf.writer().print("[{d}] ", .{current_item}) catch {}; - need_ellipse = false; - } - } - maybe_node = @atomicLoad(?*std.Progress.Node, &node.recently_updated_child, .acquire); - } - } - - const progress_string = buf.slice(); - - server.serveMessage(.{ - .tag = .progress, - .bytes_len = @as(u32, @intCast(progress_string.len)), - }, &.{ - progress_string, - }) catch |err| { - fatal("unable to write to client: {s}", .{@errorName(err)}); - }; - } -} - fn serveUpdateResults(s: *Server, comp: *Compilation) !void { const gpa = comp.gpa; var error_bundle = try comp.getAllErrorsAlloc(); @@ -4469,25 +4386,8 @@ fn runOrTestHotSwap( } } -fn updateModule(comp: *Compilation, color: Color) !void { - { - // If the terminal is dumb, we dont want to show the user all the output. - var progress: std.Progress = .{ .dont_print_on_dumb = true }; - const main_progress_node = progress.start("", 0); - defer main_progress_node.end(); - switch (color) { - .off => { - progress.terminal = null; - }, - .on => { - progress.terminal = std.io.getStdErr(); - progress.supports_ansi_escape_codes = true; - }, - .auto => {}, - } - - try comp.update(main_progress_node); - } +fn updateModule(comp: *Compilation, color: Color, prog_node: std.Progress.Node) !void { + try comp.update(prog_node); var errors = try comp.getAllErrorsAlloc(); defer errors.deinit(comp.gpa); @@ -4498,7 +4398,12 @@ fn updateModule(comp: *Compilation, color: Color) !void { } } -fn cmdTranslateC(comp: *Compilation, arena: Allocator, fancy_output: ?*Compilation.CImportResult) !void { +fn cmdTranslateC( + comp: *Compilation, + arena: Allocator, + fancy_output: ?*Compilation.CImportResult, + prog_node: std.Progress.Node, +) !void { if (build_options.only_core_functionality) @panic("@translate-c is not available in a zig2.c build"); const color: Color = .auto; assert(comp.c_source_files.len == 1); @@ -4559,6 +4464,7 @@ fn cmdTranslateC(comp: *Compilation, arena: Allocator, fancy_output: ?*Compilati .root_src_path = "aro_translate_c.zig", .depend_on_aro = true, .capture = &stdout, + .progress_node = prog_node, }); break :f stdout; }, @@ -4736,8 +4642,6 @@ const usage_build = ; fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { - var progress: std.Progress = .{ .dont_print_on_dumb = true }; - var build_file: ?[]const u8 = null; var override_lib_dir: ?[]const u8 = try EnvVar.ZIG_LIB_DIR.get(arena); var override_global_cache_dir: ?[]const u8 = try EnvVar.ZIG_GLOBAL_CACHE_DIR.get(arena); @@ -4798,6 +4702,8 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, 
args: []const []const u8) !void { const results_tmp_file_nonce = Package.Manifest.hex64(std.crypto.random.int(u64)); try child_argv.append("-Z" ++ results_tmp_file_nonce); + var color: Color = .auto; + { var i: usize = 0; while (i < args.len) : (i += 1) { @@ -4882,6 +4788,14 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { verbose_cimport = true; } else if (mem.eql(u8, arg, "--verbose-llvm-cpu-features")) { verbose_llvm_cpu_features = true; + } else if (mem.eql(u8, arg, "--color")) { + if (i + 1 >= args.len) fatal("expected [auto|on|off] after {s}", .{arg}); + i += 1; + color = std.meta.stringToEnum(Color, args[i]) orelse { + fatal("expected [auto|on|off] after {s}, found '{s}'", .{ arg, args[i] }); + }; + try child_argv.appendSlice(&.{ arg, args[i] }); + continue; } else if (mem.eql(u8, arg, "--seed")) { if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg}); i += 1; @@ -4895,7 +4809,11 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { const work_around_btrfs_bug = native_os == .linux and EnvVar.ZIG_BTRFS_WORKAROUND.isSet(); - const color: Color = .auto; + const root_prog_node = std.Progress.start(.{ + .disable_printing = (color == .off), + .root_name = "Compile Build Script", + }); + defer root_prog_node.end(); const target_query: std.Target.Query = .{}; const resolved_target: Package.Module.ResolvedTarget = .{ @@ -5051,8 +4969,8 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { config, ); } else { - const root_prog_node = progress.start("Fetch Packages", 0); - defer root_prog_node.end(); + const fetch_prog_node = root_prog_node.start("Fetch Packages", 0); + defer fetch_prog_node.end(); var job_queue: Package.Fetch.JobQueue = .{ .http_client = &http_client, @@ -5093,7 +5011,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { .lazy_status = .eager, .parent_package_root = build_mod.root, .parent_manifest_ast = null, - .prog_node = root_prog_node, + .prog_node = fetch_prog_node, .job_queue = &job_queue, .omit_missing_hash_error = true, .allow_missing_paths_field = false, @@ -5232,7 +5150,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { }; defer comp.destroy(); - updateModule(comp, color) catch |err| switch (err) { + updateModule(comp, color, root_prog_node) catch |err| switch (err) { error.SemanticAnalyzeFail => process.exit(2), else => |e| return e, }; @@ -5250,7 +5168,12 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void { child.stdout_behavior = .Inherit; child.stderr_behavior = .Inherit; - const term = try child.spawnAndWait(); + const term = t: { + std.debug.lockStdErr(); + defer std.debug.unlockStdErr(); + break :t try child.spawnAndWait(); + }; + switch (term) { .Exited => |code| { if (code == 0) return cleanExit(); @@ -5326,8 +5249,9 @@ const JitCmdOptions = struct { prepend_zig_exe_path: bool = false, depend_on_aro: bool = false, capture: ?*[]u8 = null, - /// Send progress and error bundles via std.zig.Server over stdout + /// Send error bundles via std.zig.Server over stdout server: bool = false, + progress_node: ?std.Progress.Node = null, }; fn jitCmd( @@ -5337,6 +5261,9 @@ fn jitCmd( options: JitCmdOptions, ) !void { const color: Color = .auto; + const root_prog_node = if (options.progress_node) |node| node else std.Progress.start(.{ + .disable_printing = (color == .off), + }); const target_query: std.Target.Query = .{}; const resolved_target: 
Package.Module.ResolvedTarget = .{ @@ -5473,39 +5400,14 @@ fn jitCmd( }; defer comp.destroy(); - if (options.server and !builtin.single_threaded) { - var reset: std.Thread.ResetEvent = .{}; - var progress: std.Progress = .{ - .terminal = null, - .root = .{ - .context = undefined, - .parent = null, - .name = "", - .unprotected_estimated_total_items = 0, - .unprotected_completed_items = 0, - }, - .columns_written = 0, - .prev_refresh_timestamp = 0, - .timer = null, - .done = false, - }; - const main_progress_node = &progress.root; - main_progress_node.context = &progress; + if (options.server) { var server = std.zig.Server{ .out = std.io.getStdOut(), .in = undefined, // won't be receiving messages .receive_fifo = undefined, // won't be receiving messages }; - var progress_thread = try std.Thread.spawn(.{}, progressThread, .{ - &progress, &server, &reset, - }); - defer { - reset.set(); - progress_thread.join(); - } - - try comp.update(main_progress_node); + try comp.update(root_prog_node); var error_bundle = try comp.getAllErrorsAlloc(); defer error_bundle.deinit(comp.gpa); @@ -5514,7 +5416,7 @@ fn jitCmd( process.exit(2); } } else { - updateModule(comp, color) catch |err| switch (err) { + updateModule(comp, color, root_prog_node) catch |err| switch (err) { error.SemanticAnalyzeFail => process.exit(2), else => |e| return e, }; @@ -6963,8 +6865,9 @@ fn cmdFetch( try http_client.initDefaultProxies(arena); - var progress: std.Progress = .{ .dont_print_on_dumb = true }; - const root_prog_node = progress.start("Fetch", 0); + var root_prog_node = std.Progress.start(.{ + .root_name = "Fetch", + }); defer root_prog_node.end(); var global_cache_directory: Compilation.Directory = l: { @@ -7028,8 +6931,8 @@ fn cmdFetch( const hex_digest = Package.Manifest.hexDigest(fetch.actual_hash); - progress.done = true; - progress.refresh(); + root_prog_node.end(); + root_prog_node = .{ .index = .none }; const name = switch (save) { .no => { diff --git a/src/mingw.zig b/src/mingw.zig index 803c0f936752..5aa79064ee2c 100644 --- a/src/mingw.zig +++ b/src/mingw.zig @@ -16,7 +16,7 @@ pub const CRTFile = enum { mingw32_lib, }; -pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: *std.Progress.Node) !void { +pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: std.Progress.Node) !void { if (!build_options.have_llvm) { return error.ZigCompilerNotBuiltWithLLVMExtensions; } @@ -234,8 +234,8 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void { const include_dir = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "mingw", "def-include" }); if (comp.verbose_cc) print: { - std.debug.getStderrMutex().lock(); - defer std.debug.getStderrMutex().unlock(); + std.debug.lockStdErr(); + defer std.debug.unlockStdErr(); const stderr = std.io.getStdErr().writer(); nosuspend stderr.print("def file: {s}\n", .{def_file_path}) catch break :print; nosuspend stderr.print("include dir: {s}\n", .{include_dir}) catch break :print; diff --git a/src/musl.zig b/src/musl.zig index 3228faf27192..edeea9cca7d2 100644 --- a/src/musl.zig +++ b/src/musl.zig @@ -19,7 +19,7 @@ pub const CRTFile = enum { libc_so, }; -pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: *std.Progress.Node) !void { +pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: std.Progress.Node) !void { if (!build_options.have_llvm) { return error.ZigCompilerNotBuiltWithLLVMExtensions; } diff --git a/src/wasi_libc.zig b/src/wasi_libc.zig index 122d45230b0b..57d93b6f567f 100644 --- 
a/src/wasi_libc.zig +++ b/src/wasi_libc.zig @@ -57,7 +57,7 @@ pub fn execModelCrtFileFullName(wasi_exec_model: std.builtin.WasiExecModel) []co }; } -pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: *std.Progress.Node) !void { +pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: std.Progress.Node) !void { if (!build_options.have_llvm) { return error.ZigCompilerNotBuiltWithLLVMExtensions; } diff --git a/test/src/Cases.zig b/test/src/Cases.zig index 2138a6aa2546..72574bd97ea0 100644 --- a/test/src/Cases.zig +++ b/test/src/Cases.zig @@ -561,7 +561,7 @@ pub fn lowerToTranslateCSteps( for (self.translate.items) |case| switch (case.kind) { .run => |output| { if (translate_c_options.skip_run_translated_c) continue; - const annotated_case_name = b.fmt("run-translated-c {s}", .{case.name}); + const annotated_case_name = b.fmt("run-translated-c {s}", .{case.name}); for (test_filters) |test_filter| { if (std.mem.indexOf(u8, annotated_case_name, test_filter)) |_| break; } else if (test_filters.len > 0) continue; diff --git a/test/src/RunTranslatedC.zig b/test/src/RunTranslatedC.zig index 8414bd15ac91..74d119276884 100644 --- a/test/src/RunTranslatedC.zig +++ b/test/src/RunTranslatedC.zig @@ -91,6 +91,7 @@ pub fn addCase(self: *RunTranslatedCContext, case: *const TestCase) void { run.expectStdErrEqual(""); } run.expectStdOutEqual(case.expected_stdout); + run.skip_foreign_checks = true; self.step.dependOn(&run.step); } diff --git a/test/standalone/cmakedefine/build.zig b/test/standalone/cmakedefine/build.zig index d90441360fce..3c57523373e8 100644 --- a/test/standalone/cmakedefine/build.zig +++ b/test/standalone/cmakedefine/build.zig @@ -80,7 +80,7 @@ pub fn build(b: *std.Build) void { test_step.dependOn(&wrapper_header.step); } -fn compare_headers(step: *std.Build.Step, prog_node: *std.Progress.Node) !void { +fn compare_headers(step: *std.Build.Step, prog_node: std.Progress.Node) !void { _ = prog_node; const allocator = step.owner.allocator; const expected_fmt = "expected_{s}"; diff --git a/test/standalone/empty_env/build.zig b/test/standalone/empty_env/build.zig index b8e488f830e5..344e8047bd12 100644 --- a/test/standalone/empty_env/build.zig +++ b/test/standalone/empty_env/build.zig @@ -21,6 +21,7 @@ pub fn build(b: *std.Build) void { const run = b.addRunArtifact(main); run.clearEnvironment(); + run.disable_zig_progress = true; test_step.dependOn(&run.step); }
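
A minimal sketch of the call pattern this diff migrates callers to, assuming the reworked std.Progress API exercised above: std.Progress.start creates the root node, child nodes are plain values paired with `defer node.end()` (no activate() step), estimates grow through increaseEstimatedTotalItems, and direct stderr writes are bracketed by std.debug.lockStdErr/unlockStdErr. The runStep helper, node names, and item counts below are illustrative only and do not come from the diff.

const std = @import("std");

// Hypothetical worker: each unit of work gets its own child node.
fn runStep(parent: std.Progress.Node, name: []const u8) void {
    const node = parent.start(name, 0);
    defer node.end();
    // ... real work would happen here ...
}

pub fn main() void {
    // One root node per process; printing can be disabled explicitly,
    // e.g. when color/progress output is turned off.
    const root_node = std.Progress.start(.{
        .root_name = "Example",
        .estimated_total_items = 2,
    });
    defer root_node.end();

    runStep(root_node, "first step");
    runStep(root_node, "second step");

    // Work discovered later grows the estimate through the API instead of
    // poking unprotected_estimated_total_items directly.
    root_node.increaseEstimatedTotalItems(1);
    runStep(root_node, "late step");

    // Direct stderr output is serialized against the progress renderer.
    std.debug.lockStdErr();
    defer std.debug.unlockStdErr();
    std.io.getStdErr().writer().print("done\n", .{}) catch {};
}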