From 249a826fd55718e4986a8382761c3ccb28ca9a6e Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 28 Jul 2024 17:09:14 +0100 Subject: [PATCH] compiler: split Decl into Nav and Cau The type `Zcu.Decl` in the compiler is problematic: over time it has gained many responsibilities. Every source declaration, container type, generic instantiation, and `@extern` has a `Decl`. The functions of these `Decl`s are in some cases entirely disjoint. After careful analysis, I determined that the two main responsibilities of `Decl` are as follows: * A `Decl` acts as the "subject" of semantic analysis at comptime. A single unit of analysis is either a runtime function body, or a `Decl`. It registers incremental dependencies, tracks analysis errors, etc. * A `Decl` acts as a "global variable": a pointer to it is consistent, and it may be lowered to a specific symbol by the codegen backend. This commit eliminates `Decl` and introduces new types to model these responsibilities: `Cau` (Comptime Analysis Unit) and `Nav` (Named Addressable Value). Every source declaration, and every container type requiring resolution (so *not* including `opaque`), has a `Cau`. For a source declaration, this `Cau` performs the resolution of its value. (When #131 is implemented, it is unsolved whether type and value resolution will share a `Cau` or have two distinct `Cau`s.) For a type, this `Cau` is the context in which type resolution occurs. Every non-`comptime` source declaration, every generic instantiation, and every distinct `extern` has a `Nav`. These are sent to codegen/link: the backends by definition do not care about `Cau`s. This commit has some minor technically-breaking changes surrounding `usingnamespace`. I don't think they'll impact anyone, since the changes are fixes around semantics which were previously inconsistent (the behavior changed depending on hashmap iteration order!). Aside from that, this changeset has no significant user-facing changes. 
Instead, it is an internal refactor which makes it easier to correctly model the responsibilities of different objects, particularly regarding incremental compilation. The performance impact should be negligible, but I will take measurements before merging this work into `master`. Co-authored-by: Jacob Young Co-authored-by: Jakub Konka --- src/Compilation.zig | 227 +- src/InternPool.zig | 1380 +++++++---- src/Sema.zig | 2070 ++++++++--------- src/Sema/bitcast.zig | 2 +- src/Sema/comptime_ptr_access.zig | 24 +- src/Type.zig | 174 +- src/Value.zig | 85 +- src/Zcu.zig | 664 ++---- src/Zcu/PerThread.zig | 1678 +++++++------ src/arch/aarch64/CodeGen.zig | 81 +- src/arch/aarch64/Emit.zig | 8 +- src/arch/arm/CodeGen.zig | 41 +- src/arch/riscv64/CodeGen.zig | 117 +- src/arch/riscv64/Emit.zig | 6 +- src/arch/sparc64/CodeGen.zig | 91 +- src/arch/wasm/CodeGen.zig | 391 ++-- src/arch/wasm/Emit.zig | 14 +- src/arch/x86_64/CodeGen.zig | 193 +- src/arch/x86_64/Emit.zig | 22 +- src/arch/x86_64/Lower.zig | 4 +- src/codegen.zig | 203 +- src/codegen/c.zig | 480 ++-- src/codegen/c/Type.zig | 73 +- src/codegen/llvm.zig | 848 ++++--- src/codegen/spirv.zig | 586 +++-- src/link.zig | 88 +- src/link/C.zig | 242 +- src/link/Coff.zig | 400 ++-- src/link/Dwarf.zig | 1039 ++++----- src/link/Elf.zig | 40 +- src/link/Elf/ZigObject.zig | 545 ++--- src/link/MachO.zig | 38 +- src/link/MachO/Atom.zig | 20 + src/link/MachO/ZigObject.zig | 554 ++--- src/link/NvPtx.zig | 4 +- src/link/Plan9.zig | 527 ++--- src/link/SpirV.zig | 40 +- src/link/Wasm.zig | 51 +- src/link/Wasm/ZigObject.zig | 397 ++-- src/print_air.zig | 2 +- src/print_value.zig | 32 +- test/behavior/type_info.zig | 6 +- test/behavior/usingnamespace.zig | 4 - test/behavior/usingnamespace/file_0.zig | 1 - test/behavior/usingnamespace/file_1.zig | 12 - test/behavior/usingnamespace/imports.zig | 5 - .../setAlignStack_in_inline_function.zig | 22 - .../setAlignStack_set_twice.zig | 11 - ...n_invalid_value_of_non-exhaustive_enum.zig | 2 +- 49 
files changed, 6380 insertions(+), 7164 deletions(-) delete mode 100644 test/behavior/usingnamespace/file_0.zig delete mode 100644 test/behavior/usingnamespace/file_1.zig delete mode 100644 test/behavior/usingnamespace/imports.zig delete mode 100644 test/cases/compile_errors/setAlignStack_in_inline_function.zig delete mode 100644 test/cases/compile_errors/setAlignStack_set_twice.zig diff --git a/src/Compilation.zig b/src/Compilation.zig index 824b8695e740..01a5772e231d 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -354,28 +354,25 @@ pub const RcIncludes = enum { const Job = union(enum) { /// Write the constant value for a Decl to the output file. - codegen_decl: InternPool.DeclIndex, + codegen_nav: InternPool.Nav.Index, /// Write the machine code for a function to the output file. - /// This will either be a non-generic `func_decl` or a `func_instance`. codegen_func: struct { + /// This will either be a non-generic `func_decl` or a `func_instance`. func: InternPool.Index, /// This `Air` is owned by the `Job` and allocated with `gpa`. /// It must be deinited when the job is processed. air: Air, }, - /// Render the .h file snippet for the Decl. - emit_h_decl: InternPool.DeclIndex, - /// The Decl needs to be analyzed and possibly export itself. - /// It may have already be analyzed, or it may have been determined - /// to be outdated; in this case perform semantic analysis again. - analyze_decl: InternPool.DeclIndex, + /// The `Cau` must be semantically analyzed (and possibly export itself). + /// This may be its first time being analyzed, or it may be outdated. + analyze_cau: InternPool.Cau.Index, /// Analyze the body of a runtime function. /// After analysis, a `codegen_func` job will be queued. /// These must be separate jobs to ensure any needed type resolution occurs *before* codegen. 
analyze_func: InternPool.Index, /// The source file containing the Decl has been updated, and so the /// Decl may need its line number information updated in the debug info. - update_line_number: InternPool.DeclIndex, + update_line_number: void, // TODO /// The main source file for the module needs to be analyzed. analyze_mod: *Package.Module, /// Fully resolve the given `struct` or `union` type. @@ -419,7 +416,7 @@ const Job = union(enum) { }; const CodegenJob = union(enum) { - decl: InternPool.DeclIndex, + nav: InternPool.Nav.Index, func: struct { func: InternPool.Index, /// This `Air` is owned by the `Job` and allocated with `gpa`. @@ -1445,12 +1442,6 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .path = try options.global_cache_directory.join(arena, &[_][]const u8{zir_sub_dir}), }; - const emit_h: ?*Zcu.GlobalEmitH = if (options.emit_h) |loc| eh: { - const eh = try arena.create(Zcu.GlobalEmitH); - eh.* = .{ .loc = loc }; - break :eh eh; - } else null; - const std_mod = options.std_mod orelse try Package.Module.create(arena, .{ .global_cache_directory = options.global_cache_directory, .paths = .{ @@ -1478,7 +1469,6 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil .std_mod = std_mod, .global_zir_cache = global_zir_cache, .local_zir_cache = local_zir_cache, - .emit_h = emit_h, .error_limit = error_limit, .llvm_object = null, }; @@ -2581,7 +2571,7 @@ fn addNonIncrementalStuffToCacheManifest( man.hash.addOptionalBytes(comp.test_name_prefix); man.hash.add(comp.skip_linker_dependencies); man.hash.add(comp.formatted_panics); - man.hash.add(mod.emit_h != null); + //man.hash.add(mod.emit_h != null); man.hash.add(mod.error_limit); } else { cache_helpers.addModule(&man.hash, comp.root_mod); @@ -2930,7 +2920,7 @@ const Header = extern struct { intern_pool: extern struct { thread_count: u32, src_hash_deps_len: u32, - decl_val_deps_len: u32, + nav_val_deps_len: u32, namespace_deps_len: u32, 
namespace_name_deps_len: u32, first_dependency_len: u32, @@ -2972,7 +2962,7 @@ pub fn saveState(comp: *Compilation) !void { .intern_pool = .{ .thread_count = @intCast(ip.locals.len), .src_hash_deps_len = @intCast(ip.src_hash_deps.count()), - .decl_val_deps_len = @intCast(ip.decl_val_deps.count()), + .nav_val_deps_len = @intCast(ip.nav_val_deps.count()), .namespace_deps_len = @intCast(ip.namespace_deps.count()), .namespace_name_deps_len = @intCast(ip.namespace_name_deps.count()), .first_dependency_len = @intCast(ip.first_dependency.count()), @@ -2999,8 +2989,8 @@ pub fn saveState(comp: *Compilation) !void { addBuf(&bufs, mem.sliceAsBytes(ip.src_hash_deps.keys())); addBuf(&bufs, mem.sliceAsBytes(ip.src_hash_deps.values())); - addBuf(&bufs, mem.sliceAsBytes(ip.decl_val_deps.keys())); - addBuf(&bufs, mem.sliceAsBytes(ip.decl_val_deps.values())); + addBuf(&bufs, mem.sliceAsBytes(ip.nav_val_deps.keys())); + addBuf(&bufs, mem.sliceAsBytes(ip.nav_val_deps.values())); addBuf(&bufs, mem.sliceAsBytes(ip.namespace_deps.keys())); addBuf(&bufs, mem.sliceAsBytes(ip.namespace_deps.values())); addBuf(&bufs, mem.sliceAsBytes(ip.namespace_name_deps.keys())); @@ -3019,7 +3009,7 @@ pub fn saveState(comp: *Compilation) !void { addBuf(&bufs, local.shared.strings.view().items(.@"0")[0..pt_header.intern_pool.string_bytes_len]); addBuf(&bufs, mem.sliceAsBytes(local.shared.tracked_insts.view().items(.@"0")[0..pt_header.intern_pool.tracked_insts_len])); addBuf(&bufs, mem.sliceAsBytes(local.shared.files.view().items(.bin_digest)[0..pt_header.intern_pool.files_len])); - addBuf(&bufs, mem.sliceAsBytes(local.shared.files.view().items(.root_decl)[0..pt_header.intern_pool.files_len])); + addBuf(&bufs, mem.sliceAsBytes(local.shared.files.view().items(.root_type)[0..pt_header.intern_pool.files_len])); } //// TODO: compilation errors @@ -3065,6 +3055,8 @@ pub fn totalErrorCount(comp: *Compilation) u32 { } if (comp.module) |zcu| { + const ip = &zcu.intern_pool; + total += zcu.failed_exports.count(); 
total += zcu.failed_embed_files.count(); @@ -3084,25 +3076,18 @@ pub fn totalErrorCount(comp: *Compilation) u32 { // When a parse error is introduced, we keep all the semantic analysis for // the previous parse success, including compile errors, but we cannot // emit them until the file succeeds parsing. - for (zcu.failed_analysis.keys()) |key| { - const decl_index = switch (key.unwrap()) { - .decl => |d| d, - .func => |ip_index| zcu.funcInfo(ip_index).owner_decl, + for (zcu.failed_analysis.keys()) |anal_unit| { + const file_index = switch (anal_unit.unwrap()) { + .cau => |cau| zcu.namespacePtr(ip.getCau(cau).namespace).file_scope, + .func => |ip_index| zcu.funcInfo(ip_index).zir_body_inst.resolveFull(ip).file, }; - if (zcu.declFileScope(decl_index).okToReportErrors()) { + if (zcu.fileByIndex(file_index).okToReportErrors()) { total += 1; - if (zcu.cimport_errors.get(key)) |errors| { + if (zcu.cimport_errors.get(anal_unit)) |errors| { total += errors.errorMessageCount(); } } } - if (zcu.emit_h) |emit_h| { - for (emit_h.failed_decls.keys()) |key| { - if (zcu.declFileScope(key).okToReportErrors()) { - total += 1; - } - } - } if (zcu.intern_pool.global_error_set.getNamesFromMainThread().len > zcu.error_limit) { total += 1; @@ -3169,6 +3154,8 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { }); } if (comp.module) |zcu| { + const ip = &zcu.intern_pool; + var all_references = try zcu.resolveReferences(); defer all_references.deinit(gpa); @@ -3219,14 +3206,14 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { if (err) |e| return e; } for (zcu.failed_analysis.keys(), zcu.failed_analysis.values()) |anal_unit, error_msg| { - const decl_index = switch (anal_unit.unwrap()) { - .decl => |d| d, - .func => |ip_index| zcu.funcInfo(ip_index).owner_decl, + const file_index = switch (anal_unit.unwrap()) { + .cau => |cau| zcu.namespacePtr(ip.getCau(cau).namespace).file_scope, + .func => |ip_index| zcu.funcInfo(ip_index).zir_body_inst.resolveFull(ip).file, }; - 
// Skip errors for Decls within files that had a parse failure. + // Skip errors for AnalUnits within files that had a parse failure. // We'll try again once parsing succeeds. - if (!zcu.declFileScope(decl_index).okToReportErrors()) continue; + if (!zcu.fileByIndex(file_index).okToReportErrors()) continue; try addModuleErrorMsg(zcu, &bundle, error_msg.*, &all_references); if (zcu.cimport_errors.get(anal_unit)) |errors| { @@ -3250,15 +3237,6 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { } } } - if (zcu.emit_h) |emit_h| { - for (emit_h.failed_decls.keys(), emit_h.failed_decls.values()) |decl_index, error_msg| { - // Skip errors for Decls within files that had a parse failure. - // We'll try again once parsing succeeds. - if (zcu.declFileScope(decl_index).okToReportErrors()) { - try addModuleErrorMsg(zcu, &bundle, error_msg.*, &all_references); - } - } - } for (zcu.failed_exports.values()) |value| { try addModuleErrorMsg(zcu, &bundle, value.*, &all_references); } @@ -3437,11 +3415,15 @@ pub fn addModuleErrorMsg( const loc = std.zig.findLineColumn(source.bytes, span.main); const rt_file_path = try src.file_scope.fullPath(gpa); const name = switch (ref.referencer.unwrap()) { - .decl => |d| mod.declPtr(d).name, - .func => |f| mod.funcOwnerDeclPtr(f).name, + .cau => |cau| switch (ip.getCau(cau).owner.unwrap()) { + .nav => |nav| ip.getNav(nav).name.toSlice(ip), + .type => |ty| Type.fromInterned(ty).containerTypeName(ip).toSlice(ip), + .none => "comptime", + }, + .func => |f| ip.getNav(mod.funcInfo(f).owner_nav).name.toSlice(ip), }; try ref_traces.append(gpa, .{ - .decl_name = try eb.addString(name.toSlice(ip)), + .decl_name = try eb.addString(name), .src_loc = try eb.addSourceLocation(.{ .src_path = try eb.addString(rt_file_path), .span_start = span.start, @@ -3617,10 +3599,10 @@ fn performAllTheWorkInner( // Pre-load these things from our single-threaded context since they // will be needed by the worker threads. 
const path_digest = zcu.filePathDigest(file_index); - const root_decl = zcu.fileRootDecl(file_index); + const old_root_type = zcu.fileRootType(file_index); const file = zcu.fileByIndex(file_index); comp.thread_pool.spawnWgId(&astgen_wait_group, workerAstGenFile, .{ - comp, file, file_index, path_digest, root_decl, zir_prog_node, &astgen_wait_group, .root, + comp, file, file_index, path_digest, old_root_type, zir_prog_node, &astgen_wait_group, .root, }); } } @@ -3682,7 +3664,7 @@ fn performAllTheWorkInner( // which we need to work on, and queue it if so. if (try zcu.findOutdatedToAnalyze()) |outdated| { switch (outdated.unwrap()) { - .decl => |decl| try comp.queueJob(.{ .analyze_decl = decl }), + .cau => |cau| try comp.queueJob(.{ .analyze_cau = cau }), .func => |func| try comp.queueJob(.{ .analyze_func = func }), } continue; @@ -3704,24 +3686,17 @@ pub fn queueJobs(comp: *Compilation, jobs: []const Job) !void { fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progress.Node) JobError!void { switch (job) { - .codegen_decl => |decl_index| { - const decl = comp.module.?.declPtr(decl_index); - - switch (decl.analysis) { - .unreferenced => unreachable, - .in_progress => unreachable, - - .file_failure, - .sema_failure, - .codegen_failure, - .dependency_failure, - => {}, - - .complete => { - assert(decl.has_tv); - try comp.queueCodegenJob(tid, .{ .decl = decl_index }); - }, + .codegen_nav => |nav_index| { + const zcu = comp.module.?; + const nav = zcu.intern_pool.getNav(nav_index); + if (nav.analysis_owner.unwrap()) |cau| { + const unit = InternPool.AnalUnit.wrap(.{ .cau = cau }); + if (zcu.failed_analysis.contains(unit) or zcu.transitive_failed_analysis.contains(unit)) { + return; + } } + assert(nav.status == .resolved); + try comp.queueCodegenJob(tid, .{ .nav = nav_index }); }, .codegen_func => |func| { // This call takes ownership of `func.air`. 
@@ -3740,82 +3715,30 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progre error.AnalysisFail => return, }; }, - .emit_h_decl => |decl_index| { - if (true) @panic("regressed compiler feature: emit-h should hook into updateExports, " ++ - "not decl analysis, which is too early to know about @export calls"); - + .analyze_cau => |cau_index| { const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; - const decl = pt.zcu.declPtr(decl_index); - - switch (decl.analysis) { - .unreferenced => unreachable, - .in_progress => unreachable, - - .file_failure, - .sema_failure, - .dependency_failure, - => return, - - // emit-h only requires semantic analysis of the Decl to be complete, - // it does not depend on machine code generation to succeed. - .codegen_failure, .complete => { - const named_frame = tracy.namedFrame("emit_h_decl"); - defer named_frame.end(); - - const gpa = comp.gpa; - const emit_h = pt.zcu.emit_h.?; - _ = try emit_h.decl_table.getOrPut(gpa, decl_index); - const decl_emit_h = emit_h.declPtr(decl_index); - const fwd_decl = &decl_emit_h.fwd_decl; - fwd_decl.shrinkRetainingCapacity(0); - var ctypes_arena = std.heap.ArenaAllocator.init(gpa); - defer ctypes_arena.deinit(); - - const file_scope = pt.zcu.namespacePtr(decl.src_namespace).fileScope(pt.zcu); - - var dg: c_codegen.DeclGen = .{ - .gpa = gpa, - .pt = pt, - .mod = file_scope.mod, - .error_msg = null, - .pass = .{ .decl = decl_index }, - .is_naked_fn = false, - .fwd_decl = fwd_decl.toManaged(gpa), - .ctype_pool = c_codegen.CType.Pool.empty, - .scratch = .{}, - .anon_decl_deps = .{}, - .aligned_anon_decls = .{}, - }; - defer { - fwd_decl.* = dg.fwd_decl.moveToUnmanaged(); - fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len); - dg.ctype_pool.deinit(gpa); - dg.scratch.deinit(gpa); - } - try dg.ctype_pool.init(gpa); - - c_codegen.genHeader(&dg) catch |err| switch (err) { - error.AnalysisFail => { - try emit_h.failed_decls.put(gpa, decl_index, dg.error_msg.?); - 
return; - }, - else => |e| return e, - }; - }, - } - }, - .analyze_decl => |decl_index| { - const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; - pt.ensureDeclAnalyzed(decl_index) catch |err| switch (err) { + pt.ensureCauAnalyzed(cau_index) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => return, }; - const decl = pt.zcu.declPtr(decl_index); - if (decl.kind == .@"test" and comp.config.is_test) { + queue_test_analysis: { + if (!comp.config.is_test) break :queue_test_analysis; + + // Check if this is a test function. + const ip = &pt.zcu.intern_pool; + const cau = ip.getCau(cau_index); + const nav_index = switch (cau.owner.unwrap()) { + .none, .type => break :queue_test_analysis, + .nav => |nav| nav, + }; + if (!pt.zcu.test_functions.contains(nav_index)) { + break :queue_test_analysis; + } + // Tests are always emitted in test binaries. The decl_refs are created by // Zcu.populateTestFunctions, but this will not queue body analysis, so do // that now. 
- try pt.zcu.ensureFuncBodyAnalysisQueued(decl.val.toIntern()); + try pt.zcu.ensureFuncBodyAnalysisQueued(ip.getNav(nav_index).status.resolved.val); } }, .resolve_type_fully => |ty| { @@ -3832,6 +3755,8 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progre const named_frame = tracy.namedFrame("update_line_number"); defer named_frame.end(); + if (true) @panic("TODO: update_line_number"); + const gpa = comp.gpa; const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; const decl = pt.zcu.declPtr(decl_index); @@ -4054,12 +3979,12 @@ fn codegenThread(tid: usize, comp: *Compilation) void { fn processOneCodegenJob(tid: usize, comp: *Compilation, codegen_job: CodegenJob) JobError!void { switch (codegen_job) { - .decl => |decl_index| { - const named_frame = tracy.namedFrame("codegen_decl"); + .nav => |nav_index| { + const named_frame = tracy.namedFrame("codegen_nav"); defer named_frame.end(); const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; - try pt.linkerUpdateDecl(decl_index); + try pt.linkerUpdateNav(nav_index); }, .func => |func| { const named_frame = tracy.namedFrame("codegen_func"); @@ -4366,7 +4291,7 @@ fn workerAstGenFile( file: *Zcu.File, file_index: Zcu.File.Index, path_digest: Cache.BinDigest, - root_decl: Zcu.Decl.OptionalIndex, + old_root_type: InternPool.Index, prog_node: std.Progress.Node, wg: *WaitGroup, src: Zcu.AstGenSrc, @@ -4375,7 +4300,7 @@ fn workerAstGenFile( defer child_prog_node.end(); const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) }; - pt.astGenFile(file, path_digest, root_decl) catch |err| switch (err) { + pt.astGenFile(file, path_digest, old_root_type) catch |err| switch (err) { error.AnalysisFail => return, else => { file.status = .retryable_failure; @@ -4406,7 +4331,7 @@ fn workerAstGenFile( // `@import("builtin")` is handled specially. 
if (mem.eql(u8, import_path, "builtin")) continue; - const import_result, const imported_path_digest, const imported_root_decl = blk: { + const import_result, const imported_path_digest, const imported_root_type = blk: { comp.mutex.lock(); defer comp.mutex.unlock(); @@ -4421,8 +4346,8 @@ fn workerAstGenFile( comp.appendFileSystemInput(fsi, res.file.mod.root, res.file.sub_file_path) catch continue; }; const imported_path_digest = pt.zcu.filePathDigest(res.file_index); - const imported_root_decl = pt.zcu.fileRootDecl(res.file_index); - break :blk .{ res, imported_path_digest, imported_root_decl }; + const imported_root_type = pt.zcu.fileRootType(res.file_index); + break :blk .{ res, imported_path_digest, imported_root_type }; }; if (import_result.is_new) { log.debug("AstGen of {s} has import '{s}'; queuing AstGen of {s}", .{ @@ -4433,7 +4358,7 @@ fn workerAstGenFile( .import_tok = item.data.token, } }; comp.thread_pool.spawnWgId(wg, workerAstGenFile, .{ - comp, import_result.file, import_result.file_index, imported_path_digest, imported_root_decl, prog_node, wg, sub_src, + comp, import_result.file, import_result.file_index, imported_path_digest, imported_root_type, prog_node, wg, sub_src, }); } } diff --git a/src/InternPool.zig b/src/InternPool.zig index 29343400340a..2e704945f9a2 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -24,12 +24,14 @@ tid_shift_32: if (single_threaded) u0 else std.math.Log2Int(u32) = if (single_th /// These are also invalidated if tracking fails for this instruction. /// Value is index into `dep_entries` of the first dependency on this hash. src_hash_deps: std.AutoArrayHashMapUnmanaged(TrackedInst.Index, DepEntry.Index) = .{}, -/// Dependencies on the value of a Decl. -/// Value is index into `dep_entries` of the first dependency on this Decl value. -decl_val_deps: std.AutoArrayHashMapUnmanaged(DeclIndex, DepEntry.Index) = .{}, -/// Dependencies on the IES of a runtime function. 
-/// Value is index into `dep_entries` of the first dependency on this Decl value. -func_ies_deps: std.AutoArrayHashMapUnmanaged(Index, DepEntry.Index) = .{}, +/// Dependencies on the value of a Nav. +/// Value is index into `dep_entries` of the first dependency on this Nav value. +nav_val_deps: std.AutoArrayHashMapUnmanaged(Nav.Index, DepEntry.Index) = .{}, +/// Dependencies on an interned value, either: +/// * a runtime function (invalidated when its IES changes) +/// * a container type requiring resolution (invalidated when the type must be recreated at a new index) +/// Value is index into `dep_entries` of the first dependency on this interned value. +interned_deps: std.AutoArrayHashMapUnmanaged(Index, DepEntry.Index) = .{}, /// Dependencies on the full set of names in a ZIR namespace. /// Key refers to a `struct_decl`, `union_decl`, etc. /// Value is index into `dep_entries` of the first dependency on this namespace. @@ -210,25 +212,25 @@ pub fn trackZir( } /// Analysis Unit. Represents a single entity which undergoes semantic analysis. -/// This is either a `Decl` (in future `Cau`) or a runtime function. +/// This is either a `Cau` or a runtime function. /// The LSB is used as a tag bit. /// This is the "source" of an incremental dependency edge. 
pub const AnalUnit = packed struct(u32) { - kind: enum(u1) { decl, func }, + kind: enum(u1) { cau, func }, index: u31, pub const Unwrapped = union(enum) { - decl: DeclIndex, + cau: Cau.Index, func: InternPool.Index, }; pub fn unwrap(as: AnalUnit) Unwrapped { return switch (as.kind) { - .decl => .{ .decl = @enumFromInt(as.index) }, + .cau => .{ .cau = @enumFromInt(as.index) }, .func => .{ .func = @enumFromInt(as.index) }, }; } pub fn wrap(raw: Unwrapped) AnalUnit { return switch (raw) { - .decl => |decl| .{ .kind = .decl, .index = @intCast(@intFromEnum(decl)) }, + .cau => |cau| .{ .kind = .cau, .index = @intCast(@intFromEnum(cau)) }, .func => |func| .{ .kind = .func, .index = @intCast(@intFromEnum(func)) }, }; } @@ -247,10 +249,275 @@ pub const AnalUnit = packed struct(u32) { }; }; +/// Comptime Analysis Unit. This is the "subject" of semantic analysis where the root context is +/// comptime; every `Sema` is owned by either a `Cau` or a runtime function (see `AnalUnit`). +/// The state stored here is immutable. +/// +/// * Every ZIR `declaration` has a `Cau` (post-instantiation) to analyze the declaration body. +/// * Every `struct`, `union`, and `enum` has a `Cau` for type resolution. +/// +/// The analysis status of a `Cau` is known only from state in `Zcu`. +/// An entry in `Zcu.failed_analysis` indicates an analysis failure with associated error message. +/// An entry in `Zcu.transitive_failed_analysis` indicates a transitive analysis failure. +/// +/// 12 bytes. +pub const Cau = struct { + /// The `declaration`, `struct_decl`, `enum_decl`, or `union_decl` instruction which this `Cau` analyzes. + zir_index: TrackedInst.Index, + /// The namespace which this `Cau` should be analyzed within. + namespace: NamespaceIndex, + /// This field essentially tells us what to do with the information resulting from + /// semantic analysis. See `Owner.Unwrapped` for details. + owner: Owner, + + /// See `Owner.Unwrapped` for details. 
In terms of representation, the `InternPool.Index` + /// or `Nav.Index` is cast to a `u31` and stored in `index`. As a special case, if + /// `@as(u32, @bitCast(owner)) == 0xFFFF_FFFF`, then the value is treated as `.none`. + pub const Owner = packed struct(u32) { + kind: enum(u1) { type, nav }, + index: u31, + + pub const Unwrapped = union(enum) { + /// This `Cau` exists in isolation. It is a global `comptime` declaration, or (TODO ANYTHING ELSE?). + /// After semantic analysis completes, the result is discarded. + none, + /// This `Cau` is owned by the given type for type resolution. + /// This is a `struct`, `union`, or `enum` type. + type: InternPool.Index, + /// This `Cau` is owned by the given `Nav` to resolve its value. + /// When analyzing the `Cau`, the resulting value is stored as the value of this `Nav`. + nav: Nav.Index, + }; + + pub fn unwrap(owner: Owner) Unwrapped { + if (@as(u32, @bitCast(owner)) == std.math.maxInt(u32)) { + return .none; + } + return switch (owner.kind) { + .type => .{ .type = @enumFromInt(owner.index) }, + .nav => .{ .nav = @enumFromInt(owner.index) }, + }; + } + + fn wrap(raw: Unwrapped) Owner { + return switch (raw) { + .none => @bitCast(@as(u32, std.math.maxInt(u32))), + .type => |ty| .{ .kind = .type, .index = @intCast(@intFromEnum(ty)) }, + .nav => |nav| .{ .kind = .nav, .index = @intCast(@intFromEnum(nav)) }, + }; + } + }; + + pub const Index = enum(u32) { + _, + pub const Optional = enum(u32) { + none = std.math.maxInt(u32), + _, + pub fn unwrap(opt: Optional) ?Cau.Index { + return switch (opt) { + .none => null, + _ => @enumFromInt(@intFromEnum(opt)), + }; + } + }; + pub fn toOptional(i: Cau.Index) Optional { + return @enumFromInt(@intFromEnum(i)); + } + const Unwrapped = struct { + tid: Zcu.PerThread.Id, + index: u32, + + fn wrap(unwrapped: Unwrapped, ip: *const InternPool) Cau.Index { + assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask()); + assert(unwrapped.index <= ip.getIndexMask(u31)); + return 
@enumFromInt(@as(u32, @intFromEnum(unwrapped.tid)) << ip.tid_shift_31 | + unwrapped.index); + } + }; + fn unwrap(cau_index: Cau.Index, ip: *const InternPool) Unwrapped { + return .{ + .tid = @enumFromInt(@intFromEnum(cau_index) >> ip.tid_shift_31 & ip.getTidMask()), + .index = @intFromEnum(cau_index) & ip.getIndexMask(u31), + }; + } + }; +}; + +/// Named Addressable Value. Represents a global value with a name and address. This name may be +/// generated, and the type (and hence address) may be comptime-only. A `Nav` whose type has runtime +/// bits is sent to the linker to be emitted to the binary. +/// +/// * Every ZIR `declaration` which is not a `comptime` declaration has a `Nav` (post-instantiation) +/// which stores the declaration's resolved value. +/// * Generic instances have a `Nav` corresponding to the instantiated function. +/// * `@extern` calls create a `Nav` whose value is a `.@"extern"`. +/// +/// `Nav.Repr` is the in-memory representation. +pub const Nav = struct { + /// The unqualified name of this `Nav`. Namespace lookups use this name, and error messages may use it. + /// Additionally, extern `Nav`s (i.e. those whose value is an `extern`) use this name. + name: NullTerminatedString, + /// The fully-qualified name of this `Nav`. + fqn: NullTerminatedString, + /// If the value of this `Nav` is resolved by semantic analysis, it is within this `Cau`. + /// If this is `.none`, then `status == .resolved` always. + analysis_owner: Cau.Index.Optional, + /// TODO: this is a hack! If #20663 isn't accepted, let's figure out something a bit better. + is_usingnamespace: bool, + status: union(enum) { + /// This `Nav` is pending semantic analysis through `analysis_owner`. + unresolved, + /// The value of this `Nav` is resolved. + resolved: struct { + val: InternPool.Index, + alignment: Alignment, + @"linksection": OptionalNullTerminatedString, + @"addrspace": std.builtin.AddressSpace, + }, + }, + + /// Asserts that `status == .resolved`. 
+ pub fn typeOf(nav: Nav, ip: *const InternPool) InternPool.Index { + return ip.typeOf(nav.status.resolved.val); + } + + /// Asserts that `status == .resolved`. + pub fn isExtern(nav: Nav, ip: *const InternPool) bool { + return ip.indexToKey(nav.status.resolved.val) == .@"extern"; + } + + /// Get the ZIR instruction corresponding to this `Nav`, used to resolve source locations. + /// This is a `declaration`. + pub fn srcInst(nav: Nav, ip: *const InternPool) TrackedInst.Index { + if (nav.analysis_owner.unwrap()) |cau| { + return ip.getCau(cau).zir_index; + } + // A `Nav` with no corresponding `Cau` always has a resolved value. + return switch (ip.indexToKey(nav.status.resolved.val)) { + .func => |func| { + // Since there was no `analysis_owner`, this must be an instantiation. + // Go up to the generic owner and consult *its* `analysis_owner`. + const go_nav = ip.getNav(ip.indexToKey(func.generic_owner).func.owner_nav); + const go_cau = ip.getCau(go_nav.analysis_owner.unwrap().?); + return go_cau.zir_index; + }, + .@"extern" => |@"extern"| @"extern".zir_index, // extern / @extern + else => unreachable, + }; + } + + pub const Index = enum(u32) { + _, + pub const Optional = enum(u32) { + none = std.math.maxInt(u32), + _, + pub fn unwrap(opt: Optional) ?Nav.Index { + return switch (opt) { + .none => null, + _ => @enumFromInt(@intFromEnum(opt)), + }; + } + }; + pub fn toOptional(i: Nav.Index) Optional { + return @enumFromInt(@intFromEnum(i)); + } + const Unwrapped = struct { + tid: Zcu.PerThread.Id, + index: u32, + + fn wrap(unwrapped: Unwrapped, ip: *const InternPool) Nav.Index { + assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask()); + assert(unwrapped.index <= ip.getIndexMask(u32)); + return @enumFromInt(@as(u32, @intFromEnum(unwrapped.tid)) << ip.tid_shift_32 | + unwrapped.index); + } + }; + fn unwrap(nav_index: Nav.Index, ip: *const InternPool) Unwrapped { + return .{ + .tid = @enumFromInt(@intFromEnum(nav_index) >> ip.tid_shift_32 & ip.getTidMask()), + .index = 
@intFromEnum(nav_index) & ip.getIndexMask(u32), + }; + } + }; + + /// The compact in-memory representation of a `Nav`. + /// 18 bytes. + const Repr = struct { + name: NullTerminatedString, + fqn: NullTerminatedString, + analysis_owner: Cau.Index.Optional, + /// Populated only if `bits.status == .resolved`. + val: InternPool.Index, + /// Populated only if `bits.status == .resolved`. + @"linksection": OptionalNullTerminatedString, + bits: Bits, + + const Bits = packed struct(u16) { + status: enum(u1) { unresolved, resolved }, + /// Populated only if `bits.status == .resolved`. + alignment: Alignment, + /// Populated only if `bits.status == .resolved`. + @"addrspace": std.builtin.AddressSpace, + _: u3 = 0, + is_usingnamespace: bool, + }; + + fn unpack(repr: Repr) Nav { + return .{ + .name = repr.name, + .fqn = repr.fqn, + .analysis_owner = repr.analysis_owner, + .is_usingnamespace = repr.bits.is_usingnamespace, + .status = switch (repr.bits.status) { + .unresolved => .unresolved, + .resolved => .{ .resolved = .{ + .val = repr.val, + .alignment = repr.bits.alignment, + .@"linksection" = repr.@"linksection", + .@"addrspace" = repr.bits.@"addrspace", + } }, + }, + }; + } + }; + + fn pack(nav: Nav) Repr { + // Note that in the `unresolved` case, we do not mark fields as `undefined`, even though they should not be used. + // This is to avoid writing undefined bytes to disk when serializing buffers. 
+ return .{ + .name = nav.name, + .fqn = nav.fqn, + .analysis_owner = nav.analysis_owner, + .val = switch (nav.status) { + .unresolved => .none, + .resolved => |r| r.val, + }, + .@"linksection" = switch (nav.status) { + .unresolved => .none, + .resolved => |r| r.@"linksection", + }, + .bits = switch (nav.status) { + .unresolved => .{ + .status = .unresolved, + .alignment = .none, + .@"addrspace" = .generic, + .is_usingnamespace = nav.is_usingnamespace, + }, + .resolved => |r| .{ + .status = .resolved, + .alignment = r.alignment, + .@"addrspace" = r.@"addrspace", + .is_usingnamespace = nav.is_usingnamespace, + }, + }, + }; + } +}; + pub const Dependee = union(enum) { src_hash: TrackedInst.Index, - decl_val: DeclIndex, - func_ies: Index, + nav_val: Nav.Index, + interned: Index, namespace: TrackedInst.Index, namespace_name: NamespaceNameKey, }; @@ -297,8 +564,8 @@ pub const DependencyIterator = struct { pub fn dependencyIterator(ip: *const InternPool, dependee: Dependee) DependencyIterator { const first_entry = switch (dependee) { .src_hash => |x| ip.src_hash_deps.get(x), - .decl_val => |x| ip.decl_val_deps.get(x), - .func_ies => |x| ip.func_ies_deps.get(x), + .nav_val => |x| ip.nav_val_deps.get(x), + .interned => |x| ip.interned_deps.get(x), .namespace => |x| ip.namespace_deps.get(x), .namespace_name => |x| ip.namespace_name_deps.get(x), } orelse return .{ @@ -337,8 +604,8 @@ pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: AnalUnit, depend inline else => |dependee_payload, tag| new_index: { const gop = try switch (tag) { .src_hash => ip.src_hash_deps, - .decl_val => ip.decl_val_deps, - .func_ies => ip.func_ies_deps, + .nav_val => ip.nav_val_deps, + .interned => ip.interned_deps, .namespace => ip.namespace_deps, .namespace_name => ip.namespace_name_deps, }.getOrPut(gpa, dependee_payload); @@ -426,8 +693,9 @@ const Local = struct { tracked_insts: ListMutate, files: ListMutate, maps: ListMutate, + caus: ListMutate, + navs: ListMutate, - decls: 
BucketListMutate, namespaces: BucketListMutate, } align(std.atomic.cache_line), @@ -439,8 +707,9 @@ const Local = struct { tracked_insts: TrackedInsts, files: List(File), maps: Maps, + caus: Caus, + navs: Navs, - decls: Decls, namespaces: Namespaces, pub fn getLimbs(shared: *const Local.Shared) Limbs { @@ -461,15 +730,12 @@ const Local = struct { const Strings = List(struct { u8 }); const TrackedInsts = List(struct { TrackedInst }); const Maps = List(struct { FieldMap }); - - const decls_bucket_width = 8; - const decls_bucket_mask = (1 << decls_bucket_width) - 1; - const decl_next_free_field = "src_namespace"; - const Decls = List(struct { *[1 << decls_bucket_width]Zcu.Decl }); + const Caus = List(struct { Cau }); + const Navs = List(Nav.Repr); const namespaces_bucket_width = 8; const namespaces_bucket_mask = (1 << namespaces_bucket_width) - 1; - const namespace_next_free_field = "decl_index"; + const namespace_next_free_field = "owner_type"; const Namespaces = List(struct { *[1 << namespaces_bucket_width]Zcu.Namespace }); const ListMutate = struct { @@ -810,8 +1076,6 @@ const Local = struct { /// /// Key is the hash of the path to this file, used to store /// `InternPool.TrackedInst`. - /// - /// Value is the `Decl` of the struct that represents this `File`. pub fn getMutableFiles(local: *Local, gpa: Allocator) List(File).Mutable { return .{ .gpa = gpa, @@ -835,26 +1099,34 @@ const Local = struct { }; } - /// Rather than allocating Decl objects with an Allocator, we instead allocate - /// them with this BucketList. This provides four advantages: - /// * Stable memory so that one thread can access a Decl object while another - /// thread allocates additional Decl objects from this list. - /// * It allows us to use u32 indexes to reference Decl objects rather than - /// pointers, saving memory in Type, Value, and dependency sets. - /// * Using integers to reference Decl objects rather than pointers makes - /// serialization trivial. 
- /// * It provides a unique integer to be used for anonymous symbol names, avoiding - /// multi-threaded contention on an atomic counter. - pub fn getMutableDecls(local: *Local, gpa: Allocator) Decls.Mutable { + pub fn getMutableCaus(local: *Local, gpa: Allocator) Caus.Mutable { return .{ .gpa = gpa, .arena = &local.mutate.arena, - .mutate = &local.mutate.decls.buckets_list, - .list = &local.shared.decls, + .mutate = &local.mutate.caus, + .list = &local.shared.caus, }; } - /// Same pattern as with `getMutableDecls`. + pub fn getMutableNavs(local: *Local, gpa: Allocator) Navs.Mutable { + return .{ + .gpa = gpa, + .arena = &local.mutate.arena, + .mutate = &local.mutate.navs, + .list = &local.shared.navs, + }; + } + + /// Rather than allocating Namespace objects with an Allocator, we instead allocate + /// them with this BucketList. This provides four advantages: + /// * Stable memory so that one thread can access a Namespace object while another + /// thread allocates additional Namespace objects from this list. + /// * It allows us to use u32 indexes to reference Namespace objects rather than + /// pointers, saving memory in types. + /// * Using integers to reference Namespace objects rather than pointers makes + /// serialization trivial. + /// * It provides a unique integer to be used for anonymous symbol names, avoiding + /// multi-threaded contention on an atomic counter. 
pub fn getMutableNamespaces(local: *Local, gpa: Allocator) Namespaces.Mutable { return .{ .gpa = gpa, @@ -1038,51 +1310,6 @@ pub const RuntimeIndex = enum(u32) { pub const ComptimeAllocIndex = enum(u32) { _ }; -pub const DeclIndex = enum(u32) { - _, - - const Unwrapped = struct { - tid: Zcu.PerThread.Id, - bucket_index: u32, - index: u32, - - fn wrap(unwrapped: Unwrapped, ip: *const InternPool) DeclIndex { - assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask()); - assert(unwrapped.bucket_index <= ip.getIndexMask(u32) >> Local.decls_bucket_width); - assert(unwrapped.index <= Local.decls_bucket_mask); - return @enumFromInt(@as(u32, @intFromEnum(unwrapped.tid)) << ip.tid_shift_32 | - unwrapped.bucket_index << Local.decls_bucket_width | - unwrapped.index); - } - }; - fn unwrap(decl_index: DeclIndex, ip: *const InternPool) Unwrapped { - const index = @intFromEnum(decl_index) & ip.getIndexMask(u32); - return .{ - .tid = @enumFromInt(@intFromEnum(decl_index) >> ip.tid_shift_32 & ip.getTidMask()), - .bucket_index = index >> Local.decls_bucket_width, - .index = index & Local.decls_bucket_mask, - }; - } - - pub fn toOptional(i: DeclIndex) OptionalDeclIndex { - return @enumFromInt(@intFromEnum(i)); - } -}; - -pub const OptionalDeclIndex = enum(u32) { - none = std.math.maxInt(u32), - _, - - pub fn init(oi: ?DeclIndex) OptionalDeclIndex { - return @enumFromInt(@intFromEnum(oi orelse return .none)); - } - - pub fn unwrap(oi: OptionalDeclIndex) ?DeclIndex { - if (oi == .none) return null; - return @enumFromInt(@intFromEnum(oi)); - } -}; - pub const NamespaceIndex = enum(u32) { _, @@ -1153,7 +1380,8 @@ pub const FileIndex = enum(u32) { const File = struct { bin_digest: Cache.BinDigest, file: *Zcu.File, - root_decl: OptionalDeclIndex, + /// `.none` means no type has been created yet. + root_type: InternPool.Index, }; /// An index into `strings`. 
@@ -1332,26 +1560,26 @@ pub const OptionalNullTerminatedString = enum(u32) { /// `Index` because we must differentiate between the following cases: /// * runtime-known value (where we store the type) /// * comptime-known value (where we store the value) -/// * decl val (so that we can analyze the value lazily) -/// * decl ref (so that we can analyze the reference lazily) +/// * `Nav` val (so that we can analyze the value lazily) +/// * `Nav` ref (so that we can analyze the reference lazily) pub const CaptureValue = packed struct(u32) { - tag: enum(u2) { @"comptime", runtime, decl_val, decl_ref }, + tag: enum(u2) { @"comptime", runtime, nav_val, nav_ref }, idx: u30, pub fn wrap(val: Unwrapped) CaptureValue { return switch (val) { .@"comptime" => |i| .{ .tag = .@"comptime", .idx = @intCast(@intFromEnum(i)) }, .runtime => |i| .{ .tag = .runtime, .idx = @intCast(@intFromEnum(i)) }, - .decl_val => |i| .{ .tag = .decl_val, .idx = @intCast(@intFromEnum(i)) }, - .decl_ref => |i| .{ .tag = .decl_ref, .idx = @intCast(@intFromEnum(i)) }, + .nav_val => |i| .{ .tag = .nav_val, .idx = @intCast(@intFromEnum(i)) }, + .nav_ref => |i| .{ .tag = .nav_ref, .idx = @intCast(@intFromEnum(i)) }, }; } pub fn unwrap(val: CaptureValue) Unwrapped { return switch (val.tag) { .@"comptime" => .{ .@"comptime" = @enumFromInt(val.idx) }, .runtime => .{ .runtime = @enumFromInt(val.idx) }, - .decl_val => .{ .decl_val = @enumFromInt(val.idx) }, - .decl_ref => .{ .decl_ref = @enumFromInt(val.idx) }, + .nav_val => .{ .nav_val = @enumFromInt(val.idx) }, + .nav_ref => .{ .nav_ref = @enumFromInt(val.idx) }, }; } @@ -1360,8 +1588,8 @@ pub const CaptureValue = packed struct(u32) { @"comptime": Index, /// Index refers to the type. 
runtime: Index, - decl_val: DeclIndex, - decl_ref: DeclIndex, + nav_val: Nav.Index, + nav_ref: Nav.Index, }; pub const Slice = struct { @@ -1410,7 +1638,7 @@ pub const Key = union(enum) { undef: Index, simple_value: SimpleValue, variable: Variable, - extern_func: ExternFunc, + @"extern": Extern, func: Func, int: Key.Int, err: Error, @@ -1637,25 +1865,37 @@ pub const Key = union(enum) { } }; + /// A runtime variable defined in this `Zcu`. pub const Variable = struct { ty: Index, init: Index, - decl: DeclIndex, + owner_nav: Nav.Index, lib_name: OptionalNullTerminatedString, - is_extern: bool, - is_const: bool, is_threadlocal: bool, is_weak_linkage: bool, }; - pub const ExternFunc = struct { + pub const Extern = struct { + /// The name of the extern symbol. + name: NullTerminatedString, + /// The type of the extern symbol itself. + /// This may be `.anyopaque_type`, in which case the value may not be loaded. ty: Index, - /// The Decl that corresponds to the function itself. - decl: DeclIndex, /// Library name if specified. /// For example `extern "c" fn write(...) usize` would have 'c' as library name. /// Index into the string table bytes. lib_name: OptionalNullTerminatedString, + is_const: bool, + is_threadlocal: bool, + is_weak_linkage: bool, + alignment: Alignment, + @"addrspace": std.builtin.AddressSpace, + /// The ZIR instruction which created this extern; used only for source locations. + /// This is a `declaration`. + zir_index: TrackedInst.Index, + /// The `Nav` corresponding to this extern symbol. + /// This is ignored by hashing and equality. + owner_nav: Nav.Index, }; pub const Func = struct { @@ -1687,8 +1927,7 @@ pub const Key = union(enum) { /// so that it can be mutated. /// This will be 0 when the function is not a generic function instantiation. branch_quota_extra_index: u32, - /// The Decl that corresponds to the function itself. - owner_decl: DeclIndex, + owner_nav: Nav.Index, /// The ZIR instruction that is a function instruction. 
Use this to find /// the body. We store this rather than the body directly so that when ZIR /// is regenerated on update(), we can map this to the new corresponding @@ -1861,14 +2100,14 @@ pub const Key = union(enum) { pub const BaseAddr = union(enum) { const Tag = @typeInfo(BaseAddr).Union.tag_type.?; - /// Points to the value of a single `Decl`, which may be constant or a `variable`. - decl: DeclIndex, + /// Points to the value of a single `Nav`, which may be constant or a `variable`. + nav: Nav.Index, /// Points to the value of a single comptime alloc stored in `Sema`. comptime_alloc: ComptimeAllocIndex, /// Points to a single unnamed constant value. - anon_decl: AnonDecl, + uav: Uav, /// Points to a comptime field of a struct. Index is the field's value. /// @@ -1923,15 +2162,11 @@ pub const Key = union(enum) { /// the aggregate pointer. arr_elem: BaseIndex, - pub const MutDecl = struct { - decl: DeclIndex, - runtime_index: RuntimeIndex, - }; pub const BaseIndex = struct { base: Index, index: u64, }; - pub const AnonDecl = extern struct { + pub const Uav = extern struct { val: Index, /// Contains the canonical pointer type of the anonymous /// declaration. 
This may equal `ty` of the `Ptr` or it may be @@ -1944,10 +2179,10 @@ pub const Key = union(enum) { if (@as(Key.Ptr.BaseAddr.Tag, a) != @as(Key.Ptr.BaseAddr.Tag, b)) return false; return switch (a) { - .decl => |a_decl| a_decl == b.decl, + .nav => |a_nav| a_nav == b.nav, .comptime_alloc => |a_alloc| a_alloc == b.comptime_alloc, - .anon_decl => |ad| ad.val == b.anon_decl.val and - ad.orig_ty == b.anon_decl.orig_ty, + .uav => |ad| ad.val == b.uav.val and + ad.orig_ty == b.uav.orig_ty, .int => true, .eu_payload => |a_eu_payload| a_eu_payload == b.eu_payload, .opt_payload => |a_opt_payload| a_opt_payload == b.opt_payload, @@ -2048,7 +2283,7 @@ pub const Key = union(enum) { .payload => |y| Hash.hash(seed + 1, asBytes(&x.ty) ++ asBytes(&y)), }, - .variable => |variable| Hash.hash(seed, asBytes(&variable.decl)), + .variable => |variable| Hash.hash(seed, asBytes(&variable.owner_nav)), .opaque_type, .enum_type, @@ -2125,9 +2360,9 @@ pub const Key = union(enum) { const big_offset: i128 = ptr.byte_offset; const common = asBytes(&ptr.ty) ++ asBytes(&big_offset); return switch (ptr.base_addr) { - inline .decl, + inline .nav, .comptime_alloc, - .anon_decl, + .uav, .int, .eu_payload, .opt_payload, @@ -2231,7 +2466,7 @@ pub const Key = union(enum) { // function instances which have inferred error sets. 
if (func.generic_owner == .none and func.resolved_error_set_extra_index == 0) { - const bytes = asBytes(&func.owner_decl) ++ asBytes(&func.ty) ++ + const bytes = asBytes(&func.owner_nav) ++ asBytes(&func.ty) ++ [1]u8{@intFromBool(func.uncoerced_ty == func.ty)}; return Hash.hash(seed, bytes); } @@ -2250,7 +2485,11 @@ pub const Key = union(enum) { return hasher.final(); }, - .extern_func => |x| Hash.hash(seed, asBytes(&x.ty) ++ asBytes(&x.decl)), + .@"extern" => |e| Hash.hash(seed, asBytes(&e.name) ++ + asBytes(&e.ty) ++ asBytes(&e.lib_name) ++ + asBytes(&e.is_const) ++ asBytes(&e.is_threadlocal) ++ + asBytes(&e.is_weak_linkage) ++ asBytes(&e.alignment) ++ + asBytes(&e.@"addrspace") ++ asBytes(&e.zir_index)), }; } @@ -2331,11 +2570,19 @@ pub const Key = union(enum) { .variable => |a_info| { const b_info = b.variable; - return a_info.decl == b_info.decl; + return a_info.owner_nav == b_info.owner_nav; }, - .extern_func => |a_info| { - const b_info = b.extern_func; - return a_info.ty == b_info.ty and a_info.decl == b_info.decl; + .@"extern" => |a_info| { + const b_info = b.@"extern"; + return a_info.name == b_info.name and + a_info.ty == b_info.ty and + a_info.lib_name == b_info.lib_name and + a_info.is_const == b_info.is_const and + a_info.is_threadlocal == b_info.is_threadlocal and + a_info.is_weak_linkage == b_info.is_weak_linkage and + a_info.alignment == b_info.alignment and + a_info.@"addrspace" == b_info.@"addrspace" and + a_info.zir_index == b_info.zir_index; }, .func => |a_info| { const b_info = b.func; @@ -2344,7 +2591,7 @@ pub const Key = union(enum) { return false; if (a_info.generic_owner == .none) { - if (a_info.owner_decl != b_info.owner_decl) + if (a_info.owner_nav != b_info.owner_nav) return false; } else { if (!std.mem.eql( @@ -2594,7 +2841,7 @@ pub const Key = union(enum) { .float, .opt, .variable, - .extern_func, + .@"extern", .func, .err, .error_union, @@ -2632,8 +2879,11 @@ pub const LoadedUnionType = struct { tid: Zcu.PerThread.Id, /// The index 
of the `Tag.TypeUnion` payload. extra_index: u32, - /// The Decl that corresponds to the union itself. - decl: DeclIndex, + // TODO: the non-fqn will be needed by the new dwarf structure + /// The name of this union type. + name: NullTerminatedString, + /// The `Cau` within which type resolution occurs. + cau: Cau.Index, /// Represents the declarations inside this union. namespace: OptionalNamespaceIndex, /// The enum tag type. @@ -2949,7 +3199,8 @@ pub fn loadUnionType(ip: *const InternPool, index: Index) LoadedUnionType { return .{ .tid = unwrapped_index.tid, .extra_index = data, - .decl = type_union.data.decl, + .name = type_union.data.name, + .cau = type_union.data.cau, .namespace = type_union.data.namespace, .enum_tag_ty = type_union.data.tag_ty, .field_types = field_types, @@ -2963,8 +3214,11 @@ pub const LoadedStructType = struct { tid: Zcu.PerThread.Id, /// The index of the `Tag.TypeStruct` or `Tag.TypeStructPacked` payload. extra_index: u32, - /// The struct's owner Decl. `none` when the struct is `@TypeOf(.{})`. - decl: OptionalDeclIndex, + // TODO: the non-fqn will be needed by the new dwarf structure + /// The name of this struct type. + name: NullTerminatedString, + /// The `Cau` within which type resolution occurs. `none` when the struct is `@TypeOf(.{})`. + cau: Cau.Index.Optional, /// `none` when the struct has no declarations. namespace: OptionalNamespaceIndex, /// Index of the `struct_decl` or `reify` ZIR instruction. 
@@ -3563,7 +3817,8 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { if (item.data == 0) return .{ .tid = .main, .extra_index = 0, - .decl = .none, + .name = .empty, + .cau = .none, .namespace = .none, .zir_index = .none, .layout = .auto, @@ -3577,7 +3832,8 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { .names_map = .none, .captures = CaptureValue.Slice.empty, }; - const decl: DeclIndex = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "decl").?]); + const name: NullTerminatedString = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "name").?]); + const cau: Cau.Index = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "cau").?]); const zir_index: TrackedInst.Index = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?]); const fields_len = extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "fields_len").?]; const flags: Tag.TypeStruct.Flags = @bitCast(@atomicLoad(u32, &extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "flags").?], .unordered)); @@ -3667,7 +3923,8 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { return .{ .tid = unwrapped_index.tid, .extra_index = item.data, - .decl = decl.toOptional(), + .name = name, + .cau = cau.toOptional(), .namespace = namespace, .zir_index = zir_index.toOptional(), .layout = if (flags.is_extern) .@"extern" else .auto, @@ -3683,7 +3940,8 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { }; }, .type_struct_packed, .type_struct_packed_inits => { - const decl: DeclIndex = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "decl").?]); + const name: NullTerminatedString = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "name").?]); + const cau: Cau.Index = @enumFromInt(extra_items[item.data + 
std.meta.fieldIndex(Tag.TypeStructPacked, "cau").?]); const zir_index: TrackedInst.Index = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "zir_index").?]); const fields_len = extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "fields_len").?]; const namespace: OptionalNamespaceIndex = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "namespace").?]); @@ -3729,7 +3987,8 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { return .{ .tid = unwrapped_index.tid, .extra_index = item.data, - .decl = decl.toOptional(), + .name = name, + .cau = cau.toOptional(), .namespace = namespace, .zir_index = zir_index.toOptional(), .layout = .@"packed", @@ -3749,8 +4008,12 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { } const LoadedEnumType = struct { - /// The Decl that corresponds to the enum itself. - decl: DeclIndex, + // TODO: the non-fqn will be needed by the new dwarf structure + /// The name of this enum type. + name: NullTerminatedString, + /// The `Cau` within which type resolution occurs. + /// `null` if this is a generated tag type. + cau: Cau.Index.Optional, /// Represents the declarations inside this enum. namespace: OptionalNamespaceIndex, /// An integer type which is used for the numerical value of the enum. 
@@ -3827,15 +4090,21 @@ pub fn loadEnumType(ip: *const InternPool, index: Index) LoadedEnumType { .type_enum_auto => { const extra = extraDataTrail(extra_list, EnumAuto, item.data); var extra_index: u32 = @intCast(extra.end); - if (extra.data.zir_index == .none) { + const cau: Cau.Index.Optional = if (extra.data.zir_index == .none) cau: { extra_index += 1; // owner_union - } + break :cau .none; + } else cau: { + const cau: Cau.Index = @enumFromInt(extra_list.view().items(.@"0")[extra_index]); + extra_index += 1; // cau + break :cau cau.toOptional(); + }; const captures_len = if (extra.data.captures_len == std.math.maxInt(u32)) c: { extra_index += 2; // type_hash: PackedU64 break :c 0; } else extra.data.captures_len; return .{ - .decl = extra.data.decl, + .name = extra.data.name, + .cau = cau, .namespace = extra.data.namespace, .tag_ty = extra.data.int_tag_type, .names = .{ @@ -3861,15 +4130,21 @@ pub fn loadEnumType(ip: *const InternPool, index: Index) LoadedEnumType { }; const extra = extraDataTrail(extra_list, EnumExplicit, item.data); var extra_index: u32 = @intCast(extra.end); - if (extra.data.zir_index == .none) { + const cau: Cau.Index.Optional = if (extra.data.zir_index == .none) cau: { extra_index += 1; // owner_union - } + break :cau .none; + } else cau: { + const cau: Cau.Index = @enumFromInt(extra_list.view().items(.@"0")[extra_index]); + extra_index += 1; // cau + break :cau cau.toOptional(); + }; const captures_len = if (extra.data.captures_len == std.math.maxInt(u32)) c: { extra_index += 2; // type_hash: PackedU64 break :c 0; } else extra.data.captures_len; return .{ - .decl = extra.data.decl, + .name = extra.data.name, + .cau = cau, .namespace = extra.data.namespace, .tag_ty = extra.data.int_tag_type, .names = .{ @@ -3896,10 +4171,11 @@ pub fn loadEnumType(ip: *const InternPool, index: Index) LoadedEnumType { /// Note that this type doubles as the payload for `Tag.type_opaque`. pub const LoadedOpaqueType = struct { - /// The opaque's owner Decl. 
- decl: DeclIndex, /// Contains the declarations inside this opaque. namespace: OptionalNamespaceIndex, + // TODO: the non-fqn will be needed by the new dwarf structure + /// The name of this opaque type. + name: NullTerminatedString, /// Index of the `opaque_decl` or `reify` instruction. zir_index: TrackedInst.Index, captures: CaptureValue.Slice, @@ -3915,7 +4191,7 @@ pub fn loadOpaqueType(ip: *const InternPool, index: Index) LoadedOpaqueType { else extra.data.captures_len; return .{ - .decl = extra.data.decl, + .name = extra.data.name, .namespace = extra.data.namespace, .zir_index = extra.data.zir_index, .captures = .{ @@ -4216,10 +4492,10 @@ pub const Index = enum(u32) { undef: DataIsIndex, simple_value: void, - ptr_decl: struct { data: *PtrDecl }, + ptr_nav: struct { data: *PtrNav }, ptr_comptime_alloc: struct { data: *PtrComptimeAlloc }, - ptr_anon_decl: struct { data: *PtrAnonDecl }, - ptr_anon_decl_aligned: struct { data: *PtrAnonDeclAligned }, + ptr_uav: struct { data: *PtrUav }, + ptr_uav_aligned: struct { data: *PtrUavAligned }, ptr_comptime_field: struct { data: *PtrComptimeField }, ptr_int: struct { data: *PtrInt }, ptr_eu_payload: struct { data: *PtrBase }, @@ -4255,7 +4531,7 @@ pub const Index = enum(u32) { float_c_longdouble_f128: struct { data: *Float128 }, float_comptime_float: struct { data: *Float128 }, variable: struct { data: *Tag.Variable }, - extern_func: struct { data: *Key.ExternFunc }, + @"extern": struct { data: *Tag.Extern }, func_decl: struct { const @"data.analysis.inferred_error_set" = opaque {}; data: *Tag.FuncDecl, @@ -4669,23 +4945,23 @@ pub const Tag = enum(u8) { /// A value that can be represented with only an enum tag. /// data is SimpleValue enum value. simple_value, - /// A pointer to a decl. - /// data is extra index of `PtrDecl`, which contains the type and address. - ptr_decl, + /// A pointer to a `Nav`. + /// data is extra index of `PtrNav`, which contains the type and address. 
+ ptr_nav, /// A pointer to a decl that can be mutated at comptime. /// data is extra index of `PtrComptimeAlloc`, which contains the type and address. ptr_comptime_alloc, - /// A pointer to an anonymous decl. - /// data is extra index of `PtrAnonDecl`, which contains the pointer type and decl value. - /// The alignment of the anonymous decl is communicated via the pointer type. - ptr_anon_decl, - /// A pointer to an anonymous decl. - /// data is extra index of `PtrAnonDeclAligned`, which contains the pointer + /// A pointer to an anonymous addressable value. + /// data is extra index of `PtrUav`, which contains the pointer type and decl value. + /// The alignment of the uav is communicated via the pointer type. + ptr_uav, + /// A pointer to an unnamed addressable value. + /// data is extra index of `PtrUavAligned`, which contains the pointer /// type and decl value. /// The original pointer type is also provided, which will be different than `ty`. - /// This encoding is only used when a pointer to an anonymous decl is + /// This encoding is only used when a pointer to a Uav is /// coerced to a different pointer type with a different alignment. - ptr_anon_decl_aligned, + ptr_uav_aligned, /// data is extra index of `PtrComptimeField`, which contains the pointer type and field value. ptr_comptime_field, /// A pointer with an integer value. @@ -4800,9 +5076,10 @@ pub const Tag = enum(u8) { /// A global variable. /// data is extra index to Variable. variable, - /// An extern function. - /// data is extra index to ExternFunc. - extern_func, + /// An extern function or variable. + /// data is extra index to Extern. + /// Some parts of the key are stored in `owner_nav`. + @"extern", /// A non-extern function corresponding directly to the AST node from whence it originated. /// data is extra index to `FuncDecl`. 
/// Only the owner Decl is used for hashing and equality because the other @@ -4843,7 +5120,6 @@ pub const Tag = enum(u8) { const TypeValue = Key.TypeValue; const Error = Key.Error; const EnumTag = Key.EnumTag; - const ExternFunc = Key.ExternFunc; const Union = Key.Union; const TypePointer = Key.PtrType; @@ -4877,10 +5153,10 @@ pub const Tag = enum(u8) { .undef => unreachable, .simple_value => unreachable, - .ptr_decl => PtrDecl, + .ptr_nav => PtrNav, .ptr_comptime_alloc => PtrComptimeAlloc, - .ptr_anon_decl => PtrAnonDecl, - .ptr_anon_decl_aligned => PtrAnonDeclAligned, + .ptr_uav => PtrUav, + .ptr_uav_aligned => PtrUavAligned, .ptr_comptime_field => PtrComptimeField, .ptr_int => PtrInt, .ptr_eu_payload => PtrBase, @@ -4916,7 +5192,7 @@ pub const Tag = enum(u8) { .float_c_longdouble_f128 => unreachable, .float_comptime_float => unreachable, .variable => Variable, - .extern_func => ExternFunc, + .@"extern" => Extern, .func_decl => FuncDecl, .func_instance => FuncInstance, .func_coerced => FuncCoerced, @@ -4933,21 +5209,29 @@ pub const Tag = enum(u8) { ty: Index, /// May be `none`. init: Index, - decl: DeclIndex, + owner_nav: Nav.Index, /// Library name if specified. /// For example `extern "c" var stderrp = ...` would have 'c' as library name. lib_name: OptionalNullTerminatedString, flags: Flags, pub const Flags = packed struct(u32) { - is_extern: bool, is_const: bool, is_threadlocal: bool, is_weak_linkage: bool, - _: u28 = 0, + _: u29 = 0, }; }; + pub const Extern = struct { + // name, alignment, addrspace come from `owner_nav`. + ty: Index, + lib_name: OptionalNullTerminatedString, + flags: Variable.Flags, + owner_nav: Nav.Index, + zir_index: TrackedInst.Index, + }; + /// Trailing: /// 0. element: Index for each len /// len is determined by the aggregate type. @@ -4962,7 +5246,7 @@ pub const Tag = enum(u8) { /// A `none` value marks that the inferred error set is not resolved yet. 
pub const FuncDecl = struct { analysis: FuncAnalysis, - owner_decl: DeclIndex, + owner_nav: Nav.Index, ty: Index, zir_body_inst: TrackedInst.Index, lbrace_line: u32, @@ -4979,7 +5263,7 @@ pub const Tag = enum(u8) { pub const FuncInstance = struct { analysis: FuncAnalysis, // Needed by the linker for codegen. Not part of hashing or equality. - owner_decl: DeclIndex, + owner_nav: Nav.Index, ty: Index, branch_quota: u32, /// Points to a `FuncDecl`. @@ -5029,6 +5313,7 @@ pub const Tag = enum(u8) { /// 3. field type: Index for each field; declaration order /// 4. field align: Alignment for each field; declaration order pub const TypeUnion = struct { + name: NullTerminatedString, flags: Flags, /// This could be provided through the tag type, but it is more convenient /// to store it directly. This is also necessary for `dumpStatsFallible` to @@ -5038,7 +5323,7 @@ pub const Tag = enum(u8) { size: u32, /// Only valid after .have_layout padding: u32, - decl: DeclIndex, + cau: Cau.Index, namespace: OptionalNamespaceIndex, /// The enum that provides the list of field names and values. tag_ty: Index, @@ -5068,7 +5353,8 @@ pub const Tag = enum(u8) { /// 4. name: NullTerminatedString for each fields_len /// 5. init: Index for each fields_len // if tag is type_struct_packed_inits pub const TypeStructPacked = struct { - decl: DeclIndex, + name: NullTerminatedString, + cau: Cau.Index, zir_index: TrackedInst.Index, fields_len: u32, namespace: OptionalNamespaceIndex, @@ -5120,7 +5406,8 @@ pub const Tag = enum(u8) { /// field_index: RuntimeOrder // for each field in runtime order /// 10. field_offset: u32 // for each field in declared order, undef until layout_resolved pub const TypeStruct = struct { - decl: DeclIndex, + name: NullTerminatedString, + cau: Cau.Index, zir_index: TrackedInst.Index, fields_len: u32, flags: Flags, @@ -5164,8 +5451,7 @@ pub const Tag = enum(u8) { /// Trailing: /// 0. 
capture: CaptureValue // for each `captures_len` pub const TypeOpaque = struct { - /// The opaque's owner Decl. - decl: DeclIndex, + name: NullTerminatedString, /// Contains the declarations inside this opaque. namespace: OptionalNamespaceIndex, /// The index of the `opaque_decl` instruction. @@ -5188,29 +5474,19 @@ pub const FuncAnalysis = packed struct(u32) { inferred_error_set: bool, disable_instrumentation: bool, - _: u13 = 0, + _: u19 = 0, - pub const State = enum(u8) { - /// This function has not yet undergone analysis, because we have not - /// seen a potential runtime call. It may be analyzed in future. - none, - /// Analysis for this function has been queued, but not yet completed. + pub const State = enum(u2) { + /// The runtime function has never been referenced. + /// As such, it has never been analyzed, nor is it queued for analysis. + unreferenced, + /// The runtime function has been referenced, but has not yet been analyzed. + /// Its semantic analysis is queued. queued, - /// This function intentionally only has ZIR generated because it is marked - /// inline, which means no runtime version of the function will be generated. - inline_only, - in_progress, - /// There will be a corresponding ErrorMsg in Zcu.failed_decls - sema_failure, - /// This function might be OK but it depends on another Decl which did not - /// successfully complete semantic analysis. - dependency_failure, - /// There will be a corresponding ErrorMsg in Zcu.failed_decls. - /// Indicates that semantic analysis succeeded, but code generation for - /// this function failed. - codegen_failure, - /// Semantic analysis and code generation of this function succeeded. - success, + /// The runtime function has been (or is currently being) semantically analyzed. + /// To know if analysis succeeded, consult `zcu.[transitive_]failed_analysis`. + /// To know if analysis is up-to-date, consult `zcu.[potentially_]outdated`. 
+ analyzed, }; }; @@ -5477,13 +5753,13 @@ pub const Array = struct { /// Trailing: /// 0. owner_union: Index // if `zir_index == .none` -/// 1. capture: CaptureValue // for each `captures_len` -/// 2. type_hash: PackedU64 // if reified (`captures_len == std.math.maxInt(u32)`) -/// 3. field name: NullTerminatedString for each fields_len; declaration order -/// 4. tag value: Index for each fields_len; declaration order +/// 1. cau: Cau.Index // if `zir_index != .none` +/// 2. capture: CaptureValue // for each `captures_len` +/// 3. type_hash: PackedU64 // if reified (`captures_len == std.math.maxInt(u32)`) +/// 4. field name: NullTerminatedString for each fields_len; declaration order +/// 5. tag value: Index for each fields_len; declaration order pub const EnumExplicit = struct { - /// The Decl that corresponds to the enum itself. - decl: DeclIndex, + name: NullTerminatedString, /// `std.math.maxInt(u32)` indicates this type is reified. captures_len: u32, /// This may be `none` if there are no declarations. @@ -5505,12 +5781,12 @@ pub const EnumExplicit = struct { /// Trailing: /// 0. owner_union: Index // if `zir_index == .none` -/// 1. capture: CaptureValue // for each `captures_len` -/// 2. type_hash: PackedU64 // if reified (`captures_len == std.math.maxInt(u32)`) -/// 3. field name: NullTerminatedString for each fields_len; declaration order +/// 1. cau: Cau.Index // if `zir_index != .none` +/// 2. capture: CaptureValue // for each `captures_len` +/// 3. type_hash: PackedU64 // if reified (`captures_len == std.math.maxInt(u32)`) +/// 4. field name: NullTerminatedString for each fields_len; declaration order pub const EnumAuto = struct { - /// The Decl that corresponds to the enum itself. - decl: DeclIndex, + name: NullTerminatedString, /// `std.math.maxInt(u32)` indicates this type is reified. captures_len: u32, /// This may be `none` if there are no declarations. 
@@ -5539,15 +5815,15 @@ pub const PackedU64 = packed struct(u64) { } }; -pub const PtrDecl = struct { +pub const PtrNav = struct { ty: Index, - decl: DeclIndex, + nav: Nav.Index, byte_offset_a: u32, byte_offset_b: u32, - fn init(ty: Index, decl: DeclIndex, byte_offset: u64) @This() { + fn init(ty: Index, nav: Nav.Index, byte_offset: u64) @This() { return .{ .ty = ty, - .decl = decl, + .nav = nav, .byte_offset_a = @intCast(byte_offset >> 32), .byte_offset_b = @truncate(byte_offset), }; @@ -5557,7 +5833,7 @@ pub const PtrDecl = struct { } }; -pub const PtrAnonDecl = struct { +pub const PtrUav = struct { ty: Index, val: Index, byte_offset_a: u32, @@ -5575,7 +5851,7 @@ pub const PtrAnonDecl = struct { } }; -pub const PtrAnonDeclAligned = struct { +pub const PtrUavAligned = struct { ty: Index, val: Index, /// Must be nonequal to `ty`. Only the alignment from this value is important. @@ -5805,8 +6081,9 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void { .tracked_insts = Local.TrackedInsts.empty, .files = Local.List(File).empty, .maps = Local.Maps.empty, + .caus = Local.Caus.empty, + .navs = Local.Navs.empty, - .decls = Local.Decls.empty, .namespaces = Local.Namespaces.empty, }, .mutate = .{ @@ -5819,8 +6096,9 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void { .tracked_insts = Local.ListMutate.empty, .files = Local.ListMutate.empty, .maps = Local.ListMutate.empty, + .caus = Local.ListMutate.empty, + .navs = Local.ListMutate.empty, - .decls = Local.BucketListMutate.empty, .namespaces = Local.BucketListMutate.empty, }, }); @@ -5878,8 +6156,8 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void { pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.src_hash_deps.deinit(gpa); - ip.decl_val_deps.deinit(gpa); - ip.func_ies_deps.deinit(gpa); + ip.nav_val_deps.deinit(gpa); + ip.interned_deps.deinit(gpa); ip.namespace_deps.deinit(gpa); ip.namespace_name_deps.deinit(gpa); @@ -5900,8 +6178,11 
@@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { else local.mutate.namespaces.last_bucket_len]) |*namespace| { - namespace.decls.deinit(gpa); - namespace.usingnamespace_set.deinit(gpa); + namespace.pub_decls.deinit(gpa); + namespace.priv_decls.deinit(gpa); + namespace.pub_usingnamespace.deinit(gpa); + namespace.priv_usingnamespace.deinit(gpa); + namespace.other_decls.deinit(gpa); } }; const maps = local.getMutableMaps(gpa); @@ -6082,14 +6363,14 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { if (extra.data.captures_len == std.math.maxInt(u32)) { break :ns .{ .reified = .{ .zir_index = zir_index, - .type_hash = extraData(extra_list, PackedU64, extra.end).get(), + .type_hash = extraData(extra_list, PackedU64, extra.end + 1).get(), } }; } break :ns .{ .declared = .{ .zir_index = zir_index, .captures = .{ .owned = .{ .tid = unwrapped_index.tid, - .start = extra.end, + .start = extra.end + 1, .len = extra.data.captures_len, } }, } }; @@ -6106,14 +6387,14 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { if (extra.data.captures_len == std.math.maxInt(u32)) { break :ns .{ .reified = .{ .zir_index = zir_index, - .type_hash = extraData(extra_list, PackedU64, extra.end).get(), + .type_hash = extraData(extra_list, PackedU64, extra.end + 1).get(), } }; } break :ns .{ .declared = .{ .zir_index = zir_index, .captures = .{ .owned = .{ .tid = unwrapped_index.tid, - .start = extra.end, + .start = extra.end + 1, .len = extra.data.captures_len, } }, } }; @@ -6132,24 +6413,24 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .val = extra.val, } }; }, - .ptr_decl => { - const info = extraData(unwrapped_index.getExtra(ip), PtrDecl, data); - return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .decl = info.decl }, .byte_offset = info.byteOffset() } }; + .ptr_nav => { + const info = extraData(unwrapped_index.getExtra(ip), PtrNav, data); + return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .nav = info.nav }, .byte_offset = info.byteOffset() 
} }; }, .ptr_comptime_alloc => { const info = extraData(unwrapped_index.getExtra(ip), PtrComptimeAlloc, data); return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .comptime_alloc = info.index }, .byte_offset = info.byteOffset() } }; }, - .ptr_anon_decl => { - const info = extraData(unwrapped_index.getExtra(ip), PtrAnonDecl, data); - return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .anon_decl = .{ + .ptr_uav => { + const info = extraData(unwrapped_index.getExtra(ip), PtrUav, data); + return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .uav = .{ .val = info.val, .orig_ty = info.ty, } }, .byte_offset = info.byteOffset() } }; }, - .ptr_anon_decl_aligned => { - const info = extraData(unwrapped_index.getExtra(ip), PtrAnonDeclAligned, data); - return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .anon_decl = .{ + .ptr_uav_aligned => { + const info = extraData(unwrapped_index.getExtra(ip), PtrUavAligned, data); + return .{ .ptr = .{ .ty = info.ty, .base_addr = .{ .uav = .{ .val = info.val, .orig_ty = info.orig_ty, } }, .byte_offset = info.byteOffset() } }; @@ -6293,15 +6574,28 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { return .{ .variable = .{ .ty = extra.ty, .init = extra.init, - .decl = extra.decl, + .owner_nav = extra.owner_nav, + .lib_name = extra.lib_name, + .is_threadlocal = extra.flags.is_threadlocal, + .is_weak_linkage = extra.flags.is_weak_linkage, + } }; + }, + .@"extern" => { + const extra = extraData(unwrapped_index.getExtra(ip), Tag.Extern, data); + const nav = ip.getNav(extra.owner_nav); + return .{ .@"extern" = .{ + .name = nav.name, + .ty = extra.ty, .lib_name = extra.lib_name, - .is_extern = extra.flags.is_extern, .is_const = extra.flags.is_const, .is_threadlocal = extra.flags.is_threadlocal, .is_weak_linkage = extra.flags.is_weak_linkage, + .alignment = nav.status.resolved.alignment, + .@"addrspace" = nav.status.resolved.@"addrspace", + .zir_index = extra.zir_index, + .owner_nav = extra.owner_nav, } }; }, - .extern_func => .{ 
.extern_func = extraData(unwrapped_index.getExtra(ip), Tag.ExternFunc, data) }, .func_instance => .{ .func = ip.extraFuncInstance(unwrapped_index.tid, unwrapped_index.getExtra(ip), data) }, .func_decl => .{ .func = extraFuncDecl(unwrapped_index.tid, unwrapped_index.getExtra(ip), data) }, .func_coerced => .{ .func = ip.extraFuncCoerced(unwrapped_index.getExtra(ip), data) }, @@ -6513,7 +6807,7 @@ fn extraFuncDecl(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Ke .zir_body_inst_extra_index = extra_index + std.meta.fieldIndex(P, "zir_body_inst").?, .resolved_error_set_extra_index = if (func_decl.data.analysis.inferred_error_set) func_decl.end else 0, .branch_quota_extra_index = 0, - .owner_decl = func_decl.data.owner_decl, + .owner_nav = func_decl.data.owner_nav, .zir_body_inst = func_decl.data.zir_body_inst, .lbrace_line = func_decl.data.lbrace_line, .rbrace_line = func_decl.data.rbrace_line, @@ -6528,7 +6822,7 @@ fn extraFuncInstance(ip: *const InternPool, tid: Zcu.PerThread.Id, extra: Local. const extra_items = extra.view().items(.@"0"); const analysis_extra_index = extra_index + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?; const analysis: FuncAnalysis = @bitCast(@atomicLoad(u32, &extra_items[analysis_extra_index], .unordered)); - const owner_decl: DeclIndex = @enumFromInt(extra_items[extra_index + std.meta.fieldIndex(Tag.FuncInstance, "owner_decl").?]); + const owner_nav: Nav.Index = @enumFromInt(extra_items[extra_index + std.meta.fieldIndex(Tag.FuncInstance, "owner_nav").?]); const ty: Index = @enumFromInt(extra_items[extra_index + std.meta.fieldIndex(Tag.FuncInstance, "ty").?]); const generic_owner: Index = @enumFromInt(extra_items[extra_index + std.meta.fieldIndex(Tag.FuncInstance, "generic_owner").?]); const func_decl = ip.funcDeclInfo(generic_owner); @@ -6541,7 +6835,7 @@ fn extraFuncInstance(ip: *const InternPool, tid: Zcu.PerThread.Id, extra: Local. 
.zir_body_inst_extra_index = func_decl.zir_body_inst_extra_index, .resolved_error_set_extra_index = if (analysis.inferred_error_set) end_extra_index else 0, .branch_quota_extra_index = extra_index + std.meta.fieldIndex(Tag.FuncInstance, "branch_quota").?, - .owner_decl = owner_decl, + .owner_nav = owner_nav, .zir_body_inst = func_decl.zir_body_inst, .lbrace_line = func_decl.lbrace_line, .rbrace_line = func_decl.rbrace_line, @@ -6905,7 +7199,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .enum_type => unreachable, // use getEnumType() instead .func_type => unreachable, // use getFuncType() instead - .extern_func => unreachable, // use getExternFunc() instead + .@"extern" => unreachable, // use getExtern() instead .func => unreachable, // use getFuncInstance() or getFuncDecl() instead .variable => |variable| { @@ -6916,11 +7210,10 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .data = try addExtra(extra, Tag.Variable{ .ty = variable.ty, .init = variable.init, - .decl = variable.decl, + .owner_nav = variable.owner_nav, .lib_name = variable.lib_name, .flags = .{ - .is_extern = variable.is_extern, - .is_const = variable.is_const, + .is_const = false, .is_threadlocal = variable.is_threadlocal, .is_weak_linkage = variable.is_weak_linkage, }, @@ -6945,29 +7238,29 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All const ptr_type = ip.indexToKey(ptr.ty).ptr_type; assert(ptr_type.flags.size != .Slice); items.appendAssumeCapacity(switch (ptr.base_addr) { - .decl => |decl| .{ - .tag = .ptr_decl, - .data = try addExtra(extra, PtrDecl.init(ptr.ty, decl, ptr.byte_offset)), + .nav => |nav| .{ + .tag = .ptr_nav, + .data = try addExtra(extra, PtrNav.init(ptr.ty, nav, ptr.byte_offset)), }, .comptime_alloc => |alloc_index| .{ .tag = .ptr_comptime_alloc, .data = try addExtra(extra, PtrComptimeAlloc.init(ptr.ty, alloc_index, ptr.byte_offset)), }, - .anon_decl => |anon_decl| if 
(ptrsHaveSameAlignment(ip, ptr.ty, ptr_type, anon_decl.orig_ty)) item: { - if (ptr.ty != anon_decl.orig_ty) { + .uav => |uav| if (ptrsHaveSameAlignment(ip, ptr.ty, ptr_type, uav.orig_ty)) item: { + if (ptr.ty != uav.orig_ty) { gop.cancel(); var new_key = key; - new_key.ptr.base_addr.anon_decl.orig_ty = ptr.ty; + new_key.ptr.base_addr.uav.orig_ty = ptr.ty; gop = try ip.getOrPutKey(gpa, tid, new_key); if (gop == .existing) return gop.existing; } break :item .{ - .tag = .ptr_anon_decl, - .data = try addExtra(extra, PtrAnonDecl.init(ptr.ty, anon_decl.val, ptr.byte_offset)), + .tag = .ptr_uav, + .data = try addExtra(extra, PtrUav.init(ptr.ty, uav.val, ptr.byte_offset)), }; } else .{ - .tag = .ptr_anon_decl_aligned, - .data = try addExtra(extra, PtrAnonDeclAligned.init(ptr.ty, anon_decl.val, anon_decl.orig_ty, ptr.byte_offset)), + .tag = .ptr_uav_aligned, + .data = try addExtra(extra, PtrUavAligned.init(ptr.ty, uav.val, uav.orig_ty, ptr.byte_offset)), }, .comptime_field => |field_val| item: { assert(field_val != .none); @@ -7635,7 +7928,8 @@ pub fn getUnionType( .fields_len = ini.fields_len, .size = std.math.maxInt(u32), .padding = std.math.maxInt(u32), - .decl = undefined, // set by `finish` + .name = undefined, // set by `finish` + .cau = undefined, // set by `finish` .namespace = .none, // set by `finish` .tag_ty = ini.enum_tag_ty, .zir_index = switch (ini.key) { @@ -7682,7 +7976,8 @@ pub fn getUnionType( return .{ .wip = .{ .tid = tid, .index = gop.put(), - .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeUnion, "decl").?, + .type_name_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeUnion, "name").?, + .cau_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeUnion, "cau").?, .namespace_extra_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(Tag.TypeUnion, "namespace").? 
else @@ -7693,18 +7988,44 @@ pub fn getUnionType( pub const WipNamespaceType = struct { tid: Zcu.PerThread.Id, index: Index, - decl_extra_index: u32, + type_name_extra_index: u32, + cau_extra_index: ?u32, namespace_extra_index: ?u32, - pub fn finish(wip: WipNamespaceType, ip: *InternPool, decl: DeclIndex, namespace: OptionalNamespaceIndex) Index { - const extra_items = ip.getLocalShared(wip.tid).extra.acquire().view().items(.@"0"); - extra_items[wip.decl_extra_index] = @intFromEnum(decl); + + pub fn setName( + wip: WipNamespaceType, + ip: *InternPool, + type_name: NullTerminatedString, + ) void { + const extra = ip.getLocalShared(wip.tid).extra.acquire(); + const extra_items = extra.view().items(.@"0"); + extra_items[wip.type_name_extra_index] = @intFromEnum(type_name); + } + + pub fn finish( + wip: WipNamespaceType, + ip: *InternPool, + analysis_owner: Cau.Index.Optional, + namespace: OptionalNamespaceIndex, + ) Index { + const extra = ip.getLocalShared(wip.tid).extra.acquire(); + const extra_items = extra.view().items(.@"0"); + + if (wip.cau_extra_index) |i| { + extra_items[i] = @intFromEnum(analysis_owner.unwrap().?); + } else { + assert(analysis_owner == .none); + } + if (wip.namespace_extra_index) |i| { extra_items[i] = @intFromEnum(namespace.unwrap().?); } else { assert(namespace == .none); } + return wip.index; } + pub fn cancel(wip: WipNamespaceType, ip: *InternPool, tid: Zcu.PerThread.Id) void { ip.remove(tid, wip.index); } @@ -7784,7 +8105,8 @@ pub fn getStructType( ini.fields_len + // names ini.fields_len); // inits const extra_index = addExtraAssumeCapacity(extra, Tag.TypeStructPacked{ - .decl = undefined, // set by `finish` + .name = undefined, // set by `finish` + .cau = undefined, // set by `finish` .zir_index = zir_index, .fields_len = ini.fields_len, .namespace = .none, @@ -7818,7 +8140,8 @@ pub fn getStructType( return .{ .wip = .{ .tid = tid, .index = gop.put(), - .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, 
"decl").?, + .type_name_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "name").?, + .cau_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "cau").?, .namespace_extra_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "namespace").? else @@ -7843,7 +8166,8 @@ pub fn getStructType( align_elements_len + comptime_elements_len + 2); // names_map + namespace const extra_index = addExtraAssumeCapacity(extra, Tag.TypeStruct{ - .decl = undefined, // set by `finish` + .name = undefined, // set by `finish` + .cau = undefined, // set by `finish` .zir_index = zir_index, .fields_len = ini.fields_len, .size = std.math.maxInt(u32), @@ -7908,7 +8232,8 @@ pub fn getStructType( return .{ .wip = .{ .tid = tid, .index = gop.put(), - .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStruct, "decl").?, + .type_name_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStruct, "name").?, + .cau_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeStruct, "cau").?, .namespace_extra_index = namespace_extra_index, } }; } @@ -8047,34 +8372,71 @@ pub fn getFuncType( return gop.put(); } -pub fn getExternFunc( +/// Intern an `.@"extern"`, creating a corresponding owner `Nav` if necessary. +/// This will *not* queue the extern for codegen: see `Zcu.PerThread.getExtern` for a wrapper which does. +pub fn getExtern( ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, - key: Key.ExternFunc, -) Allocator.Error!Index { - var gop = try ip.getOrPutKey(gpa, tid, .{ .extern_func = key }); + /// `key.owner_nav` is ignored. + key: Key.Extern, +) Allocator.Error!struct { + index: Index, + /// Only set if the `Nav` was newly created. 
+ new_nav: Nav.Index.Optional, +} { + var gop = try ip.getOrPutKey(gpa, tid, .{ .@"extern" = key }); defer gop.deinit(); - if (gop == .existing) return gop.existing; + if (gop == .existing) return .{ + .index = gop.existing, + .new_nav = .none, + }; const local = ip.getLocal(tid); const items = local.getMutableItems(gpa); - try items.ensureUnusedCapacity(1); const extra = local.getMutableExtra(gpa); + try items.ensureUnusedCapacity(1); + try extra.ensureUnusedCapacity(@typeInfo(Tag.Extern).Struct.fields.len); + try local.getMutableNavs(gpa).ensureUnusedCapacity(1); - const prev_extra_len = extra.mutate.len; - const extra_index = try addExtra(extra, @as(Tag.ExternFunc, key)); - errdefer extra.mutate.len = prev_extra_len; + // Predict the index the `@"extern" will live at, so we can construct the owner `Nav` before releasing the shard's mutex. + const extern_index = Index.Unwrapped.wrap(.{ + .tid = tid, + .index = items.mutate.len, + }, ip); + const owner_nav = ip.createNav(gpa, tid, .{ + .name = key.name, + .fqn = key.name, + .val = extern_index, + .alignment = key.alignment, + .@"linksection" = .none, + .@"addrspace" = key.@"addrspace", + }) catch unreachable; // capacity asserted above + const extra_index = addExtraAssumeCapacity(extra, Tag.Extern{ + .ty = key.ty, + .lib_name = key.lib_name, + .flags = .{ + .is_const = key.is_const, + .is_threadlocal = key.is_threadlocal, + .is_weak_linkage = key.is_weak_linkage, + }, + .zir_index = key.zir_index, + .owner_nav = owner_nav, + }); items.appendAssumeCapacity(.{ - .tag = .extern_func, + .tag = .@"extern", .data = extra_index, }); - errdefer items.mutate.len -= 1; - return gop.put(); + assert(gop.put() == extern_index); + + return .{ + .index = extern_index, + .new_nav = owner_nav.toOptional(), + }; } pub const GetFuncDeclKey = struct { - owner_decl: DeclIndex, + owner_nav: Nav.Index, ty: Index, zir_body_inst: TrackedInst.Index, lbrace_line: u32, @@ -8105,7 +8467,7 @@ pub fn getFuncDecl( const func_decl_extra_index = 
addExtraAssumeCapacity(extra, Tag.FuncDecl{ .analysis = .{ - .state = if (key.cc == .Inline) .inline_only else .none, + .state = .unreferenced, .is_cold = false, .is_noinline = key.is_noinline, .calls_or_awaits_errorable_fn = false, @@ -8113,7 +8475,7 @@ pub fn getFuncDecl( .inferred_error_set = false, .disable_instrumentation = false, }, - .owner_decl = key.owner_decl, + .owner_nav = key.owner_nav, .ty = key.ty, .zir_body_inst = key.zir_body_inst, .lbrace_line = key.lbrace_line, @@ -8140,7 +8502,7 @@ pub fn getFuncDecl( } pub const GetFuncDeclIesKey = struct { - owner_decl: DeclIndex, + owner_nav: Nav.Index, param_types: []Index, noalias_bits: u32, comptime_bits: u32, @@ -8209,7 +8571,7 @@ pub fn getFuncDeclIes( const func_decl_extra_index = addExtraAssumeCapacity(extra, Tag.FuncDecl{ .analysis = .{ - .state = if (key.cc == .Inline) .inline_only else .none, + .state = .unreferenced, .is_cold = false, .is_noinline = key.is_noinline, .calls_or_awaits_errorable_fn = false, @@ -8217,7 +8579,7 @@ pub fn getFuncDeclIes( .inferred_error_set = true, .disable_instrumentation = false, }, - .owner_decl = key.owner_decl, + .owner_nav = key.owner_nav, .ty = func_ty, .zir_body_inst = key.zir_body_inst, .lbrace_line = key.lbrace_line, @@ -8401,7 +8763,7 @@ pub fn getFuncInstance( const func_extra_index = addExtraAssumeCapacity(extra, Tag.FuncInstance{ .analysis = .{ - .state = if (arg.cc == .Inline) .inline_only else .none, + .state = .unreferenced, .is_cold = false, .is_noinline = arg.is_noinline, .calls_or_awaits_errorable_fn = false, @@ -8409,9 +8771,9 @@ pub fn getFuncInstance( .inferred_error_set = false, .disable_instrumentation = false, }, - // This is populated after we create the Decl below. It is not read + // This is populated after we create the Nav below. It is not read // by equality or hashing functions. 
- .owner_decl = undefined, + .owner_nav = undefined, .ty = func_ty, .branch_quota = 0, .generic_owner = generic_owner, @@ -8501,7 +8863,7 @@ pub fn getFuncInstanceIes( const func_extra_index = addExtraAssumeCapacity(extra, Tag.FuncInstance{ .analysis = .{ - .state = if (arg.cc == .Inline) .inline_only else .none, + .state = .unreferenced, .is_cold = false, .is_noinline = arg.is_noinline, .calls_or_awaits_errorable_fn = false, @@ -8509,9 +8871,9 @@ pub fn getFuncInstanceIes( .inferred_error_set = true, .disable_instrumentation = false, }, - // This is populated after we create the Decl below. It is not read + // This is populated after we create the Nav below. It is not read // by equality or hashing functions. - .owner_decl = undefined, + .owner_nav = undefined, .ty = func_ty, .branch_quota = 0, .generic_owner = generic_owner, @@ -8617,37 +8979,26 @@ fn finishFuncInstance( alignment: Alignment, section: OptionalNullTerminatedString, ) Allocator.Error!void { - const fn_owner_decl = ip.declPtr(ip.funcDeclOwner(generic_owner)); - const decl_index = try ip.createDecl(gpa, tid, .{ - .name = undefined, - .fqn = undefined, - .src_namespace = fn_owner_decl.src_namespace, - .has_tv = true, - .owns_tv = true, - .val = @import("Value.zig").fromInterned(func_index), + const fn_owner_nav = ip.getNav(ip.funcDeclInfo(generic_owner).owner_nav); + const fn_namespace = ip.getCau(fn_owner_nav.analysis_owner.unwrap().?).namespace; + + // TODO: improve this name + const nav_name = try ip.getOrPutStringFmt(gpa, tid, "{}__anon_{d}", .{ + fn_owner_nav.name.fmt(ip), @intFromEnum(func_index), + }, .no_embedded_nulls); + const nav_index = try ip.createNav(gpa, tid, .{ + .name = nav_name, + .fqn = try ip.namespacePtr(fn_namespace).internFullyQualifiedName(ip, gpa, tid, nav_name), + .val = func_index, .alignment = alignment, .@"linksection" = section, - .@"addrspace" = fn_owner_decl.@"addrspace", - .analysis = .complete, - .zir_decl_index = fn_owner_decl.zir_decl_index, - .is_pub = 
fn_owner_decl.is_pub, - .is_exported = fn_owner_decl.is_exported, - .kind = .anon, + .@"addrspace" = fn_owner_nav.status.resolved.@"addrspace", }); - errdefer ip.destroyDecl(tid, decl_index); - // Populate the owner_decl field which was left undefined until now. + // Populate the owner_nav field which was left undefined until now. extra.view().items(.@"0")[ - func_extra_index + std.meta.fieldIndex(Tag.FuncInstance, "owner_decl").? - ] = @intFromEnum(decl_index); - - // TODO: improve this name - const decl = ip.declPtr(decl_index); - decl.name = try ip.getOrPutStringFmt(gpa, tid, "{}__anon_{d}", .{ - fn_owner_decl.name.fmt(ip), @intFromEnum(decl_index), - }, .no_embedded_nulls); - decl.fqn = try ip.namespacePtr(fn_owner_decl.src_namespace) - .internFullyQualifiedName(ip, gpa, tid, decl.name); + func_extra_index + std.meta.fieldIndex(Tag.FuncInstance, "owner_nav").? + ] = @intFromEnum(nav_index); } pub const EnumTypeInit = struct { @@ -8671,23 +9022,36 @@ pub const WipEnumType = struct { tid: Zcu.PerThread.Id, index: Index, tag_ty_index: u32, - decl_index: u32, - namespace_index: ?u32, + type_name_extra_index: u32, + cau_extra_index: u32, + namespace_extra_index: ?u32, names_map: MapIndex, names_start: u32, values_map: OptionalMapIndex, values_start: u32, + pub fn setName( + wip: WipEnumType, + ip: *InternPool, + type_name: NullTerminatedString, + ) void { + const extra = ip.getLocalShared(wip.tid).extra.acquire(); + const extra_items = extra.view().items(.@"0"); + extra_items[wip.type_name_extra_index] = @intFromEnum(type_name); + } + pub fn prepare( wip: WipEnumType, ip: *InternPool, - decl: DeclIndex, + analysis_owner: Cau.Index, namespace: OptionalNamespaceIndex, ) void { const extra = ip.getLocalShared(wip.tid).extra.acquire(); const extra_items = extra.view().items(.@"0"); - extra_items[wip.decl_index] = @intFromEnum(decl); - if (wip.namespace_index) |i| { + + extra_items[wip.cau_extra_index] = @intFromEnum(analysis_owner); + + if (wip.namespace_extra_index) 
|i| { extra_items[i] = @intFromEnum(namespace.unwrap().?); } else { assert(namespace == .none); @@ -8780,10 +9144,11 @@ pub fn getEnumType( .reified => 2, // type_hash: PackedU64 } + // zig fmt: on + 1 + // cau ini.fields_len); // field types const extra_index = addExtraAssumeCapacity(extra, EnumAuto{ - .decl = undefined, // set by `prepare` + .name = undefined, // set by `prepare` .captures_len = switch (ini.key) { .declared => |d| @intCast(d.captures.len), .reified => std.math.maxInt(u32), @@ -8800,6 +9165,8 @@ pub fn getEnumType( .tag = .type_enum_auto, .data = extra_index, }); + const cau_extra_index = extra.view().len; + extra.appendAssumeCapacity(undefined); // `cau` will be set by `finish` switch (ini.key) { .declared => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}), .reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)), @@ -8810,8 +9177,9 @@ pub fn getEnumType( .tid = tid, .index = gop.put(), .tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?, - .decl_index = extra_index + std.meta.fieldIndex(EnumAuto, "decl").?, - .namespace_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(EnumAuto, "namespace").? else null, + .type_name_extra_index = extra_index + std.meta.fieldIndex(EnumAuto, "name").?, + .cau_extra_index = @intCast(cau_extra_index), + .namespace_extra_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(EnumAuto, "namespace").? 
else null, .names_map = names_map, .names_start = @intCast(names_start), .values_map = .none, @@ -8835,11 +9203,12 @@ pub fn getEnumType( .reified => 2, // type_hash: PackedU64 } + // zig fmt: on + 1 + // cau ini.fields_len + // field types ini.fields_len * @intFromBool(ini.has_values)); // field values const extra_index = addExtraAssumeCapacity(extra, EnumExplicit{ - .decl = undefined, // set by `prepare` + .name = undefined, // set by `prepare` .captures_len = switch (ini.key) { .declared => |d| @intCast(d.captures.len), .reified => std.math.maxInt(u32), @@ -8861,6 +9230,8 @@ pub fn getEnumType( }, .data = extra_index, }); + const cau_extra_index = extra.view().len; + extra.appendAssumeCapacity(undefined); // `cau` will be set by `finish` switch (ini.key) { .declared => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}), .reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)), @@ -8874,9 +9245,10 @@ pub fn getEnumType( return .{ .wip = .{ .tid = tid, .index = gop.put(), - .tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?, - .decl_index = extra_index + std.meta.fieldIndex(EnumAuto, "decl").?, - .namespace_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(EnumAuto, "namespace").? else null, + .tag_ty_index = extra_index + std.meta.fieldIndex(EnumExplicit, "int_tag_type").?, + .type_name_extra_index = extra_index + std.meta.fieldIndex(EnumExplicit, "name").?, + .cau_extra_index = @intCast(cau_extra_index), + .namespace_extra_index = if (ini.has_namespace) extra_index + std.meta.fieldIndex(EnumExplicit, "namespace").? 
else null, .names_map = names_map, .names_start = @intCast(names_start), .values_map = values_map, @@ -8887,7 +9259,7 @@ pub fn getEnumType( } const GeneratedTagEnumTypeInit = struct { - decl: DeclIndex, + name: NullTerminatedString, owner_union_ty: Index, tag_ty: Index, names: []const NullTerminatedString, @@ -8928,7 +9300,7 @@ pub fn getGeneratedTagEnumType( items.appendAssumeCapacity(.{ .tag = .type_enum_auto, .data = addExtraAssumeCapacity(extra, EnumAuto{ - .decl = ini.decl, + .name = ini.name, .captures_len = 0, .namespace = .none, .int_tag_type = ini.tag_ty, @@ -8961,7 +9333,7 @@ pub fn getGeneratedTagEnumType( .auto => unreachable, }, .data = addExtraAssumeCapacity(extra, EnumExplicit{ - .decl = ini.decl, + .name = ini.name, .captures_len = 0, .namespace = .none, .int_tag_type = ini.tag_ty, @@ -9034,7 +9406,7 @@ pub fn getOpaqueType( .reified => 0, }); const extra_index = addExtraAssumeCapacity(extra, Tag.TypeOpaque{ - .decl = undefined, // set by `finish` + .name = undefined, // set by `finish` .namespace = .none, .zir_index = switch (ini.key) { inline else => |x| x.zir_index, @@ -9052,15 +9424,18 @@ pub fn getOpaqueType( .declared => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}), .reified => {}, } - return .{ .wip = .{ - .tid = tid, - .index = gop.put(), - .decl_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeOpaque, "decl").?, - .namespace_extra_index = if (ini.has_namespace) - extra_index + std.meta.fieldIndex(Tag.TypeOpaque, "namespace").? - else - null, - } }; + return .{ + .wip = .{ + .tid = tid, + .index = gop.put(), + .type_name_extra_index = extra_index + std.meta.fieldIndex(Tag.TypeOpaque, "name").?, + .cau_extra_index = null, // opaques do not undergo type resolution + .namespace_extra_index = if (ini.has_namespace) + extra_index + std.meta.fieldIndex(Tag.TypeOpaque, "namespace").? 
+ else + null, + }, + }; } pub fn getIfExists(ip: *const InternPool, key: Key) ?Index { @@ -9181,7 +9556,8 @@ fn addExtraAssumeCapacity(extra: Local.Extra.Mutable, item: anytype) u32 { inline for (@typeInfo(@TypeOf(item)).Struct.fields) |field| { extra.appendAssumeCapacity(.{switch (field.type) { Index, - DeclIndex, + Cau.Index, + Nav.Index, NamespaceIndex, OptionalNamespaceIndex, MapIndex, @@ -9244,7 +9620,8 @@ fn extraDataTrail(extra: Local.Extra, comptime T: type, index: u32) struct { dat const extra_item = extra_items[extra_index]; @field(result, field.name) = switch (field.type) { Index, - DeclIndex, + Cau.Index, + Nav.Index, NamespaceIndex, OptionalNamespaceIndex, MapIndex, @@ -9436,12 +9813,6 @@ pub fn getCoerced( switch (ip.indexToKey(val)) { .undef => return ip.get(gpa, tid, .{ .undef = new_ty }), - .extern_func => |extern_func| if (ip.isFunctionType(new_ty)) - return ip.getExternFunc(gpa, tid, .{ - .ty = new_ty, - .decl = extern_func.decl, - .lib_name = extern_func.lib_name, - }), .func => unreachable, .int => |int| switch (ip.indexToKey(new_ty)) { @@ -9858,27 +10229,23 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { var items_len: usize = 0; var extra_len: usize = 0; var limbs_len: usize = 0; - var decls_len: usize = 0; for (ip.locals) |*local| { items_len += local.mutate.items.len; extra_len += local.mutate.extra.len; limbs_len += local.mutate.limbs.len; - decls_len += local.mutate.decls.buckets_list.len; } const items_size = (1 + 4) * items_len; const extra_size = 4 * extra_len; const limbs_size = 8 * limbs_len; - const decls_size = @sizeOf(Zcu.Decl) * decls_len; // TODO: map overhead size is not taken into account - const total_size = @sizeOf(InternPool) + items_size + extra_size + limbs_size + decls_size; + const total_size = @sizeOf(InternPool) + items_size + extra_size + limbs_size; std.debug.print( \\InternPool size: {d} bytes \\ {d} items: {d} bytes \\ {d} extra: {d} bytes \\ {d} limbs: {d} bytes - \\ {d} decls: 
{d} bytes \\ , .{ total_size, @@ -9888,8 +10255,6 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { extra_size, limbs_len, limbs_size, - decls_len, - decls_size, }); const TagStats = struct { @@ -10034,10 +10399,10 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { .undef => 0, .simple_type => 0, .simple_value => 0, - .ptr_decl => @sizeOf(PtrDecl), + .ptr_nav => @sizeOf(PtrNav), .ptr_comptime_alloc => @sizeOf(PtrComptimeAlloc), - .ptr_anon_decl => @sizeOf(PtrAnonDecl), - .ptr_anon_decl_aligned => @sizeOf(PtrAnonDeclAligned), + .ptr_uav => @sizeOf(PtrUav), + .ptr_uav_aligned => @sizeOf(PtrUavAligned), .ptr_comptime_field => @sizeOf(PtrComptimeField), .ptr_int => @sizeOf(PtrInt), .ptr_eu_payload => @sizeOf(PtrBase), @@ -10092,7 +10457,7 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { .float_c_longdouble_f128 => @sizeOf(Float128), .float_comptime_float => @sizeOf(Float128), .variable => @sizeOf(Tag.Variable), - .extern_func => @sizeOf(Tag.ExternFunc), + .@"extern" => @sizeOf(Tag.Extern), .func_decl => @sizeOf(Tag.FuncDecl), .func_instance => b: { const info = extraData(extra_list, Tag.FuncInstance, data); @@ -10171,10 +10536,10 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void { .type_union, .type_function, .undef, - .ptr_decl, + .ptr_nav, .ptr_comptime_alloc, - .ptr_anon_decl, - .ptr_anon_decl_aligned, + .ptr_uav, + .ptr_uav_aligned, .ptr_comptime_field, .ptr_int, .ptr_eu_payload, @@ -10212,7 +10577,7 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void { .float_c_longdouble_f128, .float_comptime_float, .variable, - .extern_func, + .@"extern", .func_decl, .func_instance, .func_coerced, @@ -10275,13 +10640,13 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator) instances.sort(SortContext{ .values = instances.values() }); var it = instances.iterator(); while (it.next()) |entry| { - const generic_fn_owner_decl = 
ip.declPtrConst(ip.funcDeclOwner(entry.key_ptr.*)); - try w.print("{} ({}): \n", .{ generic_fn_owner_decl.name.fmt(ip), entry.value_ptr.items.len }); + const generic_fn_owner_nav = ip.getNav(ip.funcDeclInfo(entry.key_ptr.*).owner_nav); + try w.print("{} ({}): \n", .{ generic_fn_owner_nav.name.fmt(ip), entry.value_ptr.items.len }); for (entry.value_ptr.items) |index| { const unwrapped_index = index.unwrap(ip); const func = ip.extraFuncInstance(unwrapped_index.tid, unwrapped_index.getExtra(ip), unwrapped_index.getData(ip)); - const owner_decl = ip.declPtrConst(func.owner_decl); - try w.print(" {}: (", .{owner_decl.name.fmt(ip)}); + const owner_nav = ip.getNav(func.owner_nav); + try w.print(" {}: (", .{owner_nav.name.fmt(ip)}); for (func.comptime_args.get(ip)) |arg| { if (arg != .none) { const key = ip.indexToKey(arg); @@ -10295,66 +10660,183 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator) try bw.flush(); } -pub fn declPtr(ip: *InternPool, decl_index: DeclIndex) *Zcu.Decl { - return @constCast(ip.declPtrConst(decl_index)); +pub fn getCau(ip: *const InternPool, index: Cau.Index) Cau { + const unwrapped = index.unwrap(ip); + const caus = ip.getLocalShared(unwrapped.tid).caus.acquire(); + return caus.view().items(.@"0")[unwrapped.index]; +} + +pub fn getNav(ip: *const InternPool, index: Nav.Index) Nav { + const unwrapped = index.unwrap(ip); + const navs = ip.getLocalShared(unwrapped.tid).navs.acquire(); + return navs.view().get(unwrapped.index).unpack(); } -pub fn declPtrConst(ip: *const InternPool, decl_index: DeclIndex) *const Zcu.Decl { - const unwrapped_decl_index = decl_index.unwrap(ip); - const decls = ip.getLocalShared(unwrapped_decl_index.tid).decls.acquire(); - const decls_bucket = decls.view().items(.@"0")[unwrapped_decl_index.bucket_index]; - return &decls_bucket[unwrapped_decl_index.index]; +pub fn namespacePtr(ip: *InternPool, namespace_index: NamespaceIndex) *Zcu.Namespace { + const unwrapped_namespace_index = 
namespace_index.unwrap(ip); + const namespaces = ip.getLocalShared(unwrapped_namespace_index.tid).namespaces.acquire(); + const namespaces_bucket = namespaces.view().items(.@"0")[unwrapped_namespace_index.bucket_index]; + return &namespaces_bucket[unwrapped_namespace_index.index]; } -pub fn createDecl( +/// Create a `Cau` associated with the type at the given `InternPool.Index`. +pub fn createTypeCau( ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, - initialization: Zcu.Decl, -) Allocator.Error!DeclIndex { - const local = ip.getLocal(tid); - const free_list_next = local.mutate.decls.free_list; - if (free_list_next != Local.BucketListMutate.free_list_sentinel) { - const reused_decl_index: DeclIndex = @enumFromInt(free_list_next); - const reused_decl = ip.declPtr(reused_decl_index); - local.mutate.decls.free_list = @intFromEnum(@field(reused_decl, Local.decl_next_free_field)); - reused_decl.* = initialization; - return reused_decl_index; - } - const decls = local.getMutableDecls(gpa); - if (local.mutate.decls.last_bucket_len == 0) { - try decls.ensureUnusedCapacity(1); - var arena = decls.arena.promote(decls.gpa); - defer decls.arena.* = arena.state; - decls.appendAssumeCapacity(.{try arena.allocator().create( - [1 << Local.decls_bucket_width]Zcu.Decl, - )}); - } - const unwrapped_decl_index: DeclIndex.Unwrapped = .{ + zir_index: TrackedInst.Index, + namespace: NamespaceIndex, + owner_type: InternPool.Index, +) Allocator.Error!Cau.Index { + const caus = ip.getLocal(tid).getMutableCaus(gpa); + const index_unwrapped: Cau.Index.Unwrapped = .{ .tid = tid, - .bucket_index = decls.mutate.len - 1, - .index = local.mutate.decls.last_bucket_len, + .index = caus.mutate.len, }; - local.mutate.decls.last_bucket_len = - (unwrapped_decl_index.index + 1) & Local.namespaces_bucket_mask; - const decl_index = unwrapped_decl_index.wrap(ip); - ip.declPtr(decl_index).* = initialization; - return decl_index; + try caus.append(.{.{ + .zir_index = zir_index, + .namespace = 
namespace, + .owner = Cau.Owner.wrap(.{ .type = owner_type }), + }}); + return index_unwrapped.wrap(ip); } -pub fn destroyDecl(ip: *InternPool, tid: Zcu.PerThread.Id, decl_index: DeclIndex) void { - const local = ip.getLocal(tid); - const decl = ip.declPtr(decl_index); - decl.* = undefined; - @field(decl, Local.decl_next_free_field) = @enumFromInt(local.mutate.decls.free_list); - local.mutate.decls.free_list = @intFromEnum(decl_index); +/// Create a `Cau` for a `comptime` declaration. +pub fn createComptimeCau( + ip: *InternPool, + gpa: Allocator, + tid: Zcu.PerThread.Id, + zir_index: TrackedInst.Index, + namespace: NamespaceIndex, +) Allocator.Error!Cau.Index { + const caus = ip.getLocal(tid).getMutableCaus(gpa); + const index_unwrapped: Cau.Index.Unwrapped = .{ + .tid = tid, + .index = caus.mutate.len, + }; + try caus.append(.{.{ + .zir_index = zir_index, + .namespace = namespace, + .owner = Cau.Owner.wrap(.none), + }}); + return index_unwrapped.wrap(ip); } -pub fn namespacePtr(ip: *InternPool, namespace_index: NamespaceIndex) *Zcu.Namespace { - const unwrapped_namespace_index = namespace_index.unwrap(ip); - const namespaces = ip.getLocalShared(unwrapped_namespace_index.tid).namespaces.acquire(); - const namespaces_bucket = namespaces.view().items(.@"0")[unwrapped_namespace_index.bucket_index]; - return &namespaces_bucket[unwrapped_namespace_index.index]; +/// Create a `Nav` not associated with any `Cau`. +/// Since there is no analysis owner, the `Nav`'s value must be known at creation time. 
+pub fn createNav( + ip: *InternPool, + gpa: Allocator, + tid: Zcu.PerThread.Id, + opts: struct { + name: NullTerminatedString, + fqn: NullTerminatedString, + val: InternPool.Index, + alignment: Alignment, + @"linksection": OptionalNullTerminatedString, + @"addrspace": std.builtin.AddressSpace, + }, +) Allocator.Error!Nav.Index { + const navs = ip.getLocal(tid).getMutableNavs(gpa); + const index_unwrapped: Nav.Index.Unwrapped = .{ + .tid = tid, + .index = navs.mutate.len, + }; + try navs.append(Nav.pack(.{ + .name = opts.name, + .fqn = opts.fqn, + .analysis_owner = .none, + .status = .{ .resolved = .{ + .val = opts.val, + .alignment = opts.alignment, + .@"linksection" = opts.@"linksection", + .@"addrspace" = opts.@"addrspace", + } }, + .is_usingnamespace = false, + })); + return index_unwrapped.wrap(ip); +} + +/// Create a `Cau` and `Nav` which are paired. The value of the `Nav` is +/// determined by semantic analysis of the `Cau`. The value of the `Nav` +/// is initially unresolved. +pub fn createPairedCauNav( + ip: *InternPool, + gpa: Allocator, + tid: Zcu.PerThread.Id, + name: NullTerminatedString, + fqn: NullTerminatedString, + zir_index: TrackedInst.Index, + namespace: NamespaceIndex, + /// TODO: this is hacky! See `Nav.is_usingnamespace`. 
+ is_usingnamespace: bool, +) Allocator.Error!struct { Cau.Index, Nav.Index } { + const caus = ip.getLocal(tid).getMutableCaus(gpa); + const navs = ip.getLocal(tid).getMutableNavs(gpa); + + try caus.ensureUnusedCapacity(1); + try navs.ensureUnusedCapacity(1); + + const cau = Cau.Index.Unwrapped.wrap(.{ + .tid = tid, + .index = caus.mutate.len, + }, ip); + const nav = Nav.Index.Unwrapped.wrap(.{ + .tid = tid, + .index = navs.mutate.len, + }, ip); + + caus.appendAssumeCapacity(.{.{ + .zir_index = zir_index, + .namespace = namespace, + .owner = Cau.Owner.wrap(.{ .nav = nav }), + }}); + navs.appendAssumeCapacity(Nav.pack(.{ + .name = name, + .fqn = fqn, + .analysis_owner = cau.toOptional(), + .status = .unresolved, + .is_usingnamespace = is_usingnamespace, + })); + + return .{ cau, nav }; +} + +/// Resolve the value of a `Nav` with an analysis owner. +/// If its status is already `resolved`, the old value is discarded. +pub fn resolveNavValue( + ip: *InternPool, + nav: Nav.Index, + resolved: struct { + val: InternPool.Index, + alignment: Alignment, + @"linksection": OptionalNullTerminatedString, + @"addrspace": std.builtin.AddressSpace, + }, +) void { + const unwrapped = nav.unwrap(ip); + + const local = ip.getLocal(unwrapped.tid); + local.mutate.extra.mutex.lock(); + defer local.mutate.extra.mutex.unlock(); + + const navs = local.shared.navs.view(); + + const nav_analysis_owners = navs.items(.analysis_owner); + const nav_vals = navs.items(.val); + const nav_linksections = navs.items(.@"linksection"); + const nav_bits = navs.items(.bits); + + assert(nav_analysis_owners[unwrapped.index] != .none); + + @atomicStore(InternPool.Index, &nav_vals[unwrapped.index], resolved.val, .release); + @atomicStore(OptionalNullTerminatedString, &nav_linksections[unwrapped.index], resolved.@"linksection", .release); + + var bits = nav_bits[unwrapped.index]; + bits.status = .resolved; + bits.alignment = resolved.alignment; + bits.@"addrspace" = resolved.@"addrspace"; + 
@atomicStore(Nav.Repr.Bits, &nav_bits[unwrapped.index], bits, .release); } pub fn createNamespace( @@ -10404,7 +10886,7 @@ pub fn destroyNamespace( namespace.* = .{ .parent = undefined, .file_scope = undefined, - .decl_index = undefined, + .owner_type = undefined, }; @field(namespace, Local.namespace_next_free_field) = @enumFromInt(local.mutate.namespaces.free_list); @@ -10750,10 +11232,10 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { .simple_type, .simple_value => unreachable, // handled via Index above - inline .ptr_decl, + inline .ptr_nav, .ptr_comptime_alloc, - .ptr_anon_decl, - .ptr_anon_decl_aligned, + .ptr_uav, + .ptr_uav_aligned, .ptr_comptime_field, .ptr_int, .ptr_eu_payload, @@ -10770,7 +11252,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { .error_union_error, .enum_tag, .variable, - .extern_func, + .@"extern", .func_decl, .func_instance, .func_coerced, @@ -10892,14 +11374,14 @@ pub fn isVariable(ip: *const InternPool, val: Index) bool { return val.unwrap(ip).getTag(ip) == .variable; } -pub fn getBackingDecl(ip: *const InternPool, val: Index) OptionalDeclIndex { +pub fn getBackingNav(ip: *const InternPool, val: Index) Nav.Index.Optional { var base = val; while (true) { const unwrapped_base = base.unwrap(ip); const base_item = unwrapped_base.getItem(ip); switch (base_item.tag) { - .ptr_decl => return @enumFromInt(unwrapped_base.getExtra(ip).view().items(.@"0")[ - base_item.data + std.meta.fieldIndex(PtrDecl, "decl").? + .ptr_nav => return @enumFromInt(unwrapped_base.getExtra(ip).view().items(.@"0")[ + base_item.data + std.meta.fieldIndex(PtrNav, "nav").? 
]), inline .ptr_eu_payload, .ptr_opt_payload, @@ -10922,11 +11404,11 @@ pub fn getBackingAddrTag(ip: *const InternPool, val: Index) ?Key.Ptr.BaseAddr.Ta const unwrapped_base = base.unwrap(ip); const base_item = unwrapped_base.getItem(ip); switch (base_item.tag) { - .ptr_decl => return .decl, + .ptr_nav => return .nav, .ptr_comptime_alloc => return .comptime_alloc, - .ptr_anon_decl, - .ptr_anon_decl_aligned, - => return .anon_decl, + .ptr_uav, + .ptr_uav_aligned, + => return .uav, .ptr_comptime_field => return .comptime_field, .ptr_int => return .int, inline .ptr_eu_payload, @@ -11098,10 +11580,10 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois // values, not types .undef, .simple_value, - .ptr_decl, + .ptr_nav, .ptr_comptime_alloc, - .ptr_anon_decl, - .ptr_anon_decl_aligned, + .ptr_uav, + .ptr_uav_aligned, .ptr_comptime_field, .ptr_int, .ptr_eu_payload, @@ -11137,7 +11619,7 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois .float_c_longdouble_f128, .float_comptime_float, .variable, - .extern_func, + .@"extern", .func_decl, .func_instance, .func_coerced, @@ -11190,18 +11672,6 @@ pub fn funcAnalysisUnordered(ip: *const InternPool, func: Index) FuncAnalysis { return @atomicLoad(FuncAnalysis, @constCast(ip).funcAnalysisPtr(func), .unordered); } -pub fn funcSetAnalysisState(ip: *InternPool, func: Index, state: FuncAnalysis.State) void { - const unwrapped_func = func.unwrap(ip); - const extra_mutex = &ip.getLocal(unwrapped_func.tid).mutate.extra.mutex; - extra_mutex.lock(); - defer extra_mutex.unlock(); - - const analysis_ptr = ip.funcAnalysisPtr(func); - var analysis = analysis_ptr.*; - analysis.state = state; - @atomicStore(FuncAnalysis, analysis_ptr, analysis, .release); -} - pub fn funcMaxStackAlignment(ip: *InternPool, func: Index, new_stack_alignment: Alignment) void { const unwrapped_func = func.unwrap(ip); const extra_mutex = &ip.getLocal(unwrapped_func.tid).mutate.extra.mutex; @@ -11349,10 
+11819,6 @@ pub fn funcDeclInfo(ip: *const InternPool, index: Index) Key.Func { return extraFuncDecl(unwrapped_index.tid, unwrapped_index.getExtra(ip), item.data); } -pub fn funcDeclOwner(ip: *const InternPool, index: Index) DeclIndex { - return funcDeclInfo(ip, index).owner_decl; -} - pub fn funcTypeParamsLen(ip: *const InternPool, index: Index) u32 { const unwrapped_index = index.unwrap(ip); const extra_list = unwrapped_index.getExtra(ip); @@ -11409,14 +11875,6 @@ pub fn anonStructFieldsLen(ip: *const InternPool, i: Index) u32 { return @intCast(ip.indexToKey(i).anon_struct_type.types.len); } -/// Asserts the type is a struct. -pub fn structDecl(ip: *const InternPool, i: Index) OptionalDeclIndex { - return switch (ip.indexToKey(i)) { - .struct_type => |t| t.decl, - else => unreachable, - }; -} - /// Returns the already-existing field with the same name, if any. pub fn addFieldName( ip: *InternPool, @@ -11436,8 +11894,8 @@ pub fn addFieldName( return null; } -/// Used only by `get` for pointer values, and mainly intended to use `Tag.ptr_anon_decl` -/// encoding instead of `Tag.ptr_anon_decl_aligned` when possible. +/// Used only by `get` for pointer values, and mainly intended to use `Tag.ptr_uav` +/// encoding instead of `Tag.ptr_uav_aligned` when possible. 
fn ptrsHaveSameAlignment(ip: *InternPool, a_ty: Index, a_info: Key.PtrType, b_ty: Index) bool { if (a_ty == b_ty) return true; const b_info = ip.indexToKey(b_ty).ptr_type; @@ -11607,3 +12065,7 @@ pub fn getErrorValue( pub fn getErrorValueIfExists(ip: *const InternPool, name: NullTerminatedString) ?Zcu.ErrorInt { return @intFromEnum(ip.global_error_set.getErrorValueIfExists(name) orelse return null); } + +pub fn isRemoved(ip: *const InternPool, ty: Index) bool { + return ty.unwrap(ip).getTag(ip) == .removed; +} diff --git a/src/Sema.zig b/src/Sema.zig index c84692e7c512..925f77929f7f 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -16,16 +16,14 @@ air_instructions: std.MultiArrayList(Air.Inst) = .{}, air_extra: std.ArrayListUnmanaged(u32) = .{}, /// Maps ZIR to AIR. inst_map: InstMap = .{}, -/// When analyzing an inline function call, owner_decl is the Decl of the caller. -owner_decl: *Decl, -owner_decl_index: InternPool.DeclIndex, -/// For an inline or comptime function call, this will be the root parent function -/// which contains the callsite. Corresponds to `owner_decl`. -/// This could be `none`, a `func_decl`, or a `func_instance`. -owner_func_index: InternPool.Index, +/// The "owner" of a `Sema` represents the root "thing" that is being analyzed. +/// This does not change throughout the entire lifetime of a `Sema`. For instance, +/// when analyzing a runtime function body, this is always `func` of that function, +/// even if an inline/comptime function call is being analyzed. +owner: AnalUnit, /// The function this ZIR code is the body of, according to the source code. -/// This starts out the same as `owner_func_index` and then diverges in the case of -/// an inline or comptime function call. +/// This starts out the same as `sema.owner.func` if applicable, and then diverges +/// in the case of an inline or comptime function call. /// This could be `none`, a `func_decl`, or a `func_instance`. 
func_index: InternPool.Index, /// Whether the type of func_index has a calling convention of `.Naked`. @@ -48,7 +46,6 @@ branch_count: u32 = 0, /// Populated when returning `error.ComptimeBreak`. Used to communicate the /// break instruction up the stack to find the corresponding Block. comptime_break_inst: Zir.Inst.Index = undefined, -decl_val_table: std.AutoHashMapUnmanaged(InternPool.DeclIndex, Air.Inst.Ref) = .{}, /// When doing a generic function instantiation, this array collects a value /// for each parameter of the generic owner. `none` for non-comptime parameters. /// This is a separate array from `block.params` so that it can be passed @@ -79,10 +76,6 @@ no_partial_func_ty: bool = false, /// here so the values can be dropped without any cleanup. unresolved_inferred_allocs: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, InferredAlloc) = .{}, -/// This is populated when `@setAlignStack` occurs so that if there is a duplicate -/// one encountered, the conflicting source location can be shown. -prev_stack_alignment_src: ?LazySrcLoc = null, - /// While analyzing a type which has a special InternPool index, this is set to the index at which /// the struct/enum/union type created should be placed. Otherwise, it is `.none`. builtin_type_target_index: InternPool.Index = .none, @@ -177,7 +170,6 @@ const trace = @import("tracy.zig").trace; const Namespace = Module.Namespace; const CompileError = Module.CompileError; const SemaError = Module.SemaError; -const Decl = Module.Decl; const LazySrcLoc = Zcu.LazySrcLoc; const RangeSet = @import("RangeSet.zig"); const target_util = @import("target.zig"); @@ -394,7 +386,7 @@ pub const Block = struct { /// The name of the current "context" for naming namespace types. /// The interpretation of this depends on the name strategy in ZIR, but the name /// is always incorporated into the type name somehow. - /// See `Sema.createAnonymousDeclTypeNamed`. + /// See `Sema.createTypeName`. 
type_name_ctx: InternPool.NullTerminatedString, /// Create a `LazySrcLoc` based on an `Offset` from the code being analyzed in this block. @@ -440,8 +432,8 @@ pub const Block = struct { try sema.errNote(ci.src, parent, prefix ++ "it is inside a @cImport", .{}); }, .comptime_ret_ty => |rt| { - const ret_ty_src: LazySrcLoc = if (try sema.funcDeclSrc(rt.func)) |fn_decl| .{ - .base_node_inst = fn_decl.zir_decl_index.unwrap().?, + const ret_ty_src: LazySrcLoc = if (try sema.funcDeclSrcInst(rt.func)) |fn_decl_inst| .{ + .base_node_inst = fn_decl_inst, .offset = .{ .node_offset_fn_type_ret_ty = 0 }, } else rt.func_src; if (rt.return_ty.isGenericPoison()) { @@ -871,7 +863,6 @@ pub fn deinit(sema: *Sema) void { sema.air_instructions.deinit(gpa); sema.air_extra.deinit(gpa); sema.inst_map.deinit(gpa); - sema.decl_val_table.deinit(gpa); { var it = sema.post_hoc_blocks.iterator(); while (it.next()) |entry| { @@ -2170,7 +2161,7 @@ fn resolveValueResolveLazy(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value fn resolveValueIntable(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value { const val = (try sema.resolveValue(inst)) orelse return null; if (sema.pt.zcu.intern_pool.getBackingAddrTag(val.toIntern())) |addr| switch (addr) { - .decl, .anon_decl, .comptime_alloc, .comptime_field => return null, + .nav, .uav, .comptime_alloc, .comptime_field => return null, .int => {}, .eu_payload, .opt_payload, .arr_elem, .field => unreachable, }; @@ -2503,7 +2494,6 @@ pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.Error @setCold(true); const gpa = sema.gpa; const mod = sema.pt.zcu; - const ip = &mod.intern_pool; if (build_options.enable_debug_extensions and mod.comp.debug_compile_errors) { var all_references = mod.resolveReferences() catch @panic("out of memory"); @@ -2531,10 +2521,10 @@ pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.Error const use_ref_trace = if (mod.comp.reference_trace) |n| n > 0 else mod.failed_analysis.count() 
== 0; if (use_ref_trace) { - err_msg.reference_trace_root = sema.ownerUnit().toOptional(); + err_msg.reference_trace_root = sema.owner.toOptional(); } - const gop = try mod.failed_analysis.getOrPut(gpa, sema.ownerUnit()); + const gop = try mod.failed_analysis.getOrPut(gpa, sema.owner); if (gop.found_existing) { // If there are multiple errors for the same Decl, prefer the first one added. sema.err = null; @@ -2544,16 +2534,6 @@ pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.Error gop.value_ptr.* = err_msg; } - if (sema.owner_func_index != .none) { - ip.funcSetAnalysisState(sema.owner_func_index, .sema_failure); - } else { - sema.owner_decl.analysis = .sema_failure; - } - - if (sema.func_index != .none) { - ip.funcSetAnalysisState(sema.func_index, .sema_failure); - } - return error.AnalysisFail; } @@ -2662,7 +2642,8 @@ fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: us const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; - const parent_captures: InternPool.CaptureValue.Slice = zcu.namespacePtr(block.namespace).getType(zcu).getCaptures(zcu); + const parent_ty = Type.fromInterned(zcu.namespacePtr(block.namespace).owner_type); + const parent_captures: InternPool.CaptureValue.Slice = parent_ty.getCaptures(zcu); const captures = try sema.arena.alloc(InternPool.CaptureValue, captures_len); @@ -2704,8 +2685,8 @@ fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: us sema.code.nullTerminatedString(str), .no_embedded_nulls, ); - const decl = try sema.lookupIdentifier(block, LazySrcLoc.unneeded, decl_name); // TODO: could we need this src loc? - break :capture InternPool.CaptureValue.wrap(.{ .decl_val = decl }); + const nav = try sema.lookupIdentifier(block, LazySrcLoc.unneeded, decl_name); // TODO: could we need this src loc? 
+ break :capture InternPool.CaptureValue.wrap(.{ .nav_val = nav }); }, .decl_ref => |str| capture: { const decl_name = try ip.getOrPutString( @@ -2714,8 +2695,8 @@ fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: us sema.code.nullTerminatedString(str), .no_embedded_nulls, ); - const decl = try sema.lookupIdentifier(block, LazySrcLoc.unneeded, decl_name); // TODO: could we need this src loc? - break :capture InternPool.CaptureValue.wrap(.{ .decl_ref = decl }); + const nav = try sema.lookupIdentifier(block, LazySrcLoc.unneeded, decl_name); // TODO: could we need this src loc? + break :capture InternPool.CaptureValue.wrap(.{ .nav_ref = nav }); }, }; } @@ -2740,19 +2721,24 @@ fn wrapWipTy(sema: *Sema, wip_ty: anytype) @TypeOf(wip_ty) { fn maybeRemoveOutdatedType(sema: *Sema, ty: InternPool.Index) !bool { const pt = sema.pt; const zcu = pt.zcu; + const ip = &zcu.intern_pool; if (!zcu.comp.incremental) return false; - const decl_index = Type.fromInterned(ty).getOwnerDecl(zcu); - const decl_as_depender = AnalUnit.wrap(.{ .decl = decl_index }); - const was_outdated = zcu.outdated.swapRemove(decl_as_depender) or - zcu.potentially_outdated.swapRemove(decl_as_depender); + const cau_index = switch (ip.indexToKey(ty)) { + .struct_type => ip.loadStructType(ty).cau.unwrap().?, + .union_type => ip.loadUnionType(ty).cau, + .enum_type => ip.loadEnumType(ty).cau.unwrap().?, + else => unreachable, + }; + const cau_unit = AnalUnit.wrap(.{ .cau = cau_index }); + const was_outdated = zcu.outdated.swapRemove(cau_unit) or + zcu.potentially_outdated.swapRemove(cau_unit); if (!was_outdated) return false; - _ = zcu.outdated_ready.swapRemove(decl_as_depender); - zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, AnalUnit.wrap(.{ .decl = decl_index })); + _ = zcu.outdated_ready.swapRemove(cau_unit); + zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, cau_unit); zcu.intern_pool.remove(pt.tid, ty); - zcu.declPtr(decl_index).analysis = .dependency_failure; - 
try zcu.markDependeeOutdated(.{ .decl_val = decl_index }); + try zcu.markDependeeOutdated(.{ .interned = ty }); return true; } @@ -2831,73 +2817,68 @@ fn zirStructDecl( }); errdefer wip_ty.cancel(ip, pt.tid); - const new_decl_index = try sema.createAnonymousDeclTypeNamed( + wip_ty.setName(ip, try sema.createTypeName( block, - Value.fromInterned(wip_ty.index), small.name_strategy, "struct", inst, - ); - mod.declPtr(new_decl_index).owns_tv = true; - errdefer pt.abortAnonDecl(new_decl_index); - - if (pt.zcu.comp.incremental) { - try ip.addDependency( - sema.gpa, - AnalUnit.wrap(.{ .decl = new_decl_index }), - .{ .src_hash = try block.trackZir(inst) }, - ); - } + wip_ty.index, + )); // TODO: if AstGen tells us `@This` was not used in the fields, we can elide the namespace. const new_namespace_index: InternPool.OptionalNamespaceIndex = if (true or decls_len > 0) (try pt.createNamespace(.{ .parent = block.namespace.toOptional(), - .decl_index = new_decl_index, + .owner_type = wip_ty.index, .file_scope = block.getFileScopeIndex(mod), })).toOptional() else .none; errdefer if (new_namespace_index.unwrap()) |ns| pt.destroyNamespace(ns); + const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index.unwrap() orelse block.namespace, wip_ty.index); + + if (pt.zcu.comp.incremental) { + try ip.addDependency( + sema.gpa, + AnalUnit.wrap(.{ .cau = new_cau_index }), + .{ .src_hash = tracked_inst }, + ); + } + if (new_namespace_index.unwrap()) |ns| { const decls = sema.code.bodySlice(extra_index, decls_len); - try pt.scanNamespace(ns, decls, mod.declPtr(new_decl_index)); + try pt.scanNamespace(ns, decls); } - try pt.finalizeAnonDecl(new_decl_index); try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index }); - try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index })); - return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index)); + try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .cau = new_cau_index })); + try 
sema.declareDependency(.{ .interned = wip_ty.index }); + return Air.internedToRef(wip_ty.finish(ip, new_cau_index.toOptional(), new_namespace_index)); } -fn createAnonymousDeclTypeNamed( +fn createTypeName( sema: *Sema, block: *Block, - val: Value, name_strategy: Zir.Inst.NameStrategy, anon_prefix: []const u8, inst: ?Zir.Inst.Index, -) !InternPool.DeclIndex { + /// This is used purely to give the type a unique name in the `anon` case. + type_index: InternPool.Index, +) !InternPool.NullTerminatedString { const pt = sema.pt; const zcu = pt.zcu; + const gpa = zcu.gpa; const ip = &zcu.intern_pool; - const gpa = sema.gpa; - const namespace = block.namespace; - const new_decl_index = try pt.allocateNewDecl(namespace); - errdefer pt.destroyDecl(new_decl_index); switch (name_strategy) { .anon => {}, // handled after switch - .parent => { - try pt.initNewAnonDecl(new_decl_index, val, block.type_name_ctx, .none); - return new_decl_index; - }, + .parent => return block.type_name_ctx, .func => func_strat: { const fn_info = sema.code.getFnInfo(ip.funcZirBodyInst(sema.func_index).resolve(ip)); const zir_tags = sema.code.instructions.items(.tag); - var buf = std.ArrayList(u8).init(gpa); - defer buf.deinit(); + var buf: std.ArrayListUnmanaged(u8) = .{}; + defer buf.deinit(gpa); - const writer = buf.writer(); + const writer = buf.writer(gpa); try writer.print("{}(", .{block.type_name_ctx.fmt(ip)}); var arg_i: usize = 0; @@ -2931,23 +2912,18 @@ fn createAnonymousDeclTypeNamed( }; try writer.writeByte(')'); - const name = try ip.getOrPutString(gpa, pt.tid, buf.items, .no_embedded_nulls); - try pt.initNewAnonDecl(new_decl_index, val, name, .none); - return new_decl_index; + return ip.getOrPutString(gpa, pt.tid, buf.items, .no_embedded_nulls); }, .dbg_var => { + // TODO: this logic is questionable. We ideally should be traversing the `Block` rather than relying on the order of AstGen instructions. 
const ref = inst.?.toRef(); const zir_tags = sema.code.instructions.items(.tag); const zir_data = sema.code.instructions.items(.data); for (@intFromEnum(inst.?)..zir_tags.len) |i| switch (zir_tags[i]) { - .dbg_var_ptr, .dbg_var_val => { - if (zir_data[i].str_op.operand != ref) continue; - - const name = try ip.getOrPutStringFmt(gpa, pt.tid, "{}.{s}", .{ + .dbg_var_ptr, .dbg_var_val => if (zir_data[i].str_op.operand == ref) { + return ip.getOrPutStringFmt(gpa, pt.tid, "{}.{s}", .{ block.type_name_ctx.fmt(ip), zir_data[i].str_op.getStr(sema.code), }, .no_embedded_nulls); - try pt.initNewAnonDecl(new_decl_index, val, name, .none); - return new_decl_index; }, else => {}, }; @@ -2955,20 +2931,19 @@ fn createAnonymousDeclTypeNamed( }, } - // anon strat handling. + // anon strat handling // It would be neat to have "struct:line:column" but this name has // to survive incremental updates, where it may have been shifted down // or up to a different line, but unchanged, and thus not unnecessarily // semantically analyzed. - // This name is also used as the key in the parent namespace so it cannot be - // renamed. + // TODO: that would be possible, by detecting line number changes and renaming + // types appropriately. However, `@typeName` becomes a problem then. If we remove + // that builtin from the language, we can consider this. 
- const name = ip.getOrPutStringFmt(gpa, pt.tid, "{}__{s}_{d}", .{ - block.type_name_ctx.fmt(ip), anon_prefix, @intFromEnum(new_decl_index), - }, .no_embedded_nulls) catch unreachable; - try pt.initNewAnonDecl(new_decl_index, val, name, .none); - return new_decl_index; + return ip.getOrPutStringFmt(gpa, pt.tid, "{}__{s}_{d}", .{ + block.type_name_ctx.fmt(ip), anon_prefix, @intFromEnum(type_index), + }, .no_embedded_nulls); } fn zirEnumDecl( @@ -3068,60 +3043,53 @@ fn zirEnumDecl( errdefer if (!done) wip_ty.cancel(ip, pt.tid); - const new_decl_index = try sema.createAnonymousDeclTypeNamed( + const type_name = try sema.createTypeName( block, - Value.fromInterned(wip_ty.index), small.name_strategy, "enum", inst, + wip_ty.index, ); - const new_decl = mod.declPtr(new_decl_index); - new_decl.owns_tv = true; - errdefer if (!done) pt.abortAnonDecl(new_decl_index); - - if (pt.zcu.comp.incremental) { - try mod.intern_pool.addDependency( - gpa, - AnalUnit.wrap(.{ .decl = new_decl_index }), - .{ .src_hash = try block.trackZir(inst) }, - ); - } + wip_ty.setName(ip, type_name); // TODO: if AstGen tells us `@This` was not used in the fields, we can elide the namespace. 
const new_namespace_index: InternPool.OptionalNamespaceIndex = if (true or decls_len > 0) (try pt.createNamespace(.{ .parent = block.namespace.toOptional(), - .decl_index = new_decl_index, + .owner_type = wip_ty.index, .file_scope = block.getFileScopeIndex(mod), })).toOptional() else .none; errdefer if (!done) if (new_namespace_index.unwrap()) |ns| pt.destroyNamespace(ns); + const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index.unwrap() orelse block.namespace, wip_ty.index); + + if (pt.zcu.comp.incremental) { + try mod.intern_pool.addDependency( + gpa, + AnalUnit.wrap(.{ .cau = new_cau_index }), + .{ .src_hash = try block.trackZir(inst) }, + ); + } + if (new_namespace_index.unwrap()) |ns| { - try pt.scanNamespace(ns, decls, new_decl); + try pt.scanNamespace(ns, decls); } + try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .cau = new_cau_index })); + try sema.declareDependency(.{ .interned = wip_ty.index }); + // We've finished the initial construction of this type, and are about to perform analysis. - // Set the decl and namespace appropriately, and don't destroy anything on failure. - wip_ty.prepare(ip, new_decl_index, new_namespace_index); + // Set the Cau and namespace appropriately, and don't destroy anything on failure. + wip_ty.prepare(ip, new_cau_index, new_namespace_index); done = true; const int_tag_ty = ty: { // We create a block for the field type instructions because they // may need to reference Decls from inside the enum namespace. - // Within the field type, default value, and alignment expressions, the "owner decl" - // should be the enum itself. + // Within the field type, default value, and alignment expressions, the owner should be the enum's `Cau`. 
- const prev_owner_decl = sema.owner_decl; - const prev_owner_decl_index = sema.owner_decl_index; - sema.owner_decl = new_decl; - sema.owner_decl_index = new_decl_index; - defer { - sema.owner_decl = prev_owner_decl; - sema.owner_decl_index = prev_owner_decl_index; - } - - const prev_owner_func_index = sema.owner_func_index; - sema.owner_func_index = .none; - defer sema.owner_func_index = prev_owner_func_index; + const prev_owner = sema.owner; + sema.owner = AnalUnit.wrap(.{ .cau = new_cau_index }); + defer sema.owner = prev_owner; const prev_func_index = sema.func_index; sema.func_index = .none; @@ -3135,7 +3103,7 @@ fn zirEnumDecl( .inlining = null, .is_comptime = true, .src_base_inst = tracked_inst, - .type_name_ctx = new_decl.name, + .type_name_ctx = type_name, }; defer enum_block.instructions.deinit(sema.gpa); @@ -3253,7 +3221,6 @@ fn zirEnumDecl( } } - try pt.finalizeAnonDecl(new_decl_index); return Air.internedToRef(wip_ty.index); } @@ -3336,41 +3303,41 @@ fn zirUnionDecl( }); errdefer wip_ty.cancel(ip, pt.tid); - const new_decl_index = try sema.createAnonymousDeclTypeNamed( + wip_ty.setName(ip, try sema.createTypeName( block, - Value.fromInterned(wip_ty.index), small.name_strategy, "union", inst, - ); - mod.declPtr(new_decl_index).owns_tv = true; - errdefer pt.abortAnonDecl(new_decl_index); - - if (pt.zcu.comp.incremental) { - try mod.intern_pool.addDependency( - gpa, - AnalUnit.wrap(.{ .decl = new_decl_index }), - .{ .src_hash = try block.trackZir(inst) }, - ); - } + wip_ty.index, + )); // TODO: if AstGen tells us `@This` was not used in the fields, we can elide the namespace. 
const new_namespace_index: InternPool.OptionalNamespaceIndex = if (true or decls_len > 0) (try pt.createNamespace(.{ .parent = block.namespace.toOptional(), - .decl_index = new_decl_index, + .owner_type = wip_ty.index, .file_scope = block.getFileScopeIndex(mod), })).toOptional() else .none; errdefer if (new_namespace_index.unwrap()) |ns| pt.destroyNamespace(ns); + const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index.unwrap() orelse block.namespace, wip_ty.index); + + if (pt.zcu.comp.incremental) { + try mod.intern_pool.addDependency( + gpa, + AnalUnit.wrap(.{ .cau = new_cau_index }), + .{ .src_hash = try block.trackZir(inst) }, + ); + } + if (new_namespace_index.unwrap()) |ns| { const decls = sema.code.bodySlice(extra_index, decls_len); - try pt.scanNamespace(ns, decls, mod.declPtr(new_decl_index)); + try pt.scanNamespace(ns, decls); } - try pt.finalizeAnonDecl(new_decl_index); try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index }); - try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index })); - return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index)); + try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .cau = new_cau_index })); + try sema.declareDependency(.{ .interned = wip_ty.index }); + return Air.internedToRef(wip_ty.finish(ip, new_cau_index.toOptional(), new_namespace_index)); } fn zirOpaqueDecl( @@ -3418,47 +3385,33 @@ fn zirOpaqueDecl( }; // No `wrapWipTy` needed as no std.builtin types are opaque. const wip_ty = switch (try ip.getOpaqueType(gpa, pt.tid, opaque_init)) { - .existing => |ty| wip: { - if (!try sema.maybeRemoveOutdatedType(ty)) return Air.internedToRef(ty); - break :wip (try ip.getOpaqueType(gpa, pt.tid, opaque_init)).wip; - }, + // No `maybeRemoveOutdatedType` as opaque types are never outdated. 
+ .existing => |ty| return Air.internedToRef(ty), .wip => |wip| wip, }; errdefer wip_ty.cancel(ip, pt.tid); - const new_decl_index = try sema.createAnonymousDeclTypeNamed( + wip_ty.setName(ip, try sema.createTypeName( block, - Value.fromInterned(wip_ty.index), small.name_strategy, "opaque", inst, - ); - mod.declPtr(new_decl_index).owns_tv = true; - errdefer pt.abortAnonDecl(new_decl_index); - - if (pt.zcu.comp.incremental) { - try ip.addDependency( - gpa, - AnalUnit.wrap(.{ .decl = new_decl_index }), - .{ .src_hash = try block.trackZir(inst) }, - ); - } + wip_ty.index, + )); const new_namespace_index: InternPool.OptionalNamespaceIndex = if (decls_len > 0) (try pt.createNamespace(.{ .parent = block.namespace.toOptional(), - .decl_index = new_decl_index, + .owner_type = wip_ty.index, .file_scope = block.getFileScopeIndex(mod), })).toOptional() else .none; errdefer if (new_namespace_index.unwrap()) |ns| pt.destroyNamespace(ns); if (new_namespace_index.unwrap()) |ns| { const decls = sema.code.bodySlice(extra_index, decls_len); - try pt.scanNamespace(ns, decls, mod.declPtr(new_decl_index)); + try pt.scanNamespace(ns, decls); } - try pt.finalizeAnonDecl(new_decl_index); - - return Air.internedToRef(wip_ty.finish(ip, new_decl_index, new_namespace_index)); + return Air.internedToRef(wip_ty.finish(ip, .none, new_namespace_index)); } fn zirErrorSetDecl( @@ -3774,7 +3727,7 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro // might have already done our job and created an anon decl ref. switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) { .ptr => |ptr| switch (ptr.base_addr) { - .anon_decl => { + .uav => { // The comptime-ification was already done for us. // Just make sure the pointer is const. return sema.makePtrConst(block, alloc); @@ -3799,7 +3752,7 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro // Promote the constant to an anon decl. 
const new_mut_ptr = Air.internedToRef(try pt.intern(.{ .ptr = .{ .ty = alloc_ty.toIntern(), - .base_addr = .{ .anon_decl = .{ + .base_addr = .{ .uav = .{ .val = interned.toIntern(), .orig_ty = alloc_ty.toIntern(), } }, @@ -4097,7 +4050,7 @@ fn finishResolveComptimeKnownAllocPtr( } else { return try pt.intern(.{ .ptr = .{ .ty = alloc_ty.toIntern(), - .base_addr = .{ .anon_decl = .{ + .base_addr = .{ .uav = .{ .orig_ty = alloc_ty.toIntern(), .val = result_val, } }, @@ -4250,7 +4203,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com } const val = switch (mod.intern_pool.indexToKey(resolved_ptr).ptr.base_addr) { - .anon_decl => |a| a.val, + .uav => |a| a.val, .comptime_alloc => |i| val: { const alloc = sema.getComptimeAlloc(i); break :val (try alloc.val.intern(pt, sema.arena)).toIntern(); @@ -5505,22 +5458,23 @@ fn failWithBadMemberAccess( field_name: InternPool.NullTerminatedString, ) CompileError { const pt = sema.pt; - const mod = pt.zcu; - const kw_name = switch (agg_ty.zigTypeTag(mod)) { + const zcu = pt.zcu; + const ip = &zcu.intern_pool; + const kw_name = switch (agg_ty.zigTypeTag(zcu)) { .Union => "union", .Struct => "struct", .Opaque => "opaque", .Enum => "enum", else => unreachable, }; - if (agg_ty.getOwnerDeclOrNull(mod)) |some| if (mod.declIsRoot(some)) { + if (agg_ty.typeDeclInst(zcu)) |inst| if (inst.resolve(ip) == .main_struct_inst) { return sema.fail(block, field_src, "root struct of file '{}' has no member named '{}'", .{ - agg_ty.fmt(pt), field_name.fmt(&mod.intern_pool), + agg_ty.fmt(pt), field_name.fmt(ip), }); }; return sema.fail(block, field_src, "{s} '{}' has no member named '{}'", .{ - kw_name, agg_ty.fmt(pt), field_name.fmt(&mod.intern_pool), + kw_name, agg_ty.fmt(pt), field_name.fmt(ip), }); } @@ -5535,13 +5489,12 @@ fn failWithBadStructFieldAccess( const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; - const decl = zcu.declPtr(struct_type.decl.unwrap().?); const msg = msg: { const msg = try 
sema.errMsg( field_src, "no field named '{}' in struct '{}'", - .{ field_name.fmt(ip), decl.fqn.fmt(ip) }, + .{ field_name.fmt(ip), struct_type.name.fmt(ip) }, ); errdefer msg.destroy(sema.gpa); try sema.errNote(struct_ty.srcLoc(zcu), msg, "struct declared here", .{}); @@ -5562,13 +5515,12 @@ fn failWithBadUnionFieldAccess( const zcu = pt.zcu; const ip = &zcu.intern_pool; const gpa = sema.gpa; - const decl = zcu.declPtr(union_obj.decl); const msg = msg: { const msg = try sema.errMsg( field_src, "no field named '{}' in union '{}'", - .{ field_name.fmt(ip), decl.fqn.fmt(ip) }, + .{ field_name.fmt(ip), union_obj.name.fmt(ip) }, ); errdefer msg.destroy(gpa); try sema.errNote(union_ty.srcLoc(zcu), msg, "union declared here", .{}); @@ -5659,7 +5611,7 @@ fn storeToInferredAllocComptime( if (iac.is_const and !operand_val.canMutateComptimeVarState(zcu)) { iac.ptr = try pt.intern(.{ .ptr = .{ .ty = alloc_ty.toIntern(), - .base_addr = .{ .anon_decl = .{ + .base_addr = .{ .uav = .{ .val = operand_val.toIntern(), .orig_ty = alloc_ty.toIntern(), } }, @@ -5748,11 +5700,11 @@ fn addStrLit(sema: *Sema, string: InternPool.String, len: u64) CompileError!Air. 
.ty = array_ty.toIntern(), .storage = .{ .bytes = string }, } }); - return anonDeclRef(sema, val); + return sema.uavRef(val); } -fn anonDeclRef(sema: *Sema, val: InternPool.Index) CompileError!Air.Inst.Ref { - return Air.internedToRef(try refValue(sema, val)); +fn uavRef(sema: *Sema, val: InternPool.Index) CompileError!Air.Inst.Ref { + return Air.internedToRef(try sema.refValue(val)); } fn refValue(sema: *Sema, val: InternPool.Index) CompileError!InternPool.Index { @@ -5767,7 +5719,7 @@ fn refValue(sema: *Sema, val: InternPool.Index) CompileError!InternPool.Index { })).toIntern(); return pt.intern(.{ .ptr = .{ .ty = ptr_ty, - .base_addr = .{ .anon_decl = .{ + .base_addr = .{ .uav = .{ .val = val, .orig_ty = ptr_ty, } }, @@ -5866,7 +5818,7 @@ fn zirCompileLog( } try writer.print("\n", .{}); - const gop = try mod.compile_log_sources.getOrPut(sema.gpa, sema.ownerUnit()); + const gop = try mod.compile_log_sources.getOrPut(sema.gpa, sema.owner); if (!gop.found_existing) gop.value_ptr.* = .{ .base_node_inst = block.src_base_inst, .node_offset = src_node, @@ -6021,7 +5973,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr if (!comp.config.link_libc) try sema.errNote(src, msg, "libc headers not available; compilation does not link against libc", .{}); - const gop = try zcu.cimport_errors.getOrPut(gpa, sema.ownerUnit()); + const gop = try zcu.cimport_errors.getOrPut(gpa, sema.owner); if (!gop.found_existing) { gop.value_ptr.* = c_import_res.errors; c_import_res.errors = std.zig.ErrorBundle.empty; @@ -6069,13 +6021,15 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); const path_digest = zcu.filePathDigest(result.file_index); - const root_decl = zcu.fileRootDecl(result.file_index); - pt.astGenFile(result.file, path_digest, root_decl) catch |err| + const old_root_type = zcu.fileRootType(result.file_index); + 
pt.astGenFile(result.file, path_digest, old_root_type) catch |err| return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); + // TODO: register some kind of dependency on the file. + // That way, if this returns `error.AnalysisFail`, we have the dependency banked ready to + // trigger re-analysis later. try pt.ensureFileAnalyzed(result.file_index); - const file_root_decl_index = zcu.fileRootDecl(result.file_index).unwrap().?; - return sema.analyzeDeclVal(parent_block, src, file_root_decl_index); + return Air.internedToRef(zcu.fileRootType(result.file_index)); } fn zirSuspendBlock(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -6423,36 +6377,40 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void defer tracy.end(); const pt = sema.pt; - const mod = pt.zcu; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Export, inst_data.payload_index).data; const src = block.nodeOffset(inst_data.src_node); const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0); const options_src = block.builtinCallArgSrc(inst_data.src_node, 1); - const decl_name = try mod.intern_pool.getOrPutString( - mod.gpa, + const decl_name = try ip.getOrPutString( + zcu.gpa, pt.tid, sema.code.nullTerminatedString(extra.decl_name), .no_embedded_nulls, ); - const decl_index = if (extra.namespace != .none) index_blk: { + const nav_index = if (extra.namespace != .none) index_blk: { const container_ty = try sema.resolveType(block, operand_src, extra.namespace); - const container_namespace = container_ty.getNamespaceIndex(mod); + const container_namespace = container_ty.getNamespaceIndex(zcu); - const maybe_index = try sema.lookupInNamespace(block, operand_src, container_namespace, decl_name, false); - break :index_blk maybe_index orelse + const lookup = try 
sema.lookupInNamespace(block, operand_src, container_namespace, decl_name, false) orelse return sema.failWithBadMemberAccess(block, container_ty, operand_src, decl_name); + + break :index_blk lookup.nav; } else try sema.lookupIdentifier(block, operand_src, decl_name); const options = try sema.resolveExportOptions(block, options_src, extra.options); - { - try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = decl_index })); - try sema.ensureDeclAnalyzed(decl_index); - const exported_decl = mod.declPtr(decl_index); - if (exported_decl.val.getFunction(mod)) |function| { - return sema.analyzeExport(block, src, options, function.owner_decl); - } - } - try sema.analyzeExport(block, src, options, decl_index); + + try sema.ensureNavResolved(src, nav_index); + + // Make sure to export the owner Nav if applicable. + const exported_nav = switch (ip.indexToKey(ip.getNav(nav_index).status.resolved.val)) { + .variable => |v| v.owner_nav, + .@"extern" => |e| e.owner_nav, + .func => |f| f.owner_nav, + else => nav_index, + }; + try sema.analyzeExport(block, src, options, exported_nav); } fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { @@ -6460,7 +6418,8 @@ fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError defer tracy.end(); const pt = sema.pt; - const mod = pt.zcu; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.ExportValue, inst_data.payload_index).data; const src = block.nodeOffset(inst_data.src_node); @@ -6472,17 +6431,24 @@ fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const options = try sema.resolveExportOptions(block, options_src, extra.options); if (options.linkage == .internal) return; - if (operand.getFunction(mod)) |function| { - const decl_index = function.owner_decl; - return sema.analyzeExport(block, src, options, decl_index); - } - 
try sema.exports.append(mod.gpa, .{ - .opts = options, - .src = src, - .exported = .{ .value = operand.toIntern() }, - .status = .in_progress, - }); + // If the value has an owner Nav, export that instead. + const maybe_owner_nav = switch (ip.indexToKey(operand.toIntern())) { + .variable => |v| v.owner_nav, + .@"extern" => |e| e.owner_nav, + .func => |f| f.owner_nav, + else => null, + }; + if (maybe_owner_nav) |owner_nav| { + return sema.analyzeExport(block, src, options, owner_nav); + } else { + try sema.exports.append(zcu.gpa, .{ + .opts = options, + .src = src, + .exported = .{ .uav = operand.toIntern() }, + .status = .in_progress, + }); + } } pub fn analyzeExport( @@ -6490,22 +6456,22 @@ pub fn analyzeExport( block: *Block, src: LazySrcLoc, options: Module.Export.Options, - exported_decl_index: InternPool.DeclIndex, + exported_nav_index: InternPool.Nav.Index, ) !void { const gpa = sema.gpa; const pt = sema.pt; - const mod = pt.zcu; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; if (options.linkage == .internal) return; - try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = exported_decl_index })); - try sema.ensureDeclAnalyzed(exported_decl_index); - const exported_decl = mod.declPtr(exported_decl_index); - const export_ty = exported_decl.typeOf(mod); + try sema.ensureNavResolved(src, exported_nav_index); + const exported_nav = ip.getNav(exported_nav_index); + const export_ty = Type.fromInterned(exported_nav.typeOf(ip)); if (!try sema.validateExternType(export_ty, .other)) { - const msg = msg: { + return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "unable to export type '{}'", .{export_ty.fmt(pt)}); errdefer msg.destroy(gpa); @@ -6513,59 +6479,50 @@ pub fn analyzeExport( try sema.addDeclaredHereNote(msg, export_ty); break :msg msg; - }; - return sema.failWithOwnedErrorMsg(block, msg); + }); } // TODO: some backends might support re-exporting extern decls - if (exported_decl.isExtern(mod)) { + if 
(exported_nav.isExtern(ip)) { return sema.fail(block, src, "export target cannot be extern", .{}); } - try sema.maybeQueueFuncBodyAnalysis(src, exported_decl_index); + try sema.maybeQueueFuncBodyAnalysis(src, exported_nav_index); try sema.exports.append(gpa, .{ .opts = options, .src = src, - .exported = .{ .decl_index = exported_decl_index }, + .exported = .{ .nav = exported_nav_index }, .status = .in_progress, }); } fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void { const pt = sema.pt; - const mod = pt.zcu; + const zcu = pt.zcu; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const operand_src = block.builtinCallArgSrc(extra.node, 0); const src = block.nodeOffset(extra.node); const alignment = try sema.resolveAlign(block, operand_src, extra.operand); + + const func = switch (sema.owner.unwrap()) { + .func => |func| func, + .cau => return sema.fail(block, src, "@setAlignStack outside of function scope", .{}), + }; + if (alignment.order(Alignment.fromNonzeroByteUnits(256)).compare(.gt)) { return sema.fail(block, src, "attempt to @setAlignStack({d}); maximum is 256", .{ alignment.toByteUnits().?, }); } - const fn_owner_decl = mod.funcOwnerDeclPtr(sema.func_index); - switch (fn_owner_decl.typeOf(mod).fnCallingConvention(mod)) { + switch (Value.fromInterned(func).typeOf(zcu).fnCallingConvention(zcu)) { .Naked => return sema.fail(block, src, "@setAlignStack in naked function", .{}), .Inline => return sema.fail(block, src, "@setAlignStack in inline function", .{}), - else => if (block.inlining != null) { - return sema.fail(block, src, "@setAlignStack in inline call", .{}); - }, - } - - if (sema.prev_stack_alignment_src) |prev_src| { - const msg = msg: { - const msg = try sema.errMsg(src, "multiple @setAlignStack in the same function body", .{}); - errdefer msg.destroy(sema.gpa); - try sema.errNote(prev_src, msg, "other instance here", .{}); - break :msg msg; - }; - return 
sema.failWithOwnedErrorMsg(block, msg); + else => {}, } - sema.prev_stack_alignment_src = src; - mod.intern_pool.funcMaxStackAlignment(sema.func_index, alignment); + zcu.intern_pool.funcMaxStackAlignment(sema.func_index, alignment); } fn zirSetCold(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void { @@ -6577,16 +6534,24 @@ fn zirSetCold(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) const is_cold = try sema.resolveConstBool(block, operand_src, extra.operand, .{ .needed_comptime_reason = "operand to @setCold must be comptime-known", }); - if (sema.func_index == .none) return; // does nothing outside a function - ip.funcSetCold(sema.func_index, is_cold); + // TODO: should `@setCold` apply to the parent in an inline call? + // See also #20642 and friends. + const func = switch (sema.owner.unwrap()) { + .func => |func| func, + .cau => return, // does nothing outside a function + }; + ip.funcSetCold(func, is_cold); } fn zirDisableInstrumentation(sema: *Sema) CompileError!void { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; - if (sema.func_index == .none) return; // does nothing outside a function - ip.funcSetDisableInstrumentation(sema.func_index); + const func = switch (sema.owner.unwrap()) { + .func => |func| func, + .cau => return, // does nothing outside a function + }; + ip.funcSetDisableInstrumentation(func); } fn zirSetFloatMode(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void { @@ -6760,8 +6725,8 @@ fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air inst_data.get(sema.code), .no_embedded_nulls, ); - const decl_index = try sema.lookupIdentifier(block, src, decl_name); - return sema.analyzeDeclRef(src, decl_index); + const nav_index = try sema.lookupIdentifier(block, src, decl_name); + return sema.analyzeNavRef(src, nav_index); } fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ 
-6775,17 +6740,18 @@ fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air inst_data.get(sema.code), .no_embedded_nulls, ); - const decl = try sema.lookupIdentifier(block, src, decl_name); - return sema.analyzeDeclVal(block, src, decl); + const nav = try sema.lookupIdentifier(block, src, decl_name); + return sema.analyzeNavVal(block, src, nav); } -fn lookupIdentifier(sema: *Sema, block: *Block, src: LazySrcLoc, name: InternPool.NullTerminatedString) !InternPool.DeclIndex { +fn lookupIdentifier(sema: *Sema, block: *Block, src: LazySrcLoc, name: InternPool.NullTerminatedString) !InternPool.Nav.Index { const pt = sema.pt; const mod = pt.zcu; var namespace = block.namespace; while (true) { - if (try sema.lookupInNamespace(block, src, namespace.toOptional(), name, false)) |decl_index| { - return decl_index; + if (try sema.lookupInNamespace(block, src, namespace.toOptional(), name, false)) |lookup| { + assert(lookup.accessible); + return lookup.nav; } namespace = mod.namespacePtr(namespace).parent.unwrap() orelse break; } @@ -6801,66 +6767,72 @@ fn lookupInNamespace( opt_namespace_index: InternPool.OptionalNamespaceIndex, ident_name: InternPool.NullTerminatedString, observe_usingnamespace: bool, -) CompileError!?InternPool.DeclIndex { +) CompileError!?struct { + nav: InternPool.Nav.Index, + /// If `false`, the declaration is in a different file and is not `pub`. + /// We still return the declaration for better error reporting. 
+ accessible: bool, +} { const pt = sema.pt; - const mod = pt.zcu; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; const namespace_index = opt_namespace_index.unwrap() orelse return null; - const namespace = mod.namespacePtr(namespace_index); - const namespace_decl = mod.declPtr(namespace.decl_index); - if (namespace_decl.analysis == .file_failure) { - return error.AnalysisFail; - } + const namespace = zcu.namespacePtr(namespace_index); + + const adapter: Zcu.Namespace.NameAdapter = .{ .zcu = zcu }; - if (observe_usingnamespace and namespace.usingnamespace_set.count() != 0) { - const src_file = mod.namespacePtr(block.namespace).file_scope; + const src_file = zcu.namespacePtr(block.namespace).file_scope; + if (observe_usingnamespace and (namespace.pub_usingnamespace.items.len != 0 or namespace.priv_usingnamespace.items.len != 0)) { const gpa = sema.gpa; - var checked_namespaces: std.AutoArrayHashMapUnmanaged(*Namespace, bool) = .{}; + var checked_namespaces: std.AutoArrayHashMapUnmanaged(*Namespace, void) = .{}; defer checked_namespaces.deinit(gpa); // Keep track of name conflicts for error notes. - var candidates: std.ArrayListUnmanaged(InternPool.DeclIndex) = .{}; + var candidates: std.ArrayListUnmanaged(InternPool.Nav.Index) = .{}; defer candidates.deinit(gpa); - try checked_namespaces.put(gpa, namespace, namespace.file_scope == src_file); + try checked_namespaces.put(gpa, namespace, {}); var check_i: usize = 0; while (check_i < checked_namespaces.count()) : (check_i += 1) { const check_ns = checked_namespaces.keys()[check_i]; - if (check_ns.decls.getKeyAdapted(ident_name, Module.DeclAdapter{ .zcu = mod })) |decl_index| { - // Skip decls which are not marked pub, which are in a different - // file than the `a.b`/`@hasDecl` syntax. 
- const decl = mod.declPtr(decl_index); - if (decl.is_pub or (src_file == decl.getFileScopeIndex(mod) and - checked_namespaces.values()[check_i])) - { - try candidates.append(gpa, decl_index); - } - } - var it = check_ns.usingnamespace_set.iterator(); - while (it.next()) |entry| { - const sub_usingnamespace_decl_index = entry.key_ptr.*; - // Skip the decl we're currently analysing. - if (sub_usingnamespace_decl_index == sema.owner_decl_index) continue; - const sub_usingnamespace_decl = mod.declPtr(sub_usingnamespace_decl_index); - const sub_is_pub = entry.value_ptr.*; - if (!sub_is_pub and src_file != sub_usingnamespace_decl.getFileScopeIndex(mod)) { - // Skip usingnamespace decls which are not marked pub, which are in - // a different file than the `a.b`/`@hasDecl` syntax. + const Pass = enum { @"pub", priv }; + for ([2]Pass{ .@"pub", .priv }) |pass| { + if (pass == .priv and src_file != check_ns.file_scope) { continue; } - try sema.ensureDeclAnalyzed(sub_usingnamespace_decl_index); - const ns_ty = sub_usingnamespace_decl.val.toType(); - const sub_ns = mod.namespacePtrUnwrap(ns_ty.getNamespaceIndex(mod)) orelse continue; - try checked_namespaces.put(gpa, sub_ns, src_file == sub_usingnamespace_decl.getFileScopeIndex(mod)); + + const decls, const usingnamespaces = switch (pass) { + .@"pub" => .{ &check_ns.pub_decls, &check_ns.pub_usingnamespace }, + .priv => .{ &check_ns.priv_decls, &check_ns.priv_usingnamespace }, + }; + + if (decls.getKeyAdapted(ident_name, adapter)) |nav_index| { + try candidates.append(gpa, nav_index); + } + + for (usingnamespaces.items) |sub_ns_nav| { + try sema.ensureNavResolved(src, sub_ns_nav); + const sub_ns_ty = Type.fromInterned(ip.getNav(sub_ns_nav).status.resolved.val); + const sub_ns = zcu.namespacePtrUnwrap(sub_ns_ty.getNamespaceIndex(zcu)) orelse continue; + try checked_namespaces.put(gpa, sub_ns, {}); + } } } - { + ignore_self: { + const skip_nav = switch (sema.owner.unwrap()) { + .func => break :ignore_self, + .cau => |cau| switch 
(ip.getCau(cau).owner.unwrap()) { + .none, .type => break :ignore_self, + .nav => |nav| nav, + }, + }; var i: usize = 0; while (i < candidates.items.len) { - if (candidates.items[i] == sema.owner_decl_index) { + if (candidates.items[i] == skip_nav) { _ = candidates.orderedRemove(i); } else { i += 1; @@ -6870,48 +6842,50 @@ fn lookupInNamespace( switch (candidates.items.len) { 0 => {}, - 1 => { - const decl_index = candidates.items[0]; - return decl_index; - }, - else => { - const msg = msg: { - const msg = try sema.errMsg(src, "ambiguous reference", .{}); - errdefer msg.destroy(gpa); - for (candidates.items) |candidate_index| { - const candidate = mod.declPtr(candidate_index); - try sema.errNote(.{ - .base_node_inst = candidate.zir_decl_index.unwrap().?, - .offset = LazySrcLoc.Offset.nodeOffset(0), - }, msg, "declared here", .{}); - } - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(block, msg); + 1 => return .{ + .nav = candidates.items[0], + .accessible = true, }, + else => return sema.failWithOwnedErrorMsg(block, msg: { + const msg = try sema.errMsg(src, "ambiguous reference", .{}); + errdefer msg.destroy(gpa); + for (candidates.items) |candidate| { + try sema.errNote(zcu.navSrcLoc(candidate), msg, "declared here", .{}); + } + break :msg msg; + }), } - } else if (namespace.decls.getKeyAdapted(ident_name, Module.DeclAdapter{ .zcu = mod })) |decl_index| { - return decl_index; + } else if (namespace.pub_decls.getKeyAdapted(ident_name, adapter)) |nav_index| { + return .{ + .nav = nav_index, + .accessible = true, + }; + } else if (namespace.priv_decls.getKeyAdapted(ident_name, adapter)) |nav_index| { + return .{ + .nav = nav_index, + .accessible = src_file == namespace.file_scope, + }; } return null; } -fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl { +fn funcDeclSrcInst(sema: *Sema, func_inst: Air.Inst.Ref) !?InternPool.TrackedInst.Index { const pt = sema.pt; - const mod = pt.zcu; - const func_val = (try sema.resolveValue(func_inst)) orelse 
return null; - if (func_val.isUndef(mod)) return null; - const owner_decl_index = switch (mod.intern_pool.indexToKey(func_val.toIntern())) { - .extern_func => |extern_func| extern_func.decl, - .func => |func| func.owner_decl, + const zcu = pt.zcu; + const ip = &zcu.intern_pool; + const func_val = try sema.resolveValue(func_inst) orelse return null; + if (func_val.isUndef(zcu)) return null; + const nav = switch (ip.indexToKey(func_val.toIntern())) { + .@"extern" => |e| e.owner_nav, + .func => |f| f.owner_nav, .ptr => |ptr| switch (ptr.base_addr) { - .decl => |decl| if (ptr.byte_offset == 0) mod.declPtr(decl).val.getFunction(mod).?.owner_decl else return null, + .nav => |nav| if (ptr.byte_offset == 0) nav else return null, else => return null, }, else => return null, }; - return mod.declPtr(owner_decl_index); + return ip.getNav(nav).srcInst(ip); } pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref { @@ -7100,11 +7074,12 @@ fn zirCall( const call_dbg_node: Zir.Inst.Index = @enumFromInt(@intFromEnum(inst) - 1); const call_inst = try sema.analyzeCall(block, func, func_ty, callee_src, call_src, modifier, ensure_result_used, args_info, call_dbg_node, .call); - if (sema.owner_func_index == .none or - !mod.intern_pool.funcAnalysisUnordered(sema.owner_func_index).calls_or_awaits_errorable_fn) - { - // No errorable fn actually called; we have no error return trace - input_is_error = false; + switch (sema.owner.unwrap()) { + .cau => input_is_error = false, + .func => |owner_func| if (!mod.intern_pool.funcAnalysisUnordered(owner_func).calls_or_awaits_errorable_fn) { + // No errorable fn actually called; we have no error return trace + input_is_error = false; + }, } if (block.ownerModule().error_tracing and @@ -7199,7 +7174,7 @@ fn checkCallArgumentCount( return func_ty; } - const maybe_decl = try sema.funcDeclSrc(func); + const maybe_func_inst = try sema.funcDeclSrcInst(func); const member_str = if (member_fn) "member function " else ""; const 
variadic_str = if (func_ty_info.is_var_args) "at least " else ""; const msg = msg: { @@ -7215,9 +7190,9 @@ fn checkCallArgumentCount( ); errdefer msg.destroy(sema.gpa); - if (maybe_decl) |fn_decl| { + if (maybe_func_inst) |func_inst| { try sema.errNote(.{ - .base_node_inst = fn_decl.zir_decl_index.unwrap().?, + .base_node_inst = func_inst, .offset = LazySrcLoc.Offset.nodeOffset(0), }, msg, "function declared here", .{}); } @@ -7544,7 +7519,7 @@ fn analyzeCall( if (func_val.isUndef(mod)) return sema.failWithUseOfUndef(block, call_src); if (cc == .Naked) { - const maybe_decl = try sema.funcDeclSrc(func); + const maybe_func_inst = try sema.funcDeclSrcInst(func); const msg = msg: { const msg = try sema.errMsg( func_src, @@ -7553,8 +7528,8 @@ fn analyzeCall( ); errdefer msg.destroy(sema.gpa); - if (maybe_decl) |fn_decl| try sema.errNote(.{ - .base_node_inst = fn_decl.zir_decl_index.unwrap().?, + if (maybe_func_inst) |func_inst| try sema.errNote(.{ + .base_node_inst = func_inst, .offset = LazySrcLoc.Offset.nodeOffset(0), }, msg, "function declared here", .{}); break :msg msg; @@ -7654,33 +7629,32 @@ fn analyzeCall( .block_comptime_reason = comptime_reason, }); const module_fn_index = switch (mod.intern_pool.indexToKey(func_val.toIntern())) { - .extern_func => return sema.fail(block, call_src, "{s} call of extern function", .{ + .@"extern" => return sema.fail(block, call_src, "{s} call of extern function", .{ @as([]const u8, if (is_comptime_call) "comptime" else "inline"), }), .func => func_val.toIntern(), .ptr => |ptr| blk: { switch (ptr.base_addr) { - .decl => |decl| if (ptr.byte_offset == 0) { - const func_val_ptr = mod.declPtr(decl).val.toIntern(); - const intern_index = mod.intern_pool.indexToKey(func_val_ptr); - if (intern_index == .extern_func or (intern_index == .variable and intern_index.variable.is_extern)) + .nav => |nav_index| if (ptr.byte_offset == 0) { + const nav = ip.getNav(nav_index); + if (nav.isExtern(ip)) return sema.fail(block, call_src, "{s} call of 
extern function pointer", .{ - @as([]const u8, if (is_comptime_call) "comptime" else "inline"), + if (is_comptime_call) "comptime" else "inline", }); - break :blk func_val_ptr; + break :blk nav.status.resolved.val; }, else => {}, } assert(callee_ty.isPtrAtRuntime(mod)); return sema.fail(block, call_src, "{s} call of function pointer", .{ - @as([]const u8, if (is_comptime_call) "comptime" else "inline"), + if (is_comptime_call) "comptime" else "inline", }); }, else => unreachable, }; if (func_ty_info.is_var_args) { return sema.fail(block, call_src, "{s} call of variadic function", .{ - @as([]const u8, if (is_comptime_call) "comptime" else "inline"), + if (is_comptime_call) "comptime" else "inline", }); } @@ -7712,7 +7686,12 @@ fn analyzeCall( }; const module_fn = mod.funcInfo(module_fn_index); - const fn_owner_decl = mod.declPtr(module_fn.owner_decl); + + // This is not a function instance, so the function's `Nav` has a + // `Cau` -- we don't need to check `generic_owner`. + const fn_nav = ip.getNav(module_fn.owner_nav); + const fn_cau_index = fn_nav.analysis_owner.unwrap().?; + const fn_cau = ip.getCau(fn_cau_index); // We effectively want a child Sema here, but can't literally do that, because we need AIR // to be shared. InlineCallSema is a wrapper which handles this for us. While `ics` is in @@ -7720,7 +7699,7 @@ fn analyzeCall( // whenever performing an operation where the difference matters. var ics = InlineCallSema.init( sema, - fn_owner_decl.getFileScope(mod).zir, + mod.cauFileScope(fn_cau_index).zir, module_fn_index, block.error_return_trace_index, ); @@ -7729,7 +7708,8 @@ fn analyzeCall( var child_block: Block = .{ .parent = null, .sema = sema, - .namespace = fn_owner_decl.src_namespace, + // The function body exists in the same namespace as the corresponding function declaration. 
+ .namespace = fn_cau.namespace, .instructions = .{}, .label = null, .inlining = &inlining, @@ -7740,8 +7720,8 @@ fn analyzeCall( .runtime_cond = block.runtime_cond, .runtime_loop = block.runtime_loop, .runtime_index = block.runtime_index, - .src_base_inst = fn_owner_decl.zir_decl_index.unwrap().?, - .type_name_ctx = fn_owner_decl.name, + .src_base_inst = fn_cau.zir_index, + .type_name_ctx = fn_nav.fqn, }; const merges = &child_block.inlining.?.merges; @@ -7759,7 +7739,7 @@ fn analyzeCall( // comptime memory is mutated. const memoized_arg_values = try sema.arena.alloc(InternPool.Index, func_ty_info.param_types.len); - const owner_info = mod.typeToFunc(fn_owner_decl.typeOf(mod)).?; + const owner_info = mod.typeToFunc(Type.fromInterned(module_fn.ty)).?; const new_param_types = try sema.arena.alloc(InternPool.Index, owner_info.param_types.len); var new_fn_info: InternPool.GetFuncTypeKey = .{ .param_types = new_param_types, @@ -7809,9 +7789,6 @@ fn analyzeCall( _ = ics.callee(); if (!inlining.has_comptime_args) { - if (module_fn.analysisUnordered(ip).state == .sema_failure) - return error.AnalysisFail; - var block_it = block; while (block_it.inlining) |parent_inlining| { if (!parent_inlining.has_comptime_args and parent_inlining.func == module_fn_index) { @@ -7957,8 +7934,11 @@ fn analyzeCall( if (call_dbg_node) |some| try sema.zirDbgStmt(block, some); - if (sema.owner_func_index != .none and Type.fromInterned(func_ty_info.return_type).isError(mod)) { - ip.funcSetCallsOrAwaitsErrorableFn(sema.owner_func_index); + switch (sema.owner.unwrap()) { + .cau => {}, + .func => |owner_func| if (Type.fromInterned(func_ty_info.return_type).isError(mod)) { + ip.funcSetCallsOrAwaitsErrorableFn(owner_func); + }, } if (try sema.resolveValue(func)) |func_val| { @@ -7994,7 +7974,7 @@ fn analyzeCall( switch (mod.intern_pool.indexToKey(func_val.toIntern())) { .func => break :skip_safety, .ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) { - .decl => |decl| if 
(!mod.declPtr(decl).isExtern(mod)) break :skip_safety, + .nav => |nav| if (!ip.getNav(nav).isExtern(ip)) break :skip_safety, else => {}, }, else => {}, @@ -8018,18 +7998,18 @@ fn analyzeCall( fn handleTailCall(sema: *Sema, block: *Block, call_src: LazySrcLoc, func_ty: Type, result: Air.Inst.Ref) !Air.Inst.Ref { const pt = sema.pt; - const mod = pt.zcu; - const target = mod.getTarget(); - const backend = mod.comp.getZigBackend(); + const zcu = pt.zcu; + const target = zcu.getTarget(); + const backend = zcu.comp.getZigBackend(); if (!target_util.supportsTailCall(target, backend)) { return sema.fail(block, call_src, "unable to perform tail call: compiler backend '{s}' does not support tail calls on target architecture '{s}' with the selected CPU feature flags", .{ @tagName(backend), @tagName(target.cpu.arch), }); } - const func_decl = mod.funcOwnerDeclPtr(sema.owner_func_index); - if (!func_ty.eql(func_decl.typeOf(mod), mod)) { + const owner_func_ty = Type.fromInterned(zcu.funcInfo(sema.owner.unwrap().func).ty); + if (owner_func_ty.toIntern() != func_ty.toIntern()) { return sema.fail(block, call_src, "unable to perform tail call: type of function being called '{}' does not match type of calling function '{}'", .{ - func_ty.fmt(pt), func_decl.typeOf(mod).fmt(pt), + func_ty.fmt(pt), owner_func_ty.fmt(pt), }); } _ = try block.addUnOp(.ret, result); @@ -8191,7 +8171,7 @@ fn instantiateGenericCall( }); const generic_owner = switch (zcu.intern_pool.indexToKey(func_val.toIntern())) { .func => func_val.toIntern(), - .ptr => |ptr| zcu.declPtr(ptr.base_addr.decl).val.toIntern(), + .ptr => |ptr| ip.getNav(ptr.base_addr.nav).status.resolved.val, else => unreachable, }; const generic_owner_func = zcu.intern_pool.indexToKey(generic_owner).func; @@ -8207,10 +8187,10 @@ fn instantiateGenericCall( // The actual monomorphization happens via adding `func_instance` to // `InternPool`. 
- const fn_owner_decl = zcu.declPtr(generic_owner_func.owner_decl); - const namespace_index = fn_owner_decl.src_namespace; - const namespace = zcu.namespacePtr(namespace_index); - const fn_zir = namespace.fileScope(zcu).zir; + // Since we are looking at the generic owner here, it has a `Cau`. + const fn_nav = ip.getNav(generic_owner_func.owner_nav); + const fn_cau = ip.getCau(fn_nav.analysis_owner.unwrap().?); + const fn_zir = zcu.namespacePtr(fn_cau.namespace).fileScope(zcu).zir; const fn_info = fn_zir.getFnInfo(generic_owner_func.zir_body_inst.resolve(ip)); const comptime_args = try sema.arena.alloc(InternPool.Index, args_info.count()); @@ -8232,15 +8212,13 @@ fn instantiateGenericCall( // We pass the generic callsite's owner decl here because whatever `Decl` // dependencies are chased at this point should be attached to the // callsite, not the `Decl` associated with the `func_instance`. - .owner_decl = sema.owner_decl, - .owner_decl_index = sema.owner_decl_index, - .func_index = sema.owner_func_index, + .owner = sema.owner, + .func_index = sema.func_index, // This may not be known yet, since the calling convention could be generic, but there // should be no illegal instructions encountered while creating the function anyway. 
.func_is_naked = false, .fn_ret_ty = Type.void, .fn_ret_ty_ies = null, - .owner_func_index = .none, .comptime_args = comptime_args, .generic_owner = generic_owner, .generic_call_src = call_src, @@ -8253,12 +8231,12 @@ fn instantiateGenericCall( var child_block: Block = .{ .parent = null, .sema = &child_sema, - .namespace = namespace_index, + .namespace = fn_cau.namespace, .instructions = .{}, .inlining = null, .is_comptime = true, - .src_base_inst = fn_owner_decl.zir_decl_index.unwrap().?, - .type_name_ctx = fn_owner_decl.name, + .src_base_inst = fn_cau.zir_index, + .type_name_ctx = fn_nav.fqn, }; defer child_block.instructions.deinit(gpa); @@ -8421,10 +8399,11 @@ fn instantiateGenericCall( if (call_dbg_node) |some| try sema.zirDbgStmt(block, some); - if (sema.owner_func_index != .none and - Type.fromInterned(func_ty_info.return_type).isError(zcu)) - { - ip.funcSetCallsOrAwaitsErrorableFn(sema.owner_func_index); + switch (sema.owner.unwrap()) { + .cau => {}, + .func => |owner_func| if (Type.fromInterned(func_ty_info.return_type).isError(zcu)) { + ip.funcSetCallsOrAwaitsErrorableFn(owner_func); + }, } try sema.addReferenceEntry(call_src, AnalUnit.wrap(.{ .func = callee_index })); @@ -9366,10 +9345,11 @@ fn zirFunc( inferred_error_set: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; - const mod = pt.zcu; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const extra = sema.code.extraData(Zir.Inst.Func, inst_data.payload_index); - const target = mod.getTarget(); + const target = zcu.getTarget(); const ret_ty_src = block.src(.{ .node_offset_fn_type_ret_ty = inst_data.src_node }); var extra_index = extra.end; @@ -9410,11 +9390,17 @@ fn zirFunc( // the callconv based on whether it is exported. Otherwise, the callconv defaults // to `.Unspecified`. 
const cc: std.builtin.CallingConvention = if (has_body) cc: { - const fn_is_exported = if (sema.generic_owner != .none) exported: { - const generic_owner_fn = mod.funcInfo(sema.generic_owner); - const generic_owner_decl = mod.declPtr(generic_owner_fn.owner_decl); - break :exported generic_owner_decl.is_exported; - } else sema.owner_decl.is_exported; + const func_decl_cau = if (sema.generic_owner != .none) cau: { + const generic_owner_fn = zcu.funcInfo(sema.generic_owner); + // The generic owner definitely has a `Cau` for the corresponding function declaration. + const generic_owner_nav = ip.getNav(generic_owner_fn.owner_nav); + break :cau generic_owner_nav.analysis_owner.unwrap().?; + } else sema.owner.unwrap().cau; + const fn_is_exported = exported: { + const decl_inst = ip.getCau(func_decl_cau).zir_index.resolve(ip); + const zir_decl = sema.code.getDeclaration(decl_inst)[0]; + break :exported zir_decl.flags.is_export; + }; break :cc if (fn_is_exported) .C else .Unspecified; } else .Unspecified; @@ -9613,10 +9599,10 @@ fn funcCommon( is_noinline: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; - const mod = pt.zcu; + const zcu = pt.zcu; const gpa = sema.gpa; - const target = mod.getTarget(); - const ip = &mod.intern_pool; + const target = zcu.getTarget(); + const ip = &zcu.intern_pool; const ret_ty_src = block.src(.{ .node_offset_fn_type_ret_ty = src_node_offset }); const cc_src = block.src(.{ .node_offset_fn_type_cc = src_node_offset }); const func_src = block.nodeOffset(src_node_offset); @@ -9664,8 +9650,8 @@ fn funcCommon( if (this_generic and !sema.no_partial_func_ty and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved)) { return sema.fail(block, param_src, "generic parameters not allowed in function with calling convention '{s}'", .{@tagName(cc_resolved)}); } - if (!param_ty.isValidParamType(mod)) { - const opaque_str = if (param_ty.zigTypeTag(mod) == .Opaque) "opaque " else ""; + if (!param_ty.isValidParamType(zcu)) { + const opaque_str = if 
(param_ty.zigTypeTag(zcu) == .Opaque) "opaque " else ""; return sema.fail(block, param_src, "parameter of {s}type '{}' not allowed", .{ opaque_str, param_ty.fmt(pt), }); @@ -9699,7 +9685,7 @@ fn funcCommon( return sema.failWithOwnedErrorMsg(block, msg); } if (is_source_decl and !this_generic and is_noalias and - !(param_ty.zigTypeTag(mod) == .Pointer or param_ty.isPtrLikeOptional(mod))) + !(param_ty.zigTypeTag(zcu) == .Pointer or param_ty.isPtrLikeOptional(zcu))) { return sema.fail(block, param_src, "non-pointer parameter declared noalias", .{}); } @@ -9707,7 +9693,7 @@ fn funcCommon( .Interrupt => if (target.cpu.arch.isX86()) { const err_code_size = target.ptrBitWidth(); switch (i) { - 0 => if (param_ty.zigTypeTag(mod) != .Pointer) return sema.fail(block, param_src, "first parameter of function with 'Interrupt' calling convention must be a pointer type", .{}), + 0 => if (param_ty.zigTypeTag(zcu) != .Pointer) return sema.fail(block, param_src, "first parameter of function with 'Interrupt' calling convention must be a pointer type", .{}), 1 => if (param_ty.bitSize(pt) != err_code_size) return sema.fail(block, param_src, "second parameter of function with 'Interrupt' calling convention must be a {d}-bit integer", .{err_code_size}), else => return sema.fail(block, param_src, "'Interrupt' calling convention supports up to 2 parameters, found {d}", .{i + 1}), } @@ -9769,14 +9755,11 @@ fn funcCommon( ); } - // extern_func and func_decl functions take ownership of `sema.owner_decl`. 
- sema.owner_decl.@"linksection" = switch (section) { + const section_name: InternPool.OptionalNullTerminatedString = switch (section) { .generic => .none, .default => .none, - .explicit => |section_name| section_name.toOptional(), + .explicit => |name| name.toOptional(), }; - sema.owner_decl.alignment = alignment orelse .none; - sema.owner_decl.@"addrspace" = address_space orelse .generic; if (inferred_error_set) { assert(!is_extern); @@ -9784,7 +9767,7 @@ if (!ret_poison) try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src); const func_index = try ip.getFuncDeclIes(gpa, pt.tid, .{ - .owner_decl = sema.owner_decl_index, + .owner_nav = sema.getOwnerCauNav(), .param_types = param_types, .noalias_bits = noalias_bits, @@ -9804,6 +9787,13 @@ .lbrace_column = @as(u16, @truncate(src_locs.columns)), .rbrace_column = @as(u16, @truncate(src_locs.columns >> 16)), }); + // func_decl functions take ownership of the `Nav` of Sema's owner `Cau`. + ip.resolveNavValue(sema.getOwnerCauNav(), .{ + .val = func_index, + .alignment = alignment orelse .none, + .@"linksection" = section_name, + .@"addrspace" = address_space orelse .generic, + }); return finishFunc( sema, block, @@ -9846,11 +9836,20 @@ if (opt_lib_name) |lib_name| try sema.handleExternLibName(block, block.src(.{ .node_offset_lib_name = src_node_offset, }), lib_name); - const func_index = try ip.getExternFunc(gpa, pt.tid, .{ + const func_index = try pt.getExtern(.{ + .name = sema.getOwnerCauNavName(), .ty = func_ty, - .decl = sema.owner_decl_index, - .lib_name = try mod.intern_pool.getOrPutStringOpt(gpa, pt.tid, opt_lib_name, .no_embedded_nulls), + .lib_name = try ip.getOrPutStringOpt(gpa, pt.tid, opt_lib_name, .no_embedded_nulls), + .is_const = true, + .is_threadlocal = false, + .is_weak_linkage = false, + .alignment = alignment orelse .none, + .@"addrspace" = address_space orelse .generic, + .zir_index = sema.getOwnerCauDeclInst(), // `declaration`
instruction + .owner_nav = undefined, // ignored by `getExtern` }); + // Note that unlike function declaration, extern functions don't touch the + // Sema's owner Cau's owner Nav. The alignment etc were passed above. return finishFunc( sema, block, @@ -9872,7 +9871,7 @@ if (has_body) { const func_index = try ip.getFuncDecl(gpa, pt.tid, .{ - .owner_decl = sema.owner_decl_index, + .owner_nav = sema.getOwnerCauNav(), .ty = func_ty, .cc = cc, .is_noinline = is_noinline, @@ -9882,6 +9881,13 @@ .lbrace_column = @as(u16, @truncate(src_locs.columns)), .rbrace_column = @as(u16, @truncate(src_locs.columns >> 16)), }); + // func_decl functions take ownership of the `Nav` of Sema's owner `Cau`. + ip.resolveNavValue(sema.getOwnerCauNav(), .{ + .val = func_index, + .alignment = alignment orelse .none, + .@"linksection" = section_name, + .@"addrspace" = address_space orelse .generic, + }); return finishFunc( sema, block, @@ -11179,7 +11185,7 @@ const SwitchProngAnalysis = struct { return block.addStructFieldVal(spa.operand, field_index, field_ty); } } else if (capture_byref) { - return anonDeclRef(sema, item_val.toIntern()); + return sema.uavRef(item_val.toIntern()); } else { return inline_case_capture; } @@ -13947,9 +13953,8 @@ fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air } const namespace = container_type.getNamespaceIndex(mod); - if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |decl_index| { - const decl = mod.declPtr(decl_index); - if (decl.is_pub or decl.getFileScope(mod) == block.getFileScope(mod)) { + if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |lookup| { + if (lookup.accessible) { return .bool_true; } } @@ -13981,9 +13986,11 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
return sema.fail(block, operand_src, "unable to open '{s}': {s}", .{ operand, @errorName(err) }); }, }; + // TODO: register some kind of dependency on the file. + // That way, if this returns `error.AnalysisFail`, we have the dependency banked ready to + // trigger re-analysis later. try pt.ensureFileAnalyzed(result.file_index); - const file_root_decl_index = zcu.fileRootDecl(result.file_index).unwrap().?; - return sema.analyzeDeclVal(block, operand_src, file_root_decl_index); + return Air.internedToRef(zcu.fileRootType(result.file_index)); } fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -16970,7 +16977,7 @@ fn analyzeArithmetic( if (block.wantSafety() and want_safety and scalar_tag == .Int) { if (mod.backendSupportsFeature(.safety_checked_instructions)) { if (air_tag != air_tag_safe) { - _ = try sema.preparePanicId(block, .integer_overflow); + _ = try sema.preparePanicId(block, src, .integer_overflow); } return block.addBinOp(air_tag_safe, casted_lhs, casted_rhs); } else { @@ -17158,13 +17165,11 @@ fn zirAsm( if (is_volatile) { return sema.fail(block, src, "volatile keyword is redundant on module-level assembly", .{}); } - try mod.addGlobalAssembly(sema.owner_decl_index, asm_source); + try mod.addGlobalAssembly(sema.owner.unwrap().cau, asm_source); return .void_value; } - if (block.is_comptime) { - try sema.requireRuntimeBlock(block, src, null); - } + try sema.requireRuntimeBlock(block, src, null); var extra_i = extra.end; var output_type_bits = extra.data.output_type_bits; @@ -17646,18 +17651,17 @@ fn zirThis( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { + _ = extended; const pt = sema.pt; - const mod = pt.zcu; - const this_decl_index = mod.namespacePtr(block.namespace).decl_index; - const src = block.nodeOffset(@bitCast(extended.operand)); - return sema.analyzeDeclVal(block, src, this_decl_index); + const namespace = pt.zcu.namespacePtr(block.namespace); + return 
Air.internedToRef(namespace.owner_type); } fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { const pt = sema.pt; const mod = pt.zcu; const ip = &mod.intern_pool; - const captures = mod.namespacePtr(block.namespace).getType(mod).getCaptures(mod); + const captures = Type.fromInterned(mod.namespacePtr(block.namespace).owner_type).getCaptures(mod); const src_node: i32 = @bitCast(extended.operand); const src = block.nodeOffset(src_node); @@ -17665,8 +17669,8 @@ fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat const capture_ty = switch (captures.get(ip)[extended.small].unwrap()) { .@"comptime" => |index| return Air.internedToRef(index), .runtime => |index| index, - .decl_val => |decl_index| return sema.analyzeDeclVal(block, src, decl_index), - .decl_ref => |decl_index| return sema.analyzeDeclRef(src, decl_index), + .nav_val => |nav| return sema.analyzeNavVal(block, src, nav), + .nav_ref => |nav| return sema.analyzeNavRef(src, nav), }; // The comptime case is handled already above. Runtime case below. 
@@ -17764,20 +17768,19 @@ fn zirBuiltinSrc( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { - _ = block; const tracy = trace(@src()); defer tracy.end(); const pt = sema.pt; const zcu = pt.zcu; - const extra = sema.code.extraData(Zir.Inst.Src, extended.operand).data; - const fn_owner_decl = zcu.funcOwnerDeclPtr(sema.func_index); const ip = &zcu.intern_pool; + const extra = sema.code.extraData(Zir.Inst.Src, extended.operand).data; + const fn_name = ip.getNav(zcu.funcInfo(sema.func_index).owner_nav).name; const gpa = sema.gpa; - const file_scope = fn_owner_decl.getFileScope(zcu); + const file_scope = block.getFileScope(zcu); const func_name_val = v: { - const func_name_len = fn_owner_decl.name.length(ip); + const func_name_len = fn_name.length(ip); const array_ty = try pt.intern(.{ .array_type = .{ .len = func_name_len, .sentinel = .zero_u8, @@ -17787,11 +17790,11 @@ fn zirBuiltinSrc( .ty = .slice_const_u8_sentinel_0_type, .ptr = try pt.intern(.{ .ptr = .{ .ty = .manyptr_const_u8_sentinel_0_type, - .base_addr = .{ .anon_decl = .{ + .base_addr = .{ .uav = .{ .orig_ty = .slice_const_u8_sentinel_0_type, .val = try pt.intern(.{ .aggregate = .{ .ty = array_ty, - .storage = .{ .bytes = fn_owner_decl.name.toString() }, + .storage = .{ .bytes = fn_name.toString() }, } }), } }, .byte_offset = 0, @@ -17811,7 +17814,7 @@ fn zirBuiltinSrc( .ty = .slice_const_u8_sentinel_0_type, .ptr = try pt.intern(.{ .ptr = .{ .ty = .manyptr_const_u8_sentinel_0_type, - .base_addr = .{ .anon_decl = .{ + .base_addr = .{ .uav = .{ .orig_ty = .slice_const_u8_sentinel_0_type, .val = try pt.intern(.{ .aggregate = .{ .ty = array_ty, @@ -17837,7 +17840,7 @@ fn zirBuiltinSrc( .ty = .slice_const_u8_sentinel_0_type, .ptr = try pt.intern(.{ .ptr = .{ .ty = .manyptr_const_u8_sentinel_0_type, - .base_addr = .{ .anon_decl = .{ + .base_addr = .{ .uav = .{ .orig_ty = .slice_const_u8_sentinel_0_type, .val = try pt.intern(.{ .aggregate = .{ .ty = array_ty, @@ -17902,25 +17905,23 
@@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .val = .void_value, } }))), .Fn => { - const fn_info_decl_index = (try sema.namespaceLookup( + const fn_info_nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "Fn", .no_embedded_nulls), - )).?; - try sema.ensureDeclAnalyzed(fn_info_decl_index); - const fn_info_decl = mod.declPtr(fn_info_decl_index); - const fn_info_ty = fn_info_decl.val.toType(); + ) orelse @panic("std.builtin.Type is corrupt"); + try sema.ensureNavResolved(src, fn_info_nav); + const fn_info_ty = Type.fromInterned(ip.getNav(fn_info_nav).status.resolved.val); - const param_info_decl_index = (try sema.namespaceLookup( + const param_info_nav = try sema.namespaceLookup( block, src, fn_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "Param", .no_embedded_nulls), - )).?; - try sema.ensureDeclAnalyzed(param_info_decl_index); - const param_info_decl = mod.declPtr(param_info_decl_index); - const param_info_ty = param_info_decl.val.toType(); + ) orelse @panic("std.builtin.Type is corrupt"); + try sema.ensureNavResolved(src, param_info_nav); + const param_info_ty = Type.fromInterned(ip.getNav(param_info_nav).status.resolved.val); const func_ty_info = mod.typeToFunc(ty).?; const param_vals = try sema.arena.alloc(InternPool.Index, func_ty_info.param_types.len); @@ -17972,7 +17973,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ty = slice_ty, .ptr = try pt.intern(.{ .ptr = .{ .ty = manyptr_ty, - .base_addr = .{ .anon_decl = .{ + .base_addr = .{ .uav = .{ .orig_ty = manyptr_ty, .val = new_decl_val, } }, @@ -18014,15 +18015,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }))); }, .Int => { - const int_info_decl_index = (try sema.namespaceLookup( + const int_info_nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, 
pt.tid, "Int", .no_embedded_nulls), - )).?; - try sema.ensureDeclAnalyzed(int_info_decl_index); - const int_info_decl = mod.declPtr(int_info_decl_index); - const int_info_ty = int_info_decl.val.toType(); + ) orelse @panic("std.builtin.Type is corrupt"); + try sema.ensureNavResolved(src, int_info_nav); + const int_info_ty = Type.fromInterned(ip.getNav(int_info_nav).status.resolved.val); const signedness_ty = try pt.getBuiltinType("Signedness"); const info = ty.intInfo(mod); @@ -18042,15 +18042,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }))); }, .Float => { - const float_info_decl_index = (try sema.namespaceLookup( + const float_info_nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "Float", .no_embedded_nulls), - )).?; - try sema.ensureDeclAnalyzed(float_info_decl_index); - const float_info_decl = mod.declPtr(float_info_decl_index); - const float_info_ty = float_info_decl.val.toType(); + ) orelse @panic("std.builtin.Type is corrupt"); + try sema.ensureNavResolved(src, float_info_nav); + const float_info_ty = Type.fromInterned(ip.getNav(float_info_nav).status.resolved.val); const field_vals = .{ // bits: u16, @@ -18074,26 +18073,24 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const addrspace_ty = try pt.getBuiltinType("AddressSpace"); const pointer_ty = t: { - const decl_index = (try sema.namespaceLookup( + const nav = try sema.namespaceLookup( block, src, (try pt.getBuiltinType("Type")).getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "Pointer", .no_embedded_nulls), - )).?; - try sema.ensureDeclAnalyzed(decl_index); - const decl = mod.declPtr(decl_index); - break :t decl.val.toType(); + ) orelse @panic("std.builtin.Type is corrupt"); + try sema.ensureNavResolved(src, nav); + break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; const ptr_size_ty = t: { - const decl_index = (try 
sema.namespaceLookup( + const nav = try sema.namespaceLookup( block, src, pointer_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "Size", .no_embedded_nulls), - )).?; - try sema.ensureDeclAnalyzed(decl_index); - const decl = mod.declPtr(decl_index); - break :t decl.val.toType(); + ) orelse @panic("std.builtin.Type is corrupt"); + try sema.ensureNavResolved(src, nav); + break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; const field_values = .{ @@ -18128,15 +18125,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }, .Array => { const array_field_ty = t: { - const array_field_ty_decl_index = (try sema.namespaceLookup( + const nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "Array", .no_embedded_nulls), - )).?; - try sema.ensureDeclAnalyzed(array_field_ty_decl_index); - const array_field_ty_decl = mod.declPtr(array_field_ty_decl_index); - break :t array_field_ty_decl.val.toType(); + ) orelse @panic("std.builtin.Type is corrupt"); + try sema.ensureNavResolved(src, nav); + break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; const info = ty.arrayInfo(mod); @@ -18159,15 +18155,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }, .Vector => { const vector_field_ty = t: { - const vector_field_ty_decl_index = (try sema.namespaceLookup( + const nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "Vector", .no_embedded_nulls), - )).?; - try sema.ensureDeclAnalyzed(vector_field_ty_decl_index); - const vector_field_ty_decl = mod.declPtr(vector_field_ty_decl_index); - break :t vector_field_ty_decl.val.toType(); + ) orelse @panic("std.builtin.Type is corrupt"); + try sema.ensureNavResolved(src, nav); + break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; const info = ty.arrayInfo(mod); @@ -18188,15 +18183,14 @@ fn 
zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }, .Optional => { const optional_field_ty = t: { - const optional_field_ty_decl_index = (try sema.namespaceLookup( + const nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "Optional", .no_embedded_nulls), - )).?; - try sema.ensureDeclAnalyzed(optional_field_ty_decl_index); - const optional_field_ty_decl = mod.declPtr(optional_field_ty_decl_index); - break :t optional_field_ty_decl.val.toType(); + ) orelse @panic("std.builtin.Type is corrupt"); + try sema.ensureNavResolved(src, nav); + break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; const field_values = .{ @@ -18215,15 +18209,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ErrorSet => { // Get the Error type const error_field_ty = t: { - const set_field_ty_decl_index = (try sema.namespaceLookup( + const nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "Error", .no_embedded_nulls), - )).?; - try sema.ensureDeclAnalyzed(set_field_ty_decl_index); - const set_field_ty_decl = mod.declPtr(set_field_ty_decl_index); - break :t set_field_ty_decl.val.toType(); + ) orelse @panic("std.builtin.Type is corrupt"); + try sema.ensureNavResolved(src, nav); + break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; // Build our list of Error values @@ -18251,7 +18244,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ty = .slice_const_u8_sentinel_0_type, .ptr = try pt.intern(.{ .ptr = .{ .ty = .manyptr_const_u8_sentinel_0_type, - .base_addr = .{ .anon_decl = .{ + .base_addr = .{ .uav = .{ .val = new_decl_val, .orig_ty = .slice_const_u8_sentinel_0_type, } }, @@ -18298,7 +18291,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ty = slice_errors_ty.toIntern(), .ptr = try pt.intern(.{ .ptr = 
.{ .ty = manyptr_errors_ty, - .base_addr = .{ .anon_decl = .{ + .base_addr = .{ .uav = .{ .orig_ty = manyptr_errors_ty, .val = new_decl_val, } }, @@ -18321,15 +18314,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }, .ErrorUnion => { const error_union_field_ty = t: { - const error_union_field_ty_decl_index = (try sema.namespaceLookup( + const nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "ErrorUnion", .no_embedded_nulls), - )).?; - try sema.ensureDeclAnalyzed(error_union_field_ty_decl_index); - const error_union_field_ty_decl = mod.declPtr(error_union_field_ty_decl_index); - break :t error_union_field_ty_decl.val.toType(); + ) orelse @panic("std.builtin.Type is corrupt"); + try sema.ensureNavResolved(src, nav); + break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; const field_values = .{ @@ -18351,15 +18343,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const is_exhaustive = Value.makeBool(ip.loadEnumType(ty.toIntern()).tag_mode != .nonexhaustive); const enum_field_ty = t: { - const enum_field_ty_decl_index = (try sema.namespaceLookup( + const nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "EnumField", .no_embedded_nulls), - )).?; - try sema.ensureDeclAnalyzed(enum_field_ty_decl_index); - const enum_field_ty_decl = mod.declPtr(enum_field_ty_decl_index); - break :t enum_field_ty_decl.val.toType(); + ) orelse @panic("std.builtin.Type is corrupt"); + try sema.ensureNavResolved(src, nav); + break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; const enum_field_vals = try sema.arena.alloc(InternPool.Index, ip.loadEnumType(ty.toIntern()).names.len); @@ -18392,7 +18383,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ty = .slice_const_u8_sentinel_0_type, .ptr = try pt.intern(.{ .ptr = .{ .ty = 
.manyptr_const_u8_sentinel_0_type, - .base_addr = .{ .anon_decl = .{ + .base_addr = .{ .uav = .{ .val = new_decl_val, .orig_ty = .slice_const_u8_sentinel_0_type, } }, @@ -18435,7 +18426,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ty = slice_ty, .ptr = try pt.intern(.{ .ptr = .{ .ty = manyptr_ty, - .base_addr = .{ .anon_decl = .{ + .base_addr = .{ .uav = .{ .val = new_decl_val, .orig_ty = manyptr_ty, } }, @@ -18448,15 +18439,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ip.loadEnumType(ty.toIntern()).namespace); const type_enum_ty = t: { - const type_enum_ty_decl_index = (try sema.namespaceLookup( + const nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "Enum", .no_embedded_nulls), - )).?; - try sema.ensureDeclAnalyzed(type_enum_ty_decl_index); - const type_enum_ty_decl = mod.declPtr(type_enum_ty_decl_index); - break :t type_enum_ty_decl.val.toType(); + ) orelse @panic("std.builtin.Type is corrupt"); + try sema.ensureNavResolved(src, nav); + break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; const field_values = .{ @@ -18480,27 +18470,25 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }, .Union => { const type_union_ty = t: { - const type_union_ty_decl_index = (try sema.namespaceLookup( + const nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "Union", .no_embedded_nulls), - )).?; - try sema.ensureDeclAnalyzed(type_union_ty_decl_index); - const type_union_ty_decl = mod.declPtr(type_union_ty_decl_index); - break :t type_union_ty_decl.val.toType(); + ) orelse @panic("std.builtin.Type is corrupt"); + try sema.ensureNavResolved(src, nav); + break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; const union_field_ty = t: { - 
const union_field_ty_decl_index = (try sema.namespaceLookup( + const nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "UnionField", .no_embedded_nulls), - )).?; - try sema.ensureDeclAnalyzed(union_field_ty_decl_index); - const union_field_ty_decl = mod.declPtr(union_field_ty_decl_index); - break :t union_field_ty_decl.val.toType(); + ) orelse @panic("std.builtin.Type is corrupt"); + try sema.ensureNavResolved(src, nav); + break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; try ty.resolveLayout(pt); // Getting alignment requires type layout @@ -18528,7 +18516,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ty = .slice_const_u8_sentinel_0_type, .ptr = try pt.intern(.{ .ptr = .{ .ty = .manyptr_const_u8_sentinel_0_type, - .base_addr = .{ .anon_decl = .{ + .base_addr = .{ .uav = .{ .val = new_decl_val, .orig_ty = .slice_const_u8_sentinel_0_type, } }, @@ -18579,7 +18567,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ty = slice_ty, .ptr = try pt.intern(.{ .ptr = .{ .ty = manyptr_ty, - .base_addr = .{ .anon_decl = .{ + .base_addr = .{ .uav = .{ .orig_ty = manyptr_ty, .val = new_decl_val, } }, @@ -18597,15 +18585,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }); const container_layout_ty = t: { - const decl_index = (try sema.namespaceLookup( + const nav = try sema.namespaceLookup( block, src, (try pt.getBuiltinType("Type")).getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "ContainerLayout", .no_embedded_nulls), - )).?; - try sema.ensureDeclAnalyzed(decl_index); - const decl = mod.declPtr(decl_index); - break :t decl.val.toType(); + ) orelse @panic("std.builtin.Type is corrupt"); + try sema.ensureNavResolved(src, nav); + break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; const field_values = .{ @@ -18630,27 +18617,25 @@ fn zirTypeInfo(sema: *Sema, 
block: *Block, inst: Zir.Inst.Index) CompileError!Ai }, .Struct => { const type_struct_ty = t: { - const type_struct_ty_decl_index = (try sema.namespaceLookup( + const nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "Struct", .no_embedded_nulls), - )).?; - try sema.ensureDeclAnalyzed(type_struct_ty_decl_index); - const type_struct_ty_decl = mod.declPtr(type_struct_ty_decl_index); - break :t type_struct_ty_decl.val.toType(); + ) orelse @panic("std.builtin.Type is corrupt"); + try sema.ensureNavResolved(src, nav); + break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; const struct_field_ty = t: { - const struct_field_ty_decl_index = (try sema.namespaceLookup( + const nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "StructField", .no_embedded_nulls), - )).?; - try sema.ensureDeclAnalyzed(struct_field_ty_decl_index); - const struct_field_ty_decl = mod.declPtr(struct_field_ty_decl_index); - break :t struct_field_ty_decl.val.toType(); + ) orelse @panic("std.builtin.Type is corrupt"); + try sema.ensureNavResolved(src, nav); + break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; try ty.resolveLayout(pt); // Getting alignment requires type layout @@ -18683,7 +18668,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ty = .slice_const_u8_sentinel_0_type, .ptr = try pt.intern(.{ .ptr = .{ .ty = .manyptr_const_u8_sentinel_0_type, - .base_addr = .{ .anon_decl = .{ + .base_addr = .{ .uav = .{ .val = new_decl_val, .orig_ty = .slice_const_u8_sentinel_0_type, } }, @@ -18747,7 +18732,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ty = .slice_const_u8_sentinel_0_type, .ptr = try pt.intern(.{ .ptr = .{ .ty = .manyptr_const_u8_sentinel_0_type, - .base_addr = .{ .anon_decl = .{ + .base_addr = .{ .uav = .{ .val = new_decl_val, .orig_ty = 
.slice_const_u8_sentinel_0_type, } }, @@ -18809,7 +18794,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ty = slice_ty, .ptr = try pt.intern(.{ .ptr = .{ .ty = manyptr_ty, - .base_addr = .{ .anon_decl = .{ + .base_addr = .{ .uav = .{ .orig_ty = manyptr_ty, .val = new_decl_val, } }, @@ -18830,15 +18815,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }); const container_layout_ty = t: { - const decl_index = (try sema.namespaceLookup( + const nav = try sema.namespaceLookup( block, src, (try pt.getBuiltinType("Type")).getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "ContainerLayout", .no_embedded_nulls), - )).?; - try sema.ensureDeclAnalyzed(decl_index); - const decl = mod.declPtr(decl_index); - break :t decl.val.toType(); + ) orelse @panic("std.builtin.Type is corrupt"); + try sema.ensureNavResolved(src, nav); + break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; const layout = ty.containerLayout(mod); @@ -18866,15 +18850,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }, .Opaque => { const type_opaque_ty = t: { - const type_opaque_ty_decl_index = (try sema.namespaceLookup( + const nav = try sema.namespaceLookup( block, src, type_info_ty.getNamespaceIndex(mod), try ip.getOrPutString(gpa, pt.tid, "Opaque", .no_embedded_nulls), - )).?; - try sema.ensureDeclAnalyzed(type_opaque_ty_decl_index); - const type_opaque_ty_decl = mod.declPtr(type_opaque_ty_decl_index); - break :t type_opaque_ty_decl.val.toType(); + ) orelse @panic("std.builtin.Type is corrupt"); + try sema.ensureNavResolved(src, nav); + break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; try ty.resolveFields(pt); @@ -18906,19 +18889,19 @@ fn typeInfoDecls( opt_namespace: InternPool.OptionalNamespaceIndex, ) CompileError!InternPool.Index { const pt = sema.pt; - const mod = pt.zcu; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; const gpa = sema.gpa; 
const declaration_ty = t: { - const declaration_ty_decl_index = (try sema.namespaceLookup( + const nav = try sema.namespaceLookup( block, src, - type_info_ty.getNamespaceIndex(mod), - try mod.intern_pool.getOrPutString(gpa, pt.tid, "Declaration", .no_embedded_nulls), - )).?; - try sema.ensureDeclAnalyzed(declaration_ty_decl_index); - const declaration_ty_decl = mod.declPtr(declaration_ty_decl_index); - break :t declaration_ty_decl.val.toType(); + type_info_ty.getNamespaceIndex(zcu), + try ip.getOrPutString(gpa, pt.tid, "Declaration", .no_embedded_nulls), + ) orelse @panic("std.builtin.Type is corrupt"); + try sema.ensureNavResolved(src, nav); + break :t Type.fromInterned(ip.getNav(nav).status.resolved.val); }; var decl_vals = std.ArrayList(InternPool.Index).init(gpa); @@ -18927,7 +18910,7 @@ fn typeInfoDecls( var seen_namespaces = std.AutoHashMap(*Namespace, void).init(gpa); defer seen_namespaces.deinit(); - try sema.typeInfoNamespaceDecls(block, opt_namespace, declaration_ty, &decl_vals, &seen_namespaces); + try sema.typeInfoNamespaceDecls(block, src, opt_namespace, declaration_ty, &decl_vals, &seen_namespaces); const array_decl_ty = try pt.arrayType(.{ .len = decl_vals.items.len, @@ -18944,12 +18927,12 @@ fn typeInfoDecls( .is_const = true, }, })).toIntern(); - const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(mod).toIntern(); + const manyptr_ty = Type.fromInterned(slice_ty).slicePtrFieldType(zcu).toIntern(); return try pt.intern(.{ .slice = .{ .ty = slice_ty, .ptr = try pt.intern(.{ .ptr = .{ .ty = manyptr_ty, - .base_addr = .{ .anon_decl = .{ + .base_addr = .{ .uav = .{ .orig_ty = manyptr_ty, .val = new_decl_val, } }, @@ -18962,59 +18945,54 @@ fn typeInfoDecls( fn typeInfoNamespaceDecls( sema: *Sema, block: *Block, + src: LazySrcLoc, opt_namespace_index: InternPool.OptionalNamespaceIndex, declaration_ty: Type, decl_vals: *std.ArrayList(InternPool.Index), seen_namespaces: *std.AutoHashMap(*Namespace, void), ) !void { const pt = sema.pt; - const 
mod = pt.zcu; - const ip = &mod.intern_pool; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; const namespace_index = opt_namespace_index.unwrap() orelse return; - const namespace = mod.namespacePtr(namespace_index); + const namespace = zcu.namespacePtr(namespace_index); const gop = try seen_namespaces.getOrPut(namespace); if (gop.found_existing) return; - const decls = namespace.decls.keys(); - for (decls) |decl_index| { - const decl = mod.declPtr(decl_index); - if (!decl.is_pub) continue; - if (decl.kind == .@"usingnamespace") { - if (decl.analysis == .in_progress) continue; - try sema.ensureDeclAnalyzed(decl_index); - try sema.typeInfoNamespaceDecls(block, decl.val.toType().getNamespaceIndex(mod), declaration_ty, decl_vals, seen_namespaces); - continue; - } - if (decl.kind != .named) continue; - const name_val = v: { - const decl_name_len = decl.name.length(ip); - const new_decl_ty = try pt.arrayType(.{ - .len = decl_name_len, + for (namespace.pub_decls.keys()) |nav| { + const name = ip.getNav(nav).name; + const name_val = name_val: { + const name_len = name.length(ip); + const array_ty = try pt.arrayType(.{ + .len = name_len, .sentinel = .zero_u8, .child = .u8_type, }); - const new_decl_val = try pt.intern(.{ .aggregate = .{ - .ty = new_decl_ty.toIntern(), - .storage = .{ .bytes = decl.name.toString() }, - } }); - break :v try pt.intern(.{ .slice = .{ - .ty = .slice_const_u8_sentinel_0_type, - .ptr = try pt.intern(.{ .ptr = .{ - .ty = .manyptr_const_u8_sentinel_0_type, - .base_addr = .{ .anon_decl = .{ - .orig_ty = .slice_const_u8_sentinel_0_type, - .val = new_decl_val, - } }, - .byte_offset = 0, - } }), - .len = (try pt.intValue(Type.usize, decl_name_len)).toIntern(), + const array_val = try pt.intern(.{ .aggregate = .{ + .ty = array_ty.toIntern(), + .storage = .{ .bytes = name.toString() }, } }); + break :name_val try pt.intern(.{ + .slice = .{ + .ty = .slice_const_u8_sentinel_0_type, // [:0]const u8 + .ptr = try pt.intern(.{ + .ptr = .{ + .ty = 
.manyptr_const_u8_sentinel_0_type, // [*:0]const u8 + .base_addr = .{ .uav = .{ + .orig_ty = .slice_const_u8_sentinel_0_type, + .val = array_val, + } }, + .byte_offset = 0, + }, + }), + .len = (try pt.intValue(Type.usize, name_len)).toIntern(), + }, + }); }; - - const fields = .{ - //name: [:0]const u8, + const fields = [_]InternPool.Index{ + // name: [:0]const u8, name_val, }; try decl_vals.append(try pt.intern(.{ .aggregate = .{ @@ -19022,6 +19000,17 @@ fn typeInfoNamespaceDecls( .storage = .{ .elems = &fields }, } })); } + + for (namespace.pub_usingnamespace.items) |nav| { + if (ip.getNav(nav).analysis_owner.unwrap()) |cau| { + if (zcu.analysis_in_progress.contains(AnalUnit.wrap(.{ .cau = cau }))) { + continue; + } + } + try sema.ensureNavResolved(src, nav); + const namespace_ty = Type.fromInterned(ip.getNav(nav).status.resolved.val); + try sema.typeInfoNamespaceDecls(block, src, namespace_ty.getNamespaceIndex(zcu), declaration_ty, decl_vals, seen_namespaces); + } } fn zirTypeof(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -19906,7 +19895,7 @@ fn restoreErrRetIndex(sema: *Sema, start_block: *Block, src: LazySrcLoc, target_ return; } - if (!mod.intern_pool.funcAnalysisUnordered(sema.owner_func_index).calls_or_awaits_errorable_fn) return; + if (!mod.intern_pool.funcAnalysisUnordered(sema.owner.unwrap().func).calls_or_awaits_errorable_fn) return; if (!start_block.ownerModule().error_tracing) return; assert(saved_index != .none); // The .error_return_trace_index field was dropped somewhere @@ -19928,7 +19917,7 @@ fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void { }, else => if (ip.isInferredErrorSetType(err_set_ty)) { const ies = sema.fn_ret_ty_ies.?; - assert(ies.func == sema.func_index); + assert(ies.func == sema.owner.unwrap().func); try sema.addToInferredErrorSetPtr(ies, sema.typeOf(uncasted_operand)); }, } @@ -20232,7 +20221,7 @@ fn zirStructInitEmptyResult(sema: *Sema, block: *Block, inst: 
Zir.Inst.Index, is if (is_byref) { const init_val = (try sema.resolveValue(init_ref)).?; - return anonDeclRef(sema, init_val.toIntern()); + return sema.uavRef(init_val.toIntern()); } else { return init_ref; } @@ -21056,7 +21045,7 @@ fn arrayInitAnon( } fn addConstantMaybeRef(sema: *Sema, val: InternPool.Index, is_ref: bool) !Air.Inst.Ref { - return if (is_ref) anonDeclRef(sema, val) else Air.internedToRef(val); + return if (is_ref) sema.uavRef(val) else Air.internedToRef(val); } fn zirFieldTypeRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -21163,16 +21152,16 @@ fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { const ptr_stack_trace_ty = try pt.singleMutPtrType(stack_trace_ty); const opt_ptr_stack_trace_ty = try pt.optionalType(ptr_stack_trace_ty.toIntern()); - if (sema.owner_func_index != .none and - ip.funcAnalysisUnordered(sema.owner_func_index).calls_or_awaits_errorable_fn and - block.ownerModule().error_tracing) - { - return block.addTy(.err_return_trace, opt_ptr_stack_trace_ty); + switch (sema.owner.unwrap()) { + .func => |func| if (ip.funcAnalysisUnordered(func).calls_or_awaits_errorable_fn and block.ownerModule().error_tracing) { + return block.addTy(.err_return_trace, opt_ptr_stack_trace_ty); + }, + .cau => {}, } - return Air.internedToRef((try pt.intern(.{ .opt = .{ + return Air.internedToRef(try pt.intern(.{ .opt = .{ .ty = opt_ptr_stack_trace_ty.toIntern(), .val = .none, - } }))); + } })); } fn zirFrame( @@ -21369,24 +21358,24 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); const pt = sema.pt; - const mod = pt.zcu; - const ip = &mod.intern_pool; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; try operand_ty.resolveLayout(pt); - const enum_ty = switch (operand_ty.zigTypeTag(mod)) { + const enum_ty = switch (operand_ty.zigTypeTag(zcu)) { .EnumLiteral => 
{ const val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, operand, undefined); const tag_name = ip.indexToKey(val.toIntern()).enum_literal; return sema.addNullTerminatedStrLit(tag_name); }, .Enum => operand_ty, - .Union => operand_ty.unionTagType(mod) orelse + .Union => operand_ty.unionTagType(zcu) orelse return sema.fail(block, src, "union '{}' is untagged", .{operand_ty.fmt(pt)}), else => return sema.fail(block, operand_src, "expected enum or union; found '{}'", .{ operand_ty.fmt(pt), }), }; - if (enum_ty.enumFieldCount(mod) == 0) { + if (enum_ty.enumFieldCount(zcu) == 0) { // TODO I don't think this is the correct way to handle this but // it prevents a crash. // https://github.com/ziglang/zig/issues/15909 @@ -21394,26 +21383,25 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air enum_ty.fmt(pt), }); } - const enum_decl_index = enum_ty.getOwnerDecl(mod); const casted_operand = try sema.coerce(block, enum_ty, operand, operand_src); if (try sema.resolveDefinedValue(block, operand_src, casted_operand)) |val| { - const field_index = enum_ty.enumTagFieldIndex(val, mod) orelse { + const field_index = enum_ty.enumTagFieldIndex(val, zcu) orelse { const msg = msg: { const msg = try sema.errMsg(src, "no field with value '{}' in enum '{}'", .{ - val.fmtValueSema(pt, sema), mod.declPtr(enum_decl_index).name.fmt(ip), + val.fmtValueSema(pt, sema), enum_ty.fmt(pt), }); errdefer msg.destroy(sema.gpa); - try sema.errNote(enum_ty.srcLoc(mod), msg, "declared here", .{}); + try sema.errNote(enum_ty.srcLoc(zcu), msg, "declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); }; // TODO: write something like getCoercedInts to avoid needing to dupe - const field_name = enum_ty.enumFieldName(field_index, mod); + const field_name = enum_ty.enumFieldName(field_index, zcu); return sema.addNullTerminatedStrLit(field_name); } try sema.requireRuntimeBlock(block, src, operand_src); - if (block.wantSafety() 
and mod.backendSupportsFeature(.is_named_enum_value)) { + if (block.wantSafety() and zcu.backendSupportsFeature(.is_named_enum_value)) { const ok = try block.addUnOp(.is_named_enum_value, casted_operand); try sema.addSafetyCheck(block, src, ok, .invalid_enum_value); } @@ -21820,19 +21808,15 @@ fn zirReify( }; errdefer wip_ty.cancel(ip, pt.tid); - const new_decl_index = try sema.createAnonymousDeclTypeNamed( + wip_ty.setName(ip, try sema.createTypeName( block, - Value.fromInterned(wip_ty.index), name_strategy, "opaque", inst, - ); - mod.declPtr(new_decl_index).owns_tv = true; - errdefer pt.abortAnonDecl(new_decl_index); - - try pt.finalizeAnonDecl(new_decl_index); + wip_ty.index, + )); - return Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none)); + return Air.internedToRef(wip_ty.finish(ip, .none, .none)); }, .Union => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); @@ -22001,13 +21985,15 @@ fn reifyEnum( }); } + const tracked_inst = try block.trackZir(inst); + const wip_ty = switch (try ip.getEnumType(gpa, pt.tid, .{ .has_namespace = false, .has_values = true, .tag_mode = if (is_exhaustive) .explicit else .nonexhaustive, .fields_len = fields_len, .key = .{ .reified = .{ - .zir_index = try block.trackZir(inst), + .zir_index = tracked_inst, .type_hash = hasher.final(), } }, })) { @@ -22020,17 +22006,17 @@ fn reifyEnum( return sema.fail(block, src, "Type.Enum.tag_type must be an integer type", .{}); } - const new_decl_index = try sema.createAnonymousDeclTypeNamed( + wip_ty.setName(ip, try sema.createTypeName( block, - Value.fromInterned(wip_ty.index), name_strategy, "enum", inst, - ); - mod.declPtr(new_decl_index).owns_tv = true; - errdefer pt.abortAnonDecl(new_decl_index); + wip_ty.index, + )); + + const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, block.namespace, wip_ty.index); - wip_ty.prepare(ip, new_decl_index, .none); + wip_ty.prepare(ip, new_cau_index, .none); wip_ty.setTagTy(ip, tag_ty.toIntern()); for 
(0..fields_len) |field_idx| { @@ -22076,7 +22062,6 @@ fn reifyEnum( return sema.fail(block, src, "non-exhaustive enum specified every value", .{}); } - try pt.finalizeAnonDecl(new_decl_index); return Air.internedToRef(wip_ty.index); } @@ -22134,6 +22119,8 @@ fn reifyUnion( } } + const tracked_inst = try block.trackZir(inst); + const wip_ty = switch (try ip.getUnionType(gpa, pt.tid, .{ .flags = .{ .layout = layout, @@ -22158,7 +22145,7 @@ fn reifyUnion( .field_types = &.{}, // set later .field_aligns = &.{}, // set later .key = .{ .reified = .{ - .zir_index = try block.trackZir(inst), + .zir_index = tracked_inst, .type_hash = hasher.final(), } }, })) { @@ -22167,15 +22154,14 @@ fn reifyUnion( }; errdefer wip_ty.cancel(ip, pt.tid); - const new_decl_index = try sema.createAnonymousDeclTypeNamed( + const type_name = try sema.createTypeName( block, - Value.fromInterned(wip_ty.index), name_strategy, "union", inst, + wip_ty.index, ); - mod.declPtr(new_decl_index).owns_tv = true; - errdefer pt.abortAnonDecl(new_decl_index); + wip_ty.setName(ip, type_name); const field_types = try sema.arena.alloc(InternPool.Index, fields_len); const field_aligns = if (any_aligns) try sema.arena.alloc(InternPool.Alignment, fields_len) else undefined; @@ -22268,7 +22254,7 @@ fn reifyUnion( } } - const enum_tag_ty = try sema.generateUnionTagTypeSimple(block, field_names.keys(), mod.declPtr(new_decl_index)); + const enum_tag_ty = try sema.generateUnionTagTypeSimple(field_names.keys(), wip_ty.index, type_name); break :tag_ty .{ enum_tag_ty, false }; }; errdefer if (!has_explicit_tag) ip.remove(pt.tid, enum_tag_ty); // remove generated tag type on error @@ -22315,10 +22301,11 @@ fn reifyUnion( loaded_union.setTagType(ip, enum_tag_ty); loaded_union.setStatus(ip, .have_field_types); - try pt.finalizeAnonDecl(new_decl_index); + const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, block.namespace, wip_ty.index); + try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index }); - try 
sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index })); - return Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none)); + try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .cau = new_cau_index })); + return Air.internedToRef(wip_ty.finish(ip, new_cau_index.toOptional(), .none)); } fn reifyStruct( @@ -22399,6 +22386,8 @@ fn reifyStruct( } } + const tracked_inst = try block.trackZir(inst); + const wip_ty = switch (try ip.getStructType(gpa, pt.tid, .{ .layout = layout, .fields_len = fields_len, @@ -22411,7 +22400,7 @@ fn reifyStruct( .inits_resolved = true, .has_namespace = false, .key = .{ .reified = .{ - .zir_index = try block.trackZir(inst), + .zir_index = tracked_inst, .type_hash = hasher.final(), } }, })) { @@ -22426,15 +22415,13 @@ fn reifyStruct( .auto => {}, }; - const new_decl_index = try sema.createAnonymousDeclTypeNamed( + wip_ty.setName(ip, try sema.createTypeName( block, - Value.fromInterned(wip_ty.index), name_strategy, "struct", inst, - ); - mod.declPtr(new_decl_index).owns_tv = true; - errdefer pt.abortAnonDecl(new_decl_index); + wip_ty.index, + )); const struct_type = ip.loadStructType(wip_ty.index); @@ -22582,10 +22569,11 @@ fn reifyStruct( } } - try pt.finalizeAnonDecl(new_decl_index); + const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, block.namespace, wip_ty.index); + try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index }); - try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = new_decl_index })); - return Air.internedToRef(wip_ty.finish(ip, new_decl_index, .none)); + try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .cau = new_cau_index })); + return Air.internedToRef(wip_ty.finish(ip, new_cau_index.toOptional(), .none)); } fn resolveVaListRef(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) CompileError!Air.Inst.Ref { @@ -26028,7 +26016,8 @@ fn zirVarExtended( extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const pt = sema.pt; - const mod = pt.zcu; + const 
zcu = pt.zcu; + const ip = &zcu.intern_pool; const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand); const ty_src = block.src(.{ .node_offset_var_decl_ty = 0 }); const init_src = block.src(.{ .node_offset_var_decl_init = 0 }); @@ -26075,16 +26064,62 @@ fn zirVarExtended( try sema.validateVarType(block, ty_src, var_ty, small.is_extern); - return Air.internedToRef((try pt.intern(.{ .variable = .{ + if (small.is_extern) { + // We need to resolve the alignment and addrspace early. + // Keep in sync with logic in `Zcu.PerThread.semaCau`. + const align_src = block.src(.{ .node_offset_var_decl_align = 0 }); + const addrspace_src = block.src(.{ .node_offset_var_decl_addrspace = 0 }); + + const decl_inst, const decl_bodies = decl: { + const decl_inst = sema.getOwnerCauDeclInst().resolve(ip); + const zir_decl, const extra_end = sema.code.getDeclaration(decl_inst); + break :decl .{ decl_inst, zir_decl.getBodies(extra_end, sema.code) }; + }; + + const alignment: InternPool.Alignment = a: { + const align_body = decl_bodies.align_body orelse break :a .none; + const align_ref = try sema.resolveInlineBody(block, align_body, decl_inst); + break :a try sema.analyzeAsAlign(block, align_src, align_ref); + }; + + const @"addrspace": std.builtin.AddressSpace = as: { + const addrspace_ctx: Sema.AddressSpaceContext = switch (ip.indexToKey(var_ty.toIntern())) { + .func_type => .function, + else => .variable, + }; + const target = zcu.getTarget(); + const addrspace_body = decl_bodies.addrspace_body orelse break :as switch (addrspace_ctx) { + .function => target_util.defaultAddressSpace(target, .function), + .variable => target_util.defaultAddressSpace(target, .global_mutable), + .constant => target_util.defaultAddressSpace(target, .global_constant), + else => unreachable, + }; + const addrspace_ref = try sema.resolveInlineBody(block, addrspace_body, decl_inst); + break :as try sema.analyzeAsAddressSpace(block, addrspace_src, addrspace_ref, addrspace_ctx); + }; + + return 
Air.internedToRef(try pt.getExtern(.{ + .name = sema.getOwnerCauNavName(), + .ty = var_ty.toIntern(), + .lib_name = try ip.getOrPutStringOpt(sema.gpa, pt.tid, lib_name, .no_embedded_nulls), + .is_const = small.is_const, + .is_threadlocal = small.is_threadlocal, + .is_weak_linkage = false, + .alignment = alignment, + .@"addrspace" = @"addrspace", + .zir_index = sema.getOwnerCauDeclInst(), // `declaration` instruction + .owner_nav = undefined, // ignored by `getExtern` + })); + } + assert(!small.is_const); // non-const non-extern variable is not legal + return Air.internedToRef(try pt.intern(.{ .variable = .{ .ty = var_ty.toIntern(), .init = init_val, - .decl = sema.owner_decl_index, - .lib_name = try mod.intern_pool.getOrPutStringOpt(sema.gpa, pt.tid, lib_name, .no_embedded_nulls), - .is_extern = small.is_extern, - .is_const = small.is_const, + .owner_nav = sema.getOwnerCauNav(), + .lib_name = try ip.getOrPutStringOpt(sema.gpa, pt.tid, lib_name, .no_embedded_nulls), .is_threadlocal = small.is_threadlocal, .is_weak_linkage = false, - } }))); + } })); } fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -26255,10 +26290,23 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A else => |e| return e, }; break :blk mod.toEnum(std.builtin.CallingConvention, cc_val); - } else if (sema.owner_decl.is_exported and has_body) - .C - else - .Unspecified; + } else cc: { + if (has_body) { + const decl_inst = if (sema.generic_owner != .none) decl_inst: { + // Generic instance -- use the original function declaration to + // look for the `export` syntax. 
+ const nav = mod.intern_pool.getNav(mod.funcInfo(sema.generic_owner).owner_nav); + const cau = mod.intern_pool.getCau(nav.analysis_owner.unwrap().?); + break :decl_inst cau.zir_index; + } else sema.getOwnerCauDeclInst(); // not an instantiation so we're analyzing a function declaration Cau + + const zir_decl = sema.code.getDeclaration(decl_inst.resolve(&mod.intern_pool))[0]; + if (zir_decl.flags.is_export) { + break :cc .C; + } + } + break :cc .Unspecified; + }; const ret_ty: Type = if (extra.data.bits.has_ret_ty_body) blk: { const body_len = sema.code.extra[extra_index]; @@ -26600,42 +26648,32 @@ fn zirBuiltinExtern( const options = try sema.resolveExternOptions(block, options_src, extra.rhs); + // TODO: error for threadlocal functions, non-const functions, etc + if (options.linkage == .weak and !ty.ptrAllowsZero(mod)) { ty = try pt.optionalType(ty.toIntern()); } const ptr_info = ty.ptrInfo(mod); - const new_decl_index = try pt.allocateNewDecl(sema.owner_decl.src_namespace); - errdefer pt.destroyDecl(new_decl_index); - const new_decl = mod.declPtr(new_decl_index); - try pt.initNewAnonDecl( - new_decl_index, - Value.fromInterned( - if (Type.fromInterned(ptr_info.child).zigTypeTag(mod) == .Fn) - try ip.getExternFunc(sema.gpa, pt.tid, .{ - .ty = ptr_info.child, - .decl = new_decl_index, - .lib_name = options.library_name, - }) - else - try pt.intern(.{ .variable = .{ - .ty = ptr_info.child, - .init = .none, - .decl = new_decl_index, - .lib_name = options.library_name, - .is_extern = true, - .is_const = ptr_info.flags.is_const, - .is_threadlocal = options.is_thread_local, - .is_weak_linkage = options.linkage == .weak, - } }), - ), - options.name, - .none, - ); - new_decl.owns_tv = true; - // Note that this will queue the anon decl for codegen, so that the backend can - // correctly handle the extern, including duplicate detection. 
- try pt.finalizeAnonDecl(new_decl_index); + const extern_val = try pt.getExtern(.{ + .name = options.name, + .ty = ptr_info.child, + .lib_name = options.library_name, + .is_const = ptr_info.flags.is_const, + .is_threadlocal = options.is_thread_local, + .is_weak_linkage = options.linkage == .weak, + .alignment = ptr_info.flags.alignment, + .@"addrspace" = ptr_info.flags.address_space, + // This instruction is just for source locations. + // `builtin_extern` doesn't provide enough information, and isn't currently tracked. + // So, for now, just use our containing `declaration`. + .zir_index = switch (sema.owner.unwrap()) { + .cau => sema.getOwnerCauDeclInst(), + .func => sema.getOwnerFuncDeclInst(), + }, + .owner_nav = undefined, // ignored by `getExtern` + }); + const extern_nav = ip.indexToKey(extern_val).@"extern".owner_nav; return Air.internedToRef((try pt.getCoerced(Value.fromInterned(try pt.intern(.{ .ptr = .{ .ty = switch (ip.indexToKey(ty.toIntern())) { @@ -26643,7 +26681,7 @@ fn zirBuiltinExtern( .opt_type => |child_type| child_type, else => unreachable, }, - .base_addr = .{ .decl = new_decl_index }, + .base_addr = .{ .nav = extern_nav }, .byte_offset = 0, } })), ty)).toIntern()); } @@ -27129,17 +27167,15 @@ fn explainWhyTypeIsNotPacked( } } -fn prepareSimplePanic(sema: *Sema) !void { +fn prepareSimplePanic(sema: *Sema, block: *Block, src: LazySrcLoc) !void { const pt = sema.pt; const mod = pt.zcu; if (mod.panic_func_index == .none) { - const decl_index = (try pt.getBuiltinDecl("panic")); - // decl_index may be an alias; we must find the decl that actually - // owns the function. 
- try sema.ensureDeclAnalyzed(decl_index); - const fn_val = try mod.declPtr(decl_index).valueOrFail(); - try sema.declareDependency(.{ .decl_val = decl_index }); + const fn_ref = try sema.analyzeNavVal(block, src, try pt.getBuiltinNav("panic")); + const fn_val = try sema.resolveConstValue(block, src, fn_ref, .{ + .needed_comptime_reason = "panic handler must be comptime-known", + }); assert(fn_val.typeOf(mod).zigTypeTag(mod) == .Fn); assert(try sema.fnHasRuntimeBits(fn_val.typeOf(mod))); try mod.ensureFuncBodyAnalysisQueued(fn_val.toIntern()); @@ -27167,16 +27203,16 @@ fn prepareSimplePanic(sema: *Sema) !void { /// Backends depend on panic decls being available when lowering safety-checked /// instructions. This function ensures the panic function will be available to /// be called during that time. -fn preparePanicId(sema: *Sema, block: *Block, panic_id: Module.PanicId) !InternPool.DeclIndex { +fn preparePanicId(sema: *Sema, block: *Block, src: LazySrcLoc, panic_id: Module.PanicId) !InternPool.Nav.Index { const pt = sema.pt; const mod = pt.zcu; const gpa = sema.gpa; if (mod.panic_messages[@intFromEnum(panic_id)].unwrap()) |x| return x; - try sema.prepareSimplePanic(); + try sema.prepareSimplePanic(block, src); const panic_messages_ty = try pt.getBuiltinType("panic_messages"); - const msg_decl_index = (sema.namespaceLookup( + const msg_nav_index = (sema.namespaceLookup( block, LazySrcLoc.unneeded, panic_messages_ty.getNamespaceIndex(mod), @@ -27186,9 +27222,9 @@ fn preparePanicId(sema: *Sema, block: *Block, panic_id: Module.PanicId) !InternP error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable, error.OutOfMemory => |e| return e, }).?; - try sema.ensureDeclAnalyzed(msg_decl_index); - mod.panic_messages[@intFromEnum(panic_id)] = msg_decl_index.toOptional(); - return msg_decl_index; + try sema.ensureNavResolved(src, msg_nav_index); + mod.panic_messages[@intFromEnum(panic_id)] = msg_nav_index.toOptional(); + return msg_nav_index; } fn 
addSafetyCheck( @@ -27282,10 +27318,10 @@ fn panicWithMsg(sema: *Sema, block: *Block, src: LazySrcLoc, msg_inst: Air.Inst. return; } - try sema.prepareSimplePanic(); + try sema.prepareSimplePanic(block, src); const panic_func = mod.funcInfo(mod.panic_func_index); - const panic_fn = try sema.analyzeDeclVal(block, src, panic_func.owner_decl); + const panic_fn = try sema.analyzeNavVal(block, src, panic_func.owner_nav); const null_stack_trace = Air.internedToRef(mod.null_stack_trace); const opt_usize_ty = try pt.optionalType(.usize_type); @@ -27455,8 +27491,8 @@ fn safetyCheckFormatted( } fn safetyPanic(sema: *Sema, block: *Block, src: LazySrcLoc, panic_id: Module.PanicId) CompileError!void { - const msg_decl_index = try sema.preparePanicId(block, panic_id); - const msg_inst = try sema.analyzeDeclVal(block, src, msg_decl_index); + const msg_nav_index = try sema.preparePanicId(block, src, panic_id); + const msg_inst = try sema.analyzeNavVal(block, src, msg_nav_index); try sema.panicWithMsg(block, src, msg_inst, .@"safety check"); } @@ -27628,21 +27664,21 @@ fn fieldVal( return Air.internedToRef(enum_val.toIntern()); }, .Struct, .Opaque => { - if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| { - return inst; + switch (child_type.toIntern()) { + .empty_struct_type, .anyopaque_type => {}, // no namespace + else => if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| { + return inst; + }, } return sema.failWithBadMemberAccess(block, child_type, src, field_name); }, - else => { - const msg = msg: { - const msg = try sema.errMsg(src, "type '{}' has no members", .{child_type.fmt(pt)}); - errdefer msg.destroy(sema.gpa); - if (child_type.isSlice(mod)) try sema.errNote(src, msg, "slice values have 'len' and 'ptr' members", .{}); - if (child_type.zigTypeTag(mod) == .Array) try sema.errNote(src, msg, "array values have 'len' member", .{}); - break :msg msg; - }; - return 
sema.failWithOwnedErrorMsg(block, msg); - }, + else => return sema.failWithOwnedErrorMsg(block, msg: { + const msg = try sema.errMsg(src, "type '{}' has no members", .{child_type.fmt(pt)}); + errdefer msg.destroy(sema.gpa); + if (child_type.isSlice(mod)) try sema.errNote(src, msg, "slice values have 'len' and 'ptr' members", .{}); + if (child_type.zigTypeTag(mod) == .Array) try sema.errNote(src, msg, "array values have 'len' member", .{}); + break :msg msg; + }), } }, .Struct => if (is_pointer_to) { @@ -27700,7 +27736,7 @@ fn fieldPtr( .Array => { if (field_name.eqlSlice("len", ip)) { const int_val = try pt.intValue(Type.usize, inner_ty.arrayLen(mod)); - return anonDeclRef(sema, int_val.toIntern()); + return uavRef(sema, int_val.toIntern()); } else if (field_name.eqlSlice("ptr", ip) and is_pointer_to) { const ptr_info = object_ty.ptrInfo(mod); const new_ptr_ty = try pt.ptrTypeSema(.{ @@ -27839,7 +27875,7 @@ fn fieldPtr( child_type else try pt.singleErrorSetType(field_name); - return anonDeclRef(sema, try pt.intern(.{ .err = .{ + return uavRef(sema, try pt.intern(.{ .err = .{ .ty = error_set_type.toIntern(), .name = field_name, } })); @@ -27853,7 +27889,7 @@ fn fieldPtr( if (enum_ty.enumFieldIndex(field_name, mod)) |field_index| { const field_index_u32: u32 = @intCast(field_index); const idx_val = try pt.enumValueFieldIndex(enum_ty, field_index_u32); - return anonDeclRef(sema, idx_val.toIntern()); + return uavRef(sema, idx_val.toIntern()); } } return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); @@ -27867,7 +27903,7 @@ fn fieldPtr( }; const field_index_u32: u32 = @intCast(field_index); const idx_val = try pt.enumValueFieldIndex(child_type, field_index_u32); - return anonDeclRef(sema, idx_val.toIntern()); + return uavRef(sema, idx_val.toIntern()); }, .Struct, .Opaque => { if (try sema.namespaceLookupRef(block, src, child_type.getNamespaceIndex(mod), field_name)) |inst| { @@ -27923,18 +27959,18 @@ fn fieldCallBind( // in `fieldVal`. 
This function takes a pointer and returns a pointer. const pt = sema.pt; - const mod = pt.zcu; - const ip = &mod.intern_pool; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; const raw_ptr_src = src; // TODO better source location const raw_ptr_ty = sema.typeOf(raw_ptr); - const inner_ty = if (raw_ptr_ty.zigTypeTag(mod) == .Pointer and (raw_ptr_ty.ptrSize(mod) == .One or raw_ptr_ty.ptrSize(mod) == .C)) - raw_ptr_ty.childType(mod) + const inner_ty = if (raw_ptr_ty.zigTypeTag(zcu) == .Pointer and (raw_ptr_ty.ptrSize(zcu) == .One or raw_ptr_ty.ptrSize(zcu) == .C)) + raw_ptr_ty.childType(zcu) else return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty.fmt(pt)}); // Optionally dereference a second pointer to get the concrete type. - const is_double_ptr = inner_ty.zigTypeTag(mod) == .Pointer and inner_ty.ptrSize(mod) == .One; - const concrete_ty = if (is_double_ptr) inner_ty.childType(mod) else inner_ty; + const is_double_ptr = inner_ty.zigTypeTag(zcu) == .Pointer and inner_ty.ptrSize(zcu) == .One; + const concrete_ty = if (is_double_ptr) inner_ty.childType(zcu) else inner_ty; const ptr_ty = if (is_double_ptr) inner_ty else raw_ptr_ty; const object_ptr = if (is_double_ptr) try sema.analyzeLoad(block, src, raw_ptr, src) @@ -27942,36 +27978,36 @@ fn fieldCallBind( raw_ptr; find_field: { - switch (concrete_ty.zigTypeTag(mod)) { + switch (concrete_ty.zigTypeTag(zcu)) { .Struct => { try concrete_ty.resolveFields(pt); - if (mod.typeToStruct(concrete_ty)) |struct_type| { + if (zcu.typeToStruct(concrete_ty)) |struct_type| { const field_index = struct_type.nameIndex(ip, field_name) orelse break :find_field; const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]); return sema.finishFieldCallBind(block, src, ptr_ty, field_ty, field_index, object_ptr); - } else if (concrete_ty.isTuple(mod)) { + } else if (concrete_ty.isTuple(zcu)) { if (field_name.eqlSlice("len", ip)) { - return .{ .direct = try pt.intRef(Type.usize, 
concrete_ty.structFieldCount(mod)) }; + return .{ .direct = try pt.intRef(Type.usize, concrete_ty.structFieldCount(zcu)) }; } if (field_name.toUnsigned(ip)) |field_index| { - if (field_index >= concrete_ty.structFieldCount(mod)) break :find_field; - return sema.finishFieldCallBind(block, src, ptr_ty, concrete_ty.structFieldType(field_index, mod), field_index, object_ptr); + if (field_index >= concrete_ty.structFieldCount(zcu)) break :find_field; + return sema.finishFieldCallBind(block, src, ptr_ty, concrete_ty.structFieldType(field_index, zcu), field_index, object_ptr); } } else { - const max = concrete_ty.structFieldCount(mod); + const max = concrete_ty.structFieldCount(zcu); for (0..max) |i_usize| { const i: u32 = @intCast(i_usize); - if (field_name == concrete_ty.structFieldName(i, mod).unwrap().?) { - return sema.finishFieldCallBind(block, src, ptr_ty, concrete_ty.structFieldType(i, mod), i, object_ptr); + if (field_name == concrete_ty.structFieldName(i, zcu).unwrap().?) { + return sema.finishFieldCallBind(block, src, ptr_ty, concrete_ty.structFieldType(i, zcu), i, object_ptr); } } } }, .Union => { try concrete_ty.resolveFields(pt); - const union_obj = mod.typeToUnion(concrete_ty).?; + const union_obj = zcu.typeToUnion(concrete_ty).?; _ = union_obj.loadTagType(ip).nameIndex(ip, field_name) orelse break :find_field; const field_ptr = try unionFieldPtr(sema, block, src, object_ptr, field_name, field_name_src, concrete_ty, false); return .{ .direct = try sema.analyzeLoad(block, src, field_ptr, src) }; @@ -27985,23 +28021,23 @@ fn fieldCallBind( } // If we get here, we need to look for a decl in the struct type instead. 
- const found_decl = found_decl: { - const namespace = concrete_ty.getNamespace(mod) orelse - break :found_decl null; - const decl_idx = (try sema.namespaceLookup(block, src, namespace, field_name)) orelse - break :found_decl null; + const found_nav = found_nav: { + const namespace = concrete_ty.getNamespace(zcu) orelse + break :found_nav null; + const nav_index = try sema.namespaceLookup(block, src, namespace, field_name) orelse + break :found_nav null; - const decl_val = try sema.analyzeDeclVal(block, src, decl_idx); + const decl_val = try sema.analyzeNavVal(block, src, nav_index); const decl_type = sema.typeOf(decl_val); - if (mod.typeToFunc(decl_type)) |func_type| f: { + if (zcu.typeToFunc(decl_type)) |func_type| f: { if (func_type.param_types.len == 0) break :f; const first_param_type = Type.fromInterned(func_type.param_types.get(ip)[0]); if (first_param_type.isGenericPoison() or - (first_param_type.zigTypeTag(mod) == .Pointer and - (first_param_type.ptrSize(mod) == .One or - first_param_type.ptrSize(mod) == .C) and - first_param_type.childType(mod).eql(concrete_ty, mod))) + (first_param_type.zigTypeTag(zcu) == .Pointer and + (first_param_type.ptrSize(zcu) == .One or + first_param_type.ptrSize(zcu) == .C) and + first_param_type.childType(zcu).eql(concrete_ty, zcu))) { // Note that if the param type is generic poison, we know that it must // specifically be `anytype` since it's the first parameter, meaning we @@ -28012,31 +28048,31 @@ fn fieldCallBind( .func_inst = decl_val, .arg0_inst = object_ptr, } }; - } else if (first_param_type.eql(concrete_ty, mod)) { + } else if (first_param_type.eql(concrete_ty, zcu)) { const deref = try sema.analyzeLoad(block, src, object_ptr, src); return .{ .method = .{ .func_inst = decl_val, .arg0_inst = deref, } }; - } else if (first_param_type.zigTypeTag(mod) == .Optional) { - const child = first_param_type.optionalChild(mod); - if (child.eql(concrete_ty, mod)) { + } else if (first_param_type.zigTypeTag(zcu) == .Optional) { + 
const child = first_param_type.optionalChild(zcu); + if (child.eql(concrete_ty, zcu)) { const deref = try sema.analyzeLoad(block, src, object_ptr, src); return .{ .method = .{ .func_inst = decl_val, .arg0_inst = deref, } }; - } else if (child.zigTypeTag(mod) == .Pointer and - child.ptrSize(mod) == .One and - child.childType(mod).eql(concrete_ty, mod)) + } else if (child.zigTypeTag(zcu) == .Pointer and + child.ptrSize(zcu) == .One and + child.childType(zcu).eql(concrete_ty, zcu)) { return .{ .method = .{ .func_inst = decl_val, .arg0_inst = object_ptr, } }; } - } else if (first_param_type.zigTypeTag(mod) == .ErrorUnion and - first_param_type.errorUnionPayload(mod).eql(concrete_ty, mod)) + } else if (first_param_type.zigTypeTag(zcu) == .ErrorUnion and + first_param_type.errorUnionPayload(zcu).eql(concrete_ty, zcu)) { const deref = try sema.analyzeLoad(block, src, object_ptr, src); return .{ .method = .{ @@ -28045,7 +28081,7 @@ fn fieldCallBind( } }; } } - break :found_decl decl_idx; + break :found_nav nav_index; }; const msg = msg: { @@ -28055,14 +28091,15 @@ fn fieldCallBind( }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, concrete_ty); - if (found_decl) |decl_idx| { - const decl = mod.declPtr(decl_idx); - try sema.errNote(.{ - .base_node_inst = decl.zir_decl_index.unwrap().?, - .offset = LazySrcLoc.Offset.nodeOffset(0), - }, msg, "'{}' is not a member function", .{field_name.fmt(ip)}); + if (found_nav) |nav_index| { + try sema.errNote( + zcu.navSrcLoc(nav_index), + msg, + "'{}' is not a member function", + .{field_name.fmt(ip)}, + ); } - if (concrete_ty.zigTypeTag(mod) == .ErrorUnion) { + if (concrete_ty.zigTypeTag(zcu) == .ErrorUnion) { try sema.errNote(src, msg, "consider using 'try', 'catch', or 'if'", .{}); } if (is_double_ptr) { @@ -28119,27 +28156,22 @@ fn namespaceLookup( src: LazySrcLoc, opt_namespace: InternPool.OptionalNamespaceIndex, decl_name: InternPool.NullTerminatedString, -) CompileError!?InternPool.DeclIndex { +) 
CompileError!?InternPool.Nav.Index { const pt = sema.pt; - const mod = pt.zcu; + const zcu = pt.zcu; const gpa = sema.gpa; - if (try sema.lookupInNamespace(block, src, opt_namespace, decl_name, true)) |decl_index| { - const decl = mod.declPtr(decl_index); - if (!decl.is_pub and decl.getFileScope(mod) != block.getFileScope(mod)) { - const msg = msg: { + if (try sema.lookupInNamespace(block, src, opt_namespace, decl_name, true)) |lookup| { + if (!lookup.accessible) { + return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "'{}' is not marked 'pub'", .{ - decl_name.fmt(&mod.intern_pool), + decl_name.fmt(&zcu.intern_pool), }); errdefer msg.destroy(gpa); - try sema.errNote(.{ - .base_node_inst = decl.zir_decl_index.unwrap().?, - .offset = LazySrcLoc.Offset.nodeOffset(0), - }, msg, "declared here", .{}); + try sema.errNote(zcu.navSrcLoc(lookup.nav), msg, "declared here", .{}); break :msg msg; - }; - return sema.failWithOwnedErrorMsg(block, msg); + }); } - return decl_index; + return lookup.nav; } return null; } @@ -28151,8 +28183,8 @@ fn namespaceLookupRef( opt_namespace: InternPool.OptionalNamespaceIndex, decl_name: InternPool.NullTerminatedString, ) CompileError!?Air.Inst.Ref { - const decl = (try sema.namespaceLookup(block, src, opt_namespace, decl_name)) orelse return null; - return try sema.analyzeDeclRef(src, decl); + const nav = try sema.namespaceLookup(block, src, opt_namespace, decl_name) orelse return null; + return try sema.analyzeNavRef(src, nav); } fn namespaceLookupVal( @@ -28162,8 +28194,8 @@ fn namespaceLookupVal( opt_namespace: InternPool.OptionalNamespaceIndex, decl_name: InternPool.NullTerminatedString, ) CompileError!?Air.Inst.Ref { - const decl = (try sema.namespaceLookup(block, src, opt_namespace, decl_name)) orelse return null; - return try sema.analyzeDeclVal(block, src, decl); + const nav = try sema.namespaceLookup(block, src, opt_namespace, decl_name) orelse return null; + return try sema.analyzeNavVal(block, src, 
nav); } fn structFieldPtr( @@ -29200,9 +29232,9 @@ const CoerceOpts = struct { fn get(info: @This(), sema: *Sema) !?LazySrcLoc { if (info.func_inst == .none) return null; - const fn_decl = try sema.funcDeclSrc(info.func_inst) orelse return null; + const func_inst = try sema.funcDeclSrcInst(info.func_inst) orelse return null; return .{ - .base_node_inst = fn_decl.zir_decl_index.unwrap().?, + .base_node_inst = func_inst, .offset = .{ .fn_proto_param_type = .{ .fn_proto_node_offset = 0, .param_index = info.param_i, @@ -29303,8 +29335,12 @@ fn coerceExtra( // Function body to function pointer. if (inst_ty.zigTypeTag(zcu) == .Fn) { const fn_val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, inst, undefined); - const fn_decl = fn_val.pointerDecl(zcu).?; - const inst_as_ptr = try sema.analyzeDeclRef(inst_src, fn_decl); + const fn_nav = switch (zcu.intern_pool.indexToKey(fn_val.toIntern())) { + .func => |f| f.owner_nav, + .@"extern" => |e| e.owner_nav, + else => unreachable, + }; + const inst_as_ptr = try sema.analyzeNavRef(inst_src, fn_nav); return sema.coerce(block, dest_ty, inst_as_ptr, inst_src); } @@ -29846,7 +29882,7 @@ fn coerceExtra( errdefer msg.destroy(sema.gpa); const ret_ty_src: LazySrcLoc = .{ - .base_node_inst = zcu.funcOwnerDeclPtr(sema.func_index).zir_decl_index.unwrap().?, + .base_node_inst = sema.getOwnerFuncDeclInst(), .offset = .{ .node_offset_fn_type_ret_ty = 0 }, }; try sema.errNote(ret_ty_src, msg, "'noreturn' declared here", .{}); @@ -29879,10 +29915,10 @@ fn coerceExtra( // Add notes about function return type if (opts.is_ret and - zcu.test_functions.get(zcu.funcOwnerDeclIndex(sema.func_index)) == null) + !zcu.test_functions.contains(zcu.funcInfo(sema.owner.unwrap().func).owner_nav)) { const ret_ty_src: LazySrcLoc = .{ - .base_node_inst = zcu.funcOwnerDeclPtr(sema.func_index).zir_decl_index.unwrap().?, + .base_node_inst = sema.getOwnerFuncDeclInst(), .offset = .{ .node_offset_fn_type_ret_ty = 0 }, }; if (inst_ty.isError(zcu) and 
!dest_ty.isError(zcu)) { @@ -30885,9 +30921,9 @@ fn coerceVarArgParam( if (block.is_typeof) return inst; const pt = sema.pt; - const mod = pt.zcu; + const zcu = pt.zcu; const uncasted_ty = sema.typeOf(inst); - const coerced = switch (uncasted_ty.zigTypeTag(mod)) { + const coerced = switch (uncasted_ty.zigTypeTag(zcu)) { // TODO consider casting to c_int/f64 if they fit .ComptimeInt, .ComptimeFloat => return sema.fail( block, @@ -30897,12 +30933,12 @@ fn coerceVarArgParam( ), .Fn => fn_ptr: { const fn_val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, inst, undefined); - const fn_decl = fn_val.pointerDecl(mod).?; - break :fn_ptr try sema.analyzeDeclRef(inst_src, fn_decl); + const fn_nav = zcu.funcInfo(fn_val.toIntern()).owner_nav; + break :fn_ptr try sema.analyzeNavRef(inst_src, fn_nav); }, .Array => return sema.fail(block, inst_src, "arrays must be passed by reference to variadic function", .{}), .Float => float: { - const target = mod.getTarget(); + const target = zcu.getTarget(); const double_bits = target.c_type_bit_size(.double); const inst_bits = uncasted_ty.floatBits(target); if (inst_bits >= double_bits) break :float inst; @@ -30912,10 +30948,10 @@ fn coerceVarArgParam( else => unreachable, } }, - else => if (uncasted_ty.isAbiInt(mod)) int: { + else => if (uncasted_ty.isAbiInt(zcu)) int: { if (!try sema.validateExternType(uncasted_ty, .param_ty)) break :int inst; - const target = mod.getTarget(); - const uncasted_info = uncasted_ty.intInfo(mod); + const target = zcu.getTarget(); + const uncasted_info = uncasted_ty.intInfo(zcu); if (uncasted_info.bits <= target.c_type_bit_size(switch (uncasted_info.signedness) { .signed => .int, .unsigned => .uint, @@ -32117,23 +32153,14 @@ fn coerceTupleToTuple( } }))); } -fn analyzeDeclVal( +fn analyzeNavVal( sema: *Sema, block: *Block, src: LazySrcLoc, - decl_index: InternPool.DeclIndex, + nav_index: InternPool.Nav.Index, ) CompileError!Air.Inst.Ref { - if (sema.decl_val_table.get(decl_index)) |result| { - 
return result; - } - const decl_ref = try sema.analyzeDeclRefInner(src, decl_index, false); - const result = try sema.analyzeLoad(block, src, decl_ref, src); - if (result.toInterned() != null) { - if (!block.is_typeof) { - try sema.decl_val_table.put(sema.gpa, decl_index, result); - } - } - return result; + const ref = try sema.analyzeNavRefInner(src, nav_index, false); + return sema.analyzeLoad(block, src, ref, src); } fn addReferenceEntry( @@ -32148,44 +32175,37 @@ fn addReferenceEntry( // TODO: we need to figure out how to model inline calls here. // They aren't references in the analysis sense, but ought to show up in the reference trace! // Would representing inline calls in the reference table cause excessive memory usage? - try zcu.addUnitReference(sema.ownerUnit(), referenced_unit, src); + try zcu.addUnitReference(sema.owner, referenced_unit, src); } -pub fn ensureDeclAnalyzed(sema: *Sema, decl_index: InternPool.DeclIndex) CompileError!void { +pub fn ensureNavResolved(sema: *Sema, src: LazySrcLoc, nav_index: InternPool.Nav.Index) CompileError!void { const pt = sema.pt; - const mod = pt.zcu; - const ip = &mod.intern_pool; - const decl = mod.declPtr(decl_index); - if (decl.analysis == .in_progress) { - const msg = try sema.errMsg(.{ - .base_node_inst = decl.zir_decl_index.unwrap().?, - .offset = LazySrcLoc.Offset.nodeOffset(0), - }, "dependency loop detected", .{}); - return sema.failWithOwnedErrorMsg(null, msg); - } + const zcu = pt.zcu; + const ip = &zcu.intern_pool; - pt.ensureDeclAnalyzed(decl_index) catch |err| { - if (sema.owner_func_index != .none) { - ip.funcSetAnalysisState(sema.owner_func_index, .dependency_failure); - } else { - sema.owner_decl.analysis = .dependency_failure; - } - return err; - }; -} + const nav = ip.getNav(nav_index); -fn ensureFuncBodyAnalyzed(sema: *Sema, func: InternPool.Index) CompileError!void { - const pt = sema.pt; - const mod = pt.zcu; - const ip = &mod.intern_pool; - pt.ensureFuncBodyAnalyzed(func) catch |err| { - if 
(sema.owner_func_index != .none) { - ip.funcSetAnalysisState(sema.owner_func_index, .dependency_failure); - } else { - sema.owner_decl.analysis = .dependency_failure; - } - return err; + const cau_index = nav.analysis_owner.unwrap() orelse { + assert(nav.status == .resolved); + return; }; + + // Note that even if `nav.status == .resolved`, we must still trigger `ensureCauAnalyzed` + // to make sure the value is up-to-date on incremental updates. + + assert(ip.getCau(cau_index).owner.unwrap().nav == nav_index); + + const anal_unit = AnalUnit.wrap(.{ .cau = cau_index }); + try sema.addReferenceEntry(src, anal_unit); + + if (zcu.analysis_in_progress.contains(anal_unit)) { + return sema.failWithOwnedErrorMsg(null, try sema.errMsg(.{ + .base_node_inst = ip.getCau(cau_index).zir_index, + .offset = LazySrcLoc.Offset.nodeOffset(0), + }, "dependency loop detected", .{})); + } + + return pt.ensureCauAnalyzed(cau_index); } fn optRefValue(sema: *Sema, opt_val: ?Value) !Value { @@ -32200,55 +32220,57 @@ fn optRefValue(sema: *Sema, opt_val: ?Value) !Value { } })); } -fn analyzeDeclRef(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.DeclIndex) CompileError!Air.Inst.Ref { - return sema.analyzeDeclRefInner(src, decl_index, true); +fn analyzeNavRef(sema: *Sema, src: LazySrcLoc, nav_index: InternPool.Nav.Index) CompileError!Air.Inst.Ref { + return sema.analyzeNavRefInner(src, nav_index, true); } -/// Analyze a reference to the decl at the given index. Ensures the underlying decl is analyzed, but +/// Analyze a reference to the `Nav` at the given index. Ensures the underlying `Nav` is analyzed, but /// only triggers analysis for function bodies if `analyze_fn_body` is true. If it's possible for a -/// decl_ref to end up in runtime code, the function body must be analyzed: `analyzeDeclRef` wraps +/// decl_ref to end up in runtime code, the function body must be analyzed: `analyzeNavRef` wraps /// this function with `analyze_fn_body` set to true. 
-fn analyzeDeclRefInner(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.DeclIndex, analyze_fn_body: bool) CompileError!Air.Inst.Ref { +fn analyzeNavRefInner(sema: *Sema, src: LazySrcLoc, orig_nav_index: InternPool.Nav.Index, analyze_fn_body: bool) CompileError!Air.Inst.Ref { const pt = sema.pt; - const mod = pt.zcu; - try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = decl_index })); - try sema.ensureDeclAnalyzed(decl_index); + const zcu = pt.zcu; + const ip = &zcu.intern_pool; - const decl_val = try mod.declPtr(decl_index).valueOrFail(); - const owner_decl = mod.declPtr(switch (mod.intern_pool.indexToKey(decl_val.toIntern())) { - .variable => |variable| variable.decl, - .extern_func => |extern_func| extern_func.decl, - .func => |func| func.owner_decl, - else => decl_index, - }); - // TODO: if this is a `decl_ref` of a non-variable decl, only depend on decl type - try sema.declareDependency(.{ .decl_val = decl_index }); + // TODO: if this is a `decl_ref` of a non-variable Nav, only depend on Nav type + try sema.declareDependency(.{ .nav_val = orig_nav_index }); + try sema.ensureNavResolved(src, orig_nav_index); + + const nav_val = zcu.navValue(orig_nav_index); + const nav_index, const is_const = switch (ip.indexToKey(nav_val.toIntern())) { + .variable => |v| .{ v.owner_nav, false }, + .func => |f| .{ f.owner_nav, true }, + .@"extern" => |e| .{ e.owner_nav, e.is_const }, + else => .{ orig_nav_index, true }, + }; + const nav_info = ip.getNav(nav_index).status.resolved; const ptr_ty = try pt.ptrTypeSema(.{ - .child = decl_val.typeOf(mod).toIntern(), + .child = nav_val.typeOf(zcu).toIntern(), .flags = .{ - .alignment = owner_decl.alignment, - .is_const = if (decl_val.getVariable(mod)) |variable| variable.is_const else true, - .address_space = owner_decl.@"addrspace", + .alignment = nav_info.alignment, + .is_const = is_const, + .address_space = nav_info.@"addrspace", }, }); if (analyze_fn_body) { - try sema.maybeQueueFuncBodyAnalysis(src, decl_index); + try 
sema.maybeQueueFuncBodyAnalysis(src, nav_index); } return Air.internedToRef((try pt.intern(.{ .ptr = .{ .ty = ptr_ty.toIntern(), - .base_addr = .{ .decl = decl_index }, + .base_addr = .{ .nav = nav_index }, .byte_offset = 0, } }))); } -fn maybeQueueFuncBodyAnalysis(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.DeclIndex) !void { - const mod = sema.pt.zcu; - const decl = mod.declPtr(decl_index); - const decl_val = try decl.valueOrFail(); - if (!mod.intern_pool.isFuncBody(decl_val.toIntern())) return; - if (!try sema.fnHasRuntimeBits(decl_val.typeOf(mod))) return; - try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .func = decl_val.toIntern() })); - try mod.ensureFuncBodyAnalysisQueued(decl_val.toIntern()); +fn maybeQueueFuncBodyAnalysis(sema: *Sema, src: LazySrcLoc, nav_index: InternPool.Nav.Index) !void { + const zcu = sema.pt.zcu; + const ip = &zcu.intern_pool; + const nav_val = zcu.navValue(nav_index); + if (!ip.isFuncBody(nav_val.toIntern())) return; + if (!try sema.fnHasRuntimeBits(nav_val.typeOf(zcu))) return; + try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .func = nav_val.toIntern() })); + try zcu.ensureFuncBodyAnalysisQueued(nav_val.toIntern()); } fn analyzeRef( @@ -32263,9 +32285,9 @@ fn analyzeRef( if (try sema.resolveValue(operand)) |val| { switch (mod.intern_pool.indexToKey(val.toIntern())) { - .extern_func => |extern_func| return sema.analyzeDeclRef(src, extern_func.decl), - .func => |func| return sema.analyzeDeclRef(src, func.owner_decl), - else => return anonDeclRef(sema, val.toIntern()), + .@"extern" => |e| return sema.analyzeNavRef(src, e.owner_nav), + .func => |f| return sema.analyzeNavRef(src, f.owner_nav), + else => return uavRef(sema, val.toIntern()), } } @@ -35198,7 +35220,7 @@ pub fn resolveStructAlignment( const ip = &mod.intern_pool; const target = mod.getTarget(); - assert(sema.ownerUnit().unwrap().decl == struct_type.decl.unwrap().?); + assert(sema.owner.unwrap().cau == struct_type.cau.unwrap().?); assert(struct_type.layout != 
.@"packed"); assert(struct_type.flagsUnordered(ip).alignment == .none); @@ -35242,7 +35264,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void { const ip = &zcu.intern_pool; const struct_type = zcu.typeToStruct(ty) orelse return; - assert(sema.ownerUnit().unwrap().decl == struct_type.decl.unwrap().?); + assert(sema.owner.unwrap().cau == struct_type.cau.unwrap().?); if (struct_type.haveLayout(ip)) return; @@ -35384,8 +35406,7 @@ fn semaBackingIntType(pt: Zcu.PerThread, struct_type: InternPool.LoadedStructTyp const gpa = zcu.gpa; const ip = &zcu.intern_pool; - const decl_index = struct_type.decl.unwrap().?; - const decl = zcu.declPtr(decl_index); + const cau_index = struct_type.cau.unwrap().?; const zir = zcu.namespacePtr(struct_type.namespace.unwrap().?).fileScope(zcu).zir; @@ -35400,13 +35421,11 @@ fn semaBackingIntType(pt: Zcu.PerThread, struct_type: InternPool.LoadedStructTyp .gpa = gpa, .arena = analysis_arena.allocator(), .code = zir, - .owner_decl = decl, - .owner_decl_index = decl_index, + .owner = AnalUnit.wrap(.{ .cau = cau_index }), .func_index = .none, .func_is_naked = false, .fn_ret_ty = Type.void, .fn_ret_ty_ies = null, - .owner_func_index = .none, .comptime_err_ret_trace = &comptime_err_ret_trace, }; defer sema.deinit(); @@ -35414,12 +35433,12 @@ fn semaBackingIntType(pt: Zcu.PerThread, struct_type: InternPool.LoadedStructTyp var block: Block = .{ .parent = null, .sema = &sema, - .namespace = struct_type.namespace.unwrap() orelse decl.src_namespace, + .namespace = ip.getCau(cau_index).namespace, .instructions = .{}, .inlining = null, .is_comptime = true, .src_base_inst = struct_type.zir_index.unwrap().?, - .type_name_ctx = decl.name, + .type_name_ctx = struct_type.name, }; defer assert(block.instructions.items.len == 0); @@ -35544,7 +35563,7 @@ pub fn resolveUnionAlignment( const ip = &zcu.intern_pool; const target = zcu.getTarget(); - assert(sema.ownerUnit().unwrap().decl == union_type.decl); + assert(sema.owner.unwrap().cau == 
union_type.cau); assert(!union_type.haveLayout(ip)); @@ -35584,7 +35603,7 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void { // Load again, since the tag type might have changed due to resolution. const union_type = ip.loadUnionType(ty.ip_index); - assert(sema.ownerUnit().unwrap().decl == union_type.decl); + assert(sema.owner.unwrap().cau == union_type.cau); const old_flags = union_type.flagsUnordered(ip); switch (old_flags.status) { @@ -35697,7 +35716,7 @@ pub fn resolveStructFully(sema: *Sema, ty: Type) SemaError!void { const ip = &mod.intern_pool; const struct_type = mod.typeToStruct(ty).?; - assert(sema.ownerUnit().unwrap().decl == struct_type.decl.unwrap().?); + assert(sema.owner.unwrap().cau == struct_type.cau.unwrap().?); if (struct_type.setFullyResolved(ip)) return; errdefer struct_type.clearFullyResolved(ip); @@ -35720,7 +35739,7 @@ pub fn resolveUnionFully(sema: *Sema, ty: Type) SemaError!void { const ip = &mod.intern_pool; const union_obj = mod.typeToUnion(ty).?; - assert(sema.ownerUnit().unwrap().decl == union_obj.decl); + assert(sema.owner.unwrap().cau == union_obj.cau); switch (union_obj.flagsUnordered(ip).status) { .none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {}, @@ -35754,21 +35773,8 @@ pub fn resolveTypeFieldsStruct( const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; - // If there is no owner decl it means the struct has no fields. 
- const owner_decl = struct_type.decl.unwrap() orelse return; - assert(sema.ownerUnit().unwrap().decl == owner_decl); - - switch (zcu.declPtr(owner_decl).analysis) { - .file_failure, - .dependency_failure, - .sema_failure, - => { - sema.owner_decl.analysis = .dependency_failure; - return error.AnalysisFail; - }, - else => {}, - } + assert(sema.owner.unwrap().cau == struct_type.cau.unwrap().?); if (struct_type.haveFieldTypes(ip)) return; @@ -35783,13 +35789,7 @@ pub fn resolveTypeFieldsStruct( defer struct_type.clearFieldTypesWip(ip); semaStructFields(pt, sema.arena, struct_type) catch |err| switch (err) { - error.AnalysisFail => { - if (zcu.declPtr(owner_decl).analysis == .complete) { - zcu.declPtr(owner_decl).analysis = .dependency_failure; - } - return error.AnalysisFail; - }, - error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail, error.OutOfMemory => |e| return e, error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable, }; } @@ -35799,9 +35799,8 @@ pub fn resolveStructFieldInits(sema: *Sema, ty: Type) SemaError!void { const zcu = pt.zcu; const ip = &zcu.intern_pool; const struct_type = zcu.typeToStruct(ty) orelse return; - const owner_decl = struct_type.decl.unwrap() orelse return; - assert(sema.ownerUnit().unwrap().decl == owner_decl); + assert(sema.owner.unwrap().cau == struct_type.cau.unwrap().?); // Inits can start as resolved if (struct_type.haveFieldInits(ip)) return; @@ -35819,13 +35818,7 @@ pub fn resolveStructFieldInits(sema: *Sema, ty: Type) SemaError!void { defer struct_type.clearInitsWip(ip); semaStructFieldInits(pt, sema.arena, struct_type) catch |err| switch (err) { - error.AnalysisFail => { - if (zcu.declPtr(owner_decl).analysis == .complete) { - zcu.declPtr(owner_decl).analysis = .dependency_failure; - } - return error.AnalysisFail; - }, - error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail, error.OutOfMemory => |e| return e, error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => 
unreachable, }; struct_type.setHaveFieldInits(ip); @@ -35835,20 +35828,9 @@ pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Load const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; - const owner_decl = zcu.declPtr(union_type.decl); - assert(sema.ownerUnit().unwrap().decl == union_type.decl); + assert(sema.owner.unwrap().cau == union_type.cau); - switch (owner_decl.analysis) { - .file_failure, - .dependency_failure, - .sema_failure, - => { - sema.owner_decl.analysis = .dependency_failure; - return error.AnalysisFail; - }, - else => {}, - } switch (union_type.flagsUnordered(ip).status) { .none => {}, .field_types_wip => { @@ -35869,14 +35851,8 @@ pub fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_type: InternPool.Load union_type.setStatus(ip, .field_types_wip); errdefer union_type.setStatus(ip, .none); - semaUnionFields(pt, sema.arena, union_type) catch |err| switch (err) { - error.AnalysisFail => { - if (owner_decl.analysis == .complete) { - owner_decl.analysis = .dependency_failure; - } - return error.AnalysisFail; - }, - error.OutOfMemory => return error.OutOfMemory, + semaUnionFields(pt, sema.arena, ty.toIntern(), union_type) catch |err| switch (err) { + error.AnalysisFail, error.OutOfMemory => |e| return e, error.ComptimeBreak, error.ComptimeReturn, error.GenericPoison => unreachable, }; union_type.setStatus(ip, .have_field_types); @@ -35891,28 +35867,28 @@ fn resolveInferredErrorSet( ies_index: InternPool.Index, ) CompileError!InternPool.Index { const pt = sema.pt; - const mod = pt.zcu; - const ip = &mod.intern_pool; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; const func_index = ip.iesFuncIndex(ies_index); - const func = mod.funcInfo(func_index); + const func = zcu.funcInfo(func_index); - try sema.declareDependency(.{ .func_ies = func_index }); + try sema.declareDependency(.{ .interned = func_index }); // resolved IES // TODO: during an incremental update this might not be `.none`, but the // 
function might be out-of-date! const resolved_ty = func.resolvedErrorSetUnordered(ip); if (resolved_ty != .none) return resolved_ty; - if (func.analysisUnordered(ip).state == .in_progress) + if (zcu.analysis_in_progress.contains(AnalUnit.wrap(.{ .func = func_index }))) { return sema.fail(block, src, "unable to resolve inferred error set", .{}); + } // In order to ensure that all dependencies are properly added to the set, // we need to ensure the function body is analyzed of the inferred error // set. However, in the case of comptime/inline function calls with // inferred error sets, each call gets an adhoc InferredErrorSet object, which // has no corresponding function body. - const ies_func_owner_decl = mod.declPtr(func.owner_decl); - const ies_func_info = mod.typeToFunc(ies_func_owner_decl.typeOf(mod)).?; + const ies_func_info = zcu.typeToFunc(Type.fromInterned(func.ty)).?; // if ies declared by a inline function with generic return type, the return_type should be generic_poison, // because inline function does not create a new declaration, and the ies has been filled with analyzeCall, // so here we can simply skip this case. 
@@ -35920,22 +35896,17 @@ fn resolveInferredErrorSet( assert(ies_func_info.cc == .Inline); } else if (ip.errorUnionSet(ies_func_info.return_type) == ies_index) { if (ies_func_info.is_generic) { - const msg = msg: { + return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "unable to resolve inferred error set of generic function", .{}); errdefer msg.destroy(sema.gpa); - - try sema.errNote(.{ - .base_node_inst = ies_func_owner_decl.zir_decl_index.unwrap().?, - .offset = LazySrcLoc.Offset.nodeOffset(0), - }, msg, "generic function declared here", .{}); + try sema.errNote(zcu.navSrcLoc(func.owner_nav), msg, "generic function declared here", .{}); break :msg msg; - }; - return sema.failWithOwnedErrorMsg(block, msg); + }); } // In this case we are dealing with the actual InferredErrorSet object that // corresponds to the function, not one created to track an inline/comptime call. try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .func = func_index })); - try sema.ensureFuncBodyAnalyzed(func_index); + try pt.ensureFuncBodyAnalyzed(func_index); } // This will now have been resolved by the logic at the end of `Module.analyzeFnBody` @@ -36092,9 +36063,8 @@ fn semaStructFields( const zcu = pt.zcu; const gpa = zcu.gpa; const ip = &zcu.intern_pool; - const decl_index = struct_type.decl.unwrap() orelse return; - const decl = zcu.declPtr(decl_index); - const namespace_index = struct_type.namespace.unwrap() orelse decl.src_namespace; + const cau_index = struct_type.cau.unwrap().?; + const namespace_index = ip.getCau(cau_index).namespace; const zir = zcu.namespacePtr(namespace_index).fileScope(zcu).zir; const zir_index = struct_type.zir_index.unwrap().?.resolve(ip); @@ -36119,13 +36089,11 @@ fn semaStructFields( .gpa = gpa, .arena = arena, .code = zir, - .owner_decl = decl, - .owner_decl_index = decl_index, + .owner = AnalUnit.wrap(.{ .cau = cau_index }), .func_index = .none, .func_is_naked = false, .fn_ret_ty = Type.void, .fn_ret_ty_ies = null, - 
.owner_func_index = .none, .comptime_err_ret_trace = &comptime_err_ret_trace, }; defer sema.deinit(); @@ -36138,7 +36106,7 @@ fn semaStructFields( .inlining = null, .is_comptime = true, .src_base_inst = struct_type.zir_index.unwrap().?, - .type_name_ctx = decl.name, + .type_name_ctx = struct_type.name, }; defer assert(block_scope.instructions.items.len == 0); @@ -36318,9 +36286,8 @@ fn semaStructFieldInits( assert(!struct_type.haveFieldInits(ip)); - const decl_index = struct_type.decl.unwrap() orelse return; - const decl = zcu.declPtr(decl_index); - const namespace_index = struct_type.namespace.unwrap() orelse decl.src_namespace; + const cau_index = struct_type.cau.unwrap().?; + const namespace_index = ip.getCau(cau_index).namespace; const zir = zcu.namespacePtr(namespace_index).fileScope(zcu).zir; const zir_index = struct_type.zir_index.unwrap().?.resolve(ip); const fields_len, const small, var extra_index = structZirInfo(zir, zir_index); @@ -36333,13 +36300,11 @@ fn semaStructFieldInits( .gpa = gpa, .arena = arena, .code = zir, - .owner_decl = decl, - .owner_decl_index = decl_index, + .owner = AnalUnit.wrap(.{ .cau = cau_index }), .func_index = .none, .func_is_naked = false, .fn_ret_ty = Type.void, .fn_ret_ty_ies = null, - .owner_func_index = .none, .comptime_err_ret_trace = &comptime_err_ret_trace, }; defer sema.deinit(); @@ -36352,7 +36317,7 @@ fn semaStructFieldInits( .inlining = null, .is_comptime = true, .src_base_inst = struct_type.zir_index.unwrap().?, - .type_name_ctx = decl.name, + .type_name_ctx = struct_type.name, }; defer assert(block_scope.instructions.items.len == 0); @@ -36449,14 +36414,14 @@ fn semaStructFieldInits( try sema.flushExports(); } -fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_type: InternPool.LoadedUnionType) CompileError!void { +fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_ty: InternPool.Index, union_type: InternPool.LoadedUnionType) CompileError!void { const tracy = trace(@src()); defer tracy.end(); 
const zcu = pt.zcu; const gpa = zcu.gpa; const ip = &zcu.intern_pool; - const decl_index = union_type.decl; + const cau_index = union_type.cau; const zir = zcu.namespacePtr(union_type.namespace.unwrap().?).fileScope(zcu).zir; const zir_index = union_type.zir_index.resolve(ip); const extended = zir.instructions.items(.data)[@intFromEnum(zir_index)].extended; @@ -36501,8 +36466,6 @@ fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_type: InternPool.L const body = zir.bodySlice(extra_index, body_len); extra_index += body.len; - const decl = zcu.declPtr(decl_index); - var comptime_err_ret_trace = std.ArrayList(LazySrcLoc).init(gpa); defer comptime_err_ret_trace.deinit(); @@ -36511,13 +36474,11 @@ fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_type: InternPool.L .gpa = gpa, .arena = arena, .code = zir, - .owner_decl = decl, - .owner_decl_index = decl_index, + .owner = AnalUnit.wrap(.{ .cau = cau_index }), .func_index = .none, .func_is_naked = false, .fn_ret_ty = Type.void, .fn_ret_ty_ies = null, - .owner_func_index = .none, .comptime_err_ret_trace = &comptime_err_ret_trace, }; defer sema.deinit(); @@ -36530,7 +36491,7 @@ fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_type: InternPool.L .inlining = null, .is_comptime = true, .src_base_inst = union_type.zir_index, - .type_name_ctx = decl.name, + .type_name_ctx = union_type.name, }; defer assert(block_scope.instructions.items.len == 0); @@ -36817,10 +36778,10 @@ fn semaUnionFields(pt: Zcu.PerThread, arena: Allocator, union_type: InternPool.L return sema.failWithOwnedErrorMsg(&block_scope, msg); } } else if (enum_field_vals.count() > 0) { - const enum_ty = try sema.generateUnionTagTypeNumbered(&block_scope, enum_field_names, enum_field_vals.keys(), zcu.declPtr(union_type.decl)); + const enum_ty = try sema.generateUnionTagTypeNumbered(enum_field_names, enum_field_vals.keys(), union_ty, union_type.name); union_type.setTagType(ip, enum_ty); } else { - const enum_ty = try 
sema.generateUnionTagTypeSimple(&block_scope, enum_field_names, zcu.declPtr(union_type.decl)); + const enum_ty = try sema.generateUnionTagTypeSimple(enum_field_names, union_ty, union_type.name); union_type.setTagType(ip, enum_ty); } @@ -36836,39 +36797,27 @@ fn semaUnionFieldVal(sema: *Sema, block: *Block, src: LazySrcLoc, int_tag_ty: Ty fn generateUnionTagTypeNumbered( sema: *Sema, - block: *Block, enum_field_names: []const InternPool.NullTerminatedString, enum_field_vals: []const InternPool.Index, - union_owner_decl: *Module.Decl, + union_type: InternPool.Index, + union_name: InternPool.NullTerminatedString, ) !InternPool.Index { const pt = sema.pt; const mod = pt.zcu; const gpa = sema.gpa; const ip = &mod.intern_pool; - const new_decl_index = try pt.allocateNewDecl(block.namespace); - errdefer pt.destroyDecl(new_decl_index); const name = try ip.getOrPutStringFmt( gpa, pt.tid, "@typeInfo({}).Union.tag_type.?", - .{union_owner_decl.fqn.fmt(ip)}, + .{union_name.fmt(ip)}, .no_embedded_nulls, ); - try pt.initNewAnonDecl( - new_decl_index, - Value.@"unreachable", - name, - name.toOptional(), - ); - errdefer pt.abortAnonDecl(new_decl_index); - - const new_decl = mod.declPtr(new_decl_index); - new_decl.owns_tv = true; const enum_ty = try ip.getGeneratedTagEnumType(gpa, pt.tid, .{ - .decl = new_decl_index, - .owner_union_ty = union_owner_decl.val.toIntern(), + .name = name, + .owner_union_ty = union_type, .tag_ty = if (enum_field_vals.len == 0) (try pt.intType(.unsigned, 0)).toIntern() else @@ -36878,46 +36827,31 @@ fn generateUnionTagTypeNumbered( .tag_mode = .explicit, }); - new_decl.val = Value.fromInterned(enum_ty); - - try pt.finalizeAnonDecl(new_decl_index); return enum_ty; } fn generateUnionTagTypeSimple( sema: *Sema, - block: *Block, enum_field_names: []const InternPool.NullTerminatedString, - union_owner_decl: *Module.Decl, + union_type: InternPool.Index, + union_name: InternPool.NullTerminatedString, ) !InternPool.Index { const pt = sema.pt; const mod = pt.zcu; 
const ip = &mod.intern_pool; const gpa = sema.gpa; - const new_decl_index = new_decl_index: { - const new_decl_index = try pt.allocateNewDecl(block.namespace); - errdefer pt.destroyDecl(new_decl_index); - const name = try ip.getOrPutStringFmt( - gpa, - pt.tid, - "@typeInfo({}).Union.tag_type.?", - .{union_owner_decl.fqn.fmt(ip)}, - .no_embedded_nulls, - ); - try pt.initNewAnonDecl( - new_decl_index, - Value.@"unreachable", - name, - name.toOptional(), - ); - break :new_decl_index new_decl_index; - }; - errdefer pt.abortAnonDecl(new_decl_index); + const name = try ip.getOrPutStringFmt( + gpa, + pt.tid, + "@typeInfo({}).Union.tag_type.?", + .{union_name.fmt(ip)}, + .no_embedded_nulls, + ); const enum_ty = try ip.getGeneratedTagEnumType(gpa, pt.tid, .{ - .decl = new_decl_index, - .owner_union_ty = union_owner_decl.val.toIntern(), + .name = name, + .owner_union_ty = union_type, .tag_ty = if (enum_field_names.len == 0) (try pt.intType(.unsigned, 0)).toIntern() else @@ -36927,11 +36861,6 @@ fn generateUnionTagTypeSimple( .tag_mode = .auto, }); - const new_decl = mod.declPtr(new_decl_index); - new_decl.owns_tv = true; - new_decl.val = Value.fromInterned(enum_ty); - - try pt.finalizeAnonDecl(new_decl_index); return enum_ty; } @@ -37057,9 +36986,9 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { // values, not types .undef, .simple_value, - .ptr_decl, - .ptr_anon_decl, - .ptr_anon_decl_aligned, + .ptr_nav, + .ptr_uav, + .ptr_uav_aligned, .ptr_comptime_alloc, .ptr_comptime_field, .ptr_int, @@ -37096,7 +37025,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .float_c_longdouble_f128, .float_comptime_float, .variable, - .extern_func, + .@"extern", .func_decl, .func_instance, .func_coerced, @@ -37965,7 +37894,7 @@ fn intFitsInType( .zero_usize, .zero_u8 => return true, else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .undef => return true, - .variable, .extern_func, .func, .ptr => { + .variable, 
.@"extern", .func, .ptr => { const target = mod.getTarget(); const ptr_bits = target.ptrBitWidth(); return switch (info.signedness) { @@ -38240,24 +38169,24 @@ pub fn declareDependency(sema: *Sema, dependee: InternPool.Dependee) !void { // of a type and they use `@This()`. This dependency would be unnecessary, and in fact would // just result in over-analysis since `Zcu.findOutdatedToAnalyze` would never be able to resolve // the loop. - if (sema.owner_func_index == .none and dependee == .decl_val and dependee.decl_val == sema.owner_decl_index) { - return; + switch (sema.owner.unwrap()) { + .cau => |cau| switch (dependee) { + .nav_val => |nav| if (zcu.intern_pool.getNav(nav).analysis_owner == cau.toOptional()) { + return; + }, + else => {}, + }, + .func => {}, } - const depender = AnalUnit.wrap( - if (sema.owner_func_index != .none) - .{ .func = sema.owner_func_index } - else - .{ .decl = sema.owner_decl_index }, - ); - try zcu.intern_pool.addDependency(sema.gpa, depender, dependee); + try zcu.intern_pool.addDependency(sema.gpa, sema.owner, dependee); } fn isComptimeMutablePtr(sema: *Sema, val: Value) bool { return switch (sema.pt.zcu.intern_pool.indexToKey(val.toIntern())) { .slice => |slice| sema.isComptimeMutablePtr(Value.fromInterned(slice.ptr)), .ptr => |ptr| switch (ptr.base_addr) { - .anon_decl, .decl, .int => false, + .uav, .nav, .int => false, .comptime_field => true, .comptime_alloc => |alloc_index| !sema.getComptimeAlloc(alloc_index).is_const, .eu_payload, .opt_payload => |base| sema.isComptimeMutablePtr(Value.fromInterned(base)), @@ -38388,19 +38317,17 @@ pub fn flushExports(sema: *Sema) !void { const zcu = sema.pt.zcu; const gpa = zcu.gpa; - const unit = sema.ownerUnit(); - // There may be existing exports. For instance, a struct may export // things during both field type resolution and field default resolution. // // So, pick up and delete any existing exports. 
This strategy performs // redundant work, but that's okay, because this case is exceedingly rare. - if (zcu.single_exports.get(unit)) |export_idx| { + if (zcu.single_exports.get(sema.owner)) |export_idx| { try sema.exports.append(gpa, zcu.all_exports.items[export_idx]); - } else if (zcu.multi_exports.get(unit)) |info| { + } else if (zcu.multi_exports.get(sema.owner)) |info| { try sema.exports.appendSlice(gpa, zcu.all_exports.items[info.index..][0..info.len]); } - zcu.deleteUnitExports(unit); + zcu.deleteUnitExports(sema.owner); // `sema.exports` is completed; store the data into the `Zcu`. if (sema.exports.items.len == 1) { @@ -38410,24 +38337,55 @@ pub fn flushExports(sema: *Sema) !void { break :idx zcu.all_exports.items.len - 1; }; zcu.all_exports.items[export_idx] = sema.exports.items[0]; - zcu.single_exports.putAssumeCapacityNoClobber(unit, @intCast(export_idx)); + zcu.single_exports.putAssumeCapacityNoClobber(sema.owner, @intCast(export_idx)); } else { try zcu.multi_exports.ensureUnusedCapacity(gpa, 1); const exports_base = zcu.all_exports.items.len; try zcu.all_exports.appendSlice(gpa, sema.exports.items); - zcu.multi_exports.putAssumeCapacityNoClobber(unit, .{ + zcu.multi_exports.putAssumeCapacityNoClobber(sema.owner, .{ .index = @intCast(exports_base), .len = @intCast(sema.exports.items.len), }); } } -pub fn ownerUnit(sema: Sema) AnalUnit { - if (sema.owner_func_index != .none) { - return AnalUnit.wrap(.{ .func = sema.owner_func_index }); - } else { - return AnalUnit.wrap(.{ .decl = sema.owner_decl_index }); - } +/// Given that this `Sema` is owned by the `Cau` of a `declaration`, fetches +/// the corresponding `Nav`. +fn getOwnerCauNav(sema: *Sema) InternPool.Nav.Index { + const cau = sema.owner.unwrap().cau; + return sema.pt.zcu.intern_pool.getCau(cau).owner.unwrap().nav; +} + +/// Given that this `Sema` is owned by the `Cau` of a `declaration`, fetches +/// the declaration name from its corresponding `Nav`. 
+fn getOwnerCauNavName(sema: *Sema) InternPool.NullTerminatedString { + const nav = sema.getOwnerCauNav(); + return sema.pt.zcu.intern_pool.getNav(nav).name; +} + +/// Given that this `Sema` is owned by the `Cau` of a `declaration`, fetches +/// the `TrackedInst` corresponding to this `declaration` instruction. +fn getOwnerCauDeclInst(sema: *Sema) InternPool.TrackedInst.Index { + const ip = &sema.pt.zcu.intern_pool; + const cau = ip.getCau(sema.owner.unwrap().cau); + assert(cau.owner.unwrap() == .nav); + return cau.zir_index; +} + +/// Given that this `Sema` is owned by a runtime function, fetches the +/// `TrackedInst` corresponding to its `declaration` instruction. +fn getOwnerFuncDeclInst(sema: *Sema) InternPool.TrackedInst.Index { + const zcu = sema.pt.zcu; + const ip = &zcu.intern_pool; + const func = sema.owner.unwrap().func; + const func_info = zcu.funcInfo(func); + const cau = if (func_info.generic_owner == .none) cau: { + break :cau ip.getNav(func_info.owner_nav).analysis_owner.unwrap().?; + } else cau: { + const generic_owner = zcu.funcInfo(func_info.generic_owner); + break :cau ip.getNav(generic_owner.owner_nav).analysis_owner.unwrap().?; + }; + return ip.getCau(cau).zir_index; } pub const bitCastVal = @import("Sema/bitcast.zig").bitCast; diff --git a/src/Sema/bitcast.zig b/src/Sema/bitcast.zig index c5155dec6377..065de877e2f3 100644 --- a/src/Sema/bitcast.zig +++ b/src/Sema/bitcast.zig @@ -254,7 +254,7 @@ const UnpackValueBits = struct { .error_set_type, .inferred_error_set_type, .variable, - .extern_func, + .@"extern", .func, .err, .error_union, diff --git a/src/Sema/comptime_ptr_access.zig b/src/Sema/comptime_ptr_access.zig index b7d660c4267e..8f0b8b1b175c 100644 --- a/src/Sema/comptime_ptr_access.zig +++ b/src/Sema/comptime_ptr_access.zig @@ -217,15 +217,23 @@ fn loadComptimePtrInner( }; const base_val: MutableValue = switch (ptr.base_addr) { - .decl => |decl_index| val: { - try sema.declareDependency(.{ .decl_val = decl_index }); - try 
sema.ensureDeclAnalyzed(decl_index); - const decl = zcu.declPtr(decl_index); - if (decl.val.getVariable(zcu) != null) return .runtime_load; - break :val .{ .interned = decl.val.toIntern() }; + .nav => |nav| val: { + try sema.declareDependency(.{ .nav_val = nav }); + try sema.ensureNavResolved(src, nav); + const val = ip.getNav(nav).status.resolved.val; + switch (ip.indexToKey(val)) { + .variable => return .runtime_load, + // We let `.@"extern"` through here if it's a function. + // This allows you to alias `extern fn`s. + .@"extern" => |e| if (Type.fromInterned(e.ty).zigTypeTag(zcu) == .Fn) + break :val .{ .interned = val } + else + return .runtime_load, + else => break :val .{ .interned = val }, + } }, .comptime_alloc => |alloc_index| sema.getComptimeAlloc(alloc_index).val, - .anon_decl => |anon_decl| .{ .interned = anon_decl.val }, + .uav => |uav| .{ .interned = uav.val }, .comptime_field => |val| .{ .interned = val }, .int => return .runtime_load, .eu_payload => |base_ptr_ip| val: { @@ -580,7 +588,7 @@ fn prepareComptimePtrStore( // `base_strat` will not be an error case. 
const base_strat: ComptimeStoreStrategy = switch (ptr.base_addr) { - .decl, .anon_decl, .int => return .runtime_store, + .nav, .uav, .int => return .runtime_store, .comptime_field => return .comptime_field, .comptime_alloc => |alloc_index| .{ .direct = .{ .alloc = alloc_index, diff --git a/src/Type.zig b/src/Type.zig index 9fcfec084653..506a9dce811f 100644 --- a/src/Type.zig +++ b/src/Type.zig @@ -268,9 +268,9 @@ pub fn print(ty: Type, writer: anytype, pt: Zcu.PerThread) @TypeOf(writer).Error return; }, .inferred_error_set_type => |func_index| { - const owner_decl = mod.funcOwnerDeclPtr(func_index); + const func_nav = ip.getNav(mod.funcInfo(func_index).owner_nav); try writer.print("@typeInfo(@typeInfo(@TypeOf({})).Fn.return_type.?).ErrorUnion.error_set", .{ - owner_decl.fqn.fmt(ip), + func_nav.fqn.fmt(ip), }); }, .error_set_type => |error_set_type| { @@ -331,15 +331,11 @@ pub fn print(ty: Type, writer: anytype, pt: Zcu.PerThread) @TypeOf(writer).Error .generic_poison => unreachable, }, .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - if (struct_type.decl.unwrap()) |decl_index| { - const decl = mod.declPtr(decl_index); - try writer.print("{}", .{decl.fqn.fmt(ip)}); - } else if (ip.loadStructType(ty.toIntern()).namespace.unwrap()) |namespace_index| { - const namespace = mod.namespacePtr(namespace_index); - try namespace.renderFullyQualifiedName(ip, .empty, writer); - } else { + const name = ip.loadStructType(ty.toIntern()).name; + if (name == .empty) { try writer.writeAll("@TypeOf(.{})"); + } else { + try writer.print("{}", .{name.fmt(ip)}); } }, .anon_struct_type => |anon_struct| { @@ -366,16 +362,16 @@ pub fn print(ty: Type, writer: anytype, pt: Zcu.PerThread) @TypeOf(writer).Error }, .union_type => { - const decl = mod.declPtr(ip.loadUnionType(ty.toIntern()).decl); - try writer.print("{}", .{decl.fqn.fmt(ip)}); + const name = ip.loadUnionType(ty.toIntern()).name; + try writer.print("{}", .{name.fmt(ip)}); }, .opaque_type => { - const 
decl = mod.declPtr(ip.loadOpaqueType(ty.toIntern()).decl); - try writer.print("{}", .{decl.fqn.fmt(ip)}); + const name = ip.loadOpaqueType(ty.toIntern()).name; + try writer.print("{}", .{name.fmt(ip)}); }, .enum_type => { - const decl = mod.declPtr(ip.loadEnumType(ty.toIntern()).decl); - try writer.print("{}", .{decl.fqn.fmt(ip)}); + const name = ip.loadEnumType(ty.toIntern()).name; + try writer.print("{}", .{name.fmt(ip)}); }, .func_type => |fn_info| { if (fn_info.is_noinline) { @@ -427,7 +423,7 @@ pub fn print(ty: Type, writer: anytype, pt: Zcu.PerThread) @TypeOf(writer).Error .undef, .simple_value, .variable, - .extern_func, + .@"extern", .func, .int, .err, @@ -645,7 +641,7 @@ pub fn hasRuntimeBitsAdvanced( .undef, .simple_value, .variable, - .extern_func, + .@"extern", .func, .int, .err, @@ -757,7 +753,7 @@ pub fn hasWellDefinedLayout(ty: Type, mod: *Module) bool { .undef, .simple_value, .variable, - .extern_func, + .@"extern", .func, .int, .err, @@ -1108,7 +1104,7 @@ pub fn abiAlignmentAdvanced( .undef, .simple_value, .variable, - .extern_func, + .@"extern", .func, .int, .err, @@ -1483,7 +1479,7 @@ pub fn abiSizeAdvanced( .undef, .simple_value, .variable, - .extern_func, + .@"extern", .func, .int, .err, @@ -1813,7 +1809,7 @@ pub fn bitSizeAdvanced( .undef, .simple_value, .variable, - .extern_func, + .@"extern", .func, .int, .err, @@ -2351,7 +2347,7 @@ pub fn intInfo(starting_ty: Type, mod: *Module) InternPool.Key.IntType { .undef, .simple_value, .variable, - .extern_func, + .@"extern", .func, .int, .err, @@ -2700,7 +2696,7 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value { .undef, .simple_value, .variable, - .extern_func, + .@"extern", .func, .int, .err, @@ -2899,7 +2895,7 @@ pub fn comptimeOnlyAdvanced(ty: Type, pt: Zcu.PerThread, comptime strat: Resolve .undef, .simple_value, .variable, - .extern_func, + .@"extern", .func, .int, .err, @@ -3007,6 +3003,26 @@ pub fn getNamespace(ty: Type, zcu: *Zcu) ?InternPool.OptionalNamespaceIndex 
{ }; } +// TODO: new dwarf structure will also need the enclosing code block for types created in imperative scopes +pub fn getParentNamespace(ty: Type, zcu: *Zcu) ?InternPool.OptionalNamespaceIndex { + const ip = &zcu.intern_pool; + const cau = switch (ip.indexToKey(ty.toIntern())) { + .struct_type => ip.loadStructType(ty.toIntern()).cau, + .union_type => ip.loadUnionType(ty.toIntern()).cau.toOptional(), + .enum_type => |e| switch (e) { + .declared, .reified => ip.loadEnumType(ty.toIntern()).cau, + .generated_tag => |gt| ip.loadUnionType(gt.union_type).cau.toOptional(), + .empty_struct => unreachable, + }, + // TODO: this doesn't handle opaque types with empty namespaces + .opaque_type => return ip.namespacePtr(ip.loadOpaqueType(ty.toIntern()).namespace.unwrap().?).parent, + else => return null, + }; + return ip.namespacePtr(ip.getCau(cau.unwrap() orelse return .none).namespace) + // TODO: I thought the cau contained the parent namespace based on "analyzed within" but alas + .parent; +} + // Works for vectors and vectors of integers. 
pub fn minInt(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value { const mod = pt.zcu; @@ -3321,21 +3337,6 @@ pub fn structFieldOffset(ty: Type, index: usize, pt: Zcu.PerThread) u64 { } } -pub fn getOwnerDecl(ty: Type, mod: *Module) InternPool.DeclIndex { - return ty.getOwnerDeclOrNull(mod) orelse unreachable; -} - -pub fn getOwnerDeclOrNull(ty: Type, mod: *Module) ?InternPool.DeclIndex { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .struct_type => ip.loadStructType(ty.toIntern()).decl.unwrap(), - .union_type => ip.loadUnionType(ty.toIntern()).decl, - .opaque_type => ip.loadOpaqueType(ty.toIntern()).decl, - .enum_type => ip.loadEnumType(ty.toIntern()).decl, - else => null, - }; -} - pub fn srcLocOrNull(ty: Type, zcu: *Zcu) ?Module.LazySrcLoc { const ip = &zcu.intern_pool; return .{ @@ -3366,7 +3367,7 @@ pub fn isTuple(ty: Type, mod: *Module) bool { .struct_type => { const struct_type = ip.loadStructType(ty.toIntern()); if (struct_type.layout == .@"packed") return false; - if (struct_type.decl == .none) return false; + if (struct_type.cau == .none) return false; return struct_type.flagsUnordered(ip).is_tuple; }, .anon_struct_type => |anon_struct| anon_struct.names.len == 0, @@ -3388,7 +3389,7 @@ pub fn isTupleOrAnonStruct(ty: Type, mod: *Module) bool { .struct_type => { const struct_type = ip.loadStructType(ty.toIntern()); if (struct_type.layout == .@"packed") return false; - if (struct_type.decl == .none) return false; + if (struct_type.cau == .none) return false; return struct_type.flagsUnordered(ip).is_tuple; }, .anon_struct_type => true, @@ -3444,6 +3445,21 @@ pub fn typeDeclInst(ty: Type, zcu: *const Zcu) ?InternPool.TrackedInst.Index { }; } +pub fn typeDeclInstAllowGeneratedTag(ty: Type, zcu: *const Zcu) ?InternPool.TrackedInst.Index { + const ip = &zcu.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .struct_type => ip.loadStructType(ty.toIntern()).zir_index.unwrap(), + .union_type => 
ip.loadUnionType(ty.toIntern()).zir_index, + .enum_type => |e| switch (e) { + .declared, .reified => ip.loadEnumType(ty.toIntern()).zir_index.unwrap().?, + .generated_tag => |gt| ip.loadUnionType(gt.union_type).zir_index, + .empty_struct => unreachable, + }, + .opaque_type => ip.loadOpaqueType(ty.toIntern()).zir_index, + else => null, + }; +} + pub fn typeDeclSrcLine(ty: Type, zcu: *Zcu) ?u32 { const ip = &zcu.intern_pool; const tracked = switch (ip.indexToKey(ty.toIntern())) { @@ -3471,7 +3487,7 @@ pub fn typeDeclSrcLine(ty: Type, zcu: *Zcu) ?u32 { }; } -/// Given a namespace type, returns its list of caotured values. +/// Given a namespace type, returns its list of captured values. pub fn getCaptures(ty: Type, zcu: *const Zcu) InternPool.CaptureValue.Slice { const ip = &zcu.intern_pool; return switch (ip.indexToKey(ty.toIntern())) { @@ -3773,7 +3789,11 @@ fn resolveStructInner( const gpa = zcu.gpa; const struct_obj = zcu.typeToStruct(ty).?; - const owner_decl_index = struct_obj.decl.unwrap() orelse return; + const owner = InternPool.AnalUnit.wrap(.{ .cau = struct_obj.cau.unwrap() orelse return }); + + if (zcu.failed_analysis.contains(owner) or zcu.transitive_failed_analysis.contains(owner)) { + return error.AnalysisFail; + } var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); @@ -3786,24 +3806,30 @@ fn resolveStructInner( .gpa = gpa, .arena = analysis_arena.allocator(), .code = undefined, // This ZIR will not be used. 
- .owner_decl = zcu.declPtr(owner_decl_index), - .owner_decl_index = owner_decl_index, + .owner = owner, .func_index = .none, .func_is_naked = false, .fn_ret_ty = Type.void, .fn_ret_ty_ies = null, - .owner_func_index = .none, .comptime_err_ret_trace = &comptime_err_ret_trace, }; defer sema.deinit(); - switch (resolution) { - .fields => return sema.resolveTypeFieldsStruct(ty.toIntern(), struct_obj), - .inits => return sema.resolveStructFieldInits(ty), - .alignment => return sema.resolveStructAlignment(ty.toIntern(), struct_obj), - .layout => return sema.resolveStructLayout(ty), - .full => return sema.resolveStructFully(ty), - } + (switch (resolution) { + .fields => sema.resolveTypeFieldsStruct(ty.toIntern(), struct_obj), + .inits => sema.resolveStructFieldInits(ty), + .alignment => sema.resolveStructAlignment(ty.toIntern(), struct_obj), + .layout => sema.resolveStructLayout(ty), + .full => sema.resolveStructFully(ty), + }) catch |err| switch (err) { + error.AnalysisFail => { + if (!zcu.failed_analysis.contains(owner)) { + try zcu.transitive_failed_analysis.put(gpa, owner, {}); + } + return error.AnalysisFail; + }, + error.OutOfMemory => |e| return e, + }; } /// `ty` must be a union. @@ -3816,7 +3842,11 @@ fn resolveUnionInner( const gpa = zcu.gpa; const union_obj = zcu.typeToUnion(ty).?; - const owner_decl_index = union_obj.decl; + const owner = InternPool.AnalUnit.wrap(.{ .cau = union_obj.cau }); + + if (zcu.failed_analysis.contains(owner) or zcu.transitive_failed_analysis.contains(owner)) { + return error.AnalysisFail; + } var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); @@ -3829,23 +3859,29 @@ fn resolveUnionInner( .gpa = gpa, .arena = analysis_arena.allocator(), .code = undefined, // This ZIR will not be used. 
- .owner_decl = zcu.declPtr(owner_decl_index), - .owner_decl_index = owner_decl_index, + .owner = owner, .func_index = .none, .func_is_naked = false, .fn_ret_ty = Type.void, .fn_ret_ty_ies = null, - .owner_func_index = .none, .comptime_err_ret_trace = &comptime_err_ret_trace, }; defer sema.deinit(); - switch (resolution) { - .fields => return sema.resolveTypeFieldsUnion(ty, union_obj), - .alignment => return sema.resolveUnionAlignment(ty, union_obj), - .layout => return sema.resolveUnionLayout(ty), - .full => return sema.resolveUnionFully(ty), - } + (switch (resolution) { + .fields => sema.resolveTypeFieldsUnion(ty, union_obj), + .alignment => sema.resolveUnionAlignment(ty, union_obj), + .layout => sema.resolveUnionLayout(ty), + .full => sema.resolveUnionFully(ty), + }) catch |err| switch (err) { + error.AnalysisFail => { + if (!zcu.failed_analysis.contains(owner)) { + try zcu.transitive_failed_analysis.put(gpa, owner, {}); + } + return error.AnalysisFail; + }, + error.OutOfMemory => |e| return e, + }; } /// Fully resolves a simple type. 
This is usually a nop, but for builtin types with @@ -3945,6 +3981,16 @@ pub fn elemPtrType(ptr_ty: Type, offset: ?usize, pt: Zcu.PerThread) !Type { }); } +pub fn containerTypeName(ty: Type, ip: *const InternPool) InternPool.NullTerminatedString { + return switch (ip.indexToKey(ty.toIntern())) { + .struct_type => ip.loadStructType(ty.toIntern()).name, + .union_type => ip.loadUnionType(ty.toIntern()).name, + .enum_type => ip.loadEnumType(ty.toIntern()).name, + .opaque_type => ip.loadOpaqueType(ty.toIntern()).name, + else => unreachable, + }; +} + pub const @"u1": Type = .{ .ip_index = .u1_type }; pub const @"u8": Type = .{ .ip_index = .u8_type }; pub const @"u16": Type = .{ .ip_index = .u16_type }; diff --git a/src/Value.zig b/src/Value.zig index 7aead8ae7de6..8ae3b5845888 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -227,13 +227,6 @@ pub fn getFunction(val: Value, mod: *Module) ?InternPool.Key.Func { }; } -pub fn getExternFunc(val: Value, mod: *Module) ?InternPool.Key.ExternFunc { - return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .extern_func => |extern_func| extern_func, - else => null, - }; -} - pub fn getVariable(val: Value, mod: *Module) ?InternPool.Key.Variable { return switch (mod.intern_pool.indexToKey(val.toIntern())) { .variable => |variable| variable, @@ -319,17 +312,8 @@ pub fn toBool(val: Value) bool { }; } -fn ptrHasIntAddr(val: Value, mod: *Module) bool { - var check = val; - while (true) switch (mod.intern_pool.indexToKey(check.toIntern())) { - .ptr => |ptr| switch (ptr.base_addr) { - .decl, .comptime_alloc, .comptime_field, .anon_decl => return false, - .int => return true, - .eu_payload, .opt_payload => |base| check = Value.fromInterned(base), - .arr_elem, .field => |base_index| check = Value.fromInterned(base_index.base), - }, - else => unreachable, - }; +fn ptrHasIntAddr(val: Value, zcu: *Zcu) bool { + return zcu.intern_pool.getBackingAddrTag(val.toIntern()).? == .int; } /// Write a Value's contents to `buffer`. 
@@ -1058,7 +1042,7 @@ pub fn orderAgainstZeroAdvanced( .bool_true => .gt, else => switch (pt.zcu.intern_pool.indexToKey(lhs.toIntern())) { .ptr => |ptr| if (ptr.byte_offset > 0) .gt else switch (ptr.base_addr) { - .decl, .comptime_alloc, .comptime_field => .gt, + .nav, .comptime_alloc, .comptime_field => .gt, .int => .eq, else => unreachable, }, @@ -1130,11 +1114,11 @@ pub fn compareHeteroAdvanced( pt: Zcu.PerThread, comptime strat: ResolveStrat, ) !bool { - if (lhs.pointerDecl(pt.zcu)) |lhs_decl| { - if (rhs.pointerDecl(pt.zcu)) |rhs_decl| { + if (lhs.pointerNav(pt.zcu)) |lhs_nav| { + if (rhs.pointerNav(pt.zcu)) |rhs_nav| { switch (op) { - .eq => return lhs_decl == rhs_decl, - .neq => return lhs_decl != rhs_decl, + .eq => return lhs_nav == rhs_nav, + .neq => return lhs_nav != rhs_nav, else => {}, } } else { @@ -1144,7 +1128,7 @@ pub fn compareHeteroAdvanced( else => {}, } } - } else if (rhs.pointerDecl(pt.zcu)) |_| { + } else if (rhs.pointerNav(pt.zcu)) |_| { switch (op) { .eq => return false, .neq => return true, @@ -1252,12 +1236,12 @@ pub fn canMutateComptimeVarState(val: Value, zcu: *Zcu) bool { .payload => |payload| Value.fromInterned(payload).canMutateComptimeVarState(zcu), }, .ptr => |ptr| switch (ptr.base_addr) { - .decl => false, // The value of a Decl can never reference a comptime alloc. + .nav => false, // The value of a Nav can never reference a comptime alloc. .int => false, .comptime_alloc => true, // A comptime alloc is either mutable or references comptime-mutable memory. .comptime_field => true, // Comptime field pointers are comptime-mutable, albeit only to the "correct" value. 
.eu_payload, .opt_payload => |base| Value.fromInterned(base).canMutateComptimeVarState(zcu), - .anon_decl => |anon_decl| Value.fromInterned(anon_decl.val).canMutateComptimeVarState(zcu), + .uav => |uav| Value.fromInterned(uav.val).canMutateComptimeVarState(zcu), .arr_elem, .field => |base_index| Value.fromInterned(base_index.base).canMutateComptimeVarState(zcu), }, .slice => |slice| return Value.fromInterned(slice.ptr).canMutateComptimeVarState(zcu), @@ -1273,16 +1257,17 @@ pub fn canMutateComptimeVarState(val: Value, zcu: *Zcu) bool { }; } -/// Gets the decl referenced by this pointer. If the pointer does not point -/// to a decl, or if it points to some part of a decl (like field_ptr or element_ptr), -/// this function returns null. -pub fn pointerDecl(val: Value, mod: *Module) ?InternPool.DeclIndex { +/// Gets the `Nav` referenced by this pointer. If the pointer does not point +/// to a `Nav`, or if it points to some part of one (like a field or element), +/// returns null. +pub fn pointerNav(val: Value, mod: *Module) ?InternPool.Nav.Index { return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .variable => |variable| variable.decl, - .extern_func => |extern_func| extern_func.decl, - .func => |func| func.owner_decl, + // TODO: these 3 cases are weird; these aren't pointer values! 
+ .variable => |v| v.owner_nav, + .@"extern" => |e| e.owner_nav, + .func => |func| func.owner_nav, .ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) { - .decl => |decl| decl, + .nav => |nav| nav, else => null, } else null, else => null, @@ -1341,10 +1326,14 @@ pub fn isLazySize(val: Value, mod: *Module) bool { }; } -pub fn isPtrToThreadLocal(val: Value, mod: *Module) bool { - const backing_decl = mod.intern_pool.getBackingDecl(val.toIntern()).unwrap() orelse return false; - const variable = mod.declPtr(backing_decl).getOwnedVariable(mod) orelse return false; - return variable.is_threadlocal; +pub fn isPtrToThreadLocal(val: Value, zcu: *Zcu) bool { + const ip = &zcu.intern_pool; + const nav = ip.getBackingNav(val.toIntern()).unwrap() orelse return false; + return switch (ip.indexToKey(ip.getNav(nav).status.resolved.val)) { + .@"extern" => |e| e.is_threadlocal, + .variable => |v| v.is_threadlocal, + else => false, + }; } // Asserts that the provided start/end are in-bounds. @@ -4031,8 +4020,8 @@ pub const PointerDeriveStep = union(enum) { addr: u64, ptr_ty: Type, }, - decl_ptr: InternPool.DeclIndex, - anon_decl_ptr: InternPool.Key.Ptr.BaseAddr.AnonDecl, + nav_ptr: InternPool.Nav.Index, + uav_ptr: InternPool.Key.Ptr.BaseAddr.Uav, comptime_alloc_ptr: struct { val: Value, ptr_ty: Type, @@ -4069,8 +4058,8 @@ pub const PointerDeriveStep = union(enum) { pub fn ptrType(step: PointerDeriveStep, pt: Zcu.PerThread) !Type { return switch (step) { .int => |int| int.ptr_ty, - .decl_ptr => |decl| try pt.zcu.declPtr(decl).declPtrType(pt), - .anon_decl_ptr => |ad| Type.fromInterned(ad.orig_ty), + .nav_ptr => |nav| try pt.navPtrType(nav), + .uav_ptr => |uav| Type.fromInterned(uav.orig_ty), .comptime_alloc_ptr => |info| info.ptr_ty, .comptime_field_ptr => |val| try pt.singleConstPtrType(val.typeOf(pt.zcu)), .offset_and_cast => |oac| oac.new_ptr_ty, @@ -4098,17 +4087,17 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh .addr = 
ptr.byte_offset, .ptr_ty = Type.fromInterned(ptr.ty), } }, - .decl => |decl| .{ .decl_ptr = decl }, - .anon_decl => |ad| base: { + .nav => |nav| .{ .nav_ptr = nav }, + .uav => |uav| base: { // A slight tweak: `orig_ty` here is sometimes not `const`, but it ought to be. // TODO: fix this in the sites interning anon decls! const const_ty = try pt.ptrType(info: { - var info = Type.fromInterned(ad.orig_ty).ptrInfo(zcu); + var info = Type.fromInterned(uav.orig_ty).ptrInfo(zcu); info.flags.is_const = true; break :info info; }); - break :base .{ .anon_decl_ptr = .{ - .val = ad.val, + break :base .{ .uav_ptr = .{ + .val = uav.val, .orig_ty = const_ty.toIntern(), } }; }, @@ -4357,7 +4346,7 @@ pub fn resolveLazy(val: Value, arena: Allocator, pt: Zcu.PerThread) Zcu.SemaErro }, .ptr => |ptr| { switch (ptr.base_addr) { - .decl, .comptime_alloc, .anon_decl, .int => return val, + .nav, .comptime_alloc, .uav, .int => return val, .comptime_field => |field_val| { const resolved_field_val = (try Value.fromInterned(field_val).resolveLazy(arena, pt)).toIntern(); return if (resolved_field_val == field_val) diff --git a/src/Zcu.zig b/src/Zcu.zig index 54faf34bf471..bcb331b59755 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -118,8 +118,15 @@ embed_table: std.StringArrayHashMapUnmanaged(*EmbedFile) = .{}, /// is not yet implemented. intern_pool: InternPool = .{}, +analysis_in_progress: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .{}, /// The ErrorMsg memory is owned by the `AnalUnit`, using Module's general purpose allocator. failed_analysis: std.AutoArrayHashMapUnmanaged(AnalUnit, *ErrorMsg) = .{}, +/// This `AnalUnit` failed semantic analysis because it required analysis of another `AnalUnit` which itself failed. +transitive_failed_analysis: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .{}, +/// This `Nav` succeeded analysis, but failed codegen. +/// This may be a simple "value" `Nav`, or it may be a function. 
+/// The ErrorMsg memory is owned by the `AnalUnit`, using Module's general purpose allocator. +failed_codegen: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, *ErrorMsg) = .{}, /// Keep track of one `@compileLog` callsite per `AnalUnit`. /// The value is the source location of the `@compileLog` call, convertible to a `LazySrcLoc`. compile_log_sources: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct { @@ -155,12 +162,12 @@ outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{}, /// Such `AnalUnit`s are ready for immediate re-analysis. /// See `findOutdatedToAnalyze` for details. outdated_ready: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .{}, -/// This contains a set of Decls which may not be in `outdated`, but are the -/// root Decls of files which have updated source and thus must be re-analyzed. -/// If such a Decl is only in this set, the struct type index may be preserved -/// (only the namespace might change). If such a Decl is also `outdated`, the -/// struct type index must be recreated. -outdated_file_root: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{}, +/// This contains a set of struct types whose corresponding `Cau` may not be in +/// `outdated`, but are the root types of files which have updated source and +/// thus must be re-analyzed. If such a type is only in this set, the struct type +/// index may be preserved (only the namespace might change). If its owned `Cau` +/// is also outdated, the struct type index must be recreated. +outdated_file_root: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{}, /// This contains a list of AnalUnit whose analysis or codegen failed, but the /// failure was something like running out of disk space, and trying again may /// succeed. 
On the next update, we will flush this list, marking all members of @@ -179,12 +186,9 @@ stage1_flags: packed struct { compile_log_text: std.ArrayListUnmanaged(u8) = .{}, -emit_h: ?*GlobalEmitH, - -test_functions: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{}, +test_functions: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, void) = .{}, -/// TODO: the key here will be a `Cau.Index`. -global_assembly: std.AutoArrayHashMapUnmanaged(Decl.Index, []u8) = .{}, +global_assembly: std.AutoArrayHashMapUnmanaged(InternPool.Cau.Index, []u8) = .{}, /// Key is the `AnalUnit` *performing* the reference. This representation allows /// incremental updates to quickly delete references caused by a specific `AnalUnit`. @@ -196,7 +200,7 @@ all_references: std.ArrayListUnmanaged(Reference) = .{}, /// Freelist of indices in `all_references`. free_references: std.ArrayListUnmanaged(u32) = .{}, -panic_messages: [PanicId.len]Decl.OptionalIndex = .{.none} ** PanicId.len, +panic_messages: [PanicId.len]InternPool.Nav.Index.Optional = .{.none} ** PanicId.len, /// The panic function body. panic_func_index: InternPool.Index = .none, null_stack_trace: InternPool.Index = .none, @@ -250,45 +254,25 @@ pub const CImportError = struct { } }; -/// A `Module` has zero or one of these depending on whether `-femit-h` is enabled. -pub const GlobalEmitH = struct { - /// Where to put the output. - loc: Compilation.EmitLoc, - /// When emit_h is non-null, each Decl gets one more compile error slot for - /// emit-h failing for that Decl. This table is also how we tell if a Decl has - /// failed emit-h or succeeded. - failed_decls: std.AutoArrayHashMapUnmanaged(Decl.Index, *ErrorMsg) = .{}, - /// Tracks all decls in order to iterate over them and emit .h code for them. - decl_table: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{}, - /// Similar to the allocated_decls field of Module, this is where `EmitH` objects - /// are allocated. 
There will be exactly one EmitH object per Decl object, with - /// identical indexes. - allocated_emit_h: std.SegmentedList(EmitH, 0) = .{}, - - pub fn declPtr(global_emit_h: *GlobalEmitH, decl_index: Decl.Index) *EmitH { - return global_emit_h.allocated_emit_h.at(@intFromEnum(decl_index)); - } -}; - pub const ErrorInt = u32; pub const Exported = union(enum) { - /// The Decl being exported. Note this is *not* the Decl performing the export. - decl_index: Decl.Index, + /// The Nav being exported. Note this is *not* the Nav corresponding to the AnalUnit performing the export. + nav: InternPool.Nav.Index, /// Constant value being exported. - value: InternPool.Index, + uav: InternPool.Index, pub fn getValue(exported: Exported, zcu: *Zcu) Value { return switch (exported) { - .decl_index => |decl_index| zcu.declPtr(decl_index).val, - .value => |value| Value.fromInterned(value), + .nav => |nav| zcu.navValue(nav), + .uav => |uav| Value.fromInterned(uav), }; } pub fn getAlign(exported: Exported, zcu: *Zcu) Alignment { return switch (exported) { - .decl_index => |decl_index| zcu.declPtr(decl_index).alignment, - .value => .none, + .nav => |nav| zcu.intern_pool.getNav(nav).status.resolved.alignment, + .uav => .none, }; } }; @@ -324,302 +308,54 @@ pub const Reference = struct { src: LazySrcLoc, }; -pub const Decl = struct { - /// Equal to `fqn` if already fully qualified. - name: InternPool.NullTerminatedString, - /// Fully qualified name. - fqn: InternPool.NullTerminatedString, - /// The most recent Value of the Decl after a successful semantic analysis. - /// Populated when `has_tv`. - val: Value, - /// Populated when `has_tv`. - @"linksection": InternPool.OptionalNullTerminatedString, - /// Populated when `has_tv`. - alignment: Alignment, - /// Populated when `has_tv`. - @"addrspace": std.builtin.AddressSpace, - /// The direct parent namespace of the Decl. 
In the case of the Decl - /// corresponding to a file, this is the namespace of the struct, since - /// there is no parent. - src_namespace: Namespace.Index, - - /// Index of the ZIR `declaration` instruction from which this `Decl` was created. - /// For the root `Decl` of a `File` and legacy anonymous decls, this is `.none`. - zir_decl_index: InternPool.TrackedInst.Index.Optional, - - /// Represents the "shallow" analysis status. For example, for decls that are functions, - /// the function type is analyzed with this set to `in_progress`, however, the semantic - /// analysis of the function body is performed with this value set to `success`. Functions - /// have their own analysis status field. - analysis: enum { - /// This Decl corresponds to an AST Node that has not been referenced yet, and therefore - /// because of Zig's lazy declaration analysis, it will remain unanalyzed until referenced. - unreferenced, - /// Semantic analysis for this Decl is running right now. - /// This state detects dependency loops. - in_progress, - /// The file corresponding to this Decl had a parse error or ZIR error. - /// There will be a corresponding ErrorMsg in Zcu.failed_files. - file_failure, - /// This Decl might be OK but it depends on another one which did not - /// successfully complete semantic analysis. - dependency_failure, - /// Semantic analysis failure. - /// There will be a corresponding ErrorMsg in Zcu.failed_analysis. - sema_failure, - /// There will be a corresponding ErrorMsg in Zcu.failed_analysis. - codegen_failure, - /// Sematic analysis and constant value codegen of this Decl has - /// succeeded. However, the Decl may be outdated due to an in-progress - /// update. Note that for a function, this does not mean codegen of the - /// function body succeded: that state is indicated by the function's - /// `analysis` field. - complete, - }, - /// Whether `typed_value`, `align`, `linksection` and `addrspace` are populated. 
- has_tv: bool, - /// If `true` it means the `Decl` is the resource owner of the type/value associated - /// with it. That means when `Decl` is destroyed, the cleanup code should additionally - /// check if the value owns a `Namespace`, and destroy that too. - owns_tv: bool, - /// Whether the corresponding AST decl has a `pub` keyword. - is_pub: bool, - /// Whether the corresponding AST decl has a `export` keyword. - is_exported: bool, - /// What kind of a declaration is this. - kind: Kind, - - pub const Kind = enum { - @"usingnamespace", - @"test", - @"comptime", - named, - anon, - }; - - pub const Index = InternPool.DeclIndex; - pub const OptionalIndex = InternPool.OptionalDeclIndex; - - pub fn zirBodies(decl: Decl, zcu: *Zcu) Zir.Inst.Declaration.Bodies { - const zir = decl.getFileScope(zcu).zir; - const zir_index = decl.zir_decl_index.unwrap().?.resolve(&zcu.intern_pool); - const declaration = zir.instructions.items(.data)[@intFromEnum(zir_index)].declaration; - const extra = zir.extraData(Zir.Inst.Declaration, declaration.payload_index); - return extra.data.getBodies(@intCast(extra.end), zir); - } - - pub fn typeOf(decl: Decl, zcu: *const Zcu) Type { - assert(decl.has_tv); - return decl.val.typeOf(zcu); - } - - /// Small wrapper for Sema to use over direct access to the `val` field. - /// If the value is not populated, instead returns `error.AnalysisFail`. - pub fn valueOrFail(decl: Decl) error{AnalysisFail}!Value { - if (!decl.has_tv) return error.AnalysisFail; - return decl.val; - } - - pub fn getOwnedFunction(decl: Decl, zcu: *Zcu) ?InternPool.Key.Func { - const i = decl.getOwnedFunctionIndex(); - if (i == .none) return null; - return switch (zcu.intern_pool.indexToKey(i)) { - .func => |func| func, - else => null, - }; - } - - /// This returns an InternPool.Index even when the value is not a function. 
- pub fn getOwnedFunctionIndex(decl: Decl) InternPool.Index { - return if (decl.owns_tv) decl.val.toIntern() else .none; - } - - /// If the Decl owns its value and it is an extern function, returns it, - /// otherwise null. - pub fn getOwnedExternFunc(decl: Decl, zcu: *Zcu) ?InternPool.Key.ExternFunc { - return if (decl.owns_tv) decl.val.getExternFunc(zcu) else null; - } - - /// If the Decl owns its value and it is a variable, returns it, - /// otherwise null. - pub fn getOwnedVariable(decl: Decl, zcu: *Zcu) ?InternPool.Key.Variable { - return if (decl.owns_tv) decl.val.getVariable(zcu) else null; - } - - /// Gets the namespace that this Decl creates by being a struct, union, - /// enum, or opaque. - pub fn getInnerNamespaceIndex(decl: Decl, zcu: *Zcu) Namespace.OptionalIndex { - if (!decl.has_tv) return .none; - const ip = &zcu.intern_pool; - return switch (decl.val.ip_index) { - .empty_struct_type => .none, - .none => .none, - else => switch (ip.indexToKey(decl.val.toIntern())) { - .opaque_type => ip.loadOpaqueType(decl.val.toIntern()).namespace, - .struct_type => ip.loadStructType(decl.val.toIntern()).namespace, - .union_type => ip.loadUnionType(decl.val.toIntern()).namespace, - .enum_type => ip.loadEnumType(decl.val.toIntern()).namespace, - else => .none, - }, - }; - } - - /// Like `getInnerNamespaceIndex`, but only returns it if the Decl is the owner. - pub fn getOwnedInnerNamespaceIndex(decl: Decl, zcu: *Zcu) Namespace.OptionalIndex { - if (!decl.owns_tv) return .none; - return decl.getInnerNamespaceIndex(zcu); - } - - /// Same as `getOwnedInnerNamespaceIndex` but additionally obtains the pointer. - pub fn getOwnedInnerNamespace(decl: Decl, zcu: *Zcu) ?*Namespace { - return zcu.namespacePtrUnwrap(decl.getOwnedInnerNamespaceIndex(zcu)); - } - - /// Same as `getInnerNamespaceIndex` but additionally obtains the pointer. 
- pub fn getInnerNamespace(decl: Decl, zcu: *Zcu) ?*Namespace { - return zcu.namespacePtrUnwrap(decl.getInnerNamespaceIndex(zcu)); - } - - pub fn getFileScope(decl: Decl, zcu: *Zcu) *File { - return zcu.fileByIndex(getFileScopeIndex(decl, zcu)); - } - - pub fn getFileScopeIndex(decl: Decl, zcu: *Zcu) File.Index { - return zcu.namespacePtr(decl.src_namespace).file_scope; - } - - pub fn getExternDecl(decl: Decl, zcu: *Zcu) OptionalIndex { - assert(decl.has_tv); - return switch (zcu.intern_pool.indexToKey(decl.val.toIntern())) { - .variable => |variable| if (variable.is_extern) variable.decl.toOptional() else .none, - .extern_func => |extern_func| extern_func.decl.toOptional(), - else => .none, - }; - } - - pub fn isExtern(decl: Decl, zcu: *Zcu) bool { - return decl.getExternDecl(zcu) != .none; - } - - pub fn getAlignment(decl: Decl, pt: Zcu.PerThread) Alignment { - assert(decl.has_tv); - if (decl.alignment != .none) return decl.alignment; - return decl.typeOf(pt.zcu).abiAlignment(pt); - } - - pub fn declPtrType(decl: Decl, pt: Zcu.PerThread) !Type { - assert(decl.has_tv); - const decl_ty = decl.typeOf(pt.zcu); - return pt.ptrType(.{ - .child = decl_ty.toIntern(), - .flags = .{ - .alignment = if (decl.alignment == decl_ty.abiAlignment(pt)) - .none - else - decl.alignment, - .address_space = decl.@"addrspace", - .is_const = decl.getOwnedVariable(pt.zcu) == null, - }, - }); - } - - /// Returns the source location of this `Decl`. - /// Asserts that this `Decl` corresponds to what will in future be a `Nav` (Named - /// Addressable Value): a source-level declaration or generic instantiation. 
- pub fn navSrcLoc(decl: Decl, zcu: *Zcu) LazySrcLoc { - return .{ - .base_node_inst = decl.zir_decl_index.unwrap() orelse inst: { - // generic instantiation - assert(decl.has_tv); - assert(decl.owns_tv); - const owner = zcu.funcInfo(decl.val.toIntern()).generic_owner; - const generic_owner_decl = zcu.declPtr(zcu.funcInfo(owner).owner_decl); - break :inst generic_owner_decl.zir_decl_index.unwrap().?; - }, - .offset = LazySrcLoc.Offset.nodeOffset(0), - }; - } - - pub fn navSrcLine(decl: Decl, zcu: *Zcu) u32 { - const ip = &zcu.intern_pool; - const tracked = decl.zir_decl_index.unwrap() orelse inst: { - // generic instantiation - assert(decl.has_tv); - assert(decl.owns_tv); - const generic_owner_func = switch (ip.indexToKey(decl.val.toIntern())) { - .func => |func| func.generic_owner, - else => return 0, // TODO: this is probably a `variable` or something; figure this out when we finish sorting out `Decl`. - }; - const generic_owner_decl = zcu.declPtr(zcu.funcInfo(generic_owner_func).owner_decl); - break :inst generic_owner_decl.zir_decl_index.unwrap().?; - }; - const info = tracked.resolveFull(ip); - const file = zcu.fileByIndex(info.file); - assert(file.zir_loaded); - const zir = file.zir; - const inst = zir.instructions.get(@intFromEnum(info.inst)); - assert(inst.tag == .declaration); - return zir.extraData(Zir.Inst.Declaration, inst.data.declaration.payload_index).data.src_line; - } - - pub fn typeSrcLine(decl: Decl, zcu: *Zcu) u32 { - assert(decl.has_tv); - assert(decl.owns_tv); - return decl.val.toType().typeDeclSrcLine(zcu).?; - } -}; - -/// This state is attached to every Decl when Module emit_h is non-null. 
-pub const EmitH = struct { - fwd_decl: std.ArrayListUnmanaged(u8) = .{}, -}; - -pub const DeclAdapter = struct { - zcu: *Zcu, - - pub fn hash(self: @This(), s: InternPool.NullTerminatedString) u32 { - _ = self; - return std.hash.uint32(@intFromEnum(s)); - } - - pub fn eql(self: @This(), a: InternPool.NullTerminatedString, b_decl_index: Decl.Index, b_index: usize) bool { - _ = b_index; - return a == self.zcu.declPtr(b_decl_index).name; - } -}; - /// The container that structs, enums, unions, and opaques have. pub const Namespace = struct { parent: OptionalIndex, file_scope: File.Index, /// Will be a struct, enum, union, or opaque. - decl_index: Decl.Index, - /// Direct children of the namespace. - /// Declaration order is preserved via entry order. - /// These are only declarations named directly by the AST; anonymous - /// declarations are not stored here. - decls: std.ArrayHashMapUnmanaged(Decl.Index, void, DeclContext, true) = .{}, - /// Key is usingnamespace Decl itself. To find the namespace being included, - /// the Decl Value has to be resolved as a Type which has a Namespace. - /// Value is whether the usingnamespace decl is marked `pub`. - usingnamespace_set: std.AutoHashMapUnmanaged(Decl.Index, bool) = .{}, + owner_type: InternPool.Index, + /// Members of the namespace which are marked `pub`. + pub_decls: std.ArrayHashMapUnmanaged(InternPool.Nav.Index, void, NavNameContext, true) = .{}, + /// Members of the namespace which are *not* marked `pub`. + priv_decls: std.ArrayHashMapUnmanaged(InternPool.Nav.Index, void, NavNameContext, true) = .{}, + /// All `usingnamespace` declarations in this namespace which are marked `pub`. + pub_usingnamespace: std.ArrayListUnmanaged(InternPool.Nav.Index) = .{}, + /// All `usingnamespace` declarations in this namespace which are *not* marked `pub`. + priv_usingnamespace: std.ArrayListUnmanaged(InternPool.Nav.Index) = .{}, + /// All `comptime` and `test` declarations in this namespace. 
We store these purely so that + /// incremental compilation can re-use the existing `Cau`s when a namespace changes. + other_decls: std.ArrayListUnmanaged(InternPool.Cau.Index) = .{}, pub const Index = InternPool.NamespaceIndex; pub const OptionalIndex = InternPool.OptionalNamespaceIndex; - const DeclContext = struct { + const NavNameContext = struct { zcu: *Zcu, - pub fn hash(ctx: @This(), decl_index: Decl.Index) u32 { - const decl = ctx.zcu.declPtr(decl_index); - return std.hash.uint32(@intFromEnum(decl.name)); + pub fn hash(ctx: NavNameContext, nav: InternPool.Nav.Index) u32 { + const name = ctx.zcu.intern_pool.getNav(nav).name; + return std.hash.uint32(@intFromEnum(name)); } - pub fn eql(ctx: @This(), a_decl_index: Decl.Index, b_decl_index: Decl.Index, b_index: usize) bool { + pub fn eql(ctx: NavNameContext, a_nav: InternPool.Nav.Index, b_nav: InternPool.Nav.Index, b_index: usize) bool { _ = b_index; - const a_decl = ctx.zcu.declPtr(a_decl_index); - const b_decl = ctx.zcu.declPtr(b_decl_index); - return a_decl.name == b_decl.name; + const a_name = ctx.zcu.intern_pool.getNav(a_nav).name; + const b_name = ctx.zcu.intern_pool.getNav(b_nav).name; + return a_name == b_name; + } + }; + + pub const NameAdapter = struct { + zcu: *Zcu, + + pub fn hash(ctx: NameAdapter, s: InternPool.NullTerminatedString) u32 { + _ = ctx; + return std.hash.uint32(@intFromEnum(s)); + } + + pub fn eql(ctx: NameAdapter, a: InternPool.NullTerminatedString, b_nav: InternPool.Nav.Index, b_index: usize) bool { + _ = b_index; + return a == ctx.zcu.intern_pool.getNav(b_nav).name; } }; @@ -631,25 +367,6 @@ pub const Namespace = struct { return ip.filePtr(ns.file_scope); } - // This renders e.g. 
"std.fs.Dir.OpenOptions" - pub fn renderFullyQualifiedName( - ns: Namespace, - ip: *InternPool, - name: InternPool.NullTerminatedString, - writer: anytype, - ) @TypeOf(writer).Error!void { - if (ns.parent.unwrap()) |parent| { - try ip.namespacePtr(parent).renderFullyQualifiedName( - ip, - ip.declPtr(ns.decl_index).name, - writer, - ); - } else { - try ns.fileScopeIp(ip).renderFullyQualifiedName(writer); - } - if (name != .empty) try writer.print(".{}", .{name.fmt(ip)}); - } - /// This renders e.g. "std/fs.zig:Dir.OpenOptions" pub fn renderFullyQualifiedDebugName( ns: Namespace, @@ -678,44 +395,9 @@ pub const Namespace = struct { tid: Zcu.PerThread.Id, name: InternPool.NullTerminatedString, ) !InternPool.NullTerminatedString { - const strings = ip.getLocal(tid).getMutableStrings(gpa); - // Protects reads of interned strings from being reallocated during the call to - // renderFullyQualifiedName. - const slice = try strings.addManyAsSlice(count: { - var count: usize = name.length(ip) + 1; - var cur_ns = &ns; - while (true) { - const decl = ip.declPtr(cur_ns.decl_index); - cur_ns = ip.namespacePtr(cur_ns.parent.unwrap() orelse { - count += ns.fileScopeIp(ip).fullyQualifiedNameLen(); - break :count count; - }); - count += decl.name.length(ip) + 1; - } - }); - var fbs = std.io.fixedBufferStream(slice[0]); - ns.renderFullyQualifiedName(ip, name, fbs.writer()) catch unreachable; - assert(fbs.pos == slice[0].len); - - // Sanitize the name for nvptx which is more restrictive. - // TODO This should be handled by the backend, not the frontend. Have a - // look at how the C backend does it for inspiration. - // FIXME This has bitrotted and is no longer able to be implemented here. 
- //const cpu_arch = zcu.root_mod.resolved_target.result.cpu.arch; - //if (cpu_arch.isNvptx()) { - // for (slice[0]) |*byte| switch (byte.*) { - // '{', '}', '*', '[', ']', '(', ')', ',', ' ', '\'' => byte.* = '_', - // else => {}, - // }; - //} - - return ip.getOrPutTrailingString(gpa, tid, @intCast(slice[0].len), .no_embedded_nulls); - } - - pub fn getType(ns: Namespace, zcu: *Zcu) Type { - const decl = zcu.declPtr(ns.decl_index); - assert(decl.has_tv); - return decl.val.toType(); + const ns_name = Type.fromInterned(ns.owner_type).containerTypeName(ip); + if (name == .empty) return ns_name; + return ip.getOrPutStringFmt(gpa, tid, "{}.{}", .{ ns_name.fmt(ip), name.fmt(ip) }, .no_embedded_nulls); } }; @@ -2428,16 +2110,13 @@ pub fn deinit(zcu: *Zcu) void { for (zcu.failed_analysis.values()) |value| { value.destroy(gpa); } - zcu.failed_analysis.deinit(gpa); - - if (zcu.emit_h) |emit_h| { - for (emit_h.failed_decls.values()) |value| { - value.destroy(gpa); - } - emit_h.failed_decls.deinit(gpa); - emit_h.decl_table.deinit(gpa); - emit_h.allocated_emit_h.deinit(gpa); + for (zcu.failed_codegen.values()) |value| { + value.destroy(gpa); } + zcu.analysis_in_progress.deinit(gpa); + zcu.failed_analysis.deinit(gpa); + zcu.transitive_failed_analysis.deinit(gpa); + zcu.failed_codegen.deinit(gpa); for (zcu.failed_files.values()) |value| { if (value) |msg| msg.destroy(gpa); @@ -2486,26 +2165,14 @@ pub fn deinit(zcu: *Zcu) void { zcu.intern_pool.deinit(gpa); } -pub fn declPtr(mod: *Zcu, index: Decl.Index) *Decl { - return mod.intern_pool.declPtr(index); -} - -pub fn namespacePtr(mod: *Zcu, index: Namespace.Index) *Namespace { - return mod.intern_pool.namespacePtr(index); +pub fn namespacePtr(zcu: *Zcu, index: Namespace.Index) *Namespace { + return zcu.intern_pool.namespacePtr(index); } pub fn namespacePtrUnwrap(mod: *Zcu, index: Namespace.OptionalIndex) ?*Namespace { return mod.namespacePtr(index.unwrap() orelse return null); } -/// Returns true if and only if the Decl is the top 
level struct associated with a File. -pub fn declIsRoot(mod: *Zcu, decl_index: Decl.Index) bool { - const decl = mod.declPtr(decl_index); - const namespace = mod.namespacePtr(decl.src_namespace); - if (namespace.parent != .none) return false; - return decl_index == namespace.decl_index; -} - // TODO https://github.com/ziglang/zig/issues/8643 pub const data_has_safety_tag = @sizeOf(Zir.Inst.Data) != 8; pub const HackDataLayout = extern struct { @@ -2642,8 +2309,12 @@ pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void { // If this is a Decl, we must recursively mark dependencies on its tyval // as no longer PO. switch (depender.unwrap()) { - .decl => |decl_index| try zcu.markPoDependeeUpToDate(.{ .decl_val = decl_index }), - .func => |func_index| try zcu.markPoDependeeUpToDate(.{ .func_ies = func_index }), + .cau => |cau| switch (zcu.intern_pool.getCau(cau).owner.unwrap()) { + .nav => |nav| try zcu.markPoDependeeUpToDate(.{ .nav_val = nav }), + .type => |ty| try zcu.markPoDependeeUpToDate(.{ .interned = ty }), + .none => {}, + }, + .func => |func| try zcu.markPoDependeeUpToDate(.{ .interned = func }), } } } @@ -2651,9 +2322,13 @@ pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void { /// Given a AnalUnit which is newly outdated or PO, mark all AnalUnits which may /// in turn be PO, due to a dependency on the original AnalUnit's tyval or IES. 
fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUnit) !void { - var it = zcu.intern_pool.dependencyIterator(switch (maybe_outdated.unwrap()) { - .decl => |decl_index| .{ .decl_val = decl_index }, // TODO: also `decl_ref` deps when introduced - .func => |func_index| .{ .func_ies = func_index }, + const ip = &zcu.intern_pool; + var it = ip.dependencyIterator(switch (maybe_outdated.unwrap()) { + .cau => |cau| switch (ip.getCau(cau).owner.unwrap()) { + .nav => |nav| .{ .nav_val = nav }, // TODO: also `nav_ref` deps when introduced + .none, .type => return, // analysis of this `Cau` can't outdate any dependencies + }, + .func => |func_index| .{ .interned = func_index }, // IES }); while (it.next()) |po| { @@ -2680,6 +2355,8 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit { if (!zcu.comp.incremental) return null; + if (true) @panic("TODO: findOutdatedToAnalyze"); + if (zcu.outdated.count() == 0 and zcu.potentially_outdated.count() == 0) { log.debug("findOutdatedToAnalyze: no outdated depender", .{}); return null; @@ -2742,6 +2419,8 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit { zcu.potentially_outdated.count(), }); + const Decl = {}; + var chosen_decl_idx: ?Decl.Index = null; var chosen_decl_dependers: u32 = undefined; @@ -2939,65 +2618,20 @@ pub fn mapOldZirToNew( /// analyzed, and for ensuring it can exist at runtime (see /// `sema.fnHasRuntimeBits`). This function does *not* guarantee that the body /// will be analyzed when it returns: for that, see `ensureFuncBodyAnalyzed`. 
-pub fn ensureFuncBodyAnalysisQueued(mod: *Zcu, func_index: InternPool.Index) !void { - const ip = &mod.intern_pool; - const func = mod.funcInfo(func_index); - const decl_index = func.owner_decl; - const decl = mod.declPtr(decl_index); - - switch (decl.analysis) { - .unreferenced => unreachable, - .in_progress => unreachable, - - .file_failure, - .sema_failure, - .codegen_failure, - .dependency_failure, - // Analysis of the function Decl itself failed, but we've already - // emitted an error for that. The callee doesn't need the function to be - // analyzed right now, so its analysis can safely continue. - => return, - - .complete => {}, - } - - assert(decl.has_tv); - - const func_as_depender = AnalUnit.wrap(.{ .func = func_index }); - const is_outdated = mod.outdated.contains(func_as_depender) or - mod.potentially_outdated.contains(func_as_depender); +pub fn ensureFuncBodyAnalysisQueued(zcu: *Zcu, func_index: InternPool.Index) !void { + const ip = &zcu.intern_pool; + const func = zcu.funcInfo(func_index); switch (func.analysisUnordered(ip).state) { - .none => {}, - .queued => return, - // As above, we don't need to forward errors here. - .sema_failure, - .dependency_failure, - .codegen_failure, - .success, - => if (!is_outdated) return, - .in_progress => return, - .inline_only => unreachable, // don't queue work for this - } - - // Decl itself is safely analyzed, and body analysis is not yet queued - - try mod.comp.queueJob(.{ .analyze_func = func_index }); - if (mod.emit_h != null) { - // TODO: we ideally only want to do this if the function's type changed - // since the last update - try mod.comp.queueJob(.{ .emit_h_decl = decl_index }); + .unreferenced => {}, // We're the first reference! + .queued => return, // Analysis is already queued. + .analyzed => return, // Analysis is complete; if it's out-of-date, it'll be re-analyzed later this update. 
} + + try zcu.comp.queueJob(.{ .analyze_func = func_index }); func.setAnalysisState(ip, .queued); } -pub const SemaDeclResult = packed struct { - /// Whether the value of a `decl_val` of this Decl changed. - invalidate_decl_val: bool, - /// Whether the type of a `decl_ref` of this Decl changed. - invalidate_decl_ref: bool, -}; - pub const ImportFileResult = struct { file: *File, file_index: File.Index, @@ -3171,14 +2805,15 @@ pub fn handleUpdateExports( }; } -pub fn addGlobalAssembly(mod: *Zcu, decl_index: Decl.Index, source: []const u8) !void { - const gop = try mod.global_assembly.getOrPut(mod.gpa, decl_index); +pub fn addGlobalAssembly(zcu: *Zcu, cau: InternPool.Cau.Index, source: []const u8) !void { + const gpa = zcu.gpa; + const gop = try zcu.global_assembly.getOrPut(gpa, cau); if (gop.found_existing) { - const new_value = try std.fmt.allocPrint(mod.gpa, "{s}\n{s}", .{ gop.value_ptr.*, source }); - mod.gpa.free(gop.value_ptr.*); + const new_value = try std.fmt.allocPrint(gpa, "{s}\n{s}", .{ gop.value_ptr.*, source }); + gpa.free(gop.value_ptr.*); gop.value_ptr.* = new_value; } else { - gop.value_ptr.* = try mod.gpa.dupe(u8, source); + gop.value_ptr.* = try gpa.dupe(u8, source); } } @@ -3315,10 +2950,6 @@ pub fn atomicPtrAlignment( return error.BadType; } -pub fn declFileScope(mod: *Zcu, decl_index: Decl.Index) *File { - return mod.declPtr(decl_index).getFileScope(mod); -} - /// Returns null in the following cases: /// * `@TypeOf(.{})` /// * A struct which has no fields (`struct {}`). 
@@ -3352,16 +2983,8 @@ pub fn typeToFunc(mod: *Zcu, ty: Type) ?InternPool.Key.FuncType { return mod.intern_pool.indexToFuncType(ty.toIntern()); } -pub fn funcOwnerDeclPtr(mod: *Zcu, func_index: InternPool.Index) *Decl { - return mod.declPtr(mod.funcOwnerDeclIndex(func_index)); -} - -pub fn funcOwnerDeclIndex(mod: *Zcu, func_index: InternPool.Index) Decl.Index { - return mod.funcInfo(func_index).owner_decl; -} - -pub fn iesFuncIndex(mod: *const Zcu, ies_index: InternPool.Index) InternPool.Index { - return mod.intern_pool.iesFuncIndex(ies_index); +pub fn iesFuncIndex(zcu: *const Zcu, ies_index: InternPool.Index) InternPool.Index { + return zcu.intern_pool.iesFuncIndex(ies_index); } pub fn funcInfo(mod: *Zcu, func_index: InternPool.Index) InternPool.Key.Func { @@ -3372,44 +2995,6 @@ pub fn toEnum(mod: *Zcu, comptime E: type, val: Value) E { return mod.intern_pool.toEnum(E, val.toIntern()); } -pub fn isAnytypeParam(mod: *Zcu, func: InternPool.Index, index: u32) bool { - const file = mod.declPtr(func.owner_decl).getFileScope(mod); - - const tags = file.zir.instructions.items(.tag); - - const param_body = file.zir.getParamBody(func.zir_body_inst); - const param = param_body[index]; - - return switch (tags[param]) { - .param, .param_comptime => false, - .param_anytype, .param_anytype_comptime => true, - else => unreachable, - }; -} - -pub fn getParamName(mod: *Zcu, func_index: InternPool.Index, index: u32) [:0]const u8 { - const func = mod.funcInfo(func_index); - const file = mod.declPtr(func.owner_decl).getFileScope(mod); - - const tags = file.zir.instructions.items(.tag); - const data = file.zir.instructions.items(.data); - - const param_body = file.zir.getParamBody(func.zir_body_inst.resolve(&mod.intern_pool)); - const param = param_body[index]; - - return switch (tags[@intFromEnum(param)]) { - .param, .param_comptime => blk: { - const extra = file.zir.extraData(Zir.Inst.Param, data[@intFromEnum(param)].pl_tok.payload_index); - break :blk 
file.zir.nullTerminatedString(extra.data.name); - }, - .param_anytype, .param_anytype_comptime => blk: { - const param_data = data[@intFromEnum(param)].str_tok; - break :blk param_data.get(file.zir); - }, - else => unreachable, - }; -} - pub const UnionLayout = struct { abi_size: u64, abi_align: Alignment, @@ -3468,19 +3053,20 @@ pub fn fileByIndex(zcu: *Zcu, file_index: File.Index) *File { return zcu.intern_pool.filePtr(file_index); } -/// Returns the `Decl` of the struct that represents this `File`. -pub fn fileRootDecl(zcu: *const Zcu, file_index: File.Index) Decl.OptionalIndex { +/// Returns the struct that represents this `File`. +/// If the struct has not been created, returns `.none`. +pub fn fileRootType(zcu: *const Zcu, file_index: File.Index) InternPool.Index { const ip = &zcu.intern_pool; const file_index_unwrapped = file_index.unwrap(ip); const files = ip.getLocalShared(file_index_unwrapped.tid).files.acquire(); - return files.view().items(.root_decl)[file_index_unwrapped.index]; + return files.view().items(.root_type)[file_index_unwrapped.index]; } -pub fn setFileRootDecl(zcu: *Zcu, file_index: File.Index, root_decl: Decl.OptionalIndex) void { +pub fn setFileRootType(zcu: *Zcu, file_index: File.Index, root_type: InternPool.Index) void { const ip = &zcu.intern_pool; const file_index_unwrapped = file_index.unwrap(ip); const files = ip.getLocalShared(file_index_unwrapped.tid).files.acquire(); - files.view().items(.root_decl)[file_index_unwrapped.index] = root_decl; + files.view().items(.root_type)[file_index_unwrapped.index] = root_type; } pub fn filePathDigest(zcu: *const Zcu, file_index: File.Index) Cache.BinDigest { @@ -3489,3 +3075,39 @@ pub fn filePathDigest(zcu: *const Zcu, file_index: File.Index) Cache.BinDigest { const files = ip.getLocalShared(file_index_unwrapped.tid).files.acquire(); return files.view().items(.bin_digest)[file_index_unwrapped.index]; } + +pub fn navSrcLoc(zcu: *const Zcu, nav_index: InternPool.Nav.Index) LazySrcLoc { + const ip 
= &zcu.intern_pool; + return .{ + .base_node_inst = ip.getNav(nav_index).srcInst(ip), + .offset = LazySrcLoc.Offset.nodeOffset(0), + }; +} + +pub fn navSrcLine(zcu: *Zcu, nav_index: InternPool.Nav.Index) u32 { + const ip = &zcu.intern_pool; + const inst_info = ip.getNav(nav_index).srcInst(ip).resolveFull(ip); + const zir = zcu.fileByIndex(inst_info.file).zir; + const inst = zir.instructions.get(@intFromEnum(inst_info.inst)); + assert(inst.tag == .declaration); + return zir.extraData(Zir.Inst.Declaration, inst.data.declaration.payload_index).data.src_line; +} + +pub fn navValue(zcu: *const Zcu, nav_index: InternPool.Nav.Index) Value { + return Value.fromInterned(zcu.intern_pool.getNav(nav_index).status.resolved.val); +} + +pub fn navFileScopeIndex(zcu: *Zcu, nav: InternPool.Nav.Index) File.Index { + const ip = &zcu.intern_pool; + return ip.getNav(nav).srcInst(ip).resolveFull(ip).file; +} + +pub fn navFileScope(zcu: *Zcu, nav: InternPool.Nav.Index) *File { + return zcu.fileByIndex(zcu.navFileScopeIndex(nav)); +} + +pub fn cauFileScope(zcu: *Zcu, cau: InternPool.Cau.Index) *File { + const ip = &zcu.intern_pool; + const file_index = ip.getCau(cau).zir_index.resolveFull(ip).file; + return zcu.fileByIndex(file_index); +} diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index b4c8c834f9f6..29bee0ed9dd1 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -6,26 +6,6 @@ tid: Id, pub const IdBacking = u7; pub const Id = if (InternPool.single_threaded) enum { main } else enum(IdBacking) { main, _ }; -pub fn destroyDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) void { - const zcu = pt.zcu; - const gpa = zcu.gpa; - - { - _ = zcu.test_functions.swapRemove(decl_index); - if (zcu.global_assembly.fetchSwapRemove(decl_index)) |kv| { - gpa.free(kv.value); - } - } - - pt.zcu.intern_pool.destroyDecl(pt.tid, decl_index); - - if (zcu.emit_h) |zcu_emit_h| { - const decl_emit_h = zcu_emit_h.declPtr(decl_index); - decl_emit_h.fwd_decl.deinit(gpa); - decl_emit_h.* 
= undefined; - } -} - fn deinitFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) void { const zcu = pt.zcu; const gpa = zcu.gpa; @@ -40,9 +20,6 @@ fn deinitFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) void { file.unload(gpa); } file.references.deinit(gpa); - if (zcu.fileRootDecl(file_index).unwrap()) |root_decl| { - pt.zcu.intern_pool.destroyDecl(pt.tid, root_decl); - } if (file.prev_zir) |prev_zir| { prev_zir.deinit(gpa); gpa.destroy(prev_zir); @@ -62,7 +39,7 @@ pub fn astGenFile( pt: Zcu.PerThread, file: *Zcu.File, path_digest: Cache.BinDigest, - opt_root_decl: Zcu.Decl.OptionalIndex, + old_root_type: InternPool.Index, ) !void { dev.check(.ast_gen); assert(!file.mod.isBuiltin()); @@ -323,13 +300,13 @@ pub fn astGenFile( return error.AnalysisFail; } - if (opt_root_decl.unwrap()) |root_decl| { + if (old_root_type != .none) { // The root of this file must be re-analyzed, since the file has changed. comp.mutex.lock(); defer comp.mutex.unlock(); - log.debug("outdated root Decl: {}", .{root_decl}); - try zcu.outdated_file_root.put(gpa, root_decl, {}); + log.debug("outdated file root type: {}", .{old_root_type}); + try zcu.outdated_file_root.put(gpa, old_root_type, {}); } } @@ -491,137 +468,171 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { } } -/// Like `ensureDeclAnalyzed`, but the Decl is a file's root Decl. +/// Ensures that `zcu.fileRootType` on this `file_index` gives an up-to-date answer. +/// Returns `error.AnalysisFail` if the file has an error. 
pub fn ensureFileAnalyzed(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { - if (pt.zcu.fileRootDecl(file_index).unwrap()) |existing_root| { - return pt.ensureDeclAnalyzed(existing_root); + const file_root_type = pt.zcu.fileRootType(file_index); + if (file_root_type != .none) { + const file_root_type_cau = pt.zcu.intern_pool.loadStructType(file_root_type).cau.unwrap().?; + return pt.ensureCauAnalyzed(file_root_type_cau); } else { return pt.semaFile(file_index); } } -/// This ensures that the Decl will have an up-to-date Type and Value populated. -/// However the resolution status of the Type may not be fully resolved. -/// For example an inferred error set is not resolved until after `analyzeFnBody`. -/// is called. -pub fn ensureDeclAnalyzed(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) Zcu.SemaError!void { - dev.check(.sema); - +/// This ensures that the state of the `Cau`, and of its corresponding `Nav` or type, +/// is fully up-to-date. Note that the type of the `Nav` may not be fully resolved. +/// Returns `error.AnalysisFail` if the `Cau` has an error. +pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu.SemaError!void { const tracy = trace(@src()); defer tracy.end(); - const mod = pt.zcu; - const ip = &mod.intern_pool; - const decl = mod.declPtr(decl_index); + const zcu = pt.zcu; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; - log.debug("ensureDeclAnalyzed '{d}' (name '{}')", .{ - @intFromEnum(decl_index), - decl.name.fmt(ip), - }); + const anal_unit = InternPool.AnalUnit.wrap(.{ .cau = cau_index }); + const cau = ip.getCau(cau_index); + const inst_info = cau.zir_index.resolveFull(ip); + + log.debug("ensureCauAnalyzed {d}", .{@intFromEnum(cau_index)}); + + assert(!zcu.analysis_in_progress.contains(anal_unit)); - // Determine whether or not this Decl is outdated, i.e. requires re-analysis - // even if `complete`. 
 If a Decl is PO, we pessismistically assume that it - // *does* require re-analysis, to ensure that the Decl is definitely + // Determine whether or not this Cau is outdated, i.e. requires re-analysis + // even if `complete`. If a Cau is PO, we pessimistically assume that it + // *does* require re-analysis, to ensure that the Cau is definitely // up-to-date when this function returns. // If analysis occurs in a poor order, this could result in over-analysis. // We do our best to avoid this by the other dependency logic in this file - // which tries to limit re-analysis to Decls whose previously listed + // which tries to limit re-analysis to Caus whose previously listed // dependencies are all up-to-date. - const decl_as_depender = InternPool.AnalUnit.wrap(.{ .decl = decl_index }); - const decl_was_outdated = mod.outdated.swapRemove(decl_as_depender) or - mod.potentially_outdated.swapRemove(decl_as_depender); + const cau_outdated = zcu.outdated.swapRemove(anal_unit) or + zcu.potentially_outdated.swapRemove(anal_unit); + + if (cau_outdated) { + _ = zcu.outdated_ready.swapRemove(anal_unit); + } + + // TODO: this only works if namespace lookups in Sema trigger `ensureCauAnalyzed`, because + // `outdated_file_root` information is not "viral", so we need that a namespace lookup first + // handles the case where the file root is not an outdated *type* but does have an outdated + // *namespace*. A more logically simple alternative may be for a file's root struct to register + // a dependency on the file's entire source code (hash). Alternatively, we could make sure that + // these are always handled first in an update. Actually, that's probably the best option. + // For my own benefit, here's how a namespace update for a normal (non-file-root) type works: + // `const S = struct { ... };` + // We are adding or removing a declaration within this `struct`. 
+ // * `S` registers a dependency on `.{ .src_hash = (declaration of S) }` + // * Any change to the `struct` body -- including changing a declaration -- invalidates this + // * `S` is re-analyzed, but notes: + // * there is an existing struct instance (at this `TrackedInst` with these captures) + // * the struct's `Cau` is up-to-date (because nothing about the fields changed) + // * so, it uses the same `struct` + // * but this doesn't stop it from updating the namespace! + // * we basically do `scanDecls`, updating the namespace as needed + // * TODO: optimize this to make sure we only do it once a generation i guess? + // * so everyone lived happily ever after + const file_root_outdated = switch (cau.owner.unwrap()) { + .type => |ty| zcu.outdated_file_root.swapRemove(ty), + .nav, .none => false, + }; - if (decl_was_outdated) { - _ = mod.outdated_ready.swapRemove(decl_as_depender); + if (zcu.fileByIndex(inst_info.file).status != .success_zir) { + return error.AnalysisFail; } - const was_outdated = mod.outdated_file_root.swapRemove(decl_index) or decl_was_outdated; - - switch (decl.analysis) { - .in_progress => unreachable, - - .file_failure => return error.AnalysisFail, - - .sema_failure, - .dependency_failure, - .codegen_failure, - => if (!was_outdated) return error.AnalysisFail, - - .complete => if (!was_outdated) return, - - .unreferenced => {}, + if (!cau_outdated and !file_root_outdated) { + // We can trust the current information about this `Cau`. + if (zcu.failed_analysis.contains(anal_unit) or zcu.transitive_failed_analysis.contains(anal_unit)) { + return error.AnalysisFail; + } + // If it wasn't failed and wasn't marked outdated, then either... + // * it is a type and is up-to-date, or + // * it is a `comptime` decl and is up-to-date, or + // * it is another decl and is EITHER up-to-date OR never-referenced (so unresolved) + // We just need to check for that last case. 
+ switch (cau.owner.unwrap()) { + .type, .none => return, + .nav => |nav| if (ip.getNav(nav).status == .resolved) return, + } } - if (was_outdated) { - dev.check(.incremental); - // The exports this Decl performs will be re-discovered, so we remove them here + // `cau_outdated` can be true in the initial update for `comptime` declarations, + // so this isn't a `dev.check`. + if (cau_outdated and dev.env.supports(.incremental)) { + // The exports this `Cau` performs will be re-discovered, so we remove them here // prior to re-analysis. - mod.deleteUnitExports(decl_as_depender); - mod.deleteUnitReferences(decl_as_depender); + zcu.deleteUnitExports(anal_unit); + zcu.deleteUnitReferences(anal_unit); } - const sema_result: Zcu.SemaDeclResult = blk: { - if (decl.zir_decl_index == .none and !mod.declIsRoot(decl_index)) { - // Anonymous decl. We don't semantically analyze these. - break :blk .{ - .invalidate_decl_val = false, - .invalidate_decl_ref = false, - }; - } - - if (mod.declIsRoot(decl_index)) { - const changed = try pt.semaFileUpdate(decl.getFileScopeIndex(mod), decl_was_outdated); - break :blk .{ + const sema_result: SemaCauResult = res: { + if (inst_info.inst == .main_struct_inst) { + const changed = try pt.semaFileUpdate(inst_info.file, cau_outdated); + break :res .{ .invalidate_decl_val = changed, .invalidate_decl_ref = changed, }; } - const decl_prog_node = mod.sema_prog_node.start(decl.fqn.toSlice(ip), 0); + const decl_prog_node = zcu.sema_prog_node.start(switch (cau.owner.unwrap()) { + .nav => |nav| ip.getNav(nav).fqn.toSlice(ip), + .type => |ty| Type.fromInterned(ty).containerTypeName(ip).toSlice(ip), + .none => "comptime", + }, 0); defer decl_prog_node.end(); - break :blk pt.semaDecl(decl_index) catch |err| switch (err) { + break :res pt.semaCau(cau_index) catch |err| switch (err) { error.AnalysisFail => { - if (decl.analysis == .in_progress) { - // If this decl caused the compile error, the analysis field would - // be changed to indicate it was this 
Decl's fault. Because this - // did not happen, we infer here that it was a dependency failure. - decl.analysis = .dependency_failure; + if (!zcu.failed_analysis.contains(anal_unit)) { + // If this `Cau` caused the error, it would have an entry in `failed_analysis`. + // Since it does not, this must be a transitive failure. + try zcu.transitive_failed_analysis.put(gpa, anal_unit, {}); } return error.AnalysisFail; }, error.GenericPoison => unreachable, - else => |e| { - decl.analysis = .sema_failure; - try mod.failed_analysis.ensureUnusedCapacity(mod.gpa, 1); - try mod.retryable_failures.append(mod.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); - mod.failed_analysis.putAssumeCapacityNoClobber(InternPool.AnalUnit.wrap(.{ .decl = decl_index }), try Zcu.ErrorMsg.create( - mod.gpa, - decl.navSrcLoc(mod), - "unable to analyze: {s}", - .{@errorName(e)}, + error.ComptimeBreak => unreachable, + error.ComptimeReturn => unreachable, + error.OutOfMemory => { + try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); + try zcu.retryable_failures.append(gpa, anal_unit); + zcu.failed_analysis.putAssumeCapacityNoClobber(anal_unit, try Zcu.ErrorMsg.create( + gpa, + .{ .base_node_inst = cau.zir_index, .offset = Zcu.LazySrcLoc.Offset.nodeOffset(0) }, + "unable to analyze: OutOfMemory", + .{}, )); return error.AnalysisFail; }, }; }; + if (!cau_outdated) { + // We definitely don't need to do any dependency tracking, so our work is done. + return; + } + // TODO: we do not yet have separate dependencies for decl values vs types. - if (decl_was_outdated) { - if (sema_result.invalidate_decl_val or sema_result.invalidate_decl_ref) { - log.debug("Decl tv invalidated ('{d}')", .{@intFromEnum(decl_index)}); - // This dependency was marked as PO, meaning dependees were waiting - // on its analysis result, and it has turned out to be outdated. - // Update dependees accordingly. 
- try mod.markDependeeOutdated(.{ .decl_val = decl_index }); - } else { - log.debug("Decl tv up-to-date ('{d}')", .{@intFromEnum(decl_index)}); - // This dependency was previously PO, but turned out to be up-to-date. - // We do not need to queue successive analysis. - try mod.markPoDependeeUpToDate(.{ .decl_val = decl_index }); - } + const invalidate = sema_result.invalidate_decl_val or sema_result.invalidate_decl_ref; + const dependee: InternPool.Dependee = switch (cau.owner.unwrap()) { + .none => return, // there are no dependencies on a `comptime` decl! + .nav => |nav_index| .{ .nav_val = nav_index }, + .type => |ty| .{ .interned = ty }, + }; + + if (invalidate) { + // This dependency was marked as PO, meaning dependees were waiting + // on its analysis result, and it has turned out to be outdated. + // Update dependees accordingly. + try zcu.markDependeeOutdated(dependee); + } else { + // This dependency was previously PO, but turned out to be up-to-date. + // We do not need to queue successive analysis. + try zcu.markPoDependeeUpToDate(dependee); } } @@ -636,28 +647,32 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter const ip = &zcu.intern_pool; // We only care about the uncoerced function. - // We need to do this for the "orphaned function" check below to be valid. const func_index = ip.unwrapCoercedFunc(maybe_coerced_func_index); const func = zcu.funcInfo(maybe_coerced_func_index); - const decl_index = func.owner_decl; - const decl = zcu.declPtr(decl_index); - log.debug("ensureFuncBodyAnalyzed '{d}' (instance of '{}')", .{ - @intFromEnum(func_index), - decl.name.fmt(ip), - }); + log.debug("ensureFuncBodyAnalyzed {d}", .{@intFromEnum(func_index)}); - // First, our owner decl must be up-to-date. This will always be the case - // during the first update, but may not on successive updates if we happen - // to get analyzed before our parent decl. 
- try pt.ensureDeclAnalyzed(decl_index); + // Here's an interesting question: is this function actually valid? + // Maybe the signature changed, so we'll end up creating a whole different `func` + // in the InternPool, and this one is a waste of time to analyze. Worse, we'd be + // analyzing new ZIR with old data, and get bogus errors. They would be unused, + // but they would still hang around internally! So, let's detect this case. + // For function decls, we must ensure the declaration's `Cau` is up-to-date, and + // check if `func_index` was removed by that update. + // For function instances, we do that process on the generic owner. - // On an update, it's possible this function changed such that our owner - // decl now refers to a different function, making this one orphaned. If - // that's the case, we should remove this function from the binary. - if (decl.val.ip_index != func_index) { - try zcu.markDependeeOutdated(.{ .func_ies = func_index }); + try pt.ensureCauAnalyzed(cau: { + const func_nav = if (func.generic_owner == .none) + func.owner_nav + else + zcu.funcInfo(func.generic_owner).owner_nav; + + break :cau ip.getNav(func_nav).analysis_owner.unwrap().?; + }); + + if (ip.isRemoved(func_index) or (func.generic_owner != .none and ip.isRemoved(func.generic_owner))) { + try zcu.markDependeeOutdated(.{ .interned = func_index }); // IES ip.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .func = func_index })); ip.remove(pt.tid, func_index); @panic("TODO: remove orphaned function from binary"); @@ -670,58 +685,40 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter else .none; - switch (decl.analysis) { - .unreferenced => unreachable, - .in_progress => unreachable, - - .codegen_failure => unreachable, // functions do not perform constant value generation + const anal_unit = InternPool.AnalUnit.wrap(.{ .func = func_index }); + const func_outdated = zcu.outdated.swapRemove(anal_unit) or + 
zcu.potentially_outdated.swapRemove(anal_unit); - .file_failure, - .sema_failure, - .dependency_failure, - => return error.AnalysisFail, - - .complete => {}, - } - - const func_as_depender = InternPool.AnalUnit.wrap(.{ .func = func_index }); - const was_outdated = zcu.outdated.swapRemove(func_as_depender) or - zcu.potentially_outdated.swapRemove(func_as_depender); - - if (was_outdated) { + if (func_outdated) { dev.check(.incremental); - _ = zcu.outdated_ready.swapRemove(func_as_depender); - zcu.deleteUnitExports(func_as_depender); - zcu.deleteUnitReferences(func_as_depender); + _ = zcu.outdated_ready.swapRemove(anal_unit); + zcu.deleteUnitExports(anal_unit); + zcu.deleteUnitReferences(anal_unit); } - switch (func.analysisUnordered(ip).state) { - .success => if (!was_outdated) return, - .sema_failure, - .dependency_failure, - .codegen_failure, - => if (!was_outdated) return error.AnalysisFail, - .none, .queued => {}, - .in_progress => unreachable, - .inline_only => unreachable, // don't queue work for this + if (!func_outdated) { + // We can trust the current information about this function. 
+ if (zcu.failed_analysis.contains(anal_unit) or zcu.transitive_failed_analysis.contains(anal_unit)) { + return error.AnalysisFail; + } + switch (func.analysisUnordered(ip).state) { + .unreferenced => {}, // this is the first reference + .queued => {}, // we're waiting on first-time analysis + .analyzed => return, // up-to-date + } } log.debug("analyze and generate fn body '{d}'; reason='{s}'", .{ @intFromEnum(func_index), - if (was_outdated) "outdated" else "never analyzed", + if (func_outdated) "outdated" else "never analyzed", }); - var tmp_arena = std.heap.ArenaAllocator.init(gpa); - defer tmp_arena.deinit(); - const sema_arena = tmp_arena.allocator(); - - var air = pt.analyzeFnBody(func_index, sema_arena) catch |err| switch (err) { + var air = pt.analyzeFnBody(func_index) catch |err| switch (err) { error.AnalysisFail => { - if (func.analysisUnordered(ip).state == .in_progress) { - // If this decl caused the compile error, the analysis field would - // be changed to indicate it was this Decl's fault. Because this - // did not happen, we infer here that it was a dependency failure. - func.setAnalysisState(ip, .dependency_failure); + if (!zcu.failed_analysis.contains(anal_unit)) { + // If this function caused the error, it would have an entry in `failed_analysis`. + // Since it does not, this must be a transitive failure. 
+ try zcu.transitive_failed_analysis.put(gpa, anal_unit, {}); } return error.AnalysisFail; }, @@ -729,18 +726,14 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter }; errdefer air.deinit(gpa); - const invalidate_ies_deps = i: { - if (!was_outdated) break :i false; - if (!func.analysisUnordered(ip).inferred_error_set) break :i true; - const new_resolved_ies = func.resolvedErrorSetUnordered(ip); - break :i new_resolved_ies != old_resolved_ies; - }; - if (invalidate_ies_deps) { - log.debug("func IES invalidated ('{d}')", .{@intFromEnum(func_index)}); - try zcu.markDependeeOutdated(.{ .func_ies = func_index }); - } else if (was_outdated) { - log.debug("func IES up-to-date ('{d}')", .{@intFromEnum(func_index)}); - try zcu.markPoDependeeUpToDate(.{ .func_ies = func_index }); + if (func_outdated) { + if (!func.analysisUnordered(ip).inferred_error_set or func.resolvedErrorSetUnordered(ip) != old_resolved_ies) { + log.debug("func IES invalidated ('{d}')", .{@intFromEnum(func_index)}); + try zcu.markDependeeOutdated(.{ .interned = func_index }); + } else { + log.debug("func IES up-to-date ('{d}')", .{@intFromEnum(func_index)}); + try zcu.markPoDependeeUpToDate(.{ .interned = func_index }); + } } const comp = zcu.comp; @@ -773,16 +766,16 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai } const func = zcu.funcInfo(func_index); - const decl_index = func.owner_decl; - const decl = zcu.declPtr(decl_index); + const nav_index = func.owner_nav; + const nav = ip.getNav(nav_index); var liveness = try Liveness.analyze(gpa, air, ip); defer liveness.deinit(gpa); if (build_options.enable_debug_extensions and comp.verbose_air) { - std.debug.print("# Begin Function AIR: {}:\n", .{decl.fqn.fmt(ip)}); + std.debug.print("# Begin Function AIR: {}:\n", .{nav.fqn.fmt(ip)}); @import("../print_air.zig").dump(pt, air, liveness); - std.debug.print("# End Function AIR: {}\n\n", .{decl.fqn.fmt(ip)}); + std.debug.print("# End Function 
AIR: {}\n\n", .{nav.fqn.fmt(ip)}); } if (std.debug.runtime_safety) { @@ -797,23 +790,18 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai verify.verify() catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, else => { - try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); - zcu.failed_analysis.putAssumeCapacityNoClobber( - InternPool.AnalUnit.wrap(.{ .func = func_index }), - try Zcu.ErrorMsg.create( - gpa, - decl.navSrcLoc(zcu), - "invalid liveness: {s}", - .{@errorName(err)}, - ), - ); - func.setAnalysisState(ip, .codegen_failure); + try zcu.failed_codegen.putNoClobber(gpa, nav_index, try Zcu.ErrorMsg.create( + gpa, + zcu.navSrcLoc(nav_index), + "invalid liveness: {s}", + .{@errorName(err)}, + )); return; }, }; } - const codegen_prog_node = zcu.codegen_prog_node.start(decl.fqn.toSlice(ip), 0); + const codegen_prog_node = zcu.codegen_prog_node.start(nav.fqn.toSlice(ip), 0); defer codegen_prog_node.end(); if (!air.typesFullyResolved(zcu)) { @@ -821,22 +809,21 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai // Correcting this failure will involve changing a type this function // depends on, hence triggering re-analysis of this function, so this // interacts correctly with incremental compilation. - func.setAnalysisState(ip, .codegen_failure); + // TODO: do we need to mark this failure anywhere? I don't think so, since compilation + // will fail due to the type error anyway. 
} else if (comp.bin_file) |lf| { lf.updateFunc(pt, func_index, air, liveness) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => { - func.setAnalysisState(ip, .codegen_failure); + assert(zcu.failed_codegen.contains(nav_index)); }, else => { - try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); - zcu.failed_analysis.putAssumeCapacityNoClobber(InternPool.AnalUnit.wrap(.{ .func = func_index }), try Zcu.ErrorMsg.create( + try zcu.failed_codegen.putNoClobber(gpa, nav_index, try Zcu.ErrorMsg.create( gpa, - decl.navSrcLoc(zcu), + zcu.navSrcLoc(nav_index), "unable to codegen: {s}", .{@errorName(err)}, )); - func.setAnalysisState(ip, .codegen_failure); try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalUnit.wrap(.{ .func = func_index })); }, }; @@ -851,17 +838,16 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai pub fn semaPkg(pt: Zcu.PerThread, pkg: *Module) !void { dev.check(.sema); const import_file_result = try pt.importPkg(pkg); - const root_decl_index = pt.zcu.fileRootDecl(import_file_result.file_index); - if (root_decl_index == .none) { + const root_type = pt.zcu.fileRootType(import_file_result.file_index); + if (root_type == .none) { return pt.semaFile(import_file_result.file_index); } } -fn getFileRootStruct( +fn createFileRootStruct( pt: Zcu.PerThread, - decl_index: Zcu.Decl.Index, - namespace_index: Zcu.Namespace.Index, file_index: Zcu.File.Index, + namespace_index: Zcu.Namespace.Index, ) Allocator.Error!InternPool.Index { const zcu = pt.zcu; const gpa = zcu.gpa; @@ -912,34 +898,37 @@ fn getFileRootStruct( }; errdefer wip_ty.cancel(ip, pt.tid); + wip_ty.setName(ip, try file.internFullyQualifiedName(pt)); + ip.namespacePtr(namespace_index).owner_type = wip_ty.index; + const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, namespace_index, wip_ty.index); + if (zcu.comp.incremental) { try ip.addDependency( gpa, - InternPool.AnalUnit.wrap(.{ .decl = decl_index }), + 
InternPool.AnalUnit.wrap(.{ .cau = new_cau_index }), .{ .src_hash = tracked_inst }, ); } - const decl = zcu.declPtr(decl_index); - decl.val = Value.fromInterned(wip_ty.index); - decl.has_tv = true; - decl.owns_tv = true; - decl.analysis = .complete; - - try pt.scanNamespace(namespace_index, decls, decl); + try pt.scanNamespace(namespace_index, decls); try zcu.comp.queueJob(.{ .resolve_type_fully = wip_ty.index }); - return wip_ty.finish(ip, decl_index, namespace_index.toOptional()); + zcu.setFileRootType(file_index, wip_ty.index); + return wip_ty.finish(ip, new_cau_index.toOptional(), namespace_index.toOptional()); } -/// Re-analyze the root Decl of a file on an incremental update. +/// Re-analyze the root type of a file on an incremental update. /// If `type_outdated`, the struct type itself is considered outdated and is /// reconstructed at a new InternPool index. Otherwise, the namespace is just /// re-analyzed. Returns whether the decl's tyval was invalidated. +/// Returns `error.AnalysisFail` if the file has an error. fn semaFileUpdate(pt: Zcu.PerThread, file_index: Zcu.File.Index, type_outdated: bool) Zcu.SemaError!bool { const zcu = pt.zcu; const ip = &zcu.intern_pool; const file = zcu.fileByIndex(file_index); - const decl = zcu.declPtr(zcu.fileRootDecl(file_index).unwrap().?); + const file_root_type = zcu.fileRootType(file_index); + const namespace_index = Type.fromInterned(file_root_type).getNamespaceIndex(zcu).unwrap().?; + + assert(file_root_type != .none); log.debug("semaFileUpdate mod={s} sub_file_path={s} type_outdated={}", .{ file.mod.fully_qualified_name, @@ -948,33 +937,18 @@ fn semaFileUpdate(pt: Zcu.PerThread, file_index: Zcu.File.Index, type_outdated: }); if (file.status != .success_zir) { - if (decl.analysis == .file_failure) { - return false; - } else { - decl.analysis = .file_failure; - return true; - } - } - - if (decl.analysis == .file_failure) { - // No struct type currently exists. Create one! 
- const root_decl = zcu.fileRootDecl(file_index); - _ = try pt.getFileRootStruct(root_decl.unwrap().?, decl.src_namespace, file_index); - return true; + return error.AnalysisFail; } - assert(decl.has_tv); - assert(decl.owns_tv); - if (type_outdated) { - // Invalidate the existing type, reusing the decl and namespace. - const file_root_decl = zcu.fileRootDecl(file_index).unwrap().?; - ip.removeDependenciesForDepender(zcu.gpa, InternPool.AnalUnit.wrap(.{ - .decl = file_root_decl, - })); - ip.remove(pt.tid, decl.val.toIntern()); - decl.val = undefined; - _ = try pt.getFileRootStruct(file_root_decl, decl.src_namespace, file_index); + // Invalidate the existing type, reusing its namespace. + const file_root_type_cau = ip.loadStructType(file_root_type).cau.unwrap().?; + ip.removeDependenciesForDepender( + zcu.gpa, + InternPool.AnalUnit.wrap(.{ .cau = file_root_type_cau }), + ); + ip.remove(pt.tid, file_root_type); + _ = try pt.createFileRootStruct(file_index, namespace_index); return true; } @@ -994,7 +968,7 @@ fn semaFileUpdate(pt: Zcu.PerThread, file_index: Zcu.File.Index, type_outdated: const decls = file.zir.bodySlice(extra_index, decls_len); if (!type_outdated) { - try pt.scanNamespace(decl.src_namespace, decls, decl); + try pt.scanNamespace(namespace_index, decls); } return false; @@ -1009,43 +983,19 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { const zcu = pt.zcu; const gpa = zcu.gpa; const file = zcu.fileByIndex(file_index); - assert(zcu.fileRootDecl(file_index) == .none); - log.debug("semaFile zcu={s} sub_file_path={s}", .{ - file.mod.fully_qualified_name, file.sub_file_path, - }); - - // Because these three things each reference each other, `undefined` - // placeholders are used before being set after the struct type gains an - // InternPool index. 
- const new_namespace_index = try pt.createNamespace(.{ - .parent = .none, - .decl_index = undefined, - .file_scope = file_index, - }); - errdefer pt.destroyNamespace(new_namespace_index); - - const new_decl_index = try pt.allocateNewDecl(new_namespace_index); - const new_decl = zcu.declPtr(new_decl_index); - errdefer @panic("TODO error handling"); - - zcu.setFileRootDecl(file_index, new_decl_index.toOptional()); - zcu.namespacePtr(new_namespace_index).decl_index = new_decl_index; - - new_decl.fqn = try file.internFullyQualifiedName(pt); - new_decl.name = new_decl.fqn; - new_decl.is_pub = true; - new_decl.is_exported = false; - new_decl.alignment = .none; - new_decl.@"linksection" = .none; - new_decl.analysis = .in_progress; + assert(zcu.fileRootType(file_index) == .none); if (file.status != .success_zir) { - new_decl.analysis = .file_failure; - return; + return error.AnalysisFail; } assert(file.zir_loaded); - const struct_ty = try pt.getFileRootStruct(new_decl_index, new_namespace_index, file_index); + const new_namespace_index = try pt.createNamespace(.{ + .parent = .none, + .owner_type = undefined, // set in `createFileRootStruct` + .file_scope = file_index, + }); + const struct_ty = try pt.createFileRootStruct(file_index, new_namespace_index); errdefer zcu.intern_pool.remove(pt.tid, struct_ty); switch (zcu.comp.cache_use) { @@ -1067,98 +1017,121 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void { whole.cache_manifest_mutex.lock(); defer whole.cache_manifest_mutex.unlock(); - try man.addFilePostContents(resolved_path, source.bytes, source.stat); + man.addFilePostContents(resolved_path, source.bytes, source.stat) catch |err| switch (err) { + error.OutOfMemory => |e| return e, + else => { + try pt.reportRetryableFileError(file_index, "unable to update cache: {s}", .{@errorName(err)}); + return error.AnalysisFail; + }, + }; }, .incremental => {}, } } -fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult { - 
const tracy = trace(@src()); - defer tracy.end(); +const SemaCauResult = packed struct { + /// Whether the value of a `decl_val` of the corresponding Nav changed. + invalidate_decl_val: bool, + /// Whether the type of a `decl_ref` of the corresponding Nav changed. + invalidate_decl_ref: bool, +}; +/// Performs semantic analysis on the given `Cau`, storing results to its owner `Nav` if needed. +/// If analysis fails, returns `error.AnalysisFail`, storing an error in `zcu.failed_analysis` unless +/// the error is transitive. +/// On success, returns information about whether the `Nav` value changed. +fn semaCau(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) !SemaCauResult { const zcu = pt.zcu; - const decl = zcu.declPtr(decl_index); + const gpa = zcu.gpa; const ip = &zcu.intern_pool; - if (decl.getFileScope(zcu).status != .success_zir) { - return error.AnalysisFail; - } + const anal_unit = InternPool.AnalUnit.wrap(.{ .cau = cau_index }); - assert(!zcu.declIsRoot(decl_index)); + const cau = ip.getCau(cau_index); + const inst_info = cau.zir_index.resolveFull(ip); + const file = zcu.fileByIndex(inst_info.file); + const zir = file.zir; - if (decl.zir_decl_index == .none and decl.owns_tv) { - // We are re-analyzing an anonymous owner Decl (for a function or a namespace type). - return pt.semaAnonOwnerDecl(decl_index); + if (file.status != .success_zir) { + return error.AnalysisFail; } - log.debug("semaDecl '{d}'", .{@intFromEnum(decl_index)}); - log.debug("decl name '{}'", .{decl.fqn.fmt(ip)}); - defer log.debug("finish decl name '{}'", .{decl.fqn.fmt(ip)}); + // We are about to re-analyze this `Cau`; drop its dependencies. 
+ zcu.intern_pool.removeDependenciesForDepender(gpa, anal_unit); - const old_has_tv = decl.has_tv; - // The following values are ignored if `!old_has_tv` - const old_ty = if (old_has_tv) decl.typeOf(zcu) else undefined; - const old_val = decl.val; - const old_align = decl.alignment; - const old_linksection = decl.@"linksection"; - const old_addrspace = decl.@"addrspace"; - const old_is_inline = if (decl.getOwnedFunction(zcu)) |prev_func| - prev_func.analysisUnordered(ip).state == .inline_only - else - false; - - const decl_inst = decl.zir_decl_index.unwrap().?.resolve(ip); + const builtin_type_target_index: InternPool.Index = switch (cau.owner.unwrap()) { + .none => ip_index: { + // `comptime` decl -- we will re-analyze its body. + // This declaration has no value so is definitely not a std.builtin type. + break :ip_index .none; + }, + .type => |ty| { + // This is an incremental update, and this type is being re-analyzed because it is outdated. + // The type must be recreated at a new `InternPool.Index`. + // Remove it from the InternPool and mark it outdated so that creation sites are re-analyzed. + ip.remove(pt.tid, ty); + return .{ + .invalidate_decl_val = true, + .invalidate_decl_ref = true, + }; + }, + .nav => |nav| ip_index: { + // Other decl -- we will re-analyze its value. + // This might be a type in `builtin.zig` -- check. + if (file.mod != zcu.std_mod) break :ip_index .none; + // We're in the std module. 
+ const nav_name = ip.getNav(nav).name; + const std_file_imported = try pt.importPkg(zcu.std_mod); + const std_type = Type.fromInterned(zcu.fileRootType(std_file_imported.file_index)); + const std_namespace = zcu.namespacePtr(std_type.getNamespace(zcu).?.unwrap().?); + const builtin_str = try ip.getOrPutString(gpa, pt.tid, "builtin", .no_embedded_nulls); + const builtin_nav = ip.getNav(std_namespace.pub_decls.getKeyAdapted(builtin_str, Zcu.Namespace.NameAdapter{ .zcu = zcu }) orelse break :ip_index .none); + const builtin_namespace = switch (builtin_nav.status) { + .unresolved => break :ip_index .none, + .resolved => |r| Type.fromInterned(r.val).getNamespace(zcu).?.unwrap().?, + }; + if (cau.namespace != builtin_namespace) break :ip_index .none; + // We're in builtin.zig. This could be a builtin we need to add to a specific InternPool index. + for ([_][]const u8{ + "AtomicOrder", + "AtomicRmwOp", + "CallingConvention", + "AddressSpace", + "FloatMode", + "ReduceOp", + "CallModifier", + "PrefetchOptions", + "ExportOptions", + "ExternOptions", + "Type", + }, [_]InternPool.Index{ + .atomic_order_type, + .atomic_rmw_op_type, + .calling_convention_type, + .address_space_type, + .float_mode_type, + .reduce_op_type, + .call_modifier_type, + .prefetch_options_type, + .export_options_type, + .extern_options_type, + .type_info_type, + }) |type_name, type_ip| { + if (nav_name.eqlSlice(type_name, ip)) break :ip_index type_ip; + } + break :ip_index .none; + }, + }; - const gpa = zcu.gpa; - const zir = decl.getFileScope(zcu).zir; - - const builtin_type_target_index: InternPool.Index = ip_index: { - const std_mod = zcu.std_mod; - if (decl.getFileScope(zcu).mod != std_mod) break :ip_index .none; - // We're in the std module. 
- const std_file_imported = try pt.importPkg(std_mod); - const std_file_root_decl_index = zcu.fileRootDecl(std_file_imported.file_index); - const std_decl = zcu.declPtr(std_file_root_decl_index.unwrap().?); - const std_namespace = std_decl.getInnerNamespace(zcu).?; - const builtin_str = try ip.getOrPutString(gpa, pt.tid, "builtin", .no_embedded_nulls); - const builtin_decl = zcu.declPtr(std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse break :ip_index .none); - const builtin_namespace = builtin_decl.getInnerNamespaceIndex(zcu).unwrap() orelse break :ip_index .none; - if (decl.src_namespace != builtin_namespace) break :ip_index .none; - // We're in builtin.zig. This could be a builtin we need to add to a specific InternPool index. - for ([_][]const u8{ - "AtomicOrder", - "AtomicRmwOp", - "CallingConvention", - "AddressSpace", - "FloatMode", - "ReduceOp", - "CallModifier", - "PrefetchOptions", - "ExportOptions", - "ExternOptions", - "Type", - }, [_]InternPool.Index{ - .atomic_order_type, - .atomic_rmw_op_type, - .calling_convention_type, - .address_space_type, - .float_mode_type, - .reduce_op_type, - .call_modifier_type, - .prefetch_options_type, - .export_options_type, - .extern_options_type, - .type_info_type, - }) |type_name, type_ip| { - if (decl.name.eqlSlice(type_name, ip)) break :ip_index type_ip; - } - break :ip_index .none; + const is_usingnamespace = switch (cau.owner.unwrap()) { + .nav => |nav| ip.getNav(nav).is_usingnamespace, + .none, .type => false, }; - zcu.intern_pool.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); + log.debug("semaCau '{d}'", .{@intFromEnum(cau_index)}); - decl.analysis = .in_progress; + try zcu.analysis_in_progress.put(gpa, anal_unit, {}); + errdefer _ = zcu.analysis_in_progress.swapRemove(anal_unit); var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); @@ -1171,224 +1144,216 @@ fn semaDecl(pt: Zcu.PerThread, decl_index: 
Zcu.Decl.Index) !Zcu.SemaDeclResult { .gpa = gpa, .arena = analysis_arena.allocator(), .code = zir, - .owner_decl = decl, - .owner_decl_index = decl_index, + .owner = anal_unit, .func_index = .none, .func_is_naked = false, .fn_ret_ty = Type.void, .fn_ret_ty_ies = null, - .owner_func_index = .none, .comptime_err_ret_trace = &comptime_err_ret_trace, .builtin_type_target_index = builtin_type_target_index, }; defer sema.deinit(); - // Every Decl (other than file root Decls, which do not have a ZIR index) has a dependency on its own source. - try sema.declareDependency(.{ .src_hash = try ip.trackZir(gpa, pt.tid, .{ - .file = decl.getFileScopeIndex(zcu), - .inst = decl_inst, - }) }); + // Every `Cau` has a dependency on the source of its own ZIR instruction. + try sema.declareDependency(.{ .src_hash = cau.zir_index }); - var block_scope: Sema.Block = .{ + var block: Sema.Block = .{ .parent = null, .sema = &sema, - .namespace = decl.src_namespace, + .namespace = cau.namespace, .instructions = .{}, .inlining = null, .is_comptime = true, - .src_base_inst = decl.zir_decl_index.unwrap().?, - .type_name_ctx = decl.name, + .src_base_inst = cau.zir_index, + .type_name_ctx = switch (cau.owner.unwrap()) { + .nav => |nav| ip.getNav(nav).fqn, + .type => |ty| Type.fromInterned(ty).containerTypeName(ip), + .none => try ip.getOrPutStringFmt(gpa, pt.tid, "{}.comptime", .{ + Type.fromInterned(zcu.namespacePtr(cau.namespace).owner_type).containerTypeName(ip).fmt(ip), + }, .no_embedded_nulls), + }, + }; + defer block.instructions.deinit(gpa); + + const zir_decl: Zir.Inst.Declaration, const decl_bodies: Zir.Inst.Declaration.Bodies = decl: { + const decl, const extra_end = zir.getDeclaration(inst_info.inst); + break :decl .{ decl, decl.getBodies(extra_end, zir) }; + }; + + // We have to fetch this state before resolving the body because of the `nav_already_populated` + // case below. 
We might change the language in future so that align/linksection/etc for functions + // work in a way more in line with other declarations, in which case that logic will go away. + const old_nav_info = switch (cau.owner.unwrap()) { + .none, .type => undefined, // we'll never use `old_nav_info` + .nav => |nav| ip.getNav(nav), }; - defer block_scope.instructions.deinit(gpa); - const decl_bodies = decl.zirBodies(zcu); + const result_ref = try sema.resolveInlineBody(&block, decl_bodies.value_body, inst_info.inst); - const result_ref = try sema.resolveInlineBody(&block_scope, decl_bodies.value_body, decl_inst); - // We'll do some other bits with the Sema. Clear the type target index just - // in case they analyze any type. + const nav_index = switch (cau.owner.unwrap()) { + .none => { + // This is a `comptime` decl, so we are done -- the side effects are all we care about. + // Just make sure to `flushExports`. + try sema.flushExports(); + assert(zcu.analysis_in_progress.swapRemove(anal_unit)); + return .{ + .invalidate_decl_val = false, + .invalidate_decl_ref = false, + }; + }, + .nav => |nav| nav, // We will resolve this `Nav` below. + .type => unreachable, // Handled at top of function. + }; + + // We'll do more work with the Sema. Clear the target type index just in case we analyze any type. 
sema.builtin_type_target_index = .none; - const align_src = block_scope.src(.{ .node_offset_var_decl_align = 0 }); - const section_src = block_scope.src(.{ .node_offset_var_decl_section = 0 }); - const address_space_src = block_scope.src(.{ .node_offset_var_decl_addrspace = 0 }); - const ty_src = block_scope.src(.{ .node_offset_var_decl_ty = 0 }); - const init_src = block_scope.src(.{ .node_offset_var_decl_init = 0 }); - const decl_val = try sema.resolveFinalDeclValue(&block_scope, init_src, result_ref); + + const align_src = block.src(.{ .node_offset_var_decl_align = 0 }); + const section_src = block.src(.{ .node_offset_var_decl_section = 0 }); + const addrspace_src = block.src(.{ .node_offset_var_decl_addrspace = 0 }); + const ty_src = block.src(.{ .node_offset_var_decl_ty = 0 }); + const init_src = block.src(.{ .node_offset_var_decl_init = 0 }); + + const decl_val = try sema.resolveFinalDeclValue(&block, init_src, result_ref); const decl_ty = decl_val.typeOf(zcu); - // Note this resolves the type of the Decl, not the value; if this Decl - // is a struct, for example, this resolves `type` (which needs no resolution), - // not the struct itself. + switch (decl_val.toIntern()) { + .generic_poison => unreachable, // assertion failure + .unreachable_value => unreachable, // assertion failure + else => {}, + } + + // This resolves the type of the resolved value, not that value itself. If `decl_val` is a struct type, + // this resolves the type `type` (which needs no resolution), not the struct itself. try decl_ty.resolveLayout(pt); - if (decl.kind == .@"usingnamespace") { - if (!decl_ty.eql(Type.type, zcu)) { - return sema.fail(&block_scope, ty_src, "expected type, found {}", .{decl_ty.fmt(pt)}); + // TODO: this is jank. If #20663 is rejected, let's think about how to better model `usingnamespace`. 
+ if (is_usingnamespace) { + if (decl_ty.toIntern() != .type_type) { + return sema.fail(&block, ty_src, "expected type, found {}", .{decl_ty.fmt(pt)}); } - const ty = decl_val.toType(); - if (ty.getNamespace(zcu) == null) { - return sema.fail(&block_scope, ty_src, "type {} has no namespace", .{ty.fmt(pt)}); + if (decl_val.toType().getNamespace(zcu) == null) { + return sema.fail(&block, ty_src, "type {} has no namespace", .{decl_val.toType().fmt(pt)}); } - - decl.val = ty.toValue(); - decl.alignment = .none; - decl.@"linksection" = .none; - decl.has_tv = true; - decl.owns_tv = false; - decl.analysis = .complete; - - // TODO: usingnamespace cannot currently participate in incremental compilation + ip.resolveNavValue(nav_index, .{ + .val = decl_val.toIntern(), + .alignment = .none, + .@"linksection" = .none, + .@"addrspace" = .generic, + }); + // TODO: usingnamespace cannot participate in incremental compilation + assert(zcu.analysis_in_progress.swapRemove(anal_unit)); return .{ .invalidate_decl_val = true, .invalidate_decl_ref = true, }; } - var queue_linker_work = true; - var is_func = false; - var is_inline = false; - switch (decl_val.toIntern()) { - .generic_poison => unreachable, - .unreachable_value => unreachable, - else => switch (ip.indexToKey(decl_val.toIntern())) { - .variable => |variable| { - decl.owns_tv = variable.decl == decl_index; - queue_linker_work = decl.owns_tv; - }, - - .extern_func => |extern_func| { - decl.owns_tv = extern_func.decl == decl_index; - queue_linker_work = decl.owns_tv; - is_func = decl.owns_tv; - }, - - .func => |func| { - decl.owns_tv = func.owner_decl == decl_index; - queue_linker_work = false; - is_inline = decl.owns_tv and decl_ty.fnCallingConvention(zcu) == .Inline; - is_func = decl.owns_tv; - }, - - else => {}, - }, - } + const nav_already_populated, const queue_linker_work = switch (ip.indexToKey(decl_val.toIntern())) { + .func => |f| .{ f.owner_nav == nav_index, false }, + .variable => |v| .{ false, v.owner_nav == 
nav_index }, + .@"extern" => .{ false, false }, + else => .{ false, true }, + }; - decl.val = decl_val; - // Function linksection, align, and addrspace were already set by Sema - if (!is_func) { - decl.alignment = blk: { - const align_body = decl_bodies.align_body orelse break :blk .none; - const align_ref = try sema.resolveInlineBody(&block_scope, align_body, decl_inst); - break :blk try sema.analyzeAsAlign(&block_scope, align_src, align_ref); + if (nav_already_populated) { + // This is a function declaration. + // Logic in `Sema.funcCommon` has already populated the `Nav` for us. + assert(ip.getNav(nav_index).status.resolved.val == decl_val.toIntern()); + } else { + // Keep in sync with logic in `Sema.zirVarExtended`. + const alignment: InternPool.Alignment = a: { + const align_body = decl_bodies.align_body orelse break :a .none; + const align_ref = try sema.resolveInlineBody(&block, align_body, inst_info.inst); + break :a try sema.analyzeAsAlign(&block, align_src, align_ref); }; - decl.@"linksection" = blk: { - const linksection_body = decl_bodies.linksection_body orelse break :blk .none; - const linksection_ref = try sema.resolveInlineBody(&block_scope, linksection_body, decl_inst); - const bytes = try sema.toConstString(&block_scope, section_src, linksection_ref, .{ + + const @"linksection": InternPool.OptionalNullTerminatedString = ls: { + const linksection_body = decl_bodies.linksection_body orelse break :ls .none; + const linksection_ref = try sema.resolveInlineBody(&block, linksection_body, inst_info.inst); + const bytes = try sema.toConstString(&block, section_src, linksection_ref, .{ .needed_comptime_reason = "linksection must be comptime-known", }); if (std.mem.indexOfScalar(u8, bytes, 0) != null) { - return sema.fail(&block_scope, section_src, "linksection cannot contain null bytes", .{}); + return sema.fail(&block, section_src, "linksection cannot contain null bytes", .{}); } else if (bytes.len == 0) { - return sema.fail(&block_scope, section_src, 
"linksection cannot be empty", .{}); + return sema.fail(&block, section_src, "linksection cannot be empty", .{}); } - break :blk try ip.getOrPutStringOpt(gpa, pt.tid, bytes, .no_embedded_nulls); + break :ls try ip.getOrPutStringOpt(gpa, pt.tid, bytes, .no_embedded_nulls); }; - decl.@"addrspace" = blk: { + + const @"addrspace": std.builtin.AddressSpace = as: { const addrspace_ctx: Sema.AddressSpaceContext = switch (ip.indexToKey(decl_val.toIntern())) { + .func => .function, .variable => .variable, - .extern_func, .func => .function, + .@"extern" => |e| if (ip.indexToKey(e.ty) == .func_type) + .function + else + .variable, else => .constant, }; - const target = zcu.getTarget(); - - const addrspace_body = decl_bodies.addrspace_body orelse break :blk switch (addrspace_ctx) { + const addrspace_body = decl_bodies.addrspace_body orelse break :as switch (addrspace_ctx) { .function => target_util.defaultAddressSpace(target, .function), .variable => target_util.defaultAddressSpace(target, .global_mutable), .constant => target_util.defaultAddressSpace(target, .global_constant), else => unreachable, }; - const addrspace_ref = try sema.resolveInlineBody(&block_scope, addrspace_body, decl_inst); - break :blk try sema.analyzeAsAddressSpace(&block_scope, address_space_src, addrspace_ref, addrspace_ctx); + const addrspace_ref = try sema.resolveInlineBody(&block, addrspace_body, inst_info.inst); + break :as try sema.analyzeAsAddressSpace(&block, addrspace_src, addrspace_ref, addrspace_ctx); }; - } - decl.has_tv = true; - decl.analysis = .complete; - - const result: Zcu.SemaDeclResult = if (old_has_tv) .{ - .invalidate_decl_val = !decl_ty.eql(old_ty, zcu) or - !decl.val.eql(old_val, decl_ty, zcu) or - is_inline != old_is_inline, - .invalidate_decl_ref = !decl_ty.eql(old_ty, zcu) or - decl.alignment != old_align or - decl.@"linksection" != old_linksection or - decl.@"addrspace" != old_addrspace or - is_inline != old_is_inline, - } else .{ - .invalidate_decl_val = true, - 
.invalidate_decl_ref = true, - }; - - const has_runtime_bits = queue_linker_work and (is_func or try sema.typeHasRuntimeBits(decl_ty)); - if (has_runtime_bits) { - // Needed for codegen_decl which will call updateDecl and then the - // codegen backend wants full access to the Decl Type. - try decl_ty.resolveFully(pt); - - try zcu.comp.queueJob(.{ .codegen_decl = decl_index }); - if (result.invalidate_decl_ref and zcu.emit_h != null) { - try zcu.comp.queueJob(.{ .emit_h_decl = decl_index }); - } + ip.resolveNavValue(nav_index, .{ + .val = decl_val.toIntern(), + .alignment = alignment, + .@"linksection" = @"linksection", + .@"addrspace" = @"addrspace", + }); } - if (decl.is_exported) { - const export_src = block_scope.src(.{ .token_offset = @intFromBool(decl.is_pub) }); - if (is_inline) return sema.fail(&block_scope, export_src, "export of inline function", .{}); - // The scope needs to have the decl in it. - try sema.analyzeExport(&block_scope, export_src, .{ .name = decl.name }, decl_index); + // Mark the `Cau` as completed before evaluating the export! + assert(zcu.analysis_in_progress.swapRemove(anal_unit)); + + if (zir_decl.flags.is_export) { + const export_src = block.src(.{ .token_offset = @intFromBool(zir_decl.flags.is_pub) }); + const name_slice = zir.nullTerminatedString(zir_decl.name.toString(zir).?); + const name_ip = try ip.getOrPutString(gpa, pt.tid, name_slice, .no_embedded_nulls); + try sema.analyzeExport(&block, export_src, .{ .name = name_ip }, nav_index); } try sema.flushExports(); - return result; -} - -pub fn semaAnonOwnerDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult { - const zcu = pt.zcu; - const decl = zcu.declPtr(decl_index); + queue_codegen: { + if (!queue_linker_work) break :queue_codegen; - assert(decl.has_tv); - assert(decl.owns_tv); + // Needed for codegen_nav which will call updateDecl and then the + // codegen backend wants full access to the Decl Type. 
+ // We also need this for the `isFnOrHasRuntimeBits` check below. + // TODO: we could make the language more lenient by deferring this work + // to the `codegen_nav` job. + try decl_ty.resolveFully(pt); - log.debug("semaAnonOwnerDecl '{d}'", .{@intFromEnum(decl_index)}); + if (!decl_ty.isFnOrHasRuntimeBits(pt)) break :queue_codegen; - switch (decl.typeOf(zcu).zigTypeTag(zcu)) { - .Fn => @panic("TODO: update fn instance"), - .Type => {}, - else => unreachable, + try zcu.comp.queueJob(.{ .codegen_nav = nav_index }); } - // We are the owner Decl of a type, and we were marked as outdated. That means the *structure* - // of this type changed; not just its namespace. Therefore, we need a new InternPool index. - // - // However, as soon as we make that, the context that created us will require re-analysis anyway - // (as it depends on this Decl's value), meaning the `struct_decl` (or equivalent) instruction - // will be analyzed again. Since Sema already needs to be able to reconstruct types like this, - // why should we bother implementing it here too when the Sema logic will be hit right after? - // - // So instead, let's just mark this Decl as failed - so that any remaining Decls which genuinely - // reference it (via `@This`) end up silently erroring too - and we'll let Sema make a new type - // with a new Decl. - // - // Yes, this does mean that any type owner Decl has a constant value for its entire lifetime. 
- zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); - zcu.intern_pool.remove(pt.tid, decl.val.toIntern()); - decl.analysis = .dependency_failure; - return .{ - .invalidate_decl_val = true, - .invalidate_decl_ref = true, - }; + switch (old_nav_info.status) { + .unresolved => return .{ + .invalidate_decl_val = true, + .invalidate_decl_ref = true, + }, + .resolved => |old| { + const new = ip.getNav(nav_index).status.resolved; + return .{ + .invalidate_decl_val = new.val != old.val, + .invalidate_decl_ref = ip.typeOf(new.val) != ip.typeOf(old.val) or + new.alignment != old.alignment or + new.@"linksection" != old.@"linksection" or + new.@"addrspace" != old.@"addrspace", + }; + }, + } } pub fn importPkg(pt: Zcu.PerThread, mod: *Module) !Zcu.ImportFileResult { @@ -1426,7 +1391,7 @@ pub fn importPkg(pt: Zcu.PerThread, mod: *Module) !Zcu.ImportFileResult { const file_index = try ip.createFile(gpa, pt.tid, .{ .bin_digest = path_digest, .file = builtin_file, - .root_decl = .none, + .root_type = .none, }); keep_resolved_path = true; // It's now owned by import_table. gop.value_ptr.* = file_index; @@ -1453,7 +1418,7 @@ pub fn importPkg(pt: Zcu.PerThread, mod: *Module) !Zcu.ImportFileResult { const new_file_index = try ip.createFile(gpa, pt.tid, .{ .bin_digest = path_digest, .file = new_file, - .root_decl = .none, + .root_type = .none, }); keep_resolved_path = true; // It's now owned by import_table. gop.value_ptr.* = new_file_index; @@ -1563,7 +1528,7 @@ pub fn importFile( const new_file_index = try ip.createFile(gpa, pt.tid, .{ .bin_digest = path_digest, .file = new_file, - .root_decl = .none, + .root_type = .none, }); keep_resolved_path = true; // It's now owned by import_table. 
gop.value_ptr.* = new_file_index; @@ -1726,7 +1691,7 @@ fn newEmbedFile( })).toIntern(); const ptr_val = try pt.intern(.{ .ptr = .{ .ty = ptr_ty, - .base_addr = .{ .anon_decl = .{ + .base_addr = .{ .uav = .{ .val = array_val, .orig_ty = ptr_ty, } }, @@ -1748,39 +1713,70 @@ pub fn scanNamespace( pt: Zcu.PerThread, namespace_index: Zcu.Namespace.Index, decls: []const Zir.Inst.Index, - parent_decl: *Zcu.Decl, ) Allocator.Error!void { const tracy = trace(@src()); defer tracy.end(); const zcu = pt.zcu; + const ip = &zcu.intern_pool; const gpa = zcu.gpa; const namespace = zcu.namespacePtr(namespace_index); // For incremental updates, `scanDecl` wants to look up existing decls by their ZIR index rather // than their name. We'll build an efficient mapping now, then discard the current `decls`. - var existing_by_inst: std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, Zcu.Decl.Index) = .{}; + // We map to the `Cau`, since not every declaration has a `Nav`. + var existing_by_inst: std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, InternPool.Cau.Index) = .{}; defer existing_by_inst.deinit(gpa); - try existing_by_inst.ensureTotalCapacity(gpa, @intCast(namespace.decls.count())); - - for (namespace.decls.keys()) |decl_index| { - const decl = zcu.declPtr(decl_index); - existing_by_inst.putAssumeCapacityNoClobber(decl.zir_decl_index.unwrap().?, decl_index); + try existing_by_inst.ensureTotalCapacity(gpa, @intCast( + namespace.pub_decls.count() + namespace.priv_decls.count() + + namespace.pub_usingnamespace.items.len + namespace.priv_usingnamespace.items.len + + namespace.other_decls.items.len, + )); + + for (namespace.pub_decls.keys()) |nav| { + const cau_index = ip.getNav(nav).analysis_owner.unwrap().?; + const zir_index = ip.getCau(cau_index).zir_index; + existing_by_inst.putAssumeCapacityNoClobber(zir_index, cau_index); + } + for (namespace.priv_decls.keys()) |nav| { + const cau_index = ip.getNav(nav).analysis_owner.unwrap().?; + const zir_index = 
ip.getCau(cau_index).zir_index; + existing_by_inst.putAssumeCapacityNoClobber(zir_index, cau_index); + } + for (namespace.pub_usingnamespace.items) |nav| { + const cau_index = ip.getNav(nav).analysis_owner.unwrap().?; + const zir_index = ip.getCau(cau_index).zir_index; + existing_by_inst.putAssumeCapacityNoClobber(zir_index, cau_index); + } + for (namespace.priv_usingnamespace.items) |nav| { + const cau_index = ip.getNav(nav).analysis_owner.unwrap().?; + const zir_index = ip.getCau(cau_index).zir_index; + existing_by_inst.putAssumeCapacityNoClobber(zir_index, cau_index); + } + for (namespace.other_decls.items) |cau_index| { + const cau = ip.getCau(cau_index); + existing_by_inst.putAssumeCapacityNoClobber(cau.zir_index, cau_index); + // If this is a test, it'll be re-added to `test_functions` later on + // if still alive. Remove it for now. + switch (cau.owner.unwrap()) { + .none, .type => {}, + .nav => |nav| _ = zcu.test_functions.swapRemove(nav), + } } var seen_decls: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; defer seen_decls.deinit(gpa); - namespace.decls.clearRetainingCapacity(); - try namespace.decls.ensureTotalCapacity(gpa, decls.len); - - namespace.usingnamespace_set.clearRetainingCapacity(); + namespace.pub_decls.clearRetainingCapacity(); + namespace.priv_decls.clearRetainingCapacity(); + namespace.pub_usingnamespace.clearRetainingCapacity(); + namespace.priv_usingnamespace.clearRetainingCapacity(); + namespace.other_decls.clearRetainingCapacity(); var scan_decl_iter: ScanDeclIter = .{ .pt = pt, .namespace_index = namespace_index, - .parent_decl = parent_decl, .seen_decls = &seen_decls, .existing_by_inst = &existing_by_inst, .pass = .named, @@ -1792,34 +1788,17 @@ pub fn scanNamespace( for (decls) |decl_inst| { try scan_decl_iter.scanDecl(decl_inst); } - - if (seen_decls.count() != namespace.decls.count()) { - // Do a pass over the namespace contents and remove any decls from the last update - // which were removed in this one. 
- var i: usize = 0; - while (i < namespace.decls.count()) { - const decl_index = namespace.decls.keys()[i]; - const decl = zcu.declPtr(decl_index); - if (!seen_decls.contains(decl.name)) { - // We must preserve namespace ordering for @typeInfo. - namespace.decls.orderedRemoveAt(i); - i -= 1; - } - } - } } const ScanDeclIter = struct { pt: Zcu.PerThread, namespace_index: Zcu.Namespace.Index, - parent_decl: *Zcu.Decl, seen_decls: *std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void), - existing_by_inst: *const std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, Zcu.Decl.Index), + existing_by_inst: *const std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, InternPool.Cau.Index), /// Decl scanning is run in two passes, so that we can detect when a generated /// name would clash with an explicit name and use a different one. pass: enum { named, unnamed }, usingnamespace_index: usize = 0, - comptime_index: usize = 0, unnamed_test_index: usize = 0, fn avoidNameConflict(iter: *ScanDeclIter, comptime fmt: []const u8, args: anytype) !InternPool.NullTerminatedString { @@ -1843,37 +1822,35 @@ const ScanDeclIter = struct { const pt = iter.pt; const zcu = pt.zcu; + const comp = zcu.comp; const namespace_index = iter.namespace_index; const namespace = zcu.namespacePtr(namespace_index); const gpa = zcu.gpa; - const zir = namespace.fileScope(zcu).zir; + const file = namespace.fileScope(zcu); + const zir = file.zir; const ip = &zcu.intern_pool; const inst_data = zir.instructions.items(.data)[@intFromEnum(decl_inst)].declaration; const extra = zir.extraData(Zir.Inst.Declaration, inst_data.payload_index); const declaration = extra.data; - // Every Decl needs a name. 
- const decl_name: InternPool.NullTerminatedString, const kind: Zcu.Decl.Kind, const is_named_test: bool = switch (declaration.name) { + const Kind = enum { @"comptime", @"usingnamespace", @"test", named }; + + const maybe_name: InternPool.OptionalNullTerminatedString, const kind: Kind, const is_named_test: bool = switch (declaration.name) { .@"comptime" => info: { if (iter.pass != .unnamed) return; - const i = iter.comptime_index; - iter.comptime_index += 1; break :info .{ - try iter.avoidNameConflict("comptime_{d}", .{i}), + .none, .@"comptime", false, }; }, .@"usingnamespace" => info: { - // TODO: this isn't right! These should be considered unnamed. Name conflicts can happen here. - // The problem is, we need to preserve the decl ordering for `@typeInfo`. - // I'm not bothering to fix this now, since some upcoming changes will change this code significantly anyway. - if (iter.pass != .named) return; + if (iter.pass != .unnamed) return; const i = iter.usingnamespace_index; iter.usingnamespace_index += 1; break :info .{ - try iter.avoidNameConflict("usingnamespace_{d}", .{i}), + (try iter.avoidNameConflict("usingnamespace_{d}", .{i})).toOptional(), .@"usingnamespace", false, }; @@ -1883,7 +1860,7 @@ const ScanDeclIter = struct { const i = iter.unnamed_test_index; iter.unnamed_test_index += 1; break :info .{ - try iter.avoidNameConflict("test_{d}", .{i}), + (try iter.avoidNameConflict("test_{d}", .{i})).toOptional(), .@"test", false, }; @@ -1894,7 +1871,7 @@ const ScanDeclIter = struct { assert(declaration.flags.has_doc_comment); const name = zir.nullTerminatedString(@enumFromInt(zir.extra[extra.end])); break :info .{ - try iter.avoidNameConflict("decltest.{s}", .{name}), + (try iter.avoidNameConflict("decltest.{s}", .{name})).toOptional(), .@"test", true, }; @@ -1903,7 +1880,7 @@ const ScanDeclIter = struct { // We consider these to be unnamed since the decl name can be adjusted to avoid conflicts if necessary. 
if (iter.pass != .unnamed) return; break :info .{ - try iter.avoidNameConflict("test.{s}", .{zir.nullTerminatedString(declaration.name.toString(zir).?)}), + (try iter.avoidNameConflict("test.{s}", .{zir.nullTerminatedString(declaration.name.toString(zir).?)})).toOptional(), .@"test", true, }; @@ -1917,132 +1894,144 @@ const ScanDeclIter = struct { ); try iter.seen_decls.putNoClobber(gpa, name, {}); break :info .{ - name, + name.toOptional(), .named, false, }; }, }; - switch (kind) { - .@"usingnamespace" => try namespace.usingnamespace_set.ensureUnusedCapacity(gpa, 1), - .@"test" => try zcu.test_functions.ensureUnusedCapacity(gpa, 1), - else => {}, - } - - const parent_file_scope_index = iter.parent_decl.getFileScopeIndex(zcu); const tracked_inst = try ip.trackZir(gpa, pt.tid, .{ - .file = parent_file_scope_index, + .file = namespace.file_scope, .inst = decl_inst, }); - // We create a Decl for it regardless of analysis status. - - const prev_exported, const decl_index = if (iter.existing_by_inst.get(tracked_inst)) |decl_index| decl_index: { - // We need only update this existing Decl. - const decl = zcu.declPtr(decl_index); - const was_exported = decl.is_exported; - assert(decl.kind == kind); // ZIR tracking should preserve this - decl.name = decl_name; - decl.fqn = try namespace.internFullyQualifiedName(ip, gpa, pt.tid, decl_name); - decl.is_pub = declaration.flags.is_pub; - decl.is_exported = declaration.flags.is_export; - break :decl_index .{ was_exported, decl_index }; - } else decl_index: { - // Create and set up a new Decl. 
- const new_decl_index = try pt.allocateNewDecl(namespace_index); - const new_decl = zcu.declPtr(new_decl_index); - new_decl.kind = kind; - new_decl.name = decl_name; - new_decl.fqn = try namespace.internFullyQualifiedName(ip, gpa, pt.tid, decl_name); - new_decl.is_pub = declaration.flags.is_pub; - new_decl.is_exported = declaration.flags.is_export; - new_decl.zir_decl_index = tracked_inst.toOptional(); - break :decl_index .{ false, new_decl_index }; - }; - - const decl = zcu.declPtr(decl_index); - - namespace.decls.putAssumeCapacityNoClobberContext(decl_index, {}, .{ .zcu = zcu }); + const existing_cau = iter.existing_by_inst.get(tracked_inst); + + const cau, const want_analysis = switch (kind) { + .@"comptime" => cau: { + const cau = existing_cau orelse try ip.createComptimeCau(gpa, pt.tid, tracked_inst, namespace_index); + + // For a `comptime` declaration, whether to re-analyze is based solely on whether the + // `Cau` is outdated. So, add this one to `outdated` and `outdated_ready` if not already. 
+ const unit = InternPool.AnalUnit.wrap(.{ .cau = cau }); + if (zcu.potentially_outdated.fetchSwapRemove(unit)) |kv| { + try zcu.outdated.ensureUnusedCapacity(gpa, 1); + try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1); + zcu.outdated.putAssumeCapacityNoClobber(unit, kv.value); + if (kv.value == 0) { // no PO deps + zcu.outdated_ready.putAssumeCapacityNoClobber(unit, {}); + } + } else if (!zcu.outdated.contains(unit)) { + try zcu.outdated.ensureUnusedCapacity(gpa, 1); + try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1); + zcu.outdated.putAssumeCapacityNoClobber(unit, 0); + zcu.outdated_ready.putAssumeCapacityNoClobber(unit, {}); + } - const comp = zcu.comp; - const decl_mod = namespace.fileScope(zcu).mod; - const want_analysis = declaration.flags.is_export or switch (kind) { - .anon => unreachable, - .@"comptime" => true, - .@"usingnamespace" => a: { - namespace.usingnamespace_set.putAssumeCapacityNoClobber(decl_index, declaration.flags.is_pub); - break :a true; + break :cau .{ cau, true }; }, - .named => false, - .@"test" => a: { - if (!comp.config.is_test) break :a false; - if (decl_mod != zcu.main_mod) break :a false; - if (is_named_test and comp.test_filters.len > 0) { - const decl_fqn = decl.fqn.toSlice(ip); - for (comp.test_filters) |test_filter| { - if (std.mem.indexOf(u8, decl_fqn, test_filter)) |_| break; - } else break :a false; - } - zcu.test_functions.putAssumeCapacity(decl_index, {}); // may clobber on incremental update - break :a true; + else => cau: { + const name = maybe_name.unwrap().?; + const fqn = try namespace.internFullyQualifiedName(ip, gpa, pt.tid, name); + const cau, const nav = if (existing_cau) |cau_index| cau_nav: { + const nav_index = ip.getCau(cau_index).owner.unwrap().nav; + const nav = ip.getNav(nav_index); + assert(nav.name == name); + assert(nav.fqn == fqn); + break :cau_nav .{ cau_index, nav_index }; + } else try ip.createPairedCauNav(gpa, pt.tid, name, fqn, tracked_inst, namespace_index, kind == .@"usingnamespace"); + const 
want_analysis = switch (kind) { + .@"comptime" => unreachable, + .@"usingnamespace" => a: { + if (declaration.flags.is_pub) { + try namespace.pub_usingnamespace.append(gpa, nav); + } else { + try namespace.priv_usingnamespace.append(gpa, nav); + } + break :a true; + }, + .@"test" => a: { + try namespace.other_decls.append(gpa, cau); + // TODO: incremental compilation! + // * remove from `test_functions` if no longer matching filter + // * add to `test_functions` if newly passing filter + // This logic is unaware of incremental: we'll end up with duplicates. + // Perhaps we should add all test indiscriminately and filter at the end of the update. + if (!comp.config.is_test) break :a false; + if (file.mod != zcu.main_mod) break :a false; + if (is_named_test and comp.test_filters.len > 0) { + const fqn_slice = fqn.toSlice(ip); + for (comp.test_filters) |test_filter| { + if (std.mem.indexOf(u8, fqn_slice, test_filter) != null) break; + } else break :a false; + } + try zcu.test_functions.put(gpa, nav, {}); + break :a true; + }, + .named => a: { + if (declaration.flags.is_pub) { + try namespace.pub_decls.putContext(gpa, nav, {}, .{ .zcu = zcu }); + } else { + try namespace.priv_decls.putContext(gpa, nav, {}, .{ .zcu = zcu }); + } + break :a false; + }, + }; + break :cau .{ cau, want_analysis }; }, }; - if (want_analysis) { - // We will not queue analysis if the decl has been analyzed on a previous update and - // `is_export` is unchanged. In this case, the incremental update mechanism will handle - // re-analysis for us if necessary. 
- if (prev_exported != declaration.flags.is_export or decl.analysis == .unreferenced) { - log.debug("scanDecl queue analyze_decl file='{s}' decl_name='{}' decl_index={d}", .{ - namespace.fileScope(zcu).sub_file_path, decl_name.fmt(ip), decl_index, - }); - try comp.queueJob(.{ .analyze_decl = decl_index }); - } + if (want_analysis or declaration.flags.is_export) { + log.debug( + "scanDecl queue analyze_cau file='{s}' cau_index={d}", + .{ namespace.fileScope(zcu).sub_file_path, cau }, + ); + try comp.queueJob(.{ .analyze_cau = cau }); } - if (decl.getOwnedFunction(zcu) != null) { - // TODO this logic is insufficient; namespaces we don't re-scan may still require - // updated line numbers. Look into this! - // TODO Look into detecting when this would be unnecessary by storing enough state - // in `Decl` to notice that the line number did not change. - try comp.queueJob(.{ .update_line_number = decl_index }); - } + // TODO: we used to do line number updates here, but this is an inappropriate place for this logic to live. } }; -/// Cancel the creation of an anon decl and delete any references to it. -/// If other decls depend on this decl, they must be aborted first. -pub fn abortAnonDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) void { - assert(!pt.zcu.declIsRoot(decl_index)); - pt.destroyDecl(decl_index); -} - -/// Finalize the creation of an anon decl. 
-pub fn finalizeAnonDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) Allocator.Error!void { - if (pt.zcu.declPtr(decl_index).typeOf(pt.zcu).isFnOrHasRuntimeBits(pt)) { - try pt.zcu.comp.queueJob(.{ .codegen_decl = decl_index }); - } -} - -pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: Allocator) Zcu.SemaError!Air { +fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaError!Air { const tracy = trace(@src()); defer tracy.end(); - const mod = pt.zcu; - const gpa = mod.gpa; - const ip = &mod.intern_pool; - const func = mod.funcInfo(func_index); - const decl_index = func.owner_decl; - const decl = mod.declPtr(decl_index); + const zcu = pt.zcu; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + + const anal_unit = InternPool.AnalUnit.wrap(.{ .func = func_index }); + const func = zcu.funcInfo(func_index); + const inst_info = func.zir_body_inst.resolveFull(ip); + const file = zcu.fileByIndex(inst_info.file); + const zir = file.zir; + + try zcu.analysis_in_progress.put(gpa, anal_unit, {}); + errdefer _ = zcu.analysis_in_progress.swapRemove(anal_unit); + + func.setAnalysisState(ip, .analyzed); + + // This is the `Cau` corresponding to the `declaration` instruction which the function or its generic owner originates from. 
+ const decl_cau = ip.getCau(cau: { + const orig_nav = if (func.generic_owner == .none) + func.owner_nav + else + zcu.funcInfo(func.generic_owner).owner_nav; + + break :cau ip.getNav(orig_nav).analysis_owner.unwrap().?; + }); - log.debug("func name '{}'", .{decl.fqn.fmt(ip)}); - defer log.debug("finish func name '{}'", .{decl.fqn.fmt(ip)}); + const func_nav = ip.getNav(func.owner_nav); - const decl_prog_node = mod.sema_prog_node.start(decl.fqn.toSlice(ip), 0); + const decl_prog_node = zcu.sema_prog_node.start(func_nav.fqn.toSlice(ip), 0); defer decl_prog_node.end(); - mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .func = func_index })); + zcu.intern_pool.removeDependenciesForDepender(gpa, anal_unit); + + var analysis_arena = std.heap.ArenaAllocator.init(gpa); + defer analysis_arena.deinit(); var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa); defer comptime_err_ret_trace.deinit(); @@ -2052,21 +2041,19 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All // the runtime-known parameters only, not to be confused with the // generic_owner function type, which potentially has more parameters, // including comptime parameters. 
- const fn_ty = decl.typeOf(mod); - const fn_ty_info = mod.typeToFunc(fn_ty).?; + const fn_ty = Type.fromInterned(func.ty); + const fn_ty_info = zcu.typeToFunc(fn_ty).?; var sema: Sema = .{ .pt = pt, .gpa = gpa, - .arena = arena, - .code = decl.getFileScope(mod).zir, - .owner_decl = decl, - .owner_decl_index = decl_index, + .arena = analysis_arena.allocator(), + .code = zir, + .owner = anal_unit, .func_index = func_index, .func_is_naked = fn_ty_info.cc == .Naked, .fn_ret_ty = Type.fromInterned(fn_ty_info.return_type), .fn_ret_ty_ies = null, - .owner_func_index = func_index, .branch_quota = @max(func.branchQuotaUnordered(ip), Sema.default_branch_quota), .comptime_err_ret_trace = &comptime_err_ret_trace, }; @@ -2074,11 +2061,11 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All // Every runtime function has a dependency on the source of the Decl it originates from. // It also depends on the value of its owner Decl. - try sema.declareDependency(.{ .src_hash = decl.zir_decl_index.unwrap().? 
}); - try sema.declareDependency(.{ .decl_val = decl_index }); + try sema.declareDependency(.{ .src_hash = decl_cau.zir_index }); + try sema.declareDependency(.{ .nav_val = func.owner_nav }); if (func.analysisUnordered(ip).inferred_error_set) { - const ies = try arena.create(Sema.InferredErrorSet); + const ies = try analysis_arena.allocator().create(Sema.InferredErrorSet); ies.* = .{ .func = func_index }; sema.fn_ret_ty_ies = ies; } @@ -2094,19 +2081,12 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All var inner_block: Sema.Block = .{ .parent = null, .sema = &sema, - .namespace = decl.src_namespace, + .namespace = decl_cau.namespace, .instructions = .{}, .inlining = null, .is_comptime = false, - .src_base_inst = inst: { - const owner_info = if (func.generic_owner == .none) - func - else - mod.funcInfo(func.generic_owner); - const orig_decl = mod.declPtr(owner_info.owner_decl); - break :inst orig_decl.zir_decl_index.unwrap().?; - }, - .type_name_ctx = decl.name, + .src_base_inst = decl_cau.zir_index, + .type_name_ctx = func_nav.fqn, }; defer inner_block.instructions.deinit(gpa); @@ -2144,10 +2124,10 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All const gop = sema.inst_map.getOrPutAssumeCapacity(inst); if (gop.found_existing) continue; // provided above by comptime arg - const inst_info = sema.code.instructions.get(@intFromEnum(inst)); - const param_name: Zir.NullTerminatedString = switch (inst_info.tag) { - .param_anytype => inst_info.data.str_tok.start, - .param => sema.code.extraData(Zir.Inst.Param, inst_info.data.pl_tok.payload_index).data.name, + const param_inst_info = sema.code.instructions.get(@intFromEnum(inst)); + const param_name: Zir.NullTerminatedString = switch (param_inst_info.tag) { + .param_anytype => param_inst_info.data.str_tok.start, + .param => sema.code.extraData(Zir.Inst.Param, param_inst_info.data.pl_tok.payload_index).data.name, else => unreachable, }; @@ -2179,8 +2159,6 @@ 
pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All }); } - func.setAnalysisState(ip, .in_progress); - const last_arg_index = inner_block.instructions.items.len; // Save the error trace as our first action in the function. @@ -2190,9 +2168,8 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All inner_block.error_return_trace_index = error_return_trace_index; sema.analyzeFnBody(&inner_block, fn_info.body) catch |err| switch (err) { - // TODO make these unreachable instead of @panic - error.GenericPoison => @panic("zig compiler bug: GenericPoison"), - error.ComptimeReturn => @panic("zig compiler bug: ComptimeReturn"), + error.GenericPoison => unreachable, + error.ComptimeReturn => unreachable, else => |e| return e, }; @@ -2207,14 +2184,13 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All // If we don't get an error return trace from a caller, create our own. if (func.analysisUnordered(ip).calls_or_awaits_errorable_fn and - mod.comp.config.any_error_tracing and - !sema.fn_ret_ty.isError(mod)) + zcu.comp.config.any_error_tracing and + !sema.fn_ret_ty.isError(zcu)) { sema.setupErrorReturnTrace(&inner_block, last_arg_index) catch |err| switch (err) { - // TODO make these unreachable instead of @panic - error.GenericPoison => @panic("zig compiler bug: GenericPoison"), - error.ComptimeReturn => @panic("zig compiler bug: ComptimeReturn"), - error.ComptimeBreak => @panic("zig compiler bug: ComptimeBreak"), + error.GenericPoison => unreachable, + error.ComptimeReturn => unreachable, + error.ComptimeBreak => unreachable, else => |e| return e, }; } @@ -2239,35 +2215,25 @@ pub fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index, arena: All error.GenericPoison => unreachable, error.ComptimeReturn => unreachable, error.ComptimeBreak => unreachable, - error.AnalysisFail => { - // In this case our function depends on a type that had a compile error. 
- // We should not try to lower this function. - decl.analysis = .dependency_failure; - return error.AnalysisFail; - }, else => |e| return e, }; assert(ies.resolved != .none); ip.funcSetIesResolved(func_index, ies.resolved); } - func.setAnalysisState(ip, .success); + assert(zcu.analysis_in_progress.swapRemove(anal_unit)); // Finally we must resolve the return type and parameter types so that backends // have full access to type information. // Crucially, this happens *after* we set the function state to success above, // so that dependencies on the function body will now be satisfied rather than // result in circular dependency errors. + // TODO: this can go away once we fix backends having to resolve `StackTrace`. + // The codegen timing guarantees that the parameter types will be populated. sema.resolveFnTypes(fn_ty) catch |err| switch (err) { error.GenericPoison => unreachable, error.ComptimeReturn => unreachable, error.ComptimeBreak => unreachable, - error.AnalysisFail => { - // In this case our function depends on a type that had a compile error. - // We should not try to lower this function. 
- decl.analysis = .dependency_failure; - return error.AnalysisFail; - }, else => |e| return e, }; @@ -2287,36 +2253,6 @@ pub fn destroyNamespace(pt: Zcu.PerThread, namespace_index: Zcu.Namespace.Index) return pt.zcu.intern_pool.destroyNamespace(pt.tid, namespace_index); } -pub fn allocateNewDecl(pt: Zcu.PerThread, namespace: Zcu.Namespace.Index) !Zcu.Decl.Index { - const zcu = pt.zcu; - const gpa = zcu.gpa; - const decl_index = try zcu.intern_pool.createDecl(gpa, pt.tid, .{ - .name = undefined, - .fqn = undefined, - .src_namespace = namespace, - .has_tv = false, - .owns_tv = false, - .val = undefined, - .alignment = undefined, - .@"linksection" = .none, - .@"addrspace" = .generic, - .analysis = .unreferenced, - .zir_decl_index = .none, - .is_pub = false, - .is_exported = false, - .kind = .anon, - }); - - if (zcu.emit_h) |zcu_emit_h| { - if (@intFromEnum(decl_index) >= zcu_emit_h.allocated_emit_h.len) { - try zcu_emit_h.allocated_emit_h.append(gpa, .{}); - assert(@intFromEnum(decl_index) == zcu_emit_h.allocated_emit_h.len); - } - } - - return decl_index; -} - pub fn getErrorValue( pt: Zcu.PerThread, name: InternPool.NullTerminatedString, @@ -2328,25 +2264,6 @@ pub fn getErrorValueFromSlice(pt: Zcu.PerThread, name: []const u8) Allocator.Err return pt.getErrorValue(try pt.zcu.intern_pool.getOrPutString(pt.zcu.gpa, name)); } -pub fn initNewAnonDecl( - pt: Zcu.PerThread, - new_decl_index: Zcu.Decl.Index, - val: Value, - name: InternPool.NullTerminatedString, - fqn: InternPool.OptionalNullTerminatedString, -) Allocator.Error!void { - const new_decl = pt.zcu.declPtr(new_decl_index); - - new_decl.name = name; - new_decl.fqn = fqn.unwrap() orelse try pt.zcu.namespacePtr(new_decl.src_namespace) - .internFullyQualifiedName(&pt.zcu.intern_pool, pt.zcu.gpa, pt.tid, name); - new_decl.val = val; - new_decl.alignment = .none; - new_decl.@"linksection" = .none; - new_decl.has_tv = true; - new_decl.analysis = .complete; -} - fn lockAndClearFileCompileError(pt: Zcu.PerThread, file: 
*Zcu.File) void { switch (file.status) { .success_zir, .retryable_failure => {}, @@ -2367,35 +2284,35 @@ pub fn processExports(pt: Zcu.PerThread) !void { const zcu = pt.zcu; const gpa = zcu.gpa; - // First, construct a mapping of every exported value and Decl to the indices of all its different exports. - var decl_exports: std.AutoArrayHashMapUnmanaged(Zcu.Decl.Index, std.ArrayListUnmanaged(u32)) = .{}; - var value_exports: std.AutoArrayHashMapUnmanaged(InternPool.Index, std.ArrayListUnmanaged(u32)) = .{}; + // First, construct a mapping of every exported value and Nav to the indices of all its different exports. + var nav_exports: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, std.ArrayListUnmanaged(u32)) = .{}; + var uav_exports: std.AutoArrayHashMapUnmanaged(InternPool.Index, std.ArrayListUnmanaged(u32)) = .{}; defer { - for (decl_exports.values()) |*exports| { + for (nav_exports.values()) |*exports| { exports.deinit(gpa); } - decl_exports.deinit(gpa); - for (value_exports.values()) |*exports| { + nav_exports.deinit(gpa); + for (uav_exports.values()) |*exports| { exports.deinit(gpa); } - value_exports.deinit(gpa); + uav_exports.deinit(gpa); } // We note as a heuristic: // * It is rare to export a value. - // * It is rare for one Decl to be exported multiple times. + // * It is rare for one Nav to be exported multiple times. // So, this ensureTotalCapacity serves as a reasonable (albeit very approximate) optimization. 
- try decl_exports.ensureTotalCapacity(gpa, zcu.single_exports.count() + zcu.multi_exports.count()); + try nav_exports.ensureTotalCapacity(gpa, zcu.single_exports.count() + zcu.multi_exports.count()); for (zcu.single_exports.values()) |export_idx| { const exp = zcu.all_exports.items[export_idx]; const value_ptr, const found_existing = switch (exp.exported) { - .decl_index => |i| gop: { - const gop = try decl_exports.getOrPut(gpa, i); + .nav => |nav| gop: { + const gop = try nav_exports.getOrPut(gpa, nav); break :gop .{ gop.value_ptr, gop.found_existing }; }, - .value => |i| gop: { - const gop = try value_exports.getOrPut(gpa, i); + .uav => |uav| gop: { + const gop = try uav_exports.getOrPut(gpa, uav); break :gop .{ gop.value_ptr, gop.found_existing }; }, }; @@ -2406,12 +2323,12 @@ pub fn processExports(pt: Zcu.PerThread) !void { for (zcu.multi_exports.values()) |info| { for (zcu.all_exports.items[info.index..][0..info.len], info.index..) |exp, export_idx| { const value_ptr, const found_existing = switch (exp.exported) { - .decl_index => |i| gop: { - const gop = try decl_exports.getOrPut(gpa, i); + .nav => |nav| gop: { + const gop = try nav_exports.getOrPut(gpa, nav); break :gop .{ gop.value_ptr, gop.found_existing }; }, - .value => |i| gop: { - const gop = try value_exports.getOrPut(gpa, i); + .uav => |uav| gop: { + const gop = try uav_exports.getOrPut(gpa, uav); break :gop .{ gop.value_ptr, gop.found_existing }; }, }; @@ -2424,13 +2341,13 @@ pub fn processExports(pt: Zcu.PerThread) !void { var symbol_exports: SymbolExports = .{}; defer symbol_exports.deinit(gpa); - for (decl_exports.keys(), decl_exports.values()) |exported_decl, exports_list| { - const exported: Zcu.Exported = .{ .decl_index = exported_decl }; + for (nav_exports.keys(), nav_exports.values()) |exported_nav, exports_list| { + const exported: Zcu.Exported = .{ .nav = exported_nav }; try pt.processExportsInner(&symbol_exports, exported, exports_list.items); } - for (value_exports.keys(), 
value_exports.values()) |exported_value, exports_list| { - const exported: Zcu.Exported = .{ .value = exported_value }; + for (uav_exports.keys(), uav_exports.values()) |exported_uav, exports_list| { + const exported: Zcu.Exported = .{ .uav = exported_uav }; try pt.processExportsInner(&symbol_exports, exported, exports_list.items); } } @@ -2467,20 +2384,31 @@ fn processExportsInner( } switch (exported) { - .decl_index => |idx| if (failed: { - const decl = zcu.declPtr(idx); - if (decl.analysis != .complete) break :failed true; - // Check if has owned function - if (!decl.owns_tv) break :failed false; - if (decl.typeOf(zcu).zigTypeTag(zcu) != .Fn) break :failed false; - // Check if owned function failed - break :failed zcu.funcInfo(decl.val.toIntern()).analysisUnordered(ip).state != .success; + .nav => |nav_index| if (failed: { + const nav = ip.getNav(nav_index); + if (zcu.failed_codegen.contains(nav_index)) break :failed true; + if (nav.analysis_owner.unwrap()) |cau| { + const cau_unit = InternPool.AnalUnit.wrap(.{ .cau = cau }); + if (zcu.failed_analysis.contains(cau_unit)) break :failed true; + if (zcu.transitive_failed_analysis.contains(cau_unit)) break :failed true; + } + const val = switch (nav.status) { + .unresolved => break :failed true, + .resolved => |r| Value.fromInterned(r.val), + }; + // If the value is a function, we also need to check if that function succeeded analysis. + if (val.typeOf(zcu).zigTypeTag(zcu) == .Fn) { + const func_unit = InternPool.AnalUnit.wrap(.{ .func = val.toIntern() }); + if (zcu.failed_analysis.contains(func_unit)) break :failed true; + if (zcu.transitive_failed_analysis.contains(func_unit)) break :failed true; + } + break :failed false; }) { // This `Decl` is failed, so was never sent to codegen. // TODO: we should probably tell the backend to delete any old exports of this `Decl`? 
return; }, - .value => {}, + .uav => {}, } if (zcu.comp.bin_file) |lf| { @@ -2499,46 +2427,49 @@ pub fn populateTestFunctions( const ip = &zcu.intern_pool; const builtin_mod = zcu.root_mod.getBuiltinDependency(); const builtin_file_index = (pt.importPkg(builtin_mod) catch unreachable).file_index; - const root_decl_index = zcu.fileRootDecl(builtin_file_index); - const root_decl = zcu.declPtr(root_decl_index.unwrap().?); - const builtin_namespace = zcu.namespacePtr(root_decl.src_namespace); - const test_functions_str = try ip.getOrPutString(gpa, pt.tid, "test_functions", .no_embedded_nulls); - const decl_index = builtin_namespace.decls.getKeyAdapted( - test_functions_str, - Zcu.DeclAdapter{ .zcu = zcu }, + pt.ensureFileAnalyzed(builtin_file_index) catch |err| switch (err) { + error.AnalysisFail => unreachable, // builtin module is generated so cannot be corrupt + error.OutOfMemory => |e| return e, + }; + const builtin_root_type = Type.fromInterned(zcu.fileRootType(builtin_file_index)); + const builtin_namespace = builtin_root_type.getNamespace(zcu).?.unwrap().?; + const nav_index = zcu.namespacePtr(builtin_namespace).pub_decls.getKeyAdapted( + try ip.getOrPutString(gpa, pt.tid, "test_functions", .no_embedded_nulls), + Zcu.Namespace.NameAdapter{ .zcu = zcu }, ).?; { - // We have to call `ensureDeclAnalyzed` here in case `builtin.test_functions` + // We have to call `ensureCauAnalyzed` here in case `builtin.test_functions` // was not referenced by start code. 
zcu.sema_prog_node = main_progress_node.start("Semantic Analysis", 0); defer { zcu.sema_prog_node.end(); zcu.sema_prog_node = std.Progress.Node.none; } - try pt.ensureDeclAnalyzed(decl_index); + const cau_index = ip.getNav(nav_index).analysis_owner.unwrap().?; + try pt.ensureCauAnalyzed(cau_index); } - const decl = zcu.declPtr(decl_index); - const test_fn_ty = decl.typeOf(zcu).slicePtrFieldType(zcu).childType(zcu); + const test_fns_val = zcu.navValue(nav_index); + const test_fn_ty = test_fns_val.typeOf(zcu).slicePtrFieldType(zcu).childType(zcu); - const array_anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl = array: { + const array_anon_decl: InternPool.Key.Ptr.BaseAddr.Uav = array: { // Add zcu.test_functions to an array decl then make the test_functions // decl reference it as a slice. const test_fn_vals = try gpa.alloc(InternPool.Index, zcu.test_functions.count()); defer gpa.free(test_fn_vals); - for (test_fn_vals, zcu.test_functions.keys()) |*test_fn_val, test_decl_index| { - const test_decl = zcu.declPtr(test_decl_index); - const test_decl_name = test_decl.fqn; - const test_decl_name_len = test_decl_name.length(ip); - const test_name_anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl = n: { + for (test_fn_vals, zcu.test_functions.keys()) |*test_fn_val, test_nav_index| { + const test_nav = ip.getNav(test_nav_index); + const test_nav_name = test_nav.fqn; + const test_nav_name_len = test_nav_name.length(ip); + const test_name_anon_decl: InternPool.Key.Ptr.BaseAddr.Uav = n: { const test_name_ty = try pt.arrayType(.{ - .len = test_decl_name_len, + .len = test_nav_name_len, .child = .u8_type, }); const test_name_val = try pt.intern(.{ .aggregate = .{ .ty = test_name_ty.toIntern(), - .storage = .{ .bytes = test_decl_name.toString() }, + .storage = .{ .bytes = test_nav_name.toString() }, } }); break :n .{ .orig_ty = (try pt.singleConstPtrType(test_name_ty)).toIntern(), @@ -2552,23 +2483,18 @@ pub fn populateTestFunctions( .ty = .slice_const_u8_type, .ptr = try 
pt.intern(.{ .ptr = .{ .ty = .manyptr_const_u8_type, - .base_addr = .{ .anon_decl = test_name_anon_decl }, + .base_addr = .{ .uav = test_name_anon_decl }, .byte_offset = 0, } }), .len = try pt.intern(.{ .int = .{ .ty = .usize_type, - .storage = .{ .u64 = test_decl_name_len }, + .storage = .{ .u64 = test_nav_name_len }, } }), } }), // func try pt.intern(.{ .ptr = .{ - .ty = try pt.intern(.{ .ptr_type = .{ - .child = test_decl.typeOf(zcu).toIntern(), - .flags = .{ - .is_const = true, - }, - } }), - .base_addr = .{ .decl = test_decl_index }, + .ty = (try pt.navPtrType(test_nav_index)).toIntern(), + .base_addr = .{ .nav = test_nav_index }, .byte_offset = 0, } }), }; @@ -2601,22 +2527,16 @@ pub fn populateTestFunctions( .size = .Slice, }, }); - const new_val = decl.val; const new_init = try pt.intern(.{ .slice = .{ .ty = new_ty.toIntern(), .ptr = try pt.intern(.{ .ptr = .{ .ty = new_ty.slicePtrFieldType(zcu).toIntern(), - .base_addr = .{ .anon_decl = array_anon_decl }, + .base_addr = .{ .uav = array_anon_decl }, .byte_offset = 0, } }), .len = (try pt.intValue(Type.usize, zcu.test_functions.count())).toIntern(), } }); - ip.mutateVarInit(decl.val.toIntern(), new_init); - - // Since we are replacing the Decl's value we must perform cleanup on the - // previous value. 
- decl.val = new_val; - decl.has_tv = true; + ip.mutateVarInit(test_fns_val.toIntern(), new_init); } { zcu.codegen_prog_node = main_progress_node.start("Code Generation", 0); @@ -2625,40 +2545,45 @@ pub fn populateTestFunctions( zcu.codegen_prog_node = std.Progress.Node.none; } - try pt.linkerUpdateDecl(decl_index); + try pt.linkerUpdateNav(nav_index); } } -pub fn linkerUpdateDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !void { +pub fn linkerUpdateNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void { const zcu = pt.zcu; const comp = zcu.comp; - const decl = zcu.declPtr(decl_index); - - const codegen_prog_node = zcu.codegen_prog_node.start(decl.fqn.toSlice(&zcu.intern_pool), 0); + const nav = zcu.intern_pool.getNav(nav_index); + const codegen_prog_node = zcu.codegen_prog_node.start(nav.fqn.toSlice(&zcu.intern_pool), 0); defer codegen_prog_node.end(); if (comp.bin_file) |lf| { - lf.updateDecl(pt, decl_index) catch |err| switch (err) { + lf.updateNav(pt, nav_index) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => { - decl.analysis = .codegen_failure; + assert(zcu.failed_codegen.contains(nav_index)); }, else => { const gpa = zcu.gpa; - try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); - zcu.failed_analysis.putAssumeCapacityNoClobber(InternPool.AnalUnit.wrap(.{ .decl = decl_index }), try Zcu.ErrorMsg.create( + try zcu.failed_codegen.ensureUnusedCapacity(gpa, 1); + zcu.failed_codegen.putAssumeCapacityNoClobber(nav_index, try Zcu.ErrorMsg.create( gpa, - decl.navSrcLoc(zcu), + zcu.navSrcLoc(nav_index), "unable to codegen: {s}", .{@errorName(err)}, )); - decl.analysis = .codegen_failure; - try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index })); + if (nav.analysis_owner.unwrap()) |cau| { + try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalUnit.wrap(.{ .cau = cau })); + } else { + // TODO: we don't have a way to indicate that this failure is retryable! 
+ // Since these are really rare, we could as a cop-out retry the whole build next update. + // But perhaps we can do better... + @panic("TODO: retryable failure codegenning non-declaration Nav"); + } }, }; } else if (zcu.llvm_object) |llvm_object| { - llvm_object.updateDecl(pt, decl_index) catch |err| switch (err) { + llvm_object.updateNav(pt, nav_index) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, }; } @@ -2750,9 +2675,30 @@ pub fn intern(pt: Zcu.PerThread, key: InternPool.Key) Allocator.Error!InternPool return pt.zcu.intern_pool.get(pt.zcu.gpa, pt.tid, key); } -/// Shortcut for calling `intern_pool.getCoerced`. +/// Essentially a shortcut for calling `intern_pool.getCoerced`. +/// However, this function also allows coercing `extern`s. The `InternPool` function can't do +/// this because it requires potentially pushing to the job queue. pub fn getCoerced(pt: Zcu.PerThread, val: Value, new_ty: Type) Allocator.Error!Value { - return Value.fromInterned(try pt.zcu.intern_pool.getCoerced(pt.zcu.gpa, pt.tid, val.toIntern(), new_ty.toIntern())); + const ip = &pt.zcu.intern_pool; + switch (ip.indexToKey(val.toIntern())) { + .@"extern" => |e| { + const coerced = try pt.getExtern(.{ + .name = e.name, + .ty = new_ty.toIntern(), + .lib_name = e.lib_name, + .is_const = e.is_const, + .is_threadlocal = e.is_threadlocal, + .is_weak_linkage = e.is_weak_linkage, + .alignment = e.alignment, + .@"addrspace" = e.@"addrspace", + .zir_index = e.zir_index, + .owner_nav = undefined, // ignored by `getExtern`. 
+ }); + return Value.fromInterned(coerced); + }, + else => {}, + } + return Value.fromInterned(try ip.getCoerced(pt.zcu.gpa, pt.tid, val.toIntern(), new_ty.toIntern())); } pub fn intType(pt: Zcu.PerThread, signedness: std.builtin.Signedness, bits: u16) Allocator.Error!Type { @@ -3237,24 +3183,29 @@ pub fn structPackedFieldBitOffset( } pub fn getBuiltin(pt: Zcu.PerThread, name: []const u8) Allocator.Error!Air.Inst.Ref { - const decl_index = try pt.getBuiltinDecl(name); - pt.ensureDeclAnalyzed(decl_index) catch @panic("std.builtin is corrupt"); - return Air.internedToRef(pt.zcu.declPtr(decl_index).val.toIntern()); + const zcu = pt.zcu; + const ip = &zcu.intern_pool; + const nav = try pt.getBuiltinNav(name); + pt.ensureCauAnalyzed(ip.getNav(nav).analysis_owner.unwrap().?) catch @panic("std.builtin is corrupt"); + return Air.internedToRef(ip.getNav(nav).status.resolved.val); } -pub fn getBuiltinDecl(pt: Zcu.PerThread, name: []const u8) Allocator.Error!InternPool.DeclIndex { +pub fn getBuiltinNav(pt: Zcu.PerThread, name: []const u8) Allocator.Error!InternPool.Nav.Index { const zcu = pt.zcu; const gpa = zcu.gpa; const ip = &zcu.intern_pool; const std_file_imported = pt.importPkg(zcu.std_mod) catch @panic("failed to import lib/std.zig"); - const std_file_root_decl = zcu.fileRootDecl(std_file_imported.file_index).unwrap().?; - const std_namespace = zcu.declPtr(std_file_root_decl).getOwnedInnerNamespace(zcu).?; + const std_type = Type.fromInterned(zcu.fileRootType(std_file_imported.file_index)); + const std_namespace = zcu.namespacePtr(std_type.getNamespace(zcu).?.unwrap().?); const builtin_str = try ip.getOrPutString(gpa, pt.tid, "builtin", .no_embedded_nulls); - const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std.zig is corrupt and missing 'builtin'"); - pt.ensureDeclAnalyzed(builtin_decl) catch @panic("std.builtin is corrupt"); - const builtin_namespace = 
zcu.declPtr(builtin_decl).getInnerNamespace(zcu) orelse @panic("std.builtin is corrupt"); + const builtin_nav = std_namespace.pub_decls.getKeyAdapted(builtin_str, Zcu.Namespace.NameAdapter{ .zcu = zcu }) orelse + @panic("lib/std.zig is corrupt and missing 'builtin'"); + pt.ensureCauAnalyzed(ip.getNav(builtin_nav).analysis_owner.unwrap().?) catch @panic("std.builtin is corrupt"); + const builtin_type = Type.fromInterned(ip.getNav(builtin_nav).status.resolved.val); + const builtin_namespace_index = (if (builtin_type.getNamespace(zcu)) |n| n.unwrap() else null) orelse @panic("std.builtin is corrupt"); + const builtin_namespace = zcu.namespacePtr(builtin_namespace_index); const name_str = try ip.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls); - return builtin_namespace.decls.getKeyAdapted(name_str, Zcu.DeclAdapter{ .zcu = zcu }) orelse @panic("lib/std/builtin.zig is corrupt"); + return builtin_namespace.pub_decls.getKeyAdapted(name_str, Zcu.Namespace.NameAdapter{ .zcu = zcu }) orelse @panic("lib/std/builtin.zig is corrupt"); } pub fn getBuiltinType(pt: Zcu.PerThread, name: []const u8) Allocator.Error!Type { @@ -3264,6 +3215,47 @@ pub fn getBuiltinType(pt: Zcu.PerThread, name: []const u8) Allocator.Error!Type return ty; } +pub fn navPtrType(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) Allocator.Error!Type { + const zcu = pt.zcu; + const ip = &zcu.intern_pool; + const r = ip.getNav(nav_index).status.resolved; + const ty = Value.fromInterned(r.val).typeOf(zcu); + return pt.ptrType(.{ + .child = ty.toIntern(), + .flags = .{ + .alignment = if (r.alignment == ty.abiAlignment(pt)) + .none + else + r.alignment, + .address_space = r.@"addrspace", + .is_const = switch (ip.indexToKey(r.val)) { + .variable => false, + .@"extern" => |e| e.is_const, + else => true, + }, + }, + }); +} + +/// Intern an `.@"extern"`, creating a corresponding owner `Nav` if necessary. +/// If necessary, the new `Nav` is queued for codegen. 
+/// `key.owner_nav` is ignored and may be `undefined`. +pub fn getExtern(pt: Zcu.PerThread, key: InternPool.Key.Extern) Allocator.Error!InternPool.Index { + const result = try pt.zcu.intern_pool.getExtern(pt.zcu.gpa, pt.tid, key); + if (result.new_nav.unwrap()) |nav| { + try pt.zcu.comp.queueJob(.{ .codegen_nav = nav }); + } + return result.index; +} + +// TODO: this shouldn't need a `PerThread`! Fix the signature of `Type.abiAlignment`. +pub fn navAlignment(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) InternPool.Alignment { + const zcu = pt.zcu; + const r = zcu.intern_pool.getNav(nav_index).status.resolved; + if (r.alignment != .none) return r.alignment; + return Value.fromInterned(r.val).typeOf(zcu).abiAlignment(pt); +} + const Air = @import("../Air.zig"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 695bd4211b3b..2810b6b52189 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -52,7 +52,7 @@ bin_file: *link.File, debug_output: DebugInfoOutput, target: *const std.Target, func_index: InternPool.Index, -owner_decl: InternPool.DeclIndex, +owner_nav: InternPool.Nav.Index, err_msg: ?*ErrorMsg, args: []MCValue, ret_mcv: MCValue, @@ -184,7 +184,7 @@ const DbgInfoReloc = struct { fn genArgDbgInfo(reloc: DbgInfoReloc, function: Self) error{OutOfMemory}!void { switch (function.debug_output) { .dwarf => |dw| { - const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (reloc.mcv) { + const loc: link.File.Dwarf.NavState.DbgInfoLoc = switch (reloc.mcv) { .register => |reg| .{ .register = reg.dwarfLocOp() }, .stack_offset, .stack_argument_offset, @@ -202,7 +202,7 @@ const DbgInfoReloc = struct { else => unreachable, // not a possible argument }; - try dw.genArgDbgInfo(reloc.name, reloc.ty, function.owner_decl, loc); + try dw.genArgDbgInfo(reloc.name, reloc.ty, function.owner_nav, loc); }, .plan9 => {}, .none => {}, @@ -218,7 +218,7 @@ 
const DbgInfoReloc = struct { switch (function.debug_output) { .dwarf => |dw| { - const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (reloc.mcv) { + const loc: link.File.Dwarf.NavState.DbgInfoLoc = switch (reloc.mcv) { .register => |reg| .{ .register = reg.dwarfLocOp() }, .ptr_stack_offset, .stack_offset, @@ -248,7 +248,7 @@ const DbgInfoReloc = struct { break :blk .nop; }, }; - try dw.genVarDbgInfo(reloc.name, reloc.ty, function.owner_decl, is_ptr, loc); + try dw.genVarDbgInfo(reloc.name, reloc.ty, function.owner_nav, is_ptr, loc); }, .plan9 => {}, .none => {}, @@ -341,11 +341,9 @@ pub fn generate( const zcu = pt.zcu; const gpa = zcu.gpa; const func = zcu.funcInfo(func_index); - const fn_owner_decl = zcu.declPtr(func.owner_decl); - assert(fn_owner_decl.has_tv); - const fn_type = fn_owner_decl.typeOf(zcu); - const namespace = zcu.namespacePtr(fn_owner_decl.src_namespace); - const target = &namespace.fileScope(zcu).mod.resolved_target.result; + const fn_type = Type.fromInterned(func.ty); + const file_scope = zcu.navFileScope(func.owner_nav); + const target = &file_scope.mod.resolved_target.result; var branch_stack = std.ArrayList(Branch).init(gpa); defer { @@ -364,7 +362,7 @@ pub fn generate( .target = target, .bin_file = lf, .func_index = func_index, - .owner_decl = func.owner_decl, + .owner_nav = func.owner_nav, .err_msg = null, .args = undefined, // populated after `resolveCallingConventionValues` .ret_mcv = undefined, // populated after `resolveCallingConventionValues` @@ -4053,8 +4051,8 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type @panic("TODO store"); }, .coff => blk: { - const coff_file = self.bin_file.cast(link.File.Coff).?; - const atom = try coff_file.getOrCreateAtomForDecl(self.owner_decl); + const coff_file = self.bin_file.cast(.coff).?; + const atom = try coff_file.getOrCreateAtomForNav(self.owner_nav); break :blk coff_file.getAtom(atom).getSymbolIndex().?; }, else => unreachable, // unsupported target format 
@@ -4289,6 +4287,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const ty = self.typeOf(callee); const pt = self.pt; const mod = pt.zcu; + const ip = &mod.intern_pool; const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, @@ -4351,19 +4350,19 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. - if (try self.air.value(callee, pt)) |func_value| { - if (func_value.getFunction(mod)) |func| { - if (self.bin_file.cast(link.File.Elf)) |elf_file| { + if (try self.air.value(callee, pt)) |func_value| switch (ip.indexToKey(func_value.toIntern())) { + .func => |func| { + if (self.bin_file.cast(.elf)) |elf_file| { const zo = elf_file.zigObjectPtr().?; - const sym_index = try zo.getOrCreateMetadataForDecl(elf_file, func.owner_decl); + const sym_index = try zo.getOrCreateMetadataForNav(elf_file, func.owner_nav); const sym = zo.symbol(sym_index); _ = try sym.getOrCreateZigGotEntry(sym_index, elf_file); const got_addr = @as(u32, @intCast(sym.zigGotAddress(elf_file))); try self.genSetReg(Type.usize, .x30, .{ .memory = got_addr }); - } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { + } else if (self.bin_file.cast(.macho)) |macho_file| { _ = macho_file; @panic("TODO airCall"); - // const atom = try macho_file.getOrCreateAtomForDecl(func.owner_decl); + // const atom = try macho_file.getOrCreateAtomForNav(func.owner_nav); // const sym_index = macho_file.getAtom(atom).getSymbolIndex().?; // try self.genSetReg(Type.u64, .x30, .{ // .linker_load = .{ @@ -4371,8 +4370,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // .sym_index = sym_index, // }, // }); - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { - const atom = try coff_file.getOrCreateAtomForDecl(func.owner_decl); + } else if (self.bin_file.cast(.coff)) |coff_file| { + const atom = try 
coff_file.getOrCreateAtomForNav(func.owner_nav); const sym_index = coff_file.getAtom(atom).getSymbolIndex().?; try self.genSetReg(Type.u64, .x30, .{ .linker_load = .{ @@ -4380,8 +4379,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .sym_index = sym_index, }, }); - } else if (self.bin_file.cast(link.File.Plan9)) |p9| { - const atom_index = try p9.seeDecl(func.owner_decl); + } else if (self.bin_file.cast(.plan9)) |p9| { + const atom_index = try p9.seeNav(pt, func.owner_nav); const atom = p9.getAtom(atom_index); try self.genSetReg(Type.usize, .x30, .{ .memory = atom.getOffsetTableAddress(p9) }); } else unreachable; @@ -4390,14 +4389,15 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .tag = .blr, .data = .{ .reg = .x30 }, }); - } else if (func_value.getExternFunc(mod)) |extern_func| { - const decl_name = mod.declPtr(extern_func.decl).name.toSlice(&mod.intern_pool); - const lib_name = extern_func.lib_name.toSlice(&mod.intern_pool); - if (self.bin_file.cast(link.File.MachO)) |macho_file| { + }, + .@"extern" => |@"extern"| { + const nav_name = ip.getNav(@"extern".owner_nav).name.toSlice(ip); + const lib_name = @"extern".lib_name.toSlice(ip); + if (self.bin_file.cast(.macho)) |macho_file| { _ = macho_file; @panic("TODO airCall"); - // const sym_index = try macho_file.getGlobalSymbol(decl_name, lib_name); - // const atom = try macho_file.getOrCreateAtomForDecl(self.owner_decl); + // const sym_index = try macho_file.getGlobalSymbol(nav_name, lib_name); + // const atom = try macho_file.getOrCreateAtomForNav(self.owner_nav); // const atom_index = macho_file.getAtom(atom).getSymbolIndex().?; // _ = try self.addInst(.{ // .tag = .call_extern, @@ -4408,8 +4408,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // }, // }, // }); - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { - const sym_index = try coff_file.getGlobalSymbol(decl_name, lib_name); + } else if 
(self.bin_file.cast(.coff)) |coff_file| { + const sym_index = try coff_file.getGlobalSymbol(nav_name, lib_name); try self.genSetReg(Type.u64, .x30, .{ .linker_load = .{ .type = .import, @@ -4423,9 +4423,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } else { return self.fail("TODO implement calling extern functions", .{}); } - } else { - return self.fail("TODO implement calling bitcasted functions", .{}); - } + }, + else => return self.fail("TODO implement calling bitcasted functions", .{}), } else { assert(ty.zigTypeTag(mod) == .Pointer); const mcv = try self.resolveInst(callee); @@ -5594,8 +5593,8 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro @panic("TODO genSetStack"); }, .coff => blk: { - const coff_file = self.bin_file.cast(link.File.Coff).?; - const atom = try coff_file.getOrCreateAtomForDecl(self.owner_decl); + const coff_file = self.bin_file.cast(.coff).?; + const atom = try coff_file.getOrCreateAtomForNav(self.owner_nav); break :blk coff_file.getAtom(atom).getSymbolIndex().?; }, else => unreachable, // unsupported target format @@ -5717,8 +5716,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void // break :blk macho_file.getAtom(atom).getSymbolIndex().?; }, .coff => blk: { - const coff_file = self.bin_file.cast(link.File.Coff).?; - const atom = try coff_file.getOrCreateAtomForDecl(self.owner_decl); + const coff_file = self.bin_file.cast(.coff).?; + const atom = try coff_file.getOrCreateAtomForNav(self.owner_nav); break :blk coff_file.getAtom(atom).getSymbolIndex().?; }, else => unreachable, // unsupported target format @@ -5915,8 +5914,8 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I // break :blk macho_file.getAtom(atom).getSymbolIndex().?; }, .coff => blk: { - const coff_file = self.bin_file.cast(link.File.Coff).?; - const atom = try coff_file.getOrCreateAtomForDecl(self.owner_decl); + const coff_file = 
self.bin_file.cast(.coff).?; + const atom = try coff_file.getOrCreateAtomForNav(self.owner_nav); break :blk coff_file.getAtom(atom).getSymbolIndex().?; }, else => unreachable, // unsupported target format @@ -6226,7 +6225,7 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue { self.pt, self.src_loc, val, - self.owner_decl, + self.target.*, )) { .mcv => |mcv| switch (mcv) { .none => .none, diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig index 7010267e179c..641a6fd920a3 100644 --- a/src/arch/aarch64/Emit.zig +++ b/src/arch/aarch64/Emit.zig @@ -687,7 +687,7 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void { }; _ = offset; - if (emit.bin_file.cast(link.File.MachO)) |macho_file| { + if (emit.bin_file.cast(.macho)) |macho_file| { _ = macho_file; @panic("TODO mirCallExtern"); // // Add relocation to the decl. @@ -701,7 +701,7 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void { // .pcrel = true, // .length = 2, // }); - } else if (emit.bin_file.cast(link.File.Coff)) |_| { + } else if (emit.bin_file.cast(.coff)) |_| { unreachable; // Calling imports is handled via `.load_memory_import` } else { return emit.fail("Implement call_extern for linking backends != {{ COFF, MachO }}", .{}); @@ -903,7 +903,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void { else => unreachable, } - if (emit.bin_file.cast(link.File.MachO)) |macho_file| { + if (emit.bin_file.cast(.macho)) |macho_file| { _ = macho_file; @panic("TODO mirLoadMemoryPie"); // const Atom = link.File.MachO.Atom; @@ -932,7 +932,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void { // else => unreachable, // }, // } }); - } else if (emit.bin_file.cast(link.File.Coff)) |coff_file| { + } else if (emit.bin_file.cast(.coff)) |coff_file| { const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = data.atom_index, .file = null }).?; const target = switch (tag) { .load_memory_got, diff --git a/src/arch/arm/CodeGen.zig 
b/src/arch/arm/CodeGen.zig index 19bef2f99180..e4d106921ed4 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -262,7 +262,7 @@ const DbgInfoReloc = struct { fn genArgDbgInfo(reloc: DbgInfoReloc, function: Self) error{OutOfMemory}!void { switch (function.debug_output) { .dwarf => |dw| { - const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (reloc.mcv) { + const loc: link.File.Dwarf.NavState.DbgInfoLoc = switch (reloc.mcv) { .register => |reg| .{ .register = reg.dwarfLocOp() }, .stack_offset, .stack_argument_offset, @@ -280,7 +280,7 @@ const DbgInfoReloc = struct { else => unreachable, // not a possible argument }; - try dw.genArgDbgInfo(reloc.name, reloc.ty, function.pt.zcu.funcOwnerDeclIndex(function.func_index), loc); + try dw.genArgDbgInfo(reloc.name, reloc.ty, function.pt.zcu.funcInfo(function.func_index).owner_nav, loc); }, .plan9 => {}, .none => {}, @@ -296,7 +296,7 @@ const DbgInfoReloc = struct { switch (function.debug_output) { .dwarf => |dw| { - const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (reloc.mcv) { + const loc: link.File.Dwarf.NavState.DbgInfoLoc = switch (reloc.mcv) { .register => |reg| .{ .register = reg.dwarfLocOp() }, .ptr_stack_offset, .stack_offset, @@ -323,7 +323,7 @@ const DbgInfoReloc = struct { break :blk .nop; }, }; - try dw.genVarDbgInfo(reloc.name, reloc.ty, function.pt.zcu.funcOwnerDeclIndex(function.func_index), is_ptr, loc); + try dw.genVarDbgInfo(reloc.name, reloc.ty, function.pt.zcu.funcInfo(function.func_index).owner_nav, is_ptr, loc); }, .plan9 => {}, .none => {}, @@ -346,11 +346,9 @@ pub fn generate( const zcu = pt.zcu; const gpa = zcu.gpa; const func = zcu.funcInfo(func_index); - const fn_owner_decl = zcu.declPtr(func.owner_decl); - assert(fn_owner_decl.has_tv); - const fn_type = fn_owner_decl.typeOf(zcu); - const namespace = zcu.namespacePtr(fn_owner_decl.src_namespace); - const target = &namespace.fileScope(zcu).mod.resolved_target.result; + const func_ty = Type.fromInterned(func.ty); + 
const file_scope = zcu.navFileScope(func.owner_nav); + const target = &file_scope.mod.resolved_target.result; var branch_stack = std.ArrayList(Branch).init(gpa); defer { @@ -372,7 +370,7 @@ pub fn generate( .err_msg = null, .args = undefined, // populated after `resolveCallingConventionValues` .ret_mcv = undefined, // populated after `resolveCallingConventionValues` - .fn_type = fn_type, + .fn_type = func_ty, .arg_index = 0, .branch_stack = &branch_stack, .src_loc = src_loc, @@ -385,7 +383,7 @@ pub fn generate( defer function.exitlude_jump_relocs.deinit(gpa); defer function.dbg_info_relocs.deinit(gpa); - var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) { + var call_info = function.resolveCallingConventionValues(func_ty) catch |err| switch (err) { error.CodegenFail => return Result{ .fail = function.err_msg.? }, error.OutOfRegisters => return Result{ .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), @@ -4264,6 +4262,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const ty = self.typeOf(callee); const pt = self.pt; const mod = pt.zcu; + const ip = &mod.intern_pool; const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, @@ -4333,16 +4332,16 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. 
- if (try self.air.value(callee, pt)) |func_value| { - if (func_value.getFunction(mod)) |func| { - if (self.bin_file.cast(link.File.Elf)) |elf_file| { + if (try self.air.value(callee, pt)) |func_value| switch (ip.indexToKey(func_value.toIntern())) { + .func => |func| { + if (self.bin_file.cast(.elf)) |elf_file| { const zo = elf_file.zigObjectPtr().?; - const sym_index = try zo.getOrCreateMetadataForDecl(elf_file, func.owner_decl); + const sym_index = try zo.getOrCreateMetadataForNav(elf_file, func.owner_nav); const sym = zo.symbol(sym_index); _ = try sym.getOrCreateZigGotEntry(sym_index, elf_file); const got_addr: u32 = @intCast(sym.zigGotAddress(elf_file)); try self.genSetReg(Type.usize, .lr, .{ .memory = got_addr }); - } else if (self.bin_file.cast(link.File.MachO)) |_| { + } else if (self.bin_file.cast(.macho)) |_| { unreachable; // unsupported architecture for MachO } else { return self.fail("TODO implement call on {s} for {s}", .{ @@ -4350,11 +4349,13 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier @tagName(self.target.cpu.arch), }); } - } else if (func_value.getExternFunc(mod)) |_| { + }, + .@"extern" => { return self.fail("TODO implement calling extern functions", .{}); - } else { + }, + else => { return self.fail("TODO implement calling bitcasted functions", .{}); - } + }, } else { assert(ty.zigTypeTag(mod) == .Pointer); const mcv = try self.resolveInst(callee); @@ -6178,7 +6179,7 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue { pt, self.src_loc, val, - pt.zcu.funcOwnerDeclIndex(self.func_index), + self.target.*, )) { .mcv => |mcv| switch (mcv) { .none => .none, diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index c4adb82ee60d..b5384542e73f 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -118,26 +118,18 @@ const RegisterOffset = struct { reg: Register, off: i32 = 0 }; pub const FrameAddr = struct { index: FrameIndex, off: i32 = 0 }; const Owner = 
union(enum) { - func_index: InternPool.Index, + nav_index: InternPool.Nav.Index, lazy_sym: link.File.LazySymbol, - fn getDecl(owner: Owner, zcu: *Zcu) InternPool.DeclIndex { - return switch (owner) { - .func_index => |func_index| zcu.funcOwnerDeclIndex(func_index), - .lazy_sym => |lazy_sym| lazy_sym.ty.getOwnerDecl(zcu), - }; - } - fn getSymbolIndex(owner: Owner, func: *Func) !u32 { const pt = func.pt; switch (owner) { - .func_index => |func_index| { - const decl_index = func.pt.zcu.funcOwnerDeclIndex(func_index); - const elf_file = func.bin_file.cast(link.File.Elf).?; - return elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, decl_index); + .nav_index => |nav_index| { + const elf_file = func.bin_file.cast(.elf).?; + return elf_file.zigObjectPtr().?.getOrCreateMetadataForNav(elf_file, nav_index); }, .lazy_sym => |lazy_sym| { - const elf_file = func.bin_file.cast(link.File.Elf).?; + const elf_file = func.bin_file.cast(.elf).?; return elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, pt, lazy_sym) catch |err| func.fail("{s} creating lazy symbol", .{@errorName(err)}); }, @@ -767,12 +759,8 @@ pub fn generate( const gpa = zcu.gpa; const ip = &zcu.intern_pool; const func = zcu.funcInfo(func_index); - const fn_owner_decl = zcu.declPtr(func.owner_decl); - assert(fn_owner_decl.has_tv); - const fn_type = fn_owner_decl.typeOf(zcu); - const namespace = zcu.namespacePtr(fn_owner_decl.src_namespace); - const target = &namespace.fileScope(zcu).mod.resolved_target.result; - const mod = namespace.fileScope(zcu).mod; + const fn_type = Type.fromInterned(func.ty); + const mod = zcu.navFileScope(func.owner_nav).mod; var branch_stack = std.ArrayList(Branch).init(gpa); defer { @@ -789,9 +777,9 @@ pub fn generate( .mod = mod, .bin_file = bin_file, .liveness = liveness, - .target = target, + .target = &mod.resolved_target.result, .debug_output = debug_output, - .owner = .{ .func_index = func_index }, + .owner = .{ .nav_index = func.owner_nav }, .err_msg = 
null, .args = undefined, // populated after `resolveCallingConventionValues` .ret_mcv = undefined, // populated after `resolveCallingConventionValues` @@ -818,7 +806,7 @@ pub fn generate( function.mir_instructions.deinit(gpa); } - wip_mir_log.debug("{}:", .{function.fmtDecl(func.owner_decl)}); + wip_mir_log.debug("{}:", .{fmtNav(func.owner_nav, ip)}); try function.frame_allocs.resize(gpa, FrameIndex.named_count); function.frame_allocs.set( @@ -1074,22 +1062,22 @@ fn fmtWipMir(func: *Func, inst: Mir.Inst.Index) std.fmt.Formatter(formatWipMir) return .{ .data = .{ .func = func, .inst = inst } }; } -const FormatDeclData = struct { - zcu: *Zcu, - decl_index: InternPool.DeclIndex, +const FormatNavData = struct { + ip: *const InternPool, + nav_index: InternPool.Nav.Index, }; -fn formatDecl( - data: FormatDeclData, +fn formatNav( + data: FormatNavData, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - try writer.print("{}", .{data.zcu.declPtr(data.decl_index).fqn.fmt(&data.zcu.intern_pool)}); + try writer.print("{}", .{data.ip.getNav(data.nav_index).fqn.fmt(data.ip)}); } -fn fmtDecl(func: *Func, decl_index: InternPool.DeclIndex) std.fmt.Formatter(formatDecl) { +fn fmtNav(nav_index: InternPool.Nav.Index, ip: *const InternPool) std.fmt.Formatter(formatNav) { return .{ .data = .{ - .zcu = func.pt.zcu, - .decl_index = decl_index, + .ip = ip, + .nav_index = nav_index, } }; } @@ -1393,9 +1381,9 @@ fn genLazy(func: *Func, lazy_sym: link.File.LazySymbol) InnerError!void { const pt = func.pt; const mod = pt.zcu; const ip = &mod.intern_pool; - switch (lazy_sym.ty.zigTypeTag(mod)) { + switch (Type.fromInterned(lazy_sym.ty).zigTypeTag(mod)) { .Enum => { - const enum_ty = lazy_sym.ty; + const enum_ty = Type.fromInterned(lazy_sym.ty); wip_mir_log.debug("{}.@tagName:", .{enum_ty.fmt(pt)}); const param_regs = abi.Registers.Integer.function_arg_regs; @@ -1408,11 +1396,11 @@ fn genLazy(func: *Func, lazy_sym: link.File.LazySymbol) 
InnerError!void { const data_reg, const data_lock = try func.allocReg(.int); defer func.register_manager.unlockReg(data_lock); - const elf_file = func.bin_file.cast(link.File.Elf).?; + const elf_file = func.bin_file.cast(.elf).?; const zo = elf_file.zigObjectPtr().?; const sym_index = zo.getOrCreateMetadataForLazySymbol(elf_file, pt, .{ .kind = .const_data, - .ty = enum_ty, + .ty = enum_ty.toIntern(), }) catch |err| return func.fail("{s} creating lazy symbol", .{@errorName(err)}); @@ -1479,7 +1467,7 @@ fn genLazy(func: *Func, lazy_sym: link.File.LazySymbol) InnerError!void { }, else => return func.fail( "TODO implement {s} for {}", - .{ @tagName(lazy_sym.kind), lazy_sym.ty.fmt(pt) }, + .{ @tagName(lazy_sym.kind), Type.fromInterned(lazy_sym.ty).fmt(pt) }, ), } } @@ -4682,17 +4670,14 @@ fn airFieldParentPtr(func: *Func, inst: Air.Inst.Index) !void { } fn genArgDbgInfo(func: Func, inst: Air.Inst.Index, mcv: MCValue) !void { - const pt = func.pt; - const zcu = pt.zcu; const arg = func.air.instructions.items(.data)[@intFromEnum(inst)].arg; const ty = arg.ty.toType(); - const owner_decl = func.owner.getDecl(zcu); if (arg.name == .none) return; const name = func.air.nullTerminatedString(@intFromEnum(arg.name)); switch (func.debug_output) { .dwarf => |dw| switch (mcv) { - .register => |reg| try dw.genArgDbgInfo(name, ty, owner_decl, .{ + .register => |reg| try dw.genArgDbgInfo(name, ty, func.owner.nav_index, .{ .register = reg.dwarfLocOp(), }), .load_frame => {}, @@ -4940,14 +4925,14 @@ fn genCall( switch (switch (func_key) { else => func_key, .ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) { - .decl => |decl| zcu.intern_pool.indexToKey(zcu.declPtr(decl).val.toIntern()), + .nav => |nav| zcu.intern_pool.indexToKey(zcu.navValue(nav).toIntern()), else => func_key, } else func_key, }) { .func => |func_val| { - if (func.bin_file.cast(link.File.Elf)) |elf_file| { + if (func.bin_file.cast(.elf)) |elf_file| { const zo = elf_file.zigObjectPtr().?; - const sym_index = 
try zo.getOrCreateMetadataForDecl(elf_file, func_val.owner_decl); + const sym_index = try zo.getOrCreateMetadataForNav(elf_file, func_val.owner_nav); if (func.mod.pic) { return func.fail("TODO: genCall pic", .{}); @@ -4964,19 +4949,18 @@ fn genCall( } } else unreachable; // not a valid riscv64 format }, - .extern_func => |extern_func| { - const owner_decl = zcu.declPtr(extern_func.decl); - const lib_name = extern_func.lib_name.toSlice(&zcu.intern_pool); - const decl_name = owner_decl.name.toSlice(&zcu.intern_pool); + .@"extern" => |@"extern"| { + const lib_name = @"extern".lib_name.toSlice(&zcu.intern_pool); + const name = @"extern".name.toSlice(&zcu.intern_pool); const atom_index = try func.owner.getSymbolIndex(func); - const elf_file = func.bin_file.cast(link.File.Elf).?; + const elf_file = func.bin_file.cast(.elf).?; _ = try func.addInst(.{ .tag = .pseudo_extern_fn_reloc, .data = .{ .reloc = .{ .register = .ra, .atom_index = atom_index, - .sym_index = try elf_file.getGlobalSymbol(decl_name, lib_name), + .sym_index = try elf_file.getGlobalSymbol(name, lib_name), } }, }); }, @@ -5213,8 +5197,6 @@ fn genVarDbgInfo( mcv: MCValue, name: [:0]const u8, ) !void { - const pt = func.pt; - const zcu = pt.zcu; const is_ptr = switch (tag) { .dbg_var_ptr => true, .dbg_var_val => false, @@ -5223,7 +5205,7 @@ fn genVarDbgInfo( switch (func.debug_output) { .dwarf => |dw| { - const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (mcv) { + const loc: link.File.Dwarf.NavState.DbgInfoLoc = switch (mcv) { .register => |reg| .{ .register = reg.dwarfLocOp() }, .memory => |address| .{ .memory = address }, .load_symbol => |sym_off| loc: { @@ -5238,7 +5220,7 @@ fn genVarDbgInfo( break :blk .nop; }, }; - try dw.genVarDbgInfo(name, ty, func.owner.getDecl(zcu), is_ptr, loc); + try dw.genVarDbgInfo(name, ty, func.owner.nav_index, is_ptr, loc); }, .plan9 => {}, .none => {}, @@ -7804,7 +7786,6 @@ fn airMemcpy(func: *Func, inst: Air.Inst.Index) !void { fn airTagName(func: *Func, inst: 
Air.Inst.Index) !void { const pt = func.pt; - const zcu = pt.zcu; const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { @@ -7820,7 +7801,7 @@ fn airTagName(func: *Func, inst: Air.Inst.Index) !void { const operand = try func.resolveInst(un_op); try func.genSetReg(enum_ty, param_regs[1], operand); - const lazy_sym = link.File.LazySymbol.initDecl(.code, enum_ty.getOwnerDecl(zcu), zcu); + const lazy_sym: link.File.LazySymbol = .{ .kind = .code, .ty = enum_ty.toIntern() }; const elf_file = func.bin_file.cast(link.File.Elf).?; const zo = elf_file.zigObjectPtr().?; const sym_index = zo.getOrCreateMetadataForLazySymbol(elf_file, pt, lazy_sym) catch |err| @@ -8033,32 +8014,14 @@ fn getResolvedInstValue(func: *Func, inst: Air.Inst.Index) *InstTracking { fn genTypedValue(func: *Func, val: Value) InnerError!MCValue { const pt = func.pt; - const zcu = pt.zcu; - const gpa = func.gpa; - const owner_decl_index = func.owner.getDecl(zcu); const lf = func.bin_file; const src_loc = func.src_loc; - if (val.isUndef(pt.zcu)) { - const local_sym_index = lf.lowerUnnamedConst(pt, val, owner_decl_index) catch |err| { - const msg = try ErrorMsg.create(gpa, src_loc, "lowering unnamed undefined constant failed: {s}", .{@errorName(err)}); - func.err_msg = msg; - return error.CodegenFail; - }; - switch (lf.tag) { - .elf => return MCValue{ .undef = local_sym_index }, - else => unreachable, - } - } - - const result = try codegen.genTypedValue( - lf, - pt, - src_loc, - val, - owner_decl_index, - ); + const result = if (val.isUndef(pt.zcu)) + try lf.lowerUav(pt, val.toIntern(), .none, src_loc) + else + try codegen.genTypedValue(lf, pt, src_loc, val, func.target.*); const mcv: MCValue = switch (result) { .mcv => |mcv| switch (mcv) { .none => .none, diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig index 9345f5daccaf..137bc395721e 100644 --- a/src/arch/riscv64/Emit.zig +++ 
b/src/arch/riscv64/Emit.zig @@ -49,7 +49,7 @@ pub fn emitMir(emit: *Emit) Error!void { .Lib => emit.lower.link_mode == .static, }; - const elf_file = emit.bin_file.cast(link.File.Elf).?; + const elf_file = emit.bin_file.cast(.elf).?; const zo = elf_file.zigObjectPtr().?; const atom_ptr = zo.symbol(symbol.atom_index).atom(elf_file).?; @@ -81,7 +81,7 @@ pub fn emitMir(emit: *Emit) Error!void { }); }, .load_tlv_reloc => |symbol| { - const elf_file = emit.bin_file.cast(link.File.Elf).?; + const elf_file = emit.bin_file.cast(.elf).?; const zo = elf_file.zigObjectPtr().?; const atom_ptr = zo.symbol(symbol.atom_index).atom(elf_file).?; @@ -107,7 +107,7 @@ pub fn emitMir(emit: *Emit) Error!void { }); }, .call_extern_fn_reloc => |symbol| { - const elf_file = emit.bin_file.cast(link.File.Elf).?; + const elf_file = emit.bin_file.cast(.elf).?; const zo = elf_file.zigObjectPtr().?; const atom_ptr = zo.symbol(symbol.atom_index).atom(elf_file).?; diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index f9bcfe700353..bba8cdec3481 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -273,11 +273,9 @@ pub fn generate( const zcu = pt.zcu; const gpa = zcu.gpa; const func = zcu.funcInfo(func_index); - const fn_owner_decl = zcu.declPtr(func.owner_decl); - assert(fn_owner_decl.has_tv); - const fn_type = fn_owner_decl.typeOf(zcu); - const namespace = zcu.namespacePtr(fn_owner_decl.src_namespace); - const target = &namespace.fileScope(zcu).mod.resolved_target.result; + const func_ty = Type.fromInterned(func.ty); + const file_scope = zcu.navFileScope(func.owner_nav); + const target = &file_scope.mod.resolved_target.result; var branch_stack = std.ArrayList(Branch).init(gpa); defer { @@ -300,7 +298,7 @@ pub fn generate( .err_msg = null, .args = undefined, // populated after `resolveCallingConventionValues` .ret_mcv = undefined, // populated after `resolveCallingConventionValues` - .fn_type = fn_type, + .fn_type = func_ty, .arg_index = 0, 
.branch_stack = &branch_stack, .src_loc = src_loc, @@ -312,7 +310,7 @@ pub fn generate( defer function.blocks.deinit(gpa); defer function.exitlude_jump_relocs.deinit(gpa); - var call_info = function.resolveCallingConventionValues(fn_type, .callee) catch |err| switch (err) { + var call_info = function.resolveCallingConventionValues(func_ty, .callee) catch |err| switch (err) { error.CodegenFail => return Result{ .fail = function.err_msg.? }, error.OutOfRegisters => return Result{ .fail = try ErrorMsg.create(gpa, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), @@ -1306,6 +1304,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const ty = self.typeOf(callee); const pt = self.pt; const mod = pt.zcu; + const ip = &mod.intern_pool; const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, .Pointer => ty.childType(mod), @@ -1349,46 +1348,42 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. 
- if (try self.air.value(callee, pt)) |func_value| { - if (self.bin_file.tag == link.File.Elf.base_tag) { - switch (mod.intern_pool.indexToKey(func_value.ip_index)) { - .func => |func| { - const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: { - const zo = elf_file.zigObjectPtr().?; - const sym_index = try zo.getOrCreateMetadataForDecl(elf_file, func.owner_decl); - const sym = zo.symbol(sym_index); - _ = try sym.getOrCreateZigGotEntry(sym_index, elf_file); - break :blk @as(u32, @intCast(sym.zigGotAddress(elf_file))); - } else unreachable; - - try self.genSetReg(Type.usize, .o7, .{ .memory = got_addr }); - - _ = try self.addInst(.{ - .tag = .jmpl, - .data = .{ - .arithmetic_3op = .{ - .is_imm = false, - .rd = .o7, - .rs1 = .o7, - .rs2_or_imm = .{ .rs2 = .g0 }, - }, - }, - }); + if (try self.air.value(callee, pt)) |func_value| switch (ip.indexToKey(func_value.toIntern())) { + .func => |func| { + const got_addr = if (self.bin_file.cast(.elf)) |elf_file| blk: { + const zo = elf_file.zigObjectPtr().?; + const sym_index = try zo.getOrCreateMetadataForNav(elf_file, func.owner_nav); + const sym = zo.symbol(sym_index); + _ = try sym.getOrCreateZigGotEntry(sym_index, elf_file); + break :blk @as(u32, @intCast(sym.zigGotAddress(elf_file))); + } else @panic("TODO SPARCv9 currently does not support non-ELF binaries"); + + try self.genSetReg(Type.usize, .o7, .{ .memory = got_addr }); - // TODO Find a way to fill this delay slot - _ = try self.addInst(.{ - .tag = .nop, - .data = .{ .nop = {} }, - }); - }, - .extern_func => { - return self.fail("TODO implement calling extern functions", .{}); - }, - else => { - return self.fail("TODO implement calling bitcasted functions", .{}); + _ = try self.addInst(.{ + .tag = .jmpl, + .data = .{ + .arithmetic_3op = .{ + .is_imm = false, + .rd = .o7, + .rs1 = .o7, + .rs2_or_imm = .{ .rs2 = .g0 }, + }, }, - } - } else @panic("TODO SPARCv9 currently does not support non-ELF binaries"); + }); + + // TODO Find a way to fill this 
delay slot + _ = try self.addInst(.{ + .tag = .nop, + .data = .{ .nop = {} }, + }); + }, + .@"extern" => { + return self.fail("TODO implement calling extern functions", .{}); + }, + else => { + return self.fail("TODO implement calling bitcasted functions", .{}); + }, } else { assert(ty.zigTypeTag(mod) == .Pointer); const mcv = try self.resolveInst(callee); @@ -3614,13 +3609,13 @@ fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void { const mod = pt.zcu; const arg = self.air.instructions.items(.data)[@intFromEnum(inst)].arg; const ty = arg.ty.toType(); - const owner_decl = mod.funcOwnerDeclIndex(self.func_index); + const owner_nav = mod.funcInfo(self.func_index).owner_nav; if (arg.name == .none) return; const name = self.air.nullTerminatedString(@intFromEnum(arg.name)); switch (self.debug_output) { .dwarf => |dw| switch (mcv) { - .register => |reg| try dw.genArgDbgInfo(name, ty, owner_decl, .{ + .register => |reg| try dw.genArgDbgInfo(name, ty, owner_nav, .{ .register = reg.dwarfLocOp(), }), else => {}, @@ -4153,7 +4148,7 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue { pt, self.src_loc, val, - pt.zcu.funcOwnerDeclIndex(self.func_index), + self.target.*, )) { .mcv => |mcv| switch (mcv) { .none => .none, diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 68b7f7293899..be049ec9754b 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -640,8 +640,8 @@ const CodeGen = @This(); /// Reference to the function declaration the code /// section belongs to -decl: *Decl, -decl_index: InternPool.DeclIndex, +owner_nav: InternPool.Nav.Index, +src_loc: Zcu.LazySrcLoc, /// Current block depth. Used to calculate the relative difference between a break /// and block block_depth: u32 = 0, @@ -681,7 +681,7 @@ locals: std.ArrayListUnmanaged(u8), /// are enabled also. 
simd_immediates: std.ArrayListUnmanaged([16]u8) = .{}, /// The Target we're emitting (used to call intInfo) -target: std.Target, +target: *const std.Target, /// Represents the wasm binary file that is being linked. bin_file: *link.File.Wasm, pt: Zcu.PerThread, @@ -765,8 +765,7 @@ pub fn deinit(func: *CodeGen) void { /// Sets `err_msg` on `CodeGen` and returns `error.CodegenFail` which is caught in link/Wasm.zig fn fail(func: *CodeGen, comptime fmt: []const u8, args: anytype) InnerError { - const src_loc = func.decl.navSrcLoc(func.pt.zcu); - func.err_msg = try Zcu.ErrorMsg.create(func.gpa, src_loc, fmt, args); + func.err_msg = try Zcu.ErrorMsg.create(func.gpa, func.src_loc, fmt, args); return error.CodegenFail; } @@ -803,8 +802,14 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue { // // In the other cases, we will simply lower the constant to a value that fits // into a single local (such as a pointer, integer, bool, etc). - const result: WValue = if (isByRef(ty, pt)) - .{ .memory = try func.bin_file.lowerUnnamedConst(pt, val, func.decl_index) } + const result: WValue = if (isByRef(ty, pt, func.target.*)) + switch (try func.bin_file.lowerUav(pt, val.toIntern(), .none, func.src_loc)) { + .mcv => |mcv| .{ .memory = mcv.load_symbol }, + .fail => |err_msg| { + func.err_msg = err_msg; + return error.CodegenFail; + }, + } else try func.lowerConstant(val, ty); @@ -995,9 +1000,8 @@ fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32 } /// Using a given `Type`, returns the corresponding valtype for .auto callconv -fn typeToValtype(ty: Type, pt: Zcu.PerThread) wasm.Valtype { +fn typeToValtype(ty: Type, pt: Zcu.PerThread, target: std.Target) wasm.Valtype { const mod = pt.zcu; - const target = mod.getTarget(); const ip = &mod.intern_pool; return switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(target)) { @@ -1015,19 +1019,19 @@ fn typeToValtype(ty: Type, pt: Zcu.PerThread) wasm.Valtype { .Struct => blk: { if 
(pt.zcu.typeToPackedStruct(ty)) |packed_struct| { const backing_int_ty = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)); - break :blk typeToValtype(backing_int_ty, pt); + break :blk typeToValtype(backing_int_ty, pt, target); } else { break :blk .i32; } }, - .Vector => switch (determineSimdStoreStrategy(ty, pt)) { + .Vector => switch (determineSimdStoreStrategy(ty, pt, target)) { .direct => .v128, .unrolled => .i32, }, .Union => switch (ty.containerLayout(pt.zcu)) { .@"packed" => blk: { const int_ty = pt.intType(.unsigned, @as(u16, @intCast(ty.bitSize(pt)))) catch @panic("out of memory"); - break :blk typeToValtype(int_ty, pt); + break :blk typeToValtype(int_ty, pt, target); }, else => .i32, }, @@ -1036,17 +1040,17 @@ fn typeToValtype(ty: Type, pt: Zcu.PerThread) wasm.Valtype { } /// Using a given `Type`, returns the byte representation of its wasm value type -fn genValtype(ty: Type, pt: Zcu.PerThread) u8 { - return wasm.valtype(typeToValtype(ty, pt)); +fn genValtype(ty: Type, pt: Zcu.PerThread, target: std.Target) u8 { + return wasm.valtype(typeToValtype(ty, pt, target)); } /// Using a given `Type`, returns the corresponding wasm value type /// Differently from `genValtype` this also allows `void` to create a block /// with no return type -fn genBlockType(ty: Type, pt: Zcu.PerThread) u8 { +fn genBlockType(ty: Type, pt: Zcu.PerThread, target: std.Target) u8 { return switch (ty.ip_index) { .void_type, .noreturn_type => wasm.block_empty, - else => genValtype(ty, pt), + else => genValtype(ty, pt, target), }; } @@ -1108,7 +1112,7 @@ fn getResolvedInst(func: *CodeGen, ref: Air.Inst.Ref) *WValue { /// Returns a corresponding `Wvalue` with `local` as active tag fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue { const pt = func.pt; - const valtype = typeToValtype(ty, pt); + const valtype = typeToValtype(ty, pt, func.target.*); const index_or_null = switch (valtype) { .i32 => func.free_locals_i32.popOrNull(), .i64 => func.free_locals_i64.popOrNull(), 
@@ -1128,7 +1132,7 @@ fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue { /// to use a zero-initialized local. fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue { const pt = func.pt; - try func.locals.append(func.gpa, genValtype(ty, pt)); + try func.locals.append(func.gpa, genValtype(ty, pt, func.target.*)); const initial_index = func.local_index; func.local_index += 1; return .{ .local = .{ .value = initial_index, .references = 1 } }; @@ -1142,6 +1146,7 @@ fn genFunctype( params: []const InternPool.Index, return_type: Type, pt: Zcu.PerThread, + target: std.Target, ) !wasm.Type { const mod = pt.zcu; var temp_params = std.ArrayList(wasm.Valtype).init(gpa); @@ -1149,16 +1154,16 @@ fn genFunctype( var returns = std.ArrayList(wasm.Valtype).init(gpa); defer returns.deinit(); - if (firstParamSRet(cc, return_type, pt)) { + if (firstParamSRet(cc, return_type, pt, target)) { try temp_params.append(.i32); // memory address is always a 32-bit handle } else if (return_type.hasRuntimeBitsIgnoreComptime(pt)) { if (cc == .C) { const res_classes = abi.classifyType(return_type, pt); assert(res_classes[0] == .direct and res_classes[1] == .none); const scalar_type = abi.scalarType(return_type, pt); - try returns.append(typeToValtype(scalar_type, pt)); + try returns.append(typeToValtype(scalar_type, pt, target)); } else { - try returns.append(typeToValtype(return_type, pt)); + try returns.append(typeToValtype(return_type, pt, target)); } } else if (return_type.isError(mod)) { try returns.append(.i32); @@ -1175,9 +1180,9 @@ fn genFunctype( if (param_classes[1] == .none) { if (param_classes[0] == .direct) { const scalar_type = abi.scalarType(param_type, pt); - try temp_params.append(typeToValtype(scalar_type, pt)); + try temp_params.append(typeToValtype(scalar_type, pt, target)); } else { - try temp_params.append(typeToValtype(param_type, pt)); + try temp_params.append(typeToValtype(param_type, pt, target)); } } else { // i128/f128 @@ -1185,7 +1190,7 @@ fn 
genFunctype( try temp_params.append(.i64); } }, - else => try temp_params.append(typeToValtype(param_type, pt)), + else => try temp_params.append(typeToValtype(param_type, pt, target)), } } @@ -1205,25 +1210,23 @@ pub fn generate( code: *std.ArrayList(u8), debug_output: codegen.DebugInfoOutput, ) codegen.CodeGenError!codegen.Result { - _ = src_loc; const zcu = pt.zcu; const gpa = zcu.gpa; const func = zcu.funcInfo(func_index); - const decl = zcu.declPtr(func.owner_decl); - const namespace = zcu.namespacePtr(decl.src_namespace); - const target = namespace.fileScope(zcu).mod.resolved_target.result; + const file_scope = zcu.navFileScope(func.owner_nav); + const target = &file_scope.mod.resolved_target.result; var code_gen: CodeGen = .{ .gpa = gpa, .pt = pt, .air = air, .liveness = liveness, .code = code, - .decl_index = func.owner_decl, - .decl = decl, + .owner_nav = func.owner_nav, + .src_loc = src_loc, .err_msg = undefined, .locals = .{}, .target = target, - .bin_file = bin_file.cast(link.File.Wasm).?, + .bin_file = bin_file.cast(.wasm).?, .debug_output = debug_output, .func_index = func_index, }; @@ -1241,12 +1244,13 @@ fn genFunc(func: *CodeGen) InnerError!void { const pt = func.pt; const mod = pt.zcu; const ip = &mod.intern_pool; - const fn_info = mod.typeToFunc(func.decl.typeOf(mod)).?; - var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt); + const fn_ty = mod.navValue(func.owner_nav).typeOf(mod); + const fn_info = mod.typeToFunc(fn_ty).?; + var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt, func.target.*); defer func_type.deinit(func.gpa); - _ = try func.bin_file.storeDeclType(func.decl_index, func_type); + _ = try func.bin_file.storeNavType(func.owner_nav, func_type); - var cc_result = try func.resolveCallingConventionValues(func.decl.typeOf(mod)); + var cc_result = try 
func.resolveCallingConventionValues(fn_ty); defer cc_result.deinit(func.gpa); func.args = cc_result.args; @@ -1324,7 +1328,7 @@ fn genFunc(func: *CodeGen) InnerError!void { .bin_file = func.bin_file, .code = func.code, .locals = func.locals.items, - .decl_index = func.decl_index, + .owner_nav = func.owner_nav, .dbg_output = func.debug_output, .prev_di_line = 0, .prev_di_column = 0, @@ -1367,7 +1371,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV // Check if we store the result as a pointer to the stack rather than // by value - if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt)) { + if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target.*)) { // the sret arg will be passed as first argument, therefore we // set the `return_value` before allocating locals for regular args. result.return_value = .{ .local = .{ .value = func.local_index, .references = 1 } }; @@ -1401,9 +1405,9 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV return result; } -fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, pt: Zcu.PerThread) bool { +fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, pt: Zcu.PerThread, target: std.Target) bool { switch (cc) { - .Unspecified, .Inline => return isByRef(return_type, pt), + .Unspecified, .Inline => return isByRef(return_type, pt, target), .C => { const ty_classes = abi.classifyType(return_type, pt); if (ty_classes[0] == .indirect) return true; @@ -1711,10 +1715,9 @@ fn arch(func: *const CodeGen) std.Target.Cpu.Arch { /// For a given `Type`, will return true when the type will be passed /// by reference, rather than by value -fn isByRef(ty: Type, pt: Zcu.PerThread) bool { +fn isByRef(ty: Type, pt: Zcu.PerThread, target: std.Target) bool { const mod = pt.zcu; const ip = &mod.intern_pool; - const target = mod.getTarget(); switch (ty.zigTypeTag(mod)) { .Type, .ComptimeInt, @@ -1746,11 
+1749,11 @@ fn isByRef(ty: Type, pt: Zcu.PerThread) bool { }, .Struct => { if (mod.typeToPackedStruct(ty)) |packed_struct| { - return isByRef(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), pt); + return isByRef(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)), pt, target); } return ty.hasRuntimeBitsIgnoreComptime(pt); }, - .Vector => return determineSimdStoreStrategy(ty, pt) == .unrolled, + .Vector => return determineSimdStoreStrategy(ty, pt, target) == .unrolled, .Int => return ty.intInfo(mod).bits > 64, .Enum => return ty.intInfo(mod).bits > 64, .Float => return ty.floatBits(target) > 64, @@ -1784,11 +1787,10 @@ const SimdStoreStrategy = enum { /// This means when a given type is 128 bits and either the simd128 or relaxed-simd /// features are enabled, the function will return `.direct`. This would allow to store /// it using a instruction, rather than an unrolled version. -fn determineSimdStoreStrategy(ty: Type, pt: Zcu.PerThread) SimdStoreStrategy { +fn determineSimdStoreStrategy(ty: Type, pt: Zcu.PerThread, target: std.Target) SimdStoreStrategy { std.debug.assert(ty.zigTypeTag(pt.zcu) == .Vector); if (ty.bitSize(pt) != 128) return .unrolled; const hasFeature = std.Target.wasm.featureSetHas; - const target = pt.zcu.getTarget(); const features = target.cpu.features; if (hasFeature(features, .relaxed_simd) or hasFeature(features, .simd128)) { return .direct; @@ -2091,7 +2093,7 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = pt.zcu; const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try func.resolveInst(un_op); - const fn_info = mod.typeToFunc(func.decl.typeOf(mod)).?; + const fn_info = mod.typeToFunc(mod.navValue(func.owner_nav).typeOf(mod)).?; const ret_ty = Type.fromInterned(fn_info.return_type); // result must be stored in the stack and we return a pointer @@ -2108,7 +2110,7 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { .op = .load, 
.width = @as(u8, @intCast(scalar_type.abiSize(pt) * 8)), .signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned, - .valtype1 = typeToValtype(scalar_type, pt), + .valtype1 = typeToValtype(scalar_type, pt, func.target.*), }); try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{ .offset = operand.offset(), @@ -2140,8 +2142,8 @@ fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :result try func.allocStack(Type.usize); // create pointer to void } - const fn_info = mod.typeToFunc(func.decl.typeOf(mod)).?; - if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt)) { + const fn_info = mod.typeToFunc(mod.navValue(func.owner_nav).typeOf(mod)).?; + if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target.*)) { break :result func.return_value; } @@ -2158,12 +2160,12 @@ fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(un_op); const ret_ty = func.typeOf(un_op).childType(mod); - const fn_info = mod.typeToFunc(func.decl.typeOf(mod)).?; + const fn_info = mod.typeToFunc(mod.navValue(func.owner_nav).typeOf(mod)).?; if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { if (ret_ty.isError(mod)) { try func.addImm32(0); } - } else if (!firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt)) { + } else if (!firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target.*)) { // leave on the stack _ = try func.load(operand, ret_ty, 0); } @@ -2190,34 +2192,43 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif }; const ret_ty = fn_ty.fnReturnType(mod); const fn_info = mod.typeToFunc(fn_ty).?; - const first_param_sret = firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt); + const first_param_sret = firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target.*); - const callee: ?InternPool.DeclIndex = blk: { + const callee: 
?InternPool.Nav.Index = blk: { const func_val = (try func.air.value(pl_op.operand, pt)) orelse break :blk null; - if (func_val.getFunction(mod)) |function| { - _ = try func.bin_file.getOrCreateAtomForDecl(pt, function.owner_decl); - break :blk function.owner_decl; - } else if (func_val.getExternFunc(mod)) |extern_func| { - const ext_decl = mod.declPtr(extern_func.decl); - const ext_info = mod.typeToFunc(ext_decl.typeOf(mod)).?; - var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types.get(ip), Type.fromInterned(ext_info.return_type), pt); - defer func_type.deinit(func.gpa); - const atom_index = try func.bin_file.getOrCreateAtomForDecl(pt, extern_func.decl); - const atom = func.bin_file.getAtomPtr(atom_index); - const type_index = try func.bin_file.storeDeclType(extern_func.decl, func_type); - try func.bin_file.addOrUpdateImport( - ext_decl.name.toSlice(&mod.intern_pool), - atom.sym_index, - ext_decl.getOwnedExternFunc(mod).?.lib_name.toSlice(&mod.intern_pool), - type_index, - ); - break :blk extern_func.decl; - } else switch (mod.intern_pool.indexToKey(func_val.ip_index)) { + switch (ip.indexToKey(func_val.toIntern())) { + .func => |function| { + _ = try func.bin_file.getOrCreateAtomForNav(pt, function.owner_nav); + break :blk function.owner_nav; + }, + .@"extern" => |@"extern"| { + const ext_nav = ip.getNav(@"extern".owner_nav); + const ext_info = mod.typeToFunc(Type.fromInterned(@"extern".ty)).?; + var func_type = try genFunctype( + func.gpa, + ext_info.cc, + ext_info.param_types.get(ip), + Type.fromInterned(ext_info.return_type), + pt, + func.target.*, + ); + defer func_type.deinit(func.gpa); + const atom_index = try func.bin_file.getOrCreateAtomForNav(pt, @"extern".owner_nav); + const atom = func.bin_file.getAtomPtr(atom_index); + const type_index = try func.bin_file.storeNavType(@"extern".owner_nav, func_type); + try func.bin_file.addOrUpdateImport( + ext_nav.name.toSlice(ip), + atom.sym_index, + @"extern".lib_name.toSlice(ip), + 
type_index, + ); + break :blk @"extern".owner_nav; + }, .ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) { - .decl => |decl| { - _ = try func.bin_file.getOrCreateAtomForDecl(pt, decl); - break :blk decl; + .nav => |nav| { + _ = try func.bin_file.getOrCreateAtomForNav(pt, nav); + break :blk nav; }, else => {}, }, @@ -2242,7 +2253,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif } if (callee) |direct| { - const atom_index = func.bin_file.zigObjectPtr().?.decls_map.get(direct).?.atom; + const atom_index = func.bin_file.zigObjectPtr().?.navs.get(direct).?.atom; try func.addLabel(.call, @intFromEnum(func.bin_file.getAtom(atom_index).sym_index)); } else { // in this case we call a function pointer @@ -2251,7 +2262,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const operand = try func.resolveInst(pl_op.operand); try func.emitWValue(operand); - var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt); + var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt, func.target.*); defer fn_type.deinit(func.gpa); const fn_type_index = try func.bin_file.zigObjectPtr().?.putOrGetFuncType(func.gpa, fn_type); @@ -2315,7 +2326,7 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void // load the value, and then shift+or the rhs into the result location. 
const int_elem_ty = try pt.intType(.unsigned, ptr_info.packed_offset.host_size * 8); - if (isByRef(int_elem_ty, pt)) { + if (isByRef(int_elem_ty, pt, func.target.*)) { return func.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{}); } @@ -2381,11 +2392,11 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE const len = @as(u32, @intCast(abi_size)); return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, - .Struct, .Array, .Union => if (isByRef(ty, pt)) { + .Struct, .Array, .Union => if (isByRef(ty, pt, func.target.*)) { const len = @as(u32, @intCast(abi_size)); return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, - .Vector => switch (determineSimdStoreStrategy(ty, pt)) { + .Vector => switch (determineSimdStoreStrategy(ty, pt, func.target.*)) { .unrolled => { const len: u32 = @intCast(abi_size); return func.memcpy(lhs, rhs, .{ .imm32 = len }); @@ -2443,7 +2454,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE // into lhs, so we calculate that and emit that instead try func.lowerToStack(rhs); - const valtype = typeToValtype(ty, pt); + const valtype = typeToValtype(ty, pt, func.target.*); const opcode = buildOpcode(.{ .valtype1 = valtype, .width = @as(u8, @intCast(abi_size * 8)), @@ -2472,7 +2483,7 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { if (!ty.hasRuntimeBitsIgnoreComptime(pt)) return func.finishAir(inst, .none, &.{ty_op.operand}); const result = result: { - if (isByRef(ty, pt)) { + if (isByRef(ty, pt, func.target.*)) { const new_local = try func.allocStack(ty); try func.store(new_local, operand, ty, 0); break :result new_local; @@ -2522,7 +2533,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu const abi_size: u8 = @intCast(ty.abiSize(pt)); const opcode = buildOpcode(.{ - .valtype1 = typeToValtype(ty, pt), + .valtype1 = typeToValtype(ty, pt, func.target.*), .width = abi_size * 8, .op = .load, 
.signedness = if (ty.isSignedInt(mod)) .signed else .unsigned, @@ -2544,7 +2555,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = pt.zcu; const arg_index = func.arg_index; const arg = func.args[arg_index]; - const cc = mod.typeToFunc(func.decl.typeOf(mod)).?.cc; + const cc = mod.typeToFunc(mod.navValue(func.owner_nav).typeOf(mod)).?.cc; const arg_ty = func.typeOfIndex(inst); if (cc == .C) { const arg_classes = abi.classifyType(arg_ty, pt); @@ -2577,7 +2588,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const name_nts = func.air.instructions.items(.data)[@intFromEnum(inst)].arg.name; if (name_nts != .none) { const name = func.air.nullTerminatedString(@intFromEnum(name_nts)); - try dwarf.genArgDbgInfo(name, arg_ty, mod.funcOwnerDeclIndex(func.func_index), .{ + try dwarf.genArgDbgInfo(name, arg_ty, func.owner_nav, .{ .wasm_local = arg.local.value, }); } @@ -2631,7 +2642,7 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError! return func.floatOp(float_op, ty, &.{ lhs, rhs }); } - if (isByRef(ty, pt)) { + if (isByRef(ty, pt, func.target.*)) { if (ty.zigTypeTag(mod) == .Int) { return func.binOpBigInt(lhs, rhs, ty, op); } else { @@ -2644,7 +2655,7 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError! 
const opcode: wasm.Opcode = buildOpcode(.{ .op = op, - .valtype1 = typeToValtype(ty, pt), + .valtype1 = typeToValtype(ty, pt, func.target.*), .signedness = if (ty.isSignedInt(mod)) .signed else .unsigned, }); try func.emitWValue(lhs); @@ -2896,7 +2907,7 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In return func.fail("TODO: Implement floatOps for vectors", .{}); } - const float_bits = ty.floatBits(func.target); + const float_bits = ty.floatBits(func.target.*); if (float_op == .neg) { return func.floatNeg(ty, args[0]); @@ -2907,7 +2918,7 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In for (args) |operand| { try func.emitWValue(operand); } - const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, pt) }); + const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, pt, func.target.*) }); try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); return .stack; } @@ -2955,7 +2966,7 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In /// NOTE: The result value remains on top of the stack. 
fn floatNeg(func: *CodeGen, ty: Type, arg: WValue) InnerError!WValue { - const float_bits = ty.floatBits(func.target); + const float_bits = ty.floatBits(func.target.*); switch (float_bits) { 16 => { try func.emitWValue(arg); @@ -3115,8 +3126,8 @@ fn lowerPtr(func: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerEr const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr; const offset: u64 = prev_offset + ptr.byte_offset; return switch (ptr.base_addr) { - .decl => |decl| return func.lowerDeclRefValue(decl, @intCast(offset)), - .anon_decl => |ad| return func.lowerAnonDeclRef(ad, @intCast(offset)), + .nav => |nav| return func.lowerNavRef(nav, @intCast(offset)), + .uav => |uav| return func.lowerUavRef(uav, @intCast(offset)), .int => return func.lowerConstant(try pt.intValue(Type.usize, offset), Type.usize), .eu_payload => return func.fail("Wasm TODO: lower error union payload pointer", .{}), .opt_payload => |opt_ptr| return func.lowerPtr(opt_ptr, offset), @@ -3128,7 +3139,7 @@ fn lowerPtr(func: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerEr assert(base_ty.isSlice(zcu)); break :off switch (field.index) { Value.slice_ptr_index => 0, - Value.slice_len_index => @divExact(zcu.getTarget().ptrBitWidth(), 8), + Value.slice_len_index => @divExact(func.target.ptrBitWidth(), 8), else => unreachable, }; }, @@ -3160,32 +3171,29 @@ fn lowerPtr(func: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerEr }; } -fn lowerAnonDeclRef( +fn lowerUavRef( func: *CodeGen, - anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl, + uav: InternPool.Key.Ptr.BaseAddr.Uav, offset: u32, ) InnerError!WValue { const pt = func.pt; const mod = pt.zcu; - const decl_val = anon_decl.val; - const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val)); + const ty = Type.fromInterned(mod.intern_pool.typeOf(uav.val)); const is_fn_body = ty.zigTypeTag(mod) == .Fn; if (!is_fn_body and !ty.hasRuntimeBitsIgnoreComptime(pt)) { return .{ .imm32 = 0xaaaaaaaa }; } - const decl_align = 
mod.intern_pool.indexToKey(anon_decl.orig_ty).ptr_type.flags.alignment; - const res = try func.bin_file.lowerAnonDecl(pt, decl_val, decl_align, func.decl.navSrcLoc(mod)); - switch (res) { - .ok => {}, - .fail => |em| { - func.err_msg = em; + const decl_align = mod.intern_pool.indexToKey(uav.orig_ty).ptr_type.flags.alignment; + const res = try func.bin_file.lowerUav(pt, uav.val, decl_align, func.src_loc); + const target_sym_index = switch (res) { + .mcv => |mcv| mcv.load_symbol, + .fail => |err_msg| { + func.err_msg = err_msg; return error.CodegenFail; }, - } - const target_atom_index = func.bin_file.zigObjectPtr().?.anon_decls.get(decl_val).?; - const target_sym_index = @intFromEnum(func.bin_file.getAtom(target_atom_index).sym_index); + }; if (is_fn_body) { return .{ .function_index = target_sym_index }; } else if (offset == 0) { @@ -3193,32 +3201,29 @@ fn lowerAnonDeclRef( } else return .{ .memory_offset = .{ .pointer = target_sym_index, .offset = offset } }; } -fn lowerDeclRefValue(func: *CodeGen, decl_index: InternPool.DeclIndex, offset: u32) InnerError!WValue { +fn lowerNavRef(func: *CodeGen, nav_index: InternPool.Nav.Index, offset: u32) InnerError!WValue { const pt = func.pt; const mod = pt.zcu; + const ip = &mod.intern_pool; - const decl = mod.declPtr(decl_index); // check if decl is an alias to a function, in which case we // want to lower the actual decl, rather than the alias itself. 
- if (decl.val.getFunction(mod)) |func_val| { - if (func_val.owner_decl != decl_index) { - return func.lowerDeclRefValue(func_val.owner_decl, offset); - } - } else if (decl.val.getExternFunc(mod)) |func_val| { - if (func_val.decl != decl_index) { - return func.lowerDeclRefValue(func_val.decl, offset); - } - } - const decl_ty = decl.typeOf(mod); - if (decl_ty.zigTypeTag(mod) != .Fn and !decl_ty.hasRuntimeBitsIgnoreComptime(pt)) { + const owner_nav = switch (ip.indexToKey(mod.navValue(nav_index).toIntern())) { + .func => |function| function.owner_nav, + .variable => |variable| variable.owner_nav, + .@"extern" => |@"extern"| @"extern".owner_nav, + else => nav_index, + }; + const nav_ty = ip.getNav(owner_nav).typeOf(ip); + if (!ip.isFunctionType(nav_ty) and !Type.fromInterned(nav_ty).hasRuntimeBitsIgnoreComptime(pt)) { return .{ .imm32 = 0xaaaaaaaa }; } - const atom_index = try func.bin_file.getOrCreateAtomForDecl(pt, decl_index); + const atom_index = try func.bin_file.getOrCreateAtomForNav(pt, nav_index); const atom = func.bin_file.getAtom(atom_index); const target_sym_index = @intFromEnum(atom.sym_index); - if (decl_ty.zigTypeTag(mod) == .Fn) { + if (ip.isFunctionType(nav_ty)) { return .{ .function_index = target_sym_index }; } else if (offset == 0) { return .{ .memory = target_sym_index }; @@ -3229,7 +3234,7 @@ fn lowerDeclRefValue(func: *CodeGen, decl_index: InternPool.DeclIndex, offset: u fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue { const pt = func.pt; const mod = pt.zcu; - assert(!isByRef(ty, pt)); + assert(!isByRef(ty, pt, func.target.*)); const ip = &mod.intern_pool; if (val.isUndefDeep(mod)) return func.emitUndefined(ty); @@ -3268,7 +3273,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue { } }, }, .variable, - .extern_func, + .@"extern", .func, .enum_literal, .empty_enum_value, @@ -3325,16 +3330,12 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue { .f64 => |f64_val| return .{ 
.float64 = f64_val }, else => unreachable, }, - .slice => |slice| { - var ptr = ip.indexToKey(slice.ptr).ptr; - const owner_decl = while (true) switch (ptr.base_addr) { - .decl => |decl| break decl, - .int, .anon_decl => return func.fail("Wasm TODO: lower slice where ptr is not owned by decl", .{}), - .opt_payload, .eu_payload => |base| ptr = ip.indexToKey(base).ptr, - .field => |base_index| ptr = ip.indexToKey(base_index.base).ptr, - .arr_elem, .comptime_field, .comptime_alloc => unreachable, - }; - return .{ .memory = try func.bin_file.lowerUnnamedConst(pt, val, owner_decl) }; + .slice => switch (try func.bin_file.lowerUav(pt, val.toIntern(), .none, func.src_loc)) { + .mcv => |mcv| return .{ .memory = mcv.load_symbol }, + .fail => |err_msg| { + func.err_msg = err_msg; + return error.CodegenFail; + }, }, .ptr => return func.lowerPtr(val.toIntern(), 0), .opt => if (ty.optionalReprIsPayload(mod)) { @@ -3350,7 +3351,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue { .aggregate => switch (ip.indexToKey(ty.ip_index)) { .array_type => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(pt)}), .vector_type => { - assert(determineSimdStoreStrategy(ty, pt) == .direct); + assert(determineSimdStoreStrategy(ty, pt, func.target.*) == .direct); var buf: [16]u8 = undefined; val.writeToMemory(ty, pt, &buf) catch unreachable; return func.storeSimdImmd(buf); @@ -3405,7 +3406,7 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { 33...64 => return .{ .imm64 = 0xaaaaaaaaaaaaaaaa }, else => unreachable, }, - .Float => switch (ty.floatBits(func.target)) { + .Float => switch (ty.floatBits(func.target.*)) { 16 => return .{ .imm32 = 0xaaaaaaaa }, 32 => return .{ .float32 = @as(f32, @bitCast(@as(u32, 0xaaaaaaaa))) }, 64 => return .{ .float64 = @as(f64, @bitCast(@as(u64, 0xaaaaaaaaaaaaaaaa))) }, @@ -3480,11 +3481,11 @@ fn airBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn lowerBlock(func: *CodeGen, inst: Air.Inst.Index, 
block_ty: Type, body: []const Air.Inst.Index) InnerError!void { const pt = func.pt; - const wasm_block_ty = genBlockType(block_ty, pt); + const wasm_block_ty = genBlockType(block_ty, pt, func.target.*); // if wasm_block_ty is non-empty, we create a register to store the temporary value const block_result: WValue = if (wasm_block_ty != wasm.block_empty) blk: { - const ty: Type = if (isByRef(block_ty, pt)) Type.u32 else block_ty; + const ty: Type = if (isByRef(block_ty, pt, func.target.*)) Type.u32 else block_ty; break :blk try func.ensureAllocLocal(ty); // make sure it's a clean local as it may never get overwritten } else .none; @@ -3608,7 +3609,7 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO } } else if (ty.isAnyFloat()) { return func.cmpFloat(ty, lhs, rhs, op); - } else if (isByRef(ty, pt)) { + } else if (isByRef(ty, pt, func.target.*)) { return func.cmpBigInt(lhs, rhs, ty, op); } @@ -3626,7 +3627,7 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO try func.lowerToStack(rhs); const opcode: wasm.Opcode = buildOpcode(.{ - .valtype1 = typeToValtype(ty, pt), + .valtype1 = typeToValtype(ty, pt, func.target.*), .op = switch (op) { .lt => .lt, .lte => .le, @@ -3645,7 +3646,7 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO /// Compares two floats. /// NOTE: Leaves the result of the comparison on top of the stack. 
fn cmpFloat(func: *CodeGen, ty: Type, lhs: WValue, rhs: WValue, cmp_op: std.math.CompareOperator) InnerError!WValue { - const float_bits = ty.floatBits(func.target); + const float_bits = ty.floatBits(func.target.*); const op: Op = switch (cmp_op) { .lt => .lt, @@ -3829,7 +3830,7 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :result try func.bitcast(wanted_ty, given_ty, operand); } - if (isByRef(given_ty, pt) and !isByRef(wanted_ty, pt)) { + if (isByRef(given_ty, pt, func.target.*) and !isByRef(wanted_ty, pt, func.target.*)) { const loaded_memory = try func.load(operand, wanted_ty, 0); if (needs_wrapping) { break :result try func.wrapOperand(loaded_memory, wanted_ty); @@ -3837,7 +3838,7 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :result loaded_memory; } } - if (!isByRef(given_ty, pt) and isByRef(wanted_ty, pt)) { + if (!isByRef(given_ty, pt, func.target.*) and isByRef(wanted_ty, pt, func.target.*)) { const stack_memory = try func.allocStack(wanted_ty); try func.store(stack_memory, operand, given_ty, 0); if (needs_wrapping) { @@ -3867,8 +3868,8 @@ fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) Inn const opcode = buildOpcode(.{ .op = .reinterpret, - .valtype1 = typeToValtype(wanted_ty, pt), - .valtype2 = typeToValtype(given_ty, pt), + .valtype1 = typeToValtype(wanted_ty, pt, func.target.*), + .valtype2 = typeToValtype(given_ty, pt, func.target.*), }); try func.emitWValue(operand); try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); @@ -3990,8 +3991,8 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :result try func.trunc(shifted_value, field_ty, backing_ty); }, .Union => result: { - if (isByRef(struct_ty, pt)) { - if (!isByRef(field_ty, pt)) { + if (isByRef(struct_ty, pt, func.target.*)) { + if (!isByRef(field_ty, pt, func.target.*)) { break :result try func.load(operand, field_ty, 0); } else { const new_stack_val = try 
func.allocStack(field_ty); @@ -4017,7 +4018,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, pt)) orelse { return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(pt)}); }; - if (isByRef(field_ty, pt)) { + if (isByRef(field_ty, pt, func.target.*)) { switch (operand) { .stack_offset => |stack_offset| { break :result .{ .stack_offset = .{ .value = stack_offset.value + offset, .references = 1 } }; @@ -4163,7 +4164,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const val = try func.lowerConstant(case.values[0].value, target_ty); try func.emitWValue(val); const opcode = buildOpcode(.{ - .valtype1 = typeToValtype(target_ty, pt), + .valtype1 = typeToValtype(target_ty, pt, func.target.*), .op = .ne, // not equal, because we want to jump out of this block if it does not match the condition. .signedness = signedness, }); @@ -4177,7 +4178,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const val = try func.lowerConstant(value.value, target_ty); try func.emitWValue(val); const opcode = buildOpcode(.{ - .valtype1 = typeToValtype(target_ty, pt), + .valtype1 = typeToValtype(target_ty, pt, func.target.*), .op = .eq, .signedness = signedness, }); @@ -4265,7 +4266,7 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo } const pl_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, pt))); - if (op_is_ptr or isByRef(payload_ty, pt)) { + if (op_is_ptr or isByRef(payload_ty, pt, func.target.*)) { break :result try func.buildPointerOffset(operand, pl_offset, .new); } @@ -4492,7 +4493,7 @@ fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(ty_op.operand); if (opt_ty.optionalReprIsPayload(mod)) break :result func.reuseOperand(ty_op.operand, operand); - if (isByRef(payload_ty, pt)) { + if 
(isByRef(payload_ty, pt, func.target.*)) { break :result try func.buildPointerOffset(operand, 0, .new); } @@ -4626,7 +4627,7 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.addTag(.i32_mul); try func.addTag(.i32_add); - const elem_result = if (isByRef(elem_ty, pt)) + const elem_result = if (isByRef(elem_ty, pt, func.target.*)) .stack else try func.load(.stack, elem_ty, 0); @@ -4784,7 +4785,7 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.addTag(.i32_mul); try func.addTag(.i32_add); - const elem_result = if (isByRef(elem_ty, pt)) + const elem_result = if (isByRef(elem_ty, pt, func.target.*)) .stack else try func.load(.stack, elem_ty, 0); @@ -4835,7 +4836,7 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { else => ptr_ty.childType(mod), }; - const valtype = typeToValtype(Type.usize, pt); + const valtype = typeToValtype(Type.usize, pt, func.target.*); const mul_opcode = buildOpcode(.{ .valtype1 = valtype, .op = .mul }); const bin_opcode = buildOpcode(.{ .valtype1 = valtype, .op = op }); @@ -4982,7 +4983,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const elem_ty = array_ty.childType(mod); const elem_size = elem_ty.abiSize(pt); - if (isByRef(array_ty, pt)) { + if (isByRef(array_ty, pt, func.target.*)) { try func.lowerToStack(array); try func.emitWValue(index); try func.addImm32(@intCast(elem_size)); @@ -5025,7 +5026,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } } - const elem_result = if (isByRef(elem_ty, pt)) + const elem_result = if (isByRef(elem_ty, pt, func.target.*)) .stack else try func.load(.stack, elem_ty, 0); @@ -5040,7 +5041,7 @@ fn airIntFromFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(ty_op.operand); const op_ty = func.typeOf(ty_op.operand); - const op_bits = op_ty.floatBits(func.target); + const op_bits = 
op_ty.floatBits(func.target.*); const dest_ty = func.typeOfIndex(inst); const dest_info = dest_ty.intInfo(mod); @@ -5069,8 +5070,8 @@ fn airIntFromFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.emitWValue(operand); const op = buildOpcode(.{ .op = .trunc, - .valtype1 = typeToValtype(dest_ty, pt), - .valtype2 = typeToValtype(op_ty, pt), + .valtype1 = typeToValtype(dest_ty, pt, func.target.*), + .valtype2 = typeToValtype(op_ty, pt, func.target.*), .signedness = dest_info.signedness, }); try func.addTag(Mir.Inst.Tag.fromOpcode(op)); @@ -5088,7 +5089,7 @@ fn airFloatFromInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const op_info = op_ty.intInfo(mod); const dest_ty = func.typeOfIndex(inst); - const dest_bits = dest_ty.floatBits(func.target); + const dest_bits = dest_ty.floatBits(func.target.*); if (op_info.bits > 128) { return func.fail("TODO: floatFromInt for integers/floats with bitsize {d} bits", .{op_info.bits}); @@ -5114,8 +5115,8 @@ fn airFloatFromInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.emitWValue(operand); const op = buildOpcode(.{ .op = .convert, - .valtype1 = typeToValtype(dest_ty, pt), - .valtype2 = typeToValtype(op_ty, pt), + .valtype1 = typeToValtype(dest_ty, pt, func.target.*), + .valtype2 = typeToValtype(op_ty, pt, func.target.*), .signedness = op_info.signedness, }); try func.addTag(Mir.Inst.Tag.fromOpcode(op)); @@ -5131,7 +5132,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty = func.typeOfIndex(inst); const elem_ty = ty.childType(mod); - if (determineSimdStoreStrategy(ty, pt) == .direct) blk: { + if (determineSimdStoreStrategy(ty, pt, func.target.*) == .direct) blk: { switch (operand) { // when the operand lives in the linear memory section, we can directly // load and splat the value at once. 
Meaning we do not first have to load @@ -5215,7 +5216,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const elem_size = child_ty.abiSize(pt); // TODO: One of them could be by ref; handle in loop - if (isByRef(func.typeOf(extra.a), pt) or isByRef(inst_ty, pt)) { + if (isByRef(func.typeOf(extra.a), pt, func.target.*) or isByRef(inst_ty, pt, func.target.*)) { const result = try func.allocStack(inst_ty); for (0..mask_len) |index| { @@ -5291,7 +5292,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // When the element type is by reference, we must copy the entire // value. It is therefore safer to move the offset pointer and store // each value individually, instead of using store offsets. - if (isByRef(elem_ty, pt)) { + if (isByRef(elem_ty, pt, func.target.*)) { // copy stack pointer into a temporary local, which is // moved for each element to store each value in the right position. const offset = try func.buildPointerOffset(result, 0, .new); @@ -5321,7 +5322,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { }, .Struct => switch (result_ty.containerLayout(mod)) { .@"packed" => { - if (isByRef(result_ty, pt)) { + if (isByRef(result_ty, pt, func.target.*)) { return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{}); } const packed_struct = mod.typeToPackedStruct(result_ty).?; @@ -5424,15 +5425,15 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { if (layout.tag_size == 0) { break :result .none; } - assert(!isByRef(union_ty, pt)); + assert(!isByRef(union_ty, pt, func.target.*)); break :result tag_int; } - if (isByRef(union_ty, pt)) { + if (isByRef(union_ty, pt, func.target.*)) { const result_ptr = try func.allocStack(union_ty); const payload = try func.resolveInst(extra.init); if (layout.tag_align.compare(.gte, layout.payload_align)) { - if (isByRef(field_ty, pt)) { + if (isByRef(field_ty, pt, func.target.*)) { const payload_ptr = 
try func.buildPointerOffset(result_ptr, layout.tag_size, .new); try func.store(payload_ptr, payload, field_ty, 0); } else { @@ -5513,7 +5514,7 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: _ = try func.load(lhs, payload_ty, 0); _ = try func.load(rhs, payload_ty, 0); - const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, pt) }); + const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, pt, func.target.*) }); try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); try func.addLabel(.br_if, 0); @@ -5630,8 +5631,8 @@ fn airFpext(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { /// Extends a float from a given `Type` to a larger wanted `Type` /// NOTE: Leaves the result on the stack fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue { - const given_bits = given.floatBits(func.target); - const wanted_bits = wanted.floatBits(func.target); + const given_bits = given.floatBits(func.target.*); + const wanted_bits = wanted.floatBits(func.target.*); if (wanted_bits == 64 and given_bits == 32) { try func.emitWValue(operand); @@ -5674,8 +5675,8 @@ fn airFptrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { /// Truncates a float from a given `Type` to its wanted `Type` /// NOTE: The result value remains on the stack fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue { - const given_bits = given.floatBits(func.target); - const wanted_bits = wanted.floatBits(func.target); + const given_bits = given.floatBits(func.target.*); + const wanted_bits = wanted.floatBits(func.target.*); if (wanted_bits == 32 and given_bits == 64) { try func.emitWValue(operand); @@ -6247,7 +6248,6 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { assert(op == .max or op == .min); const pt = func.pt; const mod = pt.zcu; - const target = mod.getTarget(); const bin_op = 
func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ty = func.typeOfIndex(inst); @@ -6264,7 +6264,7 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { if (ty.zigTypeTag(mod) == .Float) { var fn_name_buf: [64]u8 = undefined; - const float_bits = ty.floatBits(target); + const float_bits = ty.floatBits(func.target.*); const fn_name = std.fmt.bufPrint(&fn_name_buf, "{s}f{s}{s}", .{ target_util.libcFloatPrefix(float_bits), @tagName(op), @@ -6300,7 +6300,7 @@ fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const result = if (ty.floatBits(func.target) == 16) fl_result: { + const result = if (ty.floatBits(func.target.*) == 16) fl_result: { const rhs_ext = try func.fpext(rhs, ty, Type.f32); const lhs_ext = try func.fpext(lhs, ty, Type.f32); const addend_ext = try func.fpext(addend, ty, Type.f32); @@ -6457,8 +6457,6 @@ fn airDbgInlineBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) InnerError!void { if (func.debug_output != .dwarf) return func.finishAir(inst, .none, &.{}); - const pt = func.pt; - const mod = pt.zcu; const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const ty = func.typeOf(pl_op.operand); const operand = try func.resolveInst(pl_op.operand); @@ -6468,14 +6466,14 @@ fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) InnerError!void const name = func.air.nullTerminatedString(pl_op.payload); log.debug(" var name = ({s})", .{name}); - const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (operand) { + const loc: link.File.Dwarf.NavState.DbgInfoLoc = switch (operand) { .local => |local| .{ .wasm_local = local.value }, else => blk: { log.debug("TODO generate debug info for {}", .{operand}); break :blk .nop; }, }; - try func.debug_output.dwarf.genVarDbgInfo(name, ty, 
mod.funcOwnerDeclIndex(func.func_index), is_ptr, loc); + try func.debug_output.dwarf.genVarDbgInfo(name, ty, func.owner_nav, is_ptr, loc); return func.finishAir(inst, .none, &.{}); } @@ -6552,7 +6550,7 @@ fn lowerTry( } const pl_offset: u32 = @intCast(errUnionPayloadOffset(pl_ty, pt)); - if (isByRef(pl_ty, pt)) { + if (isByRef(pl_ty, pt, func.target.*)) { return buildPointerOffset(func, err_union, pl_offset, .new); } const payload = try func.load(err_union, pl_ty, pl_offset); @@ -6712,7 +6710,7 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { _ = try func.wrapOperand(.stack, ty); } } else { - const float_bits = ty.floatBits(func.target); + const float_bits = ty.floatBits(func.target.*); if (float_bits > 64) { return func.fail("TODO: `@divFloor` for floats with bitsize: {d}", .{float_bits}); } @@ -7126,12 +7124,12 @@ fn callIntrinsic( // Always pass over C-ABI const pt = func.pt; const mod = pt.zcu; - var func_type = try genFunctype(func.gpa, .C, param_types, return_type, pt); + var func_type = try genFunctype(func.gpa, .C, param_types, return_type, pt, func.target.*); defer func_type.deinit(func.gpa); const func_type_index = try func.bin_file.zigObjectPtr().?.putOrGetFuncType(func.gpa, func_type); try func.bin_file.addOrUpdateImport(name, symbol_index, null, func_type_index); - const want_sret_param = firstParamSRet(.C, return_type, pt); + const want_sret_param = firstParamSRet(.C, return_type, pt, func.target.*); // if we want return as first param, we allocate a pointer to stack, // and emit it as our first argument const sret = if (want_sret_param) blk: { @@ -7181,14 +7179,12 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { const pt = func.pt; const mod = pt.zcu; const ip = &mod.intern_pool; - const enum_decl_index = enum_ty.getOwnerDecl(mod); var arena_allocator = std.heap.ArenaAllocator.init(func.gpa); defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); - const decl = 
mod.declPtr(enum_decl_index); - const func_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{}", .{decl.fqn.fmt(ip)}); + const func_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{}", .{ip.loadEnumType(enum_ty.toIntern()).name.fmt(ip)}); // check if we already generated code for this. if (func.bin_file.findGlobalSymbol(func_name)) |loc| { @@ -7232,11 +7228,13 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { .ty = name_ty.toIntern(), .storage = .{ .bytes = tag_name.toString() }, } }); - const tag_sym_index = try func.bin_file.lowerUnnamedConst( - pt, - Value.fromInterned(name_val), - enum_decl_index, - ); + const tag_sym_index = switch (try func.bin_file.lowerUav(pt, name_val, .none, func.src_loc)) { + .mcv => |mcv| mcv.load_symbol, + .fail => |err_msg| { + func.err_msg = err_msg; + return error.CodegenFail; + }, + }; // block for this if case try writer.writeByte(std.wasm.opcode(.block)); @@ -7333,7 +7331,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { try writer.writeByte(std.wasm.opcode(.end)); const slice_ty = Type.slice_const_u8_sentinel_0; - const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty.ip_index}, slice_ty, pt); + const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty.ip_index}, slice_ty, pt, func.target.*); const sym_index = try func.bin_file.createFunction(func_name, func_type, &body_list, &relocs); return @intFromEnum(sym_index); } @@ -7477,7 +7475,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :val ptr_val; }; - const result = if (isByRef(result_ty, pt)) val: { + const result = if (isByRef(result_ty, pt, func.target.*)) val: { try func.emitWValue(cmp_result); try func.addImm32(~@as(u32, 0)); try func.addTag(.i32_xor); @@ -7706,8 +7704,7 @@ fn airFence(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // Only when the atomic feature is enabled, and we're not building // for a single-threaded build, can we emit the 
`fence` instruction. // In all other cases, we emit no instructions for a fence. - const func_namespace = zcu.namespacePtr(func.decl.src_namespace); - const single_threaded = func_namespace.fileScope(zcu).mod.single_threaded; + const single_threaded = zcu.navFileScope(func.owner_nav).mod.single_threaded; if (func.useAtomicFeature() and !single_threaded) { try func.addAtomicTag(.atomic_fence); } diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig index 73ef723345cf..d795e2afa681 100644 --- a/src/arch/wasm/Emit.zig +++ b/src/arch/wasm/Emit.zig @@ -22,7 +22,7 @@ code: *std.ArrayList(u8), /// List of allocated locals. locals: []const u8, /// The declaration that code is being generated for. -decl_index: InternPool.DeclIndex, +owner_nav: InternPool.Nav.Index, // Debug information /// Holds the debug information for this emission @@ -257,7 +257,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { const comp = emit.bin_file.base.comp; const zcu = comp.module.?; const gpa = comp.gpa; - emit.error_msg = try Zcu.ErrorMsg.create(gpa, zcu.declPtr(emit.decl_index).navSrcLoc(zcu), format, args); + emit.error_msg = try Zcu.ErrorMsg.create(gpa, zcu.navSrcLoc(emit.owner_nav), format, args); return error.EmitFail; } @@ -310,7 +310,7 @@ fn emitGlobal(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) !void { const global_offset = emit.offset(); try emit.code.appendSlice(&buf); - const atom_index = emit.bin_file.zigObjectPtr().?.decls_map.get(emit.decl_index).?.atom; + const atom_index = emit.bin_file.zigObjectPtr().?.navs.get(emit.owner_nav).?.atom; const atom = emit.bin_file.getAtomPtr(atom_index); try atom.relocs.append(gpa, .{ .index = label, @@ -370,7 +370,7 @@ fn emitCall(emit: *Emit, inst: Mir.Inst.Index) !void { try emit.code.appendSlice(&buf); if (label != 0) { - const atom_index = emit.bin_file.zigObjectPtr().?.decls_map.get(emit.decl_index).?.atom; + const atom_index = emit.bin_file.zigObjectPtr().?.navs.get(emit.owner_nav).?.atom; 
const atom = emit.bin_file.getAtomPtr(atom_index); try atom.relocs.append(gpa, .{ .offset = call_offset, @@ -390,7 +390,7 @@ fn emitCallIndirect(emit: *Emit, inst: Mir.Inst.Index) !void { leb128.writeUnsignedFixed(5, &buf, type_index); try emit.code.appendSlice(&buf); if (type_index != 0) { - const atom_index = emit.bin_file.zigObjectPtr().?.decls_map.get(emit.decl_index).?.atom; + const atom_index = emit.bin_file.zigObjectPtr().?.navs.get(emit.owner_nav).?.atom; const atom = emit.bin_file.getAtomPtr(atom_index); try atom.relocs.append(emit.bin_file.base.comp.gpa, .{ .offset = call_offset, @@ -412,7 +412,7 @@ fn emitFunctionIndex(emit: *Emit, inst: Mir.Inst.Index) !void { try emit.code.appendSlice(&buf); if (symbol_index != 0) { - const atom_index = emit.bin_file.zigObjectPtr().?.decls_map.get(emit.decl_index).?.atom; + const atom_index = emit.bin_file.zigObjectPtr().?.navs.get(emit.owner_nav).?.atom; const atom = emit.bin_file.getAtomPtr(atom_index); try atom.relocs.append(gpa, .{ .offset = index_offset, @@ -443,7 +443,7 @@ fn emitMemAddress(emit: *Emit, inst: Mir.Inst.Index) !void { } if (mem.pointer != 0) { - const atom_index = emit.bin_file.zigObjectPtr().?.decls_map.get(emit.decl_index).?.atom; + const atom_index = emit.bin_file.zigObjectPtr().?.navs.get(emit.owner_nav).?.atom; const atom = emit.bin_file.getAtomPtr(atom_index); try atom.relocs.append(gpa, .{ .offset = mem_offset, diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 924de9d25af1..a36bd981bcc8 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -116,48 +116,36 @@ const RegisterOffset = struct { reg: Register, off: i32 = 0 }; const SymbolOffset = struct { sym: u32, off: i32 = 0 }; const Owner = union(enum) { - func_index: InternPool.Index, + nav_index: InternPool.Nav.Index, lazy_sym: link.File.LazySymbol, - fn getDecl(owner: Owner, zcu: *Zcu) InternPool.DeclIndex { - return switch (owner) { - .func_index => |func_index| 
zcu.funcOwnerDeclIndex(func_index), - .lazy_sym => |lazy_sym| lazy_sym.ty.getOwnerDecl(zcu), - }; - } - fn getSymbolIndex(owner: Owner, ctx: *Self) !u32 { const pt = ctx.pt; switch (owner) { - .func_index => |func_index| { - const decl_index = ctx.pt.zcu.funcOwnerDeclIndex(func_index); - if (ctx.bin_file.cast(link.File.Elf)) |elf_file| { - return elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, decl_index); - } else if (ctx.bin_file.cast(link.File.MachO)) |macho_file| { - return macho_file.getZigObject().?.getOrCreateMetadataForDecl(macho_file, decl_index); - } else if (ctx.bin_file.cast(link.File.Coff)) |coff_file| { - const atom = try coff_file.getOrCreateAtomForDecl(decl_index); - return coff_file.getAtom(atom).getSymbolIndex().?; - } else if (ctx.bin_file.cast(link.File.Plan9)) |p9_file| { - return p9_file.seeDecl(decl_index); - } else unreachable; - }, - .lazy_sym => |lazy_sym| { - if (ctx.bin_file.cast(link.File.Elf)) |elf_file| { - return elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, pt, lazy_sym) catch |err| - ctx.fail("{s} creating lazy symbol", .{@errorName(err)}); - } else if (ctx.bin_file.cast(link.File.MachO)) |macho_file| { - return macho_file.getZigObject().?.getOrCreateMetadataForLazySymbol(macho_file, pt, lazy_sym) catch |err| - ctx.fail("{s} creating lazy symbol", .{@errorName(err)}); - } else if (ctx.bin_file.cast(link.File.Coff)) |coff_file| { - const atom = coff_file.getOrCreateAtomForLazySymbol(pt, lazy_sym) catch |err| - return ctx.fail("{s} creating lazy symbol", .{@errorName(err)}); - return coff_file.getAtom(atom).getSymbolIndex().?; - } else if (ctx.bin_file.cast(link.File.Plan9)) |p9_file| { - return p9_file.getOrCreateAtomForLazySymbol(pt, lazy_sym) catch |err| - return ctx.fail("{s} creating lazy symbol", .{@errorName(err)}); - } else unreachable; - }, + .nav_index => |nav_index| if (ctx.bin_file.cast(.elf)) |elf_file| { + return elf_file.zigObjectPtr().?.getOrCreateMetadataForNav(elf_file, 
nav_index); + } else if (ctx.bin_file.cast(.macho)) |macho_file| { + return macho_file.getZigObject().?.getOrCreateMetadataForNav(macho_file, nav_index); + } else if (ctx.bin_file.cast(.coff)) |coff_file| { + const atom = try coff_file.getOrCreateAtomForNav(nav_index); + return coff_file.getAtom(atom).getSymbolIndex().?; + } else if (ctx.bin_file.cast(.plan9)) |p9_file| { + return p9_file.seeNav(pt, nav_index); + } else unreachable, + .lazy_sym => |lazy_sym| if (ctx.bin_file.cast(.elf)) |elf_file| { + return elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, pt, lazy_sym) catch |err| + ctx.fail("{s} creating lazy symbol", .{@errorName(err)}); + } else if (ctx.bin_file.cast(.macho)) |macho_file| { + return macho_file.getZigObject().?.getOrCreateMetadataForLazySymbol(macho_file, pt, lazy_sym) catch |err| + ctx.fail("{s} creating lazy symbol", .{@errorName(err)}); + } else if (ctx.bin_file.cast(.coff)) |coff_file| { + const atom = coff_file.getOrCreateAtomForLazySymbol(pt, lazy_sym) catch |err| + return ctx.fail("{s} creating lazy symbol", .{@errorName(err)}); + return coff_file.getAtom(atom).getSymbolIndex().?; + } else if (ctx.bin_file.cast(.plan9)) |p9_file| { + return p9_file.getOrCreateAtomForLazySymbol(pt, lazy_sym) catch |err| + return ctx.fail("{s} creating lazy symbol", .{@errorName(err)}); + } else unreachable, } } }; @@ -803,14 +791,12 @@ pub fn generate( debug_output: DebugInfoOutput, ) CodeGenError!Result { const zcu = pt.zcu; - const gpa = zcu.gpa; const comp = zcu.comp; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; const func = zcu.funcInfo(func_index); - const fn_owner_decl = zcu.declPtr(func.owner_decl); - assert(fn_owner_decl.has_tv); - const fn_type = fn_owner_decl.typeOf(zcu); - const namespace = zcu.namespacePtr(fn_owner_decl.src_namespace); - const mod = namespace.fileScope(zcu).mod; + const fn_type = Type.fromInterned(func.ty); + const mod = zcu.navFileScope(func.owner_nav).mod; var function: Self = .{ .gpa = gpa, @@ 
-821,7 +807,7 @@ pub fn generate( .mod = mod, .bin_file = bin_file, .debug_output = debug_output, - .owner = .{ .func_index = func_index }, + .owner = .{ .nav_index = func.owner_nav }, .inline_func = func_index, .err_msg = null, .args = undefined, // populated after `resolveCallingConventionValues` @@ -847,9 +833,7 @@ pub fn generate( function.mir_extra.deinit(gpa); } - wip_mir_log.debug("{}:", .{function.fmtDecl(func.owner_decl)}); - - const ip = &zcu.intern_pool; + wip_mir_log.debug("{}:", .{fmtNav(func.owner_nav, ip)}); try function.frame_allocs.resize(gpa, FrameIndex.named_count); function.frame_allocs.set( @@ -1067,22 +1051,22 @@ pub fn generateLazy( } } -const FormatDeclData = struct { - zcu: *Zcu, - decl_index: InternPool.DeclIndex, +const FormatNavData = struct { + ip: *const InternPool, + nav_index: InternPool.Nav.Index, }; -fn formatDecl( - data: FormatDeclData, +fn formatNav( + data: FormatNavData, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - try writer.print("{}", .{data.zcu.declPtr(data.decl_index).fqn.fmt(&data.zcu.intern_pool)}); + try writer.print("{}", .{data.ip.getNav(data.nav_index).fqn.fmt(data.ip)}); } -fn fmtDecl(self: *Self, decl_index: InternPool.DeclIndex) std.fmt.Formatter(formatDecl) { +fn fmtNav(nav_index: InternPool.Nav.Index, ip: *const InternPool) std.fmt.Formatter(formatNav) { return .{ .data = .{ - .zcu = self.pt.zcu, - .decl_index = decl_index, + .ip = ip, + .nav_index = nav_index, } }; } @@ -2230,9 +2214,9 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { const pt = self.pt; const mod = pt.zcu; const ip = &mod.intern_pool; - switch (lazy_sym.ty.zigTypeTag(mod)) { + switch (Type.fromInterned(lazy_sym.ty).zigTypeTag(mod)) { .Enum => { - const enum_ty = lazy_sym.ty; + const enum_ty = Type.fromInterned(lazy_sym.ty); wip_mir_log.debug("{}.@tagName:", .{enum_ty.fmt(pt)}); const resolved_cc = abi.resolveCallingConvention(.Unspecified, self.target.*); @@ 
-2249,7 +2233,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { const data_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp); const data_lock = self.register_manager.lockRegAssumeUnused(data_reg); defer self.register_manager.unlockReg(data_lock); - try self.genLazySymbolRef(.lea, data_reg, .{ .kind = .const_data, .ty = enum_ty }); + try self.genLazySymbolRef(.lea, data_reg, .{ .kind = .const_data, .ty = enum_ty.toIntern() }); var data_off: i32 = 0; const tag_names = enum_ty.enumFields(mod); @@ -2288,7 +2272,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { }, else => return self.fail( "TODO implement {s} for {}", - .{ @tagName(lazy_sym.kind), lazy_sym.ty.fmt(pt) }, + .{ @tagName(lazy_sym.kind), Type.fromInterned(lazy_sym.ty).fmt(pt) }, ), } } @@ -11932,11 +11916,9 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { } fn genArgDbgInfo(self: Self, ty: Type, name: [:0]const u8, mcv: MCValue) !void { - const pt = self.pt; - const mod = pt.zcu; switch (self.debug_output) { .dwarf => |dw| { - const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (mcv) { + const loc: link.File.Dwarf.NavState.DbgInfoLoc = switch (mcv) { .register => |reg| .{ .register = reg.dwarfNum() }, .register_pair => |regs| .{ .register_pair = .{ regs[0].dwarfNum(), regs[1].dwarfNum(), @@ -11955,7 +11937,7 @@ fn genArgDbgInfo(self: Self, ty: Type, name: [:0]const u8, mcv: MCValue) !void { // TODO: this might need adjusting like the linkers do. // Instead of flattening the owner and passing Decl.Index here we may // want to special case LazySymbol in DWARF linker too. 
- try dw.genArgDbgInfo(name, ty, self.owner.getDecl(mod), loc); + try dw.genArgDbgInfo(name, ty, self.owner.nav_index, loc); }, .plan9 => {}, .none => {}, @@ -11969,8 +11951,6 @@ fn genVarDbgInfo( mcv: MCValue, name: [:0]const u8, ) !void { - const pt = self.pt; - const mod = pt.zcu; const is_ptr = switch (tag) { .dbg_var_ptr => true, .dbg_var_val => false, @@ -11979,7 +11959,7 @@ fn genVarDbgInfo( switch (self.debug_output) { .dwarf => |dw| { - const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (mcv) { + const loc: link.File.Dwarf.NavState.DbgInfoLoc = switch (mcv) { .register => |reg| .{ .register = reg.dwarfNum() }, // TODO use a frame index .load_frame, .lea_frame => return, @@ -12007,7 +11987,7 @@ fn genVarDbgInfo( // TODO: this might need adjusting like the linkers do. // Instead of flattening the owner and passing Decl.Index here we may // want to special case LazySymbol in DWARF linker too. - try dw.genVarDbgInfo(name, ty, self.owner.getDecl(mod), is_ptr, loc); + try dw.genVarDbgInfo(name, ty, self.owner.nav_index, is_ptr, loc); }, .plan9 => {}, .none => {}, @@ -12090,14 +12070,15 @@ fn genCall(self: *Self, info: union(enum) { }, }, arg_types: []const Type, args: []const MCValue) !MCValue { const pt = self.pt; - const mod = pt.zcu; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; const fn_ty = switch (info) { .air => |callee| fn_info: { const callee_ty = self.typeOf(callee); - break :fn_info switch (callee_ty.zigTypeTag(mod)) { + break :fn_info switch (callee_ty.zigTypeTag(zcu)) { .Fn => callee_ty, - .Pointer => callee_ty.childType(mod), + .Pointer => callee_ty.childType(zcu), else => unreachable, }; }, @@ -12107,7 +12088,7 @@ fn genCall(self: *Self, info: union(enum) { .cc = .C, }), }; - const fn_info = mod.typeToFunc(fn_ty).?; + const fn_info = zcu.typeToFunc(fn_ty).?; const resolved_cc = abi.resolveCallingConvention(fn_info.cc, self.target.*); const ExpectedContents = extern struct { @@ -12225,7 +12206,7 @@ fn genCall(self: *Self, info: 
union(enum) { try self.asmRegisterImmediate( .{ ._, .cmp }, index_reg.to32(), - Immediate.u(arg_ty.vectorLen(mod)), + Immediate.u(arg_ty.vectorLen(zcu)), ); _ = try self.asmJccReloc(.b, loop); @@ -12317,18 +12298,18 @@ fn genCall(self: *Self, info: union(enum) { // on linking. switch (info) { .air => |callee| if (try self.air.value(callee, pt)) |func_value| { - const func_key = mod.intern_pool.indexToKey(func_value.ip_index); + const func_key = ip.indexToKey(func_value.ip_index); switch (switch (func_key) { else => func_key, .ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) { - .decl => |decl| mod.intern_pool.indexToKey(mod.declPtr(decl).val.toIntern()), + .nav => |nav| ip.indexToKey(zcu.navValue(nav).toIntern()), else => func_key, } else func_key, }) { .func => |func| { - if (self.bin_file.cast(link.File.Elf)) |elf_file| { + if (self.bin_file.cast(.elf)) |elf_file| { const zo = elf_file.zigObjectPtr().?; - const sym_index = try zo.getOrCreateMetadataForDecl(elf_file, func.owner_decl); + const sym_index = try zo.getOrCreateMetadataForNav(elf_file, func.owner_nav); if (self.mod.pic) { const callee_reg: Register = switch (resolved_cc) { .SysV => callee: { @@ -12356,14 +12337,14 @@ fn genCall(self: *Self, info: union(enum) { } }, .mod = .{ .rm = .{ .size = .qword } }, }); - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { - const atom = try coff_file.getOrCreateAtomForDecl(func.owner_decl); + } else if (self.bin_file.cast(.coff)) |coff_file| { + const atom = try coff_file.getOrCreateAtomForNav(func.owner_nav); const sym_index = coff_file.getAtom(atom).getSymbolIndex().?; try self.genSetReg(.rax, Type.usize, .{ .lea_got = sym_index }, .{}); try self.asmRegister(.{ ._, .call }, .rax); - } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { + } else if (self.bin_file.cast(.macho)) |macho_file| { const zo = macho_file.getZigObject().?; - const sym_index = try zo.getOrCreateMetadataForDecl(macho_file, func.owner_decl); + const sym_index = 
try zo.getOrCreateMetadataForNav(macho_file, func.owner_nav); const sym = zo.symbols.items[sym_index]; try self.genSetReg( .rax, @@ -12372,8 +12353,8 @@ fn genCall(self: *Self, info: union(enum) { .{}, ); try self.asmRegister(.{ ._, .call }, .rax); - } else if (self.bin_file.cast(link.File.Plan9)) |p9| { - const atom_index = try p9.seeDecl(func.owner_decl); + } else if (self.bin_file.cast(.plan9)) |p9| { + const atom_index = try p9.seeNav(pt, func.owner_nav); const atom = p9.getAtom(atom_index); try self.asmMemory(.{ ._, .call }, .{ .base = .{ .reg = .ds }, @@ -12384,16 +12365,15 @@ fn genCall(self: *Self, info: union(enum) { }); } else unreachable; }, - .extern_func => |extern_func| { - const owner_decl = mod.declPtr(extern_func.decl); - const lib_name = extern_func.lib_name.toSlice(&mod.intern_pool); - const decl_name = owner_decl.name.toSlice(&mod.intern_pool); - try self.genExternSymbolRef(.call, lib_name, decl_name); - }, + .@"extern" => |@"extern"| try self.genExternSymbolRef( + .call, + @"extern".lib_name.toSlice(ip), + @"extern".name.toSlice(ip), + ), else => return self.fail("TODO implement calling bitcasted functions", .{}), } } else { - assert(self.typeOf(callee).zigTypeTag(mod) == .Pointer); + assert(self.typeOf(callee).zigTypeTag(zcu) == .Pointer); try self.genSetReg(.rax, Type.usize, .{ .air_ref = callee }, .{}); try self.asmRegister(.{ ._, .call }, .rax); }, @@ -12919,13 +12899,13 @@ fn airCmpVector(self: *Self, inst: Air.Inst.Index) !void { fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void { const pt = self.pt; - const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const addr_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp); const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg); defer self.register_manager.unlockReg(addr_lock); - try self.genLazySymbolRef(.lea, addr_reg, link.File.LazySymbol.initDecl(.const_data, null, mod)); + const anyerror_lazy_sym: 
link.File.LazySymbol = .{ .kind = .const_data, .ty = .anyerror_type }; + try self.genLazySymbolRef(.lea, addr_reg, anyerror_lazy_sym); try self.spillEflagsIfOccupied(); @@ -15273,7 +15253,7 @@ fn genExternSymbolRef( callee: []const u8, ) InnerError!void { const atom_index = try self.owner.getSymbolIndex(self); - if (self.bin_file.cast(link.File.Elf)) |elf_file| { + if (self.bin_file.cast(.elf)) |elf_file| { _ = try self.addInst(.{ .tag = tag, .ops = .extern_fn_reloc, @@ -15282,7 +15262,7 @@ fn genExternSymbolRef( .sym_index = try elf_file.getGlobalSymbol(callee, lib), } }, }); - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { + } else if (self.bin_file.cast(.coff)) |coff_file| { const global_index = try coff_file.getGlobalSymbol(callee, lib); _ = try self.addInst(.{ .tag = .mov, @@ -15300,7 +15280,7 @@ fn genExternSymbolRef( .call => try self.asmRegister(.{ ._, .call }, .rax), else => unreachable, } - } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { + } else if (self.bin_file.cast(.macho)) |macho_file| { _ = try self.addInst(.{ .tag = .call, .ops = .extern_fn_reloc, @@ -15319,7 +15299,7 @@ fn genLazySymbolRef( lazy_sym: link.File.LazySymbol, ) InnerError!void { const pt = self.pt; - if (self.bin_file.cast(link.File.Elf)) |elf_file| { + if (self.bin_file.cast(.elf)) |elf_file| { const zo = elf_file.zigObjectPtr().?; const sym_index = zo.getOrCreateMetadataForLazySymbol(elf_file, pt, lazy_sym) catch |err| return self.fail("{s} creating lazy symbol", .{@errorName(err)}); @@ -15355,7 +15335,7 @@ fn genLazySymbolRef( else => unreachable, } } - } else if (self.bin_file.cast(link.File.Plan9)) |p9_file| { + } else if (self.bin_file.cast(.plan9)) |p9_file| { const atom_index = p9_file.getOrCreateAtomForLazySymbol(pt, lazy_sym) catch |err| return self.fail("{s} creating lazy symbol", .{@errorName(err)}); var atom = p9_file.getAtom(atom_index); @@ -15382,7 +15362,7 @@ fn genLazySymbolRef( ), else => unreachable, } - } else if 
(self.bin_file.cast(link.File.Coff)) |coff_file| { + } else if (self.bin_file.cast(.coff)) |coff_file| { const atom_index = coff_file.getOrCreateAtomForLazySymbol(pt, lazy_sym) catch |err| return self.fail("{s} creating lazy symbol", .{@errorName(err)}); const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?; @@ -15396,7 +15376,7 @@ fn genLazySymbolRef( .call => try self.asmRegister(.{ ._, .call }, reg), else => unreachable, } - } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { + } else if (self.bin_file.cast(.macho)) |macho_file| { const zo = macho_file.getZigObject().?; const sym_index = zo.getOrCreateMetadataForLazySymbol(macho_file, pt, lazy_sym) catch |err| return self.fail("{s} creating lazy symbol", .{@errorName(err)}); @@ -16361,7 +16341,6 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void { fn airTagName(self: *Self, inst: Air.Inst.Index) !void { const pt = self.pt; - const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const inst_ty = self.typeOfIndex(inst); const enum_ty = self.typeOf(un_op); @@ -16393,18 +16372,13 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void { const operand = try self.resolveInst(un_op); try self.genSetReg(param_regs[1], enum_ty, operand, .{}); - try self.genLazySymbolRef( - .call, - .rax, - link.File.LazySymbol.initDecl(.code, enum_ty.getOwnerDecl(mod), mod), - ); + const enum_lazy_sym: link.File.LazySymbol = .{ .kind = .code, .ty = enum_ty.toIntern() }; + try self.genLazySymbolRef(.call, .rax, enum_lazy_sym); return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none }); } fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { - const pt = self.pt; - const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const err_ty = self.typeOf(un_op); @@ -16416,7 +16390,8 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { const addr_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp); 
const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg); defer self.register_manager.unlockReg(addr_lock); - try self.genLazySymbolRef(.lea, addr_reg, link.File.LazySymbol.initDecl(.const_data, null, mod)); + const anyerror_lazy_sym: link.File.LazySymbol = .{ .kind = .const_data, .ty = .anyerror_type }; + try self.genLazySymbolRef(.lea, addr_reg, anyerror_lazy_sym); const start_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp); const start_lock = self.register_manager.lockRegAssumeUnused(start_reg); @@ -18808,7 +18783,7 @@ fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCV fn genTypedValue(self: *Self, val: Value) InnerError!MCValue { const pt = self.pt; - return switch (try codegen.genTypedValue(self.bin_file, pt, self.src_loc, val, self.owner.getDecl(pt.zcu))) { + return switch (try codegen.genTypedValue(self.bin_file, pt, self.src_loc, val, self.target.*)) { .mcv => |mcv| switch (mcv) { .none => .none, .undef => .undef, diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig index bc439bd7abc1..b6c1fbb96882 100644 --- a/src/arch/x86_64/Emit.zig +++ b/src/arch/x86_64/Emit.zig @@ -40,7 +40,7 @@ pub fn emitMir(emit: *Emit) Error!void { .offset = end_offset - 4, .length = @intCast(end_offset - start_offset), }), - .linker_extern_fn => |symbol| if (emit.lower.bin_file.cast(link.File.Elf)) |elf_file| { + .linker_extern_fn => |symbol| if (emit.lower.bin_file.cast(.elf)) |elf_file| { // Add relocation to the decl. const zo = elf_file.zigObjectPtr().?; const atom_ptr = zo.symbol(symbol.atom_index).atom(elf_file).?; @@ -50,7 +50,7 @@ pub fn emitMir(emit: *Emit) Error!void { .r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | r_type, .r_addend = -4, }); - } else if (emit.lower.bin_file.cast(link.File.MachO)) |macho_file| { + } else if (emit.lower.bin_file.cast(.macho)) |macho_file| { // Add relocation to the decl. 
const zo = macho_file.getZigObject().?; const atom = zo.symbols.items[symbol.atom_index].getAtom(macho_file).?; @@ -67,7 +67,7 @@ pub fn emitMir(emit: *Emit) Error!void { .symbolnum = @intCast(symbol.sym_index), }, }); - } else if (emit.lower.bin_file.cast(link.File.Coff)) |coff_file| { + } else if (emit.lower.bin_file.cast(.coff)) |coff_file| { // Add relocation to the decl. const atom_index = coff_file.getAtomIndexForSymbol( .{ .sym_index = symbol.atom_index, .file = null }, @@ -88,7 +88,7 @@ pub fn emitMir(emit: *Emit) Error!void { @tagName(emit.lower.bin_file.tag), }), .linker_tlsld => |data| { - const elf_file = emit.lower.bin_file.cast(link.File.Elf).?; + const elf_file = emit.lower.bin_file.cast(.elf).?; const zo = elf_file.zigObjectPtr().?; const atom = zo.symbol(data.atom_index).atom(elf_file).?; const r_type = @intFromEnum(std.elf.R_X86_64.TLSLD); @@ -99,7 +99,7 @@ pub fn emitMir(emit: *Emit) Error!void { }); }, .linker_dtpoff => |data| { - const elf_file = emit.lower.bin_file.cast(link.File.Elf).?; + const elf_file = emit.lower.bin_file.cast(.elf).?; const zo = elf_file.zigObjectPtr().?; const atom = zo.symbol(data.atom_index).atom(elf_file).?; const r_type = @intFromEnum(std.elf.R_X86_64.DTPOFF32); @@ -109,7 +109,7 @@ pub fn emitMir(emit: *Emit) Error!void { .r_addend = 0, }); }, - .linker_reloc => |data| if (emit.lower.bin_file.cast(link.File.Elf)) |elf_file| { + .linker_reloc => |data| if (emit.lower.bin_file.cast(.elf)) |elf_file| { const is_obj_or_static_lib = switch (emit.lower.output_mode) { .Exe => false, .Obj => true, @@ -157,7 +157,7 @@ pub fn emitMir(emit: *Emit) Error!void { }); } } - } else if (emit.lower.bin_file.cast(link.File.MachO)) |macho_file| { + } else if (emit.lower.bin_file.cast(.macho)) |macho_file| { const is_obj_or_static_lib = switch (emit.lower.output_mode) { .Exe => false, .Obj => true, @@ -196,11 +196,11 @@ pub fn emitMir(emit: *Emit) Error!void { .linker_got, .linker_direct, .linker_import, - => |symbol| if 
(emit.lower.bin_file.cast(link.File.Elf)) |_| { + => |symbol| if (emit.lower.bin_file.cast(.elf)) |_| { unreachable; - } else if (emit.lower.bin_file.cast(link.File.MachO)) |_| { + } else if (emit.lower.bin_file.cast(.macho)) |_| { unreachable; - } else if (emit.lower.bin_file.cast(link.File.Coff)) |coff_file| { + } else if (emit.lower.bin_file.cast(.coff)) |coff_file| { const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = symbol.atom_index, .file = null, @@ -222,7 +222,7 @@ pub fn emitMir(emit: *Emit) Error!void { .pcrel = true, .length = 2, }); - } else if (emit.lower.bin_file.cast(link.File.Plan9)) |p9_file| { + } else if (emit.lower.bin_file.cast(.plan9)) |p9_file| { const atom_index = symbol.atom_index; try p9_file.addReloc(atom_index, .{ // TODO we may need to add a .type field to the relocs if they are .linker_got instead of just .linker_direct .target = symbol.sym_index, // we set sym_index to just be the atom index diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig index 1efcacdc2abc..11fe279dd9dd 100644 --- a/src/arch/x86_64/Lower.zig +++ b/src/arch/x86_64/Lower.zig @@ -348,7 +348,7 @@ fn emit(lower: *Lower, prefix: Prefix, mnemonic: Mnemonic, ops: []const Operand) assert(mem_op.sib.disp == 0); assert(mem_op.sib.scale_index.scale == 0); - if (lower.bin_file.cast(link.File.Elf)) |elf_file| { + if (lower.bin_file.cast(.elf)) |elf_file| { const zo = elf_file.zigObjectPtr().?; const elf_sym = zo.symbol(sym.sym_index); @@ -424,7 +424,7 @@ fn emit(lower: *Lower, prefix: Prefix, mnemonic: Mnemonic, ops: []const Operand) }, else => unreachable, }; - } else if (lower.bin_file.cast(link.File.MachO)) |macho_file| { + } else if (lower.bin_file.cast(.macho)) |macho_file| { const zo = macho_file.getZigObject().?; const macho_sym = zo.symbols.items[sym.sym_index]; diff --git a/src/codegen.zig b/src/codegen.zig index 50688151ed73..f2fa60fdf8be 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -17,7 +17,7 @@ const ErrorMsg = Zcu.ErrorMsg; 
const InternPool = @import("InternPool.zig"); const Liveness = @import("Liveness.zig"); const Zcu = @import("Zcu.zig"); -const Target = std.Target; + const Type = @import("Type.zig"); const Value = @import("Value.zig"); const Zir = std.zig.Zir; @@ -26,7 +26,7 @@ const dev = @import("dev.zig"); pub const Result = union(enum) { /// The `code` parameter passed to `generateSymbol` has the value ok. - ok: void, + ok, /// There was a codegen error. fail: *ErrorMsg, @@ -39,7 +39,7 @@ pub const CodeGenError = error{ }; pub const DebugInfoOutput = union(enum) { - dwarf: *link.File.Dwarf.DeclState, + dwarf: *link.File.Dwarf.NavState, plan9: *link.File.Plan9.DebugInfoOutput, none, }; @@ -73,9 +73,7 @@ pub fn generateFunction( ) CodeGenError!Result { const zcu = pt.zcu; const func = zcu.funcInfo(func_index); - const decl = zcu.declPtr(func.owner_decl); - const namespace = zcu.namespacePtr(decl.src_namespace); - const target = namespace.fileScope(zcu).mod.resolved_target.result; + const target = zcu.navFileScope(func.owner_nav).mod.resolved_target.result; switch (target_util.zigBackend(target, false)) { else => unreachable, inline .stage2_aarch64, @@ -100,10 +98,8 @@ pub fn generateLazyFunction( debug_output: DebugInfoOutput, ) CodeGenError!Result { const zcu = pt.zcu; - const decl_index = lazy_sym.ty.getOwnerDecl(zcu); - const decl = zcu.declPtr(decl_index); - const namespace = zcu.namespacePtr(decl.src_namespace); - const target = namespace.fileScope(zcu).mod.resolved_target.result; + const file = Type.fromInterned(lazy_sym.ty).typeDeclInstAllowGeneratedTag(zcu).?.resolveFull(&zcu.intern_pool).file; + const target = zcu.fileByIndex(file).mod.resolved_target.result; switch (target_util.zigBackend(target, false)) { else => unreachable, inline .stage2_x86_64, @@ -115,7 +111,7 @@ pub fn generateLazyFunction( } } -fn writeFloat(comptime F: type, f: F, target: Target, endian: std.builtin.Endian, code: []u8) void { +fn writeFloat(comptime F: type, f: F, target: std.Target, endian: 
std.builtin.Endian, code: []u8) void { _ = target; const bits = @typeInfo(F).Float.bits; const Int = @Type(.{ .Int = .{ .signedness = .unsigned, .bits = bits } }); @@ -147,7 +143,7 @@ pub fn generateLazySymbol( log.debug("generateLazySymbol: kind = {s}, ty = {}", .{ @tagName(lazy_sym.kind), - lazy_sym.ty.fmt(pt), + Type.fromInterned(lazy_sym.ty).fmt(pt), }); if (lazy_sym.kind == .code) { @@ -155,7 +151,7 @@ pub fn generateLazySymbol( return generateLazyFunction(bin_file, pt, src_loc, lazy_sym, code, debug_output); } - if (lazy_sym.ty.isAnyError(pt.zcu)) { + if (lazy_sym.ty == .anyerror_type) { alignment.* = .@"4"; const err_names = ip.global_error_set.getNamesFromMainThread(); mem.writeInt(u32, try code.addManyAsArray(4), @intCast(err_names.len), endian); @@ -171,9 +167,10 @@ pub fn generateLazySymbol( } mem.writeInt(u32, code.items[offset..][0..4], @intCast(code.items.len), endian); return Result.ok; - } else if (lazy_sym.ty.zigTypeTag(pt.zcu) == .Enum) { + } else if (Type.fromInterned(lazy_sym.ty).zigTypeTag(pt.zcu) == .Enum) { alignment.* = .@"1"; - const tag_names = lazy_sym.ty.enumFields(pt.zcu); + const enum_ty = Type.fromInterned(lazy_sym.ty); + const tag_names = enum_ty.enumFields(pt.zcu); for (0..tag_names.len) |tag_index| { const tag_name = tag_names.get(ip)[tag_index].toSlice(ip); try code.ensureUnusedCapacity(tag_name.len + 1); @@ -185,7 +182,7 @@ pub fn generateLazySymbol( gpa, src_loc, "TODO implement generateLazySymbol for {s} {}", - .{ @tagName(lazy_sym.kind), lazy_sym.ty.fmt(pt) }, + .{ @tagName(lazy_sym.kind), Type.fromInterned(lazy_sym.ty).fmt(pt) }, ) }; } @@ -251,7 +248,7 @@ pub fn generateSymbol( }), }, .variable, - .extern_func, + .@"extern", .func, .enum_literal, .empty_enum_value, @@ -651,8 +648,8 @@ fn lowerPtr( const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr; const offset: u64 = prev_offset + ptr.byte_offset; return switch (ptr.base_addr) { - .decl => |decl| try lowerDeclRef(bin_file, pt, src_loc, decl, code, debug_output, reloc_info, 
offset), - .anon_decl => |ad| try lowerAnonDeclRef(bin_file, pt, src_loc, ad, code, debug_output, reloc_info, offset), + .nav => |nav| try lowerNavRef(bin_file, pt, src_loc, nav, code, debug_output, reloc_info, offset), + .uav => |uav| try lowerUavRef(bin_file, pt, src_loc, uav, code, debug_output, reloc_info, offset), .int => try generateSymbol(bin_file, pt, src_loc, try pt.intValue(Type.usize, offset), code, debug_output, reloc_info), .eu_payload => |eu_ptr| try lowerPtr( bin_file, @@ -705,11 +702,11 @@ const RelocInfo = struct { parent_atom_index: u32, }; -fn lowerAnonDeclRef( +fn lowerUavRef( lf: *link.File, pt: Zcu.PerThread, src_loc: Zcu.LazySrcLoc, - anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl, + uav: InternPool.Key.Ptr.BaseAddr.Uav, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, reloc_info: RelocInfo, @@ -720,23 +717,23 @@ fn lowerAnonDeclRef( const target = lf.comp.root_mod.resolved_target.result; const ptr_width_bytes = @divExact(target.ptrBitWidth(), 8); - const decl_val = anon_decl.val; - const decl_ty = Type.fromInterned(ip.typeOf(decl_val)); - log.debug("lowerAnonDecl: ty = {}", .{decl_ty.fmt(pt)}); - const is_fn_body = decl_ty.zigTypeTag(pt.zcu) == .Fn; - if (!is_fn_body and !decl_ty.hasRuntimeBits(pt)) { + const uav_val = uav.val; + const uav_ty = Type.fromInterned(ip.typeOf(uav_val)); + log.debug("lowerUavRef: ty = {}", .{uav_ty.fmt(pt)}); + const is_fn_body = uav_ty.zigTypeTag(pt.zcu) == .Fn; + if (!is_fn_body and !uav_ty.hasRuntimeBits(pt)) { try code.appendNTimes(0xaa, ptr_width_bytes); return Result.ok; } - const decl_align = ip.indexToKey(anon_decl.orig_ty).ptr_type.flags.alignment; - const res = try lf.lowerAnonDecl(pt, decl_val, decl_align, src_loc); + const uav_align = ip.indexToKey(uav.orig_ty).ptr_type.flags.alignment; + const res = try lf.lowerUav(pt, uav_val, uav_align, src_loc); switch (res) { - .ok => {}, + .mcv => {}, .fail => |em| return .{ .fail = em }, } - const vaddr = try lf.getAnonDeclVAddr(decl_val, .{ + const 
vaddr = try lf.getUavVAddr(uav_val, .{ .parent_atom_index = reloc_info.parent_atom_index, .offset = code.items.len, .addend = @intCast(offset), @@ -752,11 +749,11 @@ fn lowerAnonDeclRef( return Result.ok; } -fn lowerDeclRef( +fn lowerNavRef( lf: *link.File, pt: Zcu.PerThread, src_loc: Zcu.LazySrcLoc, - decl_index: InternPool.DeclIndex, + nav_index: InternPool.Nav.Index, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, reloc_info: RelocInfo, @@ -765,18 +762,18 @@ fn lowerDeclRef( _ = src_loc; _ = debug_output; const zcu = pt.zcu; - const decl = zcu.declPtr(decl_index); - const namespace = zcu.namespacePtr(decl.src_namespace); - const target = namespace.fileScope(zcu).mod.resolved_target.result; + const ip = &zcu.intern_pool; + const target = zcu.navFileScope(nav_index).mod.resolved_target.result; const ptr_width = target.ptrBitWidth(); - const is_fn_body = decl.typeOf(zcu).zigTypeTag(zcu) == .Fn; - if (!is_fn_body and !decl.typeOf(zcu).hasRuntimeBits(pt)) { + const nav_ty = Type.fromInterned(ip.getNav(nav_index).typeOf(ip)); + const is_fn_body = nav_ty.zigTypeTag(zcu) == .Fn; + if (!is_fn_body and !nav_ty.hasRuntimeBits(pt)) { try code.appendNTimes(0xaa, @divExact(ptr_width, 8)); return Result.ok; } - const vaddr = try lf.getDeclVAddr(pt, decl_index, .{ + const vaddr = try lf.getNavVAddr(pt, nav_index, .{ .parent_atom_index = reloc_info.parent_atom_index, .offset = code.items.len, .addend = @intCast(offset), @@ -848,34 +845,21 @@ pub const GenResult = union(enum) { } }; -fn genDeclRef( +fn genNavRef( lf: *link.File, pt: Zcu.PerThread, src_loc: Zcu.LazySrcLoc, val: Value, - ptr_decl_index: InternPool.DeclIndex, + ref_nav_index: InternPool.Nav.Index, + target: std.Target, ) CodeGenError!GenResult { const zcu = pt.zcu; const ip = &zcu.intern_pool; const ty = val.typeOf(zcu); - log.debug("genDeclRef: val = {}", .{val.fmtValue(pt)}); - - const ptr_decl = zcu.declPtr(ptr_decl_index); - const namespace = zcu.namespacePtr(ptr_decl.src_namespace); - const target = 
namespace.fileScope(zcu).mod.resolved_target.result; - - const ptr_bits = target.ptrBitWidth(); - const ptr_bytes: u64 = @divExact(ptr_bits, 8); - - const decl_index = switch (ip.indexToKey(ptr_decl.val.toIntern())) { - .func => |func| func.owner_decl, - .extern_func => |extern_func| extern_func.decl, - else => ptr_decl_index, - }; - const decl = zcu.declPtr(decl_index); + log.debug("genNavRef: val = {}", .{val.fmtValue(pt)}); - if (!decl.typeOf(zcu).isFnOrHasRuntimeBitsIgnoreComptime(pt)) { - const imm: u64 = switch (ptr_bytes) { + if (!ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) { + const imm: u64 = switch (@divExact(target.ptrBitWidth(), 8)) { 1 => 0xaa, 2 => 0xaaaa, 4 => 0xaaaaaaaa, @@ -900,96 +884,56 @@ fn genDeclRef( } } - const decl_namespace = zcu.namespacePtr(decl.src_namespace); - const single_threaded = decl_namespace.fileScope(zcu).mod.single_threaded; - const is_threadlocal = val.isPtrToThreadLocal(zcu) and !single_threaded; - const is_extern = decl.isExtern(zcu); - - if (lf.cast(link.File.Elf)) |elf_file| { + const nav_index, const is_extern, const lib_name, const is_threadlocal = switch (ip.indexToKey(zcu.navValue(ref_nav_index).toIntern())) { + .func => |func| .{ func.owner_nav, false, .none, false }, + .variable => |variable| .{ variable.owner_nav, false, variable.lib_name, variable.is_threadlocal }, + .@"extern" => |@"extern"| .{ @"extern".owner_nav, true, @"extern".lib_name, @"extern".is_threadlocal }, + else => .{ ref_nav_index, false, .none, false }, + }; + const single_threaded = zcu.navFileScope(nav_index).mod.single_threaded; + const name = ip.getNav(nav_index).name; + if (lf.cast(.elf)) |elf_file| { const zo = elf_file.zigObjectPtr().?; if (is_extern) { - const name = decl.name.toSlice(ip); // TODO audit this - const lib_name = if (decl.getOwnedVariable(zcu)) |ov| ov.lib_name.toSlice(ip) else null; - const sym_index = try elf_file.getGlobalSymbol(name, lib_name); + const sym_index = try elf_file.getGlobalSymbol(name.toSlice(ip), 
lib_name.toSlice(ip)); zo.symbol(sym_index).flags.needs_got = true; return GenResult.mcv(.{ .load_symbol = sym_index }); } - const sym_index = try zo.getOrCreateMetadataForDecl(elf_file, decl_index); - if (is_threadlocal) { + const sym_index = try zo.getOrCreateMetadataForNav(elf_file, nav_index); + if (!single_threaded and is_threadlocal) { return GenResult.mcv(.{ .load_tlv = sym_index }); } return GenResult.mcv(.{ .load_symbol = sym_index }); - } else if (lf.cast(link.File.MachO)) |macho_file| { + } else if (lf.cast(.macho)) |macho_file| { const zo = macho_file.getZigObject().?; if (is_extern) { - const name = decl.name.toSlice(ip); - const lib_name = if (decl.getOwnedVariable(zcu)) |ov| ov.lib_name.toSlice(ip) else null; - const sym_index = try macho_file.getGlobalSymbol(name, lib_name); + const sym_index = try macho_file.getGlobalSymbol(name.toSlice(ip), lib_name.toSlice(ip)); zo.symbols.items[sym_index].setSectionFlags(.{ .needs_got = true }); return GenResult.mcv(.{ .load_symbol = sym_index }); } - const sym_index = try zo.getOrCreateMetadataForDecl(macho_file, decl_index); + const sym_index = try zo.getOrCreateMetadataForNav(macho_file, nav_index); const sym = zo.symbols.items[sym_index]; - if (is_threadlocal) { + if (!single_threaded and is_threadlocal) { return GenResult.mcv(.{ .load_tlv = sym.nlist_idx }); } return GenResult.mcv(.{ .load_symbol = sym.nlist_idx }); - } else if (lf.cast(link.File.Coff)) |coff_file| { + } else if (lf.cast(.coff)) |coff_file| { if (is_extern) { - const name = decl.name.toSlice(ip); // TODO audit this - const lib_name = if (decl.getOwnedVariable(zcu)) |ov| ov.lib_name.toSlice(ip) else null; - const global_index = try coff_file.getGlobalSymbol(name, lib_name); + const global_index = try coff_file.getGlobalSymbol(name.toSlice(ip), lib_name.toSlice(ip)); try coff_file.need_got_table.put(gpa, global_index, {}); // needs GOT return GenResult.mcv(.{ .load_got = link.File.Coff.global_symbol_bit | global_index }); } - const atom_index 
= try coff_file.getOrCreateAtomForDecl(decl_index); + const atom_index = try coff_file.getOrCreateAtomForNav(nav_index); const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?; return GenResult.mcv(.{ .load_got = sym_index }); - } else if (lf.cast(link.File.Plan9)) |p9| { - const atom_index = try p9.seeDecl(decl_index); + } else if (lf.cast(.plan9)) |p9| { + const atom_index = try p9.seeNav(pt, nav_index); const atom = p9.getAtom(atom_index); return GenResult.mcv(.{ .memory = atom.getOffsetTableAddress(p9) }); } else { - return GenResult.fail(gpa, src_loc, "TODO genDeclRef for target {}", .{target}); - } -} - -fn genUnnamedConst( - lf: *link.File, - pt: Zcu.PerThread, - src_loc: Zcu.LazySrcLoc, - val: Value, - owner_decl_index: InternPool.DeclIndex, -) CodeGenError!GenResult { - const gpa = lf.comp.gpa; - log.debug("genUnnamedConst: val = {}", .{val.fmtValue(pt)}); - - const local_sym_index = lf.lowerUnnamedConst(pt, val, owner_decl_index) catch |err| { - return GenResult.fail(gpa, src_loc, "lowering unnamed constant failed: {s}", .{@errorName(err)}); - }; - switch (lf.tag) { - .elf => { - return GenResult.mcv(.{ .load_symbol = local_sym_index }); - }, - .macho => { - const macho_file = lf.cast(link.File.MachO).?; - const local = macho_file.getZigObject().?.symbols.items[local_sym_index]; - return GenResult.mcv(.{ .load_symbol = local.nlist_idx }); - }, - .coff => { - return GenResult.mcv(.{ .load_direct = local_sym_index }); - }, - .plan9 => { - const atom_index = local_sym_index; // plan9 returns the atom_index - return GenResult.mcv(.{ .load_direct = atom_index }); - }, - - .c => return GenResult.fail(gpa, src_loc, "TODO genUnnamedConst for -ofmt=c", .{}), - .wasm => return GenResult.fail(gpa, src_loc, "TODO genUnnamedConst for wasm", .{}), - .spirv => return GenResult.fail(gpa, src_loc, "TODO genUnnamedConst for spirv", .{}), - .nvptx => return GenResult.fail(gpa, src_loc, "TODO genUnnamedConst for nvptx", .{}), + return GenResult.fail(gpa, src_loc, 
"TODO genNavRef for target {}", .{target}); } } @@ -998,7 +942,7 @@ pub fn genTypedValue( pt: Zcu.PerThread, src_loc: Zcu.LazySrcLoc, val: Value, - owner_decl_index: InternPool.DeclIndex, + target: std.Target, ) CodeGenError!GenResult { const zcu = pt.zcu; const ip = &zcu.intern_pool; @@ -1010,14 +954,9 @@ pub fn genTypedValue( return GenResult.mcv(.undef); } - const owner_decl = zcu.declPtr(owner_decl_index); - const namespace = zcu.namespacePtr(owner_decl.src_namespace); - const target = namespace.fileScope(zcu).mod.resolved_target.result; - const ptr_bits = target.ptrBitWidth(); - if (!ty.isSlice(zcu)) switch (ip.indexToKey(val.toIntern())) { .ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) { - .decl => |decl| return genDeclRef(lf, pt, src_loc, val, decl), + .nav => |nav| return genNavRef(lf, pt, src_loc, val, nav, target), else => {}, }, else => {}, @@ -1042,7 +981,7 @@ pub fn genTypedValue( }, .Int => { const info = ty.intInfo(zcu); - if (info.bits <= ptr_bits) { + if (info.bits <= target.ptrBitWidth()) { const unsigned: u64 = switch (info.signedness) { .signed => @bitCast(val.toSignedInt(pt)), .unsigned => val.toUnsignedInt(pt), @@ -1060,7 +999,7 @@ pub fn genTypedValue( pt, src_loc, val.optionalValue(zcu) orelse return GenResult.mcv(.{ .immediate = 0 }), - owner_decl_index, + target, ); } else if (ty.abiSize(pt) == 1) { return GenResult.mcv(.{ .immediate = @intFromBool(!val.isNull(zcu)) }); @@ -1073,7 +1012,7 @@ pub fn genTypedValue( pt, src_loc, Value.fromInterned(enum_tag.int), - owner_decl_index, + target, ); }, .ErrorSet => { @@ -1096,14 +1035,14 @@ pub fn genTypedValue( .ty = err_type.toIntern(), .name = err_name, } })), - owner_decl_index, + target, ), .payload => return genTypedValue( lf, pt, src_loc, try pt.intValue(err_int_ty, 0), - owner_decl_index, + target, ), } } @@ -1121,7 +1060,7 @@ pub fn genTypedValue( else => {}, } - return genUnnamedConst(lf, pt, src_loc, val, owner_decl_index); + return lf.lowerUav(pt, val.toIntern(), .none, 
src_loc); } pub fn errUnionPayloadOffset(payload_ty: Type, pt: Zcu.PerThread) u64 { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 4dda4d083ba4..03a1ea374618 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -38,8 +38,8 @@ pub const CValue = union(enum) { /// Index into a tuple's fields field: usize, /// By-value - decl: InternPool.DeclIndex, - decl_ref: InternPool.DeclIndex, + nav: InternPool.Nav.Index, + nav_ref: InternPool.Nav.Index, /// An undefined value (cannot be dereferenced) undef: Type, /// Rendered as an identifier (using fmtIdent) @@ -58,19 +58,12 @@ const BlockData = struct { pub const CValueMap = std.AutoHashMap(Air.Inst.Ref, CValue); pub const LazyFnKey = union(enum) { - tag_name: InternPool.DeclIndex, - never_tail: InternPool.DeclIndex, - never_inline: InternPool.DeclIndex, + tag_name: InternPool.Index, + never_tail: InternPool.Nav.Index, + never_inline: InternPool.Nav.Index, }; pub const LazyFnValue = struct { fn_name: CType.Pool.String, - data: Data, - - const Data = union { - tag_name: Type, - never_tail: void, - never_inline: void, - }; }; pub const LazyFnMap = std.AutoArrayHashMapUnmanaged(LazyFnKey, LazyFnValue); @@ -498,10 +491,11 @@ pub const Function = struct { return f.object.dg.fmtIntLiteral(val, .Other); } - fn getLazyFnName(f: *Function, key: LazyFnKey, data: LazyFnValue.Data) ![]const u8 { + fn getLazyFnName(f: *Function, key: LazyFnKey) ![]const u8 { const gpa = f.object.dg.gpa; const pt = f.object.dg.pt; const zcu = pt.zcu; + const ip = &zcu.intern_pool; const ctype_pool = &f.object.dg.ctype_pool; const gop = try f.lazy_fns.getOrPut(gpa, key); @@ -511,19 +505,19 @@ pub const Function = struct { gop.value_ptr.* = .{ .fn_name = switch (key) { .tag_name, + => |enum_ty| try ctype_pool.fmt(gpa, "zig_{s}_{}__{d}", .{ + @tagName(key), + fmtIdent(ip.loadEnumType(enum_ty).name.toSlice(ip)), + @intFromEnum(enum_ty), + }), .never_tail, .never_inline, - => |owner_decl| try ctype_pool.fmt(gpa, "zig_{s}_{}__{d}", .{ + => 
|owner_nav| try ctype_pool.fmt(gpa, "zig_{s}_{}__{d}", .{ @tagName(key), - fmtIdent(zcu.declPtr(owner_decl).name.toSlice(&zcu.intern_pool)), - @intFromEnum(owner_decl), + fmtIdent(ip.getNav(owner_nav).name.toSlice(ip)), + @intFromEnum(owner_nav), }), }, - .data = switch (key) { - .tag_name => .{ .tag_name = data.tag_name }, - .never_tail => .{ .never_tail = data.never_tail }, - .never_inline => .{ .never_inline = data.never_inline }, - }, }; } return gop.value_ptr.fn_name.toSlice(ctype_pool).?; @@ -618,12 +612,12 @@ pub const DeclGen = struct { scratch: std.ArrayListUnmanaged(u32), /// Keeps track of anonymous decls that need to be rendered before this /// (named) Decl in the output C code. - anon_decl_deps: std.AutoArrayHashMapUnmanaged(InternPool.Index, C.DeclBlock), - aligned_anon_decls: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment), + uav_deps: std.AutoArrayHashMapUnmanaged(InternPool.Index, C.AvBlock), + aligned_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment), pub const Pass = union(enum) { - decl: InternPool.DeclIndex, - anon: InternPool.Index, + nav: InternPool.Nav.Index, + uav: InternPool.Index, flush, }; @@ -634,39 +628,37 @@ pub const DeclGen = struct { fn fail(dg: *DeclGen, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } { @setCold(true); const zcu = dg.pt.zcu; - const decl_index = dg.pass.decl; - const decl = zcu.declPtr(decl_index); - const src_loc = decl.navSrcLoc(zcu); + const src_loc = zcu.navSrcLoc(dg.pass.nav); dg.error_msg = try Zcu.ErrorMsg.create(dg.gpa, src_loc, format, args); return error.AnalysisFail; } - fn renderAnonDeclValue( + fn renderUav( dg: *DeclGen, writer: anytype, - anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl, + uav: InternPool.Key.Ptr.BaseAddr.Uav, location: ValueRenderLocation, ) error{ OutOfMemory, AnalysisFail }!void { const pt = dg.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; const ctype_pool = &dg.ctype_pool; - const decl_val = 
Value.fromInterned(anon_decl.val); - const decl_ty = decl_val.typeOf(zcu); + const uav_val = Value.fromInterned(uav.val); + const uav_ty = uav_val.typeOf(zcu); // Render an undefined pointer if we have a pointer to a zero-bit or comptime type. - const ptr_ty = Type.fromInterned(anon_decl.orig_ty); - if (ptr_ty.isPtrAtRuntime(zcu) and !decl_ty.isFnOrHasRuntimeBits(pt)) { + const ptr_ty = Type.fromInterned(uav.orig_ty); + if (ptr_ty.isPtrAtRuntime(zcu) and !uav_ty.isFnOrHasRuntimeBits(pt)) { return dg.writeCValue(writer, .{ .undef = ptr_ty }); } // Chase function values in order to be able to reference the original function. - if (decl_val.getFunction(zcu)) |func| - return dg.renderDeclValue(writer, func.owner_decl, location); - if (decl_val.getExternFunc(zcu)) |extern_func| - return dg.renderDeclValue(writer, extern_func.decl, location); - - assert(decl_val.getVariable(zcu) == null); + switch (ip.indexToKey(uav.val)) { + .variable => unreachable, + .func => |func| return dg.renderNav(writer, func.owner_nav, location), + .@"extern" => |@"extern"| return dg.renderNav(writer, @"extern".owner_nav, location), + else => {}, + } // We shouldn't cast C function pointers as this is UB (when you call // them). The analysis until now should ensure that the C function @@ -674,22 +666,22 @@ pub const DeclGen = struct { // somewhere and we should let the C compiler tell us about it. 
const ptr_ctype = try dg.ctypeFromType(ptr_ty, .complete); const elem_ctype = ptr_ctype.info(ctype_pool).pointer.elem_ctype; - const decl_ctype = try dg.ctypeFromType(decl_ty, .complete); - const need_cast = !elem_ctype.eql(decl_ctype) and - (elem_ctype.info(ctype_pool) != .function or decl_ctype.info(ctype_pool) != .function); + const uav_ctype = try dg.ctypeFromType(uav_ty, .complete); + const need_cast = !elem_ctype.eql(uav_ctype) and + (elem_ctype.info(ctype_pool) != .function or uav_ctype.info(ctype_pool) != .function); if (need_cast) { try writer.writeAll("(("); try dg.renderCType(writer, ptr_ctype); try writer.writeByte(')'); } try writer.writeByte('&'); - try renderAnonDeclName(writer, decl_val); + try renderUavName(writer, uav_val); if (need_cast) try writer.writeByte(')'); // Indicate that the anon decl should be rendered to the output so that // our reference above is not undefined. - const ptr_type = ip.indexToKey(anon_decl.orig_ty).ptr_type; - const gop = try dg.anon_decl_deps.getOrPut(dg.gpa, anon_decl.val); + const ptr_type = ip.indexToKey(uav.orig_ty).ptr_type; + const gop = try dg.uav_deps.getOrPut(dg.gpa, uav.val); if (!gop.found_existing) gop.value_ptr.* = .{}; // Only insert an alignment entry if the alignment is greater than ABI @@ -698,7 +690,7 @@ pub const DeclGen = struct { if (explicit_alignment != .none) { const abi_alignment = Type.fromInterned(ptr_type.child).abiAlignment(pt); if (explicit_alignment.order(abi_alignment).compare(.gt)) { - const aligned_gop = try dg.aligned_anon_decls.getOrPut(dg.gpa, anon_decl.val); + const aligned_gop = try dg.aligned_uavs.getOrPut(dg.gpa, uav.val); aligned_gop.value_ptr.* = if (aligned_gop.found_existing) aligned_gop.value_ptr.maxStrict(explicit_alignment) else @@ -707,47 +699,49 @@ pub const DeclGen = struct { } } - fn renderDeclValue( + fn renderNav( dg: *DeclGen, writer: anytype, - decl_index: InternPool.DeclIndex, + nav_index: InternPool.Nav.Index, location: ValueRenderLocation, ) error{ 
OutOfMemory, AnalysisFail }!void { + _ = location; const pt = dg.pt; const zcu = pt.zcu; + const ip = &zcu.intern_pool; const ctype_pool = &dg.ctype_pool; - const decl = zcu.declPtr(decl_index); - assert(decl.has_tv); + + // Chase function values in order to be able to reference the original function. + const owner_nav = switch (ip.indexToKey(zcu.navValue(nav_index).toIntern())) { + .variable => |variable| variable.owner_nav, + .func => |func| func.owner_nav, + .@"extern" => |@"extern"| @"extern".owner_nav, + else => nav_index, + }; // Render an undefined pointer if we have a pointer to a zero-bit or comptime type. - const decl_ty = decl.typeOf(zcu); - const ptr_ty = try decl.declPtrType(pt); - if (!decl_ty.isFnOrHasRuntimeBits(pt)) { + const nav_ty = Type.fromInterned(ip.getNav(owner_nav).typeOf(ip)); + const ptr_ty = try pt.navPtrType(owner_nav); + if (!nav_ty.isFnOrHasRuntimeBits(pt)) { return dg.writeCValue(writer, .{ .undef = ptr_ty }); } - // Chase function values in order to be able to reference the original function. - if (decl.val.getFunction(zcu)) |func| if (func.owner_decl != decl_index) - return dg.renderDeclValue(writer, func.owner_decl, location); - if (decl.val.getExternFunc(zcu)) |extern_func| if (extern_func.decl != decl_index) - return dg.renderDeclValue(writer, extern_func.decl, location); - // We shouldn't cast C function pointers as this is UB (when you call // them). The analysis until now should ensure that the C function // pointers are compatible. If they are not, then there is a bug // somewhere and we should let the C compiler tell us about it. 
const ctype = try dg.ctypeFromType(ptr_ty, .complete); const elem_ctype = ctype.info(ctype_pool).pointer.elem_ctype; - const decl_ctype = try dg.ctypeFromType(decl_ty, .complete); - const need_cast = !elem_ctype.eql(decl_ctype) and - (elem_ctype.info(ctype_pool) != .function or decl_ctype.info(ctype_pool) != .function); + const nav_ctype = try dg.ctypeFromType(nav_ty, .complete); + const need_cast = !elem_ctype.eql(nav_ctype) and + (elem_ctype.info(ctype_pool) != .function or nav_ctype.info(ctype_pool) != .function); if (need_cast) { try writer.writeAll("(("); try dg.renderCType(writer, ctype); try writer.writeByte(')'); } try writer.writeByte('&'); - try dg.renderDeclName(writer, decl_index); + try dg.renderNavName(writer, owner_nav); if (need_cast) try writer.writeByte(')'); } @@ -769,8 +763,8 @@ pub const DeclGen = struct { try writer.print("){x}", .{try dg.fmtIntLiteral(addr_val, .Other)}); }, - .decl_ptr => |decl| try dg.renderDeclValue(writer, decl, location), - .anon_decl_ptr => |ad| try dg.renderAnonDeclValue(writer, ad, location), + .nav_ptr => |nav| try dg.renderNav(writer, nav, location), + .uav_ptr => |uav| try dg.renderUav(writer, uav, location), inline .eu_payload_ptr, .opt_payload_ptr => |info| { try writer.writeAll("&("); @@ -918,7 +912,7 @@ pub const DeclGen = struct { .true => try writer.writeAll("true"), }, .variable, - .extern_func, + .@"extern", .func, .enum_literal, .empty_enum_value, @@ -1743,7 +1737,7 @@ pub const DeclGen = struct { .undef, .simple_value, .variable, - .extern_func, + .@"extern", .func, .int, .err, @@ -1758,7 +1752,7 @@ pub const DeclGen = struct { .aggregate, .un, .memoized_call, - => unreachable, + => unreachable, // values, not types }, } } @@ -1770,7 +1764,7 @@ pub const DeclGen = struct { fn_align: InternPool.Alignment, kind: CType.Kind, name: union(enum) { - decl: InternPool.DeclIndex, + nav: InternPool.Nav.Index, fmt_ctype_pool_string: std.fmt.Formatter(formatCTypePoolString), @"export": struct { main_name: 
InternPool.NullTerminatedString, @@ -1805,7 +1799,7 @@ pub const DeclGen = struct { try w.print("{}", .{trailing}); switch (name) { - .decl => |decl_index| try dg.renderDeclName(w, decl_index), + .nav => |nav| try dg.renderNavName(w, nav), .fmt_ctype_pool_string => |fmt| try w.print("{ }", .{fmt}), .@"export" => |@"export"| try w.print("{ }", .{fmtIdent(@"export".extern_name.toSlice(ip))}), } @@ -1828,7 +1822,7 @@ pub const DeclGen = struct { .forward => { if (fn_align.toByteUnits()) |a| try w.print(" zig_align_fn({})", .{a}); switch (name) { - .decl, .fmt_ctype_pool_string => {}, + .nav, .fmt_ctype_pool_string => {}, .@"export" => |@"export"| { const extern_name = @"export".extern_name.toSlice(ip); const is_mangled = isMangledIdent(extern_name, true); @@ -2069,8 +2063,8 @@ pub const DeclGen = struct { fn writeName(dg: *DeclGen, w: anytype, c_value: CValue) !void { switch (c_value) { .new_local, .local => |i| try w.print("t{d}", .{i}), - .constant => |val| try renderAnonDeclName(w, val), - .decl => |decl| try dg.renderDeclName(w, decl), + .constant => |uav| try renderUavName(w, uav), + .nav => |nav| try dg.renderNavName(w, nav), .identifier => |ident| try w.print("{ }", .{fmtIdent(ident)}), else => unreachable, } @@ -2079,13 +2073,13 @@ pub const DeclGen = struct { fn writeCValue(dg: *DeclGen, w: anytype, c_value: CValue) !void { switch (c_value) { .none, .new_local, .local, .local_ref => unreachable, - .constant => |val| try renderAnonDeclName(w, val), + .constant => |uav| try renderUavName(w, uav), .arg, .arg_array => unreachable, .field => |i| try w.print("f{d}", .{i}), - .decl => |decl| try dg.renderDeclName(w, decl), - .decl_ref => |decl| { + .nav => |nav| try dg.renderNavName(w, nav), + .nav_ref => |nav| { try w.writeByte('&'); - try dg.renderDeclName(w, decl); + try dg.renderNavName(w, nav); }, .undef => |ty| try dg.renderUndefValue(w, ty, .Other), .identifier => |ident| try w.print("{ }", .{fmtIdent(ident)}), @@ -2111,12 +2105,12 @@ pub const DeclGen = 
struct { .ctype_pool_string, => unreachable, .field => |i| try w.print("f{d}", .{i}), - .decl => |decl| { + .nav => |nav| { try w.writeAll("(*"); - try dg.renderDeclName(w, decl); + try dg.renderNavName(w, nav); try w.writeByte(')'); }, - .decl_ref => |decl| try dg.renderDeclName(w, decl), + .nav_ref => |nav| try dg.renderNavName(w, nav), .undef => unreachable, .identifier => |ident| try w.print("(*{ })", .{fmtIdent(ident)}), .payload_identifier => |ident| try w.print("(*{ }.{ })", .{ @@ -2150,11 +2144,11 @@ pub const DeclGen = struct { .arg_array, .ctype_pool_string, => unreachable, - .decl, .identifier, .payload_identifier => { + .nav, .identifier, .payload_identifier => { try dg.writeCValue(writer, c_value); try writer.writeAll("->"); }, - .decl_ref => { + .nav_ref => { try dg.writeCValueDeref(writer, c_value); try writer.writeByte('.'); }, @@ -2164,46 +2158,53 @@ pub const DeclGen = struct { fn renderFwdDecl( dg: *DeclGen, - decl_index: InternPool.DeclIndex, - variable: InternPool.Key.Variable, + nav_index: InternPool.Nav.Index, + flags: struct { + is_extern: bool, + is_const: bool, + is_threadlocal: bool, + is_weak_linkage: bool, + }, ) !void { const zcu = dg.pt.zcu; - const decl = zcu.declPtr(decl_index); + const ip = &zcu.intern_pool; + const nav = ip.getNav(nav_index); const fwd = dg.fwdDeclWriter(); - try fwd.writeAll(if (variable.is_extern) "zig_extern " else "static "); - if (variable.is_weak_linkage) try fwd.writeAll("zig_weak_linkage "); - if (variable.is_threadlocal and !dg.mod.single_threaded) try fwd.writeAll("zig_threadlocal "); + try fwd.writeAll(if (flags.is_extern) "zig_extern " else "static "); + if (flags.is_weak_linkage) try fwd.writeAll("zig_weak_linkage "); + if (flags.is_threadlocal and !dg.mod.single_threaded) try fwd.writeAll("zig_threadlocal "); try dg.renderTypeAndName( fwd, - decl.typeOf(zcu), - .{ .decl = decl_index }, - CQualifiers.init(.{ .@"const" = variable.is_const }), - decl.alignment, + Type.fromInterned(nav.typeOf(ip)), + .{ 
.nav = nav_index }, + CQualifiers.init(.{ .@"const" = flags.is_const }), + nav.status.resolved.alignment, .complete, ); try fwd.writeAll(";\n"); } - fn renderDeclName(dg: *DeclGen, writer: anytype, decl_index: InternPool.DeclIndex) !void { + fn renderNavName(dg: *DeclGen, writer: anytype, nav_index: InternPool.Nav.Index) !void { const zcu = dg.pt.zcu; const ip = &zcu.intern_pool; - const decl = zcu.declPtr(decl_index); - - if (decl.getExternDecl(zcu).unwrap()) |extern_decl_index| try writer.print("{ }", .{ - fmtIdent(zcu.declPtr(extern_decl_index).name.toSlice(ip)), - }) else { - // MSVC has a limit of 4095 character token length limit, and fmtIdent can (worst case), - // expand to 3x the length of its input, but let's cut it off at a much shorter limit. - const fqn_slice = decl.fqn.toSlice(ip); - try writer.print("{}__{d}", .{ - fmtIdent(fqn_slice[0..@min(fqn_slice.len, 100)]), - @intFromEnum(decl_index), - }); + switch (ip.indexToKey(zcu.navValue(nav_index).toIntern())) { + .@"extern" => |@"extern"| try writer.print("{ }", .{ + fmtIdent(ip.getNav(@"extern".owner_nav).name.toSlice(ip)), + }), + else => { + // MSVC has a limit of 4095 character token length limit, and fmtIdent can (worst case), + // expand to 3x the length of its input, but let's cut it off at a much shorter limit. 
+ const fqn_slice = ip.getNav(nav_index).fqn.toSlice(ip); + try writer.print("{}__{d}", .{ + fmtIdent(fqn_slice[0..@min(fqn_slice.len, 100)]), + @intFromEnum(nav_index), + }); + }, } } - fn renderAnonDeclName(writer: anytype, anon_decl_val: Value) !void { - try writer.print("__anon_{d}", .{@intFromEnum(anon_decl_val.toIntern())}); + fn renderUavName(writer: anytype, uav: Value) !void { + try writer.print("__anon_{d}", .{@intFromEnum(uav.toIntern())}); } fn renderTypeForBuiltinFnName(dg: *DeclGen, writer: anytype, ty: Type) !void { @@ -2301,12 +2302,13 @@ fn renderFwdDeclTypeName( fwd_decl: CType.Info.FwdDecl, attributes: []const u8, ) !void { + const ip = &zcu.intern_pool; try w.print("{s} {s}", .{ @tagName(fwd_decl.tag), attributes }); switch (fwd_decl.name) { .anon => try w.print("anon__lazy_{d}", .{@intFromEnum(ctype.index)}), - .owner_decl => |owner_decl| try w.print("{}__{d}", .{ - fmtIdent(zcu.declPtr(owner_decl).name.toSlice(&zcu.intern_pool)), - @intFromEnum(owner_decl), + .index => |index| try w.print("{}__{d}", .{ + fmtIdent(Type.fromInterned(index).containerTypeName(ip).toSlice(&zcu.intern_pool)), + @intFromEnum(index), }), } } @@ -2340,11 +2342,11 @@ fn renderTypePrefix( }, .aligned => switch (pass) { - .decl => |decl_index| try w.print("decl__{d}_{d}", .{ - @intFromEnum(decl_index), @intFromEnum(ctype.index), + .nav => |nav| try w.print("nav__{d}_{d}", .{ + @intFromEnum(nav), @intFromEnum(ctype.index), }), - .anon => |anon_decl| try w.print("anon__{d}_{d}", .{ - @intFromEnum(anon_decl), @intFromEnum(ctype.index), + .uav => |uav| try w.print("uav__{d}_{d}", .{ + @intFromEnum(uav), @intFromEnum(ctype.index), }), .flush => try renderAlignedTypeName(w, ctype), }, @@ -2370,15 +2372,15 @@ fn renderTypePrefix( .fwd_decl => |fwd_decl_info| switch (fwd_decl_info.name) { .anon => switch (pass) { - .decl => |decl_index| try w.print("decl__{d}_{d}", .{ - @intFromEnum(decl_index), @intFromEnum(ctype.index), + .nav => |nav| try w.print("nav__{d}_{d}", .{ + 
@intFromEnum(nav), @intFromEnum(ctype.index), }), - .anon => |anon_decl| try w.print("anon__{d}_{d}", .{ - @intFromEnum(anon_decl), @intFromEnum(ctype.index), + .uav => |uav| try w.print("uav__{d}_{d}", .{ + @intFromEnum(uav), @intFromEnum(ctype.index), }), .flush => try renderFwdDeclTypeName(zcu, w, ctype, fwd_decl_info, ""), }, - .owner_decl => try renderFwdDeclTypeName(zcu, w, ctype, fwd_decl_info, ""), + .index => try renderFwdDeclTypeName(zcu, w, ctype, fwd_decl_info, ""), }, .aggregate => |aggregate_info| switch (aggregate_info.name) { @@ -2557,7 +2559,7 @@ pub fn genTypeDecl( try writer.writeAll(";\n"); } switch (pass) { - .decl, .anon => { + .nav, .uav => { try writer.writeAll("typedef "); _ = try renderTypePrefix(.flush, global_ctype_pool, zcu, writer, global_ctype, .suffix, .{}); try writer.writeByte(' '); @@ -2569,7 +2571,7 @@ pub fn genTypeDecl( }, .fwd_decl => |fwd_decl_info| switch (fwd_decl_info.name) { .anon => switch (pass) { - .decl, .anon => { + .nav, .uav => { try writer.writeAll("typedef "); _ = try renderTypePrefix(.flush, global_ctype_pool, zcu, writer, global_ctype, .suffix, .{}); try writer.writeByte(' '); @@ -2578,13 +2580,14 @@ pub fn genTypeDecl( }, .flush => {}, }, - .owner_decl => |owner_decl_index| if (!found_existing) { + .index => |index| if (!found_existing) { + const ip = &zcu.intern_pool; + const ty = Type.fromInterned(index); _ = try renderTypePrefix(.flush, global_ctype_pool, zcu, writer, global_ctype, .suffix, .{}); try writer.writeByte(';'); - const owner_decl = zcu.declPtr(owner_decl_index); - const owner_mod = zcu.namespacePtr(owner_decl.src_namespace).fileScope(zcu).mod; - if (!owner_mod.strip) try writer.print(" /* {} */", .{ - owner_decl.fqn.fmt(&zcu.intern_pool), + const file_scope = ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFull(ip).file; + if (!zcu.fileByIndex(file_scope).mod.strip) try writer.print(" /* {} */", .{ + ty.containerTypeName(ip).fmt(ip), }); try writer.writeByte('\n'); }, @@ -2709,9 +2712,8 @@ pub fn 
genLazyFn(o: *Object, lazy_ctype_pool: *const CType.Pool, lazy_fn: LazyFn const key = lazy_fn.key_ptr.*; const val = lazy_fn.value_ptr; switch (key) { - .tag_name => { - const enum_ty = val.data.tag_name; - + .tag_name => |enum_ty_ip| { + const enum_ty = Type.fromInterned(enum_ty_ip); const name_slice_ty = Type.slice_const_u8_sentinel_0; try w.writeAll("static "); @@ -2756,25 +2758,25 @@ pub fn genLazyFn(o: *Object, lazy_ctype_pool: *const CType.Pool, lazy_fn: LazyFn _ = try airBreakpoint(w); try w.writeAll("}\n"); }, - .never_tail, .never_inline => |fn_decl_index| { - const fn_decl = zcu.declPtr(fn_decl_index); - const fn_ctype = try o.dg.ctypeFromType(fn_decl.typeOf(zcu), .complete); + .never_tail, .never_inline => |fn_nav_index| { + const fn_val = zcu.navValue(fn_nav_index); + const fn_ctype = try o.dg.ctypeFromType(fn_val.typeOf(zcu), .complete); const fn_info = fn_ctype.info(ctype_pool).function; const fn_name = fmtCTypePoolString(val.fn_name, lazy_ctype_pool); const fwd = o.dg.fwdDeclWriter(); try fwd.print("static zig_{s} ", .{@tagName(key)}); - try o.dg.renderFunctionSignature(fwd, fn_decl.val, fn_decl.alignment, .forward, .{ + try o.dg.renderFunctionSignature(fwd, fn_val, ip.getNav(fn_nav_index).status.resolved.alignment, .forward, .{ .fmt_ctype_pool_string = fn_name, }); try fwd.writeAll(";\n"); try w.print("zig_{s} ", .{@tagName(key)}); - try o.dg.renderFunctionSignature(w, fn_decl.val, .none, .complete, .{ + try o.dg.renderFunctionSignature(w, fn_val, .none, .complete, .{ .fmt_ctype_pool_string = fn_name, }); try w.writeAll(" {\n return "); - try o.dg.renderDeclName(w, fn_decl_index); + try o.dg.renderNavName(w, fn_nav_index); try w.writeByte('('); for (0..fn_info.param_ctypes.len) |arg| { if (arg > 0) try w.writeAll(", "); @@ -2791,9 +2793,11 @@ pub fn genFunc(f: *Function) !void { const o = &f.object; const zcu = o.dg.pt.zcu; + const ip = &zcu.intern_pool; const gpa = o.dg.gpa; - const decl_index = o.dg.pass.decl; - const decl = 
zcu.declPtr(decl_index); + const nav_index = o.dg.pass.nav; + const nav_val = zcu.navValue(nav_index); + const nav = ip.getNav(nav_index); o.code_header = std.ArrayList(u8).init(gpa); defer o.code_header.deinit(); @@ -2802,21 +2806,21 @@ pub fn genFunc(f: *Function) !void { try fwd.writeAll("static "); try o.dg.renderFunctionSignature( fwd, - decl.val, - decl.alignment, + nav_val, + nav.status.resolved.alignment, .forward, - .{ .decl = decl_index }, + .{ .nav = nav_index }, ); try fwd.writeAll(";\n"); - if (decl.@"linksection".toSlice(&zcu.intern_pool)) |s| + if (nav.status.resolved.@"linksection".toSlice(ip)) |s| try o.writer().print("zig_linksection_fn({s}) ", .{fmtStringLiteral(s, null)}); try o.dg.renderFunctionSignature( o.writer(), - decl.val, + nav_val, .none, .complete, - .{ .decl = decl_index }, + .{ .nav = nav_index }, ); try o.writer().writeByte(' '); @@ -2883,44 +2887,66 @@ pub fn genDecl(o: *Object) !void { const pt = o.dg.pt; const zcu = pt.zcu; - const decl_index = o.dg.pass.decl; - const decl = zcu.declPtr(decl_index); - const decl_ty = decl.typeOf(zcu); + const ip = &zcu.intern_pool; + const nav = ip.getNav(o.dg.pass.nav); + const nav_ty = Type.fromInterned(nav.typeOf(ip)); + + if (!nav_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return; + switch (ip.indexToKey(nav.status.resolved.val)) { + .@"extern" => |@"extern"| { + if (!ip.isFunctionType(nav_ty.toIntern())) return o.dg.renderFwdDecl(o.dg.pass.nav, .{ + .is_extern = true, + .is_const = @"extern".is_const, + .is_threadlocal = @"extern".is_threadlocal, + .is_weak_linkage = @"extern".is_weak_linkage, + }); - if (!decl_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) return; - if (decl.val.getExternFunc(zcu)) |_| { - const fwd = o.dg.fwdDeclWriter(); - try fwd.writeAll("zig_extern "); - try o.dg.renderFunctionSignature( - fwd, - decl.val, - decl.alignment, - .forward, - .{ .@"export" = .{ - .main_name = decl.name, - .extern_name = decl.name, - } }, - ); - try fwd.writeAll(";\n"); - } else if 
(decl.val.getVariable(zcu)) |variable| { - try o.dg.renderFwdDecl(decl_index, variable); - - if (variable.is_extern) return; - - const w = o.writer(); - if (variable.is_weak_linkage) try w.writeAll("zig_weak_linkage "); - if (variable.is_threadlocal and !o.dg.mod.single_threaded) try w.writeAll("zig_threadlocal "); - if (decl.@"linksection".toSlice(&zcu.intern_pool)) |s| - try w.print("zig_linksection({s}) ", .{fmtStringLiteral(s, null)}); - const decl_c_value = .{ .decl = decl_index }; - try o.dg.renderTypeAndName(w, decl_ty, decl_c_value, .{}, decl.alignment, .complete); - try w.writeAll(" = "); - try o.dg.renderValue(w, Value.fromInterned(variable.init), .StaticInitializer); - try w.writeByte(';'); - try o.indent_writer.insertNewline(); - } else { - const decl_c_value = .{ .decl = decl_index }; - try genDeclValue(o, decl.val, decl_c_value, decl.alignment, decl.@"linksection"); + const fwd = o.dg.fwdDeclWriter(); + try fwd.writeAll("zig_extern "); + try o.dg.renderFunctionSignature( + fwd, + Value.fromInterned(nav.status.resolved.val), + nav.status.resolved.alignment, + .forward, + .{ .@"export" = .{ + .main_name = nav.name, + .extern_name = nav.name, + } }, + ); + try fwd.writeAll(";\n"); + }, + .variable => |variable| { + try o.dg.renderFwdDecl(o.dg.pass.nav, .{ + .is_extern = false, + .is_const = false, + .is_threadlocal = variable.is_threadlocal, + .is_weak_linkage = variable.is_weak_linkage, + }); + const w = o.writer(); + if (variable.is_weak_linkage) try w.writeAll("zig_weak_linkage "); + if (variable.is_threadlocal and !o.dg.mod.single_threaded) try w.writeAll("zig_threadlocal "); + if (nav.status.resolved.@"linksection".toSlice(&zcu.intern_pool)) |s| + try w.print("zig_linksection({s}) ", .{fmtStringLiteral(s, null)}); + try o.dg.renderTypeAndName( + w, + nav_ty, + .{ .nav = o.dg.pass.nav }, + .{}, + nav.status.resolved.alignment, + .complete, + ); + try w.writeAll(" = "); + try o.dg.renderValue(w, Value.fromInterned(variable.init), .StaticInitializer); 
+ try w.writeByte(';'); + try o.indent_writer.insertNewline(); + }, + else => try genDeclValue( + o, + Value.fromInterned(nav.status.resolved.val), + .{ .nav = o.dg.pass.nav }, + nav.status.resolved.alignment, + nav.status.resolved.@"linksection", + ), } } @@ -2956,31 +2982,34 @@ pub fn genExports(dg: *DeclGen, exported: Zcu.Exported, export_indices: []const const main_name = zcu.all_exports.items[export_indices[0]].opts.name; try fwd.writeAll("#define "); switch (exported) { - .decl_index => |decl_index| try dg.renderDeclName(fwd, decl_index), - .value => |value| try DeclGen.renderAnonDeclName(fwd, Value.fromInterned(value)), + .nav => |nav| try dg.renderNavName(fwd, nav), + .uav => |uav| try DeclGen.renderUavName(fwd, Value.fromInterned(uav)), } try fwd.writeByte(' '); try fwd.print("{ }", .{fmtIdent(main_name.toSlice(ip))}); try fwd.writeByte('\n'); - const is_const = switch (ip.indexToKey(exported.getValue(zcu).toIntern())) { - .func, .extern_func => return for (export_indices) |export_index| { - const @"export" = &zcu.all_exports.items[export_index]; - try fwd.writeAll("zig_extern "); - if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage_fn "); - try dg.renderFunctionSignature( - fwd, - exported.getValue(zcu), - exported.getAlign(zcu), - .forward, - .{ .@"export" = .{ - .main_name = main_name, - .extern_name = @"export".opts.name, - } }, - ); - try fwd.writeAll(";\n"); - }, - .variable => |variable| variable.is_const, + const exported_val = exported.getValue(zcu); + if (ip.isFunctionType(exported_val.typeOf(zcu).toIntern())) return for (export_indices) |export_index| { + const @"export" = &zcu.all_exports.items[export_index]; + try fwd.writeAll("zig_extern "); + if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage_fn "); + try dg.renderFunctionSignature( + fwd, + exported.getValue(zcu), + exported.getAlign(zcu), + .forward, + .{ .@"export" = .{ + .main_name = main_name, + .extern_name = @"export".opts.name, + } }, + ); + 
try fwd.writeAll(";\n"); + }; + const is_const = switch (ip.indexToKey(exported_val.toIntern())) { + .func => unreachable, + .@"extern" => |@"extern"| @"extern".is_const, + .variable => false, else => true, }; for (export_indices) |export_index| { @@ -4474,24 +4503,19 @@ fn airCall( callee: { known: { - const fn_decl = fn_decl: { - const callee_val = (try f.air.value(pl_op.operand, pt)) orelse break :known; - break :fn_decl switch (zcu.intern_pool.indexToKey(callee_val.toIntern())) { - .extern_func => |extern_func| extern_func.decl, - .func => |func| func.owner_decl, - .ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) { - .decl => |decl| decl, - else => break :known, - } else break :known, + const callee_val = (try f.air.value(pl_op.operand, pt)) orelse break :known; + const fn_nav = switch (zcu.intern_pool.indexToKey(callee_val.toIntern())) { + .@"extern" => |@"extern"| @"extern".owner_nav, + .func => |func| func.owner_nav, + .ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) { + .nav => |nav| nav, else => break :known, - }; + } else break :known, + else => break :known, }; switch (modifier) { - .auto, .always_tail => try f.object.dg.renderDeclName(writer, fn_decl), - inline .never_tail, .never_inline => |m| try writer.writeAll(try f.getLazyFnName( - @unionInit(LazyFnKey, @tagName(m), fn_decl), - @unionInit(LazyFnValue.Data, @tagName(m), {}), - )), + .auto, .always_tail => try f.object.dg.renderNavName(writer, fn_nav), + inline .never_tail, .never_inline => |m| try writer.writeAll(try f.getLazyFnName(@unionInit(LazyFnKey, @tagName(m), fn_nav))), else => unreachable, } break :callee; @@ -4554,11 +4578,12 @@ fn airDbgStmt(f: *Function, inst: Air.Inst.Index) !CValue { fn airDbgInlineBlock(f: *Function, inst: Air.Inst.Index) !CValue { const pt = f.object.dg.pt; const zcu = pt.zcu; + const ip = &zcu.intern_pool; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = f.air.extraData(Air.DbgInlineBlock, 
ty_pl.payload); - const owner_decl = zcu.funcOwnerDeclPtr(extra.data.func); + const owner_nav = ip.getNav(zcu.funcInfo(extra.data.func).owner_nav); const writer = f.object.writer(); - try writer.print("/* inline:{} */\n", .{owner_decl.fqn.fmt(&zcu.intern_pool)}); + try writer.print("/* inline:{} */\n", .{owner_nav.fqn.fmt(&zcu.intern_pool)}); return lowerBlock(f, inst, @ptrCast(f.air.extra[extra.end..][0..extra.data.body_len])); } @@ -5059,7 +5084,7 @@ fn asmInputNeedsLocal(f: *Function, constraint: []const u8, value: CValue) bool else => switch (value) { .constant => |val| switch (dg.pt.zcu.intern_pool.indexToKey(val.toIntern())) { .ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) { - .decl => false, + .nav => false, else => true, } else true, else => true, @@ -6841,8 +6866,6 @@ fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { } fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; - const zcu = pt.zcu; const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const inst_ty = f.typeOfIndex(inst); @@ -6854,7 +6877,7 @@ fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue { const local = try f.allocLocal(inst, inst_ty); try f.writeCValue(writer, local, .Other); try writer.print(" = {s}(", .{ - try f.getLazyFnName(.{ .tag_name = enum_ty.getOwnerDecl(zcu) }, .{ .tag_name = enum_ty }), + try f.getLazyFnName(.{ .tag_name = enum_ty.toIntern() }), }); try f.writeCValue(writer, operand, .Other); try writer.writeAll(");\n"); @@ -7390,18 +7413,17 @@ fn airCVaStart(f: *Function, inst: Air.Inst.Index) !CValue { const pt = f.object.dg.pt; const zcu = pt.zcu; const inst_ty = f.typeOfIndex(inst); - const decl_index = f.object.dg.pass.decl; - const decl = zcu.declPtr(decl_index); - const function_ctype = try f.ctypeFromType(decl.typeOf(zcu), .complete); - const params_len = function_ctype.info(&f.object.dg.ctype_pool).function.param_ctypes.len; + const function_ty = 
zcu.navValue(f.object.dg.pass.nav).typeOf(zcu); + const function_info = (try f.ctypeFromType(function_ty, .complete)).info(&f.object.dg.ctype_pool).function; + assert(function_info.varargs); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); try writer.writeAll("va_start(*(va_list *)&"); try f.writeCValue(writer, local, .Other); - if (params_len > 0) { + if (function_info.param_ctypes.len > 0) { try writer.writeAll(", "); - try f.writeCValue(writer, .{ .arg = params_len - 1 }, .FunctionArgument); + try f.writeCValue(writer, .{ .arg = function_info.param_ctypes.len - 1 }, .FunctionArgument); } try writer.writeAll(");\n"); return local; @@ -7941,7 +7963,7 @@ const Materialize = struct { pub fn start(f: *Function, inst: Air.Inst.Index, ty: Type, value: CValue) !Materialize { return .{ .local = switch (value) { - .local_ref, .constant, .decl_ref, .undef => try f.moveCValue(inst, ty, value), + .local_ref, .constant, .nav_ref, .undef => try f.moveCValue(inst, ty, value), .new_local => |local| .{ .local = local }, else => value, } }; diff --git a/src/codegen/c/Type.zig b/src/codegen/c/Type.zig index ecd1b8c2f7bf..943f54ae9606 100644 --- a/src/codegen/c/Type.zig +++ b/src/codegen/c/Type.zig @@ -449,18 +449,18 @@ pub fn info(ctype: CType, pool: *const Pool) Info { }, .fwd_decl_struct => return .{ .fwd_decl = .{ .tag = .@"struct", - .name = .{ .owner_decl = @enumFromInt(item.data) }, + .name = .{ .index = @enumFromInt(item.data) }, } }, .fwd_decl_union => return .{ .fwd_decl = .{ .tag = .@"union", - .name = .{ .owner_decl = @enumFromInt(item.data) }, + .name = .{ .index = @enumFromInt(item.data) }, } }, .aggregate_struct_anon => { const extra_trail = pool.getExtraTrail(Pool.AggregateAnon, item.data); return .{ .aggregate = .{ .tag = .@"struct", .name = .{ .anon = .{ - .owner_decl = extra_trail.extra.owner_decl, + .index = extra_trail.extra.index, .id = extra_trail.extra.id, } }, .fields = .{ @@ -474,7 +474,7 @@ pub fn info(ctype: CType, pool: 
*const Pool) Info { return .{ .aggregate = .{ .tag = .@"union", .name = .{ .anon = .{ - .owner_decl = extra_trail.extra.owner_decl, + .index = extra_trail.extra.index, .id = extra_trail.extra.id, } }, .fields = .{ @@ -489,7 +489,7 @@ pub fn info(ctype: CType, pool: *const Pool) Info { .tag = .@"struct", .@"packed" = true, .name = .{ .anon = .{ - .owner_decl = extra_trail.extra.owner_decl, + .index = extra_trail.extra.index, .id = extra_trail.extra.id, } }, .fields = .{ @@ -504,7 +504,7 @@ pub fn info(ctype: CType, pool: *const Pool) Info { .tag = .@"union", .@"packed" = true, .name = .{ .anon = .{ - .owner_decl = extra_trail.extra.owner_decl, + .index = extra_trail.extra.index, .id = extra_trail.extra.id, } }, .fields = .{ @@ -834,7 +834,7 @@ pub const Info = union(enum) { tag: AggregateTag, name: union(enum) { anon: Field.Slice, - owner_decl: DeclIndex, + index: InternPool.Index, }, }; @@ -843,7 +843,7 @@ pub const Info = union(enum) { @"packed": bool = false, name: union(enum) { anon: struct { - owner_decl: DeclIndex, + index: InternPool.Index, id: u32, }, fwd_decl: CType, @@ -885,14 +885,14 @@ pub const Info = union(enum) { rhs_pool, pool_adapter, ), - .owner_decl => |lhs_owner_decl| rhs_info.fwd_decl.name == .owner_decl and - lhs_owner_decl == rhs_info.fwd_decl.name.owner_decl, + .index => |lhs_index| rhs_info.fwd_decl.name == .index and + lhs_index == rhs_info.fwd_decl.name.index, }, .aggregate => |lhs_aggregate_info| lhs_aggregate_info.tag == rhs_info.aggregate.tag and lhs_aggregate_info.@"packed" == rhs_info.aggregate.@"packed" and switch (lhs_aggregate_info.name) { .anon => |lhs_anon| rhs_info.aggregate.name == .anon and - lhs_anon.owner_decl == rhs_info.aggregate.name.anon.owner_decl and + lhs_anon.index == rhs_info.aggregate.name.anon.index and lhs_anon.id == rhs_info.aggregate.name.anon.id, .fwd_decl => |lhs_fwd_decl| rhs_info.aggregate.name == .fwd_decl and pool_adapter.eql(lhs_fwd_decl, rhs_info.aggregate.name.fwd_decl), @@ -1105,7 +1105,7 @@ pub const 
Pool = struct { tag: Info.AggregateTag, name: union(enum) { anon: []const Info.Field, - owner_decl: DeclIndex, + index: InternPool.Index, }, }, ) !CType { @@ -1145,13 +1145,13 @@ pub const Pool = struct { .@"enum" => unreachable, }, extra_index); }, - .owner_decl => |owner_decl| { - hasher.update(owner_decl); + .index => |index| { + hasher.update(index); return pool.tagData(allocator, hasher, switch (fwd_decl_info.tag) { .@"struct" => .fwd_decl_struct, .@"union" => .fwd_decl_union, .@"enum" => unreachable, - }, @intFromEnum(owner_decl)); + }, @intFromEnum(index)); }, } } @@ -1164,7 +1164,7 @@ pub const Pool = struct { @"packed": bool = false, name: union(enum) { anon: struct { - owner_decl: DeclIndex, + index: InternPool.Index, id: u32, }, fwd_decl: CType, @@ -1176,7 +1176,7 @@ pub const Pool = struct { switch (aggregate_info.name) { .anon => |anon| { const extra: AggregateAnon = .{ - .owner_decl = anon.owner_decl, + .index = anon.index, .id = anon.id, .fields_len = @intCast(aggregate_info.fields.len), }; @@ -1683,7 +1683,7 @@ pub const Pool = struct { .auto, .@"extern" => { const fwd_decl = try pool.getFwdDecl(allocator, .{ .tag = .@"struct", - .name = .{ .owner_decl = loaded_struct.decl.unwrap().? 
}, + .name = .{ .index = ip_index }, }); if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(pt)) fwd_decl @@ -1822,7 +1822,7 @@ pub const Pool = struct { const has_tag = loaded_union.hasTag(ip); const fwd_decl = try pool.getFwdDecl(allocator, .{ .tag = if (has_tag) .@"struct" else .@"union", - .name = .{ .owner_decl = loaded_union.decl }, + .name = .{ .index = ip_index }, }); if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(pt)) fwd_decl @@ -1837,7 +1837,7 @@ pub const Pool = struct { ); var hasher = Hasher.init; var tag: Pool.Tag = .aggregate_union; - var payload_align: Alignment = .@"1"; + var payload_align: InternPool.Alignment = .@"1"; for (0..loaded_union.field_types.len) |field_index| { const field_type = Type.fromInterned( loaded_union.field_types.get(ip)[field_index], @@ -1915,7 +1915,7 @@ pub const Pool = struct { &hasher, AggregateAnon, .{ - .owner_decl = loaded_union.decl, + .index = ip_index, .id = 0, .fields_len = fields_len, }, @@ -2017,7 +2017,7 @@ pub const Pool = struct { .undef, .simple_value, .variable, - .extern_func, + .@"extern", .func, .int, .err, @@ -2032,7 +2032,7 @@ pub const Pool = struct { .aggregate, .un, .memoized_call, - => unreachable, + => unreachable, // values, not types }, } } @@ -2123,9 +2123,9 @@ pub const Pool = struct { }); } }, - .owner_decl => |owner_decl| pool.items.appendAssumeCapacity(.{ + .index => |index| pool.items.appendAssumeCapacity(.{ .tag = tag, - .data = @intFromEnum(owner_decl), + .data = @intFromEnum(index), }), }, .aggregate => |aggregate_info| { @@ -2133,7 +2133,7 @@ pub const Pool = struct { .tag = tag, .data = switch (aggregate_info.name) { .anon => |anon| try pool.addExtra(allocator, AggregateAnon, .{ - .owner_decl = anon.owner_decl, + .index = anon.index, .id = anon.id, .fields_len = aggregate_info.fields.len, }, aggregate_info.fields.len * @typeInfo(Field).Struct.fields.len), @@ -2221,7 +2221,7 @@ pub const Pool = struct { Pool.Tag => @compileError("pass tag to final"), 
CType, CType.Index => @compileError("hash ctype.hash(pool) instead"), String, String.Index => @compileError("hash string.slice(pool) instead"), - u32, DeclIndex, Aligned.Flags => hasher.impl.update(std.mem.asBytes(&data)), + u32, InternPool.Index, Aligned.Flags => hasher.impl.update(std.mem.asBytes(&data)), []const u8 => hasher.impl.update(data), else => @compileError("unhandled type: " ++ @typeName(@TypeOf(data))), } @@ -2426,7 +2426,7 @@ pub const Pool = struct { }; const AggregateAnon = struct { - owner_decl: DeclIndex, + index: InternPool.Index, id: u32, fields_len: u32, }; @@ -2467,7 +2467,7 @@ pub const Pool = struct { const value = @field(extra, field.name); array.appendAssumeCapacity(switch (field.type) { u32 => value, - CType.Index, String.Index, DeclIndex => @intFromEnum(value), + CType.Index, String.Index, InternPool.Index => @intFromEnum(value), Aligned.Flags => @bitCast(value), else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)), @@ -2530,7 +2530,7 @@ pub const Pool = struct { inline for (fields, pool.extra.items[extra_index..][0..fields.len]) |field, value| @field(extra, field.name) = switch (field.type) { u32 => value, - CType.Index, String.Index, DeclIndex => @enumFromInt(value), + CType.Index, String.Index, InternPool.Index => @enumFromInt(value), Aligned.Flags => @bitCast(value), else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)), }; @@ -2546,8 +2546,8 @@ pub const Pool = struct { }; pub const AlignAs = packed struct { - @"align": Alignment, - abi: Alignment, + @"align": InternPool.Alignment, + abi: InternPool.Alignment, pub fn fromAlignment(alignas: AlignAs) AlignAs { assert(alignas.abi != .none); @@ -2556,14 +2556,14 @@ pub const AlignAs = packed struct { .abi = alignas.abi, }; } - pub fn fromAbiAlignment(abi: Alignment) AlignAs { + pub fn fromAbiAlignment(abi: InternPool.Alignment) AlignAs { assert(abi != .none); return .{ .@"align" = abi, .abi = abi }; } pub fn 
fromByteUnits(@"align": u64, abi: u64) AlignAs { return fromAlignment(.{ - .@"align" = Alignment.fromByteUnits(@"align"), - .abi = Alignment.fromNonzeroByteUnits(abi), + .@"align" = InternPool.Alignment.fromByteUnits(@"align"), + .abi = InternPool.Alignment.fromNonzeroByteUnits(abi), }); } @@ -2578,11 +2578,10 @@ pub const AlignAs = packed struct { } }; -const Alignment = @import("../../InternPool.zig").Alignment; const assert = std.debug.assert; const CType = @This(); +const InternPool = @import("../../InternPool.zig"); const Module = @import("../../Package/Module.zig"); const std = @import("std"); const Type = @import("../../Type.zig"); const Zcu = @import("../../Zcu.zig"); -const DeclIndex = @import("../../InternPool.zig").DeclIndex; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 8b13b1f2055a..89a24152fcb5 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -776,7 +776,7 @@ pub const Object = struct { debug_enums: std.ArrayListUnmanaged(Builder.Metadata), debug_globals: std.ArrayListUnmanaged(Builder.Metadata), - debug_file_map: std.AutoHashMapUnmanaged(*const Zcu.File, Builder.Metadata), + debug_file_map: std.AutoHashMapUnmanaged(Zcu.File.Index, Builder.Metadata), debug_type_map: std.AutoHashMapUnmanaged(Type, Builder.Metadata), debug_unresolved_namespace_scopes: std.AutoArrayHashMapUnmanaged(InternPool.NamespaceIndex, Builder.Metadata), @@ -790,11 +790,13 @@ pub const Object = struct { /// version of the name and incorrectly get function not found in the llvm module. /// * it works for functions not all globals. /// Therefore, this table keeps track of the mapping. - decl_map: std.AutoHashMapUnmanaged(InternPool.DeclIndex, Builder.Global.Index), + nav_map: std.AutoHashMapUnmanaged(InternPool.Nav.Index, Builder.Global.Index), /// Same deal as `decl_map` but for anonymous declarations, which are always global constants. 
- anon_decl_map: std.AutoHashMapUnmanaged(InternPool.Index, Builder.Global.Index), - /// Serves the same purpose as `decl_map` but only used for the `is_named_enum_value` instruction. - named_enum_map: std.AutoHashMapUnmanaged(InternPool.DeclIndex, Builder.Function.Index), + uav_map: std.AutoHashMapUnmanaged(InternPool.Index, Builder.Global.Index), + /// Maps enum types to their corresponding LLVM functions for implementing the `tag_name` instruction. + enum_tag_name_map: std.AutoHashMapUnmanaged(InternPool.Index, Builder.Global.Index), + /// Serves the same purpose as `enum_tag_name_map` but for the `is_named_enum_value` instruction. + named_enum_map: std.AutoHashMapUnmanaged(InternPool.Index, Builder.Function.Index), /// Maps Zig types to LLVM types. The table memory is backed by the GPA of /// the compiler. /// TODO when InternPool garbage collection is implemented, this map needs @@ -963,8 +965,9 @@ pub const Object = struct { .debug_type_map = .{}, .debug_unresolved_namespace_scopes = .{}, .target = target, - .decl_map = .{}, - .anon_decl_map = .{}, + .nav_map = .{}, + .uav_map = .{}, + .enum_tag_name_map = .{}, .named_enum_map = .{}, .type_map = .{}, .error_name_table = .none, @@ -981,8 +984,9 @@ pub const Object = struct { self.debug_file_map.deinit(gpa); self.debug_type_map.deinit(gpa); self.debug_unresolved_namespace_scopes.deinit(gpa); - self.decl_map.deinit(gpa); - self.anon_decl_map.deinit(gpa); + self.nav_map.deinit(gpa); + self.uav_map.deinit(gpa); + self.enum_tag_name_map.deinit(gpa); self.named_enum_map.deinit(gpa); self.type_map.deinit(gpa); self.builder.deinit(); @@ -1108,7 +1112,7 @@ pub const Object = struct { const fwd_ref = self.debug_unresolved_namespace_scopes.values()[i]; const namespace = zcu.namespacePtr(namespace_index); - const debug_type = try self.lowerDebugType(namespace.getType(zcu)); + const debug_type = try self.lowerDebugType(Type.fromInterned(namespace.owner_type)); self.builder.debugForwardReferenceSetType(fwd_ref, debug_type); 
} @@ -1328,24 +1332,22 @@ pub const Object = struct { assert(std.meta.eql(pt, o.pt)); const zcu = pt.zcu; const comp = zcu.comp; + const ip = &zcu.intern_pool; const func = zcu.funcInfo(func_index); - const decl_index = func.owner_decl; - const decl = zcu.declPtr(decl_index); - const namespace = zcu.namespacePtr(decl.src_namespace); - const file_scope = namespace.fileScope(zcu); - const owner_mod = file_scope.mod; - const fn_info = zcu.typeToFunc(decl.typeOf(zcu)).?; + const nav = ip.getNav(func.owner_nav); + const file_scope = zcu.navFileScopeIndex(func.owner_nav); + const owner_mod = zcu.fileByIndex(file_scope).mod; + const fn_ty = Type.fromInterned(func.ty); + const fn_info = zcu.typeToFunc(fn_ty).?; const target = owner_mod.resolved_target.result; - const ip = &zcu.intern_pool; - var dg: DeclGen = .{ + var ng: NavGen = .{ .object = o, - .decl_index = decl_index, - .decl = decl, + .nav_index = func.owner_nav, .err_msg = null, }; - const function_index = try o.resolveLlvmFunction(decl_index); + const function_index = try o.resolveLlvmFunction(func.owner_nav); var attributes = try function_index.ptrConst(&o.builder).attributes.toWip(&o.builder); defer attributes.deinit(&o.builder); @@ -1409,7 +1411,7 @@ pub const Object = struct { } }, &o.builder); } - if (decl.@"linksection".toSlice(ip)) |section| + if (nav.status.resolved.@"linksection".toSlice(ip)) |section| function_index.setSection(try o.builder.string(section), &o.builder); var deinit_wip = true; @@ -1422,7 +1424,7 @@ pub const Object = struct { var llvm_arg_i: u32 = 0; - // This gets the LLVM values from the function and stores them in `dg.args`. + // This gets the LLVM values from the function and stores them in `ng.args`. 
const sret = firstParamSRet(fn_info, pt, target); const ret_ptr: Builder.Value = if (sret) param: { const param = wip.arg(llvm_arg_i); @@ -1622,13 +1624,13 @@ pub const Object = struct { const file, const subprogram = if (!wip.strip) debug_info: { const file = try o.getDebugFile(file_scope); - const line_number = decl.navSrcLine(zcu) + 1; - const is_internal_linkage = decl.val.getExternFunc(zcu) == null; - const debug_decl_type = try o.lowerDebugType(decl.typeOf(zcu)); + const line_number = zcu.navSrcLine(func.owner_nav) + 1; + const is_internal_linkage = ip.indexToKey(nav.status.resolved.val) != .@"extern"; + const debug_decl_type = try o.lowerDebugType(fn_ty); const subprogram = try o.builder.debugSubprogram( file, - try o.builder.metadataString(decl.name.toSlice(ip)), + try o.builder.metadataString(nav.name.toSlice(ip)), try o.builder.metadataStringFromStrtabString(function_index.name(&o.builder)), line_number, line_number + func.lbrace_line, @@ -1654,7 +1656,7 @@ pub const Object = struct { .gpa = gpa, .air = air, .liveness = liveness, - .dg = &dg, + .ng = &ng, .wip = wip, .is_naked = fn_info.cc == .Naked, .ret_ptr = ret_ptr, @@ -1665,7 +1667,7 @@ pub const Object = struct { .sync_scope = if (owner_mod.single_threaded) .singlethread else .system, .file = file, .scope = subprogram, - .base_line = dg.decl.navSrcLine(zcu), + .base_line = zcu.navSrcLine(func.owner_nav), .prev_dbg_line = 0, .prev_dbg_column = 0, .err_ret_trace = err_ret_trace, @@ -1675,9 +1677,8 @@ pub const Object = struct { fg.genBody(air.getMainBody()) catch |err| switch (err) { error.CodegenFail => { - decl.analysis = .codegen_failure; - try zcu.failed_analysis.put(zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }), dg.err_msg.?); - dg.err_msg = null; + try zcu.failed_codegen.put(zcu.gpa, func.owner_nav, ng.err_msg.?); + ng.err_msg = null; return; }, else => |e| return e, @@ -1686,20 +1687,17 @@ pub const Object = struct { try fg.wip.finish(); } - pub fn updateDecl(self: *Object, pt: 
Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { + pub fn updateNav(self: *Object, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void { assert(std.meta.eql(pt, self.pt)); - const decl = pt.zcu.declPtr(decl_index); - var dg: DeclGen = .{ + var ng: NavGen = .{ .object = self, - .decl = decl, - .decl_index = decl_index, + .nav_index = nav_index, .err_msg = null, }; - dg.genDecl() catch |err| switch (err) { + ng.genDecl() catch |err| switch (err) { error.CodegenFail => { - decl.analysis = .codegen_failure; - try pt.zcu.failed_analysis.put(pt.zcu.gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }), dg.err_msg.?); - dg.err_msg = null; + try pt.zcu.failed_codegen.put(pt.zcu.gpa, nav_index, ng.err_msg.?); + ng.err_msg = null; return; }, else => |e| return e, @@ -1714,19 +1712,18 @@ pub const Object = struct { ) link.File.UpdateExportsError!void { assert(std.meta.eql(pt, self.pt)); const zcu = pt.zcu; - const decl_index = switch (exported) { - .decl_index => |i| i, - .value => |val| return updateExportedValue(self, zcu, val, export_indices), + const nav_index = switch (exported) { + .nav => |nav| nav, + .uav => |uav| return updateExportedValue(self, zcu, uav, export_indices), }; const ip = &zcu.intern_pool; - const global_index = self.decl_map.get(decl_index).?; - const decl = zcu.declPtr(decl_index); + const global_index = self.nav_map.get(nav_index).?; const comp = zcu.comp; if (export_indices.len != 0) { return updateExportedGlobal(self, zcu, global_index, export_indices); } else { - const fqn = try self.builder.strtabString(decl.fqn.toSlice(ip)); + const fqn = try self.builder.strtabString(ip.getNav(nav_index).fqn.toSlice(ip)); try global_index.rename(fqn, &self.builder); global_index.setLinkage(.internal, &self.builder); if (comp.config.dll_export_fns) @@ -1745,7 +1742,7 @@ pub const Object = struct { const ip = &mod.intern_pool; const main_exp_name = try o.builder.strtabString(mod.all_exports.items[export_indices[0]].opts.name.toSlice(ip)); const 
global_index = i: { - const gop = try o.anon_decl_map.getOrPut(gpa, exported_value); + const gop = try o.uav_map.getOrPut(gpa, exported_value); if (gop.found_existing) { const global_index = gop.value_ptr.*; try global_index.rename(main_exp_name, &o.builder); @@ -1868,11 +1865,12 @@ pub const Object = struct { global.delete(&self.builder); } - fn getDebugFile(o: *Object, file: *const Zcu.File) Allocator.Error!Builder.Metadata { + fn getDebugFile(o: *Object, file_index: Zcu.File.Index) Allocator.Error!Builder.Metadata { const gpa = o.gpa; - const gop = try o.debug_file_map.getOrPut(gpa, file); - errdefer assert(o.debug_file_map.remove(file)); + const gop = try o.debug_file_map.getOrPut(gpa, file_index); + errdefer assert(o.debug_file_map.remove(file_index)); if (gop.found_existing) return gop.value_ptr.*; + const file = o.pt.zcu.fileByIndex(file_index); gop.value_ptr.* = try o.builder.debugFile( try o.builder.metadataString(std.fs.path.basename(file.sub_file_path)), dir_path: { @@ -1930,17 +1928,13 @@ pub const Object = struct { return debug_int_type; }, .Enum => { - const owner_decl_index = ty.getOwnerDecl(zcu); - const owner_decl = zcu.declPtr(owner_decl_index); - if (!ty.hasRuntimeBitsIgnoreComptime(pt)) { - const debug_enum_type = try o.makeEmptyNamespaceDebugType(owner_decl_index); + const debug_enum_type = try o.makeEmptyNamespaceDebugType(ty); try o.debug_type_map.put(gpa, ty, debug_enum_type); return debug_enum_type; } const enum_type = ip.loadEnumType(ty.toIntern()); - const enumerators = try gpa.alloc(Builder.Metadata, enum_type.names.len); defer gpa.free(enumerators); @@ -1963,9 +1957,11 @@ pub const Object = struct { ); } - const file_scope = zcu.namespacePtr(owner_decl.src_namespace).fileScope(zcu); - const file = try o.getDebugFile(file_scope); - const scope = try o.namespaceToDebugScope(owner_decl.src_namespace); + const file = try o.getDebugFile(ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFull(ip).file); + const scope = if 
(ty.getParentNamespace(zcu).?.unwrap()) |parent_namespace| + try o.namespaceToDebugScope(parent_namespace) + else + file; const name = try o.allocTypeName(ty); defer gpa.free(name); @@ -1974,7 +1970,7 @@ pub const Object = struct { try o.builder.metadataString(name), file, scope, - owner_decl.typeSrcLine(zcu) + 1, // Line + ty.typeDeclSrcLine(zcu).? + 1, // Line try o.lowerDebugType(int_ty), ty.abiSize(pt) * 8, (ty.abiAlignment(pt).toByteUnits() orelse 0) * 8, @@ -2138,14 +2134,18 @@ pub const Object = struct { const name = try o.allocTypeName(ty); defer gpa.free(name); - const owner_decl_index = ty.getOwnerDecl(zcu); - const owner_decl = zcu.declPtr(owner_decl_index); - const file_scope = zcu.namespacePtr(owner_decl.src_namespace).fileScope(zcu); + + const file = try o.getDebugFile(ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFull(ip).file); + const scope = if (ty.getParentNamespace(zcu).?.unwrap()) |parent_namespace| + try o.namespaceToDebugScope(parent_namespace) + else + file; + const debug_opaque_type = try o.builder.debugStructType( try o.builder.metadataString(name), - try o.getDebugFile(file_scope), - try o.namespaceToDebugScope(owner_decl.src_namespace), - owner_decl.typeSrcLine(zcu) + 1, // Line + file, + scope, + ty.typeDeclSrcLine(zcu).? + 1, // Line .none, // Underlying type 0, // Size 0, // Align @@ -2460,8 +2460,7 @@ pub const Object = struct { // into. Therefore we can satisfy this by making an empty namespace, // rather than changing the frontend to unnecessarily resolve the // struct field types. 
- const owner_decl_index = ty.getOwnerDecl(zcu); - const debug_struct_type = try o.makeEmptyNamespaceDebugType(owner_decl_index); + const debug_struct_type = try o.makeEmptyNamespaceDebugType(ty); try o.debug_type_map.put(gpa, ty, debug_struct_type); return debug_struct_type; } @@ -2470,8 +2469,7 @@ pub const Object = struct { } if (!ty.hasRuntimeBitsIgnoreComptime(pt)) { - const owner_decl_index = ty.getOwnerDecl(zcu); - const debug_struct_type = try o.makeEmptyNamespaceDebugType(owner_decl_index); + const debug_struct_type = try o.makeEmptyNamespaceDebugType(ty); try o.debug_type_map.put(gpa, ty, debug_struct_type); return debug_struct_type; } @@ -2536,8 +2534,6 @@ pub const Object = struct { return debug_struct_type; }, .Union => { - const owner_decl_index = ty.getOwnerDecl(zcu); - const name = try o.allocTypeName(ty); defer gpa.free(name); @@ -2546,7 +2542,7 @@ pub const Object = struct { !ty.hasRuntimeBitsIgnoreComptime(pt) or !union_type.haveLayout(ip)) { - const debug_union_type = try o.makeEmptyNamespaceDebugType(owner_decl_index); + const debug_union_type = try o.makeEmptyNamespaceDebugType(ty); try o.debug_type_map.put(gpa, ty, debug_union_type); return debug_union_type; } @@ -2762,8 +2758,7 @@ pub const Object = struct { fn namespaceToDebugScope(o: *Object, namespace_index: InternPool.NamespaceIndex) !Builder.Metadata { const zcu = o.pt.zcu; const namespace = zcu.namespacePtr(namespace_index); - const file_scope = namespace.fileScope(zcu); - if (namespace.parent == .none) return try o.getDebugFile(file_scope); + if (namespace.parent == .none) return try o.getDebugFile(namespace.file_scope); const gop = try o.debug_unresolved_namespace_scopes.getOrPut(o.gpa, namespace_index); @@ -2772,15 +2767,19 @@ pub const Object = struct { return gop.value_ptr.*; } - fn makeEmptyNamespaceDebugType(o: *Object, decl_index: InternPool.DeclIndex) !Builder.Metadata { + fn makeEmptyNamespaceDebugType(o: *Object, ty: Type) !Builder.Metadata { const zcu = o.pt.zcu; - const 
decl = zcu.declPtr(decl_index); - const file_scope = zcu.namespacePtr(decl.src_namespace).fileScope(zcu); + const ip = &zcu.intern_pool; + const file = try o.getDebugFile(ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFull(ip).file); + const scope = if (ty.getParentNamespace(zcu).?.unwrap()) |parent_namespace| + try o.namespaceToDebugScope(parent_namespace) + else + file; return o.builder.debugStructType( - try o.builder.metadataString(decl.name.toSlice(&zcu.intern_pool)), // TODO use fully qualified name - try o.getDebugFile(file_scope), - try o.namespaceToDebugScope(decl.src_namespace), - decl.typeSrcLine(zcu) + 1, + try o.builder.metadataString(ty.containerTypeName(ip).toSlice(ip)), // TODO use fully qualified name + file, + scope, + ty.typeDeclSrcLine(zcu).? + 1, .none, 0, 0, @@ -2791,25 +2790,24 @@ pub const Object = struct { fn getStackTraceType(o: *Object) Allocator.Error!Type { const pt = o.pt; const zcu = pt.zcu; + const ip = &zcu.intern_pool; const std_mod = zcu.std_mod; const std_file_imported = pt.importPkg(std_mod) catch unreachable; - const builtin_str = try zcu.intern_pool.getOrPutString(zcu.gpa, pt.tid, "builtin", .no_embedded_nulls); - const std_file_root_decl = zcu.fileRootDecl(std_file_imported.file_index); - const std_namespace = zcu.namespacePtr(zcu.declPtr(std_file_root_decl.unwrap().?).src_namespace); - const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Zcu.DeclAdapter{ .zcu = zcu }).?; + const builtin_str = try ip.getOrPutString(zcu.gpa, pt.tid, "builtin", .no_embedded_nulls); + const std_file_root_type = Type.fromInterned(zcu.fileRootType(std_file_imported.file_index)); + const std_namespace = ip.namespacePtr(std_file_root_type.getNamespaceIndex(zcu).unwrap().?); + const builtin_nav = std_namespace.pub_decls.getKeyAdapted(builtin_str, Zcu.Namespace.NameAdapter{ .zcu = zcu }).?; - const stack_trace_str = try zcu.intern_pool.getOrPutString(zcu.gpa, pt.tid, "StackTrace", .no_embedded_nulls); + const stack_trace_str = try 
ip.getOrPutString(zcu.gpa, pt.tid, "StackTrace", .no_embedded_nulls); // buffer is only used for int_type, `builtin` is a struct. - const builtin_ty = zcu.declPtr(builtin_decl).val.toType(); + const builtin_ty = zcu.navValue(builtin_nav).toType(); const builtin_namespace = zcu.namespacePtrUnwrap(builtin_ty.getNamespaceIndex(zcu)).?; - const stack_trace_decl_index = builtin_namespace.decls.getKeyAdapted(stack_trace_str, Zcu.DeclAdapter{ .zcu = zcu }).?; - const stack_trace_decl = zcu.declPtr(stack_trace_decl_index); + const stack_trace_nav = builtin_namespace.pub_decls.getKeyAdapted(stack_trace_str, Zcu.Namespace.NameAdapter{ .zcu = zcu }).?; // Sema should have ensured that StackTrace was analyzed. - assert(stack_trace_decl.has_tv); - return stack_trace_decl.val.toType(); + return zcu.navValue(stack_trace_nav).toType(); } fn allocTypeName(o: *Object, ty: Type) Allocator.Error![:0]const u8 { @@ -2824,29 +2822,33 @@ pub const Object = struct { /// completed, so if any attributes rely on that, they must be done in updateFunc, not here. 
fn resolveLlvmFunction( o: *Object, - decl_index: InternPool.DeclIndex, + nav_index: InternPool.Nav.Index, ) Allocator.Error!Builder.Function.Index { const pt = o.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; const gpa = o.gpa; - const decl = zcu.declPtr(decl_index); - const namespace = zcu.namespacePtr(decl.src_namespace); - const owner_mod = namespace.fileScope(zcu).mod; - const zig_fn_type = decl.typeOf(zcu); - const gop = try o.decl_map.getOrPut(gpa, decl_index); + const nav = ip.getNav(nav_index); + const owner_mod = zcu.navFileScope(nav_index).mod; + const resolved = nav.status.resolved; + const val = Value.fromInterned(resolved.val); + const ty = val.typeOf(zcu); + const gop = try o.nav_map.getOrPut(gpa, nav_index); if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.function; - assert(decl.has_tv); - const fn_info = zcu.typeToFunc(zig_fn_type).?; + const fn_info = zcu.typeToFunc(ty).?; const target = owner_mod.resolved_target.result; const sret = firstParamSRet(fn_info, pt, target); - const is_extern = decl.isExtern(zcu); + const is_extern, const lib_name = switch (ip.indexToKey(val.toIntern())) { + .variable => |variable| .{ false, variable.lib_name }, + .@"extern" => |@"extern"| .{ true, @"extern".lib_name }, + else => .{ false, .none }, + }; const function_index = try o.builder.addFunction( - try o.lowerType(zig_fn_type), - try o.builder.strtabString((if (is_extern) decl.name else decl.fqn).toSlice(ip)), - toLlvmAddressSpace(decl.@"addrspace", target), + try o.lowerType(ty), + try o.builder.strtabString((if (is_extern) nav.name else nav.fqn).toSlice(ip)), + toLlvmAddressSpace(resolved.@"addrspace", target), ); gop.value_ptr.* = function_index.ptrConst(&o.builder).global; @@ -2860,12 +2862,12 @@ pub const Object = struct { if (target.isWasm()) { try attributes.addFnAttr(.{ .string = .{ .kind = try o.builder.string("wasm-import-name"), - .value = try o.builder.string(decl.name.toSlice(ip)), + .value = try 
o.builder.string(nav.name.toSlice(ip)), } }, &o.builder); - if (decl.getOwnedExternFunc(zcu).?.lib_name.toSlice(ip)) |lib_name| { - if (!std.mem.eql(u8, lib_name, "c")) try attributes.addFnAttr(.{ .string = .{ + if (lib_name.toSlice(ip)) |lib_name_slice| { + if (!std.mem.eql(u8, lib_name_slice, "c")) try attributes.addFnAttr(.{ .string = .{ .kind = try o.builder.string("wasm-import-module"), - .value = try o.builder.string(lib_name), + .value = try o.builder.string(lib_name_slice), } }, &o.builder); } } @@ -2901,8 +2903,8 @@ pub const Object = struct { else => function_index.setCallConv(toLlvmCallConv(fn_info.cc, target), &o.builder), } - if (decl.alignment != .none) - function_index.setAlignment(decl.alignment.toLlvm(), &o.builder); + if (resolved.alignment != .none) + function_index.setAlignment(resolved.alignment.toLlvm(), &o.builder); // Function attributes that are independent of analysis results of the function body. try o.addCommonFnAttributes(&attributes, owner_mod); @@ -3006,15 +3008,15 @@ pub const Object = struct { } } - fn resolveGlobalAnonDecl( + fn resolveGlobalUav( o: *Object, - decl_val: InternPool.Index, + uav: InternPool.Index, llvm_addr_space: Builder.AddrSpace, alignment: InternPool.Alignment, ) Error!Builder.Variable.Index { assert(alignment != .none); // TODO: Add address space to the anon_decl_map - const gop = try o.anon_decl_map.getOrPut(o.gpa, decl_val); + const gop = try o.uav_map.getOrPut(o.gpa, uav); if (gop.found_existing) { // Keep the greater of the two alignments. 
const variable_index = gop.value_ptr.ptr(&o.builder).kind.variable; @@ -3023,19 +3025,19 @@ pub const Object = struct { variable_index.setAlignment(max_alignment.toLlvm(), &o.builder); return variable_index; } - errdefer assert(o.anon_decl_map.remove(decl_val)); + errdefer assert(o.uav_map.remove(uav)); const mod = o.pt.zcu; - const decl_ty = mod.intern_pool.typeOf(decl_val); + const decl_ty = mod.intern_pool.typeOf(uav); const variable_index = try o.builder.addVariable( - try o.builder.strtabStringFmt("__anon_{d}", .{@intFromEnum(decl_val)}), + try o.builder.strtabStringFmt("__anon_{d}", .{@intFromEnum(uav)}), try o.lowerType(Type.fromInterned(decl_ty)), llvm_addr_space, ); gop.value_ptr.* = variable_index.ptrConst(&o.builder).global; - try variable_index.setInitializer(try o.lowerValue(decl_val), &o.builder); + try variable_index.setInitializer(try o.lowerValue(uav), &o.builder); variable_index.setLinkage(.internal, &o.builder); variable_index.setMutability(.constant, &o.builder); variable_index.setUnnamedAddr(.unnamed_addr, &o.builder); @@ -3043,24 +3045,29 @@ pub const Object = struct { return variable_index; } - fn resolveGlobalDecl( + fn resolveGlobalNav( o: *Object, - decl_index: InternPool.DeclIndex, + nav_index: InternPool.Nav.Index, ) Allocator.Error!Builder.Variable.Index { - const gop = try o.decl_map.getOrPut(o.gpa, decl_index); + const gop = try o.nav_map.getOrPut(o.gpa, nav_index); if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.variable; - errdefer assert(o.decl_map.remove(decl_index)); + errdefer assert(o.nav_map.remove(nav_index)); const pt = o.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; - const decl = zcu.declPtr(decl_index); - const is_extern = decl.isExtern(zcu); + const nav = ip.getNav(nav_index); + const resolved = nav.status.resolved; + const is_extern, const is_threadlocal, const is_weak_linkage = switch (ip.indexToKey(resolved.val)) { + .variable => |variable| .{ false, variable.is_threadlocal, 
variable.is_weak_linkage }, + .@"extern" => |@"extern"| .{ true, @"extern".is_threadlocal, @"extern".is_weak_linkage }, + else => .{ false, false, false }, + }; const variable_index = try o.builder.addVariable( - try o.builder.strtabString((if (is_extern) decl.name else decl.fqn).toSlice(ip)), - try o.lowerType(decl.typeOf(zcu)), - toLlvmGlobalAddressSpace(decl.@"addrspace", zcu.getTarget()), + try o.builder.strtabString((if (is_extern) nav.name else nav.fqn).toSlice(ip)), + try o.lowerType(Type.fromInterned(nav.typeOf(ip))), + toLlvmGlobalAddressSpace(resolved.@"addrspace", zcu.getTarget()), ); gop.value_ptr.* = variable_index.ptrConst(&o.builder).global; @@ -3068,15 +3075,9 @@ pub const Object = struct { if (is_extern) { variable_index.setLinkage(.external, &o.builder); variable_index.setUnnamedAddr(.default, &o.builder); - if (decl.val.getVariable(zcu)) |decl_var| { - const decl_namespace = zcu.namespacePtr(decl.src_namespace); - const single_threaded = decl_namespace.fileScope(zcu).mod.single_threaded; - variable_index.setThreadLocal( - if (decl_var.is_threadlocal and !single_threaded) .generaldynamic else .default, - &o.builder, - ); - if (decl_var.is_weak_linkage) variable_index.setLinkage(.extern_weak, &o.builder); - } + if (is_threadlocal and !zcu.navFileScope(nav_index).mod.single_threaded) + variable_index.setThreadLocal(.generaldynamic, &o.builder); + if (is_weak_linkage) variable_index.setLinkage(.extern_weak, &o.builder); } else { variable_index.setLinkage(.internal, &o.builder); variable_index.setUnnamedAddr(.unnamed_addr, &o.builder); @@ -3286,8 +3287,6 @@ pub const Object = struct { return int_ty; } - const decl = mod.declPtr(struct_type.decl.unwrap().?); - var llvm_field_types = std.ArrayListUnmanaged(Builder.Type){}; defer llvm_field_types.deinit(o.gpa); // Although we can estimate how much capacity to add, these cannot be @@ -3351,7 +3350,7 @@ pub const Object = struct { ); } - const ty = try o.builder.opaqueType(try 
o.builder.string(decl.fqn.toSlice(ip))); + const ty = try o.builder.opaqueType(try o.builder.string(t.containerTypeName(ip).toSlice(ip))); try o.type_map.put(o.gpa, t.toIntern(), ty); o.builder.namedTypeSetBody( @@ -3440,8 +3439,6 @@ pub const Object = struct { return enum_tag_ty; } - const decl = mod.declPtr(union_obj.decl); - const aligned_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[layout.most_aligned_field]); const aligned_field_llvm_ty = try o.lowerType(aligned_field_ty); @@ -3460,7 +3457,7 @@ pub const Object = struct { }; if (layout.tag_size == 0) { - const ty = try o.builder.opaqueType(try o.builder.string(decl.fqn.toSlice(ip))); + const ty = try o.builder.opaqueType(try o.builder.string(t.containerTypeName(ip).toSlice(ip))); try o.type_map.put(o.gpa, t.toIntern(), ty); o.builder.namedTypeSetBody( @@ -3488,7 +3485,7 @@ pub const Object = struct { llvm_fields_len += 1; } - const ty = try o.builder.opaqueType(try o.builder.string(decl.fqn.toSlice(ip))); + const ty = try o.builder.opaqueType(try o.builder.string(t.containerTypeName(ip).toSlice(ip))); try o.type_map.put(o.gpa, t.toIntern(), ty); o.builder.namedTypeSetBody( @@ -3500,8 +3497,7 @@ pub const Object = struct { .opaque_type => { const gop = try o.type_map.getOrPut(o.gpa, t.toIntern()); if (!gop.found_existing) { - const decl = mod.declPtr(ip.loadOpaqueType(t.toIntern()).decl); - gop.value_ptr.* = try o.builder.opaqueType(try o.builder.string(decl.fqn.toSlice(ip))); + gop.value_ptr.* = try o.builder.opaqueType(try o.builder.string(t.containerTypeName(ip).toSlice(ip))); } return gop.value_ptr.*; }, @@ -3512,7 +3508,7 @@ pub const Object = struct { .undef, .simple_value, .variable, - .extern_func, + .@"extern", .func, .int, .err, @@ -3632,15 +3628,13 @@ pub const Object = struct { const ty = Type.fromInterned(val_key.typeOf()); switch (val_key) { - .extern_func => |extern_func| { - const fn_decl_index = extern_func.decl; - const function_index = try o.resolveLlvmFunction(fn_decl_index); + 
.@"extern" => |@"extern"| { + const function_index = try o.resolveLlvmFunction(@"extern".owner_nav); const ptr = function_index.ptrConst(&o.builder).global.toConst(); return o.builder.convConst(ptr, llvm_int_ty); }, .func => |func| { - const fn_decl_index = func.owner_decl; - const function_index = try o.resolveLlvmFunction(fn_decl_index); + const function_index = try o.resolveLlvmFunction(func.owner_nav); const ptr = function_index.ptrConst(&o.builder).global.toConst(); return o.builder.convConst(ptr, llvm_int_ty); }, @@ -3783,14 +3777,12 @@ pub const Object = struct { .enum_literal, .empty_enum_value, => unreachable, // non-runtime values - .extern_func => |extern_func| { - const fn_decl_index = extern_func.decl; - const function_index = try o.resolveLlvmFunction(fn_decl_index); + .@"extern" => |@"extern"| { + const function_index = try o.resolveLlvmFunction(@"extern".owner_nav); return function_index.ptrConst(&o.builder).global.toConst(); }, .func => |func| { - const fn_decl_index = func.owner_decl; - const function_index = try o.resolveLlvmFunction(fn_decl_index); + const function_index = try o.resolveLlvmFunction(func.owner_nav); return function_index.ptrConst(&o.builder).global.toConst(); }, .int => { @@ -4284,14 +4276,14 @@ pub const Object = struct { const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr; const offset: u64 = prev_offset + ptr.byte_offset; return switch (ptr.base_addr) { - .decl => |decl| { - const base_ptr = try o.lowerDeclRefValue(decl); + .nav => |nav| { + const base_ptr = try o.lowerNavRefValue(nav); return o.builder.gepConst(.inbounds, .i8, base_ptr, null, &.{ try o.builder.intConst(.i64, offset), }); }, - .anon_decl => |ad| { - const base_ptr = try o.lowerAnonDeclRef(ad); + .uav => |uav| { + const base_ptr = try o.lowerUavRef(uav); return o.builder.gepConst(.inbounds, .i8, base_ptr, null, &.{ try o.builder.intConst(.i64, offset), }); @@ -4332,39 +4324,37 @@ pub const Object = struct { }; } - /// This logic is very similar to 
`lowerDeclRefValue` but for anonymous declarations. + /// This logic is very similar to `lowerNavRefValue` but for anonymous declarations. /// Maybe the logic could be unified. - fn lowerAnonDeclRef( + fn lowerUavRef( o: *Object, - anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl, + uav: InternPool.Key.Ptr.BaseAddr.Uav, ) Error!Builder.Constant { const pt = o.pt; const mod = pt.zcu; const ip = &mod.intern_pool; - const decl_val = anon_decl.val; - const decl_ty = Type.fromInterned(ip.typeOf(decl_val)); + const uav_val = uav.val; + const uav_ty = Type.fromInterned(ip.typeOf(uav_val)); const target = mod.getTarget(); - if (Value.fromInterned(decl_val).getFunction(mod)) |func| { - _ = func; - @panic("TODO"); - } else if (Value.fromInterned(decl_val).getExternFunc(mod)) |func| { - _ = func; - @panic("TODO"); + switch (ip.indexToKey(uav_val)) { + .func => @panic("TODO"), + .@"extern" => @panic("TODO"), + else => {}, } - const ptr_ty = Type.fromInterned(anon_decl.orig_ty); + const ptr_ty = Type.fromInterned(uav.orig_ty); - const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn; - if ((!is_fn_body and !decl_ty.hasRuntimeBits(pt)) or - (is_fn_body and mod.typeToFunc(decl_ty).?.is_generic)) return o.lowerPtrToVoid(ptr_ty); + const is_fn_body = uav_ty.zigTypeTag(mod) == .Fn; + if ((!is_fn_body and !uav_ty.hasRuntimeBits(pt)) or + (is_fn_body and mod.typeToFunc(uav_ty).?.is_generic)) return o.lowerPtrToVoid(ptr_ty); if (is_fn_body) @panic("TODO"); const llvm_addr_space = toLlvmAddressSpace(ptr_ty.ptrAddressSpace(mod), target); const alignment = ptr_ty.ptrAlignment(pt); - const llvm_global = (try o.resolveGlobalAnonDecl(decl_val, llvm_addr_space, alignment)).ptrConst(&o.builder).global; + const llvm_global = (try o.resolveGlobalUav(uav.val, llvm_addr_space, alignment)).ptrConst(&o.builder).global; const llvm_val = try o.builder.convConst( llvm_global.toConst(), @@ -4374,44 +4364,41 @@ pub const Object = struct { return o.builder.convConst(llvm_val, try o.lowerType(ptr_ty)); } - fn 
lowerDeclRefValue(o: *Object, decl_index: InternPool.DeclIndex) Allocator.Error!Builder.Constant { + fn lowerNavRefValue(o: *Object, nav_index: InternPool.Nav.Index) Allocator.Error!Builder.Constant { const pt = o.pt; - const mod = pt.zcu; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; // In the case of something like: // fn foo() void {} // const bar = foo; // ... &bar; // `bar` is just an alias and we actually want to lower a reference to `foo`. - const decl = mod.declPtr(decl_index); - if (decl.val.getFunction(mod)) |func| { - if (func.owner_decl != decl_index) { - return o.lowerDeclRefValue(func.owner_decl); - } - } else if (decl.val.getExternFunc(mod)) |func| { - if (func.decl != decl_index) { - return o.lowerDeclRefValue(func.decl); - } - } + const owner_nav_index = switch (ip.indexToKey(zcu.navValue(nav_index).toIntern())) { + .func => |func| func.owner_nav, + .@"extern" => |@"extern"| @"extern".owner_nav, + else => nav_index, + }; + const owner_nav = ip.getNav(owner_nav_index); - const decl_ty = decl.typeOf(mod); - const ptr_ty = try decl.declPtrType(pt); + const nav_ty = Type.fromInterned(owner_nav.typeOf(ip)); + const ptr_ty = try pt.navPtrType(owner_nav_index); - const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn; - if ((!is_fn_body and !decl_ty.hasRuntimeBits(pt)) or - (is_fn_body and mod.typeToFunc(decl_ty).?.is_generic)) + const is_fn_body = nav_ty.zigTypeTag(zcu) == .Fn; + if ((!is_fn_body and !nav_ty.hasRuntimeBits(pt)) or + (is_fn_body and zcu.typeToFunc(nav_ty).?.is_generic)) { return o.lowerPtrToVoid(ptr_ty); } const llvm_global = if (is_fn_body) - (try o.resolveLlvmFunction(decl_index)).ptrConst(&o.builder).global + (try o.resolveLlvmFunction(owner_nav_index)).ptrConst(&o.builder).global else - (try o.resolveGlobalDecl(decl_index)).ptrConst(&o.builder).global; + (try o.resolveGlobalNav(owner_nav_index)).ptrConst(&o.builder).global; const llvm_val = try o.builder.convConst( llvm_global.toConst(), - try 
o.builder.ptrType(toLlvmAddressSpace(decl.@"addrspace", mod.getTarget())), + try o.builder.ptrType(toLlvmAddressSpace(owner_nav.status.resolved.@"addrspace", zcu.getTarget())), ); return o.builder.convConst(llvm_val, try o.lowerType(ptr_ty)); @@ -4553,18 +4540,16 @@ pub const Object = struct { const ip = &zcu.intern_pool; const enum_type = ip.loadEnumType(enum_ty.toIntern()); - // TODO: detect when the type changes and re-emit this function. - const gop = try o.decl_map.getOrPut(o.gpa, enum_type.decl); + const gop = try o.enum_tag_name_map.getOrPut(o.gpa, enum_ty.toIntern()); if (gop.found_existing) return gop.value_ptr.ptrConst(&o.builder).kind.function; - errdefer assert(o.decl_map.remove(enum_type.decl)); + errdefer assert(o.enum_tag_name_map.remove(enum_ty.toIntern())); const usize_ty = try o.lowerType(Type.usize); const ret_ty = try o.lowerType(Type.slice_const_u8_sentinel_0); - const decl = zcu.declPtr(enum_type.decl); const target = zcu.root_mod.resolved_target.result; const function_index = try o.builder.addFunction( try o.builder.fnType(ret_ty, &.{try o.lowerType(Type.fromInterned(enum_type.tag_ty))}, .normal), - try o.builder.strtabStringFmt("__zig_tag_name_{}", .{decl.fqn.fmt(ip)}), + try o.builder.strtabStringFmt("__zig_tag_name_{}", .{enum_type.name.fmt(ip)}), toLlvmAddressSpace(.generic, target), ); @@ -4624,86 +4609,73 @@ pub const Object = struct { } }; -pub const DeclGen = struct { +pub const NavGen = struct { object: *Object, - decl: *Zcu.Decl, - decl_index: InternPool.DeclIndex, + nav_index: InternPool.Nav.Index, err_msg: ?*Zcu.ErrorMsg, - fn ownerModule(dg: DeclGen) *Package.Module { - const o = dg.object; - const zcu = o.pt.zcu; - const namespace = zcu.namespacePtr(dg.decl.src_namespace); - const file_scope = namespace.fileScope(zcu); - return file_scope.mod; + fn ownerModule(ng: NavGen) *Package.Module { + return ng.object.pt.zcu.navFileScope(ng.nav_index).mod; } - fn todo(dg: *DeclGen, comptime format: []const u8, args: anytype) Error { + fn 
todo(ng: *NavGen, comptime format: []const u8, args: anytype) Error { @setCold(true); - assert(dg.err_msg == null); - const o = dg.object; + assert(ng.err_msg == null); + const o = ng.object; const gpa = o.gpa; - const src_loc = dg.decl.navSrcLoc(o.pt.zcu); - dg.err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "TODO (LLVM): " ++ format, args); + const src_loc = o.pt.zcu.navSrcLoc(ng.nav_index); + ng.err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "TODO (LLVM): " ++ format, args); return error.CodegenFail; } - fn genDecl(dg: *DeclGen) !void { - const o = dg.object; + fn genDecl(ng: *NavGen) !void { + const o = ng.object; const pt = o.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; - const decl = dg.decl; - const decl_index = dg.decl_index; - assert(decl.has_tv); + const nav_index = ng.nav_index; + const nav = ip.getNav(nav_index); + const resolved = nav.status.resolved; + + const is_extern, const lib_name, const is_threadlocal, const is_weak_linkage, const is_const, const init_val, const owner_nav = switch (ip.indexToKey(resolved.val)) { + .variable => |variable| .{ false, variable.lib_name, variable.is_threadlocal, variable.is_weak_linkage, false, variable.init, variable.owner_nav }, + .@"extern" => |@"extern"| .{ true, @"extern".lib_name, @"extern".is_threadlocal, @"extern".is_weak_linkage, @"extern".is_const, .none, @"extern".owner_nav }, + else => .{ false, .none, false, false, true, resolved.val, nav_index }, + }; + const ty = Type.fromInterned(nav.typeOf(ip)); - if (decl.val.getExternFunc(zcu)) |extern_func| { - _ = try o.resolveLlvmFunction(extern_func.decl); + if (is_extern and ip.isFunctionType(ty.toIntern())) { + _ = try o.resolveLlvmFunction(owner_nav); } else { - const variable_index = try o.resolveGlobalDecl(decl_index); - variable_index.setAlignment( - decl.getAlignment(pt).toLlvm(), - &o.builder, - ); - if (decl.@"linksection".toSlice(ip)) |section| + const variable_index = try o.resolveGlobalNav(nav_index); + 
variable_index.setAlignment(pt.navAlignment(nav_index).toLlvm(), &o.builder); + if (resolved.@"linksection".toSlice(ip)) |section| variable_index.setSection(try o.builder.string(section), &o.builder); - assert(decl.has_tv); - const init_val = if (decl.val.getVariable(zcu)) |decl_var| decl_var.init else init_val: { - variable_index.setMutability(.constant, &o.builder); - break :init_val decl.val.toIntern(); - }; + if (is_const) variable_index.setMutability(.constant, &o.builder); try variable_index.setInitializer(switch (init_val) { .none => .no_init, else => try o.lowerValue(init_val), }, &o.builder); - if (decl.val.getVariable(zcu)) |decl_var| { - const decl_namespace = zcu.namespacePtr(decl.src_namespace); - const single_threaded = decl_namespace.fileScope(zcu).mod.single_threaded; - variable_index.setThreadLocal( - if (decl_var.is_threadlocal and !single_threaded) .generaldynamic else .default, - &o.builder, - ); - } - - const line_number = decl.navSrcLine(zcu) + 1; + const file_scope = zcu.navFileScopeIndex(nav_index); + const mod = zcu.fileByIndex(file_scope).mod; + if (is_threadlocal and !mod.single_threaded) + variable_index.setThreadLocal(.generaldynamic, &o.builder); - const namespace = zcu.namespacePtr(decl.src_namespace); - const file_scope = namespace.fileScope(zcu); - const owner_mod = file_scope.mod; + const line_number = zcu.navSrcLine(nav_index) + 1; - if (!owner_mod.strip) { + if (!mod.strip) { const debug_file = try o.getDebugFile(file_scope); const debug_global_var = try o.builder.debugGlobalVar( - try o.builder.metadataString(decl.name.toSlice(ip)), // Name + try o.builder.metadataString(nav.name.toSlice(ip)), // Name try o.builder.metadataStringFromStrtabString(variable_index.name(&o.builder)), // Linkage name debug_file, // File debug_file, // Scope line_number, - try o.lowerDebugType(decl.typeOf(zcu)), + try o.lowerDebugType(ty), variable_index, - .{ .local = !decl.isExtern(zcu) }, + .{ .local = !is_extern }, ); const debug_expression = try 
o.builder.debugExpression(&.{}); @@ -4718,18 +4690,18 @@ pub const DeclGen = struct { } } - if (decl.isExtern(zcu)) { - const global_index = o.decl_map.get(decl_index).?; + if (is_extern) { + const global_index = o.nav_map.get(nav_index).?; const decl_name = decl_name: { - if (zcu.getTarget().isWasm() and decl.typeOf(zcu).zigTypeTag(zcu) == .Fn) { - if (decl.getOwnedExternFunc(zcu).?.lib_name.toSlice(ip)) |lib_name| { - if (!std.mem.eql(u8, lib_name, "c")) { - break :decl_name try o.builder.strtabStringFmt("{}|{s}", .{ decl.name.fmt(ip), lib_name }); + if (zcu.getTarget().isWasm() and ty.zigTypeTag(zcu) == .Fn) { + if (lib_name.toSlice(ip)) |lib_name_slice| { + if (!std.mem.eql(u8, lib_name_slice, "c")) { + break :decl_name try o.builder.strtabStringFmt("{}|{s}", .{ nav.name.fmt(ip), lib_name_slice }); } } } - break :decl_name try o.builder.strtabString(decl.name.toSlice(ip)); + break :decl_name try o.builder.strtabString(nav.name.toSlice(ip)); }; if (o.builder.getGlobal(decl_name)) |other_global| { @@ -4746,16 +4718,14 @@ pub const DeclGen = struct { if (zcu.comp.config.dll_export_fns) global_index.setDllStorageClass(.default, &o.builder); - if (decl.val.getVariable(zcu)) |decl_var| { - if (decl_var.is_weak_linkage) global_index.setLinkage(.extern_weak, &o.builder); - } + if (is_weak_linkage) global_index.setLinkage(.extern_weak, &o.builder); } } }; pub const FuncGen = struct { gpa: Allocator, - dg: *DeclGen, + ng: *NavGen, air: Air, liveness: Liveness, wip: Builder.WipFunction, @@ -4815,7 +4785,7 @@ pub const FuncGen = struct { fn todo(self: *FuncGen, comptime format: []const u8, args: anytype) Error { @setCold(true); - return self.dg.todo(format, args); + return self.ng.todo(format, args); } fn resolveInst(self: *FuncGen, inst: Air.Inst.Ref) !Builder.Value { @@ -4823,13 +4793,13 @@ pub const FuncGen = struct { const gop = try self.func_inst_table.getOrPut(gpa, inst); if (gop.found_existing) return gop.value_ptr.*; - const llvm_val = try self.resolveValue((try 
self.air.value(inst, self.dg.object.pt)).?); + const llvm_val = try self.resolveValue((try self.air.value(inst, self.ng.object.pt)).?); gop.value_ptr.* = llvm_val.toValue(); return llvm_val.toValue(); } fn resolveValue(self: *FuncGen, val: Value) Error!Builder.Constant { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const ty = val.typeOf(pt.zcu); const llvm_val = try o.lowerValue(val.toIntern()); @@ -4855,7 +4825,7 @@ pub const FuncGen = struct { } fn resolveNullOptUsize(self: *FuncGen) Error!Builder.Constant { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; if (o.null_opt_usize == .no_init) { o.null_opt_usize = try self.resolveValue(Value.fromInterned(try pt.intern(.{ .opt = .{ @@ -4867,7 +4837,7 @@ pub const FuncGen = struct { } fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); @@ -5132,20 +5102,19 @@ pub const FuncGen = struct { defer self.scope = old_scope; if (maybe_inline_func) |inline_func| { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const zcu = pt.zcu; + const ip = &zcu.intern_pool; const func = zcu.funcInfo(inline_func); - const decl_index = func.owner_decl; - const decl = zcu.declPtr(decl_index); - const namespace = zcu.namespacePtr(decl.src_namespace); - const file_scope = namespace.fileScope(zcu); - const owner_mod = file_scope.mod; + const nav = ip.getNav(func.owner_nav); + const file_scope = zcu.navFileScopeIndex(func.owner_nav); + const mod = zcu.fileByIndex(file_scope).mod; self.file = try o.getDebugFile(file_scope); - const line_number = decl.navSrcLine(zcu) + 1; + const line_number = zcu.navSrcLine(func.owner_nav) + 1; self.inlined = self.wip.debug_location; const fn_ty = try pt.funcType(.{ @@ -5155,15 +5124,15 @@ pub const FuncGen = struct { self.scope = try o.builder.debugSubprogram( 
self.file, - try o.builder.metadataString(decl.name.toSlice(&zcu.intern_pool)), - try o.builder.metadataString(decl.fqn.toSlice(&zcu.intern_pool)), + try o.builder.metadataString(nav.name.toSlice(&zcu.intern_pool)), + try o.builder.metadataString(nav.fqn.toSlice(&zcu.intern_pool)), line_number, line_number + func.lbrace_line, try o.lowerDebugType(fn_ty), .{ .di_flags = .{ .StaticMember = true }, .sp_flags = .{ - .Optimized = owner_mod.optimize_mode != .Debug, + .Optimized = mod.optimize_mode != .Debug, .Definition = true, .LocalToUnit = true, // TODO: we can't know this at this point, since the function could be exported later! }, @@ -5171,7 +5140,7 @@ pub const FuncGen = struct { o.debug_compile_unit, ); - self.base_line = decl.navSrcLine(zcu); + self.base_line = zcu.navSrcLine(func.owner_nav); const inlined_at_location = try self.wip.debug_location.toMetadata(&o.builder); self.wip.debug_location = .{ .location = .{ @@ -5183,7 +5152,7 @@ pub const FuncGen = struct { }; } - self.scope = try self.dg.object.builder.debugLexicalBlock( + self.scope = try self.ng.object.builder.debugLexicalBlock( self.scope, self.file, self.prev_dbg_line, @@ -5214,7 +5183,7 @@ pub const FuncGen = struct { const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const extra = self.air.extraData(Air.Call, pl_op.payload); const args: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len]); - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ip = &mod.intern_pool; @@ -5515,14 +5484,15 @@ pub const FuncGen = struct { } fn buildSimplePanic(fg: *FuncGen, panic_id: Zcu.PanicId) !void { - const o = fg.dg.object; - const mod = o.pt.zcu; - const msg_decl_index = mod.panic_messages[@intFromEnum(panic_id)].unwrap().?; - const msg_decl = mod.declPtr(msg_decl_index); - const msg_len = msg_decl.typeOf(mod).childType(mod).arrayLen(mod); - const msg_ptr = try o.lowerValue(msg_decl.val.toIntern()); + const o = 
fg.ng.object; + const zcu = o.pt.zcu; + const ip = &zcu.intern_pool; + const msg_nav_index = zcu.panic_messages[@intFromEnum(panic_id)].unwrap().?; + const msg_nav = ip.getNav(msg_nav_index); + const msg_len = Type.fromInterned(msg_nav.typeOf(ip)).childType(zcu).arrayLen(zcu); + const msg_ptr = try o.lowerValue(msg_nav.status.resolved.val); const null_opt_addr_global = try fg.resolveNullOptUsize(); - const target = mod.getTarget(); + const target = zcu.getTarget(); const llvm_usize = try o.lowerType(Type.usize); // example: // call fastcc void @test2.panic( @@ -5531,10 +5501,10 @@ pub const FuncGen = struct { // ptr null, ; stack trace // ptr @2, ; addr (null ?usize) // ) - const panic_func = mod.funcInfo(mod.panic_func_index); - const panic_decl = mod.declPtr(panic_func.owner_decl); - const fn_info = mod.typeToFunc(panic_decl.typeOf(mod)).?; - const panic_global = try o.resolveLlvmFunction(panic_func.owner_decl); + const panic_func = zcu.funcInfo(zcu.panic_func_index); + const panic_nav = ip.getNav(panic_func.owner_nav); + const fn_info = zcu.typeToFunc(Type.fromInterned(panic_nav.typeOf(ip))).?; + const panic_global = try o.resolveLlvmFunction(panic_func.owner_nav); _ = try fg.wip.call( .normal, toLlvmCallConv(fn_info.cc, target), @@ -5553,9 +5523,10 @@ pub const FuncGen = struct { } fn airRet(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; + const ip = &mod.intern_pool; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const ret_ty = self.typeOf(un_op); @@ -5581,7 +5552,7 @@ pub const FuncGen = struct { len, if (ptr_ty.isVolatilePtr(mod)) .@"volatile" else .normal, ); - const owner_mod = self.dg.ownerModule(); + const owner_mod = self.ng.ownerModule(); if (owner_mod.valgrind) { try self.valgrindMarkUndef(self.ret_ptr, len); } @@ -5602,7 +5573,7 @@ pub const FuncGen = struct { _ = try self.wip.retVoid(); return .none; } - 
const fn_info = mod.typeToFunc(self.dg.decl.typeOf(mod)).?; + const fn_info = mod.typeToFunc(Type.fromInterned(ip.getNav(self.ng.nav_index).typeOf(ip))).?; if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { if (Type.fromInterned(fn_info.return_type).isError(mod)) { // Functions with an empty error set are emitted with an error code @@ -5631,7 +5602,7 @@ pub const FuncGen = struct { len, .normal, ); - const owner_mod = self.dg.ownerModule(); + const owner_mod = self.ng.ownerModule(); if (owner_mod.valgrind) { try self.valgrindMarkUndef(rp, len); } @@ -5659,13 +5630,14 @@ pub const FuncGen = struct { } fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; + const ip = &mod.intern_pool; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const ptr_ty = self.typeOf(un_op); const ret_ty = ptr_ty.childType(mod); - const fn_info = mod.typeToFunc(self.dg.decl.typeOf(mod)).?; + const fn_info = mod.typeToFunc(Type.fromInterned(ip.getNav(self.ng.nav_index).typeOf(ip))).?; if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { if (Type.fromInterned(fn_info.return_type).isError(mod)) { // Functions with an empty error set are emitted with an error code @@ -5689,7 +5661,7 @@ pub const FuncGen = struct { } fn airCVaArg(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const list = try self.resolveInst(ty_op.operand); const arg_ty = ty_op.ty.toType(); @@ -5699,7 +5671,7 @@ pub const FuncGen = struct { } fn airCVaCopy(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const src_list = try self.resolveInst(ty_op.operand); @@ -5725,7 +5697,7 @@ pub const FuncGen = struct { } fn 
airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const va_list_ty = self.typeOfIndex(inst); const llvm_va_list_ty = try o.lowerType(va_list_ty); @@ -5767,7 +5739,7 @@ pub const FuncGen = struct { } fn airCmpLtErrorsLen(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try self.resolveInst(un_op); const llvm_fn = try o.getCmpLtErrorsLenFunction(); @@ -5790,7 +5762,7 @@ pub const FuncGen = struct { lhs: Builder.Value, rhs: Builder.Value, ) Allocator.Error!Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const scalar_ty = operand_ty.scalarType(mod); @@ -5897,7 +5869,7 @@ pub const FuncGen = struct { maybe_inline_func: ?InternPool.Index, body: []const Air.Inst.Index, ) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const inst_ty = self.typeOfIndex(inst); @@ -5948,7 +5920,7 @@ pub const FuncGen = struct { } fn airBr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const branch = self.air.instructions.items(.data)[@intFromEnum(inst)].br; const block = self.blocks.get(branch.block_inst).?; @@ -5988,7 +5960,7 @@ pub const FuncGen = struct { } fn airTry(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const inst = body_tail[0]; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; @@ -6003,7 +5975,7 @@ pub const FuncGen = struct { } fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const ty_pl = 
self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.TryPtr, ty_pl.payload); @@ -6023,7 +5995,7 @@ pub const FuncGen = struct { can_elide_load: bool, is_unused: bool, ) !Builder.Value { - const o = fg.dg.object; + const o = fg.ng.object; const pt = o.pt; const mod = pt.zcu; const payload_ty = err_union_ty.errorUnionPayload(mod); @@ -6088,7 +6060,7 @@ pub const FuncGen = struct { } fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const cond = try self.resolveInst(pl_op.operand); const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload); @@ -6152,7 +6124,7 @@ pub const FuncGen = struct { } fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const loop = self.air.extraData(Air.Block, ty_pl.payload); @@ -6176,7 +6148,7 @@ pub const FuncGen = struct { } fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -6195,7 +6167,7 @@ pub const FuncGen = struct { } fn airFloatFromInt(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -6280,7 +6252,7 @@ pub const FuncGen = struct { ) !Builder.Value { _ = fast; - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const target = mod.getTarget(); @@ -6342,13 +6314,13 @@ pub const FuncGen = struct { } fn sliceOrArrayPtr(fg: *FuncGen, ptr: Builder.Value, ty: Type) 
Allocator.Error!Builder.Value { - const o = fg.dg.object; + const o = fg.ng.object; const mod = o.pt.zcu; return if (ty.isSlice(mod)) fg.wip.extractValue(ptr, &.{0}, "") else ptr; } fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: Builder.Value, ty: Type) Allocator.Error!Builder.Value { - const o = fg.dg.object; + const o = fg.ng.object; const pt = o.pt; const mod = pt.zcu; const llvm_usize = try o.lowerType(Type.usize); @@ -6378,7 +6350,7 @@ pub const FuncGen = struct { } fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const slice_ptr = try self.resolveInst(ty_op.operand); @@ -6389,7 +6361,7 @@ pub const FuncGen = struct { } fn airSliceElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const inst = body_tail[0]; @@ -6413,7 +6385,7 @@ pub const FuncGen = struct { } fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -6427,7 +6399,7 @@ pub const FuncGen = struct { } fn airArrayElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const inst = body_tail[0]; @@ -6460,7 +6432,7 @@ pub const FuncGen = struct { } fn airPtrElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const inst = body_tail[0]; @@ -6486,7 +6458,7 @@ pub const FuncGen = struct { } fn airPtrElemPtr(self: *FuncGen, inst: 
Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; @@ -6529,7 +6501,7 @@ pub const FuncGen = struct { } fn airStructFieldVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const inst = body_tail[0]; @@ -6635,7 +6607,7 @@ pub const FuncGen = struct { } fn airFieldParentPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; @@ -6697,7 +6669,7 @@ pub const FuncGen = struct { } fn airDbgVarPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const operand = try self.resolveInst(pl_op.operand); @@ -6729,7 +6701,7 @@ pub const FuncGen = struct { } fn airDbgVarVal(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const operand = try self.resolveInst(pl_op.operand); const operand_ty = self.typeOf(pl_op.operand); @@ -6746,7 +6718,7 @@ pub const FuncGen = struct { ); const pt = o.pt; - const owner_mod = self.dg.ownerModule(); + const owner_mod = self.ng.ownerModule(); if (isByRef(operand_ty, pt)) { _ = try self.wip.callIntrinsic( .normal, @@ -6800,7 +6772,7 @@ pub const FuncGen = struct { // We don't have such an assembler implemented yet though. For now, // this implementation feeds the inline assembly code directly to LLVM. 
- const o = self.dg.object; + const o = self.ng.object; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); const is_volatile = @as(u1, @truncate(extra.data.flags >> 31)) != 0; @@ -7181,7 +7153,7 @@ pub const FuncGen = struct { operand_is_ptr: bool, cond: Builder.IntegerCondition, ) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; @@ -7226,7 +7198,7 @@ pub const FuncGen = struct { cond: Builder.IntegerCondition, operand_is_ptr: bool, ) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; @@ -7266,7 +7238,7 @@ pub const FuncGen = struct { } fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -7288,7 +7260,7 @@ pub const FuncGen = struct { fn airOptionalPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { comptime assert(optional_layout_version == 3); - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -7320,7 +7292,7 @@ pub const FuncGen = struct { } fn airOptionalPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const inst = body_tail[0]; @@ -7345,7 +7317,7 @@ pub const FuncGen = struct { body_tail: []const Air.Inst.Index, operand_is_ptr: bool, ) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const inst = 
body_tail[0]; @@ -7381,7 +7353,7 @@ pub const FuncGen = struct { inst: Air.Inst.Index, operand_is_ptr: bool, ) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -7415,7 +7387,7 @@ pub const FuncGen = struct { } fn airErrUnionPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -7456,7 +7428,7 @@ pub const FuncGen = struct { } fn airSaveErrReturnTraceIndex(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; @@ -7502,7 +7474,7 @@ pub const FuncGen = struct { } fn airWrapOptional(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const inst = body_tail[0]; @@ -7536,7 +7508,7 @@ pub const FuncGen = struct { } fn airWrapErrUnionPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const inst = body_tail[0]; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -7577,7 +7549,7 @@ pub const FuncGen = struct { } fn airWrapErrUnionErr(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const inst = body_tail[0]; @@ -7618,7 +7590,7 @@ pub const FuncGen = struct { } fn airWasmMemorySize(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const index = pl_op.payload; const llvm_usize = try 
o.lowerType(Type.usize); @@ -7628,7 +7600,7 @@ pub const FuncGen = struct { } fn airWasmMemoryGrow(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const index = pl_op.payload; const llvm_isize = try o.lowerType(Type.isize); @@ -7638,7 +7610,7 @@ pub const FuncGen = struct { } fn airVectorStoreElem(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const data = self.air.instructions.items(.data)[@intFromEnum(inst)].vector_store_elem; @@ -7661,7 +7633,7 @@ pub const FuncGen = struct { } fn airMin(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7681,7 +7653,7 @@ pub const FuncGen = struct { } fn airMax(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7701,7 +7673,7 @@ pub const FuncGen = struct { } fn airSlice(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr = try self.resolveInst(bin_op.lhs); @@ -7711,7 +7683,7 @@ pub const FuncGen = struct { } fn airAdd(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try 
self.resolveInst(bin_op.lhs); @@ -7729,7 +7701,7 @@ pub const FuncGen = struct { signed_intrinsic: Builder.Intrinsic, unsigned_intrinsic: Builder.Intrinsic, ) !Builder.Value { - const o = fg.dg.object; + const o = fg.ng.object; const mod = o.pt.zcu; const bin_op = fg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -7777,7 +7749,7 @@ pub const FuncGen = struct { } fn airAddSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7797,7 +7769,7 @@ pub const FuncGen = struct { } fn airSub(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7818,7 +7790,7 @@ pub const FuncGen = struct { } fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7838,7 +7810,7 @@ pub const FuncGen = struct { } fn airMul(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7859,7 +7831,7 @@ pub const FuncGen = struct { } fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ 
-7888,7 +7860,7 @@ pub const FuncGen = struct { } fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7904,7 +7876,7 @@ pub const FuncGen = struct { } fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7936,7 +7908,7 @@ pub const FuncGen = struct { } fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7954,7 +7926,7 @@ pub const FuncGen = struct { } fn airRem(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7971,7 +7943,7 @@ pub const FuncGen = struct { } fn airMod(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -8007,7 +7979,7 @@ pub const FuncGen = struct { } fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const ty_pl = 
self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -8029,7 +8001,7 @@ pub const FuncGen = struct { } fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -8057,7 +8029,7 @@ pub const FuncGen = struct { signed_intrinsic: Builder.Intrinsic, unsigned_intrinsic: Builder.Intrinsic, ) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; @@ -8111,7 +8083,7 @@ pub const FuncGen = struct { result_vector: Builder.Value, vector_len: usize, ) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; assert(args_vectors.len <= 3); var i: usize = 0; @@ -8143,7 +8115,7 @@ pub const FuncGen = struct { param_types: []const Builder.Type, return_type: Builder.Type, ) Allocator.Error!Builder.Function.Index { - const o = self.dg.object; + const o = self.ng.object; if (o.builder.getGlobal(fn_name)) |global| return switch (global.ptrConst(&o.builder).kind) { .alias => |alias| alias.getAliasee(&o.builder).ptrConst(&o.builder).kind.function, .function => |function| function, @@ -8165,7 +8137,7 @@ pub const FuncGen = struct { ty: Type, params: [2]Builder.Value, ) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const target = mod.getTarget(); const scalar_ty = ty.scalarType(mod); @@ -8271,7 +8243,7 @@ pub const FuncGen = struct { comptime params_len: usize, params: [params_len]Builder.Value, ) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const target = mod.getTarget(); const scalar_ty = ty.scalarType(mod); @@ -8412,7 +8384,7 @@ pub const 
FuncGen = struct { } fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; @@ -8483,7 +8455,7 @@ pub const FuncGen = struct { } fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -8501,7 +8473,7 @@ pub const FuncGen = struct { } fn airShl(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -8514,7 +8486,7 @@ pub const FuncGen = struct { } fn airShlSat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -8557,7 +8529,7 @@ pub const FuncGen = struct { } fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -8576,7 +8548,7 @@ pub const FuncGen = struct { } fn airAbs(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); @@ -8598,7 +8570,7 @@ pub const FuncGen = struct { } fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const ty_op = 
self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const dest_ty = self.typeOfIndex(inst); @@ -8614,7 +8586,7 @@ pub const FuncGen = struct { } fn airTrunc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); const dest_llvm_ty = try o.lowerType(self.typeOfIndex(inst)); @@ -8622,7 +8594,7 @@ pub const FuncGen = struct { } fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); @@ -8656,7 +8628,7 @@ pub const FuncGen = struct { } fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.resolveInst(ty_op.operand); @@ -8696,7 +8668,7 @@ pub const FuncGen = struct { } fn airIntFromPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try self.resolveInst(un_op); const ptr_ty = self.typeOf(un_op); @@ -8714,7 +8686,7 @@ pub const FuncGen = struct { } fn bitCast(self: *FuncGen, operand: Builder.Value, operand_ty: Type, inst_ty: Type) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const operand_is_ref = isByRef(operand_ty, pt); @@ -8739,7 +8711,7 @@ pub const FuncGen = struct { if (operand_ty.zigTypeTag(mod) == .Vector and inst_ty.zigTypeTag(mod) == .Array) { const elem_ty = operand_ty.childType(mod); if (!result_is_ref) { - return self.dg.todo("implement bitcast 
vector to non-ref array", .{}); + return self.ng.todo("implement bitcast vector to non-ref array", .{}); } const alignment = inst_ty.abiAlignment(pt).toLlvm(); const array_ptr = try self.buildAllocaWorkaround(inst_ty, alignment); @@ -8766,7 +8738,7 @@ pub const FuncGen = struct { } else if (operand_ty.zigTypeTag(mod) == .Array and inst_ty.zigTypeTag(mod) == .Vector) { const elem_ty = operand_ty.childType(mod); const llvm_vector_ty = try o.lowerType(inst_ty); - if (!operand_is_ref) return self.dg.todo("implement bitcast non-ref array to vector", .{}); + if (!operand_is_ref) return self.ng.todo("implement bitcast non-ref array to vector", .{}); const bitcast_ok = elem_ty.bitSize(pt) == elem_ty.abiSize(pt) * 8; if (bitcast_ok) { @@ -8831,9 +8803,9 @@ pub const FuncGen = struct { } fn airArg(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; - const mod = pt.zcu; + const zcu = pt.zcu; const arg_val = self.args[self.arg_index]; self.arg_index += 1; @@ -8846,9 +8818,8 @@ pub const FuncGen = struct { const name = self.air.instructions.items(.data)[@intFromEnum(inst)].arg.name; if (name == .none) return arg_val; - const func_index = self.dg.decl.getOwnedFunctionIndex(); - const func = mod.funcInfo(func_index); - const lbrace_line = mod.declPtr(func.owner_decl).navSrcLine(mod) + func.lbrace_line + 1; + const func = zcu.funcInfo(zcu.navValue(self.ng.nav_index).toIntern()); + const lbrace_line = zcu.navSrcLine(func.owner_nav) + func.lbrace_line + 1; const lbrace_col = func.lbrace_column + 1; const debug_parameter = try o.builder.debugParameter( @@ -8870,7 +8841,7 @@ pub const FuncGen = struct { }, }; - const owner_mod = self.dg.ownerModule(); + const mod = self.ng.ownerModule(); if (isByRef(inst_ty, pt)) { _ = try self.wip.callIntrinsic( .normal, @@ -8884,7 +8855,7 @@ pub const FuncGen = struct { }, "", ); - } else if (owner_mod.optimize_mode == .Debug) { + } else if (mod.optimize_mode == .Debug) { 
const alignment = inst_ty.abiAlignment(pt).toLlvm(); const alloca = try self.buildAlloca(arg_val.typeOfWip(&self.wip), alignment); _ = try self.wip.store(.normal, arg_val, alloca, alignment); @@ -8920,7 +8891,7 @@ pub const FuncGen = struct { } fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ptr_ty = self.typeOfIndex(inst); @@ -8934,7 +8905,7 @@ pub const FuncGen = struct { } fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ptr_ty = self.typeOfIndex(inst); @@ -8954,7 +8925,7 @@ pub const FuncGen = struct { llvm_ty: Builder.Type, alignment: Builder.Alignment, ) Allocator.Error!Builder.Value { - const target = self.dg.object.pt.zcu.getTarget(); + const target = self.ng.object.pt.zcu.getTarget(); return buildAllocaInner(&self.wip, llvm_ty, alignment, target); } @@ -8964,12 +8935,12 @@ pub const FuncGen = struct { ty: Type, alignment: Builder.Alignment, ) Allocator.Error!Builder.Value { - const o = self.dg.object; + const o = self.ng.object; return self.buildAlloca(try o.builder.arrayType(ty.abiSize(o.pt), .i8), alignment); } fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -8979,7 +8950,7 @@ pub const FuncGen = struct { const val_is_undef = if (try self.air.value(bin_op.rhs, pt)) |val| val.isUndefDeep(mod) else false; if (val_is_undef) { - const owner_mod = self.dg.ownerModule(); + const owner_mod = self.ng.ownerModule(); // Even if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM, and LLVM will optimize it out. 
Safety makes the difference @@ -9029,7 +9000,7 @@ pub const FuncGen = struct { /// /// The first instruction of `body_tail` is the one whose copy we want to elide. fn canElideLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) bool { - const o = fg.dg.object; + const o = fg.ng.object; const mod = o.pt.zcu; const ip = &mod.intern_pool; for (body_tail[1..]) |body_inst| { @@ -9045,7 +9016,7 @@ pub const FuncGen = struct { } fn airLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value { - const o = fg.dg.object; + const o = fg.ng.object; const pt = o.pt; const mod = pt.zcu; const inst = body_tail[0]; @@ -9077,7 +9048,7 @@ pub const FuncGen = struct { fn airRetAddr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { _ = inst; - const o = self.dg.object; + const o = self.ng.object; const llvm_usize = try o.lowerType(Type.usize); if (!target_util.supportsReturnAddress(o.pt.zcu.getTarget())) { // https://github.com/ziglang/zig/issues/11946 @@ -9089,7 +9060,7 @@ pub const FuncGen = struct { fn airFrameAddress(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { _ = inst; - const o = self.dg.object; + const o = self.ng.object; const result = try self.wip.callIntrinsic(.normal, .none, .frameaddress, &.{.ptr}, &.{.@"0"}, ""); return self.wip.cast(.ptrtoint, result, try o.lowerType(Type.usize), ""); } @@ -9106,7 +9077,7 @@ pub const FuncGen = struct { inst: Air.Inst.Index, kind: Builder.Function.Instruction.CmpXchg.Kind, ) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; @@ -9157,7 +9128,7 @@ pub const FuncGen = struct { } fn airAtomicRmw(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; @@ -9221,7 +9192,7 @@ pub const FuncGen = struct { } fn 
airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const atomic_load = self.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load; @@ -9269,7 +9240,7 @@ pub const FuncGen = struct { inst: Air.Inst.Index, ordering: Builder.AtomicOrdering, ) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -9294,7 +9265,7 @@ pub const FuncGen = struct { } fn airMemset(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -9329,7 +9300,7 @@ pub const FuncGen = struct { } else { _ = try self.wip.callMemSet(dest_ptr, dest_ptr_align, fill_byte, len, access_kind); } - const owner_mod = self.dg.ownerModule(); + const owner_mod = self.ng.ownerModule(); if (safety and owner_mod.valgrind) { try self.valgrindMarkUndef(dest_ptr, len); } @@ -9435,7 +9406,7 @@ pub const FuncGen = struct { dest_ptr_align: Builder.Alignment, access_kind: Builder.MemoryAccessKind, ) !void { - const o = self.dg.object; + const o = self.ng.object; const usize_zero = try o.builder.intValue(try o.lowerType(Type.usize), 0); const cond = try self.cmp(.normal, .neq, Type.usize, len, usize_zero); const memset_block = try self.wip.block(1, "MemsetTrapSkip"); @@ -9448,7 +9419,7 @@ pub const FuncGen = struct { } fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -9502,7 +9473,7 @@ pub const FuncGen = struct { } fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = 
self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -9524,7 +9495,7 @@ pub const FuncGen = struct { } fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const un_ty = self.typeOf(ty_op.operand); @@ -9563,7 +9534,7 @@ pub const FuncGen = struct { } fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, intrinsic: Builder.Intrinsic) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const inst_ty = self.typeOfIndex(inst); const operand_ty = self.typeOf(ty_op.operand); @@ -9581,7 +9552,7 @@ pub const FuncGen = struct { } fn airBitOp(self: *FuncGen, inst: Air.Inst.Index, intrinsic: Builder.Intrinsic) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const inst_ty = self.typeOfIndex(inst); const operand_ty = self.typeOf(ty_op.operand); @@ -9599,7 +9570,7 @@ pub const FuncGen = struct { } fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand_ty = self.typeOf(ty_op.operand); @@ -9633,7 +9604,7 @@ pub const FuncGen = struct { } fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const ip = &mod.intern_pool; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -9665,7 +9636,7 @@ pub const FuncGen = struct { } fn airIsNamedEnumValue(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = 
self.dg.object; + const o = self.ng.object; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try self.resolveInst(un_op); const enum_ty = self.typeOf(un_op); @@ -9683,22 +9654,21 @@ pub const FuncGen = struct { } fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !Builder.Function.Index { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; const enum_type = ip.loadEnumType(enum_ty.toIntern()); // TODO: detect when the type changes and re-emit this function. - const gop = try o.named_enum_map.getOrPut(o.gpa, enum_type.decl); + const gop = try o.named_enum_map.getOrPut(o.gpa, enum_ty.toIntern()); if (gop.found_existing) return gop.value_ptr.*; - errdefer assert(o.named_enum_map.remove(enum_type.decl)); + errdefer assert(o.named_enum_map.remove(enum_ty.toIntern())); - const decl = zcu.declPtr(enum_type.decl); const target = zcu.root_mod.resolved_target.result; const function_index = try o.builder.addFunction( try o.builder.fnType(.i1, &.{try o.lowerType(Type.fromInterned(enum_type.tag_ty))}, .normal), - try o.builder.strtabStringFmt("__zig_is_named_enum_value_{}", .{decl.fqn.fmt(ip)}), + try o.builder.strtabStringFmt("__zig_is_named_enum_value_{}", .{enum_type.name.fmt(ip)}), toLlvmAddressSpace(.generic, target), ); @@ -9741,7 +9711,7 @@ pub const FuncGen = struct { } fn airTagName(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try self.resolveInst(un_op); const enum_ty = self.typeOf(un_op); @@ -9759,7 +9729,7 @@ pub const FuncGen = struct { } fn airErrorName(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try self.resolveInst(un_op); const 
slice_ty = self.typeOfIndex(inst); @@ -9774,7 +9744,7 @@ pub const FuncGen = struct { } fn airSplat(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const scalar = try self.resolveInst(ty_op.operand); const vector_ty = self.typeOfIndex(inst); @@ -9792,7 +9762,7 @@ pub const FuncGen = struct { } fn airShuffle(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; @@ -9848,7 +9818,7 @@ pub const FuncGen = struct { vector_len: usize, accum_init: Builder.Value, ) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const usize_ty = try o.lowerType(Type.usize); const llvm_vector_len = try o.builder.intValue(usize_ty, vector_len); const llvm_result_ty = accum_init.typeOfWip(&self.wip); @@ -9902,7 +9872,7 @@ pub const FuncGen = struct { } fn airReduce(self: *FuncGen, inst: Air.Inst.Index, fast: Builder.FastMathKind) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const mod = o.pt.zcu; const target = mod.getTarget(); @@ -10012,7 +9982,7 @@ pub const FuncGen = struct { } fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ip = &mod.intern_pool; @@ -10133,7 +10103,7 @@ pub const FuncGen = struct { } fn airUnionInit(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const ip = &mod.intern_pool; @@ -10256,7 +10226,7 @@ pub const FuncGen = struct { } fn airPrefetch(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const prefetch = 
self.air.instructions.items(.data)[@intFromEnum(inst)].prefetch; comptime assert(@intFromEnum(std.builtin.PrefetchOptions.Rw.read) == 0); @@ -10306,7 +10276,7 @@ pub const FuncGen = struct { } fn airAddrSpaceCast(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const inst_ty = self.typeOfIndex(inst); const operand = try self.resolveInst(ty_op.operand); @@ -10324,12 +10294,12 @@ pub const FuncGen = struct { 0 => @field(Builder.Intrinsic, basename ++ ".x"), 1 => @field(Builder.Intrinsic, basename ++ ".y"), 2 => @field(Builder.Intrinsic, basename ++ ".z"), - else => return self.dg.object.builder.intValue(.i32, default), + else => return self.ng.object.builder.intValue(.i32, default), }, &.{}, &.{}, ""); } fn airWorkItemId(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const target = o.pt.zcu.getTarget(); assert(target.cpu.arch == .amdgcn); // TODO is to port this function to other GPU architectures @@ -10339,7 +10309,7 @@ pub const FuncGen = struct { } fn airWorkGroupSize(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const target = o.pt.zcu.getTarget(); assert(target.cpu.arch == .amdgcn); // TODO is to port this function to other GPU architectures @@ -10362,7 +10332,7 @@ pub const FuncGen = struct { } fn airWorkGroupId(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const target = o.pt.zcu.getTarget(); assert(target.cpu.arch == .amdgcn); // TODO is to port this function to other GPU architectures @@ -10372,7 +10342,7 @@ pub const FuncGen = struct { } fn getErrorNameTable(self: *FuncGen) Allocator.Error!Builder.Variable.Index { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const table = o.error_name_table; @@ -10401,7 
+10371,7 @@ pub const FuncGen = struct { opt_handle: Builder.Value, is_by_ref: bool, ) Allocator.Error!Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const field = b: { if (is_by_ref) { const field_ptr = try self.wip.gepStruct(opt_llvm_ty, opt_handle, 1, ""); @@ -10422,7 +10392,7 @@ pub const FuncGen = struct { opt_ty: Type, can_elide_load: bool, ) !Builder.Value { - const o = fg.dg.object; + const o = fg.ng.object; const pt = o.pt; const mod = pt.zcu; const payload_ty = opt_ty.optionalChild(mod); @@ -10451,7 +10421,7 @@ pub const FuncGen = struct { payload: Builder.Value, non_null_bit: Builder.Value, ) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const optional_llvm_ty = try o.lowerType(optional_ty); const non_null_field = try self.wip.cast(.zext, non_null_bit, .i8, ""); @@ -10483,7 +10453,7 @@ pub const FuncGen = struct { struct_ptr_ty: Type, field_index: u32, ) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const struct_ty = struct_ptr_ty.childType(mod); @@ -10552,7 +10522,7 @@ pub const FuncGen = struct { // "When loading a value of a type like i20 with a size that is not an integral number of bytes, the result is undefined if the value was not originally written using a store of the same type. " // => so load the byte aligned value and trunc the unwanted bits. 
- const o = fg.dg.object; + const o = fg.ng.object; const pt = o.pt; const mod = pt.zcu; const payload_llvm_ty = try o.lowerType(payload_ty); @@ -10599,7 +10569,7 @@ pub const FuncGen = struct { ptr_alignment: Builder.Alignment, access_kind: Builder.MemoryAccessKind, ) !Builder.Value { - const o = fg.dg.object; + const o = fg.ng.object; const pt = o.pt; //const pointee_llvm_ty = try o.lowerType(pointee_type); const result_align = InternPool.Alignment.fromLlvm(ptr_alignment).max(pointee_type.abiAlignment(pt)).toLlvm(); @@ -10620,7 +10590,7 @@ pub const FuncGen = struct { /// alloca and copies the value into it, then returns the alloca instruction. /// For isByRef=false types, it creates a load instruction and returns it. fn load(self: *FuncGen, ptr: Builder.Value, ptr_ty: Type) !Builder.Value { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const info = ptr_ty.ptrInfo(mod); @@ -10693,7 +10663,7 @@ pub const FuncGen = struct { elem: Builder.Value, ordering: Builder.AtomicOrdering, ) !void { - const o = self.dg.object; + const o = self.ng.object; const pt = o.pt; const mod = pt.zcu; const info = ptr_ty.ptrInfo(mod); @@ -10784,7 +10754,7 @@ pub const FuncGen = struct { fn valgrindMarkUndef(fg: *FuncGen, ptr: Builder.Value, len: Builder.Value) Allocator.Error!void { const VG_USERREQ__MAKE_MEM_UNDEFINED = 1296236545; - const o = fg.dg.object; + const o = fg.ng.object; const usize_ty = try o.lowerType(Type.usize); const zero = try o.builder.intValue(usize_ty, 0); const req = try o.builder.intValue(usize_ty, VG_USERREQ__MAKE_MEM_UNDEFINED); @@ -10802,7 +10772,7 @@ pub const FuncGen = struct { a4: Builder.Value, a5: Builder.Value, ) Allocator.Error!Builder.Value { - const o = fg.dg.object; + const o = fg.ng.object; const pt = o.pt; const mod = pt.zcu; const target = mod.getTarget(); @@ -10869,13 +10839,13 @@ pub const FuncGen = struct { } fn typeOf(fg: *FuncGen, inst: Air.Inst.Ref) Type { - const o = fg.dg.object; + const o = 
fg.ng.object; const mod = o.pt.zcu; return fg.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(fg: *FuncGen, inst: Air.Inst.Index) Type { - const o = fg.dg.object; + const o = fg.ng.object; const mod = o.pt.zcu; return fg.air.typeOfIndex(inst, &mod.intern_pool); } diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 23911c4fbf70..b13be401abf9 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -31,9 +31,9 @@ const InstMap = std.AutoHashMapUnmanaged(Air.Inst.Index, IdRef); pub const zig_call_abi_ver = 3; -const InternMap = std.AutoHashMapUnmanaged(struct { InternPool.Index, DeclGen.Repr }, IdResult); +const InternMap = std.AutoHashMapUnmanaged(struct { InternPool.Index, NavGen.Repr }, IdResult); const PtrTypeMap = std.AutoHashMapUnmanaged( - struct { InternPool.Index, StorageClass, DeclGen.Repr }, + struct { InternPool.Index, StorageClass, NavGen.Repr }, struct { ty_id: IdRef, fwd_emitted: bool }, ); @@ -142,7 +142,7 @@ const ControlFlow = union(enum) { }; /// This structure holds information that is relevant to the entire compilation, -/// in contrast to `DeclGen`, which only holds relevant information about a +/// in contrast to `NavGen`, which only holds relevant information about a /// single decl. pub const Object = struct { /// A general-purpose allocator that can be used for any allocation for this Object. @@ -153,10 +153,10 @@ pub const Object = struct { /// The Zig module that this object file is generated for. /// A map of Zig decl indices to SPIR-V decl indices. - decl_link: std.AutoHashMapUnmanaged(InternPool.DeclIndex, SpvModule.Decl.Index) = .{}, + nav_link: std.AutoHashMapUnmanaged(InternPool.Nav.Index, SpvModule.Decl.Index) = .{}, /// A map of Zig InternPool indices for anonymous decls to SPIR-V decl indices. 
- anon_decl_link: std.AutoHashMapUnmanaged(struct { InternPool.Index, StorageClass }, SpvModule.Decl.Index) = .{}, + uav_link: std.AutoHashMapUnmanaged(struct { InternPool.Index, StorageClass }, SpvModule.Decl.Index) = .{}, /// A map that maps AIR intern pool indices to SPIR-V result-ids. intern_map: InternMap = .{}, @@ -178,31 +178,29 @@ pub const Object = struct { pub fn deinit(self: *Object) void { self.spv.deinit(); - self.decl_link.deinit(self.gpa); - self.anon_decl_link.deinit(self.gpa); + self.nav_link.deinit(self.gpa); + self.uav_link.deinit(self.gpa); self.intern_map.deinit(self.gpa); self.ptr_types.deinit(self.gpa); } - fn genDecl( + fn genNav( self: *Object, pt: Zcu.PerThread, - decl_index: InternPool.DeclIndex, + nav_index: InternPool.Nav.Index, air: Air, liveness: Liveness, ) !void { const zcu = pt.zcu; const gpa = zcu.gpa; - const decl = zcu.declPtr(decl_index); - const namespace = zcu.namespacePtr(decl.src_namespace); - const structured_cfg = namespace.fileScope(zcu).mod.structured_cfg; + const structured_cfg = zcu.navFileScope(nav_index).mod.structured_cfg; - var decl_gen = DeclGen{ + var nav_gen = NavGen{ .gpa = gpa, .object = self, .pt = pt, .spv = &self.spv, - .decl_index = decl_index, + .owner_nav = nav_index, .air = air, .liveness = liveness, .intern_map = &self.intern_map, @@ -212,18 +210,18 @@ pub const Object = struct { false => .{ .unstructured = .{} }, }, .current_block_label = undefined, - .base_line = decl.navSrcLine(zcu), + .base_line = zcu.navSrcLine(nav_index), }; - defer decl_gen.deinit(); + defer nav_gen.deinit(); - decl_gen.genDecl() catch |err| switch (err) { + nav_gen.genNav() catch |err| switch (err) { error.CodegenFail => { - try zcu.failed_analysis.put(gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }), decl_gen.error_msg.?); + try zcu.failed_codegen.put(gpa, nav_index, nav_gen.error_msg.?); }, else => |other| { // There might be an error that happened *after* self.error_msg // was already allocated, so be sure to free it. 
- if (decl_gen.error_msg) |error_msg| { + if (nav_gen.error_msg) |error_msg| { error_msg.deinit(gpa); } @@ -239,31 +237,30 @@ pub const Object = struct { air: Air, liveness: Liveness, ) !void { - const decl_index = pt.zcu.funcInfo(func_index).owner_decl; + const nav = pt.zcu.funcInfo(func_index).owner_nav; // TODO: Separate types for generating decls and functions? - try self.genDecl(pt, decl_index, air, liveness); + try self.genNav(pt, nav, air, liveness); } - pub fn updateDecl( + pub fn updateNav( self: *Object, pt: Zcu.PerThread, - decl_index: InternPool.DeclIndex, + nav: InternPool.Nav.Index, ) !void { - try self.genDecl(pt, decl_index, undefined, undefined); + try self.genNav(pt, nav, undefined, undefined); } - /// Fetch or allocate a result id for decl index. This function also marks the decl as alive. - /// Note: Function does not actually generate the decl, it just allocates an index. - pub fn resolveDecl(self: *Object, zcu: *Zcu, decl_index: InternPool.DeclIndex) !SpvModule.Decl.Index { - const decl = zcu.declPtr(decl_index); - assert(decl.has_tv); // TODO: Do we need to handle a situation where this is false? - - const entry = try self.decl_link.getOrPut(self.gpa, decl_index); + /// Fetch or allocate a result id for nav index. This function also marks the nav as alive. + /// Note: Function does not actually generate the nav, it just allocates an index. + pub fn resolveNav(self: *Object, zcu: *Zcu, nav_index: InternPool.Nav.Index) !SpvModule.Decl.Index { + const ip = &zcu.intern_pool; + const entry = try self.nav_link.getOrPut(self.gpa, nav_index); if (!entry.found_existing) { + const nav = ip.getNav(nav_index); // TODO: Extern fn? 
- const kind: SpvModule.Decl.Kind = if (decl.val.isFuncBody(zcu)) + const kind: SpvModule.Decl.Kind = if (ip.isFunctionType(nav.typeOf(ip))) .func - else switch (decl.@"addrspace") { + else switch (nav.status.resolved.@"addrspace") { .generic => .invocation_global, else => .global, }; @@ -276,8 +273,8 @@ pub const Object = struct { }; /// This structure is used to compile a declaration, and contains all relevant meta-information to deal with that. -const DeclGen = struct { - /// A general-purpose allocator that can be used for any allocations for this DeclGen. +const NavGen = struct { + /// A general-purpose allocator that can be used for any allocations for this NavGen. gpa: Allocator, /// The object that this decl is generated into. @@ -291,7 +288,7 @@ const DeclGen = struct { spv: *SpvModule, /// The decl we are currently generating code for. - decl_index: InternPool.DeclIndex, + owner_nav: InternPool.Nav.Index, /// The intermediate code of the declaration we are currently generating. Note: If /// the declaration is not a function, this value will be undefined! @@ -399,8 +396,8 @@ const DeclGen = struct { indirect, }; - /// Free resources owned by the DeclGen. - pub fn deinit(self: *DeclGen) void { + /// Free resources owned by the NavGen. + pub fn deinit(self: *NavGen) void { self.args.deinit(self.gpa); self.inst_results.deinit(self.gpa); self.control_flow.deinit(self.gpa); @@ -408,26 +405,26 @@ const DeclGen = struct { } /// Return the target which we are currently compiling for. 
- pub fn getTarget(self: *DeclGen) std.Target { + pub fn getTarget(self: *NavGen) std.Target { return self.pt.zcu.getTarget(); } - pub fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error { + pub fn fail(self: *NavGen, comptime format: []const u8, args: anytype) Error { @setCold(true); const zcu = self.pt.zcu; - const src_loc = zcu.declPtr(self.decl_index).navSrcLoc(zcu); + const src_loc = zcu.navSrcLoc(self.owner_nav); assert(self.error_msg == null); self.error_msg = try Zcu.ErrorMsg.create(zcu.gpa, src_loc, format, args); return error.CodegenFail; } - pub fn todo(self: *DeclGen, comptime format: []const u8, args: anytype) Error { + pub fn todo(self: *NavGen, comptime format: []const u8, args: anytype) Error { return self.fail("TODO (SPIR-V): " ++ format, args); } /// This imports the "default" extended instruction set for the target /// For OpenCL, OpenCL.std.100. For Vulkan, GLSL.std.450. - fn importExtendedSet(self: *DeclGen) !IdResult { + fn importExtendedSet(self: *NavGen) !IdResult { const target = self.getTarget(); return switch (target.os.tag) { .opencl => try self.spv.importInstructionSet(.@"OpenCL.std"), @@ -437,18 +434,18 @@ const DeclGen = struct { } /// Fetch the result-id for a previously generated instruction or constant. 
- fn resolve(self: *DeclGen, inst: Air.Inst.Ref) !IdRef { + fn resolve(self: *NavGen, inst: Air.Inst.Ref) !IdRef { const pt = self.pt; const mod = pt.zcu; if (try self.air.value(inst, pt)) |val| { const ty = self.typeOf(inst); if (ty.zigTypeTag(mod) == .Fn) { - const fn_decl_index = switch (mod.intern_pool.indexToKey(val.ip_index)) { - .extern_func => |extern_func| extern_func.decl, - .func => |func| func.owner_decl, + const fn_nav = switch (mod.intern_pool.indexToKey(val.ip_index)) { + .@"extern" => |@"extern"| @"extern".owner_nav, + .func => |func| func.owner_nav, else => unreachable, }; - const spv_decl_index = try self.object.resolveDecl(mod, fn_decl_index); + const spv_decl_index = try self.object.resolveNav(mod, fn_nav); try self.func.decl_deps.put(self.spv.gpa, spv_decl_index, {}); return self.spv.declPtr(spv_decl_index).result_id; } @@ -459,7 +456,7 @@ const DeclGen = struct { return self.inst_results.get(index).?; // Assertion means instruction does not dominate usage. } - fn resolveAnonDecl(self: *DeclGen, val: InternPool.Index) !IdRef { + fn resolveUav(self: *NavGen, val: InternPool.Index) !IdRef { // TODO: This cannot be a function at this point, but it should probably be handled anyway. 
const mod = self.pt.zcu; @@ -467,7 +464,7 @@ const DeclGen = struct { const decl_ptr_ty_id = try self.ptrType(ty, .Generic); const spv_decl_index = blk: { - const entry = try self.object.anon_decl_link.getOrPut(self.object.gpa, .{ val, .Function }); + const entry = try self.object.uav_link.getOrPut(self.object.gpa, .{ val, .Function }); if (entry.found_existing) { try self.addFunctionDep(entry.value_ptr.*, .Function); @@ -540,7 +537,7 @@ const DeclGen = struct { return try self.castToGeneric(decl_ptr_ty_id, result_id); } - fn addFunctionDep(self: *DeclGen, decl_index: SpvModule.Decl.Index, storage_class: StorageClass) !void { + fn addFunctionDep(self: *NavGen, decl_index: SpvModule.Decl.Index, storage_class: StorageClass) !void { const target = self.getTarget(); if (target.os.tag == .vulkan) { // Shader entry point dependencies must be variables with Input or Output storage class @@ -555,7 +552,7 @@ const DeclGen = struct { } } - fn castToGeneric(self: *DeclGen, type_id: IdRef, ptr_id: IdRef) !IdRef { + fn castToGeneric(self: *NavGen, type_id: IdRef, ptr_id: IdRef) !IdRef { const target = self.getTarget(); if (target.os.tag == .vulkan) { @@ -575,7 +572,7 @@ const DeclGen = struct { /// block we are currently generating. /// Note that there is no such thing as nested blocks like in ZIR or AIR, so we don't need to /// keep track of the previous block. - fn beginSpvBlock(self: *DeclGen, label: IdResult) !void { + fn beginSpvBlock(self: *NavGen, label: IdResult) !void { try self.func.body.emit(self.spv.gpa, .OpLabel, .{ .id_result = label }); self.current_block_label = label; } @@ -590,7 +587,7 @@ const DeclGen = struct { /// TODO: The extension SPV_INTEL_arbitrary_precision_integers allows any integer size (at least up to 32 bits). /// TODO: This probably needs an ABI-version as well (especially in combination with SPV_INTEL_arbitrary_precision_integers). /// TODO: Should the result of this function be cached? 
- fn backingIntBits(self: *DeclGen, bits: u16) ?u16 { + fn backingIntBits(self: *NavGen, bits: u16) ?u16 { const target = self.getTarget(); // The backend will never be asked to compiler a 0-bit integer, so we won't have to handle those in this function. @@ -625,7 +622,7 @@ const DeclGen = struct { /// In theory that could also be used, but since the spec says that it only guarantees support up to 32-bit ints there /// is no way of knowing whether those are actually supported. /// TODO: Maybe this should be cached? - fn largestSupportedIntBits(self: *DeclGen) u16 { + fn largestSupportedIntBits(self: *NavGen) u16 { const target = self.getTarget(); return if (Target.spirv.featureSetHas(target.cpu.features, .Int64)) 64 @@ -636,12 +633,12 @@ const DeclGen = struct { /// Checks whether the type is "composite int", an integer consisting of multiple native integers. These are represented by /// arrays of largestSupportedIntBits(). /// Asserts `ty` is an integer. - fn isCompositeInt(self: *DeclGen, ty: Type) bool { + fn isCompositeInt(self: *NavGen, ty: Type) bool { return self.backingIntBits(ty) == null; } /// Checks whether the type can be directly translated to SPIR-V vectors - fn isSpvVector(self: *DeclGen, ty: Type) bool { + fn isSpvVector(self: *NavGen, ty: Type) bool { const mod = self.pt.zcu; const target = self.getTarget(); if (ty.zigTypeTag(mod) != .Vector) return false; @@ -667,7 +664,7 @@ const DeclGen = struct { return is_scalar and (spirv_len or opencl_len); } - fn arithmeticTypeInfo(self: *DeclGen, ty: Type) ArithmeticTypeInfo { + fn arithmeticTypeInfo(self: *NavGen, ty: Type) ArithmeticTypeInfo { const mod = self.pt.zcu; const target = self.getTarget(); var scalar_ty = ty.scalarType(mod); @@ -715,7 +712,7 @@ const DeclGen = struct { } /// Emits a bool constant in a particular representation. - fn constBool(self: *DeclGen, value: bool, repr: Repr) !IdRef { + fn constBool(self: *NavGen, value: bool, repr: Repr) !IdRef { // TODO: Cache? 
const section = &self.spv.sections.types_globals_constants; @@ -742,7 +739,7 @@ const DeclGen = struct { /// Emits an integer constant. /// This function, unlike SpvModule.constInt, takes care to bitcast /// the value to an unsigned int first for Kernels. - fn constInt(self: *DeclGen, ty: Type, value: anytype, repr: Repr) !IdRef { + fn constInt(self: *NavGen, ty: Type, value: anytype, repr: Repr) !IdRef { // TODO: Cache? const mod = self.pt.zcu; const scalar_ty = ty.scalarType(mod); @@ -809,7 +806,7 @@ const DeclGen = struct { /// ty must be a struct type. /// Constituents should be in `indirect` representation (as the elements of a struct should be). /// Result is in `direct` representation. - fn constructStruct(self: *DeclGen, ty: Type, types: []const Type, constituents: []const IdRef) !IdRef { + fn constructStruct(self: *NavGen, ty: Type, types: []const Type, constituents: []const IdRef) !IdRef { assert(types.len == constituents.len); const result_id = self.spv.allocId(); @@ -823,7 +820,7 @@ const DeclGen = struct { /// Construct a vector at runtime. /// ty must be an vector type. - fn constructVector(self: *DeclGen, ty: Type, constituents: []const IdRef) !IdRef { + fn constructVector(self: *NavGen, ty: Type, constituents: []const IdRef) !IdRef { const mod = self.pt.zcu; assert(ty.vectorLen(mod) == constituents.len); @@ -847,7 +844,7 @@ const DeclGen = struct { /// Construct a vector at runtime with all lanes set to the same value. /// ty must be an vector type. - fn constructVectorSplat(self: *DeclGen, ty: Type, constituent: IdRef) !IdRef { + fn constructVectorSplat(self: *NavGen, ty: Type, constituent: IdRef) !IdRef { const mod = self.pt.zcu; const n = ty.vectorLen(mod); @@ -862,7 +859,7 @@ const DeclGen = struct { /// ty must be an array type. /// Constituents should be in `indirect` representation (as the elements of an array should be). /// Result is in `direct` representation. 
- fn constructArray(self: *DeclGen, ty: Type, constituents: []const IdRef) !IdRef { + fn constructArray(self: *NavGen, ty: Type, constituents: []const IdRef) !IdRef { const result_id = self.spv.allocId(); try self.func.body.emit(self.spv.gpa, .OpCompositeConstruct, .{ .id_result_type = try self.resolveType(ty, .direct), @@ -878,7 +875,7 @@ const DeclGen = struct { /// is done by emitting a sequence of instructions that initialize the value. // /// This function should only be called during function code generation. - fn constant(self: *DeclGen, ty: Type, val: Value, repr: Repr) !IdRef { + fn constant(self: *NavGen, ty: Type, val: Value, repr: Repr) !IdRef { // Note: Using intern_map can only be used with constants that DO NOT generate any runtime code!! // Ideally that should be all constants in the future, or it should be cleaned up somehow. For // now, only use the intern_map on case-by-case basis by breaking to :cache. @@ -922,7 +919,7 @@ const DeclGen = struct { .undef => unreachable, // handled above .variable, - .extern_func, + .@"extern", .func, .enum_literal, .empty_enum_value, @@ -1142,7 +1139,7 @@ const DeclGen = struct { return cacheable_id; } - fn constantPtr(self: *DeclGen, ptr_val: Value) Error!IdRef { + fn constantPtr(self: *NavGen, ptr_val: Value) Error!IdRef { // TODO: Caching?? 
const pt = self.pt; @@ -1160,7 +1157,7 @@ const DeclGen = struct { return self.derivePtr(derivation); } - fn derivePtr(self: *DeclGen, derivation: Value.PointerDeriveStep) Error!IdRef { + fn derivePtr(self: *NavGen, derivation: Value.PointerDeriveStep) Error!IdRef { const pt = self.pt; const zcu = pt.zcu; switch (derivation) { @@ -1178,13 +1175,13 @@ const DeclGen = struct { }); return result_ptr_id; }, - .decl_ptr => |decl| { - const result_ptr_ty = try zcu.declPtr(decl).declPtrType(pt); - return self.constantDeclRef(result_ptr_ty, decl); + .nav_ptr => |nav| { + const result_ptr_ty = try pt.navPtrType(nav); + return self.constantNavRef(result_ptr_ty, nav); }, - .anon_decl_ptr => |ad| { - const result_ptr_ty = Type.fromInterned(ad.orig_ty); - return self.constantAnonDeclRef(result_ptr_ty, ad); + .uav_ptr => |uav| { + const result_ptr_ty = Type.fromInterned(uav.orig_ty); + return self.constantUavRef(result_ptr_ty, uav); }, .eu_payload_ptr => @panic("TODO"), .opt_payload_ptr => @panic("TODO"), @@ -1227,10 +1224,10 @@ const DeclGen = struct { } } - fn constantAnonDeclRef( - self: *DeclGen, + fn constantUavRef( + self: *NavGen, ty: Type, - anon_decl: InternPool.Key.Ptr.BaseAddr.AnonDecl, + uav: InternPool.Key.Ptr.BaseAddr.Uav, ) !IdRef { // TODO: Merge this function with constantDeclRef. 
@@ -1238,31 +1235,24 @@ const DeclGen = struct { const mod = pt.zcu; const ip = &mod.intern_pool; const ty_id = try self.resolveType(ty, .direct); - const decl_val = anon_decl.val; - const decl_ty = Type.fromInterned(ip.typeOf(decl_val)); + const uav_ty = Type.fromInterned(ip.typeOf(uav.val)); - if (Value.fromInterned(decl_val).getFunction(mod)) |func| { - _ = func; - unreachable; // TODO - } else if (Value.fromInterned(decl_val).getExternFunc(mod)) |func| { - _ = func; - unreachable; + switch (ip.indexToKey(uav.val)) { + .func => unreachable, // TODO + .@"extern" => assert(!ip.isFunctionType(uav_ty.toIntern())), + else => {}, } // const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn; - if (!decl_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) { - // Pointer to nothing - return undefoined + if (!uav_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) { + // Pointer to nothing - return undefined return self.spv.constUndef(ty_id); } - if (decl_ty.zigTypeTag(mod) == .Fn) { - unreachable; // TODO - } - - // Anon decl refs are always generic. + // Uav refs are always generic. assert(ty.ptrAddressSpace(mod) == .generic); - const decl_ptr_ty_id = try self.ptrType(decl_ty, .Generic); - const ptr_id = try self.resolveAnonDecl(decl_val); + const decl_ptr_ty_id = try self.ptrType(uav_ty, .Generic); + const ptr_id = try self.resolveUav(uav.val); if (decl_ptr_ty_id != ty_id) { // Differing pointer types, insert a cast. 
@@ -1278,28 +1268,31 @@ const DeclGen = struct { } } - fn constantDeclRef(self: *DeclGen, ty: Type, decl_index: InternPool.DeclIndex) !IdRef { + fn constantNavRef(self: *NavGen, ty: Type, nav_index: InternPool.Nav.Index) !IdRef { const pt = self.pt; const mod = pt.zcu; + const ip = &mod.intern_pool; const ty_id = try self.resolveType(ty, .direct); - const decl = mod.declPtr(decl_index); + const nav = ip.getNav(nav_index); + const nav_val = mod.navValue(nav_index); + const nav_ty = nav_val.typeOf(mod); - switch (mod.intern_pool.indexToKey(decl.val.ip_index)) { + switch (ip.indexToKey(nav_val.toIntern())) { .func => { // TODO: Properly lower function pointers. For now we are going to hack around it and // just generate an empty pointer. Function pointers are represented by a pointer to usize. return try self.spv.constUndef(ty_id); }, - .extern_func => unreachable, // TODO + .@"extern" => assert(!ip.isFunctionType(nav_ty.toIntern())), // TODO else => {}, } - if (!decl.typeOf(mod).isFnOrHasRuntimeBitsIgnoreComptime(pt)) { + if (!nav_ty.isFnOrHasRuntimeBitsIgnoreComptime(pt)) { // Pointer to nothing - return undefined. 
return self.spv.constUndef(ty_id); } - const spv_decl_index = try self.object.resolveDecl(mod, decl_index); + const spv_decl_index = try self.object.resolveNav(mod, nav_index); const spv_decl = self.spv.declPtr(spv_decl_index); const decl_id = switch (spv_decl.kind) { @@ -1307,10 +1300,10 @@ const DeclGen = struct { .global, .invocation_global => spv_decl.result_id, }; - const final_storage_class = self.spvStorageClass(decl.@"addrspace"); + const final_storage_class = self.spvStorageClass(nav.status.resolved.@"addrspace"); try self.addFunctionDep(spv_decl_index, final_storage_class); - const decl_ptr_ty_id = try self.ptrType(decl.typeOf(mod), final_storage_class); + const decl_ptr_ty_id = try self.ptrType(nav_ty, final_storage_class); const ptr_id = switch (final_storage_class) { .Generic => try self.castToGeneric(decl_ptr_ty_id, decl_id), @@ -1332,7 +1325,7 @@ const DeclGen = struct { } // Turn a Zig type's name into a cache reference. - fn resolveTypeName(self: *DeclGen, ty: Type) ![]const u8 { + fn resolveTypeName(self: *NavGen, ty: Type) ![]const u8 { var name = std.ArrayList(u8).init(self.gpa); defer name.deinit(); try ty.print(name.writer(), self.pt); @@ -1343,7 +1336,7 @@ const DeclGen = struct { /// The integer type that is returned by this function is the type that is used to perform /// actual operations (as well as store) a Zig type of a particular number of bits. To create /// a type with an exact size, use SpvModule.intType. - fn intType(self: *DeclGen, signedness: std.builtin.Signedness, bits: u16) !IdRef { + fn intType(self: *NavGen, signedness: std.builtin.Signedness, bits: u16) !IdRef { const backing_bits = self.backingIntBits(bits) orelse { // TODO: Integers too big for any native type are represented as "composite integers": // An array of largestSupportedIntBits. 
@@ -1358,7 +1351,7 @@ const DeclGen = struct { return self.spv.intType(.unsigned, backing_bits); } - fn arrayType(self: *DeclGen, len: u32, child_ty: IdRef) !IdRef { + fn arrayType(self: *NavGen, len: u32, child_ty: IdRef) !IdRef { // TODO: Cache?? const len_id = try self.constInt(Type.u32, len, .direct); const result_id = self.spv.allocId(); @@ -1371,11 +1364,11 @@ const DeclGen = struct { return result_id; } - fn ptrType(self: *DeclGen, child_ty: Type, storage_class: StorageClass) !IdRef { + fn ptrType(self: *NavGen, child_ty: Type, storage_class: StorageClass) !IdRef { return try self.ptrType2(child_ty, storage_class, .indirect); } - fn ptrType2(self: *DeclGen, child_ty: Type, storage_class: StorageClass, child_repr: Repr) !IdRef { + fn ptrType2(self: *NavGen, child_ty: Type, storage_class: StorageClass, child_repr: Repr) !IdRef { const key = .{ child_ty.toIntern(), storage_class, child_repr }; const entry = try self.ptr_types.getOrPut(self.gpa, key); if (entry.found_existing) { @@ -1407,7 +1400,7 @@ const DeclGen = struct { return result_id; } - fn functionType(self: *DeclGen, return_ty: Type, param_types: []const Type) !IdRef { + fn functionType(self: *NavGen, return_ty: Type, param_types: []const Type) !IdRef { // TODO: Cache?? const param_ids = try self.gpa.alloc(IdRef, param_types.len); @@ -1427,7 +1420,7 @@ const DeclGen = struct { return ty_id; } - fn zigScalarOrVectorTypeLike(self: *DeclGen, new_ty: Type, base_ty: Type) !Type { + fn zigScalarOrVectorTypeLike(self: *NavGen, new_ty: Type, base_ty: Type) !Type { const pt = self.pt; const new_scalar_ty = new_ty.scalarType(pt.zcu); if (!base_ty.isVector(pt.zcu)) { @@ -1458,7 +1451,7 @@ const DeclGen = struct { /// padding: [padding_size]u8, /// } /// If any of the fields' size is 0, it will be omitted. 
- fn resolveUnionType(self: *DeclGen, ty: Type) !IdRef { + fn resolveUnionType(self: *NavGen, ty: Type) !IdRef { const mod = self.pt.zcu; const ip = &mod.intern_pool; const union_obj = mod.typeToUnion(ty).?; @@ -1509,7 +1502,7 @@ const DeclGen = struct { return result_id; } - fn resolveFnReturnType(self: *DeclGen, ret_ty: Type) !IdRef { + fn resolveFnReturnType(self: *NavGen, ret_ty: Type) !IdRef { const pt = self.pt; if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { // If the return type is an error set or an error union, then we make this @@ -1526,7 +1519,7 @@ const DeclGen = struct { } /// Turn a Zig type into a SPIR-V Type, and return a reference to it. - fn resolveType(self: *DeclGen, ty: Type, repr: Repr) Error!IdRef { + fn resolveType(self: *NavGen, ty: Type, repr: Repr) Error!IdRef { if (self.intern_map.get(.{ ty.toIntern(), repr })) |id| { return id; } @@ -1536,7 +1529,7 @@ const DeclGen = struct { return id; } - fn resolveTypeInner(self: *DeclGen, ty: Type, repr: Repr) Error!IdRef { + fn resolveTypeInner(self: *NavGen, ty: Type, repr: Repr) Error!IdRef { const pt = self.pt; const mod = pt.zcu; const ip = &mod.intern_pool; @@ -1839,7 +1832,7 @@ const DeclGen = struct { } } - fn spvStorageClass(self: *DeclGen, as: std.builtin.AddressSpace) StorageClass { + fn spvStorageClass(self: *NavGen, as: std.builtin.AddressSpace) StorageClass { const target = self.getTarget(); return switch (as) { .generic => switch (target.os.tag) { @@ -1882,7 +1875,7 @@ const DeclGen = struct { } }; - fn errorUnionLayout(self: *DeclGen, payload_ty: Type) ErrorUnionLayout { + fn errorUnionLayout(self: *NavGen, payload_ty: Type) ErrorUnionLayout { const pt = self.pt; const error_align = Type.anyerror.abiAlignment(pt); @@ -1913,7 +1906,7 @@ const DeclGen = struct { total_fields: u32, }; - fn unionLayout(self: *DeclGen, ty: Type) UnionLayout { + fn unionLayout(self: *NavGen, ty: Type) UnionLayout { const pt = self.pt; const mod = pt.zcu; const ip = &mod.intern_pool; @@ -2004,25 +1997,25 
@@ const DeclGen = struct { return .{ .ty = ty, .value = .{ .singleton = singleton } }; } - fn materialize(self: Temporary, dg: *DeclGen) !IdResult { - const mod = dg.pt.zcu; + fn materialize(self: Temporary, ng: *NavGen) !IdResult { + const mod = ng.pt.zcu; switch (self.value) { .singleton => |id| return id, .exploded_vector => |range| { assert(self.ty.isVector(mod)); assert(self.ty.vectorLen(mod) == range.len); - const consituents = try dg.gpa.alloc(IdRef, range.len); - defer dg.gpa.free(consituents); + const consituents = try ng.gpa.alloc(IdRef, range.len); + defer ng.gpa.free(consituents); for (consituents, 0..range.len) |*id, i| { id.* = range.at(i); } - return dg.constructVector(self.ty, consituents); + return ng.constructVector(self.ty, consituents); }, } } - fn vectorization(self: Temporary, dg: *DeclGen) Vectorization { - return Vectorization.fromType(self.ty, dg); + fn vectorization(self: Temporary, ng: *NavGen) Vectorization { + return Vectorization.fromType(self.ty, ng); } fn pun(self: Temporary, new_ty: Type) Temporary { @@ -2034,8 +2027,8 @@ const DeclGen = struct { /// 'Explode' a temporary into separate elements. This turns a vector /// into a bag of elements. - fn explode(self: Temporary, dg: *DeclGen) !IdRange { - const mod = dg.pt.zcu; + fn explode(self: Temporary, ng: *NavGen) !IdRange { + const mod = ng.pt.zcu; // If the value is a scalar, then this is a no-op. 
if (!self.ty.isVector(mod)) { @@ -2045,9 +2038,9 @@ const DeclGen = struct { }; } - const ty_id = try dg.resolveType(self.ty.scalarType(mod), .direct); + const ty_id = try ng.resolveType(self.ty.scalarType(mod), .direct); const n = self.ty.vectorLen(mod); - const results = dg.spv.allocIds(n); + const results = ng.spv.allocIds(n); const id = switch (self.value) { .singleton => |id| id, @@ -2056,7 +2049,7 @@ const DeclGen = struct { for (0..n) |i| { const indexes = [_]u32{@intCast(i)}; - try dg.func.body.emit(dg.spv.gpa, .OpCompositeExtract, .{ + try ng.func.body.emit(ng.spv.gpa, .OpCompositeExtract, .{ .id_result_type = ty_id, .id_result = results.at(i), .composite = id, @@ -2069,7 +2062,7 @@ const DeclGen = struct { }; /// Initialize a `Temporary` from an AIR value. - fn temporary(self: *DeclGen, inst: Air.Inst.Ref) !Temporary { + fn temporary(self: *NavGen, inst: Air.Inst.Ref) !Temporary { return .{ .ty = self.typeOf(inst), .value = .{ .singleton = try self.resolve(inst) }, @@ -2093,11 +2086,11 @@ const DeclGen = struct { /// Derive a vectorization from a particular type. This usually /// only checks the size, but the source-of-truth is implemented /// by `isSpvVector()`. - fn fromType(ty: Type, dg: *DeclGen) Vectorization { - const mod = dg.pt.zcu; + fn fromType(ty: Type, ng: *NavGen) Vectorization { + const mod = ng.pt.zcu; if (!ty.isVector(mod)) { return .scalar; - } else if (dg.isSpvVector(ty)) { + } else if (ng.isSpvVector(ty)) { return .{ .spv_vectorized = ty.vectorLen(mod) }; } else { return .{ .unrolled = ty.vectorLen(mod) }; @@ -2169,8 +2162,8 @@ const DeclGen = struct { /// Turns `ty` into the result-type of an individual vector operation. /// `ty` may be a scalar or vector, it doesn't matter. 
- fn operationType(self: Vectorization, dg: *DeclGen, ty: Type) !Type { - const pt = dg.pt; + fn operationType(self: Vectorization, ng: *NavGen, ty: Type) !Type { + const pt = ng.pt; const scalar_ty = ty.scalarType(pt.zcu); return switch (self) { .scalar, .unrolled => scalar_ty, @@ -2183,8 +2176,8 @@ const DeclGen = struct { /// Turns `ty` into the result-type of the entire operation. /// `ty` may be a scalar or vector, it doesn't matter. - fn resultType(self: Vectorization, dg: *DeclGen, ty: Type) !Type { - const pt = dg.pt; + fn resultType(self: Vectorization, ng: *NavGen, ty: Type) !Type { + const pt = ng.pt; const scalar_ty = ty.scalarType(pt.zcu); return switch (self) { .scalar => scalar_ty, @@ -2198,10 +2191,10 @@ const DeclGen = struct { /// Before a temporary can be used, some setup may need to be one. This function implements /// this setup, and returns a new type that holds the relevant information on how to access /// elements of the input. - fn prepare(self: Vectorization, dg: *DeclGen, tmp: Temporary) !PreparedOperand { - const pt = dg.pt; + fn prepare(self: Vectorization, ng: *NavGen, tmp: Temporary) !PreparedOperand { + const pt = ng.pt; const is_vector = tmp.ty.isVector(pt.zcu); - const is_spv_vector = dg.isSpvVector(tmp.ty); + const is_spv_vector = ng.isSpvVector(tmp.ty); const value: PreparedOperand.Value = switch (tmp.value) { .singleton => |id| switch (self) { .scalar => blk: { @@ -2220,7 +2213,7 @@ const DeclGen = struct { .child = tmp.ty.toIntern(), }); - const vector = try dg.constructVectorSplat(vector_ty, id); + const vector = try ng.constructVectorSplat(vector_ty, id); return .{ .ty = vector_ty, .value = .{ .spv_vectorwise = vector }, @@ -2228,7 +2221,7 @@ const DeclGen = struct { }, .unrolled => blk: { if (is_vector) { - break :blk .{ .vector_exploded = try tmp.explode(dg) }; + break :blk .{ .vector_exploded = try tmp.explode(ng) }; } else { break :blk .{ .scalar_broadcast = id }; } @@ -2243,7 +2236,7 @@ const DeclGen = struct { // a type 
that cannot do that. assert(is_spv_vector); assert(range.len == n); - const vec = try tmp.materialize(dg); + const vec = try tmp.materialize(ng); break :blk .{ .spv_vectorwise = vec }; }, .unrolled => |n| blk: { @@ -2324,7 +2317,7 @@ const DeclGen = struct { /// - A `Vectorization` instance /// - A Type, in which case the vectorization is computed via `Vectorization.fromType`. /// - A Temporary, in which case the vectorization is computed via `Temporary.vectorization`. - fn vectorization(self: *DeclGen, args: anytype) Vectorization { + fn vectorization(self: *NavGen, args: anytype) Vectorization { var v: Vectorization = undefined; assert(args.len >= 1); inline for (args, 0..) |arg, i| { @@ -2345,7 +2338,7 @@ const DeclGen = struct { /// This function builds an OpSConvert of OpUConvert depending on the /// signedness of the types. - fn buildIntConvert(self: *DeclGen, dst_ty: Type, src: Temporary) !Temporary { + fn buildIntConvert(self: *NavGen, dst_ty: Type, src: Temporary) !Temporary { const mod = self.pt.zcu; const dst_ty_id = try self.resolveType(dst_ty.scalarType(mod), .direct); @@ -2384,7 +2377,7 @@ const DeclGen = struct { return v.finalize(result_ty, results); } - fn buildFma(self: *DeclGen, a: Temporary, b: Temporary, c: Temporary) !Temporary { + fn buildFma(self: *NavGen, a: Temporary, b: Temporary, c: Temporary) !Temporary { const target = self.getTarget(); const v = self.vectorization(.{ a, b, c }); @@ -2424,7 +2417,7 @@ const DeclGen = struct { return v.finalize(result_ty, results); } - fn buildSelect(self: *DeclGen, condition: Temporary, lhs: Temporary, rhs: Temporary) !Temporary { + fn buildSelect(self: *NavGen, condition: Temporary, lhs: Temporary, rhs: Temporary) !Temporary { const mod = self.pt.zcu; const v = self.vectorization(.{ condition, lhs, rhs }); @@ -2475,7 +2468,7 @@ const DeclGen = struct { f_oge, }; - fn buildCmp(self: *DeclGen, pred: CmpPredicate, lhs: Temporary, rhs: Temporary) !Temporary { + fn buildCmp(self: *NavGen, pred: 
CmpPredicate, lhs: Temporary, rhs: Temporary) !Temporary { const v = self.vectorization(.{ lhs, rhs }); const ops = v.operations(); const results = self.spv.allocIds(ops); @@ -2543,7 +2536,7 @@ const DeclGen = struct { log10, }; - fn buildUnary(self: *DeclGen, op: UnaryOp, operand: Temporary) !Temporary { + fn buildUnary(self: *NavGen, op: UnaryOp, operand: Temporary) !Temporary { const target = self.getTarget(); const v = blk: { const v = self.vectorization(.{operand}); @@ -2673,7 +2666,7 @@ const DeclGen = struct { l_or, }; - fn buildBinary(self: *DeclGen, op: BinaryOp, lhs: Temporary, rhs: Temporary) !Temporary { + fn buildBinary(self: *NavGen, op: BinaryOp, lhs: Temporary, rhs: Temporary) !Temporary { const target = self.getTarget(); const v = self.vectorization(.{ lhs, rhs }); @@ -2762,7 +2755,7 @@ const DeclGen = struct { /// This function builds an extended multiplication, either OpSMulExtended or OpUMulExtended on Vulkan, /// or OpIMul and s_mul_hi or u_mul_hi on OpenCL. fn buildWideMul( - self: *DeclGen, + self: *NavGen, op: enum { s_mul_extended, u_mul_extended, @@ -2893,7 +2886,7 @@ const DeclGen = struct { /// OpFunctionEnd /// TODO is to also write out the error as a function call parameter, and to somehow fetch /// the name of an error in the text executor. 
- fn generateTestEntryPoint(self: *DeclGen, name: []const u8, spv_test_decl_index: SpvModule.Decl.Index) !void { + fn generateTestEntryPoint(self: *NavGen, name: []const u8, spv_test_decl_index: SpvModule.Decl.Index) !void { const anyerror_ty_id = try self.resolveType(Type.anyerror, .direct); const ptr_anyerror_ty = try self.pt.ptrType(.{ .child = Type.anyerror.toIntern(), @@ -2946,21 +2939,22 @@ const DeclGen = struct { try self.spv.declareEntryPoint(spv_decl_index, test_name, .Kernel); } - fn genDecl(self: *DeclGen) !void { + fn genNav(self: *NavGen) !void { const pt = self.pt; const mod = pt.zcu; const ip = &mod.intern_pool; - const decl = mod.declPtr(self.decl_index); - const spv_decl_index = try self.object.resolveDecl(mod, self.decl_index); + const spv_decl_index = try self.object.resolveNav(mod, self.owner_nav); const result_id = self.spv.declPtr(spv_decl_index).result_id; + const nav = ip.getNav(self.owner_nav); + const val = mod.navValue(self.owner_nav); + const ty = val.typeOf(mod); switch (self.spv.declPtr(spv_decl_index).kind) { .func => { - assert(decl.typeOf(mod).zigTypeTag(mod) == .Fn); - const fn_info = mod.typeToFunc(decl.typeOf(mod)).?; + const fn_info = mod.typeToFunc(ty).?; const return_ty_id = try self.resolveFnReturnType(Type.fromInterned(fn_info.return_type)); - const prototype_ty_id = try self.resolveType(decl.typeOf(mod), .direct); + const prototype_ty_id = try self.resolveType(ty, .direct); try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{ .id_result_type = return_ty_id, .id_result = result_id, @@ -3012,27 +3006,26 @@ const DeclGen = struct { // Append the actual code into the functions section. try self.spv.addFunction(spv_decl_index, self.func); - try self.spv.debugName(result_id, decl.fqn.toSlice(ip)); + try self.spv.debugName(result_id, nav.fqn.toSlice(ip)); // Temporarily generate a test kernel declaration if this is a test function. 
- if (self.pt.zcu.test_functions.contains(self.decl_index)) { - try self.generateTestEntryPoint(decl.fqn.toSlice(ip), spv_decl_index); + if (self.pt.zcu.test_functions.contains(self.owner_nav)) { + try self.generateTestEntryPoint(nav.fqn.toSlice(ip), spv_decl_index); } }, .global => { - const maybe_init_val: ?Value = blk: { - if (decl.val.getVariable(mod)) |payload| { - if (payload.is_extern) break :blk null; - break :blk Value.fromInterned(payload.init); - } - break :blk decl.val; + const maybe_init_val: ?Value = switch (ip.indexToKey(val.toIntern())) { + .func => unreachable, + .variable => |variable| Value.fromInterned(variable.init), + .@"extern" => null, + else => val, }; assert(maybe_init_val == null); // TODO - const final_storage_class = self.spvStorageClass(decl.@"addrspace"); + const final_storage_class = self.spvStorageClass(nav.status.resolved.@"addrspace"); assert(final_storage_class != .Generic); // These should be instance globals - const ptr_ty_id = try self.ptrType(decl.typeOf(mod), final_storage_class); + const ptr_ty_id = try self.ptrType(ty, final_storage_class); try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpVariable, .{ .id_result_type = ptr_ty_id, @@ -3040,21 +3033,20 @@ const DeclGen = struct { .storage_class = final_storage_class, }); - try self.spv.debugName(result_id, decl.fqn.toSlice(ip)); + try self.spv.debugName(result_id, nav.fqn.toSlice(ip)); try self.spv.declareDeclDeps(spv_decl_index, &.{}); }, .invocation_global => { - const maybe_init_val: ?Value = blk: { - if (decl.val.getVariable(mod)) |payload| { - if (payload.is_extern) break :blk null; - break :blk Value.fromInterned(payload.init); - } - break :blk decl.val; + const maybe_init_val: ?Value = switch (ip.indexToKey(val.toIntern())) { + .func => unreachable, + .variable => |variable| Value.fromInterned(variable.init), + .@"extern" => null, + else => val, }; try self.spv.declareDeclDeps(spv_decl_index, &.{}); - const ptr_ty_id = try 
self.ptrType(decl.typeOf(mod), .Function); + const ptr_ty_id = try self.ptrType(ty, .Function); if (maybe_init_val) |init_val| { // TODO: Combine with resolveAnonDecl? @@ -3074,7 +3066,7 @@ const DeclGen = struct { }); self.current_block_label = root_block_id; - const val_id = try self.constant(decl.typeOf(mod), init_val, .indirect); + const val_id = try self.constant(ty, init_val, .indirect); try self.func.body.emit(self.spv.gpa, .OpStore, .{ .pointer = result_id, .object = val_id, @@ -3084,7 +3076,7 @@ const DeclGen = struct { try self.func.body.emit(self.spv.gpa, .OpFunctionEnd, {}); try self.spv.addFunction(spv_decl_index, self.func); - try self.spv.debugNameFmt(initializer_id, "initializer of {}", .{decl.fqn.fmt(ip)}); + try self.spv.debugNameFmt(initializer_id, "initializer of {}", .{nav.fqn.fmt(ip)}); try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpExtInst, .{ .id_result_type = ptr_ty_id, @@ -3106,11 +3098,11 @@ const DeclGen = struct { } } - fn intFromBool(self: *DeclGen, value: Temporary) !Temporary { + fn intFromBool(self: *NavGen, value: Temporary) !Temporary { return try self.intFromBool2(value, Type.u1); } - fn intFromBool2(self: *DeclGen, value: Temporary, result_ty: Type) !Temporary { + fn intFromBool2(self: *NavGen, value: Temporary, result_ty: Type) !Temporary { const zero_id = try self.constInt(result_ty, 0, .direct); const one_id = try self.constInt(result_ty, 1, .direct); @@ -3123,7 +3115,7 @@ const DeclGen = struct { /// Convert representation from indirect (in memory) to direct (in 'register') /// This converts the argument type from resolveType(ty, .indirect) to resolveType(ty, .direct). 
- fn convertToDirect(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef { + fn convertToDirect(self: *NavGen, ty: Type, operand_id: IdRef) !IdRef { const mod = self.pt.zcu; switch (ty.scalarType(mod).zigTypeTag(mod)) { .Bool => { @@ -3149,7 +3141,7 @@ const DeclGen = struct { /// Convert representation from direct (in 'register) to direct (in memory) /// This converts the argument type from resolveType(ty, .direct) to resolveType(ty, .indirect). - fn convertToIndirect(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef { + fn convertToIndirect(self: *NavGen, ty: Type, operand_id: IdRef) !IdRef { const mod = self.pt.zcu; switch (ty.scalarType(mod).zigTypeTag(mod)) { .Bool => { @@ -3160,7 +3152,7 @@ const DeclGen = struct { } } - fn extractField(self: *DeclGen, result_ty: Type, object: IdRef, field: u32) !IdRef { + fn extractField(self: *NavGen, result_ty: Type, object: IdRef, field: u32) !IdRef { const result_ty_id = try self.resolveType(result_ty, .indirect); const result_id = self.spv.allocId(); const indexes = [_]u32{field}; @@ -3174,7 +3166,7 @@ const DeclGen = struct { return try self.convertToDirect(result_ty, result_id); } - fn extractVectorComponent(self: *DeclGen, result_ty: Type, vector_id: IdRef, field: u32) !IdRef { + fn extractVectorComponent(self: *NavGen, result_ty: Type, vector_id: IdRef, field: u32) !IdRef { // Whether this is an OpTypeVector or OpTypeArray, we need to emit the same instruction regardless. 
const result_ty_id = try self.resolveType(result_ty, .direct); const result_id = self.spv.allocId(); @@ -3193,7 +3185,7 @@ const DeclGen = struct { is_volatile: bool = false, }; - fn load(self: *DeclGen, value_ty: Type, ptr_id: IdRef, options: MemoryOptions) !IdRef { + fn load(self: *NavGen, value_ty: Type, ptr_id: IdRef, options: MemoryOptions) !IdRef { const indirect_value_ty_id = try self.resolveType(value_ty, .indirect); const result_id = self.spv.allocId(); const access = spec.MemoryAccess.Extended{ @@ -3208,7 +3200,7 @@ const DeclGen = struct { return try self.convertToDirect(value_ty, result_id); } - fn store(self: *DeclGen, value_ty: Type, ptr_id: IdRef, value_id: IdRef, options: MemoryOptions) !void { + fn store(self: *NavGen, value_ty: Type, ptr_id: IdRef, value_id: IdRef, options: MemoryOptions) !void { const indirect_value_id = try self.convertToIndirect(value_ty, value_id); const access = spec.MemoryAccess.Extended{ .Volatile = options.is_volatile, @@ -3220,13 +3212,13 @@ const DeclGen = struct { }); } - fn genBody(self: *DeclGen, body: []const Air.Inst.Index) Error!void { + fn genBody(self: *NavGen, body: []const Air.Inst.Index) Error!void { for (body) |inst| { try self.genInst(inst); } } - fn genInst(self: *DeclGen, inst: Air.Inst.Index) !void { + fn genInst(self: *NavGen, inst: Air.Inst.Index) !void { const mod = self.pt.zcu; const ip = &mod.intern_pool; if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) @@ -3397,7 +3389,7 @@ const DeclGen = struct { try self.inst_results.putNoClobber(self.gpa, inst, result_id); } - fn airBinOpSimple(self: *DeclGen, inst: Air.Inst.Index, op: BinaryOp) !?IdRef { + fn airBinOpSimple(self: *NavGen, inst: Air.Inst.Index, op: BinaryOp) !?IdRef { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.temporary(bin_op.lhs); const rhs = try self.temporary(bin_op.rhs); @@ -3406,7 +3398,7 @@ const DeclGen = struct { return try result.materialize(self); } - fn 
airShift(self: *DeclGen, inst: Air.Inst.Index, unsigned: BinaryOp, signed: BinaryOp) !?IdRef { + fn airShift(self: *NavGen, inst: Air.Inst.Index, unsigned: BinaryOp, signed: BinaryOp) !?IdRef { const mod = self.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -3441,7 +3433,7 @@ const DeclGen = struct { const MinMax = enum { min, max }; - fn airMinMax(self: *DeclGen, inst: Air.Inst.Index, op: MinMax) !?IdRef { + fn airMinMax(self: *NavGen, inst: Air.Inst.Index, op: MinMax) !?IdRef { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.temporary(bin_op.lhs); @@ -3451,7 +3443,7 @@ const DeclGen = struct { return try result.materialize(self); } - fn minMax(self: *DeclGen, lhs: Temporary, rhs: Temporary, op: MinMax) !Temporary { + fn minMax(self: *NavGen, lhs: Temporary, rhs: Temporary, op: MinMax) !Temporary { const info = self.arithmeticTypeInfo(lhs.ty); const binop: BinaryOp = switch (info.class) { @@ -3484,7 +3476,7 @@ const DeclGen = struct { /// - Signed integers are also sign extended if they are negative. /// All other values are returned unmodified (this makes strange integer /// wrapping easier to use in generic operations). 
- fn normalize(self: *DeclGen, value: Temporary, info: ArithmeticTypeInfo) !Temporary { + fn normalize(self: *NavGen, value: Temporary, info: ArithmeticTypeInfo) !Temporary { const mod = self.pt.zcu; const ty = value.ty; switch (info.class) { @@ -3507,7 +3499,7 @@ const DeclGen = struct { } } - fn airDivFloor(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airDivFloor(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.temporary(bin_op.lhs); @@ -3564,7 +3556,7 @@ const DeclGen = struct { } } - fn airDivTrunc(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airDivTrunc(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const lhs = try self.temporary(bin_op.lhs); @@ -3592,7 +3584,7 @@ const DeclGen = struct { } } - fn airUnOpSimple(self: *DeclGen, inst: Air.Inst.Index, op: UnaryOp) !?IdRef { + fn airUnOpSimple(self: *NavGen, inst: Air.Inst.Index, op: UnaryOp) !?IdRef { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try self.temporary(un_op); const result = try self.buildUnary(op, operand); @@ -3600,7 +3592,7 @@ const DeclGen = struct { } fn airArithOp( - self: *DeclGen, + self: *NavGen, inst: Air.Inst.Index, comptime fop: BinaryOp, comptime sop: BinaryOp, @@ -3626,7 +3618,7 @@ const DeclGen = struct { return try result.materialize(self); } - fn airAbs(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airAbs(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.temporary(ty_op.operand); // Note: operand_ty may be signed, while ty is always unsigned! 
@@ -3635,7 +3627,7 @@ const DeclGen = struct { return try result.materialize(self); } - fn abs(self: *DeclGen, result_ty: Type, value: Temporary) !Temporary { + fn abs(self: *NavGen, result_ty: Type, value: Temporary) !Temporary { const target = self.getTarget(); const operand_info = self.arithmeticTypeInfo(value.ty); @@ -3658,7 +3650,7 @@ const DeclGen = struct { } fn airAddSubOverflow( - self: *DeclGen, + self: *NavGen, inst: Air.Inst.Index, comptime add: BinaryOp, comptime ucmp: CmpPredicate, @@ -3724,7 +3716,7 @@ const DeclGen = struct { ); } - fn airMulOverflow(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airMulOverflow(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const target = self.getTarget(); const pt = self.pt; @@ -3904,7 +3896,7 @@ const DeclGen = struct { ); } - fn airShlOverflow(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airShlOverflow(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const mod = self.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; @@ -3944,7 +3936,7 @@ const DeclGen = struct { ); } - fn airMulAdd(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airMulAdd(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const extra = self.air.extraData(Air.Bin, pl_op.payload).data; @@ -3960,7 +3952,7 @@ const DeclGen = struct { return try result.materialize(self); } - fn airClzCtz(self: *DeclGen, inst: Air.Inst.Index, op: UnaryOp) !?IdRef { + fn airClzCtz(self: *NavGen, inst: Air.Inst.Index, op: UnaryOp) !?IdRef { if (self.liveness.isUnused(inst)) return null; const mod = self.pt.zcu; @@ -3991,7 +3983,7 @@ const DeclGen = struct { return try result.materialize(self); } - fn airSelect(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airSelect(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const extra = self.air.extraData(Air.Bin, 
pl_op.payload).data; const pred = try self.temporary(pl_op.operand); @@ -4002,7 +3994,7 @@ const DeclGen = struct { return try result.materialize(self); } - fn airSplat(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airSplat(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand_id = try self.resolve(ty_op.operand); @@ -4011,7 +4003,7 @@ const DeclGen = struct { return try self.constructVectorSplat(result_ty, operand_id); } - fn airReduce(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airReduce(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const mod = self.pt.zcu; const reduce = self.air.instructions.items(.data)[@intFromEnum(inst)].reduce; const operand = try self.resolve(reduce.operand); @@ -4086,7 +4078,7 @@ const DeclGen = struct { return result_id; } - fn airShuffle(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airShuffle(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const pt = self.pt; const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; @@ -4163,7 +4155,7 @@ const DeclGen = struct { return try self.constructVector(result_ty, components); } - fn indicesToIds(self: *DeclGen, indices: []const u32) ![]IdRef { + fn indicesToIds(self: *NavGen, indices: []const u32) ![]IdRef { const ids = try self.gpa.alloc(IdRef, indices.len); errdefer self.gpa.free(ids); for (indices, ids) |index, *id| { @@ -4174,7 +4166,7 @@ const DeclGen = struct { } fn accessChainId( - self: *DeclGen, + self: *NavGen, result_ty_id: IdRef, base: IdRef, indices: []const IdRef, @@ -4194,7 +4186,7 @@ const DeclGen = struct { /// same as that of the base pointer, or that of a dereferenced base pointer. AccessChain /// is the latter and PtrAccessChain is the former. 
fn accessChain( - self: *DeclGen, + self: *NavGen, result_ty_id: IdRef, base: IdRef, indices: []const u32, @@ -4205,7 +4197,7 @@ const DeclGen = struct { } fn ptrAccessChain( - self: *DeclGen, + self: *NavGen, result_ty_id: IdRef, base: IdRef, element: IdRef, @@ -4225,7 +4217,7 @@ const DeclGen = struct { return result_id; } - fn ptrAdd(self: *DeclGen, result_ty: Type, ptr_ty: Type, ptr_id: IdRef, offset_id: IdRef) !IdRef { + fn ptrAdd(self: *NavGen, result_ty: Type, ptr_ty: Type, ptr_id: IdRef, offset_id: IdRef) !IdRef { const mod = self.pt.zcu; const result_ty_id = try self.resolveType(result_ty, .direct); @@ -4246,7 +4238,7 @@ const DeclGen = struct { } } - fn airPtrAdd(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airPtrAdd(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_id = try self.resolve(bin_op.lhs); @@ -4257,7 +4249,7 @@ const DeclGen = struct { return try self.ptrAdd(result_ty, ptr_ty, ptr_id, offset_id); } - fn airPtrSub(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airPtrSub(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_id = try self.resolve(bin_op.lhs); @@ -4277,7 +4269,7 @@ const DeclGen = struct { } fn cmp( - self: *DeclGen, + self: *NavGen, op: std.math.CompareOperator, lhs: Temporary, rhs: Temporary, @@ -4443,7 +4435,7 @@ const DeclGen = struct { } fn airCmp( - self: *DeclGen, + self: *NavGen, inst: Air.Inst.Index, comptime op: std.math.CompareOperator, ) !?IdRef { @@ -4455,7 +4447,7 @@ const DeclGen = struct { return try result.materialize(self); } - fn airVectorCmp(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airVectorCmp(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const ty_pl = 
self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const vec_cmp = self.air.extraData(Air.VectorCmp, ty_pl.payload).data; const lhs = try self.temporary(vec_cmp.lhs); @@ -4468,7 +4460,7 @@ const DeclGen = struct { /// Bitcast one type to another. Note: both types, input, output are expected in **direct** representation. fn bitCast( - self: *DeclGen, + self: *NavGen, dst_ty: Type, src_ty: Type, src_id: IdRef, @@ -4536,7 +4528,7 @@ const DeclGen = struct { return result_id; } - fn airBitCast(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airBitCast(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand_id = try self.resolve(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); @@ -4544,7 +4536,7 @@ const DeclGen = struct { return try self.bitCast(result_ty, operand_ty, operand_id); } - fn airIntCast(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airIntCast(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const src = try self.temporary(ty_op.operand); const dst_ty = self.typeOfIndex(inst); @@ -4570,7 +4562,7 @@ const DeclGen = struct { return try result.materialize(self); } - fn intFromPtr(self: *DeclGen, operand_id: IdRef) !IdRef { + fn intFromPtr(self: *NavGen, operand_id: IdRef) !IdRef { const result_type_id = try self.resolveType(Type.usize, .direct); const result_id = self.spv.allocId(); try self.func.body.emit(self.spv.gpa, .OpConvertPtrToU, .{ @@ -4581,13 +4573,13 @@ const DeclGen = struct { return result_id; } - fn airIntFromPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airIntFromPtr(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand_id = try self.resolve(un_op); return try self.intFromPtr(operand_id); } - fn airFloatFromInt(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn 
airFloatFromInt(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand_ty = self.typeOf(ty_op.operand); const operand_id = try self.resolve(ty_op.operand); @@ -4595,7 +4587,7 @@ const DeclGen = struct { return try self.floatFromInt(result_ty, operand_ty, operand_id); } - fn floatFromInt(self: *DeclGen, result_ty: Type, operand_ty: Type, operand_id: IdRef) !IdRef { + fn floatFromInt(self: *NavGen, result_ty: Type, operand_ty: Type, operand_id: IdRef) !IdRef { const operand_info = self.arithmeticTypeInfo(operand_ty); const result_id = self.spv.allocId(); const result_ty_id = try self.resolveType(result_ty, .direct); @@ -4614,14 +4606,14 @@ const DeclGen = struct { return result_id; } - fn airIntFromFloat(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airIntFromFloat(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand_id = try self.resolve(ty_op.operand); const result_ty = self.typeOfIndex(inst); return try self.intFromFloat(result_ty, operand_id); } - fn intFromFloat(self: *DeclGen, result_ty: Type, operand_id: IdRef) !IdRef { + fn intFromFloat(self: *NavGen, result_ty: Type, operand_id: IdRef) !IdRef { const result_info = self.arithmeticTypeInfo(result_ty); const result_ty_id = try self.resolveType(result_ty, .direct); const result_id = self.spv.allocId(); @@ -4640,14 +4632,14 @@ const DeclGen = struct { return result_id; } - fn airIntFromBool(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airIntFromBool(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try self.temporary(un_op); const result = try self.intFromBool(operand); return try result.materialize(self); } - fn airFloatCast(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airFloatCast(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const ty_op 
= self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand_id = try self.resolve(ty_op.operand); const dest_ty = self.typeOfIndex(inst); @@ -4662,7 +4654,7 @@ const DeclGen = struct { return result_id; } - fn airNot(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airNot(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try self.temporary(ty_op.operand); const result_ty = self.typeOfIndex(inst); @@ -4681,7 +4673,7 @@ const DeclGen = struct { return try result.materialize(self); } - fn airArrayToSlice(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airArrayToSlice(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const pt = self.pt; const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -4709,7 +4701,7 @@ const DeclGen = struct { ); } - fn airSlice(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airSlice(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_id = try self.resolve(bin_op.lhs); @@ -4726,7 +4718,7 @@ const DeclGen = struct { ); } - fn airAggregateInit(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airAggregateInit(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const pt = self.pt; const mod = pt.zcu; const ip = &mod.intern_pool; @@ -4816,7 +4808,7 @@ const DeclGen = struct { } } - fn sliceOrArrayLen(self: *DeclGen, operand_id: IdRef, ty: Type) !IdRef { + fn sliceOrArrayLen(self: *NavGen, operand_id: IdRef, ty: Type) !IdRef { const pt = self.pt; const mod = pt.zcu; switch (ty.ptrSize(mod)) { @@ -4832,7 +4824,7 @@ const DeclGen = struct { } } - fn sliceOrArrayPtr(self: *DeclGen, operand_id: IdRef, ty: Type) !IdRef { + fn sliceOrArrayPtr(self: *NavGen, operand_id: IdRef, ty: Type) !IdRef { const mod = self.pt.zcu; if (ty.isSlice(mod)) { const ptr_ty = 
ty.slicePtrFieldType(mod); @@ -4841,7 +4833,7 @@ const DeclGen = struct { return operand_id; } - fn airMemcpy(self: *DeclGen, inst: Air.Inst.Index) !void { + fn airMemcpy(self: *NavGen, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const dest_slice = try self.resolve(bin_op.lhs); const src_slice = try self.resolve(bin_op.rhs); @@ -4857,14 +4849,14 @@ const DeclGen = struct { }); } - fn airSliceField(self: *DeclGen, inst: Air.Inst.Index, field: u32) !?IdRef { + fn airSliceField(self: *NavGen, inst: Air.Inst.Index, field: u32) !?IdRef { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const field_ty = self.typeOfIndex(inst); const operand_id = try self.resolve(ty_op.operand); return try self.extractField(field_ty, operand_id, field); } - fn airSliceElemPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airSliceElemPtr(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const mod = self.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -4881,7 +4873,7 @@ const DeclGen = struct { return try self.ptrAccessChain(ptr_ty_id, slice_ptr, index_id, &.{}); } - fn airSliceElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airSliceElemVal(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const mod = self.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const slice_ty = self.typeOf(bin_op.lhs); @@ -4898,7 +4890,7 @@ const DeclGen = struct { return try self.load(slice_ty.childType(mod), elem_ptr, .{ .is_volatile = slice_ty.isVolatilePtr(mod) }); } - fn ptrElemPtr(self: *DeclGen, ptr_ty: Type, ptr_id: IdRef, index_id: IdRef) !IdRef { + fn ptrElemPtr(self: *NavGen, ptr_ty: Type, ptr_id: IdRef, index_id: IdRef) !IdRef { const mod = self.pt.zcu; // Construct new pointer type for the resulting pointer const elem_ty = ptr_ty.elemType2(mod); // use 
elemType() so that we get T for *[N]T. @@ -4913,7 +4905,7 @@ const DeclGen = struct { } } - fn airPtrElemPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airPtrElemPtr(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const pt = self.pt; const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; @@ -4931,7 +4923,7 @@ const DeclGen = struct { return try self.ptrElemPtr(src_ptr_ty, ptr_id, index_id); } - fn airArrayElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airArrayElemVal(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const mod = self.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const array_ty = self.typeOf(bin_op.lhs); @@ -4992,7 +4984,7 @@ const DeclGen = struct { return try self.convertToDirect(elem_ty, result_id); } - fn airPtrElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airPtrElemVal(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const mod = self.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); @@ -5003,7 +4995,7 @@ const DeclGen = struct { return try self.load(elem_ty, elem_ptr_id, .{ .is_volatile = ptr_ty.isVolatilePtr(mod) }); } - fn airVectorStoreElem(self: *DeclGen, inst: Air.Inst.Index) !void { + fn airVectorStoreElem(self: *NavGen, inst: Air.Inst.Index) !void { const mod = self.pt.zcu; const data = self.air.instructions.items(.data)[@intFromEnum(inst)].vector_store_elem; const extra = self.air.extraData(Air.Bin, data.payload).data; @@ -5025,7 +5017,7 @@ const DeclGen = struct { }); } - fn airSetUnionTag(self: *DeclGen, inst: Air.Inst.Index) !void { + fn airSetUnionTag(self: *NavGen, inst: Air.Inst.Index) !void { const mod = self.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const un_ptr_ty = self.typeOf(bin_op.lhs); @@ -5048,7 +5040,7 @@ const DeclGen = struct { } } - fn airGetUnionTag(self: *DeclGen, inst: Air.Inst.Index) 
!?IdRef { + fn airGetUnionTag(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const un_ty = self.typeOf(ty_op.operand); @@ -5064,7 +5056,7 @@ const DeclGen = struct { } fn unionInit( - self: *DeclGen, + self: *NavGen, ty: Type, active_field: u32, payload: ?IdRef, @@ -5129,7 +5121,7 @@ const DeclGen = struct { return try self.load(ty, tmp_id, .{}); } - fn airUnionInit(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airUnionInit(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const pt = self.pt; const mod = pt.zcu; const ip = &mod.intern_pool; @@ -5146,7 +5138,7 @@ const DeclGen = struct { return try self.unionInit(ty, extra.field_index, payload); } - fn airStructFieldVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airStructFieldVal(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const pt = self.pt; const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; @@ -5191,7 +5183,7 @@ const DeclGen = struct { } } - fn airFieldParentPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airFieldParentPtr(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const pt = self.pt; const mod = pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; @@ -5225,7 +5217,7 @@ const DeclGen = struct { } fn structFieldPtr( - self: *DeclGen, + self: *NavGen, result_ptr_ty: Type, object_ptr_ty: Type, object_ptr: IdRef, @@ -5273,7 +5265,7 @@ const DeclGen = struct { } } - fn airStructFieldPtrIndex(self: *DeclGen, inst: Air.Inst.Index, field_index: u32) !?IdRef { + fn airStructFieldPtrIndex(self: *NavGen, inst: Air.Inst.Index, field_index: u32) !?IdRef { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const struct_ptr = try self.resolve(ty_op.operand); const struct_ptr_ty = self.typeOf(ty_op.operand); @@ -5294,7 +5286,7 @@ const DeclGen = struct { // which is in the Generic address space. 
The variable is actually // placed in the Function address space. fn alloc( - self: *DeclGen, + self: *NavGen, ty: Type, options: AllocOptions, ) !IdRef { @@ -5326,7 +5318,7 @@ const DeclGen = struct { } } - fn airAlloc(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airAlloc(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const mod = self.pt.zcu; const ptr_ty = self.typeOfIndex(inst); assert(ptr_ty.ptrAddressSpace(mod) == .generic); @@ -5334,7 +5326,7 @@ const DeclGen = struct { return try self.alloc(child_ty, .{}); } - fn airArg(self: *DeclGen) IdRef { + fn airArg(self: *NavGen) IdRef { defer self.next_arg_index += 1; return self.args.items[self.next_arg_index]; } @@ -5343,7 +5335,7 @@ const DeclGen = struct { /// block to jump to. This function emits instructions, so it should be emitted /// inside the merge block of the block. /// This function should only be called with structured control flow generation. - fn structuredNextBlock(self: *DeclGen, incoming: []const ControlFlow.Structured.Block.Incoming) !IdRef { + fn structuredNextBlock(self: *NavGen, incoming: []const ControlFlow.Structured.Block.Incoming) !IdRef { assert(self.control_flow == .structured); const result_id = self.spv.allocId(); @@ -5362,7 +5354,7 @@ const DeclGen = struct { /// Jumps to the block with the target block-id. This function must only be called when /// terminating a body, there should be no instructions after it. /// This function should only be called with structured control flow generation. - fn structuredBreak(self: *DeclGen, target_block: IdRef) !void { + fn structuredBreak(self: *NavGen, target_block: IdRef) !void { assert(self.control_flow == .structured); const sblock = self.control_flow.structured.block_stack.getLast(); @@ -5393,7 +5385,7 @@ const DeclGen = struct { /// should still be emitted to the block that should follow this structured body. /// This function should only be called with structured control flow generation. 
fn genStructuredBody( - self: *DeclGen, + self: *NavGen, /// This parameter defines the method that this structured body is exited with. block_merge_type: union(enum) { /// Using selection; early exits from this body are surrounded with @@ -5487,13 +5479,13 @@ const DeclGen = struct { } } - fn airBlock(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airBlock(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const inst_datas = self.air.instructions.items(.data); const extra = self.air.extraData(Air.Block, inst_datas[@intFromEnum(inst)].ty_pl.payload); return self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len])); } - fn lowerBlock(self: *DeclGen, inst: Air.Inst.Index, body: []const Air.Inst.Index) !?IdRef { + fn lowerBlock(self: *NavGen, inst: Air.Inst.Index, body: []const Air.Inst.Index) !?IdRef { // In AIR, a block doesn't really define an entry point like a block, but // more like a scope that breaks can jump out of and "return" a value from. // This cannot be directly modelled in SPIR-V, so in a block instruction, @@ -5633,7 +5625,7 @@ const DeclGen = struct { return null; } - fn airBr(self: *DeclGen, inst: Air.Inst.Index) !void { + fn airBr(self: *NavGen, inst: Air.Inst.Index) !void { const pt = self.pt; const br = self.air.instructions.items(.data)[@intFromEnum(inst)].br; const operand_ty = self.typeOf(br.operand); @@ -5670,7 +5662,7 @@ const DeclGen = struct { } } - fn airCondBr(self: *DeclGen, inst: Air.Inst.Index) !void { + fn airCondBr(self: *NavGen, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const cond_br = self.air.extraData(Air.CondBr, pl_op.payload); const then_body: []const Air.Inst.Index = @ptrCast(self.air.extra[cond_br.end..][0..cond_br.data.then_body_len]); @@ -5730,7 +5722,7 @@ const DeclGen = struct { } } - fn airLoop(self: *DeclGen, inst: Air.Inst.Index) !void { + fn airLoop(self: *NavGen, inst: Air.Inst.Index) !void { const ty_pl = 
self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const loop = self.air.extraData(Air.Block, ty_pl.payload); const body: []const Air.Inst.Index = @ptrCast(self.air.extra[loop.end..][0..loop.data.body_len]); @@ -5777,7 +5769,7 @@ const DeclGen = struct { } } - fn airLoad(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airLoad(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const mod = self.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const ptr_ty = self.typeOf(ty_op.operand); @@ -5788,7 +5780,7 @@ const DeclGen = struct { return try self.load(elem_ty, operand, .{ .is_volatile = ptr_ty.isVolatilePtr(mod) }); } - fn airStore(self: *DeclGen, inst: Air.Inst.Index) !void { + fn airStore(self: *NavGen, inst: Air.Inst.Index) !void { const mod = self.pt.zcu; const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); @@ -5799,14 +5791,13 @@ const DeclGen = struct { try self.store(elem_ty, ptr, value, .{ .is_volatile = ptr_ty.isVolatilePtr(mod) }); } - fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void { + fn airRet(self: *NavGen, inst: Air.Inst.Index) !void { const pt = self.pt; const mod = pt.zcu; const operand = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const ret_ty = self.typeOf(operand); if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { - const decl = mod.declPtr(self.decl_index); - const fn_info = mod.typeToFunc(decl.typeOf(mod)).?; + const fn_info = mod.typeToFunc(mod.navValue(self.owner_nav).typeOf(mod)).?; if (Type.fromInterned(fn_info.return_type).isError(mod)) { // Functions with an empty error set are emitted with an error code // return type and return zero so they can be function pointers coerced @@ -5822,7 +5813,7 @@ const DeclGen = struct { try self.func.body.emit(self.spv.gpa, .OpReturnValue, .{ .value = operand_id }); } - fn airRetLoad(self: *DeclGen, inst: Air.Inst.Index) !void { + fn airRetLoad(self: *NavGen, inst: 
Air.Inst.Index) !void { const pt = self.pt; const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; @@ -5830,8 +5821,7 @@ const DeclGen = struct { const ret_ty = ptr_ty.childType(mod); if (!ret_ty.hasRuntimeBitsIgnoreComptime(pt)) { - const decl = mod.declPtr(self.decl_index); - const fn_info = mod.typeToFunc(decl.typeOf(mod)).?; + const fn_info = mod.typeToFunc(mod.navValue(self.owner_nav).typeOf(mod)).?; if (Type.fromInterned(fn_info.return_type).isError(mod)) { // Functions with an empty error set are emitted with an error code // return type and return zero so they can be function pointers coerced @@ -5850,7 +5840,7 @@ const DeclGen = struct { }); } - fn airTry(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airTry(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const mod = self.pt.zcu; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const err_union_id = try self.resolve(pl_op.operand); @@ -5920,7 +5910,7 @@ const DeclGen = struct { return try self.extractField(payload_ty, err_union_id, eu_layout.payloadFieldIndex()); } - fn airErrUnionErr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airErrUnionErr(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const mod = self.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand_id = try self.resolve(ty_op.operand); @@ -5943,7 +5933,7 @@ const DeclGen = struct { return try self.extractField(Type.anyerror, operand_id, eu_layout.errorFieldIndex()); } - fn airErrUnionPayload(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airErrUnionPayload(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand_id = try self.resolve(ty_op.operand); const payload_ty = self.typeOfIndex(inst); @@ -5956,7 +5946,7 @@ const DeclGen = struct { return try self.extractField(payload_ty, operand_id, eu_layout.payloadFieldIndex()); } - fn 
airWrapErrUnionErr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airWrapErrUnionErr(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const mod = self.pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const err_union_ty = self.typeOfIndex(inst); @@ -5981,7 +5971,7 @@ const DeclGen = struct { return try self.constructStruct(err_union_ty, &types, &members); } - fn airWrapErrUnionPayload(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airWrapErrUnionPayload(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const err_union_ty = self.typeOfIndex(inst); const operand_id = try self.resolve(ty_op.operand); @@ -6003,7 +5993,7 @@ const DeclGen = struct { return try self.constructStruct(err_union_ty, &types, &members); } - fn airIsNull(self: *DeclGen, inst: Air.Inst.Index, is_pointer: bool, pred: enum { is_null, is_non_null }) !?IdRef { + fn airIsNull(self: *NavGen, inst: Air.Inst.Index, is_pointer: bool, pred: enum { is_null, is_non_null }) !?IdRef { const pt = self.pt; const mod = pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; @@ -6080,7 +6070,7 @@ const DeclGen = struct { }; } - fn airIsErr(self: *DeclGen, inst: Air.Inst.Index, pred: enum { is_err, is_non_err }) !?IdRef { + fn airIsErr(self: *NavGen, inst: Air.Inst.Index, pred: enum { is_err, is_non_err }) !?IdRef { const mod = self.pt.zcu; const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand_id = try self.resolve(un_op); @@ -6113,7 +6103,7 @@ const DeclGen = struct { return result_id; } - fn airUnwrapOptional(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airUnwrapOptional(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const pt = self.pt; const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -6130,7 +6120,7 @@ const DeclGen = struct { return try self.extractField(payload_ty, 
operand_id, 0); } - fn airUnwrapOptionalPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airUnwrapOptionalPtr(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const pt = self.pt; const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -6155,7 +6145,7 @@ const DeclGen = struct { return try self.accessChain(result_ty_id, operand_id, &.{0}); } - fn airWrapOptional(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airWrapOptional(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const pt = self.pt; const mod = pt.zcu; const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -6178,7 +6168,7 @@ const DeclGen = struct { return try self.constructStruct(optional_ty, &types, &members); } - fn airSwitchBr(self: *DeclGen, inst: Air.Inst.Index) !void { + fn airSwitchBr(self: *NavGen, inst: Air.Inst.Index) !void { const pt = self.pt; const mod = pt.zcu; const target = self.getTarget(); @@ -6347,16 +6337,15 @@ const DeclGen = struct { } } - fn airUnreach(self: *DeclGen) !void { + fn airUnreach(self: *NavGen) !void { try self.func.body.emit(self.spv.gpa, .OpUnreachable, {}); } - fn airDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void { + fn airDbgStmt(self: *NavGen, inst: Air.Inst.Index) !void { const pt = self.pt; const mod = pt.zcu; const dbg_stmt = self.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt; - const decl = mod.declPtr(self.decl_index); - const path = decl.getFileScope(mod).sub_file_path; + const path = mod.navFileScope(self.owner_nav).sub_file_path; try self.func.body.emit(self.spv.gpa, .OpLine, .{ .file = try self.spv.resolveString(path), .line = self.base_line + dbg_stmt.line + 1, @@ -6364,25 +6353,24 @@ const DeclGen = struct { }); } - fn airDbgInlineBlock(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airDbgInlineBlock(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const mod = self.pt.zcu; const inst_datas = self.air.instructions.items(.data); const extra = 
self.air.extraData(Air.DbgInlineBlock, inst_datas[@intFromEnum(inst)].ty_pl.payload); - const decl = mod.funcOwnerDeclPtr(extra.data.func); const old_base_line = self.base_line; defer self.base_line = old_base_line; - self.base_line = decl.navSrcLine(mod); + self.base_line = mod.navSrcLine(mod.funcInfo(extra.data.func).owner_nav); return self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len])); } - fn airDbgVar(self: *DeclGen, inst: Air.Inst.Index) !void { + fn airDbgVar(self: *NavGen, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const target_id = try self.resolve(pl_op.operand); const name = self.air.nullTerminatedString(pl_op.payload); try self.spv.debugName(target_id, name); } - fn airAssembly(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airAssembly(self: *NavGen, inst: Air.Inst.Index) !?IdRef { const mod = self.pt.zcu; const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); @@ -6465,7 +6453,7 @@ const DeclGen = struct { // TODO: Translate proper error locations. 
assert(as.errors.items.len != 0); assert(self.error_msg == null); - const src_loc = mod.declPtr(self.decl_index).navSrcLoc(mod); + const src_loc = mod.navSrcLoc(self.owner_nav); self.error_msg = try Zcu.ErrorMsg.create(mod.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{}); const notes = try mod.gpa.alloc(Zcu.ErrorMsg, as.errors.items.len); @@ -6511,7 +6499,7 @@ const DeclGen = struct { return null; } - fn airCall(self: *DeclGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !?IdRef { + fn airCall(self: *NavGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !?IdRef { _ = modifier; const pt = self.pt; @@ -6566,7 +6554,7 @@ const DeclGen = struct { return result_id; } - fn builtin3D(self: *DeclGen, result_ty: Type, builtin: spec.BuiltIn, dimension: u32, out_of_range_value: anytype) !IdRef { + fn builtin3D(self: *NavGen, result_ty: Type, builtin: spec.BuiltIn, dimension: u32, out_of_range_value: anytype) !IdRef { if (dimension >= 3) { return try self.constInt(result_ty, out_of_range_value, .direct); } @@ -6582,7 +6570,7 @@ const DeclGen = struct { return try self.extractVectorComponent(result_ty, vec, dimension); } - fn airWorkItemId(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airWorkItemId(self: *NavGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const dimension = pl_op.payload; @@ -6593,7 +6581,7 @@ const DeclGen = struct { return try result.materialize(self); } - fn airWorkGroupSize(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn airWorkGroupSize(self: *NavGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const dimension = pl_op.payload; @@ -6604,7 +6592,7 @@ const DeclGen = struct { return try result.materialize(self); } - fn airWorkGroupId(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + fn 
airWorkGroupId(self: *NavGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const dimension = pl_op.payload; @@ -6615,12 +6603,12 @@ const DeclGen = struct { return try result.materialize(self); } - fn typeOf(self: *DeclGen, inst: Air.Inst.Ref) Type { + fn typeOf(self: *NavGen, inst: Air.Inst.Ref) Type { const mod = self.pt.zcu; return self.air.typeOf(inst, &mod.intern_pool); } - fn typeOfIndex(self: *DeclGen, inst: Air.Inst.Index) Type { + fn typeOfIndex(self: *NavGen, inst: Air.Inst.Index) Type { const mod = self.pt.zcu; return self.air.typeOfIndex(inst, &mod.intern_pool); } diff --git a/src/link.zig b/src/link.zig index d8a09b689fad..59ace7ce3e85 100644 --- a/src/link.zig +++ b/src/link.zig @@ -216,8 +216,8 @@ pub const File = struct { } } - pub fn cast(base: *File, comptime T: type) ?*T { - return if (base.tag == T.base_tag) @fieldParentPtr("base", base) else null; + pub fn cast(base: *File, comptime tag: Tag) if (dev.env.supports(tag.devFeature())) ?*tag.Type() else ?noreturn { + return if (dev.env.supports(tag.devFeature()) and base.tag == tag) @fieldParentPtr("base", base) else null; } pub fn makeWritable(base: *File) !void { @@ -231,7 +231,7 @@ pub const File = struct { const emit = base.emit; if (base.child_pid) |pid| { if (builtin.os.tag == .windows) { - base.cast(Coff).?.ptraceAttach(pid) catch |err| { + base.cast(.coff).?.ptraceAttach(pid) catch |err| { log.warn("attaching failed with error: {s}", .{@errorName(err)}); }; } else { @@ -249,7 +249,7 @@ pub const File = struct { .linux => std.posix.ptrace(std.os.linux.PTRACE.ATTACH, pid, 0, 0) catch |err| { log.warn("ptrace failure: {s}", .{@errorName(err)}); }, - .macos => base.cast(MachO).?.ptraceAttach(pid) catch |err| { + .macos => base.cast(.macho).?.ptraceAttach(pid) catch |err| { log.warn("attaching failed with error: {s}", .{@errorName(err)}); }, .windows => unreachable, @@ -317,10 +317,10 @@ 
pub const File = struct { if (base.child_pid) |pid| { switch (builtin.os.tag) { - .macos => base.cast(MachO).?.ptraceDetach(pid) catch |err| { + .macos => base.cast(.macho).?.ptraceDetach(pid) catch |err| { log.warn("detaching failed with error: {s}", .{@errorName(err)}); }, - .windows => base.cast(Coff).?.ptraceDetach(pid), + .windows => base.cast(.coff).?.ptraceDetach(pid), else => return error.HotSwapUnavailableOnHostOperatingSystem, } } @@ -329,7 +329,7 @@ pub const File = struct { } } - pub const UpdateDeclError = error{ + pub const UpdateNavError = error{ OutOfMemory, Overflow, Underflow, @@ -367,27 +367,12 @@ pub const File = struct { HotSwapUnavailableOnHostOperatingSystem, }; - /// Called from within the CodeGen to lower a local variable instantion as an unnamed - /// constant. Returns the symbol index of the lowered constant in the read-only section - /// of the final binary. - pub fn lowerUnnamedConst(base: *File, pt: Zcu.PerThread, val: Value, decl_index: InternPool.DeclIndex) UpdateDeclError!u32 { - switch (base.tag) { - .spirv => unreachable, - .c => unreachable, - .nvptx => unreachable, - inline else => |tag| { - dev.check(tag.devFeature()); - return @as(*tag.Type(), @fieldParentPtr("base", base)).lowerUnnamedConst(pt, val, decl_index); - }, - } - } - /// Called from within CodeGen to retrieve the symbol index of a global symbol. /// If no symbol exists yet with this name, a new undefined global symbol will /// be created. This symbol may get resolved once all relocatables are (re-)linked. /// Optionally, it is possible to specify where to expect the symbol defined if it /// is an import. 
- pub fn getGlobalSymbol(base: *File, name: []const u8, lib_name: ?[]const u8) UpdateDeclError!u32 { + pub fn getGlobalSymbol(base: *File, name: []const u8, lib_name: ?[]const u8) UpdateNavError!u32 { log.debug("getGlobalSymbol '{s}' (expected in '{?s}')", .{ name, lib_name }); switch (base.tag) { .plan9 => unreachable, @@ -401,14 +386,14 @@ pub const File = struct { } } - /// May be called before or after updateExports for any given Decl. - pub fn updateDecl(base: *File, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) UpdateDeclError!void { - const decl = pt.zcu.declPtr(decl_index); - assert(decl.has_tv); + /// May be called before or after updateExports for any given Nav. + pub fn updateNav(base: *File, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) UpdateNavError!void { + const nav = pt.zcu.intern_pool.getNav(nav_index); + assert(nav.status == .resolved); switch (base.tag) { inline else => |tag| { dev.check(tag.devFeature()); - return @as(*tag.Type(), @fieldParentPtr("base", base)).updateDecl(pt, decl_index); + return @as(*tag.Type(), @fieldParentPtr("base", base)).updateNav(pt, nav_index); }, } } @@ -420,7 +405,7 @@ pub const File = struct { func_index: InternPool.Index, air: Air, liveness: Liveness, - ) UpdateDeclError!void { + ) UpdateNavError!void { switch (base.tag) { inline else => |tag| { dev.check(tag.devFeature()); @@ -429,14 +414,16 @@ pub const File = struct { } } - pub fn updateDeclLineNumber(base: *File, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) UpdateDeclError!void { - const decl = pt.zcu.declPtr(decl_index); - assert(decl.has_tv); + pub fn updateNavLineNumber( + base: *File, + pt: Zcu.PerThread, + nav_index: InternPool.Nav.Index, + ) UpdateNavError!void { switch (base.tag) { .spirv, .nvptx => {}, inline else => |tag| { dev.check(tag.devFeature()); - return @as(*tag.Type(), @fieldParentPtr("base", base)).updateNavLineNumber(pt, 
nav_index); }, } } @@ -675,52 +662,50 @@ pub const File = struct { addend: u32, }; - /// Get allocated `Decl`'s address in virtual memory. + /// Get allocated `Nav`'s address in virtual memory. /// The linker is passed information about the containing atom, `parent_atom_index`, and offset within it's /// memory buffer, `offset`, so that it can make a note of potential relocation sites, should the - /// `Decl`'s address was not yet resolved, or the containing atom gets moved in virtual memory. - /// May be called before or after updateFunc/updateDecl therefore it is up to the linker to allocate + /// `Nav`'s address was not yet resolved, or the containing atom gets moved in virtual memory. + /// May be called before or after updateFunc/updateNav therefore it is up to the linker to allocate /// the block/atom. - pub fn getDeclVAddr(base: *File, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, reloc_info: RelocInfo) !u64 { + pub fn getNavVAddr(base: *File, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index, reloc_info: RelocInfo) !u64 { switch (base.tag) { .c => unreachable, .spirv => unreachable, .nvptx => unreachable, inline else => |tag| { dev.check(tag.devFeature()); - return @as(*tag.Type(), @fieldParentPtr("base", base)).getDeclVAddr(pt, decl_index, reloc_info); + return @as(*tag.Type(), @fieldParentPtr("base", base)).getNavVAddr(pt, nav_index, reloc_info); }, } } - pub const LowerResult = @import("codegen.zig").Result; - - pub fn lowerAnonDecl( + pub fn lowerUav( base: *File, pt: Zcu.PerThread, decl_val: InternPool.Index, decl_align: InternPool.Alignment, src_loc: Zcu.LazySrcLoc, - ) !LowerResult { + ) !@import("codegen.zig").GenResult { switch (base.tag) { .c => unreachable, .spirv => unreachable, .nvptx => unreachable, inline else => |tag| { dev.check(tag.devFeature()); - return @as(*tag.Type(), @fieldParentPtr("base", base)).lowerAnonDecl(pt, decl_val, decl_align, src_loc); + return @as(*tag.Type(), @fieldParentPtr("base", base)).lowerUav(pt, decl_val, 
decl_align, src_loc); }, } } - pub fn getAnonDeclVAddr(base: *File, decl_val: InternPool.Index, reloc_info: RelocInfo) !u64 { + pub fn getUavVAddr(base: *File, decl_val: InternPool.Index, reloc_info: RelocInfo) !u64 { switch (base.tag) { .c => unreachable, .spirv => unreachable, .nvptx => unreachable, inline else => |tag| { dev.check(tag.devFeature()); - return @as(*tag.Type(), @fieldParentPtr("base", base)).getAnonDeclVAddr(decl_val, reloc_info); + return @as(*tag.Type(), @fieldParentPtr("base", base)).getUavVAddr(decl_val, reloc_info); }, } } @@ -964,18 +949,7 @@ pub const File = struct { pub const Kind = enum { code, const_data }; kind: Kind, - ty: Type, - - pub fn initDecl(kind: Kind, decl: ?InternPool.DeclIndex, mod: *Zcu) LazySymbol { - return .{ .kind = kind, .ty = if (decl) |decl_index| - mod.declPtr(decl_index).val.toType() - else - Type.anyerror }; - } - - pub fn getDecl(self: LazySymbol, mod: *Zcu) InternPool.OptionalDeclIndex { - return InternPool.OptionalDeclIndex.init(self.ty.getOwnerDeclOrNull(mod)); - } + ty: InternPool.Index, }; pub fn effectiveOutputMode( diff --git a/src/link/C.zig b/src/link/C.zig index 1a6cee068ebc..e7c8f6a7b009 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -19,28 +19,27 @@ const Value = @import("../Value.zig"); const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); -pub const base_tag: link.File.Tag = .c; pub const zig_h = "#include \"zig.h\"\n"; base: link.File, /// This linker backend does not try to incrementally link output C source code. /// Instead, it tracks all declarations in this table, and iterates over it /// in the flush function, stitching pre-rendered pieces of C code together. -decl_table: std.AutoArrayHashMapUnmanaged(InternPool.DeclIndex, DeclBlock) = .{}, +navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, AvBlock) = .{}, /// All the string bytes of rendered C code, all squished into one array. 
/// While in progress, a separate buffer is used, and then when finished, the /// buffer is copied into this one. string_bytes: std.ArrayListUnmanaged(u8) = .{}, /// Tracks all the anonymous decls that are used by all the decls so they can /// be rendered during flush(). -anon_decls: std.AutoArrayHashMapUnmanaged(InternPool.Index, DeclBlock) = .{}, -/// Sparse set of anon decls that are overaligned. Underaligned anon decls are +uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, AvBlock) = .{}, +/// Sparse set of uavs that are overaligned. Underaligned anon decls are /// lowered the same as ABI-aligned anon decls. The keys here are a subset of -/// the keys of `anon_decls`. -aligned_anon_decls: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment) = .{}, +/// the keys of `uavs`. +aligned_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment) = .{}, -exported_decls: std.AutoArrayHashMapUnmanaged(InternPool.DeclIndex, ExportedBlock) = .{}, -exported_values: std.AutoArrayHashMapUnmanaged(InternPool.Index, ExportedBlock) = .{}, +exported_navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, ExportedBlock) = .{}, +exported_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, ExportedBlock) = .{}, /// Optimization, `updateDecl` reuses this buffer rather than creating a new /// one with every call. @@ -67,7 +66,7 @@ const String = extern struct { }; /// Per-declaration data. -pub const DeclBlock = struct { +pub const AvBlock = struct { code: String = String.empty, fwd_decl: String = String.empty, /// Each `Decl` stores a set of used `CType`s. 
In `flush()`, we iterate @@ -76,10 +75,10 @@ pub const DeclBlock = struct { /// May contain string references to ctype_pool lazy_fns: codegen.LazyFnMap = .{}, - fn deinit(db: *DeclBlock, gpa: Allocator) void { - db.lazy_fns.deinit(gpa); - db.ctype_pool.deinit(gpa); - db.* = undefined; + fn deinit(ab: *AvBlock, gpa: Allocator) void { + ab.lazy_fns.deinit(gpa); + ab.ctype_pool.deinit(gpa); + ab.* = undefined; } }; @@ -158,16 +157,16 @@ pub fn createEmpty( pub fn deinit(self: *C) void { const gpa = self.base.comp.gpa; - for (self.decl_table.values()) |*db| { + for (self.navs.values()) |*db| { db.deinit(gpa); } - self.decl_table.deinit(gpa); + self.navs.deinit(gpa); - for (self.anon_decls.values()) |*db| { + for (self.uavs.values()) |*db| { db.deinit(gpa); } - self.anon_decls.deinit(gpa); - self.aligned_anon_decls.deinit(gpa); + self.uavs.deinit(gpa); + self.aligned_uavs.deinit(gpa); self.string_bytes.deinit(gpa); self.fwd_decl_buf.deinit(gpa); @@ -194,9 +193,7 @@ pub fn updateFunc( const zcu = pt.zcu; const gpa = zcu.gpa; const func = zcu.funcInfo(func_index); - const decl_index = func.owner_decl; - const decl = zcu.declPtr(decl_index); - const gop = try self.decl_table.getOrPut(gpa, decl_index); + const gop = try self.navs.getOrPut(gpa, func.owner_nav); if (!gop.found_existing) gop.value_ptr.* = .{}; const ctype_pool = &gop.value_ptr.ctype_pool; const lazy_fns = &gop.value_ptr.lazy_fns; @@ -208,8 +205,6 @@ pub fn updateFunc( fwd_decl.clearRetainingCapacity(); code.clearRetainingCapacity(); - const file_scope = zcu.namespacePtr(decl.src_namespace).fileScope(zcu); - var function: codegen.Function = .{ .value_map = codegen.CValueMap.init(gpa), .air = air, @@ -219,15 +214,15 @@ pub fn updateFunc( .dg = .{ .gpa = gpa, .pt = pt, - .mod = file_scope.mod, + .mod = zcu.navFileScope(func.owner_nav).mod, .error_msg = null, - .pass = .{ .decl = decl_index }, - .is_naked_fn = decl.typeOf(zcu).fnCallingConvention(zcu) == .Naked, + .pass = .{ .nav = func.owner_nav }, + .is_naked_fn 
= zcu.navValue(func.owner_nav).typeOf(zcu).fnCallingConvention(zcu) == .Naked, .fwd_decl = fwd_decl.toManaged(gpa), .ctype_pool = ctype_pool.*, .scratch = .{}, - .anon_decl_deps = self.anon_decls, - .aligned_anon_decls = self.aligned_anon_decls, + .uav_deps = self.uavs, + .aligned_uavs = self.aligned_uavs, }, .code = code.toManaged(gpa), .indent_writer = undefined, // set later so we can get a pointer to object.code @@ -236,8 +231,8 @@ pub fn updateFunc( }; function.object.indent_writer = .{ .underlying_writer = function.object.code.writer() }; defer { - self.anon_decls = function.object.dg.anon_decl_deps; - self.aligned_anon_decls = function.object.dg.aligned_anon_decls; + self.uavs = function.object.dg.uav_deps; + self.aligned_uavs = function.object.dg.aligned_uavs; fwd_decl.* = function.object.dg.fwd_decl.moveToUnmanaged(); ctype_pool.* = function.object.dg.ctype_pool.move(); ctype_pool.freeUnusedCapacity(gpa); @@ -248,13 +243,10 @@ pub fn updateFunc( function.deinit(); } - try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); + try zcu.failed_codegen.ensureUnusedCapacity(gpa, 1); codegen.genFunc(&function) catch |err| switch (err) { error.AnalysisFail => { - zcu.failed_analysis.putAssumeCapacityNoClobber( - InternPool.AnalUnit.wrap(.{ .decl = decl_index }), - function.object.dg.error_msg.?, - ); + zcu.failed_codegen.putAssumeCapacityNoClobber(func.owner_nav, function.object.dg.error_msg.?); return; }, else => |e| return e, @@ -263,9 +255,9 @@ pub fn updateFunc( gop.value_ptr.code = try self.addString(function.object.code.items); } -fn updateAnonDecl(self: *C, pt: Zcu.PerThread, i: usize) !void { +fn updateUav(self: *C, pt: Zcu.PerThread, i: usize) !void { const gpa = self.base.comp.gpa; - const anon_decl = self.anon_decls.keys()[i]; + const uav = self.uavs.keys()[i]; const fwd_decl = &self.fwd_decl_buf; const code = &self.code_buf; @@ -278,21 +270,21 @@ fn updateAnonDecl(self: *C, pt: Zcu.PerThread, i: usize) !void { .pt = pt, .mod = pt.zcu.root_mod, .error_msg 
= null, - .pass = .{ .anon = anon_decl }, + .pass = .{ .uav = uav }, .is_naked_fn = false, .fwd_decl = fwd_decl.toManaged(gpa), .ctype_pool = codegen.CType.Pool.empty, .scratch = .{}, - .anon_decl_deps = self.anon_decls, - .aligned_anon_decls = self.aligned_anon_decls, + .uav_deps = self.uavs, + .aligned_uavs = self.aligned_uavs, }, .code = code.toManaged(gpa), .indent_writer = undefined, // set later so we can get a pointer to object.code }; object.indent_writer = .{ .underlying_writer = object.code.writer() }; defer { - self.anon_decls = object.dg.anon_decl_deps; - self.aligned_anon_decls = object.dg.aligned_anon_decls; + self.uavs = object.dg.uav_deps; + self.aligned_uavs = object.dg.aligned_uavs; fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged(); object.dg.ctype_pool.deinit(object.dg.gpa); object.dg.scratch.deinit(gpa); @@ -300,8 +292,8 @@ fn updateAnonDecl(self: *C, pt: Zcu.PerThread, i: usize) !void { } try object.dg.ctype_pool.init(gpa); - const c_value: codegen.CValue = .{ .constant = Value.fromInterned(anon_decl) }; - const alignment: Alignment = self.aligned_anon_decls.get(anon_decl) orelse .none; + const c_value: codegen.CValue = .{ .constant = Value.fromInterned(uav) }; + const alignment: Alignment = self.aligned_uavs.get(uav) orelse .none; codegen.genDeclValue(&object, c_value.constant, c_value, alignment, .none) catch |err| switch (err) { error.AnalysisFail => { @panic("TODO: C backend AnalysisFail on anonymous decl"); @@ -312,23 +304,22 @@ fn updateAnonDecl(self: *C, pt: Zcu.PerThread, i: usize) !void { }; object.dg.ctype_pool.freeUnusedCapacity(gpa); - object.dg.anon_decl_deps.values()[i] = .{ + object.dg.uav_deps.values()[i] = .{ .code = try self.addString(object.code.items), .fwd_decl = try self.addString(object.dg.fwd_decl.items), .ctype_pool = object.dg.ctype_pool.move(), }; } -pub fn updateDecl(self: *C, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { +pub fn updateNav(self: *C, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) 
!void { const tracy = trace(@src()); defer tracy.end(); const gpa = self.base.comp.gpa; const zcu = pt.zcu; - const decl = zcu.declPtr(decl_index); - const gop = try self.decl_table.getOrPut(gpa, decl_index); - errdefer _ = self.decl_table.pop(); + const gop = try self.navs.getOrPut(gpa, nav_index); + errdefer _ = self.navs.pop(); if (!gop.found_existing) gop.value_ptr.* = .{}; const ctype_pool = &gop.value_ptr.ctype_pool; const fwd_decl = &self.fwd_decl_buf; @@ -338,29 +329,27 @@ pub fn updateDecl(self: *C, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) fwd_decl.clearRetainingCapacity(); code.clearRetainingCapacity(); - const file_scope = zcu.namespacePtr(decl.src_namespace).fileScope(zcu); - var object: codegen.Object = .{ .dg = .{ .gpa = gpa, .pt = pt, - .mod = file_scope.mod, + .mod = zcu.navFileScope(nav_index).mod, .error_msg = null, - .pass = .{ .decl = decl_index }, + .pass = .{ .nav = nav_index }, .is_naked_fn = false, .fwd_decl = fwd_decl.toManaged(gpa), .ctype_pool = ctype_pool.*, .scratch = .{}, - .anon_decl_deps = self.anon_decls, - .aligned_anon_decls = self.aligned_anon_decls, + .uav_deps = self.uavs, + .aligned_uavs = self.aligned_uavs, }, .code = code.toManaged(gpa), .indent_writer = undefined, // set later so we can get a pointer to object.code }; object.indent_writer = .{ .underlying_writer = object.code.writer() }; defer { - self.anon_decls = object.dg.anon_decl_deps; - self.aligned_anon_decls = object.dg.aligned_anon_decls; + self.uavs = object.dg.uav_deps; + self.aligned_uavs = object.dg.aligned_uavs; fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged(); ctype_pool.* = object.dg.ctype_pool.move(); ctype_pool.freeUnusedCapacity(gpa); @@ -368,13 +357,10 @@ pub fn updateDecl(self: *C, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) code.* = object.code.moveToUnmanaged(); } - try zcu.failed_analysis.ensureUnusedCapacity(gpa, 1); + try zcu.failed_codegen.ensureUnusedCapacity(gpa, 1); codegen.genDecl(&object) catch |err| switch (err) { 
error.AnalysisFail => { - zcu.failed_analysis.putAssumeCapacityNoClobber( - InternPool.AnalUnit.wrap(.{ .decl = decl_index }), - object.dg.error_msg.?, - ); + zcu.failed_codegen.putAssumeCapacityNoClobber(nav_index, object.dg.error_msg.?); return; }, else => |e| return e, @@ -383,12 +369,12 @@ pub fn updateDecl(self: *C, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) gop.value_ptr.fwd_decl = try self.addString(object.dg.fwd_decl.items); } -pub fn updateDeclLineNumber(self: *C, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { +pub fn updateNavLineNumber(self: *C, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void { // The C backend does not have the ability to fix line numbers without re-generating // the entire Decl. _ = self; _ = pt; - _ = decl_index; + _ = nav_index; } pub fn flush(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void { @@ -422,12 +408,13 @@ pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: const comp = self.base.comp; const gpa = comp.gpa; const zcu = self.base.comp.module.?; + const ip = &zcu.intern_pool; const pt: Zcu.PerThread = .{ .zcu = zcu, .tid = tid }; { var i: usize = 0; - while (i < self.anon_decls.count()) : (i += 1) { - try updateAnonDecl(self, pt, i); + while (i < self.uavs.count()) : (i += 1) { + try self.updateUav(pt, i); } } @@ -484,30 +471,28 @@ pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: } } - for (self.anon_decls.keys(), self.anon_decls.values()) |value, *decl_block| try self.flushDeclBlock( + for (self.uavs.keys(), self.uavs.values()) |uav, *av_block| try self.flushAvBlock( pt, zcu.root_mod, &f, - decl_block, - self.exported_values.getPtr(value), + av_block, + self.exported_uavs.getPtr(uav), export_names, .none, ); - for (self.decl_table.keys(), self.decl_table.values()) |decl_index, *decl_block| { - const decl = zcu.declPtr(decl_index); - const extern_name = if (decl.isExtern(zcu)) 
decl.name.toOptional() else .none; - const mod = zcu.namespacePtr(decl.src_namespace).fileScope(zcu).mod; - try self.flushDeclBlock( - pt, - mod, - &f, - decl_block, - self.exported_decls.getPtr(decl_index), - export_names, - extern_name, - ); - } + for (self.navs.keys(), self.navs.values()) |nav, *av_block| try self.flushAvBlock( + pt, + zcu.navFileScope(nav).mod, + &f, + av_block, + self.exported_navs.getPtr(nav), + export_names, + if (ip.indexToKey(zcu.navValue(nav).toIntern()) == .@"extern") + ip.getNav(nav).name.toOptional() + else + .none, + ); } { @@ -516,12 +501,12 @@ pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: try f.ctype_pool.init(gpa); try self.flushCTypes(zcu, &f, .flush, &f.lazy_ctype_pool); - for (self.anon_decls.keys(), self.anon_decls.values()) |anon_decl, decl_block| { - try self.flushCTypes(zcu, &f, .{ .anon = anon_decl }, &decl_block.ctype_pool); + for (self.uavs.keys(), self.uavs.values()) |uav, av_block| { + try self.flushCTypes(zcu, &f, .{ .uav = uav }, &av_block.ctype_pool); } - for (self.decl_table.keys(), self.decl_table.values()) |decl_index, decl_block| { - try self.flushCTypes(zcu, &f, .{ .decl = decl_index }, &decl_block.ctype_pool); + for (self.navs.keys(), self.navs.values()) |nav, av_block| { + try self.flushCTypes(zcu, &f, .{ .nav = nav }, &av_block.ctype_pool); } } @@ -539,26 +524,21 @@ pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: f.file_size += lazy_fwd_decl_len; // Now the code. 
- try f.all_buffers.ensureUnusedCapacity(gpa, 1 + (self.anon_decls.count() + self.decl_table.count()) * 2); + try f.all_buffers.ensureUnusedCapacity(gpa, 1 + (self.uavs.count() + self.navs.count()) * 2); f.appendBufAssumeCapacity(self.lazy_code_buf.items); - for (self.anon_decls.keys(), self.anon_decls.values()) |anon_decl, decl_block| f.appendCodeAssumeCapacity( - if (self.exported_values.contains(anon_decl)) - .default - else switch (zcu.intern_pool.indexToKey(anon_decl)) { - .extern_func => .zig_extern, - .variable => |variable| if (variable.is_extern) .zig_extern else .static, + for (self.uavs.keys(), self.uavs.values()) |uav, av_block| f.appendCodeAssumeCapacity( + if (self.exported_uavs.contains(uav)) .default else switch (ip.indexToKey(uav)) { + .@"extern" => .zig_extern, else => .static, }, - self.getString(decl_block.code), + self.getString(av_block.code), ); - for (self.decl_table.keys(), self.decl_table.values()) |decl_index, decl_block| f.appendCodeAssumeCapacity( - if (self.exported_decls.contains(decl_index)) - .default - else if (zcu.declPtr(decl_index).isExtern(zcu)) - .zig_extern - else - .static, - self.getString(decl_block.code), + for (self.navs.keys(), self.navs.values()) |nav, av_block| f.appendCodeAssumeCapacity( + if (self.exported_navs.contains(nav)) .default else switch (ip.indexToKey(zcu.navValue(nav).toIntern())) { + .@"extern" => .zig_extern, + else => .static, + }, + self.getString(av_block.code), ); const file = self.base.file.?; @@ -689,16 +669,16 @@ fn flushErrDecls(self: *C, pt: Zcu.PerThread, ctype_pool: *codegen.CType.Pool) F .fwd_decl = fwd_decl.toManaged(gpa), .ctype_pool = ctype_pool.*, .scratch = .{}, - .anon_decl_deps = self.anon_decls, - .aligned_anon_decls = self.aligned_anon_decls, + .uav_deps = self.uavs, + .aligned_uavs = self.aligned_uavs, }, .code = code.toManaged(gpa), .indent_writer = undefined, // set later so we can get a pointer to object.code }; object.indent_writer = .{ .underlying_writer = object.code.writer() 
}; defer { - self.anon_decls = object.dg.anon_decl_deps; - self.aligned_anon_decls = object.dg.aligned_anon_decls; + self.uavs = object.dg.uav_deps; + self.aligned_uavs = object.dg.aligned_uavs; fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged(); ctype_pool.* = object.dg.ctype_pool.move(); ctype_pool.freeUnusedCapacity(gpa); @@ -736,8 +716,8 @@ fn flushLazyFn( .fwd_decl = fwd_decl.toManaged(gpa), .ctype_pool = ctype_pool.*, .scratch = .{}, - .anon_decl_deps = .{}, - .aligned_anon_decls = .{}, + .uav_deps = .{}, + .aligned_uavs = .{}, }, .code = code.toManaged(gpa), .indent_writer = undefined, // set later so we can get a pointer to object.code @@ -746,8 +726,8 @@ fn flushLazyFn( defer { // If this assert trips just handle the anon_decl_deps the same as // `updateFunc()` does. - assert(object.dg.anon_decl_deps.count() == 0); - assert(object.dg.aligned_anon_decls.count() == 0); + assert(object.dg.uav_deps.count() == 0); + assert(object.dg.aligned_uavs.count() == 0); fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged(); ctype_pool.* = object.dg.ctype_pool.move(); ctype_pool.freeUnusedCapacity(gpa); @@ -781,31 +761,33 @@ fn flushLazyFns( } } -fn flushDeclBlock( +fn flushAvBlock( self: *C, pt: Zcu.PerThread, mod: *Module, f: *Flush, - decl_block: *const DeclBlock, + av_block: *const AvBlock, exported_block: ?*const ExportedBlock, export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void), extern_name: InternPool.OptionalNullTerminatedString, ) FlushDeclError!void { const gpa = self.base.comp.gpa; - try self.flushLazyFns(pt, mod, f, &decl_block.ctype_pool, decl_block.lazy_fns); + try self.flushLazyFns(pt, mod, f, &av_block.ctype_pool, av_block.lazy_fns); try f.all_buffers.ensureUnusedCapacity(gpa, 1); // avoid emitting extern decls that are already exported if (extern_name.unwrap()) |name| if (export_names.contains(name)) return; f.appendBufAssumeCapacity(self.getString(if (exported_block) |exported| exported.fwd_decl else - decl_block.fwd_decl)); + 
av_block.fwd_decl)); } pub fn flushEmitH(zcu: *Zcu) !void { const tracy = trace(@src()); defer tracy.end(); + if (true) return; // emit-h is regressed + const emit_h = zcu.emit_h orelse return; // We collect a list of buffers to write, and write them all at once with pwritev 😎 @@ -854,17 +836,17 @@ pub fn updateExports( const zcu = pt.zcu; const gpa = zcu.gpa; const mod, const pass: codegen.DeclGen.Pass, const decl_block, const exported_block = switch (exported) { - .decl_index => |decl_index| .{ - zcu.namespacePtr(zcu.declPtr(decl_index).src_namespace).fileScope(zcu).mod, - .{ .decl = decl_index }, - self.decl_table.getPtr(decl_index).?, - (try self.exported_decls.getOrPut(gpa, decl_index)).value_ptr, + .nav => |nav| .{ + zcu.navFileScope(nav).mod, + .{ .nav = nav }, + self.navs.getPtr(nav).?, + (try self.exported_navs.getOrPut(gpa, nav)).value_ptr, }, - .value => |value| .{ + .uav => |uav| .{ zcu.root_mod, - .{ .anon = value }, - self.anon_decls.getPtr(value).?, - (try self.exported_values.getOrPut(gpa, value)).value_ptr, + .{ .uav = uav }, + self.uavs.getPtr(uav).?, + (try self.exported_uavs.getOrPut(gpa, uav)).value_ptr, }, }; const ctype_pool = &decl_block.ctype_pool; @@ -880,12 +862,12 @@ pub fn updateExports( .fwd_decl = fwd_decl.toManaged(gpa), .ctype_pool = decl_block.ctype_pool, .scratch = .{}, - .anon_decl_deps = .{}, - .aligned_anon_decls = .{}, + .uav_deps = .{}, + .aligned_uavs = .{}, }; defer { - assert(dg.anon_decl_deps.count() == 0); - assert(dg.aligned_anon_decls.count() == 0); + assert(dg.uav_deps.count() == 0); + assert(dg.aligned_uavs.count() == 0); fwd_decl.* = dg.fwd_decl.moveToUnmanaged(); ctype_pool.* = dg.ctype_pool.move(); ctype_pool.freeUnusedCapacity(gpa); @@ -901,7 +883,7 @@ pub fn deleteExport( _: InternPool.NullTerminatedString, ) void { switch (exported) { - .decl_index => |decl_index| _ = self.exported_decls.swapRemove(decl_index), - .value => |value| _ = self.exported_values.swapRemove(value), + .nav => |nav| _ = 
self.exported_navs.swapRemove(nav), + .uav => |uav| _ = self.exported_uavs.swapRemove(uav), } } diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 54e8504d00de..73822dfec86e 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -65,8 +65,8 @@ imports_count_dirty: bool = true, /// Table of tracked LazySymbols. lazy_syms: LazySymbolTable = .{}, -/// Table of tracked Decls. -decls: DeclTable = .{}, +/// Table of tracked `Nav`s. +navs: NavTable = .{}, /// List of atoms that are either synthetic or map directly to the Zig source program. atoms: std.ArrayListUnmanaged(Atom) = .{}, @@ -74,27 +74,7 @@ atoms: std.ArrayListUnmanaged(Atom) = .{}, /// Table of atoms indexed by the symbol index. atom_by_index_table: std.AutoHashMapUnmanaged(u32, Atom.Index) = .{}, -/// Table of unnamed constants associated with a parent `Decl`. -/// We store them here so that we can free the constants whenever the `Decl` -/// needs updating or is freed. -/// -/// For example, -/// -/// ```zig -/// const Foo = struct{ -/// a: u8, -/// }; -/// -/// pub fn main() void { -/// var foo = Foo{ .a = 1 }; -/// _ = foo; -/// } -/// ``` -/// -/// value assigned to label `foo` is an unnamed constant belonging/associated -/// with `Decl` `main`, and lives as long as that `Decl`. -unnamed_const_atoms: UnnamedConstTable = .{}, -anon_decls: AnonDeclTable = .{}, +uavs: UavTable = .{}, /// A table of relocations indexed by the owning them `Atom`. 
/// Note that once we refactor `Atom`'s lifetime and ownership rules, @@ -120,11 +100,10 @@ const HotUpdateState = struct { loaded_base_address: ?std.os.windows.HMODULE = null, }; -const DeclTable = std.AutoArrayHashMapUnmanaged(InternPool.DeclIndex, DeclMetadata); -const AnonDeclTable = std.AutoHashMapUnmanaged(InternPool.Index, DeclMetadata); +const NavTable = std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, AvMetadata); +const UavTable = std.AutoHashMapUnmanaged(InternPool.Index, AvMetadata); const RelocTable = std.AutoArrayHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Relocation)); const BaseRelocationTable = std.AutoArrayHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(u32)); -const UnnamedConstTable = std.AutoArrayHashMapUnmanaged(InternPool.DeclIndex, std.ArrayListUnmanaged(Atom.Index)); const default_file_alignment: u16 = 0x200; const default_size_of_stack_reserve: u32 = 0x1000000; @@ -155,7 +134,7 @@ const Section = struct { free_list: std.ArrayListUnmanaged(Atom.Index) = .{}, }; -const LazySymbolTable = std.AutoArrayHashMapUnmanaged(InternPool.OptionalDeclIndex, LazySymbolMetadata); +const LazySymbolTable = std.AutoArrayHashMapUnmanaged(InternPool.Index, LazySymbolMetadata); const LazySymbolMetadata = struct { const State = enum { unused, pending_flush, flushed }; @@ -165,17 +144,17 @@ const LazySymbolMetadata = struct { rdata_state: State = .unused, }; -const DeclMetadata = struct { +const AvMetadata = struct { atom: Atom.Index, section: u16, /// A list of all exports aliases of this Decl. 
exports: std.ArrayListUnmanaged(u32) = .{}, - fn deinit(m: *DeclMetadata, allocator: Allocator) void { + fn deinit(m: *AvMetadata, allocator: Allocator) void { m.exports.deinit(allocator); } - fn getExport(m: DeclMetadata, coff_file: *const Coff, name: []const u8) ?u32 { + fn getExport(m: AvMetadata, coff_file: *const Coff, name: []const u8) ?u32 { for (m.exports.items) |exp| { if (mem.eql(u8, name, coff_file.getSymbolName(.{ .sym_index = exp, @@ -185,7 +164,7 @@ const DeclMetadata = struct { return null; } - fn getExportPtr(m: *DeclMetadata, coff_file: *Coff, name: []const u8) ?*u32 { + fn getExportPtr(m: *AvMetadata, coff_file: *Coff, name: []const u8) ?*u32 { for (m.exports.items) |*exp| { if (mem.eql(u8, name, coff_file.getSymbolName(.{ .sym_index = exp.*, @@ -486,24 +465,19 @@ pub fn deinit(self: *Coff) void { self.lazy_syms.deinit(gpa); - for (self.decls.values()) |*metadata| { + for (self.navs.values()) |*metadata| { metadata.deinit(gpa); } - self.decls.deinit(gpa); + self.navs.deinit(gpa); self.atom_by_index_table.deinit(gpa); - for (self.unnamed_const_atoms.values()) |*atoms| { - atoms.deinit(gpa); - } - self.unnamed_const_atoms.deinit(gpa); - { - var it = self.anon_decls.iterator(); + var it = self.uavs.iterator(); while (it.next()) |entry| { entry.value_ptr.exports.deinit(gpa); } - self.anon_decls.deinit(gpa); + self.uavs.deinit(gpa); } for (self.relocs.values()) |*relocs| { @@ -1132,23 +1106,20 @@ pub fn updateFunc(self: *Coff, pt: Zcu.PerThread, func_index: InternPool.Index, const tracy = trace(@src()); defer tracy.end(); - const mod = pt.zcu; - const func = mod.funcInfo(func_index); - const decl_index = func.owner_decl; - const decl = mod.declPtr(decl_index); + const zcu = pt.zcu; + const gpa = zcu.gpa; + const func = zcu.funcInfo(func_index); - const atom_index = try self.getOrCreateAtomForDecl(decl_index); - self.freeUnnamedConsts(decl_index); + const atom_index = try self.getOrCreateAtomForNav(func.owner_nav); Atom.freeRelocations(self, 
atom_index); - const gpa = self.base.comp.gpa; var code_buffer = std.ArrayList(u8).init(gpa); defer code_buffer.deinit(); const res = try codegen.generateFunction( &self.base, pt, - decl.navSrcLoc(mod), + zcu.navSrcLoc(func.owner_nav), func_index, air, liveness, @@ -1158,45 +1129,16 @@ pub fn updateFunc(self: *Coff, pt: Zcu.PerThread, func_index: InternPool.Index, const code = switch (res) { .ok => code_buffer.items, .fail => |em| { - func.setAnalysisState(&mod.intern_pool, .codegen_failure); - try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); + try zcu.failed_codegen.put(zcu.gpa, func.owner_nav, em); return; }, }; - try self.updateDeclCode(pt, decl_index, code, .FUNCTION); + try self.updateNavCode(pt, func.owner_nav, code, .FUNCTION); // Exports will be updated by `Zcu.processExports` after the update. } -pub fn lowerUnnamedConst(self: *Coff, pt: Zcu.PerThread, val: Value, decl_index: InternPool.DeclIndex) !u32 { - const mod = pt.zcu; - const gpa = mod.gpa; - const decl = mod.declPtr(decl_index); - const gop = try self.unnamed_const_atoms.getOrPut(gpa, decl_index); - if (!gop.found_existing) { - gop.value_ptr.* = .{}; - } - const unnamed_consts = gop.value_ptr; - const index = unnamed_consts.items.len; - const sym_name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ - decl.fqn.fmt(&mod.intern_pool), index, - }); - defer gpa.free(sym_name); - const ty = val.typeOf(mod); - const atom_index = switch (try self.lowerConst(pt, sym_name, val, ty.abiAlignment(pt), self.rdata_section_index.?, decl.navSrcLoc(mod))) { - .ok => |atom_index| atom_index, - .fail => |em| { - decl.analysis = .codegen_failure; - try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); - log.err("{s}", .{em.msg}); - return error.CodegenFail; - }, - }; - try unnamed_consts.append(gpa, atom_index); - return self.getAtom(atom_index).getSymbolIndex().?; -} - const LowerConstResult = union(enum) { ok: Atom.Index, fail: *Module.ErrorMsg, @@ 
-1246,57 +1188,62 @@ fn lowerConst( return .{ .ok = atom_index }; } -pub fn updateDecl( +pub fn updateNav( self: *Coff, pt: Zcu.PerThread, - decl_index: InternPool.DeclIndex, -) link.File.UpdateDeclError!void { - const mod = pt.zcu; + nav_index: InternPool.Nav.Index, +) link.File.UpdateNavError!void { if (build_options.skip_non_native and builtin.object_format != .coff) { @panic("Attempted to compile for object format that was disabled by build configuration"); } - if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(pt, decl_index); + if (self.llvm_object) |llvm_object| return llvm_object.updateNav(pt, nav_index); const tracy = trace(@src()); defer tracy.end(); - const decl = mod.declPtr(decl_index); - - if (decl.val.getExternFunc(mod)) |_| { - return; - } - - const gpa = self.base.comp.gpa; - if (decl.isExtern(mod)) { - // TODO make this part of getGlobalSymbol - const variable = decl.getOwnedVariable(mod).?; - const name = decl.name.toSlice(&mod.intern_pool); - const lib_name = variable.lib_name.toSlice(&mod.intern_pool); - const global_index = try self.getGlobalSymbol(name, lib_name); - try self.need_got_table.put(gpa, global_index, {}); - return; - } + const zcu = pt.zcu; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + const nav = ip.getNav(nav_index); + + const init_val = switch (ip.indexToKey(nav.status.resolved.val)) { + .variable => |variable| variable.init, + .@"extern" => |@"extern"| { + if (ip.isFunctionType(nav.typeOf(ip))) return; + // TODO make this part of getGlobalSymbol + const name = nav.name.toSlice(ip); + const lib_name = @"extern".lib_name.toSlice(ip); + const global_index = try self.getGlobalSymbol(name, lib_name); + try self.need_got_table.put(gpa, global_index, {}); + return; + }, + else => nav.status.resolved.val, + }; - const atom_index = try self.getOrCreateAtomForDecl(decl_index); + const atom_index = try self.getOrCreateAtomForNav(nav_index); Atom.freeRelocations(self, atom_index); const atom = 
self.getAtom(atom_index); var code_buffer = std.ArrayList(u8).init(gpa); defer code_buffer.deinit(); - const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val; - const res = try codegen.generateSymbol(&self.base, pt, decl.navSrcLoc(mod), decl_val, &code_buffer, .none, .{ - .parent_atom_index = atom.getSymbolIndex().?, - }); + const res = try codegen.generateSymbol( + &self.base, + pt, + zcu.navSrcLoc(nav_index), + Value.fromInterned(init_val), + &code_buffer, + .none, + .{ .parent_atom_index = atom.getSymbolIndex().? }, + ); const code = switch (res) { .ok => code_buffer.items, .fail => |em| { - decl.analysis = .codegen_failure; - try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); + try zcu.failed_codegen.put(gpa, nav_index, em); return; }, }; - try self.updateDeclCode(pt, decl_index, code, .NULL); + try self.updateNavCode(pt, nav_index, code, .NULL); // Exports will be updated by `Zcu.processExports` after the update. 
} @@ -1317,14 +1264,14 @@ fn updateLazySymbolAtom( const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{}", .{ @tagName(sym.kind), - sym.ty.fmt(pt), + Type.fromInterned(sym.ty).fmt(pt), }); defer gpa.free(name); const atom = self.getAtomPtr(atom_index); const local_sym_index = atom.getSymbolIndex().?; - const src = sym.ty.srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded; + const src = Type.fromInterned(sym.ty).srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded; const res = try codegen.generateLazySymbol( &self.base, pt, @@ -1362,52 +1309,55 @@ fn updateLazySymbolAtom( try self.writeAtom(atom_index, code); } -pub fn getOrCreateAtomForLazySymbol(self: *Coff, pt: Zcu.PerThread, sym: link.File.LazySymbol) !Atom.Index { - const gpa = self.base.comp.gpa; - const mod = self.base.comp.module.?; - const gop = try self.lazy_syms.getOrPut(gpa, sym.getDecl(mod)); +pub fn getOrCreateAtomForLazySymbol( + self: *Coff, + pt: Zcu.PerThread, + lazy_sym: link.File.LazySymbol, +) !Atom.Index { + const gop = try self.lazy_syms.getOrPut(pt.zcu.gpa, lazy_sym.ty); errdefer _ = if (!gop.found_existing) self.lazy_syms.pop(); if (!gop.found_existing) gop.value_ptr.* = .{}; - const metadata: struct { atom: *Atom.Index, state: *LazySymbolMetadata.State } = switch (sym.kind) { - .code => .{ .atom = &gop.value_ptr.text_atom, .state = &gop.value_ptr.text_state }, - .const_data => .{ .atom = &gop.value_ptr.rdata_atom, .state = &gop.value_ptr.rdata_state }, + const atom_ptr, const state_ptr = switch (lazy_sym.kind) { + .code => .{ &gop.value_ptr.text_atom, &gop.value_ptr.text_state }, + .const_data => .{ &gop.value_ptr.rdata_atom, &gop.value_ptr.rdata_state }, }; - switch (metadata.state.*) { - .unused => metadata.atom.* = try self.createAtom(), - .pending_flush => return metadata.atom.*, + switch (state_ptr.*) { + .unused => atom_ptr.* = try self.createAtom(), + .pending_flush => return atom_ptr.*, .flushed => {}, } - metadata.state.* = .pending_flush; - const atom = metadata.atom.*; + 
state_ptr.* = .pending_flush; + const atom = atom_ptr.*; // anyerror needs to be deferred until flushModule - if (sym.getDecl(mod) != .none) try self.updateLazySymbolAtom(pt, sym, atom, switch (sym.kind) { + if (lazy_sym.ty != .anyerror_type) try self.updateLazySymbolAtom(pt, lazy_sym, atom, switch (lazy_sym.kind) { .code => self.text_section_index.?, .const_data => self.rdata_section_index.?, }); return atom; } -pub fn getOrCreateAtomForDecl(self: *Coff, decl_index: InternPool.DeclIndex) !Atom.Index { +pub fn getOrCreateAtomForNav(self: *Coff, nav_index: InternPool.Nav.Index) !Atom.Index { const gpa = self.base.comp.gpa; - const gop = try self.decls.getOrPut(gpa, decl_index); + const gop = try self.navs.getOrPut(gpa, nav_index); if (!gop.found_existing) { gop.value_ptr.* = .{ .atom = try self.createAtom(), - .section = self.getDeclOutputSection(decl_index), + .section = self.getNavOutputSection(nav_index), .exports = .{}, }; } return gop.value_ptr.atom; } -fn getDeclOutputSection(self: *Coff, decl_index: InternPool.DeclIndex) u16 { - const decl = self.base.comp.module.?.declPtr(decl_index); - const mod = self.base.comp.module.?; - const ty = decl.typeOf(mod); - const zig_ty = ty.zigTypeTag(mod); - const val = decl.val; +fn getNavOutputSection(self: *Coff, nav_index: InternPool.Nav.Index) u16 { + const zcu = self.base.comp.module.?; + const ip = &zcu.intern_pool; + const nav = ip.getNav(nav_index); + const ty = Type.fromInterned(nav.typeOf(ip)); + const zig_ty = ty.zigTypeTag(zcu); + const val = Value.fromInterned(nav.status.resolved.val); const index: u16 = blk: { - if (val.isUndefDeep(mod)) { + if (val.isUndefDeep(zcu)) { // TODO in release-fast and release-small, we should put undef in .bss break :blk self.data_section_index.?; } @@ -1416,7 +1366,7 @@ fn getDeclOutputSection(self: *Coff, decl_index: InternPool.DeclIndex) u16 { // TODO: what if this is a function pointer? 
.Fn => break :blk self.text_section_index.?, else => { - if (val.getVariable(mod)) |_| { + if (val.getVariable(zcu)) |_| { break :blk self.data_section_index.?; } break :blk self.rdata_section_index.?; @@ -1426,31 +1376,41 @@ fn getDeclOutputSection(self: *Coff, decl_index: InternPool.DeclIndex) u16 { return index; } -fn updateDeclCode(self: *Coff, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, code: []u8, complex_type: coff.ComplexType) !void { - const mod = pt.zcu; - const decl = mod.declPtr(decl_index); +fn updateNavCode( + self: *Coff, + pt: Zcu.PerThread, + nav_index: InternPool.Nav.Index, + code: []u8, + complex_type: coff.ComplexType, +) !void { + const zcu = pt.zcu; + const ip = &zcu.intern_pool; + const nav = ip.getNav(nav_index); - log.debug("updateDeclCode {}{*}", .{ decl.fqn.fmt(&mod.intern_pool), decl }); - const required_alignment: u32 = @intCast(decl.getAlignment(pt).toByteUnits() orelse 0); + log.debug("updateNavCode {} 0x{x}", .{ nav.fqn.fmt(ip), nav_index }); - const decl_metadata = self.decls.get(decl_index).?; - const atom_index = decl_metadata.atom; + const required_alignment = pt.navAlignment(nav_index).max( + target_util.minFunctionAlignment(zcu.navFileScope(nav_index).mod.resolved_target.result), + ); + + const nav_metadata = self.navs.get(nav_index).?; + const atom_index = nav_metadata.atom; const atom = self.getAtom(atom_index); const sym_index = atom.getSymbolIndex().?; - const sect_index = decl_metadata.section; + const sect_index = nav_metadata.section; const code_len = @as(u32, @intCast(code.len)); if (atom.size != 0) { const sym = atom.getSymbolPtr(self); - try self.setSymbolName(sym, decl.fqn.toSlice(&mod.intern_pool)); + try self.setSymbolName(sym, nav.fqn.toSlice(ip)); sym.section_number = @as(coff.SectionNumber, @enumFromInt(sect_index + 1)); sym.type = .{ .complex_type = complex_type, .base_type = .NULL }; const capacity = atom.capacity(self); - const need_realloc = code.len > capacity or !mem.isAlignedGeneric(u64, 
sym.value, required_alignment); + const need_realloc = code.len > capacity or !required_alignment.check(sym.value); if (need_realloc) { - const vaddr = try self.growAtom(atom_index, code_len, required_alignment); - log.debug("growing {} from 0x{x} to 0x{x}", .{ decl.fqn.fmt(&mod.intern_pool), sym.value, vaddr }); + const vaddr = try self.growAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits() orelse 0)); + log.debug("growing {} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), sym.value, vaddr }); log.debug(" (required alignment 0x{x}", .{required_alignment}); if (vaddr != sym.value) { @@ -1466,13 +1426,13 @@ fn updateDeclCode(self: *Coff, pt: Zcu.PerThread, decl_index: InternPool.DeclInd self.getAtomPtr(atom_index).size = code_len; } else { const sym = atom.getSymbolPtr(self); - try self.setSymbolName(sym, decl.fqn.toSlice(&mod.intern_pool)); + try self.setSymbolName(sym, nav.fqn.toSlice(ip)); sym.section_number = @as(coff.SectionNumber, @enumFromInt(sect_index + 1)); sym.type = .{ .complex_type = complex_type, .base_type = .NULL }; - const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment); + const vaddr = try self.allocateAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits() orelse 0)); errdefer self.freeAtom(atom_index); - log.debug("allocated atom for {} at 0x{x}", .{ decl.fqn.fmt(&mod.intern_pool), vaddr }); + log.debug("allocated atom for {} at 0x{x}", .{ nav.fqn.fmt(ip), vaddr }); self.getAtomPtr(atom_index).size = code_len; sym.value = vaddr; @@ -1482,28 +1442,15 @@ fn updateDeclCode(self: *Coff, pt: Zcu.PerThread, decl_index: InternPool.DeclInd try self.writeAtom(atom_index, code); } -fn freeUnnamedConsts(self: *Coff, decl_index: InternPool.DeclIndex) void { - const gpa = self.base.comp.gpa; - const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return; - for (unnamed_consts.items) |atom_index| { - self.freeAtom(atom_index); - } - unnamed_consts.clearAndFree(gpa); -} - -pub fn 
freeDecl(self: *Coff, decl_index: InternPool.DeclIndex) void { - if (self.llvm_object) |llvm_object| return llvm_object.freeDecl(decl_index); +pub fn freeNav(self: *Coff, nav_index: InternPool.NavIndex) void { + if (self.llvm_object) |llvm_object| return llvm_object.freeNav(nav_index); const gpa = self.base.comp.gpa; - const mod = self.base.comp.module.?; - const decl = mod.declPtr(decl_index); + log.debug("freeDecl 0x{x}", .{nav_index}); - log.debug("freeDecl {*}", .{decl}); - - if (self.decls.fetchOrderedRemove(decl_index)) |const_kv| { + if (self.decls.fetchOrderedRemove(nav_index)) |const_kv| { var kv = const_kv; self.freeAtom(kv.value.atom); - self.freeUnnamedConsts(decl_index); kv.value.exports.deinit(gpa); } } @@ -1528,20 +1475,21 @@ pub fn updateExports( // detect the default subsystem. for (export_indices) |export_idx| { const exp = mod.all_exports.items[export_idx]; - const exported_decl_index = switch (exp.exported) { - .decl_index => |i| i, - .value => continue, + const exported_nav_index = switch (exp.exported) { + .nav => |nav| nav, + .uav => continue, }; - const exported_decl = mod.declPtr(exported_decl_index); - if (exported_decl.getOwnedFunction(mod) == null) continue; - const winapi_cc = switch (target.cpu.arch) { - .x86 => std.builtin.CallingConvention.Stdcall, - else => std.builtin.CallingConvention.C, + const exported_nav = ip.getNav(exported_nav_index); + const exported_ty = exported_nav.typeOf(ip); + if (!ip.isFunctionType(exported_ty)) continue; + const winapi_cc: std.builtin.CallingConvention = switch (target.cpu.arch) { + .x86 => .Stdcall, + else => .C, }; - const decl_cc = exported_decl.typeOf(mod).fnCallingConvention(mod); - if (decl_cc == .C and exp.opts.name.eqlSlice("main", ip) and comp.config.link_libc) { + const exported_cc = Type.fromInterned(exported_ty).fnCallingConvention(mod); + if (exported_cc == .C and exp.opts.name.eqlSlice("main", ip) and comp.config.link_libc) { mod.stage1_flags.have_c_main = true; - } else if (decl_cc == 
winapi_cc and target.os.tag == .windows) { + } else if (exported_cc == winapi_cc and target.os.tag == .windows) { if (exp.opts.name.eqlSlice("WinMain", ip)) { mod.stage1_flags.have_winmain = true; } else if (exp.opts.name.eqlSlice("wWinMain", ip)) { @@ -1562,15 +1510,15 @@ pub fn updateExports( const gpa = comp.gpa; const metadata = switch (exported) { - .decl_index => |decl_index| blk: { - _ = try self.getOrCreateAtomForDecl(decl_index); - break :blk self.decls.getPtr(decl_index).?; + .nav => |nav| blk: { + _ = try self.getOrCreateAtomForNav(nav); + break :blk self.navs.getPtr(nav).?; }, - .value => |value| self.anon_decls.getPtr(value) orelse blk: { + .uav => |uav| self.uavs.getPtr(uav) orelse blk: { const first_exp = mod.all_exports.items[export_indices[0]]; - const res = try self.lowerAnonDecl(pt, value, .none, first_exp.src); + const res = try self.lowerUav(pt, uav, .none, first_exp.src); switch (res) { - .ok => {}, + .mcv => {}, .fail => |em| { // TODO maybe it's enough to return an error here and let Module.processExportsInner // handle the error? @@ -1579,7 +1527,7 @@ pub fn updateExports( return; }, } - break :blk self.anon_decls.getPtr(value).?; + break :blk self.uavs.getPtr(uav).?; }, }; const atom_index = metadata.atom; @@ -1654,9 +1602,9 @@ pub fn deleteExport( ) void { if (self.llvm_object) |_| return; const metadata = switch (exported) { - .decl_index => |decl_index| self.decls.getPtr(decl_index) orelse return, - .value => |value| self.anon_decls.getPtr(value) orelse return, - }; + .nav => |nav| self.navs.getPtr(nav), + .uav => |uav| self.uavs.getPtr(uav), + } orelse return; const mod = self.base.comp.module.?; const name_slice = name.toSlice(&mod.intern_pool); const sym_index = metadata.getExportPtr(self, name_slice) orelse return; @@ -1748,7 +1696,7 @@ pub fn flushModule(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no // anyerror needs to wait for everything to be flushed. 
if (metadata.text_state != .unused) self.updateLazySymbolAtom( pt, - link.File.LazySymbol.initDecl(.code, null, pt.zcu), + .{ .kind = .code, .ty = .anyerror_type }, metadata.text_atom, self.text_section_index.?, ) catch |err| return switch (err) { @@ -1757,7 +1705,7 @@ pub fn flushModule(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no }; if (metadata.rdata_state != .unused) self.updateLazySymbolAtom( pt, - link.File.LazySymbol.initDecl(.const_data, null, pt.zcu), + .{ .kind = .const_data, .ty = .anyerror_type }, metadata.rdata_atom, self.rdata_section_index.?, ) catch |err| return switch (err) { @@ -1856,22 +1804,20 @@ pub fn flushModule(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no assert(!self.imports_count_dirty); } -pub fn getDeclVAddr(self: *Coff, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo) !u64 { +pub fn getNavVAddr( + self: *Coff, + pt: Zcu.PerThread, + nav_index: InternPool.Nav.Index, + reloc_info: link.File.RelocInfo, +) !u64 { assert(self.llvm_object == null); const zcu = pt.zcu; const ip = &zcu.intern_pool; - const decl = zcu.declPtr(decl_index); - log.debug("getDeclVAddr {}({d})", .{ decl.fqn.fmt(ip), decl_index }); - const sym_index = if (decl.isExtern(zcu)) blk: { - const name = decl.name.toSlice(ip); - const lib_name = if (decl.getOwnedExternFunc(zcu)) |ext_fn| - ext_fn.lib_name.toSlice(ip) - else - decl.getOwnedVariable(zcu).?.lib_name.toSlice(ip); - break :blk try self.getGlobalSymbol(name, lib_name); - } else blk: { - const this_atom_index = try self.getOrCreateAtomForDecl(decl_index); - break :blk self.getAtom(this_atom_index).getSymbolIndex().?; + const nav = ip.getNav(nav_index); + log.debug("getNavVAddr {}({d})", .{ nav.fqn.fmt(ip), nav_index }); + const sym_index = switch (ip.indexToKey(nav.status.resolved.val)) { + .@"extern" => |@"extern"| try self.getGlobalSymbol(nav.name.toSlice(ip), @"extern".lib_name.toSlice(ip)), + else => self.getAtom(try 
self.getOrCreateAtomForNav(nav_index)).getSymbolIndex().?, }; const atom_index = self.getAtomIndexForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?; const target = SymbolWithLoc{ .sym_index = sym_index, .file = null }; @@ -1888,36 +1834,36 @@ pub fn getDeclVAddr(self: *Coff, pt: Zcu.PerThread, decl_index: InternPool.DeclI return 0; } -pub fn lowerAnonDecl( +pub fn lowerUav( self: *Coff, pt: Zcu.PerThread, - decl_val: InternPool.Index, + uav: InternPool.Index, explicit_alignment: InternPool.Alignment, src_loc: Module.LazySrcLoc, -) !codegen.Result { - const gpa = self.base.comp.gpa; - const mod = self.base.comp.module.?; - const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val)); - const decl_alignment = switch (explicit_alignment) { - .none => ty.abiAlignment(pt), +) !codegen.GenResult { + const zcu = pt.zcu; + const gpa = zcu.gpa; + const val = Value.fromInterned(uav); + const uav_alignment = switch (explicit_alignment) { + .none => val.typeOf(zcu).abiAlignment(pt), else => explicit_alignment, }; - if (self.anon_decls.get(decl_val)) |metadata| { - const existing_addr = self.getAtom(metadata.atom).getSymbol(self).value; - if (decl_alignment.check(existing_addr)) - return .ok; + if (self.uavs.get(uav)) |metadata| { + const atom = self.getAtom(metadata.atom); + const existing_addr = atom.getSymbol(self).value; + if (uav_alignment.check(existing_addr)) + return .{ .mcv = .{ .load_direct = atom.getSymbolIndex().? 
} }; } - const val = Value.fromInterned(decl_val); var name_buf: [32]u8 = undefined; const name = std.fmt.bufPrint(&name_buf, "__anon_{d}", .{ - @intFromEnum(decl_val), + @intFromEnum(uav), }) catch unreachable; const res = self.lowerConst( pt, name, val, - decl_alignment, + uav_alignment, self.rdata_section_index.?, src_loc, ) catch |err| switch (err) { @@ -1933,14 +1879,23 @@ pub fn lowerAnonDecl( .ok => |atom_index| atom_index, .fail => |em| return .{ .fail = em }, }; - try self.anon_decls.put(gpa, decl_val, .{ .atom = atom_index, .section = self.rdata_section_index.? }); - return .ok; + try self.uavs.put(gpa, uav, .{ + .atom = atom_index, + .section = self.rdata_section_index.?, + }); + return .{ .mcv = .{ + .load_direct = self.getAtom(atom_index).getSymbolIndex().?, + } }; } -pub fn getAnonDeclVAddr(self: *Coff, decl_val: InternPool.Index, reloc_info: link.File.RelocInfo) !u64 { +pub fn getUavVAddr( + self: *Coff, + uav: InternPool.Index, + reloc_info: link.File.RelocInfo, +) !u64 { assert(self.llvm_object == null); - const this_atom_index = self.anon_decls.get(decl_val).?.atom; + const this_atom_index = self.uavs.get(uav).?.atom; const sym_index = self.getAtom(this_atom_index).getSymbolIndex().?; const atom_index = self.getAtomIndexForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?; const target = SymbolWithLoc{ .sym_index = sym_index, .file = null }; @@ -2760,6 +2715,7 @@ const Allocator = std.mem.Allocator; const codegen = @import("../codegen.zig"); const link = @import("../link.zig"); const lld = @import("Coff/lld.zig"); +const target_util = @import("../target.zig"); const trace = @import("../tracy.zig").trace; const Air = @import("../Air.zig"); @@ -2781,6 +2737,4 @@ const Value = @import("../Value.zig"); const AnalUnit = InternPool.AnalUnit; const dev = @import("../dev.zig"); -pub const base_tag: link.File.Tag = .coff; - const msdos_stub = @embedFile("msdos-stub.bin"); diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 
9f2781549c31..30b286cac4a8 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -9,7 +9,7 @@ src_fn_free_list: std.AutoHashMapUnmanaged(Atom.Index, void) = .{}, src_fn_first_index: ?Atom.Index = null, src_fn_last_index: ?Atom.Index = null, src_fns: std.ArrayListUnmanaged(Atom) = .{}, -src_fn_decls: AtomTable = .{}, +src_fn_navs: AtomTable = .{}, /// A list of `Atom`s whose corresponding .debug_info tags have surplus capacity. /// This is the same concept as `text_block_free_list`; see those doc comments. @@ -17,7 +17,7 @@ di_atom_free_list: std.AutoHashMapUnmanaged(Atom.Index, void) = .{}, di_atom_first_index: ?Atom.Index = null, di_atom_last_index: ?Atom.Index = null, di_atoms: std.ArrayListUnmanaged(Atom) = .{}, -di_atom_decls: AtomTable = .{}, +di_atom_navs: AtomTable = .{}, dbg_line_header: DbgLineHeader, @@ -27,7 +27,7 @@ abbrev_table_offset: ?u64 = null, /// Table of debug symbol names. strtab: StringTable = .{}, -/// Quick lookup array of all defined source files referenced by at least one Decl. +/// Quick lookup array of all defined source files referenced by at least one Nav. /// They will end up in the DWARF debug_line header as two lists: /// * []include_directory /// * []file_names @@ -35,13 +35,13 @@ di_files: std.AutoArrayHashMapUnmanaged(*const Zcu.File, void) = .{}, global_abbrev_relocs: std.ArrayListUnmanaged(AbbrevRelocation) = .{}, -const AtomTable = std.AutoHashMapUnmanaged(InternPool.DeclIndex, Atom.Index); +const AtomTable = std.AutoHashMapUnmanaged(InternPool.Nav.Index, Atom.Index); const Atom = struct { - /// Offset into .debug_info pointing to the tag for this Decl, or + /// Offset into .debug_info pointing to the tag for this Nav, or /// offset from the beginning of the Debug Line Program header that contains this function. 
off: u32, - /// Size of the .debug_info tag for this Decl, not including padding, or + /// Size of the .debug_info tag for this Nav, not including padding, or /// size of the line number program component belonging to this function, not /// including padding. len: u32, @@ -61,14 +61,14 @@ const DbgLineHeader = struct { opcode_base: u8, }; -/// Represents state of the analysed Decl. -/// Includes Decl's abbrev table of type Types, matching arena +/// Represents state of the analysed Nav. +/// Includes Nav's abbrev table of type Types, matching arena /// and a set of relocations that will be resolved once this -/// Decl's inner Atom is assigned an offset within the DWARF section. -pub const DeclState = struct { +/// Nav's inner Atom is assigned an offset within the DWARF section. +pub const NavState = struct { dwarf: *Dwarf, pt: Zcu.PerThread, - di_atom_decls: *const AtomTable, + di_atom_navs: *const AtomTable, dbg_line_func: InternPool.Index, dbg_line: std.ArrayList(u8), dbg_info: std.ArrayList(u8), @@ -78,20 +78,20 @@ pub const DeclState = struct { abbrev_relocs: std.ArrayListUnmanaged(AbbrevRelocation), exprloc_relocs: std.ArrayListUnmanaged(ExprlocRelocation), - pub fn deinit(self: *DeclState) void { - const gpa = self.dwarf.allocator; - self.dbg_line.deinit(); - self.dbg_info.deinit(); - self.abbrev_type_arena.deinit(); - self.abbrev_table.deinit(gpa); - self.abbrev_resolver.deinit(gpa); - self.abbrev_relocs.deinit(gpa); - self.exprloc_relocs.deinit(gpa); + pub fn deinit(ns: *NavState) void { + const gpa = ns.dwarf.allocator; + ns.dbg_line.deinit(); + ns.dbg_info.deinit(); + ns.abbrev_type_arena.deinit(); + ns.abbrev_table.deinit(gpa); + ns.abbrev_resolver.deinit(gpa); + ns.abbrev_relocs.deinit(gpa); + ns.exprloc_relocs.deinit(gpa); } /// Adds local type relocation of the form: @offset => @this + addend /// @this signifies the offset within the .debug_abbrev section of the containing atom. 
- fn addTypeRelocLocal(self: *DeclState, atom_index: Atom.Index, offset: u32, addend: u32) !void { + fn addTypeRelocLocal(self: *NavState, atom_index: Atom.Index, offset: u32, addend: u32) !void { log.debug("{x}: @this + {x}", .{ offset, addend }); try self.abbrev_relocs.append(self.dwarf.allocator, .{ .target = null, @@ -104,7 +104,7 @@ pub const DeclState = struct { /// Adds global type relocation of the form: @offset => @symbol + 0 /// @symbol signifies a type abbreviation posititioned somewhere in the .debug_abbrev section /// which we use as our target of the relocation. - fn addTypeRelocGlobal(self: *DeclState, atom_index: Atom.Index, ty: Type, offset: u32) !void { + fn addTypeRelocGlobal(self: *NavState, atom_index: Atom.Index, ty: Type, offset: u32) !void { const gpa = self.dwarf.allocator; const resolv = self.abbrev_resolver.get(ty.toIntern()) orelse blk: { const sym_index: u32 = @intCast(self.abbrev_table.items.len); @@ -127,7 +127,7 @@ pub const DeclState = struct { } fn addDbgInfoType( - self: *DeclState, + self: *NavState, pt: Zcu.PerThread, atom_index: Atom.Index, ty: Type, @@ -550,15 +550,15 @@ pub const DeclState = struct { }; pub fn genArgDbgInfo( - self: *DeclState, + self: *NavState, name: [:0]const u8, ty: Type, - owner_decl: InternPool.DeclIndex, + owner_nav: InternPool.Nav.Index, loc: DbgInfoLoc, ) error{OutOfMemory}!void { const pt = self.pt; const dbg_info = &self.dbg_info; - const atom_index = self.di_atom_decls.get(owner_decl).?; + const atom_index = self.di_atom_navs.get(owner_nav).?; const name_with_null = name.ptr[0 .. 
name.len + 1]; switch (loc) { @@ -639,6 +639,7 @@ pub const DeclState = struct { leb128.writeIleb128(dbg_info.writer(), info.offset) catch unreachable; }, .wasm_local => |value| { + @import("../dev.zig").check(.wasm_linker); const leb_size = link.File.Wasm.getUleb128Size(value); try dbg_info.ensureUnusedCapacity(3 + leb_size); // wasm locations are encoded as follow: @@ -665,15 +666,15 @@ pub const DeclState = struct { } pub fn genVarDbgInfo( - self: *DeclState, + self: *NavState, name: [:0]const u8, ty: Type, - owner_decl: InternPool.DeclIndex, + owner_nav: InternPool.Nav.Index, is_ptr: bool, loc: DbgInfoLoc, ) error{OutOfMemory}!void { const dbg_info = &self.dbg_info; - const atom_index = self.di_atom_decls.get(owner_decl).?; + const atom_index = self.di_atom_navs.get(owner_nav).?; const name_with_null = name.ptr[0 .. name.len + 1]; try dbg_info.append(@intFromEnum(AbbrevCode.variable)); const gpa = self.dwarf.allocator; @@ -881,7 +882,7 @@ pub const DeclState = struct { } pub fn advancePCAndLine( - self: *DeclState, + self: *NavState, delta_line: i33, delta_pc: u64, ) error{OutOfMemory}!void { @@ -921,21 +922,21 @@ pub const DeclState = struct { } } - pub fn setColumn(self: *DeclState, column: u32) error{OutOfMemory}!void { + pub fn setColumn(self: *NavState, column: u32) error{OutOfMemory}!void { try self.dbg_line.ensureUnusedCapacity(1 + 5); self.dbg_line.appendAssumeCapacity(DW.LNS.set_column); leb128.writeUleb128(self.dbg_line.writer(), column + 1) catch unreachable; } - pub fn setPrologueEnd(self: *DeclState) error{OutOfMemory}!void { + pub fn setPrologueEnd(self: *NavState) error{OutOfMemory}!void { try self.dbg_line.append(DW.LNS.set_prologue_end); } - pub fn setEpilogueBegin(self: *DeclState) error{OutOfMemory}!void { + pub fn setEpilogueBegin(self: *NavState) error{OutOfMemory}!void { try self.dbg_line.append(DW.LNS.set_epilogue_begin); } - pub fn setInlineFunc(self: *DeclState, func: InternPool.Index) error{OutOfMemory}!void { + pub fn 
setInlineFunc(self: *NavState, func: InternPool.Index) error{OutOfMemory}!void { const zcu = self.pt.zcu; if (self.dbg_line_func == func) return; @@ -944,15 +945,15 @@ pub const DeclState = struct { const old_func_info = zcu.funcInfo(self.dbg_line_func); const new_func_info = zcu.funcInfo(func); - const old_file = try self.dwarf.addDIFile(zcu, old_func_info.owner_decl); - const new_file = try self.dwarf.addDIFile(zcu, new_func_info.owner_decl); + const old_file = try self.dwarf.addDIFile(zcu, old_func_info.owner_nav); + const new_file = try self.dwarf.addDIFile(zcu, new_func_info.owner_nav); if (old_file != new_file) { self.dbg_line.appendAssumeCapacity(DW.LNS.set_file); leb128.writeUnsignedFixed(4, self.dbg_line.addManyAsArrayAssumeCapacity(4), new_file); } - const old_src_line: i33 = zcu.declPtr(old_func_info.owner_decl).navSrcLine(zcu); - const new_src_line: i33 = zcu.declPtr(new_func_info.owner_decl).navSrcLine(zcu); + const old_src_line: i33 = zcu.navSrcLine(old_func_info.owner_nav); + const new_src_line: i33 = zcu.navSrcLine(new_func_info.owner_nav); if (new_src_line != old_src_line) { self.dbg_line.appendAssumeCapacity(DW.LNS.advance_line); leb128.writeSignedFixed(5, self.dbg_line.addManyAsArrayAssumeCapacity(5), new_src_line - old_src_line); @@ -1064,31 +1065,31 @@ pub fn deinit(self: *Dwarf) void { self.src_fn_free_list.deinit(gpa); self.src_fns.deinit(gpa); - self.src_fn_decls.deinit(gpa); + self.src_fn_navs.deinit(gpa); self.di_atom_free_list.deinit(gpa); self.di_atoms.deinit(gpa); - self.di_atom_decls.deinit(gpa); + self.di_atom_navs.deinit(gpa); self.strtab.deinit(gpa); self.di_files.deinit(gpa); self.global_abbrev_relocs.deinit(gpa); } -/// Initializes Decl's state and its matching output buffers. -/// Call this before `commitDeclState`. -pub fn initDeclState(self: *Dwarf, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !DeclState { +/// Initializes Nav's state and its matching output buffers. +/// Call this before `commitNavState`. 
+pub fn initNavState(self: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !NavState { const tracy = trace(@src()); defer tracy.end(); - const decl = pt.zcu.declPtr(decl_index); - log.debug("initDeclState {}{*}", .{ decl.fqn.fmt(&pt.zcu.intern_pool), decl }); + const nav = pt.zcu.intern_pool.getNav(nav_index); + log.debug("initNavState {}", .{nav.fqn.fmt(&pt.zcu.intern_pool)}); const gpa = self.allocator; - var decl_state: DeclState = .{ + var nav_state: NavState = .{ .dwarf = self, .pt = pt, - .di_atom_decls = &self.di_atom_decls, + .di_atom_navs = &self.di_atom_navs, .dbg_line_func = undefined, .dbg_line = std.ArrayList(u8).init(gpa), .dbg_info = std.ArrayList(u8).init(gpa), @@ -1098,30 +1099,30 @@ pub fn initDeclState(self: *Dwarf, pt: Zcu.PerThread, decl_index: InternPool.Dec .abbrev_relocs = .{}, .exprloc_relocs = .{}, }; - errdefer decl_state.deinit(); - const dbg_line_buffer = &decl_state.dbg_line; - const dbg_info_buffer = &decl_state.dbg_info; + errdefer nav_state.deinit(); + const dbg_line_buffer = &nav_state.dbg_line; + const dbg_info_buffer = &nav_state.dbg_info; - const di_atom_index = try self.getOrCreateAtomForDecl(.di_atom, decl_index); + const di_atom_index = try self.getOrCreateAtomForNav(.di_atom, nav_index); - assert(decl.has_tv); + const nav_val = Value.fromInterned(nav.status.resolved.val); - switch (decl.typeOf(pt.zcu).zigTypeTag(pt.zcu)) { + switch (nav_val.typeOf(pt.zcu).zigTypeTag(pt.zcu)) { .Fn => { - _ = try self.getOrCreateAtomForDecl(.src_fn, decl_index); + _ = try self.getOrCreateAtomForNav(.src_fn, nav_index); // For functions we need to add a prologue to the debug line program. 
const ptr_width_bytes = self.ptrWidthBytes(); try dbg_line_buffer.ensureTotalCapacity((3 + ptr_width_bytes) + (1 + 4) + (1 + 4) + (1 + 5) + 1); - decl_state.dbg_line_func = decl.val.toIntern(); - const func = decl.val.getFunction(pt.zcu).?; - log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{ - decl.navSrcLine(pt.zcu), + nav_state.dbg_line_func = nav_val.toIntern(); + const func = nav_val.getFunction(pt.zcu).?; + log.debug("src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{ + pt.zcu.navSrcLine(nav_index), func.lbrace_line, func.rbrace_line, }); - const line: u28 = @intCast(decl.navSrcLine(pt.zcu) + func.lbrace_line); + const line: u28 = @intCast(pt.zcu.navSrcLine(nav_index) + func.lbrace_line); dbg_line_buffer.appendSliceAssumeCapacity(&.{ DW.LNS.extended_op, @@ -1143,7 +1144,7 @@ pub fn initDeclState(self: *Dwarf, pt: Zcu.PerThread, decl_index: InternPool.Dec assert(self.getRelocDbgFileIndex() == dbg_line_buffer.items.len); // Once we support more than one source file, this will have the ability to be more // than one possible value. 
- const file_index = try self.addDIFile(pt.zcu, decl_index); + const file_index = try self.addDIFile(pt.zcu, nav_index); leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), file_index); dbg_line_buffer.appendAssumeCapacity(DW.LNS.set_column); @@ -1154,12 +1155,12 @@ pub fn initDeclState(self: *Dwarf, pt: Zcu.PerThread, decl_index: InternPool.Dec dbg_line_buffer.appendAssumeCapacity(DW.LNS.copy); // .debug_info subprogram - const decl_name_slice = decl.name.toSlice(&pt.zcu.intern_pool); - const decl_linkage_name_slice = decl.fqn.toSlice(&pt.zcu.intern_pool); + const nav_name_slice = nav.name.toSlice(&pt.zcu.intern_pool); + const nav_linkage_name_slice = nav.fqn.toSlice(&pt.zcu.intern_pool); try dbg_info_buffer.ensureUnusedCapacity(1 + ptr_width_bytes + 4 + 4 + - (decl_name_slice.len + 1) + (decl_linkage_name_slice.len + 1)); + (nav_name_slice.len + 1) + (nav_linkage_name_slice.len + 1)); - const fn_ret_type = decl.typeOf(pt.zcu).fnReturnType(pt.zcu); + const fn_ret_type = nav_val.typeOf(pt.zcu).fnReturnType(pt.zcu); const fn_ret_has_bits = fn_ret_type.hasRuntimeBits(pt); dbg_info_buffer.appendAssumeCapacity(@intFromEnum( @as(AbbrevCode, if (fn_ret_has_bits) .subprogram else .subprogram_retvoid), @@ -1172,14 +1173,14 @@ pub fn initDeclState(self: *Dwarf, pt: Zcu.PerThread, decl_index: InternPool.Dec assert(self.getRelocDbgInfoSubprogramHighPC() == dbg_info_buffer.items.len); dbg_info_buffer.appendNTimesAssumeCapacity(0, 4); // DW.AT.high_pc, DW.FORM.data4 if (fn_ret_has_bits) { - try decl_state.addTypeRelocGlobal(di_atom_index, fn_ret_type, @intCast(dbg_info_buffer.items.len)); + try nav_state.addTypeRelocGlobal(di_atom_index, fn_ret_type, @intCast(dbg_info_buffer.items.len)); dbg_info_buffer.appendNTimesAssumeCapacity(0, 4); // DW.AT.type, DW.FORM.ref4 } dbg_info_buffer.appendSliceAssumeCapacity( - decl_name_slice[0 .. decl_name_slice.len + 1], + nav_name_slice[0 .. 
nav_name_slice.len + 1], ); // DW.AT.name, DW.FORM.string dbg_info_buffer.appendSliceAssumeCapacity( - decl_linkage_name_slice[0 .. decl_linkage_name_slice.len + 1], + nav_linkage_name_slice[0 .. nav_linkage_name_slice.len + 1], ); // DW.AT.linkage_name, DW.FORM.string }, else => { @@ -1187,37 +1188,36 @@ pub fn initDeclState(self: *Dwarf, pt: Zcu.PerThread, decl_index: InternPool.Dec }, } - return decl_state; + return nav_state; } -pub fn commitDeclState( +pub fn commitNavState( self: *Dwarf, pt: Zcu.PerThread, - decl_index: InternPool.DeclIndex, + nav_index: InternPool.Nav.Index, sym_addr: u64, sym_size: u64, - decl_state: *DeclState, + nav_state: *NavState, ) !void { const tracy = trace(@src()); defer tracy.end(); const gpa = self.allocator; const zcu = pt.zcu; - const decl = zcu.declPtr(decl_index); const ip = &zcu.intern_pool; - const namespace = zcu.namespacePtr(decl.src_namespace); - const target = namespace.fileScope(zcu).mod.resolved_target.result; + const nav = ip.getNav(nav_index); + const target = zcu.navFileScope(nav_index).mod.resolved_target.result; const target_endian = target.cpu.arch.endian(); - var dbg_line_buffer = &decl_state.dbg_line; - var dbg_info_buffer = &decl_state.dbg_info; + var dbg_line_buffer = &nav_state.dbg_line; + var dbg_info_buffer = &nav_state.dbg_info; - assert(decl.has_tv); - switch (decl.typeOf(zcu).zigTypeTag(zcu)) { + const nav_val = Value.fromInterned(nav.status.resolved.val); + switch (nav_val.typeOf(zcu).zigTypeTag(zcu)) { .Fn => { - try decl_state.setInlineFunc(decl.val.toIntern()); + try nav_state.setInlineFunc(nav_val.toIntern()); - // Since the Decl is a function, we need to update the .debug_line program. + // Since the Nav is a function, we need to update the .debug_line program. // Perform the relocations based on vaddr. switch (self.ptr_width) { .p32 => { @@ -1254,10 +1254,10 @@ pub fn commitDeclState( // Now we have the full contents and may allocate a region to store it. 
- // This logic is nearly identical to the logic below in `updateDeclDebugInfo` for + // This logic is nearly identical to the logic below in `updateNavDebugInfo` for // `TextBlock` and the .debug_info. If you are editing this logic, you // probably need to edit that logic too. - const src_fn_index = self.src_fn_decls.get(decl_index).?; + const src_fn_index = self.src_fn_navs.get(nav_index).?; const src_fn = self.getAtomPtr(.src_fn, src_fn_index); src_fn.len = @intCast(dbg_line_buffer.items.len); @@ -1275,33 +1275,26 @@ pub fn commitDeclState( next.prev_index = src_fn.prev_index; src_fn.next_index = null; // Populate where it used to be with NOPs. - switch (self.bin_file.tag) { - .elf => { - const elf_file = self.bin_file.cast(File.Elf).?; - const debug_line_sect = &elf_file.shdrs.items[elf_file.debug_line_section_index.?]; - const file_pos = debug_line_sect.sh_offset + src_fn.off; - try pwriteDbgLineNops(elf_file.base.file.?, file_pos, 0, &[0]u8{}, src_fn.len); - }, - .macho => { - const macho_file = self.bin_file.cast(File.MachO).?; - if (macho_file.base.isRelocatable()) { - const debug_line_sect = &macho_file.sections.items(.header)[macho_file.debug_line_sect_index.?]; - const file_pos = debug_line_sect.offset + src_fn.off; - try pwriteDbgLineNops(macho_file.base.file.?, file_pos, 0, &[0]u8{}, src_fn.len); - } else { - const d_sym = macho_file.getDebugSymbols().?; - const debug_line_sect = d_sym.getSectionPtr(d_sym.debug_line_section_index.?); - const file_pos = debug_line_sect.offset + src_fn.off; - try pwriteDbgLineNops(d_sym.file, file_pos, 0, &[0]u8{}, src_fn.len); - } - }, - .wasm => { - // const wasm_file = self.bin_file.cast(File.Wasm).?; - // const debug_line = wasm_file.getAtomPtr(wasm_file.debug_line_atom.?).code; - // writeDbgLineNopsBuffered(debug_line.items, src_fn.off, 0, &.{}, src_fn.len); - }, - else => unreachable, - } + if (self.bin_file.cast(.elf)) |elf_file| { + const debug_line_sect = 
&elf_file.shdrs.items[elf_file.debug_line_section_index.?]; + const file_pos = debug_line_sect.sh_offset + src_fn.off; + try pwriteDbgLineNops(elf_file.base.file.?, file_pos, 0, &[0]u8{}, src_fn.len); + } else if (self.bin_file.cast(.macho)) |macho_file| { + if (macho_file.base.isRelocatable()) { + const debug_line_sect = &macho_file.sections.items(.header)[macho_file.debug_line_sect_index.?]; + const file_pos = debug_line_sect.offset + src_fn.off; + try pwriteDbgLineNops(macho_file.base.file.?, file_pos, 0, &[0]u8{}, src_fn.len); + } else { + const d_sym = macho_file.getDebugSymbols().?; + const debug_line_sect = d_sym.getSectionPtr(d_sym.debug_line_section_index.?); + const file_pos = debug_line_sect.offset + src_fn.off; + try pwriteDbgLineNops(d_sym.file, file_pos, 0, &[0]u8{}, src_fn.len); + } + } else if (self.bin_file.cast(.wasm)) |wasm_file| { + _ = wasm_file; + // const debug_line = wasm_file.getAtomPtr(wasm_file.debug_line_atom.?).code; + // writeDbgLineNopsBuffered(debug_line.items, src_fn.off, 0, &.{}, src_fn.len); + } else unreachable; // TODO Look at the free list before appending at the end. src_fn.prev_index = last_index; const last = self.getAtomPtr(.src_fn, last_index); @@ -1342,76 +1335,67 @@ pub fn commitDeclState( // We only have support for one compilation unit so far, so the offsets are directly // from the .debug_line section. 
- switch (self.bin_file.tag) { - .elf => { - const elf_file = self.bin_file.cast(File.Elf).?; - const shdr_index = elf_file.debug_line_section_index.?; - try elf_file.growNonAllocSection(shdr_index, needed_size, 1, true); - const debug_line_sect = elf_file.shdrs.items[shdr_index]; - const file_pos = debug_line_sect.sh_offset + src_fn.off; + if (self.bin_file.cast(.elf)) |elf_file| { + const shdr_index = elf_file.debug_line_section_index.?; + try elf_file.growNonAllocSection(shdr_index, needed_size, 1, true); + const debug_line_sect = elf_file.shdrs.items[shdr_index]; + const file_pos = debug_line_sect.sh_offset + src_fn.off; + try pwriteDbgLineNops( + elf_file.base.file.?, + file_pos, + prev_padding_size, + dbg_line_buffer.items, + next_padding_size, + ); + } else if (self.bin_file.cast(.macho)) |macho_file| { + if (macho_file.base.isRelocatable()) { + const sect_index = macho_file.debug_line_sect_index.?; + try macho_file.growSection(sect_index, needed_size); + const sect = macho_file.sections.items(.header)[sect_index]; + const file_pos = sect.offset + src_fn.off; try pwriteDbgLineNops( - elf_file.base.file.?, + macho_file.base.file.?, file_pos, prev_padding_size, dbg_line_buffer.items, next_padding_size, ); - }, - - .macho => { - const macho_file = self.bin_file.cast(File.MachO).?; - if (macho_file.base.isRelocatable()) { - const sect_index = macho_file.debug_line_sect_index.?; - try macho_file.growSection(sect_index, needed_size); - const sect = macho_file.sections.items(.header)[sect_index]; - const file_pos = sect.offset + src_fn.off; - try pwriteDbgLineNops( - macho_file.base.file.?, - file_pos, - prev_padding_size, - dbg_line_buffer.items, - next_padding_size, - ); - } else { - const d_sym = macho_file.getDebugSymbols().?; - const sect_index = d_sym.debug_line_section_index.?; - try d_sym.growSection(sect_index, needed_size, true, macho_file); - const sect = d_sym.getSection(sect_index); - const file_pos = sect.offset + src_fn.off; - try pwriteDbgLineNops( 
- d_sym.file, - file_pos, - prev_padding_size, - dbg_line_buffer.items, - next_padding_size, - ); - } - }, - - .wasm => { - // const wasm_file = self.bin_file.cast(File.Wasm).?; - // const atom = wasm_file.getAtomPtr(wasm_file.debug_line_atom.?); - // const debug_line = &atom.code; - // const segment_size = debug_line.items.len; - // if (needed_size != segment_size) { - // log.debug(" needed size does not equal allocated size: {d}", .{needed_size}); - // if (needed_size > segment_size) { - // log.debug(" allocating {d} bytes for 'debug line' information", .{needed_size - segment_size}); - // try debug_line.resize(self.allocator, needed_size); - // @memset(debug_line.items[segment_size..], 0); - // } - // debug_line.items.len = needed_size; - // } - // writeDbgLineNopsBuffered( - // debug_line.items, - // src_fn.off, - // prev_padding_size, - // dbg_line_buffer.items, - // next_padding_size, - // ); - }, - else => unreachable, - } + } else { + const d_sym = macho_file.getDebugSymbols().?; + const sect_index = d_sym.debug_line_section_index.?; + try d_sym.growSection(sect_index, needed_size, true, macho_file); + const sect = d_sym.getSection(sect_index); + const file_pos = sect.offset + src_fn.off; + try pwriteDbgLineNops( + d_sym.file, + file_pos, + prev_padding_size, + dbg_line_buffer.items, + next_padding_size, + ); + } + } else if (self.bin_file.cast(.wasm)) |wasm_file| { + _ = wasm_file; + // const atom = wasm_file.getAtomPtr(wasm_file.debug_line_atom.?); + // const debug_line = &atom.code; + // const segment_size = debug_line.items.len; + // if (needed_size != segment_size) { + // log.debug(" needed size does not equal allocated size: {d}", .{needed_size}); + // if (needed_size > segment_size) { + // log.debug(" allocating {d} bytes for 'debug line' information", .{needed_size - segment_size}); + // try debug_line.resize(self.allocator, needed_size); + // @memset(debug_line.items[segment_size..], 0); + // } + // debug_line.items.len = needed_size; + // } + // 
writeDbgLineNopsBuffered( + // debug_line.items, + // src_fn.off, + // prev_padding_size, + // dbg_line_buffer.items, + // next_padding_size, + // ); + } else unreachable; // .debug_info - End the TAG.subprogram children. try dbg_info_buffer.append(0); @@ -1422,27 +1406,27 @@ pub fn commitDeclState( if (dbg_info_buffer.items.len == 0) return; - const di_atom_index = self.di_atom_decls.get(decl_index).?; - if (decl_state.abbrev_table.items.len > 0) { - // Now we emit the .debug_info types of the Decl. These will count towards the size of + const di_atom_index = self.di_atom_navs.get(nav_index).?; + if (nav_state.abbrev_table.items.len > 0) { + // Now we emit the .debug_info types of the Nav. These will count towards the size of // the buffer, so we have to do it before computing the offset, and we can't perform the actual // relocations yet. var sym_index: usize = 0; - while (sym_index < decl_state.abbrev_table.items.len) : (sym_index += 1) { - const symbol = &decl_state.abbrev_table.items[sym_index]; + while (sym_index < nav_state.abbrev_table.items.len) : (sym_index += 1) { + const symbol = &nav_state.abbrev_table.items[sym_index]; const ty = symbol.type; if (ip.isErrorSetType(ty.toIntern())) continue; symbol.offset = @intCast(dbg_info_buffer.items.len); - try decl_state.addDbgInfoType(pt, di_atom_index, ty); + try nav_state.addDbgInfoType(pt, di_atom_index, ty); } } - try self.updateDeclDebugInfoAllocation(di_atom_index, @intCast(dbg_info_buffer.items.len)); + try self.updateNavDebugInfoAllocation(di_atom_index, @intCast(dbg_info_buffer.items.len)); - while (decl_state.abbrev_relocs.popOrNull()) |reloc| { + while (nav_state.abbrev_relocs.popOrNull()) |reloc| { if (reloc.target) |reloc_target| { - const symbol = decl_state.abbrev_table.items[reloc_target]; + const symbol = nav_state.abbrev_table.items[reloc_target]; const ty = symbol.type; if (ip.isErrorSetType(ty.toIntern())) { log.debug("resolving %{d} deferred until flush", .{reloc_target}); @@ -1479,38 
+1463,35 @@ pub fn commitDeclState( } } - while (decl_state.exprloc_relocs.popOrNull()) |reloc| { - switch (self.bin_file.tag) { - .macho => { - const macho_file = self.bin_file.cast(File.MachO).?; - if (macho_file.base.isRelocatable()) { - // TODO - } else { - const d_sym = macho_file.getDebugSymbols().?; - try d_sym.relocs.append(d_sym.allocator, .{ - .type = switch (reloc.type) { - .direct_load => .direct_load, - .got_load => .got_load, - }, - .target = reloc.target, - .offset = reloc.offset + self.getAtom(.di_atom, di_atom_index).off, - .addend = 0, - }); - } - }, - .elf => {}, // TODO - else => unreachable, - } + while (nav_state.exprloc_relocs.popOrNull()) |reloc| { + if (self.bin_file.cast(.elf)) |elf_file| { + _ = elf_file; // TODO + } else if (self.bin_file.cast(.macho)) |macho_file| { + if (macho_file.base.isRelocatable()) { + // TODO + } else { + const d_sym = macho_file.getDebugSymbols().?; + try d_sym.relocs.append(d_sym.allocator, .{ + .type = switch (reloc.type) { + .direct_load => .direct_load, + .got_load => .got_load, + }, + .target = reloc.target, + .offset = reloc.offset + self.getAtom(.di_atom, di_atom_index).off, + .addend = 0, + }); + } + } else unreachable; } - try self.writeDeclDebugInfo(di_atom_index, dbg_info_buffer.items); + try self.writeNavDebugInfo(di_atom_index, dbg_info_buffer.items); } -fn updateDeclDebugInfoAllocation(self: *Dwarf, atom_index: Atom.Index, len: u32) !void { +fn updateNavDebugInfoAllocation(self: *Dwarf, atom_index: Atom.Index, len: u32) !void { const tracy = trace(@src()); defer tracy.end(); - // This logic is nearly identical to the logic above in `updateDecl` for + // This logic is nearly identical to the logic above in `updateNav` for // `SrcFn` and the line number programs. If you are editing this logic, you // probably need to edit that logic too. 
const gpa = self.allocator; @@ -1521,7 +1502,7 @@ fn updateDeclDebugInfoAllocation(self: *Dwarf, atom_index: Atom.Index, len: u32) if (atom_index == last_index) break :blk; if (atom.next_index) |next_index| { const next = self.getAtomPtr(.di_atom, next_index); - // Update existing Decl - non-last item. + // Update existing Nav - non-last item. if (atom.off + atom.len + min_nop_size > next.off) { // It grew too big, so we move it to a new location. if (atom.prev_index) |prev_index| { @@ -1531,34 +1512,27 @@ fn updateDeclDebugInfoAllocation(self: *Dwarf, atom_index: Atom.Index, len: u32) next.prev_index = atom.prev_index; atom.next_index = null; // Populate where it used to be with NOPs. - switch (self.bin_file.tag) { - .elf => { - const elf_file = self.bin_file.cast(File.Elf).?; - const debug_info_sect = &elf_file.shdrs.items[elf_file.debug_info_section_index.?]; - const file_pos = debug_info_sect.sh_offset + atom.off; - try pwriteDbgInfoNops(elf_file.base.file.?, file_pos, 0, &[0]u8{}, atom.len, false); - }, - .macho => { - const macho_file = self.bin_file.cast(File.MachO).?; - if (macho_file.base.isRelocatable()) { - const debug_info_sect = macho_file.sections.items(.header)[macho_file.debug_info_sect_index.?]; - const file_pos = debug_info_sect.offset + atom.off; - try pwriteDbgInfoNops(macho_file.base.file.?, file_pos, 0, &[0]u8{}, atom.len, false); - } else { - const d_sym = macho_file.getDebugSymbols().?; - const debug_info_sect = d_sym.getSectionPtr(d_sym.debug_info_section_index.?); - const file_pos = debug_info_sect.offset + atom.off; - try pwriteDbgInfoNops(d_sym.file, file_pos, 0, &[0]u8{}, atom.len, false); - } - }, - .wasm => { - // const wasm_file = self.bin_file.cast(File.Wasm).?; - // const debug_info_index = wasm_file.debug_info_atom.?; - // const debug_info = &wasm_file.getAtomPtr(debug_info_index).code; - // try writeDbgInfoNopsToArrayList(gpa, debug_info, atom.off, 0, &.{0}, atom.len, false); - }, - else => unreachable, - } + if 
(self.bin_file.cast(.elf)) |elf_file| { + const debug_info_sect = &elf_file.shdrs.items[elf_file.debug_info_section_index.?]; + const file_pos = debug_info_sect.sh_offset + atom.off; + try pwriteDbgInfoNops(elf_file.base.file.?, file_pos, 0, &[0]u8{}, atom.len, false); + } else if (self.bin_file.cast(.macho)) |macho_file| { + if (macho_file.base.isRelocatable()) { + const debug_info_sect = macho_file.sections.items(.header)[macho_file.debug_info_sect_index.?]; + const file_pos = debug_info_sect.offset + atom.off; + try pwriteDbgInfoNops(macho_file.base.file.?, file_pos, 0, &[0]u8{}, atom.len, false); + } else { + const d_sym = macho_file.getDebugSymbols().?; + const debug_info_sect = d_sym.getSectionPtr(d_sym.debug_info_section_index.?); + const file_pos = debug_info_sect.offset + atom.off; + try pwriteDbgInfoNops(d_sym.file, file_pos, 0, &[0]u8{}, atom.len, false); + } + } else if (self.bin_file.cast(.wasm)) |wasm_file| { + _ = wasm_file; + // const debug_info_index = wasm_file.debug_info_atom.?; + // const debug_info = &wasm_file.getAtomPtr(debug_info_index).code; + // try writeDbgInfoNopsToArrayList(gpa, debug_info, atom.off, 0, &.{0}, atom.len, false); + } else unreachable; // TODO Look at the free list before appending at the end. atom.prev_index = last_index; const last = self.getAtomPtr(.di_atom, last_index); @@ -1568,7 +1542,7 @@ fn updateDeclDebugInfoAllocation(self: *Dwarf, atom_index: Atom.Index, len: u32) atom.off = last.off + padToIdeal(last.len); } } else if (atom.prev_index == null) { - // Append new Decl. + // Append new Nav. // TODO Look at the free list before appending at the end. 
atom.prev_index = last_index; const last = self.getAtomPtr(.di_atom, last_index); @@ -1578,7 +1552,7 @@ fn updateDeclDebugInfoAllocation(self: *Dwarf, atom_index: Atom.Index, len: u32) atom.off = last.off + padToIdeal(last.len); } } else { - // This is the first Decl of the .debug_info + // This is the first Nav of the .debug_info self.di_atom_first_index = atom_index; self.di_atom_last_index = atom_index; @@ -1586,19 +1560,19 @@ fn updateDeclDebugInfoAllocation(self: *Dwarf, atom_index: Atom.Index, len: u32) } } -fn writeDeclDebugInfo(self: *Dwarf, atom_index: Atom.Index, dbg_info_buf: []const u8) !void { +fn writeNavDebugInfo(self: *Dwarf, atom_index: Atom.Index, dbg_info_buf: []const u8) !void { const tracy = trace(@src()); defer tracy.end(); - // This logic is nearly identical to the logic above in `updateDecl` for + // This logic is nearly identical to the logic above in `updateNav` for // `SrcFn` and the line number programs. If you are editing this logic, you // probably need to edit that logic too. const atom = self.getAtom(.di_atom, atom_index); - const last_decl_index = self.di_atom_last_index.?; - const last_decl = self.getAtom(.di_atom, last_decl_index); - // +1 for a trailing zero to end the children of the decl tag. - const needed_size = last_decl.off + last_decl.len + 1; + const last_nav_index = self.di_atom_last_index.?; + const last_nav = self.getAtom(.di_atom, last_nav_index); + // +1 for a trailing zero to end the children of the nav tag. + const needed_size = last_nav.off + last_nav.len + 1; const prev_padding_size: u32 = if (atom.prev_index) |prev_index| blk: { const prev = self.getAtom(.di_atom, prev_index); break :blk atom.off - (prev.off + prev.len); @@ -1608,107 +1582,99 @@ fn writeDeclDebugInfo(self: *Dwarf, atom_index: Atom.Index, dbg_info_buf: []cons break :blk next.off - (atom.off + atom.len); } else 0; - // To end the children of the decl tag. + // To end the children of the nav tag. 
const trailing_zero = atom.next_index == null; // We only have support for one compilation unit so far, so the offsets are directly // from the .debug_info section. - switch (self.bin_file.tag) { - .elf => { - const elf_file = self.bin_file.cast(File.Elf).?; - const shdr_index = elf_file.debug_info_section_index.?; - try elf_file.growNonAllocSection(shdr_index, needed_size, 1, true); - const debug_info_sect = &elf_file.shdrs.items[shdr_index]; - const file_pos = debug_info_sect.sh_offset + atom.off; + if (self.bin_file.cast(.elf)) |elf_file| { + const shdr_index = elf_file.debug_info_section_index.?; + try elf_file.growNonAllocSection(shdr_index, needed_size, 1, true); + const debug_info_sect = &elf_file.shdrs.items[shdr_index]; + const file_pos = debug_info_sect.sh_offset + atom.off; + try pwriteDbgInfoNops( + elf_file.base.file.?, + file_pos, + prev_padding_size, + dbg_info_buf, + next_padding_size, + trailing_zero, + ); + } else if (self.bin_file.cast(.macho)) |macho_file| { + if (macho_file.base.isRelocatable()) { + const sect_index = macho_file.debug_info_sect_index.?; + try macho_file.growSection(sect_index, needed_size); + const sect = macho_file.sections.items(.header)[sect_index]; + const file_pos = sect.offset + atom.off; try pwriteDbgInfoNops( - elf_file.base.file.?, + macho_file.base.file.?, file_pos, prev_padding_size, dbg_info_buf, next_padding_size, trailing_zero, ); - }, - - .macho => { - const macho_file = self.bin_file.cast(File.MachO).?; - if (macho_file.base.isRelocatable()) { - const sect_index = macho_file.debug_info_sect_index.?; - try macho_file.growSection(sect_index, needed_size); - const sect = macho_file.sections.items(.header)[sect_index]; - const file_pos = sect.offset + atom.off; - try pwriteDbgInfoNops( - macho_file.base.file.?, - file_pos, - prev_padding_size, - dbg_info_buf, - next_padding_size, - trailing_zero, - ); - } else { - const d_sym = macho_file.getDebugSymbols().?; - const sect_index = d_sym.debug_info_section_index.?; - 
try d_sym.growSection(sect_index, needed_size, true, macho_file); - const sect = d_sym.getSection(sect_index); - const file_pos = sect.offset + atom.off; - try pwriteDbgInfoNops( - d_sym.file, - file_pos, - prev_padding_size, - dbg_info_buf, - next_padding_size, - trailing_zero, - ); - } - }, - - .wasm => { - // const wasm_file = self.bin_file.cast(File.Wasm).?; - // const info_atom = wasm_file.debug_info_atom.?; - // const debug_info = &wasm_file.getAtomPtr(info_atom).code; - // const segment_size = debug_info.items.len; - // if (needed_size != segment_size) { - // log.debug(" needed size does not equal allocated size: {d}", .{needed_size}); - // if (needed_size > segment_size) { - // log.debug(" allocating {d} bytes for 'debug info' information", .{needed_size - segment_size}); - // try debug_info.resize(self.allocator, needed_size); - // @memset(debug_info.items[segment_size..], 0); - // } - // debug_info.items.len = needed_size; - // } - // log.debug(" writeDbgInfoNopsToArrayList debug_info_len={d} offset={d} content_len={d} next_padding_size={d}", .{ - // debug_info.items.len, atom.off, dbg_info_buf.len, next_padding_size, - // }); - // try writeDbgInfoNopsToArrayList( - // gpa, - // debug_info, - // atom.off, - // prev_padding_size, - // dbg_info_buf, - // next_padding_size, - // trailing_zero, - // ); - }, - else => unreachable, - } + } else { + const d_sym = macho_file.getDebugSymbols().?; + const sect_index = d_sym.debug_info_section_index.?; + try d_sym.growSection(sect_index, needed_size, true, macho_file); + const sect = d_sym.getSection(sect_index); + const file_pos = sect.offset + atom.off; + try pwriteDbgInfoNops( + d_sym.file, + file_pos, + prev_padding_size, + dbg_info_buf, + next_padding_size, + trailing_zero, + ); + } + } else if (self.bin_file.cast(.wasm)) |wasm_file| { + _ = wasm_file; + // const info_atom = wasm_file.debug_info_atom.?; + // const debug_info = &wasm_file.getAtomPtr(info_atom).code; + // const segment_size = 
debug_info.items.len; + // if (needed_size != segment_size) { + // log.debug(" needed size does not equal allocated size: {d}", .{needed_size}); + // if (needed_size > segment_size) { + // log.debug(" allocating {d} bytes for 'debug info' information", .{needed_size - segment_size}); + // try debug_info.resize(self.allocator, needed_size); + // @memset(debug_info.items[segment_size..], 0); + // } + // debug_info.items.len = needed_size; + // } + // log.debug(" writeDbgInfoNopsToArrayList debug_info_len={d} offset={d} content_len={d} next_padding_size={d}", .{ + // debug_info.items.len, atom.off, dbg_info_buf.len, next_padding_size, + // }); + // try writeDbgInfoNopsToArrayList( + // gpa, + // debug_info, + // atom.off, + // prev_padding_size, + // dbg_info_buf, + // next_padding_size, + // trailing_zero, + // ); + } else unreachable; } -pub fn updateDeclLineNumber(self: *Dwarf, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void { +pub fn updateNavLineNumber(self: *Dwarf, zcu: *Zcu, nav_index: InternPool.Nav.Index) !void { const tracy = trace(@src()); defer tracy.end(); - const atom_index = try self.getOrCreateAtomForDecl(.src_fn, decl_index); + const atom_index = try self.getOrCreateAtomForNav(.src_fn, nav_index); const atom = self.getAtom(.src_fn, atom_index); if (atom.len == 0) return; - const decl = zcu.declPtr(decl_index); - const func = decl.val.getFunction(zcu).?; - log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{ - decl.navSrcLine(zcu), + const nav = zcu.intern_pool.getNav(nav_index); + const nav_val = Value.fromInterned(nav.status.resolved.val); + const func = nav_val.getFunction(zcu).?; + log.debug("src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{ + zcu.navSrcLine(nav_index), func.lbrace_line, func.rbrace_line, }); - const line: u28 = @intCast(decl.navSrcLine(zcu) + func.lbrace_line); + const line: u28 = @intCast(zcu.navSrcLine(nav_index) + func.lbrace_line); var data: [4]u8 = undefined; 
leb128.writeUnsignedFixed(4, &data, line); @@ -1742,11 +1708,11 @@ pub fn updateDeclLineNumber(self: *Dwarf, zcu: *Zcu, decl_index: InternPool.Decl } } -pub fn freeDecl(self: *Dwarf, decl_index: InternPool.DeclIndex) void { +pub fn freeNav(self: *Dwarf, nav_index: InternPool.Nav.Index) void { const gpa = self.allocator; // Free SrcFn atom - if (self.src_fn_decls.fetchRemove(decl_index)) |kv| { + if (self.src_fn_navs.fetchRemove(nav_index)) |kv| { const src_fn_index = kv.value; const src_fn = self.getAtom(.src_fn, src_fn_index); _ = self.src_fn_free_list.remove(src_fn_index); @@ -1773,7 +1739,7 @@ pub fn freeDecl(self: *Dwarf, decl_index: InternPool.DeclIndex) void { } // Free DI atom - if (self.di_atom_decls.fetchRemove(decl_index)) |kv| { + if (self.di_atom_navs.fetchRemove(nav_index)) |kv| { const di_atom_index = kv.value; const di_atom = self.getAtomPtr(.di_atom, di_atom_index); @@ -1930,40 +1896,33 @@ pub fn writeDbgAbbrev(self: *Dwarf) !void { self.abbrev_table_offset = abbrev_offset; const needed_size = abbrev_buf.len; - switch (self.bin_file.tag) { - .elf => { - const elf_file = self.bin_file.cast(File.Elf).?; - const shdr_index = elf_file.debug_abbrev_section_index.?; - try elf_file.growNonAllocSection(shdr_index, needed_size, 1, false); - const debug_abbrev_sect = &elf_file.shdrs.items[shdr_index]; - const file_pos = debug_abbrev_sect.sh_offset + abbrev_offset; - try elf_file.base.file.?.pwriteAll(&abbrev_buf, file_pos); - }, - .macho => { - const macho_file = self.bin_file.cast(File.MachO).?; - if (macho_file.base.isRelocatable()) { - const sect_index = macho_file.debug_abbrev_sect_index.?; - try macho_file.growSection(sect_index, needed_size); - const sect = macho_file.sections.items(.header)[sect_index]; - const file_pos = sect.offset + abbrev_offset; - try macho_file.base.file.?.pwriteAll(&abbrev_buf, file_pos); - } else { - const d_sym = macho_file.getDebugSymbols().?; - const sect_index = d_sym.debug_abbrev_section_index.?; - try 
d_sym.growSection(sect_index, needed_size, false, macho_file); - const sect = d_sym.getSection(sect_index); - const file_pos = sect.offset + abbrev_offset; - try d_sym.file.pwriteAll(&abbrev_buf, file_pos); - } - }, - .wasm => { - // const wasm_file = self.bin_file.cast(File.Wasm).?; - // const debug_abbrev = &wasm_file.getAtomPtr(wasm_file.debug_abbrev_atom.?).code; - // try debug_abbrev.resize(gpa, needed_size); - // debug_abbrev.items[0..abbrev_buf.len].* = abbrev_buf; - }, - else => unreachable, - } + if (self.bin_file.cast(.elf)) |elf_file| { + const shdr_index = elf_file.debug_abbrev_section_index.?; + try elf_file.growNonAllocSection(shdr_index, needed_size, 1, false); + const debug_abbrev_sect = &elf_file.shdrs.items[shdr_index]; + const file_pos = debug_abbrev_sect.sh_offset + abbrev_offset; + try elf_file.base.file.?.pwriteAll(&abbrev_buf, file_pos); + } else if (self.bin_file.cast(.macho)) |macho_file| { + if (macho_file.base.isRelocatable()) { + const sect_index = macho_file.debug_abbrev_sect_index.?; + try macho_file.growSection(sect_index, needed_size); + const sect = macho_file.sections.items(.header)[sect_index]; + const file_pos = sect.offset + abbrev_offset; + try macho_file.base.file.?.pwriteAll(&abbrev_buf, file_pos); + } else { + const d_sym = macho_file.getDebugSymbols().?; + const sect_index = d_sym.debug_abbrev_section_index.?; + try d_sym.growSection(sect_index, needed_size, false, macho_file); + const sect = d_sym.getSection(sect_index); + const file_pos = sect.offset + abbrev_offset; + try d_sym.file.pwriteAll(&abbrev_buf, file_pos); + } + } else if (self.bin_file.cast(.wasm)) |wasm_file| { + _ = wasm_file; + // const debug_abbrev = &wasm_file.getAtomPtr(wasm_file.debug_abbrev_atom.?).code; + // try debug_abbrev.resize(gpa, needed_size); + // debug_abbrev.items[0..abbrev_buf.len].* = abbrev_buf; + } else unreachable; } fn dbgInfoHeaderBytes(self: *Dwarf) usize { @@ -2027,37 +1986,30 @@ pub fn writeDbgInfoHeader(self: *Dwarf, zcu: *Zcu, 
low_pc: u64, high_pc: u64) !v mem.writeInt(u16, di_buf.addManyAsArrayAssumeCapacity(2), DW.LANG.C99, target_endian); if (di_buf.items.len > first_dbg_info_off) { - // Move the first N decls to the end to make more padding for the header. + // Move the first N navs to the end to make more padding for the header. @panic("TODO: handle .debug_info header exceeding its padding"); } const jmp_amt = first_dbg_info_off - di_buf.items.len; - switch (self.bin_file.tag) { - .elf => { - const elf_file = self.bin_file.cast(File.Elf).?; - const debug_info_sect = &elf_file.shdrs.items[elf_file.debug_info_section_index.?]; - const file_pos = debug_info_sect.sh_offset; - try pwriteDbgInfoNops(elf_file.base.file.?, file_pos, 0, di_buf.items, jmp_amt, false); - }, - .macho => { - const macho_file = self.bin_file.cast(File.MachO).?; - if (macho_file.base.isRelocatable()) { - const debug_info_sect = macho_file.sections.items(.header)[macho_file.debug_info_sect_index.?]; - const file_pos = debug_info_sect.offset; - try pwriteDbgInfoNops(macho_file.base.file.?, file_pos, 0, di_buf.items, jmp_amt, false); - } else { - const d_sym = macho_file.getDebugSymbols().?; - const debug_info_sect = d_sym.getSection(d_sym.debug_info_section_index.?); - const file_pos = debug_info_sect.offset; - try pwriteDbgInfoNops(d_sym.file, file_pos, 0, di_buf.items, jmp_amt, false); - } - }, - .wasm => { - // const wasm_file = self.bin_file.cast(File.Wasm).?; - // const debug_info = &wasm_file.getAtomPtr(wasm_file.debug_info_atom.?).code; - // try writeDbgInfoNopsToArrayList(self.allocator, debug_info, 0, 0, di_buf.items, jmp_amt, false); - }, - else => unreachable, - } + if (self.bin_file.cast(.elf)) |elf_file| { + const debug_info_sect = &elf_file.shdrs.items[elf_file.debug_info_section_index.?]; + const file_pos = debug_info_sect.sh_offset; + try pwriteDbgInfoNops(elf_file.base.file.?, file_pos, 0, di_buf.items, jmp_amt, false); + } else if (self.bin_file.cast(.macho)) |macho_file| { + if 
(macho_file.base.isRelocatable()) { + const debug_info_sect = macho_file.sections.items(.header)[macho_file.debug_info_sect_index.?]; + const file_pos = debug_info_sect.offset; + try pwriteDbgInfoNops(macho_file.base.file.?, file_pos, 0, di_buf.items, jmp_amt, false); + } else { + const d_sym = macho_file.getDebugSymbols().?; + const debug_info_sect = d_sym.getSection(d_sym.debug_info_section_index.?); + const file_pos = debug_info_sect.offset; + try pwriteDbgInfoNops(d_sym.file, file_pos, 0, di_buf.items, jmp_amt, false); + } + } else if (self.bin_file.cast(.wasm)) |wasm_file| { + _ = wasm_file; + // const debug_info = &wasm_file.getAtomPtr(wasm_file.debug_info_atom.?).code; + // try writeDbgInfoNopsToArrayList(self.allocator, debug_info, 0, 0, di_buf.items, jmp_amt, false); + } else unreachable; } fn resolveCompilationDir(zcu: *Zcu, buffer: *[std.fs.max_path_bytes]u8) []const u8 { @@ -2360,40 +2312,33 @@ pub fn writeDbgAranges(self: *Dwarf, addr: u64, size: u64) !void { } const needed_size: u32 = @intCast(di_buf.items.len); - switch (self.bin_file.tag) { - .elf => { - const elf_file = self.bin_file.cast(File.Elf).?; - const shdr_index = elf_file.debug_aranges_section_index.?; - try elf_file.growNonAllocSection(shdr_index, needed_size, 16, false); - const debug_aranges_sect = &elf_file.shdrs.items[shdr_index]; - const file_pos = debug_aranges_sect.sh_offset; - try elf_file.base.file.?.pwriteAll(di_buf.items, file_pos); - }, - .macho => { - const macho_file = self.bin_file.cast(File.MachO).?; - if (macho_file.base.isRelocatable()) { - const sect_index = macho_file.debug_aranges_sect_index.?; - try macho_file.growSection(sect_index, needed_size); - const sect = macho_file.sections.items(.header)[sect_index]; - const file_pos = sect.offset; - try macho_file.base.file.?.pwriteAll(di_buf.items, file_pos); - } else { - const d_sym = macho_file.getDebugSymbols().?; - const sect_index = d_sym.debug_aranges_section_index.?; - try d_sym.growSection(sect_index, needed_size, 
false, macho_file); - const sect = d_sym.getSection(sect_index); - const file_pos = sect.offset; - try d_sym.file.pwriteAll(di_buf.items, file_pos); - } - }, - .wasm => { - // const wasm_file = self.bin_file.cast(File.Wasm).?; - // const debug_ranges = &wasm_file.getAtomPtr(wasm_file.debug_ranges_atom.?).code; - // try debug_ranges.resize(gpa, needed_size); - // @memcpy(debug_ranges.items[0..di_buf.items.len], di_buf.items); - }, - else => unreachable, - } + if (self.bin_file.cast(.elf)) |elf_file| { + const shdr_index = elf_file.debug_aranges_section_index.?; + try elf_file.growNonAllocSection(shdr_index, needed_size, 16, false); + const debug_aranges_sect = &elf_file.shdrs.items[shdr_index]; + const file_pos = debug_aranges_sect.sh_offset; + try elf_file.base.file.?.pwriteAll(di_buf.items, file_pos); + } else if (self.bin_file.cast(.macho)) |macho_file| { + if (macho_file.base.isRelocatable()) { + const sect_index = macho_file.debug_aranges_sect_index.?; + try macho_file.growSection(sect_index, needed_size); + const sect = macho_file.sections.items(.header)[sect_index]; + const file_pos = sect.offset; + try macho_file.base.file.?.pwriteAll(di_buf.items, file_pos); + } else { + const d_sym = macho_file.getDebugSymbols().?; + const sect_index = d_sym.debug_aranges_section_index.?; + try d_sym.growSection(sect_index, needed_size, false, macho_file); + const sect = d_sym.getSection(sect_index); + const file_pos = sect.offset; + try d_sym.file.pwriteAll(di_buf.items, file_pos); + } + } else if (self.bin_file.cast(.wasm)) |wasm_file| { + _ = wasm_file; + // const debug_ranges = &wasm_file.getAtomPtr(wasm_file.debug_ranges_atom.?).code; + // try debug_ranges.resize(gpa, needed_size); + // @memcpy(debug_ranges.items[0..di_buf.items.len], di_buf.items); + } else unreachable; } pub fn writeDbgLineHeader(self: *Dwarf) !void { @@ -2502,60 +2447,52 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void { var src_fn_index = first_fn_index; - var buffer = try gpa.alloc(u8, 
last_fn.off + last_fn.len - first_fn.off); + const buffer = try gpa.alloc(u8, last_fn.off + last_fn.len - first_fn.off); defer gpa.free(buffer); - switch (self.bin_file.tag) { - .elf => { - const elf_file = self.bin_file.cast(File.Elf).?; - const shdr_index = elf_file.debug_line_section_index.?; - const needed_size = elf_file.shdrs.items[shdr_index].sh_size + delta; - try elf_file.growNonAllocSection(shdr_index, needed_size, 1, true); - const file_pos = elf_file.shdrs.items[shdr_index].sh_offset + first_fn.off; + if (self.bin_file.cast(.elf)) |elf_file| { + const shdr_index = elf_file.debug_line_section_index.?; + const needed_size = elf_file.shdrs.items[shdr_index].sh_size + delta; + try elf_file.growNonAllocSection(shdr_index, needed_size, 1, true); + const file_pos = elf_file.shdrs.items[shdr_index].sh_offset + first_fn.off; - const amt = try elf_file.base.file.?.preadAll(buffer, file_pos); - if (amt != buffer.len) return error.InputOutput; + const amt = try elf_file.base.file.?.preadAll(buffer, file_pos); + if (amt != buffer.len) return error.InputOutput; - try elf_file.base.file.?.pwriteAll(buffer, file_pos + delta); - }, - .macho => { - const macho_file = self.bin_file.cast(File.MachO).?; - if (macho_file.base.isRelocatable()) { - const sect_index = macho_file.debug_line_sect_index.?; - const needed_size: u32 = @intCast(macho_file.sections.items(.header)[sect_index].size + delta); - try macho_file.growSection(sect_index, needed_size); - const file_pos = macho_file.sections.items(.header)[sect_index].offset + first_fn.off; + try elf_file.base.file.?.pwriteAll(buffer, file_pos + delta); + } else if (self.bin_file.cast(.macho)) |macho_file| { + if (macho_file.base.isRelocatable()) { + const sect_index = macho_file.debug_line_sect_index.?; + const needed_size: u32 = @intCast(macho_file.sections.items(.header)[sect_index].size + delta); + try macho_file.growSection(sect_index, needed_size); + const file_pos = macho_file.sections.items(.header)[sect_index].offset + 
first_fn.off; - const amt = try macho_file.base.file.?.preadAll(buffer, file_pos); - if (amt != buffer.len) return error.InputOutput; + const amt = try macho_file.base.file.?.preadAll(buffer, file_pos); + if (amt != buffer.len) return error.InputOutput; - try macho_file.base.file.?.pwriteAll(buffer, file_pos + delta); - } else { - const d_sym = macho_file.getDebugSymbols().?; - const sect_index = d_sym.debug_line_section_index.?; - const needed_size: u32 = @intCast(d_sym.getSection(sect_index).size + delta); - try d_sym.growSection(sect_index, needed_size, true, macho_file); - const file_pos = d_sym.getSection(sect_index).offset + first_fn.off; + try macho_file.base.file.?.pwriteAll(buffer, file_pos + delta); + } else { + const d_sym = macho_file.getDebugSymbols().?; + const sect_index = d_sym.debug_line_section_index.?; + const needed_size: u32 = @intCast(d_sym.getSection(sect_index).size + delta); + try d_sym.growSection(sect_index, needed_size, true, macho_file); + const file_pos = d_sym.getSection(sect_index).offset + first_fn.off; - const amt = try d_sym.file.preadAll(buffer, file_pos); - if (amt != buffer.len) return error.InputOutput; + const amt = try d_sym.file.preadAll(buffer, file_pos); + if (amt != buffer.len) return error.InputOutput; - try d_sym.file.pwriteAll(buffer, file_pos + delta); - } - }, - .wasm => { - _ = &buffer; - // const wasm_file = self.bin_file.cast(File.Wasm).?; - // const debug_line = &wasm_file.getAtomPtr(wasm_file.debug_line_atom.?).code; - // { - // const src = debug_line.items[first_fn.off..]; - // @memcpy(buffer[0..src.len], src); - // } - // try debug_line.resize(self.allocator, debug_line.items.len + delta); - // @memcpy(debug_line.items[first_fn.off + delta ..][0..buffer.len], buffer); - }, - else => unreachable, - } + try d_sym.file.pwriteAll(buffer, file_pos + delta); + } + } else if (self.bin_file.cast(.wasm)) |wasm_file| { + _ = wasm_file; + // const debug_line = &wasm_file.getAtomPtr(wasm_file.debug_line_atom.?).code; + 
// { + // const src = debug_line.items[first_fn.off..]; + // @memcpy(buffer[0..src.len], src); + // } + // try debug_line.resize(self.allocator, debug_line.items.len + delta); + // @memcpy(debug_line.items[first_fn.off + delta ..][0..buffer.len], buffer); + } else unreachable; while (true) { const src_fn = self.getAtomPtr(.src_fn, src_fn_index); @@ -2580,33 +2517,26 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void { // We use NOPs because consumers empirically do not respect the header length field. const jmp_amt = self.getDebugLineProgramOff().? - di_buf.items.len; - switch (self.bin_file.tag) { - .elf => { - const elf_file = self.bin_file.cast(File.Elf).?; - const debug_line_sect = &elf_file.shdrs.items[elf_file.debug_line_section_index.?]; - const file_pos = debug_line_sect.sh_offset; - try pwriteDbgLineNops(elf_file.base.file.?, file_pos, 0, di_buf.items, jmp_amt); - }, - .macho => { - const macho_file = self.bin_file.cast(File.MachO).?; - if (macho_file.base.isRelocatable()) { - const debug_line_sect = macho_file.sections.items(.header)[macho_file.debug_line_sect_index.?]; - const file_pos = debug_line_sect.offset; - try pwriteDbgLineNops(macho_file.base.file.?, file_pos, 0, di_buf.items, jmp_amt); - } else { - const d_sym = macho_file.getDebugSymbols().?; - const debug_line_sect = d_sym.getSection(d_sym.debug_line_section_index.?); - const file_pos = debug_line_sect.offset; - try pwriteDbgLineNops(d_sym.file, file_pos, 0, di_buf.items, jmp_amt); - } - }, - .wasm => { - // const wasm_file = self.bin_file.cast(File.Wasm).?; - // const debug_line = &wasm_file.getAtomPtr(wasm_file.debug_line_atom.?).code; - // writeDbgLineNopsBuffered(debug_line.items, 0, 0, di_buf.items, jmp_amt); - }, - else => unreachable, - } + if (self.bin_file.cast(.elf)) |elf_file| { + const debug_line_sect = &elf_file.shdrs.items[elf_file.debug_line_section_index.?]; + const file_pos = debug_line_sect.sh_offset; + try pwriteDbgLineNops(elf_file.base.file.?, file_pos, 0, di_buf.items, 
jmp_amt); + } else if (self.bin_file.cast(.macho)) |macho_file| { + if (macho_file.base.isRelocatable()) { + const debug_line_sect = macho_file.sections.items(.header)[macho_file.debug_line_sect_index.?]; + const file_pos = debug_line_sect.offset; + try pwriteDbgLineNops(macho_file.base.file.?, file_pos, 0, di_buf.items, jmp_amt); + } else { + const d_sym = macho_file.getDebugSymbols().?; + const debug_line_sect = d_sym.getSection(d_sym.debug_line_section_index.?); + const file_pos = debug_line_sect.offset; + try pwriteDbgLineNops(d_sym.file, file_pos, 0, di_buf.items, jmp_amt); + } + } else if (self.bin_file.cast(.wasm)) |wasm_file| { + _ = wasm_file; + // const debug_line = &wasm_file.getAtomPtr(wasm_file.debug_line_atom.?).code; + // writeDbgLineNopsBuffered(debug_line.items, 0, 0, di_buf.items, jmp_amt); + } else unreachable; } fn getDebugInfoOff(self: Dwarf) ?u32 { @@ -2704,85 +2634,66 @@ pub fn flushModule(self: *Dwarf, pt: Zcu.PerThread) !void { ); const di_atom_index = try self.createAtom(.di_atom); - log.debug("updateDeclDebugInfoAllocation in flushModule", .{}); - try self.updateDeclDebugInfoAllocation(di_atom_index, @intCast(dbg_info_buffer.items.len)); - log.debug("writeDeclDebugInfo in flushModule", .{}); - try self.writeDeclDebugInfo(di_atom_index, dbg_info_buffer.items); - - const file_pos = switch (self.bin_file.tag) { - .elf => pos: { - const elf_file = self.bin_file.cast(File.Elf).?; - const debug_info_sect = &elf_file.shdrs.items[elf_file.debug_info_section_index.?]; - break :pos debug_info_sect.sh_offset; - }, - .macho => pos: { - const macho_file = self.bin_file.cast(File.MachO).?; - if (macho_file.base.isRelocatable()) { - const debug_info_sect = &macho_file.sections.items(.header)[macho_file.debug_info_sect_index.?]; - break :pos debug_info_sect.offset; - } else { - const d_sym = macho_file.getDebugSymbols().?; - const debug_info_sect = d_sym.getSectionPtr(d_sym.debug_info_section_index.?); - break :pos debug_info_sect.offset; - } - }, + 
log.debug("updateNavDebugInfoAllocation in flushModule", .{}); + try self.updateNavDebugInfoAllocation(di_atom_index, @intCast(dbg_info_buffer.items.len)); + log.debug("writeNavDebugInfo in flushModule", .{}); + try self.writeNavDebugInfo(di_atom_index, dbg_info_buffer.items); + + const file_pos = if (self.bin_file.cast(.elf)) |elf_file| pos: { + const debug_info_sect = &elf_file.shdrs.items[elf_file.debug_info_section_index.?]; + break :pos debug_info_sect.sh_offset; + } else if (self.bin_file.cast(.macho)) |macho_file| pos: { + if (macho_file.base.isRelocatable()) { + const debug_info_sect = &macho_file.sections.items(.header)[macho_file.debug_info_sect_index.?]; + break :pos debug_info_sect.offset; + } else { + const d_sym = macho_file.getDebugSymbols().?; + const debug_info_sect = d_sym.getSectionPtr(d_sym.debug_info_section_index.?); + break :pos debug_info_sect.offset; + } + } else if (self.bin_file.cast(.wasm)) |_| // for wasm, the offset is always 0 as we write to memory first - .wasm => 0, - else => unreachable, - }; + 0 + else + unreachable; var buf: [@sizeOf(u32)]u8 = undefined; mem.writeInt(u32, &buf, self.getAtom(.di_atom, di_atom_index).off, target.cpu.arch.endian()); while (self.global_abbrev_relocs.popOrNull()) |reloc| { const atom = self.getAtom(.di_atom, reloc.atom_index); - switch (self.bin_file.tag) { - .elf => { - const elf_file = self.bin_file.cast(File.Elf).?; - try elf_file.base.file.?.pwriteAll(&buf, file_pos + atom.off + reloc.offset); - }, - .macho => { - const macho_file = self.bin_file.cast(File.MachO).?; - if (macho_file.base.isRelocatable()) { - try macho_file.base.file.?.pwriteAll(&buf, file_pos + atom.off + reloc.offset); - } else { - const d_sym = macho_file.getDebugSymbols().?; - try d_sym.file.pwriteAll(&buf, file_pos + atom.off + reloc.offset); - } - }, - .wasm => { - // const wasm_file = self.bin_file.cast(File.Wasm).?; - // const debug_info = wasm_file.getAtomPtr(wasm_file.debug_info_atom.?).code; - // 
debug_info.items[atom.off + reloc.offset ..][0..buf.len].* = buf; - }, - else => unreachable, - } + if (self.bin_file.cast(.elf)) |elf_file| { + try elf_file.base.file.?.pwriteAll(&buf, file_pos + atom.off + reloc.offset); + } else if (self.bin_file.cast(.macho)) |macho_file| { + if (macho_file.base.isRelocatable()) { + try macho_file.base.file.?.pwriteAll(&buf, file_pos + atom.off + reloc.offset); + } else { + const d_sym = macho_file.getDebugSymbols().?; + try d_sym.file.pwriteAll(&buf, file_pos + atom.off + reloc.offset); + } + } else if (self.bin_file.cast(.wasm)) |wasm_file| { + _ = wasm_file; + // const debug_info = wasm_file.getAtomPtr(wasm_file.debug_info_atom.?).code; + // debug_info.items[atom.off + reloc.offset ..][0..buf.len].* = buf; + } else unreachable; } } } -fn addDIFile(self: *Dwarf, zcu: *Zcu, decl_index: InternPool.DeclIndex) !u28 { - const decl = zcu.declPtr(decl_index); - const file_scope = decl.getFileScope(zcu); +fn addDIFile(self: *Dwarf, zcu: *Zcu, nav_index: InternPool.Nav.Index) !u28 { + const file_scope = zcu.navFileScope(nav_index); const gop = try self.di_files.getOrPut(self.allocator, file_scope); if (!gop.found_existing) { - switch (self.bin_file.tag) { - .elf => { - const elf_file = self.bin_file.cast(File.Elf).?; - elf_file.markDirty(elf_file.debug_line_section_index.?); - }, - .macho => { - const macho_file = self.bin_file.cast(File.MachO).?; - if (macho_file.base.isRelocatable()) { - macho_file.markDirty(macho_file.debug_line_sect_index.?); - } else { - const d_sym = macho_file.getDebugSymbols().?; - d_sym.markDirty(d_sym.debug_line_section_index.?, macho_file); - } - }, - .wasm => {}, - else => unreachable, - } + if (self.bin_file.cast(.elf)) |elf_file| { + elf_file.markDirty(elf_file.debug_line_section_index.?); + } else if (self.bin_file.cast(.macho)) |macho_file| { + if (macho_file.base.isRelocatable()) { + macho_file.markDirty(macho_file.debug_line_sect_index.?); + } else { + const d_sym = macho_file.getDebugSymbols().?; + 
d_sym.markDirty(d_sym.debug_line_section_index.?, macho_file); + } + } else if (self.bin_file.cast(.wasm)) |_| {} else unreachable; } return @intCast(gop.index + 1); } @@ -2909,17 +2820,17 @@ fn createAtom(self: *Dwarf, comptime kind: Kind) !Atom.Index { return index; } -fn getOrCreateAtomForDecl(self: *Dwarf, comptime kind: Kind, decl_index: InternPool.DeclIndex) !Atom.Index { +fn getOrCreateAtomForNav(self: *Dwarf, comptime kind: Kind, nav_index: InternPool.Nav.Index) !Atom.Index { switch (kind) { .src_fn => { - const gop = try self.src_fn_decls.getOrPut(self.allocator, decl_index); + const gop = try self.src_fn_navs.getOrPut(self.allocator, nav_index); if (!gop.found_existing) { gop.value_ptr.* = try self.createAtom(kind); } return gop.value_ptr.*; }, .di_atom => { - const gop = try self.di_atom_decls.getOrPut(self.allocator, decl_index); + const gop = try self.di_atom_navs.getOrPut(self.allocator, nav_index); if (!gop.found_existing) { gop.value_ptr.* = try self.createAtom(kind); } diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 16f8739b0228..103c69202bad 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -478,24 +478,24 @@ pub fn deinit(self: *Elf) void { self.comdat_group_sections.deinit(gpa); } -pub fn getDeclVAddr(self: *Elf, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo) !u64 { +pub fn getNavVAddr(self: *Elf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index, reloc_info: link.File.RelocInfo) !u64 { assert(self.llvm_object == null); - return self.zigObjectPtr().?.getDeclVAddr(self, pt, decl_index, reloc_info); + return self.zigObjectPtr().?.getNavVAddr(self, pt, nav_index, reloc_info); } -pub fn lowerAnonDecl( +pub fn lowerUav( self: *Elf, pt: Zcu.PerThread, - decl_val: InternPool.Index, + uav: InternPool.Index, explicit_alignment: InternPool.Alignment, src_loc: Zcu.LazySrcLoc, -) !codegen.Result { - return self.zigObjectPtr().?.lowerAnonDecl(self, pt, decl_val, explicit_alignment, src_loc); +) 
!codegen.GenResult { + return self.zigObjectPtr().?.lowerUav(self, pt, uav, explicit_alignment, src_loc); } -pub fn getAnonDeclVAddr(self: *Elf, decl_val: InternPool.Index, reloc_info: link.File.RelocInfo) !u64 { +pub fn getUavVAddr(self: *Elf, uav: InternPool.Index, reloc_info: link.File.RelocInfo) !u64 { assert(self.llvm_object == null); - return self.zigObjectPtr().?.getAnonDeclVAddr(self, decl_val, reloc_info); + return self.zigObjectPtr().?.getUavVAddr(self, uav, reloc_info); } /// Returns end pos of collision, if any. @@ -2913,9 +2913,9 @@ pub fn writeElfHeader(self: *Elf) !void { try self.base.file.?.pwriteAll(hdr_buf[0..index], 0); } -pub fn freeDecl(self: *Elf, decl_index: InternPool.DeclIndex) void { - if (self.llvm_object) |llvm_object| return llvm_object.freeDecl(decl_index); - return self.zigObjectPtr().?.freeDecl(self, decl_index); +pub fn freeNav(self: *Elf, nav: InternPool.Nav.Index) void { + if (self.llvm_object) |llvm_object| return llvm_object.freeNav(nav); + return self.zigObjectPtr().?.freeNav(self, nav); } pub fn updateFunc(self: *Elf, pt: Zcu.PerThread, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { @@ -2926,20 +2926,16 @@ pub fn updateFunc(self: *Elf, pt: Zcu.PerThread, func_index: InternPool.Index, a return self.zigObjectPtr().?.updateFunc(self, pt, func_index, air, liveness); } -pub fn updateDecl( +pub fn updateNav( self: *Elf, pt: Zcu.PerThread, - decl_index: InternPool.DeclIndex, -) link.File.UpdateDeclError!void { + nav: InternPool.Nav.Index, +) link.File.UpdateNavError!void { if (build_options.skip_non_native and builtin.object_format != .elf) { @panic("Attempted to compile for object format that was disabled by build configuration"); } - if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(pt, decl_index); - return self.zigObjectPtr().?.updateDecl(self, pt, decl_index); -} - -pub fn lowerUnnamedConst(self: *Elf, pt: Zcu.PerThread, val: Value, decl_index: InternPool.DeclIndex) !u32 { - return 
self.zigObjectPtr().?.lowerUnnamedConst(self, pt, val, decl_index); + if (self.llvm_object) |llvm_object| return llvm_object.updateNav(pt, nav); + return self.zigObjectPtr().?.updateNav(self, pt, nav); } pub fn updateExports( @@ -2955,9 +2951,9 @@ pub fn updateExports( return self.zigObjectPtr().?.updateExports(self, pt, exported, export_indices); } -pub fn updateDeclLineNumber(self: *Elf, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { +pub fn updateNavLineNumber(self: *Elf, pt: Zcu.PerThread, nav: InternPool.Nav.Index) !void { if (self.llvm_object) |_| return; - return self.zigObjectPtr().?.updateDeclLineNumber(pt, decl_index); + return self.zigObjectPtr().?.updateNavLineNumber(pt, nav); } pub fn deleteExport( diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 8384399eb968..ef3e2ed77c78 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -32,35 +32,14 @@ dwarf: ?Dwarf = null, /// Table of tracked LazySymbols. lazy_syms: LazySymbolTable = .{}, -/// Table of tracked Decls. -decls: DeclTable = .{}, +/// Table of tracked `Nav`s. +navs: NavTable = .{}, /// TLS variables indexed by Atom.Index. tls_variables: TlsTable = .{}, -/// Table of unnamed constants associated with a parent `Decl`. -/// We store them here so that we can free the constants whenever the `Decl` -/// needs updating or is freed. -/// -/// For example, -/// -/// ```zig -/// const Foo = struct{ -/// a: u8, -/// }; -/// -/// pub fn main() void { -/// var foo = Foo{ .a = 1 }; -/// _ = foo; -/// } -/// ``` -/// -/// value assigned to label `foo` is an unnamed constant belonging/associated -/// with `Decl` `main`, and lives as long as that `Decl`. -unnamed_consts: UnnamedConstTable = .{}, - -/// Table of tracked AnonDecls. -anon_decls: AnonDeclTable = .{}, +/// Table of tracked `Uav`s. 
+uavs: UavTable = .{}, debug_strtab_dirty: bool = false, debug_abbrev_section_dirty: bool = false, @@ -124,29 +103,21 @@ pub fn deinit(self: *ZigObject, allocator: Allocator) void { self.relocs.deinit(allocator); { - var it = self.decls.iterator(); + var it = self.navs.iterator(); while (it.next()) |entry| { entry.value_ptr.exports.deinit(allocator); } - self.decls.deinit(allocator); + self.navs.deinit(allocator); } self.lazy_syms.deinit(allocator); { - var it = self.unnamed_consts.valueIterator(); - while (it.next()) |syms| { - syms.deinit(allocator); - } - self.unnamed_consts.deinit(allocator); - } - - { - var it = self.anon_decls.iterator(); + var it = self.uavs.iterator(); while (it.next()) |entry| { entry.value_ptr.exports.deinit(allocator); } - self.anon_decls.deinit(allocator); + self.uavs.deinit(allocator); } for (self.tls_variables.values()) |*tlv| { @@ -161,7 +132,7 @@ pub fn deinit(self: *ZigObject, allocator: Allocator) void { pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !void { // Handle any lazy symbols that were emitted by incremental compilation. 
- if (self.lazy_syms.getPtr(.none)) |metadata| { + if (self.lazy_syms.getPtr(.anyerror_type)) |metadata| { const pt: Zcu.PerThread = .{ .zcu = elf_file.base.comp.module.?, .tid = tid }; // Most lazy symbols can be updated on first use, but @@ -169,7 +140,7 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi if (metadata.text_state != .unused) self.updateLazySymbol( elf_file, pt, - link.File.LazySymbol.initDecl(.code, null, pt.zcu), + .{ .kind = .code, .ty = .anyerror_type }, metadata.text_symbol_index, ) catch |err| return switch (err) { error.CodegenFail => error.FlushFailure, @@ -178,7 +149,7 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi if (metadata.rodata_state != .unused) self.updateLazySymbol( elf_file, pt, - link.File.LazySymbol.initDecl(.const_data, null, pt.zcu), + .{ .kind = .const_data, .ty = .anyerror_type }, metadata.rodata_symbol_index, ) catch |err| return switch (err) { error.CodegenFail => error.FlushFailure, @@ -661,25 +632,25 @@ pub fn codeAlloc(self: *ZigObject, elf_file: *Elf, atom_index: Atom.Index) ![]u8 return code; } -pub fn getDeclVAddr( +pub fn getNavVAddr( self: *ZigObject, elf_file: *Elf, pt: Zcu.PerThread, - decl_index: InternPool.DeclIndex, + nav_index: InternPool.Nav.Index, reloc_info: link.File.RelocInfo, ) !u64 { const zcu = pt.zcu; const ip = &zcu.intern_pool; - const decl = zcu.declPtr(decl_index); - log.debug("getDeclVAddr {}({d})", .{ decl.fqn.fmt(ip), decl_index }); - const this_sym_index = if (decl.isExtern(zcu)) blk: { - const name = decl.name.toSlice(ip); - const lib_name = if (decl.getOwnedExternFunc(zcu)) |ext_fn| - ext_fn.lib_name.toSlice(ip) - else - decl.getOwnedVariable(zcu).?.lib_name.toSlice(ip); - break :blk try self.getGlobalSymbol(elf_file, name, lib_name); - } else try self.getOrCreateMetadataForDecl(elf_file, decl_index); + const nav = ip.getNav(nav_index); + log.debug("getNavVAddr {}({d})", .{ nav.fqn.fmt(ip), nav_index }); + const 
this_sym_index = switch (ip.indexToKey(nav.status.resolved.val)) { + .@"extern" => |@"extern"| try self.getGlobalSymbol( + elf_file, + nav.name.toSlice(ip), + @"extern".lib_name.toSlice(ip), + ), + else => try self.getOrCreateMetadataForNav(elf_file, nav_index), + }; const this_sym = self.symbol(this_sym_index); const vaddr = this_sym.address(.{}, elf_file); const parent_atom = self.symbol(reloc_info.parent_atom_index).atom(elf_file).?; @@ -692,13 +663,13 @@ pub fn getDeclVAddr( return @intCast(vaddr); } -pub fn getAnonDeclVAddr( +pub fn getUavVAddr( self: *ZigObject, elf_file: *Elf, - decl_val: InternPool.Index, + uav: InternPool.Index, reloc_info: link.File.RelocInfo, ) !u64 { - const sym_index = self.anon_decls.get(decl_val).?.symbol_index; + const sym_index = self.uavs.get(uav).?.symbol_index; const sym = self.symbol(sym_index); const vaddr = sym.address(.{}, elf_file); const parent_atom = self.symbol(reloc_info.parent_atom_index).atom(elf_file).?; @@ -711,43 +682,43 @@ pub fn getAnonDeclVAddr( return @intCast(vaddr); } -pub fn lowerAnonDecl( +pub fn lowerUav( self: *ZigObject, elf_file: *Elf, pt: Zcu.PerThread, - decl_val: InternPool.Index, + uav: InternPool.Index, explicit_alignment: InternPool.Alignment, - src_loc: Module.LazySrcLoc, -) !codegen.Result { - const gpa = elf_file.base.comp.gpa; - const mod = elf_file.base.comp.module.?; - const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val)); - const decl_alignment = switch (explicit_alignment) { - .none => ty.abiAlignment(pt), + src_loc: Zcu.LazySrcLoc, +) !codegen.GenResult { + const zcu = pt.zcu; + const gpa = zcu.gpa; + const val = Value.fromInterned(uav); + const uav_alignment = switch (explicit_alignment) { + .none => val.typeOf(zcu).abiAlignment(pt), else => explicit_alignment, }; - if (self.anon_decls.get(decl_val)) |metadata| { - const existing_alignment = self.symbol(metadata.symbol_index).atom(elf_file).?.alignment; - if (decl_alignment.order(existing_alignment).compare(.lte)) - return .ok; 
+ if (self.uavs.get(uav)) |metadata| { + const sym = self.symbol(metadata.symbol_index); + const existing_alignment = sym.atom(elf_file).?.alignment; + if (uav_alignment.order(existing_alignment).compare(.lte)) + return .{ .mcv = .{ .load_symbol = metadata.symbol_index } }; } - const val = Value.fromInterned(decl_val); var name_buf: [32]u8 = undefined; const name = std.fmt.bufPrint(&name_buf, "__anon_{d}", .{ - @intFromEnum(decl_val), + @intFromEnum(uav), }) catch unreachable; const res = self.lowerConst( elf_file, pt, name, val, - decl_alignment, + uav_alignment, elf_file.zig_data_rel_ro_section_index.?, src_loc, ) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, - else => |e| return .{ .fail = try Module.ErrorMsg.create( + else => |e| return .{ .fail = try Zcu.ErrorMsg.create( gpa, src_loc, "unable to lower constant value: {s}", @@ -758,8 +729,8 @@ pub fn lowerAnonDecl( .ok => |sym_index| sym_index, .fail => |em| return .{ .fail = em }, }; - try self.anon_decls.put(gpa, decl_val, .{ .symbol_index = sym_index }); - return .ok; + try self.uavs.put(gpa, uav, .{ .symbol_index = sym_index }); + return .{ .mcv = .{ .load_symbol = sym_index } }; } pub fn getOrCreateMetadataForLazySymbol( @@ -768,51 +739,32 @@ pub fn getOrCreateMetadataForLazySymbol( pt: Zcu.PerThread, lazy_sym: link.File.LazySymbol, ) !Symbol.Index { - const mod = pt.zcu; - const gpa = mod.gpa; - const gop = try self.lazy_syms.getOrPut(gpa, lazy_sym.getDecl(mod)); + const gop = try self.lazy_syms.getOrPut(pt.zcu.gpa, lazy_sym.ty); errdefer _ = if (!gop.found_existing) self.lazy_syms.pop(); if (!gop.found_existing) gop.value_ptr.* = .{}; - const metadata: struct { - symbol_index: *Symbol.Index, - state: *LazySymbolMetadata.State, - } = switch (lazy_sym.kind) { - .code => .{ - .symbol_index = &gop.value_ptr.text_symbol_index, - .state = &gop.value_ptr.text_state, - }, - .const_data => .{ - .symbol_index = &gop.value_ptr.rodata_symbol_index, - .state = &gop.value_ptr.rodata_state, 
- }, + const symbol_index_ptr, const state_ptr = switch (lazy_sym.kind) { + .code => .{ &gop.value_ptr.text_symbol_index, &gop.value_ptr.text_state }, + .const_data => .{ &gop.value_ptr.rodata_symbol_index, &gop.value_ptr.rodata_state }, }; - switch (metadata.state.*) { + switch (state_ptr.*) { .unused => { + const gpa = elf_file.base.comp.gpa; const symbol_index = try self.newSymbolWithAtom(gpa, 0); const sym = self.symbol(symbol_index); sym.flags.needs_zig_got = true; - metadata.symbol_index.* = symbol_index; + symbol_index_ptr.* = symbol_index; }, - .pending_flush => return metadata.symbol_index.*, + .pending_flush => return symbol_index_ptr.*, .flushed => {}, } - metadata.state.* = .pending_flush; - const symbol_index = metadata.symbol_index.*; + state_ptr.* = .pending_flush; + const symbol_index = symbol_index_ptr.*; // anyerror needs to be deferred until flushModule - if (lazy_sym.getDecl(mod) != .none) try self.updateLazySymbol(elf_file, pt, lazy_sym, symbol_index); + if (lazy_sym.ty != .anyerror_type) try self.updateLazySymbol(elf_file, pt, lazy_sym, symbol_index); return symbol_index; } -fn freeUnnamedConsts(self: *ZigObject, elf_file: *Elf, decl_index: InternPool.DeclIndex) void { - const gpa = elf_file.base.comp.gpa; - const unnamed_consts = self.unnamed_consts.getPtr(decl_index) orelse return; - for (unnamed_consts.items) |sym_index| { - self.freeDeclMetadata(elf_file, sym_index); - } - unnamed_consts.clearAndFree(gpa); -} - -fn freeDeclMetadata(self: *ZigObject, elf_file: *Elf, sym_index: Symbol.Index) void { +fn freeNavMetadata(self: *ZigObject, elf_file: *Elf, sym_index: Symbol.Index) void { const sym = self.symbol(sym_index); sym.atom(elf_file).?.free(elf_file); log.debug("adding %{d} to local symbols free list", .{sym_index}); @@ -820,38 +772,37 @@ fn freeDeclMetadata(self: *ZigObject, elf_file: *Elf, sym_index: Symbol.Index) v // TODO free GOT entry here } -pub fn freeDecl(self: *ZigObject, elf_file: *Elf, decl_index: InternPool.DeclIndex) void { 
+pub fn freeNav(self: *ZigObject, elf_file: *Elf, nav_index: InternPool.Nav.Index) void { const gpa = elf_file.base.comp.gpa; - log.debug("freeDecl ({d})", .{decl_index}); + log.debug("freeNav ({d})", .{nav_index}); - if (self.decls.fetchRemove(decl_index)) |const_kv| { + if (self.navs.fetchRemove(nav_index)) |const_kv| { var kv = const_kv; const sym_index = kv.value.symbol_index; - self.freeDeclMetadata(elf_file, sym_index); - self.freeUnnamedConsts(elf_file, decl_index); + self.freeNavMetadata(elf_file, sym_index); kv.value.exports.deinit(gpa); } if (self.dwarf) |*dw| { - dw.freeDecl(decl_index); + dw.freeNav(nav_index); } } -pub fn getOrCreateMetadataForDecl( +pub fn getOrCreateMetadataForNav( self: *ZigObject, elf_file: *Elf, - decl_index: InternPool.DeclIndex, + nav_index: InternPool.Nav.Index, ) !Symbol.Index { const gpa = elf_file.base.comp.gpa; - const gop = try self.decls.getOrPut(gpa, decl_index); + const gop = try self.navs.getOrPut(gpa, nav_index); if (!gop.found_existing) { const any_non_single_threaded = elf_file.base.comp.config.any_non_single_threaded; const symbol_index = try self.newSymbolWithAtom(gpa, 0); - const mod = elf_file.base.comp.module.?; - const decl = mod.declPtr(decl_index); + const zcu = elf_file.base.comp.module.?; + const nav_val = Value.fromInterned(zcu.intern_pool.getNav(nav_index).status.resolved.val); const sym = self.symbol(symbol_index); - if (decl.getOwnedVariable(mod)) |variable| { + if (nav_val.getVariable(zcu)) |variable| { if (variable.is_threadlocal and any_non_single_threaded) { sym.flags.is_tls = true; } @@ -864,89 +815,81 @@ pub fn getOrCreateMetadataForDecl( return gop.value_ptr.symbol_index; } -fn getDeclShdrIndex( +fn getNavShdrIndex( self: *ZigObject, elf_file: *Elf, - decl: *const Module.Decl, + zcu: *Zcu, + nav_index: InternPool.Nav.Index, code: []const u8, ) error{OutOfMemory}!u32 { _ = self; - const mod = elf_file.base.comp.module.?; + const ip = &zcu.intern_pool; const any_non_single_threaded = 
elf_file.base.comp.config.any_non_single_threaded; - const shdr_index = switch (decl.typeOf(mod).zigTypeTag(mod)) { - .Fn => elf_file.zig_text_section_index.?, - else => blk: { - if (decl.getOwnedVariable(mod)) |variable| { - if (variable.is_threadlocal and any_non_single_threaded) { - const is_all_zeroes = for (code) |byte| { - if (byte != 0) break false; - } else true; - if (is_all_zeroes) break :blk elf_file.sectionByName(".tbss") orelse try elf_file.addSection(.{ - .type = elf.SHT_NOBITS, - .flags = elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_TLS, - .name = try elf_file.insertShString(".tbss"), - .offset = std.math.maxInt(u64), - }); - - break :blk elf_file.sectionByName(".tdata") orelse try elf_file.addSection(.{ - .type = elf.SHT_PROGBITS, - .flags = elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_TLS, - .name = try elf_file.insertShString(".tdata"), - .offset = std.math.maxInt(u64), - }); - } - if (variable.is_const) break :blk elf_file.zig_data_rel_ro_section_index.?; - if (Value.fromInterned(variable.init).isUndefDeep(mod)) { - // TODO: get the optimize_mode from the Module that owns the decl instead - // of using the root module here. - break :blk switch (elf_file.base.comp.root_mod.optimize_mode) { - .Debug, .ReleaseSafe => elf_file.zig_data_section_index.?, - .ReleaseFast, .ReleaseSmall => elf_file.zig_bss_section_index.?, - }; - } - // TODO I blatantly copied the logic from the Wasm linker, but is there a less - // intrusive check for all zeroes than this? 
- const is_all_zeroes = for (code) |byte| { - if (byte != 0) break false; - } else true; - if (is_all_zeroes) break :blk elf_file.zig_bss_section_index.?; - break :blk elf_file.zig_data_section_index.?; - } - break :blk elf_file.zig_data_rel_ro_section_index.?; - }, + const nav_val = zcu.navValue(nav_index); + if (ip.isFunctionType(nav_val.typeOf(zcu).toIntern())) return elf_file.zig_text_section_index.?; + const is_const, const is_threadlocal, const nav_init = switch (ip.indexToKey(nav_val.toIntern())) { + .variable => |variable| .{ false, variable.is_threadlocal, variable.init }, + .@"extern" => |@"extern"| .{ @"extern".is_const, @"extern".is_threadlocal, .none }, + else => .{ true, false, nav_val.toIntern() }, }; - return shdr_index; + if (any_non_single_threaded and is_threadlocal) { + for (code) |byte| { + if (byte != 0) break; + } else return elf_file.sectionByName(".tbss") orelse try elf_file.addSection(.{ + .type = elf.SHT_NOBITS, + .flags = elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_TLS, + .name = try elf_file.insertShString(".tbss"), + .offset = std.math.maxInt(u64), + }); + return elf_file.sectionByName(".tdata") orelse try elf_file.addSection(.{ + .type = elf.SHT_PROGBITS, + .flags = elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_TLS, + .name = try elf_file.insertShString(".tdata"), + .offset = std.math.maxInt(u64), + }); + } + if (is_const) return elf_file.zig_data_rel_ro_section_index.?; + if (nav_init != .none and Value.fromInterned(nav_init).isUndefDeep(zcu)) + return switch (zcu.navFileScope(nav_index).mod.optimize_mode) { + .Debug, .ReleaseSafe => elf_file.zig_data_section_index.?, + .ReleaseFast, .ReleaseSmall => elf_file.zig_bss_section_index.?, + }; + for (code) |byte| { + if (byte != 0) break; + } else return elf_file.zig_bss_section_index.?; + return elf_file.zig_data_section_index.?; } -fn updateDeclCode( +fn updateNavCode( self: *ZigObject, elf_file: *Elf, pt: Zcu.PerThread, - decl_index: InternPool.DeclIndex, + nav_index: InternPool.Nav.Index, 
sym_index: Symbol.Index, shdr_index: u32, code: []const u8, stt_bits: u8, ) !void { - const gpa = elf_file.base.comp.gpa; - const mod = pt.zcu; - const ip = &mod.intern_pool; - const decl = mod.declPtr(decl_index); + const zcu = pt.zcu; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + const nav = ip.getNav(nav_index); - log.debug("updateDeclCode {}({d})", .{ decl.fqn.fmt(ip), decl_index }); + log.debug("updateNavCode {}({d})", .{ nav.fqn.fmt(ip), nav_index }); - const required_alignment = decl.getAlignment(pt).max( - target_util.minFunctionAlignment(mod.getTarget()), + const required_alignment = pt.navAlignment(nav_index).max( + target_util.minFunctionAlignment(zcu.navFileScope(nav_index).mod.resolved_target.result), ); const sym = self.symbol(sym_index); const esym = &self.symtab.items(.elf_sym)[sym.esym_index]; const atom_ptr = sym.atom(elf_file).?; - const name_offset = try self.strtab.insert(gpa, decl.fqn.toSlice(ip)); + const name_offset = try self.strtab.insert(gpa, nav.fqn.toSlice(ip)); atom_ptr.alive = true; atom_ptr.name_offset = name_offset; atom_ptr.output_section_index = shdr_index; + sym.name_offset = name_offset; esym.st_name = name_offset; esym.st_info |= stt_bits; @@ -962,7 +905,7 @@ fn updateDeclCode( const need_realloc = code.len > capacity or !required_alignment.check(@intCast(atom_ptr.value)); if (need_realloc) { try atom_ptr.grow(elf_file); - log.debug("growing {} from 0x{x} to 0x{x}", .{ decl.fqn.fmt(ip), old_vaddr, atom_ptr.value }); + log.debug("growing {} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), old_vaddr, atom_ptr.value }); if (old_vaddr != atom_ptr.value) { sym.value = 0; esym.st_value = 0; @@ -979,7 +922,7 @@ fn updateDeclCode( } } else { try atom_ptr.allocate(elf_file); - errdefer self.freeDeclMetadata(elf_file, sym_index); + errdefer self.freeNavMetadata(elf_file, sym_index); sym.value = 0; sym.flags.needs_zig_got = true; @@ -1023,24 +966,24 @@ fn updateTlv( self: *ZigObject, elf_file: *Elf, pt: Zcu.PerThread, - decl_index: 
InternPool.DeclIndex, + nav_index: InternPool.Nav.Index, sym_index: Symbol.Index, shndx: u32, code: []const u8, ) !void { - const mod = pt.zcu; - const ip = &mod.intern_pool; - const gpa = mod.gpa; - const decl = mod.declPtr(decl_index); + const zcu = pt.zcu; + const ip = &zcu.intern_pool; + const gpa = zcu.gpa; + const nav = ip.getNav(nav_index); - log.debug("updateTlv {}({d})", .{ decl.fqn.fmt(ip), decl_index }); + log.debug("updateTlv {}({d})", .{ nav.fqn.fmt(ip), nav_index }); - const required_alignment = decl.getAlignment(pt); + const required_alignment = pt.navAlignment(nav_index); const sym = self.symbol(sym_index); const esym = &self.symtab.items(.elf_sym)[sym.esym_index]; const atom_ptr = sym.atom(elf_file).?; - const name_offset = try self.strtab.insert(gpa, decl.fqn.toSlice(ip)); + const name_offset = try self.strtab.insert(gpa, nav.fqn.toSlice(ip)); sym.value = 0; sym.name_offset = name_offset; @@ -1049,6 +992,7 @@ fn updateTlv( atom_ptr.alive = true; atom_ptr.name_offset = name_offset; + sym.name_offset = name_offset; esym.st_value = 0; esym.st_name = name_offset; esym.st_info = elf.STT_TLS; @@ -1086,53 +1030,49 @@ pub fn updateFunc( const tracy = trace(@src()); defer tracy.end(); - const mod = pt.zcu; - const ip = &mod.intern_pool; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; const gpa = elf_file.base.comp.gpa; - const func = mod.funcInfo(func_index); - const decl_index = func.owner_decl; - const decl = mod.declPtr(decl_index); + const func = zcu.funcInfo(func_index); - log.debug("updateFunc {}({d})", .{ decl.fqn.fmt(ip), decl_index }); + log.debug("updateFunc {}({d})", .{ ip.getNav(func.owner_nav).fqn.fmt(ip), func.owner_nav }); - const sym_index = try self.getOrCreateMetadataForDecl(elf_file, decl_index); - self.freeUnnamedConsts(elf_file, decl_index); + const sym_index = try self.getOrCreateMetadataForNav(elf_file, func.owner_nav); self.symbol(sym_index).atom(elf_file).?.freeRelocs(elf_file); var code_buffer = std.ArrayList(u8).init(gpa); 
defer code_buffer.deinit(); - var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(pt, decl_index) else null; - defer if (decl_state) |*ds| ds.deinit(); + var dwarf_state = if (self.dwarf) |*dw| try dw.initNavState(pt, func.owner_nav) else null; + defer if (dwarf_state) |*ds| ds.deinit(); const res = try codegen.generateFunction( &elf_file.base, pt, - decl.navSrcLoc(mod), + zcu.navSrcLoc(func.owner_nav), func_index, air, liveness, &code_buffer, - if (decl_state) |*ds| .{ .dwarf = ds } else .none, + if (dwarf_state) |*ds| .{ .dwarf = ds } else .none, ); const code = switch (res) { .ok => code_buffer.items, .fail => |em| { - func.setAnalysisState(&mod.intern_pool, .codegen_failure); - try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); + try zcu.failed_codegen.put(gpa, func.owner_nav, em); return; }, }; - const shndx = try self.getDeclShdrIndex(elf_file, decl, code); - try self.updateDeclCode(elf_file, pt, decl_index, sym_index, shndx, code, elf.STT_FUNC); + const shndx = try self.getNavShdrIndex(elf_file, zcu, func.owner_nav, code); + try self.updateNavCode(elf_file, pt, func.owner_nav, sym_index, shndx, code, elf.STT_FUNC); - if (decl_state) |*ds| { + if (dwarf_state) |*ds| { const sym = self.symbol(sym_index); - try self.dwarf.?.commitDeclState( + try self.dwarf.?.commitNavState( pt, - decl_index, + func.owner_nav, @intCast(sym.address(.{}, elf_file)), sym.atom(elf_file).?.size, ds, @@ -1142,78 +1082,80 @@ pub fn updateFunc( // Exports will be updated by `Zcu.processExports` after the update. 
} -pub fn updateDecl( +pub fn updateNav( self: *ZigObject, elf_file: *Elf, pt: Zcu.PerThread, - decl_index: InternPool.DeclIndex, -) link.File.UpdateDeclError!void { + nav_index: InternPool.Nav.Index, +) link.File.UpdateNavError!void { const tracy = trace(@src()); defer tracy.end(); - const mod = pt.zcu; - const ip = &mod.intern_pool; - const decl = mod.declPtr(decl_index); - - log.debug("updateDecl {}({d})", .{ decl.fqn.fmt(ip), decl_index }); - - if (decl.val.getExternFunc(mod)) |_| return; - if (decl.isExtern(mod)) { - // Extern variable gets a .got entry only. - const variable = decl.getOwnedVariable(mod).?; - const name = decl.name.toSlice(&mod.intern_pool); - const lib_name = variable.lib_name.toSlice(&mod.intern_pool); - const sym_index = try self.getGlobalSymbol(elf_file, name, lib_name); - self.symbol(sym_index).flags.needs_got = true; - return; - } + const zcu = pt.zcu; + const ip = &zcu.intern_pool; + const nav = ip.getNav(nav_index); + + log.debug("updateNav {}({d})", .{ nav.fqn.fmt(ip), nav_index }); + + const nav_val = zcu.navValue(nav_index); + const nav_init = switch (ip.indexToKey(nav_val.toIntern())) { + .variable => |variable| Value.fromInterned(variable.init), + .@"extern" => |@"extern"| { + if (ip.isFunctionType(@"extern".ty)) return; + // Extern variable gets a .got entry only. 
+ const sym_index = try self.getGlobalSymbol( + elf_file, + nav.name.toSlice(ip), + @"extern".lib_name.toSlice(ip), + ); + self.symbol(sym_index).flags.needs_got = true; + return; + }, + else => nav_val, + }; - const sym_index = try self.getOrCreateMetadataForDecl(elf_file, decl_index); + const sym_index = try self.getOrCreateMetadataForNav(elf_file, nav_index); self.symbol(sym_index).atom(elf_file).?.freeRelocs(elf_file); - const gpa = elf_file.base.comp.gpa; - var code_buffer = std.ArrayList(u8).init(gpa); + var code_buffer = std.ArrayList(u8).init(zcu.gpa); defer code_buffer.deinit(); - var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(pt, decl_index) else null; - defer if (decl_state) |*ds| ds.deinit(); + var nav_state: ?Dwarf.NavState = if (self.dwarf) |*dw| try dw.initNavState(pt, nav_index) else null; + defer if (nav_state) |*ns| ns.deinit(); // TODO implement .debug_info for global variables - const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val; - const res = if (decl_state) |*ds| - try codegen.generateSymbol(&elf_file.base, pt, decl.navSrcLoc(mod), decl_val, &code_buffer, .{ - .dwarf = ds, - }, .{ - .parent_atom_index = sym_index, - }) - else - try codegen.generateSymbol(&elf_file.base, pt, decl.navSrcLoc(mod), decl_val, &code_buffer, .none, .{ - .parent_atom_index = sym_index, - }); + const res = try codegen.generateSymbol( + &elf_file.base, + pt, + zcu.navSrcLoc(nav_index), + nav_init, + &code_buffer, + if (nav_state) |*ns| .{ .dwarf = ns } else .none, + .{ .parent_atom_index = sym_index }, + ); const code = switch (res) { .ok => code_buffer.items, .fail => |em| { - decl.analysis = .codegen_failure; - try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); + try zcu.failed_codegen.put(zcu.gpa, nav_index, em); return; }, }; - const shndx = try self.getDeclShdrIndex(elf_file, decl, code); + const shndx = try self.getNavShdrIndex(elf_file, zcu, 
nav_index, code); if (elf_file.shdrs.items[shndx].sh_flags & elf.SHF_TLS != 0) - try self.updateTlv(elf_file, pt, decl_index, sym_index, shndx, code) + try self.updateTlv(elf_file, pt, nav_index, sym_index, shndx, code) else - try self.updateDeclCode(elf_file, pt, decl_index, sym_index, shndx, code, elf.STT_OBJECT); + try self.updateNavCode(elf_file, pt, nav_index, sym_index, shndx, code, elf.STT_OBJECT); - if (decl_state) |*ds| { + if (nav_state) |*ns| { const sym = self.symbol(sym_index); - try self.dwarf.?.commitDeclState( + try self.dwarf.?.commitNavState( pt, - decl_index, + nav_index, @intCast(sym.address(.{}, elf_file)), sym.atom(elf_file).?.size, - ds, + ns, ); } @@ -1237,13 +1179,13 @@ fn updateLazySymbol( const name_str_index = blk: { const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{}", .{ @tagName(sym.kind), - sym.ty.fmt(pt), + Type.fromInterned(sym.ty).fmt(pt), }); defer gpa.free(name); break :blk try self.strtab.insert(gpa, name); }; - const src = sym.ty.srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded; + const src = Type.fromInterned(sym.ty).srcLocOrNull(mod) orelse Zcu.LazySrcLoc.unneeded; const res = try codegen.generateLazySymbol( &elf_file.base, pt, @@ -1280,7 +1222,7 @@ fn updateLazySymbol( atom_ptr.output_section_index = output_section_index; try atom_ptr.allocate(elf_file); - errdefer self.freeDeclMetadata(elf_file, symbol_index); + errdefer self.freeNavMetadata(elf_file, symbol_index); local_sym.value = 0; local_sym.flags.needs_zig_got = true; @@ -1296,49 +1238,9 @@ fn updateLazySymbol( try elf_file.base.file.?.pwriteAll(code, file_offset); } -pub fn lowerUnnamedConst( - self: *ZigObject, - elf_file: *Elf, - pt: Zcu.PerThread, - val: Value, - decl_index: InternPool.DeclIndex, -) !u32 { - const gpa = elf_file.base.comp.gpa; - const mod = elf_file.base.comp.module.?; - const gop = try self.unnamed_consts.getOrPut(gpa, decl_index); - if (!gop.found_existing) { - gop.value_ptr.* = .{}; - } - const unnamed_consts = gop.value_ptr; - const 
decl = mod.declPtr(decl_index); - const index = unnamed_consts.items.len; - const name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl.fqn.fmt(&mod.intern_pool), index }); - defer gpa.free(name); - const ty = val.typeOf(mod); - const sym_index = switch (try self.lowerConst( - elf_file, - pt, - name, - val, - ty.abiAlignment(pt), - elf_file.zig_data_rel_ro_section_index.?, - decl.navSrcLoc(mod), - )) { - .ok => |sym_index| sym_index, - .fail => |em| { - decl.analysis = .codegen_failure; - try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); - log.err("{s}", .{em.msg}); - return error.CodegenFail; - }, - }; - try unnamed_consts.append(gpa, sym_index); - return sym_index; -} - const LowerConstResult = union(enum) { ok: Symbol.Index, - fail: *Module.ErrorMsg, + fail: *Zcu.ErrorMsg, }; fn lowerConst( @@ -1349,7 +1251,7 @@ fn lowerConst( val: Value, required_alignment: InternPool.Alignment, output_section_index: u32, - src_loc: Module.LazySrcLoc, + src_loc: Zcu.LazySrcLoc, ) !LowerConstResult { const gpa = pt.zcu.gpa; @@ -1384,7 +1286,8 @@ fn lowerConst( atom_ptr.output_section_index = output_section_index; try atom_ptr.allocate(elf_file); - errdefer self.freeDeclMetadata(elf_file, sym_index); + // TODO rename and re-audit this method + errdefer self.freeNavMetadata(elf_file, sym_index); const shdr = elf_file.shdrs.items[output_section_index]; const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value)); @@ -1397,7 +1300,7 @@ pub fn updateExports( self: *ZigObject, elf_file: *Elf, pt: Zcu.PerThread, - exported: Module.Exported, + exported: Zcu.Exported, export_indices: []const u32, ) link.File.UpdateExportsError!void { const tracy = trace(@src()); @@ -1406,24 +1309,24 @@ pub fn updateExports( const mod = pt.zcu; const gpa = elf_file.base.comp.gpa; const metadata = switch (exported) { - .decl_index => |decl_index| blk: { - _ = try self.getOrCreateMetadataForDecl(elf_file, decl_index); - break :blk 
self.decls.getPtr(decl_index).?; + .nav => |nav| blk: { + _ = try self.getOrCreateMetadataForNav(elf_file, nav); + break :blk self.navs.getPtr(nav).?; }, - .value => |value| self.anon_decls.getPtr(value) orelse blk: { + .uav => |uav| self.uavs.getPtr(uav) orelse blk: { const first_exp = mod.all_exports.items[export_indices[0]]; - const res = try self.lowerAnonDecl(elf_file, pt, value, .none, first_exp.src); + const res = try self.lowerUav(elf_file, pt, uav, .none, first_exp.src); switch (res) { - .ok => {}, + .mcv => {}, .fail => |em| { - // TODO maybe it's enough to return an error here and let Module.processExportsInner + // TODO maybe it's enough to return an error here and let Zcu.processExportsInner // handle the error? try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); mod.failed_exports.putAssumeCapacityNoClobber(export_indices[0], em); return; }, } - break :blk self.anon_decls.getPtr(value).?; + break :blk self.uavs.getPtr(uav).?; }, }; const sym_index = metadata.symbol_index; @@ -1436,7 +1339,7 @@ pub fn updateExports( if (exp.opts.section.unwrap()) |section_name| { if (!section_name.eqlSlice(".text", &mod.intern_pool)) { try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); - mod.failed_exports.putAssumeCapacityNoClobber(export_idx, try Module.ErrorMsg.create( + mod.failed_exports.putAssumeCapacityNoClobber(export_idx, try Zcu.ErrorMsg.create( gpa, exp.src, "Unimplemented: ExportOptions.section", @@ -1451,7 +1354,7 @@ pub fn updateExports( .weak => elf.STB_WEAK, .link_once => { try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); - mod.failed_exports.putAssumeCapacityNoClobber(export_idx, try Module.ErrorMsg.create( + mod.failed_exports.putAssumeCapacityNoClobber(export_idx, try Zcu.ErrorMsg.create( gpa, exp.src, "Unimplemented: GlobalLinkage.LinkOnce", @@ -1487,21 +1390,22 @@ pub fn updateExports( } } -/// Must be called only after a successful call to `updateDecl`. 
-pub fn updateDeclLineNumber( +/// Must be called only after a successful call to `updateNav`. +pub fn updateNavLineNumber( self: *ZigObject, pt: Zcu.PerThread, - decl_index: InternPool.DeclIndex, + nav_index: InternPool.Nav.Index, ) !void { const tracy = trace(@src()); defer tracy.end(); - const decl = pt.zcu.declPtr(decl_index); + const ip = &pt.zcu.intern_pool; + const nav = ip.getNav(nav_index); - log.debug("updateDeclLineNumber {}({d})", .{ decl.fqn.fmt(&pt.zcu.intern_pool), decl_index }); + log.debug("updateNavLineNumber {}({d})", .{ nav.fqn.fmt(ip), nav_index }); if (self.dwarf) |*dw| { - try dw.updateDeclLineNumber(pt.zcu, decl_index); + try dw.updateNavLineNumber(pt.zcu, nav_index); } } @@ -1512,9 +1416,9 @@ pub fn deleteExport( name: InternPool.NullTerminatedString, ) void { const metadata = switch (exported) { - .decl_index => |decl_index| self.decls.getPtr(decl_index) orelse return, - .value => |value| self.anon_decls.getPtr(value) orelse return, - }; + .nav => |nav| self.navs.getPtr(nav), + .uav => |uav| self.uavs.getPtr(uav), + } orelse return; const mod = elf_file.base.comp.module.?; const exp_name = name.toSlice(&mod.intern_pool); const esym_index = metadata.@"export"(self, exp_name) orelse return; @@ -1754,14 +1658,14 @@ const LazySymbolMetadata = struct { rodata_state: State = .unused, }; -const DeclMetadata = struct { +const AvMetadata = struct { symbol_index: Symbol.Index, - /// A list of all exports aliases of this Decl. + /// A list of all exports aliases of this Av. 
exports: std.ArrayListUnmanaged(Symbol.Index) = .{}, - fn @"export"(m: DeclMetadata, zo: *ZigObject, name: []const u8) ?*u32 { + fn @"export"(m: AvMetadata, zig_object: *ZigObject, name: []const u8) ?*u32 { for (m.exports.items) |*exp| { - const exp_name = zo.getString(zo.symbol(exp.*).name_offset); + const exp_name = zig_object.getString(zig_object.symbol(exp.*).name_offset); if (mem.eql(u8, name, exp_name)) return exp; } return null; @@ -1778,10 +1682,9 @@ const TlsVariable = struct { }; const AtomList = std.ArrayListUnmanaged(Atom.Index); -const UnnamedConstTable = std.AutoHashMapUnmanaged(InternPool.DeclIndex, std.ArrayListUnmanaged(Symbol.Index)); -const DeclTable = std.AutoHashMapUnmanaged(InternPool.DeclIndex, DeclMetadata); -const AnonDeclTable = std.AutoHashMapUnmanaged(InternPool.Index, DeclMetadata); -const LazySymbolTable = std.AutoArrayHashMapUnmanaged(InternPool.OptionalDeclIndex, LazySymbolMetadata); +const NavTable = std.AutoHashMapUnmanaged(InternPool.Nav.Index, AvMetadata); +const UavTable = std.AutoHashMapUnmanaged(InternPool.Index, AvMetadata); +const LazySymbolTable = std.AutoArrayHashMapUnmanaged(InternPool.Index, LazySymbolMetadata); const TlsTable = std.AutoArrayHashMapUnmanaged(Atom.Index, TlsVariable); const assert = std.debug.assert; @@ -1792,8 +1695,8 @@ const link = @import("../../link.zig"); const log = std.log.scoped(.link); const mem = std.mem; const relocation = @import("relocation.zig"); -const trace = @import("../../tracy.zig").trace; const target_util = @import("../../target.zig"); +const trace = @import("../../tracy.zig").trace; const std = @import("std"); const Air = @import("../../Air.zig"); @@ -1806,8 +1709,6 @@ const File = @import("file.zig").File; const InternPool = @import("../../InternPool.zig"); const Liveness = @import("../../Liveness.zig"); const Zcu = @import("../../Zcu.zig"); -/// Deprecated. 
-const Module = Zcu; const Object = @import("Object.zig"); const Symbol = @import("Symbol.zig"); const StringTable = @import("../StringTable.zig"); diff --git a/src/link/MachO.zig b/src/link/MachO.zig index d57a7ff7c097..7c0b79a0f170 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -2998,21 +2998,17 @@ pub fn updateFunc(self: *MachO, pt: Zcu.PerThread, func_index: InternPool.Index, return self.getZigObject().?.updateFunc(self, pt, func_index, air, liveness); } -pub fn lowerUnnamedConst(self: *MachO, pt: Zcu.PerThread, val: Value, decl_index: InternPool.DeclIndex) !u32 { - return self.getZigObject().?.lowerUnnamedConst(self, pt, val, decl_index); -} - -pub fn updateDecl(self: *MachO, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { +pub fn updateNav(self: *MachO, pt: Zcu.PerThread, nav: InternPool.Nav.Index) !void { if (build_options.skip_non_native and builtin.object_format != .macho) { @panic("Attempted to compile for object format that was disabled by build configuration"); } - if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(pt, decl_index); - return self.getZigObject().?.updateDecl(self, pt, decl_index); + if (self.llvm_object) |llvm_object| return llvm_object.updateNav(pt, nav); + return self.getZigObject().?.updateNav(self, pt, nav); } -pub fn updateDeclLineNumber(self: *MachO, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { +pub fn updateNavLineNumber(self: *MachO, pt: Zcu.PerThread, nav: InternPool.NavIndex) !void { if (self.llvm_object) |_| return; - return self.getZigObject().?.updateDeclLineNumber(pt, decl_index); + return self.getZigObject().?.updateNavLineNumber(pt, nav); } pub fn updateExports( @@ -3037,29 +3033,29 @@ pub fn deleteExport( return self.getZigObject().?.deleteExport(self, exported, name); } -pub fn freeDecl(self: *MachO, decl_index: InternPool.DeclIndex) void { - if (self.llvm_object) |llvm_object| return llvm_object.freeDecl(decl_index); - return 
self.getZigObject().?.freeDecl(decl_index); +pub fn freeNav(self: *MachO, nav: InternPool.Nav.Index) void { + if (self.llvm_object) |llvm_object| return llvm_object.freeNav(nav); + return self.getZigObject().?.freeNav(nav); } -pub fn getDeclVAddr(self: *MachO, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex, reloc_info: link.File.RelocInfo) !u64 { +pub fn getNavVAddr(self: *MachO, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index, reloc_info: link.File.RelocInfo) !u64 { assert(self.llvm_object == null); - return self.getZigObject().?.getDeclVAddr(self, pt, decl_index, reloc_info); + return self.getZigObject().?.getNavVAddr(self, pt, nav_index, reloc_info); } -pub fn lowerAnonDecl( +pub fn lowerUav( self: *MachO, pt: Zcu.PerThread, - decl_val: InternPool.Index, + uav: InternPool.Index, explicit_alignment: InternPool.Alignment, src_loc: Module.LazySrcLoc, -) !codegen.Result { - return self.getZigObject().?.lowerAnonDecl(self, pt, decl_val, explicit_alignment, src_loc); +) !codegen.GenResult { + return self.getZigObject().?.lowerUav(self, pt, uav, explicit_alignment, src_loc); } -pub fn getAnonDeclVAddr(self: *MachO, decl_val: InternPool.Index, reloc_info: link.File.RelocInfo) !u64 { +pub fn getUavVAddr(self: *MachO, uav: InternPool.Index, reloc_info: link.File.RelocInfo) !u64 { assert(self.llvm_object == null); - return self.getZigObject().?.getAnonDeclVAddr(self, decl_val, reloc_info); + return self.getZigObject().?.getUavVAddr(self, uav, reloc_info); } pub fn getGlobalSymbol(self: *MachO, name: []const u8, lib_name: ?[]const u8) !u32 { @@ -4051,8 +4047,6 @@ const is_hot_update_compatible = switch (builtin.target.os.tag) { const default_entry_symbol_name = "_main"; -pub const base_tag: link.File.Tag = link.File.Tag.macho; - const Section = struct { header: macho.section_64, segment_id: u8, diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig index 8a47a30264b1..d0e0fe377d2d 100644 --- a/src/link/MachO/Atom.zig +++ b/src/link/MachO/Atom.zig @@ -992,6 
+992,8 @@ pub fn writeRelocs(self: Atom, macho_file: *MachO, code: []u8, buffer: []macho.r const tracy = trace(@src()); defer tracy.end(); + relocs_log.debug("{x}: {s}", .{ self.getAddress(macho_file), self.getName(macho_file) }); + const cpu_arch = macho_file.getTarget().cpu.arch; const relocs = self.getRelocs(macho_file); @@ -1015,6 +1017,24 @@ pub fn writeRelocs(self: Atom, macho_file: *MachO, code: []u8, buffer: []macho.r addend += target; } + switch (rel.tag) { + .local => relocs_log.debug(" {}: [{x} => {d}({s},{s})] + {x}", .{ + rel.fmtPretty(cpu_arch), + r_address, + r_symbolnum, + macho_file.sections.items(.header)[r_symbolnum - 1].segName(), + macho_file.sections.items(.header)[r_symbolnum - 1].sectName(), + addend, + }), + .@"extern" => relocs_log.debug(" {}: [{x} => {d}({s})] + {x}", .{ + rel.fmtPretty(cpu_arch), + r_address, + r_symbolnum, + rel.getTargetSymbol(self, macho_file).getName(macho_file), + addend, + }), + } + switch (cpu_arch) { .aarch64 => { if (rel.type == .unsigned) switch (rel.meta.length) { diff --git a/src/link/MachO/ZigObject.zig b/src/link/MachO/ZigObject.zig index 896bcf7afc20..7d69e4ad7664 100644 --- a/src/link/MachO/ZigObject.zig +++ b/src/link/MachO/ZigObject.zig @@ -19,32 +19,11 @@ atoms_extra: std.ArrayListUnmanaged(u32) = .{}, /// Table of tracked LazySymbols. lazy_syms: LazySymbolTable = .{}, -/// Table of tracked Decls. -decls: DeclTable = .{}, - -/// Table of unnamed constants associated with a parent `Decl`. -/// We store them here so that we can free the constants whenever the `Decl` -/// needs updating or is freed. -/// -/// For example, -/// -/// ```zig -/// const Foo = struct{ -/// a: u8, -/// }; -/// -/// pub fn main() void { -/// var foo = Foo{ .a = 1 }; -/// _ = foo; -/// } -/// ``` -/// -/// value assigned to label `foo` is an unnamed constant belonging/associated -/// with `Decl` `main`, and lives as long as that `Decl`. -unnamed_consts: UnnamedConstTable = .{}, - -/// Table of tracked AnonDecls. 
-anon_decls: AnonDeclTable = .{}, +/// Table of tracked Navs. +navs: NavTable = .{}, + +/// Table of tracked Uavs. +uavs: UavTable = .{}, /// TLV initializers indexed by Atom.Index. tlv_initializers: TlvInitializerTable = .{}, @@ -100,31 +79,17 @@ pub fn deinit(self: *ZigObject, allocator: Allocator) void { self.atoms_indexes.deinit(allocator); self.atoms_extra.deinit(allocator); - { - var it = self.decls.iterator(); - while (it.next()) |entry| { - entry.value_ptr.exports.deinit(allocator); - } - self.decls.deinit(allocator); + for (self.navs.values()) |*meta| { + meta.exports.deinit(allocator); } + self.navs.deinit(allocator); self.lazy_syms.deinit(allocator); - { - var it = self.unnamed_consts.valueIterator(); - while (it.next()) |syms| { - syms.deinit(allocator); - } - self.unnamed_consts.deinit(allocator); - } - - { - var it = self.anon_decls.iterator(); - while (it.next()) |entry| { - entry.value_ptr.exports.deinit(allocator); - } - self.anon_decls.deinit(allocator); + for (self.uavs.values()) |*meta| { + meta.exports.deinit(allocator); } + self.uavs.deinit(allocator); for (self.relocs.items) |*list| { list.deinit(allocator); @@ -601,7 +566,7 @@ pub fn getInputSection(self: ZigObject, atom: Atom, macho_file: *MachO) macho.se pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id) !void { // Handle any lazy symbols that were emitted by incremental compilation. 
- if (self.lazy_syms.getPtr(.none)) |metadata| { + if (self.lazy_syms.getPtr(.anyerror_type)) |metadata| { const pt: Zcu.PerThread = .{ .zcu = macho_file.base.comp.module.?, .tid = tid }; // Most lazy symbols can be updated on first use, but @@ -609,7 +574,7 @@ pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id) if (metadata.text_state != .unused) self.updateLazySymbol( macho_file, pt, - link.File.LazySymbol.initDecl(.code, null, pt.zcu), + .{ .kind = .code, .ty = .anyerror_type }, metadata.text_symbol_index, ) catch |err| return switch (err) { error.CodegenFail => error.FlushFailure, @@ -618,7 +583,7 @@ pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id) if (metadata.const_state != .unused) self.updateLazySymbol( macho_file, pt, - link.File.LazySymbol.initDecl(.const_data, null, pt.zcu), + .{ .kind = .const_data, .ty = .anyerror_type }, metadata.const_symbol_index, ) catch |err| return switch (err) { error.CodegenFail => error.FlushFailure, @@ -691,25 +656,25 @@ pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id) assert(!self.debug_strtab_dirty); } -pub fn getDeclVAddr( +pub fn getNavVAddr( self: *ZigObject, macho_file: *MachO, pt: Zcu.PerThread, - decl_index: InternPool.DeclIndex, + nav_index: InternPool.Nav.Index, reloc_info: link.File.RelocInfo, ) !u64 { const zcu = pt.zcu; const ip = &zcu.intern_pool; - const decl = zcu.declPtr(decl_index); - log.debug("getDeclVAddr {}({d})", .{ decl.fqn.fmt(ip), decl_index }); - const sym_index = if (decl.isExtern(zcu)) blk: { - const name = decl.name.toSlice(ip); - const lib_name = if (decl.getOwnedExternFunc(zcu)) |ext_fn| - ext_fn.lib_name.toSlice(ip) - else - decl.getOwnedVariable(zcu).?.lib_name.toSlice(ip); - break :blk try self.getGlobalSymbol(macho_file, name, lib_name); - } else try self.getOrCreateMetadataForDecl(macho_file, decl_index); + const nav = ip.getNav(nav_index); + log.debug("getNavVAddr {}({d})", .{ nav.fqn.fmt(ip), 
nav_index }); + const sym_index = switch (ip.indexToKey(nav.status.resolved.val)) { + .@"extern" => |@"extern"| try self.getGlobalSymbol( + macho_file, + nav.name.toSlice(ip), + @"extern".lib_name.toSlice(ip), + ), + else => try self.getOrCreateMetadataForNav(macho_file, nav_index), + }; const sym = self.symbols.items[sym_index]; const vaddr = sym.getAddress(.{}, macho_file); const parent_atom = self.symbols.items[reloc_info.parent_atom_index].getAtom(macho_file).?; @@ -729,13 +694,13 @@ pub fn getDeclVAddr( return vaddr; } -pub fn getAnonDeclVAddr( +pub fn getUavVAddr( self: *ZigObject, macho_file: *MachO, - decl_val: InternPool.Index, + uav: InternPool.Index, reloc_info: link.File.RelocInfo, ) !u64 { - const sym_index = self.anon_decls.get(decl_val).?.symbol_index; + const sym_index = self.uavs.get(uav).?.symbol_index; const sym = self.symbols.items[sym_index]; const vaddr = sym.getAddress(.{}, macho_file); const parent_atom = self.symbols.items[reloc_info.parent_atom_index].getAtom(macho_file).?; @@ -755,42 +720,43 @@ pub fn getAnonDeclVAddr( return vaddr; } -pub fn lowerAnonDecl( +pub fn lowerUav( self: *ZigObject, macho_file: *MachO, pt: Zcu.PerThread, - decl_val: InternPool.Index, + uav: InternPool.Index, explicit_alignment: Atom.Alignment, - src_loc: Module.LazySrcLoc, -) !codegen.Result { - const gpa = macho_file.base.comp.gpa; - const mod = macho_file.base.comp.module.?; - const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val)); - const decl_alignment = switch (explicit_alignment) { - .none => ty.abiAlignment(pt), + src_loc: Zcu.LazySrcLoc, +) !codegen.GenResult { + const zcu = pt.zcu; + const gpa = zcu.gpa; + const val = Value.fromInterned(uav); + const uav_alignment = switch (explicit_alignment) { + .none => val.typeOf(zcu).abiAlignment(pt), else => explicit_alignment, }; - if (self.anon_decls.get(decl_val)) |metadata| { - const existing_alignment = self.symbols.items[metadata.symbol_index].getAtom(macho_file).?.alignment; - if 
(decl_alignment.order(existing_alignment).compare(.lte)) - return .ok; + if (self.uavs.get(uav)) |metadata| { + const sym = self.symbols.items[metadata.symbol_index]; + const existing_alignment = sym.getAtom(macho_file).?.alignment; + if (uav_alignment.order(existing_alignment).compare(.lte)) + return .{ .mcv = .{ .load_symbol = sym.nlist_idx } }; } var name_buf: [32]u8 = undefined; const name = std.fmt.bufPrint(&name_buf, "__anon_{d}", .{ - @intFromEnum(decl_val), + @intFromEnum(uav), }) catch unreachable; const res = self.lowerConst( macho_file, pt, name, - Value.fromInterned(decl_val), - decl_alignment, + val, + uav_alignment, macho_file.zig_const_sect_index.?, src_loc, ) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, - else => |e| return .{ .fail = try Module.ErrorMsg.create( + else => |e| return .{ .fail = try Zcu.ErrorMsg.create( gpa, src_loc, "unable to lower constant value: {s}", @@ -801,20 +767,13 @@ pub fn lowerAnonDecl( .ok => |sym_index| sym_index, .fail => |em| return .{ .fail = em }, }; - try self.anon_decls.put(gpa, decl_val, .{ .symbol_index = sym_index }); - return .ok; -} - -fn freeUnnamedConsts(self: *ZigObject, macho_file: *MachO, decl_index: InternPool.DeclIndex) void { - const gpa = macho_file.base.comp.gpa; - const unnamed_consts = self.unnamed_consts.getPtr(decl_index) orelse return; - for (unnamed_consts.items) |sym_index| { - self.freeDeclMetadata(macho_file, sym_index); - } - unnamed_consts.clearAndFree(gpa); + try self.uavs.put(gpa, uav, .{ .symbol_index = sym_index }); + return .{ .mcv = .{ + .load_symbol = self.symbols.items[sym_index].nlist_idx, + } }; } -fn freeDeclMetadata(self: *ZigObject, macho_file: *MachO, sym_index: Symbol.Index) void { +fn freeNavMetadata(self: *ZigObject, macho_file: *MachO, sym_index: Symbol.Index) void { const sym = self.symbols.items[sym_index]; sym.getAtom(macho_file).?.free(macho_file); log.debug("adding %{d} to local symbols free list", .{sym_index}); @@ -822,18 +781,14 @@ fn 
freeDeclMetadata(self: *ZigObject, macho_file: *MachO, sym_index: Symbol.Inde // TODO free GOT entry here } -pub fn freeDecl(self: *ZigObject, macho_file: *MachO, decl_index: InternPool.DeclIndex) void { +pub fn freeNav(self: *ZigObject, macho_file: *MachO, nav_index: InternPool.Nav.Index) void { const gpa = macho_file.base.comp.gpa; - const mod = macho_file.base.comp.module.?; - const decl = mod.declPtr(decl_index); + log.debug("freeNav 0x{x}", .{nav_index}); - log.debug("freeDecl {*}", .{decl}); - - if (self.decls.fetchRemove(decl_index)) |const_kv| { + if (self.navs.fetchRemove(nav_index)) |const_kv| { var kv = const_kv; const sym_index = kv.value.symbol_index; - self.freeDeclMetadata(macho_file, sym_index); - self.freeUnnamedConsts(macho_file, decl_index); + self.freeNavMetadata(macho_file, sym_index); kv.value.exports.deinit(gpa); } @@ -851,51 +806,46 @@ pub fn updateFunc( const tracy = trace(@src()); defer tracy.end(); - const mod = pt.zcu; - const gpa = mod.gpa; - const func = mod.funcInfo(func_index); - const decl_index = func.owner_decl; - const decl = mod.declPtr(decl_index); + const zcu = pt.zcu; + const gpa = zcu.gpa; + const func = zcu.funcInfo(func_index); - const sym_index = try self.getOrCreateMetadataForDecl(macho_file, decl_index); - self.freeUnnamedConsts(macho_file, decl_index); + const sym_index = try self.getOrCreateMetadataForNav(macho_file, func.owner_nav); self.symbols.items[sym_index].getAtom(macho_file).?.freeRelocs(macho_file); var code_buffer = std.ArrayList(u8).init(gpa); defer code_buffer.deinit(); - var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(pt, decl_index) else null; - defer if (decl_state) |*ds| ds.deinit(); + var dwarf_state = if (self.dwarf) |*dw| try dw.initNavState(pt, func.owner_nav) else null; + defer if (dwarf_state) |*ds| ds.deinit(); - const dio: codegen.DebugInfoOutput = if (decl_state) |*ds| .{ .dwarf = ds } else .none; const res = try codegen.generateFunction( &macho_file.base, pt, - 
decl.navSrcLoc(mod), + zcu.navSrcLoc(func.owner_nav), func_index, air, liveness, &code_buffer, - dio, + if (dwarf_state) |*ds| .{ .dwarf = ds } else .none, ); const code = switch (res) { .ok => code_buffer.items, .fail => |em| { - func.setAnalysisState(&mod.intern_pool, .codegen_failure); - try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); + try zcu.failed_codegen.put(gpa, func.owner_nav, em); return; }, }; - const sect_index = try self.getDeclOutputSection(macho_file, decl, code); - try self.updateDeclCode(macho_file, pt, decl_index, sym_index, sect_index, code); + const sect_index = try self.getNavOutputSection(macho_file, zcu, func.owner_nav, code); + try self.updateNavCode(macho_file, pt, func.owner_nav, sym_index, sect_index, code); - if (decl_state) |*ds| { + if (dwarf_state) |*ds| { const sym = self.symbols.items[sym_index]; - try self.dwarf.?.commitDeclState( + try self.dwarf.?.commitNavState( pt, - decl_index, + func.owner_nav, sym.getAddress(.{}, macho_file), sym.getAtom(macho_file).?.size, ds, @@ -905,96 +855,98 @@ pub fn updateFunc( // Exports will be updated by `Zcu.processExports` after the update. 
} -pub fn updateDecl( +pub fn updateNav( self: *ZigObject, macho_file: *MachO, pt: Zcu.PerThread, - decl_index: InternPool.DeclIndex, -) link.File.UpdateDeclError!void { + nav_index: InternPool.Nav.Index, +) link.File.UpdateNavError!void { const tracy = trace(@src()); defer tracy.end(); - const mod = pt.zcu; - const decl = mod.declPtr(decl_index); - - if (decl.val.getExternFunc(mod)) |_| { - return; - } - - if (decl.isExtern(mod)) { - // Extern variable gets a __got entry only - const variable = decl.getOwnedVariable(mod).?; - const name = decl.name.toSlice(&mod.intern_pool); - const lib_name = variable.lib_name.toSlice(&mod.intern_pool); - const index = try self.getGlobalSymbol(macho_file, name, lib_name); - const sym = &self.symbols.items[index]; - sym.setSectionFlags(.{ .needs_got = true }); - return; - } + const zcu = pt.zcu; + const ip = &zcu.intern_pool; + const nav_val = zcu.navValue(nav_index); + const nav_init = switch (ip.indexToKey(nav_val.toIntern())) { + .variable => |variable| Value.fromInterned(variable.init), + .@"extern" => |@"extern"| { + if (ip.isFunctionType(@"extern".ty)) return; + // Extern variable gets a __got entry only + const name = @"extern".name.toSlice(ip); + const lib_name = @"extern".lib_name.toSlice(ip); + const index = try self.getGlobalSymbol(macho_file, name, lib_name); + const sym = &self.symbols.items[index]; + sym.setSectionFlags(.{ .needs_got = true }); + return; + }, + else => nav_val, + }; - const sym_index = try self.getOrCreateMetadataForDecl(macho_file, decl_index); + const sym_index = try self.getOrCreateMetadataForNav(macho_file, nav_index); self.symbols.items[sym_index].getAtom(macho_file).?.freeRelocs(macho_file); - const gpa = macho_file.base.comp.gpa; - var code_buffer = std.ArrayList(u8).init(gpa); + var code_buffer = std.ArrayList(u8).init(zcu.gpa); defer code_buffer.deinit(); - var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(pt, decl_index) else null; - defer if (decl_state) |*ds| 
ds.deinit(); + var nav_state: ?Dwarf.NavState = if (self.dwarf) |*dw| try dw.initNavState(pt, nav_index) else null; + defer if (nav_state) |*ns| ns.deinit(); - const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val; - const dio: codegen.DebugInfoOutput = if (decl_state) |*ds| .{ .dwarf = ds } else .none; - const res = try codegen.generateSymbol(&macho_file.base, pt, decl.navSrcLoc(mod), decl_val, &code_buffer, dio, .{ - .parent_atom_index = sym_index, - }); + const res = try codegen.generateSymbol( + &macho_file.base, + pt, + zcu.navSrcLoc(nav_index), + nav_init, + &code_buffer, + if (nav_state) |*ns| .{ .dwarf = ns } else .none, + .{ .parent_atom_index = sym_index }, + ); const code = switch (res) { .ok => code_buffer.items, .fail => |em| { - decl.analysis = .codegen_failure; - try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); + try zcu.failed_codegen.put(zcu.gpa, nav_index, em); return; }, }; - if (isThreadlocal(macho_file, decl_index)) { - const sect_index = try self.getDeclOutputSection(macho_file, decl, code); - try self.updateTlv(macho_file, pt, decl_index, sym_index, sect_index, code); - } else { - const sect_index = try self.getDeclOutputSection(macho_file, decl, code); - try self.updateDeclCode(macho_file, pt, decl_index, sym_index, sect_index, code); - } + const sect_index = try self.getNavOutputSection(macho_file, zcu, nav_index, code); + if (isThreadlocal(macho_file, nav_index)) + try self.updateTlv(macho_file, pt, nav_index, sym_index, sect_index, code) + else + try self.updateNavCode(macho_file, pt, nav_index, sym_index, sect_index, code); - if (decl_state) |*ds| { + if (nav_state) |*ns| { const sym = self.symbols.items[sym_index]; - try self.dwarf.?.commitDeclState( + try self.dwarf.?.commitNavState( pt, - decl_index, + nav_index, sym.getAddress(.{}, macho_file), sym.getAtom(macho_file).?.size, - ds, + ns, ); } // Exports will be updated by `Zcu.processExports` after 
the update. } -fn updateDeclCode( +fn updateNavCode( self: *ZigObject, macho_file: *MachO, pt: Zcu.PerThread, - decl_index: InternPool.DeclIndex, + nav_index: InternPool.Nav.Index, sym_index: Symbol.Index, sect_index: u8, code: []const u8, ) !void { - const gpa = macho_file.base.comp.gpa; - const mod = pt.zcu; - const ip = &mod.intern_pool; - const decl = mod.declPtr(decl_index); + const zcu = pt.zcu; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + const nav = ip.getNav(nav_index); - log.debug("updateDeclCode {}{*}", .{ decl.fqn.fmt(ip), decl }); + log.debug("updateNavCode {} 0x{x}", .{ nav.fqn.fmt(ip), nav_index }); - const required_alignment = decl.getAlignment(pt); + const required_alignment = pt.navAlignment(nav_index).max( + target_util.minFunctionAlignment(zcu.navFileScope(nav_index).mod.resolved_target.result), + ); const sect = &macho_file.sections.items(.header)[sect_index]; const sym = &self.symbols.items[sym_index]; @@ -1004,7 +956,7 @@ fn updateDeclCode( sym.out_n_sect = sect_index; atom.out_n_sect = sect_index; - const sym_name = try std.fmt.allocPrintZ(gpa, "_{s}", .{decl.fqn.toSlice(ip)}); + const sym_name = try std.fmt.allocPrintZ(gpa, "_{s}", .{nav.fqn.toSlice(ip)}); defer gpa.free(sym_name); sym.name = try self.addString(gpa, sym_name); atom.setAlive(true); @@ -1025,7 +977,7 @@ fn updateDeclCode( if (need_realloc) { try atom.grow(macho_file); - log.debug("growing {} from 0x{x} to 0x{x}", .{ decl.fqn.fmt(ip), old_vaddr, atom.value }); + log.debug("growing {} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), old_vaddr, atom.value }); if (old_vaddr != atom.value) { sym.value = 0; nlist.n_value = 0; @@ -1045,7 +997,7 @@ fn updateDeclCode( } } else { try atom.allocate(macho_file); - errdefer self.freeDeclMetadata(macho_file, sym_index); + errdefer self.freeNavMetadata(macho_file, sym_index); sym.value = 0; sym.setSectionFlags(.{ .needs_zig_got = true }); @@ -1070,27 +1022,27 @@ fn updateTlv( self: *ZigObject, macho_file: *MachO, pt: Zcu.PerThread, - 
decl_index: InternPool.DeclIndex, + nav_index: InternPool.Nav.Index, sym_index: Symbol.Index, sect_index: u8, code: []const u8, ) !void { const ip = &pt.zcu.intern_pool; - const decl = pt.zcu.declPtr(decl_index); + const nav = ip.getNav(nav_index); - log.debug("updateTlv {} ({*})", .{ decl.fqn.fmt(&pt.zcu.intern_pool), decl }); + log.debug("updateTlv {} (0x{x})", .{ nav.fqn.fmt(ip), nav_index }); // 1. Lower TLV initializer const init_sym_index = try self.createTlvInitializer( macho_file, - decl.fqn.toSlice(ip), - decl.getAlignment(pt), + nav.fqn.toSlice(ip), + pt.navAlignment(nav_index), sect_index, code, ); // 2. Create TLV descriptor - try self.createTlvDescriptor(macho_file, sym_index, init_sym_index, decl.fqn.toSlice(ip)); + try self.createTlvDescriptor(macho_file, sym_index, init_sym_index, nav.fqn.toSlice(ip)); } fn createTlvInitializer( @@ -1197,102 +1149,52 @@ fn createTlvDescriptor( }); } -fn getDeclOutputSection( +fn getNavOutputSection( self: *ZigObject, macho_file: *MachO, - decl: *const Module.Decl, + zcu: *Zcu, + nav_index: InternPool.Nav.Index, code: []const u8, ) error{OutOfMemory}!u8 { _ = self; - const mod = macho_file.base.comp.module.?; + const ip = &zcu.intern_pool; const any_non_single_threaded = macho_file.base.comp.config.any_non_single_threaded; - const sect_id: u8 = switch (decl.typeOf(mod).zigTypeTag(mod)) { - .Fn => macho_file.zig_text_sect_index.?, - else => blk: { - if (decl.getOwnedVariable(mod)) |variable| { - if (variable.is_threadlocal and any_non_single_threaded) { - const is_all_zeroes = for (code) |byte| { - if (byte != 0) break false; - } else true; - if (is_all_zeroes) break :blk macho_file.getSectionByName("__DATA", "__thread_bss") orelse try macho_file.addSection( - "__DATA", - "__thread_bss", - .{ .flags = macho.S_THREAD_LOCAL_ZEROFILL }, - ); - break :blk macho_file.getSectionByName("__DATA", "__thread_data") orelse try macho_file.addSection( - "__DATA", - "__thread_data", - .{ .flags = macho.S_THREAD_LOCAL_REGULAR }, - 
); - } - - if (variable.is_const) break :blk macho_file.zig_const_sect_index.?; - if (Value.fromInterned(variable.init).isUndefDeep(mod)) { - // TODO: get the optimize_mode from the Module that owns the decl instead - // of using the root module here. - break :blk switch (macho_file.base.comp.root_mod.optimize_mode) { - .Debug, .ReleaseSafe => macho_file.zig_data_sect_index.?, - .ReleaseFast, .ReleaseSmall => macho_file.zig_bss_sect_index.?, - }; - } - - // TODO I blatantly copied the logic from the Wasm linker, but is there a less - // intrusive check for all zeroes than this? - const is_all_zeroes = for (code) |byte| { - if (byte != 0) break false; - } else true; - if (is_all_zeroes) break :blk macho_file.zig_bss_sect_index.?; - break :blk macho_file.zig_data_sect_index.?; - } - break :blk macho_file.zig_const_sect_index.?; - }, + const nav_val = zcu.navValue(nav_index); + if (ip.isFunctionType(nav_val.typeOf(zcu).toIntern())) return macho_file.zig_text_sect_index.?; + const is_const, const is_threadlocal, const nav_init = switch (ip.indexToKey(nav_val.toIntern())) { + .variable => |variable| .{ false, variable.is_threadlocal, variable.init }, + .@"extern" => |@"extern"| .{ @"extern".is_const, @"extern".is_threadlocal, .none }, + else => .{ true, false, nav_val.toIntern() }, }; - return sect_id; -} - -pub fn lowerUnnamedConst( - self: *ZigObject, - macho_file: *MachO, - pt: Zcu.PerThread, - val: Value, - decl_index: InternPool.DeclIndex, -) !u32 { - const mod = pt.zcu; - const gpa = mod.gpa; - const gop = try self.unnamed_consts.getOrPut(gpa, decl_index); - if (!gop.found_existing) { - gop.value_ptr.* = .{}; + if (any_non_single_threaded and is_threadlocal) { + for (code) |byte| { + if (byte != 0) break; + } else return macho_file.getSectionByName("__DATA", "__thread_bss") orelse try macho_file.addSection( + "__DATA", + "__thread_bss", + .{ .flags = macho.S_THREAD_LOCAL_ZEROFILL }, + ); + return macho_file.getSectionByName("__DATA", "__thread_data") orelse try 
macho_file.addSection( + "__DATA", + "__thread_data", + .{ .flags = macho.S_THREAD_LOCAL_REGULAR }, + ); } - const unnamed_consts = gop.value_ptr; - const decl = mod.declPtr(decl_index); - const index = unnamed_consts.items.len; - const name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl.fqn.fmt(&mod.intern_pool), index }); - defer gpa.free(name); - const sym_index = switch (try self.lowerConst( - macho_file, - pt, - name, - val, - val.typeOf(mod).abiAlignment(pt), - macho_file.zig_const_sect_index.?, - decl.navSrcLoc(mod), - )) { - .ok => |sym_index| sym_index, - .fail => |em| { - decl.analysis = .codegen_failure; - try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); - log.err("{s}", .{em.msg}); - return error.CodegenFail; - }, - }; - const sym = self.symbols.items[sym_index]; - try unnamed_consts.append(gpa, sym.atom_ref.index); - return sym_index; + if (is_const) return macho_file.zig_const_sect_index.?; + if (nav_init != .none and Value.fromInterned(nav_init).isUndefDeep(zcu)) + return switch (zcu.navFileScope(nav_index).mod.optimize_mode) { + .Debug, .ReleaseSafe => macho_file.zig_data_sect_index.?, + .ReleaseFast, .ReleaseSmall => macho_file.zig_bss_sect_index.?, + }; + for (code) |byte| { + if (byte != 0) break; + } else return macho_file.zig_bss_sect_index.?; + return macho_file.zig_data_sect_index.?; } const LowerConstResult = union(enum) { ok: Symbol.Index, - fail: *Module.ErrorMsg, + fail: *Zcu.ErrorMsg, }; fn lowerConst( @@ -1303,7 +1205,7 @@ fn lowerConst( val: Value, required_alignment: Atom.Alignment, output_section_index: u8, - src_loc: Module.LazySrcLoc, + src_loc: Zcu.LazySrcLoc, ) !LowerConstResult { const gpa = macho_file.base.comp.gpa; @@ -1338,7 +1240,7 @@ fn lowerConst( try atom.allocate(macho_file); // TODO rename and re-audit this method - errdefer self.freeDeclMetadata(macho_file, sym_index); + errdefer self.freeNavMetadata(macho_file, sym_index); const sect = 
macho_file.sections.items(.header)[output_section_index]; const file_offset = sect.offset + atom.value; @@ -1351,7 +1253,7 @@ pub fn updateExports( self: *ZigObject, macho_file: *MachO, pt: Zcu.PerThread, - exported: Module.Exported, + exported: Zcu.Exported, export_indices: []const u32, ) link.File.UpdateExportsError!void { const tracy = trace(@src()); @@ -1360,24 +1262,24 @@ pub fn updateExports( const mod = pt.zcu; const gpa = macho_file.base.comp.gpa; const metadata = switch (exported) { - .decl_index => |decl_index| blk: { - _ = try self.getOrCreateMetadataForDecl(macho_file, decl_index); - break :blk self.decls.getPtr(decl_index).?; + .nav => |nav| blk: { + _ = try self.getOrCreateMetadataForNav(macho_file, nav); + break :blk self.navs.getPtr(nav).?; }, - .value => |value| self.anon_decls.getPtr(value) orelse blk: { + .uav => |uav| self.uavs.getPtr(uav) orelse blk: { const first_exp = mod.all_exports.items[export_indices[0]]; - const res = try self.lowerAnonDecl(macho_file, pt, value, .none, first_exp.src); + const res = try self.lowerUav(macho_file, pt, uav, .none, first_exp.src); switch (res) { - .ok => {}, + .mcv => {}, .fail => |em| { - // TODO maybe it's enough to return an error here and let Module.processExportsInner + // TODO maybe it's enough to return an error here and let Zcu.processExportsInner // handle the error? 
try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); mod.failed_exports.putAssumeCapacityNoClobber(export_indices[0], em); return; }, } - break :blk self.anon_decls.getPtr(value).?; + break :blk self.uavs.getPtr(uav).?; }, }; const sym_index = metadata.symbol_index; @@ -1389,7 +1291,7 @@ pub fn updateExports( if (exp.opts.section.unwrap()) |section_name| { if (!section_name.eqlSlice("__text", &mod.intern_pool)) { try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); - mod.failed_exports.putAssumeCapacityNoClobber(export_idx, try Module.ErrorMsg.create( + mod.failed_exports.putAssumeCapacityNoClobber(export_idx, try Zcu.ErrorMsg.create( gpa, exp.src, "Unimplemented: ExportOptions.section", @@ -1399,7 +1301,7 @@ pub fn updateExports( } } if (exp.opts.linkage == .link_once) { - try mod.failed_exports.putNoClobber(mod.gpa, export_idx, try Module.ErrorMsg.create( + try mod.failed_exports.putNoClobber(mod.gpa, export_idx, try Zcu.ErrorMsg.create( gpa, exp.src, "Unimplemented: GlobalLinkage.link_once", @@ -1454,8 +1356,8 @@ fn updateLazySymbol( lazy_sym: link.File.LazySymbol, symbol_index: Symbol.Index, ) !void { - const gpa = macho_file.base.comp.gpa; - const mod = macho_file.base.comp.module.?; + const zcu = pt.zcu; + const gpa = zcu.gpa; var required_alignment: Atom.Alignment = .none; var code_buffer = std.ArrayList(u8).init(gpa); @@ -1464,13 +1366,13 @@ fn updateLazySymbol( const name_str = blk: { const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{}", .{ @tagName(lazy_sym.kind), - lazy_sym.ty.fmt(pt), + Type.fromInterned(lazy_sym.ty).fmt(pt), }); defer gpa.free(name); break :blk try self.addString(gpa, name); }; - const src = lazy_sym.ty.srcLocOrNull(mod) orelse Module.LazySrcLoc.unneeded; + const src = Type.fromInterned(lazy_sym.ty).srcLocOrNull(zcu) orelse Zcu.LazySrcLoc.unneeded; const res = try codegen.generateLazySymbol( &macho_file.base, pt, @@ -1511,7 +1413,7 @@ fn updateLazySymbol( atom.out_n_sect = output_section_index; try 
atom.allocate(macho_file); - errdefer self.freeDeclMetadata(macho_file, symbol_index); + errdefer self.freeNavMetadata(macho_file, symbol_index); sym.value = 0; sym.setSectionFlags(.{ .needs_zig_got = true }); @@ -1527,10 +1429,14 @@ fn updateLazySymbol( try macho_file.base.file.?.pwriteAll(code, file_offset); } -/// Must be called only after a successful call to `updateDecl`. -pub fn updateDeclLineNumber(self: *ZigObject, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { +/// Must be called only after a successful call to `updateNav`. +pub fn updateNavLineNumber( + self: *ZigObject, + pt: Zcu.PerThread, + nav_index: InternPool.Nav.Index, +) !void { if (self.dwarf) |*dw| { - try dw.updateDeclLineNumber(pt.zcu, decl_index); + try dw.updateNavLineNumber(pt.zcu, nav_index); } } @@ -1543,9 +1449,9 @@ pub fn deleteExport( const mod = macho_file.base.comp.module.?; const metadata = switch (exported) { - .decl_index => |decl_index| self.decls.getPtr(decl_index) orelse return, - .value => |value| self.anon_decls.getPtr(value) orelse return, - }; + .nav => |nav| self.navs.getPtr(nav), + .uav => |uav| self.uavs.getPtr(uav), + } orelse return; const nlist_index = metadata.@"export"(self, name.toSlice(&mod.intern_pool)) orelse return; log.debug("deleting export '{}'", .{name.fmt(&mod.intern_pool)}); @@ -1577,17 +1483,17 @@ pub fn getGlobalSymbol(self: *ZigObject, macho_file: *MachO, name: []const u8, l return lookup_gop.value_ptr.*; } -pub fn getOrCreateMetadataForDecl( +pub fn getOrCreateMetadataForNav( self: *ZigObject, macho_file: *MachO, - decl_index: InternPool.DeclIndex, + nav_index: InternPool.Nav.Index, ) !Symbol.Index { const gpa = macho_file.base.comp.gpa; - const gop = try self.decls.getOrPut(gpa, decl_index); + const gop = try self.navs.getOrPut(gpa, nav_index); if (!gop.found_existing) { const sym_index = try self.newSymbolWithAtom(gpa, .{}, macho_file); const sym = &self.symbols.items[sym_index]; - if (isThreadlocal(macho_file, decl_index)) { + if 
(isThreadlocal(macho_file, nav_index)) { sym.flags.tlv = true; } else { sym.setSectionFlags(.{ .needs_zig_got = true }); @@ -1603,47 +1509,39 @@ pub fn getOrCreateMetadataForLazySymbol( pt: Zcu.PerThread, lazy_sym: link.File.LazySymbol, ) !Symbol.Index { - const mod = pt.zcu; - const gpa = mod.gpa; - const gop = try self.lazy_syms.getOrPut(gpa, lazy_sym.getDecl(mod)); + const gop = try self.lazy_syms.getOrPut(pt.zcu.gpa, lazy_sym.ty); errdefer _ = if (!gop.found_existing) self.lazy_syms.pop(); if (!gop.found_existing) gop.value_ptr.* = .{}; - const metadata: struct { - symbol_index: *Symbol.Index, - state: *LazySymbolMetadata.State, - } = switch (lazy_sym.kind) { - .code => .{ - .symbol_index = &gop.value_ptr.text_symbol_index, - .state = &gop.value_ptr.text_state, - }, - .const_data => .{ - .symbol_index = &gop.value_ptr.const_symbol_index, - .state = &gop.value_ptr.const_state, - }, + const symbol_index_ptr, const state_ptr = switch (lazy_sym.kind) { + .code => .{ &gop.value_ptr.text_symbol_index, &gop.value_ptr.text_state }, + .const_data => .{ &gop.value_ptr.const_symbol_index, &gop.value_ptr.const_state }, }; - switch (metadata.state.*) { + switch (state_ptr.*) { .unused => { - const symbol_index = try self.newSymbolWithAtom(gpa, .{}, macho_file); + const symbol_index = try self.newSymbolWithAtom(pt.zcu.gpa, .{}, macho_file); const sym = &self.symbols.items[symbol_index]; sym.setSectionFlags(.{ .needs_zig_got = true }); - metadata.symbol_index.* = symbol_index; + symbol_index_ptr.* = symbol_index; }, - .pending_flush => return metadata.symbol_index.*, + .pending_flush => return symbol_index_ptr.*, .flushed => {}, } - metadata.state.* = .pending_flush; - const symbol_index = metadata.symbol_index.*; + state_ptr.* = .pending_flush; + const symbol_index = symbol_index_ptr.*; // anyerror needs to be deferred until flushModule - if (lazy_sym.getDecl(mod) != .none) try self.updateLazySymbol(macho_file, pt, lazy_sym, symbol_index); + if (lazy_sym.ty != 
.anyerror_type) try self.updateLazySymbol(macho_file, pt, lazy_sym, symbol_index); return symbol_index; } -fn isThreadlocal(macho_file: *MachO, decl_index: InternPool.DeclIndex) bool { - const any_non_single_threaded = macho_file.base.comp.config.any_non_single_threaded; - const zcu = macho_file.base.comp.module.?; - const decl = zcu.declPtr(decl_index); - const variable = decl.getOwnedVariable(zcu) orelse return false; - return variable.is_threadlocal and any_non_single_threaded; +fn isThreadlocal(macho_file: *MachO, nav_index: InternPool.Nav.Index) bool { + if (!macho_file.base.comp.config.any_non_single_threaded) + return false; + const ip = &macho_file.base.comp.module.?.intern_pool; + return switch (ip.indexToKey(ip.getNav(nav_index).status.resolved.val)) { + .variable => |variable| variable.is_threadlocal, + .@"extern" => |@"extern"| @"extern".is_threadlocal, + else => false, + }; } fn addAtom(self: *ZigObject, allocator: Allocator) !Atom.Index { @@ -1848,12 +1746,12 @@ fn formatAtoms( } } -const DeclMetadata = struct { +const AvMetadata = struct { symbol_index: Symbol.Index, - /// A list of all exports aliases of this Decl. + /// A list of all exports aliases of this Av. 
exports: std.ArrayListUnmanaged(Symbol.Index) = .{}, - fn @"export"(m: DeclMetadata, zig_object: *ZigObject, name: []const u8) ?*u32 { + fn @"export"(m: AvMetadata, zig_object: *ZigObject, name: []const u8) ?*u32 { for (m.exports.items) |*exp| { const nlist = zig_object.symtab.items(.nlist)[exp.*]; const exp_name = zig_object.strtab.getAssumeExists(nlist.n_strx); @@ -1880,10 +1778,9 @@ const TlvInitializer = struct { } }; -const DeclTable = std.AutoHashMapUnmanaged(InternPool.DeclIndex, DeclMetadata); -const UnnamedConstTable = std.AutoHashMapUnmanaged(InternPool.DeclIndex, std.ArrayListUnmanaged(Symbol.Index)); -const AnonDeclTable = std.AutoHashMapUnmanaged(InternPool.Index, DeclMetadata); -const LazySymbolTable = std.AutoArrayHashMapUnmanaged(InternPool.OptionalDeclIndex, LazySymbolMetadata); +const NavTable = std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, AvMetadata); +const UavTable = std.AutoArrayHashMapUnmanaged(InternPool.Index, AvMetadata); +const LazySymbolTable = std.AutoArrayHashMapUnmanaged(InternPool.Index, LazySymbolMetadata); const RelocationTable = std.ArrayListUnmanaged(std.ArrayListUnmanaged(Relocation)); const TlvInitializerTable = std.AutoArrayHashMapUnmanaged(Atom.Index, TlvInitializer); @@ -1894,6 +1791,7 @@ const link = @import("../../link.zig"); const log = std.log.scoped(.link); const macho = std.macho; const mem = std.mem; +const target_util = @import("../../target.zig"); const trace = @import("../../tracy.zig").trace; const std = @import("std"); @@ -1908,8 +1806,6 @@ const Liveness = @import("../../Liveness.zig"); const MachO = @import("../MachO.zig"); const Nlist = Object.Nlist; const Zcu = @import("../../Zcu.zig"); -/// Deprecated. 
-const Module = Zcu; const Object = @import("Object.zig"); const Relocation = @import("Relocation.zig"); const Symbol = @import("Symbol.zig"); diff --git a/src/link/NvPtx.zig b/src/link/NvPtx.zig index 8caaed05da92..cb95779d8e96 100644 --- a/src/link/NvPtx.zig +++ b/src/link/NvPtx.zig @@ -86,8 +86,8 @@ pub fn updateFunc(self: *NvPtx, pt: Zcu.PerThread, func_index: InternPool.Index, try self.llvm_object.updateFunc(pt, func_index, air, liveness); } -pub fn updateDecl(self: *NvPtx, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { - return self.llvm_object.updateDecl(pt, decl_index); +pub fn updateNav(self: *NvPtx, pt: Zcu.PerThread, nav: InternPool.Nav.Index) !void { + return self.llvm_object.updateNav(pt, nav); } pub fn updateExports( diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index e954bf700437..afd4c57ff1ab 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -24,8 +24,6 @@ const Allocator = std.mem.Allocator; const log = std.log.scoped(.link); const assert = std.debug.assert; -pub const base_tag = .plan9; - base: link.File, sixtyfour_bit: bool, bases: Bases, @@ -53,40 +51,19 @@ path_arena: std.heap.ArenaAllocator, /// The debugger looks for the first file (aout.Sym.Type.z) preceeding the text symbol /// of the function to know what file it came from. 
/// If we group the decls by file, it makes it really easy to do this (put the symbol in the correct place) -fn_decl_table: std.AutoArrayHashMapUnmanaged( - *Zcu.File, - struct { sym_index: u32, functions: std.AutoArrayHashMapUnmanaged(InternPool.DeclIndex, FnDeclOutput) = .{} }, +fn_nav_table: std.AutoArrayHashMapUnmanaged( + Zcu.File.Index, + struct { sym_index: u32, functions: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, FnNavOutput) = .{} }, ) = .{}, /// the code is modified when relocated, so that is why it is mutable -data_decl_table: std.AutoArrayHashMapUnmanaged(InternPool.DeclIndex, []u8) = .{}, +data_nav_table: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, []u8) = .{}, /// When `updateExports` is called, we store the export indices here, to be used /// during flush. -decl_exports: std.AutoArrayHashMapUnmanaged(InternPool.DeclIndex, []u32) = .{}, - -/// Table of unnamed constants associated with a parent `Decl`. -/// We store them here so that we can free the constants whenever the `Decl` -/// needs updating or is freed. -/// -/// For example, -/// -/// ```zig -/// const Foo = struct{ -/// a: u8, -/// }; -/// -/// pub fn main() void { -/// var foo = Foo{ .a = 1 }; -/// _ = foo; -/// } -/// ``` -/// -/// value assigned to label `foo` is an unnamed constant belonging/associated -/// with `Decl` `main`, and lives as long as that `Decl`. 
-unnamed_const_atoms: UnnamedConstTable = .{}, +nav_exports: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, []u32) = .{}, lazy_syms: LazySymbolTable = .{}, -anon_decls: std.AutoHashMapUnmanaged(InternPool.Index, Atom.Index) = .{}, +uavs: std.AutoHashMapUnmanaged(InternPool.Index, Atom.Index) = .{}, relocs: std.AutoHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Reloc)) = .{}, hdr: aout.ExecHdr = undefined, @@ -104,7 +81,7 @@ got_index_free_list: std.ArrayListUnmanaged(usize) = .{}, syms_index_free_list: std.ArrayListUnmanaged(usize) = .{}, atoms: std.ArrayListUnmanaged(Atom) = .{}, -decls: std.AutoHashMapUnmanaged(InternPool.DeclIndex, DeclMetadata) = .{}, +navs: std.AutoHashMapUnmanaged(InternPool.Nav.Index, NavMetadata) = .{}, /// Indices of the three "special" symbols into atoms etext_edata_end_atom_indices: [3]?Atom.Index = .{ null, null, null }, @@ -131,9 +108,7 @@ const Bases = struct { data: u64, }; -const UnnamedConstTable = std.AutoHashMapUnmanaged(InternPool.DeclIndex, std.ArrayListUnmanaged(Atom.Index)); - -const LazySymbolTable = std.AutoArrayHashMapUnmanaged(InternPool.OptionalDeclIndex, LazySymbolMetadata); +const LazySymbolTable = std.AutoArrayHashMapUnmanaged(InternPool.Index, LazySymbolMetadata); const LazySymbolMetadata = struct { const State = enum { unused, pending_flush, flushed }; @@ -161,7 +136,7 @@ pub const Atom = struct { /// offset into got got_index: ?usize, /// We include the code here to be use in relocs - /// In the case of unnamed_const_atoms and lazy_syms, this atom owns the code. + /// In the case of lazy_syms, this atom owns the code. /// But, in the case of function and data decls, they own the code and this field /// is just a pointer for convience. 
code: CodePtr, @@ -170,22 +145,23 @@ pub const Atom = struct { code_ptr: ?[*]u8, other: union { code_len: usize, - decl_index: InternPool.DeclIndex, + nav_index: InternPool.Nav.Index, }, fn fromSlice(slice: []u8) CodePtr { return .{ .code_ptr = slice.ptr, .other = .{ .code_len = slice.len } }; } fn getCode(self: CodePtr, plan9: *const Plan9) []u8 { - const mod = plan9.base.comp.module.?; + const zcu = plan9.base.comp.module.?; + const ip = &zcu.intern_pool; return if (self.code_ptr) |p| p[0..self.other.code_len] else blk: { - const decl_index = self.other.decl_index; - const decl = mod.declPtr(decl_index); - if (decl.typeOf(mod).zigTypeTag(mod) == .Fn) { - const table = plan9.fn_decl_table.get(decl.getFileScope(mod)).?.functions; - const output = table.get(decl_index).?; + const nav_index = self.other.nav_index; + const nav = ip.getNav(nav_index); + if (ip.isFunctionType(nav.typeOf(ip))) { + const table = plan9.fn_nav_table.get(zcu.navFileScopeIndex(nav_index)).?.functions; + const output = table.get(nav_index).?; break :blk output.code; } else { - break :blk plan9.data_decl_table.get(decl_index).?; + break :blk plan9.data_nav_table.get(nav_index).?; } }; } @@ -241,11 +217,11 @@ pub const DebugInfoOutput = struct { pc_quanta: u8, }; -const DeclMetadata = struct { +const NavMetadata = struct { index: Atom.Index, exports: std.ArrayListUnmanaged(usize) = .{}, - fn getExport(m: DeclMetadata, p9: *const Plan9, name: []const u8) ?usize { + fn getExport(m: NavMetadata, p9: *const Plan9, name: []const u8) ?usize { for (m.exports.items) |exp| { const sym = p9.syms.items[exp]; if (mem.eql(u8, name, sym.name)) return exp; @@ -254,7 +230,7 @@ const DeclMetadata = struct { } }; -const FnDeclOutput = struct { +const FnNavOutput = struct { /// this code is modified when relocated so it is mutable code: []u8, /// this might have to be modified in the linker, so thats why its mutable @@ -338,18 +314,18 @@ pub fn createEmpty( return self; } -fn putFn(self: *Plan9, decl_index: 
InternPool.DeclIndex, out: FnDeclOutput) !void { +fn putFn(self: *Plan9, nav_index: InternPool.Nav.Index, out: FnNavOutput) !void { const gpa = self.base.comp.gpa; const mod = self.base.comp.module.?; - const decl = mod.declPtr(decl_index); - const fn_map_res = try self.fn_decl_table.getOrPut(gpa, decl.getFileScope(mod)); + const file_scope = mod.navFileScopeIndex(nav_index); + const fn_map_res = try self.fn_nav_table.getOrPut(gpa, file_scope); if (fn_map_res.found_existing) { - if (try fn_map_res.value_ptr.functions.fetchPut(gpa, decl_index, out)) |old_entry| { + if (try fn_map_res.value_ptr.functions.fetchPut(gpa, nav_index, out)) |old_entry| { gpa.free(old_entry.value.code); gpa.free(old_entry.value.lineinfo); } } else { - const file = decl.getFileScope(mod); + const file = mod.fileByIndex(file_scope); const arena = self.path_arena.allocator(); // each file gets a symbol fn_map_res.value_ptr.* = .{ @@ -359,7 +335,7 @@ fn putFn(self: *Plan9, decl_index: InternPool.DeclIndex, out: FnDeclOutput) !voi break :blk @as(u32, @intCast(self.syms.items.len - 1)); }, }; - try fn_map_res.value_ptr.functions.put(gpa, decl_index, out); + try fn_map_res.value_ptr.functions.put(gpa, nav_index, out); var a = std.ArrayList(u8).init(arena); errdefer a.deinit(); @@ -418,11 +394,8 @@ pub fn updateFunc(self: *Plan9, pt: Zcu.PerThread, func_index: InternPool.Index, const gpa = mod.gpa; const target = self.base.comp.root_mod.resolved_target.result; const func = mod.funcInfo(func_index); - const decl_index = func.owner_decl; - const decl = mod.declPtr(decl_index); - self.freeUnnamedConsts(decl_index); - const atom_idx = try self.seeDecl(decl_index); + const atom_idx = try self.seeNav(pt, func.owner_nav); var code_buffer = std.ArrayList(u8).init(gpa); defer code_buffer.deinit(); @@ -439,7 +412,7 @@ pub fn updateFunc(self: *Plan9, pt: Zcu.PerThread, func_index: InternPool.Index, const res = try codegen.generateFunction( &self.base, pt, - decl.navSrcLoc(mod), + 
mod.navSrcLoc(func.owner_nav), func_index, air, liveness, @@ -449,128 +422,72 @@ pub fn updateFunc(self: *Plan9, pt: Zcu.PerThread, func_index: InternPool.Index, const code = switch (res) { .ok => try code_buffer.toOwnedSlice(), .fail => |em| { - func.setAnalysisState(&mod.intern_pool, .codegen_failure); - try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); + try mod.failed_codegen.put(gpa, func.owner_nav, em); return; }, }; self.getAtomPtr(atom_idx).code = .{ .code_ptr = null, - .other = .{ .decl_index = decl_index }, + .other = .{ .nav_index = func.owner_nav }, }; - const out: FnDeclOutput = .{ + const out: FnNavOutput = .{ .code = code, .lineinfo = try dbg_info_output.dbg_line.toOwnedSlice(), .start_line = dbg_info_output.start_line.?, .end_line = dbg_info_output.end_line, }; - try self.putFn(decl_index, out); - return self.updateFinish(decl_index); + try self.putFn(func.owner_nav, out); + return self.updateFinish(pt, func.owner_nav); } -pub fn lowerUnnamedConst(self: *Plan9, pt: Zcu.PerThread, val: Value, decl_index: InternPool.DeclIndex) !u32 { - const mod = pt.zcu; - const gpa = mod.gpa; - _ = try self.seeDecl(decl_index); - var code_buffer = std.ArrayList(u8).init(gpa); - defer code_buffer.deinit(); - - const decl = mod.declPtr(decl_index); - - const gop = try self.unnamed_const_atoms.getOrPut(gpa, decl_index); - if (!gop.found_existing) { - gop.value_ptr.* = .{}; - } - const unnamed_consts = gop.value_ptr; - - const index = unnamed_consts.items.len; - // name is freed when the unnamed const is freed - const name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl.fqn.fmt(&mod.intern_pool), index }); - - const sym_index = try self.allocateSymbolIndex(); - const new_atom_idx = try self.createAtom(); - const info: Atom = .{ - .type = .d, - .offset = null, - .sym_index = sym_index, - .got_index = self.allocateGotIndex(), - .code = undefined, // filled in later - }; - const sym: aout.Sym = .{ - .value = undefined, - .type = 
info.type, - .name = name, - }; - self.syms.items[info.sym_index.?] = sym; - - const res = try codegen.generateSymbol(&self.base, pt, decl.navSrcLoc(mod), val, &code_buffer, .{ - .none = {}, - }, .{ - .parent_atom_index = new_atom_idx, - }); - const code = switch (res) { - .ok => code_buffer.items, - .fail => |em| { - decl.analysis = .codegen_failure; - try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); - log.err("{s}", .{em.msg}); - return error.CodegenFail; +pub fn updateNav(self: *Plan9, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void { + const zcu = pt.zcu; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + const nav = ip.getNav(nav_index); + const nav_val = zcu.navValue(nav_index); + const nav_init = switch (ip.indexToKey(nav_val.toIntern())) { + .variable => |variable| Value.fromInterned(variable.init), + .@"extern" => { + log.debug("found extern decl: {}", .{nav.name.fmt(ip)}); + return; }, + else => nav_val, }; - // duped_code is freed when the unnamed const is freed - const duped_code = try gpa.dupe(u8, code); - errdefer gpa.free(duped_code); - const new_atom = self.getAtomPtr(new_atom_idx); - new_atom.* = info; - new_atom.code = .{ .code_ptr = duped_code.ptr, .other = .{ .code_len = duped_code.len } }; - try unnamed_consts.append(gpa, new_atom_idx); - // we return the new_atom_idx to codegen - return new_atom_idx; -} - -pub fn updateDecl(self: *Plan9, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { - const gpa = self.base.comp.gpa; - const mod = pt.zcu; - const decl = mod.declPtr(decl_index); - - if (decl.isExtern(mod)) { - log.debug("found extern decl: {}", .{decl.name.fmt(&mod.intern_pool)}); - return; - } - const atom_idx = try self.seeDecl(decl_index); + const atom_idx = try self.seeNav(pt, nav_index); var code_buffer = std.ArrayList(u8).init(gpa); defer code_buffer.deinit(); - const decl_val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val; // TODO 
we need the symbol index for symbol in the table of locals for the containing atom - const res = try codegen.generateSymbol(&self.base, pt, decl.navSrcLoc(mod), decl_val, &code_buffer, .{ .none = {} }, .{ - .parent_atom_index = @as(Atom.Index, @intCast(atom_idx)), + const res = try codegen.generateSymbol(&self.base, pt, zcu.navSrcLoc(nav_index), nav_init, &code_buffer, .none, .{ + .parent_atom_index = @intCast(atom_idx), }); const code = switch (res) { .ok => code_buffer.items, .fail => |em| { - decl.analysis = .codegen_failure; - try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); + try zcu.failed_codegen.put(gpa, nav_index, em); return; }, }; - try self.data_decl_table.ensureUnusedCapacity(gpa, 1); + try self.data_nav_table.ensureUnusedCapacity(gpa, 1); const duped_code = try gpa.dupe(u8, code); - self.getAtomPtr(self.decls.get(decl_index).?.index).code = .{ .code_ptr = null, .other = .{ .decl_index = decl_index } }; - if (self.data_decl_table.fetchPutAssumeCapacity(decl_index, duped_code)) |old_entry| { + self.getAtomPtr(self.navs.get(nav_index).?.index).code = .{ .code_ptr = null, .other = .{ .nav_index = nav_index } }; + if (self.data_nav_table.fetchPutAssumeCapacity(nav_index, duped_code)) |old_entry| { gpa.free(old_entry.value); } - return self.updateFinish(decl_index); + return self.updateFinish(pt, nav_index); } + /// called at the end of update{Decl,Func} -fn updateFinish(self: *Plan9, decl_index: InternPool.DeclIndex) !void { - const gpa = self.base.comp.gpa; - const mod = self.base.comp.module.?; - const decl = mod.declPtr(decl_index); - const is_fn = (decl.typeOf(mod).zigTypeTag(mod) == .Fn); +fn updateFinish(self: *Plan9, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void { + const zcu = pt.zcu; + const gpa = zcu.gpa; + const ip = &zcu.intern_pool; + const nav = ip.getNav(nav_index); + const is_fn = ip.isFunctionType(nav.typeOf(ip)); const sym_t: aout.Sym.Type = if (is_fn) .t else .d; - const atom = 
self.getAtomPtr(self.decls.get(decl_index).?.index); + const atom = self.getAtomPtr(self.navs.get(nav_index).?.index); // write the internal linker metadata atom.type = sym_t; // write the symbol @@ -578,7 +495,7 @@ fn updateFinish(self: *Plan9, decl_index: InternPool.DeclIndex) !void { const sym: aout.Sym = .{ .value = undefined, // the value of stuff gets filled in in flushModule .type = atom.type, - .name = try gpa.dupe(u8, decl.name.toSlice(&mod.intern_pool)), + .name = try gpa.dupe(u8, nav.name.toSlice(ip)), }; if (atom.sym_index) |s| { @@ -643,29 +560,24 @@ fn externCount(self: *Plan9) usize { } return extern_atom_count; } -// counts decls, unnamed consts, and lazy syms +// counts decls, and lazy syms fn atomCount(self: *Plan9) usize { - var fn_decl_count: usize = 0; - var itf_files = self.fn_decl_table.iterator(); + var fn_nav_count: usize = 0; + var itf_files = self.fn_nav_table.iterator(); while (itf_files.next()) |ent| { // get the submap var submap = ent.value_ptr.functions; - fn_decl_count += submap.count(); - } - const data_decl_count = self.data_decl_table.count(); - var unnamed_const_count: usize = 0; - var it_unc = self.unnamed_const_atoms.iterator(); - while (it_unc.next()) |unnamed_consts| { - unnamed_const_count += unnamed_consts.value_ptr.items.len; + fn_nav_count += submap.count(); } + const data_nav_count = self.data_nav_table.count(); var lazy_atom_count: usize = 0; var it_lazy = self.lazy_syms.iterator(); while (it_lazy.next()) |kv| { lazy_atom_count += kv.value_ptr.numberOfAtoms(); } - const anon_atom_count = self.anon_decls.count(); + const uav_atom_count = self.uavs.count(); const extern_atom_count = self.externCount(); - return data_decl_count + fn_decl_count + unnamed_const_count + lazy_atom_count + extern_atom_count + anon_atom_count; + return data_nav_count + fn_nav_count + lazy_atom_count + extern_atom_count + uav_atom_count; } pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) 
link.File.FlushError!void { @@ -700,7 +612,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_n // anyerror needs to wait for everything to be flushed. if (metadata.text_state != .unused) self.updateLazySymbolAtom( pt, - File.LazySymbol.initDecl(.code, null, pt.zcu), + .{ .kind = .code, .ty = .anyerror_type }, metadata.text_atom, ) catch |err| return switch (err) { error.CodegenFail => error.FlushFailure, @@ -708,7 +620,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_n }; if (metadata.rodata_state != .unused) self.updateLazySymbolAtom( pt, - File.LazySymbol.initDecl(.const_data, null, pt.zcu), + .{ .kind = .const_data, .ty = .anyerror_type }, metadata.rodata_atom, ) catch |err| return switch (err) { error.CodegenFail => error.FlushFailure, @@ -734,7 +646,7 @@ pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_n var hdr_buf: [40]u8 = undefined; // account for the fat header - const hdr_size = if (self.sixtyfour_bit) @as(usize, 40) else 32; + const hdr_size: usize = if (self.sixtyfour_bit) 40 else 32; const hdr_slice: []u8 = hdr_buf[0..hdr_size]; var foff = hdr_size; iovecs[0] = .{ .base = hdr_slice.ptr, .len = hdr_slice.len }; @@ -746,13 +658,13 @@ pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_n // text { var linecount: i64 = -1; - var it_file = self.fn_decl_table.iterator(); + var it_file = self.fn_nav_table.iterator(); while (it_file.next()) |fentry| { var it = fentry.value_ptr.functions.iterator(); while (it.next()) |entry| { - const decl_index = entry.key_ptr.*; - const decl = pt.zcu.declPtr(decl_index); - const atom = self.getAtomPtr(self.decls.get(decl_index).?.index); + const nav_index = entry.key_ptr.*; + const nav = pt.zcu.intern_pool.getNav(nav_index); + const atom = self.getAtomPtr(self.navs.get(nav_index).?.index); const out = entry.value_ptr.*; { // connect the previous decl to the next @@ -771,15 +683,15 @@ pub fn 
flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_n const off = self.getAddr(text_i, .t); text_i += out.code.len; atom.offset = off; - log.debug("write text decl {*} ({}), lines {d} to {d}.;__GOT+0x{x} vaddr: 0x{x}", .{ decl, decl.name.fmt(&pt.zcu.intern_pool), out.start_line + 1, out.end_line, atom.got_index.? * 8, off }); + log.debug("write text nav 0x{x} ({}), lines {d} to {d}.;__GOT+0x{x} vaddr: 0x{x}", .{ nav_index, nav.name.fmt(&pt.zcu.intern_pool), out.start_line + 1, out.end_line, atom.got_index.? * 8, off }); if (!self.sixtyfour_bit) { - mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), target.cpu.arch.endian()); + mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @intCast(off), target.cpu.arch.endian()); } else { mem.writeInt(u64, got_table[atom.got_index.? * 8 ..][0..8], off, target.cpu.arch.endian()); } self.syms.items[atom.sym_index.?].value = off; - if (self.decl_exports.get(decl_index)) |export_indices| { - try self.addDeclExports(pt.zcu, decl_index, export_indices); + if (self.nav_exports.get(nav_index)) |export_indices| { + try self.addNavExports(pt.zcu, nav_index, export_indices); } } } @@ -826,10 +738,10 @@ pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_n // data var data_i: u64 = got_size; { - var it = self.data_decl_table.iterator(); + var it = self.data_nav_table.iterator(); while (it.next()) |entry| { - const decl_index = entry.key_ptr.*; - const atom = self.getAtomPtr(self.decls.get(decl_index).?.index); + const nav_index = entry.key_ptr.*; + const atom = self.getAtomPtr(self.navs.get(nav_index).?.index); const code = entry.value_ptr.*; foff += code.len; @@ -844,35 +756,13 @@ pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_n mem.writeInt(u64, got_table[atom.got_index.? 
* 8 ..][0..8], off, target.cpu.arch.endian()); } self.syms.items[atom.sym_index.?].value = off; - if (self.decl_exports.get(decl_index)) |export_indices| { - try self.addDeclExports(pt.zcu, decl_index, export_indices); + if (self.nav_exports.get(nav_index)) |export_indices| { + try self.addNavExports(pt.zcu, nav_index, export_indices); } } - // write the unnamed constants after the other data decls - var it_unc = self.unnamed_const_atoms.iterator(); - while (it_unc.next()) |unnamed_consts| { - for (unnamed_consts.value_ptr.items) |atom_idx| { - const atom = self.getAtomPtr(atom_idx); - const code = atom.code.getOwnedCode().?; // unnamed consts must own their code - log.debug("write unnamed const: ({s})", .{self.syms.items[atom.sym_index.?].name}); - foff += code.len; - iovecs[iovecs_i] = .{ .base = code.ptr, .len = code.len }; - iovecs_i += 1; - const off = self.getAddr(data_i, .d); - data_i += code.len; - atom.offset = off; - if (!self.sixtyfour_bit) { - mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @as(u32, @intCast(off)), target.cpu.arch.endian()); - } else { - mem.writeInt(u64, got_table[atom.got_index.? * 8 ..][0..8], off, target.cpu.arch.endian()); - } - self.syms.items[atom.sym_index.?].value = off; - } - } - // the anon decls { - var it_anon = self.anon_decls.iterator(); - while (it_anon.next()) |kv| { + var it_uav = self.uavs.iterator(); + while (it_uav.next()) |kv| { const atom = self.getAtomPtr(kv.value_ptr.*); const code = atom.code.getOwnedCode().?; log.debug("write anon decl: {s}", .{self.syms.items[atom.sym_index.?].name}); @@ -1011,14 +901,14 @@ pub fn flushModule(self: *Plan9, arena: Allocator, tid: Zcu.PerThread.Id, prog_n // write it all! 
try file.pwritevAll(iovecs, 0); } -fn addDeclExports( +fn addNavExports( self: *Plan9, mod: *Zcu, - decl_index: InternPool.DeclIndex, + nav_index: InternPool.Nav.Index, export_indices: []const u32, ) !void { const gpa = self.base.comp.gpa; - const metadata = self.decls.getPtr(decl_index).?; + const metadata = self.navs.getPtr(nav_index).?; const atom = self.getAtom(metadata.index); for (export_indices) |export_idx| { @@ -1031,7 +921,7 @@ fn addDeclExports( { try mod.failed_exports.put(mod.gpa, export_idx, try Zcu.ErrorMsg.create( gpa, - mod.declPtr(decl_index).navSrcLoc(mod), + mod.navSrcLoc(nav_index), "plan9 does not support extra sections", .{}, )); @@ -1090,7 +980,6 @@ pub fn freeDecl(self: *Plan9, decl_index: InternPool.DeclIndex) void { } kv.value.exports.deinit(gpa); } - self.freeUnnamedConsts(decl_index); { const atom_index = self.decls.get(decl_index).?.index; const relocs = self.relocs.getPtr(atom_index) orelse return; @@ -1098,18 +987,6 @@ pub fn freeDecl(self: *Plan9, decl_index: InternPool.DeclIndex) void { assert(self.relocs.remove(atom_index)); } } -fn freeUnnamedConsts(self: *Plan9, decl_index: InternPool.DeclIndex) void { - const gpa = self.base.comp.gpa; - const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return; - for (unnamed_consts.items) |atom_idx| { - const atom = self.getAtom(atom_idx); - gpa.free(self.syms.items[atom.sym_index.?].name); - self.syms.items[atom.sym_index.?] = aout.Sym.undefined_symbol; - self.syms_index_free_list.append(gpa, atom.sym_index.?) 
catch {}; - } - unnamed_consts.clearAndFree(gpa); -} - fn createAtom(self: *Plan9) !Atom.Index { const gpa = self.base.comp.gpa; const index = @as(Atom.Index, @intCast(self.atoms.items.len)); @@ -1124,9 +1001,11 @@ fn createAtom(self: *Plan9) !Atom.Index { return index; } -pub fn seeDecl(self: *Plan9, decl_index: InternPool.DeclIndex) !Atom.Index { - const gpa = self.base.comp.gpa; - const gop = try self.decls.getOrPut(gpa, decl_index); +pub fn seeNav(self: *Plan9, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !Atom.Index { + const zcu = pt.zcu; + const ip = &zcu.intern_pool; + const gpa = zcu.gpa; + const gop = try self.navs.getOrPut(gpa, nav_index); if (!gop.found_existing) { const index = try self.createAtom(); self.getAtomPtr(index).got_index = self.allocateGotIndex(); @@ -1137,23 +1016,22 @@ pub fn seeDecl(self: *Plan9, decl_index: InternPool.DeclIndex) !Atom.Index { } const atom_idx = gop.value_ptr.index; // handle externs here because they might not get updateDecl called on them - const mod = self.base.comp.module.?; - const decl = mod.declPtr(decl_index); - if (decl.isExtern(mod)) { + const nav = ip.getNav(nav_index); + if (ip.indexToKey(nav.status.resolved.val) == .@"extern") { // this is a "phantom atom" - it is never actually written to disk, just convenient for us to store stuff about externs - if (decl.name.eqlSlice("etext", &mod.intern_pool)) { + if (nav.name.eqlSlice("etext", ip)) { self.etext_edata_end_atom_indices[0] = atom_idx; - } else if (decl.name.eqlSlice("edata", &mod.intern_pool)) { + } else if (nav.name.eqlSlice("edata", ip)) { self.etext_edata_end_atom_indices[1] = atom_idx; - } else if (decl.name.eqlSlice("end", &mod.intern_pool)) { + } else if (nav.name.eqlSlice("end", ip)) { self.etext_edata_end_atom_indices[2] = atom_idx; } - try self.updateFinish(decl_index); - log.debug("seeDecl(extern) for {} (got_addr=0x{x})", .{ - decl.name.fmt(&mod.intern_pool), + try self.updateFinish(pt, nav_index); + log.debug("seeNav(extern) for {} 
(got_addr=0x{x})", .{ + nav.name.fmt(ip), self.getAtom(atom_idx).getOffsetTableAddress(self), }); - } else log.debug("seeDecl for {}", .{decl.name.fmt(&mod.intern_pool)}); + } else log.debug("seeNav for {}", .{nav.name.fmt(ip)}); return atom_idx; } @@ -1165,45 +1043,41 @@ pub fn updateExports( ) !void { const gpa = self.base.comp.gpa; switch (exported) { - .value => @panic("TODO: plan9 updateExports handling values"), - .decl_index => |decl_index| { - _ = try self.seeDecl(decl_index); - if (self.decl_exports.fetchSwapRemove(decl_index)) |kv| { + .uav => @panic("TODO: plan9 updateExports handling values"), + .nav => |nav| { + _ = try self.seeNav(pt, nav); + if (self.nav_exports.fetchSwapRemove(nav)) |kv| { gpa.free(kv.value); } - try self.decl_exports.ensureUnusedCapacity(gpa, 1); + try self.nav_exports.ensureUnusedCapacity(gpa, 1); const duped_indices = try gpa.dupe(u32, export_indices); - self.decl_exports.putAssumeCapacityNoClobber(decl_index, duped_indices); + self.nav_exports.putAssumeCapacityNoClobber(nav, duped_indices); }, } // all proper work is done in flush - _ = pt; } -pub fn getOrCreateAtomForLazySymbol(self: *Plan9, pt: Zcu.PerThread, sym: File.LazySymbol) !Atom.Index { - const gpa = pt.zcu.gpa; - const gop = try self.lazy_syms.getOrPut(gpa, sym.getDecl(self.base.comp.module.?)); +pub fn getOrCreateAtomForLazySymbol(self: *Plan9, pt: Zcu.PerThread, lazy_sym: File.LazySymbol) !Atom.Index { + const gop = try self.lazy_syms.getOrPut(pt.zcu.gpa, lazy_sym.ty); errdefer _ = if (!gop.found_existing) self.lazy_syms.pop(); if (!gop.found_existing) gop.value_ptr.* = .{}; - const metadata: struct { atom: *Atom.Index, state: *LazySymbolMetadata.State } = switch (sym.kind) { - .code => .{ .atom = &gop.value_ptr.text_atom, .state = &gop.value_ptr.text_state }, - .const_data => .{ .atom = &gop.value_ptr.rodata_atom, .state = &gop.value_ptr.rodata_state }, + const atom_ptr, const state_ptr = switch (lazy_sym.kind) { + .code => .{ &gop.value_ptr.text_atom, 
&gop.value_ptr.text_state }, + .const_data => .{ &gop.value_ptr.rodata_atom, &gop.value_ptr.rodata_state }, }; - switch (metadata.state.*) { - .unused => metadata.atom.* = try self.createAtom(), - .pending_flush => return metadata.atom.*, + switch (state_ptr.*) { + .unused => atom_ptr.* = try self.createAtom(), + .pending_flush => return atom_ptr.*, .flushed => {}, } - metadata.state.* = .pending_flush; - const atom = metadata.atom.*; + state_ptr.* = .pending_flush; + const atom = atom_ptr.*; _ = try self.getAtomPtr(atom).getOrCreateSymbolTableEntry(self); _ = self.getAtomPtr(atom).getOrCreateOffsetTableEntry(self); // anyerror needs to be deferred until flushModule - if (sym.getDecl(self.base.comp.module.?) != .none) { - try self.updateLazySymbolAtom(pt, sym, atom); - } + if (lazy_sym.ty != .anyerror_type) try self.updateLazySymbolAtom(pt, lazy_sym, atom); return atom; } @@ -1217,7 +1091,7 @@ fn updateLazySymbolAtom(self: *Plan9, pt: Zcu.PerThread, sym: File.LazySymbol, a // create the symbol for the name const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{}", .{ @tagName(sym.kind), - sym.ty.fmt(pt), + Type.fromInterned(sym.ty).fmt(pt), }); const symbol: aout.Sym = .{ @@ -1228,7 +1102,7 @@ fn updateLazySymbolAtom(self: *Plan9, pt: Zcu.PerThread, sym: File.LazySymbol, a self.syms.items[self.getAtomPtr(atom_index).sym_index.?] 
= symbol; // generate the code - const src = sym.ty.srcLocOrNull(pt.zcu) orelse Zcu.LazySrcLoc.unneeded; + const src = Type.fromInterned(sym.ty).srcLocOrNull(pt.zcu) orelse Zcu.LazySrcLoc.unneeded; const res = try codegen.generateLazySymbol( &self.base, pt, @@ -1264,12 +1138,6 @@ pub fn deinit(self: *Plan9) void { } self.relocs.deinit(gpa); } - // free the unnamed consts - var it_unc = self.unnamed_const_atoms.iterator(); - while (it_unc.next()) |kv| { - self.freeUnnamedConsts(kv.key_ptr.*); - } - self.unnamed_const_atoms.deinit(gpa); var it_lzc = self.lazy_syms.iterator(); while (it_lzc.next()) |kv| { if (kv.value_ptr.text_state != .unused) @@ -1278,7 +1146,7 @@ pub fn deinit(self: *Plan9) void { gpa.free(self.syms.items[self.getAtom(kv.value_ptr.rodata_atom).sym_index.?].name); } self.lazy_syms.deinit(gpa); - var itf_files = self.fn_decl_table.iterator(); + var itf_files = self.fn_nav_table.iterator(); while (itf_files.next()) |ent| { // get the submap var submap = ent.value_ptr.functions; @@ -1289,21 +1157,21 @@ pub fn deinit(self: *Plan9) void { gpa.free(entry.value_ptr.lineinfo); } } - self.fn_decl_table.deinit(gpa); - var itd = self.data_decl_table.iterator(); + self.fn_nav_table.deinit(gpa); + var itd = self.data_nav_table.iterator(); while (itd.next()) |entry| { gpa.free(entry.value_ptr.*); } - var it_anon = self.anon_decls.iterator(); - while (it_anon.next()) |entry| { + var it_uav = self.uavs.iterator(); + while (it_uav.next()) |entry| { const sym_index = self.getAtom(entry.value_ptr.*).sym_index.?; gpa.free(self.syms.items[sym_index].name); } - self.data_decl_table.deinit(gpa); - for (self.decl_exports.values()) |export_indices| { + self.data_nav_table.deinit(gpa); + for (self.nav_exports.values()) |export_indices| { gpa.free(export_indices); } - self.decl_exports.deinit(gpa); + self.nav_exports.deinit(gpa); self.syms.deinit(gpa); self.got_index_free_list.deinit(gpa); self.syms_index_free_list.deinit(gpa); @@ -1317,11 +1185,11 @@ pub fn deinit(self: 
*Plan9) void { self.atoms.deinit(gpa); { - var it = self.decls.iterator(); + var it = self.navs.iterator(); while (it.next()) |entry| { entry.value_ptr.exports.deinit(gpa); } - self.decls.deinit(gpa); + self.navs.deinit(gpa); } } @@ -1402,17 +1270,17 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { // write the data symbols { - var it = self.data_decl_table.iterator(); + var it = self.data_nav_table.iterator(); while (it.next()) |entry| { - const decl_index = entry.key_ptr.*; - const decl_metadata = self.decls.get(decl_index).?; - const atom = self.getAtom(decl_metadata.index); + const nav_index = entry.key_ptr.*; + const nav_metadata = self.navs.get(nav_index).?; + const atom = self.getAtom(nav_metadata.index); const sym = self.syms.items[atom.sym_index.?]; try self.writeSym(writer, sym); - if (self.decl_exports.get(decl_index)) |export_indices| { + if (self.nav_exports.get(nav_index)) |export_indices| { for (export_indices) |export_idx| { const exp = mod.all_exports.items[export_idx]; - if (decl_metadata.getExport(self, exp.opts.name.toSlice(ip))) |exp_i| { + if (nav_metadata.getExport(self, exp.opts.name.toSlice(ip))) |exp_i| { try self.writeSym(writer, self.syms.items[exp_i]); } } @@ -1429,22 +1297,11 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { try self.writeSym(writer, sym); } } - // unnamed consts - { - var it = self.unnamed_const_atoms.iterator(); - while (it.next()) |kv| { - const consts = kv.value_ptr; - for (consts.items) |atom_index| { - const sym = self.syms.items[self.getAtom(atom_index).sym_index.?]; - try self.writeSym(writer, sym); - } - } - } // text symbols are the hardest: // the file of a text symbol is the .z symbol before it // so we have to write everything in the right order { - var it_file = self.fn_decl_table.iterator(); + var it_file = self.fn_nav_table.iterator(); while (it_file.next()) |fentry| { var symidx_and_submap = fentry.value_ptr; // write the z symbols @@ -1454,15 +1311,15 @@ pub fn 
writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { // write all the decls come from the file of the z symbol var submap_it = symidx_and_submap.functions.iterator(); while (submap_it.next()) |entry| { - const decl_index = entry.key_ptr.*; - const decl_metadata = self.decls.get(decl_index).?; - const atom = self.getAtom(decl_metadata.index); + const nav_index = entry.key_ptr.*; + const nav_metadata = self.navs.get(nav_index).?; + const atom = self.getAtom(nav_metadata.index); const sym = self.syms.items[atom.sym_index.?]; try self.writeSym(writer, sym); - if (self.decl_exports.get(decl_index)) |export_indices| { + if (self.nav_exports.get(nav_index)) |export_indices| { for (export_indices) |export_idx| { const exp = mod.all_exports.items[export_idx]; - if (decl_metadata.getExport(self, exp.opts.name.toSlice(ip))) |exp_i| { + if (nav_metadata.getExport(self, exp.opts.name.toSlice(ip))) |exp_i| { const s = self.syms.items[exp_i]; if (mem.eql(u8, s.name, "_start")) self.entry_val = s.value; @@ -1500,31 +1357,31 @@ pub fn updateDeclLineNumber(self: *Plan9, pt: Zcu.PerThread, decl_index: InternP _ = decl_index; } -pub fn getDeclVAddr( +pub fn getNavVAddr( self: *Plan9, pt: Zcu.PerThread, - decl_index: InternPool.DeclIndex, + nav_index: InternPool.Nav.Index, reloc_info: link.File.RelocInfo, ) !u64 { const ip = &pt.zcu.intern_pool; - const decl = pt.zcu.declPtr(decl_index); - log.debug("getDeclVAddr for {}", .{decl.name.fmt(ip)}); - if (decl.isExtern(pt.zcu)) { - if (decl.name.eqlSlice("etext", ip)) { + const nav = ip.getNav(nav_index); + log.debug("getDeclVAddr for {}", .{nav.name.fmt(ip)}); + if (ip.indexToKey(nav.status.resolved.val) == .@"extern") { + if (nav.name.eqlSlice("etext", ip)) { try self.addReloc(reloc_info.parent_atom_index, .{ .target = undefined, .offset = reloc_info.offset, .addend = reloc_info.addend, .type = .special_etext, }); - } else if (decl.name.eqlSlice("edata", ip)) { + } else if (nav.name.eqlSlice("edata", ip)) { try 
self.addReloc(reloc_info.parent_atom_index, .{ .target = undefined, .offset = reloc_info.offset, .addend = reloc_info.addend, .type = .special_edata, }); - } else if (decl.name.eqlSlice("end", ip)) { + } else if (nav.name.eqlSlice("end", ip)) { try self.addReloc(reloc_info.parent_atom_index, .{ .target = undefined, .offset = reloc_info.offset, @@ -1536,7 +1393,7 @@ pub fn getDeclVAddr( return undefined; } // otherwise, we just add a relocation - const atom_index = try self.seeDecl(decl_index); + const atom_index = try self.seeNav(pt, nav_index); // the parent_atom_index in this case is just the decl_index of the parent try self.addReloc(reloc_info.parent_atom_index, .{ .target = atom_index, @@ -1546,15 +1403,14 @@ pub fn getDeclVAddr( return undefined; } -pub fn lowerAnonDecl( +pub fn lowerUav( self: *Plan9, pt: Zcu.PerThread, - decl_val: InternPool.Index, + uav: InternPool.Index, explicit_alignment: InternPool.Alignment, src_loc: Zcu.LazySrcLoc, -) !codegen.Result { +) !codegen.GenResult { _ = explicit_alignment; - // This is basically the same as lowerUnnamedConst. // example: // const ty = mod.intern_pool.typeOf(decl_val).toType(); // const val = decl_val.toValue(); @@ -1564,41 +1420,40 @@ pub fn lowerAnonDecl( // to put it in some location. // ... 
const gpa = self.base.comp.gpa; - const gop = try self.anon_decls.getOrPut(gpa, decl_val); - if (!gop.found_existing) { - const val = Value.fromInterned(decl_val); - const name = try std.fmt.allocPrint(gpa, "__anon_{d}", .{@intFromEnum(decl_val)}); - - const index = try self.createAtom(); - const got_index = self.allocateGotIndex(); - gop.value_ptr.* = index; - // we need to free name latex - var code_buffer = std.ArrayList(u8).init(gpa); - const res = try codegen.generateSymbol(&self.base, pt, src_loc, val, &code_buffer, .{ .none = {} }, .{ .parent_atom_index = index }); - const code = switch (res) { - .ok => code_buffer.items, - .fail => |em| return .{ .fail = em }, - }; - const atom_ptr = self.getAtomPtr(index); - atom_ptr.* = .{ - .type = .d, - .offset = undefined, - .sym_index = null, - .got_index = got_index, - .code = Atom.CodePtr.fromSlice(code), - }; - _ = try atom_ptr.getOrCreateSymbolTableEntry(self); - self.syms.items[atom_ptr.sym_index.?] = .{ - .type = .d, - .value = undefined, - .name = name, - }; - } - return .ok; + const gop = try self.uavs.getOrPut(gpa, uav); + if (gop.found_existing) return .{ .mcv = .{ .load_direct = gop.value_ptr.* } }; + const val = Value.fromInterned(uav); + const name = try std.fmt.allocPrint(gpa, "__anon_{d}", .{@intFromEnum(uav)}); + + const index = try self.createAtom(); + const got_index = self.allocateGotIndex(); + gop.value_ptr.* = index; + // we need to free name latex + var code_buffer = std.ArrayList(u8).init(gpa); + const res = try codegen.generateSymbol(&self.base, pt, src_loc, val, &code_buffer, .{ .none = {} }, .{ .parent_atom_index = index }); + const code = switch (res) { + .ok => code_buffer.items, + .fail => |em| return .{ .fail = em }, + }; + const atom_ptr = self.getAtomPtr(index); + atom_ptr.* = .{ + .type = .d, + .offset = undefined, + .sym_index = null, + .got_index = got_index, + .code = Atom.CodePtr.fromSlice(code), + }; + _ = try atom_ptr.getOrCreateSymbolTableEntry(self); + 
self.syms.items[atom_ptr.sym_index.?] = .{ + .type = .d, + .value = undefined, + .name = name, + }; + return .{ .mcv = .{ .load_direct = index } }; } -pub fn getAnonDeclVAddr(self: *Plan9, decl_val: InternPool.Index, reloc_info: link.File.RelocInfo) !u64 { - const atom_index = self.anon_decls.get(decl_val).?; +pub fn getUavVAddr(self: *Plan9, uav: InternPool.Index, reloc_info: link.File.RelocInfo) !u64 { + const atom_index = self.uavs.get(uav).?; try self.addReloc(reloc_info.parent_atom_index, .{ .target = atom_index, .offset = reloc_info.offset, diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index ce7e25824ce7..e97c80c3feb6 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -36,6 +36,7 @@ const trace = @import("../tracy.zig").trace; const build_options = @import("build_options"); const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); +const Type = @import("../Type.zig"); const Value = @import("../Value.zig"); const SpvModule = @import("../codegen/spirv/Module.zig"); @@ -50,8 +51,6 @@ base: link.File, object: codegen.Object, -pub const base_tag: link.File.Tag = .spirv; - pub fn createEmpty( arena: Allocator, comp: *Compilation, @@ -128,22 +127,22 @@ pub fn updateFunc(self: *SpirV, pt: Zcu.PerThread, func_index: InternPool.Index, @panic("Attempted to compile for architecture that was disabled by build configuration"); } + const ip = &pt.zcu.intern_pool; const func = pt.zcu.funcInfo(func_index); - const decl = pt.zcu.declPtr(func.owner_decl); - log.debug("lowering function {}", .{decl.name.fmt(&pt.zcu.intern_pool)}); + log.debug("lowering function {}", .{ip.getNav(func.owner_nav).name.fmt(ip)}); try self.object.updateFunc(pt, func_index, air, liveness); } -pub fn updateDecl(self: *SpirV, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { +pub fn updateNav(self: *SpirV, pt: Zcu.PerThread, nav: InternPool.Nav.Index) !void { if (build_options.skip_non_native) { @panic("Attempted to compile for architecture that was 
disabled by build configuration"); } - const decl = pt.zcu.declPtr(decl_index); - log.debug("lowering declaration {}", .{decl.name.fmt(&pt.zcu.intern_pool)}); + const ip = &pt.zcu.intern_pool; + log.debug("lowering declaration {}", .{ip.getNav(nav).name.fmt(ip)}); - try self.object.updateDecl(pt, decl_index); + try self.object.updateNav(pt, nav); } pub fn updateExports( @@ -152,19 +151,20 @@ pub fn updateExports( exported: Zcu.Exported, export_indices: []const u32, ) !void { - const mod = pt.zcu; - const decl_index = switch (exported) { - .decl_index => |i| i, - .value => |val| { - _ = val; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; + const nav_index = switch (exported) { + .nav => |nav| nav, + .uav => |uav| { + _ = uav; @panic("TODO: implement SpirV linker code for exporting a constant value"); }, }; - const decl = mod.declPtr(decl_index); - if (decl.val.isFuncBody(mod)) { - const target = mod.getTarget(); - const spv_decl_index = try self.object.resolveDecl(mod, decl_index); - const execution_model = switch (decl.typeOf(mod).fnCallingConvention(mod)) { + const nav_ty = ip.getNav(nav_index).typeOf(ip); + if (ip.isFunctionType(nav_ty)) { + const target = zcu.getTarget(); + const spv_decl_index = try self.object.resolveNav(zcu, nav_index); + const execution_model = switch (Type.fromInterned(nav_ty).fnCallingConvention(zcu)) { .Vertex => spec.ExecutionModel.Vertex, .Fragment => spec.ExecutionModel.Fragment, .Kernel => spec.ExecutionModel.Kernel, @@ -177,10 +177,10 @@ pub fn updateExports( (is_vulkan and (execution_model == .Fragment or execution_model == .Vertex))) { for (export_indices) |export_idx| { - const exp = mod.all_exports.items[export_idx]; + const exp = zcu.all_exports.items[export_idx]; try self.object.spv.declareEntryPoint( spv_decl_index, - exp.opts.name.toSlice(&mod.intern_pool), + exp.opts.name.toSlice(ip), execution_model, ); } diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 4f655f2ea993..87dd8c13f957 100644 --- a/src/link/Wasm.zig 
+++ b/src/link/Wasm.zig @@ -39,8 +39,6 @@ const ZigObject = @import("Wasm/ZigObject.zig"); pub const Atom = @import("Wasm/Atom.zig"); pub const Relocation = types.Relocation; -pub const base_tag: link.File.Tag = .wasm; - base: link.File, /// Symbol name of the entry function to export entry_name: ?[]const u8, @@ -1451,19 +1449,19 @@ pub fn updateFunc(wasm: *Wasm, pt: Zcu.PerThread, func_index: InternPool.Index, try wasm.zigObjectPtr().?.updateFunc(wasm, pt, func_index, air, liveness); } -// Generate code for the Decl, storing it in memory to be later written to +// Generate code for the "Nav", storing it in memory to be later written to // the file on flush(). -pub fn updateDecl(wasm: *Wasm, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { +pub fn updateNav(wasm: *Wasm, pt: Zcu.PerThread, nav: InternPool.Nav.Index) !void { if (build_options.skip_non_native and builtin.object_format != .wasm) { @panic("Attempted to compile for object format that was disabled by build configuration"); } - if (wasm.llvm_object) |llvm_object| return llvm_object.updateDecl(pt, decl_index); - try wasm.zigObjectPtr().?.updateDecl(wasm, pt, decl_index); + if (wasm.llvm_object) |llvm_object| return llvm_object.updateNav(pt, nav); + try wasm.zigObjectPtr().?.updateNav(wasm, pt, nav); } -pub fn updateDeclLineNumber(wasm: *Wasm, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !void { +pub fn updateNavLineNumber(wasm: *Wasm, pt: Zcu.PerThread, nav: InternPool.Nav.Index) !void { if (wasm.llvm_object) |_| return; - try wasm.zigObjectPtr().?.updateDeclLineNumber(pt, decl_index); + try wasm.zigObjectPtr().?.updateNavLineNumber(pt, nav); } /// From a given symbol location, returns its `wasm.GlobalType`. @@ -1505,13 +1503,6 @@ fn getFunctionSignature(wasm: *const Wasm, loc: SymbolLoc) std.wasm.Type { return wasm.func_types.items[wasm.functions.get(.{ .file = loc.file, .index = symbol.index }).?.func.type_index]; } -/// Lowers a constant typed value to a local symbol and atom. 
-/// Returns the symbol index of the local -/// The given `decl` is the parent decl whom owns the constant. -pub fn lowerUnnamedConst(wasm: *Wasm, pt: Zcu.PerThread, val: Value, decl_index: InternPool.DeclIndex) !u32 { - return wasm.zigObjectPtr().?.lowerUnnamedConst(wasm, pt, val, decl_index); -} - /// Returns the symbol index from a symbol of which its flag is set global, /// such as an exported or imported symbol. /// If the symbol does not yet exist, creates a new one symbol instead @@ -1521,29 +1512,29 @@ pub fn getGlobalSymbol(wasm: *Wasm, name: []const u8, lib_name: ?[]const u8) !Sy return wasm.zigObjectPtr().?.getGlobalSymbol(wasm.base.comp.gpa, name); } -/// For a given decl, find the given symbol index's atom, and create a relocation for the type. +/// For a given `Nav`, find the given symbol index's atom, and create a relocation for the type. /// Returns the given pointer address -pub fn getDeclVAddr( +pub fn getNavVAddr( wasm: *Wasm, pt: Zcu.PerThread, - decl_index: InternPool.DeclIndex, + nav: InternPool.Nav.Index, reloc_info: link.File.RelocInfo, ) !u64 { - return wasm.zigObjectPtr().?.getDeclVAddr(wasm, pt, decl_index, reloc_info); + return wasm.zigObjectPtr().?.getNavVAddr(wasm, pt, nav, reloc_info); } -pub fn lowerAnonDecl( +pub fn lowerUav( wasm: *Wasm, pt: Zcu.PerThread, - decl_val: InternPool.Index, + uav: InternPool.Index, explicit_alignment: Alignment, src_loc: Zcu.LazySrcLoc, -) !codegen.Result { - return wasm.zigObjectPtr().?.lowerAnonDecl(wasm, pt, decl_val, explicit_alignment, src_loc); +) !codegen.GenResult { + return wasm.zigObjectPtr().?.lowerUav(wasm, pt, uav, explicit_alignment, src_loc); } -pub fn getAnonDeclVAddr(wasm: *Wasm, decl_val: InternPool.Index, reloc_info: link.File.RelocInfo) !u64 { - return wasm.zigObjectPtr().?.getAnonDeclVAddr(wasm, decl_val, reloc_info); +pub fn getUavVAddr(wasm: *Wasm, uav: InternPool.Index, reloc_info: link.File.RelocInfo) !u64 { + return wasm.zigObjectPtr().?.getUavVAddr(wasm, uav, reloc_info); } 
pub fn deleteExport( @@ -4018,11 +4009,11 @@ pub fn putOrGetFuncType(wasm: *Wasm, func_type: std.wasm.Type) !u32 { return index; } -/// For the given `decl_index`, stores the corresponding type representing the function signature. +/// For the given `nav`, stores the corresponding type representing the function signature. /// Asserts declaration has an associated `Atom`. /// Returns the index into the list of types. -pub fn storeDeclType(wasm: *Wasm, decl_index: InternPool.DeclIndex, func_type: std.wasm.Type) !u32 { - return wasm.zigObjectPtr().?.storeDeclType(wasm.base.comp.gpa, decl_index, func_type); +pub fn storeNavType(wasm: *Wasm, nav: InternPool.Nav.Index, func_type: std.wasm.Type) !u32 { + return wasm.zigObjectPtr().?.storeDeclType(wasm.base.comp.gpa, nav, func_type); } /// Returns the symbol index of the error name table. @@ -4036,8 +4027,8 @@ pub fn getErrorTableSymbol(wasm_file: *Wasm, pt: Zcu.PerThread) !u32 { /// For a given `InternPool.DeclIndex` returns its corresponding `Atom.Index`. /// When the index was not found, a new `Atom` will be created, and its index will be returned. /// The newly created Atom is empty with default fields as specified by `Atom.empty`. -pub fn getOrCreateAtomForDecl(wasm_file: *Wasm, pt: Zcu.PerThread, decl_index: InternPool.DeclIndex) !Atom.Index { - return wasm_file.zigObjectPtr().?.getOrCreateAtomForDecl(wasm_file, pt, decl_index); +pub fn getOrCreateAtomForNav(wasm_file: *Wasm, pt: Zcu.PerThread, nav: InternPool.Nav.Index) !Atom.Index { + return wasm_file.zigObjectPtr().?.getOrCreateAtomForNav(wasm_file, pt, nav); } /// Verifies all resolved symbols and checks whether itself needs to be marked alive, diff --git a/src/link/Wasm/ZigObject.zig b/src/link/Wasm/ZigObject.zig index e3c257cd701b..3424006523af 100644 --- a/src/link/Wasm/ZigObject.zig +++ b/src/link/Wasm/ZigObject.zig @@ -6,9 +6,9 @@ path: []const u8, /// Index within the list of relocatable objects of the linker driver. 
index: File.Index, -/// Map of all `Decl` that are currently alive. -/// Each index maps to the corresponding `DeclInfo`. -decls_map: std.AutoHashMapUnmanaged(InternPool.DeclIndex, DeclInfo) = .{}, +/// Map of all `Nav` that are currently alive. +/// Each index maps to the corresponding `NavInfo`. +navs: std.AutoHashMapUnmanaged(InternPool.Nav.Index, NavInfo) = .{}, /// List of function type signatures for this Zig module. func_types: std.ArrayListUnmanaged(std.wasm.Type) = .{}, /// List of `std.wasm.Func`. Each entry contains the function signature, @@ -36,7 +36,7 @@ segment_free_list: std.ArrayListUnmanaged(u32) = .{}, /// File encapsulated string table, used to deduplicate strings within the generated file. string_table: StringTable = .{}, /// Map for storing anonymous declarations. Each anonymous decl maps to its Atom's index. -anon_decls: std.AutoArrayHashMapUnmanaged(InternPool.Index, Atom.Index) = .{}, +uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Atom.Index) = .{}, /// List of atom indexes of functions that are generated by the backend. synthetic_functions: std.ArrayListUnmanaged(Atom.Index) = .{}, /// Represents the symbol index of the error name table @@ -86,12 +86,12 @@ debug_str_index: ?u32 = null, /// The index of the segment representing the custom '.debug_pubtypes' section. 
debug_abbrev_index: ?u32 = null, -const DeclInfo = struct { +const NavInfo = struct { atom: Atom.Index = .null, exports: std.ArrayListUnmanaged(Symbol.Index) = .{}, - fn @"export"(di: DeclInfo, zig_object: *const ZigObject, name: []const u8) ?Symbol.Index { - for (di.exports.items) |sym_index| { + fn @"export"(ni: NavInfo, zig_object: *const ZigObject, name: []const u8) ?Symbol.Index { + for (ni.exports.items) |sym_index| { const sym_name_index = zig_object.symbol(sym_index).name; const sym_name = zig_object.string_table.getAssumeExists(sym_name_index); if (std.mem.eql(u8, name, sym_name)) { @@ -101,14 +101,14 @@ const DeclInfo = struct { return null; } - fn appendExport(di: *DeclInfo, gpa: std.mem.Allocator, sym_index: Symbol.Index) !void { - return di.exports.append(gpa, sym_index); + fn appendExport(ni: *NavInfo, gpa: std.mem.Allocator, sym_index: Symbol.Index) !void { + return ni.exports.append(gpa, sym_index); } - fn deleteExport(di: *DeclInfo, sym_index: Symbol.Index) void { - for (di.exports.items, 0..) |idx, index| { + fn deleteExport(ni: *NavInfo, sym_index: Symbol.Index) void { + for (ni.exports.items, 0..) 
|idx, index| { if (idx == sym_index) { - _ = di.exports.swapRemove(index); + _ = ni.exports.swapRemove(index); return; } } @@ -155,19 +155,19 @@ pub fn deinit(zig_object: *ZigObject, wasm_file: *Wasm) void { } { - var it = zig_object.decls_map.valueIterator(); - while (it.next()) |decl_info| { - const atom = wasm_file.getAtomPtr(decl_info.atom); + var it = zig_object.navs.valueIterator(); + while (it.next()) |nav_info| { + const atom = wasm_file.getAtomPtr(nav_info.atom); for (atom.locals.items) |local_index| { const local_atom = wasm_file.getAtomPtr(local_index); local_atom.deinit(gpa); } atom.deinit(gpa); - decl_info.exports.deinit(gpa); + nav_info.exports.deinit(gpa); } } { - for (zig_object.anon_decls.values()) |atom_index| { + for (zig_object.uavs.values()) |atom_index| { const atom = wasm_file.getAtomPtr(atom_index); for (atom.locals.items) |local_index| { const local_atom = wasm_file.getAtomPtr(local_index); @@ -201,8 +201,8 @@ pub fn deinit(zig_object: *ZigObject, wasm_file: *Wasm) void { zig_object.atom_types.deinit(gpa); zig_object.functions.deinit(gpa); zig_object.imports.deinit(gpa); - zig_object.decls_map.deinit(gpa); - zig_object.anon_decls.deinit(gpa); + zig_object.navs.deinit(gpa); + zig_object.uavs.deinit(gpa); zig_object.symbols.deinit(gpa); zig_object.symbols_free_list.deinit(gpa); zig_object.segment_info.deinit(gpa); @@ -236,34 +236,35 @@ pub fn allocateSymbol(zig_object: *ZigObject, gpa: std.mem.Allocator) !Symbol.In return index; } -// Generate code for the Decl, storing it in memory to be later written to +// Generate code for the `Nav`, storing it in memory to be later written to // the file on flush(). 
-pub fn updateDecl( +pub fn updateNav( zig_object: *ZigObject, wasm_file: *Wasm, pt: Zcu.PerThread, - decl_index: InternPool.DeclIndex, + nav_index: InternPool.Nav.Index, ) !void { - const mod = pt.zcu; - const decl = mod.declPtr(decl_index); - if (decl.val.getFunction(mod)) |_| { - return; - } else if (decl.val.getExternFunc(mod)) |_| { - return; - } + const zcu = pt.zcu; + const ip = &zcu.intern_pool; + const nav = ip.getNav(nav_index); + + const is_extern, const lib_name, const nav_init = switch (ip.indexToKey(nav.status.resolved.val)) { + .variable => |variable| .{ false, variable.lib_name, variable.init }, + .func => return, + .@"extern" => |@"extern"| if (ip.isFunctionType(nav.typeOf(ip))) + return + else + .{ true, @"extern".lib_name, nav.status.resolved.val }, + else => .{ false, .none, nav.status.resolved.val }, + }; const gpa = wasm_file.base.comp.gpa; - const atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, pt, decl_index); + const atom_index = try zig_object.getOrCreateAtomForNav(wasm_file, pt, nav_index); const atom = wasm_file.getAtomPtr(atom_index); atom.clear(); - if (decl.isExtern(mod)) { - const variable = decl.getOwnedVariable(mod).?; - const name = decl.name.toSlice(&mod.intern_pool); - const lib_name = variable.lib_name.toSlice(&mod.intern_pool); - return zig_object.addOrUpdateImport(wasm_file, name, atom.sym_index, lib_name, null); - } - const val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val; + if (is_extern) + return zig_object.addOrUpdateImport(wasm_file, nav.name.toSlice(ip), atom.sym_index, lib_name.toSlice(ip), null); var code_writer = std.ArrayList(u8).init(gpa); defer code_writer.deinit(); @@ -271,8 +272,8 @@ pub fn updateDecl( const res = try codegen.generateSymbol( &wasm_file.base, pt, - decl.navSrcLoc(mod), - val, + zcu.navSrcLoc(nav_index), + Value.fromInterned(nav_init), &code_writer, .none, .{ .parent_atom_index = @intFromEnum(atom.sym_index) }, @@ -281,13 +282,12 @@ pub 
fn updateDecl( const code = switch (res) { .ok => code_writer.items, .fail => |em| { - decl.analysis = .codegen_failure; - try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); + try zcu.failed_codegen.put(zcu.gpa, nav_index, em); return; }, }; - return zig_object.finishUpdateDecl(wasm_file, pt, decl_index, code); + return zig_object.finishUpdateNav(wasm_file, pt, nav_index, code); } pub fn updateFunc( @@ -298,11 +298,10 @@ pub fn updateFunc( air: Air, liveness: Liveness, ) !void { - const gpa = wasm_file.base.comp.gpa; + const zcu = pt.zcu; + const gpa = zcu.gpa; const func = pt.zcu.funcInfo(func_index); - const decl_index = func.owner_decl; - const decl = pt.zcu.declPtr(decl_index); - const atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, pt, decl_index); + const atom_index = try zig_object.getOrCreateAtomForNav(wasm_file, pt, func.owner_nav); const atom = wasm_file.getAtomPtr(atom_index); atom.clear(); @@ -311,7 +310,7 @@ pub fn updateFunc( const result = try codegen.generateFunction( &wasm_file.base, pt, - decl.navSrcLoc(pt.zcu), + zcu.navSrcLoc(func.owner_nav), func_index, air, liveness, @@ -322,79 +321,75 @@ pub fn updateFunc( const code = switch (result) { .ok => code_writer.items, .fail => |em| { - decl.analysis = .codegen_failure; - try pt.zcu.failed_analysis.put(gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); + try pt.zcu.failed_codegen.put(gpa, func.owner_nav, em); return; }, }; - return zig_object.finishUpdateDecl(wasm_file, pt, decl_index, code); + return zig_object.finishUpdateNav(wasm_file, pt, func.owner_nav, code); } -fn finishUpdateDecl( +fn finishUpdateNav( zig_object: *ZigObject, wasm_file: *Wasm, pt: Zcu.PerThread, - decl_index: InternPool.DeclIndex, + nav_index: InternPool.Nav.Index, code: []const u8, ) !void { const zcu = pt.zcu; const ip = &zcu.intern_pool; const gpa = zcu.gpa; - const decl = zcu.declPtr(decl_index); - const decl_info = zig_object.decls_map.get(decl_index).?; - const atom_index = 
decl_info.atom; + const nav = ip.getNav(nav_index); + const nav_val = zcu.navValue(nav_index); + const nav_info = zig_object.navs.get(nav_index).?; + const atom_index = nav_info.atom; const atom = wasm_file.getAtomPtr(atom_index); const sym = zig_object.symbol(atom.sym_index); - sym.name = try zig_object.string_table.insert(gpa, decl.fqn.toSlice(ip)); + sym.name = try zig_object.string_table.insert(gpa, nav.fqn.toSlice(ip)); try atom.code.appendSlice(gpa, code); atom.size = @intCast(code.len); - switch (decl.typeOf(zcu).zigTypeTag(zcu)) { - .Fn => { - sym.index = try zig_object.appendFunction(gpa, .{ .type_index = zig_object.atom_types.get(atom_index).? }); - sym.tag = .function; - }, - else => { - const segment_name: []const u8 = if (decl.getOwnedVariable(zcu)) |variable| name: { - if (variable.is_const) { - break :name ".rodata."; - } else if (Value.fromInterned(variable.init).isUndefDeep(zcu)) { - const decl_namespace = zcu.namespacePtr(decl.src_namespace); - const optimize_mode = decl_namespace.fileScope(zcu).mod.optimize_mode; - const is_initialized = switch (optimize_mode) { - .Debug, .ReleaseSafe => true, - .ReleaseFast, .ReleaseSmall => false, - }; - if (is_initialized) { - break :name ".data."; - } - break :name ".bss."; - } - // when the decl is all zeroes, we store the atom in the bss segment, - // in all other cases it will be in the data segment. - for (atom.code.items) |byte| { - if (byte != 0) break :name ".data."; - } - break :name ".bss."; - } else ".rodata."; - if ((wasm_file.base.isObject() or wasm_file.base.comp.config.import_memory) and - std.mem.startsWith(u8, segment_name, ".bss")) - { - @memset(atom.code.items, 0); + if (ip.isFunctionType(nav.typeOf(ip))) { + sym.index = try zig_object.appendFunction(gpa, .{ .type_index = zig_object.atom_types.get(atom_index).? 
}); + sym.tag = .function; + } else { + const is_const, const nav_init = switch (ip.indexToKey(nav_val.toIntern())) { + .variable => |variable| .{ false, variable.init }, + .@"extern" => |@"extern"| .{ @"extern".is_const, .none }, + else => .{ true, nav_val.toIntern() }, + }; + const segment_name = name: { + if (is_const) break :name ".rodata."; + + if (nav_init != .none and Value.fromInterned(nav_init).isUndefDeep(zcu)) { + break :name switch (zcu.navFileScope(nav_index).mod.optimize_mode) { + .Debug, .ReleaseSafe => ".data.", + .ReleaseFast, .ReleaseSmall => ".bss.", + }; } - // Will be freed upon freeing of decl or after cleanup of Wasm binary. - const full_segment_name = try std.mem.concat(gpa, u8, &.{ - segment_name, - decl.fqn.toSlice(ip), - }); - errdefer gpa.free(full_segment_name); - sym.tag = .data; - sym.index = try zig_object.createDataSegment(gpa, full_segment_name, decl.alignment); - }, + // when the decl is all zeroes, we store the atom in the bss segment, + // in all other cases it will be in the data segment. + for (atom.code.items) |byte| { + if (byte != 0) break :name ".data."; + } + break :name ".bss."; + }; + if ((wasm_file.base.isObject() or wasm_file.base.comp.config.import_memory) and + std.mem.startsWith(u8, segment_name, ".bss")) + { + @memset(atom.code.items, 0); + } + // Will be freed upon freeing of decl or after cleanup of Wasm binary. + const full_segment_name = try std.mem.concat(gpa, u8, &.{ + segment_name, + nav.fqn.toSlice(ip), + }); + errdefer gpa.free(full_segment_name); + sym.tag = .data; + sym.index = try zig_object.createDataSegment(gpa, full_segment_name, pt.navAlignment(nav_index)); } if (code.len == 0) return; - atom.alignment = decl.getAlignment(pt); + atom.alignment = pt.navAlignment(nav_index); } /// Creates and initializes a new segment in the 'Data' section. @@ -420,50 +415,51 @@ fn createDataSegment( return segment_index; } -/// For a given `InternPool.DeclIndex` returns its corresponding `Atom.Index`. 
+/// For a given `InternPool.Nav.Index` returns its corresponding `Atom.Index`. /// When the index was not found, a new `Atom` will be created, and its index will be returned. /// The newly created Atom is empty with default fields as specified by `Atom.empty`. -pub fn getOrCreateAtomForDecl( +pub fn getOrCreateAtomForNav( zig_object: *ZigObject, wasm_file: *Wasm, pt: Zcu.PerThread, - decl_index: InternPool.DeclIndex, + nav_index: InternPool.Nav.Index, ) !Atom.Index { + const ip = &pt.zcu.intern_pool; const gpa = pt.zcu.gpa; - const gop = try zig_object.decls_map.getOrPut(gpa, decl_index); + const gop = try zig_object.navs.getOrPut(gpa, nav_index); if (!gop.found_existing) { const sym_index = try zig_object.allocateSymbol(gpa); gop.value_ptr.* = .{ .atom = try wasm_file.createAtom(sym_index, zig_object.index) }; - const decl = pt.zcu.declPtr(decl_index); + const nav = ip.getNav(nav_index); const sym = zig_object.symbol(sym_index); - sym.name = try zig_object.string_table.insert(gpa, decl.fqn.toSlice(&pt.zcu.intern_pool)); + sym.name = try zig_object.string_table.insert(gpa, nav.fqn.toSlice(ip)); } return gop.value_ptr.atom; } -pub fn lowerAnonDecl( +pub fn lowerUav( zig_object: *ZigObject, wasm_file: *Wasm, pt: Zcu.PerThread, - decl_val: InternPool.Index, + uav: InternPool.Index, explicit_alignment: InternPool.Alignment, src_loc: Zcu.LazySrcLoc, -) !codegen.Result { +) !codegen.GenResult { const gpa = wasm_file.base.comp.gpa; - const gop = try zig_object.anon_decls.getOrPut(gpa, decl_val); + const gop = try zig_object.uavs.getOrPut(gpa, uav); if (!gop.found_existing) { var name_buf: [32]u8 = undefined; const name = std.fmt.bufPrint(&name_buf, "__anon_{d}", .{ - @intFromEnum(decl_val), + @intFromEnum(uav), }) catch unreachable; - switch (try zig_object.lowerConst(wasm_file, pt, name, Value.fromInterned(decl_val), src_loc)) { - .ok => |atom_index| zig_object.anon_decls.values()[gop.index] = atom_index, + switch (try zig_object.lowerConst(wasm_file, pt, name, 
Value.fromInterned(uav), src_loc)) { + .ok => |atom_index| zig_object.uavs.values()[gop.index] = atom_index, .fail => |em| return .{ .fail = em }, } } - const atom = wasm_file.getAtomPtr(zig_object.anon_decls.values()[gop.index]); + const atom = wasm_file.getAtomPtr(zig_object.uavs.values()[gop.index]); atom.alignment = switch (atom.alignment) { .none => explicit_alignment, else => switch (explicit_alignment) { @@ -471,53 +467,7 @@ pub fn lowerAnonDecl( else => atom.alignment.maxStrict(explicit_alignment), }, }; - return .ok; -} - -/// Lowers a constant typed value to a local symbol and atom. -/// Returns the symbol index of the local -/// The given `decl` is the parent decl whom owns the constant. -pub fn lowerUnnamedConst( - zig_object: *ZigObject, - wasm_file: *Wasm, - pt: Zcu.PerThread, - val: Value, - decl_index: InternPool.DeclIndex, -) !u32 { - const mod = pt.zcu; - const gpa = mod.gpa; - std.debug.assert(val.typeOf(mod).zigTypeTag(mod) != .Fn); // cannot create local symbols for functions - const decl = mod.declPtr(decl_index); - - const parent_atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, pt, decl_index); - const parent_atom = wasm_file.getAtom(parent_atom_index); - const local_index = parent_atom.locals.items.len; - const name = try std.fmt.allocPrintZ(gpa, "__unnamed_{}_{d}", .{ - decl.fqn.fmt(&mod.intern_pool), local_index, - }); - defer gpa.free(name); - - // We want to lower the source location of `decl`. However, when generating - // lazy functions (for e.g. `@tagName`), `decl` may correspond to a type - // rather than a `Nav`! - // The future split of `Decl` into `Nav` and `Cau` may require rethinking this - // logic. For now, just get the source location conditionally as needed. 
- const decl_src = if (decl.typeOf(mod).toIntern() == .type_type) - decl.val.toType().srcLoc(mod) - else - decl.navSrcLoc(mod); - - switch (try zig_object.lowerConst(wasm_file, pt, name, val, decl_src)) { - .ok => |atom_index| { - try wasm_file.getAtomPtr(parent_atom_index).locals.append(gpa, atom_index); - return @intFromEnum(wasm_file.getAtom(atom_index).sym_index); - }, - .fail => |em| { - decl.analysis = .codegen_failure; - try mod.failed_analysis.put(mod.gpa, AnalUnit.wrap(.{ .decl = decl_index }), em); - return error.CodegenFail; - }, - } + return .{ .mcv = .{ .load_symbol = @intFromEnum(atom.sym_index) } }; } const LowerConstResult = union(enum) { @@ -782,36 +732,38 @@ pub fn getGlobalSymbol(zig_object: *ZigObject, gpa: std.mem.Allocator, name: []c /// For a given decl, find the given symbol index's atom, and create a relocation for the type. /// Returns the given pointer address -pub fn getDeclVAddr( +pub fn getNavVAddr( zig_object: *ZigObject, wasm_file: *Wasm, pt: Zcu.PerThread, - decl_index: InternPool.DeclIndex, + nav_index: InternPool.Nav.Index, reloc_info: link.File.RelocInfo, ) !u64 { - const target = wasm_file.base.comp.root_mod.resolved_target.result; const zcu = pt.zcu; const ip = &zcu.intern_pool; const gpa = zcu.gpa; - const decl = zcu.declPtr(decl_index); + const nav = ip.getNav(nav_index); + const target = &zcu.navFileScope(nav_index).mod.resolved_target.result; - const target_atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, pt, decl_index); + const target_atom_index = try zig_object.getOrCreateAtomForNav(wasm_file, pt, nav_index); const target_atom = wasm_file.getAtom(target_atom_index); const target_symbol_index = @intFromEnum(target_atom.sym_index); - if (decl.isExtern(zcu)) { - const name = decl.name.toSlice(ip); - const lib_name = if (decl.getOwnedExternFunc(zcu)) |ext_fn| - ext_fn.lib_name.toSlice(ip) - else - decl.getOwnedVariable(zcu).?.lib_name.toSlice(ip); - try zig_object.addOrUpdateImport(wasm_file, name, 
target_atom.sym_index, lib_name, null); + switch (ip.indexToKey(nav.status.resolved.val)) { + .@"extern" => |@"extern"| try zig_object.addOrUpdateImport( + wasm_file, + nav.name.toSlice(ip), + target_atom.sym_index, + @"extern".lib_name.toSlice(ip), + null, + ), + else => {}, } std.debug.assert(reloc_info.parent_atom_index != 0); const atom_index = wasm_file.symbol_atom.get(.{ .file = zig_object.index, .index = @enumFromInt(reloc_info.parent_atom_index) }).?; const atom = wasm_file.getAtomPtr(atom_index); const is_wasm32 = target.cpu.arch == .wasm32; - if (decl.typeOf(pt.zcu).zigTypeTag(pt.zcu) == .Fn) { + if (ip.isFunctionType(ip.getNav(nav_index).typeOf(ip))) { std.debug.assert(reloc_info.addend == 0); // addend not allowed for function relocations try atom.relocs.append(gpa, .{ .index = target_symbol_index, @@ -834,22 +786,22 @@ pub fn getDeclVAddr( return target_symbol_index; } -pub fn getAnonDeclVAddr( +pub fn getUavVAddr( zig_object: *ZigObject, wasm_file: *Wasm, - decl_val: InternPool.Index, + uav: InternPool.Index, reloc_info: link.File.RelocInfo, ) !u64 { const gpa = wasm_file.base.comp.gpa; const target = wasm_file.base.comp.root_mod.resolved_target.result; - const atom_index = zig_object.anon_decls.get(decl_val).?; + const atom_index = zig_object.uavs.get(uav).?; const target_symbol_index = @intFromEnum(wasm_file.getAtom(atom_index).sym_index); const parent_atom_index = wasm_file.symbol_atom.get(.{ .file = zig_object.index, .index = @enumFromInt(reloc_info.parent_atom_index) }).?; const parent_atom = wasm_file.getAtomPtr(parent_atom_index); const is_wasm32 = target.cpu.arch == .wasm32; const mod = wasm_file.base.comp.module.?; - const ty = Type.fromInterned(mod.intern_pool.typeOf(decl_val)); + const ty = Type.fromInterned(mod.intern_pool.typeOf(uav)); if (ty.zigTypeTag(mod) == .Fn) { std.debug.assert(reloc_info.addend == 0); // addend not allowed for function relocations try parent_atom.relocs.append(gpa, .{ @@ -880,14 +832,14 @@ pub fn deleteExport( 
name: InternPool.NullTerminatedString, ) void { const mod = wasm_file.base.comp.module.?; - const decl_index = switch (exported) { - .decl_index => |decl_index| decl_index, - .value => @panic("TODO: implement Wasm linker code for exporting a constant value"), + const nav_index = switch (exported) { + .nav => |nav_index| nav_index, + .uav => @panic("TODO: implement Wasm linker code for exporting a constant value"), }; - const decl_info = zig_object.decls_map.getPtr(decl_index) orelse return; - if (decl_info.@"export"(zig_object, name.toSlice(&mod.intern_pool))) |sym_index| { + const nav_info = zig_object.navs.getPtr(nav_index) orelse return; + if (nav_info.@"export"(zig_object, name.toSlice(&mod.intern_pool))) |sym_index| { const sym = zig_object.symbol(sym_index); - decl_info.deleteExport(sym_index); + nav_info.deleteExport(sym_index); std.debug.assert(zig_object.global_syms.remove(sym.name)); std.debug.assert(wasm_file.symbol_atom.remove(.{ .file = zig_object.index, .index = sym_index })); zig_object.symbols_free_list.append(wasm_file.base.comp.gpa, sym_index) catch {}; @@ -902,38 +854,39 @@ pub fn updateExports( exported: Zcu.Exported, export_indices: []const u32, ) !void { - const mod = pt.zcu; - const decl_index = switch (exported) { - .decl_index => |i| i, - .value => |val| { - _ = val; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; + const nav_index = switch (exported) { + .nav => |nav| nav, + .uav => |uav| { + _ = uav; @panic("TODO: implement Wasm linker code for exporting a constant value"); }, }; - const decl = mod.declPtr(decl_index); - const atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, pt, decl_index); - const decl_info = zig_object.decls_map.getPtr(decl_index).?; + const nav = ip.getNav(nav_index); + const atom_index = try zig_object.getOrCreateAtomForNav(wasm_file, pt, nav_index); + const nav_info = zig_object.navs.getPtr(nav_index).?; const atom = wasm_file.getAtom(atom_index); const atom_sym = 
atom.symbolLoc().getSymbol(wasm_file).*; - const gpa = mod.gpa; - log.debug("Updating exports for decl '{}'", .{decl.name.fmt(&mod.intern_pool)}); + const gpa = zcu.gpa; + log.debug("Updating exports for decl '{}'", .{nav.name.fmt(ip)}); for (export_indices) |export_idx| { - const exp = mod.all_exports.items[export_idx]; - if (exp.opts.section.toSlice(&mod.intern_pool)) |section| { - try mod.failed_exports.putNoClobber(gpa, export_idx, try Zcu.ErrorMsg.create( + const exp = zcu.all_exports.items[export_idx]; + if (exp.opts.section.toSlice(ip)) |section| { + try zcu.failed_exports.putNoClobber(gpa, export_idx, try Zcu.ErrorMsg.create( gpa, - decl.navSrcLoc(mod), + zcu.navSrcLoc(nav_index), "Unimplemented: ExportOptions.section '{s}'", .{section}, )); continue; } - const export_string = exp.opts.name.toSlice(&mod.intern_pool); - const sym_index = if (decl_info.@"export"(zig_object, export_string)) |idx| idx else index: { + const export_string = exp.opts.name.toSlice(ip); + const sym_index = if (nav_info.@"export"(zig_object, export_string)) |idx| idx else index: { const sym_index = try zig_object.allocateSymbol(gpa); - try decl_info.appendExport(gpa, sym_index); + try nav_info.appendExport(gpa, sym_index); break :index sym_index; }; @@ -954,9 +907,9 @@ pub fn updateExports( }, .strong => {}, // symbols are strong by default .link_once => { - try mod.failed_exports.putNoClobber(gpa, export_idx, try Zcu.ErrorMsg.create( + try zcu.failed_exports.putNoClobber(gpa, export_idx, try Zcu.ErrorMsg.create( gpa, - decl.navSrcLoc(mod), + zcu.navSrcLoc(nav_index), "Unimplemented: LinkOnce", .{}, )); @@ -972,21 +925,21 @@ pub fn updateExports( } } -pub fn freeDecl(zig_object: *ZigObject, wasm_file: *Wasm, decl_index: InternPool.DeclIndex) void { +pub fn freeNav(zig_object: *ZigObject, wasm_file: *Wasm, nav_index: InternPool.Nav.Index) void { const gpa = wasm_file.base.comp.gpa; const mod = wasm_file.base.comp.module.?; - const decl = mod.declPtr(decl_index); - const decl_info = 
zig_object.decls_map.getPtr(decl_index).?; - const atom_index = decl_info.atom; + const ip = &mod.intern_pool; + const nav_info = zig_object.navs.getPtr(nav_index).?; + const atom_index = nav_info.atom; const atom = wasm_file.getAtomPtr(atom_index); zig_object.symbols_free_list.append(gpa, atom.sym_index) catch {}; - for (decl_info.exports.items) |exp_sym_index| { + for (nav_info.exports.items) |exp_sym_index| { const exp_sym = zig_object.symbol(exp_sym_index); exp_sym.tag = .dead; zig_object.symbols_free_list.append(exp_sym_index) catch {}; } - decl_info.exports.deinit(gpa); - std.debug.assert(zig_object.decls_map.remove(decl_index)); + nav_info.exports.deinit(gpa); + std.debug.assert(zig_object.navs.remove(nav_index)); const sym = &zig_object.symbols.items[atom.sym_index]; for (atom.locals.items) |local_atom_index| { const local_atom = wasm_file.getAtom(local_atom_index); @@ -1000,7 +953,8 @@ pub fn freeDecl(zig_object: *ZigObject, wasm_file: *Wasm, decl_index: InternPool segment.name = &.{}; // Ensure no accidental double free } - if (decl.isExtern(mod)) { + const nav_val = mod.navValue(nav_index).toIntern(); + if (ip.indexToKey(nav_val) == .@"extern") { std.debug.assert(zig_object.imports.remove(atom.sym_index)); } std.debug.assert(wasm_file.symbol_atom.remove(atom.symbolLoc())); @@ -1014,17 +968,14 @@ pub fn freeDecl(zig_object: *ZigObject, wasm_file: *Wasm, decl_index: InternPool if (sym.isGlobal()) { std.debug.assert(zig_object.global_syms.remove(atom.sym_index)); } - switch (decl.typeOf(mod).zigTypeTag(mod)) { - .Fn => { - zig_object.functions_free_list.append(gpa, sym.index) catch {}; - std.debug.assert(zig_object.atom_types.remove(atom_index)); - }, - else => { - zig_object.segment_free_list.append(gpa, sym.index) catch {}; - const segment = &zig_object.segment_info.items[sym.index]; - gpa.free(segment.name); - segment.name = &.{}; // Prevent accidental double free - }, + if (ip.isFunctionType(ip.typeOf(nav_val))) { + 
zig_object.functions_free_list.append(gpa, sym.index) catch {}; + std.debug.assert(zig_object.atom_types.remove(atom_index)); + } else { + zig_object.segment_free_list.append(gpa, sym.index) catch {}; + const segment = &zig_object.segment_info.items[sym.index]; + gpa.free(segment.name); + segment.name = &.{}; // Prevent accidental double free } } @@ -1182,10 +1133,10 @@ fn allocateDebugAtoms(zig_object: *ZigObject) !void { /// For the given `decl_index`, stores the corresponding type representing the function signature. /// Asserts declaration has an associated `Atom`. /// Returns the index into the list of types. -pub fn storeDeclType(zig_object: *ZigObject, gpa: std.mem.Allocator, decl_index: InternPool.DeclIndex, func_type: std.wasm.Type) !u32 { - const decl_info = zig_object.decls_map.get(decl_index).?; +pub fn storeDeclType(zig_object: *ZigObject, gpa: std.mem.Allocator, nav_index: InternPool.Nav.Index, func_type: std.wasm.Type) !u32 { + const nav_info = zig_object.navs.get(nav_index).?; const index = try zig_object.putOrGetFuncType(gpa, func_type); - try zig_object.atom_types.put(gpa, decl_info.atom, index); + try zig_object.atom_types.put(gpa, nav_info.atom, index); return index; } diff --git a/src/print_air.zig b/src/print_air.zig index f84904dec805..137461e6e452 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -675,7 +675,7 @@ const Writer = struct { } } const asm_source = std.mem.sliceAsBytes(w.air.extra[extra_i..])[0..extra.data.source_len]; - try s.print(", \"{s}\"", .{asm_source}); + try s.print(", \"{}\"", .{std.zig.fmtEscapes(asm_source)}); } fn writeDbgStmt(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { diff --git a/src/print_value.zig b/src/print_value.zig index 46b92030682a..4cd52c98021e 100644 --- a/src/print_value.zig +++ b/src/print_value.zig @@ -90,12 +90,8 @@ pub fn print( else => try writer.writeAll(@tagName(simple_value)), }, .variable => try writer.writeAll("(variable)"), - .extern_func => |extern_func| try 
writer.print("(extern function '{}')", .{ - mod.declPtr(extern_func.decl).name.fmt(ip), - }), - .func => |func| try writer.print("(function '{}')", .{ - mod.declPtr(func.owner_decl).name.fmt(ip), - }), + .@"extern" => |e| try writer.print("(extern '{}')", .{e.name.fmt(ip)}), + .func => |func| try writer.print("(function '{}')", .{ip.getNav(func.owner_nav).name.fmt(ip)}), .int => |int| switch (int.storage) { inline .u64, .i64, .big_int => |x| try writer.print("{}", .{x}), .lazy_align => |ty| if (have_sema) { @@ -138,8 +134,8 @@ pub fn print( .slice => |slice| { const print_contents = switch (ip.getBackingAddrTag(slice.ptr).?) { .field, .arr_elem, .eu_payload, .opt_payload => unreachable, - .anon_decl, .comptime_alloc, .comptime_field => true, - .decl, .int => false, + .uav, .comptime_alloc, .comptime_field => true, + .nav, .int => false, }; if (print_contents) { // TODO: eventually we want to load the slice as an array with `sema`, but that's @@ -157,8 +153,8 @@ pub fn print( .ptr => { const print_contents = switch (ip.getBackingAddrTag(val.toIntern()).?) { .field, .arr_elem, .eu_payload, .opt_payload => unreachable, - .anon_decl, .comptime_alloc, .comptime_field => true, - .decl, .int => false, + .uav, .comptime_alloc, .comptime_field => true, + .nav, .int => false, }; if (print_contents) { // TODO: eventually we want to load the pointer with `sema`, but that's @@ -294,11 +290,11 @@ fn printPtr( else => unreachable, }; - if (ptr.base_addr == .anon_decl) { + if (ptr.base_addr == .uav) { // If the value is an aggregate, we can potentially print it more nicely. 
- switch (pt.zcu.intern_pool.indexToKey(ptr.base_addr.anon_decl.val)) { + switch (pt.zcu.intern_pool.indexToKey(ptr.base_addr.uav.val)) { .aggregate => |agg| return printAggregate( - Value.fromInterned(ptr.base_addr.anon_decl.val), + Value.fromInterned(ptr.base_addr.uav.val), agg, true, writer, @@ -333,13 +329,13 @@ fn printPtrDerivation( int.ptr_ty.fmt(pt), int.addr, }), - .decl_ptr => |decl_index| { - try writer.print("{}", .{zcu.declPtr(decl_index).fqn.fmt(ip)}); + .nav_ptr => |nav| { + try writer.print("{}", .{ip.getNav(nav).fqn.fmt(ip)}); }, - .anon_decl_ptr => |anon| { - const ty = Value.fromInterned(anon.val).typeOf(zcu); + .uav_ptr => |uav| { + const ty = Value.fromInterned(uav.val).typeOf(zcu); try writer.print("@as({}, ", .{ty.fmt(pt)}); - try print(Value.fromInterned(anon.val), writer, level - 1, pt, have_sema, sema); + try print(Value.fromInterned(uav.val), writer, level - 1, pt, have_sema, sema); try writer.writeByte(')'); }, .comptime_alloc_ptr => |info| { diff --git a/test/behavior/type_info.zig b/test/behavior/type_info.zig index b650248e424a..46af309f0fe7 100644 --- a/test/behavior/type_info.zig +++ b/test/behavior/type_info.zig @@ -605,9 +605,9 @@ test "@typeInfo decls and usingnamespace" { }; const decls = @typeInfo(B).Struct.decls; try expect(decls.len == 3); - try expectEqualStrings(decls[0].name, "x"); - try expectEqualStrings(decls[1].name, "y"); - try expectEqualStrings(decls[2].name, "z"); + try expectEqualStrings(decls[0].name, "z"); + try expectEqualStrings(decls[1].name, "x"); + try expectEqualStrings(decls[2].name, "y"); } test "@typeInfo decls ignore dependency loops" { diff --git a/test/behavior/usingnamespace.zig b/test/behavior/usingnamespace.zig index 9be734dd3201..418cae3ca3e6 100644 --- a/test/behavior/usingnamespace.zig +++ b/test/behavior/usingnamespace.zig @@ -90,10 +90,6 @@ test { try expect(a.x == AA.c().expected); } -comptime { - _ = @import("usingnamespace/file_1.zig"); -} - const Bar = struct { usingnamespace Mixin; }; 
diff --git a/test/behavior/usingnamespace/file_0.zig b/test/behavior/usingnamespace/file_0.zig deleted file mode 100644 index 584f583c56d1..000000000000 --- a/test/behavior/usingnamespace/file_0.zig +++ /dev/null @@ -1 +0,0 @@ -pub const A = 123; diff --git a/test/behavior/usingnamespace/file_1.zig b/test/behavior/usingnamespace/file_1.zig deleted file mode 100644 index e16ae80e48e7..000000000000 --- a/test/behavior/usingnamespace/file_1.zig +++ /dev/null @@ -1,12 +0,0 @@ -const std = @import("std"); -const expect = std.testing.expect; -const imports = @import("imports.zig"); -const builtin = @import("builtin"); - -const A = 456; - -test { - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - - try expect(imports.A == 123); -} diff --git a/test/behavior/usingnamespace/imports.zig b/test/behavior/usingnamespace/imports.zig deleted file mode 100644 index bbbc7dd8ca87..000000000000 --- a/test/behavior/usingnamespace/imports.zig +++ /dev/null @@ -1,5 +0,0 @@ -const file_0 = @import("file_0.zig"); -const file_1 = @import("file_1.zig"); - -pub usingnamespace file_0; -pub usingnamespace file_1; diff --git a/test/cases/compile_errors/setAlignStack_in_inline_function.zig b/test/cases/compile_errors/setAlignStack_in_inline_function.zig deleted file mode 100644 index a84424e368c0..000000000000 --- a/test/cases/compile_errors/setAlignStack_in_inline_function.zig +++ /dev/null @@ -1,22 +0,0 @@ -export fn entry() void { - foo(); -} -inline fn foo() void { - @setAlignStack(16); -} - -export fn entry1() void { - comptime bar(); -} -fn bar() void { - @setAlignStack(16); -} - -// error -// backend=stage2 -// target=native -// -// :5:5: error: @setAlignStack in inline function -// :2:8: note: called from here -// :12:5: error: @setAlignStack in inline call -// :9:17: note: called from here diff --git a/test/cases/compile_errors/setAlignStack_set_twice.zig b/test/cases/compile_errors/setAlignStack_set_twice.zig deleted file mode 100644 index 
3c61b26becd8..000000000000 --- a/test/cases/compile_errors/setAlignStack_set_twice.zig +++ /dev/null @@ -1,11 +0,0 @@ -export fn entry() void { - @setAlignStack(16); - @setAlignStack(16); -} - -// error -// backend=stage2 -// target=native -// -// :3:5: error: multiple @setAlignStack in the same function body -// :2:5: note: other instance here diff --git a/test/cases/compile_errors/tagName_on_invalid_value_of_non-exhaustive_enum.zig b/test/cases/compile_errors/tagName_on_invalid_value_of_non-exhaustive_enum.zig index 06e17a1e873b..14138557f8a9 100644 --- a/test/cases/compile_errors/tagName_on_invalid_value_of_non-exhaustive_enum.zig +++ b/test/cases/compile_errors/tagName_on_invalid_value_of_non-exhaustive_enum.zig @@ -8,5 +8,5 @@ test "enum" { // target=native // is_test=true // -// :3:9: error: no field with value '@enumFromInt(5)' in enum 'test.enum.E' +// :3:9: error: no field with value '@enumFromInt(5)' in enum 'tmp.test.enum.E' // :2:15: note: declared here