PerfTest
// perftest.zig
//
// build with: zig build-exe -O ReleaseFast perftest.zig
// linux test: poop "./perftest std" "./perftest custom"
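// cross-platform alternative (editor's note, not from the original gist;
// poop is linux-only): hyperfine "./perftest std" "./perftest custom"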
//
const std = @import("std");
const tokens = @embedFile("tokens.zig");

pub fn main() void {
    const is_windows = (@import("builtin").os.tag == .windows);
    var buf: [if (is_windows) 3000 else 0]u8 = undefined;
    const impl: Impl = blk: {
        var fbs = std.heap.FixedBufferAllocator.init(&buf);
        const cmd_args = if (is_windows)
            (std.process.argsAlloc(fbs.allocator()) catch @panic("oom"))[1..]
        else
            std.os.argv[1..];
        if (cmd_args.len != 1) @panic("expected 1 cmdline arg");
        const impl_str = if (is_windows) cmd_args[0] else std.mem.span(cmd_args[0]);
        break :blk {
            if (std.mem.eql(u8, impl_str, "std")) break :blk .std;
            if (std.mem.eql(u8, impl_str, "custom")) break :blk .custom;
            std.debug.panic("unknown impl '{s}'", .{impl_str});
        };
    };
    const loop_count = 1000;
    var token_count: u32 = 0;
    switch (impl) {
        .std => for (0..loop_count) |_| {
            var tokenizer: std.zig.Tokenizer = .{
                .buffer = tokens,
                .index = 0,
            };
            while (true) {
                const token = tokenizer.next();
                if (token.tag == .eof) break;
                token_count += 1;
            }
        },
        .custom => for (0..loop_count) |_| {
            var tokenizer: custom.Tokenizer = .{
                .index = 0,
            };
            while (true) {
                const token = tokenizer.next(tokens);
                if (token.tag == .eof) break;
                token_count += 1;
            }
        },
    }
    if (token_count != 34767 * loop_count) @panic("bug");
    // for (0..3) |_| {
    //     testPerf(.std, loop_count);
    //     testPerf(.custom, loop_count);
    // }
}
const Impl = enum { std, custom };

// fn testPerf(impl: Impl, loop: usize) void {
//     var elapsed_ns: u64 = 0;
//     for (0..loop) |_| {
//         elapsed_ns += switch (impl) {
//             .std => stdImpl(),
//             .custom => customImpl(),
//         };
//     }
//     const elapsed_ms = elapsed_ns / 1000000;
//     std.debug.print("{s}: {} ms\n", .{ @tagName(impl), elapsed_ms });
// }
// fn stdImpl() u64 {
//     var timer = std.time.Timer.start() catch |e| @panic(@errorName(e));
//     var tokenizer: std.zig.Tokenizer = .{
//         .buffer = tokens,
//         .index = 0,
//     };
//     while (true) {
//         const token = tokenizer.next();
//         if (token.tag == .eof) break;
//     }
//     return timer.read();
// }
// fn customImpl() u64 {
//     var timer = std.time.Timer.start() catch |e| @panic(@errorName(e));
//     var tokenizer: custom.Tokenizer = .{
//         .index = 0,
//     };
//     while (true) {
//         const token = tokenizer.next(tokens);
//         if (token.tag == .eof) break;
//     }
//     return timer.read();
// }

const custom = struct {
    pub const Token = struct {
        tag: Tag,
        loc: Loc,

        pub const Loc = struct {
            start: usize,
            end: usize,
        };

        pub const keywords = std.StaticStringMap(Tag).initComptime(.{
            .{ "addrspace", .keyword_addrspace },
            .{ "align", .keyword_align },
            .{ "allowzero", .keyword_allowzero },
            .{ "and", .keyword_and },
            .{ "anyframe", .keyword_anyframe },
            .{ "anytype", .keyword_anytype },
            .{ "asm", .keyword_asm },
            .{ "async", .keyword_async },
            .{ "await", .keyword_await },
            .{ "break", .keyword_break },
            .{ "callconv", .keyword_callconv },
            .{ "catch", .keyword_catch },
            .{ "comptime", .keyword_comptime },
            .{ "const", .keyword_const },
            .{ "continue", .keyword_continue },
            .{ "defer", .keyword_defer },
            .{ "else", .keyword_else },
            .{ "enum", .keyword_enum },
            .{ "errdefer", .keyword_errdefer },
            .{ "error", .keyword_error },
            .{ "export", .keyword_export },
            .{ "extern", .keyword_extern },
            .{ "fn", .keyword_fn },
            .{ "for", .keyword_for },
            .{ "if", .keyword_if },
            .{ "inline", .keyword_inline },
            .{ "noalias", .keyword_noalias },
            .{ "noinline", .keyword_noinline },
            .{ "nosuspend", .keyword_nosuspend },
            .{ "opaque", .keyword_opaque },
            .{ "or", .keyword_or },
            .{ "orelse", .keyword_orelse },
            .{ "packed", .keyword_packed },
            .{ "pub", .keyword_pub },
            .{ "resume", .keyword_resume },
            .{ "return", .keyword_return },
            .{ "linksection", .keyword_linksection },
            .{ "struct", .keyword_struct },
            .{ "suspend", .keyword_suspend },
            .{ "switch", .keyword_switch },
            .{ "test", .keyword_test },
            .{ "threadlocal", .keyword_threadlocal },
            .{ "try", .keyword_try },
            .{ "union", .keyword_union },
            .{ "unreachable", .keyword_unreachable },
            .{ "usingnamespace", .keyword_usingnamespace },
            .{ "var", .keyword_var },
            .{ "volatile", .keyword_volatile },
            .{ "while", .keyword_while },
        });

        pub fn getKeyword(bytes: []const u8) ?Tag {
            return keywords.get(bytes);
        }
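
        // e.g. getKeyword("while") returns .keyword_while and getKeyword("foo")
        // returns null, so keyword detection is one lookup in the
        // comptime-built static map above.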

        pub const Tag = enum {
            invalid,
            invalid_periodasterisks,
            identifier,
            string_literal,
            multiline_string_literal_line,
            char_literal,
            eof,
            builtin,
            bang,
            pipe,
            pipe_pipe,
            pipe_equal,
            equal,
            equal_equal,
            equal_angle_bracket_right,
            bang_equal,
            l_paren,
            r_paren,
            semicolon,
            percent,
            percent_equal,
            l_brace,
            r_brace,
            l_bracket,
            r_bracket,
            period,
            period_asterisk,
            ellipsis2,
            ellipsis3,
            caret,
            caret_equal,
            plus,
            plus_plus,
            plus_equal,
            plus_percent,
            plus_percent_equal,
            plus_pipe,
            plus_pipe_equal,
            minus,
            minus_equal,
            minus_percent,
            minus_percent_equal,
            minus_pipe,
            minus_pipe_equal,
            asterisk,
            asterisk_equal,
            asterisk_asterisk,
            asterisk_percent,
            asterisk_percent_equal,
            asterisk_pipe,
            asterisk_pipe_equal,
            arrow,
            colon,
            slash,
            slash_equal,
            comma,
            ampersand,
            ampersand_equal,
            question_mark,
            angle_bracket_left,
            angle_bracket_left_equal,
            angle_bracket_angle_bracket_left,
            angle_bracket_angle_bracket_left_equal,
            angle_bracket_angle_bracket_left_pipe,
            angle_bracket_angle_bracket_left_pipe_equal,
            angle_bracket_right,
            angle_bracket_right_equal,
            angle_bracket_angle_bracket_right,
            angle_bracket_angle_bracket_right_equal,
            tilde,
            number_literal,
            doc_comment,
            container_doc_comment,
            keyword_addrspace,
            keyword_align,
            keyword_allowzero,
            keyword_and,
            keyword_anyframe,
            keyword_anytype,
            keyword_asm,
            keyword_async,
            keyword_await,
            keyword_break,
            keyword_callconv,
            keyword_catch,
            keyword_comptime,
            keyword_const,
            keyword_continue,
            keyword_defer,
            keyword_else,
            keyword_enum,
            keyword_errdefer,
            keyword_error,
            keyword_export,
            keyword_extern,
            keyword_fn,
            keyword_for,
            keyword_if,
            keyword_inline,
            keyword_noalias,
            keyword_noinline,
            keyword_nosuspend,
            keyword_opaque,
            keyword_or,
            keyword_orelse,
            keyword_packed,
            keyword_pub,
            keyword_resume,
            keyword_return,
            keyword_linksection,
            keyword_struct,
            keyword_suspend,
            keyword_switch,
            keyword_test,
            keyword_threadlocal,
            keyword_try,
            keyword_union,
            keyword_unreachable,
            keyword_usingnamespace,
            keyword_var,
            keyword_volatile,
            keyword_while,

            pub fn lexeme(tag: Tag) ?[]const u8 {
                return switch (tag) {
                    .invalid,
                    .identifier,
                    .string_literal,
                    .multiline_string_literal_line,
                    .char_literal,
                    .eof,
                    .builtin,
                    .number_literal,
                    .doc_comment,
                    .container_doc_comment,
                    => null,

                    .invalid_periodasterisks => ".**",
                    .bang => "!",
                    .pipe => "|",
                    .pipe_pipe => "||",
                    .pipe_equal => "|=",
                    .equal => "=",
                    .equal_equal => "==",
                    .equal_angle_bracket_right => "=>",
                    .bang_equal => "!=",
                    .l_paren => "(",
                    .r_paren => ")",
                    .semicolon => ";",
                    .percent => "%",
                    .percent_equal => "%=",
                    .l_brace => "{",
                    .r_brace => "}",
                    .l_bracket => "[",
                    .r_bracket => "]",
                    .period => ".",
                    .period_asterisk => ".*",
                    .ellipsis2 => "..",
                    .ellipsis3 => "...",
                    .caret => "^",
                    .caret_equal => "^=",
                    .plus => "+",
                    .plus_plus => "++",
                    .plus_equal => "+=",
                    .plus_percent => "+%",
                    .plus_percent_equal => "+%=",
                    .plus_pipe => "+|",
                    .plus_pipe_equal => "+|=",
                    .minus => "-",
                    .minus_equal => "-=",
                    .minus_percent => "-%",
                    .minus_percent_equal => "-%=",
                    .minus_pipe => "-|",
                    .minus_pipe_equal => "-|=",
                    .asterisk => "*",
                    .asterisk_equal => "*=",
                    .asterisk_asterisk => "**",
                    .asterisk_percent => "*%",
                    .asterisk_percent_equal => "*%=",
                    .asterisk_pipe => "*|",
                    .asterisk_pipe_equal => "*|=",
                    .arrow => "->",
                    .colon => ":",
                    .slash => "/",
                    .slash_equal => "/=",
                    .comma => ",",
                    .ampersand => "&",
                    .ampersand_equal => "&=",
                    .question_mark => "?",
                    .angle_bracket_left => "<",
                    .angle_bracket_left_equal => "<=",
                    .angle_bracket_angle_bracket_left => "<<",
                    .angle_bracket_angle_bracket_left_equal => "<<=",
                    .angle_bracket_angle_bracket_left_pipe => "<<|",
                    .angle_bracket_angle_bracket_left_pipe_equal => "<<|=",
                    .angle_bracket_right => ">",
                    .angle_bracket_right_equal => ">=",
                    .angle_bracket_angle_bracket_right => ">>",
                    .angle_bracket_angle_bracket_right_equal => ">>=",
                    .tilde => "~",
                    .keyword_addrspace => "addrspace",
                    .keyword_align => "align",
                    .keyword_allowzero => "allowzero",
                    .keyword_and => "and",
                    .keyword_anyframe => "anyframe",
                    .keyword_anytype => "anytype",
                    .keyword_asm => "asm",
                    .keyword_async => "async",
                    .keyword_await => "await",
                    .keyword_break => "break",
                    .keyword_callconv => "callconv",
                    .keyword_catch => "catch",
                    .keyword_comptime => "comptime",
                    .keyword_const => "const",
                    .keyword_continue => "continue",
                    .keyword_defer => "defer",
                    .keyword_else => "else",
                    .keyword_enum => "enum",
                    .keyword_errdefer => "errdefer",
                    .keyword_error => "error",
                    .keyword_export => "export",
                    .keyword_extern => "extern",
                    .keyword_fn => "fn",
                    .keyword_for => "for",
                    .keyword_if => "if",
                    .keyword_inline => "inline",
                    .keyword_noalias => "noalias",
                    .keyword_noinline => "noinline",
                    .keyword_nosuspend => "nosuspend",
                    .keyword_opaque => "opaque",
                    .keyword_or => "or",
                    .keyword_orelse => "orelse",
                    .keyword_packed => "packed",
                    .keyword_pub => "pub",
                    .keyword_resume => "resume",
                    .keyword_return => "return",
                    .keyword_linksection => "linksection",
                    .keyword_struct => "struct",
                    .keyword_suspend => "suspend",
                    .keyword_switch => "switch",
                    .keyword_test => "test",
                    .keyword_threadlocal => "threadlocal",
                    .keyword_try => "try",
                    .keyword_union => "union",
                    .keyword_unreachable => "unreachable",
                    .keyword_usingnamespace => "usingnamespace",
                    .keyword_var => "var",
                    .keyword_volatile => "volatile",
                    .keyword_while => "while",
                };
            }

            pub fn symbol(tag: Tag) []const u8 {
                return tag.lexeme() orelse switch (tag) {
                    .invalid => "invalid token",
                    .identifier => "an identifier",
                    .string_literal, .multiline_string_literal_line => "a string literal",
                    .char_literal => "a character literal",
                    .eof => "EOF",
                    .builtin => "a builtin function",
                    .number_literal => "a number literal",
                    .doc_comment, .container_doc_comment => "a document comment",
                    else => unreachable,
                };
            }
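
            // e.g. symbol(.plus) returns "+" (via lexeme), while
            // symbol(.identifier) falls through to "an identifier".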
        };
    };

    pub const Tokenizer = struct {
        index: usize,

        pub fn init(buffer: [:0]const u8) Tokenizer {
            // Skip the UTF-8 BOM if present. Note: unlike std.zig.Tokenizer,
            // this Tokenizer has no `buffer` field; the buffer is passed to
            // each call of `next` instead.
            return .{
                .index = if (std.mem.startsWith(u8, buffer, "\xEF\xBB\xBF")) 3 else 0,
            };
        }

        const State = enum {
            start,
            expect_newline,
            identifier,
            builtin,
            string_literal,
            string_literal_backslash,
            multiline_string_literal_line,
            char_literal,
            char_literal_backslash,
            backslash,
            equal,
            bang,
            pipe,
            minus,
            minus_percent,
            minus_pipe,
            asterisk,
            asterisk_percent,
            asterisk_pipe,
            slash,
            line_comment_start,
            line_comment,
            doc_comment_start,
            doc_comment,
            int,
            int_exponent,
            int_period,
            float,
            float_exponent,
            ampersand,
            caret,
            percent,
            plus,
            plus_percent,
            plus_pipe,
            angle_bracket_left,
            angle_bracket_angle_bracket_left,
            angle_bracket_angle_bracket_left_pipe,
            angle_bracket_right,
            angle_bracket_angle_bracket_right,
            period,
            period_2,
            period_asterisk,
            saw_at_sign,
            invalid,
        };
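
        // Editor's note: the state machine below mirrors std.zig.Tokenizer's;
        // the only change is that the source buffer is a parameter of `next`
        // rather than a struct field. Each `.foo` arm is a DFA state, and
        // transitions use Zig 0.14's labeled switch continue
        // (`continue :state .foo`).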

        /// After this returns invalid, it will reset on the next newline, returning tokens starting from there.
        /// An eof token will always be returned at the end.
        pub fn next(self: *Tokenizer, buffer: [:0]const u8) Token {
            var result: Token = .{
                .tag = undefined,
                .loc = .{
                    .start = self.index,
                    .end = undefined,
                },
            };
            state: switch (State.start) {
                .start => switch (buffer[self.index]) {
                    0 => {
                        if (self.index == buffer.len) {
                            return .{
                                .tag = .eof,
                                .loc = .{
                                    .start = self.index,
                                    .end = self.index,
                                },
                            };
                        } else {
                            continue :state .invalid;
                        }
                    },
                    ' ', '\n', '\t', '\r' => {
                        self.index += 1;
                        result.loc.start = self.index;
                        continue :state .start;
                    },
                    '"' => {
                        result.tag = .string_literal;
                        continue :state .string_literal;
                    },
                    '\'' => {
                        result.tag = .char_literal;
                        continue :state .char_literal;
                    },
                    'a'...'z', 'A'...'Z', '_' => {
                        result.tag = .identifier;
                        continue :state .identifier;
                    },
                    '@' => continue :state .saw_at_sign,
                    '=' => continue :state .equal,
                    '!' => continue :state .bang,
                    '|' => continue :state .pipe,
                    '(' => {
                        result.tag = .l_paren;
                        self.index += 1;
                    },
                    ')' => {
                        result.tag = .r_paren;
                        self.index += 1;
                    },
                    '[' => {
                        result.tag = .l_bracket;
                        self.index += 1;
                    },
                    ']' => {
                        result.tag = .r_bracket;
                        self.index += 1;
                    },
                    ';' => {
                        result.tag = .semicolon;
                        self.index += 1;
                    },
                    ',' => {
                        result.tag = .comma;
                        self.index += 1;
                    },
                    '?' => {
                        result.tag = .question_mark;
                        self.index += 1;
                    },
                    ':' => {
                        result.tag = .colon;
                        self.index += 1;
                    },
                    '%' => continue :state .percent,
                    '*' => continue :state .asterisk,
                    '+' => continue :state .plus,
                    '<' => continue :state .angle_bracket_left,
                    '>' => continue :state .angle_bracket_right,
                    '^' => continue :state .caret,
                    '\\' => {
                        result.tag = .multiline_string_literal_line;
                        continue :state .backslash;
                    },
                    '{' => {
                        result.tag = .l_brace;
                        self.index += 1;
                    },
                    '}' => {
                        result.tag = .r_brace;
                        self.index += 1;
                    },
                    '~' => {
                        result.tag = .tilde;
                        self.index += 1;
                    },
                    '.' => continue :state .period,
                    '-' => continue :state .minus,
                    '/' => continue :state .slash,
                    '&' => continue :state .ampersand,
                    '0'...'9' => {
                        result.tag = .number_literal;
                        self.index += 1;
                        continue :state .int;
                    },
                    else => continue :state .invalid,
                },
                .expect_newline => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        0 => {
                            if (self.index == buffer.len) {
                                result.tag = .invalid;
                            } else {
                                continue :state .invalid;
                            }
                        },
                        '\n' => {
                            self.index += 1;
                            result.loc.start = self.index;
                            continue :state .start;
                        },
                        else => continue :state .invalid,
                    }
                },
                .invalid => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        0 => if (self.index == buffer.len) {
                            result.tag = .invalid;
                        } else {
                            continue :state .invalid;
                        },
                        '\n' => result.tag = .invalid,
                        else => continue :state .invalid,
                    }
                },
                .saw_at_sign => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        0, '\n' => result.tag = .invalid,
                        '"' => {
                            result.tag = .identifier;
                            continue :state .string_literal;
                        },
                        'a'...'z', 'A'...'Z', '_' => {
                            result.tag = .builtin;
                            continue :state .builtin;
                        },
                        else => continue :state .invalid,
                    }
                },
                .ampersand => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        '=' => {
                            result.tag = .ampersand_equal;
                            self.index += 1;
                        },
                        else => result.tag = .ampersand,
                    }
                },
                .asterisk => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        '=' => {
                            result.tag = .asterisk_equal;
                            self.index += 1;
                        },
                        '*' => {
                            result.tag = .asterisk_asterisk;
                            self.index += 1;
                        },
                        '%' => continue :state .asterisk_percent,
                        '|' => continue :state .asterisk_pipe,
                        else => result.tag = .asterisk,
                    }
                },
                .asterisk_percent => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        '=' => {
                            result.tag = .asterisk_percent_equal;
                            self.index += 1;
                        },
                        else => result.tag = .asterisk_percent,
                    }
                },
                .asterisk_pipe => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        '=' => {
                            result.tag = .asterisk_pipe_equal;
                            self.index += 1;
                        },
                        else => result.tag = .asterisk_pipe,
                    }
                },
                .percent => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        '=' => {
                            result.tag = .percent_equal;
                            self.index += 1;
                        },
                        else => result.tag = .percent,
                    }
                },
                .plus => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        '=' => {
                            result.tag = .plus_equal;
                            self.index += 1;
                        },
                        '+' => {
                            result.tag = .plus_plus;
                            self.index += 1;
                        },
                        '%' => continue :state .plus_percent,
                        '|' => continue :state .plus_pipe,
                        else => result.tag = .plus,
                    }
                },
                .plus_percent => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        '=' => {
                            result.tag = .plus_percent_equal;
                            self.index += 1;
                        },
                        else => result.tag = .plus_percent,
                    }
                },
                .plus_pipe => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        '=' => {
                            result.tag = .plus_pipe_equal;
                            self.index += 1;
                        },
                        else => result.tag = .plus_pipe,
                    }
                },
                .caret => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        '=' => {
                            result.tag = .caret_equal;
                            self.index += 1;
                        },
                        else => result.tag = .caret,
                    }
                },
                .identifier => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        'a'...'z', 'A'...'Z', '_', '0'...'9' => continue :state .identifier,
                        else => {
                            const ident = buffer[result.loc.start..self.index];
                            if (Token.getKeyword(ident)) |tag| {
                                result.tag = tag;
                            }
                        },
                    }
                },
                .builtin => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        'a'...'z', 'A'...'Z', '_', '0'...'9' => continue :state .builtin,
                        else => {},
                    }
                },
                .backslash => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        0 => result.tag = .invalid,
                        '\\' => continue :state .multiline_string_literal_line,
                        '\n' => result.tag = .invalid,
                        else => continue :state .invalid,
                    }
                },
                .string_literal => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        0 => {
                            if (self.index != buffer.len) {
                                continue :state .invalid;
                            } else {
                                result.tag = .invalid;
                            }
                        },
                        '\n' => result.tag = .invalid,
                        '\\' => continue :state .string_literal_backslash,
                        '"' => self.index += 1,
                        0x01...0x09, 0x0b...0x1f, 0x7f => {
                            continue :state .invalid;
                        },
                        else => continue :state .string_literal,
                    }
                },
                .string_literal_backslash => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        0, '\n' => result.tag = .invalid,
                        else => continue :state .string_literal,
                    }
                },
                .char_literal => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        0 => {
                            if (self.index != buffer.len) {
                                continue :state .invalid;
                            } else {
                                result.tag = .invalid;
                            }
                        },
                        '\n' => result.tag = .invalid,
                        '\\' => continue :state .char_literal_backslash,
                        '\'' => self.index += 1,
                        0x01...0x09, 0x0b...0x1f, 0x7f => {
                            continue :state .invalid;
                        },
                        else => continue :state .char_literal,
                    }
                },
                .char_literal_backslash => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        0 => {
                            if (self.index != buffer.len) {
                                continue :state .invalid;
                            } else {
                                result.tag = .invalid;
                            }
                        },
                        '\n' => result.tag = .invalid,
                        0x01...0x09, 0x0b...0x1f, 0x7f => {
                            continue :state .invalid;
                        },
                        else => continue :state .char_literal,
                    }
                },
                .multiline_string_literal_line => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        0 => if (self.index != buffer.len) {
                            continue :state .invalid;
                        },
                        '\n' => {},
                        '\r' => if (buffer[self.index + 1] != '\n') {
                            continue :state .invalid;
                        },
                        0x01...0x09, 0x0b...0x0c, 0x0e...0x1f, 0x7f => continue :state .invalid,
                        else => continue :state .multiline_string_literal_line,
                    }
                },
                .bang => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        '=' => {
                            result.tag = .bang_equal;
                            self.index += 1;
                        },
                        else => result.tag = .bang,
                    }
                },
                .pipe => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        '=' => {
                            result.tag = .pipe_equal;
                            self.index += 1;
                        },
                        '|' => {
                            result.tag = .pipe_pipe;
                            self.index += 1;
                        },
                        else => result.tag = .pipe,
                    }
                },
                .equal => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        '=' => {
                            result.tag = .equal_equal;
                            self.index += 1;
                        },
                        '>' => {
                            result.tag = .equal_angle_bracket_right;
                            self.index += 1;
                        },
                        else => result.tag = .equal,
                    }
                },
                .minus => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        '>' => {
                            result.tag = .arrow;
                            self.index += 1;
                        },
                        '=' => {
                            result.tag = .minus_equal;
                            self.index += 1;
                        },
                        '%' => continue :state .minus_percent,
                        '|' => continue :state .minus_pipe,
                        else => result.tag = .minus,
                    }
                },
                .minus_percent => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        '=' => {
                            result.tag = .minus_percent_equal;
                            self.index += 1;
                        },
                        else => result.tag = .minus_percent,
                    }
                },
                .minus_pipe => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        '=' => {
                            result.tag = .minus_pipe_equal;
                            self.index += 1;
                        },
                        else => result.tag = .minus_pipe,
                    }
                },
                .angle_bracket_left => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        '<' => continue :state .angle_bracket_angle_bracket_left,
                        '=' => {
                            result.tag = .angle_bracket_left_equal;
                            self.index += 1;
                        },
                        else => result.tag = .angle_bracket_left,
                    }
                },
                .angle_bracket_angle_bracket_left => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        '=' => {
                            result.tag = .angle_bracket_angle_bracket_left_equal;
                            self.index += 1;
                        },
                        '|' => continue :state .angle_bracket_angle_bracket_left_pipe,
                        else => result.tag = .angle_bracket_angle_bracket_left,
                    }
                },
                .angle_bracket_angle_bracket_left_pipe => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        '=' => {
                            result.tag = .angle_bracket_angle_bracket_left_pipe_equal;
                            self.index += 1;
                        },
                        else => result.tag = .angle_bracket_angle_bracket_left_pipe,
                    }
                },
                .angle_bracket_right => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        '>' => continue :state .angle_bracket_angle_bracket_right,
                        '=' => {
                            result.tag = .angle_bracket_right_equal;
                            self.index += 1;
                        },
                        else => result.tag = .angle_bracket_right,
                    }
                },
                .angle_bracket_angle_bracket_right => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        '=' => {
                            result.tag = .angle_bracket_angle_bracket_right_equal;
                            self.index += 1;
                        },
                        else => result.tag = .angle_bracket_angle_bracket_right,
                    }
                },
                .period => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        '.' => continue :state .period_2,
                        '*' => continue :state .period_asterisk,
                        else => result.tag = .period,
                    }
                },
                .period_2 => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        '.' => {
                            result.tag = .ellipsis3;
                            self.index += 1;
                        },
                        else => result.tag = .ellipsis2,
                    }
                },
                .period_asterisk => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        '*' => result.tag = .invalid_periodasterisks,
                        else => result.tag = .period_asterisk,
                    }
                },
                .slash => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        '/' => continue :state .line_comment_start,
                        '=' => {
                            result.tag = .slash_equal;
                            self.index += 1;
                        },
                        else => result.tag = .slash,
                    }
                },
                .line_comment_start => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        0 => {
                            if (self.index != buffer.len) {
                                continue :state .invalid;
                            } else return .{
                                .tag = .eof,
                                .loc = .{
                                    .start = self.index,
                                    .end = self.index,
                                },
                            };
                        },
                        '!' => {
                            result.tag = .container_doc_comment;
                            continue :state .doc_comment;
                        },
                        '\n' => {
                            self.index += 1;
                            result.loc.start = self.index;
                            continue :state .start;
                        },
                        '/' => continue :state .doc_comment_start,
                        '\r' => continue :state .expect_newline,
                        0x01...0x09, 0x0b...0x0c, 0x0e...0x1f, 0x7f => {
                            continue :state .invalid;
                        },
                        else => continue :state .line_comment,
                    }
                },
                .doc_comment_start => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        0, '\n' => result.tag = .doc_comment,
                        '\r' => {
                            if (buffer[self.index + 1] == '\n') {
                                result.tag = .doc_comment;
                            } else {
                                continue :state .invalid;
                            }
                        },
                        '/' => continue :state .line_comment,
                        0x01...0x09, 0x0b...0x0c, 0x0e...0x1f, 0x7f => {
                            continue :state .invalid;
                        },
                        else => {
                            result.tag = .doc_comment;
                            continue :state .doc_comment;
                        },
                    }
                },
                .line_comment => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        0 => {
                            if (self.index != buffer.len) {
                                continue :state .invalid;
                            } else return .{
                                .tag = .eof,
                                .loc = .{
                                    .start = self.index,
                                    .end = self.index,
                                },
                            };
                        },
                        '\n' => {
                            self.index += 1;
                            result.loc.start = self.index;
                            continue :state .start;
                        },
                        '\r' => continue :state .expect_newline,
                        0x01...0x09, 0x0b...0x0c, 0x0e...0x1f, 0x7f => {
                            continue :state .invalid;
                        },
                        else => continue :state .line_comment,
                    }
                },
                .doc_comment => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        0, '\n' => {},
                        '\r' => if (buffer[self.index + 1] != '\n') {
                            continue :state .invalid;
                        },
                        0x01...0x09, 0x0b...0x0c, 0x0e...0x1f, 0x7f => {
                            continue :state .invalid;
                        },
                        else => continue :state .doc_comment,
                    }
                },
                .int => switch (buffer[self.index]) {
                    '.' => continue :state .int_period,
                    '_', 'a'...'d', 'f'...'o', 'q'...'z', 'A'...'D', 'F'...'O', 'Q'...'Z', '0'...'9' => {
                        self.index += 1;
                        continue :state .int;
                    },
                    'e', 'E', 'p', 'P' => {
                        continue :state .int_exponent;
                    },
                    else => {},
                },
                .int_exponent => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        '-', '+' => {
                            self.index += 1;
                            continue :state .float;
                        },
                        else => continue :state .int,
                    }
                },
                .int_period => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        '_', 'a'...'d', 'f'...'o', 'q'...'z', 'A'...'D', 'F'...'O', 'Q'...'Z', '0'...'9' => {
                            self.index += 1;
                            continue :state .float;
                        },
                        'e', 'E', 'p', 'P' => {
                            continue :state .float_exponent;
                        },
                        else => self.index -= 1,
                    }
                },
                .float => switch (buffer[self.index]) {
                    '_', 'a'...'d', 'f'...'o', 'q'...'z', 'A'...'D', 'F'...'O', 'Q'...'Z', '0'...'9' => {
                        self.index += 1;
                        continue :state .float;
                    },
                    'e', 'E', 'p', 'P' => {
                        continue :state .float_exponent;
                    },
                    else => {},
                },
                .float_exponent => {
                    self.index += 1;
                    switch (buffer[self.index]) {
                        '-', '+' => {
                            self.index += 1;
                            continue :state .float;
                        },
                        else => continue :state .float,
                    }
                },
            }
            result.loc.end = self.index;
            return result;
        }
    };
};
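
// Editor's sketch (not part of the original gist): a quick agreement check
// that also shows the API difference between the two implementations.
// std.zig.Tokenizer stores the buffer in the struct; custom.Tokenizer is a
// bare index and takes the buffer on every call to `next`.
test "std and custom tokenizers agree" {
    const src: [:0]const u8 = "const x = 42; // answer";
    var std_tok: std.zig.Tokenizer = .{ .buffer = src, .index = 0 };
    var custom_tok: custom.Tokenizer = .{ .index = 0 };
    while (true) {
        const a = std_tok.next();
        const b = custom_tok.next(src);
        // The two Tag enums are distinct types but use identical names.
        try std.testing.expectEqualStrings(@tagName(a.tag), @tagName(b.tag));
        try std.testing.expectEqual(a.loc.start, b.loc.start);
        try std.testing.expectEqual(a.loc.end, b.loc.end);
        if (a.tag == .eof) break;
    }
}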
// Various source code copied from zig's repository

/// One item per thread, indexed by `tid`, which is dense and unique per thread.
locals: []Local,
/// Length must be a power of two and represents the number of simultaneous
/// writers that can mutate any single sharded data structure.
shards: []Shard,
/// Key is the error name, index is the error tag value. Index 0 has a length-0 string.
global_error_set: GlobalErrorSet,
/// Cached number of active bits in a `tid`.
tid_width: if (single_threaded) u0 else std.math.Log2Int(u32),
/// Cached shift amount to put a `tid` in the top bits of a 30-bit value.
tid_shift_30: if (single_threaded) u0 else std.math.Log2Int(u32),
/// Cached shift amount to put a `tid` in the top bits of a 31-bit value.
tid_shift_31: if (single_threaded) u0 else std.math.Log2Int(u32),
/// Cached shift amount to put a `tid` in the top bits of a 32-bit value.
tid_shift_32: if (single_threaded) u0 else std.math.Log2Int(u32),
interned_deps: std.AutoArrayHashMapUnmanaged(Index, DepEntry.Index),
/// Dependencies on an embedded file.
/// Introduced by `@embedFile`; invalidated when the file changes.
/// Value is index into `dep_entries` of the first dependency on this `Zcu.EmbedFile`.
embed_file_deps: std.AutoArrayHashMapUnmanaged(Zcu.EmbedFile.Index, DepEntry.Index),

/// Whether a multi-threaded intern pool is useful.
/// Set to `false` to reduce the cost of this data structure when the intern
/// pool is not actually accessed from multiple threads.
const want_multi_threaded = true;

/// Whether a single-threaded intern pool impl is in use.
pub const single_threaded = builtin.single_threaded or !want_multi_threaded;

pub const empty: InternPool = .{
    .locals = &.{},
    .shards = &.{},
    .global_error_set = .empty,
    .tid_width = 0,
    .tid_shift_30 = if (single_threaded) 0 else 31,
    .tid_shift_31 = if (single_threaded) 0 else 31,
    .tid_shift_32 = if (single_threaded) 0 else 31,
    .file_deps = .empty,
    .src_hash_deps = .empty,
    .nav_val_deps = .empty,
    .nav_ty_deps = .empty,
    .interned_deps = .empty,
    .embed_file_deps = .empty,
    .dep_entries = .empty,
    .free_dep_entries = .empty,
};

/// A `TrackedInst.Index` provides a single, unchanging reference to a ZIR instruction across a whole
/// compilation. From this index, you can acquire a `TrackedInst`, which contains a reference to both
/// the file which the instruction lives in, and the instruction index itself, which is updated on
/// incremental updates by `Zcu.updateZirRefs`.
pub const TrackedInst = extern struct {
    file: FileIndex,
    inst: Zir.Inst.Index,

    /// It is possible on an incremental update that we "lose" a ZIR instruction: some tracked `%x` in
    /// the old ZIR failed to map to any `%y` in the new ZIR. For this reason, we actually store values
    /// of type `MaybeLost`, which uses `ZirIndex.lost` to represent this case. `Index.resolve` etc
    /// return `null` when the `TrackedInst` being resolved has been lost.
    pub const MaybeLost = extern struct {
        file: FileIndex,
        inst: ZirIndex,

        pub const ZirIndex = enum(u32) {
            /// Tracking failed for this ZIR instruction. Uses of it should fail.
            lost = std.math.maxInt(u32),
            _,
            pub fn unwrap(inst: ZirIndex) ?Zir.Inst.Index {
                return switch (inst) {
                    .lost => null,
                    _ => @enumFromInt(@intFromEnum(inst)),
                };
            }
            pub fn wrap(inst: Zir.Inst.Index) ZirIndex {
                return @enumFromInt(@intFromEnum(inst));
            }
        };

        comptime {
            // The fields should be tightly packed. See also serialization logic in `Compilation.saveState`.
            assert(@sizeOf(@This()) == @sizeOf(FileIndex) + @sizeOf(ZirIndex));
        }
    };

    pub const Index = enum(u32) {
        _,

        pub fn resolveFull(tracked_inst_index: TrackedInst.Index, ip: *const InternPool) ?TrackedInst {
            const tracked_inst_unwrapped = tracked_inst_index.unwrap(ip);
            const tracked_insts = ip.getLocalShared(tracked_inst_unwrapped.tid).tracked_insts.acquire();
            const maybe_lost = tracked_insts.view().items(.@"0")[tracked_inst_unwrapped.index];
            return .{
                .file = maybe_lost.file,
                .inst = maybe_lost.inst.unwrap() orelse return null,
            };
        }
        pub fn resolveFile(tracked_inst_index: TrackedInst.Index, ip: *const InternPool) FileIndex {
            const tracked_inst_unwrapped = tracked_inst_index.unwrap(ip);
            const tracked_insts = ip.getLocalShared(tracked_inst_unwrapped.tid).tracked_insts.acquire();
            const maybe_lost = tracked_insts.view().items(.@"0")[tracked_inst_unwrapped.index];
            return maybe_lost.file;
        }
        pub fn resolve(i: TrackedInst.Index, ip: *const InternPool) ?Zir.Inst.Index {
            return (i.resolveFull(ip) orelse return null).inst;
        }
        pub fn toOptional(i: TrackedInst.Index) Optional {
            return @enumFromInt(@intFromEnum(i));
        }
        pub const Optional = enum(u32) {
            none = std.math.maxInt(u32),
            _,
            pub fn unwrap(opt: Optional) ?TrackedInst.Index {
                return switch (opt) {
                    .none => null,
                    _ => @enumFromInt(@intFromEnum(opt)),
                };
            }
            const debug_state = InternPool.debug_state;
        };
        pub const Unwrapped = struct {
            tid: Zcu.PerThread.Id,
            index: u32,
            pub fn wrap(unwrapped: Unwrapped, ip: *const InternPool) TrackedInst.Index {
                assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask());
                assert(unwrapped.index <= ip.getIndexMask(u32));
                return @enumFromInt(@as(u32, @intFromEnum(unwrapped.tid)) << ip.tid_shift_32 |
                    unwrapped.index);
            }
        };
        pub fn unwrap(tracked_inst_index: TrackedInst.Index, ip: *const InternPool) Unwrapped {
            return .{
                .tid = @enumFromInt(@intFromEnum(tracked_inst_index) >> ip.tid_shift_32 & ip.getTidMask()),
                .index = @intFromEnum(tracked_inst_index) & ip.getIndexMask(u32),
            };
        }
        const debug_state = InternPool.debug_state;
    };
};
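
// Editor's sketch (not from the zig repo): the bit layout used by the
// wrap/unwrap pair above. With, say, tid_shift_32 == 30 and a 2-bit tid mask:
//   raw         = (tid << 30) | local_index
//   tid         = (raw >> 30) & 0b11
//   local_index = raw & ((1 << 30) - 1)
// Each thread therefore owns a disjoint range of indexes and can append to its
// own Local without coordinating with other threads.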

pub fn trackZir(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
    key: TrackedInst,
) Allocator.Error!TrackedInst.Index {
    const maybe_lost_key: TrackedInst.MaybeLost = .{
        .file = key.file,
        .inst = TrackedInst.MaybeLost.ZirIndex.wrap(key.inst),
    };
    const full_hash = Hash.hash(0, std.mem.asBytes(&maybe_lost_key));
    const hash: u32 = @truncate(full_hash >> 32);
    const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))];
    var map = shard.shared.tracked_inst_map.acquire();
    const Map = @TypeOf(map);
    var map_mask = map.header().mask();
    var map_index = hash;
    while (true) : (map_index += 1) {
        map_index &= map_mask;
        const entry = &map.entries[map_index];
        const index = entry.acquire().unwrap() orelse break;
        if (entry.hash != hash) continue;
        if (std.meta.eql(index.resolveFull(ip) orelse continue, key)) return index;
    }
    shard.mutate.tracked_inst_map.mutex.lock();
    defer shard.mutate.tracked_inst_map.mutex.unlock();
    if (map.entries != shard.shared.tracked_inst_map.entries) {
        map = shard.shared.tracked_inst_map;
        map_mask = map.header().mask();
        map_index = hash;
    }
    while (true) : (map_index += 1) {
        map_index &= map_mask;
        const entry = &map.entries[map_index];
        const index = entry.acquire().unwrap() orelse break;
        if (entry.hash != hash) continue;
        if (std.meta.eql(index.resolveFull(ip) orelse continue, key)) return index;
    }
    defer shard.mutate.tracked_inst_map.len += 1;
    const local = ip.getLocal(tid);
    const list = local.getMutableTrackedInsts(gpa);
    try list.ensureUnusedCapacity(1);
    const map_header = map.header().*;
    if (shard.mutate.tracked_inst_map.len < map_header.capacity * 3 / 5) {
        const entry = &map.entries[map_index];
        entry.hash = hash;
        const index = (TrackedInst.Index.Unwrapped{
            .tid = tid,
            .index = list.mutate.len,
        }).wrap(ip);
        list.appendAssumeCapacity(.{maybe_lost_key});
        entry.release(index.toOptional());
        return index;
    }
    const arena_state = &local.mutate.arena;
    var arena = arena_state.promote(gpa);
    defer arena_state.* = arena.state;
    const new_map_capacity = map_header.capacity * 2;
    const new_map_buf = try arena.allocator().alignedAlloc(
        u8,
        Map.alignment,
        Map.entries_offset + new_map_capacity * @sizeOf(Map.Entry),
    );
    const new_map: Map = .{ .entries = @ptrCast(new_map_buf[Map.entries_offset..].ptr) };
    new_map.header().* = .{ .capacity = new_map_capacity };
    @memset(new_map.entries[0..new_map_capacity], .{ .value = .none, .hash = undefined });
    const new_map_mask = new_map.header().mask();
    map_index = 0;
    while (map_index < map_header.capacity) : (map_index += 1) {
        const entry = &map.entries[map_index];
        const index = entry.value.unwrap() orelse continue;
        const item_hash = entry.hash;
        var new_map_index = item_hash;
        while (true) : (new_map_index += 1) {
            new_map_index &= new_map_mask;
            const new_entry = &new_map.entries[new_map_index];
            if (new_entry.value != .none) continue;
            new_entry.* = .{
                .value = index.toOptional(),
                .hash = item_hash,
            };
            break;
        }
    }
    map = new_map;
    map_index = hash;
    while (true) : (map_index += 1) {
        map_index &= new_map_mask;
        if (map.entries[map_index].value == .none) break;
    }
    const index = (TrackedInst.Index.Unwrapped{
        .tid = tid,
        .index = list.mutate.len,
    }).wrap(ip);
    list.appendAssumeCapacity(.{maybe_lost_key});
    map.entries[map_index] = .{ .value = index.toOptional(), .hash = hash };
    shard.shared.tracked_inst_map.release(new_map);
    return index;
}
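
// Editor's note: trackZir follows a classic sharded-map insert pattern. It
// first probes lock-free (the `acquire` loads), then takes the shard mutex,
// re-reads the map and probes again in case another thread inserted the key
// in the meantime, and only then appends. Growth happens at a 3/5 load
// factor, doubling capacity into the thread-local arena; old map buffers are
// never freed, so racing lock-free readers stay valid.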

/// At the start of an incremental update, we update every entry in `tracked_insts` to include
/// the new ZIR index. Once this is done, we must update the hashmap metadata so that lookups
/// return correct entries where they already exist.
pub fn rehashTrackedInsts(
    ip: *InternPool,
    gpa: Allocator,
    tid: Zcu.PerThread.Id,
) Allocator.Error!void {
    assert(tid == .main); // we shouldn't have any other threads active right now
    // TODO: this function doesn't handle OOM well. What should it do?
    // We don't lock anything, as this function assumes that no other thread is
    // accessing `tracked_insts`. This is necessary because we're going to be
    // iterating the `TrackedInst`s in each `Local`, so we have to know that
    // none will be added as we work.
    // Figure out how big each shard needs to be and store it in its mutate `len`.
    for (ip.shards) |*shard| shard.mutate.tracked_inst_map.len = 0;
    for (ip.locals) |*local| {
        // `getMutableTrackedInsts` is okay only because no other thread is currently active.
        // We need the `mutate` for the len.
        for (local.getMutableTrackedInsts(gpa).viewAllowEmpty().items(.@"0")) |tracked_inst| {
            if (tracked_inst.inst == .lost) continue; // we can ignore this one!
            const full_hash = Hash.hash(0, std.mem.asBytes(&tracked_inst));
            const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))];
            shard.mutate.tracked_inst_map.len += 1;
        }
    }
    const Map = Shard.Map(TrackedInst.Index.Optional);
    const arena_state = &ip.getLocal(tid).mutate.arena;
    // We know how big each shard must be, so ensure we have the capacity we need.
    for (ip.shards) |*shard| {
        const want_capacity = if (shard.mutate.tracked_inst_map.len == 0) 0 else cap: {
            // We need to return a capacity of at least 2 to make sure we don't have the `Map(...).empty` value.
            // For this reason, note the `+ 1` in the below expression. This matches the behavior of `trackZir`.
            break :cap std.math.ceilPowerOfTwo(u32, shard.mutate.tracked_inst_map.len * 5 / 3 + 1) catch unreachable;
        };
        const have_capacity = shard.shared.tracked_inst_map.header().capacity; // no acquire because we hold the mutex
        if (have_capacity >= want_capacity) {
            if (have_capacity == 1) {
                // The map is `.empty` -- we can't memset the entries, or we'll segfault, because
                // the buffer is secretly constant.
            } else {
                @memset(shard.shared.tracked_inst_map.entries[0..have_capacity], .{ .value = .none, .hash = undefined });
            }
            continue;
        }
        var arena = arena_state.promote(gpa);
        defer arena_state.* = arena.state;
        const new_map_buf = try arena.allocator().alignedAlloc(
            u8,
            Map.alignment,
            Map.entries_offset + want_capacity * @sizeOf(Map.Entry),
        );
        const new_map: Map = .{ .entries = @ptrCast(new_map_buf[Map.entries_offset..].ptr) };
        new_map.header().* = .{ .capacity = want_capacity };
        @memset(new_map.entries[0..want_capacity], .{ .value = .none, .hash = undefined });
        shard.shared.tracked_inst_map.release(new_map);
    }
    // Now, actually insert the items.
    for (ip.locals, 0..) |*local, local_tid| {
        // `getMutableTrackedInsts` is okay only because no other thread is currently active.
        // We need the `mutate` for the len.
        for (local.getMutableTrackedInsts(gpa).viewAllowEmpty().items(.@"0"), 0..) |tracked_inst, local_inst_index| {
            if (tracked_inst.inst == .lost) continue; // we can ignore this one!
            const full_hash = Hash.hash(0, std.mem.asBytes(&tracked_inst));
            const hash: u32 = @truncate(full_hash >> 32);
            const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))];
            const map = shard.shared.tracked_inst_map; // no acquire because we hold the mutex
            const map_mask = map.header().mask();
            var map_index = hash;
            const entry = while (true) : (map_index += 1) {
                map_index &= map_mask;
                const entry = &map.entries[map_index];
                if (entry.acquire() == .none) break entry;
            };
            const index = TrackedInst.Index.Unwrapped.wrap(.{
                .tid = @enumFromInt(local_tid),
                .index = @intCast(local_inst_index),
            }, ip);
            entry.hash = hash;
            entry.release(index.toOptional());
        }
    }
}

/// Analysis Unit. Represents a single entity which undergoes semantic analysis.
/// This is the "source" of an incremental dependency edge.
pub const AnalUnit = packed struct(u64) {
    kind: Kind,
    id: u32,

    pub const Kind = enum(u32) {
        @"comptime",
        nav_val,
        nav_ty,
        type,
        func,
        memoized_state,
    };

    pub const Unwrapped = union(Kind) {
        /// This `AnalUnit` analyzes the body of the given `comptime` declaration.
        @"comptime": ComptimeUnit.Id,
        /// This `AnalUnit` resolves the value of the given `Nav`.
        nav_val: Nav.Index,
        /// This `AnalUnit` resolves the type of the given `Nav`.
        nav_ty: Nav.Index,
        /// This `AnalUnit` resolves the given `struct`/`union`/`enum` type.
        /// Generated tag enums are never used here (they do not undergo type resolution).
        type: InternPool.Index,
        /// This `AnalUnit` analyzes the body of the given runtime function.
        func: InternPool.Index,
        /// This `AnalUnit` resolves all state which is memoized in fields on `Zcu`.
        memoized_state: MemoizedStateStage,
    };

    pub fn unwrap(au: AnalUnit) Unwrapped {
        return switch (au.kind) {
            inline else => |tag| @unionInit(
                Unwrapped,
                @tagName(tag),
                @enumFromInt(au.id),
            ),
        };
    }
    pub fn wrap(raw: Unwrapped) AnalUnit {
        return switch (raw) {
            inline else => |id, tag| .{
                .kind = tag,
                .id = @intFromEnum(id),
            },
        };
    }
    pub fn toOptional(as: AnalUnit) Optional {
        return @enumFromInt(@as(u64, @bitCast(as)));
    }
    pub const Optional = enum(u64) {
        none = std.math.maxInt(u64),
        _,
        pub fn unwrap(opt: Optional) ?AnalUnit {
            return switch (opt) {
                .none => null,
                _ => @bitCast(@intFromEnum(opt)),
            };
        }
    };
};
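
// Editor's sketch: an AnalUnit round-trips through wrap/unwrap, packing the
// kind tag and the 32-bit payload id into one u64 (`some_func_index` is a
// placeholder, not a real value):
//   const au = AnalUnit.wrap(.{ .func = some_func_index });
//   switch (au.unwrap()) {
//       .func => |f| assert(f == some_func_index),
//       else => unreachable,
//   }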

pub const MemoizedStateStage = enum(u32) {
    /// Everything other than panics and `VaList`.
    main,
    /// Everything within `std.builtin.Panic`.
    /// Since the panic handler is user-provided, this must be able to reference the other memoized state.
    panic,
    /// Specifically `std.builtin.VaList`. See `Zcu.BuiltinDecl.stage`.
    va_list,
};

pub const ComptimeUnit = extern struct {
    zir_index: TrackedInst.Index,
    namespace: NamespaceIndex,

    comptime {
        assert(std.meta.hasUniqueRepresentation(ComptimeUnit));
    }

    pub const Id = enum(u32) {
        _,
        const Unwrapped = struct {
            tid: Zcu.PerThread.Id,
            index: u32,
            fn wrap(unwrapped: Unwrapped, ip: *const InternPool) ComptimeUnit.Id {
                assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask());
                assert(unwrapped.index <= ip.getIndexMask(u32));
                return @enumFromInt(@as(u32, @intFromEnum(unwrapped.tid)) << ip.tid_shift_32 |
                    unwrapped.index);
            }
        };
        fn unwrap(id: Id, ip: *const InternPool) Unwrapped {
            return .{
                .tid = @enumFromInt(@intFromEnum(id) >> ip.tid_shift_32 & ip.getTidMask()),
                .index = @intFromEnum(id) & ip.getIndexMask(u31),
            };
        }
        const debug_state = InternPool.debug_state;
    };
};

/// Named Addressable Value. Represents a global value with a name and address. This name may be
/// generated, and the type (and hence address) may be comptime-only. A `Nav` whose type has runtime
/// bits is sent to the linker to be emitted to the binary.
///
/// * Every ZIR `declaration` which is not a `comptime` declaration has a `Nav` (post-instantiation)
///   which stores the declaration's resolved value.
/// * Generic instances have a `Nav` corresponding to the instantiated function.
/// * `@extern` calls create a `Nav` whose value is a `.@"extern"`.
///
/// This data structure is optimized for the `analysis_info != null` case, because this is much more
/// common in practice; the other case is used only for externs and for generic instances. At the time
/// of writing, in the compiler itself, around 74% of all `Nav`s have `analysis_info != null`.
/// (Specifically, 104225 / 140923)
///
/// `Nav.Repr` is the in-memory representation.
pub const Nav = struct {
    /// The unqualified name of this `Nav`. Namespace lookups use this name, and error messages may use it.
    /// Additionally, extern `Nav`s (i.e. those whose value is an `extern`) use this name.
    name: NullTerminatedString,
    /// The fully-qualified name of this `Nav`.
    fqn: NullTerminatedString,
    /// This field is populated iff this `Nav` is resolved by semantic analysis.
    /// If this is `null`, then `status == .fully_resolved` always.
    analysis: ?struct {
        namespace: NamespaceIndex,
        zir_index: TrackedInst.Index,
    },
    /// TODO: this is a hack! If #20663 isn't accepted, let's figure out something a bit better.
    is_usingnamespace: bool,
    status: union(enum) {
        /// This `Nav` is pending semantic analysis.
        unresolved,
        /// The type of this `Nav` is resolved; the value is queued for resolution.
        type_resolved: struct {
            type: InternPool.Index,
            alignment: Alignment,
            @"linksection": OptionalNullTerminatedString,
            @"addrspace": std.builtin.AddressSpace,
            is_const: bool,
            is_threadlocal: bool,
            /// This field is whether this `Nav` is a literal `extern` definition.
            /// It does *not* tell you whether this might alias an extern fn (see #21027).
            is_extern_decl: bool,
        },
        /// The value of this `Nav` is resolved.
        fully_resolved: struct {
            val: InternPool.Index,
            alignment: Alignment,
            @"linksection": OptionalNullTerminatedString,
            @"addrspace": std.builtin.AddressSpace,
        },
    },

    /// Asserts that `status != .unresolved`.
    pub fn typeOf(nav: Nav, ip: *const InternPool) InternPool.Index {
        return switch (nav.status) {
            .unresolved => unreachable,
            .type_resolved => |r| r.type,
            .fully_resolved => |r| ip.typeOf(r.val),
        };
    }

    /// This function is intended to be used by code generation, since semantic
    /// analysis will ensure that any `Nav` which is potentially `extern` is
    /// fully resolved.
    /// Asserts that `status == .fully_resolved`.
    pub fn getResolvedExtern(nav: Nav, ip: *const InternPool) ?Key.Extern {
        assert(nav.status == .fully_resolved);
        return nav.getExtern(ip);
    }

    /// Always returns `null` for `status == .type_resolved`. This function is intended
    /// to be used by code generation, since semantic analysis will ensure that any `Nav`
    /// which is potentially `extern` is fully resolved.
    /// Asserts that `status != .unresolved`.
    pub fn getExtern(nav: Nav, ip: *const InternPool) ?Key.Extern {
        return switch (nav.status) {
            .unresolved => unreachable,
            .type_resolved => null,
            .fully_resolved => |r| switch (ip.indexToKey(r.val)) {
                .@"extern" => |e| e,
                else => null,
            },
        };
    }

    /// Asserts that `status != .unresolved`.
    pub fn getAddrspace(nav: Nav) std.builtin.AddressSpace {
        return switch (nav.status) {
            .unresolved => unreachable,
            .type_resolved => |r| r.@"addrspace",
            .fully_resolved => |r| r.@"addrspace",
        };
    }

    /// Asserts that `status != .unresolved`.
    pub fn getAlignment(nav: Nav) Alignment {
        return switch (nav.status) {
            .unresolved => unreachable,
            .type_resolved => |r| r.alignment,
            .fully_resolved => |r| r.alignment,
        };
    }

    /// Asserts that `status != .unresolved`.
    pub fn getLinkSection(nav: Nav) OptionalNullTerminatedString {
        return switch (nav.status) {
            .unresolved => unreachable,
            .type_resolved => |r| r.@"linksection",
            .fully_resolved => |r| r.@"linksection",
        };
    }

    /// Asserts that `status != .unresolved`.
    pub fn isThreadlocal(nav: Nav, ip: *const InternPool) bool {
        return switch (nav.status) {
            .unresolved => unreachable,
            .type_resolved => |r| r.is_threadlocal,
            .fully_resolved => |r| switch (ip.indexToKey(r.val)) {
                .@"extern" => |e| e.is_threadlocal,
                .variable => |v| v.is_threadlocal,
                else => false,
            },
        };
    }

    pub fn isFn(nav: Nav, ip: *const InternPool) bool {
        return switch (nav.status) {
            .unresolved => unreachable,
            .type_resolved => |r| {
                const tag = ip.zigTypeTag(r.type);
                return tag == .@"fn";
            },
            .fully_resolved => |r| {
                const tag = ip.zigTypeTag(ip.typeOf(r.val));
                return tag == .@"fn";
            },
        };
    }

    /// If this returns `true`, then a pointer to this `Nav` might actually be encoded as a pointer
    /// to some other `Nav` due to an extern definition or extern alias (see #21027).
    /// This query is valid on `Nav`s for whom only the type is resolved.
    /// Asserts that `status != .unresolved`.
    pub fn isExternOrFn(nav: Nav, ip: *const InternPool) bool {
        return switch (nav.status) {
            .unresolved => unreachable,
            .type_resolved => |r| {
                if (r.is_extern_decl) return true;
                const tag = ip.zigTypeTag(r.type);
                if (tag == .@"fn") return true;
                return false;
            },
            .fully_resolved => |r| {
                if (ip.indexToKey(r.val) == .@"extern") return true;
                const tag = ip.zigTypeTag(ip.typeOf(r.val));
                if (tag == .@"fn") return true;
                return false;
            },
        };
    }

    /// Get the ZIR instruction corresponding to this `Nav`, used to resolve source locations.
    /// This is a `declaration`.
    pub fn srcInst(nav: Nav, ip: *const InternPool) TrackedInst.Index {
        if (nav.analysis) |a| {
            return a.zir_index;
        }
        // A `Nav` which does not undergo analysis always has a resolved value.
        return switch (ip.indexToKey(nav.status.fully_resolved.val)) {
            .func => |func| {
                // Since `analysis` was not populated, this must be an instantiation.
                // Go up to the generic owner and consult *its* `analysis` field.
                const go_nav = ip.getNav(ip.indexToKey(func.generic_owner).func.owner_nav);
                return go_nav.analysis.?.zir_index;
            },
            .@"extern" => |@"extern"| @"extern".zir_index, // extern / @extern
            else => unreachable,
        };
    }

    pub const Index = enum(u32) {
        _,
        pub const Optional = enum(u32) {
            none = std.math.maxInt(u32),
            _,
            pub fn unwrap(opt: Optional) ?Nav.Index {
                return switch (opt) {
                    .none => null,
                    _ => @enumFromInt(@intFromEnum(opt)),
                };
            }
            const debug_state = InternPool.debug_state;
        };
        pub fn toOptional(i: Nav.Index) Optional {
            return @enumFromInt(@intFromEnum(i));
        }
        const Unwrapped = struct {
            tid: Zcu.PerThread.Id,
            index: u32,
            fn wrap(unwrapped: Unwrapped, ip: *const InternPool) Nav.Index {
                assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask());
                assert(unwrapped.index <= ip.getIndexMask(u32));
                return @enumFromInt(@as(u32, @intFromEnum(unwrapped.tid)) << ip.tid_shift_32 |
                    unwrapped.index);
            }
        };
        fn unwrap(nav_index: Nav.Index, ip: *const InternPool) Unwrapped {
            return .{
                .tid = @enumFromInt(@intFromEnum(nav_index) >> ip.tid_shift_32 & ip.getTidMask()),
                .index = @intFromEnum(nav_index) & ip.getIndexMask(u32),
            };
        }
        const debug_state = InternPool.debug_state;
    };

    /// The compact in-memory representation of a `Nav`.
    /// 26 bytes.
    const Repr = struct {
        name: NullTerminatedString,
        fqn: NullTerminatedString,
        // The following two fields are either both populated, or both `.none`.
analysis_namespace: OptionalNamespaceIndex, | |
analysis_zir_index: TrackedInst.Index.Optional, | |
/// Populated only if `bits.status != .unresolved`. | |
type_or_val: InternPool.Index, | |
/// Populated only if `bits.status != .unresolved`. | |
@"linksection": OptionalNullTerminatedString, | |
bits: Bits, | |
const Bits = packed struct(u16) { | |
status: enum(u2) { unresolved, type_resolved, fully_resolved, type_resolved_extern_decl }, | |
/// Populated only if `bits.status != .unresolved`. | |
alignment: Alignment, | |
/// Populated only if `bits.status != .unresolved`. | |
@"addrspace": std.builtin.AddressSpace, | |
/// Populated only if `bits.status == .type_resolved`. | |
is_const: bool, | |
/// Populated only if `bits.status == .type_resolved`. | |
is_threadlocal: bool, | |
is_usingnamespace: bool, | |
}; | |
fn unpack(repr: Repr) Nav { | |
return .{ | |
.name = repr.name, | |
.fqn = repr.fqn, | |
.analysis = if (repr.analysis_namespace.unwrap()) |namespace| .{ | |
.namespace = namespace, | |
.zir_index = repr.analysis_zir_index.unwrap().?, | |
} else a: { | |
assert(repr.analysis_zir_index == .none); | |
break :a null; | |
}, | |
.is_usingnamespace = repr.bits.is_usingnamespace, | |
.status = switch (repr.bits.status) { | |
.unresolved => .unresolved, | |
.type_resolved, .type_resolved_extern_decl => .{ .type_resolved = .{ | |
.type = repr.type_or_val, | |
.alignment = repr.bits.alignment, | |
.@"linksection" = repr.@"linksection", | |
.@"addrspace" = repr.bits.@"addrspace", | |
.is_const = repr.bits.is_const, | |
.is_threadlocal = repr.bits.is_threadlocal, | |
.is_extern_decl = repr.bits.status == .type_resolved_extern_decl, | |
} }, | |
.fully_resolved => .{ .fully_resolved = .{ | |
.val = repr.type_or_val, | |
.alignment = repr.bits.alignment, | |
.@"linksection" = repr.@"linksection", | |
.@"addrspace" = repr.bits.@"addrspace", | |
} }, | |
}, | |
}; | |
} | |
}; | |
fn pack(nav: Nav) Repr { | |
// Note that in the `unresolved` case, we do not mark fields as `undefined`, even though they should not be used. | |
// This is to avoid writing undefined bytes to disk when serializing buffers. | |
return .{ | |
.name = nav.name, | |
.fqn = nav.fqn, | |
.analysis_namespace = if (nav.analysis) |a| a.namespace.toOptional() else .none, | |
.analysis_zir_index = if (nav.analysis) |a| a.zir_index.toOptional() else .none, | |
.type_or_val = switch (nav.status) { | |
.unresolved => .none, | |
.type_resolved => |r| r.type, | |
.fully_resolved => |r| r.val, | |
}, | |
.@"linksection" = switch (nav.status) { | |
.unresolved => .none, | |
.type_resolved => |r| r.@"linksection", | |
.fully_resolved => |r| r.@"linksection", | |
}, | |
.bits = switch (nav.status) { | |
.unresolved => .{ | |
.status = .unresolved, | |
.alignment = .none, | |
.@"addrspace" = .generic, | |
.is_usingnamespace = nav.is_usingnamespace, | |
.is_const = false, | |
.is_threadlocal = false, | |
}, | |
.type_resolved => |r| .{ | |
.status = if (r.is_extern_decl) .type_resolved_extern_decl else .type_resolved, | |
.alignment = r.alignment, | |
.@"addrspace" = r.@"addrspace", | |
.is_usingnamespace = nav.is_usingnamespace, | |
.is_const = r.is_const, | |
.is_threadlocal = r.is_threadlocal, | |
}, | |
.fully_resolved => |r| .{ | |
.status = .fully_resolved, | |
.alignment = r.alignment, | |
.@"addrspace" = r.@"addrspace", | |
.is_usingnamespace = nav.is_usingnamespace, | |
.is_const = false, | |
.is_threadlocal = false, | |
}, | |
}, | |
}; | |
} | |
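// pack and unpack are inverses: unpack(pack(nav)) reproduces nav exactly. | |
// Note that the extern-decl flag is folded into the status tag | |
// (.type_resolved_extern_decl) rather than occupying a separate bit. | |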
}; | |
pub const Dependee = union(enum) { | |
file: FileIndex, | |
src_hash: TrackedInst.Index, | |
nav_val: Nav.Index, | |
nav_ty: Nav.Index, | |
interned: Index, | |
embed_file: Zcu.EmbedFile.Index, | |
namespace: TrackedInst.Index, | |
namespace_name: NamespaceNameKey, | |
memoized_state: MemoizedStateStage, | |
}; | |
pub fn removeDependenciesForDepender(ip: *InternPool, gpa: Allocator, depender: AnalUnit) void { | |
var opt_idx = (ip.first_dependency.fetchSwapRemove(depender) orelse return).value.toOptional(); | |
while (opt_idx.unwrap()) |idx| { | |
const dep = ip.dep_entries.items[@intFromEnum(idx)]; | |
opt_idx = dep.next_dependee; | |
const prev_idx = dep.prev.unwrap() orelse { | |
// This entry is the start of a list in some `*_deps`. | |
// We cannot easily remove this mapping, so this must remain as a dummy entry. | |
ip.dep_entries.items[@intFromEnum(idx)].depender = .none; | |
continue; | |
}; | |
ip.dep_entries.items[@intFromEnum(prev_idx)].next = dep.next; | |
if (dep.next.unwrap()) |next_idx| { | |
ip.dep_entries.items[@intFromEnum(next_idx)].prev = dep.prev; | |
} | |
ip.free_dep_entries.append(gpa, idx) catch { | |
// This memory will be reclaimed on the next garbage collection. | |
// Thus, we do not need to propagate this error. | |
}; | |
} | |
} | |
pub const DependencyIterator = struct { | |
ip: *const InternPool, | |
next_entry: DepEntry.Index.Optional, | |
pub fn next(it: *DependencyIterator) ?AnalUnit { | |
while (true) { | |
const idx = it.next_entry.unwrap() orelse return null; | |
const entry = it.ip.dep_entries.items[@intFromEnum(idx)]; | |
it.next_entry = entry.next; | |
if (entry.depender.unwrap()) |depender| return depender; | |
} | |
} | |
}; | |
pub fn dependencyIterator(ip: *const InternPool, dependee: Dependee) DependencyIterator { | |
const first_entry = switch (dependee) { | |
.file => |x| ip.file_deps.get(x), | |
.src_hash => |x| ip.src_hash_deps.get(x), | |
.nav_val => |x| ip.nav_val_deps.get(x), | |
.nav_ty => |x| ip.nav_ty_deps.get(x), | |
.interned => |x| ip.interned_deps.get(x), | |
.embed_file => |x| ip.embed_file_deps.get(x), | |
.namespace => |x| ip.namespace_deps.get(x), | |
.namespace_name => |x| ip.namespace_name_deps.get(x), | |
.memoized_state => |stage| switch (stage) { | |
.main => ip.memoized_state_main_deps.unwrap(), | |
.panic => ip.memoized_state_panic_deps.unwrap(), | |
.va_list => ip.memoized_state_va_list_deps.unwrap(), | |
}, | |
} orelse return .{ | |
.ip = ip, | |
.next_entry = .none, | |
}; | |
return .{ | |
.ip = ip, | |
.next_entry = first_entry.toOptional(), | |
}; | |
} | |
pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: AnalUnit, dependee: Dependee) Allocator.Error!void { | |
const first_depender_dep: DepEntry.Index.Optional = if (ip.first_dependency.get(depender)) |idx| dep: { | |
// The entry already exists, so there is capacity to overwrite it later. | |
break :dep idx.toOptional(); | |
} else none: { | |
// Ensure there is capacity available to add this dependency later. | |
try ip.first_dependency.ensureUnusedCapacity(gpa, 1); | |
break :none .none; | |
}; | |
// We're very likely to need space for a new entry - reserve it now to avoid | |
// the need for error cleanup logic. | |
if (ip.free_dep_entries.items.len == 0) { | |
try ip.dep_entries.ensureUnusedCapacity(gpa, 1); | |
} | |
// This block should allocate an entry and prepend it to the relevant `*_deps` list. | |
// The `next` field should be correctly initialized; all other fields may be undefined. | |
const new_index: DepEntry.Index = switch (dependee) { | |
.memoized_state => |stage| new_index: { | |
const deps = switch (stage) { | |
.main => &ip.memoized_state_main_deps, | |
.panic => &ip.memoized_state_panic_deps, | |
.va_list => &ip.memoized_state_va_list_deps, | |
}; | |
if (deps.unwrap()) |first| { | |
if (ip.dep_entries.items[@intFromEnum(first)].depender == .none) { | |
// Dummy entry, so we can reuse it rather than allocating a new one! | |
break :new_index first; | |
} | |
} | |
// Prepend a new dependency. | |
const new_index: DepEntry.Index, const ptr = if (ip.free_dep_entries.popOrNull()) |new_index| new: { | |
break :new .{ new_index, &ip.dep_entries.items[@intFromEnum(new_index)] }; | |
} else .{ @enumFromInt(ip.dep_entries.items.len), ip.dep_entries.addOneAssumeCapacity() }; | |
if (deps.unwrap()) |old_first| { | |
ptr.next = old_first.toOptional(); | |
ip.dep_entries.items[@intFromEnum(old_first)].prev = new_index.toOptional(); | |
} else { | |
ptr.next = .none; | |
} | |
deps.* = new_index.toOptional(); | |
break :new_index new_index; | |
}, | |
inline else => |dependee_payload, tag| new_index: { | |
const gop = try switch (tag) { | |
.file => ip.file_deps, | |
.src_hash => ip.src_hash_deps, | |
.nav_val => ip.nav_val_deps, | |
.nav_ty => ip.nav_ty_deps, | |
.interned => ip.interned_deps, | |
.embed_file => ip.embed_file_deps, | |
.namespace => ip.namespace_deps, | |
.namespace_name => ip.namespace_name_deps, | |
.memoized_state => comptime unreachable, | |
}.getOrPut(gpa, dependee_payload); | |
if (gop.found_existing and ip.dep_entries.items[@intFromEnum(gop.value_ptr.*)].depender == .none) { | |
// Dummy entry, so we can reuse it rather than allocating a new one! | |
break :new_index gop.value_ptr.*; | |
} | |
// Prepend a new dependency. | |
const new_index: DepEntry.Index, const ptr = if (ip.free_dep_entries.popOrNull()) |new_index| new: { | |
break :new .{ new_index, &ip.dep_entries.items[@intFromEnum(new_index)] }; | |
} else .{ @enumFromInt(ip.dep_entries.items.len), ip.dep_entries.addOneAssumeCapacity() }; | |
if (gop.found_existing) { | |
ptr.next = gop.value_ptr.*.toOptional(); | |
ip.dep_entries.items[@intFromEnum(gop.value_ptr.*)].prev = new_index.toOptional(); | |
} else { | |
ptr.next = .none; | |
} | |
gop.value_ptr.* = new_index; | |
break :new_index new_index; | |
}, | |
}; | |
ip.dep_entries.items[@intFromEnum(new_index)].depender = depender.toOptional(); | |
ip.dep_entries.items[@intFromEnum(new_index)].prev = .none; | |
ip.dep_entries.items[@intFromEnum(new_index)].next_dependee = first_depender_dep; | |
ip.first_dependency.putAssumeCapacity(depender, new_index); | |
} | |
/// String is the name whose existence the dependency is on. | |
/// DepEntry.Index refers to the first such dependency. | |
pub const NamespaceNameKey = struct { | |
/// The instruction (`struct_decl` etc) which owns the namespace in question. | |
namespace: TrackedInst.Index, | |
/// The name whose existence the dependency is on. | |
name: NullTerminatedString, | |
}; | |
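// A sketch of the shape (not literal code): each DepEntry sits on two | |
// orthogonal lists at once. | |
// | |
//   *_deps[dependee] -> e0 <-prev/next-> e1 <-> e2    (all dependers of one dependee) | |
//   first_dependency[depender] -> eA -next_dependee-> eB    (all dependees of one depender) | |
// | |
// removeDependenciesForDepender walks the second list and unlinks each entry | |
// from the first, leaving a dummy entry (depender == .none) when the entry | |
// heads a *_deps list; dependencyIterator skips such dummies. | |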
pub const DepEntry = extern struct { | |
/// If null, this is a dummy entry. `next_dependee` is undefined. This is the first | |
/// entry in one of `*_deps`; it does not appear in any list reachable via | |
/// `first_dependency`, but is not in `free_dep_entries` since `*_deps` stores a reference to it. | |
depender: AnalUnit.Optional, | |
/// Index into `dep_entries` forming a doubly linked list of all dependencies on this dependee. | |
/// Used to iterate all dependers for a given dependee during an update. | |
/// null if this is the end of the list. | |
next: DepEntry.Index.Optional, | |
/// The other link for `next`. | |
/// null if this is the start of the list. | |
prev: DepEntry.Index.Optional, | |
/// Index into `dep_entries` forming a singly linked list of dependencies *of* `depender`. | |
/// Used to efficiently remove all `DepEntry`s for a single `depender` when it is re-analyzed. | |
/// null if this is the end of the list. | |
next_dependee: DepEntry.Index.Optional, | |
pub const Index = enum(u32) { | |
_, | |
pub fn toOptional(dep: DepEntry.Index) Optional { | |
return @enumFromInt(@intFromEnum(dep)); | |
} | |
pub const Optional = enum(u32) { | |
none = std.math.maxInt(u32), | |
_, | |
pub fn unwrap(opt: Optional) ?DepEntry.Index { | |
return switch (opt) { | |
.none => null, | |
_ => @enumFromInt(@intFromEnum(opt)), | |
}; | |
} | |
}; | |
}; | |
}; | |
const Local = struct { | |
/// These fields can be accessed from any thread by calling `acquire`. | |
/// They are only modified by the owning thread. | |
shared: Shared align(std.atomic.cache_line), | |
/// This state is fully local to the owning thread and does not require any | |
/// atomic access. | |
mutate: struct { | |
/// When we need to allocate any long-lived buffer for mutating the `InternPool`, it is | |
/// allocated into this `arena` (for the `Id` of the thread performing the mutation). An | |
/// arena is used to avoid contention on the GPA, and to ensure that any code which retains | |
/// references to old state remains valid. For instance, when reallocing hashmap metadata, | |
/// a racing lookup on another thread may still retain a handle to the old metadata pointer, | |
/// so it must remain valid. | |
/// This arena's lifetime is tied to that of `Compilation`, although it can be cleared on | |
/// garbage collection (currently vaporware). | |
arena: std.heap.ArenaAllocator.State, | |
items: ListMutate, | |
extra: ListMutate, | |
limbs: ListMutate, | |
strings: ListMutate, | |
tracked_insts: ListMutate, | |
files: ListMutate, | |
maps: ListMutate, | |
navs: ListMutate, | |
comptime_units: ListMutate, | |
namespaces: BucketListMutate, | |
} align(std.atomic.cache_line), | |
const Shared = struct { | |
items: List(Item), | |
extra: Extra, | |
limbs: Limbs, | |
strings: Strings, | |
tracked_insts: TrackedInsts, | |
files: List(File), | |
maps: Maps, | |
navs: Navs, | |
comptime_units: ComptimeUnits, | |
namespaces: Namespaces, | |
pub fn getLimbs(shared: *const Local.Shared) Limbs { | |
return switch (@sizeOf(Limb)) { | |
@sizeOf(u32) => shared.extra, | |
@sizeOf(u64) => shared.limbs, | |
else => @compileError("unsupported host"), | |
}.acquire(); | |
} | |
}; | |
const Extra = List(struct { u32 }); | |
const Limbs = switch (@sizeOf(Limb)) { | |
@sizeOf(u32) => Extra, | |
@sizeOf(u64) => List(struct { u64 }), | |
else => @compileError("unsupported host"), | |
}; | |
const Strings = List(struct { u8 }); | |
const TrackedInsts = List(struct { TrackedInst.MaybeLost }); | |
const Maps = List(struct { FieldMap }); | |
const Navs = List(Nav.Repr); | |
const ComptimeUnits = List(struct { ComptimeUnit }); | |
const namespaces_bucket_width = 8; | |
const namespaces_bucket_mask = (1 << namespaces_bucket_width) - 1; | |
const namespace_next_free_field = "owner_type"; | |
const Namespaces = List(struct { *[1 << namespaces_bucket_width]Zcu.Namespace }); | |
const ListMutate = struct { | |
mutex: std.Thread.Mutex, | |
len: u32, | |
const empty: ListMutate = .{ | |
.mutex = .{}, | |
.len = 0, | |
}; | |
}; | |
const BucketListMutate = struct { | |
last_bucket_len: u32, | |
buckets_list: ListMutate, | |
free_list: u32, | |
const free_list_sentinel = std.math.maxInt(u32); | |
const empty: BucketListMutate = .{ | |
.last_bucket_len = 0, | |
.buckets_list = ListMutate.empty, | |
.free_list = free_list_sentinel, | |
}; | |
}; | |
fn List(comptime Elem: type) type { | |
assert(@typeInfo(Elem) == .@"struct"); | |
return struct { | |
bytes: [*]align(@alignOf(Elem)) u8, | |
const ListSelf = @This(); | |
const Mutable = struct { | |
gpa: Allocator, | |
arena: *std.heap.ArenaAllocator.State, | |
mutate: *ListMutate, | |
list: *ListSelf, | |
const fields = std.enums.values(std.meta.FieldEnum(Elem)); | |
fn PtrArrayElem(comptime len: usize) type { | |
const elem_info = @typeInfo(Elem).@"struct"; | |
const elem_fields = elem_info.fields; | |
var new_fields: [elem_fields.len]std.builtin.Type.StructField = undefined; | |
for (&new_fields, elem_fields) |*new_field, elem_field| new_field.* = .{ | |
.name = elem_field.name, | |
.type = *[len]elem_field.type, | |
.default_value_ptr = null, | |
.is_comptime = false, | |
.alignment = 0, | |
}; | |
return @Type(.{ .@"struct" = .{ | |
.layout = .auto, | |
.fields = &new_fields, | |
.decls = &.{}, | |
.is_tuple = elem_info.is_tuple, | |
} }); | |
} | |
fn PtrElem(comptime opts: struct { | |
size: std.builtin.Type.Pointer.Size, | |
is_const: bool = false, | |
}) type { | |
const elem_info = @typeInfo(Elem).@"struct"; | |
const elem_fields = elem_info.fields; | |
var new_fields: [elem_fields.len]std.builtin.Type.StructField = undefined; | |
for (&new_fields, elem_fields) |*new_field, elem_field| new_field.* = .{ | |
.name = elem_field.name, | |
.type = @Type(.{ .pointer = .{ | |
.size = opts.size, | |
.is_const = opts.is_const, | |
.is_volatile = false, | |
.alignment = 0, | |
.address_space = .generic, | |
.child = elem_field.type, | |
.is_allowzero = false, | |
.sentinel_ptr = null, | |
} }), | |
.default_value_ptr = null, | |
.is_comptime = false, | |
.alignment = 0, | |
}; | |
return @Type(.{ .@"struct" = .{ | |
.layout = .auto, | |
.fields = &new_fields, | |
.decls = &.{}, | |
.is_tuple = elem_info.is_tuple, | |
} }); | |
} | |
pub fn addOne(mutable: Mutable) Allocator.Error!PtrElem(.{ .size = .one }) { | |
try mutable.ensureUnusedCapacity(1); | |
return mutable.addOneAssumeCapacity(); | |
} | |
pub fn addOneAssumeCapacity(mutable: Mutable) PtrElem(.{ .size = .one }) { | |
const index = mutable.mutate.len; | |
assert(index < mutable.list.header().capacity); | |
mutable.mutate.len = index + 1; | |
const mutable_view = mutable.view().slice(); | |
var ptr: PtrElem(.{ .size = .one }) = undefined; | |
inline for (fields) |field| { | |
@field(ptr, @tagName(field)) = &mutable_view.items(field)[index]; | |
} | |
return ptr; | |
} | |
pub fn append(mutable: Mutable, elem: Elem) Allocator.Error!void { | |
try mutable.ensureUnusedCapacity(1); | |
mutable.appendAssumeCapacity(elem); | |
} | |
pub fn appendAssumeCapacity(mutable: Mutable, elem: Elem) void { | |
var mutable_view = mutable.view(); | |
defer mutable.mutate.len = @intCast(mutable_view.len); | |
mutable_view.appendAssumeCapacity(elem); | |
} | |
pub fn appendSliceAssumeCapacity( | |
mutable: Mutable, | |
slice: PtrElem(.{ .size = .slice, .is_const = true }), | |
) void { | |
if (fields.len == 0) return; | |
const start = mutable.mutate.len; | |
const slice_len = @field(slice, @tagName(fields[0])).len; | |
assert(slice_len <= mutable.list.header().capacity - start); | |
mutable.mutate.len = @intCast(start + slice_len); | |
const mutable_view = mutable.view().slice(); | |
inline for (fields) |field| { | |
const field_slice = @field(slice, @tagName(field)); | |
assert(field_slice.len == slice_len); | |
@memcpy(mutable_view.items(field)[start..][0..slice_len], field_slice); | |
} | |
} | |
pub fn appendNTimes(mutable: Mutable, elem: Elem, len: usize) Allocator.Error!void { | |
try mutable.ensureUnusedCapacity(len); | |
mutable.appendNTimesAssumeCapacity(elem, len); | |
} | |
pub fn appendNTimesAssumeCapacity(mutable: Mutable, elem: Elem, len: usize) void { | |
const start = mutable.mutate.len; | |
assert(len <= mutable.list.header().capacity - start); | |
mutable.mutate.len = @intCast(start + len); | |
const mutable_view = mutable.view().slice(); | |
inline for (fields) |field| { | |
@memset(mutable_view.items(field)[start..][0..len], @field(elem, @tagName(field))); | |
} | |
} | |
pub fn addManyAsArray(mutable: Mutable, comptime len: usize) Allocator.Error!PtrArrayElem(len) { | |
try mutable.ensureUnusedCapacity(len); | |
return mutable.addManyAsArrayAssumeCapacity(len); | |
} | |
pub fn addManyAsArrayAssumeCapacity(mutable: Mutable, comptime len: usize) PtrArrayElem(len) { | |
const start = mutable.mutate.len; | |
assert(len <= mutable.list.header().capacity - start); | |
mutable.mutate.len = @intCast(start + len); | |
const mutable_view = mutable.view().slice(); | |
var ptr_array: PtrArrayElem(len) = undefined; | |
inline for (fields) |field| { | |
@field(ptr_array, @tagName(field)) = mutable_view.items(field)[start..][0..len]; | |
} | |
return ptr_array; | |
} | |
pub fn addManyAsSlice(mutable: Mutable, len: usize) Allocator.Error!PtrElem(.{ .size = .slice }) { | |
try mutable.ensureUnusedCapacity(len); | |
return mutable.addManyAsSliceAssumeCapacity(len); | |
} | |
pub fn addManyAsSliceAssumeCapacity(mutable: Mutable, len: usize) PtrElem(.{ .size = .slice }) { | |
const start = mutable.mutate.len; | |
assert(len <= mutable.list.header().capacity - start); | |
mutable.mutate.len = @intCast(start + len); | |
const mutable_view = mutable.view().slice(); | |
var slice: PtrElem(.{ .size = .slice }) = undefined; | |
inline for (fields) |field| { | |
@field(slice, @tagName(field)) = mutable_view.items(field)[start..][0..len]; | |
} | |
return slice; | |
} | |
pub fn shrinkRetainingCapacity(mutable: Mutable, len: usize) void { | |
assert(len <= mutable.mutate.len); | |
mutable.mutate.len = @intCast(len); | |
} | |
pub fn ensureUnusedCapacity(mutable: Mutable, unused_capacity: usize) Allocator.Error!void { | |
try mutable.ensureTotalCapacity(@intCast(mutable.mutate.len + unused_capacity)); | |
} | |
pub fn ensureTotalCapacity(mutable: Mutable, total_capacity: usize) Allocator.Error!void { | |
const old_capacity = mutable.list.header().capacity; | |
if (old_capacity >= total_capacity) return; | |
var new_capacity = old_capacity; | |
while (new_capacity < total_capacity) new_capacity = (new_capacity + 10) * 2; | |
try mutable.setCapacity(new_capacity); | |
} | |
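// Starting from the empty list, capacity grows 0 -> 20 -> 60 -> 140 -> ..., | |
// i.e. doubling plus a constant boost so that small lists skip several | |
// early reallocations. | |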
fn setCapacity(mutable: Mutable, capacity: u32) Allocator.Error!void { | |
var arena = mutable.arena.promote(mutable.gpa); | |
defer mutable.arena.* = arena.state; | |
const buf = try arena.allocator().alignedAlloc( | |
u8, | |
alignment, | |
bytes_offset + View.capacityInBytes(capacity), | |
); | |
var new_list: ListSelf = .{ .bytes = @ptrCast(buf[bytes_offset..].ptr) }; | |
new_list.header().* = .{ .capacity = capacity }; | |
const len = mutable.mutate.len; | |
// this cold and quickly predictable condition enables | |
// the `MultiArrayList` optimization in `view` | |
if (len > 0) { | |
const old_slice = mutable.list.view().slice(); | |
const new_slice = new_list.view().slice(); | |
inline for (fields) |field| @memcpy(new_slice.items(field)[0..len], old_slice.items(field)[0..len]); | |
} | |
mutable.mutate.mutex.lock(); | |
defer mutable.mutate.mutex.unlock(); | |
mutable.list.release(new_list); | |
} | |
pub fn viewAllowEmpty(mutable: Mutable) View { | |
const capacity = mutable.list.header().capacity; | |
return .{ | |
.bytes = mutable.list.bytes, | |
.len = mutable.mutate.len, | |
.capacity = capacity, | |
}; | |
} | |
pub fn view(mutable: Mutable) View { | |
const capacity = mutable.list.header().capacity; | |
assert(capacity > 0); // optimizes `MultiArrayList.Slice.items` | |
return .{ | |
.bytes = mutable.list.bytes, | |
.len = mutable.mutate.len, | |
.capacity = capacity, | |
}; | |
} | |
}; | |
const empty: ListSelf = .{ .bytes = @constCast(&(extern struct { | |
header: Header, | |
bytes: [0]u8 align(@alignOf(Elem)), | |
}{ | |
.header = .{ .capacity = 0 }, | |
.bytes = .{}, | |
}).bytes) }; | |
const alignment = @max(@alignOf(Header), @alignOf(Elem)); | |
const bytes_offset = std.mem.alignForward(usize, @sizeOf(Header), @alignOf(Elem)); | |
const View = std.MultiArrayList(Elem); | |
/// Must be called when accessing from another thread. | |
pub fn acquire(list: *const ListSelf) ListSelf { | |
return .{ .bytes = @atomicLoad([*]align(@alignOf(Elem)) u8, &list.bytes, .acquire) }; | |
} | |
fn release(list: *ListSelf, new_list: ListSelf) void { | |
@atomicStore([*]align(@alignOf(Elem)) u8, &list.bytes, new_list.bytes, .release); | |
} | |
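// Publication pattern: a writer builds the new buffer, copies the old | |
// contents, then releases the new pointer while holding the mutate mutex; | |
// a racing reader that already acquired the old pointer can keep using it | |
// safely, because old buffers live in the arena and are never freed | |
// mid-compilation. | |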
const Header = extern struct { | |
capacity: u32, | |
}; | |
fn header(list: ListSelf) *Header { | |
return @alignCast(@ptrCast(list.bytes - bytes_offset)); | |
} | |
pub fn view(list: ListSelf) View { | |
const capacity = list.header().capacity; | |
assert(capacity > 0); // optimizes `MultiArrayList.Slice.items` | |
return .{ | |
.bytes = list.bytes, | |
.len = capacity, | |
.capacity = capacity, | |
}; | |
} | |
}; | |
} | |
pub fn getMutableItems(local: *Local, gpa: Allocator) List(Item).Mutable { | |
return .{ | |
.gpa = gpa, | |
.arena = &local.mutate.arena, | |
.mutate = &local.mutate.items, | |
.list = &local.shared.items, | |
}; | |
} | |
pub fn getMutableExtra(local: *Local, gpa: Allocator) Extra.Mutable { | |
return .{ | |
.gpa = gpa, | |
.arena = &local.mutate.arena, | |
.mutate = &local.mutate.extra, | |
.list = &local.shared.extra, | |
}; | |
} | |
/// On 32-bit systems, this array is ignored and extra is used for everything. | |
/// On 64-bit systems, this array is used for big integers and associated metadata. | |
/// Use the helper methods instead of accessing this directly in order to not | |
/// violate the above mechanism. | |
pub fn getMutableLimbs(local: *Local, gpa: Allocator) Limbs.Mutable { | |
return switch (@sizeOf(Limb)) { | |
@sizeOf(u32) => local.getMutableExtra(gpa), | |
@sizeOf(u64) => .{ | |
.gpa = gpa, | |
.arena = &local.mutate.arena, | |
.mutate = &local.mutate.limbs, | |
.list = &local.shared.limbs, | |
}, | |
else => @compileError("unsupported host"), | |
}; | |
} | |
/// In order to store references to strings in fewer bytes, we copy all | |
/// string bytes into here. The stored bytes may contain nulls. It is up to | |
/// whoever references the data whether to store both index and length, | |
/// thus allowing embedded null bytes, or to store only the index and rely on | |
/// null-termination. The `strings` array is agnostic to either usage. | |
pub fn getMutableStrings(local: *Local, gpa: Allocator) Strings.Mutable { | |
return .{ | |
.gpa = gpa, | |
.arena = &local.mutate.arena, | |
.mutate = &local.mutate.strings, | |
.list = &local.shared.strings, | |
}; | |
} | |
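// Usage sketch with hypothetical contents: if strings holds the bytes | |
// "foo" ++ [1]u8{0} ++ "bar", index 0 can serve as a NullTerminatedString | |
// ("foo", length found by scanning for 0), while index 4 plus an externally | |
// stored length of 3 can serve as a String ("bar"); both conventions share | |
// the same buffer. | |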
/// An index into `tracked_insts` gives a reference to a single ZIR instruction which | |
/// persists across incremental updates. | |
pub fn getMutableTrackedInsts(local: *Local, gpa: Allocator) TrackedInsts.Mutable { | |
return .{ | |
.gpa = gpa, | |
.arena = &local.mutate.arena, | |
.mutate = &local.mutate.tracked_insts, | |
.list = &local.shared.tracked_insts, | |
}; | |
} | |
/// Elements are ordered identically to the `import_table` field of `Zcu`. | |
/// | |
/// Unlike `import_table`, this data is serialized as part of incremental | |
/// compilation state. | |
/// | |
/// Key is the hash of the path to this file, used to store | |
/// `InternPool.TrackedInst`. | |
pub fn getMutableFiles(local: *Local, gpa: Allocator) List(File).Mutable { | |
return .{ | |
.gpa = gpa, | |
.arena = &local.mutate.arena, | |
.mutate = &local.mutate.files, | |
.list = &local.shared.files, | |
}; | |
} | |
/// Some types, such as enums, structs, and unions, need to store mappings from | |
/// field name to field index, or from value to field index. In such cases, they | |
/// store the underlying field names and values directly, relying on one of these | |
/// maps, stored separately, to provide lookup. | |
/// These maps are not serialized; they are recomputed upon deserialization. | |
pub fn getMutableMaps(local: *Local, gpa: Allocator) Maps.Mutable { | |
return .{ | |
.gpa = gpa, | |
.arena = &local.mutate.arena, | |
.mutate = &local.mutate.maps, | |
.list = &local.shared.maps, | |
}; | |
} | |
pub fn getMutableNavs(local: *Local, gpa: Allocator) Navs.Mutable { | |
return .{ | |
.gpa = gpa, | |
.arena = &local.mutate.arena, | |
.mutate = &local.mutate.navs, | |
.list = &local.shared.navs, | |
}; | |
} | |
pub fn getMutableComptimeUnits(local: *Local, gpa: Allocator) ComptimeUnits.Mutable { | |
return .{ | |
.gpa = gpa, | |
.arena = &local.mutate.arena, | |
.mutate = &local.mutate.comptime_units, | |
.list = &local.shared.comptime_units, | |
}; | |
} | |
/// Rather than allocating Namespace objects with an Allocator, we instead allocate | |
/// them with this BucketList. This provides four advantages: | |
/// * Stable memory so that one thread can access a Namespace object while another | |
/// thread allocates additional Namespace objects from this list. | |
/// * It allows us to use u32 indexes to reference Namespace objects rather than | |
/// pointers, saving memory in types. | |
/// * Using integers to reference Namespace objects rather than pointers makes | |
/// serialization trivial. | |
/// * It provides a unique integer to be used for anonymous symbol names, avoiding | |
/// multi-threaded contention on an atomic counter. | |
pub fn getMutableNamespaces(local: *Local, gpa: Allocator) Namespaces.Mutable { | |
return .{ | |
.gpa = gpa, | |
.arena = &local.mutate.arena, | |
.mutate = &local.mutate.namespaces.buckets_list, | |
.list = &local.shared.namespaces, | |
}; | |
} | |
}; | |
pub fn getLocal(ip: *InternPool, tid: Zcu.PerThread.Id) *Local { | |
return &ip.locals[@intFromEnum(tid)]; | |
} | |
pub fn getLocalShared(ip: *const InternPool, tid: Zcu.PerThread.Id) *const Local.Shared { | |
return &ip.locals[@intFromEnum(tid)].shared; | |
} | |
const Shard = struct { | |
shared: struct { | |
map: Map(Index), | |
string_map: Map(OptionalNullTerminatedString), | |
tracked_inst_map: Map(TrackedInst.Index.Optional), | |
} align(std.atomic.cache_line), | |
mutate: struct { | |
// TODO: measure cost of sharing unrelated mutate state | |
map: Mutate align(std.atomic.cache_line), | |
string_map: Mutate align(std.atomic.cache_line), | |
tracked_inst_map: Mutate align(std.atomic.cache_line), | |
}, | |
const Mutate = struct { | |
mutex: std.Thread.Mutex.Recursive, | |
len: u32, | |
const empty: Mutate = .{ | |
.mutex = std.Thread.Mutex.Recursive.init, | |
.len = 0, | |
}; | |
}; | |
fn Map(comptime Value: type) type { | |
comptime assert(@typeInfo(Value).@"enum".tag_type == u32); | |
_ = @as(Value, .none); // `Value` must declare a `.none` sentinel | |
return struct { | |
/// header: Header, | |
/// entries: [header.capacity]Entry, | |
entries: [*]Entry, | |
const empty: @This() = .{ .entries = @constCast(&(extern struct { | |
header: Header, | |
entries: [1]Entry, | |
}{ | |
.header = .{ .capacity = 1 }, | |
.entries = .{.{ .value = .none, .hash = undefined }}, | |
}).entries) }; | |
const alignment = @max(@alignOf(Header), @alignOf(Entry)); | |
const entries_offset = std.mem.alignForward(usize, @sizeOf(Header), @alignOf(Entry)); | |
/// Must be called unless the mutate mutex is locked. | |
fn acquire(map: *const @This()) @This() { | |
return .{ .entries = @atomicLoad([*]Entry, &map.entries, .acquire) }; | |
} | |
fn release(map: *@This(), new_map: @This()) void { | |
@atomicStore([*]Entry, &map.entries, new_map.entries, .release); | |
} | |
const Header = extern struct { | |
capacity: u32, | |
fn mask(head: *const Header) u32 { | |
assert(std.math.isPowerOfTwo(head.capacity)); | |
return head.capacity - 1; | |
} | |
}; | |
fn header(map: @This()) *Header { | |
return @alignCast(@ptrCast(@as([*]u8, @ptrCast(map.entries)) - entries_offset)); | |
} | |
const Entry = extern struct { | |
value: Value, | |
hash: u32, | |
fn acquire(entry: *const Entry) Value { | |
return @atomicLoad(Value, &entry.value, .acquire); | |
} | |
fn release(entry: *Entry, value: Value) void { | |
assert(value != .none); | |
@atomicStore(Value, &entry.value, value, .release); | |
} | |
fn resetUnordered(entry: *Entry) void { | |
@atomicStore(Value, &entry.value, .none, .unordered); | |
} | |
}; | |
}; | |
} | |
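// Each map is a fixed-capacity, power-of-two table (the probing logic | |
// lives with the callers): writers insert under the shard's recursive | |
// mutex and store the entry's value last with release ordering, so a | |
// reader that acquires a non-.none value is guaranteed to see the | |
// entry's hash and backing data fully initialized. | |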
}; | |
fn getTidMask(ip: *const InternPool) u32 { | |
return (@as(u32, 1) << ip.tid_width) - 1; | |
} | |
fn getIndexMask(ip: *const InternPool, comptime BackingInt: type) u32 { | |
return @as(u32, std.math.maxInt(BackingInt)) >> ip.tid_width; | |
} | |
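// E.g. (hypothetical) with tid_width = 2: getTidMask() is 0b11 and | |
// getIndexMask(u32) is maxInt(u32) >> 2, so the top 2 bits of a 32-bit | |
// index carry the thread id and the low 30 bits carry the per-thread index. | |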
const FieldMap = std.ArrayHashMapUnmanaged(void, void, std.array_hash_map.AutoContext(void), false); | |
const builtin = @import("builtin"); | |
const std = @import("std"); | |
const Allocator = std.mem.Allocator; | |
const assert = std.debug.assert; | |
const BigIntConst = std.math.big.int.Const; | |
const BigIntMutable = std.math.big.int.Mutable; | |
const Cache = std.Build.Cache; | |
const Limb = std.math.big.Limb; | |
const Hash = std.hash.Wyhash; | |
const InternPool = @This(); | |
const Zcu = @import("Zcu.zig"); | |
const Zir = std.zig.Zir; | |
/// An index into `maps` which might be `none`. | |
pub const OptionalMapIndex = enum(u32) { | |
none = std.math.maxInt(u32), | |
_, | |
pub fn unwrap(oi: OptionalMapIndex) ?MapIndex { | |
if (oi == .none) return null; | |
return @enumFromInt(@intFromEnum(oi)); | |
} | |
}; | |
/// An index into `maps`. | |
pub const MapIndex = enum(u32) { | |
_, | |
pub fn get(map_index: MapIndex, ip: *const InternPool) *FieldMap { | |
const unwrapped_map_index = map_index.unwrap(ip); | |
const maps = ip.getLocalShared(unwrapped_map_index.tid).maps.acquire(); | |
return &maps.view().items(.@"0")[unwrapped_map_index.index]; | |
} | |
pub fn toOptional(i: MapIndex) OptionalMapIndex { | |
return @enumFromInt(@intFromEnum(i)); | |
} | |
const Unwrapped = struct { | |
tid: Zcu.PerThread.Id, | |
index: u32, | |
fn wrap(unwrapped: Unwrapped, ip: *const InternPool) MapIndex { | |
assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask()); | |
assert(unwrapped.index <= ip.getIndexMask(u32)); | |
return @enumFromInt(@as(u32, @intFromEnum(unwrapped.tid)) << ip.tid_shift_32 | | |
unwrapped.index); | |
} | |
}; | |
fn unwrap(map_index: MapIndex, ip: *const InternPool) Unwrapped { | |
return .{ | |
.tid = @enumFromInt(@intFromEnum(map_index) >> ip.tid_shift_32 & ip.getTidMask()), | |
.index = @intFromEnum(map_index) & ip.getIndexMask(u32), | |
}; | |
} | |
}; | |
pub const ComptimeAllocIndex = enum(u32) { _ }; | |
pub const NamespaceIndex = enum(u32) { | |
_, | |
const Unwrapped = struct { | |
tid: Zcu.PerThread.Id, | |
bucket_index: u32, | |
index: u32, | |
fn wrap(unwrapped: Unwrapped, ip: *const InternPool) NamespaceIndex { | |
assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask()); | |
assert(unwrapped.bucket_index <= ip.getIndexMask(u32) >> Local.namespaces_bucket_width); | |
assert(unwrapped.index <= Local.namespaces_bucket_mask); | |
return @enumFromInt(@as(u32, @intFromEnum(unwrapped.tid)) << ip.tid_shift_32 | | |
unwrapped.bucket_index << Local.namespaces_bucket_width | | |
unwrapped.index); | |
} | |
}; | |
fn unwrap(namespace_index: NamespaceIndex, ip: *const InternPool) Unwrapped { | |
const index = @intFromEnum(namespace_index) & ip.getIndexMask(u32); | |
return .{ | |
.tid = @enumFromInt(@intFromEnum(namespace_index) >> ip.tid_shift_32 & ip.getTidMask()), | |
.bucket_index = index >> Local.namespaces_bucket_width, | |
.index = index & Local.namespaces_bucket_mask, | |
}; | |
} | |
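// Example: with namespaces_bucket_width = 8, the low 8 bits of the | |
// in-thread index select a slot within a 256-element bucket and the | |
// remaining bits select the bucket, so index 0x123 means bucket 0x1, | |
// slot 0x23. | |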
pub fn toOptional(i: NamespaceIndex) OptionalNamespaceIndex { | |
return @enumFromInt(@intFromEnum(i)); | |
} | |
}; | |
pub const OptionalNamespaceIndex = enum(u32) { | |
none = std.math.maxInt(u32), | |
_, | |
pub fn init(oi: ?NamespaceIndex) OptionalNamespaceIndex { | |
return @enumFromInt(@intFromEnum(oi orelse return .none)); | |
} | |
pub fn unwrap(oi: OptionalNamespaceIndex) ?NamespaceIndex { | |
if (oi == .none) return null; | |
return @enumFromInt(@intFromEnum(oi)); | |
} | |
}; | |
pub const FileIndex = enum(u32) { | |
_, | |
const Unwrapped = struct { | |
tid: Zcu.PerThread.Id, | |
index: u32, | |
fn wrap(unwrapped: Unwrapped, ip: *const InternPool) FileIndex { | |
assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask()); | |
assert(unwrapped.index <= ip.getIndexMask(u32)); | |
return @enumFromInt(@as(u32, @intFromEnum(unwrapped.tid)) << ip.tid_shift_32 | | |
unwrapped.index); | |
} | |
}; | |
pub fn unwrap(file_index: FileIndex, ip: *const InternPool) Unwrapped { | |
return .{ | |
.tid = @enumFromInt(@intFromEnum(file_index) >> ip.tid_shift_32 & ip.getTidMask()), | |
.index = @intFromEnum(file_index) & ip.getIndexMask(u32), | |
}; | |
} | |
}; | |
const File = struct { | |
bin_digest: Cache.BinDigest, | |
file: *Zcu.File, | |
/// `.none` means no type has been created yet. | |
root_type: InternPool.Index, | |
}; | |
/// An index into `strings`. | |
pub const String = enum(u32) { | |
/// An empty string. | |
empty = 0, | |
_, | |
pub fn toSlice(string: String, len: u64, ip: *const InternPool) []const u8 { | |
return string.toOverlongSlice(ip)[0..@intCast(len)]; | |
} | |
pub fn at(string: String, index: u64, ip: *const InternPool) u8 { | |
return string.toOverlongSlice(ip)[@intCast(index)]; | |
} | |
pub fn toNullTerminatedString(string: String, len: u64, ip: *const InternPool) NullTerminatedString { | |
assert(std.mem.indexOfScalar(u8, string.toSlice(len, ip), 0) == null); | |
assert(string.at(len, ip) == 0); | |
return @enumFromInt(@intFromEnum(string)); | |
} | |
const Unwrapped = struct { | |
tid: Zcu.PerThread.Id, | |
index: u32, | |
fn wrap(unwrapped: Unwrapped, ip: *const InternPool) String { | |
assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask()); | |
assert(unwrapped.index <= ip.getIndexMask(u32)); | |
return @enumFromInt(@as(u32, @intFromEnum(unwrapped.tid)) << ip.tid_shift_32 | unwrapped.index); | |
} | |
}; | |
fn unwrap(string: String, ip: *const InternPool) Unwrapped { | |
return .{ | |
.tid = @enumFromInt(@intFromEnum(string) >> ip.tid_shift_32 & ip.getTidMask()), | |
.index = @intFromEnum(string) & ip.getIndexMask(u32), | |
}; | |
} | |
fn toOverlongSlice(string: String, ip: *const InternPool) []const u8 { | |
const unwrapped_string = string.unwrap(ip); | |
const strings = ip.getLocalShared(unwrapped_string.tid).strings.acquire(); | |
return strings.view().items(.@"0")[unwrapped_string.index..]; | |
} | |
const debug_state = InternPool.debug_state; | |
}; | |
/// An index into `strings` which might be `none`. | |
pub const OptionalString = enum(u32) { | |
/// This is distinct from `none` - it is a valid index that represents the empty string. | |
empty = 0, | |
none = std.math.maxInt(u32), | |
_, | |
pub fn unwrap(string: OptionalString) ?String { | |
return if (string != .none) @enumFromInt(@intFromEnum(string)) else null; | |
} | |
pub fn toSlice(string: OptionalString, len: u64, ip: *const InternPool) ?[]const u8 { | |
return (string.unwrap() orelse return null).toSlice(len, ip); | |
} | |
const debug_state = InternPool.debug_state; | |
}; | |
/// An index into `strings`. | |
pub const NullTerminatedString = enum(u32) { | |
/// An empty string. | |
empty = 0, | |
_, | |
/// An array of `NullTerminatedString` existing within the `extra` array. | |
/// This type exists to provide a struct with lifetime that is | |
/// not invalidated when items are added to the `InternPool`. | |
pub const Slice = struct { | |
tid: Zcu.PerThread.Id, | |
start: u32, | |
len: u32, | |
pub const empty: Slice = .{ .tid = .main, .start = 0, .len = 0 }; | |
pub fn get(slice: Slice, ip: *const InternPool) []NullTerminatedString { | |
const extra = ip.getLocalShared(slice.tid).extra.acquire(); | |
return @ptrCast(extra.view().items(.@"0")[slice.start..][0..slice.len]); | |
} | |
}; | |
pub fn toString(self: NullTerminatedString) String { | |
return @enumFromInt(@intFromEnum(self)); | |
} | |
pub fn toOptional(self: NullTerminatedString) OptionalNullTerminatedString { | |
return @enumFromInt(@intFromEnum(self)); | |
} | |
pub fn toSlice(string: NullTerminatedString, ip: *const InternPool) [:0]const u8 { | |
const overlong_slice = string.toString().toOverlongSlice(ip); | |
return overlong_slice[0..std.mem.indexOfScalar(u8, overlong_slice, 0).? :0]; | |
} | |
pub fn length(string: NullTerminatedString, ip: *const InternPool) u32 { | |
return @intCast(string.toSlice(ip).len); | |
} | |
pub fn eqlSlice(string: NullTerminatedString, slice: []const u8, ip: *const InternPool) bool { | |
const overlong_slice = string.toString().toOverlongSlice(ip); | |
return overlong_slice.len > slice.len and | |
std.mem.eql(u8, overlong_slice[0..slice.len], slice) and | |
overlong_slice[slice.len] == 0; | |
} | |
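// E.g. with "foo" stored (followed by its 0 terminator), eqlSlice against | |
// "foo" matches the three bytes and then sees the terminator; with | |
// "foobar" stored, the byte comparison succeeds but overlong_slice[3] is | |
// 'b' rather than 0, so inequality is correctly reported. | |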
const Adapter = struct { | |
strings: []const NullTerminatedString, | |
pub fn eql(ctx: @This(), a: NullTerminatedString, b_void: void, b_map_index: usize) bool { | |
_ = b_void; | |
return a == ctx.strings[b_map_index]; | |
} | |
pub fn hash(ctx: @This(), a: NullTerminatedString) u32 { | |
_ = ctx; | |
return std.hash.uint32(@intFromEnum(a)); | |
} | |
}; | |
/// Compare based on integer value alone, ignoring the string contents. | |
pub fn indexLessThan(ctx: void, a: NullTerminatedString, b: NullTerminatedString) bool { | |
_ = ctx; | |
return @intFromEnum(a) < @intFromEnum(b); | |
} | |
pub fn toUnsigned(string: NullTerminatedString, ip: *const InternPool) ?u32 { | |
const slice = string.toSlice(ip); | |
if (slice.len > 1 and slice[0] == '0') return null; | |
if (std.mem.indexOfScalar(u8, slice, '_')) |_| return null; | |
return std.fmt.parseUnsigned(u32, slice, 10) catch null; | |
} | |
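// Examples: "42" => 42; "0" => 0 (a lone zero passes the len > 1 guard); | |
// "042" => null (leading zero); "4_2" => null (underscore); | |
// "4294967296" => null (overflows u32, caught by parseUnsigned). | |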
const FormatData = struct { | |
string: NullTerminatedString, | |
ip: *const InternPool, | |
}; | |
fn format( | |
data: FormatData, | |
comptime specifier: []const u8, | |
_: std.fmt.FormatOptions, | |
writer: anytype, | |
) @TypeOf(writer).Error!void { | |
const slice = data.string.toSlice(data.ip); | |
if (comptime std.mem.eql(u8, specifier, "")) { | |
try writer.writeAll(slice); | |
} else if (comptime std.mem.eql(u8, specifier, "i")) { | |
try writer.print("{p}", .{std.zig.fmtId(slice)}); | |
} else @compileError("invalid format string '" ++ specifier ++ "' for '" ++ @typeName(NullTerminatedString) ++ "'"); | |
} | |
pub fn fmt(string: NullTerminatedString, ip: *const InternPool) std.fmt.Formatter(format) { | |
return .{ .data = .{ .string = string, .ip = ip } }; | |
} | |
const debug_state = InternPool.debug_state; | |
}; | |
/// An index into `strings` which might be `none`. | |
pub const OptionalNullTerminatedString = enum(u32) { | |
/// This is distinct from `none` - it is a valid index that represents the empty string. | |
empty = 0, | |
none = std.math.maxInt(u32), | |
_, | |
pub fn unwrap(string: OptionalNullTerminatedString) ?NullTerminatedString { | |
return if (string != .none) @enumFromInt(@intFromEnum(string)) else null; | |
} | |
pub fn toSlice(string: OptionalNullTerminatedString, ip: *const InternPool) ?[:0]const u8 { | |
return (string.unwrap() orelse return null).toSlice(ip); | |
} | |
const debug_state = InternPool.debug_state; | |
}; | |
/// A single value captured in the closure of a namespace type. This is not a plain | |
/// `Index` because we must differentiate between the following cases: | |
/// * runtime-known value (where we store the type) | |
/// * comptime-known value (where we store the value) | |
/// * `Nav` val (so that we can analyze the value lazily) | |
/// * `Nav` ref (so that we can analyze the reference lazily) | |
pub const CaptureValue = packed struct(u32) { | |
tag: enum(u2) { @"comptime", runtime, nav_val, nav_ref }, | |
idx: u30, | |
pub fn wrap(val: Unwrapped) CaptureValue { | |
return switch (val) { | |
.@"comptime" => |i| .{ .tag = .@"comptime", .idx = @intCast(@intFromEnum(i)) }, | |
.runtime => |i| .{ .tag = .runtime, .idx = @intCast(@intFromEnum(i)) }, | |
.nav_val => |i| .{ .tag = .nav_val, .idx = @intCast(@intFromEnum(i)) }, | |
.nav_ref => |i| .{ .tag = .nav_ref, .idx = @intCast(@intFromEnum(i)) }, | |
}; | |
} | |
pub fn unwrap(val: CaptureValue) Unwrapped { | |
return switch (val.tag) { | |
.@"comptime" => .{ .@"comptime" = @enumFromInt(val.idx) }, | |
.runtime => .{ .runtime = @enumFromInt(val.idx) }, | |
.nav_val => .{ .nav_val = @enumFromInt(val.idx) }, | |
.nav_ref => .{ .nav_ref = @enumFromInt(val.idx) }, | |
}; | |
} | |
pub const Unwrapped = union(enum) { | |
/// Index refers to the value. | |
@"comptime": Index, | |
/// Index refers to the type. | |
runtime: Index, | |
nav_val: Nav.Index, | |
nav_ref: Nav.Index, | |
}; | |
pub const Slice = struct { | |
tid: Zcu.PerThread.Id, | |
start: u32, | |
len: u32, | |
pub const empty: Slice = .{ .tid = .main, .start = 0, .len = 0 }; | |
pub fn get(slice: Slice, ip: *const InternPool) []CaptureValue { | |
const extra = ip.getLocalShared(slice.tid).extra.acquire(); | |
return @ptrCast(extra.view().items(.@"0")[slice.start..][0..slice.len]); | |
} | |
}; | |
}; | |
pub const Key = union(enum) { | |
int_type: IntType, | |
ptr_type: PtrType, | |
array_type: ArrayType, | |
vector_type: VectorType, | |
opt_type: Index, | |
/// `anyframe->T`. The payload is the child type, which may be `none` to indicate | |
/// `anyframe`. | |
anyframe_type: Index, | |
error_union_type: ErrorUnionType, | |
simple_type: SimpleType, | |
/// This represents a struct that has been explicitly declared in source code, | |
/// or was created with `@Type`. It is unique and based on a declaration. | |
/// It may be a tuple, if declared like this: `struct {A, B, C}`. | |
struct_type: NamespaceType, | |
/// This is a tuple type. Tuples are logically similar to structs, but have some | |
/// important differences in semantics: they do not undergo staged type resolution, | |
/// so cannot be self-referential; and they are not considered container/namespace | |
/// types, so cannot have declarations and instead have structural equality. | |
tuple_type: TupleType, | |
union_type: NamespaceType, | |
opaque_type: NamespaceType, | |
enum_type: NamespaceType, | |
func_type: FuncType, | |
error_set_type: ErrorSetType, | |
/// The payload is the function body, either a `func_decl` or `func_instance`. | |
inferred_error_set_type: Index, | |
/// Typed `undefined`. This will never be `none`; untyped `undefined` is represented | |
/// via `simple_value` and has a named `Index` tag for it. | |
undef: Index, | |
simple_value: SimpleValue, | |
variable: Variable, | |
@"extern": Extern, | |
func: Func, | |
int: Key.Int, | |
err: Error, | |
error_union: ErrorUnion, | |
enum_literal: NullTerminatedString, | |
/// A specific enum tag, indicated by the integer tag value. | |
enum_tag: EnumTag, | |
/// An empty enum or union. TODO: this value's existence is strange, because such a type in | |
/// reality has no values. See #15909. | |
/// Payload is the type for which we are an empty value. | |
empty_enum_value: Index, | |
float: Float, | |
ptr: Ptr, | |
slice: Slice, | |
opt: Opt, | |
/// An instance of a struct, array, or vector. | |
/// Each element/field stored as an `Index`. | |
/// In the case of sentinel-terminated arrays, the sentinel value *is* stored, | |
/// so the slice length will be one more than the type's array length. | |
aggregate: Aggregate, | |
/// An instance of a union. | |
un: Union, | |
/// A comptime function call with a memoized result. | |
memoized_call: Key.MemoizedCall, | |
pub const TypeValue = extern struct { | |
ty: Index, | |
val: Index, | |
}; | |
pub const IntType = std.builtin.Type.Int; | |
/// Extern for hashing via memory reinterpretation. | |
pub const ErrorUnionType = extern struct { | |
error_set_type: Index, | |
payload_type: Index, | |
}; | |
pub const ErrorSetType = struct { | |
/// Set of error names, sorted by null terminated string index. | |
names: NullTerminatedString.Slice, | |
/// This is ignored by `get` but will always be provided by `indexToKey`. | |
names_map: OptionalMapIndex = .none, | |
/// Look up field index based on field name. | |
pub fn nameIndex(self: ErrorSetType, ip: *const InternPool, name: NullTerminatedString) ?u32 { | |
const map = self.names_map.unwrap().?.get(ip); | |
const adapter: NullTerminatedString.Adapter = .{ .strings = self.names.get(ip) }; | |
const field_index = map.getIndexAdapted(name, adapter) orelse return null; | |
return @intCast(field_index); | |
} | |
}; | |
/// Extern layout so it can be hashed with `std.mem.asBytes`. | |
pub const PtrType = extern struct { | |
child: Index, | |
sentinel: Index = .none, | |
flags: Flags = .{}, | |
packed_offset: PackedOffset = .{ .bit_offset = 0, .host_size = 0 }, | |
pub const VectorIndex = enum(u16) { | |
none = std.math.maxInt(u16), | |
runtime = std.math.maxInt(u16) - 1, | |
_, | |
}; | |
pub const Flags = packed struct(u32) { | |
size: Size = .one, | |
/// `none` indicates the ABI alignment of the pointee_type. In this | |
/// case, this field *must* be set to `none`, otherwise the | |
/// `InternPool` equality and hashing functions will return incorrect | |
/// results. | |
alignment: Alignment = .none, | |
is_const: bool = false, | |
is_volatile: bool = false, | |
is_allowzero: bool = false, | |
/// See src/target.zig defaultAddressSpace function for how to obtain | |
/// an appropriate value for this field. | |
address_space: AddressSpace = .generic, | |
vector_index: VectorIndex = .none, | |
}; | |
pub const PackedOffset = packed struct(u32) { | |
/// If this is non-zero it means the pointer points to a sub-byte | |
/// range of data, which is backed by a "host integer" with this | |
/// number of bytes. | |
/// When host_size=pointee_abi_size and bit_offset=0, this must be | |
/// represented with host_size=0 instead. | |
host_size: u16, | |
bit_offset: u16, | |
}; | |
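// Worked example (illustrative): given s: packed struct(u8) { a: u3, b: u5 }, | |
// a pointer to s.b is a sub-byte pointer with host_size = 1 and | |
// bit_offset = 3, since b starts 3 bits into a 1-byte host integer. | |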
pub const Size = std.builtin.Type.Pointer.Size; | |
pub const AddressSpace = std.builtin.AddressSpace; | |
}; | |
/// Extern so that hashing can be done via memory reinterpreting. | |
pub const ArrayType = extern struct { | |
len: u64, | |
child: Index, | |
sentinel: Index = .none, | |
pub fn lenIncludingSentinel(array_type: ArrayType) u64 { | |
return array_type.len + @intFromBool(array_type.sentinel != .none); | |
} | |
}; | |
/// Extern so that hashing can be done via memory reinterpreting. | |
pub const VectorType = extern struct { | |
len: u32, | |
child: Index, | |
}; | |
pub const TupleType = struct { | |
types: Index.Slice, | |
/// These elements may be `none`, indicating runtime-known. | |
values: Index.Slice, | |
}; | |
/// This is the hashmap key. To fetch other data associated with the type, see: | |
/// * `loadStructType` | |
/// * `loadUnionType` | |
/// * `loadEnumType` | |
/// * `loadOpaqueType` | |
pub const NamespaceType = union(enum) { | |
/// This type corresponds to an actual source declaration, e.g. `struct { ... }`. | |
/// It is hashed based on its ZIR instruction index and set of captures. | |
declared: Declared, | |
/// This type is an automatically-generated enum tag type for a union. | |
/// It is hashed based on the index of the union type it corresponds to. | |
generated_tag: struct { | |
/// The union for which this is a tag type. | |
union_type: Index, | |
}, | |
/// This type originates from a reification via `@Type`, or from an anonymous initialization. | |
/// It is hashed based on its ZIR instruction index and fields, attributes, etc. | |
/// To avoid making this key overly complex, the type-specific data is hashed by Sema. | |
reified: struct { | |
/// A `reify`, `struct_init`, `struct_init_ref`, or `struct_init_anon` instruction. | |
zir_index: TrackedInst.Index, | |
/// A hash of this type's attributes, fields, etc, generated by Sema. | |
type_hash: u64, | |
}, | |
pub const Declared = struct { | |
/// A `struct_decl`, `union_decl`, `enum_decl`, or `opaque_decl` instruction. | |
zir_index: TrackedInst.Index, | |
/// The captured values of this type. These values must be fully resolved per the language spec. | |
captures: union(enum) { | |
owned: CaptureValue.Slice, | |
external: []const CaptureValue, | |
}, | |
}; | |
}; | |
pub const FuncType = struct { | |
param_types: Index.Slice, | |
return_type: Index, | |
/// Tells whether a parameter is comptime. See `paramIsComptime` helper | |
/// method for accessing this. | |
comptime_bits: u32, | |
/// Tells whether a parameter is noalias. See `paramIsNoalias` helper | |
/// method for accessing this. | |
noalias_bits: u32, | |
cc: std.builtin.CallingConvention, | |
is_var_args: bool, | |
is_generic: bool, | |
is_noinline: bool, | |
pub fn paramIsComptime(self: @This(), i: u5) bool { | |
assert(i < self.param_types.len); | |
return @as(u1, @truncate(self.comptime_bits >> i)) != 0; | |
} | |
pub fn paramIsNoalias(self: @This(), i: u5) bool { | |
assert(i < self.param_types.len); | |
return @as(u1, @truncate(self.noalias_bits >> i)) != 0; | |
} | |
pub fn eql(a: FuncType, b: FuncType, ip: *const InternPool) bool { | |
return std.mem.eql(Index, a.param_types.get(ip), b.param_types.get(ip)) and | |
a.return_type == b.return_type and | |
a.comptime_bits == b.comptime_bits and | |
a.noalias_bits == b.noalias_bits and | |
a.is_var_args == b.is_var_args and | |
a.is_generic == b.is_generic and | |
a.is_noinline == b.is_noinline and | |
std.meta.eql(a.cc, b.cc); | |
} | |
pub fn hash(self: FuncType, hasher: *Hash, ip: *const InternPool) void { | |
for (self.param_types.get(ip)) |param_type| { | |
std.hash.autoHash(hasher, param_type); | |
} | |
std.hash.autoHash(hasher, self.return_type); | |
std.hash.autoHash(hasher, self.comptime_bits); | |
std.hash.autoHash(hasher, self.noalias_bits); | |
std.hash.autoHash(hasher, self.cc); | |
std.hash.autoHash(hasher, self.is_var_args); | |
std.hash.autoHash(hasher, self.is_generic); | |
std.hash.autoHash(hasher, self.is_noinline); | |
} | |
}; | |
/// A runtime variable defined in this `Zcu`. | |
pub const Variable = struct { | |
ty: Index, | |
init: Index, | |
owner_nav: Nav.Index, | |
is_threadlocal: bool, | |
is_weak_linkage: bool, | |
}; | |
pub const Extern = struct { | |
/// The name of the extern symbol. | |
name: NullTerminatedString, | |
/// The type of the extern symbol itself. | |
/// This may be `.anyopaque_type`, in which case the value may not be loaded. | |
ty: Index, | |
/// Library name if specified. | |
/// For example `extern "c" fn write(...) usize` would have 'c' as library name. | |
/// Index into the string table bytes. | |
lib_name: OptionalNullTerminatedString, | |
is_const: bool, | |
is_threadlocal: bool, | |
is_weak_linkage: bool, | |
is_dll_import: bool, | |
alignment: Alignment, | |
@"addrspace": std.builtin.AddressSpace, | |
/// The ZIR instruction which created this extern; used only for source locations. | |
/// This is a `declaration`. | |
zir_index: TrackedInst.Index, | |
/// The `Nav` corresponding to this extern symbol. | |
/// This is ignored by hashing and equality. | |
owner_nav: Nav.Index, | |
}; | |
pub const Func = struct { | |
tid: Zcu.PerThread.Id, | |
/// In the case of a generic function, this type will potentially have fewer parameters | |
/// than the generic owner's type, because the comptime parameters will be deleted. | |
ty: Index, | |
/// If this is a function body that has been coerced to a different type, for example | |
/// ``` | |
/// fn f2() !void {} | |
/// const f: fn()anyerror!void = f2; | |
/// ``` | |
/// then it contains the original type of the function body. | |
uncoerced_ty: Index, | |
/// Index into extra array of the `FuncAnalysis` corresponding to this function. | |
/// Used for mutating that data. | |
analysis_extra_index: u32, | |
/// Index into extra array of the `zir_body_inst` corresponding to this function. | |
/// Used for mutating that data. | |
zir_body_inst_extra_index: u32, | |
/// Index into extra array of the resolved inferred error set for this function. | |
/// Used for mutating that data. | |
/// 0 when the function does not have an inferred error set. | |
resolved_error_set_extra_index: u32, | |
/// When a generic function is instantiated, branch_quota is inherited from the | |
/// active Sema context. Importantly, this value is also updated when an existing | |
/// generic function instantiation is found and called. | |
/// This field contains the index into the extra array of this value, | |
/// so that it can be mutated. | |
/// This will be 0 when the function is not a generic function instantiation. | |
branch_quota_extra_index: u32, | |
owner_nav: Nav.Index, | |
/// The ZIR instruction that is a function instruction. Use this to find | |
/// the body. We store this rather than the body directly so that when ZIR | |
/// is regenerated on update(), we can map this to the new corresponding | |
/// ZIR instruction. | |
zir_body_inst: TrackedInst.Index, | |
/// Relative to owner Decl. | |
lbrace_line: u32, | |
/// Relative to owner Decl. | |
rbrace_line: u32, | |
lbrace_column: u32, | |
rbrace_column: u32, | |
/// The `func_decl` which is the generic function from whence this instance was spawned. | |
/// If this is `none` it means the function is not a generic instantiation. | |
generic_owner: Index, | |
/// If this is a generic function instantiation, this will be non-empty. | |
/// Corresponds to the parameters of the `generic_owner` type, which | |
/// may have more parameters than `ty`. | |
/// Each element is the comptime-known value the generic function was instantiated with, | |
/// or `none` if the element is runtime-known. | |
/// TODO: as a follow-up optimization, don't store `none` values here since that data | |
/// is redundant with `comptime_bits` stored elsewhere. | |
comptime_args: Index.Slice, | |
/// Returns a pointer that becomes invalid after any additions to the `InternPool`. | |
fn analysisPtr(func: Func, ip: *const InternPool) *FuncAnalysis { | |
const extra = ip.getLocalShared(func.tid).extra.acquire(); | |
return @ptrCast(&extra.view().items(.@"0")[func.analysis_extra_index]); | |
} | |
pub fn analysisUnordered(func: Func, ip: *const InternPool) FuncAnalysis { | |
return @atomicLoad(FuncAnalysis, func.analysisPtr(ip), .unordered); | |
} | |
pub fn setBranchHint(func: Func, ip: *InternPool, hint: std.builtin.BranchHint) void { | |
const extra_mutex = &ip.getLocal(func.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
const analysis_ptr = func.analysisPtr(ip); | |
var analysis = analysis_ptr.*; | |
analysis.branch_hint = hint; | |
@atomicStore(FuncAnalysis, analysis_ptr, analysis, .release); | |
} | |
pub fn setAnalyzed(func: Func, ip: *InternPool) void { | |
const extra_mutex = &ip.getLocal(func.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
const analysis_ptr = func.analysisPtr(ip); | |
var analysis = analysis_ptr.*; | |
analysis.is_analyzed = true; | |
@atomicStore(FuncAnalysis, analysis_ptr, analysis, .release); | |
} | |
/// Returns a pointer that becomes invalid after any additions to the `InternPool`. | |
fn zirBodyInstPtr(func: Func, ip: *const InternPool) *TrackedInst.Index { | |
const extra = ip.getLocalShared(func.tid).extra.acquire(); | |
return @ptrCast(&extra.view().items(.@"0")[func.zir_body_inst_extra_index]); | |
} | |
pub fn zirBodyInstUnordered(func: Func, ip: *const InternPool) TrackedInst.Index { | |
return @atomicLoad(TrackedInst.Index, func.zirBodyInstPtr(ip), .unordered); | |
} | |
/// Returns a pointer that becomes invalid after any additions to the `InternPool`. | |
fn branchQuotaPtr(func: Func, ip: *const InternPool) *u32 { | |
const extra = ip.getLocalShared(func.tid).extra.acquire(); | |
return &extra.view().items(.@"0")[func.branch_quota_extra_index]; | |
} | |
pub fn branchQuotaUnordered(func: Func, ip: *const InternPool) u32 { | |
return @atomicLoad(u32, func.branchQuotaPtr(ip), .unordered); | |
} | |
pub fn maxBranchQuota(func: Func, ip: *InternPool, new_branch_quota: u32) void { | |
const extra_mutex = &ip.getLocal(func.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
const branch_quota_ptr = func.branchQuotaPtr(ip); | |
@atomicStore(u32, branch_quota_ptr, @max(branch_quota_ptr.*, new_branch_quota), .release); | |
} | |
/// Returns a pointer that becomes invalid after any additions to the `InternPool`. | |
fn resolvedErrorSetPtr(func: Func, ip: *const InternPool) *Index { | |
const extra = ip.getLocalShared(func.tid).extra.acquire(); | |
assert(func.analysisUnordered(ip).inferred_error_set); | |
return @ptrCast(&extra.view().items(.@"0")[func.resolved_error_set_extra_index]); | |
} | |
pub fn resolvedErrorSetUnordered(func: Func, ip: *const InternPool) Index { | |
return @atomicLoad(Index, func.resolvedErrorSetPtr(ip), .unordered); | |
} | |
pub fn setResolvedErrorSet(func: Func, ip: *InternPool, ies: Index) void { | |
const extra_mutex = &ip.getLocal(func.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
@atomicStore(Index, func.resolvedErrorSetPtr(ip), ies, .release); | |
} | |
}; | |
pub const Int = struct { | |
ty: Index, | |
storage: Storage, | |
pub const Storage = union(enum) { | |
u64: u64, | |
i64: i64, | |
big_int: BigIntConst, | |
lazy_align: Index, | |
lazy_size: Index, | |
/// Big enough to fit any non-BigInt value | |
pub const BigIntSpace = struct { | |
/// The +1 is headroom so that operations such as incrementing once | |
/// or decrementing once are possible without using an allocator. | |
limbs: [(@sizeOf(u64) / @sizeOf(std.math.big.Limb)) + 1]std.math.big.Limb, | |
}; | |
pub fn toBigInt(storage: Storage, space: *BigIntSpace) BigIntConst { | |
return switch (storage) { | |
.big_int => |x| x, | |
inline .u64, .i64 => |x| BigIntMutable.init(&space.limbs, x).toConst(), | |
.lazy_align, .lazy_size => unreachable, | |
}; | |
} | |
}; | |
}; | |
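// Standalone sketch of the `BigIntSpace`/`toBigInt` idea above: stack space for
// one u64 worth of limbs plus one limb of headroom lets a scalar be viewed as a
// big integer without an allocator (uses only `std.math.big`; names illustrative):
//
// const std = @import("std");
// const big = std.math.big;
//
// fn demoToBigInt() void {
//     var limbs: [(@sizeOf(u64) / @sizeOf(big.Limb)) + 1]big.Limb = undefined;
//     const value = big.int.Mutable.init(&limbs, @as(u64, 42)).toConst();
//     std.debug.assert(value.orderAgainstScalar(42) == .eq);
// }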
pub const Error = extern struct { | |
ty: Index, | |
name: NullTerminatedString, | |
}; | |
pub const ErrorUnion = struct { | |
ty: Index, | |
val: Value, | |
pub const Value = union(enum) { | |
err_name: NullTerminatedString, | |
payload: Index, | |
}; | |
}; | |
pub const EnumTag = extern struct { | |
/// The enum type. | |
ty: Index, | |
/// The integer tag value which has the integer tag type of the enum. | |
int: Index, | |
}; | |
pub const Float = struct { | |
ty: Index, | |
/// The storage used must match the size of the float type being represented. | |
storage: Storage, | |
pub const Storage = union(enum) { | |
f16: f16, | |
f32: f32, | |
f64: f64, | |
f80: f80, | |
f128: f128, | |
}; | |
}; | |
pub const Ptr = struct { | |
/// This is the pointer type, not the element type. | |
ty: Index, | |
/// The base address which this pointer is offset from. | |
base_addr: BaseAddr, | |
/// The offset of this pointer from `base_addr` in bytes. | |
byte_offset: u64, | |
pub const BaseAddr = union(enum) { | |
const Tag = @typeInfo(BaseAddr).@"union".tag_type.?; | |
/// Points to the value of a single `Nav`, which may be constant or a `variable`. | |
nav: Nav.Index, | |
/// Points to the value of a single comptime alloc stored in `Sema`. | |
comptime_alloc: ComptimeAllocIndex, | |
/// Points to a single unnamed constant value. | |
uav: Uav, | |
/// Points to a comptime field of a struct. Index is the field's value. | |
/// | |
/// TODO: this exists because these fields are semantically mutable. We | |
/// should probably change the language so that this isn't the case. | |
comptime_field: Index, | |
/// A pointer with a fixed integer address, usually from `@ptrFromInt`. | |
/// | |
/// The address is stored entirely in `byte_offset`, which will be positive | |
/// and in range of a `usize`. The base address is, for all intents and purposes, 0. | |
int, | |
/// A pointer to the payload of an error union. Index is the error union pointer. | |
/// To ensure a canonical representation, the type of the base pointer must: | |
/// * be a one-pointer | |
/// * be `const`, `volatile` and `allowzero` | |
/// * have alignment 1 | |
/// * have the same address space as this pointer | |
/// * have a host size, bit offset, and vector index of 0 | |
/// See `Value.canonicalizeBasePtr` which enforces these properties. | |
eu_payload: Index, | |
/// A pointer to the payload of a non-pointer-like optional. Index is the | |
/// optional pointer. To ensure a canonical representation, the base | |
/// pointer is subject to the same restrictions as in `eu_payload`. | |
opt_payload: Index, | |
/// A pointer to a field of a slice, or of an auto-layout struct or union. Slice fields | |
/// are referenced according to `Value.slice_ptr_index` and `Value.slice_len_index`. | |
/// Base is the aggregate pointer, which is subject to the same restrictions as | |
/// in `eu_payload`. | |
field: BaseIndex, | |
/// A pointer to an element of a comptime-only array. Base is the | |
/// many-pointer we are indexing into. It is subject to the same restrictions | |
/// as in `eu_payload`, except it must be a many-pointer rather than a one-pointer. | |
/// | |
/// The element type of the base pointer must NOT be an array. Additionally, the | |
/// base pointer is guaranteed to not be an `arr_elem` into a pointer with the | |
/// same child type. Thus, since there are no two comptime-only types which are | |
/// IMC (in-memory coercible) to one another, the only case where the base | |
/// pointer may also be an | |
/// `arr_elem` is when this pointer is semantically invalid (e.g. it reinterprets | |
/// a `type` as a `comptime_int`). These restrictions are in place to ensure | |
/// a canonical representation. | |
/// | |
/// This kind of base address differs from others in that it may refer to any | |
/// sequence of values; for instance, an `arr_elem` at index 2 may refer to | |
/// any number of elements starting from index 2. | |
/// | |
/// Index must not be 0. To refer to the element at index 0, simply reinterpret | |
/// the aggregate pointer. | |
arr_elem: BaseIndex, | |
pub const BaseIndex = struct { | |
base: Index, | |
index: u64, | |
}; | |
pub const Uav = extern struct { | |
val: Index, | |
/// Contains the canonical pointer type of the anonymous | |
/// declaration. This may equal `ty` of the `Ptr` or it may be | |
/// different. Importantly, when lowering the anonymous decl, | |
/// the original pointer type alignment must be used. | |
orig_ty: Index, | |
}; | |
pub fn eql(a: BaseAddr, b: BaseAddr) bool { | |
if (@as(Key.Ptr.BaseAddr.Tag, a) != @as(Key.Ptr.BaseAddr.Tag, b)) return false; | |
return switch (a) { | |
.nav => |a_nav| a_nav == b.nav, | |
.comptime_alloc => |a_alloc| a_alloc == b.comptime_alloc, | |
.uav => |ad| ad.val == b.uav.val and | |
ad.orig_ty == b.uav.orig_ty, | |
.int => true, | |
.eu_payload => |a_eu_payload| a_eu_payload == b.eu_payload, | |
.opt_payload => |a_opt_payload| a_opt_payload == b.opt_payload, | |
.comptime_field => |a_comptime_field| a_comptime_field == b.comptime_field, | |
.arr_elem => |a_elem| std.meta.eql(a_elem, b.arr_elem), | |
.field => |a_field| std.meta.eql(a_field, b.field), | |
}; | |
} | |
}; | |
}; | |
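// Illustrative example of the `base_addr` + `byte_offset` split (pseudo-values,
// not real pool contents): a comptime-known pointer two bytes into a global `s`
// could be represented roughly as:
//
//   .ptr = .{
//       .ty = <the pointer type>,
//       .base_addr = .{ .nav = <Nav.Index of the global `s`> },
//       .byte_offset = 2, // two bytes past the start of `s`
//   }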
pub const Slice = struct { | |
/// This is the slice type, not the element type. | |
ty: Index, | |
/// The slice's `ptr` field. Must be a many-ptr with the same properties as `ty`. | |
ptr: Index, | |
/// The slice's `len` field. Must be a `usize`. | |
len: Index, | |
}; | |
/// `null` is represented by the `val` field being `none`. | |
pub const Opt = extern struct { | |
/// This is the optional type; not the payload type. | |
ty: Index, | |
/// This could be `none`, indicating the optional is `null`. | |
val: Index, | |
}; | |
pub const Union = extern struct { | |
/// This is the union type; not the field type. | |
ty: Index, | |
/// Indicates the active field. This could be `none`, which indicates the tag is | |
/// not known. `none` is only a valid value for extern and packed unions. | |
/// In those cases, the type of `val` is: | |
/// extern: a u8 array of the same byte length as the union | |
/// packed: an unsigned integer with the same bit size as the union | |
tag: Index, | |
/// The value of the active field. | |
val: Index, | |
}; | |
pub const Aggregate = struct { | |
ty: Index, | |
storage: Storage, | |
pub const Storage = union(enum) { | |
bytes: String, | |
elems: []const Index, | |
repeated_elem: Index, | |
pub fn values(self: *const Storage) []const Index { | |
return switch (self.*) { | |
.bytes => &.{}, | |
.elems => |elems| elems, | |
.repeated_elem => |*elem| @as(*const [1]Index, elem), | |
}; | |
} | |
}; | |
}; | |
pub const MemoizedCall = struct { | |
func: Index, | |
arg_values: []const Index, | |
result: Index, | |
branch_count: u32, | |
}; | |
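// Note (illustrative): only `func` and `arg_values` participate in `hash64` and
// `eql` below; `result` and `branch_count` are the memoized outputs that get
// looked up by that key.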
pub fn hash32(key: Key, ip: *const InternPool) u32 { | |
return @truncate(key.hash64(ip)); | |
} | |
pub fn hash64(key: Key, ip: *const InternPool) u64 { | |
const asBytes = std.mem.asBytes; | |
const KeyTag = @typeInfo(Key).@"union".tag_type.?; | |
const seed = @intFromEnum(@as(KeyTag, key)); | |
return switch (key) { | |
// TODO: assert no padding in these types | |
inline .ptr_type, | |
.array_type, | |
.vector_type, | |
.opt_type, | |
.anyframe_type, | |
.error_union_type, | |
.simple_type, | |
.simple_value, | |
.opt, | |
.undef, | |
.err, | |
.enum_literal, | |
.enum_tag, | |
.empty_enum_value, | |
.inferred_error_set_type, | |
.un, | |
=> |x| Hash.hash(seed, asBytes(&x)), | |
.int_type => |x| Hash.hash(seed + @intFromEnum(x.signedness), asBytes(&x.bits)), | |
.error_union => |x| switch (x.val) { | |
.err_name => |y| Hash.hash(seed + 0, asBytes(&x.ty) ++ asBytes(&y)), | |
.payload => |y| Hash.hash(seed + 1, asBytes(&x.ty) ++ asBytes(&y)), | |
}, | |
.variable => |variable| Hash.hash(seed, asBytes(&variable.owner_nav)), | |
.opaque_type, | |
.enum_type, | |
.union_type, | |
.struct_type, | |
=> |namespace_type| { | |
var hasher = Hash.init(seed); | |
std.hash.autoHash(&hasher, std.meta.activeTag(namespace_type)); | |
switch (namespace_type) { | |
.declared => |declared| { | |
std.hash.autoHash(&hasher, declared.zir_index); | |
const captures = switch (declared.captures) { | |
.owned => |cvs| cvs.get(ip), | |
.external => |cvs| cvs, | |
}; | |
for (captures) |cv| { | |
std.hash.autoHash(&hasher, cv); | |
} | |
}, | |
.generated_tag => |generated_tag| { | |
std.hash.autoHash(&hasher, generated_tag.union_type); | |
}, | |
.reified => |reified| { | |
std.hash.autoHash(&hasher, reified.zir_index); | |
std.hash.autoHash(&hasher, reified.type_hash); | |
}, | |
} | |
return hasher.final(); | |
}, | |
.int => |int| { | |
var hasher = Hash.init(seed); | |
// Canonicalize all integers by converting them to BigIntConst. | |
switch (int.storage) { | |
.u64, .i64, .big_int => { | |
var buffer: Key.Int.Storage.BigIntSpace = undefined; | |
const big_int = int.storage.toBigInt(&buffer); | |
std.hash.autoHash(&hasher, int.ty); | |
std.hash.autoHash(&hasher, big_int.positive); | |
for (big_int.limbs) |limb| std.hash.autoHash(&hasher, limb); | |
}, | |
.lazy_align, .lazy_size => |lazy_ty| { | |
std.hash.autoHash( | |
&hasher, | |
@as(@typeInfo(Key.Int.Storage).@"union".tag_type.?, int.storage), | |
); | |
std.hash.autoHash(&hasher, lazy_ty); | |
}, | |
} | |
return hasher.final(); | |
}, | |
.float => |float| { | |
var hasher = Hash.init(seed); | |
std.hash.autoHash(&hasher, float.ty); | |
switch (float.storage) { | |
inline else => |val| std.hash.autoHash( | |
&hasher, | |
@as(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(val))), @bitCast(val)), | |
), | |
} | |
return hasher.final(); | |
}, | |
.slice => |slice| Hash.hash(seed, asBytes(&slice.ty) ++ asBytes(&slice.ptr) ++ asBytes(&slice.len)), | |
.ptr => |ptr| { | |
// Int-to-ptr pointers are hashed separately from decl-referencing pointers. | |
// This is sound due to pointer provenance rules. | |
const addr_tag: Key.Ptr.BaseAddr.Tag = ptr.base_addr; | |
const seed2 = seed + @intFromEnum(addr_tag); | |
const big_offset: i128 = ptr.byte_offset; | |
const common = asBytes(&ptr.ty) ++ asBytes(&big_offset); | |
return switch (ptr.base_addr) { | |
inline .nav, | |
.comptime_alloc, | |
.uav, | |
.int, | |
.eu_payload, | |
.opt_payload, | |
.comptime_field, | |
=> |x| Hash.hash(seed2, common ++ asBytes(&x)), | |
.arr_elem, .field => |x| Hash.hash( | |
seed2, | |
common ++ asBytes(&x.base) ++ asBytes(&x.index), | |
), | |
}; | |
}, | |
.aggregate => |aggregate| { | |
var hasher = Hash.init(seed); | |
std.hash.autoHash(&hasher, aggregate.ty); | |
const len = ip.aggregateTypeLen(aggregate.ty); | |
const child = switch (ip.indexToKey(aggregate.ty)) { | |
.array_type => |array_type| array_type.child, | |
.vector_type => |vector_type| vector_type.child, | |
.tuple_type, .struct_type => .none, | |
else => unreachable, | |
}; | |
if (child == .u8_type) { | |
switch (aggregate.storage) { | |
.bytes => |bytes| for (bytes.toSlice(len, ip)) |byte| { | |
std.hash.autoHash(&hasher, KeyTag.int); | |
std.hash.autoHash(&hasher, byte); | |
}, | |
.elems => |elems| for (elems[0..@intCast(len)]) |elem| { | |
const elem_key = ip.indexToKey(elem); | |
std.hash.autoHash(&hasher, @as(KeyTag, elem_key)); | |
switch (elem_key) { | |
.undef => {}, | |
.int => |int| std.hash.autoHash( | |
&hasher, | |
@as(u8, @intCast(int.storage.u64)), | |
), | |
else => unreachable, | |
} | |
}, | |
.repeated_elem => |elem| { | |
const elem_key = ip.indexToKey(elem); | |
var remaining = len; | |
while (remaining > 0) : (remaining -= 1) { | |
std.hash.autoHash(&hasher, @as(KeyTag, elem_key)); | |
switch (elem_key) { | |
.undef => {}, | |
.int => |int| std.hash.autoHash( | |
&hasher, | |
@as(u8, @intCast(int.storage.u64)), | |
), | |
else => unreachable, | |
} | |
} | |
}, | |
} | |
return hasher.final(); | |
} | |
switch (aggregate.storage) { | |
.bytes => unreachable, | |
.elems => |elems| for (elems[0..@intCast(len)]) |elem| | |
std.hash.autoHash(&hasher, elem), | |
.repeated_elem => |elem| { | |
var remaining = len; | |
while (remaining > 0) : (remaining -= 1) std.hash.autoHash(&hasher, elem); | |
}, | |
} | |
return hasher.final(); | |
}, | |
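// Illustrative note on the byte special-case above: a `[N]u8` aggregate must
// hash identically whether its storage is `.bytes`, `.elems` of interned `u8`
// ints, or `.repeated_elem`, so each byte is hashed exactly as the
// corresponding `.int` key would be.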
.error_set_type => |x| Hash.hash(seed, std.mem.sliceAsBytes(x.names.get(ip))), | |
.tuple_type => |tuple_type| { | |
var hasher = Hash.init(seed); | |
for (tuple_type.types.get(ip)) |elem| std.hash.autoHash(&hasher, elem); | |
for (tuple_type.values.get(ip)) |elem| std.hash.autoHash(&hasher, elem); | |
return hasher.final(); | |
}, | |
.func_type => |func_type| { | |
var hasher = Hash.init(seed); | |
func_type.hash(&hasher, ip); | |
return hasher.final(); | |
}, | |
.memoized_call => |memoized_call| { | |
var hasher = Hash.init(seed); | |
std.hash.autoHash(&hasher, memoized_call.func); | |
for (memoized_call.arg_values) |arg| std.hash.autoHash(&hasher, arg); | |
return hasher.final(); | |
}, | |
.func => |func| { | |
// In the case of a function with an inferred error set, we | |
// must not include the inferred error set type in the hash, | |
// otherwise we would get false negatives for interning generic | |
// function instances which have inferred error sets. | |
if (func.generic_owner == .none and func.resolved_error_set_extra_index == 0) { | |
const bytes = asBytes(&func.owner_nav) ++ asBytes(&func.ty) ++ | |
[1]u8{@intFromBool(func.uncoerced_ty == func.ty)}; | |
return Hash.hash(seed, bytes); | |
} | |
var hasher = Hash.init(seed); | |
std.hash.autoHash(&hasher, func.generic_owner); | |
std.hash.autoHash(&hasher, func.uncoerced_ty == func.ty); | |
for (func.comptime_args.get(ip)) |arg| std.hash.autoHash(&hasher, arg); | |
if (func.resolved_error_set_extra_index == 0) { | |
std.hash.autoHash(&hasher, func.ty); | |
} else { | |
var ty_info = ip.indexToFuncType(func.ty).?; | |
ty_info.return_type = ip.errorUnionPayload(ty_info.return_type); | |
ty_info.hash(&hasher, ip); | |
} | |
return hasher.final(); | |
}, | |
.@"extern" => |e| Hash.hash(seed, asBytes(&e.name) ++ | |
asBytes(&e.ty) ++ asBytes(&e.lib_name) ++ | |
asBytes(&e.is_const) ++ asBytes(&e.is_threadlocal) ++ | |
asBytes(&e.is_weak_linkage) ++ asBytes(&e.alignment) ++ | |
asBytes(&e.is_dll_import) ++ asBytes(&e.@"addrspace") ++ | |
asBytes(&e.zir_index)), | |
}; | |
} | |
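// Standalone sketch of the seeding scheme used by `hash64` above: the union tag
// is folded into the hash seed so keys of different kinds cannot collide merely
// because their payload bytes match (uses `std.hash.Wyhash` as a stand-in for
// `Hash`; `DemoKey` and `demoHash` are illustrative):
//
// const std = @import("std");
//
// const DemoKey = union(enum) { a: u32, b: u32 };
//
// fn demoHash(key: DemoKey) u64 {
//     const DemoTag = @typeInfo(DemoKey).@"union".tag_type.?;
//     const seed: u64 = @intFromEnum(@as(DemoTag, key));
//     return switch (key) {
//         inline else => |x| std.hash.Wyhash.hash(seed, std.mem.asBytes(&x)),
//     };
// }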
pub fn eql(a: Key, b: Key, ip: *const InternPool) bool { | |
const KeyTag = @typeInfo(Key).@"union".tag_type.?; | |
const a_tag: KeyTag = a; | |
const b_tag: KeyTag = b; | |
if (a_tag != b_tag) return false; | |
switch (a) { | |
.int_type => |a_info| { | |
const b_info = b.int_type; | |
return std.meta.eql(a_info, b_info); | |
}, | |
.ptr_type => |a_info| { | |
const b_info = b.ptr_type; | |
return std.meta.eql(a_info, b_info); | |
}, | |
.array_type => |a_info| { | |
const b_info = b.array_type; | |
return std.meta.eql(a_info, b_info); | |
}, | |
.vector_type => |a_info| { | |
const b_info = b.vector_type; | |
return std.meta.eql(a_info, b_info); | |
}, | |
.opt_type => |a_info| { | |
const b_info = b.opt_type; | |
return a_info == b_info; | |
}, | |
.anyframe_type => |a_info| { | |
const b_info = b.anyframe_type; | |
return a_info == b_info; | |
}, | |
.error_union_type => |a_info| { | |
const b_info = b.error_union_type; | |
return std.meta.eql(a_info, b_info); | |
}, | |
.simple_type => |a_info| { | |
const b_info = b.simple_type; | |
return a_info == b_info; | |
}, | |
.simple_value => |a_info| { | |
const b_info = b.simple_value; | |
return a_info == b_info; | |
}, | |
.undef => |a_info| { | |
const b_info = b.undef; | |
return a_info == b_info; | |
}, | |
.opt => |a_info| { | |
const b_info = b.opt; | |
return std.meta.eql(a_info, b_info); | |
}, | |
.un => |a_info| { | |
const b_info = b.un; | |
return std.meta.eql(a_info, b_info); | |
}, | |
.err => |a_info| { | |
const b_info = b.err; | |
return std.meta.eql(a_info, b_info); | |
}, | |
.error_union => |a_info| { | |
const b_info = b.error_union; | |
return std.meta.eql(a_info, b_info); | |
}, | |
.enum_literal => |a_info| { | |
const b_info = b.enum_literal; | |
return a_info == b_info; | |
}, | |
.enum_tag => |a_info| { | |
const b_info = b.enum_tag; | |
return std.meta.eql(a_info, b_info); | |
}, | |
.empty_enum_value => |a_info| { | |
const b_info = b.empty_enum_value; | |
return a_info == b_info; | |
}, | |
.variable => |a_info| { | |
const b_info = b.variable; | |
return a_info.owner_nav == b_info.owner_nav and | |
a_info.ty == b_info.ty and | |
a_info.init == b_info.init and | |
a_info.is_threadlocal == b_info.is_threadlocal and | |
a_info.is_weak_linkage == b_info.is_weak_linkage; | |
}, | |
.@"extern" => |a_info| { | |
const b_info = b.@"extern"; | |
return a_info.name == b_info.name and | |
a_info.ty == b_info.ty and | |
a_info.lib_name == b_info.lib_name and | |
a_info.is_const == b_info.is_const and | |
a_info.is_threadlocal == b_info.is_threadlocal and | |
a_info.is_weak_linkage == b_info.is_weak_linkage and | |
a_info.is_dll_import == b_info.is_dll_import and | |
a_info.alignment == b_info.alignment and | |
a_info.@"addrspace" == b_info.@"addrspace" and | |
a_info.zir_index == b_info.zir_index; | |
}, | |
.func => |a_info| { | |
const b_info = b.func; | |
if (a_info.generic_owner != b_info.generic_owner) | |
return false; | |
if (a_info.generic_owner == .none) { | |
if (a_info.owner_nav != b_info.owner_nav) | |
return false; | |
} else { | |
if (!std.mem.eql( | |
Index, | |
a_info.comptime_args.get(ip), | |
b_info.comptime_args.get(ip), | |
)) return false; | |
} | |
if ((a_info.ty == a_info.uncoerced_ty) != | |
(b_info.ty == b_info.uncoerced_ty)) | |
{ | |
return false; | |
} | |
if (a_info.ty == b_info.ty) | |
return true; | |
// There is one case where the types may be unequal but we | |
// still want to find the same function body instance. In the | |
// case of the functions having an inferred error set, the key | |
// used to find an existing function body will necessarily have | |
// a unique inferred error set type, because it refers to the | |
// function body InternPool Index. To make this case work we | |
// omit the inferred error set from the equality check. | |
if (a_info.resolved_error_set_extra_index == 0 or | |
b_info.resolved_error_set_extra_index == 0) | |
{ | |
return false; | |
} | |
var a_ty_info = ip.indexToFuncType(a_info.ty).?; | |
a_ty_info.return_type = ip.errorUnionPayload(a_ty_info.return_type); | |
var b_ty_info = ip.indexToFuncType(b_info.ty).?; | |
b_ty_info.return_type = ip.errorUnionPayload(b_ty_info.return_type); | |
return a_ty_info.eql(b_ty_info, ip); | |
}, | |
.slice => |a_info| { | |
const b_info = b.slice; | |
if (a_info.ty != b_info.ty) return false; | |
if (a_info.ptr != b_info.ptr) return false; | |
if (a_info.len != b_info.len) return false; | |
return true; | |
}, | |
.ptr => |a_info| { | |
const b_info = b.ptr; | |
if (a_info.ty != b_info.ty) return false; | |
if (a_info.byte_offset != b_info.byte_offset) return false; | |
if (!a_info.base_addr.eql(b_info.base_addr)) return false; | |
return true; | |
}, | |
.int => |a_info| { | |
const b_info = b.int; | |
if (a_info.ty != b_info.ty) | |
return false; | |
return switch (a_info.storage) { | |
.u64 => |aa| switch (b_info.storage) { | |
.u64 => |bb| aa == bb, | |
.i64 => |bb| aa == bb, | |
.big_int => |bb| bb.orderAgainstScalar(aa) == .eq, | |
.lazy_align, .lazy_size => false, | |
}, | |
.i64 => |aa| switch (b_info.storage) { | |
.u64 => |bb| aa == bb, | |
.i64 => |bb| aa == bb, | |
.big_int => |bb| bb.orderAgainstScalar(aa) == .eq, | |
.lazy_align, .lazy_size => false, | |
}, | |
.big_int => |aa| switch (b_info.storage) { | |
.u64 => |bb| aa.orderAgainstScalar(bb) == .eq, | |
.i64 => |bb| aa.orderAgainstScalar(bb) == .eq, | |
.big_int => |bb| aa.eql(bb), | |
.lazy_align, .lazy_size => false, | |
}, | |
.lazy_align => |aa| switch (b_info.storage) { | |
.u64, .i64, .big_int, .lazy_size => false, | |
.lazy_align => |bb| aa == bb, | |
}, | |
.lazy_size => |aa| switch (b_info.storage) { | |
.u64, .i64, .big_int, .lazy_align => false, | |
.lazy_size => |bb| aa == bb, | |
}, | |
}; | |
}, | |
.float => |a_info| { | |
const b_info = b.float; | |
if (a_info.ty != b_info.ty) | |
return false; | |
if (a_info.ty == .c_longdouble_type and a_info.storage != .f80) { | |
// These are strange: we'll sometimes represent them as f128, even if the | |
// underlying type is smaller. f80 is an exception: see float_c_longdouble_f80. | |
const a_val: u128 = switch (a_info.storage) { | |
inline else => |val| @bitCast(@as(f128, @floatCast(val))), | |
}; | |
const b_val: u128 = switch (b_info.storage) { | |
inline else => |val| @bitCast(@as(f128, @floatCast(val))), | |
}; | |
return a_val == b_val; | |
} | |
const StorageTag = @typeInfo(Key.Float.Storage).@"union".tag_type.?; | |
assert(@as(StorageTag, a_info.storage) == @as(StorageTag, b_info.storage)); | |
switch (a_info.storage) { | |
inline else => |val, tag| { | |
const Bits = std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(val))); | |
const a_bits: Bits = @bitCast(val); | |
const b_bits: Bits = @bitCast(@field(b_info.storage, @tagName(tag))); | |
return a_bits == b_bits; | |
}, | |
} | |
}, | |
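// Illustrative note: floats are compared here (and hashed earlier in `hash64`)
// by bit pattern rather than with `==`, so distinct NaN payloads stay distinct
// and `+0.0`/`-0.0` do not collapse into a single interned value.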
inline .opaque_type, .enum_type, .union_type, .struct_type => |a_info, a_tag_ct| { | |
const b_info = @field(b, @tagName(a_tag_ct)); | |
if (std.meta.activeTag(a_info) != b_info) return false; | |
switch (a_info) { | |
.declared => |a_d| { | |
const b_d = b_info.declared; | |
if (a_d.zir_index != b_d.zir_index) return false; | |
const a_captures = switch (a_d.captures) { | |
.owned => |s| s.get(ip), | |
.external => |cvs| cvs, | |
}; | |
const b_captures = switch (b_d.captures) { | |
.owned => |s| s.get(ip), | |
.external => |cvs| cvs, | |
}; | |
return std.mem.eql(u32, @ptrCast(a_captures), @ptrCast(b_captures)); | |
}, | |
.generated_tag => |a_gt| return a_gt.union_type == b_info.generated_tag.union_type, | |
.reified => |a_r| { | |
const b_r = b_info.reified; | |
return a_r.zir_index == b_r.zir_index and | |
a_r.type_hash == b_r.type_hash; | |
}, | |
} | |
}, | |
.aggregate => |a_info| { | |
const b_info = b.aggregate; | |
if (a_info.ty != b_info.ty) return false; | |
const len = ip.aggregateTypeLen(a_info.ty); | |
const StorageTag = @typeInfo(Key.Aggregate.Storage).@"union".tag_type.?; | |
if (@as(StorageTag, a_info.storage) != @as(StorageTag, b_info.storage)) { | |
for (0..@intCast(len)) |elem_index| { | |
const a_elem = switch (a_info.storage) { | |
.bytes => |bytes| ip.getIfExists(.{ .int = .{ | |
.ty = .u8_type, | |
.storage = .{ .u64 = bytes.at(elem_index, ip) }, | |
} }) orelse return false, | |
.elems => |elems| elems[elem_index], | |
.repeated_elem => |elem| elem, | |
}; | |
const b_elem = switch (b_info.storage) { | |
.bytes => |bytes| ip.getIfExists(.{ .int = .{ | |
.ty = .u8_type, | |
.storage = .{ .u64 = bytes.at(elem_index, ip) }, | |
} }) orelse return false, | |
.elems => |elems| elems[elem_index], | |
.repeated_elem => |elem| elem, | |
}; | |
if (a_elem != b_elem) return false; | |
} | |
return true; | |
} | |
switch (a_info.storage) { | |
.bytes => |a_bytes| { | |
const b_bytes = b_info.storage.bytes; | |
return a_bytes == b_bytes or | |
std.mem.eql(u8, a_bytes.toSlice(len, ip), b_bytes.toSlice(len, ip)); | |
}, | |
.elems => |a_elems| { | |
const b_elems = b_info.storage.elems; | |
return std.mem.eql( | |
Index, | |
a_elems[0..@intCast(len)], | |
b_elems[0..@intCast(len)], | |
); | |
}, | |
.repeated_elem => |a_elem| { | |
const b_elem = b_info.storage.repeated_elem; | |
return a_elem == b_elem; | |
}, | |
} | |
}, | |
.tuple_type => |a_info| { | |
const b_info = b.tuple_type; | |
return std.mem.eql(Index, a_info.types.get(ip), b_info.types.get(ip)) and | |
std.mem.eql(Index, a_info.values.get(ip), b_info.values.get(ip)); | |
}, | |
.error_set_type => |a_info| { | |
const b_info = b.error_set_type; | |
return std.mem.eql(NullTerminatedString, a_info.names.get(ip), b_info.names.get(ip)); | |
}, | |
.inferred_error_set_type => |a_info| { | |
const b_info = b.inferred_error_set_type; | |
return a_info == b_info; | |
}, | |
.func_type => |a_info| { | |
const b_info = b.func_type; | |
return Key.FuncType.eql(a_info, b_info, ip); | |
}, | |
.memoized_call => |a_info| { | |
const b_info = b.memoized_call; | |
return a_info.func == b_info.func and | |
std.mem.eql(Index, a_info.arg_values, b_info.arg_values); | |
}, | |
} | |
} | |
pub fn typeOf(key: Key) Index { | |
return switch (key) { | |
.int_type, | |
.ptr_type, | |
.array_type, | |
.vector_type, | |
.opt_type, | |
.anyframe_type, | |
.error_union_type, | |
.error_set_type, | |
.inferred_error_set_type, | |
.simple_type, | |
.struct_type, | |
.union_type, | |
.opaque_type, | |
.enum_type, | |
.tuple_type, | |
.func_type, | |
=> .type_type, | |
inline .ptr, | |
.slice, | |
.int, | |
.float, | |
.opt, | |
.variable, | |
.@"extern", | |
.func, | |
.err, | |
.error_union, | |
.enum_tag, | |
.aggregate, | |
.un, | |
=> |x| x.ty, | |
.enum_literal => .enum_literal_type, | |
.undef => |x| x, | |
.empty_enum_value => |x| x, | |
.simple_value => |s| switch (s) { | |
.undefined => .undefined_type, | |
.void => .void_type, | |
.null => .null_type, | |
.false, .true => .bool_type, | |
.empty_tuple => .empty_tuple_type, | |
.@"unreachable" => .noreturn_type, | |
}, | |
.memoized_call => unreachable, | |
}; | |
} | |
}; | |
pub const RequiresComptime = enum(u2) { no, yes, unknown, wip }; | |
// Unlike `Tag.TypeUnion` which is an encoding, and `Key.UnionType` which is a | |
// minimal hashmap key, this type is a convenience type that contains info | |
// needed by semantic analysis. | |
pub const LoadedUnionType = struct { | |
tid: Zcu.PerThread.Id, | |
/// The index of the `Tag.TypeUnion` payload. | |
extra_index: u32, | |
// TODO: the non-fqn will be needed by the new dwarf structure | |
/// The name of this union type. | |
name: NullTerminatedString, | |
/// Represents the declarations inside this union. | |
namespace: NamespaceIndex, | |
/// The enum tag type. | |
enum_tag_ty: Index, | |
/// List of field types in declaration order. | |
/// These are `none` until `status` is `have_field_types` or `have_layout`. | |
field_types: Index.Slice, | |
/// List of field alignments in declaration order. | |
/// `none` means the ABI alignment of the type. | |
/// If this slice has length 0 it means all elements are `none`. | |
field_aligns: Alignment.Slice, | |
/// Index of the union_decl or reify ZIR instruction. | |
zir_index: TrackedInst.Index, | |
captures: CaptureValue.Slice, | |
pub const RuntimeTag = enum(u2) { | |
none, | |
safety, | |
tagged, | |
pub fn hasTag(self: RuntimeTag) bool { | |
return switch (self) { | |
.none => false, | |
.tagged, .safety => true, | |
}; | |
} | |
}; | |
pub const Status = enum(u3) { | |
none, | |
field_types_wip, | |
have_field_types, | |
layout_wip, | |
have_layout, | |
fully_resolved_wip, | |
/// The type and all its fields have had their layout resolved. | |
/// Even through pointers, which `have_layout` does not ensure. | |
fully_resolved, | |
pub fn haveFieldTypes(status: Status) bool { | |
return switch (status) { | |
.none, | |
.field_types_wip, | |
=> false, | |
.have_field_types, | |
.layout_wip, | |
.have_layout, | |
.fully_resolved_wip, | |
.fully_resolved, | |
=> true, | |
}; | |
} | |
pub fn haveLayout(status: Status) bool { | |
return switch (status) { | |
.none, | |
.field_types_wip, | |
.have_field_types, | |
.layout_wip, | |
=> false, | |
.have_layout, | |
.fully_resolved_wip, | |
.fully_resolved, | |
=> true, | |
}; | |
} | |
}; | |
pub fn loadTagType(self: LoadedUnionType, ip: *const InternPool) LoadedEnumType { | |
return ip.loadEnumType(self.enum_tag_ty); | |
} | |
/// Pointer to an enum type which is used for the tag of the union. | |
/// This type is created even for untagged unions, including when the memory | |
/// layout does not store the tag. | |
/// Whether Zig chooses this type or the user specifies it, it is stored here. | |
/// This will be set to the null type until status is `have_field_types`. | |
/// This accessor is provided so that the tag type can be mutated, and so that | |
/// when it is mutated, the mutations are observed. | |
/// The returned pointer expires with any addition to the `InternPool`. | |
fn tagTypePtr(self: LoadedUnionType, ip: *const InternPool) *Index { | |
const extra = ip.getLocalShared(self.tid).extra.acquire(); | |
const field_index = std.meta.fieldIndex(Tag.TypeUnion, "tag_ty").?; | |
return @ptrCast(&extra.view().items(.@"0")[self.extra_index + field_index]); | |
} | |
pub fn tagTypeUnordered(u: LoadedUnionType, ip: *const InternPool) Index { | |
return @atomicLoad(Index, u.tagTypePtr(ip), .unordered); | |
} | |
pub fn setTagType(u: LoadedUnionType, ip: *InternPool, tag_type: Index) void { | |
const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
@atomicStore(Index, u.tagTypePtr(ip), tag_type, .release); | |
} | |
/// The returned pointer expires with any addition to the `InternPool`. | |
fn flagsPtr(self: LoadedUnionType, ip: *const InternPool) *Tag.TypeUnion.Flags { | |
const extra = ip.getLocalShared(self.tid).extra.acquire(); | |
const field_index = std.meta.fieldIndex(Tag.TypeUnion, "flags").?; | |
return @ptrCast(&extra.view().items(.@"0")[self.extra_index + field_index]); | |
} | |
pub fn flagsUnordered(u: LoadedUnionType, ip: *const InternPool) Tag.TypeUnion.Flags { | |
return @atomicLoad(Tag.TypeUnion.Flags, u.flagsPtr(ip), .unordered); | |
} | |
pub fn setStatus(u: LoadedUnionType, ip: *InternPool, status: Status) void { | |
const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
const flags_ptr = u.flagsPtr(ip); | |
var flags = flags_ptr.*; | |
flags.status = status; | |
@atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release); | |
} | |
pub fn setStatusIfLayoutWip(u: LoadedUnionType, ip: *InternPool, status: Status) void { | |
const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
const flags_ptr = u.flagsPtr(ip); | |
var flags = flags_ptr.*; | |
if (flags.status == .layout_wip) flags.status = status; | |
@atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release); | |
} | |
pub fn setAlignment(u: LoadedUnionType, ip: *InternPool, alignment: Alignment) void { | |
const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
const flags_ptr = u.flagsPtr(ip); | |
var flags = flags_ptr.*; | |
flags.alignment = alignment; | |
@atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release); | |
} | |
pub fn assumeRuntimeBitsIfFieldTypesWip(u: LoadedUnionType, ip: *InternPool) bool { | |
const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
const flags_ptr = u.flagsPtr(ip); | |
var flags = flags_ptr.*; | |
defer if (flags.status == .field_types_wip) { | |
flags.assumed_runtime_bits = true; | |
@atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release); | |
}; | |
return flags.status == .field_types_wip; | |
} | |
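// Standalone sketch of the defer-after-return pattern used above: the return
// operand is evaluated first, then the `defer` body runs, so the caller sees
// the state from before the update while the flag is still reliably set
// (names are illustrative):
//
// var wip: bool = true;
// var assumed: bool = false;
//
// fn assumeIfWipSketch() bool {
//     defer if (wip) {
//         assumed = true; // runs after the return operand below was computed
//     };
//     return wip;
// }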
pub fn requiresComptime(u: LoadedUnionType, ip: *const InternPool) RequiresComptime { | |
return u.flagsUnordered(ip).requires_comptime; | |
} | |
pub fn setRequiresComptimeWip(u: LoadedUnionType, ip: *InternPool) RequiresComptime { | |
const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
const flags_ptr = u.flagsPtr(ip); | |
var flags = flags_ptr.*; | |
defer if (flags.requires_comptime == .unknown) { | |
flags.requires_comptime = .wip; | |
@atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release); | |
}; | |
return flags.requires_comptime; | |
} | |
pub fn setRequiresComptime(u: LoadedUnionType, ip: *InternPool, requires_comptime: RequiresComptime) void { | |
assert(requires_comptime != .wip); // see setRequiresComptimeWip | |
const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
const flags_ptr = u.flagsPtr(ip); | |
var flags = flags_ptr.*; | |
flags.requires_comptime = requires_comptime; | |
@atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release); | |
} | |
pub fn assumePointerAlignedIfFieldTypesWip(u: LoadedUnionType, ip: *InternPool, ptr_align: Alignment) bool { | |
const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
const flags_ptr = u.flagsPtr(ip); | |
var flags = flags_ptr.*; | |
defer if (flags.status == .field_types_wip) { | |
flags.alignment = ptr_align; | |
flags.assumed_pointer_aligned = true; | |
@atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release); | |
}; | |
return flags.status == .field_types_wip; | |
} | |
/// The returned pointer expires with any addition to the `InternPool`. | |
fn sizePtr(self: LoadedUnionType, ip: *const InternPool) *u32 { | |
const extra = ip.getLocalShared(self.tid).extra.acquire(); | |
const field_index = std.meta.fieldIndex(Tag.TypeUnion, "size").?; | |
return &extra.view().items(.@"0")[self.extra_index + field_index]; | |
} | |
pub fn sizeUnordered(u: LoadedUnionType, ip: *const InternPool) u32 { | |
return @atomicLoad(u32, u.sizePtr(ip), .unordered); | |
} | |
/// The returned pointer expires with any addition to the `InternPool`. | |
fn paddingPtr(self: LoadedUnionType, ip: *const InternPool) *u32 { | |
const extra = ip.getLocalShared(self.tid).extra.acquire(); | |
const field_index = std.meta.fieldIndex(Tag.TypeUnion, "padding").?; | |
return &extra.view().items(.@"0")[self.extra_index + field_index]; | |
} | |
pub fn paddingUnordered(u: LoadedUnionType, ip: *const InternPool) u32 { | |
return @atomicLoad(u32, u.paddingPtr(ip), .unordered); | |
} | |
pub fn hasTag(self: LoadedUnionType, ip: *const InternPool) bool { | |
return self.flagsUnordered(ip).runtime_tag.hasTag(); | |
} | |
pub fn haveFieldTypes(self: LoadedUnionType, ip: *const InternPool) bool { | |
return self.flagsUnordered(ip).status.haveFieldTypes(); | |
} | |
pub fn haveLayout(self: LoadedUnionType, ip: *const InternPool) bool { | |
return self.flagsUnordered(ip).status.haveLayout(); | |
} | |
pub fn setHaveLayout(u: LoadedUnionType, ip: *InternPool, size: u32, padding: u32, alignment: Alignment) void { | |
const extra_mutex = &ip.getLocal(u.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
@atomicStore(u32, u.sizePtr(ip), size, .unordered); | |
@atomicStore(u32, u.paddingPtr(ip), padding, .unordered); | |
const flags_ptr = u.flagsPtr(ip); | |
var flags = flags_ptr.*; | |
flags.alignment = alignment; | |
flags.status = .have_layout; | |
@atomicStore(Tag.TypeUnion.Flags, flags_ptr, flags, .release); | |
} | |
pub fn fieldAlign(self: LoadedUnionType, ip: *const InternPool, field_index: usize) Alignment { | |
if (self.field_aligns.len == 0) return .none; | |
return self.field_aligns.get(ip)[field_index]; | |
} | |
/// This does not mutate the field of LoadedUnionType. | |
pub fn setZirIndex(self: LoadedUnionType, ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void { | |
const zir_index_field_index = std.meta.fieldIndex(Tag.TypeUnion, "zir_index").?; | |
// `LoadedUnionType` has no `flags_index` field; `extra_index` points at the | |
// start of the `Tag.TypeUnion` payload, so index the field directly. | |
const ptr: *TrackedInst.Index.Optional = | |
    @ptrCast(&ip.extra_.items[self.extra_index + zir_index_field_index]); | |
ptr.* = new_zir_index; | |
} | |
pub fn setFieldTypes(self: LoadedUnionType, ip: *const InternPool, types: []const Index) void { | |
@memcpy(self.field_types.get(ip), types); | |
} | |
pub fn setFieldAligns(self: LoadedUnionType, ip: *const InternPool, aligns: []const Alignment) void { | |
if (aligns.len == 0) return; | |
assert(self.flagsUnordered(ip).any_aligned_fields); | |
@memcpy(self.field_aligns.get(ip), aligns); | |
} | |
}; | |
pub fn loadUnionType(ip: *const InternPool, index: Index) LoadedUnionType { | |
const unwrapped_index = index.unwrap(ip); | |
const extra_list = unwrapped_index.getExtra(ip); | |
const data = unwrapped_index.getData(ip); | |
const type_union = extraDataTrail(extra_list, Tag.TypeUnion, data); | |
const fields_len = type_union.data.fields_len; | |
var extra_index = type_union.end; | |
const captures_len = if (type_union.data.flags.any_captures) c: { | |
const len = extra_list.view().items(.@"0")[extra_index]; | |
extra_index += 1; | |
break :c len; | |
} else 0; | |
const captures: CaptureValue.Slice = .{ | |
.tid = unwrapped_index.tid, | |
.start = extra_index, | |
.len = captures_len, | |
}; | |
extra_index += captures_len; | |
if (type_union.data.flags.is_reified) { | |
extra_index += 2; // PackedU64 | |
} | |
const field_types: Index.Slice = .{ | |
.tid = unwrapped_index.tid, | |
.start = extra_index, | |
.len = fields_len, | |
}; | |
extra_index += fields_len; | |
const field_aligns = if (type_union.data.flags.any_aligned_fields) a: { | |
const a: Alignment.Slice = .{ | |
.tid = unwrapped_index.tid, | |
.start = extra_index, | |
.len = fields_len, | |
}; | |
extra_index += std.math.divCeil(u32, fields_len, 4) catch unreachable; | |
break :a a; | |
} else Alignment.Slice.empty; | |
return .{ | |
.tid = unwrapped_index.tid, | |
.extra_index = data, | |
.name = type_union.data.name, | |
.namespace = type_union.data.namespace, | |
.enum_tag_ty = type_union.data.tag_ty, | |
.field_types = field_types, | |
.field_aligns = field_aligns, | |
.zir_index = type_union.data.zir_index, | |
.captures = captures, | |
}; | |
} | |
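// Illustrative summary of the variable-length trailing data decoded above, in
// the order it follows the fixed `Tag.TypeUnion` fields in `extra`:
//
//   [captures_len: u32]                          -- only if flags.any_captures
//   [captures: captures_len x u32]
//   [type_hash: PackedU64, i.e. 2 x u32]         -- only if flags.is_reified
//   [field_types: fields_len x u32]
//   [field_aligns: ceil(fields_len / 4) x u32]   -- only if flags.any_aligned_fields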
pub const LoadedStructType = struct { | |
tid: Zcu.PerThread.Id, | |
/// The index of the `Tag.TypeStruct` or `Tag.TypeStructPacked` payload. | |
extra_index: u32, | |
// TODO: the non-fqn will be needed by the new dwarf structure | |
/// The name of this struct type. | |
name: NullTerminatedString, | |
namespace: NamespaceIndex, | |
/// Index of the `struct_decl` or `reify` ZIR instruction. | |
zir_index: TrackedInst.Index, | |
layout: std.builtin.Type.ContainerLayout, | |
field_names: NullTerminatedString.Slice, | |
field_types: Index.Slice, | |
field_inits: Index.Slice, | |
field_aligns: Alignment.Slice, | |
runtime_order: RuntimeOrder.Slice, | |
comptime_bits: ComptimeBits, | |
offsets: Offsets, | |
names_map: OptionalMapIndex, | |
captures: CaptureValue.Slice, | |
pub const ComptimeBits = struct { | |
tid: Zcu.PerThread.Id, | |
start: u32, | |
/// This is the number of u32 elements, not the number of struct fields. | |
len: u32, | |
pub const empty: ComptimeBits = .{ .tid = .main, .start = 0, .len = 0 }; | |
pub fn get(this: ComptimeBits, ip: *const InternPool) []u32 { | |
const extra = ip.getLocalShared(this.tid).extra.acquire(); | |
return extra.view().items(.@"0")[this.start..][0..this.len]; | |
} | |
pub fn getBit(this: ComptimeBits, ip: *const InternPool, i: usize) bool { | |
if (this.len == 0) return false; | |
return @as(u1, @truncate(this.get(ip)[i / 32] >> @intCast(i % 32))) != 0; | |
} | |
pub fn setBit(this: ComptimeBits, ip: *const InternPool, i: usize) void { | |
this.get(ip)[i / 32] |= @as(u32, 1) << @intCast(i % 32); | |
} | |
pub fn clearBit(this: ComptimeBits, ip: *const InternPool, i: usize) void { | |
this.get(ip)[i / 32] &= ~(@as(u32, 1) << @intCast(i % 32)); | |
} | |
}; | |
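// Standalone sketch of the bit addressing used by `ComptimeBits` above: field
// `i` lives at bit `i % 32` of word `i / 32` (operates on a plain `[]u32`; the
// function names are illustrative):
//
// fn getBitSketch(words: []const u32, i: usize) bool {
//     return @as(u1, @truncate(words[i / 32] >> @intCast(i % 32))) != 0;
// }
//
// fn setBitSketch(words: []u32, i: usize) void {
//     words[i / 32] |= @as(u32, 1) << @intCast(i % 32);
// }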
pub const Offsets = struct { | |
tid: Zcu.PerThread.Id, | |
start: u32, | |
len: u32, | |
pub const empty: Offsets = .{ .tid = .main, .start = 0, .len = 0 }; | |
pub fn get(this: Offsets, ip: *const InternPool) []u32 { | |
const extra = ip.getLocalShared(this.tid).extra.acquire(); | |
return @ptrCast(extra.view().items(.@"0")[this.start..][0..this.len]); | |
} | |
}; | |
pub const RuntimeOrder = enum(u32) { | |
/// Placeholder until layout is resolved. | |
unresolved = std.math.maxInt(u32) - 0, | |
/// Field not present at runtime. | |
omitted = std.math.maxInt(u32) - 1, | |
_, | |
pub const Slice = struct { | |
tid: Zcu.PerThread.Id, | |
start: u32, | |
len: u32, | |
pub const empty: Slice = .{ .tid = .main, .start = 0, .len = 0 }; | |
pub fn get(slice: RuntimeOrder.Slice, ip: *const InternPool) []RuntimeOrder { | |
const extra = ip.getLocalShared(slice.tid).extra.acquire(); | |
return @ptrCast(extra.view().items(.@"0")[slice.start..][0..slice.len]); | |
} | |
}; | |
pub fn toInt(i: RuntimeOrder) ?u32 { | |
return switch (i) { | |
.omitted => null, | |
.unresolved => unreachable, | |
else => @intFromEnum(i), | |
}; | |
} | |
}; | |
/// Look up field index based on field name. | |
pub fn nameIndex(s: LoadedStructType, ip: *const InternPool, name: NullTerminatedString) ?u32 { | |
const names_map = s.names_map.unwrap() orelse { | |
const i = name.toUnsigned(ip) orelse return null; | |
if (i >= s.field_types.len) return null; | |
return i; | |
}; | |
const map = names_map.get(ip); | |
const adapter: NullTerminatedString.Adapter = .{ .strings = s.field_names.get(ip) }; | |
const field_index = map.getIndexAdapted(name, adapter) orelse return null; | |
return @intCast(field_index); | |
} | |
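// Illustrative note: when `names_map` is not present the struct is a tuple, so
// a field "name" is just its decimal index; e.g. looking up "1" on a tuple
// with at least two fields yields 1, and anything non-numeric or out of range
// yields null.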
/// Returns the already-existing field with the same name, if any. | |
pub fn addFieldName( | |
s: LoadedStructType, | |
ip: *InternPool, | |
name: NullTerminatedString, | |
) ?u32 { | |
const extra = ip.getLocalShared(s.tid).extra.acquire(); | |
return ip.addFieldName(extra, s.names_map.unwrap().?, s.field_names.start, name); | |
} | |
pub fn fieldAlign(s: LoadedStructType, ip: *const InternPool, i: usize) Alignment { | |
if (s.field_aligns.len == 0) return .none; | |
return s.field_aligns.get(ip)[i]; | |
} | |
pub fn fieldInit(s: LoadedStructType, ip: *const InternPool, i: usize) Index { | |
if (s.field_inits.len == 0) return .none; | |
assert(s.haveFieldInits(ip)); | |
return s.field_inits.get(ip)[i]; | |
} | |
/// Returns `none` in the case the struct is a tuple. | |
pub fn fieldName(s: LoadedStructType, ip: *const InternPool, i: usize) OptionalNullTerminatedString { | |
if (s.field_names.len == 0) return .none; | |
return s.field_names.get(ip)[i].toOptional(); | |
} | |
pub fn fieldIsComptime(s: LoadedStructType, ip: *const InternPool, i: usize) bool { | |
return s.comptime_bits.getBit(ip, i); | |
} | |
pub fn setFieldComptime(s: LoadedStructType, ip: *InternPool, i: usize) void { | |
s.comptime_bits.setBit(ip, i); | |
} | |
/// The returned pointer expires with any addition to the `InternPool`. | |
/// Asserts the struct is not packed. | |
fn flagsPtr(s: LoadedStructType, ip: *const InternPool) *Tag.TypeStruct.Flags { | |
assert(s.layout != .@"packed"); | |
const extra = ip.getLocalShared(s.tid).extra.acquire(); | |
const flags_field_index = std.meta.fieldIndex(Tag.TypeStruct, "flags").?; | |
return @ptrCast(&extra.view().items(.@"0")[s.extra_index + flags_field_index]); | |
} | |
pub fn flagsUnordered(s: LoadedStructType, ip: *const InternPool) Tag.TypeStruct.Flags { | |
return @atomicLoad(Tag.TypeStruct.Flags, s.flagsPtr(ip), .unordered); | |
} | |
/// The returned pointer expires with any addition to the `InternPool`. | |
/// Asserts that the struct is packed. | |
fn packedFlagsPtr(s: LoadedStructType, ip: *const InternPool) *Tag.TypeStructPacked.Flags { | |
assert(s.layout == .@"packed"); | |
const extra = ip.getLocalShared(s.tid).extra.acquire(); | |
const flags_field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?; | |
return @ptrCast(&extra.view().items(.@"0")[s.extra_index + flags_field_index]); | |
} | |
pub fn packedFlagsUnordered(s: LoadedStructType, ip: *const InternPool) Tag.TypeStructPacked.Flags { | |
return @atomicLoad(Tag.TypeStructPacked.Flags, s.packedFlagsPtr(ip), .unordered); | |
} | |
/// Reads the non-opv ("not one possible value") flag calculated during AstGen. | |
/// Used to short-circuit more | |
/// complicated logic. | |
pub fn knownNonOpv(s: LoadedStructType, ip: *const InternPool) bool { | |
return switch (s.layout) { | |
.@"packed" => false, | |
.auto, .@"extern" => s.flagsUnordered(ip).known_non_opv, | |
}; | |
} | |
pub fn requiresComptime(s: LoadedStructType, ip: *const InternPool) RequiresComptime { | |
return s.flagsUnordered(ip).requires_comptime; | |
} | |
pub fn setRequiresComptimeWip(s: LoadedStructType, ip: *InternPool) RequiresComptime { | |
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
const flags_ptr = s.flagsPtr(ip); | |
var flags = flags_ptr.*; | |
defer if (flags.requires_comptime == .unknown) { | |
flags.requires_comptime = .wip; | |
@atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); | |
}; | |
return flags.requires_comptime; | |
} | |
pub fn setRequiresComptime(s: LoadedStructType, ip: *InternPool, requires_comptime: RequiresComptime) void { | |
assert(requires_comptime != .wip); // see setRequiresComptimeWip | |
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
const flags_ptr = s.flagsPtr(ip); | |
var flags = flags_ptr.*; | |
flags.requires_comptime = requires_comptime; | |
@atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); | |
} | |
pub fn assumeRuntimeBitsIfFieldTypesWip(s: LoadedStructType, ip: *InternPool) bool { | |
if (s.layout == .@"packed") return false; | |
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
const flags_ptr = s.flagsPtr(ip); | |
var flags = flags_ptr.*; | |
defer if (flags.field_types_wip) { | |
flags.assumed_runtime_bits = true; | |
@atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); | |
}; | |
return flags.field_types_wip; | |
} | |
pub fn setFieldTypesWip(s: LoadedStructType, ip: *InternPool) bool { | |
if (s.layout == .@"packed") return false; | |
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
const flags_ptr = s.flagsPtr(ip); | |
var flags = flags_ptr.*; | |
defer { | |
flags.field_types_wip = true; | |
@atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); | |
} | |
return flags.field_types_wip; | |
} | |
pub fn clearFieldTypesWip(s: LoadedStructType, ip: *InternPool) void { | |
if (s.layout == .@"packed") return; | |
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
const flags_ptr = s.flagsPtr(ip); | |
var flags = flags_ptr.*; | |
flags.field_types_wip = false; | |
@atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); | |
} | |
pub fn setLayoutWip(s: LoadedStructType, ip: *InternPool) bool { | |
if (s.layout == .@"packed") return false; | |
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
const flags_ptr = s.flagsPtr(ip); | |
var flags = flags_ptr.*; | |
defer { | |
flags.layout_wip = true; | |
@atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); | |
} | |
return flags.layout_wip; | |
} | |
pub fn clearLayoutWip(s: LoadedStructType, ip: *InternPool) void { | |
if (s.layout == .@"packed") return; | |
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
const flags_ptr = s.flagsPtr(ip); | |
var flags = flags_ptr.*; | |
flags.layout_wip = false; | |
@atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); | |
} | |
pub fn setAlignment(s: LoadedStructType, ip: *InternPool, alignment: Alignment) void { | |
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
const flags_ptr = s.flagsPtr(ip); | |
var flags = flags_ptr.*; | |
flags.alignment = alignment; | |
@atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); | |
} | |
pub fn assumePointerAlignedIfFieldTypesWip(s: LoadedStructType, ip: *InternPool, ptr_align: Alignment) bool { | |
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
const flags_ptr = s.flagsPtr(ip); | |
var flags = flags_ptr.*; | |
defer if (flags.field_types_wip) { | |
flags.alignment = ptr_align; | |
flags.assumed_pointer_aligned = true; | |
@atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); | |
}; | |
return flags.field_types_wip; | |
} | |
pub fn assumePointerAlignedIfWip(s: LoadedStructType, ip: *InternPool, ptr_align: Alignment) bool { | |
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
const flags_ptr = s.flagsPtr(ip); | |
var flags = flags_ptr.*; | |
defer { | |
if (flags.alignment_wip) { | |
flags.alignment = ptr_align; | |
flags.assumed_pointer_aligned = true; | |
} else flags.alignment_wip = true; | |
@atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); | |
} | |
return flags.alignment_wip; | |
} | |
pub fn clearAlignmentWip(s: LoadedStructType, ip: *InternPool) void { | |
if (s.layout == .@"packed") return; | |
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
const flags_ptr = s.flagsPtr(ip); | |
var flags = flags_ptr.*; | |
flags.alignment_wip = false; | |
@atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); | |
} | |
pub fn setInitsWip(s: LoadedStructType, ip: *InternPool) bool { | |
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
switch (s.layout) { | |
.@"packed" => { | |
const flags_ptr = s.packedFlagsPtr(ip); | |
var flags = flags_ptr.*; | |
defer { | |
flags.field_inits_wip = true; | |
@atomicStore(Tag.TypeStructPacked.Flags, flags_ptr, flags, .release); | |
} | |
return flags.field_inits_wip; | |
}, | |
.auto, .@"extern" => { | |
const flags_ptr = s.flagsPtr(ip); | |
var flags = flags_ptr.*; | |
defer { | |
flags.field_inits_wip = true; | |
@atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); | |
} | |
return flags.field_inits_wip; | |
}, | |
} | |
} | |
pub fn clearInitsWip(s: LoadedStructType, ip: *InternPool) void { | |
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
switch (s.layout) { | |
.@"packed" => { | |
const flags_ptr = s.packedFlagsPtr(ip); | |
var flags = flags_ptr.*; | |
flags.field_inits_wip = false; | |
@atomicStore(Tag.TypeStructPacked.Flags, flags_ptr, flags, .release); | |
}, | |
.auto, .@"extern" => { | |
const flags_ptr = s.flagsPtr(ip); | |
var flags = flags_ptr.*; | |
flags.field_inits_wip = false; | |
@atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); | |
}, | |
} | |
} | |
pub fn setFullyResolved(s: LoadedStructType, ip: *InternPool) bool { | |
if (s.layout == .@"packed") return true; | |
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
const flags_ptr = s.flagsPtr(ip); | |
var flags = flags_ptr.*; | |
defer { | |
flags.fully_resolved = true; | |
@atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); | |
} | |
return flags.fully_resolved; | |
} | |
pub fn clearFullyResolved(s: LoadedStructType, ip: *InternPool) void { | |
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
const flags_ptr = s.flagsPtr(ip); | |
var flags = flags_ptr.*; | |
flags.fully_resolved = false; | |
@atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); | |
} | |
/// The returned pointer expires with any addition to the `InternPool`. | |
/// Asserts the struct is not packed. | |
fn sizePtr(s: LoadedStructType, ip: *const InternPool) *u32 { | |
assert(s.layout != .@"packed"); | |
const extra = ip.getLocalShared(s.tid).extra.acquire(); | |
const size_field_index = std.meta.fieldIndex(Tag.TypeStruct, "size").?; | |
return @ptrCast(&extra.view().items(.@"0")[s.extra_index + size_field_index]); | |
} | |
pub fn sizeUnordered(s: LoadedStructType, ip: *const InternPool) u32 { | |
return @atomicLoad(u32, s.sizePtr(ip), .unordered); | |
} | |
/// The backing integer type of the packed struct. Whether Zig chooses | |
/// this type or the user specifies it, it is stored here. This will be | |
/// set to `none` until the layout is resolved. | |
/// Asserts the struct is packed. | |
fn backingIntTypePtr(s: LoadedStructType, ip: *const InternPool) *Index { | |
assert(s.layout == .@"packed"); | |
const extra = ip.getLocalShared(s.tid).extra.acquire(); | |
const field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "backing_int_ty").?; | |
return @ptrCast(&extra.view().items(.@"0")[s.extra_index + field_index]); | |
} | |
pub fn backingIntTypeUnordered(s: LoadedStructType, ip: *const InternPool) Index { | |
return @atomicLoad(Index, s.backingIntTypePtr(ip), .unordered); | |
} | |
pub fn setBackingIntType(s: LoadedStructType, ip: *InternPool, backing_int_ty: Index) void { | |
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
@atomicStore(Index, s.backingIntTypePtr(ip), backing_int_ty, .release); | |
} | |
/// Asserts the struct is not packed. | |
pub fn setZirIndex(s: LoadedStructType, ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void { | |
assert(s.layout != .@"packed"); | |
const field_index = std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?; | |
ip.extra_.items[s.extra_index + field_index] = @intFromEnum(new_zir_index); | |
} | |
pub fn haveFieldTypes(s: LoadedStructType, ip: *const InternPool) bool { | |
const types = s.field_types.get(ip); | |
return types.len == 0 or types[0] != .none; | |
} | |
pub fn haveFieldInits(s: LoadedStructType, ip: *const InternPool) bool { | |
return switch (s.layout) { | |
.@"packed" => s.packedFlagsUnordered(ip).inits_resolved, | |
.auto, .@"extern" => s.flagsUnordered(ip).inits_resolved, | |
}; | |
} | |
pub fn setHaveFieldInits(s: LoadedStructType, ip: *InternPool) void { | |
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
switch (s.layout) { | |
.@"packed" => { | |
const flags_ptr = s.packedFlagsPtr(ip); | |
var flags = flags_ptr.*; | |
flags.inits_resolved = true; | |
@atomicStore(Tag.TypeStructPacked.Flags, flags_ptr, flags, .release); | |
}, | |
.auto, .@"extern" => { | |
const flags_ptr = s.flagsPtr(ip); | |
var flags = flags_ptr.*; | |
flags.inits_resolved = true; | |
@atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); | |
}, | |
} | |
} | |
pub fn haveLayout(s: LoadedStructType, ip: *const InternPool) bool { | |
return switch (s.layout) { | |
.@"packed" => s.backingIntTypeUnordered(ip) != .none, | |
.auto, .@"extern" => s.flagsUnordered(ip).layout_resolved, | |
}; | |
} | |
pub fn setLayoutResolved(s: LoadedStructType, ip: *InternPool, size: u32, alignment: Alignment) void { | |
const extra_mutex = &ip.getLocal(s.tid).mutate.extra.mutex; | |
extra_mutex.lock(); | |
defer extra_mutex.unlock(); | |
@atomicStore(u32, s.sizePtr(ip), size, .unordered); | |
const flags_ptr = s.flagsPtr(ip); | |
var flags = flags_ptr.*; | |
flags.alignment = alignment; | |
flags.layout_resolved = true; | |
@atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); | |
} | |
pub fn hasReorderedFields(s: LoadedStructType) bool { | |
return s.layout == .auto; | |
} | |
pub const RuntimeOrderIterator = struct { | |
ip: *InternPool, | |
field_index: u32, | |
struct_type: InternPool.LoadedStructType, | |
pub fn next(it: *@This()) ?u32 { | |
var i = it.field_index; | |
if (i >= it.struct_type.field_types.len) | |
return null; | |
if (it.struct_type.hasReorderedFields()) { | |
it.field_index += 1; | |
return it.struct_type.runtime_order.get(it.ip)[i].toInt(); | |
} | |
while (it.struct_type.fieldIsComptime(it.ip, i)) { | |
i += 1; | |
if (i >= it.struct_type.field_types.len) | |
return null; | |
} | |
it.field_index = i + 1; | |
return i; | |
} | |
}; | |
/// Iterates over non-comptime fields in the order they are laid out in memory at runtime. | |
/// May or may not include zero-bit fields. | |
/// Asserts the struct is not packed. | |
pub fn iterateRuntimeOrder(s: LoadedStructType, ip: *InternPool) RuntimeOrderIterator { | |
assert(s.layout != .@"packed"); | |
return .{ | |
.ip = ip, | |
.field_index = 0, | |
.struct_type = s, | |
}; | |
} | |
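// Illustrative usage sketch (hypothetical caller; `struct_type` and `ip`
// assumed in scope):
//
// var it = struct_type.iterateRuntimeOrder(ip);
// while (it.next()) |field_index| {
//     // visit the field with declaration index `field_index`
// }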
pub const ReverseRuntimeOrderIterator = struct { | |
ip: *InternPool, | |
last_index: u32, | |
struct_type: InternPool.LoadedStructType, | |
pub fn next(it: *@This()) ?u32 { | |
if (it.last_index == 0) | |
return null; | |
if (it.struct_type.hasReorderedFields()) { | |
it.last_index -= 1; | |
const order = it.struct_type.runtime_order.get(it.ip); | |
while (order[it.last_index] == .omitted) { | |
it.last_index -= 1; | |
if (it.last_index == 0) | |
return null; | |
} | |
return order[it.last_index].toInt(); | |
} | |
it.last_index -= 1; | |
while (it.struct_type.fieldIsComptime(it.ip, it.last_index)) { | |
it.last_index -= 1; | |
if (it.last_index == 0) | |
return null; | |
} | |
return it.last_index; | |
} | |
}; | |
pub fn iterateRuntimeOrderReverse(s: LoadedStructType, ip: *InternPool) ReverseRuntimeOrderIterator { | |
assert(s.layout != .@"packed"); | |
return .{ | |
.ip = ip, | |
.last_index = s.field_types.len, | |
.struct_type = s, | |
}; | |
} | |
}; | |
pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { | |
const unwrapped_index = index.unwrap(ip); | |
const extra_list = unwrapped_index.getExtra(ip); | |
const extra_items = extra_list.view().items(.@"0"); | |
const item = unwrapped_index.getItem(ip); | |
switch (item.tag) { | |
.type_struct => { | |
const name: NullTerminatedString = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "name").?]); | |
const namespace: NamespaceIndex = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "namespace").?]); | |
const zir_index: TrackedInst.Index = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?]); | |
const fields_len = extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "fields_len").?]; | |
const flags: Tag.TypeStruct.Flags = @bitCast(@atomicLoad(u32, &extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "flags").?], .unordered)); | |
var extra_index = item.data + @as(u32, @typeInfo(Tag.TypeStruct).@"struct".fields.len); | |
const captures_len = if (flags.any_captures) c: { | |
const len = extra_list.view().items(.@"0")[extra_index]; | |
extra_index += 1; | |
break :c len; | |
} else 0; | |
const captures: CaptureValue.Slice = .{ | |
.tid = unwrapped_index.tid, | |
.start = extra_index, | |
.len = captures_len, | |
}; | |
extra_index += captures_len; | |
if (flags.is_reified) { | |
extra_index += 2; // type_hash: PackedU64 | |
} | |
const field_types: Index.Slice = .{ | |
.tid = unwrapped_index.tid, | |
.start = extra_index, | |
.len = fields_len, | |
}; | |
extra_index += fields_len; | |
const names_map: OptionalMapIndex, const names = n: { | |
const names_map: OptionalMapIndex = @enumFromInt(extra_list.view().items(.@"0")[extra_index]); | |
extra_index += 1; | |
const names: NullTerminatedString.Slice = .{ | |
.tid = unwrapped_index.tid, | |
.start = extra_index, | |
.len = fields_len, | |
}; | |
extra_index += fields_len; | |
break :n .{ names_map, names }; | |
}; | |
const inits: Index.Slice = if (flags.any_default_inits) i: { | |
const inits: Index.Slice = .{ | |
.tid = unwrapped_index.tid, | |
.start = extra_index, | |
.len = fields_len, | |
}; | |
extra_index += fields_len; | |
break :i inits; | |
} else Index.Slice.empty; | |
const aligns: Alignment.Slice = if (flags.any_aligned_fields) a: { | |
const a: Alignment.Slice = .{ | |
.tid = unwrapped_index.tid, | |
.start = extra_index, | |
.len = fields_len, | |
}; | |
extra_index += std.math.divCeil(u32, fields_len, 4) catch unreachable; | |
break :a a; | |
} else Alignment.Slice.empty; | |
const comptime_bits: LoadedStructType.ComptimeBits = if (flags.any_comptime_fields) c: { | |
const len = std.math.divCeil(u32, fields_len, 32) catch unreachable; | |
const c: LoadedStructType.ComptimeBits = .{ | |
.tid = unwrapped_index.tid, | |
.start = extra_index, | |
.len = len, | |
}; | |
extra_index += len; | |
break :c c; | |
} else LoadedStructType.ComptimeBits.empty; | |
const runtime_order: LoadedStructType.RuntimeOrder.Slice = if (!flags.is_extern) ro: { | |
const ro: LoadedStructType.RuntimeOrder.Slice = .{ | |
.tid = unwrapped_index.tid, | |
.start = extra_index, | |
.len = fields_len, | |
}; | |
extra_index += fields_len; | |
break :ro ro; | |
} else LoadedStructType.RuntimeOrder.Slice.empty; | |
const offsets: LoadedStructType.Offsets = o: { | |
const o: LoadedStructType.Offsets = .{ | |
.tid = unwrapped_index.tid, | |
.start = extra_index, | |
.len = fields_len, | |
}; | |
extra_index += fields_len; | |
break :o o; | |
}; | |
return .{ | |
.tid = unwrapped_index.tid, | |
.extra_index = item.data, | |
.name = name, | |
.namespace = namespace, | |
.zir_index = zir_index, | |
.layout = if (flags.is_extern) .@"extern" else .auto, | |
.field_names = names, | |
.field_types = field_types, | |
.field_inits = inits, | |
.field_aligns = aligns, | |
.runtime_order = runtime_order, | |
.comptime_bits = comptime_bits, | |
.offsets = offsets, | |
.names_map = names_map, | |
.captures = captures, | |
}; | |
}, | |
.type_struct_packed, .type_struct_packed_inits => { | |
const name: NullTerminatedString = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "name").?]); | |
const zir_index: TrackedInst.Index = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "zir_index").?]); | |
const fields_len = extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "fields_len").?]; | |
const namespace: NamespaceIndex = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "namespace").?]); | |
const names_map: MapIndex = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "names_map").?]); | |
const flags: Tag.TypeStructPacked.Flags = @bitCast(@atomicLoad(u32, &extra_items[item.data + std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?], .unordered)); | |
var extra_index = item.data + @as(u32, @typeInfo(Tag.TypeStructPacked).@"struct".fields.len); | |
const has_inits = item.tag == .type_struct_packed_inits; | |
const captures_len = if (flags.any_captures) c: { | |
const len = extra_list.view().items(.@"0")[extra_index]; | |
extra_index += 1; | |
break :c len; | |
} else 0; | |
const captures: CaptureValue.Slice = .{ | |
.tid = unwrapped_index.tid, | |
.start = extra_index, | |
.len = captures_len, | |
}; | |
extra_index += captures_len; | |
if (flags.is_reified) { | |
extra_index += 2; // type_hash: PackedU64 | |

} | |
const field_types: Index.Slice = .{ | |
.tid = unwrapped_index.tid, | |
.start = extra_index, | |
.len = fields_len, | |
}; | |
extra_index += fields_len; | |
const field_names: NullTerminatedString.Slice = .{ | |
.tid = unwrapped_index.tid, | |
.start = extra_index, | |
.len = fields_len, | |
}; | |
extra_index += fields_len; | |
const field_inits: Index.Slice = if (has_inits) inits: { | |
const i: Index.Slice = .{ | |
.tid = unwrapped_index.tid, | |
.start = extra_index, | |
.len = fields_len, | |
}; | |
extra_index += fields_len; | |
break :inits i; | |
} else Index.Slice.empty; | |
return .{ | |
.tid = unwrapped_index.tid, | |
.extra_index = item.data, | |
.name = name, | |
.namespace = namespace, | |
.zir_index = zir_index, | |
.layout = .@"packed", | |
.field_names = field_names, | |
.field_types = field_types, | |
.field_inits = field_inits, | |
.field_aligns = Alignment.Slice.empty, | |
.runtime_order = LoadedStructType.RuntimeOrder.Slice.empty, | |
.comptime_bits = LoadedStructType.ComptimeBits.empty, | |
.offsets = LoadedStructType.Offsets.empty, | |
.names_map = names_map.toOptional(), | |
.captures = captures, | |
}; | |
}, | |
else => unreachable, | |
} | |
} | |
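// Decoding sketch: both branches above walk the variable-length trailing data | |
// in exactly the order it was written (optional captures_len, captures, | |
// optional type_hash, then the per-field arrays), bumping `extra_index` past | |
// each section the flags say is present. E.g. with `any_captures` and | |
// `is_reified` both false, the first field type of a `type_struct` lives at: | |
// | |
//     const header_len = @as(u32, @typeInfo(Tag.TypeStruct).@"struct".fields.len); | |
//     const first_field_type = extra_items[item.data + header_len]; | |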
pub const LoadedEnumType = struct { | |
// TODO: the non-fqn will be needed by the new dwarf structure | |
/// The name of this enum type. | |
name: NullTerminatedString, | |
/// Represents the declarations inside this enum. | |
namespace: NamespaceIndex, | |
/// An integer type which is used for the numerical value of the enum. | |
/// This field is present regardless of whether the enum has an | |
/// explicitly provided tag type or is auto-numbered. | |
tag_ty: Index, | |
/// Set of field names in declaration order. | |
names: NullTerminatedString.Slice, | |
/// Maps integer tag value to field index. | |
/// Entries are in declaration order, same as `names`. | |
/// If this is empty, it means the enum tags are auto-numbered. | |
values: Index.Slice, | |
tag_mode: TagMode, | |
names_map: MapIndex, | |
/// This is guaranteed to not be `.none` if explicit values are provided. | |
values_map: OptionalMapIndex, | |
/// This is `none` only if this is a generated tag type. | |
zir_index: TrackedInst.Index.Optional, | |
captures: CaptureValue.Slice, | |
pub const TagMode = enum { | |
/// The integer tag type was auto-numbered by Zig. | |
auto, | |
/// The integer tag type was provided by the enum declaration, and the enum | |
/// is exhaustive. | |
explicit, | |
/// The integer tag type was provided by the enum declaration, and the enum | |
/// is non-exhaustive. | |
nonexhaustive, | |
}; | |
/// Look up field index based on field name. | |
pub fn nameIndex(self: LoadedEnumType, ip: *const InternPool, name: NullTerminatedString) ?u32 { | |
const map = self.names_map.get(ip); | |
const adapter: NullTerminatedString.Adapter = .{ .strings = self.names.get(ip) }; | |
const field_index = map.getIndexAdapted(name, adapter) orelse return null; | |
return @intCast(field_index); | |
} | |
/// Look up field index based on tag value. | |
/// Asserts that `values_map` is not `none`. | |
/// This function returns `null` when `tag_val` does not have the | |
/// integer tag type of the enum. | |
pub fn tagValueIndex(self: LoadedEnumType, ip: *const InternPool, tag_val: Index) ?u32 { | |
assert(tag_val != .none); | |
// TODO: we should probably decide a single interface for this function, but currently | |
// it's being called with both tag values and underlying ints. Fix this! | |
const int_tag_val = switch (ip.indexToKey(tag_val)) { | |
.enum_tag => |enum_tag| enum_tag.int, | |
.int => tag_val, | |
else => unreachable, | |
}; | |
if (self.values_map.unwrap()) |values_map| { | |
const map = values_map.get(ip); | |
const adapter: Index.Adapter = .{ .indexes = self.values.get(ip) }; | |
const field_index = map.getIndexAdapted(int_tag_val, adapter) orelse return null; | |
return @intCast(field_index); | |
} | |
// Auto-numbered enum. Convert `int_tag_val` to field index. | |
const field_index = switch (ip.indexToKey(int_tag_val).int.storage) { | |
inline .u64, .i64 => |x| std.math.cast(u32, x) orelse return null, | |
.big_int => |x| x.to(u32) catch return null, | |
.lazy_align, .lazy_size => unreachable, | |
}; | |
return if (field_index < self.names.len) field_index else null; | |
} | |
}; | |
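// Lookup sketch (assumes `ip: *const InternPool`, an enum type index `ty`, and | |
// an interned field name `name: NullTerminatedString`): | |
// | |
//     const enum_type = ip.loadEnumType(ty); | |
//     if (enum_type.nameIndex(ip, name)) |field_index| { | |
//         // `field_index` is the declaration-order index of the matching field. | |
//         _ = field_index; | |
//     } | |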
pub fn loadEnumType(ip: *const InternPool, index: Index) LoadedEnumType { | |
const unwrapped_index = index.unwrap(ip); | |
const extra_list = unwrapped_index.getExtra(ip); | |
const item = unwrapped_index.getItem(ip); | |
const tag_mode: LoadedEnumType.TagMode = switch (item.tag) { | |
.type_enum_auto => { | |
const extra = extraDataTrail(extra_list, EnumAuto, item.data); | |
var extra_index: u32 = @intCast(extra.end); | |
if (extra.data.zir_index == .none) { | |
extra_index += 1; // owner_union | |
} | |
const captures_len = if (extra.data.captures_len == std.math.maxInt(u32)) c: { | |
extra_index += 2; // type_hash: PackedU64 | |
break :c 0; | |
} else extra.data.captures_len; | |
return .{ | |
.name = extra.data.name, | |
.namespace = extra.data.namespace, | |
.tag_ty = extra.data.int_tag_type, | |
.names = .{ | |
.tid = unwrapped_index.tid, | |
.start = extra_index + captures_len, | |
.len = extra.data.fields_len, | |
}, | |
.values = Index.Slice.empty, | |
.tag_mode = .auto, | |
.names_map = extra.data.names_map, | |
.values_map = .none, | |
.zir_index = extra.data.zir_index, | |
.captures = .{ | |
.tid = unwrapped_index.tid, | |
.start = extra_index, | |
.len = captures_len, | |
}, | |
}; | |
}, | |
.type_enum_explicit => .explicit, | |
.type_enum_nonexhaustive => .nonexhaustive, | |
else => unreachable, | |
}; | |
const extra = extraDataTrail(extra_list, EnumExplicit, item.data); | |
var extra_index: u32 = @intCast(extra.end); | |
if (extra.data.zir_index == .none) { | |
extra_index += 1; // owner_union | |
} | |
const captures_len = if (extra.data.captures_len == std.math.maxInt(u32)) c: { | |
extra_index += 2; // type_hash: PackedU64 | |
break :c 0; | |
} else extra.data.captures_len; | |
return .{ | |
.name = extra.data.name, | |
.namespace = extra.data.namespace, | |
.tag_ty = extra.data.int_tag_type, | |
.names = .{ | |
.tid = unwrapped_index.tid, | |
.start = extra_index + captures_len, | |
.len = extra.data.fields_len, | |
}, | |
.values = .{ | |
.tid = unwrapped_index.tid, | |
.start = extra_index + captures_len + extra.data.fields_len, | |
.len = if (extra.data.values_map != .none) extra.data.fields_len else 0, | |
}, | |
.tag_mode = tag_mode, | |
.names_map = extra.data.names_map, | |
.values_map = extra.data.values_map, | |
.zir_index = extra.data.zir_index, | |
.captures = .{ | |
.tid = unwrapped_index.tid, | |
.start = extra_index, | |
.len = captures_len, | |
}, | |
}; | |
} | |
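// Note: `captures_len == std.math.maxInt(u32)` is used above as a sentinel | |
// meaning the type was reified, in which case a 64-bit `type_hash` (two u32s) | |
// trails instead of a capture list, and the loader reports zero captures. | |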
/// Note that this type doubles as the payload for `Tag.type_opaque`. | |
pub const LoadedOpaqueType = struct { | |
/// Contains the declarations inside this opaque. | |
namespace: NamespaceIndex, | |
// TODO: the non-fqn will be needed by the new dwarf structure | |
/// The name of this opaque type. | |
name: NullTerminatedString, | |
/// Index of the `opaque_decl` or `reify` instruction. | |
zir_index: TrackedInst.Index, | |
captures: CaptureValue.Slice, | |
}; | |
pub fn loadOpaqueType(ip: *const InternPool, index: Index) LoadedOpaqueType { | |
const unwrapped_index = index.unwrap(ip); | |
const item = unwrapped_index.getItem(ip); | |
assert(item.tag == .type_opaque); | |
const extra = extraDataTrail(unwrapped_index.getExtra(ip), Tag.TypeOpaque, item.data); | |
const captures_len = if (extra.data.captures_len == std.math.maxInt(u32)) | |
0 | |
else | |
extra.data.captures_len; | |
return .{ | |
.name = extra.data.name, | |
.namespace = extra.data.namespace, | |
.zir_index = extra.data.zir_index, | |
.captures = .{ | |
.tid = unwrapped_index.tid, | |
.start = extra.end, | |
.len = captures_len, | |
}, | |
}; | |
} | |
pub const Item = struct { | |
tag: Tag, | |
/// The doc comments on the respective Tag explain how to interpret this. | |
data: u32, | |
}; | |
/// An index into `map`: the canonical index of a `Value` | |
/// within this `InternPool`. The values are typed. | |
/// Two values which have the same type can be equality compared simply | |
/// by checking if their indexes are equal, provided they are both in | |
/// the same `InternPool`. | |
/// When adding a tag to this enum, consider adding a corresponding entry to | |
/// `primitives` in AstGen.zig. | |
pub const Index = enum(u32) { | |
pub const first_type: Index = .u0_type; | |
pub const last_type: Index = .empty_tuple_type; | |
pub const first_value: Index = .undef; | |
pub const last_value: Index = .empty_tuple; | |
u0_type, | |
i0_type, | |
u1_type, | |
u8_type, | |
i8_type, | |
u16_type, | |
i16_type, | |
u29_type, | |
u32_type, | |
i32_type, | |
u64_type, | |
i64_type, | |
u80_type, | |
u128_type, | |
i128_type, | |
usize_type, | |
isize_type, | |
c_char_type, | |
c_short_type, | |
c_ushort_type, | |
c_int_type, | |
c_uint_type, | |
c_long_type, | |
c_ulong_type, | |
c_longlong_type, | |
c_ulonglong_type, | |
c_longdouble_type, | |
f16_type, | |
f32_type, | |
f64_type, | |
f80_type, | |
f128_type, | |
anyopaque_type, | |
bool_type, | |
void_type, | |
type_type, | |
anyerror_type, | |
comptime_int_type, | |
comptime_float_type, | |
noreturn_type, | |
anyframe_type, | |
null_type, | |
undefined_type, | |
enum_literal_type, | |
manyptr_u8_type, | |
manyptr_const_u8_type, | |
manyptr_const_u8_sentinel_0_type, | |
single_const_pointer_to_comptime_int_type, | |
slice_const_u8_type, | |
slice_const_u8_sentinel_0_type, | |
vector_16_i8_type, | |
vector_32_i8_type, | |
vector_16_u8_type, | |
vector_32_u8_type, | |
vector_8_i16_type, | |
vector_16_i16_type, | |
vector_8_u16_type, | |
vector_16_u16_type, | |
vector_4_i32_type, | |
vector_8_i32_type, | |
vector_4_u32_type, | |
vector_8_u32_type, | |
vector_2_i64_type, | |
vector_4_i64_type, | |
vector_2_u64_type, | |
vector_4_u64_type, | |
vector_4_f16_type, | |
vector_8_f16_type, | |
vector_4_f32_type, | |
vector_8_f32_type, | |
vector_2_f64_type, | |
vector_4_f64_type, | |
optional_noreturn_type, | |
anyerror_void_error_union_type, | |
/// Used for the inferred error set of inline/comptime function calls. | |
adhoc_inferred_error_set_type, | |
/// Represents a type which is unknown. | |
/// This is used in functions to represent generic parameter/return types, and | |
/// during semantic analysis to represent unknown result types (i.e. where AstGen | |
/// thought we would have a result type, but we do not). | |
generic_poison_type, | |
/// `@TypeOf(.{})`; a tuple with zero elements. | |
/// This is not the same as `struct {}`, since that is a struct rather than a tuple. | |
empty_tuple_type, | |
/// `undefined` (untyped) | |
undef, | |
/// `0` (comptime_int) | |
zero, | |
/// `0` (usize) | |
zero_usize, | |
/// `0` (u8) | |
zero_u8, | |
/// `1` (comptime_int) | |
one, | |
/// `1` (usize) | |
one_usize, | |
/// `1` (u8) | |
one_u8, | |
/// `4` (u8) | |
four_u8, | |
/// `-1` (comptime_int) | |
negative_one, | |
/// `{}` | |
void_value, | |
/// `unreachable` (noreturn type) | |
unreachable_value, | |
/// `null` (untyped) | |
null_value, | |
/// `true` | |
bool_true, | |
/// `false` | |
bool_false, | |
/// `.{}` | |
empty_tuple, | |
/// Used by Air/Sema only. | |
none = std.math.maxInt(u32), | |
_, | |
/// An array of `Index` existing within the `extra` array. | |
/// This type exists to provide a struct with lifetime that is | |
/// not invalidated when items are added to the `InternPool`. | |
pub const Slice = struct { | |
tid: Zcu.PerThread.Id, | |
start: u32, | |
len: u32, | |
pub const empty: Slice = .{ .tid = .main, .start = 0, .len = 0 }; | |
pub fn get(slice: Slice, ip: *const InternPool) []Index { | |
const extra = ip.getLocalShared(slice.tid).extra.acquire(); | |
return @ptrCast(extra.view().items(.@"0")[slice.start..][0..slice.len]); | |
} | |
}; | |
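// Sketch: a `Slice` stores (tid, start, len) instead of a pointer so it | |
// survives reallocation of the per-thread `extra` list; it is resolved to a | |
// real slice only on demand, e.g.: | |
// | |
//     const types: []Index = struct_type.field_types.get(ip); | |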
/// Used for a map of `Index` values to the index within a list of `Index` values. | |
const Adapter = struct { | |
indexes: []const Index, | |
pub fn eql(ctx: @This(), a: Index, b_void: void, b_map_index: usize) bool { | |
_ = b_void; | |
return a == ctx.indexes[b_map_index]; | |
} | |
pub fn hash(ctx: @This(), a: Index) u32 { | |
_ = ctx; | |
return std.hash.uint32(@intFromEnum(a)); | |
} | |
}; | |
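// Sketch of an adapted lookup as done by `LoadedEnumType.tagValueIndex` above: | |
// the map stores no keys of its own; the adapter resolves a map slot to the | |
// parallel `indexes` array for comparison: | |
// | |
//     const adapter: Index.Adapter = .{ .indexes = self.values.get(ip) }; | |
//     const slot = map.getIndexAdapted(int_tag_val, adapter); | |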
const Unwrapped = struct { | |
tid: Zcu.PerThread.Id, | |
index: u32, | |
fn wrap(unwrapped: Unwrapped, ip: *const InternPool) Index { | |
assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask()); | |
assert(unwrapped.index <= ip.getIndexMask(u30)); | |
return @enumFromInt(@as(u32, @intFromEnum(unwrapped.tid)) << ip.tid_shift_30 | unwrapped.index); | |
} | |
pub fn getExtra(unwrapped: Unwrapped, ip: *const InternPool) Local.Extra { | |
return ip.getLocalShared(unwrapped.tid).extra.acquire(); | |
} | |
pub fn getItem(unwrapped: Unwrapped, ip: *const InternPool) Item { | |
const item_ptr = unwrapped.itemPtr(ip); | |
const tag = @atomicLoad(Tag, item_ptr.tag_ptr, .acquire); | |
return .{ .tag = tag, .data = item_ptr.data_ptr.* }; | |
} | |
pub fn getTag(unwrapped: Unwrapped, ip: *const InternPool) Tag { | |
const item_ptr = unwrapped.itemPtr(ip); | |
return @atomicLoad(Tag, item_ptr.tag_ptr, .acquire); | |
} | |
pub fn getData(unwrapped: Unwrapped, ip: *const InternPool) u32 { | |
return unwrapped.getItem(ip).data; | |
} | |
const ItemPtr = struct { | |
tag_ptr: *Tag, | |
data_ptr: *u32, | |
}; | |
fn itemPtr(unwrapped: Unwrapped, ip: *const InternPool) ItemPtr { | |
const slice = ip.getLocalShared(unwrapped.tid).items.acquire().view().slice(); | |
return .{ | |
.tag_ptr = &slice.items(.tag)[unwrapped.index], | |
.data_ptr = &slice.items(.data)[unwrapped.index], | |
}; | |
} | |
const debug_state = InternPool.debug_state; | |
}; | |
pub fn unwrap(index: Index, ip: *const InternPool) Unwrapped { | |
return if (single_threaded) .{ | |
.tid = .main, | |
.index = @intFromEnum(index), | |
} else .{ | |
.tid = @enumFromInt(@intFromEnum(index) >> ip.tid_shift_30 & ip.getTidMask()), | |
.index = @intFromEnum(index) & ip.getIndexMask(u30), | |
}; | |
} | |
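// Packing sketch: in multi-threaded builds the top bits of an `Index` carry | |
// the owning thread id and the low bits the per-thread item index. E.g. with | |
// `tid_shift_30 == 30` (room for 4 thread ids), tid 1 and item 5 pack to | |
// `(1 << 30) | 5 == 0x4000_0005`; `unwrap` recovers both by shift and mask. | |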
/// This function is used in the debugger pretty formatters in tools/ to fetch the | |
/// Tag to encoding mapping to facilitate fancy debug printing for this type. | |
fn dbHelper(self: *Index, tag_to_encoding_map: *struct { | |
const DataIsIndex = struct { data: Index }; | |
const DataIsExtraIndexOfEnumExplicit = struct { | |
const @"data.fields_len" = opaque {}; | |
data: *EnumExplicit, | |
@"trailing.names.len": *@"data.fields_len", | |
@"trailing.values.len": *@"data.fields_len", | |
trailing: struct { | |
names: []NullTerminatedString, | |
values: []Index, | |
}, | |
}; | |
const DataIsExtraIndexOfTypeTuple = struct { | |
const @"data.fields_len" = opaque {}; | |
data: *TypeTuple, | |
@"trailing.types.len": *@"data.fields_len", | |
@"trailing.values.len": *@"data.fields_len", | |
trailing: struct { | |
types: []Index, | |
values: []Index, | |
}, | |
}; | |
removed: void, | |
type_int_signed: struct { data: u32 }, | |
type_int_unsigned: struct { data: u32 }, | |
type_array_big: struct { data: *Array }, | |
type_array_small: struct { data: *Vector }, | |
type_vector: struct { data: *Vector }, | |
type_pointer: struct { data: *Tag.TypePointer }, | |
type_slice: DataIsIndex, | |
type_optional: DataIsIndex, | |
type_anyframe: DataIsIndex, | |
type_error_union: struct { data: *Key.ErrorUnionType }, | |
type_anyerror_union: DataIsIndex, | |
type_error_set: struct { | |
const @"data.names_len" = opaque {}; | |
data: *Tag.ErrorSet, | |
@"trailing.names.len": *@"data.names_len", | |
trailing: struct { names: []NullTerminatedString }, | |
}, | |
type_inferred_error_set: DataIsIndex, | |
type_enum_auto: struct { | |
const @"data.fields_len" = opaque {}; | |
data: *EnumAuto, | |
@"trailing.names.len": *@"data.fields_len", | |
trailing: struct { names: []NullTerminatedString }, | |
}, | |
type_enum_explicit: DataIsExtraIndexOfEnumExplicit, | |
type_enum_nonexhaustive: DataIsExtraIndexOfEnumExplicit, | |
simple_type: void, | |
type_opaque: struct { data: *Tag.TypeOpaque }, | |
type_struct: struct { data: *Tag.TypeStruct }, | |
type_struct_packed: struct { data: *Tag.TypeStructPacked }, | |
type_struct_packed_inits: struct { data: *Tag.TypeStructPacked }, | |
type_tuple: DataIsExtraIndexOfTypeTuple, | |
type_union: struct { data: *Tag.TypeUnion }, | |
type_function: struct { | |
const @"data.flags.has_comptime_bits" = opaque {}; | |
const @"data.flags.has_noalias_bits" = opaque {}; | |
const @"data.params_len" = opaque {}; | |
data: *Tag.TypeFunction, | |
@"trailing.comptime_bits.len": *@"data.flags.has_comptime_bits", | |
@"trailing.noalias_bits.len": *@"data.flags.has_noalias_bits", | |
@"trailing.param_types.len": *@"data.params_len", | |
trailing: struct { comptime_bits: []u32, noalias_bits: []u32, param_types: []Index }, | |
}, | |
undef: DataIsIndex, | |
simple_value: void, | |
ptr_nav: struct { data: *PtrNav }, | |
ptr_comptime_alloc: struct { data: *PtrComptimeAlloc }, | |
ptr_uav: struct { data: *PtrUav }, | |
ptr_uav_aligned: struct { data: *PtrUavAligned }, | |
ptr_comptime_field: struct { data: *PtrComptimeField }, | |
ptr_int: struct { data: *PtrInt }, | |
ptr_eu_payload: struct { data: *PtrBase }, | |
ptr_opt_payload: struct { data: *PtrBase }, | |
ptr_elem: struct { data: *PtrBaseIndex }, | |
ptr_field: struct { data: *PtrBaseIndex }, | |
ptr_slice: struct { data: *PtrSlice }, | |
opt_payload: struct { data: *Tag.TypeValue }, | |
opt_null: DataIsIndex, | |
int_u8: struct { data: u8 }, | |
int_u16: struct { data: u16 }, | |
int_u32: struct { data: u32 }, | |
int_i32: struct { data: i32 }, | |
int_usize: struct { data: u32 }, | |
int_comptime_int_u32: struct { data: u32 }, | |
int_comptime_int_i32: struct { data: i32 }, | |
int_small: struct { data: *IntSmall }, | |
int_positive: struct { data: u32 }, | |
int_negative: struct { data: u32 }, | |
int_lazy_align: struct { data: *IntLazy }, | |
int_lazy_size: struct { data: *IntLazy }, | |
error_set_error: struct { data: *Key.Error }, | |
error_union_error: struct { data: *Key.Error }, | |
error_union_payload: struct { data: *Tag.TypeValue }, | |
enum_literal: struct { data: NullTerminatedString }, | |
enum_tag: struct { data: *Tag.EnumTag }, | |
float_f16: struct { data: f16 }, | |
float_f32: struct { data: f32 }, | |
float_f64: struct { data: *Float64 }, | |
float_f80: struct { data: *Float80 }, | |
float_f128: struct { data: *Float128 }, | |
float_c_longdouble_f80: struct { data: *Float80 }, | |
float_c_longdouble_f128: struct { data: *Float128 }, | |
float_comptime_float: struct { data: *Float128 }, | |
variable: struct { data: *Tag.Variable }, | |
@"extern": struct { data: *Tag.Extern }, | |
func_decl: struct { | |
const @"data.analysis.inferred_error_set" = opaque {}; | |
data: *Tag.FuncDecl, | |
@"trailing.resolved_error_set.len": *@"data.analysis.inferred_error_set", | |
trailing: struct { resolved_error_set: []Index }, | |
}, | |
func_instance: struct { | |
const @"data.analysis.inferred_error_set" = opaque {}; | |
const @"data.generic_owner.data.ty.data.params_len" = opaque {}; | |
data: *Tag.FuncInstance, | |
@"trailing.resolved_error_set.len": *@"data.analysis.inferred_error_set", | |
@"trailing.comptime_args.len": *@"data.generic_owner.data.ty.data.params_len", | |
trailing: struct { resolved_error_set: []Index, comptime_args: []Index }, | |
}, | |
func_coerced: struct { | |
data: *Tag.FuncCoerced, | |
}, | |
only_possible_value: DataIsIndex, | |
union_value: struct { data: *Key.Union }, | |
bytes: struct { data: *Bytes }, | |
aggregate: struct { | |
const @"data.ty.data.len orelse data.ty.data.fields_len" = opaque {}; | |
data: *Tag.Aggregate, | |
@"trailing.element_values.len": *@"data.ty.data.len orelse data.ty.data.fields_len", | |
trailing: struct { element_values: []Index }, | |
}, | |
repeated: struct { data: *Repeated }, | |
memoized_call: struct { | |
const @"data.args_len" = opaque {}; | |
data: *MemoizedCall, | |
@"trailing.arg_values.len": *@"data.args_len", | |
trailing: struct { arg_values: []Index }, | |
}, | |
}) void { | |
_ = self; | |
const map_fields = @typeInfo(@typeInfo(@TypeOf(tag_to_encoding_map)).pointer.child).@"struct".fields; | |
@setEvalBranchQuota(2_000); | |
inline for (@typeInfo(Tag).@"enum".fields, 0..) |tag, start| { | |
inline for (0..map_fields.len) |offset| { | |
if (comptime std.mem.eql(u8, tag.name, map_fields[(start + offset) % map_fields.len].name)) break; | |
} else { | |
@compileError(@typeName(Tag) ++ "." ++ tag.name ++ " missing dbHelper tag_to_encoding_map entry"); | |
} | |
} | |
} | |
comptime { | |
if (!builtin.strip_debug_info) switch (builtin.zig_backend) { | |
.stage2_llvm => _ = &dbHelper, | |
.stage2_x86_64 => for (@typeInfo(Tag).@"enum".fields) |tag| { | |
if (!@hasField(@TypeOf(Tag.encodings), tag.name)) @compileLog("missing: " ++ @typeName(Tag) ++ ".encodings." ++ tag.name); | |
const encoding = @field(Tag.encodings, tag.name); | |
if (@hasField(@TypeOf(encoding), "trailing")) for (@typeInfo(encoding.trailing).@"struct".fields) |field| { | |
struct { | |
fn checkConfig(name: []const u8) void { | |
if (!@hasField(@TypeOf(encoding.config), name)) @compileError("missing field: " ++ @typeName(Tag) ++ ".encodings." ++ tag.name ++ ".config.@\"" ++ name ++ "\""); | |
const FieldType = @TypeOf(@field(encoding.config, name)); | |
if (@typeInfo(FieldType) != .enum_literal) @compileError("expected enum literal: " ++ @typeName(Tag) ++ ".encodings." ++ tag.name ++ ".config.@\"" ++ name ++ "\": " ++ @typeName(FieldType)); | |
} | |
fn checkField(name: []const u8, Type: type) void { | |
switch (@typeInfo(Type)) { | |
.int => {}, | |
.@"enum" => {}, | |
.@"struct" => |info| assert(info.layout == .@"packed"), | |
.optional => |info| { | |
checkConfig(name ++ ".?"); | |
checkField(name ++ ".?", info.child); | |
}, | |
.pointer => |info| { | |
assert(info.size == .slice); | |
checkConfig(name ++ ".len"); | |
checkField(name ++ "[0]", info.child); | |
}, | |
else => @compileError("unsupported type: " ++ @typeName(Tag) ++ ".encodings." ++ tag.name ++ "." ++ name ++ ": " ++ @typeName(Type)), | |
} | |
} | |
}.checkField("trailing." ++ field.name, field.type); | |
}; | |
}, | |
else => {}, | |
}; | |
} | |
}; | |
pub const static_keys = [_]Key{ | |
.{ .int_type = .{ | |
.signedness = .unsigned, | |
.bits = 0, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .signed, | |
.bits = 0, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .unsigned, | |
.bits = 1, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .unsigned, | |
.bits = 8, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .signed, | |
.bits = 8, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .unsigned, | |
.bits = 16, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .signed, | |
.bits = 16, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .unsigned, | |
.bits = 29, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .unsigned, | |
.bits = 32, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .signed, | |
.bits = 32, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .unsigned, | |
.bits = 64, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .signed, | |
.bits = 64, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .unsigned, | |
.bits = 80, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .unsigned, | |
.bits = 128, | |
} }, | |
.{ .int_type = .{ | |
.signedness = .signed, | |
.bits = 128, | |
} }, | |
.{ .simple_type = .usize }, | |
.{ .simple_type = .isize }, | |
.{ .simple_type = .c_char }, | |
.{ .simple_type = .c_short }, | |
.{ .simple_type = .c_ushort }, | |
.{ .simple_type = .c_int }, | |
.{ .simple_type = .c_uint }, | |
.{ .simple_type = .c_long }, | |
.{ .simple_type = .c_ulong }, | |
.{ .simple_type = .c_longlong }, | |
.{ .simple_type = .c_ulonglong }, | |
.{ .simple_type = .c_longdouble }, | |
.{ .simple_type = .f16 }, | |
.{ .simple_type = .f32 }, | |
.{ .simple_type = .f64 }, | |
.{ .simple_type = .f80 }, | |
.{ .simple_type = .f128 }, | |
.{ .simple_type = .anyopaque }, | |
.{ .simple_type = .bool }, | |
.{ .simple_type = .void }, | |
.{ .simple_type = .type }, | |
.{ .simple_type = .anyerror }, | |
.{ .simple_type = .comptime_int }, | |
.{ .simple_type = .comptime_float }, | |
.{ .simple_type = .noreturn }, | |
.{ .anyframe_type = .none }, | |
.{ .simple_type = .null }, | |
.{ .simple_type = .undefined }, | |
.{ .simple_type = .enum_literal }, | |
// [*]u8 | |
.{ .ptr_type = .{ | |
.child = .u8_type, | |
.flags = .{ | |
.size = .many, | |
}, | |
} }, | |
// [*]const u8 | |
.{ .ptr_type = .{ | |
.child = .u8_type, | |
.flags = .{ | |
.size = .many, | |
.is_const = true, | |
}, | |
} }, | |
// [*:0]const u8 | |
.{ .ptr_type = .{ | |
.child = .u8_type, | |
.sentinel = .zero_u8, | |
.flags = .{ | |
.size = .many, | |
.is_const = true, | |
}, | |
} }, | |
// *const comptime_int | |
.{ .ptr_type = .{ | |
.child = .comptime_int_type, | |
.flags = .{ | |
.size = .one, | |
.is_const = true, | |
}, | |
} }, | |
// []const u8 | |
.{ .ptr_type = .{ | |
.child = .u8_type, | |
.flags = .{ | |
.size = .slice, | |
.is_const = true, | |
}, | |
} }, | |
// [:0]const u8 | |
.{ .ptr_type = .{ | |
.child = .u8_type, | |
.sentinel = .zero_u8, | |
.flags = .{ | |
.size = .slice, | |
.is_const = true, | |
}, | |
} }, | |
// @Vector(16, i8) | |
.{ .vector_type = .{ .len = 16, .child = .i8_type } }, | |
// @Vector(32, i8) | |
.{ .vector_type = .{ .len = 32, .child = .i8_type } }, | |
// @Vector(16, u8) | |
.{ .vector_type = .{ .len = 16, .child = .u8_type } }, | |
// @Vector(32, u8) | |
.{ .vector_type = .{ .len = 32, .child = .u8_type } }, | |
// @Vector(8, i16) | |
.{ .vector_type = .{ .len = 8, .child = .i16_type } }, | |
// @Vector(16, i16) | |
.{ .vector_type = .{ .len = 16, .child = .i16_type } }, | |
// @Vector(8, u16) | |
.{ .vector_type = .{ .len = 8, .child = .u16_type } }, | |
// @Vector(16, u16) | |
.{ .vector_type = .{ .len = 16, .child = .u16_type } }, | |
// @Vector(4, i32) | |
.{ .vector_type = .{ .len = 4, .child = .i32_type } }, | |
// @Vector(8, i32) | |
.{ .vector_type = .{ .len = 8, .child = .i32_type } }, | |
// @Vector(4, u32) | |
.{ .vector_type = .{ .len = 4, .child = .u32_type } }, | |
// @Vector(8, u32) | |
.{ .vector_type = .{ .len = 8, .child = .u32_type } }, | |
// @Vector(2, i64) | |
.{ .vector_type = .{ .len = 2, .child = .i64_type } }, | |
// @Vector(4, i64) | |
.{ .vector_type = .{ .len = 4, .child = .i64_type } }, | |
// @Vector(2, u64) | |
.{ .vector_type = .{ .len = 2, .child = .u64_type } }, | |
// @Vector(4, u64) | |
.{ .vector_type = .{ .len = 4, .child = .u64_type } }, | |
// @Vector(4, f16) | |
.{ .vector_type = .{ .len = 4, .child = .f16_type } }, | |
// @Vector(8, f16) | |
.{ .vector_type = .{ .len = 8, .child = .f16_type } }, | |
// @Vector(4, f32) | |
.{ .vector_type = .{ .len = 4, .child = .f32_type } }, | |
// @Vector(8, f32) | |
.{ .vector_type = .{ .len = 8, .child = .f32_type } }, | |
// @Vector(2, f64) | |
.{ .vector_type = .{ .len = 2, .child = .f64_type } }, | |
// @Vector(4, f64) | |
.{ .vector_type = .{ .len = 4, .child = .f64_type } }, | |
// ?noreturn | |
.{ .opt_type = .noreturn_type }, | |
// anyerror!void | |
.{ .error_union_type = .{ | |
.error_set_type = .anyerror_type, | |
.payload_type = .void_type, | |
} }, | |
// adhoc_inferred_error_set_type | |
.{ .simple_type = .adhoc_inferred_error_set }, | |
// generic_poison_type | |
.{ .simple_type = .generic_poison }, | |
// empty_tuple_type | |
.{ .tuple_type = .{ | |
.types = .empty, | |
.values = .empty, | |
} }, | |
.{ .simple_value = .undefined }, | |
.{ .int = .{ | |
.ty = .comptime_int_type, | |
.storage = .{ .u64 = 0 }, | |
} }, | |
.{ .int = .{ | |
.ty = .usize_type, | |
.storage = .{ .u64 = 0 }, | |
} }, | |
.{ .int = .{ | |
.ty = .u8_type, | |
.storage = .{ .u64 = 0 }, | |
} }, | |
.{ .int = .{ | |
.ty = .comptime_int_type, | |
.storage = .{ .u64 = 1 }, | |
} }, | |
.{ .int = .{ | |
.ty = .usize_type, | |
.storage = .{ .u64 = 1 }, | |
} }, | |
// one_u8 | |
.{ .int = .{ | |
.ty = .u8_type, | |
.storage = .{ .u64 = 1 }, | |
} }, | |
// four_u8 | |
.{ .int = .{ | |
.ty = .u8_type, | |
.storage = .{ .u64 = 4 }, | |
} }, | |
// negative_one | |
.{ .int = .{ | |
.ty = .comptime_int_type, | |
.storage = .{ .i64 = -1 }, | |
} }, | |
.{ .simple_value = .void }, | |
.{ .simple_value = .@"unreachable" }, | |
.{ .simple_value = .null }, | |
.{ .simple_value = .true }, | |
.{ .simple_value = .false }, | |
.{ .simple_value = .empty_tuple }, | |
}; | |
/// How many items in the InternPool are statically known. | |
/// This is specified with an integer literal and a corresponding comptime | |
/// assert below to break an unfortunate and arguably incorrect dependency loop | |
/// when compiling. | |
pub const static_len = Zir.Inst.Index.static_len; | |
comptime { | |
//@compileLog(static_keys.len); | |
assert(static_len == static_keys.len); | |
} | |
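// Correspondence sketch: `static_keys[i]` must describe the value of | |
// `@as(Index, @enumFromInt(i))`; e.g. entry 0 is the `u0` int type key, | |
// matching `Index.u0_type`, and the final entry is `.empty_tuple`. | |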
pub const Tag = enum(u8) { | |
/// This special tag represents a value which was removed from this pool via | |
/// `InternPool.remove`. The item remains allocated to preserve indices, but | |
/// lookups will consider it not equal to any other item, and all queries | |
/// assert not this tag. `data` is unused. | |
removed, | |
/// An integer type. | |
/// data is number of bits | |
type_int_signed, | |
/// An integer type. | |
/// data is number of bits | |
type_int_unsigned, | |
/// An array type whose length requires 64 bits or which has a sentinel. | |
/// data is payload to Array. | |
type_array_big, | |
/// An array type that has no sentinel and whose length fits in 32 bits. | |
/// data is payload to Vector. | |
type_array_small, | |
/// A vector type. | |
/// data is payload to Vector. | |
type_vector, | |
/// A fully explicitly specified pointer type. | |
type_pointer, | |
/// A slice type. | |
/// data is Index of underlying pointer type. | |
type_slice, | |
/// An optional type. | |
/// data is the child type. | |
type_optional, | |
/// The type `anyframe->T`. | |
/// data is the child type. | |
/// If the child type is `none`, the type is `anyframe`. | |
type_anyframe, | |
/// An error union type. | |
/// data is payload to `Key.ErrorUnionType`. | |
type_error_union, | |
/// An error union type of the form `anyerror!T`. | |
/// data is `Index` of payload type. | |
type_anyerror_union, | |
/// An error set type. | |
/// data is payload to `ErrorSet`. | |
type_error_set, | |
/// The inferred error set type of a function. | |
/// data is `Index` of a `func_decl` or `func_instance`. | |
type_inferred_error_set, | |
/// An enum type with auto-numbered tag values. | |
/// The enum is exhaustive. | |
/// data is payload index to `EnumAuto`. | |
type_enum_auto, | |
/// An enum type with an explicitly provided integer tag type. | |
/// The enum is exhaustive. | |
/// data is payload index to `EnumExplicit`. | |
type_enum_explicit, | |
/// An enum type with an explicitly provided integer tag type. | |
/// The enum is non-exhaustive. | |
/// data is payload index to `EnumExplicit`. | |
type_enum_nonexhaustive, | |
/// A type that can be represented with only an enum tag. | |
simple_type, | |
/// An opaque type. | |
/// data is index of Tag.TypeOpaque in extra. | |
type_opaque, | |
/// A non-packed struct type. | |
/// data is 0 or extra index of `TypeStruct`. | |
type_struct, | |
/// A packed struct, no fields have any init values. | |
/// data is extra index of `TypeStructPacked`. | |
type_struct_packed, | |
/// A packed struct, one or more fields have init values. | |
/// data is extra index of `TypeStructPacked`. | |
type_struct_packed_inits, | |
/// A `TupleType`. | |
/// data is extra index of `TypeTuple`. | |
type_tuple, | |
/// A union type. | |
/// `data` is extra index of `TypeUnion`. | |
type_union, | |
/// A function body type. | |
/// `data` is extra index to `TypeFunction`. | |
type_function, | |
/// Typed `undefined`. | |
/// `data` is `Index` of the type. | |
/// Untyped `undefined` is stored instead via `simple_value`. | |
undef, | |
/// A value that can be represented with only an enum tag. | |
simple_value, | |
/// A pointer to a `Nav`. | |
/// data is extra index of `PtrNav`, which contains the type and address. | |
ptr_nav, | |
/// A pointer to a decl that can be mutated at comptime. | |
/// data is extra index of `PtrComptimeAlloc`, which contains the type and address. | |
ptr_comptime_alloc, | |
/// A pointer to an unnamed addressable value. | |
/// data is extra index of `PtrUav`, which contains the pointer type and decl value. | |
/// The alignment of the uav is communicated via the pointer type. | |
ptr_uav, | |
/// A pointer to an unnamed addressable value. | |
/// data is extra index of `PtrUavAligned`, which contains the pointer | |
/// type and decl value. | |
/// The original pointer type is also provided, which will be different from `ty`. | |
/// This encoding is only used when a pointer to a Uav is | |
/// coerced to a different pointer type with a different alignment. | |
ptr_uav_aligned, | |
/// data is extra index of `PtrComptimeField`, which contains the pointer type and field value. | |
ptr_comptime_field, | |
/// A pointer with an integer value. | |
/// data is extra index of `PtrInt`, which contains the type and address (byte offset from 0). | |
/// Only pointer types are allowed to have this encoding. Optional types must use | |
/// `opt_payload` or `opt_null`. | |
ptr_int, | |
/// A pointer to the payload of an error union. | |
/// data is extra index of `PtrBase`, which contains the type and base pointer. | |
ptr_eu_payload, | |
/// A pointer to the payload of an optional. | |
/// data is extra index of `PtrBase`, which contains the type and base pointer. | |
ptr_opt_payload, | |
/// A pointer to an array element. | |
/// data is extra index of PtrBaseIndex, which contains the base array and element index. | |
/// In order to use this encoding, one must ensure that the `InternPool` | |
/// already contains the elem pointer type corresponding to this payload. | |
ptr_elem, | |
/// A pointer to a container field. | |
/// data is extra index of PtrBaseIndex, which contains the base container and field index. | |
ptr_field, | |
/// A slice. | |
/// data is extra index of PtrSlice, which contains the ptr and len values | |
ptr_slice, | |
/// An optional value that is non-null. | |
/// data is extra index of `TypeValue`. | |
/// The type is the optional type (not the payload type). | |
opt_payload, | |
/// An optional value that is null. | |
/// data is Index of the optional type. | |
opt_null, | |
/// Type: u8 | |
/// data is integer value | |
int_u8, | |
/// Type: u16 | |
/// data is integer value | |
int_u16, | |
/// Type: u32 | |
/// data is integer value | |
int_u32, | |
/// Type: i32 | |
/// data is integer value bitcasted to u32. | |
int_i32, | |
/// A usize that fits in 32 bits. | |
/// data is integer value. | |
int_usize, | |
/// A comptime_int that fits in a u32. | |
/// data is integer value. | |
int_comptime_int_u32, | |
/// A comptime_int that fits in an i32. | |
/// data is integer value bitcasted to u32. | |
int_comptime_int_i32, | |
/// An integer value that fits in 32 bits with an explicitly provided type. | |
/// data is extra index of `IntSmall`. | |
int_small, | |
/// A positive integer value. | |
/// data is a limbs index to `Int`. | |
int_positive, | |
/// A negative integer value. | |
/// data is a limbs index to `Int`. | |
int_negative, | |
/// The ABI alignment of a lazy type. | |
/// data is extra index of `IntLazy`. | |
int_lazy_align, | |
/// The ABI size of a lazy type. | |
/// data is extra index of `IntLazy`. | |
int_lazy_size, | |
/// An error value. | |
/// data is extra index of `Key.Error`. | |
error_set_error, | |
/// An error union error. | |
/// data is extra index of `Key.Error`. | |
error_union_error, | |
/// An error union payload. | |
/// data is extra index of `TypeValue`. | |
error_union_payload, | |
/// An enum literal value. | |
/// data is `NullTerminatedString` of the enum literal name. | |
enum_literal, | |
/// An enum tag value. | |
/// data is extra index of `EnumTag`. | |
enum_tag, | |
/// An f16 value. | |
/// data is float value bitcasted to u16 and zero-extended. | |
float_f16, | |
/// An f32 value. | |
/// data is float value bitcasted to u32. | |
float_f32, | |
/// An f64 value. | |
/// data is extra index to Float64. | |
float_f64, | |
/// An f80 value. | |
/// data is extra index to Float80. | |
float_f80, | |
/// An f128 value. | |
/// data is extra index to Float128. | |
float_f128, | |
/// A c_longdouble value of 80 bits. | |
/// data is extra index to Float80. | |
/// This is used when a c_longdouble value is provided as an f80, because f80 has unnormalized | |
/// values which cannot be losslessly represented as f128. It should only be used when the type | |
/// underlying c_longdouble for the target is 80 bits. | |
float_c_longdouble_f80, | |
/// A c_longdouble value of 128 bits. | |
/// data is extra index to Float128. | |
/// This is used when a c_longdouble value is provided as any type other than an f80, since all | |
/// other float types can be losslessly converted to and from f128. | |
float_c_longdouble_f128, | |
/// A comptime_float value. | |
/// data is extra index to Float128. | |
float_comptime_float, | |
/// A global variable. | |
/// data is extra index to Variable. | |
variable, | |
/// An extern function or variable. | |
/// data is extra index to Extern. | |
/// Some parts of the key are stored in `owner_nav`. | |
@"extern", | |
/// A non-extern function corresponding directly to the AST node from whence it originated. | |
/// data is extra index to `FuncDecl`. | |
/// Only the owner Decl is used for hashing and equality because the other | |
/// fields can get patched up during incremental compilation. | |
func_decl, | |
/// A generic function instantiation. | |
/// data is extra index to `FuncInstance`. | |
func_instance, | |
/// A `func_decl` or a `func_instance` that has been coerced to a different type. | |
/// data is extra index to `FuncCoerced`. | |
func_coerced, | |
/// This represents the only possible value for *some* types which have | |
/// only one possible value. Not all only-possible-values are encoded this way; | |
/// for example structs which have all comptime fields are not encoded this way. | |
/// The set of values that are encoded this way is: | |
/// * An array or vector which has length 0. | |
/// * A struct which has all fields comptime-known. | |
/// * An empty enum or union. TODO: this value's existence is strange, because such a type in reality has no values. See #15909 | |
/// data is Index of the type, which is known to be zero bits at runtime. | |
only_possible_value, | |
/// data is extra index to Key.Union. | |
union_value, | |
/// An array of bytes. | |
/// data is extra index to `Bytes`. | |
bytes, | |
/// An instance of a struct, array, or vector. | |
/// data is extra index to `Aggregate`. | |
aggregate, | |
/// An instance of an array or vector with every element being the same value. | |
/// data is extra index to `Repeated`. | |
repeated, | |
/// A memoized comptime function call result. | |
/// data is extra index to `MemoizedCall` | |
memoized_call, | |
const ErrorUnionType = Key.ErrorUnionType; | |
const TypeValue = Key.TypeValue; | |
const Error = Key.Error; | |
const EnumTag = Key.EnumTag; | |
const Union = Key.Union; | |
const TypePointer = Key.PtrType; | |
const enum_explicit_encoding = .{ | |
.summary = .@"{.payload.name%summary#\"}", | |
.payload = EnumExplicit, | |
.trailing = struct { | |
owner_union: ?Index, | |
captures: ?[]CaptureValue, | |
type_hash: ?u64, | |
field_names: []NullTerminatedString, | |
tag_values: []Index, | |
}, | |
.config = .{ | |
.@"trailing.owner_union.?" = .@"payload.zir_index == .none", | |
.@"trailing.cau.?" = .@"payload.zir_index != .none", | |
.@"trailing.captures.?" = .@"payload.captures_len < 0xffffffff", | |
.@"trailing.captures.?.len" = .@"payload.captures_len", | |
.@"trailing.type_hash.?" = .@"payload.captures_len == 0xffffffff", | |
.@"trailing.field_names.len" = .@"payload.fields_len", | |
.@"trailing.tag_values.len" = .@"payload.fields_len", | |
}, | |
}; | |
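// Note: these encoding tables are data for the debugger pretty-printers in | |
// tools/ (see `dbHelper` above): `.summary` is a display template, `.payload`/ | |
// `.data` name the stored representation, and `.config` entries are condition | |
// strings saying which optional trailing sections exist and how long they are. | |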
const encodings = .{ | |
.removed = .{}, | |
.type_int_signed = .{ .summary = .@"i{.data%value}", .data = u32 }, | |
.type_int_unsigned = .{ .summary = .@"u{.data%value}", .data = u32 }, | |
.type_array_big = .{ | |
.summary = .@"[{.payload.len1%value} << 32 | {.payload.len0%value}:{.payload.sentinel%summary}]{.payload.child%summary}", | |
.payload = Array, | |
}, | |
.type_array_small = .{ .summary = .@"[{.payload.len%value}]{.payload.child%summary}", .payload = Vector }, | |
.type_vector = .{ .summary = .@"@Vector({.payload.len%value}, {.payload.child%summary})", .payload = Vector }, | |
.type_pointer = .{ .summary = .@"*... {.payload.child%summary}", .payload = TypePointer }, | |
.type_slice = .{ .summary = .@"[]... {.data.unwrapped.payload.child%summary}", .data = Index }, | |
.type_optional = .{ .summary = .@"?{.data%summary}", .data = Index }, | |
.type_anyframe = .{ .summary = .@"anyframe->{.data%summary}", .data = Index }, | |
.type_error_union = .{ | |
.summary = .@"{.payload.error_set_type%summary}!{.payload.payload_type%summary}", | |
.payload = ErrorUnionType, | |
}, | |
.type_anyerror_union = .{ .summary = .@"anyerror!{.data%summary}", .data = Index }, | |
.type_error_set = .{ .summary = .@"error{...}", .payload = ErrorSet }, | |
.type_inferred_error_set = .{ | |
.summary = .@"@typeInfo(@typeInfo(@TypeOf({.data%summary})).@\"fn\".return_type.?).error_union.error_set", | |
.data = Index, | |
}, | |
.type_enum_auto = .{ | |
.summary = .@"{.payload.name%summary#\"}", | |
.payload = EnumAuto, | |
.trailing = struct { | |
owner_union: ?Index, | |
captures: ?[]CaptureValue, | |
type_hash: ?u64, | |
field_names: []NullTerminatedString, | |
}, | |
.config = .{ | |
.@"trailing.owner_union.?" = .@"payload.zir_index == .none", | |
.@"trailing.cau.?" = .@"payload.zir_index != .none", | |
.@"trailing.captures.?" = .@"payload.captures_len < 0xffffffff", | |
.@"trailing.captures.?.len" = .@"payload.captures_len", | |
.@"trailing.type_hash.?" = .@"payload.captures_len == 0xffffffff", | |
.@"trailing.field_names.len" = .@"payload.fields_len", | |
}, | |
}, | |
.type_enum_explicit = enum_explicit_encoding, | |
.type_enum_nonexhaustive = enum_explicit_encoding, | |
.simple_type = .{ .summary = .@"{.index%value#.}", .index = SimpleType }, | |
.type_opaque = .{ | |
.summary = .@"{.payload.name%summary#\"}", | |
.payload = TypeOpaque, | |
.trailing = struct { captures: []CaptureValue }, | |
.config = .{ .@"trailing.captures.len" = .@"payload.captures_len" }, | |
}, | |
.type_struct = .{ | |
.summary = .@"{.payload.name%summary#\"}", | |
.payload = TypeStruct, | |
.trailing = struct { | |
captures_len: ?u32, | |
captures: ?[]CaptureValue, | |
type_hash: ?u64, | |
field_types: []Index, | |
field_names_map: OptionalMapIndex, | |
field_names: []NullTerminatedString, | |
field_inits: ?[]Index, | |
field_aligns: ?[]Alignment, | |
field_is_comptime_bits: ?[]u32, | |
field_index: ?[]LoadedStructType.RuntimeOrder, | |
field_offset: []u32, | |
}, | |
.config = .{ | |
.@"trailing.captures_len.?" = .@"payload.flags.any_captures", | |
.@"trailing.captures.?" = .@"payload.flags.any_captures", | |
.@"trailing.captures.?.len" = .@"trailing.captures_len.?", | |
.@"trailing.type_hash.?" = .@"payload.flags.is_reified", | |
.@"trailing.field_types.len" = .@"payload.fields_len", | |
.@"trailing.field_names.len" = .@"payload.fields_len", | |
.@"trailing.field_inits.?" = .@"payload.flags.any_default_inits", | |
.@"trailing.field_inits.?.len" = .@"payload.fields_len", | |
.@"trailing.field_aligns.?" = .@"payload.flags.any_aligned_fields", | |
.@"trailing.field_aligns.?.len" = .@"payload.fields_len", | |
.@"trailing.field_is_comptime_bits.?" = .@"payload.flags.any_comptime_fields", | |
.@"trailing.field_is_comptime_bits.?.len" = .@"(payload.fields_len + 31) / 32", | |
.@"trailing.field_index.?" = .@"!payload.flags.is_extern", | |
.@"trailing.field_index.?.len" = .@"payload.fields_len", | |
.@"trailing.field_offset.len" = .@"payload.fields_len", | |
}, | |
}, | |
.type_struct_packed = .{ | |
.summary = .@"{.payload.name%summary#\"}", | |
.payload = TypeStructPacked, | |
.trailing = struct { | |
captures_len: ?u32, | |
captures: ?[]CaptureValue, | |
type_hash: ?u64, | |
field_types: []Index, | |
field_names: []NullTerminatedString, | |
}, | |
.config = .{ | |
.@"trailing.captures_len.?" = .@"payload.flags.any_captures", | |
.@"trailing.captures.?" = .@"payload.flags.any_captures", | |
.@"trailing.captures.?.len" = .@"trailing.captures_len.?", | |
.@"trailing.type_hash.?" = .@"payload.is_flags.is_reified", | |
.@"trailing.field_types.len" = .@"payload.fields_len", | |
.@"trailing.field_names.len" = .@"payload.fields_len", | |
}, | |
}, | |
.type_struct_packed_inits = .{ | |
.summary = .@"{.payload.name%summary#\"}", | |
.payload = TypeStructPacked, | |
.trailing = struct { | |
captures_len: ?u32, | |
captures: ?[]CaptureValue, | |
type_hash: ?u64, | |
field_types: []Index, | |
field_names: []NullTerminatedString, | |
field_inits: []Index, | |
}, | |
.config = .{ | |
.@"trailing.captures_len.?" = .@"payload.flags.any_captures", | |
.@"trailing.captures.?" = .@"payload.flags.any_captures", | |
.@"trailing.captures.?.len" = .@"trailing.captures_len.?", | |
.@"trailing.type_hash.?" = .@"payload.is_flags.is_reified", | |
.@"trailing.field_types.len" = .@"payload.fields_len", | |
.@"trailing.field_names.len" = .@"payload.fields_len", | |
.@"trailing.field_inits.len" = .@"payload.fields_len", | |
}, | |
}, | |
.type_tuple = .{ | |
.summary = .@"struct {...}", | |
.payload = TypeTuple, | |
.trailing = struct { | |
field_types: []Index, | |
field_values: []Index, | |
}, | |
.config = .{ | |
.@"trailing.field_types.len" = .@"payload.fields_len", | |
.@"trailing.field_values.len" = .@"payload.fields_len", | |
}, | |
}, | |
.type_union = .{ | |
.summary = .@"{.payload.name%summary#\"}", | |
.payload = TypeUnion, | |
.trailing = struct { | |
captures_len: ?u32, | |
captures: ?[]CaptureValue, | |
type_hash: ?u64, | |
field_types: []Index, | |
field_aligns: []Alignment, | |
}, | |
.config = .{ | |
.@"trailing.captures_len.?" = .@"payload.flags.any_captures", | |
.@"trailing.captures.?" = .@"payload.flags.any_captures", | |
.@"trailing.captures.?.len" = .@"trailing.captures_len.?", | |
.@"trailing.type_hash.?" = .@"payload.is_flags.is_reified", | |
.@"trailing.field_types.len" = .@"payload.fields_len", |