hare

[hare] The Hare programming language
git clone https://git.torresjrjr.com/hare.git
Log | Files | Refs | README | LICENSE

commit d93249d007ec19ff0e3a549ada735992edcf4272
parent e6eab611b890f818f42ef60736c3818ecea90eaf
Author: Sebastian <sebastian@sebsite.pw>
Date:   Sat, 28 Oct 2023 02:34:16 -0400

hare::lex: take in scanner

The lexer previously was initialized with an io::handle, and then
created and managed its own bufio::scanner internally.

This meant that there was no reliable way to know where a stream would
be positioned after lexing completed. So it wasn't possible to read from
a handle after lexing from it.

This is solved by changing the interface to receive a scanner which is
managed by the user. This way, code can use a scanner, lex something
from that scanner, and then continue to use the same scanner, with the
guarantee that the position will be immediately after the last token
lexed.

This is useful in e.g. hare::parse::doc, which scans a comment, and when
"[[" is encountered, it parses an identifier from the scanner, and then
continues scanning. If the lexer interface weren't changed, this code
would instead need to allocate a buffer to store the identifier, then
pass a memio::fixed stream to the lexer, which is wasteful since the
lexer is internally using a scanner anyway, so there's no reason it
shouldn't be able to share the same scanner.

In addition, the internal storage of the lexer is overhauled in this
commit, to take advantage of the scanner for some information, and to
minimize the size of fields elsewhere. The effect in both cases is less
information needs to be stored in the lexer itself, which also
simplifies the implementation in various places.

Signed-off-by: Sebastian <sebastian@sebsite.pw>

Diffstat:
Mcmd/hare/build.ha | 8+++++---
Mcmd/harec/main.ha | 6++++--
Mcmd/haredoc/doc/resolve.ha | 6+++++-
Mcmd/haredoc/main.ha | 9++++++---
Mcmd/haretype/main.ha | 9++++++---
Mcmd/ioctlgen/main.ha | 14+++++++++-----
Mhare/lex/+test.ha | 86+++++++++++++++++++++++++++++++------------------------------------------------
Mhare/lex/lex.ha | 172+++++++++++++++++++++++++++++++++++--------------------------------------------
Mhare/module/deps.ha | 6++++--
Mhare/parse/+test/ident_test.ha | 7+++++--
Mhare/parse/+test/loc.ha | 17+++++++++++------
Mhare/parse/+test/roundtrip.ha | 7+++++--
Mhare/parse/+test/unit_test.ha | 12++++++++----
Mhare/parse/ident.ha | 9++++++---
Mhare/types/+test.ha | 7+++++--
Mhare/unit/+test.ha | 9++++++---
Mmakefiles/freebsd.aarch64.mk | 4++--
Mmakefiles/freebsd.riscv64.mk | 4++--
Mmakefiles/freebsd.x86_64.mk | 4++--
Mmakefiles/linux.aarch64.mk | 4++--
Mmakefiles/linux.riscv64.mk | 4++--
Mmakefiles/linux.x86_64.mk | 4++--
22 files changed, 207 insertions(+), 201 deletions(-)

diff --git a/cmd/hare/build.ha b/cmd/hare/build.ha @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-only // (c) Hare authors <https://harelang.org> +use bufio; use cmd::hare::build; use errors; use fmt; @@ -56,9 +57,10 @@ fn build(name: str, cmd: *getopt::command) (void | error) = { case 'a' => arch = get_arch(opt.1)?; case 'D' => - const buf = memio::fixed(strings::toutf8(opt.1)); - const lexer = lex::init(&buf, "<-D argument>"); - defer lex::finish(&lexer); + let buf = memio::fixed(strings::toutf8(opt.1)); + let sc = bufio::newscanner(&buf, len(opt.1)); + defer bufio::finish(&sc); + let lexer = lex::init(&sc, "<-D argument>"); append(ctx.defines, parse::define(&lexer)?); case 'F' => ctx.freestanding = true; diff --git a/cmd/harec/main.ha b/cmd/harec/main.ha @@ -12,6 +12,7 @@ use hare::types; use hare::unit; use io; use os; +use types::{SIZE_MAX}; export fn main() void = { let usage: []getopt::help = [ @@ -71,9 +72,10 @@ export fn main() void = { static let buf: [os::BUFSZ]u8 = [0...]; let bufin = bufio::init(input, buf, []); defer io::close(&bufin)!; + let sc = bufio::newscanner(&bufin, SIZE_MAX); + defer bufio::finish(&sc); - let lexer = lex::init(&bufin, cmd.args[i]); - defer lex::finish(&lexer); + let lexer = lex::init(&sc, cmd.args[i]); let su = match (parse::subunit(&lexer)) { case let err: parse::error => printerr(err); diff --git a/cmd/haredoc/doc/resolve.ha b/cmd/haredoc/doc/resolve.ha @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-only // (c) Hare authors <https://harelang.org> +use bufio; use fmt; use fs; use hare::ast; @@ -10,6 +11,7 @@ use hare::parse; use io; use os; use path; +use types; type symkind = enum { LOCAL, @@ -185,6 +187,8 @@ export fn scan(path: str) (ast::subunit | error) = { fmt::fatalf("Error reading {}: {}", path, fs::strerror(err)); }; defer io::close(input)!; - const lexer = lex::init(input, path, lex::flag::COMMENTS); + let sc = bufio::newscanner(input, types::SIZE_MAX); + defer bufio::finish(&sc); + let lexer = 
lex::init(&sc, path, lex::flag::COMMENTS); return parse::subunit(&lexer)?; }; diff --git a/cmd/haredoc/main.ha b/cmd/haredoc/main.ha @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-only // (c) Hare authors <https://harelang.org> +use bufio; use cmd::haredoc::doc; use fmt; use fs; @@ -17,6 +18,7 @@ use os::exec; use path; use strconv; use strings; +use types; use unix::tty; const help: []getopt::help = [ @@ -256,9 +258,10 @@ fn doc(name: str, cmd: *getopt::command) (void | error) = { // to the ident in the string. For example, this function will parse `rt::abort` // as a valid identifier. fn parseident(in: str) ((ast::ident, bool) | void) = { - const buf = memio::fixed(strings::toutf8(in)); - const lexer = lex::init(&buf, "<string>"); - defer lex::finish(&lexer); + let buf = memio::fixed(strings::toutf8(in)); + let sc = bufio::newscanner(&buf, types::SIZE_MAX); + defer bufio::finish(&sc); + let lexer = lex::init(&sc, "<string>"); let success = false; let ident: ast::ident = []; defer if (!success) ast::ident_free(ident); diff --git a/cmd/haretype/main.ha b/cmd/haretype/main.ha @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-only // (c) Hare authors <https://harelang.org> +use bufio; use fmt; use getopt; use hare::ast; @@ -13,6 +14,7 @@ use memio; use os; use path; use strings; +use types::{SIZE_MAX}; fn typeinfo( store: *types::typestore, @@ -22,10 +24,11 @@ fn typeinfo( fmt::println("null")?; yield types::lookup_builtin(store, ast::builtin_type::NULL); } else { - const stream = memio::fixed(strings::toutf8(s)); + let stream = memio::fixed(strings::toutf8(s)); defer io::close(&stream)!; - const lexer = lex::init(&stream, "-"); - defer lex::finish(&lexer); + let sc = bufio::newscanner(&stream, SIZE_MAX); + defer bufio::finish(&sc); + let lexer = lex::init(&sc, "-"); const atype = parse::_type(&lexer)?; defer ast::type_finish(&atype); const typ = types::lookup(store, &atype)?; diff --git a/cmd/ioctlgen/main.ha b/cmd/ioctlgen/main.ha @@ -13,6 +13,7 @@ use 
memio; use os; use regex; use strings; +use types::{SIZE_MAX}; let ioctlre: regex::regex = regex::regex { ... }; let typedefre: regex::regex = regex::regex { ... }; @@ -85,9 +86,10 @@ export fn main() void = { }; fn loadtype(store: *types::typestore) void = { - const tee = io::tee(os::stdin, os::stdout); - const lex = lex::init(&tee, "<ioctl>"); - defer lex::finish(&lex); + let tee = io::tee(os::stdin, os::stdout); + let sc = bufio::newscanner(&tee, SIZE_MAX); + defer bufio::finish(&sc); + let lex = lex::init(&sc, "<ioctl>"); const decl = match (parse::decl(&lex)) { case let err: parse::error => fmt::fatal("Error parsing type declaration:", @@ -106,8 +108,10 @@ fn loadtype(store: *types::typestore) void = { }; fn parseioctl(store: *types::typestore, d: dir, params: str) ioctl = { - const buf = memio::fixed(strings::toutf8(params)); - const lex = lex::init(&buf, "<ioctl>"); + let buf = memio::fixed(strings::toutf8(params)); + let sc = bufio::newscanner(&buf, SIZE_MAX); + defer bufio::finish(&sc); + let lex = lex::init(&sc, "<ioctl>"); const rn = expect(&lex, ltok::LIT_RCONST).1 as rune; expect(&lex, ltok::COMMA); diff --git a/hare/lex/+test.ha b/hare/lex/+test.ha @@ -1,33 +1,36 @@ // SPDX-License-Identifier: MPL-2.0 // (c) Hare authors <https://harelang.org> +use bufio; use fmt; use io; -use io::{mode}; use memio; use strings; -@test fn unget() void = { - let buf = memio::fixed(strings::toutf8("z")); - let lexer = init(&buf, "<test>"); - unget(&lexer, ('x', location { path = "<test>", line = 1, col = 2 })); - unget(&lexer, ('y', location { path = "<test>", line = 1, col = 3 })); - let r = next(&lexer) as (rune, location); - assert(r.0 == 'y'); - assert(r.1.path == "<test>" && r.1.line == 1 && r.1.col == 3); - r = next(&lexer) as (rune, location); - assert(r.0 == 'x'); - assert(r.1.path == "<test>" && r.1.line == 1 && r.1.col == 2); - r = next(&lexer) as (rune, location); - assert(r.0 == 'z'); - assert(r.1.path == "<test>" && r.1.line == 1 && r.1.col == 1); - 
assert(next(&lexer) is io::EOF); - unget(&lexer, io::EOF); - assert(next(&lexer) is io::EOF); +fn initbuf(in: []u8, flags: flag...) lexer = { + static let buf: [256]u8 = [0...]; + static let s = memio::stream { + stream = null: io::stream, + ... + }; + static let sc = bufio::scanner { + stream = null: io::stream, + src = 0, + ... + }; + + s = memio::fixed(in); + sc = bufio::newscanner_static(&s, buf); + return init(&sc, "<test>", flags...); +}; + +fn initstr(in: str, flags: flag...) lexer = { + return initbuf(strings::toutf8(in), flags...); }; @test fn unlex() void = { - let lexer = init(io::empty, "<test>"); + let sc = bufio::newscanner_static(io::empty, []); + let lexer = init(&sc, "<test>"); unlex(&lexer, (ltok::IF, void, location { path = "<test>", line = 1234, @@ -56,8 +59,7 @@ fn vassert(expected: value, actual: value) void = { }; fn lextest(in: str, expected: []token) void = { - let buf = memio::fixed(strings::toutf8(in)); - let lexer = init(&buf, "<test>"); + let lexer = initstr(in); for (let i = 0z; i < len(expected); i += 1) { let etok = expected[i]; let tl = match (lex(&lexer)) { @@ -184,8 +186,7 @@ fn loc(line: uint, col: uint) location = location { @test fn keywords() void = { let keywords = bmap[..ltok::LAST_KEYWORD+1]; for (let i = 0z; i < len(keywords); i += 1) { - let buf = memio::fixed(strings::toutf8(keywords[i])); - let lexer = init(&buf, "<test>"); + let lexer = initstr(keywords[i]); let tok = lex(&lexer) as token; assert(tok.0 == i: ltok); }; @@ -200,9 +201,8 @@ fn loc(line: uint, col: uint) location = location { ]; lextest(in, expected); - let in = "// foo\n// bar\nhello world// baz\n\n// bad\ntest"; - let buf = memio::fixed(strings::toutf8(in)); - let lexer = init(&buf, "<input>", flag::COMMENTS); + let lexer = initstr("// foo\n// bar\nhello world// baz\n\n// bad\ntest", + flag::COMMENTS); assert(lex(&lexer) is token); assert(comment(&lexer) == " foo\n bar\n"); assert(lex(&lexer) is token); @@ -305,40 +305,24 @@ fn loc(line: uint, col: uint) 
location = location { @test fn invalid() void = { // Using \x80 within a string literal will cause this to output an // empty string - const in = ['1': u8, 0x80]; - - let buf = memio::fixed(in); - let lexer = init(&buf, "<test>"); - + let lexer = initbuf(['1', 0x80]); const s = lex(&lexer) as error as syntax; assert(s.1 == "Source file is not valid UTF-8"); // Regression: invalid UTF-8 at the beginning of a token used to cause // a crash in nextw - const in = [0x80: u8]; - - let buf = memio::fixed(in); - let lexer = init(&buf, "<test>"); - + let lexer = initbuf([0x80]); const s = lex(&lexer) as error as syntax; assert(s.1 == "Source file is not valid UTF-8"); // Regression: invalid escape sequences such as "\^" used to casue a // crash - const in = ['"': u8, '\\': u8, '^': u8, '"': u8]; - - let buf = memio::fixed(in); - let lexer = init(&buf, "<test>"); - + let lexer = initstr(`"\^"`); const s = lex(&lexer) as error as syntax; assert(s.1 == "unknown escape sequence"); // Regression: <X>e followed by another token used to cause a crash - const in = ['0': u8, 'e': u8, ')': u8]; - - let buf = memio::fixed(in); - let lexer = init(&buf, "<test>"); - + let lexer = initstr("0e)"); const s = lex(&lexer) as error as syntax; assert(s.1 == "expected exponent"); }; @@ -357,9 +341,7 @@ type op = enum { }; @test fn loc() void = { - const src = "h ello: my name is Inigo Montoya"; - let buf = memio::fixed(strings::toutf8(src)); - let lexer = init(&buf, "<test>"); + let lexer = initstr("h ello: my name is\nInigo Montoya."); const ops: [_]op = [ op::NEXT, op::NEXT, @@ -396,7 +378,8 @@ type op = enum { (loc(1, 17), loc(1, 16)), (loc(1, 29), loc(1, 28)), (loc(1, 32), loc(1, 31)), - (loc(1, 38), loc(1, 37)), + (loc(2, 6), loc(2, 5)), + (loc(2, 14), loc(2, 13)), ]; // We could statically allocate r and t, but what's the point @@ -411,7 +394,7 @@ type op = enum { case op::NEXT => append(r, next(&lexer) as (rune, location)); case op::UNGET => - unget(&lexer, r[len(r) - 1]); + 
unget(&lexer, r[len(r) - 1].0); delete(r[len(r) - 1]); case op::UNLEX => unlex(&lexer, t[len(t) - 1]); @@ -419,7 +402,6 @@ type op = enum { }; let loc = mkloc(&lexer); let ploc = prevloc(&lexer); - // TODO: Aggregate equality assert(loc.path == vector[i].0.path && loc.line == vector[i].0.line && loc.col == vector[i].0.col); diff --git a/hare/lex/lex.ha b/hare/lex/lex.ha @@ -16,14 +16,12 @@ use strings; use types; export type lexer = struct { - in: bufio::scanner, + in: *bufio::scanner, path: str, loc: (uint, uint), - un: (token | void), - rb: [2]((rune, location) | io::EOF | void), - // 1 more than the size of un and rb respectively - prevunlocs: [2](location, location), - prevrlocs: [3]location, + prevrloc: (uint, uint), + un: token, // ltok::EOF when no token was unlexed + prevunlocs: [2]((uint, uint), (uint, uint)), flags: flag, comment: str, require_int: bool, @@ -55,47 +53,45 @@ export fn strerror(err: error) const str = { }; }; -// Initializes a new lexer for the given input. The path is borrowed. -export fn init(in: io::handle, path: str, flags: flag...) lexer = { +// Initializes a new lexer for the given [[bufio::scanner]]. The path is +// borrowed. +export fn init(in: *bufio::scanner, path: str, flags: flag...) lexer = { let f = flag::NONE; for (let i = 0z; i < len(flags); i += 1) { f |= flags[i]; }; - let scanner = bufio::newscanner(in, os::BUFSZ); const loc = location { path = path, line = 1, col = 1 }; return lexer { - in = scanner, + in = in, path = path, loc = (1, 1), - un = void, - rb = [void...], - prevunlocs = [(loc, loc)...], - prevrlocs = [loc...], + prevrloc = (1, 1), + un = (ltok::EOF, void, loc), + prevunlocs = [((1, 1), (1, 1))...], flags = f, ... }; }; -export fn finish(lex: *lexer) void = { - bufio::finish(&lex.in); -}; - // Returns the current value of the comment buffer, or empty string if unset (or // if [[flag::COMMENTS]] was not enabled for this lexer). 
export fn comment(lex: *lexer) str = lex.comment; // Returns the next token from the lexer. export fn lex(lex: *lexer) (token | error) = { - match (lex.un) { - case let tok: token => - lex.un = void; - return tok; - case void => void; + if (lex.un.0 != ltok::EOF) { + defer lex.un.0 = ltok::EOF; + return lex.un; }; defer { lex.prevunlocs[1] = lex.prevunlocs[0]; - lex.prevunlocs[0] = (prevloc(lex), mkloc(lex)); + const prev = prevloc(lex); + const loc = mkloc(lex); + lex.prevunlocs[0] = ( + (prev.line, prev.col), + (loc.line, loc.col), + ); }; let r = match (nextw(lex)?) { @@ -106,25 +102,25 @@ export fn lex(lex: *lexer) (token | error) = { }; if (ascii::isdigit(r.0)) { - unget(lex, r); + unget(lex, r.0); return lex_literal(lex); }; lex.require_int = false; if (is_name(r.0, false)) { - unget(lex, r); + unget(lex, r.0); return lex_name(lex, r.1); }; let tok = switch (r.0) { case '"', '\'', '`' => - unget(lex, r); + unget(lex, r.0); return lex_rn_str(lex); case '.', '<', '>', '&', '|', '^' => - unget(lex, r); + unget(lex, r.0); return lex3(lex); case '*', '%', '/', '+', '-', ':', '!', '=' => - unget(lex, r); + unget(lex, r.0); return lex2(lex); case '~' => yield ltok::BNOT; @@ -236,8 +232,8 @@ fn lex_string(lex: *lexer, loc: location, delim: rune) (token | error) = { return syntaxerr(loc, "unexpected EOF scanning string literal"); case let r: (rune, location) => if (r.0 == delim) break - else if (delim == '"') { - unget(lex, r); + else if (delim == '"' && r.0 == '\\') { + unget(lex, r.0); let r = lex_rune(lex, loc)?; memio::appendrune(&buf, r)?; } else { @@ -258,19 +254,19 @@ fn lex_string(lex: *lexer, loc: location, delim: rune) (token | error) = { case '/' => match (nextw(lex)?) 
{ case io::EOF => - unget(lex, r); + unget(lex, r.0); case let s: (rune, location) => if (s.0 == '/') { lex_comment(lex)?; continue; } else { - unget(lex, s); - unget(lex, r); + unget(lex, s.0); + unget(lex, r.0); }; }; break; case => - unget(lex, r); + unget(lex, r.0); break; }; }; @@ -321,7 +317,7 @@ fn lex_name(lex: *lexer, loc: location) (token | error) = { case io::EOF => break; case let r: (rune, location) => if (!is_name(r.0, true)) { - unget(lex, r); + unget(lex, r.0); break; }; memio::appendrune(&buf, r.0)?; @@ -361,7 +357,7 @@ fn line_comment(lex: *lexer) (void | error) = { }; if (try(lex, '/')? is void) { - unget(lex, r); + unget(lex, r.0); return; }; @@ -432,9 +428,9 @@ fn lex_literal(lex: *lexer) (token | error) = { "Leading zeros in number literals aren't permitted (for octal, use the 0o prefix instead)"); }; started = true; - unget(lex, r); + unget(lex, r.0); }; - } else unget(lex, r); + } else unget(lex, r.0); let basechrs = switch (base) { case strconv::base::BIN => yield "01"; @@ -467,7 +463,7 @@ fn lex_literal(lex: *lexer) (token | error) = { if (float || exp is size || suff is size || base != strconv::base::DEC || lex.require_int) { - unget(lex, r); + unget(lex, r.0); break; } else { r = match (next(lex)?) 
{ @@ -477,15 +473,11 @@ fn lex_literal(lex: *lexer) (token | error) = { yield r; }; if (!strings::contains(basechrs, r.0)) { - unget(lex, r); - unget(lex, ('.', location { - path = r.1.path, - line = r.1.line, - col = r.1.col - 1, - })); + unget(lex, r.0); + unget(lex, '.'); break; }; - unget(lex, r); + unget(lex, r.0); float = true; append(chars, utf8::encoderune('.')...); }; @@ -496,7 +488,7 @@ fn lex_literal(lex: *lexer) (token | error) = { }; if (exp is size || suff is size || base != strconv::base::DEC) { - unget(lex, r); + unget(lex, r.0); break; } else { if (end == 0) end = len(chars); @@ -512,7 +504,7 @@ fn lex_literal(lex: *lexer) (token | error) = { case '+', '-' => append(chars, utf8::encoderune(r.0)...); case => - unget(lex, r); + unget(lex, r.0); }; basechrs = "0123456789"; }; @@ -524,7 +516,7 @@ fn lex_literal(lex: *lexer) (token | error) = { if (suff is size || r.0 != 'f' && float || r.0 == 'f' && base != strconv::base::DEC) { - unget(lex, r); + unget(lex, r.0); break; } else { suff = len(chars); @@ -533,7 +525,7 @@ fn lex_literal(lex: *lexer) (token | error) = { basechrs = "0123456789"; }; case => - unget(lex, r); + unget(lex, r.0); break; } else append(chars, utf8::encoderune(r.0)...); started = true; @@ -652,7 +644,7 @@ fn lex2(lexr: *lexer) (token | error) = { lex_comment(lexr)?; return lex(lexr); case => - unget(lexr, r); + unget(lexr, r.0); return (ltok::DIV, void, first.1); }; case io::EOF => @@ -679,7 +671,7 @@ fn lex2(lexr: *lexer) (token | error) = { return (tok.1[i].1, void, first.1); }; }; - unget(lexr, r); + unget(lexr, r.0); line_comment(lexr)?; case io::EOF => void; }; @@ -736,27 +728,16 @@ fn lex3(lex: *lexer) (token | error) = { // unlex is supported at a time; you must call [[lex]] before calling [[unlex]] // again. 
export fn unlex(lex: *lexer, tok: token) void = { - assert(lex.un is void, "attempted to unlex more than one token"); + assert(lex.un.0 == ltok::EOF, "attempted to unlex more than one token"); lex.un = tok; }; fn next(lex: *lexer) ((rune, location) | syntax | io::EOF | io::error) = { - match (lex.rb[0]) { - case void => void; - case let r: ((rune, location) | io::EOF) => - lex.rb[0] = lex.rb[1]; - lex.rb[1] = void; - return r; - }; - - match (bufio::scan_rune(&lex.in)) { + match (bufio::scan_rune(lex.in)) { case let e: (io::EOF | io::error) => return e; case let r: rune => const loc = mkloc(lex); - let tmp = lex.prevrlocs; - lex.prevrlocs[1..] = tmp[..len(tmp) - 1]; - lex.prevrlocs[0] = loc; lexloc(lex, r); return (r, loc); case utf8::invalid => @@ -801,10 +782,26 @@ fn try( return r; }; }; - unget(lex, r); + unget(lex, r.0); +}; + +fn unget(lex: *lexer, r: rune) void = { + bufio::unreadrune(lex.in, r); + + // here, we set the current location to the previous location, then + // subtract one from the previous location's column. this is always + // correct, even for tabs and newlines, since a tab or newline will + // never be ungot after a previous unget call. 
besides tabs and + // newlines, the rune will always be a printable ASCII character + assert(ascii::isprint(r) || r == '\t' || r == '\n'); + assert(r != '\n' || lex.prevrloc.0 == lex.loc.0 - 1); + + lex.loc = lex.prevrloc; + lex.prevrloc.1 -= 1; }; fn lexloc(lex: *lexer, r: rune) void = { + lex.prevrloc = lex.loc; switch (r) { case '\n' => lex.loc.0 += 1; @@ -816,40 +813,23 @@ fn lexloc(lex: *lexer, r: rune) void = { }; }; -fn unget(lex: *lexer, r: ((rune, location) | io::EOF)) void = { - if (!(lex.rb[0] is void)) { - assert(lex.rb[1] is void, "ungot too many runes"); - lex.rb[1] = lex.rb[0]; - }; - lex.rb[0] = r; -}; - export fn mkloc(lex: *lexer) location = { - match (lex.un) { - case let t: token => - return lex.prevunlocs[1].1; - case void => - match (lex.rb[0]) { - case let r: (rune, location) => - return r.1; - case void => - return location { - path = lex.path, - line = lex.loc.0, - col = lex.loc.1, - }; - }; + const loc = if (lex.un.0 == ltok::EOF) lex.loc + else lex.prevunlocs[1].1; + return location { + path = lex.path, + line = loc.0, + col = loc.1, }; }; export fn prevloc(lex: *lexer) location = { - match (lex.un) { - case let t: token => - return lex.prevunlocs[1].0; - case void => - let i = 0z; - for (i < len(lex.rb); i += 1) if (lex.rb[i] is void) break; - return lex.prevrlocs[i]; + const loc = if (lex.un.0 == ltok::EOF) lex.prevrloc + else lex.prevunlocs[1].0; + return location { + path = lex.path, + line = loc.0, + col = loc.1, }; }; diff --git a/hare/module/deps.ha b/hare/module/deps.ha @@ -14,6 +14,7 @@ use os; use path; use sort; use strings; +use types; // A hare module. export type module = struct { @@ -37,8 +38,9 @@ export fn parse_deps(files: str...) 
([]ast::ident | error) = { }; defer io::close(handle)!; - let lexer = lex::init(handle, files[i]); - defer lex::finish(&lexer); + let sc = bufio::newscanner(handle, types::SIZE_MAX); + defer bufio::finish(&sc); + let lexer = lex::init(&sc, files[i]); let imports = parse::imports(&lexer)?; defer ast::imports_finish(imports); diff --git a/hare/parse/+test/ident_test.ha b/hare/parse/+test/ident_test.ha @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MPL-2.0 // (c) Hare authors <https://harelang.org> +use bufio; use hare::ast; use hare::lex; use hare::lex::{ltok}; @@ -8,11 +9,13 @@ use io; use io::{mode}; use memio; use strings; +use types; fn ident_test(in: str, expected: ast::ident, extra: ltok...) void = { let buf = memio::fixed(strings::toutf8(in)); - let lexer = lex::init(&buf, "<test>"); - defer lex::finish(&lexer); + let sc = bufio::newscanner(&buf, types::SIZE_MAX); + defer bufio::finish(&sc); + let lexer = lex::init(&sc, "<test>"); match (ident(&lexer)) { case let id: ast::ident => diff --git a/hare/parse/+test/loc.ha b/hare/parse/+test/loc.ha @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MPL-2.0 // (c) Hare authors <https://harelang.org> +use bufio; use encoding::utf8; use fmt; use hare::ast; @@ -9,11 +10,13 @@ use io; use io::{mode}; use memio; use strings; +use types; fn expr_testloc(srcs: str...) void = for (let i = 0z; i < len(srcs); i += 1) { let buf = memio::fixed(strings::toutf8(srcs[i])); - let lexer = lex::init(&buf, "<test>"); - defer lex::finish(&lexer); + let sc = bufio::newscanner(&buf, types::SIZE_MAX); + defer bufio::finish(&sc); + let lexer = lex::init(&sc, "<test>"); let exp = match (expr(&lexer)) { case let exp: ast::expr => yield exp; @@ -77,8 +80,9 @@ fn expr_testloc(srcs: str...) 
void = for (let i = 0z; i < len(srcs); i += 1) { // We want to check the location of nested expressions, so this can't // use expr_testloc let buf = memio::fixed(strings::toutf8("foo: bar: baz")); - let lexer = lex::init(&buf, "<test>"); - defer lex::finish(&lexer); + let sc = bufio::newscanner(&buf, types::SIZE_MAX); + defer bufio::finish(&sc); + let lexer = lex::init(&sc, "<test>"); let exp = match (expr(&lexer)) { case let exp: ast::expr => yield exp; @@ -101,8 +105,9 @@ fn expr_testloc(srcs: str...) void = for (let i = 0z; i < len(srcs); i += 1) { fn type_testloc(srcs: str...) void = for (let i = 0z; i < len(srcs); i += 1) { let buf = memio::fixed(strings::toutf8(srcs[i])); - let lexer = lex::init(&buf, "<test>"); - defer lex::finish(&lexer); + let sc = bufio::newscanner(&buf, types::SIZE_MAX); + defer bufio::finish(&sc); + let lexer = lex::init(&sc, "<test>"); let typ = match (_type(&lexer)) { case let typ: ast::_type => yield typ; diff --git a/hare/parse/+test/roundtrip.ha b/hare/parse/+test/roundtrip.ha @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MPL-2.0 // (c) Hare authors <https://harelang.org> +use bufio; use fmt; use hare::ast; use hare::lex; @@ -9,6 +10,7 @@ use io; use io::{mode}; use memio; use strings; +use types; fn roundtrip(src: str) void = { let unsrc = _roundtrip(src); @@ -28,8 +30,9 @@ fn roundtrip_reparse(src: str) void = { fn _roundtrip(src: str) str = { let buf = memio::fixed(strings::toutf8(src)); - let lexer = lex::init(&buf, "<test>", lex::flag::COMMENTS); - defer lex::finish(&lexer); + let sc = bufio::newscanner(&buf, types::SIZE_MAX); + defer bufio::finish(&sc); + let lexer = lex::init(&sc, "<test>", lex::flag::COMMENTS); let u = ast::subunit { imports = [], decls: []ast::decl = match (decls(&lexer)) { diff --git a/hare/parse/+test/unit_test.ha b/hare/parse/+test/unit_test.ha @@ -1,11 +1,13 @@ // SPDX-License-Identifier: MPL-2.0 // (c) Hare authors <https://harelang.org> +use bufio; use hare::ast; use hare::lex; use io::{mode}; use 
memio; use strings; +use types; fn import_eq(i1: ast::import, i2: ast::import) bool = { if (i1.mode != i2.mode) { @@ -73,8 +75,9 @@ fn tup_to_import(tup: import_tuple) ast::import = ast::import { "export fn main() void = void;"; let buf = memio::fixed(strings::toutf8(in)); - let lexer = lex::init(&buf, "<test>"); - defer lex::finish(&lexer); + let sc = bufio::newscanner(&buf, types::SIZE_MAX); + defer bufio::finish(&sc); + let lexer = lex::init(&sc, "<test>"); let mods = imports(&lexer)!; defer ast::imports_finish(mods); @@ -114,8 +117,9 @@ fn tup_to_import(tup: import_tuple) ast::import = ast::import { const in = "use a::{b = c = d};\n"; let buf = memio::fixed(strings::toutf8(in)); - let lexer = lex::init(&buf, "<test>"); - defer lex::finish(&lexer); + let sc = bufio::newscanner(&buf, types::SIZE_MAX); + defer bufio::finish(&sc); + let lexer = lex::init(&sc, "<test>"); assert(imports(&lexer) is error); }; diff --git a/hare/parse/ident.ha b/hare/parse/ident.ha @@ -1,11 +1,13 @@ // SPDX-License-Identifier: MPL-2.0 // (c) Hare authors <https://harelang.org> +use bufio; use hare::ast; use hare::lex; use hare::lex::{ltok}; use memio; use strings; +use types; fn ident_trailing(lexer: *lex::lexer) ((ast::ident, bool) | error) = { let ident: []str = []; @@ -47,9 +49,10 @@ export fn ident(lexer: *lex::lexer) (ast::ident | error) = { // A convenience function which parses an identifier from a string, so the // caller needn't provide a lexer instance. 
export fn identstr(in: str) (ast::ident | error) = { - const buf = memio::fixed(strings::toutf8(in)); - const lexer = lex::init(&buf, "<string>"); - defer lex::finish(&lexer); + let in = memio::fixed(strings::toutf8(in)); + let sc = bufio::newscanner(&in, types::SIZE_MAX); + defer bufio::finish(&sc); + let lexer = lex::init(&sc, "<string>"); let ret = ident(&lexer); want(&lexer, ltok::EOF)?; return ret; diff --git a/hare/types/+test.ha b/hare/types/+test.ha @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MPL-2.0 // (c) Hare authors <https://harelang.org> +use bufio; use fmt; use hare::ast; use hare::lex; @@ -8,11 +9,13 @@ use hare::parse; use io; use memio; use strings; +use types; fn parse_type(in: str) ast::_type = { let buf = memio::fixed(strings::toutf8(in)); - let lex = lex::init(&buf, "<test>"); - defer lex::finish(&lex); + let sc = bufio::newscanner(&buf, types::SIZE_MAX); + defer bufio::finish(&sc); + let lex = lex::init(&sc, "<test>"); return parse::_type(&lex)!; }; diff --git a/hare/unit/+test.ha b/hare/unit/+test.ha @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MPL-2.0 // (c) Hare authors <https://harelang.org> +use bufio; use hare::ast; use hare::lex; use hare::parse; @@ -8,11 +9,13 @@ use hare::types; use io; use memio; use strings; +use types::{SIZE_MAX}; fn parse_expr(src: str) *ast::expr = { - const stream = memio::fixed(strings::toutf8(src)); - const lexer = lex::init(&stream, "<test>"); - defer lex::finish(&lexer); + let stream = memio::fixed(strings::toutf8(src)); + let sc = bufio::newscanner(&stream, SIZE_MAX); + defer bufio::finish(&sc); + let lexer = lex::init(&sc, "<test>"); return alloc(parse::expr(&lexer)!); }; diff --git a/makefiles/freebsd.aarch64.mk b/makefiles/freebsd.aarch64.mk @@ -172,7 +172,7 @@ $(HARECACHE)/hare_ast.ssa: $(hare_ast_ha) $(HARECACHE)/hare_lex.td $(HARECACHE)/ @$(TDENV) $(HAREC) $(HARECFLAGS) -o $(HARECACHE)/hare_ast.ssa -t $(HARECACHE)/hare_ast.td.tmp -N hare::ast $(hare_ast_ha) hare_parse_ha = hare/parse/decl.ha 
hare/parse/expr.ha hare/parse/ident.ha hare/parse/import.ha hare/parse/parse.ha hare/parse/type.ha hare/parse/unit.ha -$(HARECACHE)/hare_parse.ssa: $(hare_parse_ha) $(HARECACHE)/ascii.td $(HARECACHE)/fmt.td $(HARECACHE)/hare_ast.td $(HARECACHE)/hare_lex.td $(HARECACHE)/io.td $(HARECACHE)/math.td $(HARECACHE)/memio.td $(HARECACHE)/strings.td $(HARECACHE)/types.td +$(HARECACHE)/hare_parse.ssa: $(hare_parse_ha) $(HARECACHE)/ascii.td $(HARECACHE)/bufio.td $(HARECACHE)/fmt.td $(HARECACHE)/hare_ast.td $(HARECACHE)/hare_lex.td $(HARECACHE)/io.td $(HARECACHE)/math.td $(HARECACHE)/memio.td $(HARECACHE)/strings.td $(HARECACHE)/types.td @mkdir -p -- "$(HARECACHE)" @printf 'HAREC\t%s\n' "$@" @$(TDENV) $(HAREC) $(HARECFLAGS) -o $(HARECACHE)/hare_parse.ssa -t $(HARECACHE)/hare_parse.td.tmp -N hare::parse $(hare_parse_ha) @@ -196,7 +196,7 @@ $(HARECACHE)/time_date.ssa: $(time_date_ha) $(HARECACHE)/ascii.td $(HARECACHE)/f @$(TDENV) $(HAREC) $(HARECFLAGS) -o $(HARECACHE)/time_date.ssa -t $(HARECACHE)/time_date.td.tmp -N time::date $(time_date_ha) hare_module_ha = hare/module/cache.ha hare/module/deps.ha hare/module/format.ha hare/module/srcs.ha hare/module/types.ha hare/module/util.ha -$(HARECACHE)/hare_module.ssa: $(hare_module_ha) $(HARECACHE)/ascii.td $(HARECACHE)/bufio.td $(HARECACHE)/bytes.td $(HARECACHE)/encoding_utf8.td $(HARECACHE)/fmt.td $(HARECACHE)/fs.td $(HARECACHE)/hare_ast.td $(HARECACHE)/hare_lex.td $(HARECACHE)/hare_parse.td $(HARECACHE)/hare_unparse.td $(HARECACHE)/io.td $(HARECACHE)/memio.td $(HARECACHE)/os.td $(HARECACHE)/path.td $(HARECACHE)/sort.td $(HARECACHE)/sort_cmp.td $(HARECACHE)/strings.td $(HARECACHE)/time.td $(HARECACHE)/time_chrono.td $(HARECACHE)/time_date.td +$(HARECACHE)/hare_module.ssa: $(hare_module_ha) $(HARECACHE)/ascii.td $(HARECACHE)/bufio.td $(HARECACHE)/bytes.td $(HARECACHE)/encoding_utf8.td $(HARECACHE)/fmt.td $(HARECACHE)/fs.td $(HARECACHE)/hare_ast.td $(HARECACHE)/hare_lex.td $(HARECACHE)/hare_parse.td $(HARECACHE)/hare_unparse.td 
$(HARECACHE)/io.td $(HARECACHE)/memio.td $(HARECACHE)/os.td $(HARECACHE)/path.td $(HARECACHE)/sort.td $(HARECACHE)/sort_cmp.td $(HARECACHE)/strings.td $(HARECACHE)/time.td $(HARECACHE)/time_chrono.td $(HARECACHE)/time_date.td $(HARECACHE)/types.td @mkdir -p -- "$(HARECACHE)" @printf 'HAREC\t%s\n' "$@" @$(TDENV) $(HAREC) $(HARECFLAGS) -o $(HARECACHE)/hare_module.ssa -t $(HARECACHE)/hare_module.td.tmp -N hare::module $(hare_module_ha) diff --git a/makefiles/freebsd.riscv64.mk b/makefiles/freebsd.riscv64.mk @@ -172,7 +172,7 @@ $(HARECACHE)/hare_ast.ssa: $(hare_ast_ha) $(HARECACHE)/hare_lex.td $(HARECACHE)/ @$(TDENV) $(HAREC) $(HARECFLAGS) -o $(HARECACHE)/hare_ast.ssa -t $(HARECACHE)/hare_ast.td.tmp -N hare::ast $(hare_ast_ha) hare_parse_ha = hare/parse/decl.ha hare/parse/expr.ha hare/parse/ident.ha hare/parse/import.ha hare/parse/parse.ha hare/parse/type.ha hare/parse/unit.ha -$(HARECACHE)/hare_parse.ssa: $(hare_parse_ha) $(HARECACHE)/ascii.td $(HARECACHE)/fmt.td $(HARECACHE)/hare_ast.td $(HARECACHE)/hare_lex.td $(HARECACHE)/io.td $(HARECACHE)/math.td $(HARECACHE)/memio.td $(HARECACHE)/strings.td $(HARECACHE)/types.td +$(HARECACHE)/hare_parse.ssa: $(hare_parse_ha) $(HARECACHE)/ascii.td $(HARECACHE)/bufio.td $(HARECACHE)/fmt.td $(HARECACHE)/hare_ast.td $(HARECACHE)/hare_lex.td $(HARECACHE)/io.td $(HARECACHE)/math.td $(HARECACHE)/memio.td $(HARECACHE)/strings.td $(HARECACHE)/types.td @mkdir -p -- "$(HARECACHE)" @printf 'HAREC\t%s\n' "$@" @$(TDENV) $(HAREC) $(HARECFLAGS) -o $(HARECACHE)/hare_parse.ssa -t $(HARECACHE)/hare_parse.td.tmp -N hare::parse $(hare_parse_ha) @@ -196,7 +196,7 @@ $(HARECACHE)/time_date.ssa: $(time_date_ha) $(HARECACHE)/ascii.td $(HARECACHE)/f @$(TDENV) $(HAREC) $(HARECFLAGS) -o $(HARECACHE)/time_date.ssa -t $(HARECACHE)/time_date.td.tmp -N time::date $(time_date_ha) hare_module_ha = hare/module/cache.ha hare/module/deps.ha hare/module/format.ha hare/module/srcs.ha hare/module/types.ha hare/module/util.ha -$(HARECACHE)/hare_module.ssa: 
$(hare_module_ha) $(HARECACHE)/ascii.td $(HARECACHE)/bufio.td $(HARECACHE)/bytes.td $(HARECACHE)/encoding_utf8.td $(HARECACHE)/fmt.td $(HARECACHE)/fs.td $(HARECACHE)/hare_ast.td $(HARECACHE)/hare_lex.td $(HARECACHE)/hare_parse.td $(HARECACHE)/hare_unparse.td $(HARECACHE)/io.td $(HARECACHE)/memio.td $(HARECACHE)/os.td $(HARECACHE)/path.td $(HARECACHE)/sort.td $(HARECACHE)/sort_cmp.td $(HARECACHE)/strings.td $(HARECACHE)/time.td $(HARECACHE)/time_chrono.td $(HARECACHE)/time_date.td +$(HARECACHE)/hare_module.ssa: $(hare_module_ha) $(HARECACHE)/ascii.td $(HARECACHE)/bufio.td $(HARECACHE)/bytes.td $(HARECACHE)/encoding_utf8.td $(HARECACHE)/fmt.td $(HARECACHE)/fs.td $(HARECACHE)/hare_ast.td $(HARECACHE)/hare_lex.td $(HARECACHE)/hare_parse.td $(HARECACHE)/hare_unparse.td $(HARECACHE)/io.td $(HARECACHE)/memio.td $(HARECACHE)/os.td $(HARECACHE)/path.td $(HARECACHE)/sort.td $(HARECACHE)/sort_cmp.td $(HARECACHE)/strings.td $(HARECACHE)/time.td $(HARECACHE)/time_chrono.td $(HARECACHE)/time_date.td $(HARECACHE)/types.td @mkdir -p -- "$(HARECACHE)" @printf 'HAREC\t%s\n' "$@" @$(TDENV) $(HAREC) $(HARECFLAGS) -o $(HARECACHE)/hare_module.ssa -t $(HARECACHE)/hare_module.td.tmp -N hare::module $(hare_module_ha) diff --git a/makefiles/freebsd.x86_64.mk b/makefiles/freebsd.x86_64.mk @@ -172,7 +172,7 @@ $(HARECACHE)/hare_ast.ssa: $(hare_ast_ha) $(HARECACHE)/hare_lex.td $(HARECACHE)/ @$(TDENV) $(HAREC) $(HARECFLAGS) -o $(HARECACHE)/hare_ast.ssa -t $(HARECACHE)/hare_ast.td.tmp -N hare::ast $(hare_ast_ha) hare_parse_ha = hare/parse/decl.ha hare/parse/expr.ha hare/parse/ident.ha hare/parse/import.ha hare/parse/parse.ha hare/parse/type.ha hare/parse/unit.ha -$(HARECACHE)/hare_parse.ssa: $(hare_parse_ha) $(HARECACHE)/ascii.td $(HARECACHE)/fmt.td $(HARECACHE)/hare_ast.td $(HARECACHE)/hare_lex.td $(HARECACHE)/io.td $(HARECACHE)/math.td $(HARECACHE)/memio.td $(HARECACHE)/strings.td $(HARECACHE)/types.td +$(HARECACHE)/hare_parse.ssa: $(hare_parse_ha) $(HARECACHE)/ascii.td $(HARECACHE)/bufio.td 
$(HARECACHE)/fmt.td $(HARECACHE)/hare_ast.td $(HARECACHE)/hare_lex.td $(HARECACHE)/io.td $(HARECACHE)/math.td $(HARECACHE)/memio.td $(HARECACHE)/strings.td $(HARECACHE)/types.td @mkdir -p -- "$(HARECACHE)" @printf 'HAREC\t%s\n' "$@" @$(TDENV) $(HAREC) $(HARECFLAGS) -o $(HARECACHE)/hare_parse.ssa -t $(HARECACHE)/hare_parse.td.tmp -N hare::parse $(hare_parse_ha) @@ -196,7 +196,7 @@ $(HARECACHE)/time_date.ssa: $(time_date_ha) $(HARECACHE)/ascii.td $(HARECACHE)/f @$(TDENV) $(HAREC) $(HARECFLAGS) -o $(HARECACHE)/time_date.ssa -t $(HARECACHE)/time_date.td.tmp -N time::date $(time_date_ha) hare_module_ha = hare/module/cache.ha hare/module/deps.ha hare/module/format.ha hare/module/srcs.ha hare/module/types.ha hare/module/util.ha -$(HARECACHE)/hare_module.ssa: $(hare_module_ha) $(HARECACHE)/ascii.td $(HARECACHE)/bufio.td $(HARECACHE)/bytes.td $(HARECACHE)/encoding_utf8.td $(HARECACHE)/fmt.td $(HARECACHE)/fs.td $(HARECACHE)/hare_ast.td $(HARECACHE)/hare_lex.td $(HARECACHE)/hare_parse.td $(HARECACHE)/hare_unparse.td $(HARECACHE)/io.td $(HARECACHE)/memio.td $(HARECACHE)/os.td $(HARECACHE)/path.td $(HARECACHE)/sort.td $(HARECACHE)/sort_cmp.td $(HARECACHE)/strings.td $(HARECACHE)/time.td $(HARECACHE)/time_chrono.td $(HARECACHE)/time_date.td +$(HARECACHE)/hare_module.ssa: $(hare_module_ha) $(HARECACHE)/ascii.td $(HARECACHE)/bufio.td $(HARECACHE)/bytes.td $(HARECACHE)/encoding_utf8.td $(HARECACHE)/fmt.td $(HARECACHE)/fs.td $(HARECACHE)/hare_ast.td $(HARECACHE)/hare_lex.td $(HARECACHE)/hare_parse.td $(HARECACHE)/hare_unparse.td $(HARECACHE)/io.td $(HARECACHE)/memio.td $(HARECACHE)/os.td $(HARECACHE)/path.td $(HARECACHE)/sort.td $(HARECACHE)/sort_cmp.td $(HARECACHE)/strings.td $(HARECACHE)/time.td $(HARECACHE)/time_chrono.td $(HARECACHE)/time_date.td $(HARECACHE)/types.td @mkdir -p -- "$(HARECACHE)" @printf 'HAREC\t%s\n' "$@" @$(TDENV) $(HAREC) $(HARECFLAGS) -o $(HARECACHE)/hare_module.ssa -t $(HARECACHE)/hare_module.td.tmp -N hare::module $(hare_module_ha) diff --git 
a/makefiles/linux.aarch64.mk b/makefiles/linux.aarch64.mk @@ -190,7 +190,7 @@ $(HARECACHE)/hare_ast.ssa: $(hare_ast_ha) $(HARECACHE)/hare_lex.td $(HARECACHE)/ @$(TDENV) $(HAREC) $(HARECFLAGS) -o $(HARECACHE)/hare_ast.ssa -t $(HARECACHE)/hare_ast.td.tmp -N hare::ast $(hare_ast_ha) hare_parse_ha = hare/parse/decl.ha hare/parse/expr.ha hare/parse/ident.ha hare/parse/import.ha hare/parse/parse.ha hare/parse/type.ha hare/parse/unit.ha -$(HARECACHE)/hare_parse.ssa: $(hare_parse_ha) $(HARECACHE)/ascii.td $(HARECACHE)/fmt.td $(HARECACHE)/hare_ast.td $(HARECACHE)/hare_lex.td $(HARECACHE)/io.td $(HARECACHE)/math.td $(HARECACHE)/memio.td $(HARECACHE)/strings.td $(HARECACHE)/types.td +$(HARECACHE)/hare_parse.ssa: $(hare_parse_ha) $(HARECACHE)/ascii.td $(HARECACHE)/bufio.td $(HARECACHE)/fmt.td $(HARECACHE)/hare_ast.td $(HARECACHE)/hare_lex.td $(HARECACHE)/io.td $(HARECACHE)/math.td $(HARECACHE)/memio.td $(HARECACHE)/strings.td $(HARECACHE)/types.td @mkdir -p -- "$(HARECACHE)" @printf 'HAREC\t%s\n' "$@" @$(TDENV) $(HAREC) $(HARECFLAGS) -o $(HARECACHE)/hare_parse.ssa -t $(HARECACHE)/hare_parse.td.tmp -N hare::parse $(hare_parse_ha) @@ -214,7 +214,7 @@ $(HARECACHE)/time_date.ssa: $(time_date_ha) $(HARECACHE)/ascii.td $(HARECACHE)/f @$(TDENV) $(HAREC) $(HARECFLAGS) -o $(HARECACHE)/time_date.ssa -t $(HARECACHE)/time_date.td.tmp -N time::date $(time_date_ha) hare_module_ha = hare/module/cache.ha hare/module/deps.ha hare/module/format.ha hare/module/srcs.ha hare/module/types.ha hare/module/util.ha -$(HARECACHE)/hare_module.ssa: $(hare_module_ha) $(HARECACHE)/ascii.td $(HARECACHE)/bufio.td $(HARECACHE)/bytes.td $(HARECACHE)/encoding_utf8.td $(HARECACHE)/fmt.td $(HARECACHE)/fs.td $(HARECACHE)/hare_ast.td $(HARECACHE)/hare_lex.td $(HARECACHE)/hare_parse.td $(HARECACHE)/hare_unparse.td $(HARECACHE)/io.td $(HARECACHE)/memio.td $(HARECACHE)/os.td $(HARECACHE)/path.td $(HARECACHE)/sort.td $(HARECACHE)/sort_cmp.td $(HARECACHE)/strings.td $(HARECACHE)/time.td $(HARECACHE)/time_chrono.td 
$(HARECACHE)/time_date.td +$(HARECACHE)/hare_module.ssa: $(hare_module_ha) $(HARECACHE)/ascii.td $(HARECACHE)/bufio.td $(HARECACHE)/bytes.td $(HARECACHE)/encoding_utf8.td $(HARECACHE)/fmt.td $(HARECACHE)/fs.td $(HARECACHE)/hare_ast.td $(HARECACHE)/hare_lex.td $(HARECACHE)/hare_parse.td $(HARECACHE)/hare_unparse.td $(HARECACHE)/io.td $(HARECACHE)/memio.td $(HARECACHE)/os.td $(HARECACHE)/path.td $(HARECACHE)/sort.td $(HARECACHE)/sort_cmp.td $(HARECACHE)/strings.td $(HARECACHE)/time.td $(HARECACHE)/time_chrono.td $(HARECACHE)/time_date.td $(HARECACHE)/types.td @mkdir -p -- "$(HARECACHE)" @printf 'HAREC\t%s\n' "$@" @$(TDENV) $(HAREC) $(HARECFLAGS) -o $(HARECACHE)/hare_module.ssa -t $(HARECACHE)/hare_module.td.tmp -N hare::module $(hare_module_ha) diff --git a/makefiles/linux.riscv64.mk b/makefiles/linux.riscv64.mk @@ -190,7 +190,7 @@ $(HARECACHE)/hare_ast.ssa: $(hare_ast_ha) $(HARECACHE)/hare_lex.td $(HARECACHE)/ @$(TDENV) $(HAREC) $(HARECFLAGS) -o $(HARECACHE)/hare_ast.ssa -t $(HARECACHE)/hare_ast.td.tmp -N hare::ast $(hare_ast_ha) hare_parse_ha = hare/parse/decl.ha hare/parse/expr.ha hare/parse/ident.ha hare/parse/import.ha hare/parse/parse.ha hare/parse/type.ha hare/parse/unit.ha -$(HARECACHE)/hare_parse.ssa: $(hare_parse_ha) $(HARECACHE)/ascii.td $(HARECACHE)/fmt.td $(HARECACHE)/hare_ast.td $(HARECACHE)/hare_lex.td $(HARECACHE)/io.td $(HARECACHE)/math.td $(HARECACHE)/memio.td $(HARECACHE)/strings.td $(HARECACHE)/types.td +$(HARECACHE)/hare_parse.ssa: $(hare_parse_ha) $(HARECACHE)/ascii.td $(HARECACHE)/bufio.td $(HARECACHE)/fmt.td $(HARECACHE)/hare_ast.td $(HARECACHE)/hare_lex.td $(HARECACHE)/io.td $(HARECACHE)/math.td $(HARECACHE)/memio.td $(HARECACHE)/strings.td $(HARECACHE)/types.td @mkdir -p -- "$(HARECACHE)" @printf 'HAREC\t%s\n' "$@" @$(TDENV) $(HAREC) $(HARECFLAGS) -o $(HARECACHE)/hare_parse.ssa -t $(HARECACHE)/hare_parse.td.tmp -N hare::parse $(hare_parse_ha) @@ -214,7 +214,7 @@ $(HARECACHE)/time_date.ssa: $(time_date_ha) $(HARECACHE)/ascii.td $(HARECACHE)/f 
@$(TDENV) $(HAREC) $(HARECFLAGS) -o $(HARECACHE)/time_date.ssa -t $(HARECACHE)/time_date.td.tmp -N time::date $(time_date_ha) hare_module_ha = hare/module/cache.ha hare/module/deps.ha hare/module/format.ha hare/module/srcs.ha hare/module/types.ha hare/module/util.ha -$(HARECACHE)/hare_module.ssa: $(hare_module_ha) $(HARECACHE)/ascii.td $(HARECACHE)/bufio.td $(HARECACHE)/bytes.td $(HARECACHE)/encoding_utf8.td $(HARECACHE)/fmt.td $(HARECACHE)/fs.td $(HARECACHE)/hare_ast.td $(HARECACHE)/hare_lex.td $(HARECACHE)/hare_parse.td $(HARECACHE)/hare_unparse.td $(HARECACHE)/io.td $(HARECACHE)/memio.td $(HARECACHE)/os.td $(HARECACHE)/path.td $(HARECACHE)/sort.td $(HARECACHE)/sort_cmp.td $(HARECACHE)/strings.td $(HARECACHE)/time.td $(HARECACHE)/time_chrono.td $(HARECACHE)/time_date.td +$(HARECACHE)/hare_module.ssa: $(hare_module_ha) $(HARECACHE)/ascii.td $(HARECACHE)/bufio.td $(HARECACHE)/bytes.td $(HARECACHE)/encoding_utf8.td $(HARECACHE)/fmt.td $(HARECACHE)/fs.td $(HARECACHE)/hare_ast.td $(HARECACHE)/hare_lex.td $(HARECACHE)/hare_parse.td $(HARECACHE)/hare_unparse.td $(HARECACHE)/io.td $(HARECACHE)/memio.td $(HARECACHE)/os.td $(HARECACHE)/path.td $(HARECACHE)/sort.td $(HARECACHE)/sort_cmp.td $(HARECACHE)/strings.td $(HARECACHE)/time.td $(HARECACHE)/time_chrono.td $(HARECACHE)/time_date.td $(HARECACHE)/types.td @mkdir -p -- "$(HARECACHE)" @printf 'HAREC\t%s\n' "$@" @$(TDENV) $(HAREC) $(HARECFLAGS) -o $(HARECACHE)/hare_module.ssa -t $(HARECACHE)/hare_module.td.tmp -N hare::module $(hare_module_ha) diff --git a/makefiles/linux.x86_64.mk b/makefiles/linux.x86_64.mk @@ -190,7 +190,7 @@ $(HARECACHE)/hare_ast.ssa: $(hare_ast_ha) $(HARECACHE)/hare_lex.td $(HARECACHE)/ @$(TDENV) $(HAREC) $(HARECFLAGS) -o $(HARECACHE)/hare_ast.ssa -t $(HARECACHE)/hare_ast.td.tmp -N hare::ast $(hare_ast_ha) hare_parse_ha = hare/parse/decl.ha hare/parse/expr.ha hare/parse/ident.ha hare/parse/import.ha hare/parse/parse.ha hare/parse/type.ha hare/parse/unit.ha -$(HARECACHE)/hare_parse.ssa: $(hare_parse_ha) 
$(HARECACHE)/ascii.td $(HARECACHE)/fmt.td $(HARECACHE)/hare_ast.td $(HARECACHE)/hare_lex.td $(HARECACHE)/io.td $(HARECACHE)/math.td $(HARECACHE)/memio.td $(HARECACHE)/strings.td $(HARECACHE)/types.td +$(HARECACHE)/hare_parse.ssa: $(hare_parse_ha) $(HARECACHE)/ascii.td $(HARECACHE)/bufio.td $(HARECACHE)/fmt.td $(HARECACHE)/hare_ast.td $(HARECACHE)/hare_lex.td $(HARECACHE)/io.td $(HARECACHE)/math.td $(HARECACHE)/memio.td $(HARECACHE)/strings.td $(HARECACHE)/types.td @mkdir -p -- "$(HARECACHE)" @printf 'HAREC\t%s\n' "$@" @$(TDENV) $(HAREC) $(HARECFLAGS) -o $(HARECACHE)/hare_parse.ssa -t $(HARECACHE)/hare_parse.td.tmp -N hare::parse $(hare_parse_ha) @@ -214,7 +214,7 @@ $(HARECACHE)/time_date.ssa: $(time_date_ha) $(HARECACHE)/ascii.td $(HARECACHE)/f @$(TDENV) $(HAREC) $(HARECFLAGS) -o $(HARECACHE)/time_date.ssa -t $(HARECACHE)/time_date.td.tmp -N time::date $(time_date_ha) hare_module_ha = hare/module/cache.ha hare/module/deps.ha hare/module/format.ha hare/module/srcs.ha hare/module/types.ha hare/module/util.ha -$(HARECACHE)/hare_module.ssa: $(hare_module_ha) $(HARECACHE)/ascii.td $(HARECACHE)/bufio.td $(HARECACHE)/bytes.td $(HARECACHE)/encoding_utf8.td $(HARECACHE)/fmt.td $(HARECACHE)/fs.td $(HARECACHE)/hare_ast.td $(HARECACHE)/hare_lex.td $(HARECACHE)/hare_parse.td $(HARECACHE)/hare_unparse.td $(HARECACHE)/io.td $(HARECACHE)/memio.td $(HARECACHE)/os.td $(HARECACHE)/path.td $(HARECACHE)/sort.td $(HARECACHE)/sort_cmp.td $(HARECACHE)/strings.td $(HARECACHE)/time.td $(HARECACHE)/time_chrono.td $(HARECACHE)/time_date.td +$(HARECACHE)/hare_module.ssa: $(hare_module_ha) $(HARECACHE)/ascii.td $(HARECACHE)/bufio.td $(HARECACHE)/bytes.td $(HARECACHE)/encoding_utf8.td $(HARECACHE)/fmt.td $(HARECACHE)/fs.td $(HARECACHE)/hare_ast.td $(HARECACHE)/hare_lex.td $(HARECACHE)/hare_parse.td $(HARECACHE)/hare_unparse.td $(HARECACHE)/io.td $(HARECACHE)/memio.td $(HARECACHE)/os.td $(HARECACHE)/path.td $(HARECACHE)/sort.td $(HARECACHE)/sort_cmp.td $(HARECACHE)/strings.td $(HARECACHE)/time.td 
$(HARECACHE)/time_chrono.td $(HARECACHE)/time_date.td $(HARECACHE)/types.td @mkdir -p -- "$(HARECACHE)" @printf 'HAREC\t%s\n' "$@" @$(TDENV) $(HAREC) $(HARECFLAGS) -o $(HARECACHE)/hare_module.ssa -t $(HARECACHE)/hare_module.td.tmp -N hare::module $(hare_module_ha)