commit 6760f9a1bb6dd0e0e4042dd4e1313e5f000a1fb2
parent 2bf3ac9ba06edd5389da88cff317380411c78a2d
Author: Drew DeVault <sir@cmpwn.com>
Date: Fri, 22 Jan 2021 13:09:50 -0500
rt: import allocator from harec
We'll replace this with a better one eventually.
Diffstat:
5 files changed, 273 insertions(+), 0 deletions(-)
diff --git a/rt/+linux/abort.ha b/rt/+linux/abort.ha
@@ -5,3 +5,12 @@ export @noreturn @symbol("rt.abort") fn _abort(msg: str) void = {
write(2, "\n": *const char, 1z);
kill(getpid(), SIGABRT);
};
+
+// See harec:include/gen.h
+const reasons: [3]str = [
+ "slice or array access out of bounds", // 0
+ "type assertion failed", // 1
+ "allocation failed", // 2
+];
+
+export @noreturn fn abort_fixed(i: int) void = _abort(reasons[i]);
diff --git a/rt/+linux/segmalloc.ha b/rt/+linux/segmalloc.ha
@@ -0,0 +1,17 @@
+// Allocates a segment.
+fn segmalloc(n: size) nullable *void = {
+ let p: *void = mmap(null, n,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON, -1, 0z);
+ // TODO: remove the cast to nullable *void
+ return if (p: uintptr: int == -ENOMEM) null: nullable *void else p: nullable *void;
+};
+
+// Frees a segment allocated with segmalloc.
+fn segfree(p: *void, s: size) int = munmap(p, s);
+
+// Marks a segment as writable and drops the execute bit.
+fn segwrite(seg: *void, n: size) void = mprotect(seg, n, PROT_READ | PROT_WRITE);
+
+// Marks a segment as executable and drops the write bit.
+fn segexec(seg: *void, n: size) void = mprotect(seg, n, PROT_READ | PROT_EXEC);
diff --git a/rt/+linux/syscalls.ha b/rt/+linux/syscalls.ha
@@ -48,3 +48,65 @@ export def SIGIO: int = 29;
export def SIGPOLL: int = 29;
export def SIGPWR: int = 30;
export def SIGSYS: int = 31;
+
+export def MAP_SHARED: uint = 0x01u;
+export def MAP_PRIVATE: uint = 0x02u;
+export def MAP_SHARED_VALIDATE: uint = 0x03u;
+export def MAP_FIXED: uint = 0x10u;
+export def MAP_ANON: uint = 0x20u;
+export def MAP_NORESERVE: uint = 0x4000u;
+export def MAP_GROWSDOWN: uint = 0x0100u;
+export def MAP_DENYWRITE: uint = 0x0800u;
+export def MAP_EXECUTABLE: uint = 0x1000u;
+export def MAP_LOCKED: uint = 0x2000u;
+export def MAP_POPULATE: uint = 0x8000u;
+export def MAP_NONBLOCK: uint = 0x10000u;
+export def MAP_STACK: uint = 0x20000u;
+export def MAP_HUGETLB: uint = 0x40000u;
+export def MAP_SYNC: uint = 0x80000u;
+export def MAP_FIXED_NOREPLACE: uint = 0x100000u;
+export def MAP_FILE: uint = 0u;
+export def MAP_HUGE_SHIFT: uint = 26u;
+export def MAP_HUGE_MASK: uint = 0x3fu;
+export def MAP_HUGE_64KB: uint = 16u << 26u;
+export def MAP_HUGE_512KB: uint = 19u << 26u;
+export def MAP_HUGE_1MB: uint = 20u << 26u;
+export def MAP_HUGE_2MB: uint = 21u << 26u;
+export def MAP_HUGE_8MB: uint = 23u << 26u;
+export def MAP_HUGE_16MB: uint = 24u << 26u;
+export def MAP_HUGE_32MB: uint = 25u << 26u;
+export def MAP_HUGE_256MB: uint = 28u << 26u;
+export def MAP_HUGE_512MB: uint = 29u << 26u;
+export def MAP_HUGE_1GB: uint = 30u << 26u;
+export def MAP_HUGE_2GB: uint = 31u << 26u;
+export def MAP_HUGE_16GB: uint = 34u << 26u;
+
+export def PROT_NONE: uint = 0u;
+export def PROT_READ: uint = 1u;
+export def PROT_WRITE: uint = 2u;
+export def PROT_EXEC: uint = 4u;
+export def PROT_GROWSDOWN: uint = 0x01000000u;
+export def PROT_GROWSUP: uint = 0x02000000u;
+
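+// Invokes the mmap syscall. Errors are returned as a negative errno value cast
+// to a pointer, rather than as null.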
+export fn mmap(
+ addr: nullable *void,
+ length: size,
+ prot: uint,
+ flags: uint,
+ fd: int,
+ offs: size
+) *void = {
+ let r: u64 = syscall6(SYS_mmap, addr: uintptr: u64,
+ length: u64, prot: u64, flags: u64, fd: u64, offs: u64): u64;
+ // TODO: Type promotion
+ return if (r: int == -EPERM && addr: uintptr == null: uintptr
+ && (flags & MAP_ANON) > 0u && (flags & MAP_FIXED) == 0u) {
+ (-ENOMEM): uintptr: *void; // Fix up incorrect EPERM from kernel
+ } else r: uintptr: *void;
+};
+
+export fn munmap(addr: *void, length: size) int =
+ syscall2(SYS_munmap, addr: uintptr: u64, length: u64): int;
+
+export fn mprotect(addr: *void, length: size, prot: uint) int =
+ syscall3(SYS_mprotect, addr: uintptr: u64, length: u64, prot: u64): int;
diff --git a/rt/malloc.ha b/rt/malloc.ha
@@ -0,0 +1,179 @@
+// This is a simple memory allocator, based on Appel, Andrew W., and David A.
+// Naumann. "Verified sequential malloc/free." It is not thread-safe.
+//
+// Large allocations are handled with mmap.
+//
+// For small allocations, we set up 50 bins, where each bin is responsible for
+// 16 different allocation sizes (e.g. with 8-byte words, bin 1 handles
+// allocations of 9 thru 24 bytes); except for the first and last bin, which
+// are responsible for fewer than 16 allocation sizes.
+//
+// Each bin is 1MiB (BIGBLOCK) in size. We round the allocation size up to the
+// largest size supported by its bin, then break the bin up into smaller
+// blocks. Each block is structured as [{sz: size, data..., link: *void}...];
+// where sz is the size of this (small) block, data is set aside for the user's
+// actual allocation, and link is a pointer to the next block's data field.
+//
+// In short, a bin for a particular size is pre-filled with all allocations of
+// that size, and the first word of each allocation is set to a pointer to the
+// next allocation. As such, malloc becomes:
+//
+// 1. Look up bin; pre-fill if not already allocated
+// 2. Let p = bin; bin = *bin; return p
+//
+// Then, free is simply:
+// 1. Look up bin
+// 2. *p = bin;
+// 3. bin = p;
+//
+// Note that over time this can cause the blocks on each bin's free list to
+// fall out of address order. This has no consequences for performance or
+// correctness.
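+//
+// As a worked example (assuming 8-byte words, i.e. WORD = size(size) = 8 and
+// ALIGN = 2): bin2size(1) = ((1 + 1) * 2 - 1) * 8 = 24 and size2bin(17) =
+// (17 + 7) / 16 = 1, so a 17-byte request is rounded up to a 24-byte block
+// taken from bin 1, which serves all requests of 9 thru 24 bytes.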
+
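+// ALIGN * WORD bytes (16 with 8-byte words) is the granularity and alignment
+// of small allocations; WASTE pads the start of each mmapped segment so that
+// the data following the first size word stays aligned; BIGBLOCK (1MiB) is the
+// size of each bin.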
+// TODO: Type promotion
+def ALIGN: size = 2z;
+def WORD: size = size(size);
+def WASTE: size = WORD * ALIGN - WORD;
+def BIGBLOCK: size = (2z << 16z) * WORD;
+
+// TODO: Expandable arrays
+let bins: [50]nullable *void = [
+ null, null, null, null, null, null, null, null, null, null, null, null,
+ null, null, null, null, null, null, null, null, null, null, null, null,
+ null, null, null, null, null, null, null, null, null, null, null, null,
+ null, null, null, null, null, null, null, null, null, null, null, null,
+ null, null,
+];
+
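+// Returns the largest allocation size (in bytes) served by bin b.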
+fn bin2size(b: size) size = ((b + 1z) * ALIGN - 1z) * WORD;
+
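+// Returns the index of the bin which serves allocations of s bytes.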
+fn size2bin(s: size) size = {
+ assert(s <= bin2size(len(bins) - 1z), "Size exceeds maximum for bin");
+ return (s + (WORD * (ALIGN - 1z) - 1z)) / (WORD * ALIGN);
+};
+
+// Allocates n bytes of memory and returns a pointer to them, or null if there
+// is insufficient memory.
+export fn malloc(n: size) nullable *void = {
+ assert(n > 0z);
+ let r: nullable *void =
+ if (n > bin2size(len(bins) - 1z)) malloc_large(n)
+ else malloc_small(n);
+ return r;
+};
+
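+// Allocates a large object in its own mmapped segment, recording its size in
+// the word immediately before the returned pointer.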
+fn malloc_large(n: size) nullable *void = {
+ let p = segmalloc(n + WASTE + WORD);
+ if (p == null: nullable *void) {
+ return null;
+ };
+ let bsize = (p: uintptr + WASTE: uintptr): *[1]size;
+ bsize[0] = n;
+ return (p: uintptr + WASTE: uintptr + WORD: uintptr): nullable *void;
+};
+
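+// Pops the first free block from the bin serving this size, filling the bin
+// from a fresh segment if it is empty. Returns null if no memory is available.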
+fn malloc_small(n: size) nullable *void = {
+ const b = size2bin(n);
+ let p = bins[b];
+ if (p == null: nullable *void) {
+ p = fill_bin(b);
+ if (p != null: nullable *void) {
+ bins[b] = p;
+ };
+ };
+ return if (p != null: nullable *void) {
+ let q = *(p: **void);
+ bins[b] = q;
+ p: nullable *void;
+ } else null: nullable *void;
+};
+
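+// Allocates a BIGBLOCK segment and threads it into a free list of blocks sized
+// for bin b, returning the head of the list, or null if the allocation failed.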
+fn fill_bin(b: size) nullable *void = {
+ const s = bin2size(b);
+ let p = segmalloc(BIGBLOCK);
+ return if (p == null: nullable *void) null: nullable *void
+ else list_from_block(s, p: uintptr);
+};
+
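+// Carves the segment at p into blocks of s bytes (each prefixed by a size
+// word), chains them into a free list, and returns the first block's data.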
+fn list_from_block(s: size, p: uintptr) nullable *void = {
+ const nblocks = (BIGBLOCK - WASTE) / (s + WORD);
+
+ let q = p + WASTE: uintptr; // align q+WORD
+ for (let j = 0z; j != nblocks - 1z; j += 1z) {
+ let sz = q: *size;
+ let useralloc = q + WORD: uintptr; // aligned
+ let next = (useralloc + s: uintptr + WORD: uintptr): *void;
+ *sz = s;
+ *(useralloc: **void) = next;
+ q += s: uintptr + WORD: uintptr;
+ };
+
+ // Terminate last block:
+ (q: *[1]size)[0] = s;
+ *((q + WORD: uintptr): *nullable *void) = null;
+
+ // Return first block:
+ return (p + WASTE: uintptr + WORD: uintptr): *void;
+};
+
+// Frees a pointer previously allocated with [malloc] or [realloc].
+export @symbol("rt.free") fn free_(_p: nullable *void) void = {
+ if (_p != null: nullable *void) {
+ let p = _p: *void;
+ let bsize = (p: uintptr - size(size): uintptr): *[1]size;
+ let s = bsize[0];
+ if (s <= bin2size(len(bins) - 1z)) free_small(p, s)
+ else free_large(p, s);
+ };
+};
+
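+// Returns a large allocation's segment to the kernel via munmap.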
+fn free_large(_p: *void, s: size) void = {
+ let p = (_p: uintptr - (WASTE: uintptr + WORD: uintptr)): *void;
+ let r = segfree(p, s + WASTE + WORD);
+ assert(r == 0, "free: munmap failed");
+};
+
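+// Pushes a small allocation back onto the free list of its bin.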
+fn free_small(p: *void, s: size) void = {
+ let b = size2bin(s);
+ let q = bins[b];
+ *(p: **void) = q;
+ bins[b] = p: nullable *void;
+};
+
+// Changes the allocation size of a pointer to n bytes. If n is smaller than
+// the prior allocation, it is truncated; otherwise the allocation is expanded
+// and the values of the new bytes are undefined. May return a different
+// pointer than the one given if there is insufficient space to expand the
+// allocation in-place. Returns null if there is insufficient memory to support
+// the request.
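+//
+// A typical (hypothetical) call site keeps the old pointer until the new
+// allocation is known to have succeeded, for example:
+//
+//	let q = realloc(p, 2z * n);
+//	if (q != null: nullable *void) {
+//		p = q;
+//	};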
+export fn realloc(_p: nullable *void, n: size) nullable *void = {
+ assert(n > 0z);
+ if (_p == null: nullable *void) {
+ return malloc(n);
+ };
+
+ let p = _p: *void;
+ let bsize = (p: uintptr - size(size): uintptr): *size;
+ let s = *bsize;
+ if (n <= s) {
+ return p;
+ };
+
+ if (n < bin2size(len(bins) - 1z) && size2bin(n) == size2bin(s)) {
+ return p;
+ };
+
+ let new = malloc(n);
+ if (new != null: nullable *void) {
+ memcpy(new: *void, p, s);
+ free_(_p);
+ };
+
+ return new;
+};
+
+// Like [realloc], but aborts the program if there is insufficient memory.
+export fn must_realloc(p: nullable *void, n: size) *void = {
+ let new = realloc(p, n);
+ assert(new != null: nullable *void, "out of memory");
+ return new: *void;
+};
diff --git a/rt/memcpy.ha b/rt/memcpy.ha
@@ -0,0 +1,6 @@
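+// Copies amt bytes of memory from src to dest; the two regions should not
+// overlap.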
+export fn memcpy(dest: *void, src: *void, amt: size) void = {
+ let a = dest: *[*]u8, b = src: *[*]u8;
+ for (let i = 0z; i < amt; i += 1z) {
+ a[i] = b[i];
+ };
+};