summaryrefslogtreecommitdiff
path: root/src/vm.c
diff options
context:
space:
mode:
Diffstat (limited to 'src/vm.c')
-rw-r--r--src/vm.c477
1 files changed, 477 insertions, 0 deletions
diff --git a/src/vm.c b/src/vm.c
new file mode 100644
index 0000000..fdcd71b
--- /dev/null
+++ b/src/vm.c
@@ -0,0 +1,477 @@
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#include <ejit/ejit.h>
#include <berg/vm.h>
+
+#define VEC_NAME funcs
+#define VEC_TYPE struct berg_func *
+#include <conts/vec.h>
+
+#define VEC_NAME insns
+#define VEC_TYPE struct berg_insn
+#include <conts/vec.h>
+
+#define VEC_NAME operands
+#define VEC_TYPE struct berg_operand
+#include <conts/vec.h>
+
/* one tracked guest allocation: the half-open byte range [base, top) */
struct alloc {
	void *base, *top;
};

/* open-addressed hash map of live guest allocations, keyed by base pointer */
struct allocs {
	/* must always be pow2 */
	size_t size;
	struct alloc *buf; /* size entries; empty slots have base == NULL */
};

/* top-level VM state */
struct berg_vm {
	struct funcs funcs;   /* all functions owned by this vm */
	struct allocs allocs; /* live guest allocations */
};
+
/* one VM-level function: the portable instruction stream plus the
 * ejit function it is lowered into by compile_berg_func() */
struct berg_func {
	/* owning vm */
	struct berg_vm *vm;

	/* vm instructions */
	struct insns insns;

	/* arguments */
	struct operands operands;

	/* actual function */
	struct ejit_func *func;
};
+
+static struct allocs allocs_create(size_t pow2)
+{
+ size_t size = 1ULL << pow2;
+ struct alloc *buf = calloc(size, sizeof(struct alloc));
+ struct allocs allocs = {.size = size, buf = buf};
+ return allocs;
+}
+
/* Hash a pointer into a slot index for a power-of-two sized table.
 * The low 4 bits are discarded on the assumption that allocations have
 * at least 16-byte alignment; this might only hold on amd64 (check
 * max_align_t on other targets). */
static size_t allocs_idx(void *ptr, size_t size)
{
	uintptr_t key = (uintptr_t)ptr >> 4;
	size_t mask = size - 1;
	return (size_t)(key & mask);
}
+
+static long allocs_resize(struct allocs *allocs)
+{
+ /* very naive allocation strategy, a single collision causes a regrow */
+ size_t new_size = allocs->size * 2;
+
+top:
+ struct alloc *new_buf = calloc(new_size, sizeof(struct alloc));
+ if (!new_buf)
+ return -1;
+
+ for (size_t i = 0; i < allocs->size; ++i) {
+ struct alloc *alloc = &allocs->buf[i];
+ if (!alloc->base)
+ continue;
+
+ uintptr_t idx = allocs_idx(alloc->base, new_size);
+ struct alloc *new_alloc = &new_buf[idx];
+ if (new_alloc->base) {
+ /* try again */
+ free(new_buf);
+ new_size *= 2;
+ goto top;
+ }
+
+ *new_alloc = *alloc;
+ }
+
+ allocs->size = new_size;
+ allocs->buf = new_buf;
+ return 0;
+}
+
+static long allocs_insert(struct allocs *allocs, void *ptr, size_t size)
+{
+ void *top = ptr + size;
+
+ while (1) {
+ /* try to place allocation into map */
+ size_t idx = allocs_idx(ptr, allocs->size);
+ struct alloc *node = &allocs->buf[idx];
+
+ /* free slot */
+ if (node->base == NULL) {
+ node->base = ptr;
+ node->top = top;
+ return 0;
+ }
+
+ int ret = allocs_resize(allocs);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static long allocs_remove(struct allocs *allocs, void *ptr)
+{
+ uintptr_t idx = allocs_idx(ptr, allocs->size);
+ struct alloc *alloc = &allocs->buf[idx];
+ if (alloc->base != ptr)
+ return -1;
+
+ alloc->base = NULL;
+ alloc->top = NULL;
+ return 0;
+}
+
+struct berg_vm *create_berg_vm()
+{
+ struct berg_vm *vm = calloc(1, sizeof(struct berg_vm));
+ vm->funcs = funcs_create(0);
+
+ /* 2^4 elements to start with */
+ vm->allocs = allocs_create(4);
+ return vm;
+}
+
+static enum ejit_type ejit_type_from(enum berg_type type)
+{
+ switch (type) {
+ case BERG_POINTER: return EJIT_POINTER;
+ case BERG_VOID: return EJIT_VOID;
+ default: abort();
+ }
+}
+
/* sentinel for "no register in this instruction field" */
const uint8_t NOGPR = 255;
/* three reserved registers for keeping track of allocation stuff. Should be
 * fetched from VM to registers (or stack I guess?) whenever a call happens,
 * since the size of the allocation hashmap might've changed while we weren't
 * looking */
const struct ejit_gpr ALLOC_MASK = EJIT_GPR(0); /* allocs.size - 1 */
const struct ejit_gpr ALLOC_BUF = EJIT_GPR(1);  /* allocs.buf */
const struct ejit_gpr VM_PTR = EJIT_GPR(2);     /* owning struct berg_vm * */

/* translate a virtual berg register number past the reserved registers */
static struct ejit_gpr ejit_gpr_from(uint8_t r)
{
	assert(r != NOGPR);
	/* three reserved registers */
	return EJIT_GPR(r + 3);
}
+
+struct berg_func *create_berg_func(struct berg_vm *vm, enum berg_type rtype, size_t argc, const struct berg_operand args[argc])
+{
+ struct berg_func *f = calloc(1, sizeof(struct berg_func));
+ f->vm = vm;
+ f->insns = insns_create(0);
+ f->operands = operands_create(argc);
+
+ struct ejit_operand ejit_operands[argc];
+ for (size_t i = 0; i < argc; ++i) {
+ operands_append(&f->operands, args[i]);
+ ejit_operands[i] = EJIT_OPERAND_GPR(
+ ejit_gpr_from(args[i].gpr.r).r,
+ ejit_type_from(args[i].type)
+ );
+ }
+
+ f->func = ejit_create_func(ejit_type_from(rtype), argc, ejit_operands);
+ funcs_append(&vm->funcs, f);
+ return f;
+}
+
+
+static struct berg_insn insn_ori(enum berg_op op, struct berg_gpr ra, int64_t imm)
+{
+ return (struct berg_insn){
+ .op = op,
+ .ra = ra.r,
+ .rb = NOGPR,
+ .rc = NOGPR,
+ .rd = NOGPR,
+ .imm = imm
+ };
+}
+
+static struct berg_insn insn_orr(enum berg_op op, struct berg_gpr ra, struct berg_gpr rb)
+{
+ return (struct berg_insn){
+ .op = op,
+ .ra = ra.r,
+ .rb = rb.r,
+ .rc = NOGPR,
+ .rd = NOGPR,
+ .imm = 0
+ };
+}
+
+static struct berg_insn insn_o(enum berg_op op)
+{
+ return (struct berg_insn){
+ .op = op,
+ .ra = NOGPR,
+ .rb = NOGPR,
+ .rc = NOGPR,
+ .rd = NOGPR,
+ .imm = 0
+ };
+}
+
+static struct berg_insn insn_op(enum berg_op op, void *p)
+{
+ return (struct berg_insn){
+ .op = op,
+ .ra = NOGPR,
+ .rb = NOGPR,
+ .rc = NOGPR,
+ .rd = NOGPR,
+ .p = p
+ };
+}
+
+long berg_movi(struct berg_func *f, struct berg_gpr ra, int64_t imm)
+{
+ insns_append(&f->insns, insn_ori(BERG_MOVI, ra, imm));
+ /* for now */
+ return 0;
+}
+
+long berg_movr(struct berg_func *f, struct berg_gpr ra, struct berg_gpr rb)
+{
+ insns_append(&f->insns, insn_orr(BERG_MOVR, ra, rb));
+ return 0;
+}
+
+long berg_ret(struct berg_func *f)
+{
+ insns_append(&f->insns, insn_o(BERG_RET));
+ return 0;
+}
+
+long berg_ecall(struct berg_func *f, struct berg_gpr ra, int64_t imm)
+{
+ insns_append(&f->insns, insn_ori(BERG_ECALL, ra, imm));
+ return 0;
+}
+
+long berg_call(struct berg_func *f, struct berg_func *c)
+{
+ assert(f->vm == c->vm);
+ insns_append(&f->insns, insn_op(BERG_CALL, c));
+ return 0;
+}
+
+const char *berg_op_str(enum berg_op op)
+{
+#define CASE(x) case x: return #x;
+ switch (op) {
+ CASE(BERG_LABEL);
+ CASE(BERG_LD32);
+ CASE(BERG_ST32);
+ CASE(BERG_CALL);
+ CASE(BERG_RET);
+ CASE(BERG_ECALL);
+ CASE(BERG_MOVI);
+ CASE(BERG_MOVR);
+ }
+
+ return "???";
+#undef CASE
+}
+
+static long compile_ret(struct berg_func *f)
+{
+ ejit_ret(f->func);
+ return 0;
+}
+
+static long compile_movi(struct berg_func *f, struct berg_insn *i)
+{
+ ejit_movi(f->func, ejit_gpr_from(i->ra), i->imm);
+ return 0;
+}
+
/* should really be uintptr_t... */
static long escape_malloc(size_t argc, const struct ejit_arg args[argc])
{
	/* Host-side implementation of the BERG_MALLOC envcall.
	 * args[0] is the owning vm, args[1] the requested byte count.
	 * Returns the new pointer squeezed into a long, or 0 on failure.
	 * Successful allocations are recorded in vm->allocs so the VM can
	 * validate and reclaim guest-visible pointers later. */
	struct berg_vm *vm = EJIT_PARAM(argc, args, 0, struct berg_vm *);
	size_t size = EJIT_PARAM(argc, args, 1, size_t);
	void *ptr = malloc(size);
	if (!ptr)
		return 0;

	/* if the tracking map can't take the pointer, fail the whole
	 * allocation rather than leak an untracked block */
	if (allocs_insert(&vm->allocs, ptr, size)) {
		free(ptr);
		return 0;
	}

	return (long)ptr;
}
+
+static long escape_free(size_t argc, const struct ejit_arg args[argc])
+{
+ struct berg_vm *vm = EJIT_PARAM(argc, args, 0, struct berg_vm *);
+ void *ptr = EJIT_PARAM(argc, args, 1, void *);
+ if (allocs_remove(&vm->allocs, ptr)) {
+ fprintf(stderr, "trying to free nonexistant pointer\n");
+ abort();
+ }
+
+ free(ptr);
+ return 0;
+}
+
static long compile_malloc(struct berg_func *f, struct berg_insn *i)
{
	/* Lower a BERG_MALLOC envcall: marshal (vm, size) into the escape
	 * calling convention and store the returned pointer in i->ra.
	 * By convention the requested size lives in argument register 0. */
	struct berg_gpr size = berg_gpr_arg(0);

	/* materialise the owning vm as the first escape argument */
	ejit_movi(f->func, VM_PTR, (int64_t)f->vm);
	struct ejit_operand args[] = {
		EJIT_OPERAND_GPR(VM_PTR.r, EJIT_POINTER),
		EJIT_OPERAND_GPR(ejit_gpr_from(size.r).r, EJIT_TYPE(size_t)),
	};
	ejit_escapei_l(f->func, escape_malloc, 2, args);
	ejit_retval(f->func, ejit_gpr_from(i->ra));

	/* reload allocation context if the hashmap has changed */
	EJIT_LDI(f->func, size_t, ALLOC_MASK, &f->vm->allocs.size);
	EJIT_LDI(f->func, void *, ALLOC_BUF, &f->vm->allocs.buf);
	ejit_subi(f->func, ALLOC_MASK, ALLOC_MASK, 1);
	return 0;
}
+
static long compile_free(struct berg_func *f, struct berg_insn *i)
{
	/* Lower a BERG_FREE envcall: marshal (vm, ptr) into the escape
	 * calling convention.  The pointer to free lives in argument
	 * register 0 by convention. */
	struct berg_gpr ptr = berg_gpr_arg(0);

	/* materialise the owning vm as the first escape argument */
	ejit_movi(f->func, VM_PTR, (int64_t)f->vm);
	struct ejit_operand args[] = {
		EJIT_OPERAND_GPR(VM_PTR.r, EJIT_POINTER),
		EJIT_OPERAND_GPR(ejit_gpr_from(ptr.r).r, EJIT_POINTER),
	};
	ejit_escapei_l(f->func, escape_free, 2, args);
	/* NOTE(review): escape_free always returns 0, yet its retval is
	 * stored into i->ra, mirroring compile_malloc -- confirm intended */
	ejit_retval(f->func, ejit_gpr_from(i->ra));

	/* no need to reload anything since (at least currently) hashmap can't
	 * shrink */
	return 0;
}
+
+static long compile_ecall(struct berg_func *f, struct berg_insn *i)
+{
+ switch (i->imm) {
+ case BERG_MALLOC: return compile_malloc(f, i);
+ case BERG_FREE: return compile_free(f, i);
+ }
+
+ fprintf(stderr, "unknown envcall: %lli\n", (long long)i->imm);
+ return -1;
+}
+
+/** @todo this doesn't really work for calling functions via registers, unsure
+ * how that situation should be handled */
+static long compile_call(struct berg_func *f, struct berg_insn *i)
+{
+ struct berg_func *c = i->p;
+
+ size_t operands = operands_len(&c->operands);
+ struct ejit_operand ejit_operands[operands];
+ for (size_t i = 0; i < operands; ++i) {
+ struct berg_gpr reg = berg_gpr_arg(i);
+ struct berg_operand *operand = operands_at(&c->operands, i);
+ ejit_operands[i] = EJIT_OPERAND_GPR(
+ ejit_gpr_from(reg.r).r,
+ ejit_type_from(operand->type)
+ );
+ }
+
+ ejit_calli(f->func, c->func, operands, ejit_operands);
+
+ /* reload allocation map since we don't know if something was maybe
+ * freed */
+ EJIT_LDI(f->func, size_t, ALLOC_MASK, &f->vm->allocs.size);
+ EJIT_LDI(f->func, void *, ALLOC_BUF, &f->vm->allocs.buf);
+ ejit_subi(f->func, ALLOC_MASK, ALLOC_MASK, 1);
+ return 0;
+}
+
+static long compile_movr(struct berg_func *f, struct berg_insn *i)
+{
+ ejit_movr(f->func, ejit_gpr_from(i->ra), ejit_gpr_from(i->rb));
+ return 0;
+}
+
/* Lower every VM instruction in f to ejit and JIT-compile the result.
 * Returns 0 on success, or the first nonzero error from a per-insn
 * compiler (-1 for unhandled opcodes).
 * NOTE(review): ejit_compile_func's result is ignored -- confirm it
 * cannot fail. */
long compile_berg_func(struct berg_func *f)
{
	/** @todo eventually should make this be block-specific */
	long ret = 0;
	foreach(insns, i, &f->insns) switch (i->op) {
	case BERG_RET:
		if ((ret = compile_ret(f)))
			return ret;
		break;

	case BERG_MOVI:
		if ((ret = compile_movi(f, i)))
			return ret;
		break;

	case BERG_MOVR:
		if ((ret = compile_movr(f, i)))
			return ret;
		break;

	case BERG_CALL:
		if ((ret = compile_call(f, i)))
			return ret;
		break;

	case BERG_ECALL:
		if ((ret = compile_ecall(f, i)))
			return ret;
		break;

	default:
		fprintf(stderr, "unhandled op %s\n", berg_op_str(i->op));
		return -1;
	}

	ejit_compile_func(f->func);
	return 0;
}
+
+long run_berg_func(struct berg_func *f)
+{
+ struct ejit_arg arg = ejit_run_func(f->func, 0, NULL);
+ /* for now */
+ assert(arg.type == EJIT_VOID);
+ return 0;
+}
+
+static void destroy_berg_func(struct berg_func *f)
+{
+ insns_destroy(&f->insns);
+ operands_destroy(&f->operands);
+ ejit_destroy_func(f->func);
+ free(f);
+}
+
+void destroy_berg_vm(struct berg_vm *vm)
+{
+ foreach(funcs, f, &vm->funcs) {
+ destroy_berg_func(*f);
+ }
+ funcs_destroy(&vm->funcs);
+
+ for (size_t i = 0; i < vm->allocs.size; ++i) {
+ struct alloc a = vm->allocs.buf[i];
+ if (a.base)
+ free(a.base);
+ }
+
+ free(vm->allocs.buf);
+ free(vm);
+}