Diffstat (limited to 'libguile/vm.c')
-rw-r--r-- | libguile/vm.c | 1613
1 file changed, 993 insertions, 620 deletions
diff --git a/libguile/vm.c b/libguile/vm.c index c313119e7..d7b1788d8 100644 --- a/libguile/vm.c +++ b/libguile/vm.c @@ -1,59 +1,91 @@ -/* Copyright (C) 2001, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2018 Free Software Foundation, Inc. - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public License - * as published by the Free Software Foundation; either version 3 of - * the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301 USA - */ +/* Copyright 2001,2009-2015,2017-2019 + Free Software Foundation, Inc. + + This file is part of Guile. + + Guile is free software: you can redistribute it and/or modify it + under the terms of the GNU Lesser General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Guile is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with Guile. If not, see + <https://www.gnu.org/licenses/>. */ #if HAVE_CONFIG_H # include <config.h> #endif -#include <stdlib.h> -#include <alloca.h> #include <alignof.h> -#include <string.h> +#include <alloca.h> +#include <errno.h> +#include <math.h> #include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> #include <unistd.h> #ifdef HAVE_SYS_MMAN_H #include <sys/mman.h> #endif -#include "libguile/bdw-gc.h" +#include "alist.h" +#include "async.h" +#include "atomic.h" +#include "atomics-internal.h" +#include "bdw-gc.h" +#include "cache-internal.h" +#include "continuations.h" +#include "control.h" +#include "dynwind.h" +#include "eval.h" +#include "extensions.h" +#include "foreign.h" +#include "frames.h" +#include "gc-inline.h" +#include "gsubr.h" +#include "hooks.h" +#include "instructions.h" +#include "intrinsics.h" +#include "jit.h" +#include "keywords.h" +#include "list.h" +#include "loader.h" +#include "modules.h" +#include "numbers.h" +#include "pairs.h" +#include "ports.h" +#include "procprop.h" +#include "programs.h" +#include "simpos.h" +#include "smob.h" +#include "stackchk.h" +#include "symbols.h" +#include "values.h" +#include "vectors.h" +#include "version.h" +#include "vm-builtins.h" + +#include "vm.h" + #include <gc/gc_mark.h> -#include "libguile/_scm.h" -#include "libguile/atomic.h" -#include "libguile/atomics-internal.h" -#include "libguile/cache-internal.h" -#include "libguile/control.h" -#include "libguile/frames.h" -#include "libguile/gc-inline.h" -#include "libguile/instructions.h" -#include "libguile/loader.h" -#include "libguile/programs.h" -#include "libguile/simpos.h" -#include "libguile/vm.h" -#include "libguile/vm-builtins.h" +#if (defined __GNUC__) +# define SCM_NOINLINE __attribute__ ((__noinline__)) +#else +# define SCM_NOINLINE /* noinline */ +#endif static int vm_default_engine = 
SCM_VM_REGULAR_ENGINE; /* Unfortunately we can't snarf these: snarfed things are only loaded up from (system vm vm), which might not be loaded before an error happens. */ -static SCM sym_vm_run; -static SCM sym_vm_error; static SCM sym_keyword_argument_error; static SCM sym_regular; static SCM sym_debug; @@ -122,7 +154,7 @@ scm_i_vm_cont_to_frame (SCM cont, struct scm_frame *frame) frame->stack_holder = data; frame->fp_offset = data->fp_offset; frame->sp_offset = data->stack_size; - frame->ip = data->ra; + frame->ip = data->vra; return 1; } @@ -130,11 +162,13 @@ scm_i_vm_cont_to_frame (SCM cont, struct scm_frame *frame) /* Ideally we could avoid copying the C stack if the continuation root is inside VM code, and call/cc was invoked within that same call to vm_run. That's currently not implemented. */ -SCM -scm_i_vm_capture_stack (union scm_vm_stack_element *stack_top, - union scm_vm_stack_element *fp, - union scm_vm_stack_element *sp, scm_t_uint32 *ra, - scm_t_dynstack *dynstack, scm_t_uint32 flags) +static SCM +capture_stack (union scm_vm_stack_element *stack_top, + union scm_vm_stack_element *fp, + union scm_vm_stack_element *sp, + uint32_t *vra, + uint8_t *mra, + scm_t_dynstack *dynstack, uint32_t flags) { struct scm_vm_cont *p; @@ -142,7 +176,8 @@ scm_i_vm_capture_stack (union scm_vm_stack_element *stack_top, p->stack_size = stack_top - sp; p->stack_bottom = scm_gc_malloc (p->stack_size * sizeof (*p->stack_bottom), "capture_vm_cont"); - p->ra = ra; + p->vra = vra; + p->mra = mra; p->fp_offset = stack_top - fp; memcpy (p->stack_bottom, sp, p->stack_size * sizeof (*p->stack_bottom)); p->dynstack = dynstack; @@ -150,102 +185,92 @@ scm_i_vm_capture_stack (union scm_vm_stack_element *stack_top, return scm_cell (scm_tc7_vm_cont, (scm_t_bits) p); } -struct return_to_continuation_data +SCM +scm_i_capture_current_stack (void) { - struct scm_vm_cont *cp; + scm_thread *thread; struct scm_vm *vp; -}; - -/* Called with the GC lock to prevent the stack marker from traversing a - stack in an inconsistent state. */ -static void * -vm_return_to_continuation_inner (void *data_ptr) -{ - struct return_to_continuation_data *data = data_ptr; - struct scm_vm *vp = data->vp; - struct scm_vm_cont *cp = data->cp; - /* We know that there is enough space for the continuation, because we - captured it in the past. However there may have been an expansion - since the capture, so we may have to re-link the frame - pointers. 
*/ - memcpy (vp->stack_top - cp->stack_size, - cp->stack_bottom, - cp->stack_size * sizeof (*cp->stack_bottom)); - vp->fp = vp->stack_top - cp->fp_offset; - vm_restore_sp (vp, vp->stack_top - cp->stack_size); + thread = SCM_I_CURRENT_THREAD; + vp = &thread->vm; - return NULL; + return capture_stack (vp->stack_top, vp->fp, vp->sp, vp->ip, NULL, + scm_dynstack_capture_all (&thread->dynstack), + 0); } +#define FOR_EACH_HOOK(M) \ + M(apply) \ + M(return) \ + M(next) \ + M(abort) + static void -vm_return_to_continuation (struct scm_vm *vp, SCM cont, size_t n, - union scm_vm_stack_element *argv) +vm_hook_compute_enabled (scm_thread *thread, SCM hook, uint8_t *enabled) { - struct scm_vm_cont *cp; - union scm_vm_stack_element *argv_copy; - struct return_to_continuation_data data; - - argv_copy = alloca (n * sizeof (*argv)); - memcpy (argv_copy, argv, n * sizeof (*argv)); - - cp = SCM_VM_CONT_DATA (cont); + if (thread->vm.trace_level <= 0 + || thread->vm.engine == SCM_VM_REGULAR_ENGINE + || scm_is_false (hook) + || scm_is_true (scm_hook_empty_p (hook))) + *enabled = 0; + else + *enabled = 1; +} - data.cp = cp; - data.vp = vp; - GC_call_with_alloc_lock (vm_return_to_continuation_inner, &data); +static void +vm_recompute_disable_mcode (scm_thread *thread) +{ + uint8_t was_disabled = thread->vm.disable_mcode; + thread->vm.disable_mcode = 0; - /* Now we have the continuation properly copied over. We just need to - copy on an empty frame and the return values, as the continuation - expects. */ - vm_push_sp (vp, vp->sp - 3 - n); - vp->sp[n+2].as_scm = SCM_BOOL_F; - vp->sp[n+1].as_scm = SCM_BOOL_F; - vp->sp[n].as_scm = SCM_BOOL_F; - memcpy(vp->sp, argv_copy, n * sizeof (union scm_vm_stack_element)); +#define DISABLE_MCODE_IF_HOOK_ENABLED(h) \ + if (thread->vm.h##_hook_enabled) \ + thread->vm.disable_mcode = 1; + FOR_EACH_HOOK (DISABLE_MCODE_IF_HOOK_ENABLED); +#undef DISABLE_MCODE_IF_HOOK_ENABLED - vp->ip = cp->ra; + if (thread->vm.disable_mcode && !was_disabled) + scm_jit_clear_mcode_return_addresses (thread); } -static struct scm_vm * thread_vm (scm_i_thread *t); -SCM -scm_i_capture_current_stack (void) +static int +set_vm_trace_level (scm_thread *thread, int level) { - scm_i_thread *thread; - struct scm_vm *vp; + int old_level; + struct scm_vm *vp = &thread->vm; - thread = SCM_I_CURRENT_THREAD; - vp = thread_vm (thread); + old_level = vp->trace_level; + vp->trace_level = level; + vp->disable_mcode = 0; +#define RESET_LEVEL(h) \ + vm_hook_compute_enabled (thread, vp->h##_hook, &vp->h##_hook_enabled); + FOR_EACH_HOOK (RESET_LEVEL); +#undef RESET_LEVEL + vm_recompute_disable_mcode (thread); - return scm_i_vm_capture_stack (vp->stack_top, vp->fp, vp->sp, vp->ip, - scm_dynstack_capture_all (&thread->dynstack), - 0); + return old_level; } -static void vm_dispatch_apply_hook (struct scm_vm *vp) SCM_NOINLINE; -static void vm_dispatch_push_continuation_hook (struct scm_vm *vp) SCM_NOINLINE; -static void vm_dispatch_pop_continuation_hook - (struct scm_vm *vp, union scm_vm_stack_element *old_fp) SCM_NOINLINE; -static void vm_dispatch_next_hook (struct scm_vm *vp) SCM_NOINLINE; -static void vm_dispatch_abort_hook (struct scm_vm *vp) SCM_NOINLINE; +/* Return the first integer greater than or equal to LEN such that + LEN % ALIGN == 0. Return LEN if ALIGN is zero. */ +#define ROUND_UP(len, align) \ + ((align) ? 
(((len) - 1UL) | ((align) - 1UL)) + 1UL : (len)) static void -vm_dispatch_hook (struct scm_vm *vp, int hook_num, - union scm_vm_stack_element *argv, int n) +invoke_hook (scm_thread *thread, SCM hook) { - SCM hook; + struct scm_vm *vp = &thread->vm; struct scm_frame c_frame; scm_t_cell *frame; + SCM scm_frame; int saved_trace_level; + uint8_t saved_compare_result; - hook = vp->hooks[hook_num]; - - if (SCM_LIKELY (scm_is_false (hook)) - || scm_is_null (SCM_HOOK_PROCEDURES (hook))) + if (scm_is_false (hook) || scm_is_null (SCM_HOOK_PROCEDURES (hook))) return; - saved_trace_level = vp->trace_level; - vp->trace_level = 0; + saved_trace_level = set_vm_trace_level (thread, 0); + saved_compare_result = vp->compare_result; /* Allocate a frame object on the stack. This is more efficient than calling `scm_c_make_frame ()' to allocate on the heap, but it forces hooks to not @@ -262,423 +287,69 @@ vm_dispatch_hook (struct scm_vm *vp, int hook_num, /* Arrange for FRAME to be 8-byte aligned, like any other cell. */ frame = alloca (sizeof (*frame) + 8); - frame = (scm_t_cell *) ROUND_UP ((scm_t_uintptr) frame, 8UL); + frame = (scm_t_cell *) ROUND_UP ((uintptr_t) frame, 8UL); frame->word_0 = SCM_PACK (scm_tc7_frame | (SCM_VM_FRAME_KIND_VM << 8)); frame->word_1 = SCM_PACK_POINTER (&c_frame); - if (n == 0) - { - SCM args[1]; - - args[0] = SCM_PACK_POINTER (frame); - scm_c_run_hookn (hook, args, 1); - } - else if (n == 1) - { - SCM args[2]; - - args[0] = SCM_PACK_POINTER (frame); - args[1] = argv[0].as_scm; - scm_c_run_hookn (hook, args, 2); - } - else - { - SCM args = SCM_EOL; - int i; - - for (i = 0; i < n; i++) - args = scm_cons (argv[i].as_scm, args); - scm_c_run_hook (hook, scm_cons (SCM_PACK_POINTER (frame), args)); - } + scm_frame = SCM_PACK_POINTER (frame); + scm_c_run_hookn (hook, &scm_frame, 1); - vp->trace_level = saved_trace_level; + vp->compare_result = saved_compare_result; + set_vm_trace_level (thread, saved_trace_level); } -static void -vm_dispatch_apply_hook (struct scm_vm *vp) -{ - return vm_dispatch_hook (vp, SCM_VM_APPLY_HOOK, NULL, 0); -} -static void vm_dispatch_push_continuation_hook (struct scm_vm *vp) -{ - return vm_dispatch_hook (vp, SCM_VM_PUSH_CONTINUATION_HOOK, NULL, 0); -} -static void vm_dispatch_pop_continuation_hook (struct scm_vm *vp, - union scm_vm_stack_element *old_fp) -{ - return vm_dispatch_hook (vp, SCM_VM_POP_CONTINUATION_HOOK, - vp->sp, SCM_FRAME_NUM_LOCALS (old_fp, vp->sp) - 1); -} -static void vm_dispatch_next_hook (struct scm_vm *vp) -{ - return vm_dispatch_hook (vp, SCM_VM_NEXT_HOOK, NULL, 0); -} -static void vm_dispatch_abort_hook (struct scm_vm *vp) -{ - return vm_dispatch_hook (vp, SCM_VM_ABORT_CONTINUATION_HOOK, - vp->sp, SCM_FRAME_NUM_LOCALS (vp->fp, vp->sp) - 1); -} - -static void -vm_abort (struct scm_vm *vp, SCM tag, size_t nargs, - scm_i_jmp_buf *current_registers) SCM_NORETURN; - -static void -vm_abort (struct scm_vm *vp, SCM tag, size_t nargs, - scm_i_jmp_buf *current_registers) -{ - size_t i; - SCM *argv; - - argv = alloca (nargs * sizeof (SCM)); - for (i = 0; i < nargs; i++) - argv[i] = vp->sp[nargs - i - 1].as_scm; - - vp->sp = vp->fp; - - scm_c_abort (vp, tag, nargs, argv, current_registers); -} - -struct vm_reinstate_partial_continuation_data -{ - struct scm_vm *vp; - struct scm_vm_cont *cp; -}; - -static void * -vm_reinstate_partial_continuation_inner (void *data_ptr) -{ - struct vm_reinstate_partial_continuation_data *data = data_ptr; - struct scm_vm *vp = data->vp; - struct scm_vm_cont *cp = data->cp; - - memcpy (vp->fp - cp->stack_size, - 
cp->stack_bottom, - cp->stack_size * sizeof (*cp->stack_bottom)); - - vp->fp -= cp->fp_offset; - vp->ip = cp->ra; - - return NULL; -} - -static void -vm_reinstate_partial_continuation (struct scm_vm *vp, SCM cont, size_t nargs, - scm_t_dynstack *dynstack, - scm_i_jmp_buf *registers) -{ - struct vm_reinstate_partial_continuation_data data; - struct scm_vm_cont *cp; - union scm_vm_stack_element *args; - scm_t_ptrdiff old_fp_offset; - - args = alloca (nargs * sizeof (*args)); - memcpy (args, vp->sp, nargs * sizeof (*args)); - - cp = SCM_VM_CONT_DATA (cont); - - old_fp_offset = vp->stack_top - vp->fp; - - vm_push_sp (vp, vp->fp - (cp->stack_size + nargs + 1)); - - data.vp = vp; - data.cp = cp; - GC_call_with_alloc_lock (vm_reinstate_partial_continuation_inner, &data); - - /* The resume continuation will expect ARGS on the stack as if from a - multiple-value return. Fill in the closure slot with #f, and copy - the arguments into place. */ - vp->sp[nargs].as_scm = SCM_BOOL_F; - memcpy (vp->sp, args, nargs * sizeof (*args)); +#define DEFINE_INVOKE_HOOK(h) \ + static void \ + invoke_##h##_hook (scm_thread *thread) SCM_NOINLINE; \ + static void \ + invoke_##h##_hook (scm_thread *thread) \ + { \ + if (thread->vm.h##_hook_enabled) \ + return invoke_hook (thread, thread->vm.h##_hook); \ + } - /* The prompt captured a slice of the dynamic stack. Here we wind - those entries onto the current thread's stack. We also have to - relocate any prompts that we see along the way. */ - { - scm_t_bits *walk; +FOR_EACH_HOOK (DEFINE_INVOKE_HOOK) - for (walk = SCM_DYNSTACK_FIRST (cp->dynstack); - SCM_DYNSTACK_TAG (walk); - walk = SCM_DYNSTACK_NEXT (walk)) - { - scm_t_bits tag = SCM_DYNSTACK_TAG (walk); - - if (SCM_DYNSTACK_TAG_TYPE (tag) == SCM_DYNSTACK_TYPE_PROMPT) - scm_dynstack_wind_prompt (dynstack, walk, old_fp_offset, registers); - else - scm_dynstack_wind_1 (dynstack, walk); - } - } -} +#undef DEFINE_INVOKE_HOOK /* * VM Error Handling */ -static void vm_error (const char *msg, SCM arg) SCM_NORETURN; -static void vm_error_bad_instruction (scm_t_uint32 inst) SCM_NORETURN SCM_NOINLINE; -static void vm_error_unbound (SCM sym) SCM_NORETURN SCM_NOINLINE; -static void vm_error_not_a_variable (const char *func_name, SCM x) SCM_NORETURN SCM_NOINLINE; -static void vm_error_apply_to_non_list (SCM x) SCM_NORETURN SCM_NOINLINE; -static void vm_error_kwargs_missing_value (SCM proc, SCM kw) SCM_NORETURN SCM_NOINLINE; -static void vm_error_kwargs_invalid_keyword (SCM proc, SCM obj) SCM_NORETURN SCM_NOINLINE; -static void vm_error_kwargs_unrecognized_keyword (SCM proc, SCM kw) SCM_NORETURN SCM_NOINLINE; -static void vm_error_wrong_num_args (SCM proc) SCM_NORETURN SCM_NOINLINE; -static void vm_error_wrong_type_apply (SCM proc) SCM_NORETURN SCM_NOINLINE; -static void vm_error_not_a_char (const char *subr, SCM x) SCM_NORETURN SCM_NOINLINE; -static void vm_error_not_a_pair (const char *subr, SCM x) SCM_NORETURN SCM_NOINLINE; -static void vm_error_not_a_mutable_pair (const char *subr, SCM x) SCM_NORETURN SCM_NOINLINE; -static void vm_error_not_a_string (const char *subr, SCM x) SCM_NORETURN SCM_NOINLINE; -static void vm_error_not_a_atomic_box (const char *subr, SCM x) SCM_NORETURN SCM_NOINLINE; -static void vm_error_not_a_bytevector (const char *subr, SCM x) SCM_NORETURN SCM_NOINLINE; -static void vm_error_not_a_mutable_bytevector (const char *subr, SCM v) SCM_NORETURN SCM_NOINLINE; -static void vm_error_not_a_struct (const char *subr, SCM x) SCM_NORETURN SCM_NOINLINE; -static void vm_error_not_a_vector (const char *subr, SCM v) 
SCM_NORETURN SCM_NOINLINE; -static void vm_error_not_a_mutable_vector (const char *subr, SCM v) SCM_NORETURN SCM_NOINLINE; -static void vm_error_out_of_range_uint64 (const char *subr, scm_t_uint64 idx) SCM_NORETURN SCM_NOINLINE; -static void vm_error_out_of_range_int64 (const char *subr, scm_t_int64 idx) SCM_NORETURN SCM_NOINLINE; -static void vm_error_no_values (void) SCM_NORETURN SCM_NOINLINE; -static void vm_error_not_enough_values (void) SCM_NORETURN SCM_NOINLINE; -static void vm_error_wrong_number_of_values (scm_t_uint32 expected) SCM_NORETURN SCM_NOINLINE; -static void vm_error_continuation_not_rewindable (SCM cont) SCM_NORETURN SCM_NOINLINE; -static void -vm_error (const char *msg, SCM arg) -{ - scm_throw (sym_vm_error, - scm_list_3 (sym_vm_run, scm_from_latin1_string (msg), - SCM_UNBNDP (arg) ? SCM_EOL : scm_list_1 (arg))); - abort(); /* not reached */ -} +static void vm_error_bad_instruction (uint32_t inst) SCM_NORETURN SCM_NOINLINE; static void -vm_error_bad_instruction (scm_t_uint32 inst) +vm_error_bad_instruction (uint32_t inst) { - vm_error ("VM: Bad instruction: ~s", scm_from_uint32 (inst)); -} - -static void -vm_error_unbound (SCM sym) -{ - scm_error_scm (scm_misc_error_key, SCM_BOOL_F, - scm_from_latin1_string ("Unbound variable: ~s"), - scm_list_1 (sym), SCM_BOOL_F); -} - -static void -vm_error_not_a_variable (const char *func_name, SCM x) -{ - scm_error (scm_arg_type_key, func_name, "Not a variable: ~S", - scm_list_1 (x), scm_list_1 (x)); -} - -static void -vm_error_apply_to_non_list (SCM x) -{ - scm_error (scm_arg_type_key, "apply", "Apply to non-list: ~S", - scm_list_1 (x), scm_list_1 (x)); -} - -static void -vm_error_kwargs_missing_value (SCM proc, SCM kw) -{ - scm_error_scm (sym_keyword_argument_error, proc, - scm_from_latin1_string ("Keyword argument has no value"), - SCM_EOL, scm_list_1 (kw)); -} - -static void -vm_error_kwargs_invalid_keyword (SCM proc, SCM obj) -{ - scm_error_scm (sym_keyword_argument_error, proc, - scm_from_latin1_string ("Invalid keyword"), - SCM_EOL, scm_list_1 (obj)); -} - -static void -vm_error_kwargs_unrecognized_keyword (SCM proc, SCM kw) -{ - scm_error_scm (sym_keyword_argument_error, proc, - scm_from_latin1_string ("Unrecognized keyword"), - SCM_EOL, scm_list_1 (kw)); -} - -static void -vm_error_wrong_num_args (SCM proc) -{ - scm_wrong_num_args (proc); -} - -static void -vm_error_wrong_type_apply (SCM proc) -{ - scm_error (scm_arg_type_key, NULL, "Wrong type to apply: ~S", - scm_list_1 (proc), scm_list_1 (proc)); -} - -static void -vm_error_not_a_char (const char *subr, SCM x) -{ - scm_wrong_type_arg_msg (subr, 1, x, "char"); -} - -static void -vm_error_not_a_pair (const char *subr, SCM x) -{ - scm_wrong_type_arg_msg (subr, 1, x, "pair"); -} - -static void -vm_error_not_a_mutable_pair (const char *subr, SCM x) -{ - scm_wrong_type_arg_msg (subr, 1, x, "mutable pair"); -} - -static void -vm_error_not_a_string (const char *subr, SCM x) -{ - scm_wrong_type_arg_msg (subr, 1, x, "string"); -} - -static void -vm_error_not_a_atomic_box (const char *subr, SCM x) -{ - scm_wrong_type_arg_msg (subr, 1, x, "atomic box"); -} - -static void -vm_error_not_a_bytevector (const char *subr, SCM x) -{ - scm_wrong_type_arg_msg (subr, 1, x, "bytevector"); -} - -static void -vm_error_not_a_mutable_bytevector (const char *subr, SCM x) -{ - scm_wrong_type_arg_msg (subr, 1, x, "mutable bytevector"); -} - -static void -vm_error_not_a_struct (const char *subr, SCM x) -{ - scm_wrong_type_arg_msg (subr, 1, x, "struct"); -} - -static void -vm_error_not_a_vector (const 
char *subr, SCM x) -{ - scm_wrong_type_arg_msg (subr, 1, x, "vector"); -} - -static void -vm_error_not_a_mutable_vector (const char *subr, SCM x) -{ - scm_wrong_type_arg_msg (subr, 1, x, "mutable vector"); -} - -static void -vm_error_out_of_range_uint64 (const char *subr, scm_t_uint64 idx) -{ - scm_out_of_range (subr, scm_from_uint64 (idx)); -} - -static void -vm_error_out_of_range_int64 (const char *subr, scm_t_int64 idx) -{ - scm_out_of_range (subr, scm_from_int64 (idx)); -} - -static void -vm_error_no_values (void) -{ - vm_error ("Zero values returned to single-valued continuation", - SCM_UNDEFINED); -} - -static void -vm_error_not_enough_values (void) -{ - vm_error ("Too few values returned to continuation", SCM_UNDEFINED); -} - -static void -vm_error_wrong_number_of_values (scm_t_uint32 expected) -{ - vm_error ("Wrong number of values returned to continuation (expected ~a)", - scm_from_uint32 (expected)); -} - -static void -vm_error_continuation_not_rewindable (SCM cont) -{ - vm_error ("Unrewindable partial continuation", cont); + fprintf (stderr, "VM: Bad instruction: %x\n", inst); + abort (); } static SCM vm_boot_continuation; -static SCM vm_builtin_apply; -static SCM vm_builtin_values; -static SCM vm_builtin_abort_to_prompt; -static SCM vm_builtin_call_with_values; -static SCM vm_builtin_call_with_current_continuation; - -static const scm_t_uint32 vm_boot_continuation_code[] = { - SCM_PACK_OP_24 (halt, 0) -}; -static const scm_t_uint32 vm_apply_non_program_code[] = { - SCM_PACK_OP_24 (apply_non_program, 0) -}; - -static const scm_t_uint32 vm_builtin_apply_code[] = { - SCM_PACK_OP_24 (assert_nargs_ge, 3), - SCM_PACK_OP_24 (tail_apply, 0), /* proc in r1, args from r2 */ -}; - -static const scm_t_uint32 vm_builtin_values_code[] = { - SCM_PACK_OP_24 (return_values, 0) /* vals from r1 */ -}; - -static const scm_t_uint32 vm_builtin_abort_to_prompt_code[] = { - SCM_PACK_OP_24 (assert_nargs_ge, 2), - SCM_PACK_OP_24 (abort, 0), /* tag in r1, vals from r2 */ - /* FIXME: Partial continuation should capture caller regs. 
*/ - SCM_PACK_OP_24 (return_values, 0) /* vals from r1 */ -}; - -static const scm_t_uint32 vm_builtin_call_with_values_code[] = { - SCM_PACK_OP_24 (assert_nargs_ee, 3), - SCM_PACK_OP_24 (alloc_frame, 7), - SCM_PACK_OP_12_12 (mov, 0, 5), - SCM_PACK_OP_24 (call, 6), SCM_PACK_OP_ARG_8_24 (0, 1), - SCM_PACK_OP_24 (long_fmov, 0), SCM_PACK_OP_ARG_8_24 (0, 2), - SCM_PACK_OP_24 (tail_call_shuffle, 7) -}; +#define DECLARE_BUILTIN(builtin, BUILTIN, req, opt, rest) \ + static SCM vm_builtin_##builtin; \ + static uint32_t *vm_builtin_##builtin##_code; +FOR_EACH_VM_BUILTIN (DECLARE_BUILTIN) +#undef DECLARE_BUILTIN -static const scm_t_uint32 vm_builtin_call_with_current_continuation_code[] = { - SCM_PACK_OP_24 (assert_nargs_ee, 2), - SCM_PACK_OP_24 (call_cc, 0) -}; - -static const scm_t_uint32 vm_handle_interrupt_code[] = { - SCM_PACK_OP_24 (alloc_frame, 3), - SCM_PACK_OP_12_12 (mov, 0, 2), - SCM_PACK_OP_24 (call, 2), SCM_PACK_OP_ARG_8_24 (0, 1), - SCM_PACK_OP_24 (return_from_interrupt, 0) +static const uint32_t vm_boot_continuation_code[] = { + SCM_PACK_OP_24 (halt, 0) }; - int -scm_i_vm_is_boot_continuation_code (scm_t_uint32 *ip) +scm_i_vm_is_boot_continuation_code (uint32_t *ip) { return ip == vm_boot_continuation_code; } -static SCM +SCM scm_vm_builtin_ref (unsigned idx) { switch (idx) @@ -741,6 +412,77 @@ scm_init_vm_builtins (void) scm_vm_builtin_index_to_name); } +static uint32_t* +instrumented_code (const uint32_t *code, size_t byte_size) +{ + uint32_t *ret, *write; + ret = scm_i_alloc_primitive_code_with_instrumentation (byte_size / 4, &write); + memcpy (write, code, byte_size); + return ret; +} + +static void +define_vm_builtins (void) +{ + const uint32_t apply_code[] = { + SCM_PACK_OP_24 (assert_nargs_ge, 3), + SCM_PACK_OP_12_12 (shuffle_down, 1, 0), + SCM_PACK_OP_24 (expand_apply_argument, 0), + SCM_PACK_OP_24 (tail_call, 0), + }; + + const uint32_t values_code[] = { + SCM_PACK_OP_12_12 (shuffle_down, 1, 0), + SCM_PACK_OP_24 (return_values, 0) + }; + + const uint32_t abort_to_prompt_code[] = { + SCM_PACK_OP_24 (assert_nargs_ge, 2), + SCM_PACK_OP_24 (abort, 0), /* tag in r1, vals from r2 */ + /* FIXME: Partial continuation should capture caller regs. */ + SCM_PACK_OP_24 (return_values, 0) /* vals from r0 */ + }; + + const uint32_t call_with_values_code[] = { + SCM_PACK_OP_24 (assert_nargs_ee, 3), + SCM_PACK_OP_24 (alloc_frame, 8), + SCM_PACK_OP_12_12 (mov, 0, 6), + SCM_PACK_OP_24 (call, 7), SCM_PACK_OP_ARG_8_24 (0, 1), + SCM_PACK_OP_24 (long_fmov, 0), SCM_PACK_OP_ARG_8_24 (0, 2), + SCM_PACK_OP_12_12 (shuffle_down, 7, 1), + SCM_PACK_OP_24 (tail_call, 0) + }; + + const uint32_t call_with_current_continuation_code[] = { + SCM_PACK_OP_24 (assert_nargs_ee, 2), + SCM_PACK_OP_12_12 (mov, 1, 0), + SCM_PACK_OP_24 (capture_continuation, 0), + SCM_PACK_OP_24 (tail_call, 0) + }; + + /* This one isn't exactly a builtin but we still handle it here. 
*/ + const uint32_t handle_interrupt_code[] = { + SCM_PACK_OP_24 (alloc_frame, 4), + SCM_PACK_OP_12_12 (mov, 0, 3), + SCM_PACK_OP_24 (call, 3), SCM_PACK_OP_ARG_8_24 (0, 1), + SCM_PACK_OP_24 (return_from_interrupt, 0) + }; + +#define DEFINE_BUILTIN(builtin, BUILTIN, req, opt, rest) \ + { \ + size_t sz = sizeof (builtin##_code); \ + vm_builtin_##builtin##_code = instrumented_code (builtin##_code, sz); \ + vm_builtin_##builtin = \ + scm_cell (scm_tc7_program | SCM_F_PROGRAM_IS_PRIMITIVE, \ + (scm_t_bits)vm_builtin_##builtin##_code); \ + } + FOR_EACH_VM_BUILTIN (DEFINE_BUILTIN); +#undef INDEX_TO_NAME + + scm_vm_intrinsics.handle_interrupt_code = + instrumented_code (handle_interrupt_code, sizeof (handle_interrupt_code)); +} + SCM scm_i_call_with_current_continuation (SCM proc) { @@ -768,8 +510,7 @@ scm_i_call_with_current_continuation (SCM proc) #undef VM_USE_HOOKS #undef VM_NAME -typedef SCM (*scm_t_vm_engine) (scm_i_thread *current_thread, struct scm_vm *vp, - scm_i_jmp_buf *registers, int resume); +typedef SCM (*scm_t_vm_engine) (scm_thread *current_thread); static const scm_t_vm_engine vm_engines[SCM_VM_NUM_ENGINES] = { vm_regular_engine, vm_debug_engine }; @@ -849,14 +590,18 @@ expand_stack (union scm_vm_stack_element *old_bottom, size_t old_size, } #undef FUNC_NAME -static struct scm_vm * -make_vm (void) -#define FUNC_NAME "make_vm" +void +scm_i_vm_prepare_stack (struct scm_vm *vp) { - int i; - struct scm_vm *vp; - - vp = scm_gc_malloc (sizeof (struct scm_vm), "vm"); + /* Not racey, as this will be run the first time a thread enters + Guile. */ + if (page_size == 0) + { + page_size = getpagesize (); + /* page_size should be a power of two. */ + if (page_size & (page_size - 1)) + abort (); + } vp->stack_size = page_size / sizeof (union scm_vm_stack_element); vp->stack_bottom = allocate_stack (vp->stack_size); @@ -872,24 +617,23 @@ make_vm (void) vp->sp = vp->stack_top; vp->sp_min_since_gc = vp->sp; vp->fp = vp->stack_top; + vp->compare_result = SCM_F_COMPARE_NONE; vp->engine = vm_default_engine; vp->trace_level = 0; - for (i = 0; i < SCM_VM_NUM_HOOKS; i++) - vp->hooks[i] = SCM_BOOL_F; - - return vp; +#define INIT_HOOK(h) vp->h##_hook = SCM_BOOL_F; + FOR_EACH_HOOK (INIT_HOOK) +#undef INIT_HOOK } -#undef FUNC_NAME static void return_unused_stack_to_os (struct scm_vm *vp) { #if HAVE_SYS_MMAN_H - scm_t_uintptr lo = (scm_t_uintptr) vp->stack_bottom; - scm_t_uintptr hi = (scm_t_uintptr) vp->sp; + uintptr_t lo = (uintptr_t) vp->stack_bottom; + uintptr_t hi = (uintptr_t) vp->sp; /* The second condition is needed to protect against wrap-around. */ if (vp->sp_min_since_gc >= vp->stack_bottom && vp->sp >= vp->sp_min_since_gc) - lo = (scm_t_uintptr) vp->sp_min_since_gc; + lo = (uintptr_t) vp->sp_min_since_gc; lo &= ~(page_size - 1U); /* round down */ hi &= ~(page_size - 1U); /* round down */ @@ -918,8 +662,8 @@ return_unused_stack_to_os (struct scm_vm *vp) #define SLOT_MAP_CACHE_SIZE 32U struct slot_map_cache_entry { - scm_t_uint32 *ip; - const scm_t_uint8 *map; + uint32_t *ip; + const uint8_t *map; }; struct slot_map_cache @@ -927,13 +671,13 @@ struct slot_map_cache struct slot_map_cache_entry entries[SLOT_MAP_CACHE_SIZE]; }; -static const scm_t_uint8 * -find_slot_map (scm_t_uint32 *ip, struct slot_map_cache *cache) +static const uint8_t * +find_slot_map (uint32_t *ip, struct slot_map_cache *cache) { /* The lower two bits should be zero. FIXME: Use a better hash function; we don't expose scm_raw_hashq currently. 
*/ - size_t slot = (((scm_t_uintptr) ip) >> 2) % SLOT_MAP_CACHE_SIZE; - const scm_t_uint8 *map; + size_t slot = (((uintptr_t) ip) >> 2) % SLOT_MAP_CACHE_SIZE; + const uint8_t *map; if (cache->entries[slot].ip == ip) map = cache->entries[slot].map; @@ -951,7 +695,7 @@ enum slot_desc { SLOT_DESC_DEAD = 0, SLOT_DESC_LIVE_RAW = 1, - SLOT_DESC_LIVE_SCM = 2, + SLOT_DESC_LIVE_GC = 2, SLOT_DESC_UNUSED = 3 }; @@ -966,7 +710,7 @@ scm_i_vm_mark_stack (struct scm_vm *vp, struct GC_ms_entry *mark_stack_ptr, activation, due to multiple threads or per-instruction hooks, and providing slot maps for all points in a program would take a prohibitive amount of space. */ - const scm_t_uint8 *slot_map = NULL; + const uint8_t *slot_map = NULL; void *upper = (void *) GC_greatest_plausible_heap_addr; void *lower = (void *) GC_least_plausible_heap_addr; struct slot_map_cache cache; @@ -977,11 +721,11 @@ scm_i_vm_mark_stack (struct scm_vm *vp, struct GC_ms_entry *mark_stack_ptr, fp < vp->stack_top; fp = SCM_FRAME_DYNAMIC_LINK (fp)) { - scm_t_ptrdiff nlocals = SCM_FRAME_NUM_LOCALS (fp, sp); + ptrdiff_t nlocals = SCM_FRAME_NUM_LOCALS (fp, sp); size_t slot = nlocals - 1; for (slot = nlocals - 1; sp < fp; sp++, slot--) { - enum slot_desc desc = SLOT_DESC_LIVE_SCM; + enum slot_desc desc = SLOT_DESC_LIVE_GC; if (slot_map) desc = (slot_map[slot / 4U] >> ((slot % 4U) * 2)) & 3U; @@ -991,7 +735,7 @@ scm_i_vm_mark_stack (struct scm_vm *vp, struct GC_ms_entry *mark_stack_ptr, case SLOT_DESC_LIVE_RAW: break; case SLOT_DESC_UNUSED: - case SLOT_DESC_LIVE_SCM: + case SLOT_DESC_LIVE_GC: if (SCM_NIMP (sp->as_scm) && sp->as_ptr >= lower && sp->as_ptr <= upper) mark_stack_ptr = GC_mark_and_push (sp->as_ptr, @@ -1011,7 +755,7 @@ scm_i_vm_mark_stack (struct scm_vm *vp, struct GC_ms_entry *mark_stack_ptr, Note that there may be other reasons to not have a dead slots map, e.g. if all of the frame's slots below the callee frame are live. */ - slot_map = find_slot_map (SCM_FRAME_RETURN_ADDRESS (fp), &cache); + slot_map = find_slot_map (SCM_FRAME_VIRTUAL_RETURN_ADDRESS (fp), &cache); } return_unused_stack_to_os (vp); @@ -1024,8 +768,9 @@ void scm_i_vm_free_stack (struct scm_vm *vp) { free_stack (vp->stack_bottom, vp->stack_size); - vp->stack_bottom = vp->stack_top = vp->stack_limit = NULL; - vp->stack_size = 0; + /* Not strictly necessary, but good to avoid confusion when debugging + thread-related GC issues. 
*/ + memset (vp, 0, sizeof (*vp)); } struct vm_expand_stack_data @@ -1043,7 +788,7 @@ vm_expand_stack_inner (void *data_ptr) struct scm_vm *vp = data->vp; union scm_vm_stack_element *old_top, *new_bottom; size_t new_size; - scm_t_ptrdiff reloc; + ptrdiff_t reloc; old_top = vp->stack_top; new_size = vp->stack_size; @@ -1067,7 +812,7 @@ vm_expand_stack_inner (void *data_ptr) return new_bottom; } -static scm_t_ptrdiff +static ptrdiff_t current_overflow_size (struct scm_vm *vp) { if (scm_is_pair (vp->overflow_handler_stack)) @@ -1076,9 +821,9 @@ current_overflow_size (struct scm_vm *vp) } static int -should_handle_stack_overflow (struct scm_vm *vp, scm_t_ptrdiff stack_size) +should_handle_stack_overflow (struct scm_vm *vp, ptrdiff_t stack_size) { - scm_t_ptrdiff overflow_size = current_overflow_size (vp); + ptrdiff_t overflow_size = current_overflow_size (vp); return overflow_size >= 0 && stack_size >= overflow_size; } @@ -1120,7 +865,7 @@ unwind_overflow_handler (void *ptr) static void vm_expand_stack (struct scm_vm *vp, union scm_vm_stack_element *new_sp) { - scm_t_ptrdiff stack_size = vp->stack_top - new_sp; + ptrdiff_t stack_size = vp->stack_top - new_sp; if (stack_size > vp->stack_size) { @@ -1180,37 +925,616 @@ vm_expand_stack (struct scm_vm *vp, union scm_vm_stack_element *new_sp) } } -static struct scm_vm * -thread_vm (scm_i_thread *t) +static uint32_t +frame_locals_count (scm_thread *thread) +{ + return SCM_FRAME_NUM_LOCALS (thread->vm.fp, thread->vm.sp); +} + +static void +thread_expand_stack (scm_thread *thread, union scm_vm_stack_element *new_sp) +{ + vm_expand_stack (&thread->vm, new_sp); +} + +/* This duplicates the inlined "ALLOC_FRAME" macro from vm-engine.c, but + it seems to be necessary for perf; the inlined version avoids the + needs to flush IP in the common case. */ +static void +alloc_frame (scm_thread *thread, uint32_t nlocals) +{ + union scm_vm_stack_element *sp = thread->vm.fp - nlocals; + + if (sp < thread->vm.sp_min_since_gc) + { + if (SCM_UNLIKELY (sp < thread->vm.stack_limit)) + thread_expand_stack (thread, sp); + else + thread->vm.sp_min_since_gc = thread->vm.sp = sp; + } + else + thread->vm.sp = sp; +} + +static uint32_t +compute_kwargs_npositional (scm_thread *thread, uint32_t nreq, uint32_t nopt) +{ + uint32_t npositional, nargs; + + nargs = frame_locals_count (thread); + + /* look in optionals for first keyword or last positional */ + /* starting after the last required positional arg */ + npositional = nreq; + while (/* while we have args */ + npositional < nargs + /* and we still have positionals to fill */ + && npositional < nreq + nopt + /* and we haven't reached a keyword yet */ + && !scm_is_keyword (SCM_FRAME_LOCAL (thread->vm.fp, npositional))) + /* bind this optional arg (by leaving it in place) */ + npositional++; + + return npositional; +} + +static void +bind_kwargs (scm_thread *thread, uint32_t npositional, uint32_t nlocals, + SCM kwargs, uint8_t strict, uint8_t allow_other_keys) +{ + uint32_t nargs, nkw, n; + union scm_vm_stack_element *fp; + + nargs = frame_locals_count (thread); + nkw = nargs - npositional; + + /* shuffle non-positional arguments above nlocals */ + alloc_frame (thread, nlocals + nkw); + + fp = thread->vm.fp; + n = nkw; + while (n--) + SCM_FRAME_LOCAL (fp, nlocals + n) = SCM_FRAME_LOCAL (fp, npositional + n); + + /* Fill optionals & keyword args with SCM_UNDEFINED */ + n = npositional; + while (n < nlocals) + SCM_FRAME_LOCAL (fp, n++) = SCM_UNDEFINED; + + /* Now bind keywords, in the order given. 
*/ + for (n = 0; n < nkw; n++) + { + SCM kw = SCM_FRAME_LOCAL (fp, nlocals + n); + + if (scm_is_keyword (kw)) + { + SCM walk; + for (walk = kwargs; scm_is_pair (walk); walk = SCM_CDR (walk)) + if (scm_is_eq (SCM_CAAR (walk), kw)) + { + SCM si = SCM_CDAR (walk); + if (n + 1 < nkw) + SCM_FRAME_LOCAL (fp, scm_to_uint32 (si)) = + SCM_FRAME_LOCAL (fp, nlocals + n + 1); + else + scm_error_scm (sym_keyword_argument_error, SCM_BOOL_F, + scm_from_latin1_string + ("Keyword argument has no value"), + SCM_EOL, scm_list_1 (kw)); + break; + } + if (!allow_other_keys && !scm_is_pair (walk)) + scm_error_scm (sym_keyword_argument_error, SCM_BOOL_F, + scm_from_latin1_string ("Unrecognized keyword"), + SCM_EOL, scm_list_1 (kw)); + n++; + } + else if (strict) + { + scm_error_scm (sym_keyword_argument_error, SCM_BOOL_F, + scm_from_latin1_string ("Invalid keyword"), + SCM_EOL, scm_list_1 (kw)); + } + else + { + /* Ignore this argument. It might get consed onto a rest list. */ + } + } +} + +static SCM +cons_rest (scm_thread *thread, uint32_t base) +{ + SCM rest = SCM_EOL; + uint32_t n = frame_locals_count (thread) - base; + + while (n--) + rest = scm_inline_cons (thread, SCM_FRAME_LOCAL (thread->vm.fp, base + n), + rest); + + return rest; +} + +static void +push_interrupt_frame (scm_thread *thread, uint8_t *mra) +{ + union scm_vm_stack_element *old_fp, *new_fp; + size_t frame_overhead = 3; + size_t old_frame_size = frame_locals_count (thread); + SCM proc = scm_i_async_pop (thread); + + /* Reserve space for frame and callee. */ + alloc_frame (thread, old_frame_size + frame_overhead + 1); + + old_fp = thread->vm.fp; + new_fp = SCM_FRAME_SLOT (old_fp, old_frame_size + frame_overhead - 1); + SCM_FRAME_SET_DYNAMIC_LINK (new_fp, old_fp); + /* Arrange to return to the same handle-interrupts opcode to handle + any additional interrupts. */ + SCM_FRAME_SET_VIRTUAL_RETURN_ADDRESS (new_fp, thread->vm.ip); + SCM_FRAME_SET_MACHINE_RETURN_ADDRESS (new_fp, mra); + SCM_FRAME_LOCAL (new_fp, 0) = proc; + + thread->vm.fp = new_fp; +} + +struct return_to_continuation_data +{ + struct scm_vm_cont *cp; + struct scm_vm *vp; +}; + +/* Called with the GC lock to prevent the stack marker from traversing a + stack in an inconsistent state. */ +static void * +vm_return_to_continuation_inner (void *data_ptr) +{ + struct return_to_continuation_data *data = data_ptr; + struct scm_vm *vp = data->vp; + struct scm_vm_cont *cp = data->cp; + + /* We know that there is enough space for the continuation, because we + captured it in the past. However there may have been an expansion + since the capture, so we may have to re-link the frame + pointers. 
*/ + memcpy (vp->stack_top - cp->stack_size, + cp->stack_bottom, + cp->stack_size * sizeof (*cp->stack_bottom)); + vp->fp = vp->stack_top - cp->fp_offset; + vm_restore_sp (vp, vp->stack_top - cp->stack_size); + + return NULL; +} + +static void reinstate_continuation_x (scm_thread *thread, SCM cont) SCM_NORETURN; + +static void +reinstate_continuation_x (scm_thread *thread, SCM cont) { - if (SCM_UNLIKELY (!t->vp)) - t->vp = make_vm (); + scm_t_contregs *continuation = scm_i_contregs (cont); + struct scm_vm *vp = &thread->vm; + struct scm_vm_cont *cp; + size_t n, i, frame_overhead = 3; + union scm_vm_stack_element *argv; + struct return_to_continuation_data data; + + if (!scm_is_eq (continuation->root, thread->continuation_root)) + scm_misc_error + ("%continuation-call", + "invoking continuation would cross continuation barrier: ~A", + scm_list_1 (cont)); + + n = frame_locals_count (thread) - 1; + argv = alloca (n * sizeof (*argv)); + memcpy (argv, vp->sp, n * sizeof (*argv)); + + cp = SCM_VM_CONT_DATA (continuation->vm_cont); + + data.cp = cp; + data.vp = vp; + GC_call_with_alloc_lock (vm_return_to_continuation_inner, &data); + + /* Now we have the continuation properly copied over. We just need to + copy on an empty frame and the return values, as the continuation + expects. */ + vm_push_sp (vp, vp->sp - frame_overhead - n); + for (i = 0; i < frame_overhead; i++) + vp->sp[n+i].as_scm = SCM_BOOL_F; + memcpy(vp->sp, argv, n * sizeof (union scm_vm_stack_element)); - return t->vp; + vp->ip = cp->vra; + + scm_i_reinstate_continuation (cont, cp->mra); } -struct scm_vm * -scm_the_vm (void) +static SCM +capture_continuation (scm_thread *thread) { - return thread_vm (SCM_I_CURRENT_THREAD); + struct scm_vm *vp = &thread->vm; + void *mra = SCM_FRAME_MACHINE_RETURN_ADDRESS (vp->fp); + if (mra == scm_jit_return_to_interpreter_trampoline) + mra = NULL; + SCM vm_cont = capture_stack (vp->stack_top, + SCM_FRAME_DYNAMIC_LINK (vp->fp), + SCM_FRAME_PREVIOUS_SP (vp->fp), + SCM_FRAME_VIRTUAL_RETURN_ADDRESS (vp->fp), + mra, + scm_dynstack_capture_all (&thread->dynstack), + 0); + return scm_i_make_continuation (thread, vm_cont); +} + +struct compose_continuation_data +{ + struct scm_vm *vp; + struct scm_vm_cont *cp; +}; + +static void * +compose_continuation_inner (void *data_ptr) +{ + struct compose_continuation_data *data = data_ptr; + struct scm_vm *vp = data->vp; + struct scm_vm_cont *cp = data->cp; + + memcpy (vp->fp - cp->stack_size, + cp->stack_bottom, + cp->stack_size * sizeof (*cp->stack_bottom)); + + vp->fp -= cp->fp_offset; + vp->ip = cp->vra; + + return cp->mra; +} + +static uint8_t* +compose_continuation (scm_thread *thread, SCM cont) +{ + struct scm_vm *vp = &thread->vm; + size_t nargs; + struct compose_continuation_data data; + struct scm_vm_cont *cp; + union scm_vm_stack_element *args; + ptrdiff_t old_fp_offset; + uint8_t *mra; + + if (SCM_UNLIKELY (! SCM_VM_CONT_REWINDABLE_P (cont))) + scm_wrong_type_arg_msg (NULL, 0, cont, "resumable continuation"); + + nargs = frame_locals_count (thread) - 1; + args = alloca (nargs * sizeof (*args)); + memcpy (args, vp->sp, nargs * sizeof (*args)); + + cp = SCM_VM_CONT_DATA (cont); + + old_fp_offset = vp->stack_top - vp->fp; + + vm_push_sp (vp, vp->fp - (cp->stack_size + nargs)); + + data.vp = vp; + data.cp = cp; + mra = GC_call_with_alloc_lock (compose_continuation_inner, &data); + + /* The resumed continuation will expect ARGS on the stack as if from a + multiple-value return. 
*/ + memcpy (vp->sp, args, nargs * sizeof (*args)); + + /* The prompt captured a slice of the dynamic stack. Here we wind + those entries onto the current thread's stack. We also have to + relocate any prompts that we see along the way. */ + { + scm_t_bits *walk; + + for (walk = SCM_DYNSTACK_FIRST (cp->dynstack); + SCM_DYNSTACK_TAG (walk); + walk = SCM_DYNSTACK_NEXT (walk)) + { + scm_t_bits tag = SCM_DYNSTACK_TAG (walk); + + if (SCM_DYNSTACK_TAG_TYPE (tag) == SCM_DYNSTACK_TYPE_PROMPT) + scm_dynstack_wind_prompt (&thread->dynstack, walk, old_fp_offset, + thread->vm.registers); + else + scm_dynstack_wind_1 (&thread->dynstack, walk); + } + } + + return mra; +} + +static void +expand_apply_argument (scm_thread *thread) +{ + SCM x = thread->vm.sp[0].as_scm; + int len = scm_ilength (x); + + if (SCM_UNLIKELY (len < 0)) + scm_error (scm_arg_type_key, "apply", "Apply to non-list: ~S", + scm_list_1 (x), scm_list_1 (x)); + + alloc_frame (thread, frame_locals_count (thread) - 1 + len); + + while (len--) + { + thread->vm.sp[len].as_scm = SCM_CAR (x); + x = SCM_CDR (x); + } +} + +/* This is here to avoid putting the code for "alloc-frame" in subr + calls. */ +static void +unpack_values_object (scm_thread *thread, SCM obj) +{ + size_t n, nvals = scm_i_nvalues (obj); + alloc_frame (thread, nvals); + for (n = 0; n < nvals; n++) + SCM_FRAME_LOCAL (thread->vm.fp, n) = scm_i_value_ref (obj, n); +} + +static void +foreign_call (scm_thread *thread, SCM cif, SCM pointer) +{ + SCM ret; + int err = 0; + + ret = scm_i_foreign_call (cif, pointer, &err, thread->vm.sp); + + alloc_frame (thread, 2); + SCM_FRAME_LOCAL (thread->vm.fp, 0) = ret; + SCM_FRAME_LOCAL (thread->vm.fp, 1) = scm_from_int (err); +} + +static SCM +capture_delimited_continuation (struct scm_vm *vp, + union scm_vm_stack_element *saved_fp, + uint8_t *saved_mra, + jmp_buf *saved_registers, + scm_t_dynstack *dynstack, + jmp_buf *current_registers) +{ + SCM vm_cont; + uint32_t flags; + union scm_vm_stack_element *base_fp; + + flags = SCM_F_VM_CONT_PARTIAL; + /* If we are aborting to a prompt that has the same registers as those + of the abort, it means there are no intervening C frames on the + stack, and so the continuation can be relocated elsewhere on the + stack: it is rewindable. */ + if (saved_registers && saved_registers == current_registers) + flags |= SCM_F_VM_CONT_REWINDABLE; + + /* Walk the stack until we find the first frame newer than saved_fp. + We will save the stack until that frame. It used to be that we + could determine the stack base in O(1) time, but that's no longer + the case, since the thunk application doesn't occur where the + prompt is saved. */ + for (base_fp = vp->fp; + SCM_FRAME_DYNAMIC_LINK (base_fp) < saved_fp; + base_fp = SCM_FRAME_DYNAMIC_LINK (base_fp)); + + if (SCM_FRAME_DYNAMIC_LINK (base_fp) != saved_fp) + abort(); + + scm_dynstack_relocate_prompts (dynstack, vp->stack_top - base_fp); + + /* Capture from the base_fp to the top thunk application frame. Don't + capture values from the most recent frame, as they are the abort + args. */ + vm_cont = capture_stack (base_fp, vp->fp, vp->fp, vp->ip, + saved_mra, dynstack, flags); + + return scm_i_make_composable_continuation (vm_cont); +} + +void +scm_i_vm_abort (SCM *tag_and_argv, size_t n) +{ + scm_call_n (vm_builtin_abort_to_prompt, tag_and_argv, n); + /* Unreachable. */ + abort (); +} + +/* The same as scm_i_vm_abort(), but possibly called in response to + resource allocation failures, so we might not be able to make a + call, as that might require stack expansion. 
Grrr. */ +void +scm_i_vm_emergency_abort (SCM *tag_and_argv, size_t n) +{ + scm_thread *thread = SCM_I_CURRENT_THREAD; + struct scm_vm *vp = &thread->vm; + scm_t_dynstack *dynstack = &thread->dynstack; + SCM tag, cont; + size_t nargs; + scm_t_bits *prompt; + scm_t_dynstack_prompt_flags flags; + ptrdiff_t fp_offset, sp_offset; + union scm_vm_stack_element *fp, *sp; + SCM *argv; + uint32_t *vra; + uint8_t *mra; + jmp_buf *registers; + + tag = tag_and_argv[0]; + argv = tag_and_argv + 1; + nargs = n - 1; + + prompt = scm_dynstack_find_prompt (dynstack, tag, + &flags, &fp_offset, &sp_offset, + &vra, &mra, ®isters); + + if (!prompt) + { + fprintf (stderr, "guile: fatal: emergency abort to unknown prompt\n"); + abort (); + } + + fp = vp->stack_top - fp_offset; + sp = vp->stack_top - sp_offset; + + if (!(flags & SCM_F_DYNSTACK_PROMPT_ESCAPE_ONLY)) + { + fprintf (stderr, "guile: fatal: emergency abort to non-linear prompt\n"); + abort (); + } + + cont = SCM_BOOL_F; + + /* Unwind. */ + scm_dynstack_unwind (dynstack, prompt); + + /* Continuation gets nargs+1 values: the one more is for the cont. */ + sp = sp - nargs - 1; + + /* Shuffle abort arguments down to the prompt continuation. We have + to be jumping to an older part of the stack. */ + if (sp < vp->sp) + abort (); + sp[nargs].as_scm = cont; + + while (nargs--) + sp[nargs].as_scm = *argv++; + + /* Restore VM regs */ + vp->fp = fp; + vp->sp = sp; + vp->ip = vra; + + /* Jump! */ + vp->mra_after_abort = mra; + longjmp (*registers, 1); +} + +static uint8_t * +abort_to_prompt (scm_thread *thread, uint8_t *saved_mra) +{ + struct scm_vm *vp = &thread->vm; + scm_t_dynstack *dynstack = &thread->dynstack; + SCM tag, cont; + size_t nargs; + scm_t_bits *prompt; + scm_t_dynstack_prompt_flags flags; + ptrdiff_t fp_offset, sp_offset; + union scm_vm_stack_element *fp, *sp; + uint32_t *vra; + uint8_t *mra; + jmp_buf *registers; + + tag = SCM_FRAME_LOCAL (vp->fp, 1); + nargs = frame_locals_count (thread) - 2; + + prompt = scm_dynstack_find_prompt (dynstack, tag, + &flags, &fp_offset, &sp_offset, + &vra, &mra, ®isters); + + if (!prompt) + scm_misc_error ("abort", "Abort to unknown prompt", scm_list_1 (tag)); + + fp = vp->stack_top - fp_offset; + sp = vp->stack_top - sp_offset; + + /* Only reify if the continuation referenced in the handler. */ + if (flags & SCM_F_DYNSTACK_PROMPT_ESCAPE_ONLY) + cont = SCM_BOOL_F; + else + { + scm_t_dynstack *captured; + + captured = scm_dynstack_capture (dynstack, SCM_DYNSTACK_NEXT (prompt)); + cont = capture_delimited_continuation (vp, fp, saved_mra, registers, + captured, thread->vm.registers); + } + + /* Unwind. */ + scm_dynstack_unwind (dynstack, prompt); + + /* Continuation gets nargs+1 values: the one more is for the cont. */ + sp = sp - nargs - 1; + + /* Shuffle abort arguments down to the prompt continuation. We have + to be jumping to an older part of the stack. */ + if (sp < vp->sp) + abort (); + sp[nargs].as_scm = cont; + while (nargs--) + sp[nargs] = vp->sp[nargs]; + + /* Restore VM regs */ + vp->fp = fp; + vp->sp = sp; + vp->ip = vra; + + /* If there are intervening C frames, then jump over them, making a + nonlocal exit. Otherwise fall through and let the VM pick up where + it left off. 
*/ + if (thread->vm.registers != registers) + { + vp->mra_after_abort = mra; + longjmp (*registers, 1); + } + + return mra; +} + +static uint32_t * +get_callee_vcode (scm_thread *thread) +{ + struct scm_vm *vp = &thread->vm; + + SCM proc = SCM_FRAME_LOCAL (vp->fp, 0); + + if (SCM_LIKELY (SCM_PROGRAM_P (proc))) + return SCM_PROGRAM_CODE (proc); + + while (SCM_STRUCTP (proc) && SCM_STRUCT_APPLICABLE_P (proc)) + { + proc = SCM_STRUCT_PROCEDURE (proc); + SCM_FRAME_LOCAL (vp->fp, 0) = proc; + + if (SCM_PROGRAM_P (proc)) + return SCM_PROGRAM_CODE (proc); + } + + if (SCM_HAS_TYP7 (proc, scm_tc7_smob) && SCM_SMOB_APPLICABLE_P (proc)) + { + uint32_t n = frame_locals_count (thread); + + alloc_frame (thread, n + 1); + + /* Although we could make VM modifications to avoid this shuffle, + it's easier to piggy-back on the subr arg parsing machinery. + Hopefully applicable smobs will go away in the mid-term. */ + while (n--) + SCM_FRAME_LOCAL (vp->fp, n + 1) = SCM_FRAME_LOCAL (vp->fp, n); + + proc = SCM_SMOB_DESCRIPTOR (proc).apply_trampoline; + SCM_FRAME_LOCAL (vp->fp, 0) = proc; + return SCM_PROGRAM_CODE (proc); + } + + vp->ip = SCM_FRAME_VIRTUAL_RETURN_ADDRESS (vp->fp); + + scm_error (scm_arg_type_key, NULL, "Wrong type to apply: ~S", + scm_list_1 (proc), scm_list_1 (proc)); } SCM scm_call_n (SCM proc, SCM *argv, size_t nargs) { - scm_i_thread *thread; + scm_thread *thread; struct scm_vm *vp; union scm_vm_stack_element *return_fp, *call_fp; /* Since nargs can only describe the length of a valid argv array in elements and each element is at least 4 bytes, nargs will not be greater than INTMAX/2 and therefore we don't have to check for overflow here or below. */ - size_t return_nlocals = 1, call_nlocals = nargs + 1, frame_size = 2; - scm_t_ptrdiff stack_reserve_words; + size_t return_nlocals = 0, call_nlocals = nargs + 1, frame_size = 3; + ptrdiff_t stack_reserve_words; size_t i; thread = SCM_I_CURRENT_THREAD; - vp = thread_vm (thread); + vp = &thread->vm; SCM_CHECK_STACK; @@ -1229,36 +1553,48 @@ scm_call_n (SCM proc, SCM *argv, size_t nargs) call_fp = vp->sp + call_nlocals; return_fp = call_fp + frame_size + return_nlocals; - SCM_FRAME_SET_RETURN_ADDRESS (return_fp, vp->ip); + SCM_FRAME_SET_VIRTUAL_RETURN_ADDRESS (return_fp, vp->ip); + SCM_FRAME_SET_MACHINE_RETURN_ADDRESS (return_fp, 0); SCM_FRAME_SET_DYNAMIC_LINK (return_fp, vp->fp); - SCM_FRAME_LOCAL (return_fp, 0) = vm_boot_continuation; - vp->ip = (scm_t_uint32 *) vm_boot_continuation_code; - vp->fp = call_fp; + vp->ip = (uint32_t *) vm_boot_continuation_code; - SCM_FRAME_SET_RETURN_ADDRESS (call_fp, vp->ip); + SCM_FRAME_SET_VIRTUAL_RETURN_ADDRESS (call_fp, vp->ip); + SCM_FRAME_SET_MACHINE_RETURN_ADDRESS (call_fp, 0); SCM_FRAME_SET_DYNAMIC_LINK (call_fp, return_fp); SCM_FRAME_LOCAL (call_fp, 0) = proc; for (i = 0; i < nargs; i++) SCM_FRAME_LOCAL (call_fp, i + 1) = argv[i]; + vp->fp = call_fp; + { - scm_i_jmp_buf registers; + jmp_buf registers; int resume; - const void *prev_cookie = vp->resumable_prompt_cookie; + jmp_buf *prev_registers = thread->vm.registers; SCM ret; - resume = SCM_I_SETJMP (registers); + resume = setjmp (registers); + + thread->vm.registers = ®isters; + if (SCM_UNLIKELY (resume)) { + uint8_t *mcode = vp->mra_after_abort; scm_gc_after_nonlocal_exit (); /* Non-local return. 
*/ - vm_dispatch_abort_hook (vp); + if (vp->abort_hook_enabled) + invoke_abort_hook (thread); +#if ENABLE_JIT + if (mcode && !vp->disable_mcode) + scm_jit_enter_mcode (thread, mcode); +#endif } + else + vp->ip = get_callee_vcode (thread); - vp->resumable_prompt_cookie = ®isters; - ret = vm_engines[vp->engine](thread, vp, ®isters, resume); - vp->resumable_prompt_cookie = prev_cookie; + ret = vm_engines[vp->engine](thread); + thread->vm.registers = prev_registers; return ret; } @@ -1266,57 +1602,98 @@ scm_call_n (SCM proc, SCM *argv, size_t nargs) /* Scheme interface */ -#define VM_DEFINE_HOOK(n) \ -{ \ - struct scm_vm *vp; \ - vp = scm_the_vm (); \ - if (scm_is_false (vp->hooks[n])) \ - vp->hooks[n] = scm_make_hook (SCM_I_MAKINUM (1)); \ - return vp->hooks[n]; \ +#define VM_ADD_HOOK(h, f) \ + { \ + scm_thread *t = SCM_I_CURRENT_THREAD; \ + SCM hook = t->vm.h##_hook; \ + if (scm_is_false (hook)) \ + hook = t->vm.h##_hook = scm_make_hook (SCM_I_MAKINUM (1)); \ + scm_add_hook_x (hook, f, SCM_UNDEFINED); \ + vm_hook_compute_enabled (t, hook, &t->vm.h##_hook_enabled); \ + vm_recompute_disable_mcode (t); \ + return SCM_UNSPECIFIED; \ + } + +#define VM_REMOVE_HOOK(h, f) \ + { \ + scm_thread *t = SCM_I_CURRENT_THREAD; \ + SCM hook = t->vm.h##_hook; \ + if (scm_is_true (hook)) \ + scm_remove_hook_x (hook, f); \ + vm_hook_compute_enabled (t, hook, &t->vm.h##_hook_enabled); \ + vm_recompute_disable_mcode (t); \ + return SCM_UNSPECIFIED; \ + } + +SCM_DEFINE (scm_vm_add_apply_hook_x, "vm-add-apply-hook!", 1, 0, 0, + (SCM f), + "") +#define FUNC_NAME s_scm_vm_add_apply_hook_x +{ + VM_ADD_HOOK (apply, f); } +#undef FUNC_NAME -SCM_DEFINE (scm_vm_apply_hook, "vm-apply-hook", 0, 0, 0, - (void), +SCM_DEFINE (scm_vm_remove_apply_hook_x, "vm-remove-apply-hook!", 1, 0, 0, + (SCM f), "") -#define FUNC_NAME s_scm_vm_apply_hook +#define FUNC_NAME s_scm_vm_remove_apply_hook_x { - VM_DEFINE_HOOK (SCM_VM_APPLY_HOOK); + VM_REMOVE_HOOK (apply, f); } #undef FUNC_NAME -SCM_DEFINE (scm_vm_push_continuation_hook, "vm-push-continuation-hook", 0, 0, 0, - (void), +SCM_DEFINE (scm_vm_add_return_hook_x, "vm-add-return-hook!", 1, 0, 0, + (SCM f), "") -#define FUNC_NAME s_scm_vm_push_continuation_hook +#define FUNC_NAME s_scm_vm_add_return_hook_x { - VM_DEFINE_HOOK (SCM_VM_PUSH_CONTINUATION_HOOK); + VM_ADD_HOOK (return, f); } #undef FUNC_NAME -SCM_DEFINE (scm_vm_pop_continuation_hook, "vm-pop-continuation-hook", 0, 0, 0, - (void), +SCM_DEFINE (scm_vm_remove_return_hook_x, "vm-remove-return-hook!", 1, 0, 0, + (SCM f), "") -#define FUNC_NAME s_scm_vm_pop_continuation_hook +#define FUNC_NAME s_scm_vm_remove_return_hook_x { - VM_DEFINE_HOOK (SCM_VM_POP_CONTINUATION_HOOK); + VM_REMOVE_HOOK (return, f); } #undef FUNC_NAME -SCM_DEFINE (scm_vm_next_hook, "vm-next-hook", 0, 0, 0, - (void), +SCM_DEFINE (scm_vm_add_next_hook_x, "vm-add-next-hook!", 1, 0, 0, + (SCM f), "") -#define FUNC_NAME s_scm_vm_next_hook +#define FUNC_NAME s_scm_vm_add_next_hook_x { - VM_DEFINE_HOOK (SCM_VM_NEXT_HOOK); + VM_ADD_HOOK (next, f); } #undef FUNC_NAME -SCM_DEFINE (scm_vm_abort_continuation_hook, "vm-abort-continuation-hook", 0, 0, 0, - (void), +SCM_DEFINE (scm_vm_remove_next_hook_x, "vm-remove-next-hook!", 1, 0, 0, + (SCM f), + "") +#define FUNC_NAME s_scm_vm_remove_next_hook_x +{ + VM_REMOVE_HOOK (next, f); +} +#undef FUNC_NAME + +SCM_DEFINE (scm_vm_add_abort_hook_x, "vm-add-abort-hook!", 1, 0, 0, + (SCM f), "") -#define FUNC_NAME s_scm_vm_abort_continuation_hook +#define FUNC_NAME s_scm_vm_add_abort_hook_x { - VM_DEFINE_HOOK 
(SCM_VM_ABORT_CONTINUATION_HOOK); + VM_ADD_HOOK (abort, f); +} +#undef FUNC_NAME + +SCM_DEFINE (scm_vm_remove_abort_hook_x, "vm-remove-abort-hook!", 1, 0, 0, + (SCM f), + "") +#define FUNC_NAME s_scm_vm_remove_abort_hook_x +{ + VM_REMOVE_HOOK (abort, f); } #undef FUNC_NAME @@ -1325,7 +1702,7 @@ SCM_DEFINE (scm_vm_trace_level, "vm-trace-level", 0, 0, 0, "") #define FUNC_NAME s_scm_vm_trace_level { - return scm_from_int (scm_the_vm ()->trace_level); + return scm_from_int (SCM_I_CURRENT_THREAD->vm.trace_level); } #undef FUNC_NAME @@ -1334,8 +1711,8 @@ SCM_DEFINE (scm_set_vm_trace_level_x, "set-vm-trace-level!", 1, 0, 0, "") #define FUNC_NAME s_scm_set_vm_trace_level_x { - scm_the_vm ()->trace_level = scm_to_int (level); - return SCM_UNSPECIFIED; + scm_thread *thread = SCM_I_CURRENT_THREAD; + return scm_from_int (set_vm_trace_level (thread, scm_to_int (level))); } #undef FUNC_NAME @@ -1376,7 +1753,7 @@ SCM_DEFINE (scm_vm_engine, "vm-engine", 0, 0, 0, "") #define FUNC_NAME s_scm_vm_engine { - return vm_engine_to_symbol (scm_the_vm ()->engine, FUNC_NAME); + return vm_engine_to_symbol (SCM_I_CURRENT_THREAD->vm.engine, FUNC_NAME); } #undef FUNC_NAME @@ -1384,11 +1761,15 @@ void scm_c_set_vm_engine_x (int engine) #define FUNC_NAME "set-vm-engine!" { + scm_thread *thread = SCM_I_CURRENT_THREAD; + if (engine < 0 || engine >= SCM_VM_NUM_ENGINES) SCM_MISC_ERROR ("Unknown VM engine: ~a", scm_list_1 (scm_from_int (engine))); - scm_the_vm ()->engine = engine; + thread->vm.engine = engine; + /* Trigger update of the various hook_enabled flags. */ + set_vm_trace_level (thread, thread->vm.trace_level); } #undef FUNC_NAME @@ -1448,29 +1829,28 @@ SCM_DEFINE (scm_call_with_stack_overflow_handler, "@code{call-with-stack-overflow-handler} was called.") #define FUNC_NAME s_scm_call_with_stack_overflow_handler { - struct scm_vm *vp; - scm_t_ptrdiff c_limit, stack_size; + struct scm_thread *t = SCM_I_CURRENT_THREAD; + ptrdiff_t c_limit, stack_size; struct overflow_handler_data data; SCM new_limit, ret; - vp = scm_the_vm (); - stack_size = vp->stack_top - vp->sp; + stack_size = t->vm.stack_top - t->vm.sp; c_limit = scm_to_ptrdiff_t (limit); if (c_limit <= 0) scm_out_of_range (FUNC_NAME, limit); new_limit = scm_sum (scm_from_ptrdiff_t (stack_size), limit); - if (scm_is_pair (vp->overflow_handler_stack)) - new_limit = scm_min (new_limit, scm_caar (vp->overflow_handler_stack)); + if (scm_is_pair (t->vm.overflow_handler_stack)) + new_limit = scm_min (new_limit, scm_caar (t->vm.overflow_handler_stack)); /* Hacky check that the current stack depth plus the limit is within the range of a ptrdiff_t. */ scm_to_ptrdiff_t (new_limit); - data.vp = vp; + data.vp = &t->vm; data.overflow_handler_stack = - scm_acons (limit, handler, vp->overflow_handler_stack); + scm_acons (limit, handler, t->vm.overflow_handler_stack); scm_dynwind_begin (SCM_F_DYNWIND_REWINDABLE); @@ -1479,9 +1859,8 @@ SCM_DEFINE (scm_call_with_stack_overflow_handler, scm_dynwind_unwind_handler (unwind_overflow_handler, &data, SCM_F_WIND_EXPLICITLY); - /* Reset vp->sp_min_since_gc so that the VM checks actually - trigger. */ - return_unused_stack_to_os (vp); + /* Reset sp_min_since_gc so that the VM checks actually trigger. 
*/ + return_unused_stack_to_os (&t->vm); ret = scm_call_0 (thunk); @@ -1516,11 +1895,7 @@ scm_init_vm_builtin_properties (void) #define INIT_BUILTIN(builtin, BUILTIN, req, opt, rest) \ scm_set_procedure_property_x (vm_builtin_##builtin, scm_sym_name, \ - scm_sym_##builtin); \ - scm_set_procedure_minimum_arity_x (vm_builtin_##builtin, \ - SCM_I_MAKINUM (req), \ - SCM_I_MAKINUM (opt), \ - scm_from_bool (rest)); + scm_sym_##builtin); FOR_EACH_VM_BUILTIN (INIT_BUILTIN); #undef INIT_BUILTIN } @@ -1536,13 +1911,20 @@ scm_bootstrap_vm (void) (scm_t_extension_init_func)scm_init_vm_builtins, NULL); - page_size = getpagesize (); - /* page_size should be a power of two. */ - if (page_size & (page_size - 1)) - abort (); + scm_vm_intrinsics.expand_stack = thread_expand_stack; + scm_vm_intrinsics.cons_rest = cons_rest; + scm_vm_intrinsics.compute_kwargs_npositional = compute_kwargs_npositional; + scm_vm_intrinsics.bind_kwargs = bind_kwargs; + scm_vm_intrinsics.push_interrupt_frame = push_interrupt_frame; + scm_vm_intrinsics.reinstate_continuation_x = reinstate_continuation_x; + scm_vm_intrinsics.capture_continuation = capture_continuation; + scm_vm_intrinsics.compose_continuation = compose_continuation; + scm_vm_intrinsics.expand_apply_argument = expand_apply_argument; + scm_vm_intrinsics.abort_to_prompt = abort_to_prompt; + scm_vm_intrinsics.get_callee_vcode = get_callee_vcode; + scm_vm_intrinsics.unpack_values_object = unpack_values_object; + scm_vm_intrinsics.foreign_call = foreign_call; - sym_vm_run = scm_from_latin1_symbol ("vm-run"); - sym_vm_error = scm_from_latin1_symbol ("vm-error"); sym_keyword_argument_error = scm_from_latin1_symbol ("keyword-argument-error"); sym_regular = scm_from_latin1_symbol ("regular"); sym_debug = scm_from_latin1_symbol ("debug"); @@ -1552,22 +1934,13 @@ scm_bootstrap_vm (void) (SCM_CELL_WORD_0 (vm_boot_continuation) | SCM_F_PROGRAM_IS_BOOT)); -#define DEFINE_BUILTIN(builtin, BUILTIN, req, opt, rest) \ - vm_builtin_##builtin = scm_i_make_program (vm_builtin_##builtin##_code); - FOR_EACH_VM_BUILTIN (DEFINE_BUILTIN); -#undef DEFINE_BUILTIN + define_vm_builtins (); } void scm_init_vm (void) { #ifndef SCM_MAGIC_SNARFER -#include "libguile/vm.x" +#include "vm.x" #endif } - -/* - Local Variables: - c-file-style: "gnu" - End: -*/ |
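For readers unfamiliar with the X-macro idiom this patch leans on (the FOR_EACH_HOOK list together with the DEFINE_INVOKE_HOOK, INIT_HOOK, and RESET_LEVEL expansions in the diff above), the following is a minimal, self-contained sketch of the same technique. It is not part of vm.c: the demo_vm struct and the demo_invoke_*_hook names are invented for illustration; only the expansion pattern mirrors what the patch does with the per-thread hook fields and enabled flags.

/* Sketch (hypothetical names): one list of hook names drives both the
   per-VM "enabled" flags and the generated invoke helpers, as in the
   patch's FOR_EACH_HOOK / DEFINE_INVOKE_HOOK machinery.  */
#include <stdint.h>
#include <stdio.h>

#define FOR_EACH_DEMO_HOOK(M) \
  M (apply)                   \
  M (return)                  \
  M (next)                    \
  M (abort)

struct demo_vm
{
  /* One flag per hook, generated from the single list above.  */
#define DECLARE_FLAG(h) uint8_t h##_hook_enabled;
  FOR_EACH_DEMO_HOOK (DECLARE_FLAG)
#undef DECLARE_FLAG
};

/* Generate one invoke_<hook>_hook helper per hook, analogous to the
   DEFINE_INVOKE_HOOK expansion in the patch.  */
#define DEFINE_DEMO_INVOKE(h)                   \
  static void                                   \
  demo_invoke_##h##_hook (struct demo_vm *vm)   \
  {                                             \
    if (vm->h##_hook_enabled)                   \
      printf ("firing %s hook\n", #h);          \
  }
FOR_EACH_DEMO_HOOK (DEFINE_DEMO_INVOKE)
#undef DEFINE_DEMO_INVOKE

int
main (void)
{
  struct demo_vm vm = { 0 };
  vm.apply_hook_enabled = 1;
  demo_invoke_apply_hook (&vm);   /* prints: firing apply hook */
  demo_invoke_return_hook (&vm);  /* flag not set, so prints nothing */
  return 0;
}

The point of the pattern, both here and in the patch, is that adding a new hook means touching only the single FOR_EACH_HOOK list; every flag, initializer, and invoke helper is regenerated from it.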