Diffstat (limited to 'libguile')
-rw-r--r--  libguile/Makefile.am                               15
-rw-r--r--  libguile/atomics-internal.h                        17
-rw-r--r--  libguile/bdw-gc.h                                  65
-rw-r--r--  libguile/deprecated.h                               8
-rw-r--r--  libguile/evalext.c                                  1
-rw-r--r--  libguile/fluids.c                                   1
-rw-r--r--  libguile/gc.c                                      76
-rw-r--r--  libguile/hash.c                                     1
-rw-r--r--  libguile/hashtab.c                                  1
-rw-r--r--  libguile/init.c                                     2
-rw-r--r--  libguile/ioext.c                                    1
-rw-r--r--  libguile/loader.c                                  42
-rw-r--r--  libguile/numbers.c                                  3
-rw-r--r--  libguile/ports.c                                    1
-rw-r--r--  libguile/print.c                                    4
-rw-r--r--  libguile/scm.h                                      2
-rw-r--r--  libguile/scmsigs.c                                  9
-rw-r--r--  libguile/smob.c                                   202
-rw-r--r--  libguile/smob.h                                    58
-rw-r--r--  libguile/srfi-4.c                                   3
-rw-r--r--  libguile/struct.c                                   1
-rw-r--r--  libguile/threads.c                                 61
-rw-r--r--  libguile/trace.h                                   10
-rw-r--r--  libguile/vectors.c                                  1
-rw-r--r--  libguile/vm.c                                     128
-rw-r--r--  libguile/weak-set.c                               873
-rw-r--r--  libguile/weak-set.h                                62
-rw-r--r--  libguile/whippet-embedder.h                        25
-rw-r--r--  libguile/whippet/api/gc-api.h                      24
-rw-r--r--  libguile/whippet/api/gc-embedder-api.h             19
-rw-r--r--  libguile/whippet/benchmarks/ephemerons.c            4
-rw-r--r--  libguile/whippet/benchmarks/finalizers.c            4
-rw-r--r--  libguile/whippet/benchmarks/mt-gcbench.c            4
-rw-r--r--  libguile/whippet/benchmarks/simple-gc-embedder.h   20
-rw-r--r--  libguile/whippet/src/bdw.c                         69
-rw-r--r--  libguile/whippet/src/mmc.c                         58
-rw-r--r--  libguile/whippet/src/pcc.c                         42
-rw-r--r--  libguile/whippet/src/root.h                        16
-rw-r--r--  libguile/whippet/src/semi.c                        21
39 files changed, 459 insertions, 1495 deletions
diff --git a/libguile/Makefile.am b/libguile/Makefile.am
index 68bda9160..21ea143a5 100644
--- a/libguile/Makefile.am
+++ b/libguile/Makefile.am
@@ -236,8 +236,7 @@ libguile_@GUILE_EFFECTIVE_VERSION@_la_SOURCES = \
vectors.c \
version.c \
vm.c \
- vports.c \
- weak-set.c
+ vports.c
if ENABLE_JIT
libguile_@GUILE_EFFECTIVE_VERSION@_la_SOURCES += $(lightening_c_files)
@@ -345,8 +344,7 @@ DOT_X_FILES = \
variable.x \
vectors.x \
version.x \
- vm.x \
- weak-set.x
+ vm.x
EXTRA_DOT_X_FILES = @EXTRA_DOT_X_FILES@
@@ -442,8 +440,7 @@ DOT_DOC_FILES = \
variable.doc \
vectors.doc \
version.doc \
- vports.doc \
- weak-set.doc
+ vports.doc
EXTRA_DOT_DOC_FILES = @EXTRA_DOT_DOC_FILES@
@@ -549,7 +546,7 @@ libguile_@GUILE_EFFECTIVE_VERSION@_la_LIBADD = \
version_info = @LIBGUILE_INTERFACE_CURRENT@:@LIBGUILE_INTERFACE_REVISION@:@LIBGUILE_INTERFACE_AGE@
libguile_@GUILE_EFFECTIVE_VERSION@_la_LDFLAGS = \
- $(BDW_GC_LIBS) $(LIBFFI_LIBS) \
+ $(LIBFFI_LIBS) \
$(CEIL_LIBM) \
$(FLOOR_LIBM) \
$(FREXP_LIBM) \
@@ -597,7 +594,6 @@ modinclude_HEADERS = \
async.h \
atomic.h \
backtrace.h \
- bdw-gc.h \
boolean.h \
bitvectors.h \
bytevectors.h \
@@ -702,8 +698,7 @@ modinclude_HEADERS = \
vm-builtins.h \
vm-expand.h \
vm.h \
- vports.h \
- weak-set.h
+ vports.h
nodist_modinclude_HEADERS = version.h scmconfig.h
diff --git a/libguile/atomics-internal.h b/libguile/atomics-internal.h
index dd1e71f10..f733aa55f 100644
--- a/libguile/atomics-internal.h
+++ b/libguile/atomics-internal.h
@@ -45,6 +45,12 @@ scm_atomic_compare_and_swap_uint32 (uint32_t *loc, uint32_t *expected,
atomic_uint_least32_t *a_loc = (atomic_uint_least32_t *) loc;
return atomic_compare_exchange_weak (a_loc, expected, desired);
}
+static inline size_t
+scm_atomic_subtract_size (size_t *loc, size_t arg)
+{
+ atomic_size_t *a_loc = (atomic_size_t *) loc;
+ return atomic_fetch_sub (a_loc, arg);
+}
static inline void
scm_atomic_set_pointer (void **loc, void *val)
{
@@ -131,6 +137,17 @@ scm_atomic_compare_and_swap_uint32 (uint32_t *loc, uint32_t *expected,
return ret;
}
+static inline size_t
+scm_atomic_subtract_size (size_t *loc, size_t arg)
+{
+ size_t ret;
+ scm_i_pthread_mutex_lock (&atomics_lock);
+ ret = *loc;
+ *loc -= arg;
+ scm_i_pthread_mutex_unlock (&atomics_lock);
+ return ret;
+}
+
static inline void
scm_atomic_set_pointer (void **loc, void *val)
{
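
Annotation (not part of the patch): both variants of scm_atomic_subtract_size above implement fetch-and-subtract, i.e. they return the value the counter held before the subtraction. A minimal standalone sketch of that contract, mirroring the patch's cast from size_t * to atomic_size_t *:

#include <assert.h>
#include <stdatomic.h>
#include <stddef.h>

static size_t
demo_subtract_size (size_t *loc, size_t arg)
{
  atomic_size_t *a_loc = (atomic_size_t *) loc;
  return atomic_fetch_sub (a_loc, arg);   /* returns the old value */
}

int
main (void)
{
  size_t counter = 100;
  size_t prev = demo_subtract_size (&counter, 30);
  assert (prev == 100);     /* value before the subtraction */
  assert (counter == 70);   /* value after the subtraction */
  return 0;
}
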
diff --git a/libguile/bdw-gc.h b/libguile/bdw-gc.h
deleted file mode 100644
index 3107ebcef..000000000
--- a/libguile/bdw-gc.h
+++ /dev/null
@@ -1,65 +0,0 @@
-#ifndef SCM_BDW_GC_H
-#define SCM_BDW_GC_H
-
-/* Copyright 2006,2008-2009,2011-2014,2018
- Free Software Foundation, Inc.
-
- This file is part of Guile.
-
- Guile is free software: you can redistribute it and/or modify it
- under the terms of the GNU Lesser General Public License as published
- by the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- Guile is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
- License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with Guile. If not, see
- <https://www.gnu.org/licenses/>. */
-
-/* Correct header inclusion. */
-
-#include "libguile/scmconfig.h"
-
-#if SCM_USE_PTHREAD_THREADS
-
-/* When pthreads are used, let `libgc' know about it and redirect allocation
- calls such as `GC_MALLOC ()' to (contention-free, faster) thread-local
- allocation. */
-
-# define GC_THREADS 1
-# define GC_REDIRECT_TO_LOCAL 1
-
-/* Don't #define pthread routines to their GC_pthread counterparts.
- Instead we will be careful inside Guile to use the GC_pthread
- routines. */
-# define GC_NO_THREAD_REDIRECTS 1
-
-#ifdef __MINGW32__
-/* Rely on pthreads-w32. */
-#define GC_WIN32_PTHREADS
-#endif
-
-#endif
-
-#include <gc/gc.h>
-
-/* Return true if PTR points to the heap. */
-#define SCM_I_IS_POINTER_TO_THE_HEAP(ptr) \
- (GC_base (ptr) != NULL)
-
-/* Register a disappearing link for the object pointed to by OBJ such that
- the pointer pointed to be LINK is cleared when OBJ is reclaimed. Do so
- only if OBJ actually points to the heap. See
- http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2563
- for details. */
-#define SCM_I_REGISTER_DISAPPEARING_LINK(link, obj) \
- ((SCM_I_IS_POINTER_TO_THE_HEAP (obj)) \
- ? GC_GENERAL_REGISTER_DISAPPEARING_LINK ((link), (obj)) \
- : 0)
-
-
-#endif /* SCM_BDW_GC_H */
diff --git a/libguile/deprecated.h b/libguile/deprecated.h
index ab99d6581..f0189a676 100644
--- a/libguile/deprecated.h
+++ b/libguile/deprecated.h
@@ -24,6 +24,14 @@
#if (SCM_ENABLE_DEPRECATED == 1)
+#define SCM_SMOB_MARK SCM_SMOB_MARK__Gone__Contact_guile_devel_for_alternatives
+#define SCM_GLOBAL_SMOB_MARK SCM_GLOBAL_SMOB_MARK__Gone__Contact_guile_devel_for_alternatives
+#define scm_mark0 scm_mark0__Gone__Contact_guile_devel_for_alternatives
+#define scm_markcdr scm_markcdr__Gone__Contact_guile_devel_for_alternatives
+#define scm_free0 scm_free0__Gone__Contact_guile_devel_for_alternatives
+#define scm_set_smob_mark scm_set_smob_mark__Gone__Contact_guile_devel_for_alternatives
+#define scm_gc_mark scm_gc_mark__Gone__Contact_guile_devel_for_alternatives
+
SCM_DEPRECATED SCM scm_make_guardian (void);
#define SCM_I_WVECTP(x) (scm_is_weak_vector (x))
diff --git a/libguile/evalext.c b/libguile/evalext.c
index 07c9a0239..a816623f8 100644
--- a/libguile/evalext.c
+++ b/libguile/evalext.c
@@ -79,7 +79,6 @@ SCM_DEFINE (scm_self_evaluating_p, "self-evaluating?", 1, 0, 0,
case scm_tc7_vector:
case scm_tc7_pointer:
case scm_tc7_hashtable:
- case scm_tc7_weak_set:
case scm_tc7_fluid:
case scm_tc7_dynamic_state:
case scm_tc7_frame:
diff --git a/libguile/fluids.c b/libguile/fluids.c
index 6de7b83f7..b95930782 100644
--- a/libguile/fluids.c
+++ b/libguile/fluids.c
@@ -26,7 +26,6 @@
#include "alist.h"
#include "atomics-internal.h"
-#include "bdw-gc.h"
#include "cache-internal.h"
#include "deprecation.h"
#include "dynwind.h"
diff --git a/libguile/gc.c b/libguile/gc.c
index 42c93cfc3..1270b368f 100644
--- a/libguile/gc.c
+++ b/libguile/gc.c
@@ -32,7 +32,7 @@
#include "arrays.h"
#include "async.h"
-#include "bdw-gc.h"
+#include "atomics-internal.h"
#include "deprecation.h"
#include "dynwind.h"
#include "eval.h"
@@ -53,6 +53,7 @@
#include "strings.h"
#include "struct.h"
#include "symbols.h"
+#include "trace.h"
#include "vectors.h"
#include "gc.h"
@@ -60,10 +61,6 @@
#include "gc-basic-stats.h"
-/* For GC_set_start_callback. */
-#include <gc/gc_mark.h>
-
-
struct scm_gc_event_listener {
@@ -113,6 +110,10 @@ SCM scm_after_gc_hook;
static SCM after_gc_async_cell;
+/* This counter is decremented at each off-heap allocation. When it
+ crosses zero, trigger a manual collection. */
+static size_t off_heap_allocation_countdown = DEFAULT_INITIAL_HEAP_SIZE;
+
@@ -200,6 +201,9 @@ scm_gc_event_listener_restarting_mutators (void *data)
SCM_SETCDR (after_gc_async_cell, t->pending_asyncs);
t->pending_asyncs = after_gc_async_cell;
}
+
+ /* Reset the off-heap allocation counter. */
+ off_heap_allocation_countdown = scm_listener->stats.heap_size;
}
static inline void*
@@ -300,15 +304,6 @@ scm_oom_fn (struct gc_heap *heap, size_t nbytes)
return NULL;
}
-/* Called within GC -- cannot allocate GC memory. */
-static void
-scm_gc_warn_proc (char *fmt, GC_word arg)
-{
- /* avoid scm_current_warning_port() b/c the GC lock is already taken
- and the fluid ref might require it */
- fprintf (stderr, fmt, arg);
-}
-
void
scm_gc_after_nonlocal_exit (struct scm_thread *thread)
{
@@ -409,29 +404,6 @@ SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
#undef FUNC_NAME
-SCM_DEFINE (scm_gc_disable, "gc-disable", 0, 0, 0,
- (),
- "Disables the garbage collector. Nested calls are permitted. "
- "GC is re-enabled once @code{gc-enable} has been called the "
- "same number of times @code{gc-disable} was called.")
-#define FUNC_NAME s_scm_gc_disable
-{
- GC_disable ();
- return SCM_UNSPECIFIED;
-}
-#undef FUNC_NAME
-
-SCM_DEFINE (scm_gc_enable, "gc-enable", 0, 0, 0,
- (),
- "Enables the garbage collector.")
-#define FUNC_NAME s_scm_gc_enable
-{
- GC_enable ();
- return SCM_UNSPECIFIED;
-}
-#undef FUNC_NAME
-
-
SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
(),
"Scans all of SCM objects and reclaims for further use those that are\n"
@@ -449,7 +421,7 @@ SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
void
scm_i_gc (const char *what)
{
- GC_gcollect ();
+ gc_collect (SCM_I_CURRENT_THREAD->mutator, GC_COLLECTION_COMPACTING);
}
@@ -634,6 +606,7 @@ scm_gc_unregister_roots (SCM *b, unsigned long n)
+static struct gc_heap_roots heap_roots;
struct gc_mutator *
scm_storage_prehistory (struct gc_stack_addr base)
@@ -661,9 +634,9 @@ scm_storage_prehistory (struct gc_stack_addr base)
abort ();
}
- /* Sanity check. */
- if (!GC_is_visible (&scm_protects))
- abort ();
+ // We need to set roots so that scm_trace_loader_conservative_roots
+ // gets called.
+ gc_heap_set_roots (the_gc_heap, &heap_roots);
scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
@@ -683,24 +656,12 @@ scm_init_gc_protect_object ()
-static size_t bytes_until_gc = DEFAULT_INITIAL_HEAP_SIZE;
-static scm_i_pthread_mutex_t bytes_until_gc_lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;
-
void
scm_gc_register_allocation (size_t size)
{
- scm_i_pthread_mutex_lock (&bytes_until_gc_lock);
- if (size > bytes_until_gc)
- {
- bytes_until_gc = GC_get_heap_size ();
- scm_i_pthread_mutex_unlock (&bytes_until_gc_lock);
- GC_gcollect ();
- }
- else
- {
- bytes_until_gc -= size;
- scm_i_pthread_mutex_unlock (&bytes_until_gc_lock);
- }
+ size_t prev = scm_atomic_subtract_size(&off_heap_allocation_countdown, size);
+ if (prev < size)
+ gc_collect (SCM_I_CURRENT_THREAD->mutator, GC_COLLECTION_ANY);
}
@@ -718,8 +679,6 @@ after_gc_async_thunk (void)
void
scm_init_gc ()
{
- /* `GC_INIT ()' was invoked in `scm_storage_prehistory ()'. */
-
scm_after_gc_hook = scm_make_hook (SCM_INUM0);
scm_c_define ("after-gc-hook", scm_after_gc_hook);
@@ -730,7 +689,6 @@ scm_init_gc ()
SCM_BOOL_F);
gc_heap_set_allocation_failure_handler (the_gc_heap, scm_oom_fn);
- GC_set_warn_proc (scm_gc_warn_proc);
#include "gc.x"
}
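
Annotation (not part of the patch): the rewritten scm_gc_register_allocation above relies on the fetch-and-subtract returning the previous counter value. Because off_heap_allocation_countdown is an unsigned size_t, `prev < size` is exactly the condition under which the subtraction crossed zero and wrapped around, so a collection is requested; the counter is then reset to the current heap size in scm_gc_event_listener_restarting_mutators (see the earlier hunk). A minimal sketch of the same pattern with hypothetical names:

#include <stdatomic.h>
#include <stddef.h>

static atomic_size_t countdown;   /* reset to the heap size after each GC */

/* Record SIZE bytes of off-heap allocation; call COLLECT when the
   countdown crosses zero.  Hypothetical, illustrative only. */
static void
note_off_heap_allocation (size_t size, void (*collect) (void))
{
  size_t prev = atomic_fetch_sub (&countdown, size);
  if (prev < size)              /* crossed zero: the counter wrapped */
    collect ();
}
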
diff --git a/libguile/hash.c b/libguile/hash.c
index b30d7750e..17e978c6e 100644
--- a/libguile/hash.c
+++ b/libguile/hash.c
@@ -346,7 +346,6 @@ scm_raw_ihash (SCM obj, size_t depth)
case scm_tc7_atomic_box:
case scm_tc7_program:
case scm_tc7_vm_cont:
- case scm_tc7_weak_set:
case scm_tc7_port:
return scm_raw_ihashq (SCM_UNPACK (obj));
diff --git a/libguile/hashtab.c b/libguile/hashtab.c
index 09738436c..c96961c2e 100644
--- a/libguile/hashtab.c
+++ b/libguile/hashtab.c
@@ -28,7 +28,6 @@
#include <stdio.h>
#include "alist.h"
-#include "bdw-gc.h"
#include "boolean.h"
#include "deprecation.h"
#include "eq.h"
diff --git a/libguile/init.c b/libguile/init.c
index 7348a9916..7a00024a3 100644
--- a/libguile/init.c
+++ b/libguile/init.c
@@ -145,7 +145,6 @@
#include "vectors.h"
#include "version.h"
#include "vm.h"
-#include "weak-set.h"
#include "init.h"
@@ -451,7 +450,6 @@ scm_i_init_guile (struct gc_stack_addr base)
scm_init_exceptions ();
scm_init_throw (); /* Requires smob_prehistory */
scm_init_version ();
- scm_init_weak_set ();
scm_init_standard_ports (); /* Requires fports */
scm_init_expand (); /* Requires structs */
scm_init_memoize (); /* Requires smob_prehistory */
diff --git a/libguile/ioext.c b/libguile/ioext.c
index 2bae6875d..9a057c812 100644
--- a/libguile/ioext.c
+++ b/libguile/ioext.c
@@ -47,7 +47,6 @@
#include "ports.h"
#include "strings.h"
#include "syscalls.h"
-#include "weak-set.h"
#include "version.h"
#include "ioext.h"
diff --git a/libguile/loader.c b/libguile/loader.c
index 09bd166ec..fd2a04609 100644
--- a/libguile/loader.c
+++ b/libguile/loader.c
@@ -37,18 +37,19 @@
#include <sys/mman.h>
#endif
-#include "bdw-gc.h"
#include "boolean.h"
#include "bytevectors.h"
#include "elf.h"
#include "eval.h"
#include "extensions.h"
#include "gsubr.h"
+#include "gc-internal.h"
#include "list.h"
#include "pairs.h"
#include "programs.h"
#include "strings.h"
#include "threads.h"
+#include "trace.h"
#include "version.h"
#include "loader.h"
@@ -93,6 +94,43 @@
#define ELFDATA ELFDATA2LSB
#endif
+static scm_i_pthread_mutex_t roots_lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;
+static size_t roots_count = 0;
+static size_t roots_capacity = 0;
+struct loader_roots { uintptr_t lo, hi; };
+static struct loader_roots *roots;
+
+static void
+add_roots(char *lo, char *hi)
+{
+ scm_i_pthread_mutex_lock (&roots_lock);
+ if (roots_count == roots_capacity)
+ {
+ roots_capacity = roots_capacity * 2 + 128;
+ size_t elt_size = sizeof(*roots);
+ struct loader_roots *new_roots = calloc (roots_capacity, elt_size);
+ // Leak the old roots; we're still O(n) as all previous root
+ // arrays sum to less than the new roots_capacity.
+ memcpy (new_roots, roots, roots_count * elt_size);
+ roots = new_roots;
+ }
+ roots[roots_count++] = (struct loader_roots){ (uintptr_t)lo, (uintptr_t)hi };
+ scm_i_pthread_mutex_unlock (&roots_lock);
+}
+
+void
+scm_trace_loader_conservative_roots (void (*trace_range)(uintptr_t lo,
+ uintptr_t hi,
+ int possibly_interior,
+ struct gc_heap *heap,
+ void *trace_data),
+ struct gc_heap *heap,
+ void *trace_data)
+{
+ for (size_t i = 0; i < roots_count; i++)
+ trace_range(roots[i].lo, roots[i].hi, 0, heap, trace_data);
+}
+
/* The page size. */
static size_t page_size;
@@ -345,7 +383,7 @@ process_dynamic_segment (char *base, Elf_Phdr *dyn_phdr,
}
if (gc_root)
- GC_add_roots (gc_root, gc_root + gc_root_size);
+ add_roots (gc_root, gc_root + gc_root_size);
*init_out = init ? pointer_to_procedure (bytecode_kind, init) : SCM_BOOL_F;
*entry_out = pointer_to_procedure (bytecode_kind, entry);
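
Annotation (not part of the patch): the "we're still O(n)" comment in add_roots above holds because capacities follow the recurrence c' = 2c + 128, starting at c = 128. If the capacities of all previously leaked arrays sum to S < c, then after the next reallocation the leaked total is S + c < 2c < 2c + 128 = c'; the base case is S = 0 < 128. By induction, the leaked arrays together always occupy less memory than the live array, so total space stays proportional to the current capacity, i.e. linear in the number of registered root ranges.
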
diff --git a/libguile/numbers.c b/libguile/numbers.c
index ae2aa7766..80ace24f6 100644
--- a/libguile/numbers.c
+++ b/libguile/numbers.c
@@ -1,4 +1,4 @@
-/* Copyright 1995-2016,2018-2022
+/* Copyright 1995-2016,2018-2022,2025
Free Software Foundation, Inc.
Portions Copyright 1990-1993 by AT&T Bell Laboratories and Bellcore.
@@ -57,7 +57,6 @@
#include <complex.h>
#endif
-#include "bdw-gc.h"
#include "boolean.h"
#include "deprecation.h"
#include "dynwind.h"
diff --git a/libguile/ports.c b/libguile/ports.c
index 8ddaf06c1..83634eccc 100644
--- a/libguile/ports.c
+++ b/libguile/ports.c
@@ -78,7 +78,6 @@
#include "variable.h"
#include "vectors.h"
#include "version.h"
-#include "weak-set.h"
#include "ports.h"
diff --git a/libguile/print.c b/libguile/print.c
index 98150ff8c..ab4abb8d9 100644
--- a/libguile/print.c
+++ b/libguile/print.c
@@ -68,7 +68,6 @@
#include "variable.h"
#include "vectors.h"
#include "vm.h"
-#include "weak-set.h"
#include "print.h"
@@ -721,9 +720,6 @@ iprin1 (SCM exp, SCM port, scm_print_state *pstate)
case scm_tc7_hashtable:
scm_i_hashtable_print (exp, port, pstate);
break;
- case scm_tc7_weak_set:
- scm_i_weak_set_print (exp, port, pstate);
- break;
case scm_tc7_fluid:
scm_i_fluid_print (exp, port, pstate);
break;
diff --git a/libguile/scm.h b/libguile/scm.h
index fde767174..2f657f0e1 100644
--- a/libguile/scm.h
+++ b/libguile/scm.h
@@ -494,7 +494,7 @@ typedef uintptr_t scm_t_bits;
#define scm_tc7_vm_cont 0x47
#define scm_tc7_bytevector 0x4d
#define scm_tc7_thread 0x4f
-#define scm_tc7_weak_set 0x55
+#define scm_tc7_unused_55 0x55
#define scm_tc7_unused_57 0x57
#define scm_tc7_array 0x5d
#define scm_tc7_bitvector 0x5f
diff --git a/libguile/scmsigs.c b/libguile/scmsigs.c
index 697cc7072..82522b57a 100644
--- a/libguile/scmsigs.c
+++ b/libguile/scmsigs.c
@@ -43,11 +43,11 @@
#include <full-write.h>
#include "async.h"
-#include "bdw-gc.h"
#include "boolean.h"
#include "dynwind.h"
#include "eval.h"
#include "feature.h"
+#include "gc-internal.h"
#include "gsubr.h"
#include "list.h"
#include "modules.h"
@@ -161,11 +161,8 @@ signal_delivery_thread (void *data)
#if HAVE_PTHREAD_SIGMASK /* not on mingw, see notes above */
sigset_t all_sigs;
sigfillset (&all_sigs);
- /* On libgc 7.1 and earlier, GC_do_blocking doesn't actually do
- anything. So in that case, libgc will want to suspend the signal
- delivery thread, so we need to allow it to do so by unmasking the
- suspend signal. */
- sigdelset (&all_sigs, GC_get_suspend_signal ());
+ if (gc_safepoint_mechanism() == GC_SAFEPOINT_MECHANISM_SIGNAL)
+ sigdelset (&all_sigs, gc_safepoint_signal_number());
scm_i_pthread_sigmask (SIG_SETMASK, &all_sigs, NULL);
#endif
diff --git a/libguile/smob.c b/libguile/smob.c
index 917cf1cb5..8b8091c04 100644
--- a/libguile/smob.c
+++ b/libguile/smob.c
@@ -30,7 +30,6 @@
#include "async.h"
#include "atomics-internal.h"
-#include "bdw-gc.h"
#include "finalizers.h"
#include "goops.h"
#include "gsubr.h"
@@ -41,8 +40,6 @@
#include "smob.h"
-#include <gc/gc_mark.h>
-
@@ -64,43 +61,6 @@ scm_assert_smob_type (scm_t_bits tag, SCM val)
scm_wrong_type_arg_msg (NULL, 0, val, scm_smobs[SCM_TC2SMOBNUM(tag)].name);
}
-/* {Mark}
- */
-
-/* This function is vestigial. It used to be the mark function's
- responsibility to set the mark bit on the smob or port, but now the
- generic marking routine in gc.c takes care of that, and a zero
- pointer for a mark function means "don't bother". So you never
- need scm_mark0.
-
- However, we leave it here because it's harmless to call it, and
- people out there have smob code that uses it, and there's no reason
- to make their links fail. */
-
-SCM
-scm_mark0 (SCM ptr SCM_UNUSED)
-{
- return SCM_BOOL_F;
-}
-
-SCM
-/* Dirk::FIXME: The name markcdr is misleading, since the term cdr should only
- be used for real pairs. */
-scm_markcdr (SCM ptr)
-{
- return SCM_CELL_OBJECT_1 (ptr);
-}
-
-
-/* {Free}
- */
-
-size_t
-scm_free0 (SCM ptr SCM_UNUSED)
-{
- return 0;
-}
-
/* {Print}
*/
@@ -231,12 +191,6 @@ scm_make_smob_type (char const *name, size_t size)
void
-scm_set_smob_mark (scm_t_bits tc, SCM (*mark) (SCM))
-{
- scm_smobs[SCM_TC2SMOBNUM (tc)].mark = mark;
-}
-
-void
scm_set_smob_free (scm_t_bits tc, size_t (*free) (SCM))
{
scm_smobs[SCM_TC2SMOBNUM (tc)].free = free;
@@ -281,101 +235,6 @@ scm_make_smob (scm_t_bits tc)
-/* Marking SMOBs using user-supplied mark procedures. */
-
-
-/* The GC kind used for SMOB types that provide a custom mark procedure. */
-static int smob_gc_kind;
-
-/* Mark stack pointer and limit, used by `scm_gc_mark'. */
-static scm_i_pthread_key_t current_mark_stack_pointer;
-static scm_i_pthread_key_t current_mark_stack_limit;
-
-
-/* The generic SMOB mark procedure that gets called for SMOBs allocated
- with smob_gc_kind. */
-static struct GC_ms_entry *
-smob_mark (GC_word *addr, struct GC_ms_entry *mark_stack_ptr,
- struct GC_ms_entry *mark_stack_limit, GC_word env)
-{
- register SCM cell;
- register scm_t_bits tc, smobnum;
-
- cell = SCM_PACK_POINTER (addr);
-
- if (SCM_TYP7 (cell) != scm_tc7_smob)
- /* It is likely that the GC passed us a pointer to a free-list element
- which we must ignore (see warning in `gc/gc_mark.h'). */
- return mark_stack_ptr;
-
- tc = SCM_CELL_WORD_0 (cell);
- smobnum = SCM_TC2SMOBNUM (tc);
-
- if (smobnum >= scm_numsmob)
- /* The first word looks corrupt. */
- abort ();
-
- mark_stack_ptr = GC_MARK_AND_PUSH (SCM2PTR (SCM_CELL_OBJECT_1 (cell)),
- mark_stack_ptr,
- mark_stack_limit, NULL);
- mark_stack_ptr = GC_MARK_AND_PUSH (SCM2PTR (SCM_CELL_OBJECT_2 (cell)),
- mark_stack_ptr,
- mark_stack_limit, NULL);
- mark_stack_ptr = GC_MARK_AND_PUSH (SCM2PTR (SCM_CELL_OBJECT_3 (cell)),
- mark_stack_ptr,
- mark_stack_limit, NULL);
-
- if (scm_smobs[smobnum].mark)
- {
- SCM obj;
-
- scm_i_pthread_setspecific (current_mark_stack_pointer, mark_stack_ptr);
- scm_i_pthread_setspecific (current_mark_stack_limit, mark_stack_limit);
-
- /* Invoke the SMOB's mark procedure, which will in turn invoke
- `scm_gc_mark', which may modify `current_mark_stack_pointer'. */
- obj = scm_smobs[smobnum].mark (cell);
-
- mark_stack_ptr = scm_i_pthread_getspecific (current_mark_stack_pointer);
-
- if (SCM_HEAP_OBJECT_P (obj))
- /* Mark the returned object. */
- mark_stack_ptr = GC_MARK_AND_PUSH (SCM2PTR (obj),
- mark_stack_ptr,
- mark_stack_limit, NULL);
-
- scm_i_pthread_setspecific (current_mark_stack_pointer, NULL);
- scm_i_pthread_setspecific (current_mark_stack_limit, NULL);
- }
-
- return mark_stack_ptr;
-
-}
-
-/* Mark object O. We assume that this function is only called during the mark
- phase, i.e., from within `smob_mark' or one of its descendants. */
-void
-scm_gc_mark (SCM o)
-{
- if (SCM_HEAP_OBJECT_P (o))
- {
- void *mark_stack_ptr, *mark_stack_limit;
-
- mark_stack_ptr = scm_i_pthread_getspecific (current_mark_stack_pointer);
- mark_stack_limit = scm_i_pthread_getspecific (current_mark_stack_limit);
-
- if (mark_stack_ptr == NULL)
- /* The function was not called from a mark procedure. */
- abort ();
-
- mark_stack_ptr = GC_MARK_AND_PUSH (SCM2PTR (o),
- mark_stack_ptr, mark_stack_limit,
- NULL);
- scm_i_pthread_setspecific (current_mark_stack_pointer, mark_stack_ptr);
- }
-}
-
-
/* Finalize SMOB by calling its SMOB type's free function, if any. */
void
scm_i_finalize_smob (struct scm_thread *thread, SCM smob)
@@ -386,11 +245,7 @@ scm_i_finalize_smob (struct scm_thread *thread, SCM smob)
/* Frob the object's type in place, re-setting it to be the "finalized
smob" type. This will prevent other routines from accessing its
- internals in a way that assumes that the smob data is valid. This
- is notably the case for SMOB's own "mark" procedure, if any; as the
- finalizer is invoked by the mutator, it's possible for a GC to
- occur while it's running, in which case the object is alive and yet
- its data is invalid. */
+ internals in a way that assumes that the smob data is valid. */
scm_t_bits finalized_word = first_word & ~(scm_t_bits) 0xff00;
scm_atomic_set_bits (first_word_loc, finalized_word);
@@ -403,54 +258,28 @@ scm_i_finalize_smob (struct scm_thread *thread, SCM smob)
free_smob (smob);
}
-/* Return a SMOB with typecode TC. The SMOB type corresponding to TC may
- provide a custom mark procedure and it will be honored. */
+/* Return a SMOB with typecode TC. */
SCM
-scm_i_new_smob (scm_t_bits tc, scm_t_bits data)
+scm_new_smob (scm_t_bits tc, scm_t_bits data)
{
scm_t_bits smobnum = SCM_TC2SMOBNUM (tc);
- SCM ret;
-
- /* Use the smob_gc_kind if needed to allow the mark procedure to
- run. Since the marker only deals with double cells, that case
- allocates a double cell. We leave words 2 and 3 to there initial
- values, which is 0. */
- if (scm_smobs [smobnum].mark)
- ret = SCM_PACK_POINTER (GC_generic_malloc (2 * sizeof (scm_t_cell), smob_gc_kind));
- else
- ret = SCM_PACK_POINTER (GC_MALLOC (sizeof (scm_t_cell)));
-
- SCM_SET_CELL_WORD_1 (ret, data);
- SCM_SET_CELL_WORD_0 (ret, tc);
+ SCM ret = scm_cell (tc, data);
- if (scm_smobs[smobnum].free)
+ if (SCM_UNLIKELY (scm_smobs[smobnum].free))
scm_i_add_smob_finalizer (SCM_I_CURRENT_THREAD, ret);
return ret;
}
-/* Return a SMOB with typecode TC. The SMOB type corresponding to TC may
- provide a custom mark procedure and it will be honored. */
+/* Return a SMOB with typecode TC. */
SCM
-scm_i_new_double_smob (scm_t_bits tc, scm_t_bits data1,
- scm_t_bits data2, scm_t_bits data3)
+scm_new_double_smob (scm_t_bits tc, scm_t_bits data1,
+ scm_t_bits data2, scm_t_bits data3)
{
scm_t_bits smobnum = SCM_TC2SMOBNUM (tc);
- SCM ret;
-
- /* Use the smob_gc_kind if needed to allow the mark procedure to
- run. */
- if (scm_smobs [smobnum].mark)
- ret = SCM_PACK_POINTER (GC_generic_malloc (2 * sizeof (scm_t_cell), smob_gc_kind));
- else
- ret = SCM_PACK_POINTER (GC_MALLOC (2 * sizeof (scm_t_cell)));
-
- SCM_SET_CELL_WORD_3 (ret, data3);
- SCM_SET_CELL_WORD_2 (ret, data2);
- SCM_SET_CELL_WORD_1 (ret, data1);
- SCM_SET_CELL_WORD_0 (ret, tc);
+ SCM ret = scm_double_cell (tc, data1, data2, data3);
- if (scm_smobs[smobnum].free)
+ if (SCM_UNLIKELY (scm_smobs[smobnum].free))
scm_i_add_smob_finalizer (SCM_I_CURRENT_THREAD, ret);
return ret;
@@ -475,22 +304,11 @@ scm_smob_prehistory ()
long i;
scm_t_bits finalized_smob_tc16;
- scm_i_pthread_key_create (&current_mark_stack_pointer, NULL);
- scm_i_pthread_key_create (&current_mark_stack_limit, NULL);
-
- smob_gc_kind = GC_new_kind (GC_new_free_list (),
- GC_MAKE_PROC (GC_new_proc (smob_mark), 0),
- 0,
- /* Clear new objects. As of version 7.1, libgc
- doesn't seem to support passing 0 here. */
- 1);
-
scm_numsmob = 0;
for (i = 0; i < MAX_SMOB_COUNT; ++i)
{
scm_smobs[i].name = 0;
scm_smobs[i].size = 0;
- scm_smobs[i].mark = 0;
scm_smobs[i].free = 0;
scm_smobs[i].print = scm_smob_print;
scm_smobs[i].equalp = 0;
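
Annotation (illustrative sketch, not part of the patch): with mark procedures removed, a smob type is now defined with at most a free, print, and equalp procedure. A hypothetical type that owns a heap buffer and releases it from its free procedure, using only APIs that survive this change (scm_make_smob_type, scm_set_smob_free, scm_new_smob):

#include <libguile.h>
#include <stdlib.h>

static scm_t_bits image_tag;    /* hypothetical smob type tag */

static size_t
free_image (SCM image)
{
  free ((void *) SCM_SMOB_DATA (image));   /* release the owned buffer */
  return 0;
}

static SCM
make_image (size_t nbytes)
{
  void *buf = calloc (1, nbytes);
  return scm_new_smob (image_tag, (scm_t_bits) buf);
}

static void
init_image_type (void)
{
  image_tag = scm_make_smob_type ("image", 0);
  scm_set_smob_free (image_tag, free_image);
  /* scm_set_smob_mark no longer exists; see deprecated.h above. */
}
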
diff --git a/libguile/smob.h b/libguile/smob.h
index 990ac057b..22d925b8b 100644
--- a/libguile/smob.h
+++ b/libguile/smob.h
@@ -22,11 +22,10 @@
-#include <libguile/error.h>
-#include <libguile/gc.h>
-#include "libguile/inline.h"
+#include "libguile/error.h"
+#include "libguile/gc.h"
#include "libguile/print.h"
-#include <libguile/snarf.h>
+#include "libguile/snarf.h"
@@ -36,7 +35,6 @@ typedef struct scm_smob_descriptor
{
char const *name;
size_t size;
- SCM (*mark) (SCM);
size_t (*free) (SCM);
int (*print) (SCM exp, SCM port, scm_print_state *pstate);
SCM (*equalp) (SCM, SCM);
@@ -78,14 +76,6 @@ SCM_SNARF_INIT((tag)=scm_make_smob_type((scheme_name), (size));)
SCM_SNARF_HERE(scm_t_bits tag) \
SCM_SNARF_INIT((tag)=scm_make_smob_type((scheme_name), (size));)
-#define SCM_SMOB_MARK(tag, c_name, arg) \
-SCM_SNARF_HERE(static SCM c_name(SCM arg)) \
-SCM_SNARF_INIT(scm_set_smob_mark((tag), (c_name));)
-
-#define SCM_GLOBAL_SMOB_MARK(tag, c_name, arg) \
-SCM_SNARF_HERE(SCM c_name(SCM arg)) \
-SCM_SNARF_INIT(scm_set_smob_mark((tag), (c_name));)
-
#define SCM_SMOB_FREE(tag, c_name, arg) \
SCM_SNARF_HERE(static size_t c_name(SCM arg)) \
SCM_SNARF_INIT(scm_set_smob_free((tag), (c_name));)
@@ -121,39 +111,9 @@ SCM_SNARF_INIT(scm_set_smob_apply((tag), (c_name), (req), (opt), (rest));)
-SCM_API SCM scm_i_new_smob (scm_t_bits tc, scm_t_bits);
-SCM_API SCM scm_i_new_double_smob (scm_t_bits tc, scm_t_bits,
- scm_t_bits, scm_t_bits);
-
-
-SCM_INLINE SCM scm_new_smob (scm_t_bits tc, scm_t_bits);
-SCM_INLINE SCM scm_new_double_smob (scm_t_bits tc, scm_t_bits,
- scm_t_bits, scm_t_bits);
-
-#if SCM_CAN_INLINE || defined SCM_INLINE_C_IMPLEMENTING_INLINES
-SCM_INLINE_IMPLEMENTATION SCM
-scm_new_smob (scm_t_bits tc, scm_t_bits data)
-{
- scm_t_bits smobnum = SCM_TC2SMOBNUM (tc);
-
- if (SCM_UNLIKELY (scm_smobs[smobnum].mark || scm_smobs[smobnum].free))
- return scm_i_new_smob (tc, data);
- else
- return scm_cell (tc, data);
-}
-
-SCM_INLINE_IMPLEMENTATION SCM
-scm_new_double_smob (scm_t_bits tc, scm_t_bits data1,
- scm_t_bits data2, scm_t_bits data3)
-{
- scm_t_bits smobnum = SCM_TC2SMOBNUM (tc);
-
- if (SCM_UNLIKELY (scm_smobs[smobnum].mark || scm_smobs[smobnum].free))
- return scm_i_new_double_smob (tc, data1, data2, data3);
- else
- return scm_double_cell (tc, data1, data2, data3);
-}
-#endif
+SCM_API SCM scm_new_smob (scm_t_bits tc, scm_t_bits);
+SCM_API SCM scm_new_double_smob (scm_t_bits tc, scm_t_bits,
+ scm_t_bits, scm_t_bits);
#define SCM_NEWSMOB(z, tc, data) \
z = scm_new_smob ((tc), (scm_t_bits)(data))
@@ -222,22 +182,18 @@ scm_new_double_smob (scm_t_bits tc, scm_t_bits data1,
-SCM_API SCM scm_mark0 (SCM ptr);
-SCM_API SCM scm_markcdr (SCM ptr);
-SCM_API size_t scm_free0 (SCM ptr);
SCM_API int scm_smob_print (SCM exp, SCM port, scm_print_state *pstate);
/* The following set of functions is the standard way to create new
* SMOB types.
*
* Create a type tag using `scm_make_smob_type', accept default values
- * for mark, free, print and/or equalp functions, or set your own
+ * for free, print and/or equalp functions, or set your own
* values using `scm_set_smob_xxx'.
*/
SCM_API scm_t_bits scm_make_smob_type (char const *name, size_t size);
-SCM_API void scm_set_smob_mark (scm_t_bits tc, SCM (*mark) (SCM));
SCM_API void scm_set_smob_free (scm_t_bits tc, size_t (*free) (SCM));
SCM_API void scm_set_smob_print (scm_t_bits tc,
int (*print) (SCM, SCM, scm_print_state*));
diff --git a/libguile/srfi-4.c b/libguile/srfi-4.c
index 23896c32c..48f90f0b6 100644
--- a/libguile/srfi-4.c
+++ b/libguile/srfi-4.c
@@ -1,6 +1,6 @@
/* srfi-4.c --- Uniform numeric vector datatypes.
- Copyright 2001,2004,2006,2009-2011,2014,2018
+ Copyright 2001,2004,2006,2009-2011,2014,2018,2025
Free Software Foundation, Inc.
This file is part of Guile.
@@ -25,7 +25,6 @@
#include <string.h>
-#include "bdw-gc.h"
#include "boolean.h"
#include "bytevectors.h"
#include "error.h"
diff --git a/libguile/struct.c b/libguile/struct.c
index f41859cda..82329ec76 100644
--- a/libguile/struct.c
+++ b/libguile/struct.c
@@ -31,7 +31,6 @@
#include "alist.h"
#include "async.h"
-#include "bdw-gc.h"
#include "boolean.h"
#include "chars.h"
#include "deprecation.h"
diff --git a/libguile/threads.c b/libguile/threads.c
index 380864f6f..8dbd75982 100644
--- a/libguile/threads.c
+++ b/libguile/threads.c
@@ -40,7 +40,6 @@
#endif
#include "async.h"
-#include "bdw-gc.h"
#include "boolean.h"
#include "continuations.h"
#include "deprecation.h"
@@ -72,8 +71,6 @@
#include "threads.h"
-#include <gc/gc_mark.h>
-
@@ -612,20 +609,12 @@ scm_init_guile (void)
struct with_guile_args
{
- GC_fn_type func;
+ void* (*func) (void*);
void *data;
SCM dynamic_state;
};
static void *
-with_guile_trampoline (void *data)
-{
- struct with_guile_args *args = data;
-
- return scm_c_with_continuation_barrier (args->func, args->data);
-}
-
-static void *
with_guile (struct gc_stack_addr base, void *data)
{
void *res;
@@ -635,22 +624,10 @@ with_guile (struct gc_stack_addr base, void *data)
new_thread = scm_i_init_thread_for_guile (base, args->dynamic_state);
t = SCM_I_CURRENT_THREAD;
- if (new_thread)
- {
- /* We are in Guile mode. */
- assert (t->guile_mode);
-
- res = scm_c_with_continuation_barrier (args->func, args->data);
+ int reactivate = !t->guile_mode;
+ int deactivate = reactivate || new_thread;
- /* Leave Guile mode. */
- t->guile_mode = 0;
- }
- else if (t->guile_mode)
- {
- /* Already in Guile mode. */
- res = scm_c_with_continuation_barrier (args->func, args->data);
- }
- else
+ if (reactivate)
{
/* We are not in Guile mode, either because we are not within a
scm_with_guile, or because we are within a scm_without_guile.
@@ -668,9 +645,17 @@ with_guile (struct gc_stack_addr base, void *data)
#endif
t->guile_mode = 1;
- res = GC_call_with_gc_active (with_guile_trampoline, args);
+ gc_reactivate (t->mutator);
+ }
+
+ res = scm_c_with_continuation_barrier (args->func, args->data);
+
+ if (deactivate)
+ {
+ gc_deactivate (t->mutator);
t->guile_mode = 0;
}
+
return res;
}
@@ -697,16 +682,21 @@ scm_without_guile (void *(*func)(void *), void *data)
{
void *result;
scm_thread *t = SCM_I_CURRENT_THREAD;
+ int was_active = t->guile_mode;
- if (t->guile_mode)
+ if (was_active)
{
- SCM_I_CURRENT_THREAD->guile_mode = 0;
- result = GC_do_blocking (func, data);
- SCM_I_CURRENT_THREAD->guile_mode = 1;
+ t->guile_mode = 0;
+ gc_deactivate (t->mutator);
+ }
+
+ result = func (data);
+
+ if (was_active)
+ {
+ gc_reactivate (t->mutator);
+ t->guile_mode = 1;
}
- else
- /* Otherwise we're not in guile mode, so nothing to do. */
- result = func (data);
return result;
}
@@ -797,7 +787,6 @@ SCM_DEFINE (scm_sys_call_with_new_thread, "%call-with-new-thread", 1, 0, 0,
SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
- GC_collect_a_little ();
data = scm_gc_typed_calloc (launch_data);
data->dynamic_state = scm_current_dynamic_state ();
data->thunk = thunk;
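
Annotation (usage sketch, not part of the patch): callers of scm_without_guile are unaffected by the switch from GC_do_blocking to gc_deactivate/gc_reactivate; blocking work is still wrapped the same way so the collector need not wait on this thread. Hypothetical example wrapping a blocking read:

#include <libguile.h>
#include <unistd.h>

struct read_args { int fd; void *buf; size_t len; ssize_t result; };

static void *
do_blocking_read (void *data)
{
  struct read_args *a = data;
  a->result = read (a->fd, a->buf, a->len);   /* may block indefinitely */
  return a;
}

static ssize_t
read_outside_guile (int fd, void *buf, size_t len)
{
  struct read_args a = { fd, buf, len, -1 };
  scm_without_guile (do_blocking_read, &a);   /* mutator deactivated around the call */
  return a.result;
}
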
diff --git a/libguile/trace.h b/libguile/trace.h
index e05050470..d2705c112 100644
--- a/libguile/trace.h
+++ b/libguile/trace.h
@@ -29,6 +29,7 @@
struct scm_thread;
struct scm_vm;
struct gc_heap;
+struct gc_heap_roots { int unused; };
SCM_INTERNAL void
scm_trace_thread_mutator_roots (struct scm_thread *thread,
@@ -52,5 +53,14 @@ SCM_INTERNAL void scm_trace_vm (struct scm_vm *vp,
struct gc_heap *heap,
void *trace_data);
+SCM_INTERNAL void
+scm_trace_loader_conservative_roots (void (*trace_range)(uintptr_t lo,
+ uintptr_t hi,
+ int possibly_interior,
+ struct gc_heap *heap,
+ void *trace_data),
+ struct gc_heap *heap,
+ void *trace_data);
+
#endif /* SCM_THREADS_INTERNAL_H */
diff --git a/libguile/vectors.c b/libguile/vectors.c
index d81dc61a7..a2a620b79 100644
--- a/libguile/vectors.c
+++ b/libguile/vectors.c
@@ -27,7 +27,6 @@
#include <string.h>
#include "array-handle.h"
-#include "bdw-gc.h"
#include "boolean.h"
#include "deprecation.h"
#include "eq.h"
diff --git a/libguile/vm.c b/libguile/vm.c
index a042e9535..cb46c54cc 100644
--- a/libguile/vm.c
+++ b/libguile/vm.c
@@ -39,7 +39,6 @@
#include "async.h"
#include "atomic.h"
#include "atomics-internal.h"
-#include "bdw-gc.h"
#include "cache-internal.h"
#include "continuations.h"
#include "control.h"
@@ -79,8 +78,6 @@
#include "vm.h"
-#include <gc/gc_mark.h>
-
#if (defined __GNUC__)
# define SCM_NOINLINE __attribute__ ((__noinline__))
#else
@@ -777,29 +774,17 @@ scm_i_vm_free_stack (struct scm_vm *vp)
memset (vp, 0, sizeof (*vp));
}
-struct vm_expand_stack_data
-{
- struct scm_vm *vp;
- size_t stack_size;
- union scm_vm_stack_element *new_sp;
-};
-
-static void *
-vm_expand_stack_inner (void *data_ptr)
+static union scm_vm_stack_element *
+vm_expand_stack_inner (struct scm_vm *vp, size_t needed_size,
+ union scm_vm_stack_element *new_sp)
{
- struct vm_expand_stack_data *data = data_ptr;
-
- struct scm_vm *vp = data->vp;
- union scm_vm_stack_element *old_top, *new_bottom;
- size_t new_size;
- ptrdiff_t reloc;
-
- old_top = vp->stack_top;
- new_size = vp->stack_size;
- while (new_size < data->stack_size)
+ union scm_vm_stack_element *old_top = vp->stack_top;
+ size_t new_size = vp->stack_size;
+ while (new_size < needed_size)
new_size *= 2;
- new_bottom = expand_stack (vp->stack_bottom, vp->stack_size, new_size);
+ union scm_vm_stack_element *new_bottom =
+ expand_stack (vp->stack_bottom, vp->stack_size, new_size);
if (!new_bottom)
return NULL;
@@ -807,13 +792,12 @@ vm_expand_stack_inner (void *data_ptr)
vp->stack_size = new_size;
vp->stack_top = vp->stack_bottom + new_size;
vp->stack_limit = vp->stack_bottom;
- reloc = vp->stack_top - old_top;
+ ptrdiff_t reloc = vp->stack_top - old_top;
if (vp->fp)
vp->fp += reloc;
- data->new_sp += reloc;
- return new_bottom;
+ return new_sp + reloc;
}
static ptrdiff_t
@@ -873,17 +857,13 @@ vm_expand_stack (struct scm_vm *vp, union scm_vm_stack_element *new_sp)
if (stack_size > vp->stack_size)
{
- struct vm_expand_stack_data data;
+ gc_inhibit_preemption (SCM_I_CURRENT_THREAD->mutator);
+ new_sp = vm_expand_stack_inner (vp, stack_size, new_sp);
+ gc_reallow_preemption (SCM_I_CURRENT_THREAD->mutator);
- data.vp = vp;
- data.stack_size = stack_size;
- data.new_sp = new_sp;
-
- if (!GC_call_with_alloc_lock (vm_expand_stack_inner, &data))
+ if (!new_sp)
/* Throw an unwind-only exception. */
scm_report_stack_overflow ();
-
- new_sp = data.new_sp;
}
vp->sp = new_sp;
@@ -1082,34 +1062,6 @@ push_interrupt_frame (scm_thread *thread, uint8_t *mra)
thread->vm.fp = new_fp;
}
-struct return_to_continuation_data
-{
- struct scm_vm_cont *cp;
- struct scm_vm *vp;
-};
-
-/* Called with the GC lock to prevent the stack marker from traversing a
- stack in an inconsistent state. */
-static void *
-vm_return_to_continuation_inner (void *data_ptr)
-{
- struct return_to_continuation_data *data = data_ptr;
- struct scm_vm *vp = data->vp;
- struct scm_vm_cont *cp = data->cp;
-
- /* We know that there is enough space for the continuation, because we
- captured it in the past. However there may have been an expansion
- since the capture, so we may have to re-link the frame
- pointers. */
- memcpy (vp->stack_top - cp->stack_size,
- cp->stack_bottom,
- cp->stack_size * sizeof (*cp->stack_bottom));
- vp->fp = vp->stack_top - cp->fp_offset;
- vm_restore_sp (vp, vp->stack_top - cp->stack_size);
-
- return NULL;
-}
-
static void reinstate_continuation_x (scm_thread *thread, SCM cont) SCM_NORETURN;
static void
@@ -1120,7 +1072,6 @@ reinstate_continuation_x (scm_thread *thread, SCM cont)
struct scm_vm_cont *cp;
size_t n, i, frame_overhead = 3;
union scm_vm_stack_element *argv;
- struct return_to_continuation_data data;
if (!scm_is_eq (continuation->root, thread->continuation_root))
scm_misc_error
@@ -1134,9 +1085,17 @@ reinstate_continuation_x (scm_thread *thread, SCM cont)
cp = SCM_VM_CONT_DATA (continuation->vm_cont);
- data.cp = cp;
- data.vp = vp;
- GC_call_with_alloc_lock (vm_return_to_continuation_inner, &data);
+ gc_inhibit_preemption (thread->mutator);
+ /* We know that there is enough space for the continuation, because we
+ captured it in the past. However there may have been an expansion
+ since the capture, so we may have to re-link the frame
+ pointers. */
+ memcpy (vp->stack_top - cp->stack_size,
+ cp->stack_bottom,
+ cp->stack_size * sizeof (*cp->stack_bottom));
+ vp->fp = vp->stack_top - cp->fp_offset;
+ vm_restore_sp (vp, vp->stack_top - cp->stack_size);
+ gc_reallow_preemption (thread->mutator);
/* Now we have the continuation properly copied over. We just need to
copy on an empty frame and the return values, as the continuation
@@ -1170,39 +1129,14 @@ capture_continuation (scm_thread *thread)
return scm_i_make_continuation (thread, vm_cont);
}
-struct compose_continuation_data
-{
- struct scm_vm *vp;
- struct scm_vm_cont *cp;
-};
-
-static void *
-compose_continuation_inner (void *data_ptr)
-{
- struct compose_continuation_data *data = data_ptr;
- struct scm_vm *vp = data->vp;
- struct scm_vm_cont *cp = data->cp;
-
- memcpy (vp->fp - cp->stack_size,
- cp->stack_bottom,
- cp->stack_size * sizeof (*cp->stack_bottom));
-
- vp->fp -= cp->fp_offset;
- vp->ip = cp->vra;
-
- return cp->mra;
-}
-
static uint8_t*
compose_continuation (scm_thread *thread, SCM cont)
{
struct scm_vm *vp = &thread->vm;
size_t nargs;
- struct compose_continuation_data data;
struct scm_vm_cont *cp;
union scm_vm_stack_element *args;
ptrdiff_t old_fp_offset;
- uint8_t *mra;
if (SCM_UNLIKELY (! SCM_VM_CONT_REWINDABLE_P (cont)))
scm_wrong_type_arg_msg (NULL, 0, cont, "resumable continuation");
@@ -1223,9 +1157,13 @@ compose_continuation (scm_thread *thread, SCM cont)
vm_push_sp (vp, vp->fp - (cp->stack_size + nargs));
- data.vp = vp;
- data.cp = cp;
- mra = GC_call_with_alloc_lock (compose_continuation_inner, &data);
+ gc_inhibit_preemption (thread->mutator);
+ memcpy (vp->fp - cp->stack_size,
+ cp->stack_bottom,
+ cp->stack_size * sizeof (*cp->stack_bottom));
+ vp->fp -= cp->fp_offset;
+ vp->ip = cp->vra;
+ gc_reallow_preemption (thread->mutator);
/* The resumed continuation will expect ARGS on the stack as if from a
multiple-value return. */
@@ -1251,7 +1189,7 @@ compose_continuation (scm_thread *thread, SCM cont)
}
}
- return mra;
+ return cp->mra;
}
static void
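
Annotation (illustrative sketch, not part of the patch): the deleted GC_call_with_alloc_lock trampolines above are replaced by bracketing the stack surgery directly with gc_inhibit_preemption and gc_reallow_preemption, preserving the old invariant that the collector never traces a VM stack whose frames are mid-rewrite. Shape of the pattern, with the Whippet header name assumed:

#include <string.h>
#include "gc-api.h"   /* assumed: the Whippet header declaring gc_inhibit_preemption */

/* Hypothetical helper: copy a block of stack while safepoints are
   inhibited, so the frames are never observed in an inconsistent
   state. */
static void
splice_stack_without_preemption (struct gc_mutator *mut,
                                 void *dst, const void *src, size_t len)
{
  gc_inhibit_preemption (mut);
  memcpy (dst, src, len);
  gc_reallow_preemption (mut);
}
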
diff --git a/libguile/weak-set.c b/libguile/weak-set.c
deleted file mode 100644
index ef6a5231d..000000000
--- a/libguile/weak-set.c
+++ /dev/null
@@ -1,873 +0,0 @@
-/* Copyright 2011-2013,2018,2025
- Free Software Foundation, Inc.
-
- This file is part of Guile.
-
- Guile is free software: you can redistribute it and/or modify it
- under the terms of the GNU Lesser General Public License as published
- by the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- Guile is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
- License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with Guile. If not, see
- <https://www.gnu.org/licenses/>. */
-
-
-
-
-#ifdef HAVE_CONFIG_H
-# include <config.h>
-#endif
-
-#include <assert.h>
-#include <string.h>
-
-#include "bdw-gc.h"
-#include "eval.h"
-#include "finalizers.h"
-#include "hash.h"
-#include "pairs.h"
-#include "ports.h"
-#include "threads.h"
-#include "weak-set.h"
-
-
-/* Weak Sets
-
- This file implements weak sets. One example of a weak set is the
- symbol table, where you want all instances of the `foo' symbol to map
- to one object. So when you load a file and it wants a symbol with
- the characters "foo", you one up in the table, using custom hash and
- equality predicates. Only if one is not found will you bother to
- cons one up and intern it.
-
- Another use case for weak sets is the set of open ports. Guile needs
- to be able to flush them all when the process exits, but the set
- shouldn't prevent the GC from collecting the port (and thus closing
- it).
-
- Weak sets are implemented using an open-addressed hash table.
- Basically this means that there is an array of entries, and the item
- is expected to be found the slot corresponding to its hash code,
- modulo the length of the array.
-
- Collisions are handled using linear probing with the Robin Hood
- technique. See Pedro Celis' paper, "Robin Hood Hashing":
-
- http://www.cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
-
- The vector of entries is allocated as an "atomic" piece of memory, so
- that the GC doesn't trace it. When an item is added to the set, a
- disappearing link is registered to its location. If the item is
- collected, then that link will be zeroed out.
-
- An entry is not just an item, though; the hash code is also stored in
- the entry. We munge hash codes so that they are never 0. In this
- way we can detect removed entries (key of zero but nonzero hash
- code), and can then reshuffle elements as needed to maintain the
- robin hood ordering.
-
- Compared to buckets-and-chains hash tables, open addressing has the
- advantage that it is very cache-friendly. It also uses less memory.
-
- Implementation-wise, there are two things to note.
-
- 1. We assume that hash codes are evenly distributed across the
- range of unsigned longs. The actual hash code stored in the
- entry is left-shifted by 1 bit (losing 1 bit of hash precision),
- and then or'd with 1. In this way we ensure that the hash field
- of an occupied entry is nonzero. To map to an index, we
- right-shift the hash by one, divide by the size, and take the
- remainder.
-
- 2. Since the "keys" (the objects in the set) are stored in an
- atomic region with disappearing links, they need to be accessed
- with the GC alloc lock. `copy_weak_entry' will do that for
- you. The hash code itself can be read outside the lock,
- though.
-*/
-
-
-typedef struct {
- unsigned long hash;
- scm_t_bits key;
-} scm_t_weak_entry;
-
-
-struct weak_entry_data {
- scm_t_weak_entry *in;
- scm_t_weak_entry *out;
-};
-
-static void*
-do_copy_weak_entry (void *data)
-{
- struct weak_entry_data *e = data;
-
- e->out->hash = e->in->hash;
- e->out->key = e->in->key;
-
- return NULL;
-}
-
-static void
-copy_weak_entry (scm_t_weak_entry *src, scm_t_weak_entry *dst)
-{
- struct weak_entry_data data;
-
- data.in = src;
- data.out = dst;
-
- GC_call_with_alloc_lock (do_copy_weak_entry, &data);
-}
-
-
-typedef struct {
- scm_t_weak_entry *entries; /* the data */
- scm_i_pthread_mutex_t lock; /* the lock */
- unsigned long size; /* total number of slots. */
- unsigned long n_items; /* number of items in set */
- unsigned long lower; /* when to shrink */
- unsigned long upper; /* when to grow */
- int size_index; /* index into hashset_size */
- int min_size_index; /* minimum size_index */
-} scm_t_weak_set;
-
-
-#define SCM_WEAK_SET_P(x) (SCM_HAS_TYP7 (x, scm_tc7_weak_set))
-#define SCM_VALIDATE_WEAK_SET(pos, arg) \
- SCM_MAKE_VALIDATE_MSG (pos, arg, WEAK_SET_P, "weak-set")
-#define SCM_WEAK_SET(x) ((scm_t_weak_set *) SCM_CELL_WORD_1 (x))
-
-
-static unsigned long
-hash_to_index (unsigned long hash, unsigned long size)
-{
- return (hash >> 1) % size;
-}
-
-static unsigned long
-entry_distance (unsigned long hash, unsigned long k, unsigned long size)
-{
- unsigned long origin = hash_to_index (hash, size);
-
- if (k >= origin)
- return k - origin;
- else
- /* The other key was displaced and wrapped around. */
- return size - origin + k;
-}
-
-#ifndef HAVE_GC_MOVE_DISAPPEARING_LINK
-static void
-GC_move_disappearing_link (void **from, void **to)
-{
- GC_unregister_disappearing_link (from);
- SCM_I_REGISTER_DISAPPEARING_LINK (to, *to);
-}
-#endif
-
-static void
-move_weak_entry (scm_t_weak_entry *from, scm_t_weak_entry *to)
-{
- if (from->hash)
- {
- scm_t_weak_entry copy;
-
- copy_weak_entry (from, &copy);
- to->hash = copy.hash;
- to->key = copy.key;
-
- if (copy.key && SCM_HEAP_OBJECT_P (SCM_PACK (copy.key)))
- GC_move_disappearing_link ((void **) &from->key, (void **) &to->key);
- }
- else
- {
- to->hash = 0;
- to->key = 0;
- }
-}
-
-static void
-rob_from_rich (scm_t_weak_set *set, unsigned long k)
-{
- unsigned long empty, size;
-
- size = set->size;
-
- /* If we are to free up slot K in the set, we need room to do so. */
- assert (set->n_items < size);
-
- empty = k;
- do
- empty = (empty + 1) % size;
- /* Here we access key outside the lock. Is this a problem? At first
- glance, I wouldn't think so. */
- while (set->entries[empty].key);
-
- do
- {
- unsigned long last = empty ? (empty - 1) : (size - 1);
- move_weak_entry (&set->entries[last], &set->entries[empty]);
- empty = last;
- }
- while (empty != k);
-
- /* Just for sanity. */
- set->entries[empty].hash = 0;
- set->entries[empty].key = 0;
-}
-
-static void
-give_to_poor (scm_t_weak_set *set, unsigned long k)
-{
- /* Slot K was just freed up; possibly shuffle others down. */
- unsigned long size = set->size;
-
- while (1)
- {
- unsigned long next = (k + 1) % size;
- unsigned long hash;
- scm_t_weak_entry copy;
-
- hash = set->entries[next].hash;
-
- if (!hash || hash_to_index (hash, size) == next)
- break;
-
- copy_weak_entry (&set->entries[next], &copy);
-
- if (!copy.key)
- /* Lost weak reference. */
- {
- give_to_poor (set, next);
- set->n_items--;
- continue;
- }
-
- move_weak_entry (&set->entries[next], &set->entries[k]);
-
- k = next;
- }
-
- /* We have shuffled down any entries that should be shuffled down; now
- free the end. */
- set->entries[k].hash = 0;
- set->entries[k].key = 0;
-}
-
-
-
-
-/* Growing or shrinking is triggered when the load factor
- *
- * L = N / S (N: number of items in set, S: bucket vector length)
- *
- * passes an upper limit of 0.9 or a lower limit of 0.2.
- *
- * The implementation stores the upper and lower number of items which
- * trigger a resize in the hashset object.
- *
- * Possible hash set sizes (primes) are stored in the array
- * hashset_size.
- */
-
-static unsigned long hashset_size[] = {
- 31, 61, 113, 223, 443, 883, 1759, 3517, 7027, 14051, 28099, 56197, 112363,
- 224717, 449419, 898823, 1797641, 3595271, 7190537, 14381041, 28762081,
- 57524111, 115048217, 230096423
-};
-
-#define HASHSET_SIZE_N (sizeof(hashset_size)/sizeof(unsigned long))
-
-static int
-compute_size_index (scm_t_weak_set *set)
-{
- int i = set->size_index;
-
- if (set->n_items < set->lower)
- {
- /* rehashing is not triggered when i <= min_size */
- do
- --i;
- while (i > set->min_size_index
- && set->n_items < hashset_size[i] / 5);
- }
- else if (set->n_items > set->upper)
- {
- ++i;
- if (i >= HASHSET_SIZE_N)
- /* The biggest size currently is 230096423, which for a 32-bit
- machine will occupy 1.5GB of memory at a load of 80%. There
- is probably something better to do here, but if you have a
- weak map of that size, you are hosed in any case. */
- abort ();
- }
-
- return i;
-}
-
-static int
-is_acceptable_size_index (scm_t_weak_set *set, int size_index)
-{
- int computed = compute_size_index (set);
-
- if (size_index == computed)
- /* We were going to grow or shrink, and allocating the new vector
- didn't change the target size. */
- return 1;
-
- if (size_index == computed + 1)
- {
- /* We were going to enlarge the set, but allocating the new
- vector finalized some objects, making an enlargement
- unnecessary. It might still be a good idea to use the larger
- set, though. (This branch also gets hit if, while allocating
- the vector, some other thread was actively removing items from
- the set. That is less likely, though.) */
- unsigned long new_lower = hashset_size[size_index] / 5;
-
- return set->size > new_lower;
- }
-
- if (size_index == computed - 1)
- {
- /* We were going to shrink the set, but when we dropped the lock
- to allocate the new vector, some other thread added elements to
- the set. */
- return 0;
- }
-
- /* The computed size differs from our newly allocated size by more
- than one size index -- recalculate. */
- return 0;
-}
-
-static void
-resize_set (scm_t_weak_set *set)
-{
- scm_t_weak_entry *old_entries, *new_entries;
- int new_size_index;
- unsigned long old_size, new_size, old_k;
-
- do
- {
- new_size_index = compute_size_index (set);
- if (new_size_index == set->size_index)
- return;
- new_size = hashset_size[new_size_index];
- }
- while (!is_acceptable_size_index (set, new_size_index));
-
- new_entries = scm_gc_malloc_pointerless (new_size * sizeof (scm_t_weak_entry),
- "weak set");
- old_entries = set->entries;
- old_size = set->size;
-
- memset (new_entries, 0, new_size * sizeof(scm_t_weak_entry));
-
- set->size_index = new_size_index;
- set->size = new_size;
- if (new_size_index <= set->min_size_index)
- set->lower = 0;
- else
- set->lower = new_size / 5;
- set->upper = 9 * new_size / 10;
- set->n_items = 0;
- set->entries = new_entries;
-
- for (old_k = 0; old_k < old_size; old_k++)
- {
- scm_t_weak_entry copy;
- unsigned long new_k, distance;
-
- if (!old_entries[old_k].hash)
- continue;
-
- copy_weak_entry (&old_entries[old_k], &copy);
-
- if (!copy.key)
- continue;
-
- new_k = hash_to_index (copy.hash, new_size);
-
- for (distance = 0; ; distance++, new_k = (new_k + 1) % new_size)
- {
- unsigned long other_hash = new_entries[new_k].hash;
-
- if (!other_hash)
- /* Found an empty entry. */
- break;
-
- /* Displace the entry if our distance is less, otherwise keep
- looking. */
- if (entry_distance (other_hash, new_k, new_size) < distance)
- {
- rob_from_rich (set, new_k);
- break;
- }
- }
-
- set->n_items++;
- new_entries[new_k].hash = copy.hash;
- new_entries[new_k].key = copy.key;
-
- if (SCM_HEAP_OBJECT_P (SCM_PACK (copy.key)))
- SCM_I_REGISTER_DISAPPEARING_LINK ((void **) &new_entries[new_k].key,
- (void *) new_entries[new_k].key);
- }
-}
-
-/* Run from a finalizer via do_vacuum_weak_set, this function runs over
- the whole table, removing lost weak references, reshuffling the set
- as it goes. It might resize the set if it reaps enough entries. */
-static void
-vacuum_weak_set (scm_t_weak_set *set)
-{
- scm_t_weak_entry *entries = set->entries;
- unsigned long size = set->size;
- unsigned long k;
-
- for (k = 0; k < size; k++)
- {
- unsigned long hash = entries[k].hash;
-
- if (hash)
- {
- scm_t_weak_entry copy;
-
- copy_weak_entry (&entries[k], &copy);
-
- if (!copy.key)
- /* Lost weak reference; reshuffle. */
- {
- give_to_poor (set, k);
- set->n_items--;
- }
- }
- }
-
- if (set->n_items < set->lower)
- resize_set (set);
-}
-
-
-
-
-static SCM
-weak_set_lookup (scm_t_weak_set *set, unsigned long hash,
- scm_t_set_predicate_fn pred, void *closure,
- SCM dflt)
-{
- unsigned long k, distance, size;
- scm_t_weak_entry *entries;
-
- size = set->size;
- entries = set->entries;
-
- hash = (hash << 1) | 0x1;
- k = hash_to_index (hash, size);
-
- for (distance = 0; distance < size; distance++, k = (k + 1) % size)
- {
- unsigned long other_hash;
-
- retry:
- other_hash = entries[k].hash;
-
- if (!other_hash)
- /* Not found. */
- return dflt;
-
- if (hash == other_hash)
- {
- scm_t_weak_entry copy;
-
- copy_weak_entry (&entries[k], &copy);
-
- if (!copy.key)
- /* Lost weak reference; reshuffle. */
- {
- give_to_poor (set, k);
- set->n_items--;
- goto retry;
- }
-
- if (pred (SCM_PACK (copy.key), closure))
- /* Found. */
- return SCM_PACK (copy.key);
- }
-
- /* If the entry's distance is less, our key is not in the set. */
- if (entry_distance (other_hash, k, size) < distance)
- return dflt;
- }
-
- /* If we got here, then we were unfortunate enough to loop through the
- whole set. Shouldn't happen, but hey. */
- return dflt;
-}
-
-
-static SCM
-weak_set_add_x (scm_t_weak_set *set, unsigned long hash,
- scm_t_set_predicate_fn pred, void *closure,
- SCM obj)
-{
- unsigned long k, distance, size;
- scm_t_weak_entry *entries;
-
- size = set->size;
- entries = set->entries;
-
- hash = (hash << 1) | 0x1;
- k = hash_to_index (hash, size);
-
- for (distance = 0; ; distance++, k = (k + 1) % size)
- {
- unsigned long other_hash;
-
- retry:
- other_hash = entries[k].hash;
-
- if (!other_hash)
- /* Found an empty entry. */
- break;
-
- if (other_hash == hash)
- {
- scm_t_weak_entry copy;
-
- copy_weak_entry (&entries[k], &copy);
-
- if (!copy.key)
- /* Lost weak reference; reshuffle. */
- {
- give_to_poor (set, k);
- set->n_items--;
- goto retry;
- }
-
- if (pred (SCM_PACK (copy.key), closure))
- /* Found an entry with this key. */
- return SCM_PACK (copy.key);
- }
-
- if (set->n_items > set->upper)
- /* Full set, time to resize. */
- {
- vacuum_weak_set (set);
- resize_set (set);
- return weak_set_add_x (set, hash >> 1, pred, closure, obj);
- }
-
- /* Displace the entry if our distance is less, otherwise keep
- looking. */
- if (entry_distance (other_hash, k, size) < distance)
- {
- rob_from_rich (set, k);
- break;
- }
- }
-
- set->n_items++;
- entries[k].hash = hash;
- entries[k].key = SCM_UNPACK (obj);
-
- if (SCM_HEAP_OBJECT_P (obj))
- SCM_I_REGISTER_DISAPPEARING_LINK ((void **) &entries[k].key,
- (void *) SCM2PTR (obj));
-
- return obj;
-}
-
-
-static void
-weak_set_remove_x (scm_t_weak_set *set, unsigned long hash,
- scm_t_set_predicate_fn pred, void *closure)
-{
- unsigned long k, distance, size;
- scm_t_weak_entry *entries;
-
- size = set->size;
- entries = set->entries;
-
- hash = (hash << 1) | 0x1;
- k = hash_to_index (hash, size);
-
- for (distance = 0; distance < size; distance++, k = (k + 1) % size)
- {
- unsigned long other_hash;
-
- retry:
- other_hash = entries[k].hash;
-
- if (!other_hash)
- /* Not found. */
- return;
-
- if (other_hash == hash)
- {
- scm_t_weak_entry copy;
-
- copy_weak_entry (&entries[k], &copy);
-
- if (!copy.key)
- /* Lost weak reference; reshuffle. */
- {
- give_to_poor (set, k);
- set->n_items--;
- goto retry;
- }
-
- if (pred (SCM_PACK (copy.key), closure))
- /* Found an entry with this key. */
- {
- entries[k].hash = 0;
- entries[k].key = 0;
-
- if (SCM_HEAP_OBJECT_P (SCM_PACK (copy.key)))
- GC_unregister_disappearing_link ((void **) &entries[k].key);
-
- if (--set->n_items < set->lower)
- resize_set (set);
- else
- give_to_poor (set, k);
-
- return;
- }
- }
-
- /* If the entry's distance is less, our key is not in the set. */
- if (entry_distance (other_hash, k, size) < distance)
- return;
- }
-}
-
-
-
-static SCM
-make_weak_set (unsigned long k)
-{
- scm_t_weak_set *set;
-
- int i = 0, n = k ? k : 31;
- while (i + 1 < HASHSET_SIZE_N && n > hashset_size[i])
- ++i;
- n = hashset_size[i];
-
- set = scm_gc_malloc (sizeof (*set), "weak-set");
- set->entries = scm_gc_malloc_pointerless (n * sizeof(scm_t_weak_entry),
- "weak-set");
- memset (set->entries, 0, n * sizeof(scm_t_weak_entry));
- set->n_items = 0;
- set->size = n;
- set->lower = 0;
- set->upper = 9 * n / 10;
- set->size_index = i;
- set->min_size_index = i;
- scm_i_pthread_mutex_init (&set->lock, NULL);
-
- return scm_cell (scm_tc7_weak_set, (scm_t_bits)set);
-}
-
-void
-scm_i_weak_set_print (SCM exp, SCM port, scm_print_state *pstate)
-{
- scm_puts ("#<", port);
- scm_puts ("weak-set ", port);
- scm_uintprint (SCM_WEAK_SET (exp)->n_items, 10, port);
- scm_putc ('/', port);
- scm_uintprint (SCM_WEAK_SET (exp)->size, 10, port);
- scm_puts (">", port);
-}
-
-SCM
-scm_c_make_weak_set (unsigned long k)
-{
- SCM ret;
-
- ret = make_weak_set (k);
-
- return ret;
-}
-
-SCM
-scm_weak_set_p (SCM obj)
-{
- return scm_from_bool (SCM_WEAK_SET_P (obj));
-}
-
-SCM
-scm_weak_set_clear_x (SCM set)
-{
- scm_t_weak_set *s = SCM_WEAK_SET (set);
-
- scm_i_pthread_mutex_lock (&s->lock);
-
- memset (s->entries, 0, sizeof (scm_t_weak_entry) * s->size);
- s->n_items = 0;
-
- scm_i_pthread_mutex_unlock (&s->lock);
-
- return SCM_UNSPECIFIED;
-}
-
-SCM
-scm_c_weak_set_lookup (SCM set, unsigned long raw_hash,
- scm_t_set_predicate_fn pred,
- void *closure, SCM dflt)
-{
- SCM ret;
- scm_t_weak_set *s = SCM_WEAK_SET (set);
-
- scm_i_pthread_mutex_lock (&s->lock);
-
- ret = weak_set_lookup (s, raw_hash, pred, closure, dflt);
-
- scm_i_pthread_mutex_unlock (&s->lock);
-
- return ret;
-}
-
-SCM
-scm_c_weak_set_add_x (SCM set, unsigned long raw_hash,
- scm_t_set_predicate_fn pred,
- void *closure, SCM obj)
-{
- SCM ret;
- scm_t_weak_set *s = SCM_WEAK_SET (set);
-
- scm_i_pthread_mutex_lock (&s->lock);
-
- ret = weak_set_add_x (s, raw_hash, pred, closure, obj);
-
- scm_i_pthread_mutex_unlock (&s->lock);
-
- return ret;
-}
-
-void
-scm_c_weak_set_remove_x (SCM set, unsigned long raw_hash,
- scm_t_set_predicate_fn pred,
- void *closure)
-{
- scm_t_weak_set *s = SCM_WEAK_SET (set);
-
- scm_i_pthread_mutex_lock (&s->lock);
-
- weak_set_remove_x (s, raw_hash, pred, closure);
-
- scm_i_pthread_mutex_unlock (&s->lock);
-}
-
-static int
-eq_predicate (SCM x, void *closure)
-{
- return scm_is_eq (x, SCM_PACK_POINTER (closure));
-}
-
-SCM
-scm_weak_set_add_x (SCM set, SCM obj)
-{
- return scm_c_weak_set_add_x (set, scm_ihashq (obj, -1),
- eq_predicate, SCM_UNPACK_POINTER (obj), obj);
-}
-
-SCM
-scm_weak_set_remove_x (SCM set, SCM obj)
-{
- scm_c_weak_set_remove_x (set, scm_ihashq (obj, -1),
- eq_predicate, SCM_UNPACK_POINTER (obj));
-
- return SCM_UNSPECIFIED;
-}
-
-SCM
-scm_c_weak_set_fold (scm_t_set_fold_fn proc, void *closure,
- SCM init, SCM set)
-{
- scm_t_weak_set *s;
- scm_t_weak_entry *entries;
- unsigned long k, size;
-
- s = SCM_WEAK_SET (set);
-
- scm_i_pthread_mutex_lock (&s->lock);
-
- size = s->size;
- entries = s->entries;
-
- for (k = 0; k < size; k++)
- {
- if (entries[k].hash)
- {
- scm_t_weak_entry copy;
-
- copy_weak_entry (&entries[k], &copy);
-
- if (copy.key)
- {
- /* Release set lock while we call the function. */
- scm_i_pthread_mutex_unlock (&s->lock);
- init = proc (closure, SCM_PACK (copy.key), init);
- scm_i_pthread_mutex_lock (&s->lock);
- }
- }
- }
-
- scm_i_pthread_mutex_unlock (&s->lock);
-
- return init;
-}
-
-static SCM
-fold_trampoline (void *closure, SCM item, SCM init)
-{
- return scm_call_2 (SCM_PACK_POINTER (closure), item, init);
-}
-
-SCM
-scm_weak_set_fold (SCM proc, SCM init, SCM set)
-{
- return scm_c_weak_set_fold (fold_trampoline, SCM_UNPACK_POINTER (proc), init, set);
-}
-
-static SCM
-for_each_trampoline (void *closure, SCM item, SCM seed)
-{
- scm_call_1 (SCM_PACK_POINTER (closure), item);
- return seed;
-}
-
-SCM
-scm_weak_set_for_each (SCM proc, SCM set)
-{
- scm_c_weak_set_fold (for_each_trampoline, SCM_UNPACK_POINTER (proc), SCM_BOOL_F, set);
-
- return SCM_UNSPECIFIED;
-}
-
-static SCM
-map_trampoline (void *closure, SCM item, SCM seed)
-{
- return scm_cons (scm_call_1 (SCM_PACK_POINTER (closure), item), seed);
-}
-
-SCM
-scm_weak_set_map_to_list (SCM proc, SCM set)
-{
- return scm_c_weak_set_fold (map_trampoline, SCM_UNPACK_POINTER (proc), SCM_EOL, set);
-}
-
-
-void
-scm_init_weak_set ()
-{
-#include "weak-set.x"
-}
diff --git a/libguile/weak-set.h b/libguile/weak-set.h
deleted file mode 100644
index 621bce85f..000000000
--- a/libguile/weak-set.h
+++ /dev/null
@@ -1,62 +0,0 @@
-#ifndef SCM_WEAK_SET_H
-#define SCM_WEAK_SET_H
-
-/* Copyright 2011,2018
- Free Software Foundation, Inc.
-
- This file is part of Guile.
-
- Guile is free software: you can redistribute it and/or modify it
- under the terms of the GNU Lesser General Public License as published
- by the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- Guile is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
- License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with Guile. If not, see
- <https://www.gnu.org/licenses/>. */
-
-
-
-#include "libguile/scm.h"
-
-
-
-/* The weak set API is currently only used internally. We could make it
- public later, after some API review. */
-
-/* Function that returns nonzero if the given object is the one we are
- looking for. */
-typedef int (*scm_t_set_predicate_fn) (SCM obj, void *closure);
-
-/* Function to fold over the elements of a set. */
-typedef SCM (*scm_t_set_fold_fn) (void *closure, SCM key, SCM result);
-
-SCM_INTERNAL SCM scm_c_make_weak_set (unsigned long k);
-SCM_INTERNAL SCM scm_weak_set_p (SCM h);
-SCM_INTERNAL SCM scm_c_weak_set_lookup (SCM set, unsigned long raw_hash,
- scm_t_set_predicate_fn pred,
- void *closure, SCM dflt);
-SCM_INTERNAL SCM scm_c_weak_set_add_x (SCM set, unsigned long raw_hash,
- scm_t_set_predicate_fn pred,
- void *closure, SCM obj);
-SCM_INTERNAL void scm_c_weak_set_remove_x (SCM set, unsigned long raw_hash,
- scm_t_set_predicate_fn pred,
- void *closure);
-SCM_INTERNAL SCM scm_weak_set_add_x (SCM set, SCM obj);
-SCM_INTERNAL SCM scm_weak_set_remove_x (SCM set, SCM obj);
-SCM_INTERNAL SCM scm_weak_set_clear_x (SCM set);
-SCM_INTERNAL SCM scm_c_weak_set_fold (scm_t_set_fold_fn proc, void *closure,
- SCM init, SCM set);
-SCM_INTERNAL SCM scm_weak_set_fold (SCM proc, SCM init, SCM set);
-SCM_INTERNAL SCM scm_weak_set_for_each (SCM proc, SCM set);
-SCM_INTERNAL SCM scm_weak_set_map_to_list (SCM proc, SCM set);
-
-SCM_INTERNAL void scm_i_weak_set_print (SCM exp, SCM port, scm_print_state *pstate);
-SCM_INTERNAL void scm_init_weak_set (void);
-
-#endif /* SCM_WEAK_SET_H */
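For context on the code removed above: the weak set was a robin-hood open-addressed table. Each occupied slot carries a munged nonzero hash, and rob_from_rich / give_to_poor keep slots ordered by probe distance, the number of steps an entry sits past its ideal bucket. A minimal sketch of that distance calculation follows, assuming hash_to_index reduces the munged hash modulo the table size (the deleted helper's exact definition is not shown in this diff, and probe_distance is a hypothetical name):

/* Sketch only: robin-hood probe distance, assuming the ideal bucket is
   hash % size.  Insertion displaces an incumbent whose distance is smaller
   than the probing key's own; a lookup may stop as soon as it meets one. */
static unsigned long
probe_distance (unsigned long hash, unsigned long k, unsigned long size)
{
  unsigned long ideal = hash % size;
  return k >= ideal ? k - ideal : k + size - ideal;
}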
diff --git a/libguile/whippet-embedder.h b/libguile/whippet-embedder.h
index aa82eb4f1..82e177936 100644
--- a/libguile/whippet-embedder.h
+++ b/libguile/whippet-embedder.h
@@ -109,6 +109,31 @@ static inline void gc_trace_heap_roots (struct gc_heap_roots *roots,
void *trace_data) {
}
+static inline void
+gc_trace_mutator_conservative_roots (struct gc_mutator_roots *roots,
+ void (*trace_range) (uintptr_t lo,
+ uintptr_t hi,
+ int possibly_interior,
+ struct gc_heap *heap,
+ void *trace_data),
+ struct gc_heap *heap,
+ void *trace_data) {
+ /* FIXME: thread stack? Currently traced via the precise
+ gc_trace_mutator_roots. */
+}
+
+static inline void
+gc_trace_heap_conservative_roots (struct gc_heap_roots *roots,
+ void (*trace_range) (uintptr_t lo,
+ uintptr_t hi,
+ int possibly_interior,
+ struct gc_heap *heap,
+ void *trace_data),
+ struct gc_heap *heap,
+ void *trace_data) {
+ scm_trace_loader_conservative_roots(trace_range, heap, trace_data);
+}
+
static inline SCM scm_from_gc_ref (struct gc_ref ref) {
return SCM_PACK (gc_ref_value (ref));
}
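The two embedder hooks added above let Guile hand word-aligned address ranges to the collector for conservative scanning: the heap-level hook forwards to scm_trace_loader_conservative_roots (defined in loader.c, per the diffstat), while the per-mutator hook stays empty because thread stacks are still traced precisely. The sketch below is a hypothetical illustration of the trace_range callback contract; sketch_roots_lo/hi and sketch_trace_conservative_range are invented names, not part of the patch:

#include <stdint.h>

struct gc_heap;   /* opaque, from the Whippet API */

static uintptr_t sketch_roots_lo, sketch_roots_hi;   /* hypothetical range, set by the embedder */

static void
sketch_trace_conservative_range (void (*trace_range) (uintptr_t lo, uintptr_t hi,
                                                      int possibly_interior,
                                                      struct gc_heap *heap,
                                                      void *trace_data),
                                 struct gc_heap *heap, void *trace_data)
{
  /* lo/hi delimit a word-aligned span of potential heap references;
     passing possibly_interior == 0 asserts the span holds only pointers to
     object starts (an assumption made for this sketch). */
  if (sketch_roots_lo < sketch_roots_hi)
    trace_range (sketch_roots_lo, sketch_roots_hi, 0, heap, trace_data);
}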
diff --git a/libguile/whippet/api/gc-api.h b/libguile/whippet/api/gc-api.h
index e34e0d57e..2ab063f8d 100644
--- a/libguile/whippet/api/gc-api.h
+++ b/libguile/whippet/api/gc-api.h
@@ -53,8 +53,14 @@ GC_API_ void gc_heap_set_extern_space(struct gc_heap *heap,
GC_API_ struct gc_mutator* gc_init_for_thread(struct gc_stack_addr base,
struct gc_heap *heap);
GC_API_ void gc_finish_for_thread(struct gc_mutator *mut);
-GC_API_ void* gc_call_without_gc(struct gc_mutator *mut, void* (*f)(void*),
- void *data) GC_NEVER_INLINE;
+GC_API_ void gc_deactivate(struct gc_mutator *mut);
+GC_API_ void gc_reactivate(struct gc_mutator *mut);
+GC_API_ void* gc_deactivate_for_call(struct gc_mutator *mut,
+ void* (*f)(struct gc_mutator*, void*),
+ void *data);
+GC_API_ void* gc_reactivate_for_call(struct gc_mutator *mut,
+ void* (*f)(struct gc_mutator*, void*),
+ void *data);
GC_API_ void gc_collect(struct gc_mutator *mut,
enum gc_collection_kind requested_kind);
@@ -301,4 +307,18 @@ static inline void gc_safepoint(struct gc_mutator *mut) {
gc_safepoint_slow(mut);
}
+GC_API_ int gc_safepoint_signal_number(void);
+GC_API_ void gc_safepoint_signal_inhibit(struct gc_mutator *mut);
+GC_API_ void gc_safepoint_signal_reallow(struct gc_mutator *mut);
+
+static inline void gc_inhibit_preemption(struct gc_mutator *mut) {
+ if (gc_safepoint_mechanism() == GC_SAFEPOINT_MECHANISM_SIGNAL)
+ gc_safepoint_signal_inhibit(mut);
+}
+
+static inline void gc_reallow_preemption(struct gc_mutator *mut) {
+ if (gc_safepoint_mechanism() == GC_SAFEPOINT_MECHANISM_SIGNAL)
+ gc_safepoint_signal_reallow(mut);
+}
+
#endif // GC_API_H_
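The API change above splits the old gc_call_without_gc entry point into explicit gc_deactivate / gc_reactivate plus the *_for_call wrappers, and exposes the stop-the-world signal so callers can inhibit preemption around signal-sensitive regions. A short usage sketch, hypothetical caller code rather than anything in this patch, wraps a blocking read so the collector does not have to wait for this mutator:

#include <unistd.h>
#include "gc-api.h"          /* assumed include path for the API above */

struct read_req { int fd; void *buf; size_t len; ssize_t ret; };

/* Runs while the mutator is deactivated: it must not allocate from or
   otherwise touch the GC-managed heap. */
static void *do_blocking_read (struct gc_mutator *mut, void *data)
{
  struct read_req *req = data;
  req->ret = read (req->fd, req->buf, req->len);
  return NULL;
}

static ssize_t read_without_stalling_gc (struct gc_mutator *mut, int fd,
                                         void *buf, size_t len)
{
  struct read_req req = { fd, buf, len, 0 };
  gc_deactivate_for_call (mut, do_blocking_read, &req);
  return req.ret;
}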
diff --git a/libguile/whippet/api/gc-embedder-api.h b/libguile/whippet/api/gc-embedder-api.h
index c1b272a51..28a0fb6de 100644
--- a/libguile/whippet/api/gc-embedder-api.h
+++ b/libguile/whippet/api/gc-embedder-api.h
@@ -50,6 +50,25 @@ GC_EMBEDDER_API inline void gc_trace_heap_roots(struct gc_heap_roots *roots,
struct gc_heap *heap,
void *trace_data);
+GC_EMBEDDER_API inline void
+gc_trace_mutator_conservative_roots(struct gc_mutator_roots *roots,
+ void (*trace_range)(uintptr_t start,
+ uintptr_t end,
+ int possibly_interior,
+ struct gc_heap *heap,
+ void *data),
+ struct gc_heap *heap,
+ void *data);
+GC_EMBEDDER_API inline void
+gc_trace_heap_conservative_roots(struct gc_heap_roots *roots,
+ void (*trace_range)(uintptr_t start,
+ uintptr_t end,
+ int possibly_interior,
+ struct gc_heap *heap,
+ void *data),
+ struct gc_heap *heap,
+ void *data);
+
GC_EMBEDDER_API inline uintptr_t gc_object_forwarded_nonatomic(struct gc_ref ref);
GC_EMBEDDER_API inline void gc_object_forward_nonatomic(struct gc_ref ref,
struct gc_ref new_ref);
diff --git a/libguile/whippet/benchmarks/ephemerons.c b/libguile/whippet/benchmarks/ephemerons.c
index c74db9d43..f0459dab4 100644
--- a/libguile/whippet/benchmarks/ephemerons.c
+++ b/libguile/whippet/benchmarks/ephemerons.c
@@ -183,7 +183,7 @@ static void* run_one_test_in_thread(void *arg) {
}
struct join_data { int status; pthread_t thread; };
-static void *join_thread(void *data) {
+static void *join_thread(struct gc_mutator *unused, void *data) {
struct join_data *join_data = data;
void *ret;
join_data->status = pthread_join(join_data->thread, &ret);
@@ -256,7 +256,7 @@ int main(int argc, char *argv[]) {
run_one_test(&main_thread);
for (size_t i = 1; i < nthreads; i++) {
struct join_data data = { 0, threads[i] };
- gc_call_without_gc(mut, join_thread, &data);
+ gc_deactivate_for_call(mut, join_thread, &data);
if (data.status) {
errno = data.status;
perror("Failed to join thread");
diff --git a/libguile/whippet/benchmarks/finalizers.c b/libguile/whippet/benchmarks/finalizers.c
index 79a2660c3..8d4b5c25a 100644
--- a/libguile/whippet/benchmarks/finalizers.c
+++ b/libguile/whippet/benchmarks/finalizers.c
@@ -190,7 +190,7 @@ static void* run_one_test_in_thread(void *arg) {
}
struct join_data { int status; pthread_t thread; };
-static void *join_thread(void *data) {
+static void *join_thread(struct gc_mutator *unused, void *data) {
struct join_data *join_data = data;
void *ret;
join_data->status = pthread_join(join_data->thread, &ret);
@@ -263,7 +263,7 @@ int main(int argc, char *argv[]) {
ssize_t outstanding = (size_t)run_one_test(&main_thread);
for (size_t i = 1; i < nthreads; i++) {
struct join_data data = { 0, threads[i] };
- void *ret = gc_call_without_gc(mut, join_thread, &data);
+ void *ret = gc_deactivate_for_call(mut, join_thread, &data);
if (data.status) {
errno = data.status;
perror("Failed to join thread");
diff --git a/libguile/whippet/benchmarks/mt-gcbench.c b/libguile/whippet/benchmarks/mt-gcbench.c
index 62fdc7154..e705702bb 100644
--- a/libguile/whippet/benchmarks/mt-gcbench.c
+++ b/libguile/whippet/benchmarks/mt-gcbench.c
@@ -319,7 +319,7 @@ static void* run_one_test_in_thread(void *arg) {
}
struct join_data { int status; pthread_t thread; };
-static void *join_thread(void *data) {
+static void *join_thread(struct gc_mutator *unused, void *data) {
struct join_data *join_data = data;
void *ret;
join_data->status = pthread_join(join_data->thread, &ret);
@@ -389,7 +389,7 @@ int main(int argc, char *argv[]) {
run_one_test(&main_thread);
for (size_t i = 1; i < nthreads; i++) {
struct join_data data = { 0, threads[i] };
- gc_call_without_gc(mut, join_thread, &data);
+ gc_deactivate_for_call(mut, join_thread, &data);
if (data.status) {
errno = data.status;
perror("Failed to join thread");
diff --git a/libguile/whippet/benchmarks/simple-gc-embedder.h b/libguile/whippet/benchmarks/simple-gc-embedder.h
index 904d2c740..6f329573b 100644
--- a/libguile/whippet/benchmarks/simple-gc-embedder.h
+++ b/libguile/whippet/benchmarks/simple-gc-embedder.h
@@ -92,6 +92,26 @@ static inline void gc_trace_heap_roots(struct gc_heap_roots *roots,
visit_roots(roots->roots, trace_edge, heap, trace_data);
}
+static inline void
+gc_trace_mutator_conservative_roots(struct gc_mutator_roots *roots,
+ void (*trace_range)(uintptr_t start,
+ uintptr_t end,
+ int possibly_interior,
+ struct gc_heap *heap,
+ void *data),
+ struct gc_heap *heap,
+ void *data) {}
+
+static inline void
+gc_trace_heap_conservative_roots(struct gc_heap_roots *roots,
+ void (*trace_range)(uintptr_t start,
+ uintptr_t end,
+ int possibly_interior,
+ struct gc_heap *heap,
+ void *data),
+ struct gc_heap *heap,
+ void *data) {}
+
static inline uintptr_t gc_object_forwarded_nonatomic(struct gc_ref ref) {
uintptr_t tag = *tag_word(ref);
return (tag & gcobj_not_forwarded_bit) ? 0 : tag;
diff --git a/libguile/whippet/src/bdw.c b/libguile/whippet/src/bdw.c
index e6b3f4ef5..fc228c15e 100644
--- a/libguile/whippet/src/bdw.c
+++ b/libguile/whippet/src/bdw.c
@@ -203,6 +203,16 @@ void gc_write_barrier_slow(struct gc_mutator *mut, struct gc_ref obj,
int* gc_safepoint_flag_loc(struct gc_mutator *mut) { GC_CRASH(); }
void gc_safepoint_slow(struct gc_mutator *mut) { GC_CRASH(); }
+int gc_safepoint_signal_number(void) {
+ return GC_get_suspend_signal();
+}
+void gc_safepoint_signal_inhibit(struct gc_mutator *mut) {
+ GC_alloc_lock();
+}
+void gc_safepoint_signal_reallow(struct gc_mutator *mut) {
+ GC_alloc_unlock();
+}
+
struct bdw_mark_state {
struct GC_ms_entry *mark_stack_ptr;
struct GC_ms_entry *mark_stack_limit;
@@ -218,6 +228,20 @@ static void bdw_mark_edge(struct gc_edge edge, struct gc_heap *heap,
NULL);
}
+static void bdw_mark_range(uintptr_t lo, uintptr_t hi, int possibly_interior,
+ struct gc_heap *heap, void *visit_data) {
+ struct bdw_mark_state *state = visit_data;
+
+ GC_ASSERT_EQ (lo, align_up (lo, sizeof(void*)));
+ GC_ASSERT_EQ (hi, align_up (hi, sizeof(void*)));
+
+ for (void **walk = (void**)lo, **end = (void**)hi; walk < end; walk++)
+ state->mark_stack_ptr = GC_MARK_AND_PUSH (*walk,
+ state->mark_stack_ptr,
+ state->mark_stack_limit,
+ NULL);
+}
+
static int heap_gc_kind;
static int mutator_gc_kind;
static int ephemeron_gc_kind;
@@ -370,8 +394,10 @@ mark_heap(GC_word *addr, struct GC_ms_entry *mark_stack_ptr,
if (heap != __the_bdw_gc_heap)
return state.mark_stack_ptr;
- if (heap->roots)
+ if (heap->roots) {
+ gc_trace_heap_conservative_roots(heap->roots, bdw_mark_range, heap, &state);
gc_trace_heap_roots(heap->roots, bdw_mark_edge, heap, &state);
+ }
gc_visit_finalizer_roots(heap->finalizer_state, bdw_mark_edge, heap, &state);
@@ -405,8 +431,11 @@ mark_mutator(GC_word *addr, struct GC_ms_entry *mark_stack_ptr,
memset(mut->freelists, 0, sizeof(void*) * GC_INLINE_FREELIST_COUNT);
- if (mut->roots)
+ if (mut->roots) {
+ gc_trace_mutator_conservative_roots(mut->roots, bdw_mark_range,
+ mut->heap, &state);
gc_trace_mutator_roots(mut->roots, bdw_mark_edge, mut->heap, &state);
+ }
state.mark_stack_ptr = GC_MARK_AND_PUSH (mut->next,
state.mark_stack_ptr,
@@ -529,6 +558,11 @@ static void* oom_fn(size_t nbytes) {
return NULL;
}
+static void warn_fn(char *fmt, GC_word arg) {
+ /* FIXME: Do something better with this. */
+ fprintf (stderr, fmt, arg);
+}
+
void gc_heap_set_allocation_failure_handler(struct gc_heap *heap,
void* (*handler)(struct gc_heap*,
size_t)) {
@@ -587,6 +621,7 @@ int gc_init(const struct gc_options *options, struct gc_stack_addr stack_base,
snprintf(markers, sizeof(markers), "%d", options->common.parallelism);
setenv("GC_MARKERS", markers, 1);
GC_init();
+ GC_set_warn_proc (warn_fn);
size_t current_heap_size = GC_get_heap_size();
if (options->common.heap_size > current_heap_size)
GC_expand_hp(options->common.heap_size - current_heap_size);
@@ -650,10 +685,32 @@ void gc_finish_for_thread(struct gc_mutator *mut) {
GC_unregister_my_thread();
}
-void* gc_call_without_gc(struct gc_mutator *mut,
- void* (*f)(void*),
- void *data) {
- return GC_do_blocking(f, data);
+struct call_with_mutator_data {
+ void* (*proc) (struct gc_mutator*, void*);
+ struct gc_mutator *mutator;
+ void *data;
+};
+
+static void* call_with_mutator (void *p) {
+ struct call_with_mutator_data *data = p;
+ return data->proc(data->mutator, data->data);
+}
+
+void gc_deactivate(struct gc_mutator *mut) {}
+void gc_reactivate(struct gc_mutator *mut) {}
+
+void* gc_deactivate_for_call(struct gc_mutator *mut,
+ void* (*f)(struct gc_mutator *, void*),
+ void *data) {
+ struct call_with_mutator_data d = { f, mut, data };
+ return GC_do_blocking(call_with_mutator, &d);
+}
+
+void* gc_reactivate_for_call(struct gc_mutator *mut,
+ void* (*f)(struct gc_mutator *, void*),
+ void *data) {
+ struct call_with_mutator_data d = { f, mut, data };
+ return GC_call_with_gc_active(call_with_mutator, &d);
}
void gc_mutator_set_roots(struct gc_mutator *mut,
diff --git a/libguile/whippet/src/mmc.c b/libguile/whippet/src/mmc.c
index 1ae342f5e..4e79675cc 100644
--- a/libguile/whippet/src/mmc.c
+++ b/libguile/whippet/src/mmc.c
@@ -88,6 +88,7 @@ struct gc_mutator {
void *event_listener_data;
struct gc_mutator *next;
struct gc_mutator *prev;
+ int active;
};
struct gc_trace_worker_data {
@@ -216,6 +217,7 @@ add_mutator(struct gc_heap *heap, struct gc_mutator *mut) {
while (mutators_are_stopping(heap))
pthread_cond_wait(&heap->mutator_cond, &heap->lock);
mut->next = mut->prev = NULL;
+ mut->active = 1;
struct gc_mutator *tail = heap->mutators;
if (tail) {
mut->next = tail;
@@ -235,6 +237,7 @@ remove_mutator(struct gc_heap *heap, struct gc_mutator *mut) {
mut->heap = NULL;
heap_lock(heap);
heap->mutator_count--;
+ mut->active = 0;
if (mut->next)
mut->next->prev = mut->prev;
if (mut->prev)
@@ -325,7 +328,8 @@ load_conservative_ref(uintptr_t addr) {
static inline void
trace_conservative_edges(uintptr_t low, uintptr_t high, int possibly_interior,
- struct gc_heap *heap, struct gc_trace_worker *worker) {
+ struct gc_heap *heap, void *data) {
+ struct gc_trace_worker *worker = data;
GC_ASSERT(low == align_down(low, sizeof(uintptr_t)));
GC_ASSERT(high == align_down(high, sizeof(uintptr_t)));
for (uintptr_t addr = low; addr < high; addr += sizeof(uintptr_t))
@@ -399,6 +403,14 @@ trace_root(struct gc_root root, struct gc_heap *heap,
gc_field_set_visit_edge_buffer(&heap->remembered_set, root.edge_buffer,
trace_remembered_edge, heap, worker);
break;
+ case GC_ROOT_KIND_HEAP_CONSERVATIVE_ROOTS:
+ gc_trace_heap_conservative_roots(root.heap->roots, trace_conservative_edges,
+ heap, worker);
+ break;
+ case GC_ROOT_KIND_MUTATOR_CONSERVATIVE_ROOTS:
+ gc_trace_mutator_conservative_roots(root.mutator->roots,
+ trace_conservative_edges, heap, worker);
+ break;
default:
GC_CRASH();
}
@@ -636,9 +648,13 @@ enqueue_mutator_conservative_roots(struct gc_heap *heap) {
int possibly_interior = gc_mutator_conservative_roots_may_be_interior();
for (struct gc_mutator *mut = heap->mutators;
mut;
- mut = mut->next)
+ mut = mut->next) {
gc_stack_visit(&mut->stack, enqueue_conservative_roots, heap,
&possibly_interior);
+ if (mut->roots)
+ gc_tracer_add_root(&heap->tracer,
+ gc_root_mutator_conservative_roots(mut));
+ }
return 1;
}
return 0;
@@ -650,6 +666,8 @@ enqueue_global_conservative_roots(struct gc_heap *heap) {
int possibly_interior = 0;
gc_platform_visit_global_conservative_roots
(enqueue_conservative_roots, heap, &possibly_interior);
+ if (heap->roots)
+ gc_tracer_add_root(&heap->tracer, gc_root_heap_conservative_roots(heap));
return 1;
}
return 0;
@@ -869,6 +887,10 @@ gc_safepoint_slow(struct gc_mutator *mut) {
heap_unlock(heap);
}
+int gc_safepoint_signal_number(void) { GC_CRASH(); }
+void gc_safepoint_signal_inhibit(struct gc_mutator *mut) { GC_CRASH(); }
+void gc_safepoint_signal_reallow(struct gc_mutator *mut) { GC_CRASH(); }
+
static enum gc_trace_kind
compute_trace_kind(enum gc_allocation_kind kind) {
if (GC_CONSERVATIVE_TRACE) {
@@ -1274,6 +1296,7 @@ deactivate_mutator(struct gc_heap *heap, struct gc_mutator *mut) {
gc_field_set_writer_release_buffer(&mut->logger);
heap_lock(heap);
heap->inactive_mutator_count++;
+ mut->active = 0;
gc_stack_capture_hot(&mut->stack);
if (all_mutators_stopped(heap))
pthread_cond_signal(&heap->collector_cond);
@@ -1285,15 +1308,42 @@ reactivate_mutator(struct gc_heap *heap, struct gc_mutator *mut) {
heap_lock(heap);
while (mutators_are_stopping(heap))
pthread_cond_wait(&heap->mutator_cond, &heap->lock);
+ mut->active = 1;
heap->inactive_mutator_count--;
heap_unlock(heap);
}
+void gc_deactivate(struct gc_mutator *mut) {
+ GC_ASSERT(mut->active);
+ deactivate_mutator(mutator_heap(mut), mut);
+}
+
+void gc_reactivate(struct gc_mutator *mut) {
+ GC_ASSERT(!mut->active);
+ reactivate_mutator(mutator_heap(mut), mut);
+}
+
void*
-gc_call_without_gc(struct gc_mutator *mut, void* (*f)(void*), void *data) {
+gc_deactivate_for_call(struct gc_mutator *mut,
+ void* (*f)(struct gc_mutator*, void*),
+ void *data) {
struct gc_heap *heap = mutator_heap(mut);
deactivate_mutator(heap, mut);
- void *ret = f(data);
+ void *ret = f(mut, data);
reactivate_mutator(heap, mut);
return ret;
}
+
+void*
+gc_reactivate_for_call(struct gc_mutator *mut,
+ void* (*f)(struct gc_mutator*, void*),
+ void *data) {
+ struct gc_heap *heap = mutator_heap(mut);
+ int reactivate = !mut->active;
+ if (reactivate)
+ reactivate_mutator(heap, mut);
+ void *ret = f(mut, data);
+ if (reactivate)
+ deactivate_mutator(heap, mut);
+ return ret;
+}
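Both mmc (above) and pcc (below) now track an active flag per mutator, so gc_reactivate_for_call only temporarily reactivates when the calling thread really is deactivated and is callable from either state. From a caller's point of view the intended pairing looks roughly like the hypothetical sketch below; gc_allocate and GC_ALLOCATION_TAGGED are used as they appear elsewhere in this tree, and a real embedder would initialize the fresh object's tag before the next safepoint:

/* Hypothetical: a thread parks itself for a long wait but occasionally
   needs heap access via a callback run with the mutator reactivated. */
static void *touch_heap (struct gc_mutator *mut, void *data)
{
  return gc_allocate (mut, 2 * sizeof (void *), GC_ALLOCATION_TAGGED);
}

static void park_thread (struct gc_mutator *mut)
{
  gc_deactivate (mut);              /* collector stops waiting on this thread */
  void *fresh = gc_reactivate_for_call (mut, touch_heap, NULL);
  (void) fresh;
  gc_reactivate (mut);              /* resume normal mutator duties */
}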
diff --git a/libguile/whippet/src/pcc.c b/libguile/whippet/src/pcc.c
index 6b656360c..6902b3412 100644
--- a/libguile/whippet/src/pcc.c
+++ b/libguile/whippet/src/pcc.c
@@ -96,6 +96,7 @@ struct gc_mutator {
void *event_listener_data;
struct gc_mutator *next;
struct gc_mutator *prev;
+ int active;
};
struct gc_trace_worker_data {
@@ -501,6 +502,7 @@ static void add_mutator(struct gc_heap *heap, struct gc_mutator *mut) {
while (mutators_are_stopping(heap))
pthread_cond_wait(&heap->mutator_cond, &heap->lock);
mut->next = mut->prev = NULL;
+ mut->active = 1;
struct gc_mutator *tail = heap->mutators;
if (tail) {
mut->next = tail;
@@ -520,6 +522,7 @@ static void remove_mutator(struct gc_heap *heap, struct gc_mutator *mut) {
mut->heap = NULL;
heap_lock(heap);
heap->mutator_count--;
+ mut->active = 0;
if (mut->next)
mut->next->prev = mut->prev;
if (mut->prev)
@@ -1081,6 +1084,10 @@ void gc_safepoint_slow(struct gc_mutator *mut) {
heap_unlock(heap);
}
+int gc_safepoint_signal_number(void) { GC_CRASH(); }
+void gc_safepoint_signal_inhibit(struct gc_mutator *mut) { GC_CRASH(); }
+void gc_safepoint_signal_reallow(struct gc_mutator *mut) { GC_CRASH(); }
+
struct gc_ephemeron* gc_allocate_ephemeron(struct gc_mutator *mut) {
return gc_allocate(mut, gc_ephemeron_size(), GC_ALLOCATION_TAGGED);
}
@@ -1354,31 +1361,58 @@ void gc_finish_for_thread(struct gc_mutator *mut) {
static void deactivate_mutator(struct gc_heap *heap, struct gc_mutator *mut) {
GC_ASSERT(mut->next == NULL);
+ GC_ASSERT(mut->active);
copy_space_allocator_finish(&mut->allocator, heap_allocation_space(heap));
if (GC_GENERATIONAL)
gc_field_set_writer_release_buffer(mutator_field_logger(mut));
heap_lock(heap);
heap->inactive_mutator_count++;
+ mut->active = 0;
if (all_mutators_stopped(heap))
pthread_cond_signal(&heap->collector_cond);
heap_unlock(heap);
}
static void reactivate_mutator(struct gc_heap *heap, struct gc_mutator *mut) {
+ GC_ASSERT(!mut->active);
heap_lock(heap);
while (mutators_are_stopping(heap))
pthread_cond_wait(&heap->mutator_cond, &heap->lock);
+ mut->active = 1;
heap->inactive_mutator_count--;
maybe_increase_max_active_mutator_count(heap);
heap_unlock(heap);
}
-void* gc_call_without_gc(struct gc_mutator *mut,
- void* (*f)(void*),
- void *data) {
+void gc_deactivate(struct gc_mutator *mut) {
+ GC_ASSERT(mut->active);
+ deactivate_mutator(mutator_heap(mut), mut);
+}
+
+void gc_reactivate(struct gc_mutator *mut) {
+ GC_ASSERT(!mut->active);
+ reactivate_mutator(mutator_heap(mut), mut);
+}
+
+void* gc_deactivate_for_call(struct gc_mutator *mut,
+ void* (*f)(struct gc_mutator*, void*),
+ void *data) {
struct gc_heap *heap = mutator_heap(mut);
deactivate_mutator(heap, mut);
- void *ret = f(data);
+ void *ret = f(mut, data);
reactivate_mutator(heap, mut);
return ret;
}
+
+void* gc_reactivate_for_call(struct gc_mutator *mut,
+ void* (*f)(struct gc_mutator*, void*),
+ void *data) {
+ struct gc_heap *heap = mutator_heap(mut);
+ int reactivate = !mut->active;
+ if (reactivate)
+ reactivate_mutator(heap, mut);
+ void *ret = f(mut, data);
+ if (reactivate)
+ deactivate_mutator(heap, mut);
+ return ret;
+}
diff --git a/libguile/whippet/src/root.h b/libguile/whippet/src/root.h
index 4fc705e61..597e8d840 100644
--- a/libguile/whippet/src/root.h
+++ b/libguile/whippet/src/root.h
@@ -18,6 +18,8 @@ enum gc_root_kind {
GC_ROOT_KIND_RESOLVED_EPHEMERONS,
GC_ROOT_KIND_EDGE,
GC_ROOT_KIND_EDGE_BUFFER,
+ GC_ROOT_KIND_HEAP_CONSERVATIVE_ROOTS,
+ GC_ROOT_KIND_MUTATOR_CONSERVATIVE_ROOTS,
};
struct gc_root {
@@ -78,4 +80,18 @@ gc_root_edge_buffer(struct gc_edge_buffer *buf) {
return ret;
}
+static inline struct gc_root
+gc_root_heap_conservative_roots(struct gc_heap* heap) {
+ struct gc_root ret = { GC_ROOT_KIND_HEAP_CONSERVATIVE_ROOTS };
+ ret.heap = heap;
+ return ret;
+}
+
+static inline struct gc_root
+gc_root_mutator_conservative_roots(struct gc_mutator* mutator) {
+ struct gc_root ret = { GC_ROOT_KIND_MUTATOR_CONSERVATIVE_ROOTS };
+ ret.mutator = mutator;
+ return ret;
+}
+
#endif // ROOT_H
diff --git a/libguile/whippet/src/semi.c b/libguile/whippet/src/semi.c
index 9754b3fa2..a1526cec4 100644
--- a/libguile/whippet/src/semi.c
+++ b/libguile/whippet/src/semi.c
@@ -487,6 +487,9 @@ void gc_write_barrier_slow(struct gc_mutator *mut, struct gc_ref obj,
int* gc_safepoint_flag_loc(struct gc_mutator *mut) { GC_CRASH(); }
void gc_safepoint_slow(struct gc_mutator *mut) { GC_CRASH(); }
+int gc_safepoint_signal_number(void) { GC_CRASH(); }
+void gc_safepoint_signal_inhibit(struct gc_mutator *mut) { GC_CRASH(); }
+void gc_safepoint_signal_reallow(struct gc_mutator *mut) { GC_CRASH(); }
static int collect_for_large_alloc(struct gc_mutator *mut, size_t npages) {
size_t bytes = npages * mutator_semi_space(mut)->page_size;
@@ -761,8 +764,20 @@ struct gc_mutator* gc_init_for_thread(struct gc_stack_addr base,
void gc_finish_for_thread(struct gc_mutator *space) {
}
-void* gc_call_without_gc(struct gc_mutator *mut, void* (*f)(void*),
- void *data) {
+void gc_deactivate(struct gc_mutator *mut) {}
+
+void gc_reactivate(struct gc_mutator *mut) {}
+
+void* gc_deactivate_for_call(struct gc_mutator *mut,
+ void* (*f)(struct gc_mutator *, void*),
+ void *data) {
+  // With no other threads, no collection can happen during this call.
+ return f(mut, data);
+}
+
+void* gc_reactivate_for_call(struct gc_mutator *mut,
+ void* (*f)(struct gc_mutator *mut, void*),
+ void *data) {
   // With no other threads, no collection can happen during this call.
- return f(data);
+ return f(mut, data);
}