author     Andy Wingo <wingo@pobox.com>  2011-04-11 23:30:52 +0200
committer  Andy Wingo <wingo@pobox.com>  2011-04-11 23:30:52 +0200
commit     21c05db45b69f8a62b84c36abc86cb935fa967d7 (patch)
tree       743df01da540e7fd628f54894e7d5fbe94b03996 /libguile/threads.c
parent     4db853d747ac115f799c93e2de93f5159ad84109 (diff)
parent     c89b45299329d034875429804f18768c1ea96713 (diff)
download   guile-21c05db45b69f8a62b84c36abc86cb935fa967d7.tar.gz
Merge remote branch 'origin/stable-2.0'
Conflicts:
	GUILE-VERSION
	test-suite/tests/srfi-4.test
Diffstat (limited to 'libguile/threads.c')
-rw-r--r--  libguile/threads.c  |  451
1 file changed, 260 insertions(+), 191 deletions(-)
diff --git a/libguile/threads.c b/libguile/threads.c
index e7347ad57..14bda1d2f 100644
--- a/libguile/threads.c
+++ b/libguile/threads.c
@@ -79,6 +79,122 @@ typedef void * (* GC_fn_type) (void *);
#endif
+#ifndef GC_SUCCESS
+#define GC_SUCCESS 0
+#endif
+
+#ifndef GC_UNIMPLEMENTED
+#define GC_UNIMPLEMENTED 3
+#endif
+
+/* Likewise struct GC_stack_base is missing before 7.1. */
+#ifndef HAVE_GC_STACK_BASE
+struct GC_stack_base {
+ void * mem_base; /* Base of memory stack. */
+#ifdef __ia64__
+ void * reg_base; /* Base of separate register stack. */
+#endif
+};
+
+static int
+GC_register_my_thread (struct GC_stack_base *stack_base)
+{
+ return GC_UNIMPLEMENTED;
+}
+
+static void
+GC_unregister_my_thread ()
+{
+}
+
+#if !SCM_USE_PTHREAD_THREADS
+/* No threads; we can just use GC_stackbottom. */
+static void *
+get_thread_stack_base ()
+{
+ return GC_stackbottom;
+}
+
+#elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
+ && defined PTHREAD_ATTR_GETSTACK_WORKS
+/* This method for GNU/Linux and perhaps some other systems.
+ It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
+ available on them. */
+static void *
+get_thread_stack_base ()
+{
+ pthread_attr_t attr;
+ void *start, *end;
+ size_t size;
+
+ pthread_getattr_np (pthread_self (), &attr);
+ pthread_attr_getstack (&attr, &start, &size);
+ end = (char *)start + size;
+
+#if SCM_STACK_GROWS_UP
+ return start;
+#else
+ return end;
+#endif
+}
+
+#elif defined HAVE_PTHREAD_GET_STACKADDR_NP
+/* This method for MacOS X.
+ It'd be nice if there was some documentation on pthread_get_stackaddr_np,
+ but as of 2006 there's nothing obvious at apple.com. */
+static void *
+get_thread_stack_base ()
+{
+ return pthread_get_stackaddr_np (pthread_self ());
+}
+
+#else
+#error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl. Please upgrade to libgc >= 7.1.
+#endif
+
+static int
+GC_get_stack_base (struct GC_stack_base *stack_base)
+{
+ stack_base->mem_base = get_thread_stack_base ();
+#ifdef __ia64__
+ /* Calculate and store off the base of this thread's register
+ backing store (RBS). Unfortunately our implementation(s) of
+ scm_ia64_register_backing_store_base are only reliable for the
+ main thread. For other threads, therefore, find out the current
+ top of the RBS, and use that as a maximum. */
+ stack_base->reg_base = scm_ia64_register_backing_store_base ();
+ {
+ ucontext_t ctx;
+ void *bsp;
+ getcontext (&ctx);
+ bsp = scm_ia64_ar_bsp (&ctx);
+ if (stack_base->reg_base > bsp)
+ stack_base->reg_base = bsp;
+ }
+#endif
+ return GC_SUCCESS;
+}
+
+static void *
+GC_call_with_stack_base(void * (*fn) (struct GC_stack_base*, void*), void *arg)
+{
+ struct GC_stack_base stack_base;
+
+ stack_base.mem_base = (void*)&stack_base;
+#ifdef __ia64__
+ /* FIXME: Untested. */
+ {
+ ucontext_t ctx;
+ getcontext (&ctx);
+ stack_base.reg_base = scm_ia64_ar_bsp (&ctx);
+ }
+#endif
+
+ return fn (&stack_base, arg);
+}
+#endif /* HAVE_GC_STACK_BASE */
+
+
/* Now define with_gc_active and with_gc_inactive. */
#if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))
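[The compatibility block above back-fills the stack-base API that libgc gained in 7.1. For reference, the calling pattern the rest of this patch builds on (see scm_init_guile and scm_i_init_thread_for_guile further down) looks roughly like the sketch below; it is not part of the patch, and it assumes a BDW-GC installation whose header is <gc/gc.h>.]

#include <stdio.h>
#include <stdlib.h>
#include <gc/gc.h>      /* assumed header location; sometimes plain <gc.h> */

/* Register the calling thread with the collector so its stack is
   scanned for roots.  With libgc >= 7.1 these are real library entry
   points; with older collectors the shim above stands in for them.  */
static void
register_this_thread_with_gc (void)
{
  struct GC_stack_base sb;

  if (GC_get_stack_base (&sb) != GC_SUCCESS)
    {
      fprintf (stderr, "cannot determine the stack base of this thread\n");
      exit (1);
    }

  /* Harmless if the thread is already registered.  */
  GC_register_my_thread (&sb);
}
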
@@ -343,6 +459,12 @@ unblock_from_queue (SCM queue)
/* Getting into and out of guile mode.
*/
+/* Key used to attach a cleanup handler to a given thread. Also, if
+ thread-local storage is unavailable, this key is used to retrieve the
+ current thread with `pthread_getspecific ()'. */
+scm_i_pthread_key_t scm_i_thread_key;
+
+
#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
/* When thread-local storage (TLS) is available, a pointer to the
@@ -352,17 +474,7 @@ unblock_from_queue (SCM queue)
represent. */
SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;
-# define SET_CURRENT_THREAD(_t) scm_i_current_thread = (_t)
-
-#else /* !SCM_HAVE_THREAD_STORAGE_CLASS */
-
-/* Key used to retrieve the current thread with `pthread_getspecific ()'. */
-scm_i_pthread_key_t scm_i_thread_key;
-
-# define SET_CURRENT_THREAD(_t) \
- scm_i_pthread_setspecific (scm_i_thread_key, (_t))
-
-#endif /* !SCM_HAVE_THREAD_STORAGE_CLASS */
+#endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
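[The two hunks above make scm_i_thread_key unconditional and drop the SET_CURRENT_THREAD macro: the pthread key is now always set, with a TLS variable as an optional cache. A stripped-down sketch of that scheme, using hypothetical names (HAVE_TLS stands in for SCM_HAVE_THREAD_STORAGE_CLASS); it is illustration only, not part of the patch.]

#include <pthread.h>

static pthread_key_t current_key;      /* authoritative per-thread pointer */
#ifdef HAVE_TLS
static __thread void *current_cache;   /* GCC-style TLS storage class */
#endif

static void
set_current (void *thread)
{
  /* Setting the key also arms its destructor-based cleanup handler.  */
  pthread_setspecific (current_key, thread);
#ifdef HAVE_TLS
  current_cache = thread;              /* fast path for SCM_I_CURRENT_THREAD */
#endif
}
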
@@ -374,67 +486,75 @@ static SCM scm_i_default_dynamic_state;
/* Perform first stage of thread initialisation, in non-guile mode.
*/
static void
-guilify_self_1 (SCM_STACKITEM *base)
-{
- scm_i_thread *t = scm_gc_malloc (sizeof (scm_i_thread), "thread");
-
- t->pthread = scm_i_pthread_self ();
- t->handle = SCM_BOOL_F;
- t->result = SCM_BOOL_F;
- t->cleanup_handler = SCM_BOOL_F;
- t->mutexes = SCM_EOL;
- t->held_mutex = NULL;
- t->join_queue = SCM_EOL;
- t->dynamic_state = SCM_BOOL_F;
- t->dynwinds = SCM_EOL;
- t->active_asyncs = SCM_EOL;
- t->block_asyncs = 1;
- t->pending_asyncs = 1;
- t->critical_section_level = 0;
- t->base = base;
+guilify_self_1 (struct GC_stack_base *base)
+{
+ scm_i_thread t;
+
+ /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
+ before allocating anything in this thread, because allocation could
+ cause GC to run, and GC could cause finalizers, which could invoke
+ Scheme functions, which need the current thread to be set. */
+
+ t.pthread = scm_i_pthread_self ();
+ t.handle = SCM_BOOL_F;
+ t.result = SCM_BOOL_F;
+ t.cleanup_handler = SCM_BOOL_F;
+ t.mutexes = SCM_EOL;
+ t.held_mutex = NULL;
+ t.join_queue = SCM_EOL;
+ t.dynamic_state = SCM_BOOL_F;
+ t.dynwinds = SCM_EOL;
+ t.active_asyncs = SCM_EOL;
+ t.block_asyncs = 1;
+ t.pending_asyncs = 1;
+ t.critical_section_level = 0;
+ t.base = base->mem_base;
#ifdef __ia64__
- /* Calculate and store off the base of this thread's register
- backing store (RBS). Unfortunately our implementation(s) of
- scm_ia64_register_backing_store_base are only reliable for the
- main thread. For other threads, therefore, find out the current
- top of the RBS, and use that as a maximum. */
- t->register_backing_store_base = scm_ia64_register_backing_store_base ();
- {
- ucontext_t ctx;
- void *bsp;
- getcontext (&ctx);
- bsp = scm_ia64_ar_bsp (&ctx);
- if (t->register_backing_store_base > bsp)
- t->register_backing_store_base = bsp;
- }
+ t.register_backing_store_base = base->reg_base;
#endif
- t->continuation_root = SCM_EOL;
- t->continuation_base = base;
- scm_i_pthread_cond_init (&t->sleep_cond, NULL);
- t->sleep_mutex = NULL;
- t->sleep_object = SCM_BOOL_F;
- t->sleep_fd = -1;
-
- if (pipe (t->sleep_pipe) != 0)
+ t.continuation_root = SCM_EOL;
+ t.continuation_base = t.base;
+ scm_i_pthread_cond_init (&t.sleep_cond, NULL);
+ t.sleep_mutex = NULL;
+ t.sleep_object = SCM_BOOL_F;
+ t.sleep_fd = -1;
+
+ if (pipe (t.sleep_pipe) != 0)
/* FIXME: Error conditions during the initialization phase are handled
gracelessly since public functions such as `scm_init_guile ()'
currently have type `void'. */
abort ();
- scm_i_pthread_mutex_init (&t->admin_mutex, NULL);
- t->current_mark_stack_ptr = NULL;
- t->current_mark_stack_limit = NULL;
- t->canceled = 0;
- t->exited = 0;
- t->guile_mode = 0;
+ scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
+ t.current_mark_stack_ptr = NULL;
+ t.current_mark_stack_limit = NULL;
+ t.canceled = 0;
+ t.exited = 0;
+ t.guile_mode = 0;
- SET_CURRENT_THREAD (t);
+ /* The switcheroo. */
+ {
+ scm_i_thread *t_ptr = &t;
+
+ GC_disable ();
+ t_ptr = GC_malloc (sizeof (scm_i_thread));
+ memcpy (t_ptr, &t, sizeof t);
- scm_i_pthread_mutex_lock (&thread_admin_mutex);
- t->next_thread = all_threads;
- all_threads = t;
- thread_count++;
- scm_i_pthread_mutex_unlock (&thread_admin_mutex);
+ scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);
+
+#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
+ /* Cache the current thread in TLS for faster lookup. */
+ scm_i_current_thread = t_ptr;
+#endif
+
+ scm_i_pthread_mutex_lock (&thread_admin_mutex);
+ t_ptr->next_thread = all_threads;
+ all_threads = t_ptr;
+ thread_count++;
+ scm_i_pthread_mutex_unlock (&thread_admin_mutex);
+
+ GC_enable ();
+ }
}
/* Perform second stage of thread initialisation, in guile mode.
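[The rewritten guilify_self_1 fills the whole scm_i_thread descriptor on the C stack and only then, with collection disabled, copies it into GC-managed memory and publishes it through the thread key, so no allocation (and hence no finalizer running Scheme code) can observe a half-initialised current thread. A stripped-down sketch of that pattern, with hypothetical names, for illustration only.]

#include <string.h>
#include <pthread.h>
#include <gc/gc.h>      /* assumed header location */

struct descriptor { int ready; /* ... more fields ... */ };

static pthread_key_t descriptor_key;    /* plays the role of scm_i_thread_key */

static void
publish_descriptor (void)
{
  struct descriptor d;

  d.ready = 1;                  /* initialise everything on the stack first */

  GC_disable ();                /* no collection while we switch over */
  {
    struct descriptor *p = GC_malloc (sizeof *p);
    memcpy (p, &d, sizeof d);
    pthread_setspecific (descriptor_key, p);  /* now visible to GC callbacks */
  }
  GC_enable ();
}
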
@@ -537,6 +657,15 @@ do_thread_exit (void *v)
return NULL;
}
+static void *
+do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
+{
+ /* Won't hurt if we are already registered. */
+ GC_register_my_thread (sb);
+
+ return scm_with_guile (do_thread_exit, v);
+}
+
static void
on_thread_exit (void *v)
{
@@ -551,19 +680,18 @@ on_thread_exit (void *v)
t->held_mutex = NULL;
}
- SET_CURRENT_THREAD (v);
+ /* Reinstate the current thread for purposes of scm_with_guile
+ guile-mode cleanup handlers. Only really needed in the non-TLS
+ case but it doesn't hurt to be consistent. */
+ scm_i_pthread_setspecific (scm_i_thread_key, t);
/* Ensure the signal handling thread has been launched, because we might be
shutting it down. */
scm_i_ensure_signal_delivery_thread ();
- /* Unblocking the joining threads needs to happen in guile mode
- since the queue is a SCM data structure. */
-
- /* Note: Since `do_thread_exit ()' uses allocates memory via `libgc', we
- assume the GC is usable at this point, and notably that thread-local
- storage (TLS) hasn't been deallocated yet. */
- do_thread_exit (v);
+ /* Scheme-level thread finalizers and other cleanup needs to happen in
+ guile mode. */
+ GC_call_with_stack_base (do_thread_exit_trampoline, t);
/* Removing ourself from the list of all threads needs to happen in
non-guile mode since all SCM values on our stack become
@@ -590,21 +718,21 @@ on_thread_exit (void *v)
scm_i_pthread_mutex_unlock (&thread_admin_mutex);
- SET_CURRENT_THREAD (NULL);
-}
+ scm_i_pthread_setspecific (scm_i_thread_key, NULL);
-#ifndef SCM_HAVE_THREAD_STORAGE_CLASS
+#if !SCM_USE_NULL_THREADS
+ GC_unregister_my_thread ();
+#endif
+}
static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
static void
init_thread_key (void)
{
- scm_i_pthread_key_create (&scm_i_thread_key, NULL);
+ scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}
-#endif
-
/* Perform any initializations necessary to make the current thread
known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
if necessary.
@@ -623,11 +751,9 @@ init_thread_key (void)
be sure. New threads are put into guile mode implicitly. */
static int
-scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
+scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
{
-#ifndef SCM_HAVE_THREAD_STORAGE_CLASS
scm_i_pthread_once (&init_thread_key_once, init_thread_key);
-#endif
if (SCM_I_CURRENT_THREAD)
{
@@ -647,6 +773,12 @@ scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
initialization.
*/
scm_i_init_guile (base);
+
+#ifdef HAVE_GC_ALLOW_REGISTER_THREADS
+ /* Allow other threads to come in later. */
+ GC_allow_register_threads ();
+#endif
+
scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
}
else
@@ -655,6 +787,10 @@ scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
the first time. Only initialize this thread.
*/
scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
+
+ /* Register this thread with libgc. */
+ GC_register_my_thread (base);
+
guilify_self_1 (base);
guilify_self_2 (parent);
}
@@ -662,137 +798,58 @@ scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
}
}
-#if SCM_USE_PTHREAD_THREADS
-
-#if defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP
-/* This method for GNU/Linux and perhaps some other systems.
- It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
- available on them. */
-#define HAVE_GET_THREAD_STACK_BASE
-
-static SCM_STACKITEM *
-get_thread_stack_base ()
+void
+scm_init_guile ()
{
- pthread_attr_t attr;
- void *start, *end;
- size_t size;
-
- pthread_getattr_np (pthread_self (), &attr);
- pthread_attr_getstack (&attr, &start, &size);
- end = (char *)start + size;
-
- /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
- for the main thread, but we can use scm_get_stack_base in that
- case.
- */
-
-#ifndef PTHREAD_ATTR_GETSTACK_WORKS
- if ((void *)&attr < start || (void *)&attr >= end)
- return (SCM_STACKITEM *) GC_stackbottom;
+ struct GC_stack_base stack_base;
+
+ if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
+ scm_i_init_thread_for_guile (&stack_base,
+ scm_i_default_dynamic_state);
else
-#endif
{
-#if SCM_STACK_GROWS_UP
- return start;
-#else
- return end;
-#endif
+ fprintf (stderr, "Failed to get stack base for current thread.\n");
+ exit (1);
}
}
-#elif defined HAVE_PTHREAD_GET_STACKADDR_NP
-/* This method for MacOS X.
- It'd be nice if there was some documentation on pthread_get_stackaddr_np,
- but as of 2006 there's nothing obvious at apple.com. */
-#define HAVE_GET_THREAD_STACK_BASE
-static SCM_STACKITEM *
-get_thread_stack_base ()
-{
- return pthread_get_stackaddr_np (pthread_self ());
-}
-
-#elif defined (__MINGW32__)
-/* This method for mingw. In mingw the basic scm_get_stack_base can be used
- in any thread. We don't like hard-coding the name of a system, but there
- doesn't seem to be a cleaner way of knowing scm_get_stack_base can
- work. */
-#define HAVE_GET_THREAD_STACK_BASE
-static SCM_STACKITEM *
-get_thread_stack_base ()
-{
- return (SCM_STACKITEM *) GC_stackbottom;
-}
-
-#endif /* pthread methods of get_thread_stack_base */
-
-#else /* !SCM_USE_PTHREAD_THREADS */
-
-#define HAVE_GET_THREAD_STACK_BASE
-
-static SCM_STACKITEM *
-get_thread_stack_base ()
-{
- return (SCM_STACKITEM *) GC_stackbottom;
-}
-
-#endif /* !SCM_USE_PTHREAD_THREADS */
-
-#ifdef HAVE_GET_THREAD_STACK_BASE
-
-void
-scm_init_guile ()
-{
- scm_i_init_thread_for_guile (get_thread_stack_base (),
- scm_i_default_dynamic_state);
-}
-
-#endif
-
-void *
-scm_with_guile (void *(*func)(void *), void *data)
-{
- return scm_i_with_guile_and_parent (func, data,
- scm_i_default_dynamic_state);
-}
-
SCM_UNUSED static void
scm_leave_guile_cleanup (void *x)
{
on_thread_exit (SCM_I_CURRENT_THREAD);
}
-struct with_guile_trampoline_args
+struct with_guile_args
{
GC_fn_type func;
void *data;
+ SCM parent;
};
static void *
with_guile_trampoline (void *data)
{
- struct with_guile_trampoline_args *args = data;
+ struct with_guile_args *args = data;
return scm_c_with_continuation_barrier (args->func, args->data);
}
-void *
-scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
+static void *
+with_guile_and_parent (struct GC_stack_base *base, void *data)
{
void *res;
int new_thread;
scm_i_thread *t;
- SCM_STACKITEM base_item;
+ struct with_guile_args *args = data;
- new_thread = scm_i_init_thread_for_guile (&base_item, parent);
+ new_thread = scm_i_init_thread_for_guile (base, args->parent);
t = SCM_I_CURRENT_THREAD;
if (new_thread)
{
/* We are in Guile mode. */
assert (t->guile_mode);
- scm_i_pthread_cleanup_push (scm_leave_guile_cleanup, NULL);
- res = scm_c_with_continuation_barrier (func, data);
- scm_i_pthread_cleanup_pop (0);
+ res = scm_c_with_continuation_barrier (args->func, args->data);
/* Leave Guile mode. */
t->guile_mode = 0;
@@ -800,14 +857,10 @@ scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
else if (t->guile_mode)
{
/* Already in Guile mode. */
- res = scm_c_with_continuation_barrier (func, data);
+ res = scm_c_with_continuation_barrier (args->func, args->data);
}
else
{
- struct with_guile_trampoline_args args;
- args.func = func;
- args.data = data;
-
/* We are not in Guile mode, either because we are not within a
scm_with_guile, or because we are within a scm_without_guile.
@@ -816,20 +869,39 @@ scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
when this thread was first guilified. Thus, `base' must be
updated. */
#if SCM_STACK_GROWS_UP
- if (SCM_STACK_PTR (&base_item) < t->base)
- t->base = SCM_STACK_PTR (&base_item);
+ if (SCM_STACK_PTR (base->mem_base) < t->base)
+ t->base = SCM_STACK_PTR (base->mem_base);
#else
- if (SCM_STACK_PTR (&base_item) > t->base)
- t->base = SCM_STACK_PTR (&base_item);
+ if (SCM_STACK_PTR (base->mem_base) > t->base)
+ t->base = SCM_STACK_PTR (base->mem_base);
#endif
t->guile_mode = 1;
- res = with_gc_active (with_guile_trampoline, &args);
+ res = with_gc_active (with_guile_trampoline, args);
t->guile_mode = 0;
}
return res;
}
+void *
+scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
+{
+ struct with_guile_args args;
+
+ args.func = func;
+ args.data = data;
+ args.parent = parent;
+
+ return GC_call_with_stack_base (with_guile_and_parent, &args);
+}
+
+void *
+scm_with_guile (void *(*func)(void *), void *data)
+{
+ return scm_i_with_guile_and_parent (func, data,
+ scm_i_default_dynamic_state);
+}
+
void *
scm_without_guile (void *(*func)(void *), void *data)
{
@@ -880,9 +952,6 @@ really_launch (void *d)
else
t->result = scm_catch (SCM_BOOL_T, thunk, handler);
- /* Trigger a call to `on_thread_exit ()'. */
- pthread_exit (NULL);
-
return 0;
}
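[The explicit pthread_exit above can go because on_thread_exit is now installed as the destructor of scm_i_thread_key (see init_thread_key earlier in this diff): POSIX runs a key's destructor automatically when a thread terminates with a non-NULL value bound to the key. A self-contained sketch of that mechanism, independent of Guile.]

#include <pthread.h>
#include <stdio.h>

static pthread_key_t key;

static void
at_thread_exit (void *value)     /* invoked automatically by the pthread runtime */
{
  printf ("cleaning up %s\n", (const char *) value);
}

static void *
worker (void *name)
{
  pthread_setspecific (key, name);
  return NULL;                   /* returning is enough to fire the destructor */
}

int
main (void)
{
  pthread_t tid;

  pthread_key_create (&key, at_thread_exit);
  pthread_create (&tid, NULL, worker, (void *) "worker-1");
  pthread_join (tid, NULL);
  return 0;
}
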
@@ -1965,7 +2034,7 @@ pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif
void
-scm_threads_prehistory (SCM_STACKITEM *base)
+scm_threads_prehistory (void *base)
{
#if SCM_USE_PTHREAD_THREADS
pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
@@ -1978,7 +2047,7 @@ scm_threads_prehistory (SCM_STACKITEM *base)
scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
scm_i_pthread_cond_init (&wake_up_cond, NULL);
- guilify_self_1 (base);
+ guilify_self_1 ((struct GC_stack_base *) base);
}
scm_t_bits scm_tc16_thread;
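
[Finally, the entry points reworked by this patch are the ones an embedding program calls to put a thread into guile mode. A minimal example against the public libguile API, assuming a Guile 2.0 installation (build with something like `pkg-config --cflags --libs guile-2.0`).]

#include <libguile.h>

static void *
in_guile_mode (void *data)
{
  /* Any libguile call is safe here: scm_with_guile has registered the
     calling thread with the GC and switched it into guile mode.  */
  scm_c_eval_string ("(display \"hello from guile mode\") (newline)");
  return NULL;
}

int
main (int argc, char **argv)
{
  scm_with_guile (in_guile_mode, NULL);
  return 0;
}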