author     Andy Wingo <wingo@pobox.com>    2012-02-17 11:47:52 +0100
committer  Andy Wingo <wingo@pobox.com>    2012-02-17 12:09:28 +0100
commit     f609480611cfd1585409fd6b1b90beb730b026cf (patch)
tree       ae054e365a2980bf9836447ba4cd9bf6c0f88e72 /libguile/weak-set.c
parent     58565208bdfe7544f7e4da8762e4c331171f9876 (diff)
download   guile-f609480611cfd1585409fd6b1b90beb730b026cf.tar.gz
with a threaded guile, lock weak sets and tables during a fork
* libguile/weak-set.c (make_weak_set):
* libguile/weak-table.c (make_weak_table): If we have a threaded Guile,
  keep a weak set (table) of weak sets (tables).  Use this and the
  pthread_atfork mechanism to lock and unlock weak sets and weak tables
  during a fork().

* libguile/weak-set.h (scm_weak_set_prehistory): New internal API.

* libguile/init.c: Add call to scm_weak_set_prehistory().
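The change relies on the standard pthread_atfork(3) hooks: a "prepare" handler runs in the parent before fork() and takes every registered lock, and "parent" and "child" handlers run after fork() in each process and release them, so the child never inherits a mutex held by a thread it no longer has. The following minimal sketch is not Guile's code; it illustrates the same pattern with a hypothetical registry of plain pthread mutexes (names such as registered_mutexes and install_fork_handlers are invented for illustration).

/* Sketch only: a fork-safe registry of mutexes using pthread_atfork.
   All identifiers here are hypothetical, not part of Guile's API.  */
#include <pthread.h>

#define MAX_MUTEXES 64

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t *registered_mutexes[MAX_MUTEXES];
static int n_registered;

static void
lock_all_registered (void)        /* "prepare": runs before fork() */
{
  pthread_mutex_lock (&registry_lock);
  for (int i = 0; i < n_registered; i++)
    pthread_mutex_lock (registered_mutexes[i]);
}

static void
unlock_all_registered (void)      /* "parent" and "child": run after fork() */
{
  for (int i = n_registered - 1; i >= 0; i--)
    pthread_mutex_unlock (registered_mutexes[i]);
  pthread_mutex_unlock (&registry_lock);
}

void
register_fork_safe_mutex (pthread_mutex_t *m)
{
  pthread_mutex_lock (&registry_lock);
  if (n_registered < MAX_MUTEXES)
    registered_mutexes[n_registered++] = m;
  pthread_mutex_unlock (&registry_lock);
}

void
install_fork_handlers (void)      /* analogous role to scm_weak_set_prehistory */
{
  pthread_atfork (lock_all_registered,
                  unlock_all_registered,
                  unlock_all_registered);
}

The commit below does the same thing, except that the "registry" is itself a weak set of weak sets, so that sets which become garbage drop out of the registry automatically.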
Diffstat (limited to 'libguile/weak-set.c')
-rw-r--r--  libguile/weak-set.c  108
1 file changed, 95 insertions(+), 13 deletions(-)
diff --git a/libguile/weak-set.c b/libguile/weak-set.c
index 004fedb82..a1ae4ea6b 100644
--- a/libguile/weak-set.c
+++ b/libguile/weak-set.c
@@ -614,6 +614,74 @@ weak_set_remove_x (scm_t_weak_set *set, unsigned long hash,
+
+static void
+lock_weak_set (scm_t_weak_set *set)
+{
+ scm_i_pthread_mutex_lock (&set->lock);
+}
+
+static void
+unlock_weak_set (scm_t_weak_set *set)
+{
+ scm_i_pthread_mutex_unlock (&set->lock);
+}
+
+/* A weak set of weak sets, for use in the pthread_atfork handler. */
+static SCM all_weak_sets = SCM_BOOL_F;
+
+#if SCM_USE_PTHREAD_THREADS
+
+static void
+lock_all_weak_sets (void)
+{
+ scm_t_weak_set *s;
+ scm_t_weak_entry *entries;
+ unsigned long k, size;
+ scm_t_weak_entry copy;
+
+ s = SCM_WEAK_SET (all_weak_sets);
+ lock_weak_set (s);
+ size = s->size;
+ entries = s->entries;
+
+ for (k = 0; k < size; k++)
+ if (entries[k].hash)
+ {
+ copy_weak_entry (&entries[k], &copy);
+ if (copy.key)
+ lock_weak_set (SCM_WEAK_SET (SCM_PACK (copy.key)));
+ }
+}
+
+static void
+unlock_all_weak_sets (void)
+{
+ scm_t_weak_set *s;
+ scm_t_weak_entry *entries;
+ unsigned long k, size;
+ scm_t_weak_entry copy;
+
+ s = SCM_WEAK_SET (all_weak_sets);
+ size = s->size;
+ entries = s->entries;
+
+ for (k = 0; k < size; k++)
+ if (entries[k].hash)
+ {
+ copy_weak_entry (&entries[k], &copy);
+ if (copy.key)
+ unlock_weak_set (SCM_WEAK_SET (SCM_PACK (copy.key)));
+ }
+
+ unlock_weak_set (s);
+}
+
+#endif /* SCM_USE_PTHREAD_THREADS */
+
+
+
+
static SCM
make_weak_set (unsigned long k)
{
@@ -660,7 +728,7 @@ do_vacuum_weak_set (SCM set)
if (scm_i_pthread_mutex_trylock (&s->lock) == 0)
{
vacuum_weak_set (s);
- scm_i_pthread_mutex_unlock (&s->lock);
+ unlock_weak_set (s);
}
return;
@@ -725,6 +793,9 @@ scm_c_make_weak_set (unsigned long k)
scm_c_register_weak_gc_callback (ret, do_vacuum_weak_set);
+ if (scm_is_true (all_weak_sets))
+ scm_weak_set_add_x (all_weak_sets, ret);
+
return ret;
}
@@ -739,12 +810,12 @@ scm_weak_set_clear_x (SCM set)
{
scm_t_weak_set *s = SCM_WEAK_SET (set);
- scm_i_pthread_mutex_lock (&s->lock);
+ lock_weak_set (s);
memset (s->entries, 0, sizeof (scm_t_weak_entry) * s->size);
s->n_items = 0;
- scm_i_pthread_mutex_unlock (&s->lock);
+ unlock_weak_set (s);
return SCM_UNSPECIFIED;
}
@@ -757,11 +828,11 @@ scm_c_weak_set_lookup (SCM set, unsigned long raw_hash,
SCM ret;
scm_t_weak_set *s = SCM_WEAK_SET (set);
- scm_i_pthread_mutex_lock (&s->lock);
+ lock_weak_set (s);
ret = weak_set_lookup (s, raw_hash, pred, closure, dflt);
- scm_i_pthread_mutex_unlock (&s->lock);
+ unlock_weak_set (s);
return ret;
}
@@ -774,11 +845,11 @@ scm_c_weak_set_add_x (SCM set, unsigned long raw_hash,
SCM ret;
scm_t_weak_set *s = SCM_WEAK_SET (set);
- scm_i_pthread_mutex_lock (&s->lock);
+ lock_weak_set (s);
ret = weak_set_add_x (s, raw_hash, pred, closure, obj);
- scm_i_pthread_mutex_unlock (&s->lock);
+ unlock_weak_set (s);
return ret;
}
@@ -790,11 +861,11 @@ scm_c_weak_set_remove_x (SCM set, unsigned long raw_hash,
{
scm_t_weak_set *s = SCM_WEAK_SET (set);
- scm_i_pthread_mutex_lock (&s->lock);
+ lock_weak_set (s);
weak_set_remove_x (s, raw_hash, pred, closure);
- scm_i_pthread_mutex_unlock (&s->lock);
+ unlock_weak_set (s);
}
static int
@@ -829,7 +900,7 @@ scm_c_weak_set_fold (scm_t_set_fold_fn proc, void *closure,
s = SCM_WEAK_SET (set);
- scm_i_pthread_mutex_lock (&s->lock);
+ lock_weak_set (s);
size = s->size;
entries = s->entries;
@@ -845,14 +916,14 @@ scm_c_weak_set_fold (scm_t_set_fold_fn proc, void *closure,
if (copy.key)
{
/* Release set lock while we call the function. */
- scm_i_pthread_mutex_unlock (&s->lock);
+ unlock_weak_set (s);
init = proc (closure, SCM_PACK (copy.key), init);
- scm_i_pthread_mutex_lock (&s->lock);
+ lock_weak_set (s);
}
}
}
- scm_i_pthread_mutex_unlock (&s->lock);
+ unlock_weak_set (s);
return init;
}
@@ -897,6 +968,17 @@ scm_weak_set_map_to_list (SCM proc, SCM set)
}
+
+
+void
+scm_weak_set_prehistory (void)
+{
+#if SCM_USE_PTHREAD_THREADS
+ all_weak_sets = scm_c_make_weak_set (0);
+ pthread_atfork (lock_all_weak_sets, unlock_all_weak_sets, unlock_all_weak_sets);
+#endif
+}
+
void
scm_init_weak_set ()
{